summaryrefslogtreecommitdiffstats
path: root/chromium/sync
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/sync')
-rw-r--r--chromium/sync/DEPS14
-rw-r--r--chromium/sync/OWNERS11
-rw-r--r--chromium/sync/PRESUBMIT.py16
-rw-r--r--chromium/sync/api/DEPS7
-rw-r--r--chromium/sync/api/fake_syncable_service.cc64
-rw-r--r--chromium/sync/api/fake_syncable_service.h51
-rw-r--r--chromium/sync/api/string_ordinal.h47
-rw-r--r--chromium/sync/api/sync_change.cc86
-rw-r--r--chromium/sync/api/sync_change.h78
-rw-r--r--chromium/sync/api/sync_change_processor.cc13
-rw-r--r--chromium/sync/api/sync_change_processor.h53
-rw-r--r--chromium/sync/api/sync_change_unittest.cc139
-rw-r--r--chromium/sync/api/sync_data.cc153
-rw-r--r--chromium/sync/api/sync_data.h138
-rw-r--r--chromium/sync/api/sync_error.cc140
-rw-r--r--chromium/sync/api/sync_error.h107
-rw-r--r--chromium/sync/api/sync_error_factory.cc15
-rw-r--r--chromium/sync/api/sync_error_factory.h29
-rw-r--r--chromium/sync/api/sync_error_factory_mock.cc15
-rw-r--r--chromium/sync/api/sync_error_factory_mock.h26
-rw-r--r--chromium/sync/api/sync_error_unittest.cc137
-rw-r--r--chromium/sync/api/sync_merge_result.cc86
-rw-r--r--chromium/sync/api/sync_merge_result.h78
-rw-r--r--chromium/sync/api/sync_merge_result_unittest.cc77
-rw-r--r--chromium/sync/api/syncable_service.cc11
-rw-r--r--chromium/sync/api/syncable_service.h74
-rw-r--r--chromium/sync/api/time.h13
-rw-r--r--chromium/sync/base/DEPS2
-rw-r--r--chromium/sync/base/sync_export.h40
-rw-r--r--chromium/sync/engine/DEPS14
-rw-r--r--chromium/sync/engine/all_status.cc207
-rw-r--r--chromium/sync/engine/all_status.h86
-rw-r--r--chromium/sync/engine/apply_control_data_updates.cc221
-rw-r--r--chromium/sync/engine/apply_control_data_updates.h30
-rw-r--r--chromium/sync/engine/apply_control_data_updates_unittest.cc920
-rw-r--r--chromium/sync/engine/backoff_delay_provider.cc115
-rw-r--r--chromium/sync/engine/backoff_delay_provider.h56
-rw-r--r--chromium/sync/engine/backoff_delay_provider_unittest.cc119
-rw-r--r--chromium/sync/engine/commit.cc180
-rw-r--r--chromium/sync/engine/commit.h81
-rw-r--r--chromium/sync/engine/commit_util.cc440
-rw-r--r--chromium/sync/engine/commit_util.h64
-rw-r--r--chromium/sync/engine/conflict_resolver.cc237
-rw-r--r--chromium/sync/engine/conflict_resolver.h69
-rw-r--r--chromium/sync/engine/conflict_util.cc54
-rw-r--r--chromium/sync/engine/conflict_util.h34
-rw-r--r--chromium/sync/engine/download.cc426
-rw-r--r--chromium/sync/engine/download.h103
-rw-r--r--chromium/sync/engine/download_unittest.cc306
-rw-r--r--chromium/sync/engine/get_commit_ids.cc508
-rw-r--r--chromium/sync/engine/get_commit_ids.h42
-rw-r--r--chromium/sync/engine/net/DEPS3
-rw-r--r--chromium/sync/engine/net/server_connection_manager.cc396
-rw-r--r--chromium/sync/engine/net/server_connection_manager.h348
-rw-r--r--chromium/sync/engine/net/url_translator.cc57
-rw-r--r--chromium/sync/engine/net/url_translator.h27
-rw-r--r--chromium/sync/engine/nudge_source.cc26
-rw-r--r--chromium/sync/engine/nudge_source.h24
-rw-r--r--chromium/sync/engine/process_updates_util.cc329
-rw-r--r--chromium/sync/engine/process_updates_util.h73
-rw-r--r--chromium/sync/engine/sync_directory_commit_contribution.cc164
-rw-r--r--chromium/sync/engine/sync_directory_commit_contribution.h102
-rw-r--r--chromium/sync/engine/sync_directory_commit_contribution_unittest.cc235
-rw-r--r--chromium/sync/engine/sync_directory_commit_contributor.cc24
-rw-r--r--chromium/sync/engine/sync_directory_commit_contributor.h45
-rw-r--r--chromium/sync/engine/sync_directory_update_handler.cc148
-rw-r--r--chromium/sync/engine/sync_directory_update_handler.h97
-rw-r--r--chromium/sync/engine/sync_directory_update_handler_unittest.cc826
-rw-r--r--chromium/sync/engine/sync_engine_event.cc14
-rw-r--r--chromium/sync/engine/sync_engine_event.h83
-rw-r--r--chromium/sync/engine/sync_scheduler.cc12
-rw-r--r--chromium/sync/engine/sync_scheduler.h135
-rw-r--r--chromium/sync/engine/sync_scheduler_impl.cc911
-rw-r--r--chromium/sync/engine/sync_scheduler_impl.h336
-rw-r--r--chromium/sync/engine/sync_scheduler_unittest.cc1299
-rw-r--r--chromium/sync/engine/syncer.cc203
-rw-r--r--chromium/sync/engine/syncer.h118
-rw-r--r--chromium/sync/engine/syncer_proto_util.cc582
-rw-r--r--chromium/sync/engine/syncer_proto_util.h149
-rw-r--r--chromium/sync/engine/syncer_proto_util_unittest.cc314
-rw-r--r--chromium/sync/engine/syncer_types.h66
-rw-r--r--chromium/sync/engine/syncer_unittest.cc4811
-rw-r--r--chromium/sync/engine/syncer_util.cc650
-rw-r--r--chromium/sync/engine/syncer_util.h112
-rw-r--r--chromium/sync/engine/traffic_logger.cc46
-rw-r--r--chromium/sync/engine/traffic_logger.h25
-rw-r--r--chromium/sync/engine/traffic_recorder.cc143
-rw-r--r--chromium/sync/engine/traffic_recorder.h86
-rw-r--r--chromium/sync/engine/traffic_recorder_unittest.cc123
-rw-r--r--chromium/sync/engine/update_applicator.cc97
-rw-r--r--chromium/sync/engine/update_applicator.h78
-rw-r--r--chromium/sync/internal_api/DEPS13
-rw-r--r--chromium/sync/internal_api/README32
-rw-r--r--chromium/sync/internal_api/base_node.cc361
-rw-r--r--chromium/sync/internal_api/base_transaction.cc32
-rw-r--r--chromium/sync/internal_api/change_record.cc67
-rw-r--r--chromium/sync/internal_api/change_reorder_buffer.cc217
-rw-r--r--chromium/sync/internal_api/change_reorder_buffer.h91
-rw-r--r--chromium/sync/internal_api/debug_info_event_listener.cc270
-rw-r--r--chromium/sync/internal_api/debug_info_event_listener.h122
-rw-r--r--chromium/sync/internal_api/debug_info_event_listener_unittest.cc61
-rw-r--r--chromium/sync/internal_api/delete_journal.cc58
-rw-r--r--chromium/sync/internal_api/http_bridge.cc392
-rw-r--r--chromium/sync/internal_api/http_bridge_network_resources.cc29
-rw-r--r--chromium/sync/internal_api/http_bridge_unittest.cc520
-rw-r--r--chromium/sync/internal_api/internal_components_factory_impl.cc73
-rw-r--r--chromium/sync/internal_api/js_mutation_event_observer.cc111
-rw-r--r--chromium/sync/internal_api/js_mutation_event_observer.h71
-rw-r--r--chromium/sync/internal_api/js_mutation_event_observer_unittest.cc118
-rw-r--r--chromium/sync/internal_api/js_sync_encryption_handler_observer.cc128
-rw-r--r--chromium/sync/internal_api/js_sync_encryption_handler_observer.h64
-rw-r--r--chromium/sync/internal_api/js_sync_encryption_handler_observer_unittest.cc173
-rw-r--r--chromium/sync/internal_api/js_sync_manager_observer.cc99
-rw-r--r--chromium/sync/internal_api/js_sync_manager_observer.h58
-rw-r--r--chromium/sync/internal_api/js_sync_manager_observer_unittest.cc133
-rw-r--r--chromium/sync/internal_api/public/DEPS10
-rw-r--r--chromium/sync/internal_api/public/base/DEPS12
-rw-r--r--chromium/sync/internal_api/public/base/ack_handle.cc67
-rw-r--r--chromium/sync/internal_api/public/base/ack_handle.h47
-rw-r--r--chromium/sync/internal_api/public/base/cancelation_observer.cc13
-rw-r--r--chromium/sync/internal_api/public/base/cancelation_observer.h25
-rw-r--r--chromium/sync/internal_api/public/base/cancelation_signal.cc52
-rw-r--r--chromium/sync/internal_api/public/base/cancelation_signal.h72
-rw-r--r--chromium/sync/internal_api/public/base/cancelation_signal_unittest.cc169
-rw-r--r--chromium/sync/internal_api/public/base/enum_set.h285
-rw-r--r--chromium/sync/internal_api/public/base/enum_set_unittest.cc195
-rw-r--r--chromium/sync/internal_api/public/base/invalidation.cc182
-rw-r--r--chromium/sync/internal_api/public/base/invalidation.h125
-rw-r--r--chromium/sync/internal_api/public/base/invalidation_test_util.cc121
-rw-r--r--chromium/sync/internal_api/public/base/invalidation_test_util.h26
-rw-r--r--chromium/sync/internal_api/public/base/model_type.h312
-rw-r--r--chromium/sync/internal_api/public/base/model_type_test_util.cc65
-rw-r--r--chromium/sync/internal_api/public/base/model_type_test_util.h34
-rw-r--r--chromium/sync/internal_api/public/base/node_ordinal.cc47
-rw-r--r--chromium/sync/internal_api/public/base/node_ordinal.h56
-rw-r--r--chromium/sync/internal_api/public/base/node_ordinal_unittest.cc125
-rw-r--r--chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc128
-rw-r--r--chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h20
-rw-r--r--chromium/sync/internal_api/public/base/ordinal.h486
-rw-r--r--chromium/sync/internal_api/public/base/ordinal_unittest.cc376
-rw-r--r--chromium/sync/internal_api/public/base/progress_marker_map.cc26
-rw-r--r--chromium/sync/internal_api/public/base/progress_marker_map.h35
-rw-r--r--chromium/sync/internal_api/public/base/unique_position.cc615
-rw-r--r--chromium/sync/internal_api/public/base/unique_position.h140
-rw-r--r--chromium/sync/internal_api/public/base/unique_position_unittest.cc680
-rw-r--r--chromium/sync/internal_api/public/base_node.h275
-rw-r--r--chromium/sync/internal_api/public/base_transaction.h60
-rw-r--r--chromium/sync/internal_api/public/change_record.h68
-rw-r--r--chromium/sync/internal_api/public/change_record_unittest.cc137
-rw-r--r--chromium/sync/internal_api/public/configure_reason.h39
-rw-r--r--chromium/sync/internal_api/public/data_type_association_stats.cc28
-rw-r--r--chromium/sync/internal_api/public/data_type_association_stats.h52
-rw-r--r--chromium/sync/internal_api/public/data_type_debug_info_listener.cc15
-rw-r--r--chromium/sync/internal_api/public/data_type_debug_info_listener.h50
-rw-r--r--chromium/sync/internal_api/public/delete_journal.h44
-rw-r--r--chromium/sync/internal_api/public/engine/model_safe_worker.cc175
-rw-r--r--chromium/sync/internal_api/public/engine/model_safe_worker.h160
-rw-r--r--chromium/sync/internal_api/public/engine/model_safe_worker_unittest.cc52
-rw-r--r--chromium/sync/internal_api/public/engine/passive_model_worker.cc36
-rw-r--r--chromium/sync/internal_api/public/engine/passive_model_worker.h42
-rw-r--r--chromium/sync/internal_api/public/engine/polling_constants.cc40
-rw-r--r--chromium/sync/internal_api/public/engine/polling_constants.h25
-rw-r--r--chromium/sync/internal_api/public/engine/sync_status.cc44
-rw-r--r--chromium/sync/internal_api/public/engine/sync_status.h112
-rw-r--r--chromium/sync/internal_api/public/http_bridge.h269
-rw-r--r--chromium/sync/internal_api/public/http_bridge_network_resources.h35
-rw-r--r--chromium/sync/internal_api/public/http_post_provider_factory.h39
-rw-r--r--chromium/sync/internal_api/public/http_post_provider_interface.h64
-rw-r--r--chromium/sync/internal_api/public/internal_components_factory.h104
-rw-r--r--chromium/sync/internal_api/public/internal_components_factory_impl.h51
-rw-r--r--chromium/sync/internal_api/public/network_resources.h33
-rw-r--r--chromium/sync/internal_api/public/network_time_update_callback.h28
-rw-r--r--chromium/sync/internal_api/public/read_node.h66
-rw-r--r--chromium/sync/internal_api/public/read_transaction.h52
-rw-r--r--chromium/sync/internal_api/public/sessions/model_neutral_state.cc40
-rw-r--r--chromium/sync/internal_api/public/sessions/model_neutral_state.h75
-rw-r--r--chromium/sync/internal_api/public/sessions/sync_session_snapshot.cc171
-rw-r--r--chromium/sync/internal_api/public/sessions/sync_session_snapshot.h96
-rw-r--r--chromium/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc90
-rw-r--r--chromium/sync/internal_api/public/sync_encryption_handler.cc23
-rw-r--r--chromium/sync/internal_api/public/sync_encryption_handler.h177
-rw-r--r--chromium/sync/internal_api/public/sync_manager.cc19
-rw-r--r--chromium/sync/internal_api/public/sync_manager.h425
-rw-r--r--chromium/sync/internal_api/public/sync_manager_factory.h31
-rw-r--r--chromium/sync/internal_api/public/user_share.h33
-rw-r--r--chromium/sync/internal_api/public/util/experiments.h35
-rw-r--r--chromium/sync/internal_api/public/util/immutable.h260
-rw-r--r--chromium/sync/internal_api/public/util/immutable_unittest.cc250
-rw-r--r--chromium/sync/internal_api/public/util/report_unrecoverable_error_function.h18
-rw-r--r--chromium/sync/internal_api/public/util/sync_string_conversions.cc57
-rw-r--r--chromium/sync/internal_api/public/util/sync_string_conversions.h25
-rw-r--r--chromium/sync/internal_api/public/util/syncer_error.cc44
-rw-r--r--chromium/sync/internal_api/public/util/syncer_error.h47
-rw-r--r--chromium/sync/internal_api/public/util/unrecoverable_error_handler.h27
-rw-r--r--chromium/sync/internal_api/public/util/unrecoverable_error_info.cc44
-rw-r--r--chromium/sync/internal_api/public/util/unrecoverable_error_info.h40
-rw-r--r--chromium/sync/internal_api/public/util/weak_handle.cc36
-rw-r--r--chromium/sync/internal_api/public/util/weak_handle.h379
-rw-r--r--chromium/sync/internal_api/public/util/weak_handle_unittest.cc326
-rw-r--r--chromium/sync/internal_api/public/write_node.h215
-rw-r--r--chromium/sync/internal_api/public/write_transaction.h64
-rw-r--r--chromium/sync/internal_api/read_node.cc97
-rw-r--r--chromium/sync/internal_api/read_transaction.cc43
-rw-r--r--chromium/sync/internal_api/sync_encryption_handler_impl.cc1651
-rw-r--r--chromium/sync/internal_api/sync_encryption_handler_impl.h315
-rw-r--r--chromium/sync/internal_api/sync_encryption_handler_impl_unittest.cc2282
-rw-r--r--chromium/sync/internal_api/sync_manager_factory.cc22
-rw-r--r--chromium/sync/internal_api/sync_manager_impl.cc1347
-rw-r--r--chromium/sync/internal_api/sync_manager_impl.h379
-rw-r--r--chromium/sync/internal_api/sync_manager_impl_unittest.cc3520
-rw-r--r--chromium/sync/internal_api/syncapi_internal.cc85
-rw-r--r--chromium/sync/internal_api/syncapi_internal.h38
-rw-r--r--chromium/sync/internal_api/syncapi_server_connection_manager.cc104
-rw-r--r--chromium/sync/internal_api/syncapi_server_connection_manager.h79
-rw-r--r--chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc134
-rw-r--r--chromium/sync/internal_api/user_share.cc15
-rw-r--r--chromium/sync/internal_api/write_node.cc522
-rw-r--r--chromium/sync/internal_api/write_transaction.cc39
-rw-r--r--chromium/sync/js/DEPS5
-rw-r--r--chromium/sync/js/README.js48
-rw-r--r--chromium/sync/js/js_arg_list.cc27
-rw-r--r--chromium/sync/js/js_arg_list.h44
-rw-r--r--chromium/sync/js/js_arg_list_unittest.cc40
-rw-r--r--chromium/sync/js/js_backend.h42
-rw-r--r--chromium/sync/js/js_controller.h51
-rw-r--r--chromium/sync/js/js_event_details.cc28
-rw-r--r--chromium/sync/js/js_event_details.h46
-rw-r--r--chromium/sync/js/js_event_details_unittest.cc36
-rw-r--r--chromium/sync/js/js_event_handler.h31
-rw-r--r--chromium/sync/js/js_reply_handler.h29
-rw-r--r--chromium/sync/js/js_test_util.cc137
-rw-r--r--chromium/sync/js/js_test_util.h108
-rw-r--r--chromium/sync/js/sync_js_controller.cc83
-rw-r--r--chromium/sync/js/sync_js_controller.h81
-rw-r--r--chromium/sync/js/sync_js_controller_unittest.cc147
-rw-r--r--chromium/sync/notifier/DEPS19
-rw-r--r--chromium/sync/notifier/ack_handler.cc15
-rw-r--r--chromium/sync/notifier/ack_handler.h42
-rw-r--r--chromium/sync/notifier/dropped_invalidation_tracker.cc42
-rw-r--r--chromium/sync/notifier/dropped_invalidation_tracker.h67
-rw-r--r--chromium/sync/notifier/fake_invalidation_handler.cc38
-rw-r--r--chromium/sync/notifier/fake_invalidation_handler.h41
-rw-r--r--chromium/sync/notifier/fake_invalidation_state_tracker.cc55
-rw-r--r--chromium/sync/notifier/fake_invalidation_state_tracker.h42
-rw-r--r--chromium/sync/notifier/fake_invalidator.cc64
-rw-r--r--chromium/sync/notifier/fake_invalidator.h49
-rw-r--r--chromium/sync/notifier/fake_invalidator_unittest.cc63
-rw-r--r--chromium/sync/notifier/invalidation_handler.h32
-rw-r--r--chromium/sync/notifier/invalidation_notifier.cc87
-rw-r--r--chromium/sync/notifier/invalidation_notifier.h105
-rw-r--r--chromium/sync/notifier/invalidation_notifier_unittest.cc87
-rw-r--r--chromium/sync/notifier/invalidation_state_tracker.h65
-rw-r--r--chromium/sync/notifier/invalidation_util.cc131
-rw-r--r--chromium/sync/notifier/invalidation_util.h73
-rw-r--r--chromium/sync/notifier/invalidator.h84
-rw-r--r--chromium/sync/notifier/invalidator_registrar.cc137
-rw-r--r--chromium/sync/notifier/invalidator_registrar.h92
-rw-r--r--chromium/sync/notifier/invalidator_registrar_unittest.cc158
-rw-r--r--chromium/sync/notifier/invalidator_state.cc55
-rw-r--r--chromium/sync/notifier/invalidator_state.h38
-rw-r--r--chromium/sync/notifier/invalidator_test_template.cc28
-rw-r--r--chromium/sync/notifier/invalidator_test_template.h377
-rw-r--r--chromium/sync/notifier/mock_ack_handler.cc85
-rw-r--r--chromium/sync/notifier/mock_ack_handler.h64
-rw-r--r--chromium/sync/notifier/non_blocking_invalidator.cc219
-rw-r--r--chromium/sync/notifier/non_blocking_invalidator.h80
-rw-r--r--chromium/sync/notifier/non_blocking_invalidator_unittest.cc100
-rw-r--r--chromium/sync/notifier/object_id_invalidation_map.cc121
-rw-r--r--chromium/sync/notifier/object_id_invalidation_map.h76
-rw-r--r--chromium/sync/notifier/object_id_invalidation_map_unittest.cc104
-rw-r--r--chromium/sync/notifier/p2p_invalidator.cc291
-rw-r--r--chromium/sync/notifier/p2p_invalidator.h147
-rw-r--r--chromium/sync/notifier/p2p_invalidator_unittest.cc355
-rw-r--r--chromium/sync/notifier/push_client_channel.cc65
-rw-r--r--chromium/sync/notifier/push_client_channel.h58
-rw-r--r--chromium/sync/notifier/push_client_channel_unittest.cc118
-rw-r--r--chromium/sync/notifier/registration_manager.cc305
-rw-r--r--chromium/sync/notifier/registration_manager.h187
-rw-r--r--chromium/sync/notifier/registration_manager_unittest.cc433
-rw-r--r--chromium/sync/notifier/single_object_invalidation_set.cc111
-rw-r--r--chromium/sync/notifier/single_object_invalidation_set.h63
-rw-r--r--chromium/sync/notifier/single_object_invalidation_set_unittest.cc110
-rw-r--r--chromium/sync/notifier/state_writer.h25
-rw-r--r--chromium/sync/notifier/sync_invalidation_listener.cc420
-rw-r--r--chromium/sync/notifier/sync_invalidation_listener.h191
-rw-r--r--chromium/sync/notifier/sync_invalidation_listener_unittest.cc1127
-rw-r--r--chromium/sync/notifier/sync_system_resources.cc383
-rw-r--r--chromium/sync/notifier/sync_system_resources.h243
-rw-r--r--chromium/sync/notifier/sync_system_resources_unittest.cc403
-rw-r--r--chromium/sync/notifier/unacked_invalidation_set.cc204
-rw-r--r--chromium/sync/notifier/unacked_invalidation_set.h117
-rw-r--r--chromium/sync/notifier/unacked_invalidation_set_test_util.cc181
-rw-r--r--chromium/sync/notifier/unacked_invalidation_set_test_util.h25
-rw-r--r--chromium/sync/notifier/unacked_invalidation_set_unittest.cc219
-rw-r--r--chromium/sync/protocol/DEPS4
-rw-r--r--chromium/sync/protocol/app_list_specifics.proto49
-rw-r--r--chromium/sync/protocol/app_notification_specifics.proto44
-rw-r--r--chromium/sync/protocol/app_setting_specifics.proto25
-rw-r--r--chromium/sync/protocol/app_specifics.proto60
-rw-r--r--chromium/sync/protocol/article_specifics.proto29
-rw-r--r--chromium/sync/protocol/autofill_specifics.proto63
-rw-r--r--chromium/sync/protocol/bookmark_specifics.proto34
-rw-r--r--chromium/sync/protocol/client_commands.proto34
-rw-r--r--chromium/sync/protocol/client_debug_info.proto196
-rw-r--r--chromium/sync/protocol/device_info_specifics.proto37
-rw-r--r--chromium/sync/protocol/dictionary_specifics.proto18
-rw-r--r--chromium/sync/protocol/encryption.proto32
-rw-r--r--chromium/sync/protocol/experiments_specifics.proto51
-rw-r--r--chromium/sync/protocol/extension_setting_specifics.proto28
-rw-r--r--chromium/sync/protocol/extension_specifics.proto39
-rw-r--r--chromium/sync/protocol/favicon_image_specifics.proto40
-rw-r--r--chromium/sync/protocol/favicon_tracking_specifics.proto24
-rw-r--r--chromium/sync/protocol/get_updates_caller_info.proto52
-rw-r--r--chromium/sync/protocol/history_delete_directive_specifics.proto48
-rw-r--r--chromium/sync/protocol/managed_user_setting_specifics.proto21
-rw-r--r--chromium/sync/protocol/managed_user_specifics.proto36
-rw-r--r--chromium/sync/protocol/nigori_specifics.proto130
-rw-r--r--chromium/sync/protocol/password_specifics.proto45
-rw-r--r--chromium/sync/protocol/preference_specifics.proto22
-rw-r--r--chromium/sync/protocol/priority_preference_specifics.proto22
-rw-r--r--chromium/sync/protocol/proto_enum_conversions.cc252
-rw-r--r--chromium/sync/protocol/proto_enum_conversions.h65
-rw-r--r--chromium/sync/protocol/proto_enum_conversions_unittest.cc95
-rw-r--r--chromium/sync/protocol/proto_value_conversions.cc1028
-rw-r--r--chromium/sync/protocol/proto_value_conversions.h288
-rw-r--r--chromium/sync/protocol/proto_value_conversions_unittest.cc360
-rw-r--r--chromium/sync/protocol/search_engine_specifics.proto82
-rw-r--r--chromium/sync/protocol/session_specifics.proto137
-rw-r--r--chromium/sync/protocol/sync.proto895
-rw-r--r--chromium/sync/protocol/sync_enums.proto148
-rw-r--r--chromium/sync/protocol/sync_protocol_error.cc65
-rw-r--r--chromium/sync/protocol/sync_protocol_error.h88
-rw-r--r--chromium/sync/protocol/synced_notification_data.proto130
-rw-r--r--chromium/sync/protocol/synced_notification_render.proto178
-rw-r--r--chromium/sync/protocol/synced_notification_specifics.proto24
-rw-r--r--chromium/sync/protocol/test.proto21
-rw-r--r--chromium/sync/protocol/theme_specifics.proto39
-rw-r--r--chromium/sync/protocol/typed_url_specifics.proto41
-rw-r--r--chromium/sync/protocol/unique_position.proto80
-rw-r--r--chromium/sync/sessions/DEPS12
-rw-r--r--chromium/sync/sessions/data_type_tracker.cc157
-rw-r--r--chromium/sync/sessions/data_type_tracker.h126
-rw-r--r--chromium/sync/sessions/debug_info_getter.h31
-rw-r--r--chromium/sync/sessions/nudge_tracker.cc219
-rw-r--r--chromium/sync/sessions/nudge_tracker.h139
-rw-r--r--chromium/sync/sessions/nudge_tracker_unittest.cc467
-rw-r--r--chromium/sync/sessions/status_controller.cc135
-rw-r--r--chromium/sync/sessions/status_controller.h116
-rw-r--r--chromium/sync/sessions/status_controller_unittest.cc46
-rw-r--r--chromium/sync/sessions/sync_session.cc89
-rw-r--r--chromium/sync/sessions/sync_session.h133
-rw-r--r--chromium/sync/sessions/sync_session_context.cc79
-rw-r--r--chromium/sync/sessions/sync_session_context.h227
-rw-r--r--chromium/sync/sessions/sync_session_unittest.cc150
-rw-r--r--chromium/sync/sessions/test_util.cc134
-rw-r--r--chromium/sync/sessions/test_util.h97
-rw-r--r--chromium/sync/sync.gyp1
-rw-r--r--chromium/sync/sync_android.gypi1
-rw-r--r--chromium/sync/sync_api.gypi30
-rw-r--r--chromium/sync/sync_core.gypi65
-rw-r--r--chromium/sync/sync_internal_api.gypi55
-rw-r--r--chromium/sync/sync_notifier.gypi18
-rw-r--r--chromium/sync/sync_proto.gypi5
-rw-r--r--chromium/sync/sync_tests.gypi189
-rw-r--r--chromium/sync/syncable/DEPS11
-rw-r--r--chromium/sync/syncable/blob.h20
-rw-r--r--chromium/sync/syncable/dir_open_result.h24
-rw-r--r--chromium/sync/syncable/directory.cc1296
-rw-r--r--chromium/sync/syncable/directory.h557
-rw-r--r--chromium/sync/syncable/directory_backing_store.cc1504
-rw-r--r--chromium/sync/syncable/directory_backing_store.h194
-rw-r--r--chromium/sync/syncable/directory_backing_store_unittest.cc3502
-rw-r--r--chromium/sync/syncable/directory_change_delegate.h51
-rw-r--r--chromium/sync/syncable/entry.cc169
-rw-r--r--chromium/sync/syncable/entry.h269
-rw-r--r--chromium/sync/syncable/entry_kernel.cc216
-rw-r--r--chromium/sync/syncable/entry_kernel.h358
-rw-r--r--chromium/sync/syncable/in_memory_directory_backing_store.cc50
-rw-r--r--chromium/sync/syncable/in_memory_directory_backing_store.h45
-rw-r--r--chromium/sync/syncable/invalid_directory_backing_store.cc25
-rw-r--r--chromium/sync/syncable/invalid_directory_backing_store.h31
-rw-r--r--chromium/sync/syncable/metahandle_set.h20
-rw-r--r--chromium/sync/syncable/model_neutral_mutable_entry.cc381
-rw-r--r--chromium/sync/syncable/model_neutral_mutable_entry.h116
-rw-r--r--chromium/sync/syncable/model_type.cc995
-rw-r--r--chromium/sync/syncable/model_type_unittest.cc108
-rw-r--r--chromium/sync/syncable/mutable_entry.cc247
-rw-r--r--chromium/sync/syncable/mutable_entry.h77
-rw-r--r--chromium/sync/syncable/nigori_handler.cc14
-rw-r--r--chromium/sync/syncable/nigori_handler.h64
-rw-r--r--chromium/sync/syncable/nigori_util.cc322
-rw-r--r--chromium/sync/syncable/nigori_util.h85
-rw-r--r--chromium/sync/syncable/nigori_util_unittest.cc50
-rw-r--r--chromium/sync/syncable/on_disk_directory_backing_store.cc117
-rw-r--r--chromium/sync/syncable/on_disk_directory_backing_store.h51
-rw-r--r--chromium/sync/syncable/parent_child_index.cc115
-rw-r--r--chromium/sync/syncable/parent_child_index.h66
-rw-r--r--chromium/sync/syncable/parent_child_index_unittest.cc344
-rw-r--r--chromium/sync/syncable/scoped_kernel_lock.cc19
-rw-r--r--chromium/sync/syncable/scoped_kernel_lock.h29
-rw-r--r--chromium/sync/syncable/scoped_parent_child_index_updater.cc28
-rw-r--r--chromium/sync/syncable/scoped_parent_child_index_updater.h37
-rw-r--r--chromium/sync/syncable/syncable-inl.h25
-rw-r--r--chromium/sync/syncable/syncable_base_transaction.cc79
-rw-r--r--chromium/sync/syncable/syncable_base_transaction.h85
-rw-r--r--chromium/sync/syncable/syncable_base_write_transaction.cc22
-rw-r--r--chromium/sync/syncable/syncable_base_write_transaction.h35
-rw-r--r--chromium/sync/syncable/syncable_changes_version.h31
-rw-r--r--chromium/sync/syncable/syncable_columns.h78
-rw-r--r--chromium/sync/syncable/syncable_delete_journal.cc143
-rw-r--r--chromium/sync/syncable/syncable_delete_journal.h104
-rw-r--r--chromium/sync/syncable/syncable_enum_conversions.cc178
-rw-r--r--chromium/sync/syncable/syncable_enum_conversions.h52
-rw-r--r--chromium/sync/syncable/syncable_enum_conversions_unittest.cc93
-rw-r--r--chromium/sync/syncable/syncable_id.cc72
-rw-r--r--chromium/sync/syncable/syncable_id.h132
-rw-r--r--chromium/sync/syncable/syncable_id_unittest.cc96
-rw-r--r--chromium/sync/syncable/syncable_model_neutral_write_transaction.cc33
-rw-r--r--chromium/sync/syncable/syncable_model_neutral_write_transaction.h44
-rw-r--r--chromium/sync/syncable/syncable_proto_util.cc32
-rw-r--r--chromium/sync/syncable/syncable_proto_util.h38
-rw-r--r--chromium/sync/syncable/syncable_read_transaction.cc22
-rw-r--r--chromium/sync/syncable/syncable_read_transaction.h33
-rw-r--r--chromium/sync/syncable/syncable_unittest.cc2303
-rw-r--r--chromium/sync/syncable/syncable_util.cc116
-rw-r--r--chromium/sync/syncable/syncable_util.h58
-rw-r--r--chromium/sync/syncable/syncable_util_unittest.cc32
-rw-r--r--chromium/sync/syncable/syncable_write_transaction.cc188
-rw-r--r--chromium/sync/syncable/syncable_write_transaction.h68
-rw-r--r--chromium/sync/syncable/transaction_observer.h27
-rw-r--r--chromium/sync/syncable/write_transaction_info.cc48
-rw-r--r--chromium/sync/syncable/write_transaction_info.h41
-rw-r--r--chromium/sync/tools/DEPS9
-rw-r--r--chromium/sync/tools/null_invalidation_state_tracker.cc59
-rw-r--r--chromium/sync/tools/null_invalidation_state_tracker.h36
-rw-r--r--chromium/sync/tools/sync_client.cc396
-rw-r--r--chromium/sync/tools/sync_listen_notifications.cc213
-rw-r--r--chromium/sync/tools/sync_tools.gyp77
-rw-r--r--chromium/sync/tools/testserver/DEPS3
-rw-r--r--chromium/sync/tools/testserver/OWNERS3
-rw-r--r--chromium/sync/tools/testserver/chromiumsync.py1643
-rwxr-xr-xchromium/sync/tools/testserver/chromiumsync_test.py680
-rw-r--r--chromium/sync/tools/testserver/run_sync_testserver.cc120
-rwxr-xr-xchromium/sync/tools/testserver/sync_testserver.py614
-rw-r--r--chromium/sync/tools/testserver/synced_notifications.html51
-rw-r--r--chromium/sync/tools/testserver/xmppserver.py603
-rwxr-xr-xchromium/sync/tools/testserver/xmppserver_test.py421
-rw-r--r--chromium/sync/util/DEPS13
-rw-r--r--chromium/sync/util/cryptographer.cc361
-rw-r--r--chromium/sync/util/cryptographer.h217
-rw-r--r--chromium/sync/util/cryptographer_unittest.cc204
-rw-r--r--chromium/sync/util/data_type_histogram.h129
-rw-r--r--chromium/sync/util/data_type_histogram_unittest.cc60
-rw-r--r--chromium/sync/util/encryptor.h27
-rw-r--r--chromium/sync/util/extensions_activity.cc39
-rw-r--r--chromium/sync/util/extensions_activity.h64
-rw-r--r--chromium/sync/util/get_session_name.cc88
-rw-r--r--chromium/sync/util/get_session_name.h28
-rw-r--r--chromium/sync/util/get_session_name_ios.h18
-rw-r--r--chromium/sync/util/get_session_name_ios.mm19
-rw-r--r--chromium/sync/util/get_session_name_linux.cc24
-rw-r--r--chromium/sync/util/get_session_name_linux.h19
-rw-r--r--chromium/sync/util/get_session_name_mac.h22
-rw-r--r--chromium/sync/util/get_session_name_mac.mm48
-rw-r--r--chromium/sync/util/get_session_name_unittest.cc76
-rw-r--r--chromium/sync/util/get_session_name_win.cc21
-rw-r--r--chromium/sync/util/get_session_name_win.h18
-rw-r--r--chromium/sync/util/logging.cc18
-rw-r--r--chromium/sync/util/logging.h34
-rw-r--r--chromium/sync/util/nigori.cc250
-rw-r--r--chromium/sync/util/nigori.h86
-rw-r--r--chromium/sync/util/nigori_unittest.cc170
-rw-r--r--chromium/sync/util/protobuf_unittest.cc35
-rw-r--r--chromium/sync/util/test_unrecoverable_error_handler.cc23
-rw-r--r--chromium/sync/util/test_unrecoverable_error_handler.h27
-rw-r--r--chromium/sync/util/time.cc24
-rw-r--r--chromium/sync/util/time.h29
477 files changed, 303 insertions, 83452 deletions
diff --git a/chromium/sync/DEPS b/chromium/sync/DEPS
deleted file mode 100644
index bbe2669b605..00000000000
--- a/chromium/sync/DEPS
+++ /dev/null
@@ -1,14 +0,0 @@
-include_rules = [
- # Repeat these from the top-level DEPS file so one can just run
- #
- # checkdeps.py sync
- #
- # to test.
- "+base",
- "+build",
- "+google_apis",
- "+testing",
-
- # Force subdirectories to explicitly define DEPS.
- "-sync",
-]
diff --git a/chromium/sync/OWNERS b/chromium/sync/OWNERS
deleted file mode 100644
index 99e1af22386..00000000000
--- a/chromium/sync/OWNERS
+++ /dev/null
@@ -1,11 +0,0 @@
-akalin@chromium.org
-atwilson@chromium.org
-lipalani@chromium.org
-nick@chromium.org
-rlarocque@chromium.org
-rsimha@chromium.org
-tim@chromium.org
-zea@chromium.org
-
-per-file sync_android.gypi=nyquist@chromium.org
-per-file sync_android.gypi=yfriedman@chromium.org
diff --git a/chromium/sync/PRESUBMIT.py b/chromium/sync/PRESUBMIT.py
deleted file mode 100644
index 8768cb07359..00000000000
--- a/chromium/sync/PRESUBMIT.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Chromium presubmit script for src/sync.
-
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details on the presubmit API built into gcl.
-"""
-
-def GetPreferredTrySlaves():
- return [
- 'linux_rel:sync_integration_tests',
- 'mac_rel:sync_integration_tests',
- 'win_rel:sync_integration_tests',
- ]
diff --git a/chromium/sync/api/DEPS b/chromium/sync/api/DEPS
deleted file mode 100644
index c526c086c4c..00000000000
--- a/chromium/sync/api/DEPS
+++ /dev/null
@@ -1,7 +0,0 @@
-include_rules = [
- "+sync/base",
- "+sync/internal_api/base_node.h",
- "+sync/internal_api/public",
- "+sync/protocol",
- "+sync/util",
-]
diff --git a/chromium/sync/api/fake_syncable_service.cc b/chromium/sync/api/fake_syncable_service.cc
deleted file mode 100644
index 0d76f2ef077..00000000000
--- a/chromium/sync/api/fake_syncable_service.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/fake_syncable_service.h"
-
-#include "base/location.h"
-#include "sync/api/sync_error_factory.h"
-
-namespace syncer {
-
-FakeSyncableService::FakeSyncableService()
- : syncing_(false),
- type_(UNSPECIFIED) {}
-
-FakeSyncableService::~FakeSyncableService() {}
-
-void FakeSyncableService::set_merge_data_and_start_syncing_error(
- const SyncError& error) {
- merge_data_and_start_syncing_error_ = error;
-}
-
-void FakeSyncableService::set_process_sync_changes_error(
- const SyncError& error) {
- process_sync_changes_error_ = error;
-}
-
-bool FakeSyncableService::syncing() const {
- return syncing_;
-}
-
-// SyncableService implementation.
-SyncMergeResult FakeSyncableService::MergeDataAndStartSyncing(
- ModelType type,
- const SyncDataList& initial_sync_data,
- scoped_ptr<SyncChangeProcessor> sync_processor,
- scoped_ptr<SyncErrorFactory> sync_error_factory) {
- SyncMergeResult merge_result(type);
- sync_processor_ = sync_processor.Pass();
- type_ = type;
- if (!merge_data_and_start_syncing_error_.IsSet()) {
- syncing_ = true;
- } else {
- merge_result.set_error(merge_data_and_start_syncing_error_);
- }
- return merge_result;
-}
-
-void FakeSyncableService::StopSyncing(ModelType type) {
- syncing_ = false;
- sync_processor_.reset();
-}
-
-SyncDataList FakeSyncableService::GetAllSyncData(ModelType type) const {
- return SyncDataList();
-}
-
-SyncError FakeSyncableService::ProcessSyncChanges(
- const tracked_objects::Location& from_here,
- const SyncChangeList& change_list) {
- return process_sync_changes_error_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/fake_syncable_service.h b/chromium/sync/api/fake_syncable_service.h
deleted file mode 100644
index 3d77a80ef6f..00000000000
--- a/chromium/sync/api/fake_syncable_service.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_FAKE_SYNCABLE_SERVICE_H_
-#define SYNC_API_FAKE_SYNCABLE_SERVICE_H_
-
-#include "sync/api/syncable_service.h"
-
-namespace syncer {
-
-class SyncErrorFactory;
-
-// A fake SyncableService that can return arbitrary values and maintains the
-// syncing status.
-class FakeSyncableService : public SyncableService {
- public:
- FakeSyncableService();
- virtual ~FakeSyncableService();
-
- // Setters for SyncableService implementation results.
- void set_merge_data_and_start_syncing_error(const SyncError& error);
- void set_process_sync_changes_error(const SyncError& error);
-
- // Whether we're syncing or not. Set on a successful MergeDataAndStartSyncing,
- // unset on StopSyncing. False by default.
- bool syncing() const;
-
- // SyncableService implementation.
- virtual SyncMergeResult MergeDataAndStartSyncing(
- ModelType type,
- const SyncDataList& initial_sync_data,
- scoped_ptr<SyncChangeProcessor> sync_processor,
- scoped_ptr<SyncErrorFactory> sync_error_factory) OVERRIDE;
- virtual void StopSyncing(ModelType type) OVERRIDE;
- virtual SyncDataList GetAllSyncData(ModelType type) const OVERRIDE;
- virtual SyncError ProcessSyncChanges(
- const tracked_objects::Location& from_here,
- const SyncChangeList& change_list) OVERRIDE;
-
- private:
- scoped_ptr<SyncChangeProcessor> sync_processor_;
- SyncError merge_data_and_start_syncing_error_;
- SyncError process_sync_changes_error_;
- bool syncing_;
- ModelType type_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_API_FAKE_SYNCABLE_SERVICE_H_
diff --git a/chromium/sync/api/string_ordinal.h b/chromium/sync/api/string_ordinal.h
deleted file mode 100644
index 8952d47afec..00000000000
--- a/chromium/sync/api/string_ordinal.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_STRING_ORDINAL_H_
-#define SYNC_API_STRING_ORDINAL_H_
-
-#include "base/basictypes.h"
-#include "sync/internal_api/public/base/ordinal.h"
-
-namespace syncer {
-
-// A StringOrdinal is an Ordinal with range 'a'-'z' for printability
-// of its internal byte string representation. (Think of
-// StringOrdinal as being short for PrintableStringOrdinal.) It
-// should be used for data types that want to maintain one or more
-// orderings for nodes.
-//
-// Since StringOrdinals contain only printable characters, it is safe
-// to store as a string in a protobuf.
-
-struct StringOrdinalTraits {
- static const uint8 kZeroDigit = 'a';
- static const uint8 kMaxDigit = 'z';
- static const size_t kMinLength = 1;
-};
-
-typedef Ordinal<StringOrdinalTraits> StringOrdinal;
-
-COMPILE_ASSERT(StringOrdinal::kZeroDigit == 'a',
- StringOrdinalHasCorrectZeroDigit);
-COMPILE_ASSERT(StringOrdinal::kOneDigit == 'b',
- StringOrdinalHasCorrectOneDigit);
-COMPILE_ASSERT(StringOrdinal::kMidDigit == 'n',
- StringOrdinalHasCorrectMidDigit);
-COMPILE_ASSERT(StringOrdinal::kMaxDigit == 'z',
- StringOrdinalHasCorrectMaxDigit);
-COMPILE_ASSERT(StringOrdinal::kMidDigitValue == 13,
- StringOrdinalHasCorrectMidDigitValue);
-COMPILE_ASSERT(StringOrdinal::kMaxDigitValue == 25,
- StringOrdinalHasCorrectMaxDigitValue);
-COMPILE_ASSERT(StringOrdinal::kRadix == 26,
- StringOrdinalHasCorrectRadix);
-
-} // namespace syncer
-
-#endif // SYNC_API_STRING_ORDINAL_H_
diff --git a/chromium/sync/api/sync_change.cc b/chromium/sync/api/sync_change.cc
deleted file mode 100644
index 8dedd9cb176..00000000000
--- a/chromium/sync/api/sync_change.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_change.h"
-
-#include <ostream>
-
-namespace syncer {
-
-SyncChange::SyncChange() : change_type_(ACTION_INVALID) {
-}
-
-SyncChange::SyncChange(
- const tracked_objects::Location& from_here,
- SyncChangeType change_type,
- const SyncData& sync_data)
- : location_(from_here),
- change_type_(change_type),
- sync_data_(sync_data) {
- DCHECK(IsValid());
-}
-
-SyncChange::~SyncChange() {}
-
-bool SyncChange::IsValid() const {
- if (change_type_ == ACTION_INVALID || !sync_data_.IsValid())
- return false;
-
- // Data from the syncer must always have valid specifics.
- if (!sync_data_.IsLocal())
- return IsRealDataType(sync_data_.GetDataType());
-
- // Local changes must always have a tag and specify a valid datatype.
- if (sync_data_.GetTag().empty() ||
- !IsRealDataType(sync_data_.GetDataType())) {
- return false;
- }
-
- // Adds and updates must have a non-unique-title.
- if (change_type_ == ACTION_ADD || change_type_ == ACTION_UPDATE)
- return (!sync_data_.GetTitle().empty());
-
- return true;
-}
-
-SyncChange::SyncChangeType SyncChange::change_type() const {
- return change_type_;
-}
-
-SyncData SyncChange::sync_data() const {
- return sync_data_;
-}
-
-tracked_objects::Location SyncChange::location() const {
- return location_;
-}
-
-// static
-std::string SyncChange::ChangeTypeToString(SyncChangeType change_type) {
- switch (change_type) {
- case ACTION_INVALID:
- return "ACTION_INVALID";
- case ACTION_ADD:
- return "ACTION_ADD";
- case ACTION_UPDATE:
- return "ACTION_UPDATE";
- case ACTION_DELETE:
- return "ACTION_DELETE";
- default:
- NOTREACHED();
- }
- return std::string();
-}
-
-std::string SyncChange::ToString() const {
- return "{ " + location_.ToString() + ", changeType: " +
- ChangeTypeToString(change_type_) + ", syncData: " +
- sync_data_.ToString() + "}";
-}
-
-void PrintTo(const SyncChange& sync_change, std::ostream* os) {
- *os << sync_change.ToString();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_change.h b/chromium/sync/api/sync_change.h
deleted file mode 100644
index 8751fa1ea1b..00000000000
--- a/chromium/sync/api/sync_change.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_CHANGE_H_
-#define SYNC_API_SYNC_CHANGE_H_
-
-#include <iosfwd>
-#include <string>
-#include <vector>
-
-#include "base/location.h"
-#include "sync/api/sync_data.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// A SyncChange object reflects a change to a piece of synced data. The change
-// can be either a delete, add, or an update. All data relevant to the change
-// is encapsulated within the SyncChange, which, once created, is immutable.
-// Note: it is safe and cheap to pass these by value or make copies, as they do
-// not create deep copies of their internal data.
-class SYNC_EXPORT SyncChange {
- public:
- enum SyncChangeType {
- ACTION_INVALID,
- ACTION_ADD,
- ACTION_UPDATE,
- ACTION_DELETE,
- };
-
- // Default constructor creates an invalid change.
- SyncChange();
- // Create a new change with the specified sync data.
- SyncChange(
- const tracked_objects::Location& from_here,
- SyncChangeType change_type,
- const SyncData& sync_data);
- ~SyncChange();
-
- // Copy constructor and assignment operator welcome.
-
- // Whether this change is valid. This must be true before attempting to access
- // the data.
- // Deletes: Requires valid tag when going to the syncer. Requires valid
- // specifics when coming from the syncer.
- // Adds, Updates: Require valid tag and specifics when going to the syncer.
- // Require only valid specifics when coming from the syncer.
- bool IsValid() const;
-
- // Getters.
- SyncChangeType change_type() const;
- SyncData sync_data() const;
- tracked_objects::Location location() const;
-
- // Returns a string representation of |change_type|.
- static std::string ChangeTypeToString(SyncChangeType change_type);
-
- // Returns a string representation of the entire object. Used for gmock
- // printing method, PrintTo.
- std::string ToString() const;
-
- private:
- tracked_objects::Location location_;
-
- SyncChangeType change_type_;
-
- // An immutable container for the data of this SyncChange. Whenever
- // SyncChanges are copied, they copy references to this data.
- SyncData sync_data_;
-};
-
-// gmock printer helper.
-SYNC_EXPORT void PrintTo(const SyncChange& sync_change, std::ostream* os);
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_CHANGE_H_
diff --git a/chromium/sync/api/sync_change_processor.cc b/chromium/sync/api/sync_change_processor.cc
deleted file mode 100644
index 4a0607e07f4..00000000000
--- a/chromium/sync/api/sync_change_processor.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_change_processor.h"
-
-namespace syncer {
-
-SyncChangeProcessor::SyncChangeProcessor() {}
-
-SyncChangeProcessor::~SyncChangeProcessor() {}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_change_processor.h b/chromium/sync/api/sync_change_processor.h
deleted file mode 100644
index 69494f0849c..00000000000
--- a/chromium/sync/api/sync_change_processor.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_CHANGE_PROCESSOR_H_
-#define SYNC_API_SYNC_CHANGE_PROCESSOR_H_
-
-#include <vector>
-
-#include "sync/api/sync_data.h"
-#include "sync/api/sync_error.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-class SyncChange;
-
-typedef std::vector<SyncChange> SyncChangeList;
-
-// An interface for services that handle receiving SyncChanges.
-class SYNC_EXPORT SyncChangeProcessor {
- public:
- SyncChangeProcessor();
- virtual ~SyncChangeProcessor();
-
- // Process a list of SyncChanges.
- // Returns: A default SyncError (IsSet() == false) if no errors were
- // encountered, and a filled SyncError (IsSet() == true)
- // otherwise.
- // Inputs:
- // |from_here|: allows tracking of where sync changes originate.
- // |change_list|: is the list of sync changes in need of processing.
- virtual SyncError ProcessSyncChanges(
- const tracked_objects::Location& from_here,
- const SyncChangeList& change_list) = 0;
-
- // Fills a list of SyncData. This should create an up to date representation
- // of all the data known to the ChangeProcessor for |datatype|, and
- // should match/be a subset of the server's view of that datatype.
- //
- // WARNING: This can be a potentially slow & memory intensive operation and
- // should only be used when absolutely necessary / sparingly.
- virtual SyncDataList GetAllSyncData(ModelType type) const = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_CHANGE_PROCESSOR_H_
diff --git a/chromium/sync/api/sync_change_unittest.cc b/chromium/sync/api/sync_change_unittest.cc
deleted file mode 100644
index 8c2a7dd18bc..00000000000
--- a/chromium/sync/api/sync_change_unittest.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_change.h"
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "base/values.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-// Ordered list of SyncChange's.
-typedef std::vector<SyncChange> SyncChangeList;
-
-namespace {
-
-typedef testing::Test SyncChangeTest;
-
-TEST_F(SyncChangeTest, LocalDelete) {
- SyncChange::SyncChangeType change_type = SyncChange::ACTION_DELETE;
- std::string tag = "client_tag";
- SyncChange e(FROM_HERE,
- change_type,
- SyncData::CreateLocalDelete(tag, PREFERENCES));
- EXPECT_EQ(change_type, e.change_type());
- EXPECT_EQ(tag, e.sync_data().GetTag());
- EXPECT_EQ(PREFERENCES, e.sync_data().GetDataType());
-}
-
-TEST_F(SyncChangeTest, LocalUpdate) {
- SyncChange::SyncChangeType change_type = SyncChange::ACTION_UPDATE;
- sync_pb::EntitySpecifics specifics;
- sync_pb::PreferenceSpecifics* pref_specifics = specifics.mutable_preference();
- pref_specifics->set_name("test");
- std::string tag = "client_tag";
- std::string title = "client_title";
- SyncChange e(FROM_HERE,
- change_type,
- SyncData::CreateLocalData(tag, title, specifics));
- EXPECT_EQ(change_type, e.change_type());
- EXPECT_EQ(tag, e.sync_data().GetTag());
- EXPECT_EQ(title, e.sync_data().GetTitle());
- EXPECT_EQ(PREFERENCES, e.sync_data().GetDataType());
- scoped_ptr<base::DictionaryValue> ref_spec(EntitySpecificsToValue(specifics));
- scoped_ptr<base::DictionaryValue> e_spec(EntitySpecificsToValue(
- e.sync_data().GetSpecifics()));
- EXPECT_TRUE(ref_spec->Equals(e_spec.get()));
-}
-
-TEST_F(SyncChangeTest, LocalAdd) {
- SyncChange::SyncChangeType change_type = SyncChange::ACTION_ADD;
- sync_pb::EntitySpecifics specifics;
- sync_pb::PreferenceSpecifics* pref_specifics = specifics.mutable_preference();
- pref_specifics->set_name("test");
- std::string tag = "client_tag";
- std::string title = "client_title";
- SyncChange e(FROM_HERE,
- change_type,
- SyncData::CreateLocalData(tag, title, specifics));
- EXPECT_EQ(change_type, e.change_type());
- EXPECT_EQ(tag, e.sync_data().GetTag());
- EXPECT_EQ(title, e.sync_data().GetTitle());
- EXPECT_EQ(PREFERENCES, e.sync_data().GetDataType());
- scoped_ptr<base::DictionaryValue> ref_spec(EntitySpecificsToValue(specifics));
- scoped_ptr<base::DictionaryValue> e_spec(EntitySpecificsToValue(
- e.sync_data().GetSpecifics()));
- EXPECT_TRUE(ref_spec->Equals(e_spec.get()));
-}
-
-TEST_F(SyncChangeTest, SyncerChanges) {
- SyncChangeList change_list;
-
- // Create an update.
- sync_pb::EntitySpecifics update_specifics;
- sync_pb::PreferenceSpecifics* pref_specifics =
- update_specifics.mutable_preference();
- pref_specifics->set_name("update");
- change_list.push_back(SyncChange(
- FROM_HERE,
- SyncChange::ACTION_UPDATE,
- SyncData::CreateRemoteData(1, update_specifics, base::Time())));
-
- // Create an add.
- sync_pb::EntitySpecifics add_specifics;
- pref_specifics = add_specifics.mutable_preference();
- pref_specifics->set_name("add");
- change_list.push_back(SyncChange(
- FROM_HERE,
- SyncChange::ACTION_ADD,
- SyncData::CreateRemoteData(2, add_specifics, base::Time())));
-
- // Create a delete.
- sync_pb::EntitySpecifics delete_specifics;
- pref_specifics = delete_specifics.mutable_preference();
- pref_specifics->set_name("add");
- change_list.push_back(SyncChange(
- FROM_HERE,
- SyncChange::ACTION_DELETE,
- SyncData::CreateRemoteData(3, delete_specifics, base::Time())));
-
- ASSERT_EQ(3U, change_list.size());
-
- // Verify update.
- SyncChange e = change_list[0];
- EXPECT_EQ(SyncChange::ACTION_UPDATE, e.change_type());
- EXPECT_EQ(PREFERENCES, e.sync_data().GetDataType());
- scoped_ptr<base::DictionaryValue> ref_spec(EntitySpecificsToValue(
- update_specifics));
- scoped_ptr<base::DictionaryValue> e_spec(EntitySpecificsToValue(
- e.sync_data().GetSpecifics()));
- EXPECT_TRUE(ref_spec->Equals(e_spec.get()));
-
- // Verify add.
- e = change_list[1];
- EXPECT_EQ(SyncChange::ACTION_ADD, e.change_type());
- EXPECT_EQ(PREFERENCES, e.sync_data().GetDataType());
- ref_spec.reset(EntitySpecificsToValue(add_specifics));
- e_spec.reset(EntitySpecificsToValue(e.sync_data().GetSpecifics()));
- EXPECT_TRUE(ref_spec->Equals(e_spec.get()));
-
- // Verify delete.
- e = change_list[2];
- EXPECT_EQ(SyncChange::ACTION_DELETE, e.change_type());
- EXPECT_EQ(PREFERENCES, e.sync_data().GetDataType());
- ref_spec.reset(EntitySpecificsToValue(delete_specifics));
- e_spec.reset(EntitySpecificsToValue(e.sync_data().GetSpecifics()));
- EXPECT_TRUE(ref_spec->Equals(e_spec.get()));
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_data.cc b/chromium/sync/api/sync_data.cc
deleted file mode 100644
index d52e04ec213..00000000000
--- a/chromium/sync/api/sync_data.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_data.h"
-
-#include <ostream>
-
-#include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base_node.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-void SyncData::ImmutableSyncEntityTraits::InitializeWrapper(
- Wrapper* wrapper) {
- *wrapper = new sync_pb::SyncEntity();
-}
-
-void SyncData::ImmutableSyncEntityTraits::DestroyWrapper(
- Wrapper* wrapper) {
- delete *wrapper;
-}
-
-const sync_pb::SyncEntity& SyncData::ImmutableSyncEntityTraits::Unwrap(
- const Wrapper& wrapper) {
- return *wrapper;
-}
-
-sync_pb::SyncEntity* SyncData::ImmutableSyncEntityTraits::UnwrapMutable(
- Wrapper* wrapper) {
- return *wrapper;
-}
-
-void SyncData::ImmutableSyncEntityTraits::Swap(sync_pb::SyncEntity* t1,
- sync_pb::SyncEntity* t2) {
- t1->Swap(t2);
-}
-
-SyncData::SyncData()
- : is_valid_(false),
- id_(kInvalidId) {}
-
-SyncData::SyncData(int64 id,
- sync_pb::SyncEntity* entity,
- const base::Time& remote_modification_time)
- : is_valid_(true),
- id_(id),
- remote_modification_time_(remote_modification_time),
- immutable_entity_(entity) {}
-
-SyncData::~SyncData() {}
-
-// Static.
-SyncData SyncData::CreateLocalDelete(
- const std::string& sync_tag,
- ModelType datatype) {
- sync_pb::EntitySpecifics specifics;
- AddDefaultFieldValue(datatype, &specifics);
- return CreateLocalData(sync_tag, std::string(), specifics);
-}
-
-// Static.
-SyncData SyncData::CreateLocalData(
- const std::string& sync_tag,
- const std::string& non_unique_title,
- const sync_pb::EntitySpecifics& specifics) {
- sync_pb::SyncEntity entity;
- entity.set_client_defined_unique_tag(sync_tag);
- entity.set_non_unique_name(non_unique_title);
- entity.mutable_specifics()->CopyFrom(specifics);
- return SyncData(kInvalidId, &entity, base::Time());
-}
-
-// Static.
-SyncData SyncData::CreateRemoteData(
- int64 id, const sync_pb::EntitySpecifics& specifics,
- const base::Time& modification_time) {
- DCHECK_NE(id, kInvalidId);
- sync_pb::SyncEntity entity;
- entity.mutable_specifics()->CopyFrom(specifics);
- return SyncData(id, &entity, modification_time);
-}
-
-bool SyncData::IsValid() const {
- return is_valid_;
-}
-
-const sync_pb::EntitySpecifics& SyncData::GetSpecifics() const {
- return immutable_entity_.Get().specifics();
-}
-
-ModelType SyncData::GetDataType() const {
- return GetModelTypeFromSpecifics(GetSpecifics());
-}
-
-const std::string& SyncData::GetTag() const {
- DCHECK(IsLocal());
- return immutable_entity_.Get().client_defined_unique_tag();
-}
-
-const std::string& SyncData::GetTitle() const {
- // TODO(zea): set this for data coming from the syncer too.
- DCHECK(immutable_entity_.Get().has_non_unique_name());
- return immutable_entity_.Get().non_unique_name();
-}
-
-const base::Time& SyncData::GetRemoteModifiedTime() const {
- DCHECK(!IsLocal());
- return remote_modification_time_;
-}
-
-int64 SyncData::GetRemoteId() const {
- DCHECK(!IsLocal());
- return id_;
-}
-
-bool SyncData::IsLocal() const {
- return id_ == kInvalidId;
-}
-
-std::string SyncData::ToString() const {
- if (!IsValid())
- return "<Invalid SyncData>";
-
- std::string type = ModelTypeToString(GetDataType());
- std::string specifics;
- scoped_ptr<base::DictionaryValue> value(
- EntitySpecificsToValue(GetSpecifics()));
- base::JSONWriter::WriteWithOptions(value.get(),
- base::JSONWriter::OPTIONS_PRETTY_PRINT,
- &specifics);
-
- if (IsLocal()) {
- return "{ isLocal: true, type: " + type + ", tag: " + GetTag() +
- ", title: " + GetTitle() + ", specifics: " + specifics + "}";
- }
-
- std::string id = base::Int64ToString(GetRemoteId());
- return "{ isLocal: false, type: " + type + ", specifics: " + specifics +
- ", id: " + id + "}";
-}
-
-void PrintTo(const SyncData& sync_data, std::ostream* os) {
- *os << sync_data.ToString();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_data.h b/chromium/sync/api/sync_data.h
deleted file mode 100644
index 43f64f9185b..00000000000
--- a/chromium/sync/api/sync_data.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_DATA_H_
-#define SYNC_API_SYNC_DATA_H_
-
-#include <iosfwd>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/immutable.h"
-
-namespace sync_pb {
-class EntitySpecifics;
-class SyncEntity;
-} // namespace sync_pb
-
-namespace syncer {
-
-// A light-weight container for immutable sync data. Pass-by-value and storage
-// in STL containers are supported and encouraged if helpful.
-class SYNC_EXPORT SyncData {
- public:
- // Creates an empty and invalid SyncData.
- SyncData();
- ~SyncData();
-
- // Default copy and assign welcome.
-
- // Helper methods for creating SyncData objects for local data.
- // The sync tag must be a string unique to this datatype and is used as a node
- // identifier server-side.
- // For deletes: |datatype| must specify the datatype who node is being
- // deleted.
- // For adds/updates: the specifics must be valid and the non-unique title (can
- // be the same as sync tag) must be specfied.
- // Note: the non_unique_title is primarily for debug purposes, and will be
- // overwritten if the datatype is encrypted.
- static SyncData CreateLocalDelete(
- const std::string& sync_tag,
- ModelType datatype);
- static SyncData CreateLocalData(
- const std::string& sync_tag,
- const std::string& non_unique_title,
- const sync_pb::EntitySpecifics& specifics);
-
- // Helper method for creating SyncData objects originating from the syncer.
- static SyncData CreateRemoteData(
- int64 id,
- const sync_pb::EntitySpecifics& specifics,
- const base::Time& last_modified_time);
-
- // Whether this SyncData holds valid data. The only way to have a SyncData
- // without valid data is to use the default constructor.
- bool IsValid() const;
-
- // Return the datatype we're holding information about. Derived from the sync
- // datatype specifics.
- ModelType GetDataType() const;
-
- // Return the current sync datatype specifics.
- const sync_pb::EntitySpecifics& GetSpecifics() const;
-
- // Returns the value of the unique client tag. This is only set for data going
- // TO the syncer, not coming from.
- const std::string& GetTag() const;
-
- // Returns the non unique title (for debugging). Currently only set for data
- // going TO the syncer, not from.
- const std::string& GetTitle() const;
-
- // Returns the last motification time according to the server. This is
- // only valid if IsLocal() is false, and may be null if the SyncData
- // represents a deleted item.
- const base::Time& GetRemoteModifiedTime() const;
-
- // Should only be called by sync code when IsLocal() is false.
- int64 GetRemoteId() const;
-
- // Whether this sync data is for local data or data coming from the syncer.
- bool IsLocal() const;
-
- std::string ToString() const;
-
- // TODO(zea): Query methods for other sync properties: parent, successor, etc.
-
- private:
- // Necessary since we forward-declare sync_pb::SyncEntity; see
- // comments in immutable.h.
- struct ImmutableSyncEntityTraits {
- typedef sync_pb::SyncEntity* Wrapper;
-
- static void InitializeWrapper(Wrapper* wrapper);
-
- static void DestroyWrapper(Wrapper* wrapper);
-
- static const sync_pb::SyncEntity& Unwrap(const Wrapper& wrapper);
-
- static sync_pb::SyncEntity* UnwrapMutable(Wrapper* wrapper);
-
- static void Swap(sync_pb::SyncEntity* t1, sync_pb::SyncEntity* t2);
- };
-
- typedef Immutable<sync_pb::SyncEntity, ImmutableSyncEntityTraits>
- ImmutableSyncEntity;
-
- // Clears |entity|.
- SyncData(int64 id,
- sync_pb::SyncEntity* entity,
- const base::Time& remote_modification_time);
-
- // Whether this SyncData holds valid data.
- bool is_valid_;
-
- // Equal to kInvalidId iff this is local.
- int64 id_;
-
- // This is only valid if IsLocal() is false, and may be null if the
- // SyncData represents a deleted item.
- base::Time remote_modification_time_;
-
- // The actual shared sync entity being held.
- ImmutableSyncEntity immutable_entity_;
-};
-
-// gmock printer helper.
-void PrintTo(const SyncData& sync_data, std::ostream* os);
-
-typedef std::vector<SyncData> SyncDataList;
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_DATA_H_
diff --git a/chromium/sync/api/sync_error.cc b/chromium/sync/api/sync_error.cc
deleted file mode 100644
index ee6885cb229..00000000000
--- a/chromium/sync/api/sync_error.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_error.h"
-
-#include <ostream>
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-
-SyncError::SyncError() {
- Clear();
-}
-
-SyncError::SyncError(const tracked_objects::Location& location,
- ErrorType error_type,
- const std::string& custom_message,
- ModelType model_type) {
- std::string type_message;
- switch (error_type) {
- case UNRECOVERABLE_ERROR:
- type_message = "unrecoverable error was encountered: ";
- break;
- case DATATYPE_ERROR:
- type_message = "datatype error was encountered: ";
- break;
- case PERSISTENCE_ERROR:
- type_message = "persistence error was encountered: ";
- break;
- case CRYPTO_ERROR:
- type_message = "cryptographer error was encountered: ";
- break;
- default:
- NOTREACHED();
- type_message = "invalid error: ";
- }
- Init(location, type_message + custom_message, model_type, error_type);
- PrintLogError();
-}
-
-SyncError::SyncError(const SyncError& other) {
- Copy(other);
-}
-
-SyncError::~SyncError() {
-}
-
-SyncError& SyncError::operator=(const SyncError& other) {
- if (this == &other) {
- return *this;
- }
- Copy(other);
- return *this;
-}
-
-void SyncError::Copy(const SyncError& other) {
- if (other.IsSet()) {
- Init(other.location(),
- other.message(),
- other.model_type(),
- other.error_type());
- } else {
- Clear();
- }
-}
-
-void SyncError::Clear() {
- location_.reset();
- message_ = std::string();
- model_type_ = UNSPECIFIED;
- error_type_ = UNSET;
-}
-
-void SyncError::Reset(const tracked_objects::Location& location,
- const std::string& message,
- ModelType model_type) {
- Init(location, message, model_type, DATATYPE_ERROR);
- PrintLogError();
-}
-
-void SyncError::Init(const tracked_objects::Location& location,
- const std::string& message,
- ModelType model_type,
- ErrorType error_type) {
- location_.reset(new tracked_objects::Location(location));
- message_ = message;
- model_type_ = model_type;
- error_type_ = error_type;
-}
-
-bool SyncError::IsSet() const {
- return error_type_ != UNSET;
-}
-
-
-const tracked_objects::Location& SyncError::location() const {
- CHECK(IsSet());
- return *location_;
-}
-
-const std::string& SyncError::message() const {
- CHECK(IsSet());
- return message_;
-}
-
-ModelType SyncError::model_type() const {
- CHECK(IsSet());
- return model_type_;
-}
-
-SyncError::ErrorType SyncError::error_type() const {
- CHECK(IsSet());
- return error_type_;
-}
-
-std::string SyncError::ToString() const {
- if (!IsSet()) {
- return std::string();
- }
- return location_->ToString() + ", " + ModelTypeToString(model_type_) +
- " " + message_;
-}
-
-void SyncError::PrintLogError() const {
- LAZY_STREAM(logging::LogMessage(location_->file_name(),
- location_->line_number(),
- logging::LOG_ERROR).stream(),
- LOG_IS_ON(ERROR))
- << ModelTypeToString(model_type_) << " " << message_;
-}
-
-void PrintTo(const SyncError& sync_error, std::ostream* os) {
- *os << sync_error.ToString();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_error.h b/chromium/sync/api/sync_error.h
deleted file mode 100644
index 306ed83fc72..00000000000
--- a/chromium/sync/api/sync_error.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_ERROR_H_
-#define SYNC_API_SYNC_ERROR_H_
-
-#include <iosfwd>
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-// Sync errors are used for debug purposes and handled internally and/or
-// exposed through Chrome's "about:sync" internal page.
-// This class is copy-friendly and thread-safe.
-class SYNC_EXPORT SyncError {
- public:
- // Error types are used to distinguish general datatype errors (which result
- // in the datatype being disabled) from actionable sync errors (which might
- // have more complicated results).
- enum ErrorType {
- UNSET, // No error.
- UNRECOVERABLE_ERROR, // An unrecoverable runtime error was encountered, and
- // sync should be disabled completely.
- DATATYPE_ERROR, // A datatype error was encountered, and the datatype
- // should be disabled.
- PERSISTENCE_ERROR, // A persistence error was detected, and the
- // datataype should be associated after a sync update.
- CRYPTO_ERROR, // A cryptographer error was detected, and the
- // datatype should be associated after it is resolved.
- };
-
- // Default constructor refers to "no error", and IsSet() will return false.
- SyncError();
-
- // Create a new Sync error of type |error_type| triggered by |model_type|
- // from the specified location. IsSet() will return true afterward. Will
- // create and print an error specific message to LOG(ERROR).
- SyncError(const tracked_objects::Location& location,
- ErrorType error_type,
- const std::string& message,
- ModelType model_type);
-
- // Copy and assign via deep copy.
- SyncError(const SyncError& other);
- SyncError& operator=(const SyncError& other);
-
- ~SyncError();
-
- // Reset the current error to a new datatype error. May be called
- // irrespective of whether IsSet() is true. After this is called, IsSet()
- // will return true.
- // Will print the new error to LOG(ERROR).
- void Reset(const tracked_objects::Location& location,
- const std::string& message,
- ModelType type);
-
- // Whether this is a valid error or not.
- bool IsSet() const;
-
- // These must only be called if IsSet() is true.
- const tracked_objects::Location& location() const;
- const std::string& message() const;
- ModelType model_type() const;
- ErrorType error_type() const;
-
- // Returns empty string is IsSet() is false.
- std::string ToString() const;
- private:
- // Print error information to log.
- void PrintLogError() const;
-
- // Make a copy of a SyncError. If other.IsSet() == false, this->IsSet() will
- // now return false.
- void Copy(const SyncError& other);
-
- // Initialize the local error data with the specified error data. After this
- // is called, IsSet() will return true.
- void Init(const tracked_objects::Location& location,
- const std::string& message,
- ModelType model_type,
- ErrorType error_type);
-
- // Reset the error to it's default (unset) values.
- void Clear();
-
- // scoped_ptr is necessary because Location objects aren't assignable.
- scoped_ptr<tracked_objects::Location> location_;
- std::string message_;
- ModelType model_type_;
- ErrorType error_type_;
-};
-
-// gmock printer helper.
-SYNC_EXPORT void PrintTo(const SyncError& sync_error, std::ostream* os);
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_ERROR_H_
diff --git a/chromium/sync/api/sync_error_factory.cc b/chromium/sync/api/sync_error_factory.cc
deleted file mode 100644
index 11888fff8a0..00000000000
--- a/chromium/sync/api/sync_error_factory.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_error_factory.h"
-
-namespace syncer {
-
-SyncErrorFactory::SyncErrorFactory() {
-}
-
-SyncErrorFactory::~SyncErrorFactory() {
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_error_factory.h b/chromium/sync/api/sync_error_factory.h
deleted file mode 100644
index 9b306b726c1..00000000000
--- a/chromium/sync/api/sync_error_factory.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_ERROR_FACTORY_H_
-#define SYNC_API_SYNC_ERROR_FACTORY_H_
-
-#include <string>
-
-#include "base/location.h"
-#include "sync/api/sync_error.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class SYNC_EXPORT SyncErrorFactory {
- public:
- SyncErrorFactory();
- virtual ~SyncErrorFactory();
-
- // Creates a SyncError object and uploads this call stack to breakpad.
- virtual SyncError CreateAndUploadError(
- const tracked_objects::Location& location,
- const std::string& message) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_ERROR_FACTORY_H_
diff --git a/chromium/sync/api/sync_error_factory_mock.cc b/chromium/sync/api/sync_error_factory_mock.cc
deleted file mode 100644
index 5326bd93816..00000000000
--- a/chromium/sync/api/sync_error_factory_mock.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_error_factory_mock.h"
-
-namespace syncer {
-
-SyncErrorFactoryMock::SyncErrorFactoryMock() {
-}
-
-SyncErrorFactoryMock::~SyncErrorFactoryMock() {
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_error_factory_mock.h b/chromium/sync/api/sync_error_factory_mock.h
deleted file mode 100644
index bd675ee2237..00000000000
--- a/chromium/sync/api/sync_error_factory_mock.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_ERROR_FACTORY_MOCK_H_
-#define SYNC_API_SYNC_ERROR_FACTORY_MOCK_H_
-
-#include "sync/api/sync_error_factory.h"
-
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace syncer {
-
-class SyncErrorFactoryMock : public SyncErrorFactory {
- public:
- SyncErrorFactoryMock();
- virtual ~SyncErrorFactoryMock();
-
- MOCK_METHOD2(CreateAndUploadError, SyncError(
- const tracked_objects::Location& location,
- const std::string& message));
-};
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_ERROR_FACTORY_MOCK_H_
diff --git a/chromium/sync/api/sync_error_unittest.cc b/chromium/sync/api/sync_error_unittest.cc
deleted file mode 100644
index 4505ac78e91..00000000000
--- a/chromium/sync/api/sync_error_unittest.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_error.h"
-
-#include <string>
-
-#include "base/location.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-using std::string;
-
-typedef testing::Test SyncErrorTest;
-
-TEST_F(SyncErrorTest, Unset) {
- SyncError error;
- EXPECT_FALSE(error.IsSet());
-}
-
-TEST_F(SyncErrorTest, Default) {
- tracked_objects::Location location = FROM_HERE;
- std::string msg = "test";
- ModelType type = PREFERENCES;
- SyncError error(location, SyncError::DATATYPE_ERROR, msg, type);
- ASSERT_TRUE(error.IsSet());
- EXPECT_EQ(location.line_number(), error.location().line_number());
- EXPECT_EQ("datatype error was encountered: " + msg, error.message());
- EXPECT_EQ(type, error.model_type());
-}
-
-TEST_F(SyncErrorTest, Reset) {
- tracked_objects::Location location = FROM_HERE;
- std::string msg = "test";
- ModelType type = PREFERENCES;
-
- SyncError error;
- EXPECT_FALSE(error.IsSet());
-
- error.Reset(location, msg, type);
- ASSERT_TRUE(error.IsSet());
- EXPECT_EQ(location.line_number(), error.location().line_number());
- EXPECT_EQ(msg, error.message());
- EXPECT_EQ(type, error.model_type());
-
- tracked_objects::Location location2 = FROM_HERE;
- std::string msg2 = "test";
- ModelType type2 = PREFERENCES;
- error.Reset(location2, msg2, type2);
- ASSERT_TRUE(error.IsSet());
- EXPECT_EQ(location2.line_number(), error.location().line_number());
- EXPECT_EQ(msg2, error.message());
- EXPECT_EQ(type2, error.model_type());
-}
-
-TEST_F(SyncErrorTest, Copy) {
- tracked_objects::Location location = FROM_HERE;
- std::string msg = "test";
- ModelType type = PREFERENCES;
-
- SyncError error1;
- EXPECT_FALSE(error1.IsSet());
- SyncError error2(error1);
- EXPECT_FALSE(error2.IsSet());
-
- error1.Reset(location, msg, type);
- ASSERT_TRUE(error1.IsSet());
- EXPECT_EQ(location.line_number(), error1.location().line_number());
- EXPECT_EQ(msg, error1.message());
- EXPECT_EQ(type, error1.model_type());
-
- SyncError error3(error1);
- ASSERT_TRUE(error3.IsSet());
- EXPECT_EQ(error1.location().line_number(), error3.location().line_number());
- EXPECT_EQ(error1.message(), error3.message());
- EXPECT_EQ(error1.model_type(), error3.model_type());
-
- SyncError error4;
- EXPECT_FALSE(error4.IsSet());
- SyncError error5(error4);
- EXPECT_FALSE(error5.IsSet());
-}
-
-TEST_F(SyncErrorTest, Assign) {
- tracked_objects::Location location = FROM_HERE;
- std::string msg = "test";
- ModelType type = PREFERENCES;
-
- SyncError error1;
- EXPECT_FALSE(error1.IsSet());
- SyncError error2;
- error2 = error1;
- EXPECT_FALSE(error2.IsSet());
-
- error1.Reset(location, msg, type);
- ASSERT_TRUE(error1.IsSet());
- EXPECT_EQ(location.line_number(), error1.location().line_number());
- EXPECT_EQ(msg, error1.message());
- EXPECT_EQ(type, error1.model_type());
-
- error2 = error1;
- ASSERT_TRUE(error2.IsSet());
- EXPECT_EQ(error1.location().line_number(), error2.location().line_number());
- EXPECT_EQ(error1.message(), error2.message());
- EXPECT_EQ(error1.model_type(), error2.model_type());
-
- error2 = SyncError();
- EXPECT_FALSE(error2.IsSet());
-}
-
-TEST_F(SyncErrorTest, ToString) {
- tracked_objects::Location location = FROM_HERE;
- std::string msg = "test";
- ModelType type = PREFERENCES;
- std::string expected = std::string(ModelTypeToString(type)) +
- " datatype error was encountered: " + msg;
- LOG(INFO) << "Expect " << expected;
- SyncError error(location, SyncError::DATATYPE_ERROR, msg, type);
- EXPECT_TRUE(error.IsSet());
- EXPECT_NE(string::npos, error.ToString().find(expected));
-
- SyncError error2;
- EXPECT_FALSE(error2.IsSet());
- EXPECT_EQ(std::string(), error2.ToString());
-
- error2 = error;
- EXPECT_TRUE(error2.IsSet());
- EXPECT_NE(string::npos, error.ToString().find(expected));
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_merge_result.cc b/chromium/sync/api/sync_merge_result.cc
deleted file mode 100644
index 601c5777ceb..00000000000
--- a/chromium/sync/api/sync_merge_result.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_merge_result.h"
-
-namespace syncer {
-
-SyncMergeResult::SyncMergeResult(ModelType type)
- : model_type_(type),
- num_items_before_association_(0),
- num_items_after_association_(0),
- num_items_added_(0),
- num_items_deleted_(0),
- num_items_modified_(0),
- pre_association_version_(0) {
-}
-
-SyncMergeResult::~SyncMergeResult() {
-}
-
-// Setters.
-void SyncMergeResult::set_error(SyncError error) {
- DCHECK(!error.IsSet() || model_type_ == error.model_type());
- error_ = error;
-}
-
-void SyncMergeResult::set_num_items_before_association(
- int num_items_before_association) {
- num_items_before_association_ = num_items_before_association;
-}
-
-void SyncMergeResult::set_num_items_after_association(
- int num_items_after_association) {
- num_items_after_association_ = num_items_after_association;
-}
-
-void SyncMergeResult::set_num_items_added(int num_items_added) {
- num_items_added_ = num_items_added;
-}
-
-void SyncMergeResult::set_num_items_deleted(int num_items_deleted) {
- num_items_deleted_ = num_items_deleted;
-}
-
-void SyncMergeResult::set_num_items_modified(int num_items_modified) {
- num_items_modified_ = num_items_modified;
-}
-
-void SyncMergeResult::set_pre_association_version(int64 version) {
- pre_association_version_ = version;
-}
-
-ModelType SyncMergeResult::model_type() const {
- return model_type_;
-}
-
-SyncError SyncMergeResult::error() const {
- return error_;
-}
-
-int SyncMergeResult::num_items_before_association() const {
- return num_items_before_association_;
-}
-
-int SyncMergeResult::num_items_after_association() const {
- return num_items_after_association_;
-}
-
-int SyncMergeResult::num_items_added() const {
- return num_items_added_;
-}
-
-int SyncMergeResult::num_items_deleted() const {
- return num_items_deleted_;
-}
-
-int SyncMergeResult::num_items_modified() const {
- return num_items_modified_;
-}
-
-int64 SyncMergeResult::pre_association_version() const {
- return pre_association_version_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/api/sync_merge_result.h b/chromium/sync/api/sync_merge_result.h
deleted file mode 100644
index 712f4bd440a..00000000000
--- a/chromium/sync/api/sync_merge_result.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNC_MERGE_RESULT_H_
-#define SYNC_API_SYNC_MERGE_RESULT_H_
-
-#include "sync/api/sync_error.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-
-// A model-type-specific view of a sync merge. This class encapsulates the
-// state before and after the merge as well as the deltas and any error that
-// occurred.
-// Note: This class only tracks one side of the merge. In other words, if built
-// by the local SyncableService, all values correspond to the local state before
-// and after merging, and the delta's applied to that state. Sync's change
-// processor will create a separate merge result.
-class SYNC_EXPORT SyncMergeResult {
- public:
- // Initialize an empty merge result for model type |type|.
- explicit SyncMergeResult(ModelType type);
- ~SyncMergeResult();
-
- // Default copy and assign welcome.
-
- // Setters.
- // Note: if |error.IsSet()| is true, |error.type()| must match model_type_
- void set_error(SyncError error);
- void set_num_items_before_association(int num_items_before_association);
- void set_num_items_after_association(int num_items_after_association);
- void set_num_items_added(int num_items_added);
- void set_num_items_deleted(int num_items_deleted);
- void set_num_items_modified(int num_items_modified);
- void set_pre_association_version(int64 version);
-
- // Getters.
- ModelType model_type() const;
- SyncError error() const;
- int num_items_before_association() const;
- int num_items_after_association() const;
- int num_items_added() const;
- int num_items_deleted() const;
- int num_items_modified() const;
- int64 pre_association_version() const;
-
- private:
- // Make |this| into a copy of |other|.
- void CopyFrom(const SyncMergeResult& other);
-
- // The datatype that was associated.
- ModelType model_type_;
-
- // The error encountered during association. Unset if no error was
- // encountered.
- SyncError error_;
-
- // The state of the world before association.
- int num_items_before_association_;
-
- // The state of the world after association.
- int num_items_after_association_;
-
- // The changes that took place during association. In a correctly working
- // system these should be the deltas between before and after.
- int num_items_added_;
- int num_items_deleted_;
- int num_items_modified_;
-
- // Version of model before association.
- int64 pre_association_version_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNC_MERGE_RESULT_H_
diff --git a/chromium/sync/api/sync_merge_result_unittest.cc b/chromium/sync/api/sync_merge_result_unittest.cc
deleted file mode 100644
index b3f692764cf..00000000000
--- a/chromium/sync/api/sync_merge_result_unittest.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/sync_merge_result.h"
-
-#include "base/location.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-typedef testing::Test SyncMergeResultTest;
-
-TEST_F(SyncMergeResultTest, Unset) {
- SyncMergeResult merge_result(BOOKMARKS);
- EXPECT_FALSE(merge_result.error().IsSet());
- EXPECT_EQ(0, merge_result.num_items_before_association());
- EXPECT_EQ(0, merge_result.num_items_after_association());
- EXPECT_EQ(0, merge_result.num_items_added());
- EXPECT_EQ(0, merge_result.num_items_deleted());
- EXPECT_EQ(0, merge_result.num_items_modified());
-}
-
-TEST_F(SyncMergeResultTest, SetError) {
- SyncError error(FROM_HERE, SyncError::DATATYPE_ERROR, "message", BOOKMARKS);
- SyncMergeResult merge_result(BOOKMARKS);
-
- merge_result.set_error(error);
- EXPECT_TRUE(merge_result.error().IsSet());
- EXPECT_EQ(BOOKMARKS, merge_result.model_type());
-}
-
-TEST_F(SyncMergeResultTest, SetNumItemsBeforeAssociation) {
- SyncMergeResult merge_result(BOOKMARKS);
- EXPECT_EQ(0, merge_result.num_items_before_association());
-
- merge_result.set_num_items_before_association(10);
- EXPECT_EQ(10, merge_result.num_items_before_association());
-}
-
-TEST_F(SyncMergeResultTest, SetNumItemsAfterAssociation) {
- SyncMergeResult merge_result(BOOKMARKS);
- EXPECT_EQ(0, merge_result.num_items_after_association());
-
- merge_result.set_num_items_after_association(10);
- EXPECT_EQ(10, merge_result.num_items_after_association());
-}
-
-TEST_F(SyncMergeResultTest, SetNumItemsAdded) {
- SyncMergeResult merge_result(BOOKMARKS);
- EXPECT_EQ(0, merge_result.num_items_added());
-
- merge_result.set_num_items_added(10);
- EXPECT_EQ(10, merge_result.num_items_added());
-}
-
-TEST_F(SyncMergeResultTest, SetNumItemsDeleted) {
- SyncMergeResult merge_result(BOOKMARKS);
- EXPECT_EQ(0, merge_result.num_items_deleted());
-
- merge_result.set_num_items_deleted(10);
- EXPECT_EQ(10, merge_result.num_items_deleted());
-}
-
-TEST_F(SyncMergeResultTest, SetNumItemsModified) {
- SyncMergeResult merge_result(BOOKMARKS);
- EXPECT_EQ(0, merge_result.num_items_modified());
-
- merge_result.set_num_items_modified(10);
- EXPECT_EQ(10, merge_result.num_items_modified());
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/api/syncable_service.cc b/chromium/sync/api/syncable_service.cc
deleted file mode 100644
index 66b1e278667..00000000000
--- a/chromium/sync/api/syncable_service.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/api/syncable_service.h"
-
-namespace syncer {
-
-SyncableService::~SyncableService() {}
-
-} // namespace syncer
diff --git a/chromium/sync/api/syncable_service.h b/chromium/sync/api/syncable_service.h
deleted file mode 100644
index 81b6018b32a..00000000000
--- a/chromium/sync/api/syncable_service.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_API_SYNCABLE_SERVICE_H_
-#define SYNC_API_SYNCABLE_SERVICE_H_
-
-#include <vector>
-
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "sync/api/sync_change_processor.h"
-#include "sync/api/sync_data.h"
-#include "sync/api/sync_error.h"
-#include "sync/api/sync_merge_result.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-
-class SyncErrorFactory;
-
-// TODO(zea): remove SupportsWeakPtr in favor of having all SyncableService
-// implementers provide a way of getting a weak pointer to themselves.
-// See crbug.com/100114.
-class SYNC_EXPORT SyncableService
- : public SyncChangeProcessor,
- public base::SupportsWeakPtr<SyncableService> {
- public:
- // A StartSyncFlare is useful when your SyncableService has a need for sync
- // to start ASAP, typically because a local change event has occurred but
- // MergeDataAndStartSyncing hasn't been called yet, meaning you don't have a
- // SyncChangeProcessor. The sync subsystem will respond soon after invoking
- // Run() on your flare by calling MergeDataAndStartSyncing. The ModelType
- // parameter is included so that the recieving end can track usage and timing
- // statistics, make optimizations or tradeoffs by type, etc.
- typedef base::Callback<void(ModelType)> StartSyncFlare;
-
- // Informs the service to begin syncing the specified synced datatype |type|.
- // The service should then merge |initial_sync_data| into it's local data,
- // calling |sync_processor|'s ProcessSyncChanges as necessary to reconcile the
- // two. After this, the SyncableService's local data should match the server
- // data, and the service should be ready to receive and process any further
- // SyncChange's as they occur.
- // Returns: a SyncMergeResult whose error field reflects whether an error
- // was encountered while merging the two models. The merge result
- // may also contain optional merge statistics.
- virtual SyncMergeResult MergeDataAndStartSyncing(
- ModelType type,
- const SyncDataList& initial_sync_data,
- scoped_ptr<SyncChangeProcessor> sync_processor,
- scoped_ptr<SyncErrorFactory> error_handler) = 0;
-
- // Stop syncing the specified type and reset state.
- virtual void StopSyncing(ModelType type) = 0;
-
- // SyncChangeProcessor interface.
- // Process a list of new SyncChanges and update the local data as necessary.
- // Returns: A default SyncError (IsSet() == false) if no errors were
- // encountered, and a filled SyncError (IsSet() == true)
- // otherwise.
- virtual SyncError ProcessSyncChanges(
- const tracked_objects::Location& from_here,
- const SyncChangeList& change_list) OVERRIDE = 0;
-
- protected:
- virtual ~SyncableService();
-};
-
-} // namespace syncer
-
-#endif // SYNC_API_SYNCABLE_SERVICE_H_
diff --git a/chromium/sync/api/time.h b/chromium/sync/api/time.h
deleted file mode 100644
index 5df108b8ea3..00000000000
--- a/chromium/sync/api/time.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Publicly accessible header for time-related sync functions.
-// See sync/util/time.h for implemenation.
-
-#ifndef SYNC_API_TIME_H_
-#define SYNC_API_TIME_H_
-
-#include "sync/util/time.h"
-
-#endif // SYNC_API_TIME_H_
diff --git a/chromium/sync/base/DEPS b/chromium/sync/base/DEPS
deleted file mode 100644
index 48e88750d4a..00000000000
--- a/chromium/sync/base/DEPS
+++ /dev/null
@@ -1,2 +0,0 @@
-include_rules = [
-]
diff --git a/chromium/sync/base/sync_export.h b/chromium/sync/base/sync_export.h
deleted file mode 100644
index 83c65a68396..00000000000
--- a/chromium/sync/base/sync_export.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNC_EXPORT_H_
-#define SYNC_SYNC_EXPORT_H_
-
-#if defined(COMPONENT_BUILD)
-#if defined(WIN32)
-
-#if defined(SYNC_IMPLEMENTATION)
-#define SYNC_EXPORT __declspec(dllexport)
-#define SYNC_EXPORT_PRIVATE __declspec(dllexport)
-#elif defined(SYNC_TEST)
-#define SYNC_EXPORT __declspec(dllimport)
-#define SYNC_EXPORT_PRIVATE __declspec(dllimport)
-#else
-#define SYNC_EXPORT __declspec(dllimport)
-#define SYNC_EXPORT_PRIVATE
-#endif // defined(SYNC_IMPLEMENTATION)
-
-#else // defined(WIN32)
-#if defined(SYNC_IMPLEMENTATION)
-#define SYNC_EXPORT __attribute__((visibility("default")))
-#define SYNC_EXPORT_PRIVATE __attribute__((visibility("default")))
-#elif defined(SYNC_TEST)
-#define SYNC_EXPORT
-#define SYNC_EXPORT_PRIVATE __attribute__((visibility("default")))
-#else
-#define SYNC_EXPORT
-#define SYNC_EXPORT_PRIVATE
-#endif // defined(SYNC_IMPLEMENTATION)
-#endif
-
-#else // defined(COMPONENT_BUILD)
-#define SYNC_EXPORT
-#define SYNC_EXPORT_PRIVATE
-#endif
-
-#endif // SYNC_SYNC_EXPORT_H_
diff --git a/chromium/sync/engine/DEPS b/chromium/sync/engine/DEPS
deleted file mode 100644
index b43d31c6cd0..00000000000
--- a/chromium/sync/engine/DEPS
+++ /dev/null
@@ -1,14 +0,0 @@
-include_rules = [
- "+sync/base",
- "+sync/internal_api/public/base",
- "+sync/internal_api/public/engine",
- "+sync/internal_api/public/sessions",
- "+sync/internal_api/public/test",
- "+sync/internal_api/public/util",
- "+sync/notifier",
- "+sync/protocol",
- "+sync/sessions",
- "+sync/syncable",
- "+sync/test",
- "+sync/util",
-]
diff --git a/chromium/sync/engine/all_status.cc b/chromium/sync/engine/all_status.cc
deleted file mode 100644
index e5f51248ad2..00000000000
--- a/chromium/sync/engine/all_status.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/all_status.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-#include "base/port.h"
-#include "sync/engine/net/server_connection_manager.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-
-AllStatus::AllStatus() {
- status_.notifications_enabled = false;
- status_.cryptographer_ready = false;
- status_.crypto_has_pending_keys = false;
-}
-
-AllStatus::~AllStatus() {
-}
-
-SyncStatus AllStatus::CreateBlankStatus() const {
- // Status is initialized with the previous status value. Variables
- // whose values accumulate (e.g. lifetime counters like updates_received)
- // are not to be cleared here.
- SyncStatus status = status_;
- status.encryption_conflicts = 0;
- status.hierarchy_conflicts = 0;
- status.server_conflicts = 0;
- status.committed_count = 0;
- status.updates_available = 0;
- return status;
-}
-
-SyncStatus AllStatus::CalcSyncing(const SyncEngineEvent &event) const {
- SyncStatus status = CreateBlankStatus();
- const sessions::SyncSessionSnapshot& snapshot = event.snapshot;
- status.encryption_conflicts = snapshot.num_encryption_conflicts();
- status.hierarchy_conflicts = snapshot.num_hierarchy_conflicts();
- status.server_conflicts = snapshot.num_server_conflicts();
- status.committed_count =
- snapshot.model_neutral_state().num_successful_commits;
-
- if (event.what_happened == SyncEngineEvent::SYNC_CYCLE_BEGIN) {
- status.syncing = true;
- } else if (event.what_happened == SyncEngineEvent::SYNC_CYCLE_ENDED) {
- status.syncing = false;
- }
-
- status.updates_available += snapshot.num_server_changes_remaining();
- status.sync_protocol_error =
- snapshot.model_neutral_state().sync_protocol_error;
-
- status.num_entries_by_type = snapshot.num_entries_by_type();
- status.num_to_delete_entries_by_type =
- snapshot.num_to_delete_entries_by_type();
-
- // Accumulate update count only once per session to avoid double-counting.
- if (event.what_happened == SyncEngineEvent::SYNC_CYCLE_ENDED) {
- status.updates_received +=
- snapshot.model_neutral_state().num_updates_downloaded_total;
- status.tombstone_updates_received +=
- snapshot.model_neutral_state().num_tombstone_updates_downloaded_total;
- status.reflected_updates_received +=
- snapshot.model_neutral_state().num_reflected_updates_downloaded_total;
- status.num_commits_total +=
- snapshot.model_neutral_state().num_successful_commits;
- status.num_local_overwrites_total +=
- snapshot.model_neutral_state().num_local_overwrites;
- status.num_server_overwrites_total +=
- snapshot.model_neutral_state().num_server_overwrites;
- if (snapshot.model_neutral_state().num_updates_downloaded_total == 0) {
- ++status.empty_get_updates;
- } else {
- ++status.nonempty_get_updates;
- }
- if (snapshot.model_neutral_state().num_successful_commits == 0) {
- ++status.sync_cycles_without_commits;
- } else {
- ++status.sync_cycles_with_commits;
- }
- if (snapshot.model_neutral_state().num_successful_commits == 0 &&
- snapshot.model_neutral_state().num_updates_downloaded_total == 0) {
- ++status.useless_sync_cycles;
- } else {
- ++status.useful_sync_cycles;
- }
- }
- return status;
-}
-
-void AllStatus::OnSyncEngineEvent(const SyncEngineEvent& event) {
- ScopedStatusLock lock(this);
- switch (event.what_happened) {
- case SyncEngineEvent::SYNC_CYCLE_BEGIN:
- case SyncEngineEvent::STATUS_CHANGED:
- case SyncEngineEvent::SYNC_CYCLE_ENDED:
- status_ = CalcSyncing(event);
- break;
- case SyncEngineEvent::STOP_SYNCING_PERMANENTLY:
- break;
- case SyncEngineEvent::ACTIONABLE_ERROR:
- status_ = CreateBlankStatus();
- status_.sync_protocol_error =
- event.snapshot.model_neutral_state().sync_protocol_error;
- break;
- case SyncEngineEvent::RETRY_TIME_CHANGED:
- status_.retry_time = event.retry_time;
- break;
- case SyncEngineEvent::THROTTLED_TYPES_CHANGED:
- status_.throttled_types = event.throttled_types;
- break;
- default:
- LOG(ERROR) << "Unrecognized Syncer Event: " << event.what_happened;
- break;
- }
-}
-
-SyncStatus AllStatus::status() const {
- base::AutoLock lock(mutex_);
- return status_;
-}
-
-void AllStatus::SetNotificationsEnabled(bool notifications_enabled) {
- ScopedStatusLock lock(this);
- status_.notifications_enabled = notifications_enabled;
-}
-
-void AllStatus::IncrementNotificationsReceived() {
- ScopedStatusLock lock(this);
- ++status_.notifications_received;
-}
-
-void AllStatus::SetEncryptedTypes(ModelTypeSet types) {
- ScopedStatusLock lock(this);
- status_.encrypted_types = types;
-}
-
-void AllStatus::SetCryptographerReady(bool ready) {
- ScopedStatusLock lock(this);
- status_.cryptographer_ready = ready;
-}
-
-void AllStatus::SetCryptoHasPendingKeys(bool has_pending_keys) {
- ScopedStatusLock lock(this);
- status_.crypto_has_pending_keys = has_pending_keys;
-}
-
-void AllStatus::SetPassphraseType(PassphraseType type) {
- ScopedStatusLock lock(this);
- status_.passphrase_type = type;
-}
-
-void AllStatus::SetHasKeystoreKey(bool has_keystore_key) {
- ScopedStatusLock lock(this);
- status_.has_keystore_key = has_keystore_key;
-}
-
-void AllStatus::SetKeystoreMigrationTime(const base::Time& migration_time) {
- ScopedStatusLock lock(this);
- status_.keystore_migration_time = migration_time;
-}
-
-void AllStatus::SetSyncId(const std::string& sync_id) {
- ScopedStatusLock lock(this);
- status_.sync_id = sync_id;
-}
-
-void AllStatus::SetInvalidatorClientId(
- const std::string& invalidator_client_id) {
- ScopedStatusLock lock(this);
- status_.invalidator_client_id = invalidator_client_id;
-}
-
-void AllStatus::IncrementNudgeCounter(NudgeSource source) {
- ScopedStatusLock lock(this);
- switch(source) {
- case NUDGE_SOURCE_LOCAL_REFRESH:
- status_.nudge_source_local_refresh++;
- return;
- case NUDGE_SOURCE_LOCAL:
- status_.nudge_source_local++;
- return;
- case NUDGE_SOURCE_NOTIFICATION:
- status_.nudge_source_notification++;
- return;
- case NUDGE_SOURCE_UNKNOWN:
- break;
- }
- // If we're here, the source is most likely
- // NUDGE_SOURCE_UNKNOWN. That shouldn't happen.
- NOTREACHED();
-}
-
-ScopedStatusLock::ScopedStatusLock(AllStatus* allstatus)
- : allstatus_(allstatus) {
- allstatus->mutex_.Acquire();
-}
-
-ScopedStatusLock::~ScopedStatusLock() {
- allstatus_->mutex_.Release();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/all_status.h b/chromium/sync/engine/all_status.h
deleted file mode 100644
index 8954eaeb99a..00000000000
--- a/chromium/sync/engine/all_status.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// The AllStatus object watches various sync engine components and aggregates
-// the status of all of them into one place.
-
-#ifndef SYNC_INTERNAL_API_ALL_STATUS_H_
-#define SYNC_INTERNAL_API_ALL_STATUS_H_
-
-#include <map>
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "sync/engine/sync_engine_event.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/sync_status.h"
-#include "sync/engine/nudge_source.h"
-
-namespace syncer {
-
-class ScopedStatusLock;
-struct ServerConnectionEvent;
-
-// This class collects data and uses it to update its internal state. It can
-// return a snapshot of this state as a SyncerStatus object.
-//
-// Most of this data ends up on the about:sync page. But the page is only
-// 'pinged' to update itself at the end of a sync cycle. A user could refresh
-// manually, but unless their timing is excellent it's unlikely that a user will
-// see any state in mid-sync cycle. We have no plans to change this. However,
-// we will continue to collect data and update state mid-sync-cycle in case we
-// need to debug slow or stuck sync cycles.
-class AllStatus : public SyncEngineEventListener {
- friend class ScopedStatusLock;
- public:
- AllStatus();
- virtual ~AllStatus();
-
- virtual void OnSyncEngineEvent(const SyncEngineEvent& event) OVERRIDE;
-
- SyncStatus status() const;
-
- void SetNotificationsEnabled(bool notifications_enabled);
-
- void IncrementNotifiableCommits();
-
- void IncrementNotificationsReceived();
-
- void SetEncryptedTypes(ModelTypeSet types);
- void SetCryptographerReady(bool ready);
- void SetCryptoHasPendingKeys(bool has_pending_keys);
- void SetPassphraseType(PassphraseType type);
- void SetHasKeystoreKey(bool has_keystore_key);
- void SetKeystoreMigrationTime(const base::Time& migration_time);
-
- void SetSyncId(const std::string& sync_id);
- void SetInvalidatorClientId(const std::string& invalidator_client_id);
-
- void IncrementNudgeCounter(NudgeSource source);
-
- protected:
- // Examines syncer to calculate syncing and the unsynced count,
- // and returns a Status with new values.
- SyncStatus CalcSyncing(const SyncEngineEvent& event) const;
- SyncStatus CreateBlankStatus() const;
-
- SyncStatus status_;
-
- mutable base::Lock mutex_; // Protects all data members.
- DISALLOW_COPY_AND_ASSIGN(AllStatus);
-};
-
-class ScopedStatusLock {
- public:
- explicit ScopedStatusLock(AllStatus* allstatus);
- ~ScopedStatusLock();
- protected:
- AllStatus* allstatus_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_ALL_STATUS_H_
diff --git a/chromium/sync/engine/apply_control_data_updates.cc b/chromium/sync/engine/apply_control_data_updates.cc
deleted file mode 100644
index e97741ed336..00000000000
--- a/chromium/sync/engine/apply_control_data_updates.cc
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/apply_control_data_updates.h"
-
-#include "base/metrics/histogram.h"
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/conflict_util.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/nigori_handler.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-using syncable::GET_BY_SERVER_TAG;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::SERVER_SPECIFICS;
-using syncable::SPECIFICS;
-using syncable::SYNCER;
-
-void ApplyControlDataUpdates(syncable::Directory* dir) {
- syncable::WriteTransaction trans(FROM_HERE, SYNCER, dir);
-
- std::vector<int64> handles;
- dir->GetUnappliedUpdateMetaHandles(
- &trans, ToFullModelTypeSet(ControlTypes()), &handles);
-
- // First, go through and manually apply any new top level datatype nodes (so
- // that we don't have to worry about hitting a CONFLICT_HIERARCHY with an
- // entry because we haven't applied its parent yet).
- // TODO(sync): if at some point we support control datatypes with actual
- // hierarchies we'll need to revisit this logic.
- ModelTypeSet control_types = ControlTypes();
- for (ModelTypeSet::Iterator iter = control_types.First(); iter.Good();
- iter.Inc()) {
- syncable::MutableEntry entry(&trans,
- syncable::GET_BY_SERVER_TAG,
- ModelTypeToRootTag(iter.Get()));
- if (!entry.good())
- continue;
- if (!entry.GetIsUnappliedUpdate())
- continue;
-
- ModelType type = entry.GetServerModelType();
- if (type == NIGORI) {
- // Nigori node applications never fail.
- ApplyNigoriUpdate(&trans,
- &entry,
- dir->GetCryptographer(&trans));
- } else {
- ApplyControlUpdate(&trans,
- &entry,
- dir->GetCryptographer(&trans));
- }
- }
-
- // Go through the rest of the unapplied control updates, skipping over any
- // top level folders.
- for (std::vector<int64>::const_iterator iter = handles.begin();
- iter != handles.end(); ++iter) {
- syncable::MutableEntry entry(&trans, syncable::GET_BY_HANDLE, *iter);
- CHECK(entry.good());
- ModelType type = entry.GetServerModelType();
- CHECK(ControlTypes().Has(type));
- if (!entry.GetUniqueServerTag().empty()) {
- // We should have already applied all top level control nodes.
- DCHECK(!entry.GetIsUnappliedUpdate());
- continue;
- }
-
- ApplyControlUpdate(&trans,
- &entry,
- dir->GetCryptographer(&trans));
- }
-}
-
-// Update the nigori handler with the server's nigori node.
-//
-// If we have a locally modified nigori node, we merge them manually. This
-// handles the case where two clients both set a different passphrase. The
-// second client to attempt to commit will go into a state of having pending
-// keys, unioned the set of encrypted types, and eventually re-encrypt
-// everything with the passphrase of the first client and commit the set of
-// merged encryption keys. Until the second client provides the pending
-// passphrase, the cryptographer will preserve the encryption keys based on the
-// local passphrase, while the nigori node will preserve the server encryption
-// keys.
-void ApplyNigoriUpdate(syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- Cryptographer* cryptographer) {
- DCHECK(entry->GetIsUnappliedUpdate());
-
- // We apply the nigori update regardless of whether there's a conflict or
- // not in order to preserve any new encrypted types or encryption keys.
- // TODO(zea): consider having this return a bool reflecting whether it was a
- // valid update or not, and in the case of invalid updates not overwrite the
- // local data.
- const sync_pb::NigoriSpecifics& nigori =
- entry->GetServerSpecifics().nigori();
- trans->directory()->GetNigoriHandler()->ApplyNigoriUpdate(nigori, trans);
-
- // Make sure any unsynced changes are properly encrypted as necessary.
- // We only perform this if the cryptographer is ready. If not, these are
- // re-encrypted at SetDecryptionPassphrase time (via ReEncryptEverything).
- // This logic covers the case where the nigori update marked new datatypes
- // for encryption, but didn't change the passphrase.
- if (cryptographer->is_ready()) {
- // Note that we don't bother to encrypt any data for which IS_UNSYNCED
- // == false here. The machine that turned on encryption should know about
- // and re-encrypt all synced data. It's possible it could get interrupted
- // during this process, but we currently reencrypt everything at startup
- // as well, so as soon as a client is restarted with this datatype marked
- // for encryption, all the data should be updated as necessary.
-
- // If this fails, something is wrong with the cryptographer, but there's
- // nothing we can do about it here.
- DVLOG(1) << "Received new nigori, encrypting unsynced changes.";
- syncable::ProcessUnsyncedChangesForEncryption(trans);
- }
-
- if (!entry->GetIsUnsynced()) { // Update only.
- UpdateLocalDataFromServerData(trans, entry);
- } else { // Conflict.
- const sync_pb::EntitySpecifics& server_specifics =
- entry->GetServerSpecifics();
- const sync_pb::NigoriSpecifics& server_nigori = server_specifics.nigori();
- const sync_pb::EntitySpecifics& local_specifics =
- entry->GetSpecifics();
- const sync_pb::NigoriSpecifics& local_nigori = local_specifics.nigori();
-
- // We initialize the new nigori with the server state, and will override
- // it as necessary below.
- sync_pb::EntitySpecifics new_specifics = entry->GetServerSpecifics();
- sync_pb::NigoriSpecifics* new_nigori = new_specifics.mutable_nigori();
-
- // If the cryptographer is not ready, another client set a new encryption
- // passphrase. If we had migrated locally, we will re-migrate when the
- // pending keys are provided. If we had set a new custom passphrase locally
- // the user will have another chance to set a custom passphrase later
- // (assuming they hadn't set a custom passphrase on the other client).
- // Therefore, we only attempt to merge the nigori nodes if the cryptographer
- // is ready.
- // Note: we only update the encryption keybag if we're sure that we aren't
- // invalidating the keystore_decryptor_token (i.e. we're either
- // not migrated or we copying over all local state).
- if (cryptographer->is_ready()) {
- if (local_nigori.has_passphrase_type() &&
- server_nigori.has_passphrase_type()) {
- // They're both migrated, preserve the local nigori if the passphrase
- // type is more conservative.
- if (server_nigori.passphrase_type() ==
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE &&
- local_nigori.passphrase_type() !=
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE) {
- DCHECK(local_nigori.passphrase_type() ==
- sync_pb::NigoriSpecifics::FROZEN_IMPLICIT_PASSPHRASE ||
- local_nigori.passphrase_type() ==
- sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- new_nigori->CopyFrom(local_nigori);
- cryptographer->GetKeys(new_nigori->mutable_encryption_keybag());
- }
- } else if (!local_nigori.has_passphrase_type() &&
- !server_nigori.has_passphrase_type()) {
- // Set the explicit passphrase based on the local state. If the server
- // had set an explict passphrase, we should have pending keys, so
- // should not reach this code.
- // Because neither side is migrated, we don't have to worry about the
- // keystore decryptor token.
- new_nigori->set_keybag_is_frozen(local_nigori.keybag_is_frozen());
- cryptographer->GetKeys(new_nigori->mutable_encryption_keybag());
- } else if (local_nigori.has_passphrase_type()) {
- // Local is migrated but server is not. Copy over the local migrated
- // data.
- new_nigori->CopyFrom(local_nigori);
- cryptographer->GetKeys(new_nigori->mutable_encryption_keybag());
- } // else leave the new nigori with the server state.
- }
-
- // Always update to the safest set of encrypted types.
- trans->directory()->GetNigoriHandler()->UpdateNigoriFromEncryptedTypes(
- new_nigori,
- trans);
-
- entry->PutSpecifics(new_specifics);
- DVLOG(1) << "Resolving simple conflict, merging nigori nodes: "
- << entry;
-
- conflict_util::OverwriteServerChanges(entry);
-
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- ConflictResolver::NIGORI_MERGE,
- ConflictResolver::CONFLICT_RESOLUTION_SIZE);
- }
-}
-
-void ApplyControlUpdate(syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- Cryptographer* cryptographer) {
- DCHECK_NE(entry->GetServerModelType(), NIGORI);
- DCHECK(entry->GetIsUnappliedUpdate());
- if (entry->GetIsUnsynced()) {
- // We just let the server win all conflicts with control types.
- DVLOG(1) << "Ignoring local changes for control update.";
- conflict_util::IgnoreLocalChanges(entry);
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- ConflictResolver::OVERWRITE_LOCAL,
- ConflictResolver::CONFLICT_RESOLUTION_SIZE);
- }
-
- UpdateAttemptResponse response = AttemptToUpdateEntry(
- trans, entry, cryptographer);
- DCHECK_EQ(SUCCESS, response);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/apply_control_data_updates.h b/chromium/sync/engine/apply_control_data_updates.h
deleted file mode 100644
index b825665ed16..00000000000
--- a/chromium/sync/engine/apply_control_data_updates.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_APPLY_CONTROL_DATA_UPDATES_H_
-#define SYNC_ENGINE_APPLY_CONTROL_DATA_UPDATES_H_
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class Cryptographer;
-
-namespace syncable {
-class Directory;
-class MutableEntry;
-class WriteTransaction;
-}
-
-SYNC_EXPORT_PRIVATE void ApplyControlDataUpdates(syncable::Directory* dir);
-void ApplyNigoriUpdate(syncable::WriteTransaction* trans,
- syncable::MutableEntry* const entry,
- Cryptographer* cryptographer);
-void ApplyControlUpdate(syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- Cryptographer* cryptographer);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_APPLY_CONTROL_DATA_UPDATES_H_
diff --git a/chromium/sync/engine/apply_control_data_updates_unittest.cc b/chromium/sync/engine/apply_control_data_updates_unittest.cc
deleted file mode 100644
index caacfbc1a11..00000000000
--- a/chromium/sync/engine/apply_control_data_updates_unittest.cc
+++ /dev/null
@@ -1,920 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/format_macros.h"
-#include "base/location.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/stringprintf.h"
-#include "sync/engine/apply_control_data_updates.h"
-#include "sync/engine/syncer.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/fake_sync_encryption_handler.h"
-#include "sync/util/cryptographer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using syncable::MutableEntry;
-using syncable::UNITTEST;
-using syncable::Id;
-
-class ApplyControlDataUpdatesTest : public ::testing::Test {
- public:
- protected:
- ApplyControlDataUpdatesTest() {}
- virtual ~ApplyControlDataUpdatesTest() {}
-
- virtual void SetUp() {
- dir_maker_.SetUp();
- entry_factory_.reset(new TestEntryFactory(directory()));
- }
-
- virtual void TearDown() {
- dir_maker_.TearDown();
- }
-
- syncable::Directory* directory() {
- return dir_maker_.directory();
- }
-
- TestIdFactory id_factory_;
- scoped_ptr<TestEntryFactory> entry_factory_;
- private:
- base::MessageLoop loop_; // Needed for directory init.
- TestDirectorySetterUpper dir_maker_;
-
- DISALLOW_COPY_AND_ASSIGN(ApplyControlDataUpdatesTest);
-};
-
-// Verify that applying a nigori node sets initial sync ended properly,
-// updates the set of encrypted types, and updates the cryptographer.
-TEST_F(ApplyControlDataUpdatesTest, NigoriUpdate) {
- // Storing the cryptographer separately is bad, but for this test we
- // know it's safe.
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types;
- encrypted_types.PutAll(SyncEncryptionHandler::SensitiveTypes());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(encrypted_types));
- }
-
- // Nigori node updates should update the Cryptographer.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- KeyParams params = {"localhost", "dummy", "foobar"};
- other_cryptographer.AddKey(params);
-
- sync_pb::EntitySpecifics specifics;
- sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
- other_cryptographer.GetKeys(nigori->mutable_encryption_keybag());
- nigori->set_encrypt_everything(true);
- entry_factory_->CreateUnappliedNewItem(
- ModelTypeToRootTag(NIGORI), specifics, true);
- EXPECT_FALSE(cryptographer->has_pending_keys());
-
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// Create some local unsynced and unencrypted data. Apply a nigori update that
-// turns on encryption for the unsynced data. Ensure we properly encrypt the
-// data as part of the nigori update. Apply another nigori update with no
-// changes. Ensure we ignore already-encrypted unsynced data and that nothing
-// breaks.
-TEST_F(ApplyControlDataUpdatesTest, EncryptUnsyncedChanges) {
- // Storing the cryptographer separately is bad, but for this test we
- // know it's safe.
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types;
- encrypted_types.PutAll(SyncEncryptionHandler::SensitiveTypes());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(encrypted_types));
-
- // With default encrypted_types, this should be true.
- EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
-
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_TRUE(handles.empty());
- }
-
- // Create unsynced bookmarks without encryption.
- // First item is a folder
- Id folder_id = id_factory_.NewLocalId();
- entry_factory_->CreateUnsyncedItem(folder_id, id_factory_.root(), "folder",
- true, BOOKMARKS, NULL);
- // Next five items are children of the folder
- size_t i;
- size_t batch_s = 5;
- for (i = 0; i < batch_s; ++i) {
- entry_factory_->CreateUnsyncedItem(id_factory_.NewLocalId(), folder_id,
- base::StringPrintf("Item %" PRIuS "", i),
- false, BOOKMARKS, NULL);
- }
- // Next five items are children of the root.
- for (; i < 2*batch_s; ++i) {
- entry_factory_->CreateUnsyncedItem(
- id_factory_.NewLocalId(), id_factory_.root(),
- base::StringPrintf("Item %" PRIuS "", i), false,
- BOOKMARKS, NULL);
- }
-
- KeyParams params = {"localhost", "dummy", "foobar"};
- cryptographer->AddKey(params);
- sync_pb::EntitySpecifics specifics;
- sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
- cryptographer->GetKeys(nigori->mutable_encryption_keybag());
- nigori->set_encrypt_everything(true);
- encrypted_types.Put(BOOKMARKS);
- entry_factory_->CreateUnappliedNewItem(
- ModelTypeToRootTag(NIGORI), specifics, true);
- EXPECT_FALSE(cryptographer->has_pending_keys());
- EXPECT_TRUE(cryptographer->is_ready());
-
- {
- // Ensure we have unsynced nodes that aren't properly encrypted.
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_FALSE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
-
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_EQ(2*batch_s+1, handles.size());
- }
-
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(cryptographer->has_pending_keys());
- EXPECT_TRUE(cryptographer->is_ready());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // If ProcessUnsyncedChangesForEncryption worked, all our unsynced changes
- // should be encrypted now.
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
-
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_EQ(2*batch_s+1, handles.size());
- }
-
- // Simulate another nigori update that doesn't change anything.
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, syncable::GET_BY_SERVER_TAG,
- ModelTypeToRootTag(NIGORI));
- ASSERT_TRUE(entry.good());
- entry.PutServerVersion(entry_factory_->GetNextRevision());
- entry.PutIsUnappliedUpdate(true);
- }
-
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(cryptographer->has_pending_keys());
- EXPECT_TRUE(cryptographer->is_ready());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // All our changes should still be encrypted.
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
-
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_EQ(2*batch_s+1, handles.size());
- }
-}
-
-// Create some local unsynced and unencrypted changes. Receive a new nigori
-// node enabling their encryption but also introducing pending keys. Ensure
-// we apply the update properly without encrypting the unsynced changes or
-// breaking.
-TEST_F(ApplyControlDataUpdatesTest, CannotEncryptUnsyncedChanges) {
- // Storing the cryptographer separately is bad, but for this test we
- // know it's safe.
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types;
- encrypted_types.PutAll(SyncEncryptionHandler::SensitiveTypes());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(encrypted_types));
-
- // With default encrypted_types, this should be true.
- EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
-
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_TRUE(handles.empty());
- }
-
- // Create unsynced bookmarks without encryption.
- // First item is a folder
- Id folder_id = id_factory_.NewLocalId();
- entry_factory_->CreateUnsyncedItem(
- folder_id, id_factory_.root(), "folder", true,
- BOOKMARKS, NULL);
- // Next five items are children of the folder
- size_t i;
- size_t batch_s = 5;
- for (i = 0; i < batch_s; ++i) {
- entry_factory_->CreateUnsyncedItem(id_factory_.NewLocalId(), folder_id,
- base::StringPrintf("Item %" PRIuS "", i),
- false, BOOKMARKS, NULL);
- }
- // Next five items are children of the root.
- for (; i < 2*batch_s; ++i) {
- entry_factory_->CreateUnsyncedItem(
- id_factory_.NewLocalId(), id_factory_.root(),
- base::StringPrintf("Item %" PRIuS "", i), false,
- BOOKMARKS, NULL);
- }
-
- // We encrypt with new keys, triggering the local cryptographer to be unready
- // and unable to decrypt data (once updated).
- Cryptographer other_cryptographer(cryptographer->encryptor());
- KeyParams params = {"localhost", "dummy", "foobar"};
- other_cryptographer.AddKey(params);
- sync_pb::EntitySpecifics specifics;
- sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
- other_cryptographer.GetKeys(nigori->mutable_encryption_keybag());
- nigori->set_encrypt_everything(true);
- encrypted_types.Put(BOOKMARKS);
- entry_factory_->CreateUnappliedNewItem(
- ModelTypeToRootTag(NIGORI), specifics, true);
- EXPECT_FALSE(cryptographer->has_pending_keys());
-
- {
- // Ensure we have unsynced nodes that aren't properly encrypted.
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_FALSE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_EQ(2*batch_s+1, handles.size());
- }
-
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // Since we have pending keys, we would have failed to encrypt, but the
- // cryptographer should be updated.
- EXPECT_FALSE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- EXPECT_FALSE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->has_pending_keys());
-
- Syncer::UnsyncedMetaHandles handles;
- GetUnsyncedEntries(&trans, &handles);
- EXPECT_EQ(2*batch_s+1, handles.size());
- }
-}
-
-// Verify we handle a nigori node conflict by merging encryption keys and
-// types, but preserve the custom passphrase state of the server.
-// Initial sync ended should be set.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictPendingKeysServerEncryptEverythingCustom) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams other_params = {"localhost", "dummy", "foobar"};
- KeyParams local_params = {"localhost", "dummy", "local"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up a temporary cryptographer to generate new keys with.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- other_cryptographer.AddKey(other_params);
-
- // Create server specifics with pending keys, new encrypted types,
- // and a custom passphrase (unmigrated).
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- other_cryptographer.GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(true);
- server_nigori->set_keybag_is_frozen(true);
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Initialize the local cryptographer with the local keys.
- cryptographer->AddKey(local_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with the local encryption keys and default encrypted
- // types.
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(false);
- local_nigori->set_keybag_is_frozen(true);
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_FALSE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->is_initialized());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- EXPECT_TRUE(other_cryptographer.CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encrypt_everything());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// Verify we handle a nigori node conflict by merging encryption keys and
-// types, but preserve the custom passphrase state of the server.
-// Initial sync ended should be set.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictPendingKeysLocalEncryptEverythingCustom) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams other_params = {"localhost", "dummy", "foobar"};
- KeyParams local_params = {"localhost", "dummy", "local"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up a temporary cryptographer to generate new keys with.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- other_cryptographer.AddKey(other_params);
-
- // Create server specifics with pending keys, new encrypted types,
- // and a custom passphrase (unmigrated).
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- other_cryptographer.GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(false);
- server_nigori->set_keybag_is_frozen(false);
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Initialize the local cryptographer with the local keys.
- cryptographer->AddKey(local_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with the local encryption keys and default encrypted
- // types.
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(true);
- local_nigori->set_keybag_is_frozen(true);
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_FALSE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->is_initialized());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- EXPECT_TRUE(other_cryptographer.CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_FALSE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encrypt_everything());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// If the conflicting nigori has a subset of the local keys, the conflict
-// resolution should preserve the full local keys. Initial sync ended should be
-// set.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictOldKeys) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams old_params = {"localhost", "dummy", "old"};
- KeyParams new_params = {"localhost", "dummy", "new"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up the cryptographer with old keys
- cryptographer->AddKey(old_params);
-
- // Create server specifics with old keys and new encrypted types.
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- cryptographer->GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(true);
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Add the new keys to the cryptogrpaher
- cryptographer->AddKey(new_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with the superset of keys.
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(false);
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_FALSE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encrypt_everything());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// If both nigoris are migrated, but we also set a custom passphrase locally,
-// the local nigori should be preserved.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictBothMigratedLocalCustom) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams old_params = {"localhost", "dummy", "old"};
- KeyParams new_params = {"localhost", "dummy", "new"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up the cryptographer with new keys
- Cryptographer other_cryptographer(cryptographer->encryptor());
- other_cryptographer.AddKey(old_params);
-
- // Create server specifics with a migrated keystore passphrase type.
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- other_cryptographer.GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(false);
- server_nigori->set_keybag_is_frozen(true);
- server_nigori->set_passphrase_type(
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- server_nigori->mutable_keystore_decryptor_token();
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Add the new keys to the cryptographer.
- cryptographer->AddKey(old_params);
- cryptographer->AddKey(new_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with a migrated custom passphrase type
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(true);
- local_nigori->set_keybag_is_frozen(true);
- local_nigori->set_passphrase_type(
- sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encrypt_everything());
- EXPECT_EQ(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE,
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().passphrase_type());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// If both nigoris are migrated, but a custom passphrase with a new key was
-// set remotely, the remote nigori should be preserved.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictBothMigratedServerCustom) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams old_params = {"localhost", "dummy", "old"};
- KeyParams new_params = {"localhost", "dummy", "new"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up the cryptographer with both new keys and old keys.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- other_cryptographer.AddKey(old_params);
- other_cryptographer.AddKey(new_params);
-
- // Create server specifics with a migrated custom passphrase type.
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- other_cryptographer.GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(true);
- server_nigori->set_keybag_is_frozen(true);
- server_nigori->set_passphrase_type(
- sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Add the old keys to the cryptographer.
- cryptographer->AddKey(old_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with a migrated keystore passphrase type
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(false);
- local_nigori->set_keybag_is_frozen(true);
- local_nigori->set_passphrase_type(
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- server_nigori->mutable_keystore_decryptor_token();
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_TRUE(cryptographer->is_initialized());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- EXPECT_TRUE(other_cryptographer.CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encrypt_everything());
- EXPECT_EQ(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE,
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().passphrase_type());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// If the local nigori is migrated but the server is not, preserve the local
-// nigori.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictLocalMigrated) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams old_params = {"localhost", "dummy", "old"};
- KeyParams new_params = {"localhost", "dummy", "new"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up the cryptographer with both new keys and old keys.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- other_cryptographer.AddKey(old_params);
-
- // Create server specifics with an unmigrated implicit passphrase type.
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- other_cryptographer.GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(true);
- server_nigori->set_keybag_is_frozen(false);
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Add the old keys to the cryptographer.
- cryptographer->AddKey(old_params);
- cryptographer->AddKey(new_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with a migrated custom passphrase type
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(true);
- local_nigori->set_keybag_is_frozen(true);
- local_nigori->set_passphrase_type(
- sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encrypt_everything());
- EXPECT_EQ(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE,
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().passphrase_type());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
- .Equals(ModelTypeSet::All()));
- }
-}
-
-// If the server nigori is migrated but the local is not, preserve the server
-// nigori.
-TEST_F(ApplyControlDataUpdatesTest,
- NigoriConflictServerMigrated) {
- Cryptographer* cryptographer;
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- KeyParams old_params = {"localhost", "dummy", "old"};
- KeyParams new_params = {"localhost", "dummy", "new"};
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- EXPECT_TRUE(encrypted_types.Equals(
- directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)));
- }
-
- // Set up the cryptographer with both new keys and old keys.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- other_cryptographer.AddKey(old_params);
-
- // Create server specifics with an migrated keystore passphrase type.
- sync_pb::EntitySpecifics server_specifics;
- sync_pb::NigoriSpecifics* server_nigori = server_specifics.mutable_nigori();
- other_cryptographer.GetKeys(server_nigori->mutable_encryption_keybag());
- server_nigori->set_encrypt_everything(false);
- server_nigori->set_keybag_is_frozen(true);
- server_nigori->set_passphrase_type(
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- server_nigori->mutable_keystore_decryptor_token();
- int64 nigori_handle =
- entry_factory_->CreateUnappliedNewItem(kNigoriTag,
- server_specifics,
- true);
-
- // Add the old keys to the cryptographer.
- cryptographer->AddKey(old_params);
- cryptographer->AddKey(new_params);
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Set up a local nigori with a migrated custom passphrase type
- sync_pb::EntitySpecifics local_specifics;
- sync_pb::NigoriSpecifics* local_nigori = local_specifics.mutable_nigori();
- cryptographer->GetKeys(local_nigori->mutable_encryption_keybag());
- local_nigori->set_encrypt_everything(false);
- local_nigori->set_keybag_is_frozen(false);
- ASSERT_TRUE(entry_factory_->SetLocalSpecificsForItem(
- nigori_handle, local_specifics));
- // Apply the update locally so that UpdateFromEncryptedTypes knows what state
- // to use.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- directory()->GetNigoriHandler()->ApplyNigoriUpdate(
- *local_nigori,
- &trans);
- }
-
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_TRUE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
- ApplyControlDataUpdates(directory());
- EXPECT_TRUE(entry_factory_->GetIsUnsyncedForItem(nigori_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(nigori_handle));
-
- EXPECT_TRUE(cryptographer->is_ready());
- // Note: we didn't overwrite the encryption keybag with the local keys. The
- // sync encryption handler will do that when it detects that the new
- // keybag is out of date (and update the keystore bootstrap if necessary).
- EXPECT_FALSE(cryptographer->CanDecryptUsingDefaultKey(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_TRUE(cryptographer->CanDecrypt(
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().encryption_keybag()));
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().keybag_is_frozen());
- EXPECT_TRUE(entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().has_keystore_decryptor_token());
- EXPECT_EQ(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE,
- entry_factory_->GetLocalSpecificsForItem(nigori_handle).
- nigori().passphrase_type());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- }
-}
-
-// Check that we can apply a simple control datatype node successfully.
-TEST_F(ApplyControlDataUpdatesTest, ControlApply) {
- std::string experiment_id = "experiment";
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_experiments()->mutable_keystore_encryption()->
- set_enabled(true);
- int64 experiment_handle = entry_factory_->CreateUnappliedNewItem(
- experiment_id, specifics, false);
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
- EXPECT_TRUE(
- entry_factory_->GetLocalSpecificsForItem(experiment_handle).
- experiments().keystore_encryption().enabled());
-}
-
-// Verify that we apply top level folders before their children.
-TEST_F(ApplyControlDataUpdatesTest, ControlApplyParentBeforeChild) {
- std::string parent_id = "parent";
- std::string experiment_id = "experiment";
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_experiments()->mutable_keystore_encryption()->
- set_enabled(true);
- int64 experiment_handle = entry_factory_->CreateUnappliedNewItemWithParent(
- experiment_id, specifics, parent_id);
- int64 parent_handle = entry_factory_->CreateUnappliedNewItem(
- parent_id, specifics, true);
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(parent_handle));
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
- EXPECT_TRUE(
- entry_factory_->GetLocalSpecificsForItem(experiment_handle).
- experiments().keystore_encryption().enabled());
-}
-
-// Verify that we handle control datatype conflicts by preserving the server
-// data.
-TEST_F(ApplyControlDataUpdatesTest, ControlConflict) {
- std::string experiment_id = "experiment";
- sync_pb::EntitySpecifics local_specifics, server_specifics;
- server_specifics.mutable_experiments()->mutable_keystore_encryption()->
- set_enabled(true);
- local_specifics.mutable_experiments()->mutable_keystore_encryption()->
- set_enabled(false);
- int64 experiment_handle = entry_factory_->CreateSyncedItem(
- experiment_id, EXPERIMENTS, false);
- entry_factory_->SetServerSpecificsForItem(experiment_handle,
- server_specifics);
- entry_factory_->SetLocalSpecificsForItem(experiment_handle,
- local_specifics);
- ApplyControlDataUpdates(directory());
-
- EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
- EXPECT_TRUE(
- entry_factory_->GetLocalSpecificsForItem(experiment_handle).
- experiments().keystore_encryption().enabled());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/backoff_delay_provider.cc b/chromium/sync/engine/backoff_delay_provider.cc
deleted file mode 100644
index f8e2750876f..00000000000
--- a/chromium/sync/engine/backoff_delay_provider.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/backoff_delay_provider.h"
-
-#include "base/rand_util.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-#include "sync/internal_api/public/sessions/model_neutral_state.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-
-using base::TimeDelta;
-
-namespace syncer {
-
-// static
-BackoffDelayProvider* BackoffDelayProvider::FromDefaults() {
- return new BackoffDelayProvider(
- TimeDelta::FromSeconds(kInitialBackoffRetrySeconds),
- TimeDelta::FromSeconds(kInitialBackoffImmediateRetrySeconds));
-}
-
-// static
-BackoffDelayProvider* BackoffDelayProvider::WithShortInitialRetryOverride() {
- return new BackoffDelayProvider(
- TimeDelta::FromSeconds(kInitialBackoffShortRetrySeconds),
- TimeDelta::FromSeconds(kInitialBackoffImmediateRetrySeconds));
-}
-
-BackoffDelayProvider::BackoffDelayProvider(
- const base::TimeDelta& default_initial_backoff,
- const base::TimeDelta& short_initial_backoff)
- : default_initial_backoff_(default_initial_backoff),
- short_initial_backoff_(short_initial_backoff) {
-}
-
-BackoffDelayProvider::~BackoffDelayProvider() {}
-
-TimeDelta BackoffDelayProvider::GetDelay(const base::TimeDelta& last_delay) {
- if (last_delay.InSeconds() >= kMaxBackoffSeconds)
- return TimeDelta::FromSeconds(kMaxBackoffSeconds);
-
- // This calculates approx. base_delay_seconds * 2 +/- base_delay_seconds / 2
- int64 backoff_s =
- std::max(static_cast<int64>(1),
- last_delay.InSeconds() * kBackoffRandomizationFactor);
-
- // Flip a coin to randomize backoff interval by +/- 50%.
- int rand_sign = base::RandInt(0, 1) * 2 - 1;
-
- // Truncation is adequate for rounding here.
- backoff_s = backoff_s +
- (rand_sign * (last_delay.InSeconds() / kBackoffRandomizationFactor));
-
- // Cap the backoff interval.
- backoff_s = std::max(static_cast<int64>(1),
- std::min(backoff_s, kMaxBackoffSeconds));
-
- return TimeDelta::FromSeconds(backoff_s);
-}
-
-TimeDelta BackoffDelayProvider::GetInitialDelay(
- const sessions::ModelNeutralState& state) const {
- // NETWORK_CONNECTION_UNAVAILABLE implies we did not even manage to hit the
- // wire; the failure occurred locally. Note that if commit_result is *not*
- // UNSET, this implies download_updates_result succeeded. Also note that
- // last_get_key_result is coupled to last_download_updates_result in that
- // they are part of the same GetUpdates request, so we only check if
- // the download request is CONNECTION_UNAVAILABLE.
- //
- // TODO(tim): Should we treat NETWORK_IO_ERROR similarly? It's different
- // from CONNECTION_UNAVAILABLE in that a request may well have succeeded
- // in contacting the server (e.g we got a 200 back), but we failed
- // trying to parse the response (actual content length != HTTP response
- // header content length value). For now since we're considering
- // merging this code to branches and I haven't audited all the
- // NETWORK_IO_ERROR cases carefully, I'm going to target the fix
- // very tightly (see bug chromium-os:35073). DIRECTORY_LOOKUP_FAILED is
- // another example of something that shouldn't backoff, though the
- // scheduler should probably be handling these cases differently. See
- // the TODO(rlarocque) in ScheduleNextSync.
- if (state.commit_result == NETWORK_CONNECTION_UNAVAILABLE ||
- state.last_download_updates_result == NETWORK_CONNECTION_UNAVAILABLE) {
- return short_initial_backoff_;
- }
-
- if (SyncerErrorIsError(state.last_get_key_result))
- return default_initial_backoff_;
-
- // Note: If we received a MIGRATION_DONE on download updates, then commit
- // should not have taken place. Moreover, if we receive a MIGRATION_DONE
- // on commit, it means that download updates succeeded. Therefore, we only
- // need to check if either code is equal to SERVER_RETURN_MIGRATION_DONE,
- // and not if there were any more serious errors requiring the long retry.
- if (state.last_download_updates_result == SERVER_RETURN_MIGRATION_DONE ||
- state.commit_result == SERVER_RETURN_MIGRATION_DONE) {
- return short_initial_backoff_;
- }
-
- // When the server tells us we have a conflict, then we should download the
- // latest updates so we can see the conflict ourselves, resolve it locally,
- // then try again to commit. Running another sync cycle will do all these
- // things. There's no need to back off, we can do this immediately.
- //
- // TODO(sync): We shouldn't need to handle this in BackoffDelayProvider.
- // There should be a way to deal with protocol errors before we get to this
- // point.
- if (state.commit_result == SERVER_RETURN_CONFLICT) {
- return short_initial_backoff_;
- }
-
- return default_initial_backoff_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/backoff_delay_provider.h b/chromium/sync/engine/backoff_delay_provider.h
deleted file mode 100644
index 892e6e89c79..00000000000
--- a/chromium/sync/engine/backoff_delay_provider.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_BACKOFF_DELAY_PROVIDER_H_
-#define SYNC_ENGINE_BACKOFF_DELAY_PROVIDER_H_
-
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-namespace sessions {
-struct ModelNeutralState;
-}
-
-// A component used to get time delays associated with exponential backoff.
-class SYNC_EXPORT_PRIVATE BackoffDelayProvider {
- public:
- // Factory function to create a standard BackoffDelayProvider.
- static BackoffDelayProvider* FromDefaults();
-
- // Similar to above, but causes sync to retry very quickly (see
- // polling_constants.h) when it encounters an error before exponential
- // backoff.
- //
- // *** NOTE *** This should only be used if kSyncShortInitialRetryOverride
- // was passed to command line.
- static BackoffDelayProvider* WithShortInitialRetryOverride();
-
- virtual ~BackoffDelayProvider();
-
- // DDOS avoidance function. Calculates how long we should wait before trying
- // again after a failed sync attempt, where the last delay was |base_delay|.
- // TODO(tim): Look at URLRequestThrottlerEntryInterface.
- virtual base::TimeDelta GetDelay(const base::TimeDelta& last_delay);
-
- // Helper to calculate the initial value for exponential backoff.
- // See possible values and comments in polling_constants.h.
- virtual base::TimeDelta GetInitialDelay(
- const sessions::ModelNeutralState& state) const;
-
- protected:
- BackoffDelayProvider(const base::TimeDelta& default_initial_backoff,
- const base::TimeDelta& short_initial_backoff);
-
- private:
- const base::TimeDelta default_initial_backoff_;
- const base::TimeDelta short_initial_backoff_;
-
- DISALLOW_COPY_AND_ASSIGN(BackoffDelayProvider);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_BACKOFF_DELAY_PROVIDER_H_
diff --git a/chromium/sync/engine/backoff_delay_provider_unittest.cc b/chromium/sync/engine/backoff_delay_provider_unittest.cc
deleted file mode 100644
index 1bf17cbd5f5..00000000000
--- a/chromium/sync/engine/backoff_delay_provider_unittest.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/backoff_delay_provider.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-#include "sync/internal_api/public/sessions/model_neutral_state.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeDelta;
-
-namespace syncer {
-
-class BackoffDelayProviderTest : public testing::Test {};
-
-TEST_F(BackoffDelayProviderTest, GetRecommendedDelay) {
- scoped_ptr<BackoffDelayProvider> delay(BackoffDelayProvider::FromDefaults());
- EXPECT_EQ(TimeDelta::FromSeconds(1),
- delay->GetDelay(TimeDelta::FromSeconds(0)));
- EXPECT_LE(TimeDelta::FromSeconds(1),
- delay->GetDelay(TimeDelta::FromSeconds(1)));
- EXPECT_LE(TimeDelta::FromSeconds(50),
- delay->GetDelay(TimeDelta::FromSeconds(50)));
- EXPECT_LE(TimeDelta::FromSeconds(10),
- delay->GetDelay(TimeDelta::FromSeconds(10)));
- EXPECT_EQ(TimeDelta::FromSeconds(kMaxBackoffSeconds),
- delay->GetDelay(TimeDelta::FromSeconds(kMaxBackoffSeconds)));
- EXPECT_EQ(TimeDelta::FromSeconds(kMaxBackoffSeconds),
- delay->GetDelay(TimeDelta::FromSeconds(kMaxBackoffSeconds + 1)));
-}
-
-TEST_F(BackoffDelayProviderTest, GetInitialDelay) {
- scoped_ptr<BackoffDelayProvider> delay(BackoffDelayProvider::FromDefaults());
- sessions::ModelNeutralState state;
- state.last_get_key_result = SYNC_SERVER_ERROR;
- EXPECT_EQ(kInitialBackoffRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_get_key_result = UNSET;
- state.last_download_updates_result = SERVER_RETURN_MIGRATION_DONE;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = NETWORK_CONNECTION_UNAVAILABLE;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = SERVER_RETURN_TRANSIENT_ERROR;
- EXPECT_EQ(kInitialBackoffRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = SERVER_RESPONSE_VALIDATION_FAILED;
- EXPECT_EQ(kInitialBackoffRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = SYNCER_OK;
- // Note that updating credentials triggers a canary job, trumping
- // the initial delay, but in theory we still expect this function to treat
- // it like any other error in the system (except migration).
- state.commit_result = SERVER_RETURN_INVALID_CREDENTIAL;
- EXPECT_EQ(kInitialBackoffRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.commit_result = SERVER_RETURN_MIGRATION_DONE;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.commit_result = NETWORK_CONNECTION_UNAVAILABLE;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.commit_result = SERVER_RETURN_CONFLICT;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-}
-
-TEST_F(BackoffDelayProviderTest, GetInitialDelayWithOverride) {
- scoped_ptr<BackoffDelayProvider> delay(
- BackoffDelayProvider::WithShortInitialRetryOverride());
- sessions::ModelNeutralState state;
- state.last_get_key_result = SYNC_SERVER_ERROR;
- EXPECT_EQ(kInitialBackoffShortRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_get_key_result = UNSET;
- state.last_download_updates_result = SERVER_RETURN_MIGRATION_DONE;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = SERVER_RETURN_TRANSIENT_ERROR;
- EXPECT_EQ(kInitialBackoffShortRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = SERVER_RESPONSE_VALIDATION_FAILED;
- EXPECT_EQ(kInitialBackoffShortRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.last_download_updates_result = SYNCER_OK;
- // Note that updating credentials triggers a canary job, trumping
- // the initial delay, but in theory we still expect this function to treat
- // it like any other error in the system (except migration).
- state.commit_result = SERVER_RETURN_INVALID_CREDENTIAL;
- EXPECT_EQ(kInitialBackoffShortRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.commit_result = SERVER_RETURN_MIGRATION_DONE;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-
- state.commit_result = SERVER_RETURN_CONFLICT;
- EXPECT_EQ(kInitialBackoffImmediateRetrySeconds,
- delay->GetInitialDelay(state).InSeconds());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/commit.cc b/chromium/sync/engine/commit.cc
deleted file mode 100644
index a8db1a41ee3..00000000000
--- a/chromium/sync/engine/commit.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/commit.h"
-
-#include "base/debug/trace_event.h"
-#include "sync/engine/commit_util.h"
-#include "sync/engine/sync_directory_commit_contribution.h"
-#include "sync/engine/syncer.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/sessions/sync_session.h"
-
-namespace syncer {
-
-Commit::Commit(
- const std::map<ModelType, SyncDirectoryCommitContribution*>& contributions,
- const sync_pb::ClientToServerMessage& message,
- ExtensionsActivity::Records extensions_activity_buffer)
- : contributions_(contributions),
- deleter_(&contributions_),
- message_(message),
- extensions_activity_buffer_(extensions_activity_buffer),
- cleaned_up_(false) {
-}
-
-Commit::~Commit() {
- DCHECK(cleaned_up_);
-}
-
-Commit* Commit::Init(
- ModelTypeSet requested_types,
- size_t max_entries,
- const std::string& account_name,
- const std::string& cache_guid,
- CommitContributorMap* contributor_map,
- ExtensionsActivity* extensions_activity) {
- // Gather per-type contributions.
- ContributionMap contributions;
- size_t num_entries = 0;
- for (ModelTypeSet::Iterator it = requested_types.First();
- it.Good(); it.Inc()) {
- CommitContributorMap::iterator cm_it = contributor_map->find(it.Get());
- if (cm_it == contributor_map->end()) {
- NOTREACHED()
- << "Could not find requested type " << ModelTypeToString(it.Get())
- << " in contributor map.";
- continue;
- }
- size_t spaces_remaining = max_entries - num_entries;
- SyncDirectoryCommitContribution* contribution =
- cm_it->second->GetContribution(spaces_remaining);
- if (contribution) {
- num_entries += contribution->GetNumEntries();
- contributions.insert(std::make_pair(it.Get(), contribution));
- }
- if (num_entries == max_entries) {
- break; // No point in continuting to iterate in this case.
- }
- }
-
- // Give up if no one had anything to commit.
- if (contributions.empty())
- return NULL;
-
- sync_pb::ClientToServerMessage message;
- message.set_message_contents(sync_pb::ClientToServerMessage::COMMIT);
- message.set_share(account_name);
-
- sync_pb::CommitMessage* commit_message = message.mutable_commit();
- commit_message->set_cache_guid(cache_guid);
-
- // Set extensions activity if bookmark commits are present.
- ExtensionsActivity::Records extensions_activity_buffer;
- ContributionMap::iterator it = contributions.find(syncer::BOOKMARKS);
- if (it != contributions.end() && it->second->GetNumEntries() != 0) {
- commit_util::AddExtensionsActivityToMessage(
- extensions_activity,
- &extensions_activity_buffer,
- commit_message);
- }
-
- // Set the client config params.
- ModelTypeSet enabled_types;
- for (CommitContributorMap::iterator it = contributor_map->begin();
- it != contributor_map->end(); ++it) {
- enabled_types.Put(it->first);
- }
- commit_util::AddClientConfigParamsToMessage(enabled_types,
- commit_message);
-
- // Finally, serialize all our contributions.
- for (std::map<ModelType, SyncDirectoryCommitContribution*>::iterator it =
- contributions.begin(); it != contributions.end(); ++it) {
- it->second->AddToCommitMessage(&message);
- }
-
- // If we made it this far, then we've successfully prepared a commit message.
- return new Commit(contributions, message, extensions_activity_buffer);
-}
-
-SyncerError Commit::PostAndProcessResponse(
- sessions::SyncSession* session,
- sessions::StatusController* status,
- ExtensionsActivity* extensions_activity) {
- ModelTypeSet request_types;
- for (ContributionMap::const_iterator it = contributions_.begin();
- it != contributions_.end(); ++it) {
- request_types.Put(it->first);
- }
- session->mutable_status_controller()->set_commit_request_types(request_types);
-
- if (session->context()->debug_info_getter()) {
- sync_pb::DebugInfo* debug_info = message_.mutable_debug_info();
- session->context()->debug_info_getter()->GetDebugInfo(debug_info);
- }
-
- DVLOG(1) << "Sending commit message.";
- TRACE_EVENT_BEGIN0("sync", "PostCommit");
- const SyncerError post_result = SyncerProtoUtil::PostClientToServerMessage(
- &message_, &response_, session);
- TRACE_EVENT_END0("sync", "PostCommit");
-
- if (post_result != SYNCER_OK) {
- LOG(WARNING) << "Post commit failed";
- return post_result;
- }
-
- if (!response_.has_commit()) {
- LOG(WARNING) << "Commit response has no commit body!";
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
-
- size_t message_entries = message_.commit().entries_size();
- size_t response_entries = response_.commit().entryresponse_size();
- if (message_entries != response_entries) {
- LOG(ERROR)
- << "Commit response has wrong number of entries! "
- << "Expected: " << message_entries << ", "
- << "Got: " << response_entries;
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
-
- if (session->context()->debug_info_getter()) {
- // Clear debug info now that we have successfully sent it to the server.
- DVLOG(1) << "Clearing client debug info.";
- session->context()->debug_info_getter()->ClearDebugInfo();
- }
-
- // Let the contributors process the responses to each of their requests.
- SyncerError processing_result = SYNCER_OK;
- for (std::map<ModelType, SyncDirectoryCommitContribution*>::iterator it =
- contributions_.begin(); it != contributions_.end(); ++it) {
- TRACE_EVENT1("sync", "ProcessCommitResponse",
- "type", ModelTypeToString(it->first));
- SyncerError type_result =
- it->second->ProcessCommitResponse(response_, status);
- if (processing_result == SYNCER_OK && type_result != SYNCER_OK) {
- processing_result = type_result;
- }
- }
-
- // Handle bookmarks' special extensions activity stats.
- if (session->status_controller().
- model_neutral_state().num_successful_bookmark_commits == 0) {
- extensions_activity->PutRecords(extensions_activity_buffer_);
- }
-
- return processing_result;
-}
-
-void Commit::CleanUp() {
- for (ContributionMap::iterator it = contributions_.begin();
- it != contributions_.end(); ++it) {
- it->second->CleanUp();
- }
- cleaned_up_ = true;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/commit.h b/chromium/sync/engine/commit.h
deleted file mode 100644
index 4750971bc7c..00000000000
--- a/chromium/sync/engine/commit.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_COMMIT_H_
-#define SYNC_ENGINE_COMMIT_H_
-
-#include <map>
-
-#include "base/stl_util.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/sync_directory_commit_contribution.h"
-#include "sync/engine/sync_directory_commit_contributor.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/util/extensions_activity.h"
-
-namespace syncer {
-
-namespace sessions {
-class StatusController;
-class SyncSession;
-}
-
-class Syncer;
-
-// This class wraps the actions related to building and executing a single
-// commit operation.
-//
-// This class' most important responsibility is to manage the ContributionsMap.
-// This class serves as a container for those objects. Although it would have
-// been acceptable to let this class be a dumb container object, it turns out
-// that there was no other convenient place to put the Init() and
-// PostAndProcessCommitResponse() functions. So they ended up here.
-class SYNC_EXPORT_PRIVATE Commit {
- public:
- Commit(
- const std::map<ModelType, SyncDirectoryCommitContribution*>&
- contributions,
- const sync_pb::ClientToServerMessage& message,
- ExtensionsActivity::Records extensions_activity_buffer);
-
- // This destructor will DCHECK if CleanUp() has not been called.
- ~Commit();
-
- static Commit* Init(
- ModelTypeSet requested_types,
- size_t max_entries,
- const std::string& account_name,
- const std::string& cache_guid,
- CommitContributorMap* contributor_map,
- ExtensionsActivity* extensions_activity);
-
- SyncerError PostAndProcessResponse(
- sessions::SyncSession* session,
- sessions::StatusController* status,
- ExtensionsActivity* extensions_activity);
-
- // Cleans up state associated with this commit. Must be called before the
- // destructor.
- void CleanUp();
-
- private:
- typedef std::map<ModelType, SyncDirectoryCommitContribution*> ContributionMap;
-
- ContributionMap contributions_;
- STLValueDeleter<ContributionMap> deleter_;
-
- sync_pb::ClientToServerMessage message_;
- sync_pb::ClientToServerResponse response_;
- ExtensionsActivity::Records extensions_activity_buffer_;
-
- // Debug only flag used to indicate if it's safe to destruct the object.
- bool cleaned_up_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_COMMIT_H_
diff --git a/chromium/sync/engine/commit_util.cc b/chromium/sync/engine/commit_util.cc
deleted file mode 100644
index 1081446b7a8..00000000000
--- a/chromium/sync/engine/commit_util.cc
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/commit_util.h"
-
-#include <limits>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/strings/string_util.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_base_write_transaction.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/util/time.h"
-
-using std::set;
-using std::string;
-using std::vector;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using syncable::Entry;
-using syncable::IS_DEL;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::Id;
-using syncable::SPECIFICS;
-using syncable::UNIQUE_POSITION;
-
-namespace commit_util {
-
-void AddExtensionsActivityToMessage(
- ExtensionsActivity* activity,
- ExtensionsActivity::Records* extensions_activity_buffer,
- sync_pb::CommitMessage* message) {
- // This isn't perfect, since the set of extensions activity may not correlate
- // exactly with the items being committed. That's OK as long as we're looking
- // for a rough estimate of extensions activity, not an precise mapping of
- // which commits were triggered by which extension.
- //
- // We will push this list of extensions activity back into the
- // ExtensionsActivityMonitor if this commit fails. That's why we must keep a
- // copy of these records in the session.
- activity->GetAndClearRecords(extensions_activity_buffer);
-
- const ExtensionsActivity::Records& records = *extensions_activity_buffer;
- for (ExtensionsActivity::Records::const_iterator it =
- records.begin();
- it != records.end(); ++it) {
- sync_pb::ChromiumExtensionsActivity* activity_message =
- message->add_extensions_activity();
- activity_message->set_extension_id(it->second.extension_id);
- activity_message->set_bookmark_writes_since_last_commit(
- it->second.bookmark_write_count);
- }
-}
-
-void AddClientConfigParamsToMessage(
- ModelTypeSet enabled_types,
- sync_pb::CommitMessage* message) {
- sync_pb::ClientConfigParams* config_params = message->mutable_config_params();
- for (ModelTypeSet::Iterator it = enabled_types.First(); it.Good(); it.Inc()) {
- if (ProxyTypes().Has(it.Get()))
- continue;
- int field_number = GetSpecificsFieldNumberFromModelType(it.Get());
- config_params->mutable_enabled_type_ids()->Add(field_number);
- }
- config_params->set_tabs_datatype_enabled(
- enabled_types.Has(syncer::PROXY_TABS));
-}
-
-namespace {
-void SetEntrySpecifics(const Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry) {
- // Add the new style extension and the folder bit.
- sync_entry->mutable_specifics()->CopyFrom(meta_entry.GetSpecifics());
- sync_entry->set_folder(meta_entry.GetIsDir());
-
- CHECK(!sync_entry->specifics().password().has_client_only_encrypted_data());
- DCHECK_EQ(meta_entry.GetModelType(), GetModelType(*sync_entry));
-}
-} // namespace
-
-void BuildCommitItem(
- const syncable::Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry) {
- syncable::Id id = meta_entry.GetId();
- sync_entry->set_id_string(SyncableIdToProto(id));
-
- string name = meta_entry.GetNonUniqueName();
- CHECK(!name.empty()); // Make sure this isn't an update.
- // Note: Truncation is also performed in WriteNode::SetTitle(..). But this
- // call is still necessary to handle any title changes that might originate
- // elsewhere, or already be persisted in the directory.
- base::TruncateUTF8ToByteSize(name, 255, &name);
- sync_entry->set_name(name);
-
- // Set the non_unique_name. If we do, the server ignores
- // the |name| value (using |non_unique_name| instead), and will return
- // in the CommitResponse a unique name if one is generated.
- // We send both because it may aid in logging.
- sync_entry->set_non_unique_name(name);
-
- if (!meta_entry.GetUniqueClientTag().empty()) {
- sync_entry->set_client_defined_unique_tag(
- meta_entry.GetUniqueClientTag());
- }
-
- // Deleted items with server-unknown parent ids can be a problem so we set
- // the parent to 0. (TODO(sync): Still true in protocol?).
- Id new_parent_id;
- if (meta_entry.GetIsDel() &&
- !meta_entry.GetParentId().ServerKnows()) {
- new_parent_id = syncable::BaseTransaction::root_id();
- } else {
- new_parent_id = meta_entry.GetParentId();
- }
- sync_entry->set_parent_id_string(SyncableIdToProto(new_parent_id));
-
- // If our parent has changed, send up the old one so the server
- // can correctly deal with multiple parents.
- // TODO(nick): With the server keeping track of the primary sync parent,
- // it should not be necessary to provide the old_parent_id: the version
- // number should suffice.
- if (new_parent_id != meta_entry.GetServerParentId() &&
- 0 != meta_entry.GetBaseVersion() &&
- syncable::CHANGES_VERSION != meta_entry.GetBaseVersion()) {
- sync_entry->set_old_parent_id(
- SyncableIdToProto(meta_entry.GetServerParentId()));
- }
-
- int64 version = meta_entry.GetBaseVersion();
- if (syncable::CHANGES_VERSION == version || 0 == version) {
- // Undeletions are only supported for items that have a client tag.
- DCHECK(!id.ServerKnows() ||
- !meta_entry.GetUniqueClientTag().empty())
- << meta_entry;
-
- // Version 0 means to create or undelete an object.
- sync_entry->set_version(0);
- } else {
- DCHECK(id.ServerKnows()) << meta_entry;
- sync_entry->set_version(meta_entry.GetBaseVersion());
- }
- sync_entry->set_ctime(TimeToProtoTime(meta_entry.GetCtime()));
- sync_entry->set_mtime(TimeToProtoTime(meta_entry.GetMtime()));
-
- // Deletion is final on the server, let's move things and then delete them.
- if (meta_entry.GetIsDel()) {
- sync_entry->set_deleted(true);
- } else {
- if (meta_entry.GetSpecifics().has_bookmark()) {
- // Both insert_after_item_id and position_in_parent fields are set only
- // for legacy reasons. See comments in sync.proto for more information.
- const Id& prev_id = meta_entry.GetPredecessorId();
- string prev_id_string =
- prev_id.IsRoot() ? string() : prev_id.GetServerId();
- sync_entry->set_insert_after_item_id(prev_id_string);
- sync_entry->set_position_in_parent(
- meta_entry.GetUniquePosition().ToInt64());
- meta_entry.GetUniquePosition().ToProto(
- sync_entry->mutable_unique_position());
- }
- SetEntrySpecifics(meta_entry, sync_entry);
- }
-}
-
-
-// Helpers for ProcessSingleCommitResponse.
-namespace {
-
-void LogServerError(const sync_pb::CommitResponse_EntryResponse& res) {
- if (res.has_error_message())
- LOG(WARNING) << " " << res.error_message();
- else
- LOG(WARNING) << " No detailed error message returned from server";
-}
-
-const string& GetResultingPostCommitName(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response) {
- const string& response_name =
- SyncerProtoUtil::NameFromCommitEntryResponse(entry_response);
- if (!response_name.empty())
- return response_name;
- return SyncerProtoUtil::NameFromSyncEntity(committed_entry);
-}
-
-bool UpdateVersionAfterCommit(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::ModelNeutralMutableEntry* local_entry) {
- int64 old_version = local_entry->GetBaseVersion();
- int64 new_version = entry_response.version();
- bool bad_commit_version = false;
- if (committed_entry.deleted() &&
- !local_entry->GetUniqueClientTag().empty()) {
- // If the item was deleted, and it's undeletable (uses the client tag),
- // change the version back to zero. We must set the version to zero so
- // that the server knows to re-create the item if it gets committed
- // later for undeletion.
- new_version = 0;
- } else if (!pre_commit_id.ServerKnows()) {
- bad_commit_version = 0 == new_version;
- } else {
- bad_commit_version = old_version > new_version;
- }
- if (bad_commit_version) {
- LOG(ERROR) << "Bad version in commit return for " << *local_entry
- << " new_id:" << SyncableIdFromProto(entry_response.id_string())
- << " new_version:" << entry_response.version();
- return false;
- }
-
- // Update the base version and server version. The base version must change
- // here, even if syncing_was_set is false; that's because local changes were
- // on top of the successfully committed version.
- local_entry->PutBaseVersion(new_version);
- DVLOG(1) << "Commit is changing base version of " << local_entry->GetId()
- << " to: " << new_version;
- local_entry->PutServerVersion(new_version);
- return true;
-}
-
-bool ChangeIdAfterCommit(
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::ModelNeutralMutableEntry* local_entry) {
- syncable::BaseWriteTransaction* trans = local_entry->base_write_transaction();
- const syncable::Id& entry_response_id =
- SyncableIdFromProto(entry_response.id_string());
- if (entry_response_id != pre_commit_id) {
- if (pre_commit_id.ServerKnows()) {
- // The server can sometimes generate a new ID on commit; for example,
- // when committing an undeletion.
- DVLOG(1) << " ID changed while committing an old entry. "
- << pre_commit_id << " became " << entry_response_id << ".";
- }
- syncable::ModelNeutralMutableEntry same_id(
- trans,
- syncable::GET_BY_ID,
- entry_response_id);
- // We should trap this before this function.
- if (same_id.good()) {
- LOG(ERROR) << "ID clash with id " << entry_response_id
- << " during commit " << same_id;
- return false;
- }
- ChangeEntryIDAndUpdateChildren(trans, local_entry, entry_response_id);
- DVLOG(1) << "Changing ID to " << entry_response_id;
- }
- return true;
-}
-
-void UpdateServerFieldsAfterCommit(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- syncable::ModelNeutralMutableEntry* local_entry) {
-
- // We just committed an entry successfully, and now we want to make our view
- // of the server state consistent with the server state. We must be careful;
- // |entry_response| and |committed_entry| have some identically named
- // fields. We only want to consider fields from |committed_entry| when there
- // is not an overriding field in the |entry_response|. We do not want to
- // update the server data from the local data in the entry -- it's possible
- // that the local data changed during the commit, and even if not, the server
- // has the last word on the values of several properties.
-
- local_entry->PutServerIsDel(committed_entry.deleted());
- if (committed_entry.deleted()) {
- // Don't clobber any other fields of deleted objects.
- return;
- }
-
- local_entry->PutServerIsDir(
- (committed_entry.folder() ||
- committed_entry.bookmarkdata().bookmark_folder()));
- local_entry->PutServerSpecifics(committed_entry.specifics());
- local_entry->PutServerMtime(ProtoTimeToTime(committed_entry.mtime()));
- local_entry->PutServerCtime(ProtoTimeToTime(committed_entry.ctime()));
- if (committed_entry.has_unique_position()) {
- local_entry->PutServerUniquePosition(
- UniquePosition::FromProto(
- committed_entry.unique_position()));
- }
-
- // TODO(nick): The server doesn't set entry_response.server_parent_id in
- // practice; to update SERVER_PARENT_ID appropriately here we'd need to
- // get the post-commit ID of the parent indicated by
- // committed_entry.parent_id_string(). That should be inferrable from the
- // information we have, but it's a bit convoluted to pull it out directly.
- // Getting this right is important: SERVER_PARENT_ID gets fed back into
- // old_parent_id during the next commit.
- local_entry->PutServerParentId(local_entry->GetParentId());
- local_entry->PutServerNonUniqueName(
- GetResultingPostCommitName(committed_entry, entry_response));
-
- if (local_entry->GetIsUnappliedUpdate()) {
- // This shouldn't happen; an unapplied update shouldn't be committed, and
- // if it were, the commit should have failed. But if it does happen: we've
- // just overwritten the update info, so clear the flag.
- local_entry->PutIsUnappliedUpdate(false);
- }
-}
-
-void ProcessSuccessfulCommitResponse(
- const sync_pb::SyncEntity& committed_entry,
- const sync_pb::CommitResponse_EntryResponse& entry_response,
- const syncable::Id& pre_commit_id,
- syncable::ModelNeutralMutableEntry* local_entry,
- bool syncing_was_set, set<syncable::Id>* deleted_folders) {
- DCHECK(local_entry->GetIsUnsynced());
-
- // Update SERVER_VERSION and BASE_VERSION.
- if (!UpdateVersionAfterCommit(committed_entry, entry_response, pre_commit_id,
- local_entry)) {
- LOG(ERROR) << "Bad version in commit return for " << *local_entry
- << " new_id:" << SyncableIdFromProto(entry_response.id_string())
- << " new_version:" << entry_response.version();
- return;
- }
-
- // If the server gave us a new ID, apply it.
- if (!ChangeIdAfterCommit(entry_response, pre_commit_id, local_entry)) {
- return;
- }
-
- // Update our stored copy of the server state.
- UpdateServerFieldsAfterCommit(committed_entry, entry_response, local_entry);
-
- // If the item doesn't need to be committed again (an item might need to be
- // committed again if it changed locally during the commit), we can remove
- // it from the unsynced list.
- if (syncing_was_set) {
- local_entry->PutIsUnsynced(false);
- }
-
- // Make a note of any deleted folders, whose children would have
- // been recursively deleted.
- // TODO(nick): Here, commit_message.deleted() would be more correct than
- // local_entry->GetIsDel(). For example, an item could be renamed, and then
- // deleted during the commit of the rename. Unit test & fix.
- if (local_entry->GetIsDir() && local_entry->GetIsDel()) {
- deleted_folders->insert(local_entry->GetId());
- }
-}
-
-} // namespace
-
-sync_pb::CommitResponse::ResponseType
-ProcessSingleCommitResponse(
- syncable::BaseWriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& server_entry,
- const sync_pb::SyncEntity& commit_request_entry,
- int64 metahandle,
- set<syncable::Id>* deleted_folders) {
- syncable::ModelNeutralMutableEntry local_entry(
- trans,
- syncable::GET_BY_HANDLE,
- metahandle);
- CHECK(local_entry.good());
- bool syncing_was_set = local_entry.GetSyncing();
- local_entry.PutSyncing(false);
-
- sync_pb::CommitResponse::ResponseType response = server_entry.response_type();
- if (!sync_pb::CommitResponse::ResponseType_IsValid(response)) {
- LOG(ERROR) << "Commit response has unknown response type! Possibly out "
- "of date client?";
- return sync_pb::CommitResponse::INVALID_MESSAGE;
- }
- if (sync_pb::CommitResponse::TRANSIENT_ERROR == response) {
- DVLOG(1) << "Transient Error Committing: " << local_entry;
- LogServerError(server_entry);
- return sync_pb::CommitResponse::TRANSIENT_ERROR;
- }
- if (sync_pb::CommitResponse::INVALID_MESSAGE == response) {
- LOG(ERROR) << "Error Commiting: " << local_entry;
- LogServerError(server_entry);
- return response;
- }
- if (sync_pb::CommitResponse::CONFLICT == response) {
- DVLOG(1) << "Conflict Committing: " << local_entry;
- return response;
- }
- if (sync_pb::CommitResponse::RETRY == response) {
- DVLOG(1) << "Retry Committing: " << local_entry;
- return response;
- }
- if (sync_pb::CommitResponse::OVER_QUOTA == response) {
- LOG(WARNING) << "Hit deprecated OVER_QUOTA Committing: " << local_entry;
- return response;
- }
- if (!server_entry.has_id_string()) {
- LOG(ERROR) << "Commit response has no id";
- return sync_pb::CommitResponse::INVALID_MESSAGE;
- }
-
- // Implied by the IsValid call above, but here for clarity.
- DCHECK_EQ(sync_pb::CommitResponse::SUCCESS, response) << response;
- // Check to see if we've been given the ID of an existing entry. If so treat
- // it as an error response and retry later.
- const syncable::Id& server_entry_id =
- SyncableIdFromProto(server_entry.id_string());
- if (local_entry.GetId() != server_entry_id) {
- Entry e(trans, syncable::GET_BY_ID, server_entry_id);
- if (e.good()) {
- LOG(ERROR)
- << "Got duplicate id when commiting id: "
- << local_entry.GetId()
- << ". Treating as an error return";
- return sync_pb::CommitResponse::INVALID_MESSAGE;
- }
- }
-
- if (server_entry.version() == 0) {
- LOG(WARNING) << "Server returned a zero version on a commit response.";
- }
-
- ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
- local_entry.GetId(), &local_entry, syncing_was_set, deleted_folders);
- return response;
-}
-
-} // namespace commit_util
-
-} // namespace syncer
diff --git a/chromium/sync/engine/commit_util.h b/chromium/sync/engine/commit_util.h
deleted file mode 100644
index 387bdcf95e2..00000000000
--- a/chromium/sync/engine/commit_util.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
-#define SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/util/extensions_activity.h"
-
-namespace sync_pb {
-class CommitMessage;
-class SyncEntity;
-}
-
-namespace syncer {
-
-namespace syncable {
-class BaseTransaction;
-class Entry;
-class Id;
-class BaseWriteTransaction;
-}
-
-namespace commit_util {
-
-// Adds bookmark extensions activity report to |message|.
-SYNC_EXPORT_PRIVATE void AddExtensionsActivityToMessage(
- ExtensionsActivity* activity,
- ExtensionsActivity::Records* extensions_activity_buffer,
- sync_pb::CommitMessage* message);
-
-// Fills the config_params field of |message|.
-SYNC_EXPORT_PRIVATE void AddClientConfigParamsToMessage(
- ModelTypeSet enabled_types,
- sync_pb::CommitMessage* message);
-
-// Takes a snapshot of |meta_entry| and puts it into a protobuf suitable for use
-// in a commit request message.
-SYNC_EXPORT_PRIVATE void BuildCommitItem(
- const syncable::Entry& meta_entry,
- sync_pb::SyncEntity* sync_entry);
-
-// Process a single commit response. Updates the entry's SERVER fields using
-// |pb_commit_response| and |pb_committed_entry|.
-//
-// The |deleted_folders| parameter is a set of IDs that represent deleted
-// folders. This function will add its entry's ID to this set if it finds
-// itself processing a folder deletion.
-SYNC_EXPORT_PRIVATE
-sync_pb::CommitResponse::ResponseType ProcessSingleCommitResponse(
- syncable::BaseWriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& server_entry,
- const sync_pb::SyncEntity& commit_request_entry,
- int64 metahandle,
- std::set<syncable::Id>* deleted_folders);
-
-} // namespace commit_util
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_BUILD_COMMIT_UTIL_H_
diff --git a/chromium/sync/engine/conflict_resolver.cc b/chromium/sync/engine/conflict_resolver.cc
deleted file mode 100644
index c4814720478..00000000000
--- a/chromium/sync/engine/conflict_resolver.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/conflict_resolver.h"
-
-#include <list>
-#include <set>
-#include <string>
-
-#include "base/metrics/histogram.h"
-#include "sync/engine/conflict_util.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/util/cryptographer.h"
-
-using std::list;
-using std::set;
-
-namespace syncer {
-
-using sessions::StatusController;
-using syncable::Directory;
-using syncable::Entry;
-using syncable::Id;
-using syncable::MutableEntry;
-using syncable::WriteTransaction;
-
-ConflictResolver::ConflictResolver() {
-}
-
-ConflictResolver::~ConflictResolver() {
-}
-
-void ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
- const Id& id,
- const Cryptographer* cryptographer,
- StatusController* status) {
- MutableEntry entry(trans, syncable::GET_BY_ID, id);
- // Must be good as the entry won't have been cleaned up.
- CHECK(entry.good());
-
- // This function can only resolve simple conflicts. Simple conflicts have
- // both IS_UNSYNCED and IS_UNAPPLIED_UDPATE set.
- if (!entry.GetIsUnappliedUpdate() || !entry.GetIsUnsynced()) {
- // This is very unusual, but it can happen in tests. We may be able to
- // assert NOTREACHED() here when those tests are updated.
- return;
- }
-
- if (entry.GetIsDel() && entry.GetServerIsDel()) {
- // we've both deleted it, so lets just drop the need to commit/update this
- // entry.
- entry.PutIsUnsynced(false);
- entry.PutIsUnappliedUpdate(false);
- // we've made changes, but they won't help syncing progress.
- // METRIC simple conflict resolved by merge.
- return;
- }
-
- // This logic determines "client wins" vs. "server wins" strategy picking.
- // By the time we get to this point, we rely on the following to be true:
- // a) We can decrypt both the local and server data (else we'd be in
- // conflict encryption and not attempting to resolve).
- // b) All unsynced changes have been re-encrypted with the default key (
- // occurs either in AttemptToUpdateEntry, SetEncryptionPassphrase,
- // SetDecryptionPassphrase, or RefreshEncryption).
- // c) Base_server_specifics having a valid datatype means that we received
- // an undecryptable update that only changed specifics, and since then have
- // not received any further non-specifics-only or decryptable updates.
- // d) If the server_specifics match specifics, server_specifics are
- // encrypted with the default key, and all other visible properties match,
- // then we can safely ignore the local changes as redundant.
- // e) Otherwise if the base_server_specifics match the server_specifics, no
- // functional change must have been made server-side (else
- // base_server_specifics would have been cleared), and we can therefore
- // safely ignore the server changes as redundant.
- // f) Otherwise, it's in general safer to ignore local changes, with the
- // exception of deletion conflicts (choose to undelete) and conflicts
- // where the non_unique_name or parent don't match.
- if (!entry.GetServerIsDel()) {
- // TODO(nick): The current logic is arbitrary; instead, it ought to be made
- // consistent with the ModelAssociator behavior for a datatype. It would
- // be nice if we could route this back to ModelAssociator code to pick one
- // of three options: CLIENT, SERVER, or MERGE. Some datatypes (autofill)
- // are easily mergeable.
- // See http://crbug.com/77339.
- bool name_matches = entry.GetNonUniqueName() ==
- entry.GetServerNonUniqueName();
- bool parent_matches = entry.GetParentId() == entry.GetServerParentId();
- bool entry_deleted = entry.GetIsDel();
- // The position check might fail spuriously if one of the positions was
- // based on a legacy random suffix, rather than a deterministic one based on
- // originator_cache_guid and originator_item_id. If an item is being
- // modified regularly, it shouldn't take long for the suffix and position to
- // be updated, so such false failures shouldn't be a problem for long.
- //
- // Lucky for us, it's OK to be wrong here. The position_matches check is
- // allowed to return false negatives, as long as it returns no false
- // positives.
- bool position_matches = parent_matches &&
- entry.GetServerUniquePosition().Equals(entry.GetUniquePosition());
- const sync_pb::EntitySpecifics& specifics = entry.GetSpecifics();
- const sync_pb::EntitySpecifics& server_specifics =
- entry.GetServerSpecifics();
- const sync_pb::EntitySpecifics& base_server_specifics =
- entry.GetBaseServerSpecifics();
- std::string decrypted_specifics, decrypted_server_specifics;
- bool specifics_match = false;
- bool server_encrypted_with_default_key = false;
- if (specifics.has_encrypted()) {
- DCHECK(cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted()));
- decrypted_specifics = cryptographer->DecryptToString(
- specifics.encrypted());
- } else {
- decrypted_specifics = specifics.SerializeAsString();
- }
- if (server_specifics.has_encrypted()) {
- server_encrypted_with_default_key =
- cryptographer->CanDecryptUsingDefaultKey(
- server_specifics.encrypted());
- decrypted_server_specifics = cryptographer->DecryptToString(
- server_specifics.encrypted());
- } else {
- decrypted_server_specifics = server_specifics.SerializeAsString();
- }
- if (decrypted_server_specifics == decrypted_specifics &&
- server_encrypted_with_default_key == specifics.has_encrypted()) {
- specifics_match = true;
- }
- bool base_server_specifics_match = false;
- if (server_specifics.has_encrypted() &&
- IsRealDataType(GetModelTypeFromSpecifics(base_server_specifics))) {
- std::string decrypted_base_server_specifics;
- if (!base_server_specifics.has_encrypted()) {
- decrypted_base_server_specifics =
- base_server_specifics.SerializeAsString();
- } else {
- decrypted_base_server_specifics = cryptographer->DecryptToString(
- base_server_specifics.encrypted());
- }
- if (decrypted_server_specifics == decrypted_base_server_specifics)
- base_server_specifics_match = true;
- }
-
- if (!entry_deleted && name_matches && parent_matches && specifics_match &&
- position_matches) {
- DVLOG(1) << "Resolving simple conflict, everything matches, ignoring "
- << "changes for: " << entry;
- conflict_util::IgnoreConflict(&entry);
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- CHANGES_MATCH,
- CONFLICT_RESOLUTION_SIZE);
- } else if (base_server_specifics_match) {
- DVLOG(1) << "Resolving simple conflict, ignoring server encryption "
- << " changes for: " << entry;
- status->increment_num_server_overwrites();
- conflict_util::OverwriteServerChanges(&entry);
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- IGNORE_ENCRYPTION,
- CONFLICT_RESOLUTION_SIZE);
- } else if (entry_deleted || !name_matches || !parent_matches) {
- // NOTE: The update application logic assumes that conflict resolution
- // will never result in changes to the local hierarchy. The entry_deleted
- // and !parent_matches cases here are critical to maintaining that
- // assumption.
- conflict_util::OverwriteServerChanges(&entry);
- status->increment_num_server_overwrites();
- DVLOG(1) << "Resolving simple conflict, overwriting server changes "
- << "for: " << entry;
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- OVERWRITE_SERVER,
- CONFLICT_RESOLUTION_SIZE);
- } else {
- DVLOG(1) << "Resolving simple conflict, ignoring local changes for: "
- << entry;
- conflict_util::IgnoreLocalChanges(&entry);
- status->increment_num_local_overwrites();
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- OVERWRITE_LOCAL,
- CONFLICT_RESOLUTION_SIZE);
- }
- // Now that we've resolved the conflict, clear the prev server
- // specifics.
- entry.PutBaseServerSpecifics(sync_pb::EntitySpecifics());
- } else { // SERVER_IS_DEL is true
- if (entry.GetIsDir()) {
- Directory::Metahandles children;
- trans->directory()->GetChildHandlesById(trans,
- entry.GetId(),
- &children);
- // If a server deleted folder has local contents it should be a hierarchy
- // conflict. Hierarchy conflicts should not be processed by this
- // function.
- DCHECK(children.empty());
- }
-
- // The entry is deleted on the server but still exists locally.
- // We undelete it by overwriting the server's tombstone with the local
- // data.
- conflict_util::OverwriteServerChanges(&entry);
- status->increment_num_server_overwrites();
- DVLOG(1) << "Resolving simple conflict, undeleting server entry: "
- << entry;
- UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- UNDELETE,
- CONFLICT_RESOLUTION_SIZE);
- }
-}
-
-void ConflictResolver::ResolveConflicts(
- syncable::WriteTransaction* trans,
- const Cryptographer* cryptographer,
- const std::set<syncable::Id>& simple_conflict_ids,
- sessions::StatusController* status) {
- // Iterate over simple conflict items.
- set<Id>::const_iterator it;
- for (it = simple_conflict_ids.begin();
- it != simple_conflict_ids.end();
- ++it) {
- // We don't resolve conflicts for control types here.
- Entry conflicting_node(trans, syncable::GET_BY_ID, *it);
- CHECK(conflicting_node.good());
- if (IsControlType(
- GetModelTypeFromSpecifics(conflicting_node.GetSpecifics()))) {
- continue;
- }
-
- ProcessSimpleConflict(trans, *it, cryptographer, status);
- }
- return;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/conflict_resolver.h b/chromium/sync/engine/conflict_resolver.h
deleted file mode 100644
index 4ca614ddac6..00000000000
--- a/chromium/sync/engine/conflict_resolver.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A class that watches the syncer and attempts to resolve any conflicts that
-// occur.
-
-#ifndef SYNC_ENGINE_CONFLICT_RESOLVER_H_
-#define SYNC_ENGINE_CONFLICT_RESOLVER_H_
-
-#include <set>
-
-#include "base/basictypes.h"
-#include "base/gtest_prod_util.h"
-#include "sync/engine/syncer_types.h"
-
-namespace syncer {
-
-namespace syncable {
-class Id;
-class WriteTransaction;
-} // namespace syncable
-
-class Cryptographer;
-
-namespace sessions {
-class StatusController;
-} // namespace sessions
-
-class ConflictResolver {
- friend class SyncerTest;
- FRIEND_TEST_ALL_PREFIXES(SyncerTest,
- ConflictResolverMergeOverwritesLocalEntry);
- public:
- // Enumeration of different conflict resolutions. Used for histogramming.
- enum SimpleConflictResolutions {
- OVERWRITE_LOCAL, // Resolved by overwriting local changes.
- OVERWRITE_SERVER, // Resolved by overwriting server changes.
- UNDELETE, // Resolved by undeleting local item.
- IGNORE_ENCRYPTION, // Resolved by ignoring an encryption-only server
- // change.
- NIGORI_MERGE, // Resolved by merging nigori nodes.
- CHANGES_MATCH, // Resolved by ignoring both local and server
- // changes because they matched.
- CONFLICT_RESOLUTION_SIZE,
- };
-
- ConflictResolver();
- ~ConflictResolver();
- // Called by the syncer at the end of a update/commit cycle.
- // Returns true if the syncer should try to apply its updates again.
- void ResolveConflicts(syncable::WriteTransaction* trans,
- const Cryptographer* cryptographer,
- const std::set<syncable::Id>& simple_conflict_ids,
- sessions::StatusController* status);
-
- private:
- void ProcessSimpleConflict(
- syncable::WriteTransaction* trans,
- const syncable::Id& id,
- const Cryptographer* cryptographer,
- sessions::StatusController* status);
-
- DISALLOW_COPY_AND_ASSIGN(ConflictResolver);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_CONFLICT_RESOLVER_H_
diff --git a/chromium/sync/engine/conflict_util.cc b/chromium/sync/engine/conflict_util.cc
deleted file mode 100644
index fc1e0762046..00000000000
--- a/chromium/sync/engine/conflict_util.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/conflict_util.h"
-
-#include "sync/syncable/mutable_entry.h"
-
-namespace syncer {
-
-using syncable::BASE_VERSION;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::SERVER_VERSION;
-
-using syncable::MutableEntry;
-
-namespace conflict_util {
-
-// Allow the server's changes to take precedence.
-// This will take effect during the next ApplyUpdates step.
-void IgnoreLocalChanges(MutableEntry* entry) {
- DCHECK(entry->GetIsUnsynced());
- DCHECK(entry->GetIsUnappliedUpdate());
- entry->PutIsUnsynced(false);
-}
-
-// Overwrite the server with our own value.
-// We will commit our local data, overwriting the server, at the next
-// opportunity.
-void OverwriteServerChanges(MutableEntry* entry) {
- DCHECK(entry->GetIsUnsynced());
- DCHECK(entry->GetIsUnappliedUpdate());
- entry->PutBaseVersion(entry->GetServerVersion());
- entry->PutIsUnappliedUpdate(false);
-}
-
-// Having determined that everything matches, we ignore the non-conflict.
-void IgnoreConflict(MutableEntry* entry) {
- // If we didn't also unset IS_UNAPPLIED_UPDATE, then we would lose unsynced
- // positional data from adjacent entries when the server update gets applied
- // and the item is re-inserted into the PREV_ID/NEXT_ID linked list. This is
- // primarily an issue because we commit after applying updates, and is most
- // commonly seen when positional changes are made while a passphrase is
- // required (and hence there will be many encryption conflicts).
- DCHECK(entry->GetIsUnsynced());
- DCHECK(entry->GetIsUnappliedUpdate());
- entry->PutBaseVersion(entry->GetServerVersion());
- entry->PutIsUnappliedUpdate(false);
- entry->PutIsUnsynced(false);
-}
-
-} // namespace conflict_util
-} // namespace syncer
diff --git a/chromium/sync/engine/conflict_util.h b/chromium/sync/engine/conflict_util.h
deleted file mode 100644
index 0f3a58be455..00000000000
--- a/chromium/sync/engine/conflict_util.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Utility functions that act on syncable::MutableEntry to resolve conflicts.
-
-#ifndef SYNC_ENGINE_CONFLICT_UTIL_H_
-#define SYNC_ENGINE_CONFLICT_UTIL_H_
-
-namespace syncer {
-
-namespace syncable {
-class MutableEntry;
-}
-
-namespace conflict_util {
-
-// Marks the item as no longer requiring sync, allowing the server's version
-// to 'win' during the next update application step.
-void IgnoreLocalChanges(syncable::MutableEntry* entry);
-
-// Marks the item as no longer requiring update from server data. This will
-// cause the item to be committed to the server, overwriting the server's
-// version.
-void OverwriteServerChanges(syncable::MutableEntry* entry);
-
-// The local and server versions are identical, so unset the bits that put them
-// into a conflicting state.
-void IgnoreConflict(syncable::MutableEntry *trans);
-
-} // namespace conflict_util
-} // namespace syncer
-
-#endif // SYNC_ENGINE_CONFLICT_UTIL_H_
diff --git a/chromium/sync/engine/download.cc b/chromium/sync/engine/download.cc
deleted file mode 100644
index 2bc7f7a0a9d..00000000000
--- a/chromium/sync/engine/download.cc
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/download.h"
-
-#include <string>
-
-#include "base/command_line.h"
-#include "sync/engine/process_updates_util.h"
-#include "sync/engine/sync_directory_update_handler.h"
-#include "sync/engine/syncer.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/sessions/nudge_tracker.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/nigori_handler.h"
-#include "sync/syncable/syncable_read_transaction.h"
-
-namespace syncer {
-
-using sessions::StatusController;
-using sessions::SyncSession;
-using sessions::SyncSessionContext;
-using std::string;
-
-namespace download {
-
-namespace {
-
-typedef std::map<ModelType, size_t> TypeToIndexMap;
-
-SyncerError HandleGetEncryptionKeyResponse(
- const sync_pb::ClientToServerResponse& update_response,
- syncable::Directory* dir) {
- bool success = false;
- if (update_response.get_updates().encryption_keys_size() == 0) {
- LOG(ERROR) << "Failed to receive encryption key from server.";
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
- syncable::ReadTransaction trans(FROM_HERE, dir);
- syncable::NigoriHandler* nigori_handler = dir->GetNigoriHandler();
- success = nigori_handler->SetKeystoreKeys(
- update_response.get_updates().encryption_keys(),
- &trans);
-
- DVLOG(1) << "GetUpdates returned "
- << update_response.get_updates().encryption_keys_size()
- << "encryption keys. Nigori keystore key "
- << (success ? "" : "not ") << "updated.";
- return (success ? SYNCER_OK : SERVER_RESPONSE_VALIDATION_FAILED);
-}
-
-sync_pb::SyncEnums::GetUpdatesOrigin ConvertConfigureSourceToOrigin(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source) {
- switch (source) {
- // Configurations:
- case sync_pb::GetUpdatesCallerInfo::NEWLY_SUPPORTED_DATATYPE:
- return sync_pb::SyncEnums::NEWLY_SUPPORTED_DATATYPE;
- case sync_pb::GetUpdatesCallerInfo::MIGRATION:
- return sync_pb::SyncEnums::MIGRATION;
- case sync_pb::GetUpdatesCallerInfo::RECONFIGURATION:
- return sync_pb::SyncEnums::RECONFIGURATION;
- case sync_pb::GetUpdatesCallerInfo::NEW_CLIENT:
- return sync_pb::SyncEnums::NEW_CLIENT;
- default:
- NOTREACHED();
- return sync_pb::SyncEnums::UNKNOWN_ORIGIN;
- }
-}
-
-bool ShouldRequestEncryptionKey(
- SyncSessionContext* context) {
- bool need_encryption_key = false;
- if (context->keystore_encryption_enabled()) {
- syncable::Directory* dir = context->directory();
- syncable::ReadTransaction trans(FROM_HERE, dir);
- syncable::NigoriHandler* nigori_handler = dir->GetNigoriHandler();
- need_encryption_key = nigori_handler->NeedKeystoreKey(&trans);
- }
- return need_encryption_key;
-}
-
-void InitDownloadUpdatesContext(
- SyncSession* session,
- bool create_mobile_bookmarks_folder,
- sync_pb::ClientToServerMessage* message) {
- message->set_share(session->context()->account_name());
- message->set_message_contents(sync_pb::ClientToServerMessage::GET_UPDATES);
-
- sync_pb::GetUpdatesMessage* get_updates = message->mutable_get_updates();
-
- // We want folders for our associated types, always. If we were to set
- // this to false, the server would send just the non-container items
- // (e.g. Bookmark URLs but not their containing folders).
- get_updates->set_fetch_folders(true);
-
- get_updates->set_create_mobile_bookmarks_folder(
- create_mobile_bookmarks_folder);
- bool need_encryption_key = ShouldRequestEncryptionKey(session->context());
- get_updates->set_need_encryption_key(need_encryption_key);
-
- // Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
- get_updates->mutable_caller_info()->set_notifications_enabled(
- session->context()->notifications_enabled());
-}
-
-void InitDownloadUpdatesProgress(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* handler_map,
- sync_pb::GetUpdatesMessage* get_updates) {
- for (ModelTypeSet::Iterator it = proto_request_types.First();
- it.Good(); it.Inc()) {
- UpdateHandlerMap::iterator handler_it = handler_map->find(it.Get());
- DCHECK(handler_it != handler_map->end());
- sync_pb::DataTypeProgressMarker* progress_marker =
- get_updates->add_from_progress_marker();
- handler_it->second->GetDownloadProgress(progress_marker);
- }
-}
-
-// Builds a map of ModelTypes to indices to progress markers in the given
-// |gu_response| message. The map is returned in the |index_map| parameter.
-void PartitionProgressMarkersByType(
- const sync_pb::GetUpdatesResponse& gu_response,
- ModelTypeSet request_types,
- TypeToIndexMap* index_map) {
- for (int i = 0; i < gu_response.new_progress_marker_size(); ++i) {
- int field_number = gu_response.new_progress_marker(i).data_type_id();
- ModelType model_type = GetModelTypeFromSpecificsFieldNumber(field_number);
- if (!IsRealDataType(model_type)) {
- DLOG(WARNING) << "Unknown field number " << field_number;
- continue;
- }
- if (!request_types.Has(model_type)) {
- DLOG(WARNING)
- << "Skipping unexpected progress marker for non-enabled type "
- << ModelTypeToString(model_type);
- continue;
- }
- index_map->insert(std::make_pair(model_type, i));
- }
-}
-
-// Examines the contents of the GetUpdates response message and forwards
-// relevant data to the UpdateHandlers for processing and persisting.
-bool ProcessUpdateResponseContents(
- const sync_pb::GetUpdatesResponse& gu_response,
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* handler_map,
- StatusController* status) {
- TypeSyncEntityMap updates_by_type;
- PartitionUpdatesByType(gu_response, proto_request_types, &updates_by_type);
- DCHECK_EQ(proto_request_types.Size(), updates_by_type.size());
-
- TypeToIndexMap progress_index_by_type;
- PartitionProgressMarkersByType(gu_response,
- proto_request_types,
- &progress_index_by_type);
- if (proto_request_types.Size() != progress_index_by_type.size()) {
- NOTREACHED() << "Missing progress markers in GetUpdates response.";
- return false;
- }
-
- // Iterate over these maps in parallel, processing updates for each type.
- TypeToIndexMap::iterator progress_marker_iter =
- progress_index_by_type.begin();
- TypeSyncEntityMap::iterator updates_iter = updates_by_type.begin();
- for ( ; (progress_marker_iter != progress_index_by_type.end()
- && updates_iter != updates_by_type.end());
- ++progress_marker_iter, ++updates_iter) {
- DCHECK_EQ(progress_marker_iter->first, updates_iter->first);
- ModelType type = progress_marker_iter->first;
-
- UpdateHandlerMap::iterator update_handler_iter = handler_map->find(type);
-
- if (update_handler_iter != handler_map->end()) {
- update_handler_iter->second->ProcessGetUpdatesResponse(
- gu_response.new_progress_marker(progress_marker_iter->second),
- updates_iter->second,
- status);
- } else {
- DLOG(WARNING)
- << "Ignoring received updates of a type we can't handle. "
- << "Type is: " << ModelTypeToString(type);
- continue;
- }
- }
- DCHECK(progress_marker_iter == progress_index_by_type.end()
- && updates_iter == updates_by_type.end());
-
- return true;
-}
-
-} // namespace
-
-void BuildNormalDownloadUpdates(
- SyncSession* session,
- bool create_mobile_bookmarks_folder,
- ModelTypeSet request_types,
- const sessions::NudgeTracker& nudge_tracker,
- sync_pb::ClientToServerMessage* client_to_server_message) {
- // Request updates for all requested types.
- DVLOG(1) << "Getting updates for types "
- << ModelTypeSetToString(request_types);
- DCHECK(!request_types.Empty());
-
- InitDownloadUpdatesContext(
- session,
- create_mobile_bookmarks_folder,
- client_to_server_message);
-
- BuildNormalDownloadUpdatesImpl(
- Intersection(request_types, ProtocolTypes()),
- session->context()->update_handler_map(),
- nudge_tracker,
- client_to_server_message->mutable_get_updates());
-}
-
-void BuildNormalDownloadUpdatesImpl(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* update_handler_map,
- const sessions::NudgeTracker& nudge_tracker,
- sync_pb::GetUpdatesMessage* get_updates) {
- DCHECK(!proto_request_types.Empty());
-
- InitDownloadUpdatesProgress(
- proto_request_types,
- update_handler_map,
- get_updates);
-
- // Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
- get_updates->mutable_caller_info()->set_source(
- nudge_tracker.updates_source());
-
- // Set the new and improved version of source, too.
- get_updates->set_get_updates_origin(sync_pb::SyncEnums::GU_TRIGGER);
-
- // Fill in the notification hints.
- for (int i = 0; i < get_updates->from_progress_marker_size(); ++i) {
- sync_pb::DataTypeProgressMarker* progress_marker =
- get_updates->mutable_from_progress_marker(i);
- ModelType type = GetModelTypeFromSpecificsFieldNumber(
- progress_marker->data_type_id());
-
- DCHECK(!nudge_tracker.IsTypeThrottled(type))
- << "Throttled types should have been removed from the request_types.";
-
- nudge_tracker.SetLegacyNotificationHint(type, progress_marker);
- nudge_tracker.FillProtoMessage(
- type,
- progress_marker->mutable_get_update_triggers());
- }
-}
-
-void BuildDownloadUpdatesForConfigure(
- SyncSession* session,
- bool create_mobile_bookmarks_folder,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- ModelTypeSet request_types,
- sync_pb::ClientToServerMessage* client_to_server_message) {
- // Request updates for all enabled types.
- DVLOG(1) << "Initial download for types "
- << ModelTypeSetToString(request_types);
-
- InitDownloadUpdatesContext(
- session,
- create_mobile_bookmarks_folder,
- client_to_server_message);
- BuildDownloadUpdatesForConfigureImpl(
- Intersection(request_types, ProtocolTypes()),
- session->context()->update_handler_map(),
- source,
- client_to_server_message->mutable_get_updates());
-}
-
-void BuildDownloadUpdatesForConfigureImpl(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* update_handler_map,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sync_pb::GetUpdatesMessage* get_updates) {
- DCHECK(!proto_request_types.Empty());
-
- InitDownloadUpdatesProgress(
- proto_request_types,
- update_handler_map,
- get_updates);
-
- // Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
- get_updates->mutable_caller_info()->set_source(source);
-
- // Set the new and improved version of source, too.
- sync_pb::SyncEnums::GetUpdatesOrigin origin =
- ConvertConfigureSourceToOrigin(source);
- get_updates->set_get_updates_origin(origin);
-}
-
-void BuildDownloadUpdatesForPoll(
- SyncSession* session,
- bool create_mobile_bookmarks_folder,
- ModelTypeSet request_types,
- sync_pb::ClientToServerMessage* client_to_server_message) {
- DVLOG(1) << "Polling for types "
- << ModelTypeSetToString(request_types);
-
- InitDownloadUpdatesContext(
- session,
- create_mobile_bookmarks_folder,
- client_to_server_message);
- BuildDownloadUpdatesForPollImpl(
- Intersection(request_types, ProtocolTypes()),
- session->context()->update_handler_map(),
- client_to_server_message->mutable_get_updates());
-}
-
-void BuildDownloadUpdatesForPollImpl(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* update_handler_map,
- sync_pb::GetUpdatesMessage* get_updates) {
- DCHECK(!proto_request_types.Empty());
-
- InitDownloadUpdatesProgress(
- proto_request_types,
- update_handler_map,
- get_updates);
-
- // Set legacy GetUpdatesMessage.GetUpdatesCallerInfo information.
- get_updates->mutable_caller_info()->set_source(
- sync_pb::GetUpdatesCallerInfo::PERIODIC);
-
- // Set the new and improved version of source, too.
- get_updates->set_get_updates_origin(sync_pb::SyncEnums::PERIODIC);
-}
-
-SyncerError ExecuteDownloadUpdates(
- ModelTypeSet request_types,
- SyncSession* session,
- sync_pb::ClientToServerMessage* msg) {
- sync_pb::ClientToServerResponse update_response;
- StatusController* status = session->mutable_status_controller();
- bool need_encryption_key = ShouldRequestEncryptionKey(session->context());
-
- if (session->context()->debug_info_getter()) {
- sync_pb::DebugInfo* debug_info = msg->mutable_debug_info();
- CopyClientDebugInfo(session->context()->debug_info_getter(), debug_info);
- }
-
- SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
- msg,
- &update_response,
- session);
-
- DVLOG(2) << SyncerProtoUtil::ClientToServerResponseDebugString(
- update_response);
-
- if (result != SYNCER_OK) {
- LOG(ERROR) << "PostClientToServerMessage() failed during GetUpdates";
- return result;
- }
-
- DVLOG(1) << "GetUpdates "
- << " returned " << update_response.get_updates().entries_size()
- << " updates and indicated "
- << update_response.get_updates().changes_remaining()
- << " updates left on server.";
-
- if (session->context()->debug_info_getter()) {
- // Clear debug info now that we have successfully sent it to the server.
- DVLOG(1) << "Clearing client debug info.";
- session->context()->debug_info_getter()->ClearDebugInfo();
- }
-
- if (need_encryption_key ||
- update_response.get_updates().encryption_keys_size() > 0) {
- syncable::Directory* dir = session->context()->directory();
- status->set_last_get_key_result(
- HandleGetEncryptionKeyResponse(update_response, dir));
- }
-
- const ModelTypeSet proto_request_types =
- Intersection(request_types, ProtocolTypes());
-
- return ProcessResponse(update_response.get_updates(),
- proto_request_types,
- session->context()->update_handler_map(),
- status);
-}
-
-SyncerError ProcessResponse(
- const sync_pb::GetUpdatesResponse& gu_response,
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* handler_map,
- StatusController* status) {
- status->increment_num_updates_downloaded_by(gu_response.entries_size());
-
- // The changes remaining field is used to prevent the client from looping. If
- // that field is being set incorrectly, we're in big trouble.
- if (!gu_response.has_changes_remaining()) {
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
- status->set_num_server_changes_remaining(gu_response.changes_remaining());
-
-
- if (!ProcessUpdateResponseContents(gu_response,
- proto_request_types,
- handler_map,
- status)) {
- return SERVER_RESPONSE_VALIDATION_FAILED;
- }
-
- if (gu_response.changes_remaining() == 0) {
- return SYNCER_OK;
- } else {
- return SERVER_MORE_TO_DOWNLOAD;
- }
-}
-
-void CopyClientDebugInfo(
- sessions::DebugInfoGetter* debug_info_getter,
- sync_pb::DebugInfo* debug_info) {
- DVLOG(1) << "Copying client debug info to send.";
- debug_info_getter->GetDebugInfo(debug_info);
-}
-
-} // namespace download
-
-} // namespace syncer
diff --git a/chromium/sync/engine/download.h b/chromium/sync/engine/download.h
deleted file mode 100644
index 5bc08d434e1..00000000000
--- a/chromium/sync/engine/download.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_DOWNLOAD_H_
-#define SYNC_ENGINE_DOWNLOAD_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/engine/sync_directory_update_handler.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace sync_pb {
-class DebugInfo;
-} // namespace sync_pb
-
-namespace syncer {
-
-namespace sessions {
-class DebugInfoGetter;
-class NudgeTracker;
-class StatusController;
-class SyncSession;
-} // namespace sessions
-
-namespace download {
-
-// This function executes a single GetUpdate request and stores the response in
-// the session's StatusController. It constructs the type of request used to
-// keep types in sync when in normal mode.
-SYNC_EXPORT_PRIVATE void BuildNormalDownloadUpdates(
- sessions::SyncSession* session,
- bool create_mobile_bookmarks_folder,
- ModelTypeSet request_types,
- const sessions::NudgeTracker& nudge_tracker,
- sync_pb::ClientToServerMessage* client_to_server_message);
-
-// Helper function. Defined here for testing.
-SYNC_EXPORT_PRIVATE void BuildNormalDownloadUpdatesImpl(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* update_handler_map,
- const sessions::NudgeTracker& nudge_tracker,
- sync_pb::GetUpdatesMessage* get_updates);
-
-// This function executes a single GetUpdate request and stores the response in
-// the session's StatusController. It constructs the type of request used to
-// initialize a type for the first time.
-SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForConfigure(
- sessions::SyncSession* session,
- bool create_mobile_bookmarks_folder,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- ModelTypeSet request_types,
- sync_pb::ClientToServerMessage* client_to_server_message);
-
-// Helper function. Defined here for testing.
-SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForConfigureImpl(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* update_handler_map,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sync_pb::GetUpdatesMessage* get_updates);
-
-// This function executes a single GetUpdate request and stores the response in
-// the session's status controller. It constructs the type of request used for
-// periodic polling.
-SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForPoll(
- sessions::SyncSession* session,
- bool create_mobile_bookmarks_folder,
- ModelTypeSet request_types,
- sync_pb::ClientToServerMessage* client_to_server_message);
-
-// Helper function. Defined here for testing.
-SYNC_EXPORT_PRIVATE void BuildDownloadUpdatesForPollImpl(
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* update_handler_map,
- sync_pb::GetUpdatesMessage* get_updates);
-
-// Sends the specified message to the server and stores the response in a member
-// of the |session|'s StatusController.
-SYNC_EXPORT_PRIVATE SyncerError
- ExecuteDownloadUpdates(ModelTypeSet request_types,
- sessions::SyncSession* session,
- sync_pb::ClientToServerMessage* msg);
-
-// Helper function for processing responses from the server.
-// Defined here for testing.
-SYNC_EXPORT_PRIVATE SyncerError ProcessResponse(
- const sync_pb::GetUpdatesResponse& gu_response,
- ModelTypeSet proto_request_types,
- UpdateHandlerMap* handler_map,
- sessions::StatusController* status);
-
-// Helper function to copy client debug info from debug_info_getter to
-// debug_info. Defined here for testing.
-SYNC_EXPORT_PRIVATE void CopyClientDebugInfo(
- sessions::DebugInfoGetter* debug_info_getter,
- sync_pb::DebugInfo* debug_info);
-
-} // namespace download
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_DOWNLOAD_H_
diff --git a/chromium/sync/engine/download_unittest.cc b/chromium/sync/engine/download_unittest.cc
deleted file mode 100644
index eae627791ab..00000000000
--- a/chromium/sync/engine/download_unittest.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/download.h"
-
-#include "base/message_loop/message_loop.h"
-#include "base/stl_util.h"
-#include "sync/engine/sync_directory_update_handler.h"
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/debug_info_getter.h"
-#include "sync/sessions/nudge_tracker.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/syncable/directory.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/test/sessions/mock_debug_info_getter.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using sessions::MockDebugInfoGetter;
-
-// A test fixture for tests exercising download updates functions.
-class DownloadUpdatesTest : public ::testing::Test {
- protected:
- DownloadUpdatesTest()
- : update_handler_map_deleter_(&update_handler_map_) {
- }
-
- virtual void SetUp() {
- dir_maker_.SetUp();
-
- AddUpdateHandler(AUTOFILL, GROUP_DB);
- AddUpdateHandler(BOOKMARKS, GROUP_UI);
- AddUpdateHandler(PREFERENCES, GROUP_UI);
- }
-
- virtual void TearDown() {
- dir_maker_.TearDown();
- }
-
- ModelTypeSet proto_request_types() {
- ModelTypeSet types;
- for (UpdateHandlerMap::iterator it = update_handler_map_.begin();
- it != update_handler_map_.end(); ++it) {
- types.Put(it->first);
- }
- return types;
- }
-
- syncable::Directory* directory() {
- return dir_maker_.directory();
- }
-
- UpdateHandlerMap* update_handler_map() {
- return &update_handler_map_;
- }
-
- void InitFakeUpdateResponse(sync_pb::GetUpdatesResponse* response) {
- ModelTypeSet types = proto_request_types();
-
- for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
- sync_pb::DataTypeProgressMarker* marker =
- response->add_new_progress_marker();
- marker->set_data_type_id(GetSpecificsFieldNumberFromModelType(it.Get()));
- marker->set_token("foobarbaz");
- }
-
- response->set_changes_remaining(0);
- }
-
- private:
- void AddUpdateHandler(ModelType type, ModelSafeGroup group) {
- DCHECK(directory());
- scoped_refptr<ModelSafeWorker> worker = new FakeModelWorker(group);
- SyncDirectoryUpdateHandler* handler =
- new SyncDirectoryUpdateHandler(directory(), type, worker);
- update_handler_map_.insert(std::make_pair(type, handler));
- }
-
- base::MessageLoop loop_; // Needed for directory init.
- TestDirectorySetterUpper dir_maker_;
-
- UpdateHandlerMap update_handler_map_;
- STLValueDeleter<UpdateHandlerMap> update_handler_map_deleter_;
-
- DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesTest);
-};
-
-// Basic test to make sure nudges are expressed properly in the request.
-TEST_F(DownloadUpdatesTest, BookmarkNudge) {
- sessions::NudgeTracker nudge_tracker;
- nudge_tracker.RecordLocalChange(ModelTypeSet(BOOKMARKS));
-
- sync_pb::ClientToServerMessage msg;
- download::BuildNormalDownloadUpdatesImpl(proto_request_types(),
- update_handler_map(),
- nudge_tracker,
- msg.mutable_get_updates());
-
- const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::LOCAL,
- gu_msg.caller_info().source());
- EXPECT_EQ(sync_pb::SyncEnums::GU_TRIGGER, gu_msg.get_updates_origin());
- for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
- syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
- gu_msg.from_progress_marker(i).data_type_id());
-
- const sync_pb::DataTypeProgressMarker& progress_marker =
- gu_msg.from_progress_marker(i);
- const sync_pb::GetUpdateTriggers& gu_trigger =
- progress_marker.get_update_triggers();
-
- // We perform some basic tests of GU trigger and source fields here. The
- // more complicated scenarios are tested by the NudgeTracker tests.
- if (type == BOOKMARKS) {
- EXPECT_TRUE(progress_marker.has_notification_hint());
- EXPECT_EQ("", progress_marker.notification_hint());
- EXPECT_EQ(1, gu_trigger.local_modification_nudges());
- EXPECT_EQ(0, gu_trigger.datatype_refresh_nudges());
- } else {
- EXPECT_FALSE(progress_marker.has_notification_hint());
- EXPECT_EQ(0, gu_trigger.local_modification_nudges());
- EXPECT_EQ(0, gu_trigger.datatype_refresh_nudges());
- }
- }
-}
-
-// Basic test to ensure invalidation payloads are expressed in the request.
-TEST_F(DownloadUpdatesTest, NotifyMany) {
- sessions::NudgeTracker nudge_tracker;
- nudge_tracker.RecordRemoteInvalidation(
- BuildInvalidationMap(AUTOFILL, 1, "autofill_payload"));
- nudge_tracker.RecordRemoteInvalidation(
- BuildInvalidationMap(BOOKMARKS, 1, "bookmark_payload"));
- nudge_tracker.RecordRemoteInvalidation(
- BuildInvalidationMap(PREFERENCES, 1, "preferences_payload"));
- ModelTypeSet notified_types;
- notified_types.Put(AUTOFILL);
- notified_types.Put(BOOKMARKS);
- notified_types.Put(PREFERENCES);
-
- sync_pb::ClientToServerMessage msg;
- download::BuildNormalDownloadUpdatesImpl(proto_request_types(),
- update_handler_map(),
- nudge_tracker,
- msg.mutable_get_updates());
-
- const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- gu_msg.caller_info().source());
- EXPECT_EQ(sync_pb::SyncEnums::GU_TRIGGER, gu_msg.get_updates_origin());
- for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
- syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
- gu_msg.from_progress_marker(i).data_type_id());
-
- const sync_pb::DataTypeProgressMarker& progress_marker =
- gu_msg.from_progress_marker(i);
- const sync_pb::GetUpdateTriggers& gu_trigger =
- progress_marker.get_update_triggers();
-
- // We perform some basic tests of GU trigger and source fields here. The
- // more complicated scenarios are tested by the NudgeTracker tests.
- if (notified_types.Has(type)) {
- EXPECT_TRUE(progress_marker.has_notification_hint());
- EXPECT_FALSE(progress_marker.notification_hint().empty());
- EXPECT_EQ(1, gu_trigger.notification_hint_size());
- } else {
- EXPECT_FALSE(progress_marker.has_notification_hint());
- EXPECT_EQ(0, gu_trigger.notification_hint_size());
- }
- }
-}
-
-TEST_F(DownloadUpdatesTest, ConfigureTest) {
- sync_pb::ClientToServerMessage msg;
- download::BuildDownloadUpdatesForConfigureImpl(
- proto_request_types(),
- update_handler_map(),
- sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- msg.mutable_get_updates());
-
- const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
-
- EXPECT_EQ(sync_pb::SyncEnums::RECONFIGURATION, gu_msg.get_updates_origin());
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- gu_msg.caller_info().source());
-
- ModelTypeSet progress_types;
- for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
- syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
- gu_msg.from_progress_marker(i).data_type_id());
- progress_types.Put(type);
- }
- EXPECT_TRUE(proto_request_types().Equals(progress_types));
-}
-
-TEST_F(DownloadUpdatesTest, PollTest) {
- sync_pb::ClientToServerMessage msg;
- download::BuildDownloadUpdatesForPollImpl(
- proto_request_types(),
- update_handler_map(),
- msg.mutable_get_updates());
-
- const sync_pb::GetUpdatesMessage& gu_msg = msg.get_updates();
-
- EXPECT_EQ(sync_pb::SyncEnums::PERIODIC, gu_msg.get_updates_origin());
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::PERIODIC,
- gu_msg.caller_info().source());
-
- ModelTypeSet progress_types;
- for (int i = 0; i < gu_msg.from_progress_marker_size(); ++i) {
- syncer::ModelType type = GetModelTypeFromSpecificsFieldNumber(
- gu_msg.from_progress_marker(i).data_type_id());
- progress_types.Put(type);
- }
- EXPECT_TRUE(proto_request_types().Equals(progress_types));
-}
-
-// Verify that a bogus response message is detected.
-TEST_F(DownloadUpdatesTest, InvalidResponse) {
- sync_pb::GetUpdatesResponse gu_response;
- InitFakeUpdateResponse(&gu_response);
-
- // This field is essential for making the client stop looping. If it's unset
- // then something is very wrong. The client should detect this.
- gu_response.clear_changes_remaining();
-
- sessions::StatusController status;
- SyncerError error = download::ProcessResponse(gu_response,
- proto_request_types(),
- update_handler_map(),
- &status);
- EXPECT_EQ(error, SERVER_RESPONSE_VALIDATION_FAILED);
-}
-
-// Verify that we correctly detect when there's more work to be done.
-TEST_F(DownloadUpdatesTest, MoreToDownloadResponse) {
- sync_pb::GetUpdatesResponse gu_response;
- InitFakeUpdateResponse(&gu_response);
- gu_response.set_changes_remaining(1);
-
- sessions::StatusController status;
- SyncerError error = download::ProcessResponse(gu_response,
- proto_request_types(),
- update_handler_map(),
- &status);
- EXPECT_EQ(error, SERVER_MORE_TO_DOWNLOAD);
-}
-
-// A simple scenario: No updates returned and nothing more to download.
-TEST_F(DownloadUpdatesTest, NormalResponseTest) {
- sync_pb::GetUpdatesResponse gu_response;
- InitFakeUpdateResponse(&gu_response);
- gu_response.set_changes_remaining(0);
-
- sessions::StatusController status;
- SyncerError error = download::ProcessResponse(gu_response,
- proto_request_types(),
- update_handler_map(),
- &status);
- EXPECT_EQ(error, SYNCER_OK);
-}
-
-class DownloadUpdatesDebugInfoTest : public ::testing::Test {
- public:
- DownloadUpdatesDebugInfoTest() {}
- virtual ~DownloadUpdatesDebugInfoTest() {}
-
- sessions::StatusController* status() {
- return &status_;
- }
-
- sessions::DebugInfoGetter* debug_info_getter() {
- return &debug_info_getter_;
- }
-
- void AddDebugEvent() {
- debug_info_getter_.AddDebugEvent();
- }
-
- private:
- sessions::StatusController status_;
- MockDebugInfoGetter debug_info_getter_;
-};
-
-
-// Verify CopyClientDebugInfo when there are no events to upload.
-TEST_F(DownloadUpdatesDebugInfoTest, VerifyCopyClientDebugInfo_Empty) {
- sync_pb::DebugInfo debug_info;
- download::CopyClientDebugInfo(debug_info_getter(), &debug_info);
- EXPECT_EQ(0, debug_info.events_size());
-}
-
-TEST_F(DownloadUpdatesDebugInfoTest, VerifyCopyOverwrites) {
- sync_pb::DebugInfo debug_info;
- AddDebugEvent();
- download::CopyClientDebugInfo(debug_info_getter(), &debug_info);
- EXPECT_EQ(1, debug_info.events_size());
- download::CopyClientDebugInfo(debug_info_getter(), &debug_info);
- EXPECT_EQ(1, debug_info.events_size());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/get_commit_ids.cc b/chromium/sync/engine/get_commit_ids.cc
deleted file mode 100644
index 42faf21d435..00000000000
--- a/chromium/sync/engine/get_commit_ids.cc
+++ /dev/null
@@ -1,508 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/get_commit_ids.h"
-
-#include <set>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/nigori_handler.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/util/cryptographer.h"
-
-using std::set;
-using std::vector;
-
-namespace syncer {
-
-namespace {
-
-// Forward-declare some helper functions. This gives us more options for
-// ordering the function defintions within this file.
-
-// Filters |unsynced_handles| to remove all entries that do not belong to the
-// specified |requested_types|, or are not eligible for a commit at this time.
-void FilterUnreadyEntries(
- syncable::BaseTransaction* trans,
- ModelTypeSet requested_types,
- ModelTypeSet encrypted_types,
- bool passphrase_missing,
- const syncable::Directory::Metahandles& unsynced_handles,
- std::set<int64>* ready_unsynced_set);
-
-// Given a set of commit metahandles that are ready for commit
-// (|ready_unsynced_set|), sorts these into commit order and places up to
-// |max_entries| of them in the output parameter |out|.
-//
-// See the header file for an explanation of commit ordering.
-void OrderCommitIds(
- syncable::BaseTransaction* trans,
- size_t max_entries,
- const std::set<int64>& ready_unsynced_set,
- std::vector<int64>* out);
-
-} // namespace
-
-void GetCommitIdsForType(
- syncable::BaseTransaction* trans,
- ModelType type,
- size_t max_entries,
- syncable::Directory::Metahandles* out) {
- syncable::Directory* dir = trans->directory();
-
- // Gather the full set of unsynced items and store it in the session. They
- // are not in the correct order for commit.
- std::set<int64> ready_unsynced_set;
- syncable::Directory::Metahandles all_unsynced_handles;
- GetUnsyncedEntries(trans, &all_unsynced_handles);
-
- ModelTypeSet encrypted_types;
- bool passphrase_missing = false;
- Cryptographer* cryptographer = dir->GetCryptographer(trans);
- if (cryptographer) {
- encrypted_types = dir->GetNigoriHandler()->GetEncryptedTypes(trans);
- passphrase_missing = cryptographer->has_pending_keys();
- };
-
- // We filter out all unready entries from the set of unsynced handles. This
- // new set of ready and unsynced items is then what we use to determine what
- // is a candidate for commit. The caller is responsible for ensuring that no
- // throttled types are included among the requested_types.
- FilterUnreadyEntries(trans,
- ModelTypeSet(type),
- encrypted_types,
- passphrase_missing,
- all_unsynced_handles,
- &ready_unsynced_set);
-
- OrderCommitIds(trans, max_entries, ready_unsynced_set, out);
-
- for (size_t i = 0; i < out->size(); i++) {
- DVLOG(1) << "Debug commit batch result:" << (*out)[i];
- }
-}
-
-namespace {
-
-bool IsEntryInConflict(const syncable::Entry& entry) {
- if (entry.GetIsUnsynced() &&
- entry.GetServerVersion() > 0 &&
- (entry.GetServerVersion() > entry.GetBaseVersion())) {
- // The local and server versions don't match. The item must be in
- // conflict, so there's no point in attempting to commit.
- DCHECK(entry.GetIsUnappliedUpdate());
- DVLOG(1) << "Excluding entry from commit due to version mismatch "
- << entry;
- return true;
- }
- return false;
-}
-
-// An entry is not considered ready for commit if any are true:
-// 1. It's in conflict.
-// 2. It requires encryption (either the type is encrypted but a passphrase
-// is missing from the cryptographer, or the entry itself wasn't properly
-// encrypted).
-// 3. It's type is currently throttled.
-// 4. It's a delete but has not been committed.
-bool IsEntryReadyForCommit(ModelTypeSet requested_types,
- ModelTypeSet encrypted_types,
- bool passphrase_missing,
- const syncable::Entry& entry) {
- DCHECK(entry.GetIsUnsynced());
- if (IsEntryInConflict(entry))
- return false;
-
- const ModelType type = entry.GetModelType();
- // We special case the nigori node because even though it is considered an
- // "encrypted type", not all nigori node changes require valid encryption
- // (ex: sync_tabs).
- if ((type != NIGORI) && encrypted_types.Has(type) &&
- (passphrase_missing ||
- syncable::EntryNeedsEncryption(encrypted_types, entry))) {
- // This entry requires encryption but is not properly encrypted (possibly
- // due to the cryptographer not being initialized or the user hasn't
- // provided the most recent passphrase).
- DVLOG(1) << "Excluding entry from commit due to lack of encryption "
- << entry;
- return false;
- }
-
- // Ignore it if it's not in our set of requested types.
- if (!requested_types.Has(type))
- return false;
-
- if (entry.GetIsDel() && !entry.GetId().ServerKnows()) {
- // New clients (following the resolution of crbug.com/125381) should not
- // create such items. Old clients may have left some in the database
- // (crbug.com/132905), but we should now be cleaning them on startup.
- NOTREACHED() << "Found deleted and unsynced local item: " << entry;
- return false;
- }
-
- // Extra validity checks.
- syncable::Id id = entry.GetId();
- if (id == entry.GetParentId()) {
- CHECK(id.IsRoot()) << "Non-root item is self parenting." << entry;
- // If the root becomes unsynced it can cause us problems.
- NOTREACHED() << "Root item became unsynced " << entry;
- return false;
- }
-
- if (entry.IsRoot()) {
- NOTREACHED() << "Permanent item became unsynced " << entry;
- return false;
- }
-
- DVLOG(2) << "Entry is ready for commit: " << entry;
- return true;
-}
-
-// Filters |unsynced_handles| to remove all entries that do not belong to the
-// specified |requested_types|, or are not eligible for a commit at this time.
-void FilterUnreadyEntries(
- syncable::BaseTransaction* trans,
- ModelTypeSet requested_types,
- ModelTypeSet encrypted_types,
- bool passphrase_missing,
- const syncable::Directory::Metahandles& unsynced_handles,
- std::set<int64>* ready_unsynced_set) {
- for (syncable::Directory::Metahandles::const_iterator iter =
- unsynced_handles.begin(); iter != unsynced_handles.end(); ++iter) {
- syncable::Entry entry(trans, syncable::GET_BY_HANDLE, *iter);
- if (IsEntryReadyForCommit(requested_types,
- encrypted_types,
- passphrase_missing,
- entry)) {
- ready_unsynced_set->insert(*iter);
- }
- }
-}
-
-// This class helps to implement OrderCommitIds(). Its members track the
-// progress of a traversal while its methods extend it. It can return early if
-// the traversal reaches the desired size before the full traversal is complete.
-class Traversal {
- public:
- Traversal(
- syncable::BaseTransaction* trans,
- int64 max_entries,
- syncable::Directory::Metahandles* out);
- ~Traversal();
-
- // First step of traversal building. Adds non-deleted items in order.
- void AddCreatesAndMoves(const std::set<int64>& ready_unsynced_set);
-
- // Second step of traverals building. Appends deleted items.
- void AddDeletes(const std::set<int64>& ready_unsynced_set);
-
- private:
- // The following functions do not modify the traversal directly. They return
- // their results in the |result| vector instead.
- bool AddUncommittedParentsAndTheirPredecessors(
- const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const;
-
- void TryAddItem(const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const;
-
- void AddItemThenPredecessors(
- const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const;
-
- void AddPredecessorsThenItem(
- const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const;
-
- // Returns true if we've collected enough items.
- bool IsFull() const;
-
- // Returns true if the specified handle is already in the traversal.
- bool HaveItem(int64 handle) const;
-
- // Adds the specified handles to the traversal.
- void AppendManyToTraversal(const syncable::Directory::Metahandles& handles);
-
- // Adds the specifed handle to the traversal.
- void AppendToTraversal(int64 handle);
-
- syncable::Directory::Metahandles* out_;
- std::set<int64> added_handles_;
- const size_t max_entries_;
- syncable::BaseTransaction* trans_;
-
- DISALLOW_COPY_AND_ASSIGN(Traversal);
-};
-
-Traversal::Traversal(
- syncable::BaseTransaction* trans,
- int64 max_entries,
- syncable::Directory::Metahandles* out)
- : out_(out),
- max_entries_(max_entries),
- trans_(trans) { }
-
-Traversal::~Traversal() {}
-
-bool Traversal::AddUncommittedParentsAndTheirPredecessors(
- const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const {
- syncable::Directory::Metahandles dependencies;
- syncable::Id parent_id = item.GetParentId();
-
- // Climb the tree adding entries leaf -> root.
- while (!parent_id.ServerKnows()) {
- syncable::Entry parent(trans_, syncable::GET_BY_ID, parent_id);
- CHECK(parent.good()) << "Bad user-only parent in item path.";
- int64 handle = parent.GetMetahandle();
- if (HaveItem(handle)) {
- // We've already added this parent (and therefore all of its parents).
- // We can return early.
- break;
- }
- if (IsEntryInConflict(parent)) {
- // We ignore all entries that are children of a conflicing item. Return
- // false immediately to forget the traversal we've built up so far.
- DVLOG(1) << "Parent was in conflict, omitting " << item;
- return false;
- }
- AddItemThenPredecessors(ready_unsynced_set,
- parent,
- &dependencies);
- parent_id = parent.GetParentId();
- }
-
- // Reverse what we added to get the correct order.
- result->insert(result->end(), dependencies.rbegin(), dependencies.rend());
- return true;
-}
-
-// Adds the given item to the list if it is unsynced and ready for commit.
-void Traversal::TryAddItem(const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const {
- DCHECK(item.GetIsUnsynced());
- int64 item_handle = item.GetMetahandle();
- if (ready_unsynced_set.count(item_handle) != 0) {
- result->push_back(item_handle);
- }
-}
-
-// Adds the given item, and all its unsynced predecessors. The traversal will
-// be cut short if any item along the traversal is not IS_UNSYNCED, or if we
-// detect that this area of the tree has already been traversed. Items that are
-// not 'ready' for commit (see IsEntryReadyForCommit()) will not be added to the
-// list, though they will not stop the traversal.
-void Traversal::AddItemThenPredecessors(
- const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const {
- int64 item_handle = item.GetMetahandle();
- if (HaveItem(item_handle)) {
- // We've already added this item to the commit set, and so must have
- // already added the predecessors as well.
- return;
- }
- TryAddItem(ready_unsynced_set, item, result);
- if (item.GetIsDel())
- return; // Deleted items have no predecessors.
-
- syncable::Id prev_id = item.GetPredecessorId();
- while (!prev_id.IsRoot()) {
- syncable::Entry prev(trans_, syncable::GET_BY_ID, prev_id);
- CHECK(prev.good()) << "Bad id when walking predecessors.";
- if (!prev.GetIsUnsynced()) {
- // We're interested in "runs" of unsynced items. This item breaks
- // the streak, so we stop traversing.
- return;
- }
- int64 handle = prev.GetMetahandle();
- if (HaveItem(handle)) {
- // We've already added this item to the commit set, and so must have
- // already added the predecessors as well.
- return;
- }
- TryAddItem(ready_unsynced_set, prev, result);
- prev_id = prev.GetPredecessorId();
- }
-}
-
-// Same as AddItemThenPredecessor, but the traversal order will be reversed.
-void Traversal::AddPredecessorsThenItem(
- const std::set<int64>& ready_unsynced_set,
- const syncable::Entry& item,
- syncable::Directory::Metahandles* result) const {
- syncable::Directory::Metahandles dependencies;
- AddItemThenPredecessors(ready_unsynced_set, item, &dependencies);
-
- // Reverse what we added to get the correct order.
- result->insert(result->end(), dependencies.rbegin(), dependencies.rend());
-}
-
-bool Traversal::IsFull() const {
- return out_->size() >= max_entries_;
-}
-
-bool Traversal::HaveItem(int64 handle) const {
- return added_handles_.find(handle) != added_handles_.end();
-}
-
-void Traversal::AppendManyToTraversal(
- const syncable::Directory::Metahandles& handles) {
- out_->insert(out_->end(), handles.begin(), handles.end());
- added_handles_.insert(handles.begin(), handles.end());
-}
-
-void Traversal::AppendToTraversal(int64 metahandle) {
- out_->push_back(metahandle);
- added_handles_.insert(metahandle);
-}
-
-void Traversal::AddCreatesAndMoves(
- const std::set<int64>& ready_unsynced_set) {
- // Add moves and creates, and prepend their uncommitted parents.
- for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
- !IsFull() && iter != ready_unsynced_set.end(); ++iter) {
- int64 metahandle = *iter;
- if (HaveItem(metahandle))
- continue;
-
- syncable::Entry entry(trans_,
- syncable::GET_BY_HANDLE,
- metahandle);
- if (!entry.GetIsDel()) {
- // We only commit an item + its dependencies if it and all its
- // dependencies are not in conflict.
- syncable::Directory::Metahandles item_dependencies;
- if (AddUncommittedParentsAndTheirPredecessors(
- ready_unsynced_set,
- entry,
- &item_dependencies)) {
- AddPredecessorsThenItem(ready_unsynced_set,
- entry,
- &item_dependencies);
- AppendManyToTraversal(item_dependencies);
- }
- }
- }
-
- // It's possible that we overcommitted while trying to expand dependent
- // items. If so, truncate the set down to the allowed size.
- if (out_->size() > max_entries_)
- out_->resize(max_entries_);
-}
-
-void Traversal::AddDeletes(
- const std::set<int64>& ready_unsynced_set) {
- set<syncable::Id> legal_delete_parents;
-
- for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
- !IsFull() && iter != ready_unsynced_set.end(); ++iter) {
- int64 metahandle = *iter;
- if (HaveItem(metahandle))
- continue;
-
- syncable::Entry entry(trans_, syncable::GET_BY_HANDLE,
- metahandle);
-
- if (entry.GetIsDel()) {
- syncable::Entry parent(trans_, syncable::GET_BY_ID,
- entry.GetParentId());
- // If the parent is deleted and unsynced, then any children of that
- // parent don't need to be added to the delete queue.
- //
- // Note: the parent could be synced if there was an update deleting a
- // folder when we had a deleted all items in it.
- // We may get more updates, or we may want to delete the entry.
- if (parent.good() && parent.GetIsDel() && parent.GetIsUnsynced()) {
- // However, if an entry is moved, these rules can apply differently.
- //
- // If the entry was moved, then the destination parent was deleted,
- // then we'll miss it in the roll up. We have to add it in manually.
- // TODO(chron): Unit test for move / delete cases:
- // Case 1: Locally moved, then parent deleted
- // Case 2: Server moved, then locally issue recursive delete.
- if (entry.GetId().ServerKnows() &&
- entry.GetParentId() != entry.GetServerParentId()) {
- DVLOG(1) << "Inserting moved and deleted entry, will be missed by "
- << "delete roll." << entry.GetId();
-
- AppendToTraversal(metahandle);
- }
-
- // Skip this entry since it's a child of a parent that will be
- // deleted. The server will unroll the delete and delete the
- // child as well.
- continue;
- }
-
- legal_delete_parents.insert(entry.GetParentId());
- }
- }
-
- // We could store all the potential entries with a particular parent during
- // the above scan, but instead we rescan here. This is less efficient, but
- // we're dropping memory alloc/dealloc in favor of linear scans of recently
- // examined entries.
- //
- // Scan through the UnsyncedMetaHandles again. If we have a deleted
- // entry, then check if the parent is in legal_delete_parents.
- //
- // Parent being in legal_delete_parents means for the child:
- // a recursive delete is not currently happening (no recent deletes in same
- // folder)
- // parent did expect at least one old deleted child
- // parent was not deleted
- for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
- !IsFull() && iter != ready_unsynced_set.end(); ++iter) {
- int64 metahandle = *iter;
- if (HaveItem(metahandle))
- continue;
- syncable::Entry entry(trans_, syncable::GET_BY_HANDLE, metahandle);
- if (entry.GetIsDel()) {
- syncable::Id parent_id = entry.GetParentId();
- if (legal_delete_parents.count(parent_id)) {
- AppendToTraversal(metahandle);
- }
- }
- }
-}
-
-void OrderCommitIds(
- syncable::BaseTransaction* trans,
- size_t max_entries,
- const std::set<int64>& ready_unsynced_set,
- syncable::Directory::Metahandles* out) {
- // Commits follow these rules:
- // 1. Moves or creates are preceded by needed folder creates, from
- // root to leaf. For folders whose contents are ordered, moves
- // and creates appear in order.
- // 2. Moves/Creates before deletes.
- // 3. Deletes, collapsed.
- // We commit deleted moves under deleted items as moves when collapsing
- // delete trees.
-
- Traversal traversal(trans, max_entries, out);
-
- // Add moves and creates, and prepend their uncommitted parents.
- traversal.AddCreatesAndMoves(ready_unsynced_set);
-
- // Add all deletes.
- traversal.AddDeletes(ready_unsynced_set);
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/engine/get_commit_ids.h b/chromium/sync/engine/get_commit_ids.h
deleted file mode 100644
index b435848e349..00000000000
--- a/chromium/sync/engine/get_commit_ids.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_GET_COMMIT_IDS_H_
-#define SYNC_ENGINE_GET_COMMIT_IDS_H_
-
-#include <vector>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/directory.h"
-
-using std::vector;
-
-namespace syncer {
-
-namespace syncable {
-class BaseTransaction;
-}
-
-// Returns up to |max_entries| metahandles of entries that belong to the
-// specified |type| and are ready for commit.
-//
-// This function returns handles in "commit order". A valid commit ordering is
-// one where parents are placed before children, predecessors are placed before
-// successors, and deletes appear after creates and moves.
-//
-// The predecessor to successor rule was implemented when we tracked positions
-// within a folder that was sensitive to such things. The current positioning
-// system can handle receiving the elements within a folder out of order, so we
-// may be able to remove that functionality in the future.
-// See crbug.com/287938.
-SYNC_EXPORT_PRIVATE void GetCommitIdsForType(
- syncable::BaseTransaction* trans,
- ModelType type,
- size_t max_entries,
- std::vector<int64>* out);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_GET_COMMIT_IDS_H_
diff --git a/chromium/sync/engine/net/DEPS b/chromium/sync/engine/net/DEPS
deleted file mode 100644
index 8fa9d48d882..00000000000
--- a/chromium/sync/engine/net/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
- "+net",
-]
diff --git a/chromium/sync/engine/net/server_connection_manager.cc b/chromium/sync/engine/net/server_connection_manager.cc
deleted file mode 100644
index 2cb781098a1..00000000000
--- a/chromium/sync/engine/net/server_connection_manager.cc
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/net/server_connection_manager.h"
-
-#include <errno.h>
-
-#include <ostream>
-#include <string>
-#include <vector>
-
-#include "base/metrics/histogram.h"
-#include "build/build_config.h"
-#include "net/base/net_errors.h"
-#include "net/http/http_status_code.h"
-#include "sync/engine/net/url_translator.h"
-#include "sync/engine/syncer.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/directory.h"
-#include "url/gurl.h"
-
-namespace syncer {
-
-using std::ostream;
-using std::string;
-using std::vector;
-
-static const char kSyncServerSyncPath[] = "/command/";
-
-HttpResponse::HttpResponse()
- : response_code(kUnsetResponseCode),
- content_length(kUnsetContentLength),
- payload_length(kUnsetPayloadLength),
- server_status(NONE) {}
-
-#define ENUM_CASE(x) case x: return #x; break
-
-const char* HttpResponse::GetServerConnectionCodeString(
- ServerConnectionCode code) {
- switch (code) {
- ENUM_CASE(NONE);
- ENUM_CASE(CONNECTION_UNAVAILABLE);
- ENUM_CASE(IO_ERROR);
- ENUM_CASE(SYNC_SERVER_ERROR);
- ENUM_CASE(SYNC_AUTH_ERROR);
- ENUM_CASE(SERVER_CONNECTION_OK);
- ENUM_CASE(RETRY);
- }
- NOTREACHED();
- return "";
-}
-
-#undef ENUM_CASE
-
-// TODO(clamy): check if all errors are in the right category.
-HttpResponse::ServerConnectionCode
-HttpResponse::ServerConnectionCodeFromNetError(int error_code) {
- switch (error_code) {
- case net::ERR_ABORTED:
- case net::ERR_SOCKET_NOT_CONNECTED:
- case net::ERR_NETWORK_CHANGED:
- case net::ERR_CONNECTION_FAILED:
- case net::ERR_NAME_NOT_RESOLVED:
- case net::ERR_INTERNET_DISCONNECTED:
- case net::ERR_NETWORK_ACCESS_DENIED:
- case net::ERR_NETWORK_IO_SUSPENDED:
- return CONNECTION_UNAVAILABLE;
- }
- return IO_ERROR;
-}
-
-ServerConnectionManager::Connection::Connection(
- ServerConnectionManager* scm) : scm_(scm) {
-}
-
-ServerConnectionManager::Connection::~Connection() {
-}
-
-bool ServerConnectionManager::Connection::ReadBufferResponse(
- string* buffer_out,
- HttpResponse* response,
- bool require_response) {
- if (net::HTTP_OK != response->response_code) {
- response->server_status = HttpResponse::SYNC_SERVER_ERROR;
- return false;
- }
-
- if (require_response && (1 > response->content_length))
- return false;
-
- const int64 bytes_read = ReadResponse(buffer_out,
- static_cast<int>(response->content_length));
- if (bytes_read != response->content_length) {
- response->server_status = HttpResponse::IO_ERROR;
- return false;
- }
- return true;
-}
-
-bool ServerConnectionManager::Connection::ReadDownloadResponse(
- HttpResponse* response,
- string* buffer_out) {
- const int64 bytes_read = ReadResponse(buffer_out,
- static_cast<int>(response->content_length));
-
- if (bytes_read != response->content_length) {
- LOG(ERROR) << "Mismatched content lengths, server claimed " <<
- response->content_length << ", but sent " << bytes_read;
- response->server_status = HttpResponse::IO_ERROR;
- return false;
- }
- return true;
-}
-
-ServerConnectionManager::ScopedConnectionHelper::ScopedConnectionHelper(
- ServerConnectionManager* manager, Connection* connection)
- : manager_(manager), connection_(connection) {}
-
-ServerConnectionManager::ScopedConnectionHelper::~ScopedConnectionHelper() {
- if (connection_)
- manager_->OnConnectionDestroyed(connection_.get());
- connection_.reset();
-}
-
-ServerConnectionManager::Connection*
-ServerConnectionManager::ScopedConnectionHelper::get() {
- return connection_.get();
-}
-
-namespace {
-
-string StripTrailingSlash(const string& s) {
- int stripped_end_pos = s.size();
- if (s.at(stripped_end_pos - 1) == '/') {
- stripped_end_pos = stripped_end_pos - 1;
- }
-
- return s.substr(0, stripped_end_pos);
-}
-
-} // namespace
-
-// TODO(chron): Use a GURL instead of string concatenation.
-string ServerConnectionManager::Connection::MakeConnectionURL(
- const string& sync_server,
- const string& path,
- bool use_ssl) const {
- string connection_url = (use_ssl ? "https://" : "http://");
- connection_url += sync_server;
- connection_url = StripTrailingSlash(connection_url);
- connection_url += path;
-
- return connection_url;
-}
-
-int ServerConnectionManager::Connection::ReadResponse(string* out_buffer,
- int length) {
- int bytes_read = buffer_.length();
- CHECK(length <= bytes_read);
- out_buffer->assign(buffer_);
- return bytes_read;
-}
-
-ScopedServerStatusWatcher::ScopedServerStatusWatcher(
- ServerConnectionManager* conn_mgr, HttpResponse* response)
- : conn_mgr_(conn_mgr),
- response_(response) {
- response->server_status = conn_mgr->server_status_;
-}
-
-ScopedServerStatusWatcher::~ScopedServerStatusWatcher() {
- conn_mgr_->SetServerStatus(response_->server_status);
-}
-
-ServerConnectionManager::ServerConnectionManager(
- const string& server,
- int port,
- bool use_ssl,
- CancelationSignal* cancelation_signal)
- : sync_server_(server),
- sync_server_port_(port),
- use_ssl_(use_ssl),
- proto_sync_path_(kSyncServerSyncPath),
- server_status_(HttpResponse::NONE),
- terminated_(false),
- active_connection_(NULL),
- cancelation_signal_(cancelation_signal),
- signal_handler_registered_(false) {
- signal_handler_registered_ = cancelation_signal_->TryRegisterHandler(this);
- if (!signal_handler_registered_) {
- // Calling a virtual function from a constructor. We can get away with it
- // here because ServerConnectionManager::OnSignalReceived() is the function
- // we want to call.
- OnSignalReceived();
- }
-}
-
-ServerConnectionManager::~ServerConnectionManager() {
- if (signal_handler_registered_) {
- cancelation_signal_->UnregisterHandler(this);
- }
-}
-
-ServerConnectionManager::Connection*
-ServerConnectionManager::MakeActiveConnection() {
- base::AutoLock lock(terminate_connection_lock_);
- DCHECK(!active_connection_);
- if (terminated_)
- return NULL;
-
- active_connection_ = MakeConnection();
- return active_connection_;
-}
-
-void ServerConnectionManager::OnConnectionDestroyed(Connection* connection) {
- DCHECK(connection);
- base::AutoLock lock(terminate_connection_lock_);
- // |active_connection_| can be NULL already if it was aborted. Also,
- // it can legitimately be a different Connection object if a new Connection
- // was created after a previous one was Aborted and destroyed.
- if (active_connection_ != connection)
- return;
-
- active_connection_ = NULL;
-}
-
-bool ServerConnectionManager::SetAuthToken(const std::string& auth_token) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (previously_invalidated_token != auth_token) {
- auth_token_.assign(auth_token);
- previously_invalidated_token = std::string();
- return true;
- }
-
- // This could happen in case like server outage/bug. E.g. token returned by
- // first request is considered invalid by sync server and because
- // of token server's caching policy, etc, same token is returned on second
- // request. Need to notify sync frontend again to request new token,
- // otherwise backend will stay in SYNC_AUTH_ERROR state while frontend thinks
- // everything is fine and takes no actions.
- SetServerStatus(HttpResponse::SYNC_AUTH_ERROR);
- return false;
-}
-
-void ServerConnectionManager::OnInvalidationCredentialsRejected() {
- InvalidateAndClearAuthToken();
- SetServerStatus(HttpResponse::SYNC_AUTH_ERROR);
-}
-
-void ServerConnectionManager::InvalidateAndClearAuthToken() {
- DCHECK(thread_checker_.CalledOnValidThread());
- // Copy over the token to previous invalid token.
- if (!auth_token_.empty()) {
- previously_invalidated_token.assign(auth_token_);
- auth_token_ = std::string();
- }
-}
-
-void ServerConnectionManager::SetServerStatus(
- HttpResponse::ServerConnectionCode server_status) {
- // SYNC_AUTH_ERROR is permanent error. Need to notify observer to take
- // action externally to resolve.
- if (server_status != HttpResponse::SYNC_AUTH_ERROR &&
- server_status_ == server_status) {
- return;
- }
- server_status_ = server_status;
- NotifyStatusChanged();
-}
-
-void ServerConnectionManager::NotifyStatusChanged() {
- DCHECK(thread_checker_.CalledOnValidThread());
- FOR_EACH_OBSERVER(ServerConnectionEventListener, listeners_,
- OnServerConnectionEvent(
- ServerConnectionEvent(server_status_)));
-}
-
-bool ServerConnectionManager::PostBufferWithCachedAuth(
- PostBufferParams* params, ScopedServerStatusWatcher* watcher) {
- DCHECK(thread_checker_.CalledOnValidThread());
- string path =
- MakeSyncServerPath(proto_sync_path(), MakeSyncQueryString(client_id_));
- return PostBufferToPath(params, path, auth_token(), watcher);
-}
-
-bool ServerConnectionManager::PostBufferToPath(PostBufferParams* params,
- const string& path, const string& auth_token,
- ScopedServerStatusWatcher* watcher) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(watcher != NULL);
-
- // TODO(pavely): crbug.com/273096. Check for "credentials_lost" is added as
- // workaround for M29 blocker to avoid sending RPC to sync with known invalid
- // token but instead to trigger refreshing token in ProfileSyncService. Need
- // to clean it.
- if (auth_token.empty() || auth_token == "credentials_lost") {
- params->response.server_status = HttpResponse::SYNC_AUTH_ERROR;
- // Print a log to distinguish this "known failure" from others.
- LOG(WARNING) << "ServerConnectionManager forcing SYNC_AUTH_ERROR";
- return false;
- }
-
- // When our connection object falls out of scope, it clears itself from
- // active_connection_.
- ScopedConnectionHelper post(this, MakeActiveConnection());
- if (!post.get()) {
- params->response.server_status = HttpResponse::CONNECTION_UNAVAILABLE;
- return false;
- }
-
- // Note that |post| may be aborted by now, which will just cause Init to fail
- // with CONNECTION_UNAVAILABLE.
- bool ok = post.get()->Init(
- path.c_str(), auth_token, params->buffer_in, &params->response);
-
- if (params->response.server_status == HttpResponse::SYNC_AUTH_ERROR) {
- InvalidateAndClearAuthToken();
- }
-
- if (!ok || net::HTTP_OK != params->response.response_code)
- return false;
-
- if (post.get()->ReadBufferResponse(
- &params->buffer_out, &params->response, true)) {
- params->response.server_status = HttpResponse::SERVER_CONNECTION_OK;
- return true;
- }
- return false;
-}
-
-// Returns the current server parameters in server_url and port.
-void ServerConnectionManager::GetServerParameters(string* server_url,
- int* port,
- bool* use_ssl) const {
- if (server_url != NULL)
- *server_url = sync_server_;
- if (port != NULL)
- *port = sync_server_port_;
- if (use_ssl != NULL)
- *use_ssl = use_ssl_;
-}
-
-std::string ServerConnectionManager::GetServerHost() const {
- string server_url;
- int port;
- bool use_ssl;
- GetServerParameters(&server_url, &port, &use_ssl);
- // For unit tests.
- if (server_url.empty())
- return std::string();
- // We just want the hostname, so we don't need to switch on use_ssl.
- server_url = "http://" + server_url;
- GURL gurl(server_url);
- DCHECK(gurl.is_valid()) << gurl;
- return gurl.host();
-}
-
-void ServerConnectionManager::AddListener(
- ServerConnectionEventListener* listener) {
- DCHECK(thread_checker_.CalledOnValidThread());
- listeners_.AddObserver(listener);
-}
-
-void ServerConnectionManager::RemoveListener(
- ServerConnectionEventListener* listener) {
- DCHECK(thread_checker_.CalledOnValidThread());
- listeners_.RemoveObserver(listener);
-}
-
-ServerConnectionManager::Connection* ServerConnectionManager::MakeConnection()
-{
- return NULL; // For testing.
-}
-
-void ServerConnectionManager::OnSignalReceived() {
- base::AutoLock lock(terminate_connection_lock_);
- terminated_ = true;
- if (active_connection_)
- active_connection_->Abort();
-
- // Sever our ties to this connection object. Note that it still may exist,
- // since we don't own it, but it has been neutered.
- active_connection_ = NULL;
-}
-
-std::ostream& operator << (std::ostream& s, const struct HttpResponse& hr) {
- s << " Response Code (bogus on error): " << hr.response_code;
- s << " Content-Length (bogus on error): " << hr.content_length;
- s << " Server Status: "
- << HttpResponse::GetServerConnectionCodeString(hr.server_status);
- return s;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/net/server_connection_manager.h b/chromium/sync/engine/net/server_connection_manager.h
deleted file mode 100644
index e6a48f0e659..00000000000
--- a/chromium/sync/engine/net/server_connection_manager.h
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
-#define SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
-
-#include <iosfwd>
-#include <string>
-
-#include "base/atomicops.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/observer_list.h"
-#include "base/strings/string_util.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/threading/thread_checker.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/cancelation_observer.h"
-#include "sync/syncable/syncable_id.h"
-
-namespace sync_pb {
-class ClientToServerMessage;
-}
-
-namespace syncer {
-
-class CancelationSignal;
-
-namespace syncable {
-class Directory;
-}
-
-static const int32 kUnsetResponseCode = -1;
-static const int32 kUnsetContentLength = -1;
-static const int32 kUnsetPayloadLength = -1;
-
-// HttpResponse gathers the relevant output properties of an HTTP request.
-// Depending on the value of the server_status code, response_code, and
-// content_length may not be valid.
-struct SYNC_EXPORT_PRIVATE HttpResponse {
- enum ServerConnectionCode {
- // For uninitialized state.
- NONE,
-
- // CONNECTION_UNAVAILABLE is returned when InternetConnect() fails.
- CONNECTION_UNAVAILABLE,
-
- // IO_ERROR is returned when reading/writing to a buffer has failed.
- IO_ERROR,
-
- // SYNC_SERVER_ERROR is returned when the HTTP status code indicates that
- // a non-auth error has occured.
- SYNC_SERVER_ERROR,
-
- // SYNC_AUTH_ERROR is returned when the HTTP status code indicates that an
- // auth error has occured (i.e. a 401 or sync-specific AUTH_INVALID
- // response)
- // TODO(tim): Caring about AUTH_INVALID is a layering violation. But
- // this app-specific logic is being added as a stable branch hotfix so
- // minimal changes prevail for the moment. Fix this! Bug 35060.
- SYNC_AUTH_ERROR,
-
- // SERVER_CONNECTION_OK is returned when request was handled correctly.
- SERVER_CONNECTION_OK,
-
- // RETRY is returned when a Commit request fails with a RETRY response from
- // the server.
- //
- // TODO(idana): the server no longer returns RETRY so we should remove this
- // value.
- RETRY,
- };
-
- // The HTTP Status code.
- int64 response_code;
-
- // The value of the Content-length header.
- int64 content_length;
-
- // The size of a download request's payload.
- int64 payload_length;
-
- // Identifies the type of failure, if any.
- ServerConnectionCode server_status;
-
- HttpResponse();
-
- static const char* GetServerConnectionCodeString(
- ServerConnectionCode code);
-
- static ServerConnectionCode ServerConnectionCodeFromNetError(
- int error_code);
-};
-
-struct ServerConnectionEvent {
- HttpResponse::ServerConnectionCode connection_code;
- explicit ServerConnectionEvent(HttpResponse::ServerConnectionCode code) :
- connection_code(code) {}
-};
-
-class SYNC_EXPORT_PRIVATE ServerConnectionEventListener {
- public:
- virtual void OnServerConnectionEvent(const ServerConnectionEvent& event) = 0;
- protected:
- virtual ~ServerConnectionEventListener() {}
-};
-
-class ServerConnectionManager;
-// A helper class that automatically notifies when the status changes.
-// TODO(tim): This class shouldn't be exposed outside of the implementation,
-// bug 35060.
-class SYNC_EXPORT_PRIVATE ScopedServerStatusWatcher
- : public base::NonThreadSafe {
- public:
- ScopedServerStatusWatcher(ServerConnectionManager* conn_mgr,
- HttpResponse* response);
- virtual ~ScopedServerStatusWatcher();
- private:
- ServerConnectionManager* const conn_mgr_;
- HttpResponse* const response_;
- DISALLOW_COPY_AND_ASSIGN(ScopedServerStatusWatcher);
-};
-
-// Use this class to interact with the sync server.
-// The ServerConnectionManager currently supports POSTing protocol buffers.
-//
-class SYNC_EXPORT_PRIVATE ServerConnectionManager : public CancelationObserver {
- public:
- // buffer_in - will be POSTed
- // buffer_out - string will be overwritten with response
- struct PostBufferParams {
- std::string buffer_in;
- std::string buffer_out;
- HttpResponse response;
- };
-
- // Abstract class providing network-layer functionality to the
- // ServerConnectionManager. Subclasses implement this using an HTTP stack of
- // their choice.
- class Connection {
- public:
- explicit Connection(ServerConnectionManager* scm);
- virtual ~Connection();
-
- // Called to initialize and perform an HTTP POST.
- virtual bool Init(const char* path,
- const std::string& auth_token,
- const std::string& payload,
- HttpResponse* response) = 0;
-
- // Immediately abandons a pending HTTP POST request and unblocks caller
- // in Init.
- virtual void Abort() = 0;
-
- bool ReadBufferResponse(std::string* buffer_out, HttpResponse* response,
- bool require_response);
- bool ReadDownloadResponse(HttpResponse* response, std::string* buffer_out);
-
- protected:
- std::string MakeConnectionURL(const std::string& sync_server,
- const std::string& path,
- bool use_ssl) const;
-
- void GetServerParams(std::string* server,
- int* server_port,
- bool* use_ssl) const {
- server->assign(scm_->sync_server_);
- *server_port = scm_->sync_server_port_;
- *use_ssl = scm_->use_ssl_;
- }
-
- std::string buffer_;
- ServerConnectionManager* scm_;
-
- private:
- int ReadResponse(void* buffer, int length);
- int ReadResponse(std::string* buffer, int length);
- };
-
- ServerConnectionManager(const std::string& server,
- int port,
- bool use_ssl,
- CancelationSignal* cancelation_signal);
-
- virtual ~ServerConnectionManager();
-
- // POSTS buffer_in and reads a response into buffer_out. Uses our currently
- // set auth token in our headers.
- //
- // Returns true if executed successfully.
- virtual bool PostBufferWithCachedAuth(PostBufferParams* params,
- ScopedServerStatusWatcher* watcher);
-
- void AddListener(ServerConnectionEventListener* listener);
- void RemoveListener(ServerConnectionEventListener* listener);
-
- inline HttpResponse::ServerConnectionCode server_status() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return server_status_;
- }
-
- const std::string client_id() const { return client_id_; }
-
- // Returns the current server parameters in server_url, port and use_ssl.
- void GetServerParameters(std::string* server_url,
- int* port,
- bool* use_ssl) const;
-
- std::string GetServerHost() const;
-
- // Factory method to create an Connection object we can use for
- // communication with the server.
- virtual Connection* MakeConnection();
-
- // Closes any active network connections to the sync server.
- // We expect this to get called on a different thread than the valid
- // ThreadChecker thread, as we want to kill any pending http traffic without
- // having to wait for the request to complete.
- virtual void OnSignalReceived() OVERRIDE FINAL;
-
- void set_client_id(const std::string& client_id) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(client_id_.empty());
- client_id_.assign(client_id);
- }
-
- // Sets a new auth token and time.
- bool SetAuthToken(const std::string& auth_token);
-
- // Our out-of-band invalidations channel can encounter auth errors,
- // and when it does so it tells us via this method to prevent making more
- // requests with known-bad tokens. This will put the
- // ServerConnectionManager in an auth error state as if it received an
- // HTTP 401 from sync servers.
- void OnInvalidationCredentialsRejected();
-
- bool HasInvalidAuthToken() {
- return auth_token_.empty();
- }
-
- const std::string auth_token() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return auth_token_;
- }
-
- protected:
- inline std::string proto_sync_path() const {
- return proto_sync_path_;
- }
-
- // Updates server_status_ and notifies listeners if server_status_ changed
- void SetServerStatus(HttpResponse::ServerConnectionCode server_status);
-
- // NOTE: Tests rely on this protected function being virtual.
- //
- // Internal PostBuffer base function.
- virtual bool PostBufferToPath(PostBufferParams*,
- const std::string& path,
- const std::string& auth_token,
- ScopedServerStatusWatcher* watcher);
-
- // An internal helper to clear our auth_token_ and cache the old version
- // in |previously_invalidated_token_| to shelter us from retrying with a
- // known bad token.
- void InvalidateAndClearAuthToken();
-
- // Helper to check terminated flags and build a Connection object, installing
- // it as the |active_connection_|. If this ServerConnectionManager has been
- // terminated, this will return NULL.
- Connection* MakeActiveConnection();
-
- // Called by Connection objects as they are destroyed to allow the
- // ServerConnectionManager to cleanup active connections.
- void OnConnectionDestroyed(Connection* connection);
-
- // The sync_server_ is the server that requests will be made to.
- std::string sync_server_;
-
- // The sync_server_port_ is the port that HTTP requests will be made on.
- int sync_server_port_;
-
- // The unique id of the user's client.
- std::string client_id_;
-
- // Indicates whether or not requests should be made using HTTPS.
- bool use_ssl_;
-
- // The paths we post to.
- std::string proto_sync_path_;
-
- // The auth token to use in authenticated requests.
- std::string auth_token_;
-
- // The previous auth token that is invalid now.
- std::string previously_invalidated_token;
-
- ObserverList<ServerConnectionEventListener> listeners_;
-
- HttpResponse::ServerConnectionCode server_status_;
-
- base::ThreadChecker thread_checker_;
-
- // Protects all variables below to allow bailing out of active connections.
- base::Lock terminate_connection_lock_;
-
- // If true, we've been told to terminate IO and expect to be destroyed
- // shortly. No future network requests will be made.
- bool terminated_;
-
- // A non-owning pointer to any active http connection, so that we can abort
- // it if necessary.
- Connection* active_connection_;
-
- private:
- friend class Connection;
- friend class ScopedServerStatusWatcher;
-
- // A class to help deal with cleaning up active Connection objects when (for
- // ex) multiple early-exits are present in some scope. ScopedConnectionHelper
- // informs the ServerConnectionManager before the Connection object it takes
- // ownership of is destroyed.
- class ScopedConnectionHelper {
- public:
- // |manager| must outlive this. Takes ownership of |connection|.
- ScopedConnectionHelper(ServerConnectionManager* manager,
- Connection* connection);
- ~ScopedConnectionHelper();
- Connection* get();
- private:
- ServerConnectionManager* manager_;
- scoped_ptr<Connection> connection_;
- DISALLOW_COPY_AND_ASSIGN(ScopedConnectionHelper);
- };
-
- void NotifyStatusChanged();
-
- CancelationSignal* const cancelation_signal_;
- bool signal_handler_registered_;
-
- DISALLOW_COPY_AND_ASSIGN(ServerConnectionManager);
-};
-
-std::ostream& operator<<(std::ostream& s, const struct HttpResponse& hr);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
diff --git a/chromium/sync/engine/net/url_translator.cc b/chromium/sync/engine/net/url_translator.cc
deleted file mode 100644
index e9cae5a183b..00000000000
--- a/chromium/sync/engine/net/url_translator.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Contains the definition of a few helper functions used for generating sync
-// URLs.
-
-#include "sync/engine/net/url_translator.h"
-
-#include "base/basictypes.h"
-#include "base/port.h"
-#include "net/base/escape.h"
-
-using std::string;
-
-namespace syncer {
-
-namespace {
-// Parameters that the server understands. (here, a-Z)
-const char kParameterClient[] = "client";
-const char kParameterClientID[] = "client_id";
-
-#if defined(GOOGLE_CHROME_BUILD)
-const char kClientName[] = "Google Chrome";
-#else
-const char kClientName[] = "Chromium";
-#endif // defined(GOOGLE_CHROME_BUILD)
-}
-
-// Convenience wrappers around CgiEscapePath().
-string CgiEscapeString(const char* src) {
- return CgiEscapeString(string(src));
-}
-
-string CgiEscapeString(const string& src) {
- return net::EscapeUrlEncodedData(src, true);
-}
-
-// This method appends the query string to the sync server path.
-string MakeSyncServerPath(const string& path, const string& query_string) {
- string result = path;
- result.append("?");
- result.append(query_string);
- return result;
-}
-
-string MakeSyncQueryString(const string& client_id) {
- string query;
- query += kParameterClient;
- query += "=" + CgiEscapeString(kClientName);
- query += "&";
- query += kParameterClientID;
- query += "=" + CgiEscapeString(client_id);
- return query;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/net/url_translator.h b/chromium/sync/engine/net/url_translator.h
deleted file mode 100644
index 3f8128f5490..00000000000
--- a/chromium/sync/engine/net/url_translator.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Contains the declaration of a few helper functions used for generating sync
-// URLs.
-
-#ifndef SYNC_ENGINE_NET_URL_TRANSLATOR_H_
-#define SYNC_ENGINE_NET_URL_TRANSLATOR_H_
-
-#include <string>
-
-namespace syncer {
-
-// Convenience wrappers around CgiEscapePath(), used by gaia_auth.
-std::string CgiEscapeString(const char* src);
-std::string CgiEscapeString(const std::string& src);
-
-// This method appends the query string to the sync server path.
-std::string MakeSyncServerPath(const std::string& path,
- const std::string& query_string);
-
-std::string MakeSyncQueryString(const std::string& client_id);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_NET_URL_TRANSLATOR_H_
diff --git a/chromium/sync/engine/nudge_source.cc b/chromium/sync/engine/nudge_source.cc
deleted file mode 100644
index 62a24091e59..00000000000
--- a/chromium/sync/engine/nudge_source.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/nudge_source.h"
-
-#include "base/logging.h"
-
-namespace syncer {
-
-#define ENUM_CASE(x) case x: return #x; break
-
-const char* GetNudgeSourceString(NudgeSource nudge_source) {
- switch (nudge_source) {
- ENUM_CASE(NUDGE_SOURCE_UNKNOWN);
- ENUM_CASE(NUDGE_SOURCE_NOTIFICATION);
- ENUM_CASE(NUDGE_SOURCE_LOCAL);
- ENUM_CASE(NUDGE_SOURCE_LOCAL_REFRESH);
- };
- NOTREACHED();
- return "";
-}
-
-#undef ENUM_CASE
-
-} // namespace syncer
diff --git a/chromium/sync/engine/nudge_source.h b/chromium/sync/engine/nudge_source.h
deleted file mode 100644
index d60b51558de..00000000000
--- a/chromium/sync/engine/nudge_source.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_NUDGE_SOURCE_H_
-#define SYNC_ENGINE_NUDGE_SOURCE_H_
-
-namespace syncer {
-
-enum NudgeSource {
- NUDGE_SOURCE_UNKNOWN = 0,
- // We received an invalidation message and are nudging to check for updates.
- NUDGE_SOURCE_NOTIFICATION,
- // A local change occurred (e.g. bookmark moved).
- NUDGE_SOURCE_LOCAL,
- // A local event is triggering an optimistic datatype refresh.
- NUDGE_SOURCE_LOCAL_REFRESH,
-};
-
-const char* GetNudgeSourceString(NudgeSource nudge_source);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_NUDGE_SOURCE_H_
diff --git a/chromium/sync/engine/process_updates_util.cc b/chromium/sync/engine/process_updates_util.cc
deleted file mode 100644
index 49e40b366d0..00000000000
--- a/chromium/sync/engine/process_updates_util.cc
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/process_updates_util.h"
-
-#include "base/location.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-using sessions::StatusController;
-
-using syncable::GET_BY_ID;
-
-namespace {
-
-// This function attempts to determine whether or not this update is genuinely
-// new, or if it is a reflection of one of our own commits.
-//
-// There is a known inaccuracy in its implementation. If this update ends up
-// being applied to a local item with a different ID, we will count the change
-// as being a non-reflection update. Fortunately, the server usually updates
-// our IDs correctly in its commit response, so a new ID during GetUpdate should
-// be rare.
-//
-// The only secnarios I can think of where this might happen are:
-// - We commit a new item to the server, but we don't persist the
-// server-returned new ID to the database before we shut down. On the GetUpdate
-// following the next restart, we will receive an update from the server that
-// updates its local ID.
-// - When two attempts to create an item with identical UNIQUE_CLIENT_TAG values
-// collide at the server. I have seen this in testing. When it happens, the
-// test server will send one of the clients a response to upate its local ID so
-// that both clients will refer to the item using the same ID going forward. In
-// this case, we're right to assume that the update is not a reflection.
-//
-// For more information, see FindLocalIdToUpdate().
-bool UpdateContainsNewVersion(syncable::BaseTransaction *trans,
- const sync_pb::SyncEntity &update) {
- int64 existing_version = -1; // The server always sends positive versions.
- syncable::Entry existing_entry(trans, GET_BY_ID,
- SyncableIdFromProto(update.id_string()));
- if (existing_entry.good())
- existing_version = existing_entry.GetBaseVersion();
-
- if (!existing_entry.good() && update.deleted()) {
- // There are several possible explanations for this. The most common cases
- // will be first time sync and the redelivery of deletions we've already
- // synced, accepted, and purged from our database. In either case, the
- // update is useless to us. Let's count them all as "not new", even though
- // that may not always be entirely accurate.
- return false;
- }
-
- if (existing_entry.good() &&
- !existing_entry.GetUniqueClientTag().empty() &&
- existing_entry.GetIsDel() &&
- update.deleted()) {
- // Unique client tags will have their version set to zero when they're
- // deleted. The usual version comparison logic won't be able to detect
- // reflections of these items. Instead, we assume any received tombstones
- // are reflections. That should be correct most of the time.
- return false;
- }
-
- return existing_version < update.version();
-}
-
-} // namespace
-
-void PartitionUpdatesByType(
- const sync_pb::GetUpdatesResponse& updates,
- ModelTypeSet requested_types,
- TypeSyncEntityMap* updates_by_type) {
- int update_count = updates.entries().size();
- for (ModelTypeSet::Iterator it = requested_types.First();
- it.Good(); it.Inc()) {
- updates_by_type->insert(std::make_pair(it.Get(), SyncEntityList()));
- }
- for (int i = 0; i < update_count; ++i) {
- const sync_pb::SyncEntity& update = updates.entries(i);
- ModelType type = GetModelType(update);
- if (!IsRealDataType(type)) {
- NOTREACHED() << "Received update with invalid type.";
- continue;
- }
-
- TypeSyncEntityMap::iterator it = updates_by_type->find(type);
- if (it == updates_by_type->end()) {
- DLOG(WARNING) << "Skipping update for unexpected type "
- << ModelTypeToString(type);
- continue;
- }
-
- it->second.push_back(&update);
- }
-}
-
-void ProcessDownloadedUpdates(
- syncable::Directory* dir,
- syncable::ModelNeutralWriteTransaction* trans,
- ModelType type,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status) {
- for (SyncEntityList::const_iterator update_it = applicable_updates.begin();
- update_it != applicable_updates.end(); ++update_it) {
- DCHECK_EQ(type, GetModelType(**update_it));
- if (!UpdateContainsNewVersion(trans, **update_it))
- status->increment_num_reflected_updates_downloaded_by(1);
- if ((*update_it)->deleted())
- status->increment_num_tombstone_updates_downloaded_by(1);
- VerifyResult verify_result = VerifyUpdate(trans, **update_it, type);
- if (verify_result != VERIFY_SUCCESS && verify_result != VERIFY_UNDELETE)
- continue;
- ProcessUpdate(**update_it, dir->GetCryptographer(trans), trans);
- }
-}
-
-namespace {
-
-// In the event that IDs match, but tags differ AttemptReuniteClient tag
-// will have refused to unify the update.
-// We should not attempt to apply it at all since it violates consistency
-// rules.
-VerifyResult VerifyTagConsistency(
- const sync_pb::SyncEntity& entry,
- const syncable::ModelNeutralMutableEntry& same_id) {
- if (entry.has_client_defined_unique_tag() &&
- entry.client_defined_unique_tag() !=
- same_id.GetUniqueClientTag()) {
- return VERIFY_FAIL;
- }
- return VERIFY_UNDECIDED;
-}
-
-} // namespace
-
-VerifyResult VerifyUpdate(
- syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& entry,
- ModelType requested_type) {
- syncable::Id id = SyncableIdFromProto(entry.id_string());
- VerifyResult result = VERIFY_FAIL;
-
- const bool deleted = entry.has_deleted() && entry.deleted();
- const bool is_directory = IsFolder(entry);
- const ModelType model_type = GetModelType(entry);
-
- if (!id.ServerKnows()) {
- LOG(ERROR) << "Illegal negative id in received updates";
- return result;
- }
- {
- const std::string name = SyncerProtoUtil::NameFromSyncEntity(entry);
- if (name.empty() && !deleted) {
- LOG(ERROR) << "Zero length name in non-deleted update";
- return result;
- }
- }
-
- syncable::ModelNeutralMutableEntry same_id(trans, GET_BY_ID, id);
- result = VerifyNewEntry(entry, &same_id, deleted);
-
- ModelType placement_type = !deleted ? GetModelType(entry)
- : same_id.good() ? same_id.GetModelType() : UNSPECIFIED;
-
- if (VERIFY_UNDECIDED == result) {
- result = VerifyTagConsistency(entry, same_id);
- }
-
- if (VERIFY_UNDECIDED == result) {
- if (deleted) {
- // For deletes the server could send tombostones for items that
- // the client did not request. If so ignore those items.
- if (IsRealDataType(placement_type) && requested_type != placement_type) {
- result = VERIFY_SKIP;
- } else {
- result = VERIFY_SUCCESS;
- }
- }
- }
-
- // If we have an existing entry, we check here for updates that break
- // consistency rules.
- if (VERIFY_UNDECIDED == result) {
- result = VerifyUpdateConsistency(trans, entry, deleted,
- is_directory, model_type, &same_id);
- }
-
- if (VERIFY_UNDECIDED == result)
- result = VERIFY_SUCCESS; // No news is good news.
-
- return result; // This might be VERIFY_SUCCESS as well
-}
-
-namespace {
-// Returns true if the entry is still ok to process.
-bool ReverifyEntry(syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& entry,
- syncable::ModelNeutralMutableEntry* same_id) {
-
- const bool deleted = entry.has_deleted() && entry.deleted();
- const bool is_directory = IsFolder(entry);
- const ModelType model_type = GetModelType(entry);
-
- return VERIFY_SUCCESS == VerifyUpdateConsistency(trans,
- entry,
- deleted,
- is_directory,
- model_type,
- same_id);
-}
-} // namespace
-
-// Process a single update. Will avoid touching global state.
-void ProcessUpdate(
- const sync_pb::SyncEntity& update,
- const Cryptographer* cryptographer,
- syncable::ModelNeutralWriteTransaction* const trans) {
- const syncable::Id& server_id = SyncableIdFromProto(update.id_string());
- const std::string name = SyncerProtoUtil::NameFromSyncEntity(update);
-
- // Look to see if there's a local item that should recieve this update,
- // maybe due to a duplicate client tag or a lost commit response.
- syncable::Id local_id = FindLocalIdToUpdate(trans, update);
-
- // FindLocalEntryToUpdate has veto power.
- if (local_id.IsNull()) {
- return; // The entry has become irrelevant.
- }
-
- CreateNewEntry(trans, local_id);
-
- // We take a two step approach. First we store the entries data in the
- // server fields of a local entry and then move the data to the local fields
- syncable::ModelNeutralMutableEntry target_entry(trans, GET_BY_ID, local_id);
-
- // We need to run the Verify checks again; the world could have changed
- // since we last verified.
- if (!ReverifyEntry(trans, update, &target_entry)) {
- return; // The entry has become irrelevant.
- }
-
- // If we're repurposing an existing local entry with a new server ID,
- // change the ID now, after we're sure that the update can succeed.
- if (local_id != server_id) {
- DCHECK(!update.deleted());
- ChangeEntryIDAndUpdateChildren(trans, &target_entry, server_id);
- // When IDs change, versions become irrelevant. Forcing BASE_VERSION
- // to zero would ensure that this update gets applied, but would indicate
- // creation or undeletion if it were committed that way. Instead, prefer
- // forcing BASE_VERSION to entry.version() while also forcing
- // IS_UNAPPLIED_UPDATE to true. If the item is UNSYNCED, it's committable
- // from the new state; it may commit before the conflict resolver gets
- // a crack at it.
- if (target_entry.GetIsUnsynced() || target_entry.GetBaseVersion() > 0) {
- // If either of these conditions are met, then we can expect valid client
- // fields for this entry. When BASE_VERSION is positive, consistency is
- // enforced on the client fields at update-application time. Otherwise,
- // we leave the BASE_VERSION field alone; it'll get updated the first time
- // we successfully apply this update.
- target_entry.PutBaseVersion(update.version());
- }
- // Force application of this update, no matter what.
- target_entry.PutIsUnappliedUpdate(true);
- }
-
- // If this is a newly received undecryptable update, and the only thing that
- // has changed are the specifics, store the original decryptable specifics,
- // (on which any current or future local changes are based) before we
- // overwrite SERVER_SPECIFICS.
- // MTIME, CTIME, and NON_UNIQUE_NAME are not enforced.
-
- bool position_matches = false;
- if (target_entry.ShouldMaintainPosition() && !update.deleted()) {
- std::string update_tag = GetUniqueBookmarkTagFromUpdate(update);
- if (UniquePosition::IsValidSuffix(update_tag)) {
- position_matches = GetUpdatePosition(update, update_tag).Equals(
- target_entry.GetServerUniquePosition());
- } else {
- NOTREACHED();
- }
- } else {
- // If this item doesn't care about positions, then set this flag to true.
- position_matches = true;
- }
-
- if (!update.deleted() && !target_entry.GetServerIsDel() &&
- (SyncableIdFromProto(update.parent_id_string()) ==
- target_entry.GetServerParentId()) &&
- position_matches &&
- update.has_specifics() && update.specifics().has_encrypted() &&
- !cryptographer->CanDecrypt(update.specifics().encrypted())) {
- sync_pb::EntitySpecifics prev_specifics =
- target_entry.GetServerSpecifics();
- // We only store the old specifics if they were decryptable and applied and
- // there is no BASE_SERVER_SPECIFICS already. Else do nothing.
- if (!target_entry.GetIsUnappliedUpdate() &&
- !IsRealDataType(GetModelTypeFromSpecifics(
- target_entry.GetBaseServerSpecifics())) &&
- (!prev_specifics.has_encrypted() ||
- cryptographer->CanDecrypt(prev_specifics.encrypted()))) {
- DVLOG(2) << "Storing previous server specifcs: "
- << prev_specifics.SerializeAsString();
- target_entry.PutBaseServerSpecifics(prev_specifics);
- }
- } else if (IsRealDataType(GetModelTypeFromSpecifics(
- target_entry.GetBaseServerSpecifics()))) {
- // We have a BASE_SERVER_SPECIFICS, but a subsequent non-specifics-only
- // change arrived. As a result, we can't use the specifics alone to detect
- // changes, so we clear BASE_SERVER_SPECIFICS.
- target_entry.PutBaseServerSpecifics(
- sync_pb::EntitySpecifics());
- }
-
- UpdateServerFieldsFromUpdate(&target_entry, update, name);
-
- return;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/process_updates_util.h b/chromium/sync/engine/process_updates_util.h
deleted file mode 100644
index 6e8bc71b859..00000000000
--- a/chromium/sync/engine/process_updates_util.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_PROCESS_UPDATES_UTIL_H_
-#define SYNC_ENGINE_PROCESS_UPDATES_UTIL_H_
-
-#include <map>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace sync_pb {
-class GetUpdatesResponse;
-class SyncEntity;
-}
-
-namespace syncer {
-
-namespace sessions {
-class StatusController;
-}
-
-namespace syncable {
-class ModelNeutralWriteTransaction;
-class Directory;
-}
-
-class Cryptographer;
-
-// TODO(rlarocque): Move these definitions somewhere else?
-typedef std::vector<const sync_pb::SyncEntity*> SyncEntityList;
-typedef std::map<ModelType, SyncEntityList> TypeSyncEntityMap;
-
-// Given a GetUpdates response, iterates over all the returned items and
-// divides them according to their type. Outputs a map from model types to
-// received SyncEntities. The output map will have entries (possibly empty)
-// for all types in |requested_types|.
-void PartitionUpdatesByType(
- const sync_pb::GetUpdatesResponse& updates,
- ModelTypeSet requested_types,
- TypeSyncEntityMap* updates_by_type);
-
-// Processes all the updates associated with a single ModelType.
-void ProcessDownloadedUpdates(
- syncable::Directory* dir,
- syncable::ModelNeutralWriteTransaction* trans,
- ModelType type,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status);
-
-// Checks whether or not an update is fit for processing.
-//
-// The answer may be "no" if the update appears invalid, or it's not releveant
-// (ie. a delete for an item we've never heard of), or other reasons.
-VerifyResult VerifyUpdate(
- syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& entry,
- ModelType requested_type);
-
-// If the update passes a series of checks, this function will copy
-// the SyncEntity's data into the SERVER side of the syncable::Directory.
-void ProcessUpdate(
- const sync_pb::SyncEntity& proto_update,
- const Cryptographer* cryptographer,
- syncable::ModelNeutralWriteTransaction* const trans);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_PROCESS_UPDATES_UTIL_H_
diff --git a/chromium/sync/engine/sync_directory_commit_contribution.cc b/chromium/sync/engine/sync_directory_commit_contribution.cc
deleted file mode 100644
index f43131e300f..00000000000
--- a/chromium/sync/engine/sync_directory_commit_contribution.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_directory_commit_contribution.h"
-
-#include "sync/engine/commit_util.h"
-#include "sync/engine/get_commit_ids.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-
-namespace syncer {
-
-using syncable::GET_BY_HANDLE;
-using syncable::SYNCER;
-
-SyncDirectoryCommitContribution::~SyncDirectoryCommitContribution() {
- DCHECK(!syncing_bits_set_);
-}
-
-// static.
-SyncDirectoryCommitContribution* SyncDirectoryCommitContribution::Build(
- syncable::Directory* dir,
- ModelType type,
- size_t max_entries) {
- std::vector<int64> metahandles;
-
- syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir);
- GetCommitIdsForType(&trans, type, max_entries, &metahandles);
-
- if (metahandles.empty())
- return NULL;
-
- google::protobuf::RepeatedPtrField<sync_pb::SyncEntity> entities;
- for (std::vector<int64>::iterator it = metahandles.begin();
- it != metahandles.end(); ++it) {
- sync_pb::SyncEntity* entity = entities.Add();
- syncable::ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, *it);
- commit_util::BuildCommitItem(entry, entity);
- entry.PutSyncing(true);
- }
-
- return new SyncDirectoryCommitContribution(metahandles, entities, dir);
-}
-
-void SyncDirectoryCommitContribution::AddToCommitMessage(
- sync_pb::ClientToServerMessage* msg) {
- DCHECK(syncing_bits_set_);
- sync_pb::CommitMessage* commit_message = msg->mutable_commit();
- entries_start_index_ = commit_message->entries_size();
- std::copy(entities_.begin(),
- entities_.end(),
- RepeatedPtrFieldBackInserter(commit_message->mutable_entries()));
-}
-
-SyncerError SyncDirectoryCommitContribution::ProcessCommitResponse(
- const sync_pb::ClientToServerResponse& response,
- sessions::StatusController* status) {
- DCHECK(syncing_bits_set_);
- const sync_pb::CommitResponse& commit_response = response.commit();
-
- int transient_error_commits = 0;
- int conflicting_commits = 0;
- int error_commits = 0;
- int successes = 0;
-
- std::set<syncable::Id> deleted_folders;
- {
- syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
- for (size_t i = 0; i < metahandles_.size(); ++i) {
- sync_pb::CommitResponse::ResponseType response_type =
- commit_util::ProcessSingleCommitResponse(
- &trans,
- commit_response.entryresponse(entries_start_index_ + i),
- entities_.Get(i),
- metahandles_[i],
- &deleted_folders);
- switch (response_type) {
- case sync_pb::CommitResponse::INVALID_MESSAGE:
- ++error_commits;
- break;
- case sync_pb::CommitResponse::CONFLICT:
- ++conflicting_commits;
- status->increment_num_server_conflicts();
- break;
- case sync_pb::CommitResponse::SUCCESS:
- ++successes;
- {
- syncable::Entry e(&trans, GET_BY_HANDLE, metahandles_[i]);
- if (e.GetModelType() == BOOKMARKS)
- status->increment_num_successful_bookmark_commits();
- }
- status->increment_num_successful_commits();
- break;
- case sync_pb::CommitResponse::OVER_QUOTA:
- // We handle over quota like a retry, which is same as transient.
- case sync_pb::CommitResponse::RETRY:
- case sync_pb::CommitResponse::TRANSIENT_ERROR:
- ++transient_error_commits;
- break;
- default:
- LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
- }
- }
- MarkDeletedChildrenSynced(dir_, &trans, &deleted_folders);
- }
-
- int commit_count = static_cast<int>(metahandles_.size());
- if (commit_count == successes) {
- return SYNCER_OK;
- } else if (error_commits > 0) {
- return SERVER_RETURN_UNKNOWN_ERROR;
- } else if (transient_error_commits > 0) {
- return SERVER_RETURN_TRANSIENT_ERROR;
- } else if (conflicting_commits > 0) {
- // This means that the server already has an item with this version, but
- // we haven't seen that update yet.
- //
- // A well-behaved client should respond to this by proceeding to the
- // download updates phase, fetching the conflicting items, then attempting
- // to resolve the conflict. That's not what this client does.
- //
- // We don't currently have any code to support that exceptional control
- // flow. Instead, we abort the current sync cycle and start a new one. The
- // end result is the same.
- return SERVER_RETURN_CONFLICT;
- } else {
- LOG(FATAL) << "Inconsistent counts when processing commit response";
- return SYNCER_OK;
- }
-}
-
-void SyncDirectoryCommitContribution::CleanUp() {
- DCHECK(syncing_bits_set_);
- UnsetSyncingBits();
-}
-
-size_t SyncDirectoryCommitContribution::GetNumEntries() const {
- return metahandles_.size();
-}
-
-SyncDirectoryCommitContribution::SyncDirectoryCommitContribution(
- const std::vector<int64>& metahandles,
- const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
- syncable::Directory* dir)
- : dir_(dir),
- metahandles_(metahandles),
- entities_(entities),
- entries_start_index_(0xDEADBEEF),
- syncing_bits_set_(true) {
-}
-
-void SyncDirectoryCommitContribution::UnsetSyncingBits() {
- syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
- for (std::vector<int64>::const_iterator it = metahandles_.begin();
- it != metahandles_.end(); ++it) {
- syncable::ModelNeutralMutableEntry entry(&trans, GET_BY_HANDLE, *it);
- entry.PutSyncing(false);
- }
- syncing_bits_set_ = false;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contribution.h b/chromium/sync/engine/sync_directory_commit_contribution.h
deleted file mode 100644
index 89340566755..00000000000
--- a/chromium/sync/engine/sync_directory_commit_contribution.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
-#define SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
-
-#include <vector>
-
-#include "base/gtest_prod_util.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/status_controller.h"
-
-namespace syncer {
-
-namespace sessions {
-class StatusController;
-} // namespace sessions
-
-namespace syncable {
-class Directory;
-} // namespace syncable
-
-// This class represents a set of items belonging to a particular data type that
-// have been selected from the syncable Directory and prepared for commit.
-//
-// This class handles the bookkeeping related to the commit of these items,
-// including processing the commit response message and setting and unsetting
-// the SYNCING bits.
-class SYNC_EXPORT_PRIVATE SyncDirectoryCommitContribution {
- public:
- // This destructor will DCHECK if UnsetSyncingBits() has not been called yet.
- ~SyncDirectoryCommitContribution();
-
- // Build a CommitContribution from the IS_UNSYNCED items in |dir| with the
- // given |type|. The contribution will include at most |max_items| entries.
- //
- // This function may return NULL if this type has no items ready for and
- // requiring commit. This function may make model neutral changes to the
- // directory.
- static SyncDirectoryCommitContribution* Build(
- syncable::Directory* dir,
- ModelType type,
- size_t max_items);
-
- // Serialize this contribution's entries to the given commit request |msg|.
- //
- // This function is not const. It will update some state in this contribution
- // that will be used when processing the associated commit response. This
- // function should not be called more than once.
- void AddToCommitMessage(sync_pb::ClientToServerMessage* msg);
-
- // Updates this contribution's contents in accordance with the provided
- // |response|.
- //
- // This function may make model-neutral changes to the directory. It is not
- // valid to call this function unless AddToCommitMessage() was called earlier.
- // This function should not be called more than once.
- SyncerError ProcessCommitResponse(
- const sync_pb::ClientToServerResponse& response,
- sessions::StatusController* status);
-
- // Cleans up any temproary state associated with the commit. Must be called
- // before destruction.
- void CleanUp();
-
- // Returns the number of entries included in this contribution.
- size_t GetNumEntries() const;
-
- private:
- class SyncDirectoryCommitContributionTest;
- FRIEND_TEST_ALL_PREFIXES(SyncDirectoryCommitContributionTest, GatherByTypes);
- FRIEND_TEST_ALL_PREFIXES(SyncDirectoryCommitContributionTest,
- GatherAndTruncate);
-
- SyncDirectoryCommitContribution(
- const std::vector<int64>& metahandles,
- const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
- syncable::Directory* directory);
-
- void UnsetSyncingBits();
-
- syncable::Directory* dir_;
- const std::vector<int64> metahandles_;
- const google::protobuf::RepeatedPtrField<sync_pb::SyncEntity> entities_;
- size_t entries_start_index_;
-
- // This flag is tracks whether or not the directory entries associated with
- // this commit still have their SYNCING bits set. These bits will be set when
- // the CommitContribution is created with Build() and unset when CleanUp() is
- // called. This flag must be unset by the time our destructor is called.
- bool syncing_bits_set_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncDirectoryCommitContribution);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTION_H_
diff --git a/chromium/sync/engine/sync_directory_commit_contribution_unittest.cc b/chromium/sync/engine/sync_directory_commit_contribution_unittest.cc
deleted file mode 100644
index 75f88bd8550..00000000000
--- a/chromium/sync/engine/sync_directory_commit_contribution_unittest.cc
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_directory_commit_contribution.h"
-
-#include "base/message_loop/message_loop.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/engine/test_syncable_utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-class SyncDirectoryCommitContributionTest : public ::testing::Test {
- public:
- virtual void SetUp() OVERRIDE {
- dir_maker_.SetUp();
-
- syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
- CreateTypeRoot(&trans, dir(), PREFERENCES);
- CreateTypeRoot(&trans, dir(), EXTENSIONS);
- }
-
- virtual void TearDown() OVERRIDE {
- dir_maker_.TearDown();
- }
-
- protected:
- int64 CreateUnsyncedItem(syncable::WriteTransaction* trans,
- ModelType type,
- const std::string& tag) {
- syncable::Entry parent_entry(
- trans,
- syncable::GET_BY_SERVER_TAG,
- ModelTypeToRootTag(type));
- syncable::MutableEntry entry(
- trans,
- syncable::CREATE,
- type,
- parent_entry.GetId(),
- tag);
- entry.PutIsUnsynced(true);
- return entry.GetMetahandle();
- }
-
- void CreateSuccessfulCommitResponse(
- const sync_pb::SyncEntity& entity,
- sync_pb::CommitResponse::EntryResponse* response) {
- response->set_response_type(sync_pb::CommitResponse::SUCCESS);
- response->set_non_unique_name(entity.name());
- response->set_version(entity.version() + 1);
- response->set_parent_id_string(entity.parent_id_string());
-
- if (entity.id_string()[0] == '-') // Look for the - in 'c-1234' style IDs.
- response->set_id_string(id_factory_.NewServerId().GetServerId());
- else
- response->set_id_string(entity.id_string());
- }
-
- syncable::Directory* dir() {
- return dir_maker_.directory();
- }
-
- TestIdFactory id_factory_;
-
- private:
- base::MessageLoop loop_; // Neeed to initialize the directory.
- TestDirectorySetterUpper dir_maker_;
-};
-
-// Verify that the SyncDirectoryCommitContribution contains only entries of its
-// specified type.
-TEST_F(SyncDirectoryCommitContributionTest, GatherByTypes) {
- int64 pref1;
- {
- syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
- pref1 = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
- CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
- CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
- }
-
- scoped_ptr<SyncDirectoryCommitContribution> cc(
- SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 5));
- ASSERT_EQ(2U, cc->GetNumEntries());
-
- const std::vector<int64>& metahandles = cc->metahandles_;
- EXPECT_TRUE(std::find(metahandles.begin(), metahandles.end(), pref1) !=
- metahandles.end());
- EXPECT_TRUE(std::find(metahandles.begin(), metahandles.end(), pref1) !=
- metahandles.end());
-
- cc->CleanUp();
-}
-
-// Verify that the SyncDirectoryCommitContributionTest builder function
-// truncates if necessary.
-TEST_F(SyncDirectoryCommitContributionTest, GatherAndTruncate) {
- int64 pref1;
- int64 pref2;
- {
- syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
- pref1 = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
- pref2 = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
- CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
- }
-
- scoped_ptr<SyncDirectoryCommitContribution> cc(
- SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 1));
- ASSERT_EQ(1U, cc->GetNumEntries());
-
- int64 only_metahandle = cc->metahandles_[0];
- EXPECT_TRUE(only_metahandle == pref1 || only_metahandle == pref2);
-
- cc->CleanUp();
-}
-
-// Sanity check for building commits from SyncDirectoryCommitContributions.
-// This test makes two CommitContribution objects of different types and uses
-// them to initialize a commit message. Then it checks that the contents of the
-// commit message match those of the directory they came from.
-TEST_F(SyncDirectoryCommitContributionTest, PrepareCommit) {
- {
- syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
- CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
- CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
- CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
- }
-
- scoped_ptr<SyncDirectoryCommitContribution> pref_cc(
- SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 25));
- scoped_ptr<SyncDirectoryCommitContribution> ext_cc(
- SyncDirectoryCommitContribution::Build(dir(), EXTENSIONS, 25));
-
- sync_pb::ClientToServerMessage message;
- pref_cc->AddToCommitMessage(&message);
- ext_cc->AddToCommitMessage(&message);
-
- const sync_pb::CommitMessage& commit_message = message.commit();
-
- std::set<syncable::Id> ids_for_commit;
- ASSERT_EQ(3, commit_message.entries_size());
- for (int i = 0; i < commit_message.entries_size(); ++i) {
- const sync_pb::SyncEntity& entity = commit_message.entries(i);
- // The entities in this test have client-style IDs since they've never been
- // committed before, so we must use CreateFromClientString to re-create them
- // from the commit message.
- ids_for_commit.insert(syncable::Id::CreateFromClientString(
- entity.id_string()));
- }
-
- ASSERT_EQ(3U, ids_for_commit.size());
- {
- syncable::ReadTransaction trans(FROM_HERE, dir());
- for (std::set<syncable::Id>::iterator it = ids_for_commit.begin();
- it != ids_for_commit.end(); ++it) {
- SCOPED_TRACE(it->value());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, *it);
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.GetSyncing());
- }
- }
-
- pref_cc->CleanUp();
- ext_cc->CleanUp();
-}
-
-// Creates some unsynced items, pretends to commit them, and hands back a
-// specially crafted response to the syncer in order to test commit response
-// processing. The response simulates a succesful commit scenario.
-TEST_F(SyncDirectoryCommitContributionTest, ProcessCommitResponse) {
- int64 pref1_handle;
- int64 pref2_handle;
- int64 ext1_handle;
- {
- syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, dir());
- pref1_handle = CreateUnsyncedItem(&trans, PREFERENCES, "pref1");
- pref2_handle = CreateUnsyncedItem(&trans, PREFERENCES, "pref2");
- ext1_handle = CreateUnsyncedItem(&trans, EXTENSIONS, "extension1");
- }
-
- scoped_ptr<SyncDirectoryCommitContribution> pref_cc(
- SyncDirectoryCommitContribution::Build(dir(), PREFERENCES, 25));
- scoped_ptr<SyncDirectoryCommitContribution> ext_cc(
- SyncDirectoryCommitContribution::Build(dir(), EXTENSIONS, 25));
-
- sync_pb::ClientToServerMessage message;
- pref_cc->AddToCommitMessage(&message);
- ext_cc->AddToCommitMessage(&message);
-
- const sync_pb::CommitMessage& commit_message = message.commit();
- ASSERT_EQ(3, commit_message.entries_size());
-
- sync_pb::ClientToServerResponse response;
- for (int i = 0; i < commit_message.entries_size(); ++i) {
- sync_pb::SyncEntity entity = commit_message.entries(i);
- sync_pb::CommitResponse_EntryResponse* entry_response =
- response.mutable_commit()->add_entryresponse();
- CreateSuccessfulCommitResponse(entity, entry_response);
- }
-
- sessions::StatusController status;
-
- // Process these in reverse order. Just because we can.
- ext_cc->ProcessCommitResponse(response, &status);
- pref_cc->ProcessCommitResponse(response, &status);
-
- {
- syncable::ReadTransaction trans(FROM_HERE, dir());
- syncable::Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
- EXPECT_TRUE(p1.GetId().ServerKnows());
- EXPECT_FALSE(p1.GetSyncing());
- EXPECT_LT(0, p1.GetServerVersion());
-
- syncable::Entry p2(&trans, syncable::GET_BY_HANDLE, pref2_handle);
- EXPECT_TRUE(p2.GetId().ServerKnows());
- EXPECT_FALSE(p2.GetSyncing());
- EXPECT_LT(0, p2.GetServerVersion());
-
- syncable::Entry e1(&trans, syncable::GET_BY_HANDLE, ext1_handle);
- EXPECT_TRUE(e1.GetId().ServerKnows());
- EXPECT_FALSE(e1.GetSyncing());
- EXPECT_LT(0, e1.GetServerVersion());
- }
-
- pref_cc->CleanUp();
- ext_cc->CleanUp();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contributor.cc b/chromium/sync/engine/sync_directory_commit_contributor.cc
deleted file mode 100644
index c87c8eda870..00000000000
--- a/chromium/sync/engine/sync_directory_commit_contributor.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_directory_commit_contributor.h"
-
-#include "sync/engine/sync_directory_commit_contribution.h"
-
-namespace syncer {
-
-SyncDirectoryCommitContributor::SyncDirectoryCommitContributor(
- syncable::Directory* dir,
- ModelType type)
- : dir_(dir),
- type_(type) {}
-
-SyncDirectoryCommitContributor::~SyncDirectoryCommitContributor() {}
-
-SyncDirectoryCommitContribution*
-SyncDirectoryCommitContributor::GetContribution(size_t max_entries) {
- return SyncDirectoryCommitContribution::Build(dir_, type_, max_entries);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_commit_contributor.h b/chromium/sync/engine/sync_directory_commit_contributor.h
deleted file mode 100644
index 6ffaeb7761a..00000000000
--- a/chromium/sync/engine/sync_directory_commit_contributor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
-#define SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
-
-#include <map>
-
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-
-class SyncDirectoryCommitContribution;
-
-namespace syncable {
-class Directory;
-}
-
-// This class represents the syncable::Directory as a source of items to commit
-// to the sync server.
-//
-// Each instance of this class represents a particular type within the
-// syncable::Directory. When asked, it will iterate through the directory, grab
-// any items of its type that are ready for commit, and return them in the form
-// of a SyncDirectoryCommitContribution.
-class SyncDirectoryCommitContributor {
- public:
- SyncDirectoryCommitContributor(syncable::Directory* dir, ModelType type);
- ~SyncDirectoryCommitContributor();
-
- SyncDirectoryCommitContribution* GetContribution(size_t max_entries);
-
- private:
- syncable::Directory* dir_;
- ModelType type_;
-};
-
-// TODO(rlarocque): Find a better place for this definition.
-typedef std::map<ModelType, SyncDirectoryCommitContributor*>
- CommitContributorMap;
-
-} // namespace
-
-#endif // SYNC_ENGINE_SYNC_DIRECTORY_COMMIT_CONTRIBUTOR_H_
diff --git a/chromium/sync/engine/sync_directory_update_handler.cc b/chromium/sync/engine/sync_directory_update_handler.cc
deleted file mode 100644
index 1a9bd1ec6de..00000000000
--- a/chromium/sync/engine/sync_directory_update_handler.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_directory_update_handler.h"
-
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/process_updates_util.h"
-#include "sync/engine/update_applicator.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-namespace syncer {
-
-using syncable::SYNCER;
-
-SyncDirectoryUpdateHandler::SyncDirectoryUpdateHandler(
- syncable::Directory* dir,
- ModelType type,
- scoped_refptr<ModelSafeWorker> worker)
- : dir_(dir),
- type_(type),
- worker_(worker) {}
-
-SyncDirectoryUpdateHandler::~SyncDirectoryUpdateHandler() {}
-
-void SyncDirectoryUpdateHandler::GetDownloadProgress(
- sync_pb::DataTypeProgressMarker* progress_marker) const {
- dir_->GetDownloadProgress(type_, progress_marker);
-}
-
-void SyncDirectoryUpdateHandler::ProcessGetUpdatesResponse(
- const sync_pb::DataTypeProgressMarker& progress_marker,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status) {
- syncable::ModelNeutralWriteTransaction trans(FROM_HERE, SYNCER, dir_);
- UpdateSyncEntities(&trans, applicable_updates, status);
- UpdateProgressMarker(progress_marker);
-}
-
-void SyncDirectoryUpdateHandler::ApplyUpdates(
- sessions::StatusController* status) {
- if (IsControlType(type_)) {
- return; // We don't process control types here.
- }
-
- if (!dir_->TypeHasUnappliedUpdates(type_)) {
- return; // No work to do. Skip this type.
- }
-
- WorkCallback c = base::Bind(
- &SyncDirectoryUpdateHandler::ApplyUpdatesImpl,
- // We wait until the callback is executed. We can safely use Unretained.
- base::Unretained(this),
- base::Unretained(status));
- worker_->DoWorkAndWaitUntilDone(c);
-}
-
-SyncerError SyncDirectoryUpdateHandler::ApplyUpdatesImpl(
- sessions::StatusController* status) {
- syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir_);
-
- std::vector<int64> handles;
- dir_->GetUnappliedUpdateMetaHandles(
- &trans,
- FullModelTypeSet(type_),
- &handles);
-
- // First set of update application passes.
- UpdateApplicator applicator(dir_->GetCryptographer(&trans));
- applicator.AttemptApplications(&trans, handles);
- status->increment_num_updates_applied_by(applicator.updates_applied());
- status->increment_num_hierarchy_conflicts_by(
- applicator.hierarchy_conflicts());
- status->increment_num_encryption_conflicts_by(
- applicator.encryption_conflicts());
-
- if (applicator.simple_conflict_ids().size() != 0) {
- // Resolve the simple conflicts we just detected.
- ConflictResolver resolver;
- resolver.ResolveConflicts(&trans,
- dir_->GetCryptographer(&trans),
- applicator.simple_conflict_ids(),
- status);
-
- // Conflict resolution sometimes results in more updates to apply.
- handles.clear();
- dir_->GetUnappliedUpdateMetaHandles(
- &trans,
- FullModelTypeSet(type_),
- &handles);
-
- UpdateApplicator conflict_applicator(dir_->GetCryptographer(&trans));
- conflict_applicator.AttemptApplications(&trans, handles);
-
- // We count the number of updates from both applicator passes.
- status->increment_num_updates_applied_by(
- conflict_applicator.updates_applied());
-
- // Encryption conflicts should remain unchanged by the resolution of simple
- // conflicts. Those can only be solved by updating our nigori key bag.
- DCHECK_EQ(conflict_applicator.encryption_conflicts(),
- applicator.encryption_conflicts());
-
- // Hierarchy conflicts should also remain unchanged, for reasons that are
- // more subtle. Hierarchy conflicts exist when the application of a pending
- // update from the server would make the local folder hierarchy
- // inconsistent. The resolution of simple conflicts could never affect the
- // hierarchy conflicting item directly, because hierarchy conflicts are not
- // processed by the conflict resolver. It could, in theory, modify the
- // local hierarchy on which hierarchy conflict detection depends. However,
- // the conflict resolution algorithm currently in use does not allow this.
- DCHECK_EQ(conflict_applicator.hierarchy_conflicts(),
- applicator.hierarchy_conflicts());
-
- // There should be no simple conflicts remaining. We know this because the
- // resolver should have resolved all the conflicts we detected last time
- // and, by the two previous assertions, that no conflicts have been
- // downgraded from encryption or hierarchy down to simple.
- DCHECK(conflict_applicator.simple_conflict_ids().empty());
- }
-
- return SYNCER_OK;
-}
-
-void SyncDirectoryUpdateHandler::UpdateSyncEntities(
- syncable::ModelNeutralWriteTransaction* trans,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status) {
- ProcessDownloadedUpdates(dir_, trans, type_, applicable_updates, status);
-}
-
-void SyncDirectoryUpdateHandler::UpdateProgressMarker(
- const sync_pb::DataTypeProgressMarker& progress_marker) {
- int field_number = progress_marker.data_type_id();
- ModelType model_type = GetModelTypeFromSpecificsFieldNumber(field_number);
- if (!IsRealDataType(model_type) || type_ != model_type) {
- NOTREACHED()
- << "Update handler of type " << ModelTypeToString(type_)
- << " asked to process progress marker with invalid type "
- << field_number;
- }
- dir_->SetDownloadProgress(type_, progress_marker);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_directory_update_handler.h b/chromium/sync/engine/sync_directory_update_handler.h
deleted file mode 100644
index ea4d791465f..00000000000
--- a/chromium/sync/engine/sync_directory_update_handler.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNC_DIRECTORY_UPDATE_HANDLER_H_
-#define SYNC_ENGINE_SYNC_DIRECTORY_UPDATE_HANDLER_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/process_updates_util.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-
-namespace sync_pb {
-class DataTypeProgressMarker;
-class GetUpdatesResponse;
-}
-
-namespace syncer {
-
-namespace sessions {
-class StatusController;
-}
-
-namespace syncable {
-class Directory;
-}
-
-class ModelSafeWorker;
-
-// This class represents the syncable::Directory's processes for requesting and
-// processing updates from the sync server.
-//
-// Each instance of this class represents a particular type in the
-// syncable::Directory. It can store and retreive that type's progress markers.
-// It can also process a set of received SyncEntities and store their data.
-class SYNC_EXPORT_PRIVATE SyncDirectoryUpdateHandler {
- public:
- SyncDirectoryUpdateHandler(syncable::Directory* dir,
- ModelType type,
- scoped_refptr<ModelSafeWorker> worker);
- ~SyncDirectoryUpdateHandler();
-
- // Fills the given parameter with the stored progress marker for this type.
- void GetDownloadProgress(
- sync_pb::DataTypeProgressMarker* progress_marker) const;
-
- // Processes the contents of a GetUpdates response message.
- //
- // Should be invoked with the progress marker and set of SyncEntities from a
- // single GetUpdates response message. The progress marker's type must match
- // this update handler's type, and the set of SyncEntities must include all
- // entities of this type found in the response message.
- void ProcessGetUpdatesResponse(
- const sync_pb::DataTypeProgressMarker& progress_marker,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status);
-
- // If there are updates to apply, apply them on the proper thread.
- // Delegates to ApplyUpdatesImpl().
- void ApplyUpdates(sessions::StatusController* status);
-
- private:
- friend class SyncDirectoryUpdateHandlerApplyUpdateTest;
- friend class SyncDirectoryUpdateHandlerProcessUpdateTest;
-
- // Processes the given SyncEntities and stores their data in the directory.
- // Their types must match this update handler's type.
- void UpdateSyncEntities(
- syncable::ModelNeutralWriteTransaction* trans,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status);
-
- // Stores the given progress marker in the directory.
- // Its type must match this update handler's type.
- void UpdateProgressMarker(
- const sync_pb::DataTypeProgressMarker& progress_marker);
-
- // Skips all checks and goes straight to applying the updates.
- SyncerError ApplyUpdatesImpl(sessions::StatusController* status);
-
- syncable::Directory* dir_;
- ModelType type_;
- scoped_refptr<ModelSafeWorker> worker_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncDirectoryUpdateHandler);
-};
-
-// TODO(rlarocque): Find a better place to define this.
-typedef std::map<ModelType, SyncDirectoryUpdateHandler*> UpdateHandlerMap;
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNC_DIRECTORY_UPDATE_HANDLER_H_
diff --git a/chromium/sync/engine/sync_directory_update_handler_unittest.cc b/chromium/sync/engine/sync_directory_update_handler_unittest.cc
deleted file mode 100644
index 86d447eba38..00000000000
--- a/chromium/sync/engine/sync_directory_update_handler_unittest.cc
+++ /dev/null
@@ -1,826 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_directory_update_handler.h"
-
-#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
-#include "base/stl_util.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/engine/test_syncable_utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using syncable::UNITTEST;
-
-// A test harness for tests that focus on processing updates.
-//
-// Update processing is what occurs when we first download updates. It converts
-// the received protobuf message into information in the syncable::Directory.
-// Any invalid or redundant updates will be dropped at this point.
-class SyncDirectoryUpdateHandlerProcessUpdateTest : public ::testing::Test {
- public:
- SyncDirectoryUpdateHandlerProcessUpdateTest()
- : ui_worker_(new FakeModelWorker(GROUP_UI)) {
- }
-
- virtual ~SyncDirectoryUpdateHandlerProcessUpdateTest() {}
-
- virtual void SetUp() OVERRIDE {
- dir_maker_.SetUp();
- }
-
- virtual void TearDown() OVERRIDE {
- dir_maker_.TearDown();
- }
-
- syncable::Directory* dir() {
- return dir_maker_.directory();
- }
- protected:
- scoped_ptr<sync_pb::SyncEntity> CreateUpdate(
- const std::string& id,
- const std::string& parent,
- const ModelType& type);
-
- // This exists mostly to give tests access to the protected member function.
- // Warning: This takes the syncable directory lock.
- void UpdateSyncEntities(
- SyncDirectoryUpdateHandler* handler,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status);
-
- // Another function to access private member functions.
- void UpdateProgressMarkers(
- SyncDirectoryUpdateHandler* handler,
- const sync_pb::DataTypeProgressMarker& progress);
-
- scoped_refptr<FakeModelWorker> ui_worker() {
- return ui_worker_;
- }
-
- private:
- base::MessageLoop loop_; // Needed to initialize the directory.
- TestDirectorySetterUpper dir_maker_;
- scoped_refptr<FakeModelWorker> ui_worker_;
-};
-
-scoped_ptr<sync_pb::SyncEntity>
-SyncDirectoryUpdateHandlerProcessUpdateTest::CreateUpdate(
- const std::string& id,
- const std::string& parent,
- const ModelType& type) {
- scoped_ptr<sync_pb::SyncEntity> e(new sync_pb::SyncEntity());
- e->set_id_string(id);
- e->set_parent_id_string(parent);
- e->set_non_unique_name(id);
- e->set_name(id);
- e->set_version(1000);
- AddDefaultFieldValue(type, e->mutable_specifics());
- return e.Pass();
-}
-
-void SyncDirectoryUpdateHandlerProcessUpdateTest::UpdateSyncEntities(
- SyncDirectoryUpdateHandler* handler,
- const SyncEntityList& applicable_updates,
- sessions::StatusController* status) {
- syncable::ModelNeutralWriteTransaction trans(FROM_HERE, UNITTEST, dir());
- handler->UpdateSyncEntities(&trans, applicable_updates, status);
-}
-
-void SyncDirectoryUpdateHandlerProcessUpdateTest::UpdateProgressMarkers(
- SyncDirectoryUpdateHandler* handler,
- const sync_pb::DataTypeProgressMarker& progress) {
- handler->UpdateProgressMarker(progress);
-}
-
-static const char kCacheGuid[] = "IrcjZ2jyzHDV9Io4+zKcXQ==";
-
-// Test that the bookmark tag is set on newly downloaded items.
-TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest, NewBookmarkTag) {
- SyncDirectoryUpdateHandler handler(dir(), BOOKMARKS, ui_worker());
- sync_pb::GetUpdatesResponse gu_response;
- sessions::StatusController status;
-
- // Add a bookmark item to the update message.
- std::string root = syncable::GetNullId().GetServerId();
- syncable::Id server_id = syncable::Id::CreateFromServerId("b1");
- scoped_ptr<sync_pb::SyncEntity> e =
- CreateUpdate(SyncableIdToProto(server_id), root, BOOKMARKS);
- e->set_originator_cache_guid(
- std::string(kCacheGuid, arraysize(kCacheGuid)-1));
- syncable::Id client_id = syncable::Id::CreateFromClientString("-2");
- e->set_originator_client_item_id(client_id.GetServerId());
- e->set_position_in_parent(0);
-
- // Add it to the applicable updates list.
- SyncEntityList bookmark_updates;
- bookmark_updates.push_back(e.get());
-
- // Process the update.
- UpdateSyncEntities(&handler, bookmark_updates, &status);
-
- syncable::ReadTransaction trans(FROM_HERE, dir());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(UniquePosition::IsValidSuffix(entry.GetUniqueBookmarkTag()));
- EXPECT_TRUE(entry.GetServerUniquePosition().IsValid());
-
- // If this assertion fails, that might indicate that the algorithm used to
- // generate bookmark tags has been modified. This could have implications for
- // bookmark ordering. Please make sure you know what you're doing if you
- // intend to make such a change.
- EXPECT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=", entry.GetUniqueBookmarkTag());
-}
-
-// Test the receipt of a type root node.
-TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest,
- ReceiveServerCreatedBookmarkFolders) {
- SyncDirectoryUpdateHandler handler(dir(), BOOKMARKS, ui_worker());
- sync_pb::GetUpdatesResponse gu_response;
- sessions::StatusController status;
-
- // Create an update that mimics the bookmark root.
- syncable::Id server_id = syncable::Id::CreateFromServerId("xyz");
- std::string root = syncable::GetNullId().GetServerId();
- scoped_ptr<sync_pb::SyncEntity> e =
- CreateUpdate(SyncableIdToProto(server_id), root, BOOKMARKS);
- e->set_server_defined_unique_tag("google_chrome_bookmarks");
- e->set_folder(true);
-
- // Add it to the applicable updates list.
- SyncEntityList bookmark_updates;
- bookmark_updates.push_back(e.get());
-
- EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
-
- // Process it.
- UpdateSyncEntities(&handler, bookmark_updates, &status);
-
- // Verify the results.
- syncable::ReadTransaction trans(FROM_HERE, dir());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
- ASSERT_TRUE(entry.good());
-
- EXPECT_FALSE(entry.ShouldMaintainPosition());
- EXPECT_FALSE(entry.GetUniquePosition().IsValid());
- EXPECT_FALSE(entry.GetServerUniquePosition().IsValid());
- EXPECT_TRUE(entry.GetUniqueBookmarkTag().empty());
-}
-
-// Test the receipt of a non-bookmark item.
-TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest, ReceiveNonBookmarkItem) {
- SyncDirectoryUpdateHandler handler(dir(), PREFERENCES, ui_worker());
- sync_pb::GetUpdatesResponse gu_response;
- sessions::StatusController status;
-
- std::string root = syncable::GetNullId().GetServerId();
- syncable::Id server_id = syncable::Id::CreateFromServerId("xyz");
- scoped_ptr<sync_pb::SyncEntity> e =
- CreateUpdate(SyncableIdToProto(server_id), root, PREFERENCES);
- e->set_server_defined_unique_tag("9PGRuKdX5sHyGMB17CvYTXuC43I=");
-
- // Add it to the applicable updates list.
- SyncEntityList autofill_updates;
- autofill_updates.push_back(e.get());
-
- EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
-
- // Process it.
- UpdateSyncEntities(&handler, autofill_updates, &status);
-
- syncable::ReadTransaction trans(FROM_HERE, dir());
- syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
- ASSERT_TRUE(entry.good());
-
- EXPECT_FALSE(entry.ShouldMaintainPosition());
- EXPECT_FALSE(entry.GetUniquePosition().IsValid());
- EXPECT_FALSE(entry.GetServerUniquePosition().IsValid());
- EXPECT_TRUE(entry.GetUniqueBookmarkTag().empty());
-}
-
-// Tests the setting of progress markers.
-TEST_F(SyncDirectoryUpdateHandlerProcessUpdateTest, ProcessNewProgressMarkers) {
- SyncDirectoryUpdateHandler handler(dir(), BOOKMARKS, ui_worker());
-
- sync_pb::DataTypeProgressMarker progress;
- progress.set_data_type_id(GetSpecificsFieldNumberFromModelType(BOOKMARKS));
- progress.set_token("token");
-
- UpdateProgressMarkers(&handler, progress);
-
- sync_pb::DataTypeProgressMarker saved;
- dir()->GetDownloadProgress(BOOKMARKS, &saved);
-
- EXPECT_EQ(progress.token(), saved.token());
- EXPECT_EQ(progress.data_type_id(), saved.data_type_id());
-}
-
-// A test harness for tests that focus on applying updates.
-//
-// Update application is performed when we want to take updates that were
-// previously downloaded, processed, and stored in our syncable::Directory
-// and use them to update our local state (both the Directory's local state
-// and the model's local state, though these tests focus only on the Directory's
-// local state).
-//
-// This is kept separate from the update processing test in part for historical
-// reasons, and in part because these tests may require a bit more infrastrcture
-// in the future. Update application should happen on a different thread a lot
-// of the time so these tests may end up requiring more infrastructure than the
-// update processing tests. Currently, we're bypassing most of those issues by
-// using FakeModelWorkers, so there's not much difference between the two test
-// harnesses.
-class SyncDirectoryUpdateHandlerApplyUpdateTest : public ::testing::Test {
- public:
- SyncDirectoryUpdateHandlerApplyUpdateTest()
- : ui_worker_(new FakeModelWorker(GROUP_UI)),
- password_worker_(new FakeModelWorker(GROUP_PASSWORD)),
- passive_worker_(new FakeModelWorker(GROUP_PASSIVE)),
- update_handler_map_deleter_(&update_handler_map_) {}
-
- virtual void SetUp() OVERRIDE {
- dir_maker_.SetUp();
- entry_factory_.reset(new TestEntryFactory(directory()));
-
- update_handler_map_.insert(std::make_pair(
- BOOKMARKS,
- new SyncDirectoryUpdateHandler(directory(), BOOKMARKS, ui_worker_)));
- update_handler_map_.insert(std::make_pair(
- PASSWORDS,
- new SyncDirectoryUpdateHandler(directory(),
- PASSWORDS,
- password_worker_)));
- }
-
- virtual void TearDown() OVERRIDE {
- dir_maker_.TearDown();
- }
-
- protected:
- void ApplyBookmarkUpdates(sessions::StatusController* status) {
- update_handler_map_[BOOKMARKS]->ApplyUpdates(status);
- }
-
- void ApplyPasswordUpdates(sessions::StatusController* status) {
- update_handler_map_[PASSWORDS]->ApplyUpdates(status);
- }
-
- TestEntryFactory* entry_factory() {
- return entry_factory_.get();
- }
-
- syncable::Directory* directory() {
- return dir_maker_.directory();
- }
-
- private:
- base::MessageLoop loop_; // Needed to initialize the directory.
- TestDirectorySetterUpper dir_maker_;
- scoped_ptr<TestEntryFactory> entry_factory_;
-
- scoped_refptr<FakeModelWorker> ui_worker_;
- scoped_refptr<FakeModelWorker> password_worker_;
- scoped_refptr<FakeModelWorker> passive_worker_;
-
- UpdateHandlerMap update_handler_map_;
- STLValueDeleter<UpdateHandlerMap> update_handler_map_deleter_;
-};
-
-namespace {
-sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
- sync_pb::EntitySpecifics result;
- AddDefaultFieldValue(BOOKMARKS, &result);
- return result;
-}
-} // namespace
-
-// Test update application for a few bookmark items.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, SimpleBookmark) {
- sessions::StatusController status;
-
- std::string root_server_id = syncable::GetNullId().GetServerId();
- int64 parent_handle =
- entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "parent", DefaultBookmarkSpecifics(), root_server_id);
- int64 child_handle =
- entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "child", DefaultBookmarkSpecifics(), "parent");
-
- ApplyBookmarkUpdates(&status);
-
- EXPECT_EQ(0, status.num_encryption_conflicts())
- << "Simple update shouldn't result in conflicts";
- EXPECT_EQ(0, status.num_hierarchy_conflicts())
- << "Simple update shouldn't result in conflicts";
- EXPECT_EQ(2, status.num_updates_applied())
- << "All items should have been successfully applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- syncable::Entry parent(&trans, syncable::GET_BY_HANDLE, parent_handle);
- syncable::Entry child(&trans, syncable::GET_BY_HANDLE, child_handle);
-
- ASSERT_TRUE(parent.good());
- ASSERT_TRUE(child.good());
-
- EXPECT_FALSE(parent.GetIsUnsynced());
- EXPECT_FALSE(parent.GetIsUnappliedUpdate());
- EXPECT_FALSE(child.GetIsUnsynced());
- EXPECT_FALSE(child.GetIsUnappliedUpdate());
- }
-}
-
-// Test that the applicator can handle updates delivered out of order.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
- BookmarkChildrenBeforeParent) {
- // Start with some bookmarks whose parents are unknown.
- std::string root_server_id = syncable::GetNullId().GetServerId();
- int64 a_handle = entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "a_child_created_first", DefaultBookmarkSpecifics(), "parent");
- int64 x_handle = entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "x_child_created_first", DefaultBookmarkSpecifics(), "parent");
-
- // Update application will fail.
- sessions::StatusController status1;
- ApplyBookmarkUpdates(&status1);
- EXPECT_EQ(0, status1.num_updates_applied());
- EXPECT_EQ(2, status1.num_hierarchy_conflicts());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- syncable::Entry a(&trans, syncable::GET_BY_HANDLE, a_handle);
- syncable::Entry x(&trans, syncable::GET_BY_HANDLE, x_handle);
-
- ASSERT_TRUE(a.good());
- ASSERT_TRUE(x.good());
-
- EXPECT_TRUE(a.GetIsUnappliedUpdate());
- EXPECT_TRUE(x.GetIsUnappliedUpdate());
- }
-
- // Now add their parent and a few siblings.
- entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "parent", DefaultBookmarkSpecifics(), root_server_id);
- entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "a_child_created_second", DefaultBookmarkSpecifics(), "parent");
- entry_factory()->CreateUnappliedNewBookmarkItemWithParent(
- "x_child_created_second", DefaultBookmarkSpecifics(), "parent");
-
- // Update application will succeed.
- sessions::StatusController status2;
- ApplyBookmarkUpdates(&status2);
- EXPECT_EQ(5, status2.num_updates_applied())
- << "All updates should have been successfully applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- syncable::Entry a(&trans, syncable::GET_BY_HANDLE, a_handle);
- syncable::Entry x(&trans, syncable::GET_BY_HANDLE, x_handle);
-
- ASSERT_TRUE(a.good());
- ASSERT_TRUE(x.good());
-
- EXPECT_FALSE(a.GetIsUnappliedUpdate());
- EXPECT_FALSE(x.GetIsUnappliedUpdate());
- }
-}
-
-// Try to apply changes on an item that is both IS_UNSYNCED and
-// IS_UNAPPLIED_UPDATE. Conflict resolution should be performed.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, SimpleBookmarkConflict) {
- int64 handle = entry_factory()->CreateUnappliedAndUnsyncedBookmarkItem("x");
-
- int original_server_version = -10;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
- original_server_version = e.GetServerVersion();
- ASSERT_NE(original_server_version, e.GetBaseVersion());
- EXPECT_TRUE(e.GetIsUnsynced());
- }
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
- EXPECT_EQ(1, status.num_server_overwrites())
- << "Unsynced and unapplied item conflict should be resolved";
- EXPECT_EQ(0, status.num_updates_applied())
- << "Update should not be applied; we should override the server.";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- EXPECT_EQ(original_server_version, e.GetServerVersion());
- EXPECT_EQ(original_server_version, e.GetBaseVersion());
- EXPECT_FALSE(e.GetIsUnappliedUpdate());
-
- // The unsynced flag will remain set until we successfully commit the item.
- EXPECT_TRUE(e.GetIsUnsynced());
- }
-}
-
-// Create a simple conflict that is also a hierarchy conflict. If we were to
-// follow the normal "server wins" logic, we'd end up violating hierarchy
-// constraints. The hierarchy conflict must take precedence. We can not allow
-// the update to be applied. The item must remain in the conflict state.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, HierarchyAndSimpleConflict) {
- // Create a simply-conflicting item. It will start with valid parent ids.
- int64 handle = entry_factory()->CreateUnappliedAndUnsyncedBookmarkItem(
- "orphaned_by_server");
- {
- // Manually set the SERVER_PARENT_ID to bad value.
- // A bad parent indicates a hierarchy conflict.
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(entry.good());
-
- entry.PutServerParentId(TestIdFactory::MakeServer("bogus_parent"));
- }
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
- EXPECT_EQ(0, status.num_updates_applied());
- EXPECT_EQ(0, status.num_server_overwrites());
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- EXPECT_TRUE(e.GetIsUnappliedUpdate());
- EXPECT_TRUE(e.GetIsUnsynced());
- }
-}
-
-// Attempt to apply an udpate that would create a bookmark folder loop. This
-// application should fail.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, BookmarkFolderLoop) {
- // Item 'X' locally has parent of 'root'. Server is updating it to have
- // parent of 'Y'.
-
- // Create it as a child of root node.
- int64 handle = entry_factory()->CreateSyncedItem("X", BOOKMARKS, true);
-
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(entry.good());
-
- // Re-parent from root to "Y"
- entry.PutServerVersion(entry_factory()->GetNextRevision());
- entry.PutIsUnappliedUpdate(true);
- entry.PutServerParentId(TestIdFactory::MakeServer("Y"));
- }
-
- // Item 'Y' is child of 'X'.
- entry_factory()->CreateUnsyncedItem(
- TestIdFactory::MakeServer("Y"), TestIdFactory::MakeServer("X"), "Y", true,
- BOOKMARKS, NULL);
-
- // If the server's update were applied, we would have X be a child of Y, and Y
- // as a child of X. That's a directory loop. The UpdateApplicator should
- // prevent the update from being applied and note that this is a hierarchy
- // conflict.
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
-
- // This should count as a hierarchy conflict.
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- EXPECT_TRUE(e.GetIsUnappliedUpdate());
- EXPECT_FALSE(e.GetIsUnsynced());
- }
-}
-
-// Test update application where the update has been orphaned by a local folder
-// deletion. The update application attempt should fail.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
- HierarchyConflictDeletedParent) {
- // Create a locally deleted parent item.
- int64 parent_handle;
- entry_factory()->CreateUnsyncedItem(
- syncable::Id::CreateFromServerId("parent"), TestIdFactory::root(),
- "parent", true, BOOKMARKS, &parent_handle);
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry entry(&trans,
- syncable::GET_BY_HANDLE,
- parent_handle);
- entry.PutIsDel(true);
- }
-
- // Create an incoming child from the server.
- int64 child_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "child", DefaultBookmarkSpecifics(), "parent");
-
- // The server's update may seem valid to some other client, but on this client
- // that new item's parent no longer exists. The update should not be applied
- // and the update applicator should indicate this is a hierarchy conflict.
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry child(&trans, syncable::GET_BY_HANDLE, child_handle);
- ASSERT_TRUE(child.good());
- EXPECT_TRUE(child.GetIsUnappliedUpdate());
- EXPECT_FALSE(child.GetIsUnsynced());
- }
-}
-
-// Attempt to apply an update that deletes a folder where the folder has
-// locally-created children. The update application should fail.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
- HierarchyConflictDeleteNonEmptyDirectory) {
- // Create a server-deleted folder as a child of root node.
- int64 parent_handle =
- entry_factory()->CreateSyncedItem("parent", BOOKMARKS, true);
- {
- syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- syncable::MutableEntry entry(&trans,
- syncable::GET_BY_HANDLE,
- parent_handle);
- ASSERT_TRUE(entry.good());
-
- // Delete it on the server.
- entry.PutServerVersion(entry_factory()->GetNextRevision());
- entry.PutIsUnappliedUpdate(true);
- entry.PutServerParentId(TestIdFactory::root());
- entry.PutServerIsDel(true);
- }
-
- // Create a local child of the server-deleted directory.
- entry_factory()->CreateUnsyncedItem(
- TestIdFactory::MakeServer("child"), TestIdFactory::MakeServer("parent"),
- "child", false, BOOKMARKS, NULL);
-
- // The server's request to delete the directory must be ignored, otherwise our
- // unsynced new child would be orphaned. This is a hierarchy conflict.
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
-
- // This should count as a hierarchy conflict.
- EXPECT_EQ(1, status.num_hierarchy_conflicts());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry parent(&trans, syncable::GET_BY_HANDLE, parent_handle);
- ASSERT_TRUE(parent.good());
- EXPECT_TRUE(parent.GetIsUnappliedUpdate());
- EXPECT_FALSE(parent.GetIsUnsynced());
- }
-}
-
-// Attempt to apply updates where the updated item's parent is not known to this
-// client. The update application attempt should fail.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest,
- HierarchyConflictUnknownParent) {
- // We shouldn't be able to do anything with either of these items.
- int64 x_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "some_item", DefaultBookmarkSpecifics(), "unknown_parent");
- int64 y_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "some_other_item", DefaultBookmarkSpecifics(), "some_item");
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
-
- EXPECT_EQ(2, status.num_hierarchy_conflicts())
- << "All updates with an unknown ancestors should be in conflict";
- EXPECT_EQ(0, status.num_updates_applied())
- << "No item with an unknown ancestor should be applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry x(&trans, syncable::GET_BY_HANDLE, x_handle);
- syncable::Entry y(&trans, syncable::GET_BY_HANDLE, y_handle);
- ASSERT_TRUE(x.good());
- ASSERT_TRUE(y.good());
- EXPECT_TRUE(x.GetIsUnappliedUpdate());
- EXPECT_TRUE(y.GetIsUnappliedUpdate());
- EXPECT_FALSE(x.GetIsUnsynced());
- EXPECT_FALSE(y.GetIsUnsynced());
- }
-}
-
-// Attempt application of a mix of items. Some update application attempts will
-// fail due to hierarchy conflicts. Others should succeed.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, ItemsBothKnownAndUnknown) {
- // See what happens when there's a mixture of good and bad updates.
- std::string root_server_id = syncable::GetNullId().GetServerId();
- int64 u1_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "first_unknown_item", DefaultBookmarkSpecifics(), "unknown_parent");
- int64 k1_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "first_known_item", DefaultBookmarkSpecifics(), root_server_id);
- int64 u2_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "second_unknown_item", DefaultBookmarkSpecifics(), "unknown_parent");
- int64 k2_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "second_known_item", DefaultBookmarkSpecifics(), "first_known_item");
- int64 k3_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "third_known_item", DefaultBookmarkSpecifics(), "fourth_known_item");
- int64 k4_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "fourth_known_item", DefaultBookmarkSpecifics(), root_server_id);
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
-
- EXPECT_EQ(2, status.num_hierarchy_conflicts())
- << "The updates with unknown ancestors should be in conflict";
- EXPECT_EQ(4, status.num_updates_applied())
- << "The updates with known ancestors should be successfully applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry u1(&trans, syncable::GET_BY_HANDLE, u1_handle);
- syncable::Entry u2(&trans, syncable::GET_BY_HANDLE, u2_handle);
- syncable::Entry k1(&trans, syncable::GET_BY_HANDLE, k1_handle);
- syncable::Entry k2(&trans, syncable::GET_BY_HANDLE, k2_handle);
- syncable::Entry k3(&trans, syncable::GET_BY_HANDLE, k3_handle);
- syncable::Entry k4(&trans, syncable::GET_BY_HANDLE, k4_handle);
- ASSERT_TRUE(u1.good());
- ASSERT_TRUE(u2.good());
- ASSERT_TRUE(k1.good());
- ASSERT_TRUE(k2.good());
- ASSERT_TRUE(k3.good());
- ASSERT_TRUE(k4.good());
- EXPECT_TRUE(u1.GetIsUnappliedUpdate());
- EXPECT_TRUE(u2.GetIsUnappliedUpdate());
- EXPECT_FALSE(k1.GetIsUnappliedUpdate());
- EXPECT_FALSE(k2.GetIsUnappliedUpdate());
- EXPECT_FALSE(k3.GetIsUnappliedUpdate());
- EXPECT_FALSE(k4.GetIsUnappliedUpdate());
- }
-}
-
-// Attempt application of password upates where the passphrase is known.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, DecryptablePassword) {
- // Decryptable password updates should be applied.
- Cryptographer* cryptographer;
- {
- // Storing the cryptographer separately is bad, but for this test we
- // know it's safe.
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
- }
-
- KeyParams params = {"localhost", "dummy", "foobar"};
- cryptographer->AddKey(params);
-
- sync_pb::EntitySpecifics specifics;
- sync_pb::PasswordSpecificsData data;
- data.set_origin("http://example.com");
-
- cryptographer->Encrypt(data,
- specifics.mutable_password()->mutable_encrypted());
- int64 handle =
- entry_factory()->CreateUnappliedNewItem("item", specifics, false);
-
- sessions::StatusController status;
- ApplyPasswordUpdates(&status);
-
- EXPECT_EQ(1, status.num_updates_applied())
- << "The updates that can be decrypted should be applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry e(&trans, syncable::GET_BY_HANDLE, handle);
- ASSERT_TRUE(e.good());
- EXPECT_FALSE(e.GetIsUnappliedUpdate());
- EXPECT_FALSE(e.GetIsUnsynced());
- }
-}
-
-// Attempt application of encrypted items when the passphrase is not known.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, UndecryptableData) {
- // Undecryptable updates should not be applied.
- sync_pb::EntitySpecifics encrypted_bookmark;
- encrypted_bookmark.mutable_encrypted();
- AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
- std::string root_server_id = syncable::GetNullId().GetServerId();
- int64 folder_handle = entry_factory()->CreateUnappliedNewItemWithParent(
- "folder",
- encrypted_bookmark,
- root_server_id);
- int64 bookmark_handle = entry_factory()->CreateUnappliedNewItem(
- "item2",
- encrypted_bookmark,
- false);
- sync_pb::EntitySpecifics encrypted_password;
- encrypted_password.mutable_password();
- int64 password_handle = entry_factory()->CreateUnappliedNewItem(
- "item3",
- encrypted_password,
- false);
-
- sessions::StatusController status;
- ApplyBookmarkUpdates(&status);
- ApplyPasswordUpdates(&status);
-
- EXPECT_EQ(3, status.num_encryption_conflicts())
- << "Updates that can't be decrypted should be in encryption conflict";
- EXPECT_EQ(0, status.num_updates_applied())
- << "No update that can't be decrypted should be applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry folder(&trans, syncable::GET_BY_HANDLE, folder_handle);
- syncable::Entry bm(&trans, syncable::GET_BY_HANDLE, bookmark_handle);
- syncable::Entry pw(&trans, syncable::GET_BY_HANDLE, password_handle);
- ASSERT_TRUE(folder.good());
- ASSERT_TRUE(bm.good());
- ASSERT_TRUE(pw.good());
- EXPECT_TRUE(folder.GetIsUnappliedUpdate());
- EXPECT_TRUE(bm.GetIsUnappliedUpdate());
- EXPECT_TRUE(pw.GetIsUnappliedUpdate());
- }
-}
-
-// Test a mix of decryptable and undecryptable updates.
-TEST_F(SyncDirectoryUpdateHandlerApplyUpdateTest, SomeUndecryptablePassword) {
- Cryptographer* cryptographer;
-
- int64 decryptable_handle = -1;
- int64 undecryptable_handle = -1;
-
- // Only decryptable password updates should be applied.
- {
- sync_pb::EntitySpecifics specifics;
- sync_pb::PasswordSpecificsData data;
- data.set_origin("http://example.com/1");
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- cryptographer = directory()->GetCryptographer(&trans);
-
- KeyParams params = {"localhost", "dummy", "foobar"};
- cryptographer->AddKey(params);
-
- cryptographer->Encrypt(data,
- specifics.mutable_password()->mutable_encrypted());
- }
- decryptable_handle =
- entry_factory()->CreateUnappliedNewItem("item1", specifics, false);
- }
- {
- // Create a new cryptographer, independent of the one in the session.
- Cryptographer other_cryptographer(cryptographer->encryptor());
- KeyParams params = {"localhost", "dummy", "bazqux"};
- other_cryptographer.AddKey(params);
-
- sync_pb::EntitySpecifics specifics;
- sync_pb::PasswordSpecificsData data;
- data.set_origin("http://example.com/2");
-
- other_cryptographer.Encrypt(data,
- specifics.mutable_password()->mutable_encrypted());
- undecryptable_handle =
- entry_factory()->CreateUnappliedNewItem("item2", specifics, false);
- }
-
- sessions::StatusController status;
- ApplyPasswordUpdates(&status);
-
- EXPECT_EQ(1, status.num_encryption_conflicts())
- << "The updates that can't be decrypted should be in encryption "
- << "conflict";
- EXPECT_EQ(1, status.num_updates_applied())
- << "The undecryptable password update shouldn't be applied";
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Entry e1(&trans, syncable::GET_BY_HANDLE, decryptable_handle);
- syncable::Entry e2(&trans, syncable::GET_BY_HANDLE, undecryptable_handle);
- ASSERT_TRUE(e1.good());
- ASSERT_TRUE(e2.good());
- EXPECT_FALSE(e1.GetIsUnappliedUpdate());
- EXPECT_TRUE(e2.GetIsUnappliedUpdate());
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_engine_event.cc b/chromium/sync/engine/sync_engine_event.cc
deleted file mode 100644
index 2df3d6284c5..00000000000
--- a/chromium/sync/engine/sync_engine_event.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_engine_event.h"
-
-namespace syncer {
-
-SyncEngineEvent::SyncEngineEvent(EventCause cause) : what_happened(cause) {
-}
-
-SyncEngineEvent::~SyncEngineEvent() {}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_engine_event.h b/chromium/sync/engine/sync_engine_event.h
deleted file mode 100644
index 026d3292a7a..00000000000
--- a/chromium/sync/engine/sync_engine_event.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNC_ENGINE_EVENT_H_
-#define SYNC_ENGINE_SYNC_ENGINE_EVENT_H_
-
-#include <string>
-
-#include "base/observer_list.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-
-namespace syncable {
-class Id;
-}
-
-namespace syncer {
-
-struct SYNC_EXPORT_PRIVATE SyncEngineEvent {
- enum EventCause {
- ////////////////////////////////////////////////////////////////
- // Sent on entry of Syncer state machine
- SYNC_CYCLE_BEGIN,
-
- // Sent any time progress is made during a sync cycle.
- STATUS_CHANGED,
-
- // We have reached the SYNCER_END state in the main sync loop.
- SYNC_CYCLE_ENDED,
-
- ////////////////////////////////////////////////////////////////
- // Generated in response to specific protocol actions or events.
-
- // This is sent after the Syncer (and SyncerThread) have initiated self
- // halt due to no longer being permitted to communicate with the server.
- // The listener should sever the sync / browser connections and delete sync
- // data (i.e. as if the user clicked 'Stop Syncing' in the browser.
- STOP_SYNCING_PERMANENTLY,
-
- // This event is sent when we receive an actionable error. It is upto
- // the listeners to figure out the action to take using the snapshot sent.
- ACTIONABLE_ERROR,
-
- // This event is sent when scheduler decides to wait before next request
- // either because it gets throttled by server or because it backs off after
- // request failure. Retry time is passed in retry_time field of event.
- RETRY_TIME_CHANGED,
-
- // This event is sent when types are throttled or unthrottled.
- THROTTLED_TYPES_CHANGED,
- };
-
- explicit SyncEngineEvent(EventCause cause);
- ~SyncEngineEvent();
-
- EventCause what_happened;
-
- // The last session used for syncing.
- sessions::SyncSessionSnapshot snapshot;
-
- // Update-Client-Auth returns a new token for sync use.
- std::string updated_token;
-
- // Time when scheduler will try to send request after backoff.
- base::Time retry_time;
-
- // Set of types that are currently throttled.
- ModelTypeSet throttled_types;
-};
-
-class SYNC_EXPORT_PRIVATE SyncEngineEventListener {
- public:
- // TODO(tim): Consider splitting this up to multiple callbacks, rather than
- // have to do Event e(type); OnSyncEngineEvent(e); at all callsites,
- virtual void OnSyncEngineEvent(const SyncEngineEvent& event) = 0;
- protected:
- virtual ~SyncEngineEventListener() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNC_ENGINE_EVENT_H_
diff --git a/chromium/sync/engine/sync_scheduler.cc b/chromium/sync/engine/sync_scheduler.cc
deleted file mode 100644
index d800b193f9d..00000000000
--- a/chromium/sync/engine/sync_scheduler.cc
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_scheduler.h"
-
-namespace syncer {
-
-SyncScheduler::SyncScheduler() {}
-SyncScheduler::~SyncScheduler() {}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_scheduler.h b/chromium/sync/engine/sync_scheduler.h
deleted file mode 100644
index aef01182336..00000000000
--- a/chromium/sync/engine/sync_scheduler.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A class to schedule syncer tasks intelligently.
-#ifndef SYNC_ENGINE_SYNC_SCHEDULER_H_
-#define SYNC_ENGINE_SYNC_SCHEDULER_H_
-
-#include <string>
-
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/nudge_source.h"
-#include "sync/sessions/sync_session.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-class ObjectIdInvalidationMap;
-struct ServerConnectionEvent;
-
-struct SYNC_EXPORT_PRIVATE ConfigurationParams {
- ConfigurationParams();
- ConfigurationParams(
- const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource& source,
- ModelTypeSet types_to_download,
- const ModelSafeRoutingInfo& routing_info,
- const base::Closure& ready_task,
- const base::Closure& retry_task);
- ~ConfigurationParams();
-
- // Source for the configuration.
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source;
- // The types that should be downloaded.
- ModelTypeSet types_to_download;
- // The new routing info (superset of types to be downloaded).
- ModelSafeRoutingInfo routing_info;
- // Callback to invoke on configuration completion.
- base::Closure ready_task;
- // Callback to invoke on configuration failure.
- base::Closure retry_task;
-};
-
-class SYNC_EXPORT_PRIVATE SyncScheduler
- : public sessions::SyncSession::Delegate {
- public:
- enum Mode {
- // In this mode, the thread only performs configuration tasks. This is
- // designed to make the case where we want to download updates for a
- // specific type only, and not continue syncing until we are moved into
- // normal mode.
- CONFIGURATION_MODE,
- // Resumes polling and allows nudges, drops configuration tasks. Runs
- // through entire sync cycle.
- NORMAL_MODE,
- };
-
- // All methods of SyncScheduler must be called on the same thread
- // (except for RequestEarlyExit()).
-
- SyncScheduler();
- virtual ~SyncScheduler();
-
- // Start the scheduler with the given mode. If the scheduler is
- // already started, switch to the given mode, although some
- // scheduled tasks from the old mode may still run.
- virtual void Start(Mode mode) = 0;
-
- // Schedules the configuration task specified by |params|. Returns true if
- // the configuration task executed immediately, false if it had to be
- // scheduled for a later attempt. |params.ready_task| is invoked whenever the
- // configuration task executes. |params.retry_task| is invoked once if the
- // configuration task could not execute. |params.ready_task| will still be
- // called when configuration finishes.
- // Note: must already be in CONFIGURATION mode.
- virtual void ScheduleConfiguration(const ConfigurationParams& params) = 0;
-
- // Request that the syncer avoid starting any new tasks and prepare for
- // shutdown.
- virtual void Stop() = 0;
-
- // The meat and potatoes. All three of the following methods will post a
- // delayed task to attempt the actual nudge (see ScheduleNudgeImpl).
- //
- // NOTE: |desired_delay| is best-effort. If a nudge is already scheduled to
- // depart earlier than Now() + delay, the scheduler can and will prefer to
- // batch the two so that only one nudge is sent (at the earlier time). Also,
- // as always with delayed tasks and timers, it's possible the task gets run
- // any time after |desired_delay|.
-
- // The LocalNudge indicates that we've made a local change, and that the
- // syncer should plan to commit this to the server some time soon.
- virtual void ScheduleLocalNudge(
- const base::TimeDelta& desired_delay,
- ModelTypeSet types,
- const tracked_objects::Location& nudge_location) = 0;
-
- // The LocalRefreshRequest occurs when we decide for some reason to manually
- // request updates. This should be used sparingly. For example, one of its
- // uses is to fetch the latest tab sync data when it's relevant to the UI on
- // platforms where tab sync is not registered for invalidations.
- virtual void ScheduleLocalRefreshRequest(
- const base::TimeDelta& desired_delay,
- ModelTypeSet types,
- const tracked_objects::Location& nudge_location) = 0;
-
- // Invalidations are notifications the server sends to let us know when other
- // clients have committed data. We need to contact the sync server (being
- // careful to pass along the "hints" delivered with those invalidations) in
- // order to fetch the update.
- virtual void ScheduleInvalidationNudge(
- const base::TimeDelta& desired_delay,
- const ObjectIdInvalidationMap& invalidations,
- const tracked_objects::Location& nudge_location) = 0;
-
- // Change status of notifications in the SyncSessionContext.
- virtual void SetNotificationsEnabled(bool notifications_enabled) = 0;
-
- virtual base::TimeDelta GetSessionsCommitDelay() const = 0;
-
- // Called when credentials are updated by the user.
- virtual void OnCredentialsUpdated() = 0;
-
- // Called when the network layer detects a connection status change.
- virtual void OnConnectionStatusChange() = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNC_SCHEDULER_H_
diff --git a/chromium/sync/engine/sync_scheduler_impl.cc b/chromium/sync/engine/sync_scheduler_impl.cc
deleted file mode 100644
index 0aaaba0f70b..00000000000
--- a/chromium/sync/engine/sync_scheduler_impl.cc
+++ /dev/null
@@ -1,911 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/sync_scheduler_impl.h"
-
-#include <algorithm>
-#include <cstring>
-
-#include "base/auto_reset.h"
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/compiler_specific.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "sync/engine/backoff_delay_provider.h"
-#include "sync/engine/syncer.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/protocol/proto_enum_conversions.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/util/data_type_histogram.h"
-#include "sync/util/logging.h"
-
-using base::TimeDelta;
-using base::TimeTicks;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using sessions::SyncSessionSnapshot;
-using sync_pb::GetUpdatesCallerInfo;
-
-namespace {
-
-bool ShouldRequestEarlyExit(const SyncProtocolError& error) {
- switch (error.error_type) {
- case SYNC_SUCCESS:
- case MIGRATION_DONE:
- case THROTTLED:
- case TRANSIENT_ERROR:
- return false;
- case NOT_MY_BIRTHDAY:
- case CLEAR_PENDING:
- case DISABLED_BY_ADMIN:
- // If we send terminate sync early then |sync_cycle_ended| notification
- // would not be sent. If there were no actions then |ACTIONABLE_ERROR|
- // notification wouldnt be sent either. Then the UI layer would be left
- // waiting forever. So assert we would send something.
- DCHECK_NE(error.action, UNKNOWN_ACTION);
- return true;
- case INVALID_CREDENTIAL:
- // The notification for this is handled by PostAndProcessHeaders|.
- // Server does no have to send any action for this.
- return true;
- // Make the default a NOTREACHED. So if a new error is introduced we
- // think about its expected functionality.
- default:
- NOTREACHED();
- return false;
- }
-}
-
-bool IsActionableError(
- const SyncProtocolError& error) {
- return (error.action != UNKNOWN_ACTION);
-}
-} // namespace
-
-ConfigurationParams::ConfigurationParams()
- : source(GetUpdatesCallerInfo::UNKNOWN) {}
-ConfigurationParams::ConfigurationParams(
- const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource& source,
- ModelTypeSet types_to_download,
- const ModelSafeRoutingInfo& routing_info,
- const base::Closure& ready_task,
- const base::Closure& retry_task)
- : source(source),
- types_to_download(types_to_download),
- routing_info(routing_info),
- ready_task(ready_task),
- retry_task(retry_task) {
- DCHECK(!ready_task.is_null());
- DCHECK(!retry_task.is_null());
-}
-ConfigurationParams::~ConfigurationParams() {}
-
-SyncSchedulerImpl::WaitInterval::WaitInterval()
- : mode(UNKNOWN) {}
-
-SyncSchedulerImpl::WaitInterval::WaitInterval(Mode mode, TimeDelta length)
- : mode(mode), length(length) {}
-
-SyncSchedulerImpl::WaitInterval::~WaitInterval() {}
-
-#define ENUM_CASE(x) case x: return #x; break;
-
-const char* SyncSchedulerImpl::WaitInterval::GetModeString(Mode mode) {
- switch (mode) {
- ENUM_CASE(UNKNOWN);
- ENUM_CASE(EXPONENTIAL_BACKOFF);
- ENUM_CASE(THROTTLED);
- }
- NOTREACHED();
- return "";
-}
-
-GetUpdatesCallerInfo::GetUpdatesSource GetUpdatesFromNudgeSource(
- NudgeSource source) {
- switch (source) {
- case NUDGE_SOURCE_NOTIFICATION:
- return GetUpdatesCallerInfo::NOTIFICATION;
- case NUDGE_SOURCE_LOCAL:
- return GetUpdatesCallerInfo::LOCAL;
- case NUDGE_SOURCE_LOCAL_REFRESH:
- return GetUpdatesCallerInfo::DATATYPE_REFRESH;
- case NUDGE_SOURCE_UNKNOWN:
- return GetUpdatesCallerInfo::UNKNOWN;
- default:
- NOTREACHED();
- return GetUpdatesCallerInfo::UNKNOWN;
- }
-}
-
-// Helper macros to log with the syncer thread name; useful when there
-// are multiple syncer threads involved.
-
-#define SLOG(severity) LOG(severity) << name_ << ": "
-
-#define SDVLOG(verbose_level) DVLOG(verbose_level) << name_ << ": "
-
-#define SDVLOG_LOC(from_here, verbose_level) \
- DVLOG_LOC(from_here, verbose_level) << name_ << ": "
-
-namespace {
-
-const int kDefaultSessionsCommitDelaySeconds = 10;
-
-bool IsConfigRelatedUpdateSourceValue(
- GetUpdatesCallerInfo::GetUpdatesSource source) {
- switch (source) {
- case GetUpdatesCallerInfo::RECONFIGURATION:
- case GetUpdatesCallerInfo::MIGRATION:
- case GetUpdatesCallerInfo::NEW_CLIENT:
- case GetUpdatesCallerInfo::NEWLY_SUPPORTED_DATATYPE:
- return true;
- default:
- return false;
- }
-}
-
-} // namespace
-
-SyncSchedulerImpl::SyncSchedulerImpl(const std::string& name,
- BackoffDelayProvider* delay_provider,
- sessions::SyncSessionContext* context,
- Syncer* syncer)
- : name_(name),
- started_(false),
- syncer_short_poll_interval_seconds_(
- TimeDelta::FromSeconds(kDefaultShortPollIntervalSeconds)),
- syncer_long_poll_interval_seconds_(
- TimeDelta::FromSeconds(kDefaultLongPollIntervalSeconds)),
- sessions_commit_delay_(
- TimeDelta::FromSeconds(kDefaultSessionsCommitDelaySeconds)),
- mode_(NORMAL_MODE),
- delay_provider_(delay_provider),
- syncer_(syncer),
- session_context_(context),
- no_scheduling_allowed_(false),
- do_poll_after_credentials_updated_(false),
- weak_ptr_factory_(this),
- weak_ptr_factory_for_weak_handle_(this) {
- weak_handle_this_ = MakeWeakHandle(
- weak_ptr_factory_for_weak_handle_.GetWeakPtr());
-}
-
-SyncSchedulerImpl::~SyncSchedulerImpl() {
- DCHECK(CalledOnValidThread());
- Stop();
-}
-
-void SyncSchedulerImpl::OnCredentialsUpdated() {
- DCHECK(CalledOnValidThread());
-
- if (HttpResponse::SYNC_AUTH_ERROR ==
- session_context_->connection_manager()->server_status()) {
- OnServerConnectionErrorFixed();
- }
-}
-
-void SyncSchedulerImpl::OnConnectionStatusChange() {
- if (HttpResponse::CONNECTION_UNAVAILABLE ==
- session_context_->connection_manager()->server_status()) {
- // Optimistically assume that the connection is fixed and try
- // connecting.
- OnServerConnectionErrorFixed();
- }
-}
-
-void SyncSchedulerImpl::OnServerConnectionErrorFixed() {
- // There could be a pending nudge or configuration job in several cases:
- //
- // 1. We're in exponential backoff.
- // 2. We're silenced / throttled.
- // 3. A nudge was saved previously due to not having a valid auth token.
- // 4. A nudge was scheduled + saved while in configuration mode.
- //
- // In all cases except (2), we want to retry contacting the server. We
- // call TryCanaryJob to achieve this, and note that nothing -- not even a
- // canary job -- can bypass a THROTTLED WaitInterval. The only thing that
- // has the authority to do that is the Unthrottle timer.
- TryCanaryJob();
-}
-
-void SyncSchedulerImpl::Start(Mode mode) {
- DCHECK(CalledOnValidThread());
- std::string thread_name = base::MessageLoop::current()->thread_name();
- if (thread_name.empty())
- thread_name = "<Main thread>";
- SDVLOG(2) << "Start called from thread "
- << thread_name << " with mode " << GetModeString(mode);
- if (!started_) {
- started_ = true;
- SendInitialSnapshot();
- }
-
- DCHECK(!session_context_->account_name().empty());
- DCHECK(syncer_.get());
- Mode old_mode = mode_;
- mode_ = mode;
- AdjustPolling(UPDATE_INTERVAL); // Will kick start poll timer if needed.
-
- if (old_mode != mode_ &&
- mode_ == NORMAL_MODE &&
- nudge_tracker_.IsSyncRequired() &&
- CanRunNudgeJobNow(NORMAL_PRIORITY)) {
- // We just got back to normal mode. Let's try to run the work that was
- // queued up while we were configuring.
- TrySyncSessionJob(NORMAL_PRIORITY);
- }
-}
-
-ModelTypeSet SyncSchedulerImpl::GetEnabledAndUnthrottledTypes() {
- ModelTypeSet enabled_types = session_context_->enabled_types();
- ModelTypeSet throttled_types = nudge_tracker_.GetThrottledTypes();
- return Difference(enabled_types, throttled_types);
-}
-
-void SyncSchedulerImpl::SendInitialSnapshot() {
- DCHECK(CalledOnValidThread());
- scoped_ptr<SyncSession> dummy(SyncSession::Build(session_context_, this));
- SyncEngineEvent event(SyncEngineEvent::STATUS_CHANGED);
- event.snapshot = dummy->TakeSnapshot();
- session_context_->NotifyListeners(event);
-}
-
-namespace {
-
-// Helper to extract the routing info corresponding to types in
-// |types_to_download| from |current_routes|.
-void BuildModelSafeParams(
- ModelTypeSet types_to_download,
- const ModelSafeRoutingInfo& current_routes,
- ModelSafeRoutingInfo* result_routes) {
- for (ModelTypeSet::Iterator iter = types_to_download.First(); iter.Good();
- iter.Inc()) {
- ModelType type = iter.Get();
- ModelSafeRoutingInfo::const_iterator route = current_routes.find(type);
- DCHECK(route != current_routes.end());
- ModelSafeGroup group = route->second;
- (*result_routes)[type] = group;
- }
-}
-
-} // namespace.
-
-void SyncSchedulerImpl::ScheduleConfiguration(
- const ConfigurationParams& params) {
- DCHECK(CalledOnValidThread());
- DCHECK(IsConfigRelatedUpdateSourceValue(params.source));
- DCHECK_EQ(CONFIGURATION_MODE, mode_);
- DCHECK(!params.ready_task.is_null());
- CHECK(started_) << "Scheduler must be running to configure.";
- SDVLOG(2) << "Reconfiguring syncer.";
-
- // Only one configuration is allowed at a time. Verify we're not waiting
- // for a pending configure job.
- DCHECK(!pending_configure_params_);
-
- ModelSafeRoutingInfo restricted_routes;
- BuildModelSafeParams(params.types_to_download,
- params.routing_info,
- &restricted_routes);
- session_context_->set_routing_info(restricted_routes);
-
- // Only reconfigure if we have types to download.
- if (!params.types_to_download.Empty()) {
- pending_configure_params_.reset(new ConfigurationParams(params));
- TrySyncSessionJob(NORMAL_PRIORITY);
- } else {
- SDVLOG(2) << "No change in routing info, calling ready task directly.";
- params.ready_task.Run();
- }
-}
-
-bool SyncSchedulerImpl::CanRunJobNow(JobPriority priority) {
- DCHECK(CalledOnValidThread());
- if (wait_interval_ && wait_interval_->mode == WaitInterval::THROTTLED) {
- SDVLOG(1) << "Unable to run a job because we're throttled.";
- return false;
- }
-
- if (wait_interval_
- && wait_interval_->mode == WaitInterval::EXPONENTIAL_BACKOFF
- && priority != CANARY_PRIORITY) {
- SDVLOG(1) << "Unable to run a job because we're backing off.";
- return false;
- }
-
- if (session_context_->connection_manager()->HasInvalidAuthToken()) {
- SDVLOG(1) << "Unable to run a job because we have no valid auth token.";
- return false;
- }
-
- return true;
-}
-
-bool SyncSchedulerImpl::CanRunNudgeJobNow(JobPriority priority) {
- DCHECK(CalledOnValidThread());
-
- if (!CanRunJobNow(priority)) {
- SDVLOG(1) << "Unable to run a nudge job right now";
- return false;
- }
-
- const ModelTypeSet enabled_types = session_context_->enabled_types();
- if (nudge_tracker_.GetThrottledTypes().HasAll(enabled_types)) {
- SDVLOG(1) << "Not running a nudge because we're fully type throttled.";
- return false;
- }
-
- if (mode_ == CONFIGURATION_MODE) {
- SDVLOG(1) << "Not running nudge because we're in configuration mode.";
- return false;
- }
-
- return true;
-}
-
-void SyncSchedulerImpl::ScheduleLocalNudge(
- const TimeDelta& desired_delay,
- ModelTypeSet types,
- const tracked_objects::Location& nudge_location) {
- DCHECK(CalledOnValidThread());
- DCHECK(!types.Empty());
-
- SDVLOG_LOC(nudge_location, 2)
- << "Scheduling sync because of local change to "
- << ModelTypeSetToString(types);
- UpdateNudgeTimeRecords(types);
- nudge_tracker_.RecordLocalChange(types);
- ScheduleNudgeImpl(desired_delay, nudge_location);
-}
-
-void SyncSchedulerImpl::ScheduleLocalRefreshRequest(
- const TimeDelta& desired_delay,
- ModelTypeSet types,
- const tracked_objects::Location& nudge_location) {
- DCHECK(CalledOnValidThread());
- DCHECK(!types.Empty());
-
- SDVLOG_LOC(nudge_location, 2)
- << "Scheduling sync because of local refresh request for "
- << ModelTypeSetToString(types);
- nudge_tracker_.RecordLocalRefreshRequest(types);
- ScheduleNudgeImpl(desired_delay, nudge_location);
-}
-
-void SyncSchedulerImpl::ScheduleInvalidationNudge(
- const TimeDelta& desired_delay,
- const ObjectIdInvalidationMap& invalidation_map,
- const tracked_objects::Location& nudge_location) {
- DCHECK(CalledOnValidThread());
- DCHECK(!invalidation_map.Empty());
-
- SDVLOG_LOC(nudge_location, 2)
- << "Scheduling sync because we received invalidation for "
- << ModelTypeSetToString(
- ObjectIdSetToModelTypeSet(invalidation_map.GetObjectIds()));
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
- ScheduleNudgeImpl(desired_delay, nudge_location);
-}
-
-// TODO(zea): Consider adding separate throttling/backoff for datatype
-// refresh requests.
-void SyncSchedulerImpl::ScheduleNudgeImpl(
- const TimeDelta& delay,
- const tracked_objects::Location& nudge_location) {
- DCHECK(CalledOnValidThread());
-
- if (no_scheduling_allowed_) {
- NOTREACHED() << "Illegal to schedule job while session in progress.";
- return;
- }
-
- if (!started_) {
- SDVLOG_LOC(nudge_location, 2)
- << "Dropping nudge, scheduler is not running.";
- return;
- }
-
- SDVLOG_LOC(nudge_location, 2)
- << "In ScheduleNudgeImpl with delay "
- << delay.InMilliseconds() << " ms";
-
- if (!CanRunNudgeJobNow(NORMAL_PRIORITY))
- return;
-
- TimeTicks incoming_run_time = TimeTicks::Now() + delay;
- if (!scheduled_nudge_time_.is_null() &&
- (scheduled_nudge_time_ < incoming_run_time)) {
- // Old job arrives sooner than this one. Don't reschedule it.
- return;
- }
-
- // Either there is no existing nudge in flight or the incoming nudge should be
- // made to arrive first (preempt) the existing nudge. We reschedule in either
- // case.
- SDVLOG_LOC(nudge_location, 2)
- << "Scheduling a nudge with "
- << delay.InMilliseconds() << " ms delay";
- scheduled_nudge_time_ = incoming_run_time;
- pending_wakeup_timer_.Start(
- nudge_location,
- delay,
- base::Bind(&SyncSchedulerImpl::PerformDelayedNudge,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-const char* SyncSchedulerImpl::GetModeString(SyncScheduler::Mode mode) {
- switch (mode) {
- ENUM_CASE(CONFIGURATION_MODE);
- ENUM_CASE(NORMAL_MODE);
- }
- return "";
-}
-
-void SyncSchedulerImpl::DoNudgeSyncSessionJob(JobPriority priority) {
- DCHECK(CalledOnValidThread());
- DCHECK(CanRunNudgeJobNow(priority));
-
- DVLOG(2) << "Will run normal mode sync cycle with types "
- << ModelTypeSetToString(session_context_->enabled_types());
- scoped_ptr<SyncSession> session(SyncSession::Build(session_context_, this));
- bool premature_exit = !syncer_->NormalSyncShare(
- GetEnabledAndUnthrottledTypes(),
- nudge_tracker_,
- session.get());
- AdjustPolling(FORCE_RESET);
- // Don't run poll job till the next time poll timer fires.
- do_poll_after_credentials_updated_ = false;
-
- bool success = !premature_exit
- && !sessions::HasSyncerError(
- session->status_controller().model_neutral_state());
-
- if (success) {
- // That cycle took care of any outstanding work we had.
- SDVLOG(2) << "Nudge succeeded.";
- nudge_tracker_.RecordSuccessfulSyncCycle();
- scheduled_nudge_time_ = base::TimeTicks();
-
- // If we're here, then we successfully reached the server. End all backoff.
- wait_interval_.reset();
- NotifyRetryTime(base::Time());
- return;
- } else {
- HandleFailure(session->status_controller().model_neutral_state());
- }
-}
-
-void SyncSchedulerImpl::DoConfigurationSyncSessionJob(JobPriority priority) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(mode_, CONFIGURATION_MODE);
- DCHECK(pending_configure_params_ != NULL);
-
- if (!CanRunJobNow(priority)) {
- SDVLOG(2) << "Unable to run configure job right now.";
- if (!pending_configure_params_->retry_task.is_null()) {
- pending_configure_params_->retry_task.Run();
- pending_configure_params_->retry_task.Reset();
- }
- return;
- }
-
- SDVLOG(2) << "Will run configure SyncShare with types "
- << ModelTypeSetToString(session_context_->enabled_types());
- scoped_ptr<SyncSession> session(SyncSession::Build(session_context_, this));
- bool premature_exit = !syncer_->ConfigureSyncShare(
- session_context_->enabled_types(),
- pending_configure_params_->source,
- session.get());
- AdjustPolling(FORCE_RESET);
- // Don't run poll job till the next time poll timer fires.
- do_poll_after_credentials_updated_ = false;
-
- bool success = !premature_exit
- && !sessions::HasSyncerError(
- session->status_controller().model_neutral_state());
-
- if (success) {
- SDVLOG(2) << "Configure succeeded.";
- pending_configure_params_->ready_task.Run();
- pending_configure_params_.reset();
-
- // If we're here, then we successfully reached the server. End all backoff.
- wait_interval_.reset();
- NotifyRetryTime(base::Time());
- } else {
- HandleFailure(session->status_controller().model_neutral_state());
- // Sync cycle might receive response from server that causes scheduler to
- // stop and draws pending_configure_params_ invalid.
- if (started_ && !pending_configure_params_->retry_task.is_null()) {
- pending_configure_params_->retry_task.Run();
- pending_configure_params_->retry_task.Reset();
- }
- }
-}
-
-void SyncSchedulerImpl::HandleFailure(
- const sessions::ModelNeutralState& model_neutral_state) {
- if (IsCurrentlyThrottled()) {
- SDVLOG(2) << "Was throttled during previous sync cycle.";
- RestartWaiting();
- } else if (!IsBackingOff()) {
- // Setup our backoff if this is our first such failure.
- TimeDelta length = delay_provider_->GetDelay(
- delay_provider_->GetInitialDelay(model_neutral_state));
- wait_interval_.reset(
- new WaitInterval(WaitInterval::EXPONENTIAL_BACKOFF, length));
- SDVLOG(2) << "Sync cycle failed. Will back off for "
- << wait_interval_->length.InMilliseconds() << "ms.";
- RestartWaiting();
- }
-}
-
-void SyncSchedulerImpl::DoPollSyncSessionJob() {
- base::AutoReset<bool> protector(&no_scheduling_allowed_, true);
-
- if (!CanRunJobNow(NORMAL_PRIORITY)) {
- SDVLOG(2) << "Unable to run a poll job right now.";
- return;
- }
-
- if (mode_ != NORMAL_MODE) {
- SDVLOG(2) << "Not running poll job in configure mode.";
- return;
- }
-
- SDVLOG(2) << "Polling with types "
- << ModelTypeSetToString(session_context_->enabled_types());
- scoped_ptr<SyncSession> session(SyncSession::Build(session_context_, this));
- syncer_->PollSyncShare(
- GetEnabledAndUnthrottledTypes(),
- session.get());
-
- AdjustPolling(FORCE_RESET);
-
- if (IsCurrentlyThrottled()) {
- SDVLOG(2) << "Poll request got us throttled.";
- // The OnSilencedUntil() call set up the WaitInterval for us. All we need
- // to do is start the timer.
- RestartWaiting();
- }
-}
-
-void SyncSchedulerImpl::UpdateNudgeTimeRecords(ModelTypeSet types) {
- DCHECK(CalledOnValidThread());
- base::TimeTicks now = TimeTicks::Now();
- // Update timing information for how often datatypes are triggering nudges.
- for (ModelTypeSet::Iterator iter = types.First(); iter.Good(); iter.Inc()) {
- base::TimeTicks previous = last_local_nudges_by_model_type_[iter.Get()];
- last_local_nudges_by_model_type_[iter.Get()] = now;
- if (previous.is_null())
- continue;
-
-#define PER_DATA_TYPE_MACRO(type_str) \
- SYNC_FREQ_HISTOGRAM("Sync.Freq" type_str, now - previous);
- SYNC_DATA_TYPE_HISTOGRAM(iter.Get());
-#undef PER_DATA_TYPE_MACRO
- }
-}
-
-TimeDelta SyncSchedulerImpl::GetPollInterval() {
- return (!session_context_->notifications_enabled() ||
- !session_context_->ShouldFetchUpdatesBeforeCommit()) ?
- syncer_short_poll_interval_seconds_ :
- syncer_long_poll_interval_seconds_;
-}
-
-void SyncSchedulerImpl::AdjustPolling(PollAdjustType type) {
- DCHECK(CalledOnValidThread());
-
- TimeDelta poll = GetPollInterval();
- bool rate_changed = !poll_timer_.IsRunning() ||
- poll != poll_timer_.GetCurrentDelay();
-
- if (type == FORCE_RESET) {
- last_poll_reset_ = base::TimeTicks::Now();
- if (!rate_changed)
- poll_timer_.Reset();
- }
-
- if (!rate_changed)
- return;
-
- // Adjust poll rate.
- poll_timer_.Stop();
- poll_timer_.Start(FROM_HERE, poll, this,
- &SyncSchedulerImpl::PollTimerCallback);
-}
-
-void SyncSchedulerImpl::RestartWaiting() {
- CHECK(wait_interval_.get());
- DCHECK(wait_interval_->length >= TimeDelta::FromSeconds(0));
- NotifyRetryTime(base::Time::Now() + wait_interval_->length);
- SDVLOG(2) << "Starting WaitInterval timer of length "
- << wait_interval_->length.InMilliseconds() << "ms.";
- if (wait_interval_->mode == WaitInterval::THROTTLED) {
- pending_wakeup_timer_.Start(
- FROM_HERE,
- wait_interval_->length,
- base::Bind(&SyncSchedulerImpl::Unthrottle,
- weak_ptr_factory_.GetWeakPtr()));
- } else {
- pending_wakeup_timer_.Start(
- FROM_HERE,
- wait_interval_->length,
- base::Bind(&SyncSchedulerImpl::ExponentialBackoffRetry,
- weak_ptr_factory_.GetWeakPtr()));
- }
-}
-
-void SyncSchedulerImpl::Stop() {
- DCHECK(CalledOnValidThread());
- SDVLOG(2) << "Stop called";
-
- // Kill any in-flight method calls.
- weak_ptr_factory_.InvalidateWeakPtrs();
- wait_interval_.reset();
- NotifyRetryTime(base::Time());
- poll_timer_.Stop();
- pending_wakeup_timer_.Stop();
- pending_configure_params_.reset();
- if (started_)
- started_ = false;
-}
-
-// This is the only place where we invoke DoSyncSessionJob with canary
-// privileges. Everyone else should use NORMAL_PRIORITY.
-void SyncSchedulerImpl::TryCanaryJob() {
- TrySyncSessionJob(CANARY_PRIORITY);
-}
-
-void SyncSchedulerImpl::TrySyncSessionJob(JobPriority priority) {
- // Post call to TrySyncSessionJobImpl on current thread. Later request for
- // access token will be here.
- base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
- &SyncSchedulerImpl::TrySyncSessionJobImpl,
- weak_ptr_factory_.GetWeakPtr(),
- priority));
-}
-
-void SyncSchedulerImpl::TrySyncSessionJobImpl(JobPriority priority) {
- DCHECK(CalledOnValidThread());
- if (mode_ == CONFIGURATION_MODE) {
- if (pending_configure_params_) {
- SDVLOG(2) << "Found pending configure job";
- DoConfigurationSyncSessionJob(priority);
- }
- } else {
- DCHECK(mode_ == NORMAL_MODE);
- if (nudge_tracker_.IsSyncRequired() && CanRunNudgeJobNow(priority)) {
- SDVLOG(2) << "Found pending nudge job";
- DoNudgeSyncSessionJob(priority);
- } else if (do_poll_after_credentials_updated_ ||
- ((base::TimeTicks::Now() - last_poll_reset_) >= GetPollInterval())) {
- DoPollSyncSessionJob();
- // Poll timer fires infrequently. Usually by this time access token is
- // already expired and poll job will fail with auth error. Set flag to
- // retry poll once ProfileSyncService gets new access token, TryCanaryJob
- // will be called after access token is retrieved.
- if (HttpResponse::SYNC_AUTH_ERROR ==
- session_context_->connection_manager()->server_status()) {
- do_poll_after_credentials_updated_ = true;
- }
- }
- }
-
- if (priority == CANARY_PRIORITY) {
- // If this is canary job then whatever result was don't run poll job till
- // the next time poll timer fires.
- do_poll_after_credentials_updated_ = false;
- }
-
- if (IsBackingOff() && !pending_wakeup_timer_.IsRunning()) {
- // If we succeeded, our wait interval would have been cleared. If it hasn't
- // been cleared, then we should increase our backoff interval and schedule
- // another retry.
- TimeDelta length = delay_provider_->GetDelay(wait_interval_->length);
- wait_interval_.reset(
- new WaitInterval(WaitInterval::EXPONENTIAL_BACKOFF, length));
- SDVLOG(2) << "Sync cycle failed. Will back off for "
- << wait_interval_->length.InMilliseconds() << "ms.";
- RestartWaiting();
- }
-}
-
-void SyncSchedulerImpl::PollTimerCallback() {
- DCHECK(CalledOnValidThread());
- if (no_scheduling_allowed_) {
- // The no_scheduling_allowed_ flag is set by a function-scoped AutoReset in
- // functions that are called only on the sync thread. This function is also
- // called only on the sync thread, and only when it is posted by an expiring
- // timer. If we find that no_scheduling_allowed_ is set here, then
- // something is very wrong. Maybe someone mistakenly called us directly, or
- // mishandled the book-keeping for no_scheduling_allowed_.
- NOTREACHED() << "Illegal to schedule job while session in progress.";
- return;
- }
-
- TrySyncSessionJob(NORMAL_PRIORITY);
-}
-
-void SyncSchedulerImpl::Unthrottle() {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(WaitInterval::THROTTLED, wait_interval_->mode);
-
- // We're no longer throttled, so clear the wait interval.
- wait_interval_.reset();
- NotifyRetryTime(base::Time());
-
- // We treat this as a 'canary' in the sense that it was originally scheduled
- // to run some time ago, failed, and we now want to retry, versus a job that
- // was just created (e.g via ScheduleNudgeImpl). The main implication is
- // that we're careful to update routing info (etc) with such potentially
- // stale canary jobs.
- TryCanaryJob();
-}
-
-void SyncSchedulerImpl::TypeUnthrottle(base::TimeTicks unthrottle_time) {
- DCHECK(CalledOnValidThread());
- nudge_tracker_.UpdateTypeThrottlingState(unthrottle_time);
- NotifyThrottledTypesChanged(nudge_tracker_.GetThrottledTypes());
-
- if (nudge_tracker_.IsAnyTypeThrottled()) {
- base::TimeDelta time_until_next_unthrottle =
- nudge_tracker_.GetTimeUntilNextUnthrottle(unthrottle_time);
- type_unthrottle_timer_.Start(
- FROM_HERE,
- time_until_next_unthrottle,
- base::Bind(&SyncSchedulerImpl::TypeUnthrottle,
- weak_ptr_factory_.GetWeakPtr(),
- unthrottle_time + time_until_next_unthrottle));
- }
-
- // Maybe this is a good time to run a nudge job. Let's try it.
- if (nudge_tracker_.IsSyncRequired() && CanRunNudgeJobNow(NORMAL_PRIORITY))
- TrySyncSessionJob(NORMAL_PRIORITY);
-}
-
-void SyncSchedulerImpl::PerformDelayedNudge() {
- // Circumstances may have changed since we scheduled this delayed nudge.
- // We must check to see if it's OK to run the job before we do so.
- if (CanRunNudgeJobNow(NORMAL_PRIORITY))
- TrySyncSessionJob(NORMAL_PRIORITY);
-
- // We're not responsible for setting up any retries here. The functions that
- // first put us into a state that prevents successful sync cycles (eg. global
- // throttling, type throttling, network errors, transient errors) will also
- // setup the appropriate retry logic (eg. retry after timeout, exponential
- // backoff, retry when the network changes).
-}
-
-void SyncSchedulerImpl::ExponentialBackoffRetry() {
- TryCanaryJob();
-}
-
-void SyncSchedulerImpl::Notify(SyncEngineEvent::EventCause cause) {
- DCHECK(CalledOnValidThread());
- session_context_->NotifyListeners(SyncEngineEvent(cause));
-}
-
-void SyncSchedulerImpl::NotifyRetryTime(base::Time retry_time) {
- SyncEngineEvent event(SyncEngineEvent::RETRY_TIME_CHANGED);
- event.retry_time = retry_time;
- session_context_->NotifyListeners(event);
-}
-
-void SyncSchedulerImpl::NotifyThrottledTypesChanged(ModelTypeSet types) {
- SyncEngineEvent event(SyncEngineEvent::THROTTLED_TYPES_CHANGED);
- event.throttled_types = types;
- session_context_->NotifyListeners(event);
-}
-
-bool SyncSchedulerImpl::IsBackingOff() const {
- DCHECK(CalledOnValidThread());
- return wait_interval_.get() && wait_interval_->mode ==
- WaitInterval::EXPONENTIAL_BACKOFF;
-}
-
-void SyncSchedulerImpl::OnThrottled(const base::TimeDelta& throttle_duration) {
- DCHECK(CalledOnValidThread());
- wait_interval_.reset(new WaitInterval(WaitInterval::THROTTLED,
- throttle_duration));
- NotifyRetryTime(base::Time::Now() + wait_interval_->length);
-}
-
-void SyncSchedulerImpl::OnTypesThrottled(
- ModelTypeSet types,
- const base::TimeDelta& throttle_duration) {
- base::TimeTicks now = base::TimeTicks::Now();
-
- nudge_tracker_.SetTypesThrottledUntil(types, throttle_duration, now);
- base::TimeDelta time_until_next_unthrottle =
- nudge_tracker_.GetTimeUntilNextUnthrottle(now);
- type_unthrottle_timer_.Start(
- FROM_HERE,
- time_until_next_unthrottle,
- base::Bind(&SyncSchedulerImpl::TypeUnthrottle,
- weak_ptr_factory_.GetWeakPtr(),
- now + time_until_next_unthrottle));
- NotifyThrottledTypesChanged(nudge_tracker_.GetThrottledTypes());
-}
-
-bool SyncSchedulerImpl::IsCurrentlyThrottled() {
- DCHECK(CalledOnValidThread());
- return wait_interval_.get() && wait_interval_->mode ==
- WaitInterval::THROTTLED;
-}
-
-void SyncSchedulerImpl::OnReceivedShortPollIntervalUpdate(
- const base::TimeDelta& new_interval) {
- DCHECK(CalledOnValidThread());
- syncer_short_poll_interval_seconds_ = new_interval;
-}
-
-void SyncSchedulerImpl::OnReceivedLongPollIntervalUpdate(
- const base::TimeDelta& new_interval) {
- DCHECK(CalledOnValidThread());
- syncer_long_poll_interval_seconds_ = new_interval;
-}
-
-void SyncSchedulerImpl::OnReceivedSessionsCommitDelay(
- const base::TimeDelta& new_delay) {
- DCHECK(CalledOnValidThread());
- sessions_commit_delay_ = new_delay;
-}
-
-void SyncSchedulerImpl::OnReceivedClientInvalidationHintBufferSize(int size) {
- if (size > 0)
- nudge_tracker_.SetHintBufferSize(size);
- else
- NOTREACHED() << "Hint buffer size should be > 0.";
-}
-
-void SyncSchedulerImpl::OnActionableError(
- const sessions::SyncSessionSnapshot& snap) {
- DCHECK(CalledOnValidThread());
- SDVLOG(2) << "OnActionableError";
- SyncEngineEvent event(SyncEngineEvent::ACTIONABLE_ERROR);
- event.snapshot = snap;
- session_context_->NotifyListeners(event);
-}
-
-void SyncSchedulerImpl::OnSyncProtocolError(
- const sessions::SyncSessionSnapshot& snapshot) {
- DCHECK(CalledOnValidThread());
- if (ShouldRequestEarlyExit(
- snapshot.model_neutral_state().sync_protocol_error)) {
- SDVLOG(2) << "Sync Scheduler requesting early exit.";
- Stop();
- }
- if (IsActionableError(snapshot.model_neutral_state().sync_protocol_error))
- OnActionableError(snapshot);
-}
-
-void SyncSchedulerImpl::SetNotificationsEnabled(bool notifications_enabled) {
- DCHECK(CalledOnValidThread());
- session_context_->set_notifications_enabled(notifications_enabled);
- if (notifications_enabled)
- nudge_tracker_.OnInvalidationsEnabled();
- else
- nudge_tracker_.OnInvalidationsDisabled();
-}
-
-base::TimeDelta SyncSchedulerImpl::GetSessionsCommitDelay() const {
- DCHECK(CalledOnValidThread());
- return sessions_commit_delay_;
-}
-
-#undef SDVLOG_LOC
-
-#undef SDVLOG
-
-#undef SLOG
-
-#undef ENUM_CASE
-
-} // namespace syncer
diff --git a/chromium/sync/engine/sync_scheduler_impl.h b/chromium/sync/engine/sync_scheduler_impl.h
deleted file mode 100644
index 4c0dd57016b..00000000000
--- a/chromium/sync/engine/sync_scheduler_impl.h
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNC_SCHEDULER_IMPL_H_
-#define SYNC_ENGINE_SYNC_SCHEDULER_IMPL_H_
-
-#include <map>
-#include <string>
-
-#include "base/callback.h"
-#include "base/cancelable_callback.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/linked_ptr.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/nudge_source.h"
-#include "sync/engine/sync_scheduler.h"
-#include "sync/engine/syncer.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/sessions/nudge_tracker.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/sessions/sync_session_context.h"
-
-namespace syncer {
-
-class BackoffDelayProvider;
-
-namespace sessions {
-struct ModelNeutralState;
-}
-
-class SYNC_EXPORT_PRIVATE SyncSchedulerImpl
- : public SyncScheduler,
- public base::NonThreadSafe {
- public:
- // |name| is a display string to identify the syncer thread. Takes
- // |ownership of |syncer| and |delay_provider|.
- SyncSchedulerImpl(const std::string& name,
- BackoffDelayProvider* delay_provider,
- sessions::SyncSessionContext* context,
- Syncer* syncer);
-
- // Calls Stop().
- virtual ~SyncSchedulerImpl();
-
- virtual void Start(Mode mode) OVERRIDE;
- virtual void ScheduleConfiguration(
- const ConfigurationParams& params) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void ScheduleLocalNudge(
- const base::TimeDelta& desired_delay,
- ModelTypeSet types,
- const tracked_objects::Location& nudge_location) OVERRIDE;
- virtual void ScheduleLocalRefreshRequest(
- const base::TimeDelta& desired_delay,
- ModelTypeSet types,
- const tracked_objects::Location& nudge_location) OVERRIDE;
- virtual void ScheduleInvalidationNudge(
- const base::TimeDelta& desired_delay,
- const ObjectIdInvalidationMap& invalidation_map,
- const tracked_objects::Location& nudge_location) OVERRIDE;
- virtual void SetNotificationsEnabled(bool notifications_enabled) OVERRIDE;
-
- virtual base::TimeDelta GetSessionsCommitDelay() const OVERRIDE;
-
- virtual void OnCredentialsUpdated() OVERRIDE;
- virtual void OnConnectionStatusChange() OVERRIDE;
-
- // SyncSession::Delegate implementation.
- virtual void OnThrottled(const base::TimeDelta& throttle_duration) OVERRIDE;
- virtual void OnTypesThrottled(
- ModelTypeSet types,
- const base::TimeDelta& throttle_duration) OVERRIDE;
- virtual bool IsCurrentlyThrottled() OVERRIDE;
- virtual void OnReceivedShortPollIntervalUpdate(
- const base::TimeDelta& new_interval) OVERRIDE;
- virtual void OnReceivedLongPollIntervalUpdate(
- const base::TimeDelta& new_interval) OVERRIDE;
- virtual void OnReceivedSessionsCommitDelay(
- const base::TimeDelta& new_delay) OVERRIDE;
- virtual void OnReceivedClientInvalidationHintBufferSize(int size) OVERRIDE;
- virtual void OnSyncProtocolError(
- const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
-
- private:
- enum JobPriority {
- // Non-canary jobs respect exponential backoff.
- NORMAL_PRIORITY,
- // Canary jobs bypass exponential backoff, so use with extreme caution.
- CANARY_PRIORITY
- };
-
- enum PollAdjustType {
- // Restart the poll interval.
- FORCE_RESET,
- // Restart the poll interval only if its length has changed.
- UPDATE_INTERVAL,
- };
-
- friend class SyncSchedulerTest;
- friend class SyncSchedulerWhiteboxTest;
- friend class SyncerTest;
-
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, NoNudgesInConfigureMode);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
- DropNudgeWhileExponentialBackOff);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, SaveNudge);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
- SaveNudgeWhileTypeThrottled);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinueNudge);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinueConfiguration);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
- SaveConfigurationWhileThrottled);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
- SaveNudgeWhileThrottled);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
- ContinueCanaryJobConfig);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest, TransientPollFailure);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest,
- ServerConnectionChangeDuringBackoff);
- FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest,
- ConnectionChangeCanaryPreemptedByNudge);
- FRIEND_TEST_ALL_PREFIXES(BackoffTriggersSyncSchedulerTest,
- FailGetEncryptionKey);
-
- struct SYNC_EXPORT_PRIVATE WaitInterval {
- enum Mode {
- // Uninitialized state, should not be set in practice.
- UNKNOWN = -1,
- // We enter a series of increasingly longer WaitIntervals if we experience
- // repeated transient failures. We retry at the end of each interval.
- EXPONENTIAL_BACKOFF,
- // A server-initiated throttled interval. We do not allow any syncing
- // during such an interval.
- THROTTLED,
- };
- WaitInterval();
- ~WaitInterval();
- WaitInterval(Mode mode, base::TimeDelta length);
-
- static const char* GetModeString(Mode mode);
-
- Mode mode;
- base::TimeDelta length;
- };
-
- static const char* GetModeString(Mode mode);
-
- // Invoke the syncer to perform a nudge job.
- void DoNudgeSyncSessionJob(JobPriority priority);
-
- // Invoke the syncer to perform a configuration job.
- void DoConfigurationSyncSessionJob(JobPriority priority);
-
- // Helper function for Do{Nudge,Configuration}SyncSessionJob.
- void HandleFailure(
- const sessions::ModelNeutralState& model_neutral_state);
-
- // Invoke the Syncer to perform a poll job.
- void DoPollSyncSessionJob();
-
- // Helper function to calculate poll interval.
- base::TimeDelta GetPollInterval();
-
- // Adjusts the poll timer to account for new poll interval, and possibly
- // resets the poll interval, depedning on the flag's value.
- void AdjustPolling(PollAdjustType type);
-
- // Helper to restart waiting with |wait_interval_|'s timer.
- void RestartWaiting();
-
- // Determines if we're allowed to contact the server right now.
- bool CanRunJobNow(JobPriority priority);
-
- // Determines if we're allowed to contact the server right now.
- bool CanRunNudgeJobNow(JobPriority priority);
-
- // If the scheduler's current state supports it, this will create a job based
- // on the passed in parameters and coalesce it with any other pending jobs,
- // then post a delayed task to run it. It may also choose to drop the job or
- // save it for later, depending on the scheduler's current state.
- void ScheduleNudgeImpl(
- const base::TimeDelta& delay,
- const tracked_objects::Location& nudge_location);
-
- // Returns true if the client is currently in exponential backoff.
- bool IsBackingOff() const;
-
- // Helper to signal all listeners registered with |session_context_|.
- void Notify(SyncEngineEvent::EventCause cause);
-
- // Helper to signal listeners about changed retry time.
- void NotifyRetryTime(base::Time retry_time);
-
- // Helper to signal listeners about changed throttled types.
- void NotifyThrottledTypesChanged(ModelTypeSet types);
-
- // Looks for pending work and, if it finds any, run this work at "canary"
- // priority.
- void TryCanaryJob();
-
- // At the moment TrySyncSessionJob just posts call to TrySyncSessionJobImpl on
- // current thread. In the future it will request access token here.
- void TrySyncSessionJob(JobPriority priority);
- void TrySyncSessionJobImpl(JobPriority priority);
-
- // Transitions out of the THROTTLED WaitInterval then calls TryCanaryJob().
- void Unthrottle();
-
- // Called when a per-type throttling interval expires.
- void TypeUnthrottle(base::TimeTicks unthrottle_time);
-
- // Runs a normal nudge job when the scheduled timer expires.
- void PerformDelayedNudge();
-
- // Attempts to exit EXPONENTIAL_BACKOFF by calling TryCanaryJob().
- void ExponentialBackoffRetry();
-
- // Called when the root cause of the current connection error is fixed.
- void OnServerConnectionErrorFixed();
-
- // Creates a session for a poll and performs the sync.
- void PollTimerCallback();
-
- // Returns the set of types that are enabled and not currently throttled.
- ModelTypeSet GetEnabledAndUnthrottledTypes();
-
- // Called as we are started to broadcast an initial session snapshot
- // containing data like initial_sync_ended. Important when the client starts
- // up and does not need to perform an initial sync.
- void SendInitialSnapshot();
-
- // This is used for histogramming and analysis of ScheduleNudge* APIs.
- // SyncScheduler is the ultimate choke-point for all such invocations (with
- // and without InvalidationState variants, all NudgeSources, etc) and as such
- // is the most flexible place to do this bookkeeping.
- void UpdateNudgeTimeRecords(ModelTypeSet types);
-
- virtual void OnActionableError(const sessions::SyncSessionSnapshot& snapshot);
-
- // For certain methods that need to worry about X-thread posting.
- WeakHandle<SyncSchedulerImpl> weak_handle_this_;
-
- // Used for logging.
- const std::string name_;
-
- // Set in Start(), unset in Stop().
- bool started_;
-
- // Modifiable versions of kDefaultLongPollIntervalSeconds which can be
- // updated by the server.
- base::TimeDelta syncer_short_poll_interval_seconds_;
- base::TimeDelta syncer_long_poll_interval_seconds_;
-
- // Server-tweakable sessions commit delay.
- base::TimeDelta sessions_commit_delay_;
-
- // Periodic timer for polling. See AdjustPolling.
- base::RepeatingTimer<SyncSchedulerImpl> poll_timer_;
-
- // The mode of operation.
- Mode mode_;
-
- // Current wait state. Null if we're not in backoff and not throttled.
- scoped_ptr<WaitInterval> wait_interval_;
-
- scoped_ptr<BackoffDelayProvider> delay_provider_;
-
- // The event that will wake us up.
- base::OneShotTimer<SyncSchedulerImpl> pending_wakeup_timer_;
-
- // An event that fires when data type throttling expires.
- base::OneShotTimer<SyncSchedulerImpl> type_unthrottle_timer_;
-
- // Storage for variables related to an in-progress configure request. Note
- // that (mode_ != CONFIGURATION_MODE) \implies !pending_configure_params_.
- scoped_ptr<ConfigurationParams> pending_configure_params_;
-
- // If we have a nudge pending to run soon, it will be listed here.
- base::TimeTicks scheduled_nudge_time_;
-
- // Keeps track of work that the syncer needs to handle.
- sessions::NudgeTracker nudge_tracker_;
-
- // Invoked to run through the sync cycle.
- scoped_ptr<Syncer> syncer_;
-
- sessions::SyncSessionContext* session_context_;
-
- // A map tracking LOCAL NudgeSource invocations of ScheduleNudge* APIs,
- // organized by datatype. Each datatype that was part of the types requested
- // in the call will have its TimeTicks value updated.
- typedef std::map<ModelType, base::TimeTicks> ModelTypeTimeMap;
- ModelTypeTimeMap last_local_nudges_by_model_type_;
-
- // Used as an "anti-reentrancy defensive assertion".
- // While true, it is illegal for any new scheduling activity to take place.
- // Ensures that higher layers don't break this law in response to events that
- // take place during a sync cycle. We call this out because such violations
- // could result in tight sync loops hitting sync servers.
- bool no_scheduling_allowed_;
-
- // crbug/251307. This is a workaround for M29. crbug/259913 tracks proper fix
- // for M30.
- // The issue is that poll job runs after few hours of inactivity and therefore
- // will always fail with auth error because of expired access token. Once
- // fresh access token is requested poll job is not retried.
- // The change is to remember that poll timer just fired and retry poll job
- // after credentials are updated.
- bool do_poll_after_credentials_updated_;
-
- // TryJob might get called for multiple reasons. It should only call
- // DoPollSyncSessionJob after some time since the last attempt.
- // last_poll_reset_ keeps track of when was last attempt.
- base::TimeTicks last_poll_reset_;
-
- base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_;
-
- // A second factory specially for weak_handle_this_, to allow the handle
- // to be const and alleviate threading concerns.
- base::WeakPtrFactory<SyncSchedulerImpl> weak_ptr_factory_for_weak_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncSchedulerImpl);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNC_SCHEDULER_IMPL_H_
diff --git a/chromium/sync/engine/sync_scheduler_unittest.cc b/chromium/sync/engine/sync_scheduler_unittest.cc
deleted file mode 100644
index e5876554e81..00000000000
--- a/chromium/sync/engine/sync_scheduler_unittest.cc
+++ /dev/null
@@ -1,1299 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/test/test_timeouts.h"
-#include "sync/engine/backoff_delay_provider.h"
-#include "sync/engine/sync_scheduler_impl.h"
-#include "sync/engine/syncer.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/sessions/test_util.h"
-#include "sync/test/callback_counter.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/mock_connection_manager.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/util/extensions_activity.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeDelta;
-using base::TimeTicks;
-using testing::_;
-using testing::AtLeast;
-using testing::DoAll;
-using testing::Invoke;
-using testing::Mock;
-using testing::Return;
-using testing::WithArg;
-using testing::WithArgs;
-
-namespace syncer {
-using sessions::SyncSession;
-using sessions::SyncSessionContext;
-using sync_pb::GetUpdatesCallerInfo;
-
-class MockSyncer : public Syncer {
- public:
- MockSyncer();
- MOCK_METHOD3(NormalSyncShare, bool(ModelTypeSet,
- const sessions::NudgeTracker&,
- sessions::SyncSession*));
- MOCK_METHOD3(ConfigureSyncShare,
- bool(ModelTypeSet,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource,
- SyncSession*));
- MOCK_METHOD2(PollSyncShare, bool(ModelTypeSet, sessions::SyncSession*));
-};
-
-MockSyncer::MockSyncer()
- : Syncer(NULL) {}
-
-typedef std::vector<TimeTicks> SyncShareTimes;
-
-void QuitLoopNow() {
- // We use QuitNow() instead of Quit() as the latter may get stalled
- // indefinitely in the presence of repeated timers with low delays
- // and a slow test (e.g., ThrottlingDoesThrottle [which has a poll
- // delay of 5ms] run under TSAN on the trybots).
- base::MessageLoop::current()->QuitNow();
-}
-
-void RunLoop() {
- base::MessageLoop::current()->Run();
-}
-
-void PumpLoop() {
- // Do it this way instead of RunAllPending to pump loop exactly once
- // (necessary in the presence of timers; see comment in
- // QuitLoopNow).
- base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&QuitLoopNow));
- RunLoop();
-}
-
-void PumpLoopFor(base::TimeDelta time) {
- // Allow the loop to run for the specified amount of time.
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE, base::Bind(&QuitLoopNow), time);
- RunLoop();
-}
-
-ModelSafeRoutingInfo TypesToRoutingInfo(ModelTypeSet types) {
- ModelSafeRoutingInfo routes;
- for (ModelTypeSet::Iterator iter = types.First(); iter.Good(); iter.Inc()) {
- routes[iter.Get()] = GROUP_PASSIVE;
- }
- return routes;
-}
-
-// Convenient to use in tests wishing to analyze SyncShare calls over time.
-static const size_t kMinNumSamples = 5;
-class SyncSchedulerTest : public testing::Test {
- public:
- SyncSchedulerTest() : syncer_(NULL), delay_(NULL), weak_ptr_factory_(this) {}
-
- class MockDelayProvider : public BackoffDelayProvider {
- public:
- MockDelayProvider() : BackoffDelayProvider(
- TimeDelta::FromSeconds(kInitialBackoffRetrySeconds),
- TimeDelta::FromSeconds(kInitialBackoffImmediateRetrySeconds)) {
- }
-
- MOCK_METHOD1(GetDelay, TimeDelta(const TimeDelta&));
- };
-
- virtual void SetUp() {
- dir_maker_.SetUp();
- syncer_ = new testing::StrictMock<MockSyncer>();
- delay_ = NULL;
- extensions_activity_ = new ExtensionsActivity();
-
- routing_info_[BOOKMARKS] = GROUP_UI;
- routing_info_[AUTOFILL] = GROUP_DB;
- routing_info_[THEMES] = GROUP_UI;
- routing_info_[NIGORI] = GROUP_PASSIVE;
-
- workers_.clear();
- workers_.push_back(make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
- workers_.push_back(make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
- workers_.push_back(make_scoped_refptr(new FakeModelWorker(GROUP_PASSIVE)));
-
- std::vector<ModelSafeWorker*> workers;
- for (std::vector<scoped_refptr<FakeModelWorker> >::iterator it =
- workers_.begin(); it != workers_.end(); ++it) {
- workers.push_back(it->get());
- }
-
- connection_.reset(new MockConnectionManager(directory(),
- &cancelation_signal_));
- connection_->SetServerReachable();
- context_.reset(new SyncSessionContext(
- connection_.get(), directory(), workers,
- extensions_activity_.get(),
- std::vector<SyncEngineEventListener*>(), NULL, NULL,
- true, // enable keystore encryption
- false, // force enable pre-commit GU avoidance
- "fake_invalidator_client_id"));
- context_->set_routing_info(routing_info_);
- context_->set_notifications_enabled(true);
- context_->set_account_name("Test");
- scheduler_.reset(
- new SyncSchedulerImpl("TestSyncScheduler",
- BackoffDelayProvider::FromDefaults(),
- context(),
- syncer_));
- }
-
- SyncSchedulerImpl* scheduler() { return scheduler_.get(); }
- const ModelSafeRoutingInfo& routing_info() { return routing_info_; }
- MockSyncer* syncer() { return syncer_; }
- MockDelayProvider* delay() { return delay_; }
- MockConnectionManager* connection() { return connection_.get(); }
- TimeDelta zero() { return TimeDelta::FromSeconds(0); }
- TimeDelta timeout() {
- return TestTimeouts::action_timeout();
- }
-
- virtual void TearDown() {
- PumpLoop();
- scheduler_.reset();
- PumpLoop();
- dir_maker_.TearDown();
- }
-
- void AnalyzePollRun(const SyncShareTimes& times, size_t min_num_samples,
- const TimeTicks& optimal_start, const TimeDelta& poll_interval) {
- EXPECT_GE(times.size(), min_num_samples);
- for (size_t i = 0; i < times.size(); i++) {
- SCOPED_TRACE(testing::Message() << "SyncShare # (" << i << ")");
- TimeTicks optimal_next_sync = optimal_start + poll_interval * i;
- EXPECT_GE(times[i], optimal_next_sync);
- }
- }
-
- void DoQuitLoopNow() {
- QuitLoopNow();
- }
-
- void StartSyncScheduler(SyncScheduler::Mode mode) {
- scheduler()->Start(mode);
- }
-
- // This stops the scheduler synchronously.
- void StopSyncScheduler() {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&SyncSchedulerTest::DoQuitLoopNow,
- weak_ptr_factory_.GetWeakPtr()));
- RunLoop();
- }
-
- bool RunAndGetBackoff() {
- ModelTypeSet nudge_types(BOOKMARKS);
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- scheduler()->ScheduleLocalNudge(zero(), nudge_types, FROM_HERE);
- RunLoop();
-
- return scheduler()->IsBackingOff();
- }
-
- void UseMockDelayProvider() {
- delay_ = new MockDelayProvider();
- scheduler_->delay_provider_.reset(delay_);
- }
-
- SyncSessionContext* context() { return context_.get(); }
-
- ModelTypeSet GetThrottledTypes() {
- return scheduler_->nudge_tracker_.GetThrottledTypes();
- }
-
- private:
- syncable::Directory* directory() {
- return dir_maker_.directory();
- }
-
- base::MessageLoop loop_;
- TestDirectorySetterUpper dir_maker_;
- CancelationSignal cancelation_signal_;
- scoped_ptr<MockConnectionManager> connection_;
- scoped_ptr<SyncSessionContext> context_;
- scoped_ptr<SyncSchedulerImpl> scheduler_;
- MockSyncer* syncer_;
- MockDelayProvider* delay_;
- std::vector<scoped_refptr<FakeModelWorker> > workers_;
- scoped_refptr<ExtensionsActivity> extensions_activity_;
- ModelSafeRoutingInfo routing_info_;
- base::WeakPtrFactory<SyncSchedulerTest> weak_ptr_factory_;
-};
-
-void RecordSyncShareImpl(SyncShareTimes* times) {
- times->push_back(TimeTicks::Now());
-}
-
-ACTION_P(RecordSyncShare, times) {
- RecordSyncShareImpl(times);
- if (base::MessageLoop::current()->is_running())
- QuitLoopNow();
- return true;
-}
-
-ACTION_P2(RecordSyncShareMultiple, times, quit_after) {
- RecordSyncShareImpl(times);
- EXPECT_LE(times->size(), quit_after);
- if (times->size() >= quit_after &&
- base::MessageLoop::current()->is_running()) {
- QuitLoopNow();
- }
- return true;
-}
-
-ACTION_P(StopScheduler, scheduler) {
- scheduler->Stop();
-}
-
-ACTION(AddFailureAndQuitLoopNow) {
- ADD_FAILURE();
- QuitLoopNow();
- return true;
-}
-
-ACTION(QuitLoopNowAction) {
- QuitLoopNow();
- return true;
-}
-
-// Test nudge scheduling.
-TEST_F(SyncSchedulerTest, Nudge) {
- SyncShareTimes times;
- ModelTypeSet model_types(BOOKMARKS);
-
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times)))
- .RetiresOnSaturation();
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- scheduler()->ScheduleLocalNudge(zero(), model_types, FROM_HERE);
- RunLoop();
-
- Mock::VerifyAndClearExpectations(syncer());
-
- // Make sure a second, later, nudge is unaffected by first (no coalescing).
- SyncShareTimes times2;
- model_types.Remove(BOOKMARKS);
- model_types.Put(AUTOFILL);
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times2)));
- scheduler()->ScheduleLocalNudge(zero(), model_types, FROM_HERE);
- RunLoop();
-}
-
-// Make sure a regular config command is scheduled fine in the absence of any
-// errors.
-TEST_F(SyncSchedulerTest, Config) {
- SyncShareTimes times;
- const ModelTypeSet model_types(BOOKMARKS);
-
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureSuccess),
- RecordSyncShare(&times)));
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- model_types,
- TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- PumpLoop();
- ASSERT_EQ(1, ready_counter.times_called());
- ASSERT_EQ(0, retry_counter.times_called());
-}
-
-// Simulate a failure and make sure the config request is retried.
-TEST_F(SyncSchedulerTest, ConfigWithBackingOff) {
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(TimeDelta::FromMilliseconds(1)));
- SyncShareTimes times;
- const ModelTypeSet model_types(BOOKMARKS);
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
- RecordSyncShare(&times)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
- RecordSyncShare(&times)));
-
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- model_types,
- TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- RunLoop();
- ASSERT_EQ(0, ready_counter.times_called());
- ASSERT_EQ(1, retry_counter.times_called());
-
- // RunLoop() will trigger TryCanaryJob which will retry configuration.
- // Since retry_task was already called it shouldn't be called again.
- RunLoop();
- ASSERT_EQ(0, ready_counter.times_called());
- ASSERT_EQ(1, retry_counter.times_called());
-
- Mock::VerifyAndClearExpectations(syncer());
-
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureSuccess),
- RecordSyncShare(&times)));
- RunLoop();
-
- ASSERT_EQ(1, ready_counter.times_called());
-}
-
-// Simuilate SyncSchedulerImpl::Stop being called in the middle of Configure.
-// This can happen if server returns NOT_MY_BIRTHDAY.
-TEST_F(SyncSchedulerTest, ConfigWithStop) {
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(TimeDelta::FromMilliseconds(1)));
- SyncShareTimes times;
- const ModelTypeSet model_types(BOOKMARKS);
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- // Make ConfigureSyncShare call scheduler->Stop(). It is not supposed to call
- // retry_task or dereference configuration params.
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
- StopScheduler(scheduler()),
- RecordSyncShare(&times)));
-
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- model_types,
- TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- PumpLoop();
- ASSERT_EQ(0, ready_counter.times_called());
- ASSERT_EQ(0, retry_counter.times_called());
-}
-
-// Issue a nudge when the config has failed. Make sure both the config and
-// nudge are executed.
-TEST_F(SyncSchedulerTest, NudgeWithConfigWithBackingOff) {
- const ModelTypeSet model_types(BOOKMARKS);
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(TimeDelta::FromMilliseconds(50)));
- SyncShareTimes times;
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- // Request a configure and make sure it fails.
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
- RecordSyncShare(&times)));
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- model_types,
- TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- RunLoop();
- ASSERT_EQ(0, ready_counter.times_called());
- ASSERT_EQ(1, retry_counter.times_called());
- Mock::VerifyAndClearExpectations(syncer());
-
- // Ask for a nudge while dealing with repeated configure failure.
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureFailed),
- RecordSyncShare(&times)));
- scheduler()->ScheduleLocalNudge(zero(), model_types, FROM_HERE);
- RunLoop();
- // Note that we're not RunLoop()ing for the NUDGE we just scheduled, but
- // for the first retry attempt from the config job (after
- // waiting ~+/- 50ms).
- Mock::VerifyAndClearExpectations(syncer());
- ASSERT_EQ(0, ready_counter.times_called());
-
- // Let the next configure retry succeed.
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureSuccess),
- RecordSyncShare(&times)));
- RunLoop();
-
- // Now change the mode so nudge can execute.
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times)));
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- PumpLoop();
-}
-
-// Test that nudges are coalesced.
-TEST_F(SyncSchedulerTest, NudgeCoalescing) {
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- SyncShareTimes times;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times)));
- const ModelTypeSet types1(BOOKMARKS), types2(AUTOFILL), types3(THEMES);
- TimeDelta delay = zero();
- TimeTicks optimal_time = TimeTicks::Now() + delay;
- scheduler()->ScheduleLocalNudge(delay, types1, FROM_HERE);
- scheduler()->ScheduleLocalNudge(zero(), types2, FROM_HERE);
- RunLoop();
-
- ASSERT_EQ(1U, times.size());
- EXPECT_GE(times[0], optimal_time);
-
- Mock::VerifyAndClearExpectations(syncer());
-
- SyncShareTimes times2;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times2)));
- scheduler()->ScheduleLocalNudge(zero(), types3, FROM_HERE);
- RunLoop();
-}
-
-// Test that nudges are coalesced.
-TEST_F(SyncSchedulerTest, NudgeCoalescingWithDifferentTimings) {
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- SyncShareTimes times;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times)));
- ModelTypeSet types1(BOOKMARKS), types2(AUTOFILL), types3;
-
- // Create a huge time delay.
- TimeDelta delay = TimeDelta::FromDays(1);
-
- scheduler()->ScheduleLocalNudge(delay, types1, FROM_HERE);
- scheduler()->ScheduleLocalNudge(zero(), types2, FROM_HERE);
-
- TimeTicks min_time = TimeTicks::Now();
- TimeTicks max_time = TimeTicks::Now() + delay;
-
- RunLoop();
- Mock::VerifyAndClearExpectations(syncer());
-
- // Make sure the sync happened at the right time.
- ASSERT_EQ(1U, times.size());
- EXPECT_GE(times[0], min_time);
- EXPECT_LE(times[0], max_time);
-}
-
-// Test nudge scheduling.
-TEST_F(SyncSchedulerTest, NudgeWithStates) {
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- SyncShareTimes times1;
- ObjectIdInvalidationMap invalidations1 =
- BuildInvalidationMap(BOOKMARKS, 10, "test");
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times1)))
- .RetiresOnSaturation();
- scheduler()->ScheduleInvalidationNudge(zero(), invalidations1, FROM_HERE);
- RunLoop();
-
- Mock::VerifyAndClearExpectations(syncer());
-
- // Make sure a second, later, nudge is unaffected by first (no coalescing).
- SyncShareTimes times2;
- ObjectIdInvalidationMap invalidations2 =
- BuildInvalidationMap(AUTOFILL, 10, "test2");
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times2)));
- scheduler()->ScheduleInvalidationNudge(zero(), invalidations2, FROM_HERE);
- RunLoop();
-}
-
-// Test that polling works as expected.
-TEST_F(SyncSchedulerTest, Polling) {
- SyncShareTimes times;
- TimeDelta poll_interval(TimeDelta::FromMilliseconds(30));
- EXPECT_CALL(*syncer(), PollSyncShare(_,_)).Times(AtLeast(kMinNumSamples))
- .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShareMultiple(&times, kMinNumSamples)));
-
- scheduler()->OnReceivedLongPollIntervalUpdate(poll_interval);
-
- TimeTicks optimal_start = TimeTicks::Now() + poll_interval;
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run again to wait for polling.
- RunLoop();
-
- StopSyncScheduler();
- AnalyzePollRun(times, kMinNumSamples, optimal_start, poll_interval);
-}
-
-// Test that the short poll interval is used.
-TEST_F(SyncSchedulerTest, PollNotificationsDisabled) {
- SyncShareTimes times;
- TimeDelta poll_interval(TimeDelta::FromMilliseconds(30));
- EXPECT_CALL(*syncer(), PollSyncShare(_,_)).Times(AtLeast(kMinNumSamples))
- .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShareMultiple(&times, kMinNumSamples)));
-
- scheduler()->OnReceivedShortPollIntervalUpdate(poll_interval);
- scheduler()->SetNotificationsEnabled(false);
-
- TimeTicks optimal_start = TimeTicks::Now() + poll_interval;
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run again to wait for polling.
- RunLoop();
-
- StopSyncScheduler();
- AnalyzePollRun(times, kMinNumSamples, optimal_start, poll_interval);
-}
-
-// Test that polling intervals are updated when needed.
-TEST_F(SyncSchedulerTest, PollIntervalUpdate) {
- SyncShareTimes times;
- TimeDelta poll1(TimeDelta::FromMilliseconds(120));
- TimeDelta poll2(TimeDelta::FromMilliseconds(30));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll1);
- EXPECT_CALL(*syncer(), PollSyncShare(_,_)).Times(AtLeast(kMinNumSamples))
- .WillOnce(DoAll(
- WithArgs<0,1>(
- sessions::test_util::SimulatePollIntervalUpdate(poll2)),
- Return(true)))
- .WillRepeatedly(
- DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- WithArg<1>(
- RecordSyncShareMultiple(&times, kMinNumSamples))));
-
- TimeTicks optimal_start = TimeTicks::Now() + poll1 + poll2;
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run again to wait for polling.
- RunLoop();
-
- StopSyncScheduler();
- AnalyzePollRun(times, kMinNumSamples, optimal_start, poll2);
-}
-
-// Test that the sessions commit delay is updated when needed.
-TEST_F(SyncSchedulerTest, SessionsCommitDelay) {
- SyncShareTimes times;
- TimeDelta delay1(TimeDelta::FromMilliseconds(120));
- TimeDelta delay2(TimeDelta::FromMilliseconds(30));
- scheduler()->OnReceivedSessionsCommitDelay(delay1);
-
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(
- DoAll(
- WithArgs<0,1,2>(
- sessions::test_util::SimulateSessionsCommitDelayUpdate(
- delay2)),
- Invoke(sessions::test_util::SimulateNormalSuccess),
- QuitLoopNowAction()));
-
- EXPECT_EQ(delay1, scheduler()->GetSessionsCommitDelay());
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- EXPECT_EQ(delay1, scheduler()->GetSessionsCommitDelay());
- const ModelTypeSet model_types(BOOKMARKS);
- scheduler()->ScheduleLocalNudge(zero(), model_types, FROM_HERE);
- RunLoop();
-
- EXPECT_EQ(delay2, scheduler()->GetSessionsCommitDelay());
- StopSyncScheduler();
-}
-
-// Test that no syncing occurs when throttled.
-TEST_F(SyncSchedulerTest, ThrottlingDoesThrottle) {
- const ModelTypeSet types(BOOKMARKS);
- TimeDelta poll(TimeDelta::FromMilliseconds(5));
- TimeDelta throttle(TimeDelta::FromMinutes(10));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(
- WithArg<2>(sessions::test_util::SimulateThrottled(throttle)),
- Return(true)))
- .WillRepeatedly(AddFailureAndQuitLoopNow());
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- scheduler()->ScheduleLocalNudge(
- TimeDelta::FromMicroseconds(1), types, FROM_HERE);
- PumpLoop();
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- types,
- TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- PumpLoop();
- ASSERT_EQ(0, ready_counter.times_called());
- ASSERT_EQ(1, retry_counter.times_called());
-
-}
-
-TEST_F(SyncSchedulerTest, ThrottlingExpiresFromPoll) {
- SyncShareTimes times;
- TimeDelta poll(TimeDelta::FromMilliseconds(15));
- TimeDelta throttle1(TimeDelta::FromMilliseconds(150));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- ::testing::InSequence seq;
- EXPECT_CALL(*syncer(), PollSyncShare(_,_))
- .WillOnce(DoAll(
- WithArg<1>(sessions::test_util::SimulateThrottled(throttle1)),
- Return(true)))
- .RetiresOnSaturation();
- EXPECT_CALL(*syncer(), PollSyncShare(_,_))
- .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShareMultiple(&times, kMinNumSamples)));
-
- TimeTicks optimal_start = TimeTicks::Now() + poll + throttle1;
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run again to wait for polling.
- RunLoop();
-
- StopSyncScheduler();
- AnalyzePollRun(times, kMinNumSamples, optimal_start, poll);
-}
-
-TEST_F(SyncSchedulerTest, ThrottlingExpiresFromNudge) {
- SyncShareTimes times;
- TimeDelta poll(TimeDelta::FromDays(1));
- TimeDelta throttle1(TimeDelta::FromMilliseconds(150));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- ::testing::InSequence seq;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- WithArg<2>(sessions::test_util::SimulateThrottled(throttle1)),
- Return(true)))
- .RetiresOnSaturation();
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- QuitLoopNowAction()));
-
- const ModelTypeSet types(BOOKMARKS);
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- scheduler()->ScheduleLocalNudge(zero(), types, FROM_HERE);
-
- PumpLoop(); // To get PerformDelayedNudge called.
- PumpLoop(); // To get TrySyncSessionJob called
- EXPECT_TRUE(scheduler()->IsCurrentlyThrottled());
- RunLoop();
- EXPECT_FALSE(scheduler()->IsCurrentlyThrottled());
-
- StopSyncScheduler();
-}
-
-TEST_F(SyncSchedulerTest, ThrottlingExpiresFromConfigure) {
- SyncShareTimes times;
- TimeDelta poll(TimeDelta::FromDays(1));
- TimeDelta throttle1(TimeDelta::FromMilliseconds(150));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- ::testing::InSequence seq;
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(
- WithArg<2>(sessions::test_util::SimulateThrottled(throttle1)),
- Return(true)))
- .RetiresOnSaturation();
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureSuccess),
- QuitLoopNowAction()));
-
- const ModelTypeSet types(BOOKMARKS);
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- types,
- TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- PumpLoop();
- EXPECT_EQ(0, ready_counter.times_called());
- EXPECT_EQ(1, retry_counter.times_called());
- EXPECT_TRUE(scheduler()->IsCurrentlyThrottled());
-
- RunLoop();
- EXPECT_FALSE(scheduler()->IsCurrentlyThrottled());
-
- StopSyncScheduler();
-}
-
-TEST_F(SyncSchedulerTest, TypeThrottlingBlocksNudge) {
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(zero()));
-
- TimeDelta poll(TimeDelta::FromDays(1));
- TimeDelta throttle1(TimeDelta::FromSeconds(60));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- const ModelTypeSet types(BOOKMARKS);
-
- ::testing::InSequence seq;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- WithArg<2>(
- sessions::test_util::SimulateTypesThrottled(types, throttle1)),
- Return(true)))
- .RetiresOnSaturation();
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- scheduler()->ScheduleLocalNudge(zero(), types, FROM_HERE);
- PumpLoop(); // To get PerformDelayedNudge called.
- PumpLoop(); // To get TrySyncSessionJob called
- EXPECT_TRUE(GetThrottledTypes().HasAll(types));
-
- // This won't cause a sync cycle because the types are throttled.
- scheduler()->ScheduleLocalNudge(zero(), types, FROM_HERE);
- PumpLoop();
-
- StopSyncScheduler();
-}
-
-TEST_F(SyncSchedulerTest, TypeThrottlingDoesBlockOtherSources) {
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(zero()));
-
- SyncShareTimes times;
- TimeDelta poll(TimeDelta::FromDays(1));
- TimeDelta throttle1(TimeDelta::FromSeconds(60));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- const ModelTypeSet throttled_types(BOOKMARKS);
- const ModelTypeSet unthrottled_types(PREFERENCES);
-
- ::testing::InSequence seq;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- WithArg<2>(
- sessions::test_util::SimulateTypesThrottled(
- throttled_types, throttle1)),
- Return(true)))
- .RetiresOnSaturation();
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- scheduler()->ScheduleLocalNudge(zero(), throttled_types, FROM_HERE);
- PumpLoop(); // To get PerformDelayedNudge called.
- PumpLoop(); // To get TrySyncSessionJob called
- EXPECT_TRUE(GetThrottledTypes().HasAll(throttled_types));
-
- // Ignore invalidations for throttled types.
- ObjectIdInvalidationMap invalidations =
- BuildInvalidationMap(BOOKMARKS, 10, "test");
- scheduler()->ScheduleInvalidationNudge(zero(), invalidations, FROM_HERE);
- PumpLoop();
-
- // Ignore refresh requests for throttled types.
- scheduler()->ScheduleLocalRefreshRequest(zero(), throttled_types, FROM_HERE);
- PumpLoop();
-
- Mock::VerifyAndClearExpectations(syncer());
-
- // Local nudges for non-throttled types will trigger a sync.
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times)));
- scheduler()->ScheduleLocalNudge(zero(), unthrottled_types, FROM_HERE);
- RunLoop();
- Mock::VerifyAndClearExpectations(syncer());
-
- StopSyncScheduler();
-}
-
-// Test nudges / polls don't run in config mode and config tasks do.
-TEST_F(SyncSchedulerTest, ConfigurationMode) {
- TimeDelta poll(TimeDelta::FromMilliseconds(15));
- SyncShareTimes times;
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- const ModelTypeSet nudge_types(AUTOFILL);
- scheduler()->ScheduleLocalNudge(zero(), nudge_types, FROM_HERE);
- scheduler()->ScheduleLocalNudge(zero(), nudge_types, FROM_HERE);
-
- const ModelTypeSet config_types(BOOKMARKS);
-
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConfigureSuccess),
- RecordSyncShare(&times)))
- .RetiresOnSaturation();
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- config_types,
- TypesToRoutingInfo(config_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- RunLoop();
- ASSERT_EQ(1, ready_counter.times_called());
- ASSERT_EQ(0, retry_counter.times_called());
-
- Mock::VerifyAndClearExpectations(syncer());
-
- // Switch to NORMAL_MODE to ensure NUDGES were properly saved and run.
- scheduler()->OnReceivedLongPollIntervalUpdate(TimeDelta::FromDays(1));
- SyncShareTimes times2;
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times2)));
-
- // TODO(tim): Figure out how to remove this dangerous need to reset
- // routing info between mode switches.
- context()->set_routing_info(routing_info());
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- RunLoop();
- Mock::VerifyAndClearExpectations(syncer());
-}
-
-class BackoffTriggersSyncSchedulerTest : public SyncSchedulerTest {
- virtual void SetUp() {
- SyncSchedulerTest::SetUp();
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(TimeDelta::FromMilliseconds(1)));
- }
-
- virtual void TearDown() {
- StopSyncScheduler();
- SyncSchedulerTest::TearDown();
- }
-};
-
-// Have the sycner fail during commit. Expect that the scheduler enters
-// backoff.
-TEST_F(BackoffTriggersSyncSchedulerTest, FailCommitOnce) {
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
- QuitLoopNowAction()));
- EXPECT_TRUE(RunAndGetBackoff());
-}
-
-// Have the syncer fail during download updates and succeed on the first
-// retry. Expect that this clears the backoff state.
-TEST_F(BackoffTriggersSyncSchedulerTest, FailDownloadOnceThenSucceed) {
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- Invoke(sessions::test_util::SimulateDownloadUpdatesFailed),
- Return(true)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- QuitLoopNowAction()));
- EXPECT_FALSE(RunAndGetBackoff());
-}
-
-// Have the syncer fail during commit and succeed on the first retry. Expect
-// that this clears the backoff state.
-TEST_F(BackoffTriggersSyncSchedulerTest, FailCommitOnceThenSucceed) {
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- Invoke(sessions::test_util::SimulateCommitFailed),
- Return(true)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- QuitLoopNowAction()));
- EXPECT_FALSE(RunAndGetBackoff());
-}
-
-// Have the syncer fail to download updates and fail again on the retry.
-// Expect this will leave the scheduler in backoff.
-TEST_F(BackoffTriggersSyncSchedulerTest, FailDownloadTwice) {
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- Invoke(sessions::test_util::SimulateDownloadUpdatesFailed),
- Return(true)))
- .WillRepeatedly(DoAll(
- Invoke(sessions::test_util::SimulateDownloadUpdatesFailed),
- QuitLoopNowAction()));
- EXPECT_TRUE(RunAndGetBackoff());
-}
-
-// Have the syncer fail to get the encryption key yet succeed in downloading
-// updates. Expect this will leave the scheduler in backoff.
-TEST_F(BackoffTriggersSyncSchedulerTest, FailGetEncryptionKey) {
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillOnce(DoAll(
- Invoke(sessions::test_util::SimulateGetEncryptionKeyFailed),
- Return(true)))
- .WillRepeatedly(DoAll(
- Invoke(sessions::test_util::SimulateGetEncryptionKeyFailed),
- QuitLoopNowAction()));
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- ModelTypeSet types(BOOKMARKS);
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- types,
- TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- RunLoop();
-
- EXPECT_TRUE(scheduler()->IsBackingOff());
-}
-
-// Test that no polls or extraneous nudges occur when in backoff.
-TEST_F(SyncSchedulerTest, BackoffDropsJobs) {
- SyncShareTimes times;
- TimeDelta poll(TimeDelta::FromMilliseconds(5));
- const ModelTypeSet types(BOOKMARKS);
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
- UseMockDelayProvider();
-
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
- RecordSyncShareMultiple(&times, 1U)));
- EXPECT_CALL(*delay(), GetDelay(_)).
- WillRepeatedly(Return(TimeDelta::FromDays(1)));
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // This nudge should fail and put us into backoff. Thanks to our mock
- // GetDelay() setup above, this will be a long backoff.
- scheduler()->ScheduleLocalNudge(zero(), types, FROM_HERE);
- RunLoop();
-
- // From this point forward, no SyncShare functions should be invoked.
- Mock::VerifyAndClearExpectations(syncer());
-
- // Wait a while (10x poll interval) so a few poll jobs will be attempted.
- PumpLoopFor(poll * 10);
-
- // Try (and fail) to schedule a nudge.
- scheduler()->ScheduleLocalNudge(
- base::TimeDelta::FromMilliseconds(1),
- types,
- FROM_HERE);
-
- Mock::VerifyAndClearExpectations(syncer());
- Mock::VerifyAndClearExpectations(delay());
-
- EXPECT_CALL(*delay(), GetDelay(_)).Times(0);
-
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
-
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- types,
- TypesToRoutingInfo(types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
- PumpLoop();
- ASSERT_EQ(0, ready_counter.times_called());
- ASSERT_EQ(1, retry_counter.times_called());
-
-}
-
-// Test that backoff is shaping traffic properly with consecutive errors.
-TEST_F(SyncSchedulerTest, BackoffElevation) {
- SyncShareTimes times;
- UseMockDelayProvider();
-
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_)).Times(kMinNumSamples)
- .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
- RecordSyncShareMultiple(&times, kMinNumSamples)));
-
- const TimeDelta first = TimeDelta::FromSeconds(kInitialBackoffRetrySeconds);
- const TimeDelta second = TimeDelta::FromMilliseconds(2);
- const TimeDelta third = TimeDelta::FromMilliseconds(3);
- const TimeDelta fourth = TimeDelta::FromMilliseconds(4);
- const TimeDelta fifth = TimeDelta::FromMilliseconds(5);
- const TimeDelta sixth = TimeDelta::FromDays(1);
-
- EXPECT_CALL(*delay(), GetDelay(first)).WillOnce(Return(second))
- .RetiresOnSaturation();
- EXPECT_CALL(*delay(), GetDelay(second)).WillOnce(Return(third))
- .RetiresOnSaturation();
- EXPECT_CALL(*delay(), GetDelay(third)).WillOnce(Return(fourth))
- .RetiresOnSaturation();
- EXPECT_CALL(*delay(), GetDelay(fourth)).WillOnce(Return(fifth))
- .RetiresOnSaturation();
- EXPECT_CALL(*delay(), GetDelay(fifth)).WillOnce(Return(sixth));
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run again with a nudge.
- scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
- RunLoop();
-
- ASSERT_EQ(kMinNumSamples, times.size());
- EXPECT_GE(times[1] - times[0], second);
- EXPECT_GE(times[2] - times[1], third);
- EXPECT_GE(times[3] - times[2], fourth);
- EXPECT_GE(times[4] - times[3], fifth);
-}
-
-// Test that things go back to normal once a retry makes forward progress.
-TEST_F(SyncSchedulerTest, BackoffRelief) {
- SyncShareTimes times;
- const TimeDelta poll(TimeDelta::FromMilliseconds(10));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
- UseMockDelayProvider();
-
- const TimeDelta backoff = TimeDelta::FromMilliseconds(5);
- EXPECT_CALL(*delay(), GetDelay(_)).WillOnce(Return(backoff));
-
- // Optimal start for the post-backoff poll party.
- TimeTicks optimal_start = TimeTicks::Now();
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Kick off the test with a failed nudge.
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
- RecordSyncShare(&times)));
- scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
- RunLoop();
- Mock::VerifyAndClearExpectations(syncer());
- TimeTicks optimal_job_time = optimal_start;
- ASSERT_EQ(1U, times.size());
- EXPECT_GE(times[0], optimal_job_time);
-
- // The retry succeeds.
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(
- Invoke(sessions::test_util::SimulateNormalSuccess),
- RecordSyncShare(&times)));
- RunLoop();
- Mock::VerifyAndClearExpectations(syncer());
- optimal_job_time = optimal_job_time + backoff;
- ASSERT_EQ(2U, times.size());
- EXPECT_GE(times[1], optimal_job_time);
-
- // Now let the Poll timer do its thing.
- EXPECT_CALL(*syncer(), PollSyncShare(_,_))
- .WillRepeatedly(DoAll(
- Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShareMultiple(&times, kMinNumSamples)));
- RunLoop();
- Mock::VerifyAndClearExpectations(syncer());
- ASSERT_EQ(kMinNumSamples, times.size());
- for (size_t i = 2; i < times.size(); i++) {
- optimal_job_time = optimal_job_time + poll;
- SCOPED_TRACE(testing::Message() << "SyncShare # (" << i << ")");
- EXPECT_GE(times[i], optimal_job_time);
- }
-
- StopSyncScheduler();
-}
-
-// Test that poll failures are ignored. They should have no effect on
-// subsequent poll attempts, nor should they trigger a backoff/retry.
-TEST_F(SyncSchedulerTest, TransientPollFailure) {
- SyncShareTimes times;
- const TimeDelta poll_interval(TimeDelta::FromMilliseconds(1));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll_interval);
- UseMockDelayProvider(); // Will cause test failure if backoff is initiated.
-
- EXPECT_CALL(*syncer(), PollSyncShare(_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulatePollFailed),
- RecordSyncShare(&times)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShare(&times)));
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run the unsucessful poll. The failed poll should not trigger backoff.
- RunLoop();
- EXPECT_FALSE(scheduler()->IsBackingOff());
-
- // Run the successful poll.
- RunLoop();
- EXPECT_FALSE(scheduler()->IsBackingOff());
-}
-
-// Test that starting the syncer thread without a valid connection doesn't
-// break things when a connection is detected.
-TEST_F(SyncSchedulerTest, StartWhenNotConnected) {
- connection()->SetServerNotReachable();
- connection()->UpdateConnectionStatus();
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConnectionFailure),
- Return(true)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- Return(true)));
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
- // Should save the nudge for until after the server is reachable.
- base::MessageLoop::current()->RunUntilIdle();
-
- scheduler()->OnConnectionStatusChange();
- connection()->SetServerReachable();
- connection()->UpdateConnectionStatus();
- base::MessageLoop::current()->RunUntilIdle();
-}
-
-TEST_F(SyncSchedulerTest, ServerConnectionChangeDuringBackoff) {
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(TimeDelta::FromMilliseconds(0)));
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- connection()->SetServerNotReachable();
- connection()->UpdateConnectionStatus();
-
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConnectionFailure),
- Return(true)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- Return(true)));
-
- scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
- PumpLoop(); // To get PerformDelayedNudge called.
- PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
- ASSERT_TRUE(scheduler()->IsBackingOff());
-
- // Before we run the scheduled canary, trigger a server connection change.
- scheduler()->OnConnectionStatusChange();
- connection()->SetServerReachable();
- connection()->UpdateConnectionStatus();
- base::MessageLoop::current()->RunUntilIdle();
-}
-
-// This was supposed to test the scenario where we receive a nudge while a
-// connection change canary is scheduled, but has not run yet. Since we've made
-// the connection change canary synchronous, this is no longer possible.
-TEST_F(SyncSchedulerTest, ConnectionChangeCanaryPreemptedByNudge) {
- UseMockDelayProvider();
- EXPECT_CALL(*delay(), GetDelay(_))
- .WillRepeatedly(Return(TimeDelta::FromMilliseconds(0)));
-
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
- connection()->SetServerNotReachable();
- connection()->UpdateConnectionStatus();
-
- EXPECT_CALL(*syncer(), NormalSyncShare(_,_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateConnectionFailure),
- Return(true)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- Return(true)))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulateNormalSuccess),
- QuitLoopNowAction()));
-
- scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
-
- PumpLoop(); // To get PerformDelayedNudge called.
- PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
- ASSERT_TRUE(scheduler()->IsBackingOff());
-
- // Before we run the scheduled canary, trigger a server connection change.
- scheduler()->OnConnectionStatusChange();
- PumpLoop();
- connection()->SetServerReachable();
- connection()->UpdateConnectionStatus();
- scheduler()->ScheduleLocalNudge(zero(), ModelTypeSet(BOOKMARKS), FROM_HERE);
- base::MessageLoop::current()->RunUntilIdle();
-}
-
-// Tests that we don't crash trying to run two canaries at once if we receive
-// extra connection status change notifications. See crbug.com/190085.
-TEST_F(SyncSchedulerTest, DoubleCanaryInConfigure) {
- EXPECT_CALL(*syncer(), ConfigureSyncShare(_,_,_))
- .WillRepeatedly(DoAll(
- Invoke(sessions::test_util::SimulateConfigureConnectionFailure),
- Return(true)));
- StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
- connection()->SetServerNotReachable();
- connection()->UpdateConnectionStatus();
-
- ModelTypeSet model_types(BOOKMARKS);
- CallbackCounter ready_counter;
- CallbackCounter retry_counter;
- ConfigurationParams params(
- GetUpdatesCallerInfo::RECONFIGURATION,
- model_types,
- TypesToRoutingInfo(model_types),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&ready_counter)),
- base::Bind(&CallbackCounter::Callback, base::Unretained(&retry_counter)));
- scheduler()->ScheduleConfiguration(params);
-
- scheduler()->OnConnectionStatusChange();
- scheduler()->OnConnectionStatusChange();
-
- PumpLoop(); // Run the nudge, that will fail and schedule a quick retry.
-}
-
-TEST_F(SyncSchedulerTest, PollFromCanaryAfterAuthError) {
- SyncShareTimes times;
- TimeDelta poll(TimeDelta::FromMilliseconds(15));
- scheduler()->OnReceivedLongPollIntervalUpdate(poll);
-
- ::testing::InSequence seq;
- EXPECT_CALL(*syncer(), PollSyncShare(_,_))
- .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShareMultiple(&times, kMinNumSamples)));
-
- connection()->SetServerStatus(HttpResponse::SYNC_AUTH_ERROR);
- StartSyncScheduler(SyncScheduler::NORMAL_MODE);
-
- // Run to wait for polling.
- RunLoop();
-
- // Normally OnCredentialsUpdated calls TryCanaryJob that doesn't run Poll,
- // but after poll finished with auth error from poll timer it should retry
- // poll once more
- EXPECT_CALL(*syncer(), PollSyncShare(_,_))
- .WillOnce(DoAll(Invoke(sessions::test_util::SimulatePollSuccess),
- RecordSyncShare(&times)));
- scheduler()->OnCredentialsUpdated();
- connection()->SetServerStatus(HttpResponse::SERVER_CONNECTION_OK);
- RunLoop();
- StopSyncScheduler();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer.cc b/chromium/sync/engine/syncer.cc
deleted file mode 100644
index 13e1f792618..00000000000
--- a/chromium/sync/engine/syncer.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/syncer.h"
-
-#include "base/debug/trace_event.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "sync/engine/apply_control_data_updates.h"
-#include "sync/engine/commit.h"
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/download.h"
-#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "sync/sessions/nudge_tracker.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable-inl.h"
-
-using base::Time;
-using base::TimeDelta;
-using sync_pb::ClientCommand;
-
-namespace syncer {
-
-// TODO(akalin): We may want to propagate this switch up
-// eventually.
-#if defined(OS_ANDROID) || defined(OS_IOS)
-static const bool kCreateMobileBookmarksFolder = true;
-#else
-static const bool kCreateMobileBookmarksFolder = false;
-#endif
-
-using sessions::StatusController;
-using sessions::SyncSession;
-using sessions::NudgeTracker;
-
-Syncer::Syncer(syncer::CancelationSignal* cancelation_signal)
- : cancelation_signal_(cancelation_signal) {
-}
-
-Syncer::~Syncer() {}
-
-bool Syncer::ExitRequested() {
- return cancelation_signal_->IsSignalled();
-}
-
-bool Syncer::NormalSyncShare(ModelTypeSet request_types,
- const NudgeTracker& nudge_tracker,
- SyncSession* session) {
- HandleCycleBegin(session);
- VLOG(1) << "Downloading types " << ModelTypeSetToString(request_types);
- if (nudge_tracker.IsGetUpdatesRequired() ||
- session->context()->ShouldFetchUpdatesBeforeCommit()) {
- if (!DownloadAndApplyUpdates(
- request_types,
- session,
- base::Bind(&download::BuildNormalDownloadUpdates,
- session,
- kCreateMobileBookmarksFolder,
- request_types,
- base::ConstRef(nudge_tracker)))) {
- return HandleCycleEnd(session, nudge_tracker.updates_source());
- }
- }
-
- VLOG(1) << "Committing from types " << ModelTypeSetToString(request_types);
- SyncerError commit_result = BuildAndPostCommits(request_types, session);
- session->mutable_status_controller()->set_commit_result(commit_result);
-
- return HandleCycleEnd(session, nudge_tracker.updates_source());
-}
-
-bool Syncer::ConfigureSyncShare(
- ModelTypeSet request_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- SyncSession* session) {
- HandleCycleBegin(session);
- VLOG(1) << "Configuring types " << ModelTypeSetToString(request_types);
- DownloadAndApplyUpdates(
- request_types,
- session,
- base::Bind(&download::BuildDownloadUpdatesForConfigure,
- session,
- kCreateMobileBookmarksFolder,
- source,
- request_types));
- return HandleCycleEnd(session, source);
-}
-
-bool Syncer::PollSyncShare(ModelTypeSet request_types,
- SyncSession* session) {
- HandleCycleBegin(session);
- VLOG(1) << "Polling types " << ModelTypeSetToString(request_types);
- DownloadAndApplyUpdates(
- request_types,
- session,
- base::Bind(&download::BuildDownloadUpdatesForPoll,
- session,
- kCreateMobileBookmarksFolder,
- request_types));
- return HandleCycleEnd(session, sync_pb::GetUpdatesCallerInfo::PERIODIC);
-}
-
-void Syncer::ApplyUpdates(SyncSession* session) {
- TRACE_EVENT0("sync", "ApplyUpdates");
-
- ApplyControlDataUpdates(session->context()->directory());
-
- UpdateHandlerMap* handler_map = session->context()->update_handler_map();
- for (UpdateHandlerMap::iterator it = handler_map->begin();
- it != handler_map->end(); ++it) {
- it->second->ApplyUpdates(session->mutable_status_controller());
- }
-
- session->context()->set_hierarchy_conflict_detected(
- session->status_controller().num_hierarchy_conflicts() > 0);
-
- session->SendEventNotification(SyncEngineEvent::STATUS_CHANGED);
-}
-
-bool Syncer::DownloadAndApplyUpdates(
- ModelTypeSet request_types,
- SyncSession* session,
- base::Callback<void(sync_pb::ClientToServerMessage*)> build_fn) {
- SyncerError download_result = UNSET;
- do {
- TRACE_EVENT0("sync", "DownloadUpdates");
- sync_pb::ClientToServerMessage msg;
- build_fn.Run(&msg);
- download_result =
- download::ExecuteDownloadUpdates(request_types, session, &msg);
- session->mutable_status_controller()->set_last_download_updates_result(
- download_result);
- } while (download_result == SERVER_MORE_TO_DOWNLOAD);
-
- // Exit without applying if we're shutting down or an error was detected.
- if (download_result != SYNCER_OK)
- return false;
- if (ExitRequested())
- return false;
-
- ApplyUpdates(session);
- if (ExitRequested())
- return false;
- return true;
-}
-
-SyncerError Syncer::BuildAndPostCommits(ModelTypeSet requested_types,
- sessions::SyncSession* session) {
- // The ExitRequested() check is unnecessary, since we should start getting
- // errors from the ServerConnectionManager if an exist has been requested.
- // However, it doesn't hurt to check it anyway.
- while (!ExitRequested()) {
- scoped_ptr<Commit> commit(
- Commit::Init(
- requested_types,
- session->context()->max_commit_batch_size(),
- session->context()->account_name(),
- session->context()->directory()->cache_guid(),
- session->context()->commit_contributor_map(),
- session->context()->extensions_activity()));
- if (!commit) {
- break;
- }
-
- SyncerError error = commit->PostAndProcessResponse(
- session,
- session->mutable_status_controller(),
- session->context()->extensions_activity());
- commit->CleanUp();
- if (error != SYNCER_OK) {
- return error;
- }
- }
-
- return SYNCER_OK;
-}
-
-void Syncer::HandleCycleBegin(SyncSession* session) {
- session->mutable_status_controller()->UpdateStartTime();
- session->SendEventNotification(SyncEngineEvent::SYNC_CYCLE_BEGIN);
-}
-
-bool Syncer::HandleCycleEnd(
- SyncSession* session,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source) {
- if (!ExitRequested()) {
- session->SendSyncCycleEndEventNotification(source);
- return true;
- } else {
- return false;
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer.h b/chromium/sync/engine/syncer.h
deleted file mode 100644
index 6154f910447..00000000000
--- a/chromium/sync/engine/syncer.h
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNCER_H_
-#define SYNC_ENGINE_SYNCER_H_
-
-#include <utility>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/gtest_prod_util.h"
-#include "base/synchronization/lock.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/util/extensions_activity.h"
-
-namespace syncer {
-
-class CancelationSignal;
-
-// A Syncer provides a control interface for driving the sync cycle. These
-// cycles consist of downloading updates, parsing the response (aka. process
-// updates), applying updates while resolving conflicts, and committing local
-// changes. Some of these steps may be skipped if they're deemed to be
-// unnecessary.
-//
-// A Syncer instance expects to run on a dedicated thread. Calls to SyncShare()
-// may take an unbounded amount of time because it may block on network I/O, on
-// lock contention, or on tasks posted to other threads.
-class SYNC_EXPORT_PRIVATE Syncer {
- public:
- typedef std::vector<int64> UnsyncedMetaHandles;
-
- Syncer(CancelationSignal* cancelation_signal);
- virtual ~Syncer();
-
- bool ExitRequested();
-
- // Fetches and applies updates, resolves conflicts and commits local changes
- // for |request_types| as necessary until client and server states are in
- // sync. The |nudge_tracker| contains state that describes why the client is
- // out of sync and what must be done to bring it back into sync.
- virtual bool NormalSyncShare(ModelTypeSet request_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session);
-
- // Performs an initial download for the |request_types|. It is assumed that
- // the specified types have no local state, and that their associated change
- // processors are in "passive" mode, so none of the downloaded updates will be
- // applied to the model. The |source| is sent up to the server for debug
- // purposes. It describes the reson for performing this initial download.
- virtual bool ConfigureSyncShare(
- ModelTypeSet request_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session);
-
- // Requests to download updates for the |request_types|. For a well-behaved
- // client with a working connection to the invalidations server, this should
- // be unnecessary. It may be invoked periodically to try to keep the client
- // in sync despite bugs or transient failures.
- virtual bool PollSyncShare(ModelTypeSet request_types,
- sessions::SyncSession* session);
-
- private:
- void ApplyUpdates(sessions::SyncSession* session);
- bool DownloadAndApplyUpdates(
- ModelTypeSet request_types,
- sessions::SyncSession* session,
- base::Callback<void(sync_pb::ClientToServerMessage*)> build_fn);
-
- // This function will commit batches of unsynced items to the server until the
- // number of unsynced and ready to commit items reaches zero or an error is
- // encountered. A request to exit early will be treated as an error and will
- // abort any blocking operations.
- SyncerError BuildAndPostCommits(
- ModelTypeSet request_types,
- sessions::SyncSession* session);
-
- void HandleCycleBegin(sessions::SyncSession* session);
- bool HandleCycleEnd(
- sessions::SyncSession* session,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source);
-
- syncer::CancelationSignal* const cancelation_signal_;
-
- friend class SyncerTest;
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, NameClashWithResolver);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, IllegalAndLegalUpdates);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingAndNewParent);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest,
- TestCommitListOrderingAndNewParentAndChild);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingCounterexample);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingWithNesting);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingWithNewItems);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestGetUnsyncedAndSimpleCommit);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestPurgeWhileUnsynced);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestPurgeWhileUnapplied);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, UnappliedUpdateDuringCommit);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, DeletingEntryInFolder);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest,
- LongChangelistCreatesFakeOrphanedEntries);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, QuicklyMergeDualCreatedHierarchy);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, LongChangelistWithApplicationConflict);
- FRIEND_TEST_ALL_PREFIXES(SyncerTest, DeletingEntryWithLocalEdits);
- FRIEND_TEST_ALL_PREFIXES(EntryCreatedInNewFolderTest,
- EntryCreatedInNewFolderMidSync);
-
- DISALLOW_COPY_AND_ASSIGN(Syncer);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNCER_H_
diff --git a/chromium/sync/engine/syncer_proto_util.cc b/chromium/sync/engine/syncer_proto_util.cc
deleted file mode 100644
index bfd8151bef0..00000000000
--- a/chromium/sync/engine/syncer_proto_util.cc
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/syncer_proto_util.h"
-
-#include "base/format_macros.h"
-#include "base/strings/stringprintf.h"
-#include "google_apis/google_api_keys.h"
-#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/syncer.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/engine/traffic_logger.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync_enums.pb.h"
-#include "sync/protocol/sync_protocol_error.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/syncable-inl.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/util/time.h"
-
-using std::string;
-using std::stringstream;
-using sync_pb::ClientToServerMessage;
-using sync_pb::ClientToServerResponse;
-
-namespace syncer {
-
-using sessions::SyncSession;
-using syncable::BASE_VERSION;
-using syncable::CTIME;
-using syncable::ID;
-using syncable::IS_DEL;
-using syncable::IS_DIR;
-using syncable::IS_UNSYNCED;
-using syncable::MTIME;
-using syncable::PARENT_ID;
-
-namespace {
-
-// Time to backoff syncing after receiving a throttled response.
-const int kSyncDelayAfterThrottled = 2 * 60 * 60; // 2 hours
-
-void LogResponseProfilingData(const ClientToServerResponse& response) {
- if (response.has_profiling_data()) {
- stringstream response_trace;
- response_trace << "Server response trace:";
-
- if (response.profiling_data().has_user_lookup_time()) {
- response_trace << " user lookup: "
- << response.profiling_data().user_lookup_time() << "ms";
- }
-
- if (response.profiling_data().has_meta_data_write_time()) {
- response_trace << " meta write: "
- << response.profiling_data().meta_data_write_time()
- << "ms";
- }
-
- if (response.profiling_data().has_meta_data_read_time()) {
- response_trace << " meta read: "
- << response.profiling_data().meta_data_read_time() << "ms";
- }
-
- if (response.profiling_data().has_file_data_write_time()) {
- response_trace << " file write: "
- << response.profiling_data().file_data_write_time()
- << "ms";
- }
-
- if (response.profiling_data().has_file_data_read_time()) {
- response_trace << " file read: "
- << response.profiling_data().file_data_read_time() << "ms";
- }
-
- if (response.profiling_data().has_total_request_time()) {
- response_trace << " total time: "
- << response.profiling_data().total_request_time() << "ms";
- }
- DVLOG(1) << response_trace.str();
- }
-}
-
-SyncerError ServerConnectionErrorAsSyncerError(
- const HttpResponse::ServerConnectionCode server_status) {
- switch (server_status) {
- case HttpResponse::CONNECTION_UNAVAILABLE:
- return NETWORK_CONNECTION_UNAVAILABLE;
- case HttpResponse::IO_ERROR:
- return NETWORK_IO_ERROR;
- case HttpResponse::SYNC_SERVER_ERROR:
- // FIXME what does this mean?
- return SYNC_SERVER_ERROR;
- case HttpResponse::SYNC_AUTH_ERROR:
- return SYNC_AUTH_ERROR;
- case HttpResponse::RETRY:
- return SERVER_RETURN_TRANSIENT_ERROR;
- case HttpResponse::SERVER_CONNECTION_OK:
- case HttpResponse::NONE:
- default:
- NOTREACHED();
- return UNSET;
- }
-}
-
-SyncProtocolErrorType ConvertSyncProtocolErrorTypePBToLocalType(
- const sync_pb::SyncEnums::ErrorType& error_type) {
- switch (error_type) {
- case sync_pb::SyncEnums::SUCCESS:
- return SYNC_SUCCESS;
- case sync_pb::SyncEnums::NOT_MY_BIRTHDAY:
- return NOT_MY_BIRTHDAY;
- case sync_pb::SyncEnums::THROTTLED:
- return THROTTLED;
- case sync_pb::SyncEnums::CLEAR_PENDING:
- return CLEAR_PENDING;
- case sync_pb::SyncEnums::TRANSIENT_ERROR:
- return TRANSIENT_ERROR;
- case sync_pb::SyncEnums::MIGRATION_DONE:
- return MIGRATION_DONE;
- case sync_pb::SyncEnums::DISABLED_BY_ADMIN:
- return DISABLED_BY_ADMIN;
- case sync_pb::SyncEnums::UNKNOWN:
- return UNKNOWN_ERROR;
- case sync_pb::SyncEnums::USER_NOT_ACTIVATED:
- case sync_pb::SyncEnums::AUTH_INVALID:
- case sync_pb::SyncEnums::ACCESS_DENIED:
- return INVALID_CREDENTIAL;
- default:
- NOTREACHED();
- return UNKNOWN_ERROR;
- }
-}
-
-ClientAction ConvertClientActionPBToLocalClientAction(
- const sync_pb::SyncEnums::Action& action) {
- switch (action) {
- case sync_pb::SyncEnums::UPGRADE_CLIENT:
- return UPGRADE_CLIENT;
- case sync_pb::SyncEnums::CLEAR_USER_DATA_AND_RESYNC:
- return CLEAR_USER_DATA_AND_RESYNC;
- case sync_pb::SyncEnums::ENABLE_SYNC_ON_ACCOUNT:
- return ENABLE_SYNC_ON_ACCOUNT;
- case sync_pb::SyncEnums::STOP_AND_RESTART_SYNC:
- return STOP_AND_RESTART_SYNC;
- case sync_pb::SyncEnums::DISABLE_SYNC_ON_CLIENT:
- return DISABLE_SYNC_ON_CLIENT;
- case sync_pb::SyncEnums::UNKNOWN_ACTION:
- return UNKNOWN_ACTION;
- default:
- NOTREACHED();
- return UNKNOWN_ACTION;
- }
-}
-
-} // namespace
-
-ModelTypeSet GetTypesToMigrate(const ClientToServerResponse& response) {
- ModelTypeSet to_migrate;
- for (int i = 0; i < response.migrated_data_type_id_size(); i++) {
- int field_number = response.migrated_data_type_id(i);
- ModelType model_type = GetModelTypeFromSpecificsFieldNumber(field_number);
- if (!IsRealDataType(model_type)) {
- DLOG(WARNING) << "Unknown field number " << field_number;
- continue;
- }
- to_migrate.Put(model_type);
- }
- return to_migrate;
-}
-
-SyncProtocolError ConvertErrorPBToLocalType(
- const sync_pb::ClientToServerResponse_Error& error) {
- SyncProtocolError sync_protocol_error;
- sync_protocol_error.error_type = ConvertSyncProtocolErrorTypePBToLocalType(
- error.error_type());
- sync_protocol_error.error_description = error.error_description();
- sync_protocol_error.url = error.url();
- sync_protocol_error.action = ConvertClientActionPBToLocalClientAction(
- error.action());
-
- if (error.error_data_type_ids_size() > 0) {
- // THROTTLED is currently the only error code that uses |error_data_types|.
- DCHECK_EQ(error.error_type(), sync_pb::SyncEnums::THROTTLED);
- for (int i = 0; i < error.error_data_type_ids_size(); ++i) {
- int field_number = error.error_data_type_ids(i);
- ModelType model_type =
- GetModelTypeFromSpecificsFieldNumber(field_number);
- if (!IsRealDataType(model_type)) {
- DLOG(WARNING) << "Unknown field number " << field_number;
- continue;
- }
- sync_protocol_error.error_data_types.Put(model_type);
- }
- }
-
- return sync_protocol_error;
-}
-
-// static
-bool SyncerProtoUtil::VerifyResponseBirthday(
- const ClientToServerResponse& response,
- syncable::Directory* dir) {
-
- std::string local_birthday = dir->store_birthday();
-
- if (local_birthday.empty()) {
- if (!response.has_store_birthday()) {
- LOG(WARNING) << "Expected a birthday on first sync.";
- return false;
- }
-
- DVLOG(1) << "New store birthday: " << response.store_birthday();
- dir->set_store_birthday(response.store_birthday());
- return true;
- }
-
- // Error situation, but we're not stuck.
- if (!response.has_store_birthday()) {
- LOG(WARNING) << "No birthday in server response?";
- return true;
- }
-
- if (response.store_birthday() != local_birthday) {
- LOG(WARNING) << "Birthday changed, showing syncer stuck";
- return false;
- }
-
- return true;
-}
-
-// static
-bool SyncerProtoUtil::IsSyncDisabledByAdmin(
- const sync_pb::ClientToServerResponse& response) {
- return (response.has_error_code() &&
- response.error_code() == sync_pb::SyncEnums::DISABLED_BY_ADMIN);
-}
-
-// static
-void SyncerProtoUtil::AddRequestBirthday(syncable::Directory* dir,
- ClientToServerMessage* msg) {
- if (!dir->store_birthday().empty())
- msg->set_store_birthday(dir->store_birthday());
-}
-
-// static
-void SyncerProtoUtil::AddBagOfChips(syncable::Directory* dir,
- ClientToServerMessage* msg) {
- msg->mutable_bag_of_chips()->ParseFromString(dir->bag_of_chips());
-}
-
-// static
-void SyncerProtoUtil::SetProtocolVersion(ClientToServerMessage* msg) {
- const int current_version =
- ClientToServerMessage::default_instance().protocol_version();
- msg->set_protocol_version(current_version);
-}
-
-// static
-bool SyncerProtoUtil::PostAndProcessHeaders(ServerConnectionManager* scm,
- sessions::SyncSession* session,
- const ClientToServerMessage& msg,
- ClientToServerResponse* response) {
- ServerConnectionManager::PostBufferParams params;
- DCHECK(msg.has_protocol_version());
- DCHECK_EQ(msg.protocol_version(),
- ClientToServerMessage::default_instance().protocol_version());
- msg.SerializeToString(&params.buffer_in);
-
- ScopedServerStatusWatcher server_status_watcher(scm, &params.response);
- // Fills in params.buffer_out and params.response.
- if (!scm->PostBufferWithCachedAuth(&params, &server_status_watcher)) {
- LOG(WARNING) << "Error posting from syncer:" << params.response;
- return false;
- }
-
- if (response->ParseFromString(params.buffer_out)) {
- // TODO(tim): This is an egregious layering violation (bug 35060).
- switch (response->error_code()) {
- case sync_pb::SyncEnums::ACCESS_DENIED:
- case sync_pb::SyncEnums::AUTH_INVALID:
- case sync_pb::SyncEnums::USER_NOT_ACTIVATED:
- // Fires on ScopedServerStatusWatcher
- params.response.server_status = HttpResponse::SYNC_AUTH_ERROR;
- return false;
- default:
- return true;
- }
- }
-
- return false;
-}
-
-base::TimeDelta SyncerProtoUtil::GetThrottleDelay(
- const ClientToServerResponse& response) {
- base::TimeDelta throttle_delay =
- base::TimeDelta::FromSeconds(kSyncDelayAfterThrottled);
- if (response.has_client_command()) {
- const sync_pb::ClientCommand& command = response.client_command();
- if (command.has_throttle_delay_seconds()) {
- throttle_delay =
- base::TimeDelta::FromSeconds(command.throttle_delay_seconds());
- }
- }
- return throttle_delay;
-}
-
-namespace {
-
-// Helper function for an assertion in PostClientToServerMessage.
-bool IsVeryFirstGetUpdates(const ClientToServerMessage& message) {
- if (!message.has_get_updates())
- return false;
- DCHECK_LT(0, message.get_updates().from_progress_marker_size());
- for (int i = 0; i < message.get_updates().from_progress_marker_size(); ++i) {
- if (!message.get_updates().from_progress_marker(i).token().empty())
- return false;
- }
- return true;
-}
-
-// TODO(lipalani) : Rename these function names as per the CR for issue 7740067.
-SyncProtocolError ConvertLegacyErrorCodeToNewError(
- const sync_pb::SyncEnums::ErrorType& error_type) {
- SyncProtocolError error;
- error.error_type = ConvertSyncProtocolErrorTypePBToLocalType(error_type);
- if (error_type == sync_pb::SyncEnums::CLEAR_PENDING ||
- error_type == sync_pb::SyncEnums::NOT_MY_BIRTHDAY) {
- error.action = DISABLE_SYNC_ON_CLIENT;
- } else if (error_type == sync_pb::SyncEnums::DISABLED_BY_ADMIN) {
- error.action = STOP_SYNC_FOR_DISABLED_ACCOUNT;
- } // There is no other action we can compute for legacy server.
- return error;
-}
-
-} // namespace
-
-// static
-SyncerError SyncerProtoUtil::PostClientToServerMessage(
- ClientToServerMessage* msg,
- ClientToServerResponse* response,
- SyncSession* session) {
- CHECK(response);
- DCHECK(!msg->get_updates().has_from_timestamp()); // Deprecated.
- DCHECK(!msg->get_updates().has_requested_types()); // Deprecated.
-
- // Add must-have fields.
- SetProtocolVersion(msg);
- AddRequestBirthday(session->context()->directory(), msg);
- DCHECK(msg->has_store_birthday() || IsVeryFirstGetUpdates(*msg));
- AddBagOfChips(session->context()->directory(), msg);
- msg->set_api_key(google_apis::GetAPIKey());
- msg->mutable_client_status()->CopyFrom(session->context()->client_status());
- msg->set_invalidator_client_id(session->context()->invalidator_client_id());
-
- syncable::Directory* dir = session->context()->directory();
-
- LogClientToServerMessage(*msg);
- session->context()->traffic_recorder()->RecordClientToServerMessage(*msg);
- if (!PostAndProcessHeaders(session->context()->connection_manager(), session,
- *msg, response)) {
- // There was an error establishing communication with the server.
- // We can not proceed beyond this point.
- const HttpResponse::ServerConnectionCode server_status =
- session->context()->connection_manager()->server_status();
-
- DCHECK_NE(server_status, HttpResponse::NONE);
- DCHECK_NE(server_status, HttpResponse::SERVER_CONNECTION_OK);
-
- return ServerConnectionErrorAsSyncerError(server_status);
- }
-
- LogClientToServerResponse(*response);
- session->context()->traffic_recorder()->RecordClientToServerResponse(
- *response);
-
- // Persist a bag of chips if it has been sent by the server.
- PersistBagOfChips(dir, *response);
-
- SyncProtocolError sync_protocol_error;
-
- // The DISABLED_BY_ADMIN error overrides other errors sent by the server.
- if (IsSyncDisabledByAdmin(*response)) {
- sync_protocol_error.error_type = DISABLED_BY_ADMIN;
- sync_protocol_error.action = STOP_SYNC_FOR_DISABLED_ACCOUNT;
- } else if (!VerifyResponseBirthday(*response, dir)) {
- // If sync isn't disabled, first check for a birthday mismatch error.
- sync_protocol_error.error_type = NOT_MY_BIRTHDAY;
- sync_protocol_error.action = DISABLE_SYNC_ON_CLIENT;
- } else if (response->has_error()) {
- // This is a new server. Just get the error from the protocol.
- sync_protocol_error = ConvertErrorPBToLocalType(response->error());
- } else {
- // Legacy server implementation. Compute the error based on |error_code|.
- sync_protocol_error = ConvertLegacyErrorCodeToNewError(
- response->error_code());
- }
-
- // Now set the error into the status so the layers above us could read it.
- sessions::StatusController* status = session->mutable_status_controller();
- status->set_sync_protocol_error(sync_protocol_error);
-
- // Inform the delegate of the error we got.
- session->delegate()->OnSyncProtocolError(session->TakeSnapshot());
-
- // Update our state for any other commands we've received.
- if (response->has_client_command()) {
- const sync_pb::ClientCommand& command = response->client_command();
- if (command.has_max_commit_batch_size()) {
- session->context()->set_max_commit_batch_size(
- command.max_commit_batch_size());
- }
-
- if (command.has_set_sync_long_poll_interval()) {
- session->delegate()->OnReceivedLongPollIntervalUpdate(
- base::TimeDelta::FromSeconds(command.set_sync_long_poll_interval()));
- }
-
- if (command.has_set_sync_poll_interval()) {
- session->delegate()->OnReceivedShortPollIntervalUpdate(
- base::TimeDelta::FromSeconds(command.set_sync_poll_interval()));
- }
-
- if (command.has_sessions_commit_delay_seconds()) {
- session->delegate()->OnReceivedSessionsCommitDelay(
- base::TimeDelta::FromSeconds(
- command.sessions_commit_delay_seconds()));
- }
-
- if (command.has_client_invalidation_hint_buffer_size()) {
- session->delegate()->OnReceivedClientInvalidationHintBufferSize(
- command.client_invalidation_hint_buffer_size());
- }
- }
-
- // Now do any special handling for the error type and decide on the return
- // value.
- switch (sync_protocol_error.error_type) {
- case UNKNOWN_ERROR:
- LOG(WARNING) << "Sync protocol out-of-date. The server is using a more "
- << "recent version.";
- return SERVER_RETURN_UNKNOWN_ERROR;
- case SYNC_SUCCESS:
- LogResponseProfilingData(*response);
- return SYNCER_OK;
- case THROTTLED:
- if (sync_protocol_error.error_data_types.Empty()) {
- DLOG(WARNING) << "Client fully throttled by syncer.";
- session->delegate()->OnThrottled(GetThrottleDelay(*response));
- } else {
- DLOG(WARNING) << "Some types throttled by syncer.";
- session->delegate()->OnTypesThrottled(
- sync_protocol_error.error_data_types,
- GetThrottleDelay(*response));
- }
- return SERVER_RETURN_THROTTLED;
- case TRANSIENT_ERROR:
- return SERVER_RETURN_TRANSIENT_ERROR;
- case MIGRATION_DONE:
- LOG_IF(ERROR, 0 >= response->migrated_data_type_id_size())
- << "MIGRATION_DONE but no types specified.";
- // TODO(akalin): This should be a set union.
- session->mutable_status_controller()->
- set_types_needing_local_migration(GetTypesToMigrate(*response));
- return SERVER_RETURN_MIGRATION_DONE;
- case CLEAR_PENDING:
- return SERVER_RETURN_CLEAR_PENDING;
- case NOT_MY_BIRTHDAY:
- return SERVER_RETURN_NOT_MY_BIRTHDAY;
- case DISABLED_BY_ADMIN:
- return SERVER_RETURN_DISABLED_BY_ADMIN;
- default:
- NOTREACHED();
- return UNSET;
- }
-}
-
-// static
-bool SyncerProtoUtil::ShouldMaintainPosition(
- const sync_pb::SyncEntity& sync_entity) {
- // Maintain positions for bookmarks that are not server-defined top-level
- // folders.
- return GetModelType(sync_entity) == BOOKMARKS
- && !(sync_entity.folder() &&
- !sync_entity.server_defined_unique_tag().empty());
-}
-
-// static
-void SyncerProtoUtil::CopyProtoBytesIntoBlob(const std::string& proto_bytes,
- syncable::Blob* blob) {
- syncable::Blob proto_blob(proto_bytes.begin(), proto_bytes.end());
- blob->swap(proto_blob);
-}
-
-// static
-bool SyncerProtoUtil::ProtoBytesEqualsBlob(const std::string& proto_bytes,
- const syncable::Blob& blob) {
- if (proto_bytes.size() != blob.size())
- return false;
- return std::equal(proto_bytes.begin(), proto_bytes.end(), blob.begin());
-}
-
-// static
-void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
- std::string* proto_bytes) {
- std::string blob_string(blob.begin(), blob.end());
- proto_bytes->swap(blob_string);
-}
-
-// static
-const std::string& SyncerProtoUtil::NameFromSyncEntity(
- const sync_pb::SyncEntity& entry) {
- if (entry.has_non_unique_name())
- return entry.non_unique_name();
- return entry.name();
-}
-
-// static
-const std::string& SyncerProtoUtil::NameFromCommitEntryResponse(
- const sync_pb::CommitResponse_EntryResponse& entry) {
- if (entry.has_non_unique_name())
- return entry.non_unique_name();
- return entry.name();
-}
-
-// static
-void SyncerProtoUtil::PersistBagOfChips(syncable::Directory* dir,
- const sync_pb::ClientToServerResponse& response) {
- if (!response.has_new_bag_of_chips())
- return;
- std::string bag_of_chips;
- if (response.new_bag_of_chips().SerializeToString(&bag_of_chips))
- dir->set_bag_of_chips(bag_of_chips);
-}
-
-std::string SyncerProtoUtil::SyncEntityDebugString(
- const sync_pb::SyncEntity& entry) {
- const std::string& mtime_str =
- GetTimeDebugString(ProtoTimeToTime(entry.mtime()));
- const std::string& ctime_str =
- GetTimeDebugString(ProtoTimeToTime(entry.ctime()));
- return base::StringPrintf(
- "id: %s, parent_id: %s, "
- "version: %" PRId64"d, "
- "mtime: %" PRId64"d (%s), "
- "ctime: %" PRId64"d (%s), "
- "name: %s, sync_timestamp: %" PRId64"d, "
- "%s ",
- entry.id_string().c_str(),
- entry.parent_id_string().c_str(),
- entry.version(),
- entry.mtime(), mtime_str.c_str(),
- entry.ctime(), ctime_str.c_str(),
- entry.name().c_str(), entry.sync_timestamp(),
- entry.deleted() ? "deleted, ":"");
-}
-
-namespace {
-std::string GetUpdatesResponseString(
- const sync_pb::GetUpdatesResponse& response) {
- std::string output;
- output.append("GetUpdatesResponse:\n");
- for (int i = 0; i < response.entries_size(); i++) {
- output.append(SyncerProtoUtil::SyncEntityDebugString(response.entries(i)));
- output.append("\n");
- }
- return output;
-}
-} // namespace
-
-std::string SyncerProtoUtil::ClientToServerResponseDebugString(
- const ClientToServerResponse& response) {
- // Add more handlers as needed.
- std::string output;
- if (response.has_get_updates())
- output.append(GetUpdatesResponseString(response.get_updates()));
- return output;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer_proto_util.h b/chromium/sync/engine/syncer_proto_util.h
deleted file mode 100644
index 3fab8b50d50..00000000000
--- a/chromium/sync/engine/syncer_proto_util.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
-#define SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
-
-#include <string>
-
-#include "base/gtest_prod_util.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/blob.h"
-
-namespace sync_pb {
-class ClientToServerMessage;
-class ClientToServerResponse;
-class ClientToServerResponse_Error;
-class CommitResponse_EntryResponse;
-class EntitySpecifics;
-class SyncEntity;
-}
-
-namespace syncer {
-
-class ServerConnectionManager;
-
-namespace sessions {
-class SyncProtocolError;
-class SyncSessionContext;
-}
-
-namespace syncable {
-class Directory;
-class Entry;
-}
-
-// Returns the types to migrate from the data in |response|.
-SYNC_EXPORT_PRIVATE ModelTypeSet GetTypesToMigrate(
- const sync_pb::ClientToServerResponse& response);
-
-// Builds a SyncProtocolError from the data in |error|.
-SYNC_EXPORT_PRIVATE SyncProtocolError ConvertErrorPBToLocalType(
- const sync_pb::ClientToServerResponse_Error& error);
-
-class SYNC_EXPORT_PRIVATE SyncerProtoUtil {
- public:
- // Posts the given message and fills the buffer with the returned value.
- // Returns true on success. Also handles store birthday verification: will
- // produce a SyncError if the birthday is incorrect.
- // NOTE: This will add all fields that must be sent on every request, which
- // includes store birthday, protocol version, client chips, api keys, etc.
- static SyncerError PostClientToServerMessage(
- sync_pb::ClientToServerMessage* msg,
- sync_pb::ClientToServerResponse* response,
- sessions::SyncSession* session);
-
- static bool ShouldMaintainPosition(const sync_pb::SyncEntity& sync_entity);
-
- // Utility methods for converting between syncable::Blobs and protobuf byte
- // fields.
- static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
- syncable::Blob* blob);
- static bool ProtoBytesEqualsBlob(const std::string& proto_bytes,
- const syncable::Blob& blob);
- static void CopyBlobIntoProtoBytes(const syncable::Blob& blob,
- std::string* proto_bytes);
-
- // Extract the name field from a sync entity.
- static const std::string& NameFromSyncEntity(
- const sync_pb::SyncEntity& entry);
-
- // Extract the name field from a commit entry response.
- static const std::string& NameFromCommitEntryResponse(
- const sync_pb::CommitResponse_EntryResponse& entry);
-
- // Persist the bag of chips if it is present in the response.
- static void PersistBagOfChips(
- syncable::Directory* dir,
- const sync_pb::ClientToServerResponse& response);
-
- // EntitySpecifics is used as a filter for the GetUpdates message to tell
- // the server which datatypes to send back. This adds a datatype so that
- // it's included in the filter.
- static void AddToEntitySpecificDatatypesFilter(ModelType datatype,
- sync_pb::EntitySpecifics* filter);
-
- // Get a debug string representation of the client to server response.
- static std::string ClientToServerResponseDebugString(
- const sync_pb::ClientToServerResponse& response);
-
- // Get update contents as a string. Intended for logging, and intended
- // to have a smaller footprint than the protobuf's built-in pretty printer.
- static std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry);
-
- // Pull the birthday from the dir and put it into the msg.
- static void AddRequestBirthday(syncable::Directory* dir,
- sync_pb::ClientToServerMessage* msg);
-
- // Pull the bag of chips from the dir and put it into the msg.
- static void AddBagOfChips(syncable::Directory* dir,
- sync_pb::ClientToServerMessage* msg);
-
-
- // Set the protocol version field in the outgoing message.
- static void SetProtocolVersion(sync_pb::ClientToServerMessage* msg);
-
- private:
- SyncerProtoUtil() {}
-
- // Helper functions for PostClientToServerMessage.
-
- // Verifies the store birthday, alerting/resetting as appropriate if there's a
- // mismatch. Return false if the syncer should be stuck.
- static bool VerifyResponseBirthday(
- const sync_pb::ClientToServerResponse& response,
- syncable::Directory* dir);
-
- // Returns true if sync is disabled by admin for a dasher account.
- static bool IsSyncDisabledByAdmin(
- const sync_pb::ClientToServerResponse& response);
-
- // Post the message using the scm, and do some processing on the returned
- // headers. Decode the server response.
- static bool PostAndProcessHeaders(ServerConnectionManager* scm,
- sessions::SyncSession* session,
- const sync_pb::ClientToServerMessage& msg,
- sync_pb::ClientToServerResponse* response);
-
- static base::TimeDelta GetThrottleDelay(
- const sync_pb::ClientToServerResponse& response);
-
- friend class SyncerProtoUtilTest;
- FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, AddRequestBirthday);
- FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, PostAndProcessHeaders);
- FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, VerifyDisabledByAdmin);
- FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, VerifyResponseBirthday);
- FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, HandleThrottlingNoDatatypes);
- FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, HandleThrottlingWithDatatypes);
-
- DISALLOW_COPY_AND_ASSIGN(SyncerProtoUtil);
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
diff --git a/chromium/sync/engine/syncer_proto_util_unittest.cc b/chromium/sync/engine/syncer_proto_util_unittest.cc
deleted file mode 100644
index 39f4fddbd47..00000000000
--- a/chromium/sync/engine/syncer_proto_util_unittest.cc
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/syncer_proto_util.h"
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
-#include "base/time/time.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/protocol/sync_enums.pb.h"
-#include "sync/sessions/sync_session_context.h"
-#include "sync/syncable/blob.h"
-#include "sync/syncable/directory.h"
-#include "sync/test/engine/mock_connection_manager.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-
-using sync_pb::ClientToServerMessage;
-using sync_pb::CommitResponse_EntryResponse;
-using sync_pb::SyncEntity;
-
-namespace syncer {
-
-using sessions::SyncSessionContext;
-using syncable::Blob;
-
-class MockDelegate : public sessions::SyncSession::Delegate {
- public:
- MockDelegate() {}
- ~MockDelegate() {}
-
- MOCK_METHOD0(IsSyncingCurrentlySilenced, bool());
- MOCK_METHOD1(OnReceivedShortPollIntervalUpdate, void(const base::TimeDelta&));
- MOCK_METHOD1(OnReceivedLongPollIntervalUpdate ,void(const base::TimeDelta&));
- MOCK_METHOD1(OnReceivedSessionsCommitDelay, void(const base::TimeDelta&));
- MOCK_METHOD1(OnReceivedClientInvalidationHintBufferSize, void(int));
- MOCK_METHOD1(OnSyncProtocolError, void(const sessions::SyncSessionSnapshot&));
- MOCK_METHOD1(OnSilencedUntil, void(const base::TimeTicks&));
-};
-
-// Builds a ClientToServerResponse with some data type ids, including
-// invalid ones. GetTypesToMigrate() should return only the valid
-// model types.
-TEST(SyncerProtoUtil, GetTypesToMigrate) {
- sync_pb::ClientToServerResponse response;
- response.add_migrated_data_type_id(
- GetSpecificsFieldNumberFromModelType(BOOKMARKS));
- response.add_migrated_data_type_id(
- GetSpecificsFieldNumberFromModelType(HISTORY_DELETE_DIRECTIVES));
- response.add_migrated_data_type_id(-1);
- EXPECT_TRUE(
- GetTypesToMigrate(response).Equals(
- ModelTypeSet(BOOKMARKS, HISTORY_DELETE_DIRECTIVES)));
-}
-
-// Builds a ClientToServerResponse_Error with some error data type
-// ids, including invalid ones. ConvertErrorPBToLocalType() should
-// return a SyncProtocolError with only the valid model types.
-TEST(SyncerProtoUtil, ConvertErrorPBToLocalType) {
- sync_pb::ClientToServerResponse_Error error_pb;
- error_pb.set_error_type(sync_pb::SyncEnums::THROTTLED);
- error_pb.add_error_data_type_ids(
- GetSpecificsFieldNumberFromModelType(BOOKMARKS));
- error_pb.add_error_data_type_ids(
- GetSpecificsFieldNumberFromModelType(HISTORY_DELETE_DIRECTIVES));
- error_pb.add_error_data_type_ids(-1);
- SyncProtocolError error = ConvertErrorPBToLocalType(error_pb);
- EXPECT_TRUE(
- error.error_data_types.Equals(
- ModelTypeSet(BOOKMARKS, HISTORY_DELETE_DIRECTIVES)));
-}
-
-TEST(SyncerProtoUtil, TestBlobToProtocolBufferBytesUtilityFunctions) {
- unsigned char test_data1[] = {1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
- unsigned char test_data2[] = {1, 99, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
- unsigned char test_data3[] = {99, 2, 3, 4, 5, 6, 7, 8};
-
- syncable::Blob test_blob1, test_blob2, test_blob3;
- for (size_t i = 0; i < arraysize(test_data1); ++i)
- test_blob1.push_back(test_data1[i]);
- for (size_t i = 0; i < arraysize(test_data2); ++i)
- test_blob2.push_back(test_data2[i]);
- for (size_t i = 0; i < arraysize(test_data3); ++i)
- test_blob3.push_back(test_data3[i]);
-
- std::string test_message1(reinterpret_cast<char*>(test_data1),
- arraysize(test_data1));
- std::string test_message2(reinterpret_cast<char*>(test_data2),
- arraysize(test_data2));
- std::string test_message3(reinterpret_cast<char*>(test_data3),
- arraysize(test_data3));
-
- EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
- test_blob1));
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
- test_blob2));
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
- test_blob3));
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
- test_blob1));
- EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
- test_blob2));
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
- test_blob3));
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
- test_blob1));
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
- test_blob2));
- EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
- test_blob3));
-
- Blob blob1_copy;
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
- blob1_copy));
- SyncerProtoUtil::CopyProtoBytesIntoBlob(test_message1, &blob1_copy);
- EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
- blob1_copy));
-
- std::string message2_copy;
- EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
- test_blob2));
- SyncerProtoUtil::CopyBlobIntoProtoBytes(test_blob2, &message2_copy);
- EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
- test_blob2));
-}
-
-// Tests NameFromSyncEntity and NameFromCommitEntryResponse when only the name
-// field is provided.
-TEST(SyncerProtoUtil, NameExtractionOneName) {
- SyncEntity one_name_entity;
- CommitResponse_EntryResponse one_name_response;
-
- const std::string one_name_string("Eggheadednesses");
- one_name_entity.set_name(one_name_string);
- one_name_response.set_name(one_name_string);
-
- const std::string name_a =
- SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
- EXPECT_EQ(one_name_string, name_a);
-}
-
-TEST(SyncerProtoUtil, NameExtractionOneUniqueName) {
- SyncEntity one_name_entity;
- CommitResponse_EntryResponse one_name_response;
-
- const std::string one_name_string("Eggheadednesses");
-
- one_name_entity.set_non_unique_name(one_name_string);
- one_name_response.set_non_unique_name(one_name_string);
-
- const std::string name_a =
- SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
- EXPECT_EQ(one_name_string, name_a);
-}
-
-// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the name
-// field and the non_unique_name fields are provided.
-// Should prioritize non_unique_name.
-TEST(SyncerProtoUtil, NameExtractionTwoNames) {
- SyncEntity two_name_entity;
- CommitResponse_EntryResponse two_name_response;
-
- const std::string neuro("Neuroanatomists");
- const std::string oxyphen("Oxyphenbutazone");
-
- two_name_entity.set_name(oxyphen);
- two_name_entity.set_non_unique_name(neuro);
-
- two_name_response.set_name(oxyphen);
- two_name_response.set_non_unique_name(neuro);
-
- const std::string name_a =
- SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
- EXPECT_EQ(neuro, name_a);
-}
-
-class SyncerProtoUtilTest : public testing::Test {
- public:
- virtual void SetUp() {
- dir_maker_.SetUp();
- }
-
- virtual void TearDown() {
- dir_maker_.TearDown();
- }
-
- syncable::Directory* directory() {
- return dir_maker_.directory();
- }
-
- protected:
- base::MessageLoop message_loop_;
- TestDirectorySetterUpper dir_maker_;
-};
-
-TEST_F(SyncerProtoUtilTest, VerifyResponseBirthday) {
- // Both sides empty
- EXPECT_TRUE(directory()->store_birthday().empty());
- sync_pb::ClientToServerResponse response;
- EXPECT_FALSE(SyncerProtoUtil::VerifyResponseBirthday(response, directory()));
-
- // Remote set, local empty
- response.set_store_birthday("flan");
- EXPECT_TRUE(SyncerProtoUtil::VerifyResponseBirthday(response, directory()));
- EXPECT_EQ(directory()->store_birthday(), "flan");
-
- // Remote empty, local set.
- response.clear_store_birthday();
- EXPECT_TRUE(SyncerProtoUtil::VerifyResponseBirthday(response, directory()));
- EXPECT_EQ(directory()->store_birthday(), "flan");
-
- // Doesn't match
- response.set_store_birthday("meat");
- EXPECT_FALSE(SyncerProtoUtil::VerifyResponseBirthday(response, directory()));
-
- response.set_error_code(sync_pb::SyncEnums::CLEAR_PENDING);
- EXPECT_FALSE(SyncerProtoUtil::VerifyResponseBirthday(response, directory()));
-}
-
-TEST_F(SyncerProtoUtilTest, VerifyDisabledByAdmin) {
- // No error code
- sync_pb::ClientToServerResponse response;
- EXPECT_FALSE(SyncerProtoUtil::IsSyncDisabledByAdmin(response));
-
- // Has error code, but not disabled
- response.set_error_code(sync_pb::SyncEnums::NOT_MY_BIRTHDAY);
- EXPECT_FALSE(SyncerProtoUtil::IsSyncDisabledByAdmin(response));
-
- // Has error code, and is disabled by admin
- response.set_error_code(sync_pb::SyncEnums::DISABLED_BY_ADMIN);
- EXPECT_TRUE(SyncerProtoUtil::IsSyncDisabledByAdmin(response));
-}
-
-TEST_F(SyncerProtoUtilTest, AddRequestBirthday) {
- EXPECT_TRUE(directory()->store_birthday().empty());
- ClientToServerMessage msg;
- SyncerProtoUtil::AddRequestBirthday(directory(), &msg);
- EXPECT_FALSE(msg.has_store_birthday());
-
- directory()->set_store_birthday("meat");
- SyncerProtoUtil::AddRequestBirthday(directory(), &msg);
- EXPECT_EQ(msg.store_birthday(), "meat");
-}
-
-class DummyConnectionManager : public ServerConnectionManager {
- public:
- DummyConnectionManager(CancelationSignal* signal)
- : ServerConnectionManager("unused", 0, false, signal),
- send_error_(false),
- access_denied_(false) {}
-
- virtual ~DummyConnectionManager() {}
- virtual bool PostBufferWithCachedAuth(
- PostBufferParams* params,
- ScopedServerStatusWatcher* watcher) OVERRIDE {
- if (send_error_) {
- return false;
- }
-
- sync_pb::ClientToServerResponse response;
- if (access_denied_) {
- response.set_error_code(sync_pb::SyncEnums::ACCESS_DENIED);
- }
- response.SerializeToString(&params->buffer_out);
-
- return true;
- }
-
- void set_send_error(bool send) {
- send_error_ = send;
- }
-
- void set_access_denied(bool denied) {
- access_denied_ = denied;
- }
-
- private:
- bool send_error_;
- bool access_denied_;
-};
-
-TEST_F(SyncerProtoUtilTest, PostAndProcessHeaders) {
- CancelationSignal signal;
- DummyConnectionManager dcm(&signal);
- ClientToServerMessage msg;
- SyncerProtoUtil::SetProtocolVersion(&msg);
- msg.set_share("required");
- msg.set_message_contents(ClientToServerMessage::GET_UPDATES);
- sync_pb::ClientToServerResponse response;
-
- dcm.set_send_error(true);
- EXPECT_FALSE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
- msg, &response));
-
- dcm.set_send_error(false);
- EXPECT_TRUE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
- msg, &response));
-
- dcm.set_access_denied(true);
- EXPECT_FALSE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
- msg, &response));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer_types.h b/chromium/sync/engine/syncer_types.h
deleted file mode 100644
index 36f3dbcf8dd..00000000000
--- a/chromium/sync/engine/syncer_types.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_SYNCER_TYPES_H_
-#define SYNC_ENGINE_SYNCER_TYPES_H_
-
-// The intent of this is to keep all shared data types and enums for the syncer
-// in a single place without having dependencies between other files.
-namespace syncer {
-
-enum UpdateAttemptResponse {
- // Update was applied or safely ignored.
- SUCCESS,
-
- // The conditions described by the following enum values are not mutually
- // exclusive. The list has been ordered according to priority in the case of
- // overlap, with highest priority first.
- //
- // For example, in the case where an item had both the IS_UNSYCNED and
- // IS_UNAPPLIED_UPDATE flags set (CONFLICT_SIMPLE), and a SERVER_PARENT_ID
- // that, if applied, would cause a directory loop (CONFLICT_HIERARCHY), and
- // specifics that we can't decrypt right now (CONFLICT_ENCRYPTION), the
- // UpdateApplicator would return CONFLICT_ENCRYPTION when attempting to
- // process the item.
- //
- // We do not attempt to resolve CONFLICT_HIERARCHY or CONFLICT_ENCRYPTION
- // items. We will leave these updates unapplied and wait for the server
- // to send us newer updates that will resolve the conflict.
-
- // We were unable to decrypt/encrypt this server data. As such, we can't make
- // forward progress on this node, but because the passphrase may not arrive
- // until later we don't want to get the syncer stuck. See UpdateApplicator
- // for how this is handled.
- CONFLICT_ENCRYPTION,
-
- // These are some updates that, if applied, would violate the tree's
- // invariants. Examples of this include the server adding children to locally
- // deleted directories and directory moves that would create loops.
- CONFLICT_HIERARCHY,
-
- // This indicates that item was modified both remotely (IS_UNAPPLIED_UPDATE)
- // and locally (IS_UNSYNCED). We use the ConflictResolver to decide which of
- // the changes should take priority, or if we can possibly merge the data.
- CONFLICT_SIMPLE
-};
-
-// Different results from the verify phase will yield different methods of
-// processing in the ProcessUpdates phase. The SKIP result means the entry
-// doesn't go to the ProcessUpdates phase.
-enum VerifyResult {
- VERIFY_FAIL,
- VERIFY_SUCCESS,
- VERIFY_UNDELETE,
- VERIFY_SKIP,
- VERIFY_UNDECIDED
-};
-
-enum VerifyCommitResult {
- VERIFY_UNSYNCABLE,
- VERIFY_OK,
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNCER_TYPES_H_
diff --git a/chromium/sync/engine/syncer_unittest.cc b/chromium/sync/engine/syncer_unittest.cc
deleted file mode 100644
index 19aff7c3b11..00000000000
--- a/chromium/sync/engine/syncer_unittest.cc
+++ /dev/null
@@ -1,4811 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Syncer unit tests. Unfortunately a lot of these tests
-// are outdated and need to be reworked and updated.
-
-#include <algorithm>
-#include <limits>
-#include <list>
-#include <map>
-#include <set>
-#include <string>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/location.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "sync/engine/get_commit_ids.h"
-#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/sync_scheduler_impl.h"
-#include "sync/engine/syncer.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/traffic_recorder.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/sync_session_context.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_delete_journal.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/mock_connection_manager.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/engine/test_syncable_utils.h"
-#include "sync/test/fake_encryptor.h"
-#include "sync/test/fake_sync_encryption_handler.h"
-#include "sync/test/sessions/mock_debug_info_getter.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/extensions_activity.h"
-#include "sync/util/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeDelta;
-
-using std::count;
-using std::map;
-using std::multimap;
-using std::set;
-using std::string;
-using std::vector;
-
-namespace syncer {
-
-using syncable::BaseTransaction;
-using syncable::Blob;
-using syncable::CountEntriesWithName;
-using syncable::Directory;
-using syncable::Entry;
-using syncable::GetFirstEntryWithName;
-using syncable::GetOnlyEntryWithName;
-using syncable::Id;
-using syncable::kEncryptedString;
-using syncable::MutableEntry;
-using syncable::WriteTransaction;
-
-using syncable::BASE_VERSION;
-using syncable::CREATE;
-using syncable::GET_BY_HANDLE;
-using syncable::GET_BY_ID;
-using syncable::GET_BY_CLIENT_TAG;
-using syncable::GET_BY_SERVER_TAG;
-using syncable::ID;
-using syncable::IS_DEL;
-using syncable::IS_DIR;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::META_HANDLE;
-using syncable::MTIME;
-using syncable::NON_UNIQUE_NAME;
-using syncable::PARENT_ID;
-using syncable::BASE_SERVER_SPECIFICS;
-using syncable::SERVER_IS_DEL;
-using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_SPECIFICS;
-using syncable::SERVER_VERSION;
-using syncable::UNIQUE_CLIENT_TAG;
-using syncable::UNIQUE_SERVER_TAG;
-using syncable::SPECIFICS;
-using syncable::SYNCING;
-using syncable::UNITTEST;
-
-using sessions::MockDebugInfoGetter;
-using sessions::StatusController;
-using sessions::SyncSessionContext;
-using sessions::SyncSession;
-
-class SyncerTest : public testing::Test,
- public SyncSession::Delegate,
- public SyncEngineEventListener {
- protected:
- SyncerTest()
- : extensions_activity_(new ExtensionsActivity),
- syncer_(NULL),
- saw_syncer_event_(false),
- last_client_invalidation_hint_buffer_size_(10),
- traffic_recorder_(0, 0) {
-}
-
- // SyncSession::Delegate implementation.
- virtual void OnThrottled(const base::TimeDelta& throttle_duration) OVERRIDE {
- FAIL() << "Should not get silenced.";
- }
- virtual void OnTypesThrottled(
- ModelTypeSet types,
- const base::TimeDelta& throttle_duration) OVERRIDE {
- FAIL() << "Should not get silenced.";
- }
- virtual bool IsCurrentlyThrottled() OVERRIDE {
- return false;
- }
- virtual void OnReceivedLongPollIntervalUpdate(
- const base::TimeDelta& new_interval) OVERRIDE {
- last_long_poll_interval_received_ = new_interval;
- }
- virtual void OnReceivedShortPollIntervalUpdate(
- const base::TimeDelta& new_interval) OVERRIDE {
- last_short_poll_interval_received_ = new_interval;
- }
- virtual void OnReceivedSessionsCommitDelay(
- const base::TimeDelta& new_delay) OVERRIDE {
- last_sessions_commit_delay_seconds_ = new_delay;
- }
- virtual void OnReceivedClientInvalidationHintBufferSize(
- int size) OVERRIDE {
- last_client_invalidation_hint_buffer_size_ = size;
- }
- virtual void OnSyncProtocolError(
- const sessions::SyncSessionSnapshot& snapshot) OVERRIDE {
- }
-
- void GetWorkers(std::vector<ModelSafeWorker*>* out) {
- out->push_back(worker_.get());
- }
-
- void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) {
- // We're just testing the sync engine here, so we shunt everything to
- // the SyncerThread. Datatypes which aren't enabled aren't in the map.
- for (ModelTypeSet::Iterator it = enabled_datatypes_.First();
- it.Good(); it.Inc()) {
- (*out)[it.Get()] = GROUP_PASSIVE;
- }
- }
-
- virtual void OnSyncEngineEvent(const SyncEngineEvent& event) OVERRIDE {
- DVLOG(1) << "HandleSyncEngineEvent in unittest " << event.what_happened;
- // we only test for entry-specific events, not status changed ones.
- switch (event.what_happened) {
- case SyncEngineEvent::SYNC_CYCLE_BEGIN: // Fall through.
- case SyncEngineEvent::STATUS_CHANGED:
- case SyncEngineEvent::SYNC_CYCLE_ENDED:
- return;
- default:
- CHECK(false) << "Handling unknown error type in unit tests!!";
- }
- saw_syncer_event_ = true;
- }
-
- void ResetSession() {
- session_.reset(SyncSession::Build(context_.get(), this));
- }
-
- void SyncShareNudge() {
- ResetSession();
-
- // Pretend we've seen a local change, to make the nudge_tracker look normal.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
-
- EXPECT_TRUE(
- syncer_->NormalSyncShare(
- context_->enabled_types(),
- nudge_tracker_,
- session_.get()));
- }
-
- void SyncShareConfigure() {
- ResetSession();
- EXPECT_TRUE(syncer_->ConfigureSyncShare(
- context_->enabled_types(),
- sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- session_.get()));
- }
-
- virtual void SetUp() {
- dir_maker_.SetUp();
- mock_server_.reset(new MockConnectionManager(directory(),
- &cancelation_signal_));
- debug_info_getter_.reset(new MockDebugInfoGetter);
- EnableDatatype(BOOKMARKS);
- EnableDatatype(NIGORI);
- EnableDatatype(PREFERENCES);
- EnableDatatype(NIGORI);
- worker_ = new FakeModelWorker(GROUP_PASSIVE);
- std::vector<SyncEngineEventListener*> listeners;
- listeners.push_back(this);
-
- ModelSafeRoutingInfo routing_info;
- std::vector<ModelSafeWorker*> workers;
-
- GetModelSafeRoutingInfo(&routing_info);
- GetWorkers(&workers);
-
- context_.reset(
- new SyncSessionContext(
- mock_server_.get(), directory(), workers,
- extensions_activity_,
- listeners, debug_info_getter_.get(), &traffic_recorder_,
- true, // enable keystore encryption
- false, // force enable pre-commit GU avoidance experiment
- "fake_invalidator_client_id"));
- context_->set_routing_info(routing_info);
- syncer_ = new Syncer(&cancelation_signal_);
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- syncable::Directory::Metahandles children;
- directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
- ASSERT_EQ(0u, children.size());
- saw_syncer_event_ = false;
- root_id_ = TestIdFactory::root();
- parent_id_ = ids_.MakeServer("parent id");
- child_id_ = ids_.MakeServer("child id");
- directory()->set_store_birthday(mock_server_->store_birthday());
- mock_server_->SetKeystoreKey("encryption_key");
- }
-
- virtual void TearDown() {
- mock_server_.reset();
- delete syncer_;
- syncer_ = NULL;
- dir_maker_.TearDown();
- }
- void WriteTestDataToEntry(WriteTransaction* trans, MutableEntry* entry) {
- EXPECT_FALSE(entry->GetIsDir());
- EXPECT_FALSE(entry->GetIsDel());
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_bookmark()->set_url("http://demo/");
- specifics.mutable_bookmark()->set_favicon("PNG");
- entry->PutSpecifics(specifics);
- entry->PutIsUnsynced(true);
- }
- void VerifyTestDataInEntry(BaseTransaction* trans, Entry* entry) {
- EXPECT_FALSE(entry->GetIsDir());
- EXPECT_FALSE(entry->GetIsDel());
- VerifyTestBookmarkDataInEntry(entry);
- }
- void VerifyTestBookmarkDataInEntry(Entry* entry) {
- const sync_pb::EntitySpecifics& specifics = entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_bookmark());
- EXPECT_EQ("PNG", specifics.bookmark().favicon());
- EXPECT_EQ("http://demo/", specifics.bookmark().url());
- }
-
- void VerifyHierarchyConflictsReported(
- const sync_pb::ClientToServerMessage& message) {
- // Our request should have included a warning about hierarchy conflicts.
- const sync_pb::ClientStatus& client_status = message.client_status();
- EXPECT_TRUE(client_status.has_hierarchy_conflict_detected());
- EXPECT_TRUE(client_status.hierarchy_conflict_detected());
- }
-
- void VerifyNoHierarchyConflictsReported(
- const sync_pb::ClientToServerMessage& message) {
- // Our request should have reported no hierarchy conflicts detected.
- const sync_pb::ClientStatus& client_status = message.client_status();
- EXPECT_TRUE(client_status.has_hierarchy_conflict_detected());
- EXPECT_FALSE(client_status.hierarchy_conflict_detected());
- }
-
- void VerifyHierarchyConflictsUnspecified(
- const sync_pb::ClientToServerMessage& message) {
- // Our request should have neither confirmed nor denied hierarchy conflicts.
- const sync_pb::ClientStatus& client_status = message.client_status();
- EXPECT_FALSE(client_status.has_hierarchy_conflict_detected());
- }
-
- sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
- sync_pb::EntitySpecifics result;
- AddDefaultFieldValue(BOOKMARKS, &result);
- return result;
- }
-
- sync_pb::EntitySpecifics DefaultPreferencesSpecifics() {
- sync_pb::EntitySpecifics result;
- AddDefaultFieldValue(PREFERENCES, &result);
- return result;
- }
- // Enumeration of alterations to entries for commit ordering tests.
- enum EntryFeature {
- LIST_END = 0, // Denotes the end of the list of features from below.
- SYNCED, // Items are unsynced by default
- DELETED,
- OLD_MTIME,
- MOVED_FROM_ROOT,
- };
-
- struct CommitOrderingTest {
- // expected commit index.
- int commit_index;
- // Details about the item
- syncable::Id id;
- syncable::Id parent_id;
- EntryFeature features[10];
-
- static CommitOrderingTest MakeLastCommitItem() {
- CommitOrderingTest last_commit_item;
- last_commit_item.commit_index = -1;
- last_commit_item.id = TestIdFactory::root();
- return last_commit_item;
- }
- };
-
- void RunCommitOrderingTest(CommitOrderingTest* test) {
- map<int, syncable::Id> expected_positions;
- { // Transaction scope.
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- while (!test->id.IsRoot()) {
- if (test->commit_index >= 0) {
- map<int, syncable::Id>::value_type entry(test->commit_index,
- test->id);
- bool double_position = !expected_positions.insert(entry).second;
- ASSERT_FALSE(double_position) << "Two id's expected at one position";
- }
- string utf8_name = test->id.GetServerId();
- string name(utf8_name.begin(), utf8_name.end());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, test->parent_id, name);
-
- entry.PutId(test->id);
- if (test->id.ServerKnows()) {
- entry.PutBaseVersion(5);
- entry.PutServerVersion(5);
- entry.PutServerParentId(test->parent_id);
- }
- entry.PutIsDir(true);
- entry.PutIsUnsynced(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- // Set the time to 30 seconds in the future to reduce the chance of
- // flaky tests.
- const base::Time& now_plus_30s =
- base::Time::Now() + base::TimeDelta::FromSeconds(30);
- const base::Time& now_minus_2h =
- base::Time::Now() - base::TimeDelta::FromHours(2);
- entry.PutMtime(now_plus_30s);
- for (size_t i = 0 ; i < arraysize(test->features) ; ++i) {
- switch (test->features[i]) {
- case LIST_END:
- break;
- case SYNCED:
- entry.PutIsUnsynced(false);
- break;
- case DELETED:
- entry.PutIsDel(true);
- break;
- case OLD_MTIME:
- entry.PutMtime(now_minus_2h);
- break;
- case MOVED_FROM_ROOT:
- entry.PutServerParentId(trans.root_id());
- break;
- default:
- FAIL() << "Bad value in CommitOrderingTest list";
- }
- }
- test++;
- }
- }
- SyncShareNudge();
- ASSERT_TRUE(expected_positions.size() ==
- mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- for (size_t i = 0; i < expected_positions.size(); ++i) {
- SCOPED_TRACE(i);
- EXPECT_EQ(1u, expected_positions.count(i));
- EXPECT_EQ(expected_positions[i], mock_server_->committed_ids()[i]);
- }
- }
-
- const StatusController& status() {
- return session_->status_controller();
- }
-
- Directory* directory() {
- return dir_maker_.directory();
- }
-
- const std::string local_cache_guid() {
- return directory()->cache_guid();
- }
-
- const std::string foreign_cache_guid() {
- return "kqyg7097kro6GSUod+GSg==";
- }
-
- int64 CreateUnsyncedDirectory(const string& entry_name,
- const string& idstring) {
- return CreateUnsyncedDirectory(entry_name,
- syncable::Id::CreateFromServerId(idstring));
- }
-
- int64 CreateUnsyncedDirectory(const string& entry_name,
- const syncable::Id& id) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(
- &wtrans, CREATE, BOOKMARKS, wtrans.root_id(), entry_name);
- EXPECT_TRUE(entry.good());
- entry.PutIsUnsynced(true);
- entry.PutIsDir(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- entry.PutBaseVersion(id.ServerKnows() ? 1 : 0);
- entry.PutId(id);
- return entry.GetMetahandle();
- }
-
- void EnableDatatype(ModelType model_type) {
- enabled_datatypes_.Put(model_type);
-
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
-
- if (context_) {
- context_->set_routing_info(routing_info);
- }
-
- mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
- }
-
- void DisableDatatype(ModelType model_type) {
- enabled_datatypes_.Remove(model_type);
-
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
-
- if (context_) {
- context_->set_routing_info(routing_info);
- }
-
- mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
- }
-
- Cryptographer* GetCryptographer(syncable::BaseTransaction* trans) {
- return directory()->GetCryptographer(trans);
- }
-
- // Configures SyncSessionContext and NudgeTracker so Syncer won't call
- // GetUpdates prior to Commit. This method can be used to ensure a Commit is
- // not preceeded by GetUpdates.
- void ConfigureNoGetUpdatesRequired() {
- context_->set_server_enabled_pre_commit_update_avoidance(true);
- nudge_tracker_.OnInvalidationsEnabled();
- nudge_tracker_.RecordSuccessfulSyncCycle();
-
- ASSERT_FALSE(context_->ShouldFetchUpdatesBeforeCommit());
- ASSERT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
- }
-
- base::MessageLoop message_loop_;
-
- // Some ids to aid tests. Only the root one's value is specific. The rest
- // are named for test clarity.
- // TODO(chron): Get rid of these inbuilt IDs. They only make it
- // more confusing.
- syncable::Id root_id_;
- syncable::Id parent_id_;
- syncable::Id child_id_;
-
- TestIdFactory ids_;
-
- TestDirectorySetterUpper dir_maker_;
- FakeEncryptor encryptor_;
- scoped_refptr<ExtensionsActivity> extensions_activity_;
- scoped_ptr<MockConnectionManager> mock_server_;
- CancelationSignal cancelation_signal_;
-
- Syncer* syncer_;
-
- scoped_ptr<SyncSession> session_;
- scoped_ptr<SyncSessionContext> context_;
- bool saw_syncer_event_;
- base::TimeDelta last_short_poll_interval_received_;
- base::TimeDelta last_long_poll_interval_received_;
- base::TimeDelta last_sessions_commit_delay_seconds_;
- int last_client_invalidation_hint_buffer_size_;
- scoped_refptr<ModelSafeWorker> worker_;
-
- ModelTypeSet enabled_datatypes_;
- TrafficRecorder traffic_recorder_;
- sessions::NudgeTracker nudge_tracker_;
- scoped_ptr<MockDebugInfoGetter> debug_info_getter_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncerTest);
-};
-
-TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
- {
- Syncer::UnsyncedMetaHandles handles;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- GetUnsyncedEntries(&trans, &handles);
- }
- ASSERT_EQ(0u, handles.size());
- }
- // TODO(sync): When we can dynamically connect and disconnect the mock
- // ServerConnectionManager test disconnected GetUnsyncedEntries here. It's a
- // regression for a very old bug.
-}
-
-TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
- const ModelTypeSet throttled_types(BOOKMARKS);
- sync_pb::EntitySpecifics bookmark_data;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_data);
-
- mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutIsUnsynced(true);
- A.PutSpecifics(bookmark_data);
- A.PutNonUniqueName("bookmark");
- }
-
- // Now sync without enabling bookmarks.
- mock_server_->ExpectGetUpdatesRequestTypes(
- Difference(context_->enabled_types(), ModelTypeSet(BOOKMARKS)));
- ResetSession();
- syncer_->NormalSyncShare(
- Difference(context_->enabled_types(), ModelTypeSet(BOOKMARKS)),
- nudge_tracker_,
- session_.get());
-
- {
- // Nothing should have been committed as bookmarks is throttled.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entryA.good());
- EXPECT_TRUE(entryA.GetIsUnsynced());
- }
-
- // Sync again with bookmarks enabled.
- mock_server_->ExpectGetUpdatesRequestTypes(context_->enabled_types());
- SyncShareNudge();
- {
- // It should have been committed.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entryA.good());
- EXPECT_FALSE(entryA.GetIsUnsynced());
- }
-}
-
-// We use a macro so we can preserve the error location.
-#define VERIFY_ENTRY(id, is_unapplied, is_unsynced, prev_initialized, \
- parent_id, version, server_version, id_fac, rtrans) \
- do { \
- Entry entryA(rtrans, syncable::GET_BY_ID, id_fac.FromNumber(id)); \
- ASSERT_TRUE(entryA.good()); \
- /* We don't use EXPECT_EQ here because when the left side param is false,
- gcc 4.6 warns about converting 'false' to pointer type for argument 1. */ \
- EXPECT_TRUE(is_unsynced == entryA.GetIsUnsynced()); \
- EXPECT_TRUE(is_unapplied == entryA.GetIsUnappliedUpdate()); \
- EXPECT_TRUE(prev_initialized == \
- IsRealDataType(GetModelTypeFromSpecifics( \
- entryA.GetBaseServerSpecifics()))); \
- EXPECT_TRUE(parent_id == -1 || \
- entryA.GetParentId()== id_fac.FromNumber(parent_id)); \
- EXPECT_EQ(version, entryA.GetBaseVersion()); \
- EXPECT_EQ(server_version, entryA.GetServerVersion()); \
- } while (0)
-
-TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
- KeyParams key_params = {"localhost", "dummy", "foobar"};
- KeyParams other_params = {"localhost", "dummy", "foobar2"};
- sync_pb::EntitySpecifics bookmark, encrypted_bookmark;
- bookmark.mutable_bookmark()->set_url("url");
- bookmark.mutable_bookmark()->set_title("title");
- AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
- mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10,
- foreign_cache_guid(), "-2");
- mock_server_->AddUpdateDirectory(3, 0, "C", 10, 10,
- foreign_cache_guid(), "-3");
- mock_server_->AddUpdateDirectory(4, 0, "D", 10, 10,
- foreign_cache_guid(), "-4");
- SyncShareNudge();
- // Server side change will put A in conflict.
- mock_server_->AddUpdateDirectory(1, 0, "A", 20, 20,
- foreign_cache_guid(), "-1");
- {
- // Mark bookmarks as encrypted and set the cryptographer to have pending
- // keys.
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- Cryptographer other_cryptographer(&encryptor_);
- other_cryptographer.AddKey(other_params);
- sync_pb::EntitySpecifics specifics;
- sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
- other_cryptographer.GetKeys(nigori->mutable_encryption_keybag());
- dir_maker_.encryption_handler()->EnableEncryptEverything();
- // Set up with an old passphrase, but have pending keys
- GetCryptographer(&wtrans)->AddKey(key_params);
- GetCryptographer(&wtrans)->Encrypt(bookmark,
- encrypted_bookmark.mutable_encrypted());
- GetCryptographer(&wtrans)->SetPendingKeys(nigori->encryption_keybag());
-
- // In conflict but properly encrypted.
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutIsUnsynced(true);
- A.PutSpecifics(encrypted_bookmark);
- A.PutNonUniqueName(kEncryptedString);
- // Not in conflict and properly encrypted.
- MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- B.PutIsUnsynced(true);
- B.PutSpecifics(encrypted_bookmark);
- B.PutNonUniqueName(kEncryptedString);
- // Unencrypted specifics.
- MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(C.good());
- C.PutIsUnsynced(true);
- C.PutNonUniqueName(kEncryptedString);
- // Unencrypted non_unique_name.
- MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(D.good());
- D.PutIsUnsynced(true);
- D.PutSpecifics(encrypted_bookmark);
- D.PutNonUniqueName("not encrypted");
- }
- SyncShareNudge();
- {
- // Nothing should have commited due to bookmarks being encrypted and
- // the cryptographer having pending keys. A would have been resolved
- // as a simple conflict, but still be unsynced until the next sync cycle.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, true, false, 0, 20, 20, ids_, &rtrans);
- VERIFY_ENTRY(2, false, true, false, 0, 10, 10, ids_, &rtrans);
- VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
- VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
-
- // Resolve the pending keys.
- GetCryptographer(&rtrans)->DecryptPendingKeys(other_params);
- }
- SyncShareNudge();
- {
- // All properly encrypted and non-conflicting items should commit. "A" was
- // conflicting, but last sync cycle resolved it as simple conflict, so on
- // this sync cycle it committed succesfullly.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- // Committed successfully.
- VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
- // Committed successfully.
- VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
- // Was not properly encrypted.
- VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
- // Was not properly encrypted.
- VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
- }
- {
- // Fix the remaining items.
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(C.good());
- C.PutSpecifics(encrypted_bookmark);
- C.PutNonUniqueName(kEncryptedString);
- MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(D.good());
- D.PutSpecifics(encrypted_bookmark);
- D.PutNonUniqueName(kEncryptedString);
- }
- SyncShareNudge();
- {
- const StatusController& status_controller = session_->status_controller();
- // Expect success.
- EXPECT_EQ(status_controller.model_neutral_state().commit_result, SYNCER_OK);
- // None should be unsynced anymore.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
- VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
- VERIFY_ENTRY(3, false, false, false, 0, 11, 11, ids_, &rtrans);
- VERIFY_ENTRY(4, false, false, false, 0, 11, 11, ids_, &rtrans);
- }
-}
-
-TEST_F(SyncerTest, EncryptionAwareConflicts) {
- KeyParams key_params = {"localhost", "dummy", "foobar"};
- Cryptographer other_cryptographer(&encryptor_);
- other_cryptographer.AddKey(key_params);
- sync_pb::EntitySpecifics bookmark, encrypted_bookmark, modified_bookmark;
- bookmark.mutable_bookmark()->set_title("title");
- other_cryptographer.Encrypt(bookmark,
- encrypted_bookmark.mutable_encrypted());
- AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
- modified_bookmark.mutable_bookmark()->set_title("title2");
- other_cryptographer.Encrypt(modified_bookmark,
- modified_bookmark.mutable_encrypted());
- sync_pb::EntitySpecifics pref, encrypted_pref, modified_pref;
- pref.mutable_preference()->set_name("name");
- AddDefaultFieldValue(PREFERENCES, &encrypted_pref);
- other_cryptographer.Encrypt(pref,
- encrypted_pref.mutable_encrypted());
- modified_pref.mutable_preference()->set_name("name2");
- other_cryptographer.Encrypt(modified_pref,
- modified_pref.mutable_encrypted());
- {
- // Mark bookmarks and preferences as encrypted and set the cryptographer to
- // have pending keys.
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- sync_pb::EntitySpecifics specifics;
- sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
- other_cryptographer.GetKeys(nigori->mutable_encryption_keybag());
- dir_maker_.encryption_handler()->EnableEncryptEverything();
- GetCryptographer(&wtrans)->SetPendingKeys(nigori->encryption_keybag());
- EXPECT_TRUE(GetCryptographer(&wtrans)->has_pending_keys());
- }
-
- // We need to remember the exact position of our local items, so we can
- // make updates that do not modify those positions.
- UniquePosition pos1;
- UniquePosition pos2;
- UniquePosition pos3;
-
- mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark,
- foreign_cache_guid(), "-2");
- mock_server_->AddUpdateSpecifics(3, 1, "C", 10, 10, false, 1, bookmark,
- foreign_cache_guid(), "-3");
- mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);
- SyncShareNudge();
- {
- // Initial state. Everything is normal.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
- VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
- VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
- VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
-
- Entry entry1(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entry1.GetUniquePosition().Equals(
- entry1.GetServerUniquePosition()));
- pos1 = entry1.GetUniquePosition();
- Entry entry2(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
- pos2 = entry2.GetUniquePosition();
- Entry entry3(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
- pos3 = entry3.GetUniquePosition();
- }
-
- // Server side encryption will not be applied due to undecryptable data.
- // At this point, BASE_SERVER_SPECIFICS should be filled for all four items.
- mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 20, 20, true, 0,
- encrypted_bookmark,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 20, 20, false, 2,
- encrypted_bookmark,
- foreign_cache_guid(), "-2");
- mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 20, 20, false, 1,
- encrypted_bookmark,
- foreign_cache_guid(), "-3");
- mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 20, 20, false, 0,
- encrypted_pref,
- foreign_cache_guid(), "-4");
- SyncShareNudge();
- {
- // All should be unapplied due to being undecryptable and have a valid
- // BASE_SERVER_SPECIFICS.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, true, false, true, 0, 10, 20, ids_, &rtrans);
- VERIFY_ENTRY(2, true, false, true, 1, 10, 20, ids_, &rtrans);
- VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
- VERIFY_ENTRY(4, true, false, true, 0, 10, 20, ids_, &rtrans);
- }
-
- // Server side change that don't modify anything should not affect
- // BASE_SERVER_SPECIFICS (such as name changes and mtime changes).
- mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 30, 30, true, 0,
- encrypted_bookmark,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 30, 30, false, 2,
- encrypted_bookmark,
- foreign_cache_guid(), "-2");
- // Item 3 doesn't change.
- mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 30, 30, false, 0,
- encrypted_pref,
- foreign_cache_guid(), "-4");
- SyncShareNudge();
- {
- // Items 1, 2, and 4 should have newer server versions, 3 remains the same.
- // All should remain unapplied due to be undecryptable.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, true, false, true, 0, 10, 30, ids_, &rtrans);
- VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
- VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
- VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
- }
-
- // Positional changes, parent changes, and specifics changes should reset
- // BASE_SERVER_SPECIFICS.
- // Became unencrypted.
- mock_server_->AddUpdateSpecifics(1, 0, "A", 40, 40, true, 0, bookmark,
- foreign_cache_guid(), "-1");
- // Reordered to after item 2.
- mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 30, 30, false, 3,
- encrypted_bookmark,
- foreign_cache_guid(), "-3");
- SyncShareNudge();
- {
- // Items 2 and 4 should be the only ones with BASE_SERVER_SPECIFICS set.
- // Items 1 is now unencrypted, so should have applied normally.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, false, false, 0, 40, 40, ids_, &rtrans);
- VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
- VERIFY_ENTRY(3, true, false, false, 1, 10, 30, ids_, &rtrans);
- VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
- }
-
- // Make local changes, which should remain unsynced for items 2, 3, 4.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutSpecifics(modified_bookmark);
- A.PutNonUniqueName(kEncryptedString);
- A.PutIsUnsynced(true);
- MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- B.PutSpecifics(modified_bookmark);
- B.PutNonUniqueName(kEncryptedString);
- B.PutIsUnsynced(true);
- MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(C.good());
- C.PutSpecifics(modified_bookmark);
- C.PutNonUniqueName(kEncryptedString);
- C.PutIsUnsynced(true);
- MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(D.good());
- D.PutSpecifics(modified_pref);
- D.PutNonUniqueName(kEncryptedString);
- D.PutIsUnsynced(true);
- }
- SyncShareNudge();
- {
- // Item 1 remains unsynced due to there being pending keys.
- // Items 2, 3, 4 should remain unsynced since they were not up to date.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, true, false, 0, 40, 40, ids_, &rtrans);
- VERIFY_ENTRY(2, true, true, true, 1, 10, 30, ids_, &rtrans);
- VERIFY_ENTRY(3, true, true, false, 1, 10, 30, ids_, &rtrans);
- VERIFY_ENTRY(4, true, true, true, 0, 10, 30, ids_, &rtrans);
- }
-
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- // Resolve the pending keys.
- GetCryptographer(&rtrans)->DecryptPendingKeys(key_params);
- }
- // First cycle resolves conflicts, second cycle commits changes.
- SyncShareNudge();
- EXPECT_EQ(2, status().model_neutral_state().num_server_overwrites);
- EXPECT_EQ(1, status().model_neutral_state().num_local_overwrites);
- // We successfully commited item(s).
- EXPECT_EQ(status().model_neutral_state().commit_result, SYNCER_OK);
- SyncShareNudge();
-
- // Everything should be resolved now. The local changes should have
- // overwritten the server changes for 2 and 4, while the server changes
- // overwrote the local for entry 3.
- EXPECT_EQ(0, status().model_neutral_state().num_server_overwrites);
- EXPECT_EQ(0, status().model_neutral_state().num_local_overwrites);
- EXPECT_EQ(status().model_neutral_state().commit_result, SYNCER_OK);
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
- VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
- VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
- VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
-}
-
-#undef VERIFY_ENTRY
-
-TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Pete");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutBaseVersion(1);
- parent.PutId(parent_id_);
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent_id_, "Pete");
- ASSERT_TRUE(child.good());
- child.PutId(child_id_);
- child.PutBaseVersion(1);
- WriteTestDataToEntry(&wtrans, &child);
- }
-
- SyncShareNudge();
- ASSERT_EQ(2u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
- {
- syncable::ReadTransaction rt(FROM_HERE, directory());
- Entry entry(&rt, syncable::GET_BY_ID, child_id_);
- ASSERT_TRUE(entry.good());
- VerifyTestDataInEntry(&rt, &entry);
- }
-}
-
-TEST_F(SyncerTest, TestPurgeWhileUnsynced) {
- // Similar to above, but throw a purge operation into the mix. Bug 49278.
- syncable::Id pref_node_id = TestIdFactory::MakeServer("Tim");
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Pete");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutBaseVersion(1);
- parent.PutId(parent_id_);
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent_id_, "Pete");
- ASSERT_TRUE(child.good());
- child.PutId(child_id_);
- child.PutBaseVersion(1);
- WriteTestDataToEntry(&wtrans, &child);
-
- MutableEntry parent2(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Tim");
- ASSERT_TRUE(parent2.good());
- parent2.PutIsUnsynced(true);
- parent2.PutIsDir(true);
- parent2.PutSpecifics(DefaultPreferencesSpecifics());
- parent2.PutBaseVersion(1);
- parent2.PutId(pref_node_id);
- }
-
- directory()->PurgeEntriesWithTypeIn(ModelTypeSet(PREFERENCES),
- ModelTypeSet(),
- ModelTypeSet());
-
- SyncShareNudge();
- ASSERT_EQ(2U, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
- {
- syncable::ReadTransaction rt(FROM_HERE, directory());
- Entry entry(&rt, syncable::GET_BY_ID, child_id_);
- ASSERT_TRUE(entry.good());
- VerifyTestDataInEntry(&rt, &entry);
- }
- directory()->SaveChanges();
- {
- syncable::ReadTransaction rt(FROM_HERE, directory());
- Entry entry(&rt, syncable::GET_BY_ID, pref_node_id);
- ASSERT_FALSE(entry.good());
- }
-}
-
-TEST_F(SyncerTest, TestPurgeWhileUnapplied) {
- // Similar to above, but for unapplied items. Bug 49278.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Pete");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnappliedUpdate(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutBaseVersion(1);
- parent.PutId(parent_id_);
- }
-
- directory()->PurgeEntriesWithTypeIn(ModelTypeSet(BOOKMARKS),
- ModelTypeSet(),
- ModelTypeSet());
-
- SyncShareNudge();
- directory()->SaveChanges();
- {
- syncable::ReadTransaction rt(FROM_HERE, directory());
- Entry entry(&rt, syncable::GET_BY_ID, parent_id_);
- ASSERT_FALSE(entry.good());
- }
-}
-
-TEST_F(SyncerTest, TestPurgeWithJournal) {
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, syncable::CREATE, BOOKMARKS, wtrans.root_id(),
- "Pete");
- ASSERT_TRUE(parent.good());
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutBaseVersion(1);
- parent.PutId(parent_id_);
- MutableEntry child(&wtrans, syncable::CREATE, BOOKMARKS, parent_id_,
- "Pete");
- ASSERT_TRUE(child.good());
- child.PutId(child_id_);
- child.PutBaseVersion(1);
- WriteTestDataToEntry(&wtrans, &child);
-
- MutableEntry parent2(&wtrans, syncable::CREATE, PREFERENCES,
- wtrans.root_id(), "Tim");
- ASSERT_TRUE(parent2.good());
- parent2.PutIsDir(true);
- parent2.PutSpecifics(DefaultPreferencesSpecifics());
- parent2.PutBaseVersion(1);
- parent2.PutId(TestIdFactory::MakeServer("Tim"));
- }
-
- directory()->PurgeEntriesWithTypeIn(ModelTypeSet(PREFERENCES, BOOKMARKS),
- ModelTypeSet(BOOKMARKS),
- ModelTypeSet());
- {
- // Verify bookmark nodes are saved in delete journal but not preference
- // node.
- syncable::ReadTransaction rt(FROM_HERE, directory());
- syncable::DeleteJournal* delete_journal = directory()->delete_journal();
- EXPECT_EQ(2u, delete_journal->GetDeleteJournalSize(&rt));
- syncable::EntryKernelSet journal_entries;
- directory()->delete_journal()->GetDeleteJournals(&rt, BOOKMARKS,
- &journal_entries);
- EXPECT_EQ(parent_id_, (*journal_entries.begin())->ref(syncable::ID));
- EXPECT_EQ(child_id_, (*journal_entries.rbegin())->ref(syncable::ID));
- }
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingTwoItemsTall) {
- CommitOrderingTest items[] = {
- {1, ids_.FromNumber(-1001), ids_.FromNumber(-1000)},
- {0, ids_.FromNumber(-1000), ids_.FromNumber(0)},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTall) {
- CommitOrderingTest items[] = {
- {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
- {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
- {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingFourItemsTall) {
- CommitOrderingTest items[] = {
- {3, ids_.FromNumber(-2003), ids_.FromNumber(-2002)},
- {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
- {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
- {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTallLimitedSize) {
- context_->set_max_commit_batch_size(2);
- CommitOrderingTest items[] = {
- {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
- {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
- {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItem) {
- CommitOrderingTest items[] = {
- {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingSingleUncommittedDeletedItem) {
- CommitOrderingTest items[] = {
- {-1, ids_.FromNumber(-1000), ids_.FromNumber(0), {DELETED}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItemWithUnroll) {
- CommitOrderingTest items[] = {
- {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest,
- TestCommitListOrderingSingleLongDeletedItemWithUnroll) {
- CommitOrderingTest items[] = {
- {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingTwoLongDeletedItemWithUnroll) {
- CommitOrderingTest items[] = {
- {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrdering3LongDeletedItemsWithSizeLimit) {
- context_->set_max_commit_batch_size(2);
- CommitOrderingTest items[] = {
- {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- {1, ids_.FromNumber(1001), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- {2, ids_.FromNumber(1002), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingTwoDeletedItemsWithUnroll) {
- CommitOrderingTest items[] = {
- {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
- {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingComplexDeletionScenario) {
- CommitOrderingTest items[] = {
- { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
- {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
- {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
- {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest,
- TestCommitListOrderingComplexDeletionScenarioWith2RecentDeletes) {
- CommitOrderingTest items[] = {
- { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
- {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
- {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
- {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
- {3, ids_.FromNumber(1005), ids_.FromNumber(1003), {DELETED}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingDeleteMovedItems) {
- CommitOrderingTest items[] = {
- {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
- {0, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME,
- MOVED_FROM_ROOT}},
- CommitOrderingTest::MakeLastCommitItem(),
- };
- RunCommitOrderingTest(items);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
- const base::Time& now_minus_2h =
- base::Time::Now() - base::TimeDelta::FromHours(2);
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- {
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Bob");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(ids_.FromNumber(100));
- parent.PutBaseVersion(1);
- MutableEntry child(
- &wtrans, CREATE, BOOKMARKS, ids_.FromNumber(100), "Bob");
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutId(ids_.FromNumber(101));
- child.PutBaseVersion(1);
- MutableEntry grandchild(
- &wtrans, CREATE, BOOKMARKS, ids_.FromNumber(101), "Bob");
- ASSERT_TRUE(grandchild.good());
- grandchild.PutId(ids_.FromNumber(102));
- grandchild.PutIsUnsynced(true);
- grandchild.PutSpecifics(DefaultBookmarkSpecifics());
- grandchild.PutBaseVersion(1);
- }
- {
- // Create three deleted items which deletions we expect to be sent to the
- // server.
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Pete");
- ASSERT_TRUE(parent.good());
- parent.PutId(ids_.FromNumber(103));
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutIsDel(true);
- parent.PutBaseVersion(1);
- parent.PutMtime(now_minus_2h);
- MutableEntry child(
- &wtrans, CREATE, BOOKMARKS, ids_.FromNumber(103), "Pete");
- ASSERT_TRUE(child.good());
- child.PutId(ids_.FromNumber(104));
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutIsDel(true);
- child.PutBaseVersion(1);
- child.PutMtime(now_minus_2h);
- MutableEntry grandchild(
- &wtrans, CREATE, BOOKMARKS, ids_.FromNumber(104), "Pete");
- ASSERT_TRUE(grandchild.good());
- grandchild.PutId(ids_.FromNumber(105));
- grandchild.PutIsUnsynced(true);
- grandchild.PutIsDel(true);
- grandchild.PutIsDir(false);
- grandchild.PutSpecifics(DefaultBookmarkSpecifics());
- grandchild.PutBaseVersion(1);
- grandchild.PutMtime(now_minus_2h);
- }
- }
-
- SyncShareNudge();
- ASSERT_EQ(6u, mock_server_->committed_ids().size());
- // This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
- // It will treat these like moves.
- vector<syncable::Id> commit_ids(mock_server_->committed_ids());
- EXPECT_TRUE(ids_.FromNumber(100) == commit_ids[0]);
- EXPECT_TRUE(ids_.FromNumber(101) == commit_ids[1]);
- EXPECT_TRUE(ids_.FromNumber(102) == commit_ids[2]);
- // We don't guarantee the delete orders in this test, only that they occur
- // at the end.
- std::sort(commit_ids.begin() + 3, commit_ids.end());
- EXPECT_TRUE(ids_.FromNumber(103) == commit_ids[3]);
- EXPECT_TRUE(ids_.FromNumber(104) == commit_ids[4]);
- EXPECT_TRUE(ids_.FromNumber(105) == commit_ids[5]);
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
- syncable::Id parent1_id = ids_.MakeServer("p1");
- syncable::Id parent2_id = ids_.MakeServer("p2");
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "1");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(parent1_id);
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "2");
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutId(parent2_id);
- parent.PutBaseVersion(1);
- child.PutBaseVersion(1);
- }
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, parent1_id, "A");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(ids_.FromNumber(102));
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent1_id, "B");
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutId(ids_.FromNumber(-103));
- parent.PutBaseVersion(1);
- }
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, parent2_id, "A");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(ids_.FromNumber(-104));
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent2_id, "B");
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutId(ids_.FromNumber(105));
- child.PutBaseVersion(1);
- }
-
- SyncShareNudge();
- ASSERT_EQ(6u, mock_server_->committed_ids().size());
-
- // This strange iteration and std::count() usage is to allow the order to
- // vary. All we really care about is that parent1_id and parent2_id are the
- // first two IDs, and that the children make up the next four. Other than
- // that, ordering doesn't matter.
-
- vector<syncable::Id>::const_iterator i =
- mock_server_->committed_ids().begin();
- vector<syncable::Id>::const_iterator parents_begin = i;
- i++;
- i++;
- vector<syncable::Id>::const_iterator parents_end = i;
- vector<syncable::Id>::const_iterator children_begin = i;
- vector<syncable::Id>::const_iterator children_end =
- mock_server_->committed_ids().end();
-
- EXPECT_EQ(1, count(parents_begin, parents_end, parent1_id));
- EXPECT_EQ(1, count(parents_begin, parents_end, parent2_id));
-
- EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(-103)));
- EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(102)));
- EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(105)));
- EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(-104)));
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
- syncable::Id child2_id = ids_.NewServerId();
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "P");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(parent_id_);
- MutableEntry child1(&wtrans, CREATE, BOOKMARKS, parent_id_, "1");
- ASSERT_TRUE(child1.good());
- child1.PutIsUnsynced(true);
- child1.PutId(child_id_);
- child1.PutSpecifics(DefaultBookmarkSpecifics());
- MutableEntry child2(&wtrans, CREATE, BOOKMARKS, parent_id_, "2");
- ASSERT_TRUE(child2.good());
- child2.PutIsUnsynced(true);
- child2.PutSpecifics(DefaultBookmarkSpecifics());
- child2.PutId(child2_id);
-
- parent.PutBaseVersion(1);
- child1.PutBaseVersion(1);
- child2.PutBaseVersion(1);
- }
-
- SyncShareNudge();
- ASSERT_EQ(3u, mock_server_->committed_ids().size());
- EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- // There are two possible valid orderings.
- if (child2_id == mock_server_->committed_ids()[1]) {
- EXPECT_TRUE(child2_id == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[2]);
- } else {
- EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
- }
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
- string parent1_name = "1";
- string parent2_name = "A";
- string child_name = "B";
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(),
- parent1_name);
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(parent_id_);
- parent.PutBaseVersion(1);
- }
-
- syncable::Id parent2_id = ids_.NewLocalId();
- syncable::Id child_id = ids_.NewServerId();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent2(
- &wtrans, CREATE, BOOKMARKS, parent_id_, parent2_name);
- ASSERT_TRUE(parent2.good());
- parent2.PutIsUnsynced(true);
- parent2.PutIsDir(true);
- parent2.PutSpecifics(DefaultBookmarkSpecifics());
- parent2.PutId(parent2_id);
-
- MutableEntry child(
- &wtrans, CREATE, BOOKMARKS, parent2_id, child_name);
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutId(child_id);
- child.PutBaseVersion(1);
- }
-
- SyncShareNudge();
- ASSERT_EQ(3u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(parent2_id == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child_id == mock_server_->committed_ids()[2]);
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- // Check that things committed correctly.
- Entry entry_1(&rtrans, syncable::GET_BY_ID, parent_id_);
- EXPECT_EQ(entry_1.GetNonUniqueName(), parent1_name);
- // Check that parent2 is a subfolder of parent1.
- EXPECT_EQ(1, CountEntriesWithName(&rtrans,
- parent_id_,
- parent2_name));
-
- // Parent2 was a local ID and thus should have changed on commit!
- Entry pre_commit_entry_parent2(&rtrans, syncable::GET_BY_ID, parent2_id);
- ASSERT_FALSE(pre_commit_entry_parent2.good());
-
- // Look up the new ID.
- Id parent2_committed_id =
- GetOnlyEntryWithName(&rtrans, parent_id_, parent2_name);
- EXPECT_TRUE(parent2_committed_id.ServerKnows());
-
- Entry child(&rtrans, syncable::GET_BY_ID, child_id);
- EXPECT_EQ(parent2_committed_id, child.GetParentId());
- }
-}
-
-TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
- string parent_name = "1";
- string parent2_name = "A";
- string child_name = "B";
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans,
- CREATE, BOOKMARKS,
- wtrans.root_id(),
- parent_name);
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- parent.PutId(parent_id_);
- parent.PutBaseVersion(1);
- }
-
- int64 meta_handle_b;
- const Id parent2_local_id = ids_.NewLocalId();
- const Id child_local_id = ids_.NewLocalId();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent2(&wtrans, CREATE, BOOKMARKS, parent_id_, parent2_name);
- ASSERT_TRUE(parent2.good());
- parent2.PutIsUnsynced(true);
- parent2.PutIsDir(true);
- parent2.PutSpecifics(DefaultBookmarkSpecifics());
-
- parent2.PutId(parent2_local_id);
- MutableEntry child(
- &wtrans, CREATE, BOOKMARKS, parent2_local_id, child_name);
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- child.PutId(child_local_id);
- meta_handle_b = child.GetMetahandle();
- }
-
- SyncShareNudge();
- ASSERT_EQ(3u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(parent2_local_id == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child_local_id == mock_server_->committed_ids()[2]);
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
-
- Entry parent(&rtrans, syncable::GET_BY_ID,
- GetOnlyEntryWithName(&rtrans, rtrans.root_id(), parent_name));
- ASSERT_TRUE(parent.good());
- EXPECT_TRUE(parent.GetId().ServerKnows());
-
- Entry parent2(&rtrans, syncable::GET_BY_ID,
- GetOnlyEntryWithName(&rtrans, parent.GetId(), parent2_name));
- ASSERT_TRUE(parent2.good());
- EXPECT_TRUE(parent2.GetId().ServerKnows());
-
- // Id changed on commit, so this should fail.
- Entry local_parent2_id_entry(&rtrans,
- syncable::GET_BY_ID,
- parent2_local_id);
- ASSERT_FALSE(local_parent2_id_entry.good());
-
- Entry entry_b(&rtrans, syncable::GET_BY_HANDLE, meta_handle_b);
- EXPECT_TRUE(entry_b.GetId().ServerKnows());
- EXPECT_TRUE(parent2.GetId()== entry_b.GetParentId());
- }
-}
-
-TEST_F(SyncerTest, UpdateWithZeroLengthName) {
- // One illegal update
- mock_server_->AddUpdateDirectory(
- 1, 0, std::string(), 1, 10, foreign_cache_guid(), "-1");
- // And one legal one that we're going to delete.
- mock_server_->AddUpdateDirectory(2, 0, "FOO", 1, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- // Delete the legal one. The new update has a null name.
- mock_server_->AddUpdateDirectory(
- 2, 0, std::string(), 2, 20, foreign_cache_guid(), "-2");
- mock_server_->SetLastUpdateDeleted();
- SyncShareNudge();
-}
-
-TEST_F(SyncerTest, TestBasicUpdate) {
- string id = "some_id";
- string parent_id = "0";
- string name = "in_root";
- int64 version = 10;
- int64 timestamp = 10;
- mock_server_->AddUpdateDirectory(id, parent_id, name, version, timestamp,
- foreign_cache_guid(), "-1");
-
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- Entry entry(&trans, GET_BY_ID,
- syncable::Id::CreateFromServerId("some_id"));
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.GetIsDir());
- EXPECT_TRUE(entry.GetServerVersion()== version);
- EXPECT_TRUE(entry.GetBaseVersion()== version);
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetServerIsDel());
- EXPECT_FALSE(entry.GetIsDel());
- }
-}
-
-TEST_F(SyncerTest, IllegalAndLegalUpdates) {
- Id root = TestIdFactory::root();
- // Should apply just fine.
- mock_server_->AddUpdateDirectory(1, 0, "in_root", 10, 10,
- foreign_cache_guid(), "-1");
-
- // Same name. But this SHOULD work.
- mock_server_->AddUpdateDirectory(2, 0, "in_root", 10, 10,
- foreign_cache_guid(), "-2");
-
- // Unknown parent: should never be applied. "-80" is a legal server ID,
- // because any string sent by the server is a legal server ID in the sync
- // protocol, but it's not the ID of any item known to the client. This
- // update should succeed validation, but be stuck in the unapplied state
- // until an item with the server ID "-80" arrives.
- mock_server_->AddUpdateDirectory(3, -80, "bad_parent", 10, 10,
- foreign_cache_guid(), "-3");
-
- SyncShareNudge();
-
- // Id 3 should be in conflict now.
- EXPECT_EQ(1, status().TotalNumConflictingItems());
- EXPECT_EQ(1, status().num_hierarchy_conflicts());
-
- // The only request in that loop should have been a GetUpdate.
- // At that point, we didn't know whether or not we had conflicts.
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- VerifyHierarchyConflictsUnspecified(mock_server_->last_request());
-
- // These entries will be used in the second set of updates.
- mock_server_->AddUpdateDirectory(4, 0, "newer_version", 20, 10,
- foreign_cache_guid(), "-4");
- mock_server_->AddUpdateDirectory(5, 0, "circular1", 10, 10,
- foreign_cache_guid(), "-5");
- mock_server_->AddUpdateDirectory(6, 5, "circular2", 10, 10,
- foreign_cache_guid(), "-6");
- mock_server_->AddUpdateDirectory(9, 3, "bad_parent_child", 10, 10,
- foreign_cache_guid(), "-9");
- mock_server_->AddUpdateDirectory(100, 9, "bad_parent_child2", 10, 10,
- foreign_cache_guid(), "-100");
- mock_server_->AddUpdateDirectory(10, 0, "dir_to_bookmark", 10, 10,
- foreign_cache_guid(), "-10");
-
- SyncShareNudge();
- // The three items with an unresolved parent should be unapplied (3, 9, 100).
- // The name clash should also still be in conflict.
- EXPECT_EQ(3, status().TotalNumConflictingItems());
- EXPECT_EQ(3, status().num_hierarchy_conflicts());
-
- // This time around, we knew that there were conflicts.
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- VerifyHierarchyConflictsReported(mock_server_->last_request());
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- // Even though it has the same name, it should work.
- Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(name_clash.good());
- EXPECT_FALSE(name_clash.GetIsUnappliedUpdate())
- << "Duplicate name SHOULD be OK.";
-
- Entry bad_parent(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(bad_parent.good());
- EXPECT_TRUE(bad_parent.GetIsUnappliedUpdate())
- << "child of unknown parent should be in conflict";
-
- Entry bad_parent_child(&trans, GET_BY_ID, ids_.FromNumber(9));
- ASSERT_TRUE(bad_parent_child.good());
- EXPECT_TRUE(bad_parent_child.GetIsUnappliedUpdate())
- << "grandchild of unknown parent should be in conflict";
-
- Entry bad_parent_child2(&trans, GET_BY_ID, ids_.FromNumber(100));
- ASSERT_TRUE(bad_parent_child2.good());
- EXPECT_TRUE(bad_parent_child2.GetIsUnappliedUpdate())
- << "great-grandchild of unknown parent should be in conflict";
- }
-
- // Updating 1 should not affect item 2 of the same name.
- mock_server_->AddUpdateDirectory(1, 0, "new_name", 20, 20,
- foreign_cache_guid(), "-1");
-
- // Moving 5 under 6 will create a cycle: a conflict.
- mock_server_->AddUpdateDirectory(5, 6, "circular3", 20, 20,
- foreign_cache_guid(), "-5");
-
- // Flip the is_dir bit: should fail verify & be dropped.
- mock_server_->AddUpdateBookmark(10, 0, "dir_to_bookmark", 20, 20,
- foreign_cache_guid(), "-10");
- SyncShareNudge();
-
- // Version number older than last known: should fail verify & be dropped.
- mock_server_->AddUpdateDirectory(4, 0, "old_version", 10, 10,
- foreign_cache_guid(), "-4");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry still_a_dir(&trans, GET_BY_ID, ids_.FromNumber(10));
- ASSERT_TRUE(still_a_dir.good());
- EXPECT_FALSE(still_a_dir.GetIsUnappliedUpdate());
- EXPECT_EQ(10u, still_a_dir.GetBaseVersion());
- EXPECT_EQ(10u, still_a_dir.GetServerVersion());
- EXPECT_TRUE(still_a_dir.GetIsDir());
-
- Entry rename(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(rename.good());
- EXPECT_EQ(root, rename.GetParentId());
- EXPECT_EQ("new_name", rename.GetNonUniqueName());
- EXPECT_FALSE(rename.GetIsUnappliedUpdate());
- EXPECT_TRUE(ids_.FromNumber(1) == rename.GetId());
- EXPECT_EQ(20u, rename.GetBaseVersion());
-
- Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(name_clash.good());
- EXPECT_EQ(root, name_clash.GetParentId());
- EXPECT_TRUE(ids_.FromNumber(2) == name_clash.GetId());
- EXPECT_EQ(10u, name_clash.GetBaseVersion());
- EXPECT_EQ("in_root", name_clash.GetNonUniqueName());
-
- Entry ignored_old_version(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(ignored_old_version.good());
- EXPECT_TRUE(
- ignored_old_version.GetNonUniqueName()== "newer_version");
- EXPECT_FALSE(ignored_old_version.GetIsUnappliedUpdate());
- EXPECT_EQ(20u, ignored_old_version.GetBaseVersion());
-
- Entry circular_parent_issue(&trans, GET_BY_ID, ids_.FromNumber(5));
- ASSERT_TRUE(circular_parent_issue.good());
- EXPECT_TRUE(circular_parent_issue.GetIsUnappliedUpdate())
- << "circular move should be in conflict";
- EXPECT_TRUE(circular_parent_issue.GetParentId()== root_id_);
- EXPECT_TRUE(circular_parent_issue.GetServerParentId()==
- ids_.FromNumber(6));
- EXPECT_EQ(10u, circular_parent_issue.GetBaseVersion());
-
- Entry circular_parent_target(&trans, GET_BY_ID, ids_.FromNumber(6));
- ASSERT_TRUE(circular_parent_target.good());
- EXPECT_FALSE(circular_parent_target.GetIsUnappliedUpdate());
- EXPECT_TRUE(circular_parent_issue.GetId()==
- circular_parent_target.GetParentId());
- EXPECT_EQ(10u, circular_parent_target.GetBaseVersion());
- }
-
- EXPECT_FALSE(saw_syncer_event_);
- EXPECT_EQ(4, status().TotalNumConflictingItems());
- EXPECT_EQ(4, status().num_hierarchy_conflicts());
-}
-
-// A commit with a lost response produces an update that has to be reunited with
-// its parent.
-TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
- // Create a folder in the root.
- int64 metahandle_folder;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "new_folder");
- ASSERT_TRUE(entry.good());
- entry.PutIsDir(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- entry.PutIsUnsynced(true);
- metahandle_folder = entry.GetMetahandle();
- }
-
- // Verify it and pull the ID out of the folder.
- syncable::Id folder_id;
- int64 metahandle_entry;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_folder);
- ASSERT_TRUE(entry.good());
- folder_id = entry.GetId();
- ASSERT_TRUE(!folder_id.ServerKnows());
- }
-
- // Create an entry in the newly created folder.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, folder_id, "new_entry");
- ASSERT_TRUE(entry.good());
- metahandle_entry = entry.GetMetahandle();
- WriteTestDataToEntry(&trans, &entry);
- }
-
- // Verify it and pull the ID out of the entry.
- syncable::Id entry_id;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, syncable::GET_BY_HANDLE, metahandle_entry);
- ASSERT_TRUE(entry.good());
- EXPECT_EQ(folder_id, entry.GetParentId());
- EXPECT_EQ("new_entry", entry.GetNonUniqueName());
- entry_id = entry.GetId();
- EXPECT_TRUE(!entry_id.ServerKnows());
- VerifyTestDataInEntry(&trans, &entry);
- }
-
- // Now, to emulate a commit response failure, we just don't commit it.
- int64 new_version = 150; // any larger value.
- int64 timestamp = 20; // arbitrary value.
- syncable::Id new_folder_id =
- syncable::Id::CreateFromServerId("folder_server_id");
-
- // The following update should cause the folder to both apply the update, as
- // well as reassociate the id.
- mock_server_->AddUpdateDirectory(new_folder_id, root_id_,
- "new_folder", new_version, timestamp,
- local_cache_guid(), folder_id.GetServerId());
-
- // We don't want it accidentally committed, just the update applied.
- mock_server_->set_conflict_all_commits(true);
-
- // Alright! Apply that update!
- SyncShareNudge();
- {
- // The folder's ID should have been updated.
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry folder(&trans, GET_BY_HANDLE, metahandle_folder);
- ASSERT_TRUE(folder.good());
- EXPECT_EQ("new_folder", folder.GetNonUniqueName());
- EXPECT_TRUE(new_version == folder.GetBaseVersion());
- EXPECT_TRUE(new_folder_id == folder.GetId());
- EXPECT_TRUE(folder.GetId().ServerKnows());
- EXPECT_EQ(trans.root_id(), folder.GetParentId());
-
- // Since it was updated, the old folder should not exist.
- Entry old_dead_folder(&trans, GET_BY_ID, folder_id);
- EXPECT_FALSE(old_dead_folder.good());
-
- // The child's parent should have changed.
- Entry entry(&trans, syncable::GET_BY_HANDLE, metahandle_entry);
- ASSERT_TRUE(entry.good());
- EXPECT_EQ("new_entry", entry.GetNonUniqueName());
- EXPECT_EQ(new_folder_id, entry.GetParentId());
- EXPECT_TRUE(!entry.GetId().ServerKnows());
- VerifyTestDataInEntry(&trans, &entry);
- }
-}
-
-// A commit with a lost response produces an update that has to be reunited with
-// its parent.
-TEST_F(SyncerTest, CommitReuniteUpdate) {
- // Create an entry in the root.
- int64 entry_metahandle;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "new_entry");
- ASSERT_TRUE(entry.good());
- entry_metahandle = entry.GetMetahandle();
- WriteTestDataToEntry(&trans, &entry);
- }
-
- // Verify it and pull the ID out.
- syncable::Id entry_id;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
- ASSERT_TRUE(entry.good());
- entry_id = entry.GetId();
- EXPECT_TRUE(!entry_id.ServerKnows());
- VerifyTestDataInEntry(&trans, &entry);
- }
-
- // Now, to emulate a commit response failure, we just don't commit it.
- int64 new_version = 150; // any larger value.
- int64 timestamp = 20; // arbitrary value.
- syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
-
- // Generate an update from the server with a relevant ID reassignment.
- mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
- "new_entry", new_version, timestamp,
- local_cache_guid(), entry_id.GetServerId());
-
- // We don't want it accidentally committed, just the update applied.
- mock_server_->set_conflict_all_commits(true);
-
- // Alright! Apply that update!
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(new_version == entry.GetBaseVersion());
- EXPECT_TRUE(new_entry_id == entry.GetId());
- EXPECT_EQ("new_entry", entry.GetNonUniqueName());
- }
-}
-
-// A commit with a lost response must work even if the local entry was deleted
-// before the update is applied. We should not duplicate the local entry in
-// this case, but just create another one alongside. We may wish to examine
-// this behavior in the future as it can create hanging uploads that never
-// finish, that must be cleaned up on the server side after some time.
-TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
- // Create a entry in the root.
- int64 entry_metahandle;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "new_entry");
- ASSERT_TRUE(entry.good());
- entry_metahandle = entry.GetMetahandle();
- WriteTestDataToEntry(&trans, &entry);
- }
- // Verify it and pull the ID out.
- syncable::Id entry_id;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
- ASSERT_TRUE(entry.good());
- entry_id = entry.GetId();
- EXPECT_TRUE(!entry_id.ServerKnows());
- VerifyTestDataInEntry(&trans, &entry);
- }
-
- // Now, to emulate a commit response failure, we just don't commit it.
- int64 new_version = 150; // any larger value.
- int64 timestamp = 20; // arbitrary value.
- syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
-
- // Generate an update from the server with a relevant ID reassignment.
- mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
- "new_entry", new_version, timestamp,
- local_cache_guid(), entry_id.GetServerId());
-
- // We don't want it accidentally committed, just the update applied.
- mock_server_->set_conflict_all_commits(true);
-
- // Purposefully delete the entry now before the update application finishes.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- Id new_entry_id = GetOnlyEntryWithName(
- &trans, trans.root_id(), "new_entry");
- MutableEntry entry(&trans, GET_BY_ID, new_entry_id);
- ASSERT_TRUE(entry.good());
- entry.PutIsDel(true);
- }
-
- // Just don't CHECK fail in sync, have the update split.
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Id new_entry_id = GetOnlyEntryWithName(
- &trans, trans.root_id(), "new_entry");
- Entry entry(&trans, GET_BY_ID, new_entry_id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsDel());
-
- Entry old_entry(&trans, GET_BY_ID, entry_id);
- ASSERT_TRUE(old_entry.good());
- EXPECT_TRUE(old_entry.GetIsDel());
- }
-}
-
-// TODO(chron): Add more unsanitized name tests.
-TEST_F(SyncerTest, ConflictMatchingEntryHandlesUnsanitizedNames) {
- mock_server_->AddUpdateDirectory(1, 0, "A/A", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 0, "B/B", 10, 10,
- foreign_cache_guid(), "-2");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
-
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutIsUnsynced(true);
- A.PutIsUnappliedUpdate(true);
- A.PutServerVersion(20);
-
- MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- B.PutIsUnappliedUpdate(true);
- B.PutServerVersion(20);
- }
- SyncShareNudge();
- saw_syncer_event_ = false;
- mock_server_->set_conflict_all_commits(false);
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- EXPECT_TRUE(A.GetIsUnsynced()== false);
- EXPECT_TRUE(A.GetIsUnappliedUpdate()== false);
- EXPECT_TRUE(A.GetServerVersion()== 20);
-
- Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- EXPECT_TRUE(B.GetIsUnsynced()== false);
- EXPECT_TRUE(B.GetIsUnappliedUpdate()== false);
- EXPECT_TRUE(B.GetServerVersion()== 20);
- }
-}
-
-TEST_F(SyncerTest, ConflictMatchingEntryHandlesNormalNames) {
- mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10,
- foreign_cache_guid(), "-2");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
-
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutIsUnsynced(true);
- A.PutIsUnappliedUpdate(true);
- A.PutServerVersion(20);
-
- MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- B.PutIsUnappliedUpdate(true);
- B.PutServerVersion(20);
- }
- SyncShareNudge();
- saw_syncer_event_ = false;
- mock_server_->set_conflict_all_commits(false);
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- EXPECT_TRUE(A.GetIsUnsynced()== false);
- EXPECT_TRUE(A.GetIsUnappliedUpdate()== false);
- EXPECT_TRUE(A.GetServerVersion()== 20);
-
- Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- EXPECT_TRUE(B.GetIsUnsynced()== false);
- EXPECT_TRUE(B.GetIsUnappliedUpdate()== false);
- EXPECT_TRUE(B.GetServerVersion()== 20);
- }
-}
-
-TEST_F(SyncerTest, ReverseFolderOrderingTest) {
- mock_server_->AddUpdateDirectory(4, 3, "ggchild", 10, 10,
- foreign_cache_guid(), "-4");
- mock_server_->AddUpdateDirectory(3, 2, "gchild", 10, 10,
- foreign_cache_guid(), "-3");
- mock_server_->AddUpdateDirectory(5, 4, "gggchild", 10, 10,
- foreign_cache_guid(), "-5");
- mock_server_->AddUpdateDirectory(2, 1, "child", 10, 10,
- foreign_cache_guid(), "-2");
- mock_server_->AddUpdateDirectory(1, 0, "parent", 10, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Id child_id = GetOnlyEntryWithName(
- &trans, ids_.FromNumber(4), "gggchild");
- Entry child(&trans, GET_BY_ID, child_id);
- ASSERT_TRUE(child.good());
-}
-
-class EntryCreatedInNewFolderTest : public SyncerTest {
- public:
- void CreateFolderInBob() {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry bob(&trans,
- syncable::GET_BY_ID,
- GetOnlyEntryWithName(&trans,
- TestIdFactory::root(),
- "bob"));
- CHECK(bob.good());
-
- MutableEntry entry2(
- &trans, CREATE, BOOKMARKS, bob.GetId(), "bob");
- CHECK(entry2.good());
- entry2.PutIsDir(true);
- entry2.PutIsUnsynced(true);
- entry2.PutSpecifics(DefaultBookmarkSpecifics());
- }
-};
-
-TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "bob");
- ASSERT_TRUE(entry.good());
- entry.PutIsDir(true);
- entry.PutIsUnsynced(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- }
-
- mock_server_->SetMidCommitCallback(
- base::Bind(&EntryCreatedInNewFolderTest::CreateFolderInBob,
- base::Unretained(this)));
- SyncShareNudge();
- // We loop until no unsynced handles remain, so we will commit both ids.
- EXPECT_EQ(2u, mock_server_->committed_ids().size());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry parent_entry(&trans, syncable::GET_BY_ID,
- GetOnlyEntryWithName(&trans, TestIdFactory::root(), "bob"));
- ASSERT_TRUE(parent_entry.good());
-
- Id child_id =
- GetOnlyEntryWithName(&trans, parent_entry.GetId(), "bob");
- Entry child(&trans, syncable::GET_BY_ID, child_id);
- ASSERT_TRUE(child.good());
- EXPECT_EQ(parent_entry.GetId(), child.GetParentId());
- }
-}
-
-TEST_F(SyncerTest, NegativeIDInUpdate) {
- mock_server_->AddUpdateBookmark(-10, 0, "bad", 40, 40,
- foreign_cache_guid(), "-100");
- SyncShareNudge();
- // The negative id would make us CHECK!
-}
-
-TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
- int64 metahandle_fred;
- syncable::Id orig_id;
- {
- // Create an item.
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry fred_match(&trans, CREATE, BOOKMARKS, trans.root_id(),
- "fred_match");
- ASSERT_TRUE(fred_match.good());
- metahandle_fred = fred_match.GetMetahandle();
- orig_id = fred_match.GetId();
- WriteTestDataToEntry(&trans, &fred_match);
- }
- // Commit it.
- SyncShareNudge();
- EXPECT_EQ(1u, mock_server_->committed_ids().size());
- mock_server_->set_conflict_all_commits(true);
- syncable::Id fred_match_id;
- {
- // Now receive a change from outside.
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry fred_match(&trans, GET_BY_HANDLE, metahandle_fred);
- ASSERT_TRUE(fred_match.good());
- EXPECT_TRUE(fred_match.GetId().ServerKnows());
- fred_match_id = fred_match.GetId();
- mock_server_->AddUpdateBookmark(fred_match_id, trans.root_id(),
- "fred_match", 40, 40, local_cache_guid(), orig_id.GetServerId());
- }
- // Run the syncer.
- for (int i = 0 ; i < 30 ; ++i) {
- SyncShareNudge();
- }
-}
-
-/**
- * In the event that we have a double changed entry, that is changed on both
- * the client and the server, the conflict resolver should just drop one of
- * them and accept the other.
- */
-
-TEST_F(SyncerTest, DoublyChangedWithResolver) {
- syncable::Id local_id;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, root_id_, "Folder");
- ASSERT_TRUE(parent.good());
- parent.PutIsDir(true);
- parent.PutId(parent_id_);
- parent.PutBaseVersion(5);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent_id_, "Pete.htm");
- ASSERT_TRUE(child.good());
- local_id = child.GetId();
- child.PutId(child_id_);
- child.PutBaseVersion(10);
- WriteTestDataToEntry(&wtrans, &child);
- }
- mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Pete2.htm", 11, 10,
- local_cache_guid(), local_id.GetServerId());
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- syncable::Directory::Metahandles children;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- directory()->GetChildHandlesById(&trans, parent_id_, &children);
- // We expect the conflict resolver to preserve the local entry.
- Entry child(&trans, syncable::GET_BY_ID, child_id_);
- ASSERT_TRUE(child.good());
- EXPECT_TRUE(child.GetIsUnsynced());
- EXPECT_FALSE(child.GetIsUnappliedUpdate());
- EXPECT_TRUE(child.GetSpecifics().has_bookmark());
- EXPECT_EQ("Pete.htm", child.GetNonUniqueName());
- VerifyTestBookmarkDataInEntry(&child);
- }
-
- // Only one entry, since we just overwrite one.
- EXPECT_EQ(1u, children.size());
- saw_syncer_event_ = false;
-}
-
-// We got this repro case when someone was editing bookmarks while sync was
-// occuring. The entry had changed out underneath the user.
-TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
- const base::Time& test_time = ProtoTimeToTime(123456);
- syncable::Id local_id;
- int64 entry_metahandle;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&wtrans, CREATE, BOOKMARKS, root_id_, "Pete");
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetId().ServerKnows());
- local_id = entry.GetId();
- entry.PutIsDir(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- entry.PutIsUnsynced(true);
- entry.PutMtime(test_time);
- entry_metahandle = entry.GetMetahandle();
- }
- SyncShareNudge();
- syncable::Id id;
- int64 version;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, syncable::GET_BY_HANDLE, entry_metahandle);
- ASSERT_TRUE(entry.good());
- id = entry.GetId();
- EXPECT_TRUE(id.ServerKnows());
- version = entry.GetBaseVersion();
- }
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- update->set_originator_client_item_id(local_id.GetServerId());
- EXPECT_EQ("Pete", update->name());
- EXPECT_EQ(id.GetServerId(), update->id_string());
- EXPECT_EQ(root_id_.GetServerId(), update->parent_id_string());
- EXPECT_EQ(version, update->version());
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, syncable::GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.GetMtime()== test_time);
- }
-}
-
-TEST_F(SyncerTest, ParentAndChildBothMatch) {
- const FullModelTypeSet all_types = FullModelTypeSet::All();
- syncable::Id parent_id = ids_.NewServerId();
- syncable::Id child_id = ids_.NewServerId();
- syncable::Id parent_local_id;
- syncable::Id child_local_id;
-
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, root_id_, "Folder");
- ASSERT_TRUE(parent.good());
- parent_local_id = parent.GetId();
- parent.PutIsDir(true);
- parent.PutIsUnsynced(true);
- parent.PutId(parent_id);
- parent.PutBaseVersion(1);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
-
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent.GetId(), "test.htm");
- ASSERT_TRUE(child.good());
- child_local_id = child.GetId();
- child.PutId(child_id);
- child.PutBaseVersion(1);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- WriteTestDataToEntry(&wtrans, &child);
- }
- mock_server_->AddUpdateDirectory(parent_id, root_id_, "Folder", 10, 10,
- local_cache_guid(),
- parent_local_id.GetServerId());
- mock_server_->AddUpdateBookmark(child_id, parent_id, "test.htm", 10, 10,
- local_cache_guid(),
- child_local_id.GetServerId());
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- SyncShareNudge();
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Directory::Metahandles children;
- directory()->GetChildHandlesById(&trans, root_id_, &children);
- EXPECT_EQ(1u, children.size());
- directory()->GetChildHandlesById(&trans, parent_id, &children);
- EXPECT_EQ(1u, children.size());
- std::vector<int64> unapplied;
- directory()->GetUnappliedUpdateMetaHandles(&trans, all_types, &unapplied);
- EXPECT_EQ(0u, unapplied.size());
- syncable::Directory::Metahandles unsynced;
- directory()->GetUnsyncedMetaHandles(&trans, &unsynced);
- EXPECT_EQ(0u, unsynced.size());
- saw_syncer_event_ = false;
- }
-}
-
-TEST_F(SyncerTest, CommittingNewDeleted) {
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "bob");
- entry.PutIsUnsynced(true);
- entry.PutIsDel(true);
- }
- SyncShareNudge();
- EXPECT_EQ(0u, mock_server_->committed_ids().size());
-}
-
-// Original problem synopsis:
-// Check failed: entry->GetBaseVersion()<= entry->GetServerVersion()
-// Client creates entry, client finishes committing entry. Between
-// commit and getting update back, we delete the entry.
-// We get the update for the entry, but the local one was modified
-// so we store the entry but don't apply it. IS_UNAPPLIED_UPDATE is set.
-// We commit deletion and get a new version number.
-// We apply unapplied updates again before we get the update about the deletion.
-// This means we have an unapplied update where server_version < base_version.
-TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
- // This test is a little fake.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "bob");
- entry.PutId(ids_.FromNumber(20));
- entry.PutBaseVersion(1);
- entry.PutServerVersion(1);
- entry.PutServerParentId(ids_.FromNumber(9999)); // Bad parent.
- entry.PutIsUnsynced(true);
- entry.PutIsUnappliedUpdate(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- entry.PutServerSpecifics(DefaultBookmarkSpecifics());
- entry.PutIsDel(false);
- }
- SyncShareNudge();
- EXPECT_EQ(1, session_->status_controller().TotalNumConflictingItems());
- saw_syncer_event_ = false;
-}
-
-// Original problem synopsis:
-// Illegal parent
-// Unexpected error during sync if we:
-// make a new folder bob
-// wait for sync
-// make a new folder fred
-// move bob into fred
-// remove bob
-// remove fred
-// if no syncing occured midway, bob will have an illegal parent
-TEST_F(SyncerTest, DeletingEntryInFolder) {
- // This test is a little fake.
- int64 existing_metahandle;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "existing");
- ASSERT_TRUE(entry.good());
- entry.PutIsDir(true);
- entry.PutSpecifics(DefaultBookmarkSpecifics());
- entry.PutIsUnsynced(true);
- existing_metahandle = entry.GetMetahandle();
- }
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry newfolder(&trans, CREATE, BOOKMARKS, trans.root_id(), "new");
- ASSERT_TRUE(newfolder.good());
- newfolder.PutIsDir(true);
- newfolder.PutSpecifics(DefaultBookmarkSpecifics());
- newfolder.PutIsUnsynced(true);
-
- MutableEntry existing(&trans, GET_BY_HANDLE, existing_metahandle);
- ASSERT_TRUE(existing.good());
- existing.PutParentId(newfolder.GetId());
- existing.PutIsUnsynced(true);
- EXPECT_TRUE(existing.GetId().ServerKnows());
-
- newfolder.PutIsDel(true);
- existing.PutIsDel(true);
- }
- SyncShareNudge();
- EXPECT_EQ(0, status().num_server_conflicts());
-}
-
-TEST_F(SyncerTest, DeletingEntryWithLocalEdits) {
- int64 newfolder_metahandle;
-
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry newfolder(
- &trans, CREATE, BOOKMARKS, ids_.FromNumber(1), "local");
- ASSERT_TRUE(newfolder.good());
- newfolder.PutIsUnsynced(true);
- newfolder.PutIsDir(true);
- newfolder.PutSpecifics(DefaultBookmarkSpecifics());
- newfolder_metahandle = newfolder.GetMetahandle();
- }
- mock_server_->AddUpdateDirectory(1, 0, "bob", 2, 20,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateDeleted();
- SyncShareConfigure();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, syncable::GET_BY_HANDLE, newfolder_metahandle);
- ASSERT_TRUE(entry.good());
- }
-}
-
-TEST_F(SyncerTest, FolderSwapUpdate) {
- mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10,
- foreign_cache_guid(), "-7801");
- mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10,
- foreign_cache_guid(), "-1024");
- SyncShareNudge();
- mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20,
- foreign_cache_guid(), "-1024");
- mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20,
- foreign_cache_guid(), "-7801");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE("fred" == id1.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id1.GetParentId());
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE("bob" == id2.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id2.GetParentId());
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, NameCollidingFolderSwapWorksFine) {
- mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10,
- foreign_cache_guid(), "-7801");
- mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10,
- foreign_cache_guid(), "-1024");
- mock_server_->AddUpdateDirectory(4096, 0, "alice", 1, 10,
- foreign_cache_guid(), "-4096");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE("bob" == id1.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id1.GetParentId());
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE("fred" == id2.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id2.GetParentId());
- Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
- ASSERT_TRUE(id3.good());
- EXPECT_TRUE("alice" == id3.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id3.GetParentId());
- }
- mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20,
- foreign_cache_guid(), "-1024");
- mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20,
- foreign_cache_guid(), "-7801");
- mock_server_->AddUpdateDirectory(4096, 0, "bob", 2, 20,
- foreign_cache_guid(), "-4096");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE("fred" == id1.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id1.GetParentId());
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE("bob" == id2.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id2.GetParentId());
- Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
- ASSERT_TRUE(id3.good());
- EXPECT_TRUE("bob" == id3.GetNonUniqueName());
- EXPECT_TRUE(root_id_ == id3.GetParentId());
- }
- saw_syncer_event_ = false;
-}
-
-// Committing more than kDefaultMaxCommitBatchSize items requires that
-// we post more than one commit command to the server. This test makes
-// sure that scenario works as expected.
-TEST_F(SyncerTest, CommitManyItemsInOneGo_Success) {
- uint32 num_batches = 3;
- uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- for (uint32 i = 0; i < items_to_commit; i++) {
- string nameutf8 = base::StringPrintf("%d", i);
- string name(nameutf8.begin(), nameutf8.end());
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), name);
- e.PutIsUnsynced(true);
- e.PutIsDir(true);
- e.PutSpecifics(DefaultBookmarkSpecifics());
- }
- }
- ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
-
- SyncShareNudge();
- EXPECT_EQ(num_batches, mock_server_->commit_messages().size());
- EXPECT_EQ(0, directory()->unsynced_entity_count());
-}
-
-// Test that a single failure to contact the server will cause us to exit the
-// commit loop immediately.
-TEST_F(SyncerTest, CommitManyItemsInOneGo_PostBufferFail) {
- uint32 num_batches = 3;
- uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- for (uint32 i = 0; i < items_to_commit; i++) {
- string nameutf8 = base::StringPrintf("%d", i);
- string name(nameutf8.begin(), nameutf8.end());
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), name);
- e.PutIsUnsynced(true);
- e.PutIsDir(true);
- e.PutSpecifics(DefaultBookmarkSpecifics());
- }
- }
- ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
-
- // The second commit should fail. It will be preceded by one successful
- // GetUpdate and one succesful commit.
- mock_server_->FailNthPostBufferToPathCall(3);
- SyncShareNudge();
-
- EXPECT_EQ(1U, mock_server_->commit_messages().size());
- EXPECT_EQ(SYNC_SERVER_ERROR,
- session_->status_controller().model_neutral_state().commit_result);
- EXPECT_EQ(items_to_commit - kDefaultMaxCommitBatchSize,
- directory()->unsynced_entity_count());
-}
-
-// Test that a single conflict response from the server will cause us to exit
-// the commit loop immediately.
-TEST_F(SyncerTest, CommitManyItemsInOneGo_CommitConflict) {
- uint32 num_batches = 2;
- uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- for (uint32 i = 0; i < items_to_commit; i++) {
- string nameutf8 = base::StringPrintf("%d", i);
- string name(nameutf8.begin(), nameutf8.end());
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), name);
- e.PutIsUnsynced(true);
- e.PutIsDir(true);
- e.PutSpecifics(DefaultBookmarkSpecifics());
- }
- }
- ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
-
- // Return a CONFLICT response for the first item.
- mock_server_->set_conflict_n_commits(1);
- SyncShareNudge();
-
- // We should stop looping at the first sign of trouble.
- EXPECT_EQ(1U, mock_server_->commit_messages().size());
- EXPECT_EQ(items_to_commit - (kDefaultMaxCommitBatchSize - 1),
- directory()->unsynced_entity_count());
-}
-
-// Tests that sending debug info events works.
-TEST_F(SyncerTest, SendDebugInfoEventsOnGetUpdates_HappyCase) {
- debug_info_getter_->AddDebugEvent();
- debug_info_getter_->AddDebugEvent();
-
- SyncShareNudge();
-
- // Verify we received one GetUpdates request with two debug info events.
- EXPECT_EQ(1U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- EXPECT_EQ(2, mock_server_->last_request().debug_info().events_size());
-
- SyncShareNudge();
-
- // See that we received another GetUpdates request, but that it contains no
- // debug info events.
- EXPECT_EQ(2U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
-
- debug_info_getter_->AddDebugEvent();
-
- SyncShareNudge();
-
- // See that we received another GetUpdates request and it contains one debug
- // info event.
- EXPECT_EQ(3U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
-}
-
-// Tests that debug info events are dropped on server error.
-TEST_F(SyncerTest, SendDebugInfoEventsOnGetUpdates_PostFailsDontDrop) {
- debug_info_getter_->AddDebugEvent();
- debug_info_getter_->AddDebugEvent();
-
- mock_server_->FailNextPostBufferToPathCall();
- SyncShareNudge();
-
- // Verify we attempted to send one GetUpdates request with two debug info
- // events.
- EXPECT_EQ(1U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- EXPECT_EQ(2, mock_server_->last_request().debug_info().events_size());
-
- SyncShareNudge();
-
- // See that the client resent the two debug info events.
- EXPECT_EQ(2U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- EXPECT_EQ(2, mock_server_->last_request().debug_info().events_size());
-
- // The previous send was successful so this next one shouldn't generate any
- // debug info events.
- SyncShareNudge();
- EXPECT_EQ(3U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_get_updates());
- EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
-}
-
-// Tests that sending debug info events on Commit works.
-TEST_F(SyncerTest, SendDebugInfoEventsOnCommit_HappyCase) {
- // Make sure GetUpdate isn't call as it would "steal" debug info events before
- // Commit has a chance to send them.
- ConfigureNoGetUpdatesRequired();
-
- // Generate a debug info event and trigger a commit.
- debug_info_getter_->AddDebugEvent();
- CreateUnsyncedDirectory("X", "id_X");
- SyncShareNudge();
-
- // Verify that the last request received is a Commit and that it contains a
- // debug info event.
- EXPECT_EQ(1U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_commit());
- EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
-
- // Generate another commit, but no debug info event.
- CreateUnsyncedDirectory("Y", "id_Y");
- SyncShareNudge();
-
- // See that it was received and contains no debug info events.
- EXPECT_EQ(2U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_commit());
- EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
-}
-
-// Tests that debug info events are not dropped on server error.
-TEST_F(SyncerTest, SendDebugInfoEventsOnCommit_PostFailsDontDrop) {
- // Make sure GetUpdate isn't call as it would "steal" debug info events before
- // Commit has a chance to send them.
- ConfigureNoGetUpdatesRequired();
-
- mock_server_->FailNextPostBufferToPathCall();
-
- // Generate a debug info event and trigger a commit.
- debug_info_getter_->AddDebugEvent();
- CreateUnsyncedDirectory("X", "id_X");
- SyncShareNudge();
-
- // Verify that the last request sent is a Commit and that it contains a debug
- // info event.
- EXPECT_EQ(1U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_commit());
- EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
-
- // Try again.
- SyncShareNudge();
-
- // Verify that we've received another Commit and that it contains a debug info
- // event (just like the previous one).
- EXPECT_EQ(2U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_commit());
- EXPECT_EQ(1, mock_server_->last_request().debug_info().events_size());
-
- // Generate another commit and try again.
- CreateUnsyncedDirectory("Y", "id_Y");
- SyncShareNudge();
-
- // See that it was received and contains no debug info events.
- EXPECT_EQ(3U, mock_server_->requests().size());
- ASSERT_TRUE(mock_server_->last_request().has_commit());
- EXPECT_EQ(0, mock_server_->last_request().debug_info().events_size());
-}
-
-TEST_F(SyncerTest, HugeConflict) {
- int item_count = 300; // We should be able to do 300 or 3000 w/o issue.
-
- syncable::Id parent_id = ids_.NewServerId();
- syncable::Id last_id = parent_id;
- vector<syncable::Id> tree_ids;
-
- // Create a lot of updates for which the parent does not exist yet.
- // Generate a huge deep tree which should all fail to apply at first.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- for (int i = 0; i < item_count ; i++) {
- syncable::Id next_id = ids_.NewServerId();
- syncable::Id local_id = ids_.NewLocalId();
- tree_ids.push_back(next_id);
- mock_server_->AddUpdateDirectory(next_id, last_id, "BOB", 2, 20,
- foreign_cache_guid(),
- local_id.GetServerId());
- last_id = next_id;
- }
- }
- SyncShareNudge();
-
- // Check they're in the expected conflict state.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- for (int i = 0; i < item_count; i++) {
- Entry e(&trans, GET_BY_ID, tree_ids[i]);
- // They should all exist but none should be applied.
- ASSERT_TRUE(e.good());
- EXPECT_TRUE(e.GetIsDel());
- EXPECT_TRUE(e.GetIsUnappliedUpdate());
- }
- }
-
- // Add the missing parent directory.
- mock_server_->AddUpdateDirectory(parent_id, TestIdFactory::root(),
- "BOB", 2, 20, foreign_cache_guid(), "-3500");
- SyncShareNudge();
-
- // Now they should all be OK.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- for (int i = 0; i < item_count; i++) {
- Entry e(&trans, GET_BY_ID, tree_ids[i]);
- ASSERT_TRUE(e.good());
- EXPECT_FALSE(e.GetIsDel());
- EXPECT_FALSE(e.GetIsUnappliedUpdate());
- }
- }
-}
-
-TEST_F(SyncerTest, DontCrashOnCaseChange) {
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry e(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(e.good());
- e.PutIsUnsynced(true);
- }
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateDirectory(1, 0, "BOB", 2, 20,
- foreign_cache_guid(), "-1");
- SyncShareNudge(); // USED TO CAUSE AN ASSERT
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, UnsyncedItemAndUpdate) {
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateDirectory(2, 0, "bob", 2, 20,
- foreign_cache_guid(), "-2");
- SyncShareNudge(); // USED TO CAUSE AN ASSERT
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath) {
- mock_server_->AddUpdateBookmark(1, 0, "Foo.htm", 10, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- int64 local_folder_handle;
- syncable::Id local_folder_id;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry new_entry(
- &wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Bar.htm");
- ASSERT_TRUE(new_entry.good());
- local_folder_id = new_entry.GetId();
- local_folder_handle = new_entry.GetMetahandle();
- new_entry.PutIsUnsynced(true);
- new_entry.PutSpecifics(DefaultBookmarkSpecifics());
- MutableEntry old(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(old.good());
- WriteTestDataToEntry(&wtrans, &old);
- }
- mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 20, 20,
- foreign_cache_guid(), "-1");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- saw_syncer_event_ = false;
- {
- // Update #20 should have been dropped in favor of the local version.
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
- ASSERT_TRUE(server.good());
- ASSERT_TRUE(local.good());
- EXPECT_TRUE(local.GetMetahandle()!= server.GetMetahandle());
- EXPECT_FALSE(server.GetIsUnappliedUpdate());
- EXPECT_FALSE(local.GetIsUnappliedUpdate());
- EXPECT_TRUE(server.GetIsUnsynced());
- EXPECT_TRUE(local.GetIsUnsynced());
- EXPECT_EQ("Foo.htm", server.GetNonUniqueName());
- EXPECT_EQ("Bar.htm", local.GetNonUniqueName());
- }
- // Allow local changes to commit.
- mock_server_->set_conflict_all_commits(false);
- SyncShareNudge();
- saw_syncer_event_ = false;
-
- // Now add a server change to make the two names equal. There should
- // be no conflict with that, since names are not unique.
- mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 30, 30,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- saw_syncer_event_ = false;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
- ASSERT_TRUE(server.good());
- ASSERT_TRUE(local.good());
- EXPECT_TRUE(local.GetMetahandle()!= server.GetMetahandle());
- EXPECT_FALSE(server.GetIsUnappliedUpdate());
- EXPECT_FALSE(local.GetIsUnappliedUpdate());
- EXPECT_FALSE(server.GetIsUnsynced());
- EXPECT_FALSE(local.GetIsUnsynced());
- EXPECT_EQ("Bar.htm", server.GetNonUniqueName());
- EXPECT_EQ("Bar.htm", local.GetNonUniqueName());
- EXPECT_EQ("http://google.com", // Default from AddUpdateBookmark.
- server.GetSpecifics().bookmark().url());
- }
-}
-
-// Same as NewEntryAnddServerEntrySharePath, but using the old-style protocol.
-TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath_OldBookmarksProto) {
- mock_server_->set_use_legacy_bookmarks_protocol(true);
- mock_server_->AddUpdateBookmark(1, 0, "Foo.htm", 10, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- int64 local_folder_handle;
- syncable::Id local_folder_id;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry new_entry(
- &wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Bar.htm");
- ASSERT_TRUE(new_entry.good());
- local_folder_id = new_entry.GetId();
- local_folder_handle = new_entry.GetMetahandle();
- new_entry.PutIsUnsynced(true);
- new_entry.PutSpecifics(DefaultBookmarkSpecifics());
- MutableEntry old(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(old.good());
- WriteTestDataToEntry(&wtrans, &old);
- }
- mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 20, 20,
- foreign_cache_guid(), "-1");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- saw_syncer_event_ = false;
- {
- // Update #20 should have been dropped in favor of the local version.
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
- ASSERT_TRUE(server.good());
- ASSERT_TRUE(local.good());
- EXPECT_TRUE(local.GetMetahandle()!= server.GetMetahandle());
- EXPECT_FALSE(server.GetIsUnappliedUpdate());
- EXPECT_FALSE(local.GetIsUnappliedUpdate());
- EXPECT_TRUE(server.GetIsUnsynced());
- EXPECT_TRUE(local.GetIsUnsynced());
- EXPECT_EQ("Foo.htm", server.GetNonUniqueName());
- EXPECT_EQ("Bar.htm", local.GetNonUniqueName());
- }
- // Allow local changes to commit.
- mock_server_->set_conflict_all_commits(false);
- SyncShareNudge();
- saw_syncer_event_ = false;
-
- // Now add a server change to make the two names equal. There should
- // be no conflict with that, since names are not unique.
- mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 30, 30,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- saw_syncer_event_ = false;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
- ASSERT_TRUE(server.good());
- ASSERT_TRUE(local.good());
- EXPECT_TRUE(local.GetMetahandle()!= server.GetMetahandle());
- EXPECT_FALSE(server.GetIsUnappliedUpdate());
- EXPECT_FALSE(local.GetIsUnappliedUpdate());
- EXPECT_FALSE(server.GetIsUnsynced());
- EXPECT_FALSE(local.GetIsUnsynced());
- EXPECT_EQ("Bar.htm", server.GetNonUniqueName());
- EXPECT_EQ("Bar.htm", local.GetNonUniqueName());
- EXPECT_EQ("http://google.com", // Default from AddUpdateBookmark.
- server.GetSpecifics().bookmark().url());
- }
-}
-
-// Circular links should be resolved by the server.
-TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
- // we don't currently resolve this. This test ensures we don't.
- mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutIsUnsynced(true);
- A.PutParentId(ids_.FromNumber(2));
- A.PutNonUniqueName("B");
- }
- mock_server_->AddUpdateDirectory(2, 1, "A", 20, 20,
- foreign_cache_guid(), "-2");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- saw_syncer_event_ = false;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- EXPECT_TRUE(A.GetNonUniqueName()== "B");
- EXPECT_TRUE(B.GetNonUniqueName()== "B");
- }
-}
-
-TEST_F(SyncerTest, SwapEntryNames) {
- // Simple transaction test.
- mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10,
- foreign_cache_guid(), "-2");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(A.good());
- A.PutIsUnsynced(true);
- MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- B.PutIsUnsynced(true);
- A.PutNonUniqueName("C");
- B.PutNonUniqueName("A");
- A.PutNonUniqueName("B");
- }
- SyncShareNudge();
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, DualDeletionWithNewItemNameClash) {
- mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateBookmark(2, 0, "B", 10, 10,
- foreign_cache_guid(), "-2");
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry B(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- WriteTestDataToEntry(&trans, &B);
- B.PutIsDel(true);
- }
- mock_server_->AddUpdateBookmark(2, 0, "A", 11, 11,
- foreign_cache_guid(), "-2");
- mock_server_->SetLastUpdateDeleted();
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(B.good());
- EXPECT_FALSE(B.GetIsUnsynced());
- EXPECT_FALSE(B.GetIsUnappliedUpdate());
- }
- saw_syncer_event_ = false;
-}
-
-// When we undelete an entity as a result of conflict resolution, we reuse the
-// existing server id and preserve the old version, simply updating the server
-// version with the new non-deleted entity.
-TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
- int64 bob_metahandle;
-
- mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob_metahandle = bob.GetMetahandle();
- WriteTestDataToEntry(&trans, &bob);
- }
- mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry bob(&trans, GET_BY_HANDLE, bob_metahandle);
- ASSERT_TRUE(bob.good());
- EXPECT_TRUE(bob.GetIsUnsynced());
- EXPECT_TRUE(bob.GetId().ServerKnows());
- EXPECT_FALSE(bob.GetIsUnappliedUpdate());
- EXPECT_FALSE(bob.GetIsDel());
- EXPECT_EQ(2, bob.GetServerVersion());
- EXPECT_EQ(2, bob.GetBaseVersion());
- }
- saw_syncer_event_ = false;
-}
-
-// This test is to reproduce a check failure. Sometimes we would get a bad ID
-// back when creating an entry.
-TEST_F(SyncerTest, DuplicateIDReturn) {
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "bob");
- ASSERT_TRUE(folder.good());
- folder.PutIsUnsynced(true);
- folder.PutIsDir(true);
- folder.PutSpecifics(DefaultBookmarkSpecifics());
- MutableEntry folder2(&trans, CREATE, BOOKMARKS, trans.root_id(), "fred");
- ASSERT_TRUE(folder2.good());
- folder2.PutIsUnsynced(false);
- folder2.PutIsDir(true);
- folder2.PutSpecifics(DefaultBookmarkSpecifics());
- folder2.PutBaseVersion(3);
- folder2.PutId(syncable::Id::CreateFromServerId("mock_server:10000"));
- }
- mock_server_->set_next_new_id(10000);
- EXPECT_EQ(1u, directory()->unsynced_entity_count());
- // we get back a bad id in here (should never happen).
- SyncShareNudge();
- EXPECT_EQ(1u, directory()->unsynced_entity_count());
- SyncShareNudge(); // another bad id in here.
- EXPECT_EQ(0u, directory()->unsynced_entity_count());
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- // This is valid, because the parent could have gone away a long time ago.
- bob.PutParentId(ids_.FromNumber(54));
- bob.PutIsDel(true);
- bob.PutIsUnsynced(true);
- }
- mock_server_->AddUpdateDirectory(2, 1, "fred", 1, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- SyncShareNudge();
-}
-
-TEST_F(SyncerTest, ConflictResolverMergesLocalDeleteAndServerUpdate) {
- syncable::Id local_id;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
-
- MutableEntry local_deleted(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "name");
- local_id = local_deleted.GetId();
- local_deleted.PutId(ids_.FromNumber(1));
- local_deleted.PutBaseVersion(1);
- local_deleted.PutIsDel(true);
- local_deleted.PutIsDir(false);
- local_deleted.PutIsUnsynced(true);
- local_deleted.PutSpecifics(DefaultBookmarkSpecifics());
- }
-
- mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10,
- local_cache_guid(),
- local_id.GetServerId());
-
- // We don't care about actually committing, just the resolution.
- mock_server_->set_conflict_all_commits(true);
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
- EXPECT_TRUE(local_deleted.GetBaseVersion()== 10);
- EXPECT_TRUE(local_deleted.GetIsUnappliedUpdate()== false);
- EXPECT_TRUE(local_deleted.GetIsUnsynced()== true);
- EXPECT_TRUE(local_deleted.GetIsDel()== true);
- EXPECT_TRUE(local_deleted.GetIsDir()== false);
- }
-}
-
-// See what happens if the IS_DIR bit gets flipped. This can cause us
-// all kinds of disasters.
-TEST_F(SyncerTest, UpdateFlipsTheFolderBit) {
- // Local object: a deleted directory (container), revision 1, unsynced.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
-
- MutableEntry local_deleted(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "name");
- local_deleted.PutId(ids_.FromNumber(1));
- local_deleted.PutBaseVersion(1);
- local_deleted.PutIsDel(true);
- local_deleted.PutIsDir(true);
- local_deleted.PutIsUnsynced(true);
- local_deleted.PutSpecifics(DefaultBookmarkSpecifics());
- }
-
- // Server update: entry-type object (not a container), revision 10.
- mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10,
- local_cache_guid(),
- ids_.FromNumber(1).GetServerId());
-
- // Don't attempt to commit.
- mock_server_->set_conflict_all_commits(true);
-
- // The syncer should not attempt to apply the invalid update.
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
- EXPECT_TRUE(local_deleted.GetBaseVersion()== 1);
- EXPECT_TRUE(local_deleted.GetIsUnappliedUpdate()== false);
- EXPECT_TRUE(local_deleted.GetIsUnsynced()== true);
- EXPECT_TRUE(local_deleted.GetIsDel()== true);
- EXPECT_TRUE(local_deleted.GetIsDir()== true);
- }
-}
-
-// Bug Synopsis:
-// Merge conflict resolution will merge a new local entry with another entry
-// that needs updates, resulting in CHECK.
-TEST_F(SyncerTest, MergingExistingItems) {
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10,
- local_cache_guid(), "-1");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "Copy of base");
- WriteTestDataToEntry(&trans, &entry);
- }
- mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50,
- local_cache_guid(), "-1");
- SyncShareNudge();
-}
-
-// In this test a long changelog contains a child at the start of the changelog
-// and a parent at the end. While these updates are in progress the client would
-// appear stuck.
-TEST_F(SyncerTest, LongChangelistWithApplicationConflict) {
- const int depth = 400;
- syncable::Id folder_id = ids_.FromNumber(1);
-
- // First we an item in a folder in the root. However the folder won't come
- // till much later.
- syncable::Id stuck_entry_id = TestIdFactory::FromNumber(99999);
- mock_server_->AddUpdateDirectory(stuck_entry_id,
- folder_id, "stuck", 1, 1,
- foreign_cache_guid(), "-99999");
- mock_server_->SetChangesRemaining(depth - 1);
- SyncShareNudge();
-
- // Buffer up a very long series of downloads.
- // We should never be stuck (conflict resolution shouldn't
- // kick in so long as we're making forward progress).
- for (int i = 0; i < depth; i++) {
- mock_server_->NextUpdateBatch();
- mock_server_->SetNewTimestamp(i + 1);
- mock_server_->SetChangesRemaining(depth - i);
- }
-
- SyncShareNudge();
-
- // Ensure our folder hasn't somehow applied.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry child(&trans, GET_BY_ID, stuck_entry_id);
- EXPECT_TRUE(child.good());
- EXPECT_TRUE(child.GetIsUnappliedUpdate());
- EXPECT_TRUE(child.GetIsDel());
- EXPECT_FALSE(child.GetIsUnsynced());
- }
-
- // And finally the folder.
- mock_server_->AddUpdateDirectory(folder_id,
- TestIdFactory::root(), "folder", 1, 1,
- foreign_cache_guid(), "-1");
- mock_server_->SetChangesRemaining(0);
- SyncShareNudge();
- SyncShareNudge();
- // Check that everything is as expected after the commit.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, folder_id);
- ASSERT_TRUE(entry.good());
- Entry child(&trans, GET_BY_ID, stuck_entry_id);
- EXPECT_EQ(entry.GetId(), child.GetParentId());
- EXPECT_EQ("stuck", child.GetNonUniqueName());
- EXPECT_TRUE(child.good());
- }
-}
-
-TEST_F(SyncerTest, DontMergeTwoExistingItems) {
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateBookmark(2, 0, "base2", 10, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entry.good());
- entry.PutNonUniqueName("Copy of base");
- entry.PutIsUnsynced(true);
- }
- mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry1(&trans, GET_BY_ID, ids_.FromNumber(1));
- EXPECT_FALSE(entry1.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry1.GetIsUnsynced());
- EXPECT_FALSE(entry1.GetIsDel());
- Entry entry2(&trans, GET_BY_ID, ids_.FromNumber(2));
- EXPECT_FALSE(entry2.GetIsUnappliedUpdate());
- EXPECT_TRUE(entry2.GetIsUnsynced());
- EXPECT_FALSE(entry2.GetIsDel());
- EXPECT_EQ(entry1.GetNonUniqueName(), entry2.GetNonUniqueName());
- }
-}
-
-TEST_F(SyncerTest, TestUndeleteUpdate) {
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 1, "bar", 1, 2,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- mock_server_->AddUpdateDirectory(2, 1, "bar", 2, 3,
- foreign_cache_guid(), "-2");
- mock_server_->SetLastUpdateDeleted();
- SyncShareNudge();
-
- int64 metahandle;
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.GetIsDel());
- metahandle = entry.GetMetahandle();
- }
- mock_server_->AddUpdateDirectory(1, 0, "foo", 2, 4,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateDeleted();
- SyncShareNudge();
- // This used to be rejected as it's an undeletion. Now, it results in moving
- // the delete path aside.
- mock_server_->AddUpdateDirectory(2, 1, "bar", 3, 5,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- EXPECT_TRUE(entry.GetIsUnappliedUpdate());
- EXPECT_NE(entry.GetMetahandle(), metahandle);
- }
-}
-
-TEST_F(SyncerTest, TestMoveSanitizedNamedFolder) {
- mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 0, ":::", 1, 2,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entry.good());
- entry.PutParentId(ids_.FromNumber(1));
- EXPECT_TRUE(entry.PutIsUnsynced(true));
- }
- SyncShareNudge();
- // We use the same sync ts as before so our times match up.
- mock_server_->AddUpdateDirectory(2, 1, ":::", 2, 2,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
-}
-
-// Don't crash when this occurs.
-TEST_F(SyncerTest, UpdateWhereParentIsNotAFolder) {
- mock_server_->AddUpdateBookmark(1, 0, "B", 10, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(2, 1, "BookmarkParent", 10, 10,
- foreign_cache_guid(), "-2");
- // Used to cause a CHECK
- SyncShareNudge();
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- Entry good_entry(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(good_entry.good());
- EXPECT_FALSE(good_entry.GetIsUnappliedUpdate());
- Entry bad_parent(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(bad_parent.good());
- EXPECT_TRUE(bad_parent.GetIsUnappliedUpdate());
- }
-}
-
-TEST_F(SyncerTest, DirectoryUpdateTest) {
- Id in_root_id = ids_.NewServerId();
- Id in_in_root_id = ids_.NewServerId();
-
- mock_server_->AddUpdateDirectory(in_root_id, TestIdFactory::root(),
- "in_root_name", 2, 2,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(in_in_root_id, in_root_id,
- "in_in_root_name", 3, 3,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry in_root(&trans, GET_BY_ID, in_root_id);
- ASSERT_TRUE(in_root.good());
- EXPECT_EQ("in_root_name", in_root.GetNonUniqueName());
- EXPECT_EQ(TestIdFactory::root(), in_root.GetParentId());
-
- Entry in_in_root(&trans, GET_BY_ID, in_in_root_id);
- ASSERT_TRUE(in_in_root.good());
- EXPECT_EQ("in_in_root_name", in_in_root.GetNonUniqueName());
- EXPECT_EQ(in_root_id, in_in_root.GetParentId());
- }
-}
-
-TEST_F(SyncerTest, DirectoryCommitTest) {
- syncable::Id in_root_id, in_dir_id;
- int64 foo_metahandle;
- int64 bar_metahandle;
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, root_id_, "foo");
- ASSERT_TRUE(parent.good());
- parent.PutIsUnsynced(true);
- parent.PutIsDir(true);
- parent.PutSpecifics(DefaultBookmarkSpecifics());
- in_root_id = parent.GetId();
- foo_metahandle = parent.GetMetahandle();
-
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent.GetId(), "bar");
- ASSERT_TRUE(child.good());
- child.PutIsUnsynced(true);
- child.PutIsDir(true);
- child.PutSpecifics(DefaultBookmarkSpecifics());
- bar_metahandle = child.GetMetahandle();
- in_dir_id = parent.GetId();
- }
- SyncShareNudge();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry fail_by_old_id_entry(&trans, GET_BY_ID, in_root_id);
- ASSERT_FALSE(fail_by_old_id_entry.good());
-
- Entry foo_entry(&trans, GET_BY_HANDLE, foo_metahandle);
- ASSERT_TRUE(foo_entry.good());
- EXPECT_EQ("foo", foo_entry.GetNonUniqueName());
- EXPECT_NE(foo_entry.GetId(), in_root_id);
-
- Entry bar_entry(&trans, GET_BY_HANDLE, bar_metahandle);
- ASSERT_TRUE(bar_entry.good());
- EXPECT_EQ("bar", bar_entry.GetNonUniqueName());
- EXPECT_NE(bar_entry.GetId(), in_dir_id);
- EXPECT_EQ(foo_entry.GetId(), bar_entry.GetParentId());
- }
-}
-
-TEST_F(SyncerTest, TestClientCommandDuringUpdate) {
- using sync_pb::ClientCommand;
-
- ClientCommand* command = new ClientCommand();
- command->set_set_sync_poll_interval(8);
- command->set_set_sync_long_poll_interval(800);
- command->set_sessions_commit_delay_seconds(3141);
- command->set_client_invalidation_hint_buffer_size(11);
- mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1,
- foreign_cache_guid(), "-1");
- mock_server_->SetGUClientCommand(command);
- SyncShareNudge();
-
- EXPECT_TRUE(TimeDelta::FromSeconds(8) ==
- last_short_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(800) ==
- last_long_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(3141) ==
- last_sessions_commit_delay_seconds_);
- EXPECT_EQ(11, last_client_invalidation_hint_buffer_size_);
-
- command = new ClientCommand();
- command->set_set_sync_poll_interval(180);
- command->set_set_sync_long_poll_interval(190);
- command->set_sessions_commit_delay_seconds(2718);
- command->set_client_invalidation_hint_buffer_size(9);
- mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1,
- foreign_cache_guid(), "-1");
- mock_server_->SetGUClientCommand(command);
- SyncShareNudge();
-
- EXPECT_TRUE(TimeDelta::FromSeconds(180) ==
- last_short_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(190) ==
- last_long_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(2718) ==
- last_sessions_commit_delay_seconds_);
- EXPECT_EQ(9, last_client_invalidation_hint_buffer_size_);
-}
-
-TEST_F(SyncerTest, TestClientCommandDuringCommit) {
- using sync_pb::ClientCommand;
-
- ClientCommand* command = new ClientCommand();
- command->set_set_sync_poll_interval(8);
- command->set_set_sync_long_poll_interval(800);
- command->set_sessions_commit_delay_seconds(3141);
- command->set_client_invalidation_hint_buffer_size(11);
- CreateUnsyncedDirectory("X", "id_X");
- mock_server_->SetCommitClientCommand(command);
- SyncShareNudge();
-
- EXPECT_TRUE(TimeDelta::FromSeconds(8) ==
- last_short_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(800) ==
- last_long_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(3141) ==
- last_sessions_commit_delay_seconds_);
- EXPECT_EQ(11, last_client_invalidation_hint_buffer_size_);
-
- command = new ClientCommand();
- command->set_set_sync_poll_interval(180);
- command->set_set_sync_long_poll_interval(190);
- command->set_sessions_commit_delay_seconds(2718);
- command->set_client_invalidation_hint_buffer_size(9);
- CreateUnsyncedDirectory("Y", "id_Y");
- mock_server_->SetCommitClientCommand(command);
- SyncShareNudge();
-
- EXPECT_TRUE(TimeDelta::FromSeconds(180) ==
- last_short_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(190) ==
- last_long_poll_interval_received_);
- EXPECT_TRUE(TimeDelta::FromSeconds(2718) ==
- last_sessions_commit_delay_seconds_);
- EXPECT_EQ(9, last_client_invalidation_hint_buffer_size_);
-}
-
-TEST_F(SyncerTest, EnsureWeSendUpOldParent) {
- syncable::Id folder_one_id = ids_.FromNumber(1);
- syncable::Id folder_two_id = ids_.FromNumber(2);
-
- mock_server_->AddUpdateDirectory(folder_one_id, TestIdFactory::root(),
- "folder_one", 1, 1, foreign_cache_guid(), "-1");
- mock_server_->AddUpdateDirectory(folder_two_id, TestIdFactory::root(),
- "folder_two", 1, 1, foreign_cache_guid(), "-2");
- SyncShareNudge();
- {
- // A moved entry should send an "old parent."
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, GET_BY_ID, folder_one_id);
- ASSERT_TRUE(entry.good());
- entry.PutParentId(folder_two_id);
- entry.PutIsUnsynced(true);
- // A new entry should send no "old parent."
- MutableEntry create(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "new_folder");
- create.PutIsUnsynced(true);
- create.PutSpecifics(DefaultBookmarkSpecifics());
- }
- SyncShareNudge();
- const sync_pb::CommitMessage& commit = mock_server_->last_sent_commit();
- ASSERT_EQ(2, commit.entries_size());
- EXPECT_TRUE(commit.entries(0).parent_id_string() == "2");
- EXPECT_TRUE(commit.entries(0).old_parent_id() == "0");
- EXPECT_FALSE(commit.entries(1).has_old_parent_id());
-}
-
-TEST_F(SyncerTest, Test64BitVersionSupport) {
- int64 really_big_int = std::numeric_limits<int64>::max() - 12;
- const string name("ringo's dang orang ran rings around my o-ring");
- int64 item_metahandle;
-
- // Try writing max int64 to the version fields of a meta entry.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), name);
- ASSERT_TRUE(entry.good());
- entry.PutBaseVersion(really_big_int);
- entry.PutServerVersion(really_big_int);
- entry.PutId(ids_.NewServerId());
- item_metahandle = entry.GetMetahandle();
- }
- // Now read it back out and make sure the value is max int64.
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- Entry entry(&rtrans, syncable::GET_BY_HANDLE, item_metahandle);
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(really_big_int == entry.GetBaseVersion());
-}
-
-TEST_F(SyncerTest, TestSimpleUndelete) {
- Id id = ids_.MakeServer("undeletion item"), root = TestIdFactory::root();
- mock_server_->set_conflict_all_commits(true);
- // Let there be an entry from the server.
- mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- // Check it out and delete it.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&wtrans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsDel());
- // Delete it locally.
- entry.PutIsDel(true);
- }
- SyncShareNudge();
- // Confirm we see IS_DEL and not SERVER_IS_DEL.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_TRUE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- }
- SyncShareNudge();
- // Update from server confirming deletion.
- mock_server_->AddUpdateBookmark(id, root, "foo", 2, 11,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateDeleted();
- SyncShareNudge();
- // IS_DEL AND SERVER_IS_DEL now both true.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_TRUE(entry.GetIsDel());
- EXPECT_TRUE(entry.GetServerIsDel());
- }
- // Undelete from server.
- mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- // IS_DEL and SERVER_IS_DEL now both false.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- }
-}
-
-TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
- Id id = ids_.MakeServer("undeletion item"), root = TestIdFactory::root();
- // Let there be a entry, from the server.
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- // Check it out and delete it.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&wtrans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsDel());
- // Delete it locally.
- entry.PutIsDel(true);
- }
- SyncShareNudge();
- // Confirm we see IS_DEL and not SERVER_IS_DEL.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_TRUE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- }
- SyncShareNudge();
- // Say we do not get an update from server confirming deletion. Undelete
- // from server
- mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12,
- foreign_cache_guid(), "-1");
- SyncShareNudge();
- // IS_DEL and SERVER_IS_DEL now both false.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_ID, id);
- ASSERT_TRUE(entry.good());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- }
-}
-
-TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
- Id id1 = ids_.MakeServer("first"), id2 = ids_.MakeServer("second");
- Id root = TestIdFactory::root();
- // Duplicate! expect path clashing!
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateBookmark(id1, root, "foo", 1, 10,
- foreign_cache_guid(), "-1");
- mock_server_->AddUpdateBookmark(id2, root, "foo", 1, 10,
- foreign_cache_guid(), "-2");
- SyncShareNudge();
- mock_server_->AddUpdateBookmark(id2, root, "foo2", 2, 20,
- foreign_cache_guid(), "-2");
- SyncShareNudge(); // Now just don't explode.
-}
-
-TEST_F(SyncerTest, ClientTagServerCreatedUpdatesWork) {
- mock_server_->AddUpdateDirectory(1, 0, "permitem1", 1, 10,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateClientTag("permfolder");
-
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
- ASSERT_TRUE(perm_folder.good());
- EXPECT_FALSE(perm_folder.GetIsDel());
- EXPECT_FALSE(perm_folder.GetIsUnappliedUpdate());
- EXPECT_FALSE(perm_folder.GetIsUnsynced());
- EXPECT_EQ(perm_folder.GetUniqueClientTag(), "permfolder");
- EXPECT_EQ(perm_folder.GetNonUniqueName(), "permitem1");
- }
-
- mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateClientTag("permfolder");
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
- ASSERT_TRUE(perm_folder.good());
- EXPECT_FALSE(perm_folder.GetIsDel());
- EXPECT_FALSE(perm_folder.GetIsUnappliedUpdate());
- EXPECT_FALSE(perm_folder.GetIsUnsynced());
- EXPECT_EQ(perm_folder.GetUniqueClientTag(), "permfolder");
- EXPECT_EQ(perm_folder.GetNonUniqueName(), "permitem_renamed");
- }
-}
-
-TEST_F(SyncerTest, ClientTagIllegalUpdateIgnored) {
- mock_server_->AddUpdateDirectory(1, 0, "permitem1", 1, 10,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateClientTag("permfolder");
-
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
- ASSERT_TRUE(perm_folder.good());
- EXPECT_FALSE(perm_folder.GetIsUnappliedUpdate());
- EXPECT_FALSE(perm_folder.GetIsUnsynced());
- EXPECT_EQ(perm_folder.GetUniqueClientTag(), "permfolder");
- EXPECT_TRUE(perm_folder.GetNonUniqueName()== "permitem1");
- EXPECT_TRUE(perm_folder.GetId().ServerKnows());
- }
-
- mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100,
- foreign_cache_guid(), "-1");
- mock_server_->SetLastUpdateClientTag("wrongtag");
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // This update is rejected because it has the same ID, but a
- // different tag than one that is already on the client.
- // The client has a ServerKnows ID, which cannot be overwritten.
- Entry rejected_update(&trans, GET_BY_CLIENT_TAG, "wrongtag");
- EXPECT_FALSE(rejected_update.good());
-
- Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
- ASSERT_TRUE(perm_folder.good());
- EXPECT_FALSE(perm_folder.GetIsUnappliedUpdate());
- EXPECT_FALSE(perm_folder.GetIsUnsynced());
- EXPECT_EQ(perm_folder.GetNonUniqueName(), "permitem1");
- }
-}
-
-TEST_F(SyncerTest, ClientTagUncommittedTagMatchesUpdate) {
- int64 original_metahandle = 0;
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry pref(
- &trans, CREATE, PREFERENCES, ids_.root(), "name");
- ASSERT_TRUE(pref.good());
- pref.PutUniqueClientTag("tag");
- pref.PutIsUnsynced(true);
- EXPECT_FALSE(pref.GetIsUnappliedUpdate());
- EXPECT_FALSE(pref.GetId().ServerKnows());
- original_metahandle = pref.GetMetahandle();
- }
-
- syncable::Id server_id = TestIdFactory::MakeServer("id");
- mock_server_->AddUpdatePref(server_id.GetServerId(),
- ids_.root().GetServerId(),
- "tag", 10, 100);
- mock_server_->set_conflict_all_commits(true);
-
- SyncShareNudge();
- // This should cause client tag reunion, preserving the metahandle.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry pref(&trans, GET_BY_CLIENT_TAG, "tag");
- ASSERT_TRUE(pref.good());
- EXPECT_FALSE(pref.GetIsDel());
- EXPECT_FALSE(pref.GetIsUnappliedUpdate());
- EXPECT_TRUE(pref.GetIsUnsynced());
- EXPECT_EQ(10, pref.GetBaseVersion());
- // Entry should have been given the new ID while preserving the
- // metahandle; client should have won the conflict resolution.
- EXPECT_EQ(original_metahandle, pref.GetMetahandle());
- EXPECT_EQ("tag", pref.GetUniqueClientTag());
- EXPECT_TRUE(pref.GetId().ServerKnows());
- }
-
- mock_server_->set_conflict_all_commits(false);
- SyncShareNudge();
-
- // The resolved entry ought to commit cleanly.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry pref(&trans, GET_BY_CLIENT_TAG, "tag");
- ASSERT_TRUE(pref.good());
- EXPECT_FALSE(pref.GetIsDel());
- EXPECT_FALSE(pref.GetIsUnappliedUpdate());
- EXPECT_FALSE(pref.GetIsUnsynced());
- EXPECT_TRUE(10 < pref.GetBaseVersion());
- // Entry should have been given the new ID while preserving the
- // metahandle; client should have won the conflict resolution.
- EXPECT_EQ(original_metahandle, pref.GetMetahandle());
- EXPECT_EQ("tag", pref.GetUniqueClientTag());
- EXPECT_TRUE(pref.GetId().ServerKnows());
- }
-}
-
-TEST_F(SyncerTest, ClientTagConflictWithDeletedLocalEntry) {
- {
- // Create a deleted local entry with a unique client tag.
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry pref(
- &trans, CREATE, PREFERENCES, ids_.root(), "name");
- ASSERT_TRUE(pref.good());
- ASSERT_FALSE(pref.GetId().ServerKnows());
- pref.PutUniqueClientTag("tag");
- pref.PutIsUnsynced(true);
-
- // Note: IS_DEL && !ServerKnows() will clear the UNSYNCED bit.
- // (We never attempt to commit server-unknown deleted items, so this
- // helps us clean up those entries).
- pref.PutIsDel(true);
- }
-
- // Prepare an update with the same unique client tag.
- syncable::Id server_id = TestIdFactory::MakeServer("id");
- mock_server_->AddUpdatePref(server_id.GetServerId(),
- ids_.root().GetServerId(),
- "tag", 10, 100);
-
- SyncShareNudge();
- // The local entry will be overwritten.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry pref(&trans, GET_BY_CLIENT_TAG, "tag");
- ASSERT_TRUE(pref.good());
- ASSERT_TRUE(pref.GetId().ServerKnows());
- EXPECT_FALSE(pref.GetIsDel());
- EXPECT_FALSE(pref.GetIsUnappliedUpdate());
- EXPECT_FALSE(pref.GetIsUnsynced());
- EXPECT_EQ(pref.GetBaseVersion(), 10);
- EXPECT_EQ(pref.GetUniqueClientTag(), "tag");
- }
-}
-
-TEST_F(SyncerTest, ClientTagUpdateClashesWithLocalEntry) {
- // This test is written assuming that ID comparison
- // will work out in a particular way.
- EXPECT_TRUE(ids_.FromNumber(1) < ids_.FromNumber(2));
- EXPECT_TRUE(ids_.FromNumber(3) < ids_.FromNumber(4));
-
- syncable::Id id1 = TestIdFactory::MakeServer("1");
- mock_server_->AddUpdatePref(id1.GetServerId(), ids_.root().GetServerId(),
- "tag1", 10, 100);
-
- syncable::Id id4 = TestIdFactory::MakeServer("4");
- mock_server_->AddUpdatePref(id4.GetServerId(), ids_.root().GetServerId(),
- "tag2", 11, 110);
-
- mock_server_->set_conflict_all_commits(true);
-
- SyncShareNudge();
- int64 tag1_metahandle = syncable::kInvalidMetaHandle;
- int64 tag2_metahandle = syncable::kInvalidMetaHandle;
- // This should cause client tag overwrite.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry tag1(&trans, GET_BY_CLIENT_TAG, "tag1");
- ASSERT_TRUE(tag1.good());
- ASSERT_TRUE(tag1.GetId().ServerKnows());
- ASSERT_TRUE(id1 == tag1.GetId());
- EXPECT_FALSE(tag1.GetIsDel());
- EXPECT_FALSE(tag1.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag1.GetIsUnsynced());
- EXPECT_EQ(10, tag1.GetBaseVersion());
- EXPECT_EQ("tag1", tag1.GetUniqueClientTag());
- tag1_metahandle = tag1.GetMetahandle();
-
- Entry tag2(&trans, GET_BY_CLIENT_TAG, "tag2");
- ASSERT_TRUE(tag2.good());
- ASSERT_TRUE(tag2.GetId().ServerKnows());
- ASSERT_TRUE(id4 == tag2.GetId());
- EXPECT_FALSE(tag2.GetIsDel());
- EXPECT_FALSE(tag2.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag2.GetIsUnsynced());
- EXPECT_EQ(11, tag2.GetBaseVersion());
- EXPECT_EQ("tag2", tag2.GetUniqueClientTag());
- tag2_metahandle = tag2.GetMetahandle();
-
- syncable::Directory::Metahandles children;
- directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
- ASSERT_EQ(2U, children.size());
- }
-
- syncable::Id id2 = TestIdFactory::MakeServer("2");
- mock_server_->AddUpdatePref(id2.GetServerId(), ids_.root().GetServerId(),
- "tag1", 12, 120);
- syncable::Id id3 = TestIdFactory::MakeServer("3");
- mock_server_->AddUpdatePref(id3.GetServerId(), ids_.root().GetServerId(),
- "tag2", 13, 130);
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry tag1(&trans, GET_BY_CLIENT_TAG, "tag1");
- ASSERT_TRUE(tag1.good());
- ASSERT_TRUE(tag1.GetId().ServerKnows());
- ASSERT_EQ(id1, tag1.GetId())
- << "ID 1 should be kept, since it was less than ID 2.";
- EXPECT_FALSE(tag1.GetIsDel());
- EXPECT_FALSE(tag1.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag1.GetIsUnsynced());
- EXPECT_EQ(10, tag1.GetBaseVersion());
- EXPECT_EQ("tag1", tag1.GetUniqueClientTag());
- EXPECT_EQ(tag1_metahandle, tag1.GetMetahandle());
-
- Entry tag2(&trans, GET_BY_CLIENT_TAG, "tag2");
- ASSERT_TRUE(tag2.good());
- ASSERT_TRUE(tag2.GetId().ServerKnows());
- ASSERT_EQ(id3, tag2.GetId())
- << "ID 3 should be kept, since it was less than ID 4.";
- EXPECT_FALSE(tag2.GetIsDel());
- EXPECT_FALSE(tag2.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag2.GetIsUnsynced());
- EXPECT_EQ(13, tag2.GetBaseVersion());
- EXPECT_EQ("tag2", tag2.GetUniqueClientTag());
- EXPECT_EQ(tag2_metahandle, tag2.GetMetahandle());
-
- syncable::Directory::Metahandles children;
- directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
- ASSERT_EQ(2U, children.size());
- }
-}
-
-TEST_F(SyncerTest, ClientTagClashWithinBatchOfUpdates) {
- // This test is written assuming that ID comparison
- // will work out in a particular way.
- EXPECT_TRUE(ids_.FromNumber(1) < ids_.FromNumber(4));
- EXPECT_TRUE(ids_.FromNumber(201) < ids_.FromNumber(205));
-
- // Least ID: winner.
- mock_server_->AddUpdatePref(ids_.FromNumber(1).GetServerId(),
- ids_.root().GetServerId(), "tag a", 1, 10);
- mock_server_->AddUpdatePref(ids_.FromNumber(2).GetServerId(),
- ids_.root().GetServerId(), "tag a", 11, 110);
- mock_server_->AddUpdatePref(ids_.FromNumber(3).GetServerId(),
- ids_.root().GetServerId(), "tag a", 12, 120);
- mock_server_->AddUpdatePref(ids_.FromNumber(4).GetServerId(),
- ids_.root().GetServerId(), "tag a", 13, 130);
-
- mock_server_->AddUpdatePref(ids_.FromNumber(105).GetServerId(),
- ids_.root().GetServerId(), "tag b", 14, 140);
- mock_server_->AddUpdatePref(ids_.FromNumber(102).GetServerId(),
- ids_.root().GetServerId(), "tag b", 15, 150);
- // Least ID: winner.
- mock_server_->AddUpdatePref(ids_.FromNumber(101).GetServerId(),
- ids_.root().GetServerId(), "tag b", 16, 160);
- mock_server_->AddUpdatePref(ids_.FromNumber(104).GetServerId(),
- ids_.root().GetServerId(), "tag b", 17, 170);
-
- mock_server_->AddUpdatePref(ids_.FromNumber(205).GetServerId(),
- ids_.root().GetServerId(), "tag c", 18, 180);
- mock_server_->AddUpdatePref(ids_.FromNumber(202).GetServerId(),
- ids_.root().GetServerId(), "tag c", 19, 190);
- mock_server_->AddUpdatePref(ids_.FromNumber(204).GetServerId(),
- ids_.root().GetServerId(), "tag c", 20, 200);
- // Least ID: winner.
- mock_server_->AddUpdatePref(ids_.FromNumber(201).GetServerId(),
- ids_.root().GetServerId(), "tag c", 21, 210);
-
- mock_server_->set_conflict_all_commits(true);
-
- SyncShareNudge();
- // This should cause client tag overwrite.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry tag_a(&trans, GET_BY_CLIENT_TAG, "tag a");
- ASSERT_TRUE(tag_a.good());
- EXPECT_TRUE(tag_a.GetId().ServerKnows());
- EXPECT_EQ(ids_.FromNumber(1), tag_a.GetId());
- EXPECT_FALSE(tag_a.GetIsDel());
- EXPECT_FALSE(tag_a.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag_a.GetIsUnsynced());
- EXPECT_EQ(1, tag_a.GetBaseVersion());
- EXPECT_EQ("tag a", tag_a.GetUniqueClientTag());
-
- Entry tag_b(&trans, GET_BY_CLIENT_TAG, "tag b");
- ASSERT_TRUE(tag_b.good());
- EXPECT_TRUE(tag_b.GetId().ServerKnows());
- EXPECT_EQ(ids_.FromNumber(101), tag_b.GetId());
- EXPECT_FALSE(tag_b.GetIsDel());
- EXPECT_FALSE(tag_b.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag_b.GetIsUnsynced());
- EXPECT_EQ(16, tag_b.GetBaseVersion());
- EXPECT_EQ("tag b", tag_b.GetUniqueClientTag());
-
- Entry tag_c(&trans, GET_BY_CLIENT_TAG, "tag c");
- ASSERT_TRUE(tag_c.good());
- EXPECT_TRUE(tag_c.GetId().ServerKnows());
- EXPECT_EQ(ids_.FromNumber(201), tag_c.GetId());
- EXPECT_FALSE(tag_c.GetIsDel());
- EXPECT_FALSE(tag_c.GetIsUnappliedUpdate());
- EXPECT_FALSE(tag_c.GetIsUnsynced());
- EXPECT_EQ(21, tag_c.GetBaseVersion());
- EXPECT_EQ("tag c", tag_c.GetUniqueClientTag());
-
- syncable::Directory::Metahandles children;
- directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
- ASSERT_EQ(3U, children.size());
- }
-}
-
-TEST_F(SyncerTest, UniqueServerTagUpdates) {
- // As a hurdle, introduce an item whose name is the same as the tag value
- // we'll use later.
- int64 hurdle_handle = CreateUnsyncedDirectory("bob", "id_bob");
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
- ASSERT_TRUE(hurdle.good());
- ASSERT_TRUE(!hurdle.GetIsDel());
- ASSERT_TRUE(hurdle.GetUniqueServerTag().empty());
- ASSERT_TRUE(hurdle.GetNonUniqueName()== "bob");
-
- // Try to lookup by the tagname. These should fail.
- Entry tag_alpha(&trans, GET_BY_SERVER_TAG, "alpha");
- EXPECT_FALSE(tag_alpha.good());
- Entry tag_bob(&trans, GET_BY_SERVER_TAG, "bob");
- EXPECT_FALSE(tag_bob.good());
- }
-
- // Now download some tagged items as updates.
- mock_server_->AddUpdateDirectory(
- 1, 0, "update1", 1, 10, std::string(), std::string());
- mock_server_->SetLastUpdateServerTag("alpha");
- mock_server_->AddUpdateDirectory(
- 2, 0, "update2", 2, 20, std::string(), std::string());
- mock_server_->SetLastUpdateServerTag("bob");
- SyncShareNudge();
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // The new items should be applied as new entries, and we should be able
- // to look them up by their tag values.
- Entry tag_alpha(&trans, GET_BY_SERVER_TAG, "alpha");
- ASSERT_TRUE(tag_alpha.good());
- ASSERT_TRUE(!tag_alpha.GetIsDel());
- ASSERT_TRUE(tag_alpha.GetUniqueServerTag()== "alpha");
- ASSERT_TRUE(tag_alpha.GetNonUniqueName()== "update1");
- Entry tag_bob(&trans, GET_BY_SERVER_TAG, "bob");
- ASSERT_TRUE(tag_bob.good());
- ASSERT_TRUE(!tag_bob.GetIsDel());
- ASSERT_TRUE(tag_bob.GetUniqueServerTag()== "bob");
- ASSERT_TRUE(tag_bob.GetNonUniqueName()== "update2");
- // The old item should be unchanged.
- Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
- ASSERT_TRUE(hurdle.good());
- ASSERT_TRUE(!hurdle.GetIsDel());
- ASSERT_TRUE(hurdle.GetUniqueServerTag().empty());
- ASSERT_TRUE(hurdle.GetNonUniqueName()== "bob");
- }
-}
-
-TEST_F(SyncerTest, GetUpdatesSetsRequestedTypes) {
- // The expectations of this test happen in the MockConnectionManager's
- // GetUpdates handler. EnableDatatype sets the expectation value from our
- // set of enabled/disabled datatypes.
- EnableDatatype(BOOKMARKS);
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-
- EnableDatatype(AUTOFILL);
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-
- EnableDatatype(PREFERENCES);
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-
- DisableDatatype(BOOKMARKS);
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-
- DisableDatatype(AUTOFILL);
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-
- DisableDatatype(PREFERENCES);
- EnableDatatype(AUTOFILL);
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-}
-
-// A typical scenario: server and client each have one update for the other.
-// This is the "happy path" alternative to UpdateFailsThenDontCommit.
-TEST_F(SyncerTest, UpdateThenCommit) {
- syncable::Id to_receive = ids_.NewServerId();
- syncable::Id to_commit = ids_.NewLocalId();
-
- mock_server_->AddUpdateDirectory(to_receive, ids_.root(), "x", 1, 10,
- foreign_cache_guid(), "-1");
- int64 commit_handle = CreateUnsyncedDirectory("y", to_commit);
- SyncShareNudge();
-
- // The sync cycle should have included a GetUpdate, then a commit. By the
- // time the commit happened, we should have known for sure that there were no
- // hierarchy conflicts, and reported this fact to the server.
- ASSERT_TRUE(mock_server_->last_request().has_commit());
- VerifyNoHierarchyConflictsReported(mock_server_->last_request());
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Entry received(&trans, GET_BY_ID, to_receive);
- ASSERT_TRUE(received.good());
- EXPECT_FALSE(received.GetIsUnsynced());
- EXPECT_FALSE(received.GetIsUnappliedUpdate());
-
- Entry committed(&trans, GET_BY_HANDLE, commit_handle);
- ASSERT_TRUE(committed.good());
- EXPECT_FALSE(committed.GetIsUnsynced());
- EXPECT_FALSE(committed.GetIsUnappliedUpdate());
-}
-
-// Same as above, but this time we fail to download updates.
-// We should not attempt to commit anything unless we successfully downloaded
-// updates, otherwise we risk causing a server-side conflict.
-TEST_F(SyncerTest, UpdateFailsThenDontCommit) {
- syncable::Id to_receive = ids_.NewServerId();
- syncable::Id to_commit = ids_.NewLocalId();
-
- mock_server_->AddUpdateDirectory(to_receive, ids_.root(), "x", 1, 10,
- foreign_cache_guid(), "-1");
- int64 commit_handle = CreateUnsyncedDirectory("y", to_commit);
- mock_server_->FailNextPostBufferToPathCall();
- SyncShareNudge();
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // We did not receive this update.
- Entry received(&trans, GET_BY_ID, to_receive);
- ASSERT_FALSE(received.good());
-
- // And our local update remains unapplied.
- Entry committed(&trans, GET_BY_HANDLE, commit_handle);
- ASSERT_TRUE(committed.good());
- EXPECT_TRUE(committed.GetIsUnsynced());
- EXPECT_FALSE(committed.GetIsUnappliedUpdate());
-
- // Inform the Mock we won't be fetching all updates.
- mock_server_->ClearUpdatesQueue();
-}
-
-// Downloads two updates and applies them successfully.
-// This is the "happy path" alternative to ConfigureFailsDontApplyUpdates.
-TEST_F(SyncerTest, ConfigureDownloadsTwoBatchesSuccess) {
- syncable::Id node1 = ids_.NewServerId();
- syncable::Id node2 = ids_.NewServerId();
-
- // Construct the first GetUpdates response.
- mock_server_->AddUpdateDirectory(node1, ids_.root(), "one", 1, 10,
- foreign_cache_guid(), "-2");
- mock_server_->SetChangesRemaining(1);
- mock_server_->NextUpdateBatch();
-
- // Construct the second GetUpdates response.
- mock_server_->AddUpdateDirectory(node2, ids_.root(), "two", 1, 20,
- foreign_cache_guid(), "-2");
-
- SyncShareConfigure();
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
- // Both nodes should be downloaded and applied.
-
- Entry n1(&trans, GET_BY_ID, node1);
- ASSERT_TRUE(n1.good());
- EXPECT_FALSE(n1.GetIsUnappliedUpdate());
-
- Entry n2(&trans, GET_BY_ID, node2);
- ASSERT_TRUE(n2.good());
- EXPECT_FALSE(n2.GetIsUnappliedUpdate());
-}
-
-// Same as the above case, but this time the second batch fails to download.
-TEST_F(SyncerTest, ConfigureFailsDontApplyUpdates) {
- syncable::Id node1 = ids_.NewServerId();
- syncable::Id node2 = ids_.NewServerId();
-
- // The scenario: we have two batches of updates with one update each. A
- // normal confgure step would download all the updates one batch at a time and
- // apply them. This configure will succeed in downloading the first batch
- // then fail when downloading the second.
- mock_server_->FailNthPostBufferToPathCall(2);
-
- // Construct the first GetUpdates response.
- mock_server_->AddUpdateDirectory(node1, ids_.root(), "one", 1, 10,
- foreign_cache_guid(), "-1");
- mock_server_->SetChangesRemaining(1);
- mock_server_->NextUpdateBatch();
-
- // Consutrct the second GetUpdates response.
- mock_server_->AddUpdateDirectory(node2, ids_.root(), "two", 1, 20,
- foreign_cache_guid(), "-2");
-
- SyncShareConfigure();
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- // The first node was downloaded, but not applied.
- Entry n1(&trans, GET_BY_ID, node1);
- ASSERT_TRUE(n1.good());
- EXPECT_TRUE(n1.GetIsUnappliedUpdate());
-
- // The second node was not downloaded.
- Entry n2(&trans, GET_BY_ID, node2);
- EXPECT_FALSE(n2.good());
-
- // One update remains undownloaded.
- mock_server_->ClearUpdatesQueue();
-}
-
-TEST_F(SyncerTest, GetKeySuccess) {
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->NeedKeystoreKey(&rtrans));
- }
-
- SyncShareConfigure();
-
- EXPECT_EQ(session_->status_controller().last_get_key_result(), SYNCER_OK);
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- EXPECT_FALSE(directory()->GetNigoriHandler()->NeedKeystoreKey(&rtrans));
- }
-}
-
-TEST_F(SyncerTest, GetKeyEmpty) {
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->NeedKeystoreKey(&rtrans));
- }
-
- mock_server_->SetKeystoreKey(std::string());
- SyncShareConfigure();
-
- EXPECT_NE(session_->status_controller().last_get_key_result(), SYNCER_OK);
- {
- syncable::ReadTransaction rtrans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->GetNigoriHandler()->NeedKeystoreKey(&rtrans));
- }
-}
-
-// Test what happens if a client deletes, then recreates, an object very
-// quickly. It is possible that the deletion gets sent as a commit, and
-// the undelete happens during the commit request. The principle here
-// is that with a single committing client, conflicts should never
-// be encountered, and a client encountering its past actions during
-// getupdates should never feed back to override later actions.
-//
-// In cases of ordering A-F below, the outcome should be the same.
-// Exercised by UndeleteDuringCommit:
-// A. Delete - commit - undelete - commitresponse.
-// B. Delete - commit - undelete - commitresponse - getupdates.
-// Exercised by UndeleteBeforeCommit:
-// C. Delete - undelete - commit - commitresponse.
-// D. Delete - undelete - commit - commitresponse - getupdates.
-// Exercised by UndeleteAfterCommit:
-// E. Delete - commit - commitresponse - undelete - commit
-// - commitresponse.
-// F. Delete - commit - commitresponse - undelete - commit -
-// - commitresponse - getupdates.
-class SyncerUndeletionTest : public SyncerTest {
- public:
- SyncerUndeletionTest()
- : client_tag_("foobar"),
- metahandle_(syncable::kInvalidMetaHandle) {
- }
-
- void Create() {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry perm_folder(
- &trans, CREATE, BOOKMARKS, ids_.root(), "clientname");
- ASSERT_TRUE(perm_folder.good());
- perm_folder.PutUniqueClientTag(client_tag_);
- perm_folder.PutIsUnsynced(true);
- perm_folder.PutSyncing(false);
- perm_folder.PutSpecifics(DefaultBookmarkSpecifics());
- EXPECT_FALSE(perm_folder.GetIsUnappliedUpdate());
- EXPECT_FALSE(perm_folder.GetId().ServerKnows());
- metahandle_ = perm_folder.GetMetahandle();
- local_id_ = perm_folder.GetId();
- }
-
- void Delete() {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
- ASSERT_TRUE(entry.good());
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- entry.PutIsDel(true);
- entry.PutIsUnsynced(true);
- entry.PutSyncing(false);
- }
-
- void Undelete() {
- WriteTransaction trans(FROM_HERE, UNITTEST, directory());
- MutableEntry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
- ASSERT_TRUE(entry.good());
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_TRUE(entry.GetIsDel());
- entry.PutIsDel(false);
- entry.PutIsUnsynced(true);
- entry.PutSyncing(false);
- }
-
- int64 GetMetahandleOfTag() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
- EXPECT_TRUE(entry.good());
- if (!entry.good()) {
- return syncable::kInvalidMetaHandle;
- }
- return entry.GetMetahandle();
- }
-
- void ExpectUnsyncedCreation() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
-
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel()); // Never been committed.
- EXPECT_GE(0, entry.GetBaseVersion());
- EXPECT_TRUE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- }
-
- void ExpectUnsyncedUndeletion() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
-
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_TRUE(entry.GetServerIsDel());
- EXPECT_EQ(0, entry.GetBaseVersion());
- EXPECT_TRUE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_TRUE(entry.GetId().ServerKnows());
- }
-
- void ExpectUnsyncedEdit() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
-
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- EXPECT_LT(0, entry.GetBaseVersion());
- EXPECT_TRUE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_TRUE(entry.GetId().ServerKnows());
- }
-
- void ExpectUnsyncedDeletion() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
-
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_TRUE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- EXPECT_TRUE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_LT(0, entry.GetBaseVersion());
- EXPECT_LT(0, entry.GetServerVersion());
- }
-
- void ExpectSyncedAndCreated() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
-
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetServerIsDel());
- EXPECT_LT(0, entry.GetBaseVersion());
- EXPECT_EQ(entry.GetBaseVersion(), entry.GetServerVersion());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- }
-
- void ExpectSyncedAndDeleted() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
-
- EXPECT_EQ(metahandle_, entry.GetMetahandle());
- EXPECT_TRUE(entry.GetIsDel());
- EXPECT_TRUE(entry.GetServerIsDel());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- EXPECT_GE(0, entry.GetBaseVersion());
- EXPECT_GE(0, entry.GetServerVersion());
- }
-
- protected:
- const std::string client_tag_;
- syncable::Id local_id_;
- int64 metahandle_;
-};
-
-TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Delete, begin committing the delete, then undelete while committing.
- Delete();
- ExpectUnsyncedDeletion();
- mock_server_->SetMidCommitCallback(
- base::Bind(&SyncerUndeletionTest::Undelete, base::Unretained(this)));
- SyncShareNudge();
-
- // We will continue to commit until all nodes are synced, so we expect
- // that both the delete and following undelete were committed. We haven't
- // downloaded any updates, though, so the SERVER fields will be the same
- // as they were at the start of the cycle.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
-
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
-
- // Server fields lag behind.
- EXPECT_FALSE(entry.GetServerIsDel());
-
- // We have committed the second (undelete) update.
- EXPECT_FALSE(entry.GetIsDel());
- EXPECT_FALSE(entry.GetIsUnsynced());
- EXPECT_FALSE(entry.GetIsUnappliedUpdate());
- }
-
- // Now, encounter a GetUpdates corresponding to the deletion from
- // the server. The undeletion should prevail again and be committed.
- // None of this should trigger any conflict detection -- it is perfectly
- // normal to recieve updates from our own commits.
- mock_server_->SetMidCommitCallback(base::Closure());
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- update->set_originator_client_item_id(local_id_.GetServerId());
-
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-}
-
-TEST_F(SyncerUndeletionTest, UndeleteBeforeCommit) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Delete and undelete, then sync to pick up the result.
- Delete();
- ExpectUnsyncedDeletion();
- Undelete();
- ExpectUnsyncedEdit(); // Edit, not undelete: server thinks it exists.
- SyncShareNudge();
-
- // The item ought to have committed successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- EXPECT_EQ(2, entry.GetBaseVersion());
- }
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // update.
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- update->set_originator_client_item_id(local_id_.GetServerId());
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-}
-
-TEST_F(SyncerUndeletionTest, UndeleteAfterCommitButBeforeGetUpdates) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Delete and commit.
- Delete();
- ExpectUnsyncedDeletion();
- SyncShareNudge();
-
- // The item ought to have committed successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Before the GetUpdates, the item is locally undeleted.
- Undelete();
- ExpectUnsyncedUndeletion();
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // deletion update. The undeletion should prevail.
- mock_server_->AddUpdateFromLastCommit();
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-}
-
-TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- update->set_originator_client_item_id(local_id_.GetServerId());
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Delete and commit.
- Delete();
- ExpectUnsyncedDeletion();
- SyncShareNudge();
-
- // The item ought to have committed successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // deletion update. Should be consistent.
- mock_server_->AddUpdateFromLastCommit();
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // After the GetUpdates, the item is locally undeleted.
- Undelete();
- ExpectUnsyncedUndeletion();
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // deletion update. The undeletion should prevail.
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-}
-
-// Test processing of undeletion GetUpdateses.
-TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Add a delete from the server.
- sync_pb::SyncEntity* update1 = mock_server_->AddUpdateFromLastCommit();
- update1->set_originator_cache_guid(local_cache_guid());
- update1->set_originator_client_item_id(local_id_.GetServerId());
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Some other client deletes the item.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- mock_server_->AddUpdateTombstone(entry.GetId());
- }
- SyncShareNudge();
-
- // The update ought to have applied successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Undelete it locally.
- Undelete();
- ExpectUnsyncedUndeletion();
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // deletion update. The undeletion should prevail.
- sync_pb::SyncEntity* update2 = mock_server_->AddUpdateFromLastCommit();
- update2->set_originator_cache_guid(local_cache_guid());
- update2->set_originator_client_item_id(local_id_.GetServerId());
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-}
-
-TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Some other client deletes the item before we get a chance
- // to GetUpdates our original request.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- mock_server_->AddUpdateTombstone(entry.GetId());
- }
- SyncShareNudge();
-
- // The update ought to have applied successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Undelete it locally.
- Undelete();
- ExpectUnsyncedUndeletion();
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // deletion update. The undeletion should prevail.
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- update->set_originator_client_item_id(local_id_.GetServerId());
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
-}
-
-TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Get the updates of our just-committed entry.
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- update->set_originator_client_item_id(local_id_.GetServerId());
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // We delete the item.
- Delete();
- ExpectUnsyncedDeletion();
- SyncShareNudge();
-
- // The update ought to have applied successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Now, encounter a GetUpdates corresponding to the just-committed
- // deletion update.
- mock_server_->AddUpdateFromLastCommit();
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Some other client undeletes the item.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- mock_server_->AddUpdateBookmark(
- entry.GetId(),
- entry.GetParentId(),
- "Thadeusz", 100, 1000,
- local_cache_guid(), local_id_.GetServerId());
- }
- mock_server_->SetLastUpdateClientTag(client_tag_);
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- EXPECT_EQ("Thadeusz", entry.GetNonUniqueName());
- }
-}
-
-TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
- Create();
- ExpectUnsyncedCreation();
- SyncShareNudge();
-
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // Get the updates of our just-committed entry.
- sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
- update->set_originator_cache_guid(local_cache_guid());
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- update->set_originator_client_item_id(local_id_.GetServerId());
- }
- SyncShareNudge();
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- ExpectSyncedAndCreated();
-
- // We delete the item.
- Delete();
- ExpectUnsyncedDeletion();
- SyncShareNudge();
-
- // The update ought to have applied successfully.
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndDeleted();
-
- // Some other client undeletes before we see the update from our
- // commit.
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- mock_server_->AddUpdateBookmark(
- entry.GetId(),
- entry.GetParentId(),
- "Thadeusz", 100, 1000,
- local_cache_guid(), local_id_.GetServerId());
- }
- mock_server_->SetLastUpdateClientTag(client_tag_);
- SyncShareNudge();
- EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
- EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectSyncedAndCreated();
- {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Entry entry(&trans, GET_BY_HANDLE, metahandle_);
- EXPECT_EQ("Thadeusz", entry.GetNonUniqueName());
- }
-}
-
-enum {
- TEST_PARAM_BOOKMARK_ENABLE_BIT,
- TEST_PARAM_AUTOFILL_ENABLE_BIT,
- TEST_PARAM_BIT_COUNT
-};
-
-class MixedResult :
- public SyncerTest,
- public ::testing::WithParamInterface<int> {
- protected:
- bool ShouldFailBookmarkCommit() {
- return (GetParam() & (1 << TEST_PARAM_BOOKMARK_ENABLE_BIT)) == 0;
- }
- bool ShouldFailAutofillCommit() {
- return (GetParam() & (1 << TEST_PARAM_AUTOFILL_ENABLE_BIT)) == 0;
- }
-};
-
-INSTANTIATE_TEST_CASE_P(ExtensionsActivity,
- MixedResult,
- testing::Range(0, 1 << TEST_PARAM_BIT_COUNT));
-
-TEST_P(MixedResult, ExtensionsActivity) {
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
-
- MutableEntry pref(&wtrans, CREATE, PREFERENCES, wtrans.root_id(), "pref");
- ASSERT_TRUE(pref.good());
- pref.PutIsUnsynced(true);
-
- MutableEntry bookmark(
- &wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "bookmark");
- ASSERT_TRUE(bookmark.good());
- bookmark.PutIsUnsynced(true);
-
- if (ShouldFailBookmarkCommit()) {
- mock_server_->SetTransientErrorId(bookmark.GetId());
- }
-
- if (ShouldFailAutofillCommit()) {
- mock_server_->SetTransientErrorId(pref.GetId());
- }
- }
-
-
- // Put some extenions activity records into the monitor.
- {
- ExtensionsActivity::Records records;
- records["ABC"].extension_id = "ABC";
- records["ABC"].bookmark_write_count = 2049U;
- records["xyz"].extension_id = "xyz";
- records["xyz"].bookmark_write_count = 4U;
- context_->extensions_activity()->PutRecords(records);
- }
-
- SyncShareNudge();
-
- ExtensionsActivity::Records final_monitor_records;
- context_->extensions_activity()->GetAndClearRecords(&final_monitor_records);
- if (ShouldFailBookmarkCommit()) {
- ASSERT_EQ(2U, final_monitor_records.size())
- << "Should restore records after unsuccessful bookmark commit.";
- EXPECT_EQ("ABC", final_monitor_records["ABC"].extension_id);
- EXPECT_EQ("xyz", final_monitor_records["xyz"].extension_id);
- EXPECT_EQ(2049U, final_monitor_records["ABC"].bookmark_write_count);
- EXPECT_EQ(4U, final_monitor_records["xyz"].bookmark_write_count);
- } else {
- EXPECT_TRUE(final_monitor_records.empty())
- << "Should not restore records after successful bookmark commit.";
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer_util.cc b/chromium/sync/engine/syncer_util.cc
deleted file mode 100644
index 2235734aedb..00000000000
--- a/chromium/sync/engine/syncer_util.cc
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/syncer_util.h"
-
-#include <algorithm>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/base64.h"
-#include "base/location.h"
-#include "base/metrics/histogram.h"
-#include "base/strings/string_number_conversions.h"
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/time.h"
-
-namespace syncer {
-
-using syncable::BASE_SERVER_SPECIFICS;
-using syncable::BASE_VERSION;
-using syncable::CHANGES_VERSION;
-using syncable::CREATE_NEW_UPDATE_ITEM;
-using syncable::CTIME;
-using syncable::Directory;
-using syncable::Entry;
-using syncable::GET_BY_HANDLE;
-using syncable::GET_BY_ID;
-using syncable::ID;
-using syncable::IS_DEL;
-using syncable::IS_DIR;
-using syncable::IS_UNAPPLIED_UPDATE;
-using syncable::IS_UNSYNCED;
-using syncable::Id;
-using syncable::META_HANDLE;
-using syncable::MTIME;
-using syncable::MutableEntry;
-using syncable::NON_UNIQUE_NAME;
-using syncable::PARENT_ID;
-using syncable::SERVER_CTIME;
-using syncable::SERVER_IS_DEL;
-using syncable::SERVER_IS_DIR;
-using syncable::SERVER_MTIME;
-using syncable::SERVER_NON_UNIQUE_NAME;
-using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_SPECIFICS;
-using syncable::SERVER_UNIQUE_POSITION;
-using syncable::SERVER_VERSION;
-using syncable::SPECIFICS;
-using syncable::SYNCER;
-using syncable::UNIQUE_BOOKMARK_TAG;
-using syncable::UNIQUE_CLIENT_TAG;
-using syncable::UNIQUE_POSITION;
-using syncable::UNIQUE_SERVER_TAG;
-using syncable::WriteTransaction;
-
-syncable::Id FindLocalIdToUpdate(
- syncable::BaseTransaction* trans,
- const sync_pb::SyncEntity& update) {
- // Expected entry points of this function:
- // SyncEntity has NOT been applied to SERVER fields.
- // SyncEntity has NOT been applied to LOCAL fields.
- // DB has not yet been modified, no entries created for this update.
-
- const std::string& client_id = trans->directory()->cache_guid();
- const syncable::Id& update_id = SyncableIdFromProto(update.id_string());
-
- if (update.has_client_defined_unique_tag() &&
- !update.client_defined_unique_tag().empty()) {
- // When a server sends down a client tag, the following cases can occur:
- // 1) Client has entry for tag already, ID is server style, matches
- // 2) Client has entry for tag already, ID is server, doesn't match.
- // 3) Client has entry for tag already, ID is local, (never matches)
- // 4) Client has no entry for tag
-
- // Case 1, we don't have to do anything since the update will
- // work just fine. Update will end up in the proper entry, via ID lookup.
- // Case 2 - Happens very rarely due to lax enforcement of client tags
- // on the server, if two clients commit the same tag at the same time.
- // When this happens, we pick the lexically-least ID and ignore all other
- // items.
- // Case 3 - We need to replace the local ID with the server ID so that
- // this update gets targeted at the correct local entry; we expect conflict
- // resolution to occur.
- // Case 4 - Perfect. Same as case 1.
-
- syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
- update.client_defined_unique_tag());
-
- // The SyncAPI equivalent of this function will return !good if IS_DEL.
- // The syncable version will return good even if IS_DEL.
- // TODO(chron): Unit test the case with IS_DEL and make sure.
- if (local_entry.good()) {
- if (local_entry.GetId().ServerKnows()) {
- if (local_entry.GetId() != update_id) {
- // Case 2.
- LOG(WARNING) << "Duplicated client tag.";
- if (local_entry.GetId() < update_id) {
- // Signal an error; drop this update on the floor. Note that
- // we don't server delete the item, because we don't allow it to
- // exist locally at all. So the item will remain orphaned on
- // the server, and we won't pay attention to it.
- return syncable::GetNullId();
- }
- }
- // Target this change to the existing local entry; later,
- // we'll change the ID of the local entry to update_id
- // if needed.
- return local_entry.GetId();
- } else {
- // Case 3: We have a local entry with the same client tag.
- // We should change the ID of the local entry to the server entry.
- // This will result in an server ID with base version == 0, but that's
- // a legal state for an item with a client tag. By changing the ID,
- // update will now be applied to local_entry.
- DCHECK(0 == local_entry.GetBaseVersion() ||
- CHANGES_VERSION == local_entry.GetBaseVersion());
- return local_entry.GetId();
- }
- }
- } else if (update.has_originator_cache_guid() &&
- update.originator_cache_guid() == client_id) {
- // If a commit succeeds, but the response does not come back fast enough
- // then the syncer might assume that it was never committed.
- // The server will track the client that sent up the original commit and
- // return this in a get updates response. When this matches a local
- // uncommitted item, we must mutate our local item and version to pick up
- // the committed version of the same item whose commit response was lost.
- // There is however still a race condition if the server has not
- // completed the commit by the time the syncer tries to get updates
- // again. To mitigate this, we need to have the server time out in
- // a reasonable span, our commit batches have to be small enough
- // to process within our HTTP response "assumed alive" time.
-
- // We need to check if we have an entry that didn't get its server
- // id updated correctly. The server sends down a client ID
- // and a local (negative) id. If we have a entry by that
- // description, we should update the ID and version to the
- // server side ones to avoid multiple copies of the same thing.
-
- syncable::Id client_item_id = syncable::Id::CreateFromClientString(
- update.originator_client_item_id());
- DCHECK(!client_item_id.ServerKnows());
- syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);
-
- // If it exists, then our local client lost a commit response. Use
- // the local entry.
- if (local_entry.good() && !local_entry.GetIsDel()) {
- int64 old_version = local_entry.GetBaseVersion();
- int64 new_version = update.version();
- DCHECK_LE(old_version, 0);
- DCHECK_GT(new_version, 0);
- // Otherwise setting the base version could cause a consistency failure.
- // An entry should never be version 0 and SYNCED.
- DCHECK(local_entry.GetIsUnsynced());
-
- // Just a quick sanity check.
- DCHECK(!local_entry.GetId().ServerKnows());
-
- DVLOG(1) << "Reuniting lost commit response IDs. server id: "
- << update_id << " local id: " << local_entry.GetId()
- << " new version: " << new_version;
-
- return local_entry.GetId();
- }
- }
- // Fallback: target an entry having the server ID, creating one if needed.
- return update_id;
-}
-
-UpdateAttemptResponse AttemptToUpdateEntry(
- syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- Cryptographer* cryptographer) {
- CHECK(entry->good());
- if (!entry->GetIsUnappliedUpdate())
- return SUCCESS; // No work to do.
- syncable::Id id = entry->GetId();
- const sync_pb::EntitySpecifics& specifics = entry->GetServerSpecifics();
-
- // Only apply updates that we can decrypt. If we can't decrypt the update, it
- // is likely because the passphrase has not arrived yet. Because the
- // passphrase may not arrive within this GetUpdates, we can't just return
- // conflict, else we try to perform normal conflict resolution prematurely or
- // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
- // treated as an unresolvable conflict. See the description in syncer_types.h.
- // This prevents any unsynced changes from commiting and postpones conflict
- // resolution until all data can be decrypted.
- if (specifics.has_encrypted() &&
- !cryptographer->CanDecrypt(specifics.encrypted())) {
- // We can't decrypt this node yet.
- DVLOG(1) << "Received an undecryptable "
- << ModelTypeToString(entry->GetServerModelType())
- << " update, returning conflict_encryption.";
- return CONFLICT_ENCRYPTION;
- } else if (specifics.has_password() &&
- entry->GetUniqueServerTag().empty()) {
- // Passwords use their own legacy encryption scheme.
- const sync_pb::PasswordSpecifics& password = specifics.password();
- if (!cryptographer->CanDecrypt(password.encrypted())) {
- DVLOG(1) << "Received an undecryptable password update, returning "
- << "conflict_encryption.";
- return CONFLICT_ENCRYPTION;
- }
- }
-
- if (!entry->GetServerIsDel()) {
- syncable::Id new_parent = entry->GetServerParentId();
- Entry parent(trans, GET_BY_ID, new_parent);
- // A note on non-directory parents:
- // We catch most unfixable tree invariant errors at update receipt time,
- // however we deal with this case here because we may receive the child
- // first then the illegal parent. Instead of dealing with it twice in
- // different ways we deal with it once here to reduce the amount of code and
- // potential errors.
- if (!parent.good() || parent.GetIsDel() || !parent.GetIsDir()) {
- DVLOG(1) << "Entry has bad parent, returning conflict_hierarchy.";
- return CONFLICT_HIERARCHY;
- }
- if (entry->GetParentId() != new_parent) {
- if (!entry->GetIsDel() && !IsLegalNewParent(trans, id, new_parent)) {
- DVLOG(1) << "Not updating item " << id
- << ", illegal new parent (would cause loop).";
- return CONFLICT_HIERARCHY;
- }
- }
- } else if (entry->GetIsDir()) {
- Directory::Metahandles handles;
- trans->directory()->GetChildHandlesById(trans, id, &handles);
- if (!handles.empty()) {
- // If we have still-existing children, then we need to deal with
- // them before we can process this change.
- DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
- return CONFLICT_HIERARCHY;
- }
- }
-
- if (entry->GetIsUnsynced()) {
- DVLOG(1) << "Skipping update, returning conflict for: " << id
- << " ; it's unsynced.";
- return CONFLICT_SIMPLE;
- }
-
- if (specifics.has_encrypted()) {
- DVLOG(2) << "Received a decryptable "
- << ModelTypeToString(entry->GetServerModelType())
- << " update, applying normally.";
- } else {
- DVLOG(2) << "Received an unencrypted "
- << ModelTypeToString(entry->GetServerModelType())
- << " update, applying normally.";
- }
-
- UpdateLocalDataFromServerData(trans, entry);
-
- return SUCCESS;
-}
-
-std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update) {
- if (!update.has_originator_cache_guid() ||
- !update.has_originator_client_item_id()) {
- return std::string();
- }
-
- return syncable::GenerateSyncableBookmarkHash(
- update.originator_cache_guid(), update.originator_client_item_id());
-}
-
-UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
- const std::string& suffix) {
- DCHECK(UniquePosition::IsValidSuffix(suffix));
- if (!(SyncerProtoUtil::ShouldMaintainPosition(update))) {
- return UniquePosition::CreateInvalid();
- } else if (update.has_unique_position()) {
- return UniquePosition::FromProto(update.unique_position());
- } else if (update.has_position_in_parent()) {
- return UniquePosition::FromInt64(update.position_in_parent(), suffix);
- } else {
- return UniquePosition::CreateInvalid();
- }
-}
-
-namespace {
-
-// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
-// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
-// protocol.
-void UpdateBookmarkSpecifics(const std::string& singleton_tag,
- const std::string& url,
- const std::string& favicon_bytes,
- syncable::ModelNeutralMutableEntry* local_entry) {
- // In the new-style protocol, the server no longer sends bookmark info for
- // the "google_chrome" folder. Mimic that here.
- if (singleton_tag == "google_chrome")
- return;
- sync_pb::EntitySpecifics pb;
- sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
- if (!url.empty())
- bookmark->set_url(url);
- if (!favicon_bytes.empty())
- bookmark->set_favicon(favicon_bytes);
- local_entry->PutServerSpecifics(pb);
-}
-
-void UpdateBookmarkPositioning(
- const sync_pb::SyncEntity& update,
- syncable::ModelNeutralMutableEntry* local_entry) {
- // Update our unique bookmark tag. In many cases this will be identical to
- // the tag we already have. However, clients that have recently upgraded to
- // versions that support unique positions will have incorrect tags. See the
- // v86 migration logic in directory_backing_store.cc for more information.
- //
- // Both the old and new values are unique to this element. Applying this
- // update will not risk the creation of conflicting unique tags.
- std::string bookmark_tag = GetUniqueBookmarkTagFromUpdate(update);
- if (UniquePosition::IsValidSuffix(bookmark_tag)) {
- local_entry->PutUniqueBookmarkTag(bookmark_tag);
- }
-
- // Update our position.
- UniquePosition update_pos =
- GetUpdatePosition(update, local_entry->GetUniqueBookmarkTag());
- if (update_pos.IsValid()) {
- local_entry->PutServerUniquePosition(update_pos);
- } else {
- // TODO(sync): This and other cases of unexpected input should be handled
- // better.
- NOTREACHED();
- }
-}
-
-} // namespace
-
-void UpdateServerFieldsFromUpdate(
- syncable::ModelNeutralMutableEntry* target,
- const sync_pb::SyncEntity& update,
- const std::string& name) {
- if (update.deleted()) {
- if (target->GetServerIsDel()) {
- // If we already think the item is server-deleted, we're done.
- // Skipping these cases prevents our committed deletions from coming
- // back and overriding subsequent undeletions. For non-deleted items,
- // the version number check has a similar effect.
- return;
- }
- // The server returns very lightweight replies for deletions, so we don't
- // clobber a bunch of fields on delete.
- target->PutServerIsDel(true);
- if (!target->GetUniqueClientTag().empty()) {
- // Items identified by the client unique tag are undeletable; when
- // they're deleted, they go back to version 0.
- target->PutServerVersion(0);
- } else {
- // Otherwise, fake a server version by bumping the local number.
- target->PutServerVersion(
- std::max(target->GetServerVersion(), target->GetBaseVersion()) + 1);
- }
- target->PutIsUnappliedUpdate(true);
- return;
- }
-
- DCHECK_EQ(target->GetId(), SyncableIdFromProto(update.id_string()))
- << "ID Changing not supported here";
- target->PutServerParentId(SyncableIdFromProto(update.parent_id_string()));
- target->PutServerNonUniqueName(name);
- target->PutServerVersion(update.version());
- target->PutServerCtime(ProtoTimeToTime(update.ctime()));
- target->PutServerMtime(ProtoTimeToTime(update.mtime()));
- target->PutServerIsDir(IsFolder(update));
- if (update.has_server_defined_unique_tag()) {
- const std::string& tag = update.server_defined_unique_tag();
- target->PutUniqueServerTag(tag);
- }
- if (update.has_client_defined_unique_tag()) {
- const std::string& tag = update.client_defined_unique_tag();
- target->PutUniqueClientTag(tag);
- }
- // Store the datatype-specific part as a protobuf.
- if (update.has_specifics()) {
- DCHECK_NE(GetModelType(update), UNSPECIFIED)
- << "Storing unrecognized datatype in sync database.";
- target->PutServerSpecifics(update.specifics());
- } else if (update.has_bookmarkdata()) {
- // Legacy protocol response for bookmark data.
- const sync_pb::SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
- UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
- bookmark.bookmark_url(),
- bookmark.bookmark_favicon(),
- target);
- }
- if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
- UpdateBookmarkPositioning(update, target);
- }
-
- target->PutServerIsDel(update.deleted());
- // We only mark the entry as unapplied if its version is greater than the
- // local data. If we're processing the update that corresponds to one of our
- // commit we don't apply it as time differences may occur.
- if (update.version() > target->GetBaseVersion()) {
- target->PutIsUnappliedUpdate(true);
- }
-}
-
-// Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
- const syncable::Id& id) {
- syncable::Entry entry(trans, GET_BY_ID, id);
- if (!entry.good()) {
- syncable::ModelNeutralMutableEntry new_entry(
- trans,
- syncable::CREATE_NEW_UPDATE_ITEM,
- id);
- }
-}
-
-// This function is called on an entry when we can update the user-facing data
-// from the server data.
-void UpdateLocalDataFromServerData(
- syncable::WriteTransaction* trans,
- syncable::MutableEntry* entry) {
- DCHECK(!entry->GetIsUnsynced());
- DCHECK(entry->GetIsUnappliedUpdate());
-
- DVLOG(2) << "Updating entry : " << *entry;
- // Start by setting the properties that determine the model_type.
- entry->PutSpecifics(entry->GetServerSpecifics());
- // Clear the previous server specifics now that we're applying successfully.
- entry->PutBaseServerSpecifics(sync_pb::EntitySpecifics());
- entry->PutIsDir(entry->GetServerIsDir());
- // This strange dance around the IS_DEL flag avoids problems when setting
- // the name.
- // TODO(chron): Is this still an issue? Unit test this codepath.
- if (entry->GetServerIsDel()) {
- entry->PutIsDel(true);
- } else {
- entry->PutNonUniqueName(entry->GetServerNonUniqueName());
- entry->PutParentId(entry->GetServerParentId());
- entry->PutUniquePosition(entry->GetServerUniquePosition());
- entry->PutIsDel(false);
- }
-
- entry->PutCtime(entry->GetServerCtime());
- entry->PutMtime(entry->GetServerMtime());
- entry->PutBaseVersion(entry->GetServerVersion());
- entry->PutIsDel(entry->GetServerIsDel());
- entry->PutIsUnappliedUpdate(false);
-}
-
-VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
- syncable::Id id = entry->GetId();
- if (id == entry->GetParentId()) {
- CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
- // If the root becomes unsynced it can cause us problems.
- LOG(ERROR) << "Root item became unsynced " << *entry;
- return VERIFY_UNSYNCABLE;
- }
- if (entry->IsRoot()) {
- LOG(ERROR) << "Permanent item became unsynced " << *entry;
- return VERIFY_UNSYNCABLE;
- }
- if (entry->GetIsDel() && !entry->GetId().ServerKnows()) {
- // Drop deleted uncommitted entries.
- return VERIFY_UNSYNCABLE;
- }
- return VERIFY_OK;
-}
-
-void MarkDeletedChildrenSynced(
- syncable::Directory* dir,
- syncable::BaseWriteTransaction* trans,
- std::set<syncable::Id>* deleted_folders) {
- // There's two options here.
- // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
- // of the deleted folders.
- // 2. Take each folder and do a tree walk of all entries underneath it.
- // #2 has a lower big O cost, but writing code to limit the time spent inside
- // the transaction during each step is simpler with 1. Changing this decision
- // may be sensible if this code shows up in profiling.
- if (deleted_folders->empty())
- return;
- Directory::Metahandles handles;
- dir->GetUnsyncedMetaHandles(trans, &handles);
- if (handles.empty())
- return;
- Directory::Metahandles::iterator it;
- for (it = handles.begin() ; it != handles.end() ; ++it) {
- syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
- if (!entry.GetIsUnsynced() || !entry.GetIsDel())
- continue;
- syncable::Id id = entry.GetParentId();
- while (id != trans->root_id()) {
- if (deleted_folders->find(id) != deleted_folders->end()) {
- // We've synced the deletion of this deleted entries parent.
- entry.PutIsUnsynced(false);
- break;
- }
- Entry parent(trans, GET_BY_ID, id);
- if (!parent.good() || !parent.GetIsDel())
- break;
- id = parent.GetParentId();
- }
- }
-}
-
-VerifyResult VerifyNewEntry(
- const sync_pb::SyncEntity& update,
- syncable::Entry* target,
- const bool deleted) {
- if (target->good()) {
- // Not a new update.
- return VERIFY_UNDECIDED;
- }
- if (deleted) {
- // Deletion of an item we've never seen can be ignored.
- return VERIFY_SKIP;
- }
-
- return VERIFY_SUCCESS;
-}
-
-// Assumes we have an existing entry; check here for updates that break
-// consistency rules.
-VerifyResult VerifyUpdateConsistency(
- syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& update,
- const bool deleted,
- const bool is_directory,
- ModelType model_type,
- syncable::ModelNeutralMutableEntry* target) {
-
- CHECK(target->good());
- const syncable::Id& update_id = SyncableIdFromProto(update.id_string());
-
- // If the update is a delete, we don't really need to worry at this stage.
- if (deleted)
- return VERIFY_SUCCESS;
-
- if (model_type == UNSPECIFIED) {
- // This update is to an item of a datatype we don't recognize. The server
- // shouldn't have sent it to us. Throw it on the ground.
- return VERIFY_SKIP;
- }
-
- if (target->GetServerVersion() > 0) {
- // Then we've had an update for this entry before.
- if (is_directory != target->GetServerIsDir() ||
- model_type != target->GetServerModelType()) {
- if (target->GetIsDel()) { // If we've deleted the item, we don't care.
- return VERIFY_SKIP;
- } else {
- LOG(ERROR) << "Server update doesn't agree with previous updates. ";
- LOG(ERROR) << " Entry: " << *target;
- LOG(ERROR) << " Update: "
- << SyncerProtoUtil::SyncEntityDebugString(update);
- return VERIFY_FAIL;
- }
- }
-
- if (!deleted && (target->GetId() == update_id) &&
- (target->GetServerIsDel() ||
- (!target->GetIsUnsynced() && target->GetIsDel() &&
- target->GetBaseVersion() > 0))) {
- // An undelete. The latter case in the above condition is for
- // when the server does not give us an update following the
- // commit of a delete, before undeleting.
- // Undeletion is common for items that reuse the client-unique tag.
- VerifyResult result = VerifyUndelete(trans, update, target);
- if (VERIFY_UNDECIDED != result)
- return result;
- }
- }
- if (target->GetBaseVersion() > 0) {
- // We've committed this update in the past.
- if (is_directory != target->GetIsDir() ||
- model_type != target->GetModelType()) {
- LOG(ERROR) << "Server update doesn't agree with committed item. ";
- LOG(ERROR) << " Entry: " << *target;
- LOG(ERROR) << " Update: "
- << SyncerProtoUtil::SyncEntityDebugString(update);
- return VERIFY_FAIL;
- }
- if (target->GetId() == update_id) {
- if (target->GetServerVersion() > update.version()) {
- LOG(WARNING) << "We've already seen a more recent version.";
- LOG(WARNING) << " Entry: " << *target;
- LOG(WARNING) << " Update: "
- << SyncerProtoUtil::SyncEntityDebugString(update);
- return VERIFY_SKIP;
- }
- }
- }
- return VERIFY_SUCCESS;
-}
-
-// Assumes we have an existing entry; verify an update that seems to be
-// expressing an 'undelete'
-VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& update,
- syncable::ModelNeutralMutableEntry* target) {
- // TODO(nick): We hit this path for items deleted items that the server
- // tells us to re-create; only deleted items with positive base versions
- // will hit this path. However, it's not clear how such an undeletion
- // would actually succeed on the server; in the protocol, a base
- // version of 0 is required to undelete an object. This codepath
- // should be deprecated in favor of client-tag style undeletion
- // (where items go to version 0 when they're deleted), or else
- // removed entirely (if this type of undeletion is indeed impossible).
- CHECK(target->good());
- DVLOG(1) << "Server update is attempting undelete. " << *target
- << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
- // Move the old one aside and start over. It's too tricky to get the old one
- // back into a state that would pass CheckTreeInvariants().
- if (target->GetIsDel()) {
- if (target->GetUniqueClientTag().empty())
- LOG(WARNING) << "Doing move-aside undeletion on client-tagged item.";
- target->PutId(trans->directory()->NextId());
- target->PutUniqueClientTag(std::string());
- target->PutBaseVersion(CHANGES_VERSION);
- target->PutServerVersion(0);
- return VERIFY_SUCCESS;
- }
- if (update.version() < target->GetServerVersion()) {
- LOG(WARNING) << "Update older than current server version for "
- << *target << " Update:"
- << SyncerProtoUtil::SyncEntityDebugString(update);
- return VERIFY_SUCCESS; // Expected in new sync protocol.
- }
- return VERIFY_UNDECIDED;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/syncer_util.h b/chromium/sync/engine/syncer_util.h
deleted file mode 100644
index 575ab11d37e..00000000000
--- a/chromium/sync/engine/syncer_util.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Utility functions manipulating syncable::Entries, intended for use by the
-// syncer.
-
-#ifndef SYNC_ENGINE_SYNCER_UTIL_H_
-#define SYNC_ENGINE_SYNCER_UTIL_H_
-
-#include <set>
-#include <string>
-#include <vector>
-
-#include "sync/engine/syncer.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-
-namespace sync_pb {
-class SyncEntity;
-} // namespace sync_pb
-
-namespace syncer {
-
-namespace syncable {
-class BaseTransaction;
-class ModelNeutralWriteTransaction;
-} // namespace syncable
-
-class Cryptographer;
-
-// If the server sent down a client-tagged entry, or an entry whose
-// commit response was lost, it is necessary to update a local entry
-// with an ID that doesn't match the ID of the update. Here, we
-// find the ID of such an entry, if it exists. This function may
-// determine that |server_entry| should be dropped; if so, it returns
-// the null ID -- callers must handle this case. When update application
-// should proceed normally with a new local entry, this function will
-// return server_entry.id(); the caller must create an entry with that
-// ID. This function does not alter the database.
-syncable::Id FindLocalIdToUpdate(
- syncable::BaseTransaction* trans,
- const sync_pb::SyncEntity& server_entry);
-
-UpdateAttemptResponse AttemptToUpdateEntry(
- syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- Cryptographer* cryptographer);
-
-// Returns the most accurate position information available in this update. It
-// prefers to use the unique_position() field, but will fall back to using the
-// int64-based position_in_parent if necessary.
-//
-// The suffix parameter is the unique bookmark tag for the item being updated.
-//
-// Will return an invalid position if no valid position can be constructed, or
-// if this type does not support positioning.
-UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
- const std::string& suffix);
-
-// Fetch the cache_guid and item_id-based unique bookmark tag from an update.
-// Will return an empty string if someting unexpected happens.
-std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update);
-
-// Pass in name to avoid redundant UTF8 conversion.
-void UpdateServerFieldsFromUpdate(
- syncable::ModelNeutralMutableEntry* local_entry,
- const sync_pb::SyncEntity& server_entry,
- const std::string& name);
-
-// Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
- const syncable::Id& id);
-
-// This function is called on an entry when we can update the user-facing data
-// from the server data.
-void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
- syncable::MutableEntry* entry);
-
-VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry);
-
-VerifyResult VerifyNewEntry(const sync_pb::SyncEntity& update,
- syncable::Entry* target,
- const bool deleted);
-
-// Assumes we have an existing entry; check here for updates that break
-// consistency rules.
-VerifyResult VerifyUpdateConsistency(
- syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& update,
- const bool deleted,
- const bool is_directory,
- ModelType model_type,
- syncable::ModelNeutralMutableEntry* target);
-
-// Assumes we have an existing entry; verify an update that seems to be
-// expressing an 'undelete'
-VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
- const sync_pb::SyncEntity& update,
- syncable::ModelNeutralMutableEntry* target);
-
-void MarkDeletedChildrenSynced(
- syncable::Directory* dir,
- syncable::BaseWriteTransaction* trans,
- std::set<syncable::Id>* deleted_folders);
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_SYNCER_UTIL_H_
diff --git a/chromium/sync/engine/traffic_logger.cc b/chromium/sync/engine/traffic_logger.cc
deleted file mode 100644
index 13b1c647981..00000000000
--- a/chromium/sync/engine/traffic_logger.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/traffic_logger.h"
-
-#include <string>
-
-#include "base/json/json_writer.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-namespace {
-template <class T>
-void LogData(const T& data,
- base::DictionaryValue* (*to_dictionary_value)(const T&, bool),
- const std::string& description) {
- if (::logging::DEBUG_MODE && VLOG_IS_ON(1)) {
- scoped_ptr<base::DictionaryValue> value(
- (*to_dictionary_value)(data, true /* include_specifics */));
- std::string message;
- base::JSONWriter::WriteWithOptions(value.get(),
- base::JSONWriter::OPTIONS_PRETTY_PRINT,
- &message);
- DVLOG(1) << "\n" << description << "\n" << message << "\n";
- }
-}
-} // namespace
-
-void LogClientToServerMessage(const sync_pb::ClientToServerMessage& msg) {
- LogData(msg, &ClientToServerMessageToValue,
- "******Client To Server Message******");
-}
-
-void LogClientToServerResponse(
- const sync_pb::ClientToServerResponse& response) {
- LogData(response, &ClientToServerResponseToValue,
- "******Server Response******");
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/traffic_logger.h b/chromium/sync/engine/traffic_logger.h
deleted file mode 100644
index 02053951c31..00000000000
--- a/chromium/sync/engine/traffic_logger.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file has the functions to log all the sync related HTTP communication.
-// To get the log run a debug build of chrome with the flag
-// --vmodule=traffic_logger=1.
-
-#ifndef CHROME_BROWSER_SYNC_ENGINE_TRAFFIC_LOGGER_H_
-#define CHROME_BROWSER_SYNC_ENGINE_TRAFFIC_LOGGER_H_
-
-namespace sync_pb {
-class ClientToServerResponse;
-class ClientToServerMessage;
-} // namespace sync_pb
-
-namespace syncer {
-
-void LogClientToServerMessage(const sync_pb::ClientToServerMessage& msg);
-void LogClientToServerResponse(
- const sync_pb::ClientToServerResponse& response);
-
-} // namespace syncer
-
-#endif // CHROME_BROWSER_SYNC_ENGINE_TRAFFIC_LOGGER_H_
diff --git a/chromium/sync/engine/traffic_recorder.cc b/chromium/sync/engine/traffic_recorder.cc
deleted file mode 100644
index d3f2347c6f6..00000000000
--- a/chromium/sync/engine/traffic_recorder.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/traffic_recorder.h"
-
-#include "base/json/json_writer.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/util/time.h"
-
-namespace syncer {
-
-// Return current time.
-base::Time TrafficRecorder::GetTime() {
- return base::Time::Now();
-}
-
-TrafficRecorder::TrafficRecord::TrafficRecord(const std::string& message,
- TrafficMessageType message_type,
- bool truncated,
- base::Time time) :
- message(message),
- message_type(message_type),
- truncated(truncated),
- timestamp(time) {
-}
-
-TrafficRecorder::TrafficRecord::TrafficRecord()
- : message_type(UNKNOWN_MESSAGE_TYPE),
- truncated(false) {
-}
-
-TrafficRecorder::TrafficRecord::~TrafficRecord() {
-}
-
-TrafficRecorder::TrafficRecorder(unsigned int max_messages,
- unsigned int max_message_size)
- : max_messages_(max_messages),
- max_message_size_(max_message_size) {
-}
-
-TrafficRecorder::~TrafficRecorder() {
-}
-
-namespace {
-const char* GetMessageTypeString(TrafficRecorder::TrafficMessageType type) {
- switch(type) {
- case TrafficRecorder::CLIENT_TO_SERVER_MESSAGE:
- return "Request";
- case TrafficRecorder::CLIENT_TO_SERVER_RESPONSE:
- return "Response";
- default:
- NOTREACHED();
- return "";
- }
-}
-}
-
-base::DictionaryValue* TrafficRecorder::TrafficRecord::ToValue() const {
- scoped_ptr<base::DictionaryValue> value;
- if (truncated) {
- value.reset(new base::DictionaryValue());
- value->SetString("message_type",
- GetMessageTypeString(message_type));
- value->SetBoolean("truncated", true);
- } else if (message_type == TrafficRecorder::CLIENT_TO_SERVER_MESSAGE) {
- sync_pb::ClientToServerMessage message_proto;
- if (message_proto.ParseFromString(message))
- value.reset(
- ClientToServerMessageToValue(message_proto,
- false /* include_specifics */));
- } else if (message_type == TrafficRecorder::CLIENT_TO_SERVER_RESPONSE) {
- sync_pb::ClientToServerResponse message_proto;
- if (message_proto.ParseFromString(message))
- value.reset(
- ClientToServerResponseToValue(message_proto,
- false /* include_specifics */));
- } else {
- NOTREACHED();
- }
-
- value->SetString("timestamp", GetTimeDebugString(timestamp));
-
- return value.release();
-}
-
-
-base::ListValue* TrafficRecorder::ToValue() const {
- scoped_ptr<base::ListValue> value(new base::ListValue());
- std::deque<TrafficRecord>::const_iterator it;
- for (it = records_.begin(); it != records_.end(); ++it) {
- const TrafficRecord& record = *it;
- value->Append(record.ToValue());
- }
-
- return value.release();
-}
-
-
-void TrafficRecorder::AddTrafficToQueue(TrafficRecord* record) {
- records_.resize(records_.size() + 1);
- std::swap(records_.back(), *record);
-
- // We might have more records than our limit.
- // Maintain the size invariant by deleting items.
- while (records_.size() > max_messages_) {
- records_.pop_front();
- }
-}
-
-void TrafficRecorder::StoreProtoInQueue(
- const ::google::protobuf::MessageLite& msg,
- TrafficMessageType type) {
- bool truncated = false;
- std::string message;
- if (static_cast<unsigned int>(msg.ByteSize()) >= max_message_size_) {
- // TODO(lipalani): Trim the specifics to fit in size.
- truncated = true;
- } else {
- msg.SerializeToString(&message);
- }
-
- TrafficRecord record(message, type, truncated, GetTime());
- AddTrafficToQueue(&record);
-}
-
-void TrafficRecorder::RecordClientToServerMessage(
- const sync_pb::ClientToServerMessage& msg) {
- StoreProtoInQueue(msg, CLIENT_TO_SERVER_MESSAGE);
-}
-
-void TrafficRecorder::RecordClientToServerResponse(
- const sync_pb::ClientToServerResponse& response) {
- StoreProtoInQueue(response, CLIENT_TO_SERVER_RESPONSE);
-}
-
-} // namespace syncer
-
diff --git a/chromium/sync/engine/traffic_recorder.h b/chromium/sync/engine/traffic_recorder.h
deleted file mode 100644
index 55ee0b3ed70..00000000000
--- a/chromium/sync/engine/traffic_recorder.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_ENGINE_TRAFFIC_RECORDER_H_
-#define CHROME_BROWSER_SYNC_ENGINE_TRAFFIC_RECORDER_H_
-
-#include <deque>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/gtest_prod_util.h"
-#include "base/time/time.h"
-#include "base/values.h"
-#include "sync/base/sync_export.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace sync_pb {
-class ClientToServerResponse;
-class ClientToServerMessage;
-}
-
-namespace syncer {
-
-class SYNC_EXPORT_PRIVATE TrafficRecorder {
- public:
- enum TrafficMessageType {
- CLIENT_TO_SERVER_MESSAGE,
- CLIENT_TO_SERVER_RESPONSE,
- UNKNOWN_MESSAGE_TYPE
- };
-
- struct SYNC_EXPORT_PRIVATE TrafficRecord {
- // The serialized message.
- std::string message;
- TrafficMessageType message_type;
- // If the message is too big to be kept in memory then it should be
- // truncated. For now the entire message is omitted if it is too big.
- // TODO(lipalani): Truncate the specifics to fit within size.
- bool truncated;
-
- TrafficRecord(const std::string& message,
- TrafficMessageType message_type,
- bool truncated,
- base::Time time);
- TrafficRecord();
- ~TrafficRecord();
- base::DictionaryValue* ToValue() const;
-
- // Time of record creation.
- base::Time timestamp;
- };
-
- TrafficRecorder(unsigned int max_messages, unsigned int max_message_size);
- virtual ~TrafficRecorder();
-
- void RecordClientToServerMessage(const sync_pb::ClientToServerMessage& msg);
- void RecordClientToServerResponse(
- const sync_pb::ClientToServerResponse& response);
- base::ListValue* ToValue() const;
-
- const std::deque<TrafficRecord>& records() {
- return records_;
- }
-
- private:
- void AddTrafficToQueue(TrafficRecord* record);
- void StoreProtoInQueue(const ::google::protobuf::MessageLite& msg,
- TrafficMessageType type);
-
- // Method to get record creation time.
- virtual base::Time GetTime();
-
- // Maximum number of messages stored in the queue.
- unsigned int max_messages_;
-
- // Maximum size of each message.
- unsigned int max_message_size_;
- std::deque<TrafficRecord> records_;
- DISALLOW_COPY_AND_ASSIGN(TrafficRecorder);
-};
-
-} // namespace syncer
-
-#endif // CHROME_BROWSER_SYNC_ENGINE_TRAFFIC_RECORDER_H_
-
diff --git a/chromium/sync/engine/traffic_recorder_unittest.cc b/chromium/sync/engine/traffic_recorder_unittest.cc
deleted file mode 100644
index 363a3c8c700..00000000000
--- a/chromium/sync/engine/traffic_recorder_unittest.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/traffic_recorder.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "base/values.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/util/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-const unsigned int kMaxMessages = 10;
-const unsigned int kMaxMessageSize = 5 * 1024;
-
-// Ensure the number of records don't exceed |kMaxMessages|.
-TEST(TrafficRecorderTest, MaxRecordsTest) {
- TrafficRecorder recorder(kMaxMessages, kMaxMessageSize);
- sync_pb::ClientToServerResponse response;
-
- for (unsigned int i = 0; i < 2*kMaxMessages; ++i)
- recorder.RecordClientToServerResponse(response);
-
- EXPECT_EQ(recorder.records().size(), kMaxMessages);
-}
-
-// Ensure records with size greater than |kMaxMessageSize| are truncated.
-TEST(TrafficRecorderTest, MaxMessageSizeTest) {
- sync_pb::ClientToServerResponse response;
-
- sync_pb::ClientToServerResponse::Error* error = response.mutable_error();
- std::string error_description(kMaxMessageSize * 2, 'a');
- error->set_error_description(error_description);
-
- TrafficRecorder recorder(kMaxMessages, kMaxMessageSize);
- recorder.RecordClientToServerResponse(response);
-
- TrafficRecorder::TrafficRecord record = recorder.records().front();
- EXPECT_TRUE(record.truncated);
- EXPECT_TRUE(record.message.empty());
-}
-
-// Test implementation of TrafficRecorder.
-class TestTrafficRecorder : public TrafficRecorder {
- public:
- TestTrafficRecorder(unsigned int max_messages, unsigned int max_message_size)
- : TrafficRecorder(max_messages, max_message_size) {
- set_time(0);
- }
- virtual ~TestTrafficRecorder() {}
-
- virtual base::Time GetTime() OVERRIDE {
- return time_;
- }
-
- void set_time(int64 time) {
- time_ = ProtoTimeToTime(time);
- }
-
- void set_time(base::Time time) {
- time_ = time;
- }
-
- private:
- base::Time time_;
-};
-
-// Ensure that timestamp is recorded correctly in traffic record.
-TEST(TrafficRecorderTest, TimestampTest) {
- sync_pb::ClientToServerResponse response;
-
- TestTrafficRecorder recorder(kMaxMessages, kMaxMessageSize);
- recorder.set_time(3);
- recorder.RecordClientToServerResponse(response);
-
- base::Time expect_time = ProtoTimeToTime(3);
- TrafficRecorder::TrafficRecord record = recorder.records().front();
- EXPECT_EQ(expect_time, record.timestamp);
-}
-
-// Ensure that timestamps are recorded correctly in traffic records.
-TEST(TrafficRecorderTest, MultipleTimestampTest) {
- sync_pb::ClientToServerResponse response;
- base::Time sample_time_1 = ProtoTimeToTime(GG_INT64_C(1359484676659));
- base::Time sample_time_2 = ProtoTimeToTime(GG_INT64_C(135948467665932));
-
- TestTrafficRecorder recorder(kMaxMessages, kMaxMessageSize);
- recorder.set_time(sample_time_1);
- recorder.RecordClientToServerResponse(response);
- recorder.set_time(sample_time_2);
- recorder.RecordClientToServerResponse(response);
-
- TrafficRecorder::TrafficRecord record_1 = recorder.records().front();
- TrafficRecorder::TrafficRecord record_2 = recorder.records().back();
- EXPECT_EQ(sample_time_1, record_1.timestamp);
- EXPECT_EQ(sample_time_2, record_2.timestamp);
-}
-
-// Ensure that timestamp is added to ListValue of DictionaryValues in ToValue().
-TEST(TrafficRecorderTest, ToValueTimestampTest) {
- sync_pb::ClientToServerResponse response;
- base::Time sample_time = ProtoTimeToTime(GG_INT64_C(135948467665932));
- std::string expect_time_str = GetTimeDebugString(sample_time);
-
- TestTrafficRecorder recorder(kMaxMessages, kMaxMessageSize);
- recorder.set_time(sample_time);
- recorder.RecordClientToServerResponse(response);
-
- scoped_ptr<base::ListValue> value;
- value.reset(recorder.ToValue());
-
- base::DictionaryValue* record_value;
- std::string time_str;
-
- ASSERT_TRUE(value->GetDictionary(0, &record_value));
- EXPECT_TRUE(record_value->GetString("timestamp", &time_str));
- EXPECT_EQ(expect_time_str, time_str);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/update_applicator.cc b/chromium/sync/engine/update_applicator.cc
deleted file mode 100644
index e8731cecf4c..00000000000
--- a/chromium/sync/engine/update_applicator.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/update_applicator.h"
-
-#include <vector>
-
-#include "base/logging.h"
-#include "sync/engine/syncer_util.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-using std::vector;
-
-namespace syncer {
-
-using syncable::ID;
-
-UpdateApplicator::UpdateApplicator(Cryptographer* cryptographer)
- : cryptographer_(cryptographer),
- updates_applied_(0),
- encryption_conflicts_(0),
- hierarchy_conflicts_(0) {
-}
-
-UpdateApplicator::~UpdateApplicator() {
-}
-
-// Attempt to apply all updates, using multiple passes if necessary.
-//
-// Some updates must be applied in order. For example, children must be created
-// after their parent folder is created. This function runs an O(n^2) algorithm
-// that will keep trying until there is nothing left to apply, or it stops
-// making progress, which would indicate that the hierarchy is invalid.
-//
-// The update applicator also has to deal with simple conflicts, which occur
-// when an item is modified on both the server and the local model. We remember
-// their IDs so they can be passed to the conflict resolver after all the other
-// applications are complete.
-//
-// Finally, there are encryption conflicts, which can occur when we don't have
-// access to all the Nigori keys. There's nothing we can do about them here.
-void UpdateApplicator::AttemptApplications(
- syncable::WriteTransaction* trans,
- const std::vector<int64>& handles) {
- std::vector<int64> to_apply = handles;
-
- DVLOG(1) << "UpdateApplicator running over " << to_apply.size() << " items.";
- while (!to_apply.empty()) {
- std::vector<int64> to_reapply;
-
- for (std::vector<int64>::iterator i = to_apply.begin();
- i != to_apply.end(); ++i) {
- syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *i);
- UpdateAttemptResponse result = AttemptToUpdateEntry(
- trans, &entry, cryptographer_);
-
- switch (result) {
- case SUCCESS:
- updates_applied_++;
- break;
- case CONFLICT_SIMPLE:
- simple_conflict_ids_.insert(entry.GetId());
- break;
- case CONFLICT_ENCRYPTION:
- encryption_conflicts_++;
- break;
- case CONFLICT_HIERARCHY:
- // The decision to classify these as hierarchy conflcits is tentative.
- // If we make any progress this round, we'll clear the hierarchy
- // conflict count and attempt to reapply these updates.
- to_reapply.push_back(*i);
- break;
- default:
- NOTREACHED();
- break;
- }
- }
-
- if (to_reapply.size() == to_apply.size()) {
- // We made no progress. Must be stubborn hierarchy conflicts.
- hierarchy_conflicts_ = to_apply.size();
- break;
- }
-
- // We made some progress, so prepare for what might be another iteration.
- // If everything went well, to_reapply will be empty and we'll break out on
- // the while condition.
- to_apply.swap(to_reapply);
- to_reapply.clear();
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/engine/update_applicator.h b/chromium/sync/engine/update_applicator.h
deleted file mode 100644
index ff8fa157163..00000000000
--- a/chromium/sync/engine/update_applicator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// An UpdateApplicator is used to iterate over a number of unapplied updates,
-// applying them to the client using the given syncer session.
-//
-// UpdateApplicator might resemble an iterator, but it actually keeps retrying
-// failed updates until no remaining updates can be successfully applied.
-
-#ifndef SYNC_ENGINE_UPDATE_APPLICATOR_H_
-#define SYNC_ENGINE_UPDATE_APPLICATOR_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/port.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/sessions/status_controller.h"
-
-namespace syncer {
-
-namespace sessions {
-class StatusController;
-}
-
-namespace syncable {
-class WriteTransaction;
-class Entry;
-}
-
-class ConflictResolver;
-class Cryptographer;
-
-class UpdateApplicator {
- public:
- UpdateApplicator(Cryptographer* cryptographer);
- ~UpdateApplicator();
-
- // Attempt to apply the specified updates.
- void AttemptApplications(syncable::WriteTransaction* trans,
- const std::vector<int64>& handles);
-
- int updates_applied() {
- return updates_applied_;
- }
-
- int encryption_conflicts() {
- return encryption_conflicts_;
- }
-
- int hierarchy_conflicts() {
- return hierarchy_conflicts_;
- }
-
- const std::set<syncable::Id>& simple_conflict_ids() {
- return simple_conflict_ids_;
- }
-
- private:
- // If true, AttemptOneApplication will skip over |entry| and return true.
- bool SkipUpdate(const syncable::Entry& entry);
-
- // Used to decrypt sensitive sync nodes.
- Cryptographer* cryptographer_;
-
- DISALLOW_COPY_AND_ASSIGN(UpdateApplicator);
-
- int updates_applied_;
- int encryption_conflicts_;
- int hierarchy_conflicts_;
- std::set<syncable::Id> simple_conflict_ids_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_ENGINE_UPDATE_APPLICATOR_H_
diff --git a/chromium/sync/internal_api/DEPS b/chromium/sync/internal_api/DEPS
deleted file mode 100644
index a6e2e4e0fe0..00000000000
--- a/chromium/sync/internal_api/DEPS
+++ /dev/null
@@ -1,13 +0,0 @@
-include_rules = [
- "+net",
- "+sync/base",
- "+sync/engine",
- "+sync/js",
- "+sync/notifier",
- "+sync/protocol",
- "+sync/sessions",
- "+sync/syncable",
- "+sync/test",
- "+sync/util",
- "+third_party/zlib", # For UniquePosition compression
-]
diff --git a/chromium/sync/internal_api/README b/chromium/sync/internal_api/README
deleted file mode 100644
index 32987bbab9d..00000000000
--- a/chromium/sync/internal_api/README
+++ /dev/null
@@ -1,32 +0,0 @@
-This file defines the "sync API", an interface to the syncer
-backend that exposes (1) the core functionality of maintaining a consistent
-local snapshot of a hierarchical object set; (2) a means to transactionally
-access and modify those objects; (3) a means to control client/server
-synchronization tasks, namely: pushing local object modifications to a
-server, pulling nonlocal object modifications from a server to this client,
-and resolving conflicts that may arise between the two; and (4) an
-abstraction of some external functionality that is to be provided by the
-host environment.
-
-This interface is used as the entry point into the syncer backend
-when the backend is compiled as a library and embedded in another
-application. A goal for this interface layer is to depend on very few
-external types, so that an application can use the sync backend
-without introducing a dependency on specific types. A non-goal is to
-have binary compatibility across versions or compilers; this allows the
-interface to use C++ classes. An application wishing to use the sync API
-should ideally compile the syncer backend and this API as part of the
-application's own build, to avoid e.g. mismatches in calling convention,
-structure padding, or name mangling that could arise if there were a
-compiler mismatch.
-
-The schema of the objects in the sync domain is based on the model, which
-is essentially a hierarchy of items and folders similar to a filesystem,
-but with a few important differences. The sync API contains fields
-such as URL to easily allow the embedding application to store web
-browser bookmarks. Also, the sync API allows duplicate titles in a parent.
-Consequently, it does not support looking up an object by title
-and parent, since such a lookup is not uniquely determined. Lastly,
-unlike a filesystem model, objects in the Sync API model have a strict
-ordering within a parent; the position is manipulable by callers, and
-children of a node can be enumerated in the order of their position.
diff --git a/chromium/sync/internal_api/base_node.cc b/chromium/sync/internal_api/base_node.cc
deleted file mode 100644
index 3f8f2f0aed2..00000000000
--- a/chromium/sync/internal_api/base_node.cc
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base_node.h"
-
-#include <stack>
-
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base_transaction.h"
-#include "sync/internal_api/syncapi_internal.h"
-#include "sync/protocol/app_specifics.pb.h"
-#include "sync/protocol/autofill_specifics.pb.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/theme_specifics.pb.h"
-#include "sync/protocol/typed_url_specifics.pb.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/util/time.h"
-
-using sync_pb::AutofillProfileSpecifics;
-
-namespace syncer {
-
-using syncable::SPECIFICS;
-
-// Helper function to look up the int64 metahandle of an object given the ID
-// string.
-static int64 IdToMetahandle(syncable::BaseTransaction* trans,
- const syncable::Id& id) {
- syncable::Entry entry(trans, syncable::GET_BY_ID, id);
- if (!entry.good())
- return kInvalidId;
- return entry.GetMetahandle();
-}
-
-static bool EndsWithSpace(const std::string& string) {
- return !string.empty() && *string.rbegin() == ' ';
-}
-
-// In the reverse direction, if a server name matches the pattern of a
-// server-illegal name followed by one or more spaces, remove the trailing
-// space.
-static void ServerNameToSyncAPIName(const std::string& server_name,
- std::string* out) {
- CHECK(out);
- int length_to_copy = server_name.length();
- if (IsNameServerIllegalAfterTrimming(server_name) &&
- EndsWithSpace(server_name)) {
- --length_to_copy;
- }
- *out = std::string(server_name.c_str(), length_to_copy);
-}
-
-BaseNode::BaseNode() : password_data_(new sync_pb::PasswordSpecificsData) {}
-
-BaseNode::~BaseNode() {}
-
-bool BaseNode::DecryptIfNecessary() {
- if (!GetEntry()->GetUniqueServerTag().empty())
- return true; // Ignore unique folders.
- const sync_pb::EntitySpecifics& specifics =
- GetEntry()->GetSpecifics();
- if (specifics.has_password()) {
- // Passwords have their own legacy encryption structure.
- scoped_ptr<sync_pb::PasswordSpecificsData> data(DecryptPasswordSpecifics(
- specifics, GetTransaction()->GetCryptographer()));
- if (!data) {
- LOG(ERROR) << "Failed to decrypt password specifics.";
- return false;
- }
- password_data_.swap(data);
- return true;
- }
-
- // We assume any node with the encrypted field set has encrypted data and if
- // not we have no work to do, with the exception of bookmarks. For bookmarks
- // we must make sure the bookmarks data has the title field supplied. If not,
- // we fill the unencrypted_data_ with a copy of the bookmark specifics that
- // follows the new bookmarks format.
- if (!specifics.has_encrypted()) {
- if (GetModelType() == BOOKMARKS &&
- !specifics.bookmark().has_title() &&
- !GetTitle().empty()) { // Last check ensures this isn't a new node.
- // We need to fill in the title.
- std::string title = GetTitle();
- std::string server_legal_title;
- SyncAPINameToServerName(title, &server_legal_title);
- DVLOG(1) << "Reading from legacy bookmark, manually returning title "
- << title;
- unencrypted_data_.CopyFrom(specifics);
- unencrypted_data_.mutable_bookmark()->set_title(
- server_legal_title);
- }
- return true;
- }
-
- const sync_pb::EncryptedData& encrypted = specifics.encrypted();
- std::string plaintext_data = GetTransaction()->GetCryptographer()->
- DecryptToString(encrypted);
- if (plaintext_data.length() == 0) {
- LOG(ERROR) << "Failed to decrypt encrypted node of type "
- << ModelTypeToString(GetModelType()) << ".";
- // Debugging for crbug.com/123223. We failed to decrypt the data, which
- // means we applied an update without having the key or lost the key at a
- // later point.
- CHECK(false);
- return false;
- } else if (!unencrypted_data_.ParseFromString(plaintext_data)) {
- // Debugging for crbug.com/123223. We should never succeed in decrypting
- // but fail to parse into a protobuf.
- CHECK(false);
- return false;
- }
- DVLOG(2) << "Decrypted specifics of type "
- << ModelTypeToString(GetModelType())
- << " with content: " << plaintext_data;
- return true;
-}
-
-const sync_pb::EntitySpecifics& BaseNode::GetUnencryptedSpecifics(
- const syncable::Entry* entry) const {
- const sync_pb::EntitySpecifics& specifics = entry->GetSpecifics();
- if (specifics.has_encrypted()) {
- DCHECK_NE(GetModelTypeFromSpecifics(unencrypted_data_), UNSPECIFIED);
- return unencrypted_data_;
- } else {
- // Due to the change in bookmarks format, we need to check to see if this is
- // a legacy bookmarks (and has no title field in the proto). If it is, we
- // return the unencrypted_data_, which was filled in with the title by
- // DecryptIfNecessary().
- if (GetModelType() == BOOKMARKS) {
- const sync_pb::BookmarkSpecifics& bookmark_specifics =
- specifics.bookmark();
- if (bookmark_specifics.has_title() ||
- GetTitle().empty() || // For the empty node case
- !GetEntry()->GetUniqueServerTag().empty()) {
- // It's possible we previously had to convert and set
- // |unencrypted_data_| but then wrote our own data, so we allow
- // |unencrypted_data_| to be non-empty.
- return specifics;
- } else {
- DCHECK_EQ(GetModelTypeFromSpecifics(unencrypted_data_), BOOKMARKS);
- return unencrypted_data_;
- }
- } else {
- DCHECK_EQ(GetModelTypeFromSpecifics(unencrypted_data_), UNSPECIFIED);
- return specifics;
- }
- }
-}
-
-int64 BaseNode::GetParentId() const {
- return IdToMetahandle(GetTransaction()->GetWrappedTrans(),
- GetEntry()->GetParentId());
-}
-
-int64 BaseNode::GetId() const {
- return GetEntry()->GetMetahandle();
-}
-
-base::Time BaseNode::GetModificationTime() const {
- return GetEntry()->GetMtime();
-}
-
-bool BaseNode::GetIsFolder() const {
- return GetEntry()->GetIsDir();
-}
-
-std::string BaseNode::GetTitle() const {
- std::string result;
- // TODO(zea): refactor bookmarks to not need this functionality.
- if (BOOKMARKS == GetModelType() &&
- GetEntry()->GetSpecifics().has_encrypted()) {
- // Special case for legacy bookmarks dealing with encryption.
- ServerNameToSyncAPIName(GetBookmarkSpecifics().title(), &result);
- } else {
- ServerNameToSyncAPIName(GetEntry()->GetNonUniqueName(),
- &result);
- }
- return result;
-}
-
-bool BaseNode::HasChildren() const {
- syncable::Directory* dir = GetTransaction()->GetDirectory();
- syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
- return dir->HasChildren(trans, GetEntry()->GetId());
-}
-
-int64 BaseNode::GetPredecessorId() const {
- syncable::Id id_string = GetEntry()->GetPredecessorId();
- if (id_string.IsRoot())
- return kInvalidId;
- return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
-}
-
-int64 BaseNode::GetSuccessorId() const {
- syncable::Id id_string = GetEntry()->GetSuccessorId();
- if (id_string.IsRoot())
- return kInvalidId;
- return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
-}
-
-int64 BaseNode::GetFirstChildId() const {
- syncable::Id id_string = GetEntry()->GetFirstChildId();
- if (id_string.IsRoot())
- return kInvalidId;
- return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
-}
-
-void BaseNode::GetChildIds(std::vector<int64>* result) const {
- GetEntry()->GetChildHandles(result);
-}
-
-int BaseNode::GetTotalNodeCount() const {
- return GetEntry()->GetTotalNodeCount();
-}
-
-int BaseNode::GetPositionIndex() const {
- return GetEntry()->GetPositionIndex();
-}
-
-base::DictionaryValue* BaseNode::GetSummaryAsValue() const {
- base::DictionaryValue* node_info = new base::DictionaryValue();
- node_info->SetString("id", base::Int64ToString(GetId()));
- node_info->SetBoolean("isFolder", GetIsFolder());
- node_info->SetString("title", GetTitle());
- node_info->Set("type", ModelTypeToValue(GetModelType()));
- return node_info;
-}
-
-base::DictionaryValue* BaseNode::GetDetailsAsValue() const {
- base::DictionaryValue* node_info = GetSummaryAsValue();
- node_info->SetString(
- "modificationTime", GetTimeDebugString(GetModificationTime()));
- node_info->SetString("parentId", base::Int64ToString(GetParentId()));
- // Specifics are already in the Entry value, so no need to duplicate
- // it here.
- node_info->SetString("externalId", base::Int64ToString(GetExternalId()));
- if (GetEntry()->ShouldMaintainPosition() &&
- !GetEntry()->GetIsDel()) {
- node_info->SetString("successorId", base::Int64ToString(GetSuccessorId()));
- node_info->SetString(
- "predecessorId", base::Int64ToString(GetPredecessorId()));
- }
- if (GetEntry()->GetIsDir()) {
- node_info->SetString(
- "firstChildId", base::Int64ToString(GetFirstChildId()));
- }
- node_info->Set(
- "entry", GetEntry()->ToValue(GetTransaction()->GetCryptographer()));
- return node_info;
-}
-
-int64 BaseNode::GetExternalId() const {
- return GetEntry()->GetLocalExternalId();
-}
-
-const sync_pb::AppSpecifics& BaseNode::GetAppSpecifics() const {
- DCHECK_EQ(GetModelType(), APPS);
- return GetEntitySpecifics().app();
-}
-
-const sync_pb::AutofillSpecifics& BaseNode::GetAutofillSpecifics() const {
- DCHECK_EQ(GetModelType(), AUTOFILL);
- return GetEntitySpecifics().autofill();
-}
-
-const AutofillProfileSpecifics& BaseNode::GetAutofillProfileSpecifics() const {
- DCHECK_EQ(GetModelType(), AUTOFILL_PROFILE);
- return GetEntitySpecifics().autofill_profile();
-}
-
-const sync_pb::BookmarkSpecifics& BaseNode::GetBookmarkSpecifics() const {
- DCHECK_EQ(GetModelType(), BOOKMARKS);
- return GetEntitySpecifics().bookmark();
-}
-
-const sync_pb::NigoriSpecifics& BaseNode::GetNigoriSpecifics() const {
- DCHECK_EQ(GetModelType(), NIGORI);
- return GetEntitySpecifics().nigori();
-}
-
-const sync_pb::PasswordSpecificsData& BaseNode::GetPasswordSpecifics() const {
- DCHECK_EQ(GetModelType(), PASSWORDS);
- return *password_data_;
-}
-
-const sync_pb::ThemeSpecifics& BaseNode::GetThemeSpecifics() const {
- DCHECK_EQ(GetModelType(), THEMES);
- return GetEntitySpecifics().theme();
-}
-
-const sync_pb::TypedUrlSpecifics& BaseNode::GetTypedUrlSpecifics() const {
- DCHECK_EQ(GetModelType(), TYPED_URLS);
- return GetEntitySpecifics().typed_url();
-}
-
-const sync_pb::ExtensionSpecifics& BaseNode::GetExtensionSpecifics() const {
- DCHECK_EQ(GetModelType(), EXTENSIONS);
- return GetEntitySpecifics().extension();
-}
-
-const sync_pb::SessionSpecifics& BaseNode::GetSessionSpecifics() const {
- DCHECK_EQ(GetModelType(), SESSIONS);
- return GetEntitySpecifics().session();
-}
-
-const sync_pb::ManagedUserSettingSpecifics&
- BaseNode::GetManagedUserSettingSpecifics() const {
- DCHECK_EQ(GetModelType(), MANAGED_USER_SETTINGS);
- return GetEntitySpecifics().managed_user_setting();
-}
-
-const sync_pb::ManagedUserSpecifics& BaseNode::GetManagedUserSpecifics() const {
- DCHECK_EQ(GetModelType(), MANAGED_USERS);
- return GetEntitySpecifics().managed_user();
-}
-
-const sync_pb::DeviceInfoSpecifics& BaseNode::GetDeviceInfoSpecifics() const {
- DCHECK_EQ(GetModelType(), DEVICE_INFO);
- return GetEntitySpecifics().device_info();
-}
-
-const sync_pb::ExperimentsSpecifics& BaseNode::GetExperimentsSpecifics() const {
- DCHECK_EQ(GetModelType(), EXPERIMENTS);
- return GetEntitySpecifics().experiments();
-}
-
-const sync_pb::PriorityPreferenceSpecifics&
- BaseNode::GetPriorityPreferenceSpecifics() const {
- DCHECK_EQ(GetModelType(), PRIORITY_PREFERENCES);
- return GetEntitySpecifics().priority_preference();
-}
-
-const sync_pb::EntitySpecifics& BaseNode::GetEntitySpecifics() const {
- return GetUnencryptedSpecifics(GetEntry());
-}
-
-ModelType BaseNode::GetModelType() const {
- return GetEntry()->GetModelType();
-}
-
-void BaseNode::SetUnencryptedSpecifics(
- const sync_pb::EntitySpecifics& specifics) {
- ModelType type = GetModelTypeFromSpecifics(specifics);
- DCHECK_NE(UNSPECIFIED, type);
- if (GetModelType() != UNSPECIFIED) {
- DCHECK_EQ(GetModelType(), type);
- }
- unencrypted_data_.CopyFrom(specifics);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/base_transaction.cc b/chromium/sync/internal_api/base_transaction.cc
deleted file mode 100644
index cbfb246ed6a..00000000000
--- a/chromium/sync/internal_api/base_transaction.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base_transaction.h"
-
-#include "sync/syncable/directory.h"
-#include "sync/syncable/nigori_handler.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-//////////////////////////////////////////////////////////////////////////
-// BaseTransaction member definitions
-BaseTransaction::BaseTransaction(UserShare* share)
- : user_share_(share) {
- DCHECK(share && share->directory.get());
-}
-
-BaseTransaction::~BaseTransaction() {
-}
-
-Cryptographer* BaseTransaction::GetCryptographer() const {
- return GetDirectory()->GetCryptographer(this->GetWrappedTrans());
-}
-
-ModelTypeSet BaseTransaction::GetEncryptedTypes() const {
- return GetDirectory()->GetNigoriHandler()->GetEncryptedTypes(
- this->GetWrappedTrans());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/change_record.cc b/chromium/sync/internal_api/change_record.cc
deleted file mode 100644
index 4894b3ce9e2..00000000000
--- a/chromium/sync/internal_api/change_record.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/change_record.h"
-
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base_node.h"
-#include "sync/internal_api/public/read_node.h"
-#include "sync/protocol/proto_value_conversions.h"
-
-namespace syncer {
-
-ChangeRecord::ChangeRecord()
- : id(kInvalidId), action(ACTION_ADD) {}
-
-ChangeRecord::~ChangeRecord() {}
-
-base::DictionaryValue* ChangeRecord::ToValue() const {
- base::DictionaryValue* value = new base::DictionaryValue();
- std::string action_str;
- switch (action) {
- case ACTION_ADD:
- action_str = "Add";
- break;
- case ACTION_DELETE:
- action_str = "Delete";
- break;
- case ACTION_UPDATE:
- action_str = "Update";
- break;
- default:
- NOTREACHED();
- action_str = "Unknown";
- break;
- }
- value->SetString("action", action_str);
- value->SetString("id", base::Int64ToString(id));
- if (action == ACTION_DELETE) {
- if (extra.get()) {
- value->Set("extra", extra->ToValue());
- }
- value->Set("specifics", EntitySpecificsToValue(specifics));
- }
- return value;
-}
-
-ExtraPasswordChangeRecordData::ExtraPasswordChangeRecordData() {}
-
-ExtraPasswordChangeRecordData::ExtraPasswordChangeRecordData(
- const sync_pb::PasswordSpecificsData& data)
- : unencrypted_(data) {
-}
-
-ExtraPasswordChangeRecordData::~ExtraPasswordChangeRecordData() {}
-
-base::DictionaryValue* ExtraPasswordChangeRecordData::ToValue() const {
- return PasswordSpecificsDataToValue(unencrypted_);
-}
-
-const sync_pb::PasswordSpecificsData&
- ExtraPasswordChangeRecordData::unencrypted() const {
- return unencrypted_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/change_reorder_buffer.cc b/chromium/sync/internal_api/change_reorder_buffer.cc
deleted file mode 100644
index 572f56dc11f..00000000000
--- a/chromium/sync/internal_api/change_reorder_buffer.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/change_reorder_buffer.h"
-
-#include <limits>
-#include <queue>
-#include <set>
-#include <utility> // for pair<>
-
-#include "sync/internal_api/public/base_node.h"
-#include "sync/internal_api/public/base_transaction.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/syncable_base_transaction.h"
-
-using std::numeric_limits;
-using std::pair;
-using std::queue;
-using std::set;
-
-namespace syncer {
-
-// Traversal provides a way to collect a set of nodes from the syncable
-// directory structure and then traverse them, along with any intermediate
-// nodes, in a top-down fashion, starting from a single common ancestor. A
-// Traversal starts out empty and is grown by means of the ExpandToInclude
-// method. Once constructed, the top(), begin_children(), and end_children()
-// methods can be used to explore the nodes in root-to-leaf order.
-class ChangeReorderBuffer::Traversal {
- public:
- typedef pair<int64, int64> ParentChildLink;
- typedef set<ParentChildLink> LinkSet;
-
- Traversal() : top_(kInvalidId) { }
-
- // Expand the traversal so that it includes the node indicated by
- // |child_handle|.
- void ExpandToInclude(syncable::BaseTransaction* trans,
- int64 child_handle) {
- // If |top_| is invalid, this is the first insertion -- easy.
- if (top_ == kInvalidId) {
- top_ = child_handle;
- return;
- }
-
- int64 node_to_include = child_handle;
- while (node_to_include != kInvalidId && node_to_include != top_) {
- int64 node_parent = 0;
-
- syncable::Entry node(trans, syncable::GET_BY_HANDLE, node_to_include);
- CHECK(node.good());
- if (node.GetId().IsRoot()) {
- // If we've hit the root, and the root isn't already in the tree
- // (it would have to be |top_| if it were), start a new expansion
- // upwards from |top_| to unite the original traversal with the
- // path we just added that goes from |child_handle| to the root.
- node_to_include = top_;
- top_ = node.GetMetahandle();
- } else {
- // Otherwise, get the parent ID so that we can add a ParentChildLink.
- syncable::Entry parent(trans, syncable::GET_BY_ID,
- node.GetParentId());
- CHECK(parent.good());
- node_parent = parent.GetMetahandle();
-
- ParentChildLink link(node_parent, node_to_include);
-
- // If the link exists in the LinkSet |links_|, we don't need to search
- // any higher; we are done.
- if (links_.find(link) != links_.end())
- return;
-
- // Otherwise, extend |links_|, and repeat on the parent.
- links_.insert(link);
- node_to_include = node_parent;
- }
- }
- }
-
- // Return the top node of the traversal. Use this as a starting point
- // for walking the tree.
- int64 top() const { return top_; }
-
- // Return an iterator corresponding to the first child (in the traversal)
- // of the node specified by |parent_id|. Iterate this return value until
- // it is equal to the value returned by end_children(parent_id). The
- // enumeration thus provided is unordered.
- LinkSet::const_iterator begin_children(int64 parent_id) const {
- return links_.upper_bound(
- ParentChildLink(parent_id, numeric_limits<int64>::min()));
- }
-
- // Return an iterator corresponding to the last child in the traversal
- // of the node specified by |parent_id|.
- LinkSet::const_iterator end_children(int64 parent_id) const {
- return begin_children(parent_id + 1);
- }
-
- private:
- // The topmost point in the directory hierarchy that is in the traversal,
- // and thus the first node to be traversed. If the traversal is empty,
- // this is kInvalidId. If the traversal contains exactly one member, |top_|
- // will be the solitary member, and |links_| will be empty.
- int64 top_;
- // A set of single-level links that compose the traversal below |top_|. The
- // (parent, child) ordering of values enables efficient lookup of children
- // given the parent handle, which is used for top-down traversal. |links_|
- // is expected to be connected -- every node that appears as a parent in a
- // link must either appear as a child of another link, or else be the
- // topmost node, |top_|.
- LinkSet links_;
-
- DISALLOW_COPY_AND_ASSIGN(Traversal);
-};
-
-ChangeReorderBuffer::ChangeReorderBuffer() {
-}
-
-ChangeReorderBuffer::~ChangeReorderBuffer() {
-}
-
-void ChangeReorderBuffer::PushAddedItem(int64 id) {
- operations_[id] = ChangeRecord::ACTION_ADD;
-}
-
-void ChangeReorderBuffer::PushDeletedItem(int64 id) {
- operations_[id] = ChangeRecord::ACTION_DELETE;
-}
-
-void ChangeReorderBuffer::PushUpdatedItem(int64 id) {
- operations_[id] = ChangeRecord::ACTION_UPDATE;
-}
-
-void ChangeReorderBuffer::SetExtraDataForId(
- int64 id,
- ExtraPasswordChangeRecordData* extra) {
- extra_data_[id] = make_linked_ptr<ExtraPasswordChangeRecordData>(extra);
-}
-
-void ChangeReorderBuffer::SetSpecificsForId(
- int64 id,
- const sync_pb::EntitySpecifics& specifics) {
- specifics_[id] = specifics;
-}
-
-void ChangeReorderBuffer::Clear() {
- operations_.clear();
-}
-
-bool ChangeReorderBuffer::IsEmpty() const {
- return operations_.empty();
-}
-
-bool ChangeReorderBuffer::GetAllChangesInTreeOrder(
- const BaseTransaction* sync_trans,
- ImmutableChangeRecordList* changes) {
- syncable::BaseTransaction* trans = sync_trans->GetWrappedTrans();
-
- // Step 1: Iterate through the operations, doing three things:
- // (a) Push deleted items straight into the |changelist|.
- // (b) Construct a traversal spanning all non-deleted items.
- // (c) Construct a set of all parent nodes of any position changes.
- Traversal traversal;
-
- ChangeRecordList changelist;
-
- OperationMap::const_iterator i;
- for (i = operations_.begin(); i != operations_.end(); ++i) {
- if (i->second == ChangeRecord::ACTION_DELETE) {
- ChangeRecord record;
- record.id = i->first;
- record.action = i->second;
- if (specifics_.find(record.id) != specifics_.end())
- record.specifics = specifics_[record.id];
- if (extra_data_.find(record.id) != extra_data_.end())
- record.extra = extra_data_[record.id];
- changelist.push_back(record);
- } else {
- traversal.ExpandToInclude(trans, i->first);
- }
- }
-
- // Step 2: Breadth-first expansion of the traversal.
- queue<int64> to_visit;
- to_visit.push(traversal.top());
- while (!to_visit.empty()) {
- int64 next = to_visit.front();
- to_visit.pop();
-
- // If the node has an associated action, output a change record.
- i = operations_.find(next);
- if (i != operations_.end()) {
- ChangeRecord record;
- record.id = next;
- record.action = i->second;
- if (specifics_.find(record.id) != specifics_.end())
- record.specifics = specifics_[record.id];
- if (extra_data_.find(record.id) != extra_data_.end())
- record.extra = extra_data_[record.id];
- changelist.push_back(record);
- }
-
- // Now add the children of |next| to |to_visit|.
- Traversal::LinkSet::const_iterator j = traversal.begin_children(next);
- Traversal::LinkSet::const_iterator end = traversal.end_children(next);
- for (; j != end; ++j) {
- CHECK(j->first == next);
- to_visit.push(j->second);
- }
- }
-
- *changes = ImmutableChangeRecordList(&changelist);
- return true;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/change_reorder_buffer.h b/chromium/sync/internal_api/change_reorder_buffer.h
deleted file mode 100644
index c1c577d43a7..00000000000
--- a/chromium/sync/internal_api/change_reorder_buffer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Defines ChangeReorderBuffer, which can be used to sort a list of item
-// actions to achieve the ordering constraint required by the SyncObserver
-// interface of the SyncAPI.
-
-#ifndef SYNC_INTERNAL_API_CHANGE_REORDER_BUFFER_H_
-#define SYNC_INTERNAL_API_CHANGE_REORDER_BUFFER_H_
-
-#include <map>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/memory/linked_ptr.h"
-#include "sync/internal_api/public/change_record.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-class BaseTransaction;
-
-// ChangeReorderBuffer is a utility type which accepts an unordered set
-// of changes (via its Push methods), and yields an ImmutableChangeRecordList
-// (via the GetAllChangesInTreeOrder method) that are in the order that
-// the SyncObserver expects them to be. A buffer is initially empty.
-//
-// The ordering produced by ChangeReorderBuffer is as follows:
-// (a) All Deleted items appear first.
-// (b) For Updated and/or Added items, parents appear before their children.
-//
-// The sibling order is not necessarily preserved.
-class ChangeReorderBuffer {
- public:
- ChangeReorderBuffer();
- ~ChangeReorderBuffer();
-
- // Insert an item, identified by the metahandle |id|, into the reorder buffer.
- // This item will appear in the output list as an ACTION_ADD ChangeRecord.
- void PushAddedItem(int64 id);
-
- // Insert an item, identified by the metahandle |id|, into the reorder buffer.
- // This item will appear in the output list as an ACTION_DELETE ChangeRecord.
- void PushDeletedItem(int64 id);
-
- // Insert an item, identified by the metahandle |id|, into the reorder buffer.
- // This item will appear in the output list as an ACTION_UPDATE ChangeRecord.
- void PushUpdatedItem(int64 id);
-
- void SetExtraDataForId(int64 id, ExtraPasswordChangeRecordData* extra);
-
- void SetSpecificsForId(int64 id, const sync_pb::EntitySpecifics& specifics);
-
- // Reset the buffer, forgetting any pushed items, so that it can be used again
- // to reorder a new set of changes.
- void Clear();
-
- bool IsEmpty() const;
-
- // Output a reordered list of changes to |changes| using the items
- // that were pushed into the reorder buffer. |sync_trans| is used to
- // determine the ordering. Returns true if successful, or false if
- // an error was encountered.
- bool GetAllChangesInTreeOrder(
- const BaseTransaction* sync_trans,
- ImmutableChangeRecordList* changes) WARN_UNUSED_RESULT;
-
- private:
- class Traversal;
- typedef std::map<int64, ChangeRecord::Action> OperationMap;
- typedef std::map<int64, sync_pb::EntitySpecifics> SpecificsMap;
- typedef std::map<int64, linked_ptr<ExtraPasswordChangeRecordData> >
- ExtraDataMap;
-
- // Stores the items that have been pushed into the buffer, and the type of
- // operation that was associated with them.
- OperationMap operations_;
-
- // Stores entity-specific ChangeRecord data per-ID.
- SpecificsMap specifics_;
-
- // Stores type-specific extra data per-ID.
- ExtraDataMap extra_data_;
-
- DISALLOW_COPY_AND_ASSIGN(ChangeReorderBuffer);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_CHANGE_REORDER_BUFFER_H_
diff --git a/chromium/sync/internal_api/debug_info_event_listener.cc b/chromium/sync/internal_api/debug_info_event_listener.cc
deleted file mode 100644
index f46c4ee2ef9..00000000000
--- a/chromium/sync/internal_api/debug_info_event_listener.cc
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/debug_info_event_listener.h"
-
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-using sessions::SyncSessionSnapshot;
-
-DebugInfoEventListener::DebugInfoEventListener()
- : events_dropped_(false),
- cryptographer_has_pending_keys_(false),
- cryptographer_ready_(false),
- weak_ptr_factory_(this) {
-}
-
-DebugInfoEventListener::~DebugInfoEventListener() {
-}
-
-void DebugInfoEventListener::OnSyncCycleCompleted(
- const SyncSessionSnapshot& snapshot) {
- DCHECK(thread_checker_.CalledOnValidThread());
- sync_pb::DebugEventInfo event_info;
- sync_pb::SyncCycleCompletedEventInfo* sync_completed_event_info =
- event_info.mutable_sync_cycle_completed_event_info();
-
- sync_completed_event_info->set_num_encryption_conflicts(
- snapshot.num_encryption_conflicts());
- sync_completed_event_info->set_num_hierarchy_conflicts(
- snapshot.num_hierarchy_conflicts());
- sync_completed_event_info->set_num_server_conflicts(
- snapshot.num_server_conflicts());
-
- sync_completed_event_info->set_num_updates_downloaded(
- snapshot.model_neutral_state().num_updates_downloaded_total);
- sync_completed_event_info->set_num_reflected_updates_downloaded(
- snapshot.model_neutral_state().num_reflected_updates_downloaded_total);
- sync_completed_event_info->mutable_caller_info()->set_source(
- snapshot.legacy_updates_source());
- sync_completed_event_info->mutable_caller_info()->set_notifications_enabled(
- snapshot.notifications_enabled());
-
- AddEventToQueue(event_info);
-}
-
-void DebugInfoEventListener::OnInitializationComplete(
- const WeakHandle<JsBackend>& js_backend,
- const WeakHandle<DataTypeDebugInfoListener>& debug_listener,
- bool success, ModelTypeSet restored_types) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::INITIALIZATION_COMPLETE);
-}
-
-void DebugInfoEventListener::OnConnectionStatusChange(
- ConnectionStatus status) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::CONNECTION_STATUS_CHANGE);
-}
-
-void DebugInfoEventListener::OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::PASSPHRASE_REQUIRED);
-}
-
-void DebugInfoEventListener::OnPassphraseAccepted() {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::PASSPHRASE_ACCEPTED);
-}
-
-void DebugInfoEventListener::OnBootstrapTokenUpdated(
- const std::string& bootstrap_token, BootstrapTokenType type) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (type == PASSPHRASE_BOOTSTRAP_TOKEN) {
- CreateAndAddEvent(sync_pb::DebugEventInfo::BOOTSTRAP_TOKEN_UPDATED);
- return;
- }
- DCHECK_EQ(type, KEYSTORE_BOOTSTRAP_TOKEN);
- CreateAndAddEvent(sync_pb::DebugEventInfo::KEYSTORE_TOKEN_UPDATED);
-}
-
-void DebugInfoEventListener::OnStopSyncingPermanently() {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::STOP_SYNCING_PERMANENTLY);
-}
-
-void DebugInfoEventListener::OnEncryptedTypesChanged(
- ModelTypeSet encrypted_types,
- bool encrypt_everything) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::ENCRYPTED_TYPES_CHANGED);
-}
-
-void DebugInfoEventListener::OnEncryptionComplete() {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
-}
-
-void DebugInfoEventListener::OnCryptographerStateChanged(
- Cryptographer* cryptographer) {
- DCHECK(thread_checker_.CalledOnValidThread());
- cryptographer_has_pending_keys_ = cryptographer->has_pending_keys();
- cryptographer_ready_ = cryptographer->is_ready();
-}
-
-void DebugInfoEventListener::OnPassphraseTypeChanged(
- PassphraseType type,
- base::Time explicit_passphrase_time) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::PASSPHRASE_TYPE_CHANGED);
-}
-
-void DebugInfoEventListener::OnActionableError(
- const SyncProtocolError& sync_error) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CreateAndAddEvent(sync_pb::DebugEventInfo::ACTIONABLE_ERROR);
-}
-
-void DebugInfoEventListener::OnNudgeFromDatatype(ModelType datatype) {
- DCHECK(thread_checker_.CalledOnValidThread());
- sync_pb::DebugEventInfo event_info;
- event_info.set_nudging_datatype(
- GetSpecificsFieldNumberFromModelType(datatype));
- AddEventToQueue(event_info);
-}
-
-void DebugInfoEventListener::OnIncomingNotification(
- const ObjectIdInvalidationMap& invalidation_map) {
- DCHECK(thread_checker_.CalledOnValidThread());
- sync_pb::DebugEventInfo event_info;
- ModelTypeSet types =
- ObjectIdSetToModelTypeSet(invalidation_map.GetObjectIds());
-
- for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
- event_info.add_datatypes_notified_from_server(
- GetSpecificsFieldNumberFromModelType(it.Get()));
- }
-
- AddEventToQueue(event_info);
-}
-
-void DebugInfoEventListener::GetDebugInfo(sync_pb::DebugInfo* debug_info) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK_LE(events_.size(), kMaxEntries);
-
- for (DebugEventInfoQueue::const_iterator iter = events_.begin();
- iter != events_.end();
- ++iter) {
- sync_pb::DebugEventInfo* event_info = debug_info->add_events();
- event_info->CopyFrom(*iter);
- }
-
- debug_info->set_events_dropped(events_dropped_);
- debug_info->set_cryptographer_ready(cryptographer_ready_);
- debug_info->set_cryptographer_has_pending_keys(
- cryptographer_has_pending_keys_);
-}
-
-void DebugInfoEventListener::ClearDebugInfo() {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK_LE(events_.size(), kMaxEntries);
-
- events_.clear();
- events_dropped_ = false;
-}
-
-base::WeakPtr<DataTypeDebugInfoListener> DebugInfoEventListener::GetWeakPtr() {
- DCHECK(thread_checker_.CalledOnValidThread());
- return weak_ptr_factory_.GetWeakPtr();
-}
-
-void DebugInfoEventListener::OnDataTypeConfigureComplete(
- const std::vector<DataTypeConfigurationStats>& configuration_stats) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- for (size_t i = 0; i < configuration_stats.size(); ++i) {
- DCHECK(ProtocolTypes().Has(configuration_stats[i].model_type));
- const DataTypeAssociationStats& association_stats =
- configuration_stats[i].association_stats;
-
- sync_pb::DebugEventInfo association_event;
- sync_pb::DatatypeAssociationStats* datatype_stats =
- association_event.mutable_datatype_association_stats();
- datatype_stats->set_data_type_id(
- GetSpecificsFieldNumberFromModelType(
- configuration_stats[i].model_type));
- datatype_stats->set_num_local_items_before_association(
- association_stats.num_local_items_before_association);
- datatype_stats->set_num_sync_items_before_association(
- association_stats.num_sync_items_before_association);
- datatype_stats->set_num_local_items_after_association(
- association_stats.num_local_items_after_association);
- datatype_stats->set_num_sync_items_after_association(
- association_stats.num_sync_items_after_association);
- datatype_stats->set_num_local_items_added(
- association_stats.num_local_items_added);
- datatype_stats->set_num_local_items_deleted(
- association_stats.num_local_items_deleted);
- datatype_stats->set_num_local_items_modified(
- association_stats.num_local_items_modified);
- datatype_stats->set_num_sync_items_added(
- association_stats.num_sync_items_added);
- datatype_stats->set_num_sync_items_deleted(
- association_stats.num_sync_items_deleted);
- datatype_stats->set_num_sync_items_modified(
- association_stats.num_sync_items_modified);
- datatype_stats->set_local_version_pre_association(
- association_stats.local_version_pre_association);
- datatype_stats->set_sync_version_pre_association(
- association_stats.sync_version_pre_association);
- datatype_stats->set_had_error(association_stats.had_error);
- datatype_stats->set_association_wait_time_for_same_priority_us(
- association_stats.association_wait_time.InMicroseconds());
- datatype_stats->set_association_time_us(
- association_stats.association_time.InMicroseconds());
- datatype_stats->set_download_wait_time_us(
- configuration_stats[i].download_wait_time.InMicroseconds());
- datatype_stats->set_download_time_us(
- configuration_stats[i].download_time.InMicroseconds());
- datatype_stats->set_association_wait_time_for_high_priority_us(
- configuration_stats[i].association_wait_time_for_high_priority
- .InMicroseconds());
-
- for (ModelTypeSet::Iterator it =
- configuration_stats[i].high_priority_types_configured_before
- .First();
- it.Good(); it.Inc()) {
- datatype_stats->add_high_priority_type_configured_before(
- GetSpecificsFieldNumberFromModelType(it.Get()));
- }
-
- for (ModelTypeSet::Iterator it =
- configuration_stats[i].same_priority_types_configured_before
- .First();
- it.Good(); it.Inc()) {
- datatype_stats->add_same_priority_type_configured_before(
- GetSpecificsFieldNumberFromModelType(it.Get()));
- }
-
- AddEventToQueue(association_event);
- }
-}
-
-void DebugInfoEventListener::CreateAndAddEvent(
- sync_pb::DebugEventInfo::SingletonEventType type) {
- DCHECK(thread_checker_.CalledOnValidThread());
- sync_pb::DebugEventInfo event_info;
- event_info.set_singleton_event(type);
- AddEventToQueue(event_info);
-}
-
-void DebugInfoEventListener::AddEventToQueue(
- const sync_pb::DebugEventInfo& event_info) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (events_.size() >= kMaxEntries) {
- DVLOG(1) << "DebugInfoEventListener::AddEventToQueue Dropping an old event "
- << "because of full queue";
-
- events_.pop_front();
- events_dropped_ = true;
- }
- events_.push_back(event_info);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/debug_info_event_listener.h b/chromium/sync/internal_api/debug_info_event_listener.h
deleted file mode 100644
index 15cc0a66972..00000000000
--- a/chromium/sync/internal_api/debug_info_event_listener.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_DEBUG_INFO_EVENT_LISTENER_H_
-#define SYNC_INTERNAL_API_DEBUG_INFO_EVENT_LISTENER_H_
-
-#include <deque>
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/data_type_debug_info_listener.h"
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-#include "sync/internal_api/public/sync_encryption_handler.h"
-#include "sync/internal_api/public/sync_manager.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_backend.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/debug_info_getter.h"
-
-namespace syncer {
-
-// In order to track datatype association results, we need at least as many
-// entries as datatypes. Reserve additional space for other kinds of events that
-// are likely to happen during first sync or startup.
-const unsigned int kMaxEntries = MODEL_TYPE_COUNT + 10;
-
-// Listens to events and records them in a queue. And passes the events to
-// syncer when requested.
-// This class is not thread safe and should only be accessed on the sync thread.
-class SYNC_EXPORT_PRIVATE DebugInfoEventListener
- : public SyncManager::Observer,
- public SyncEncryptionHandler::Observer,
- public sessions::DebugInfoGetter,
- public DataTypeDebugInfoListener {
- public:
- DebugInfoEventListener();
- virtual ~DebugInfoEventListener();
-
- // SyncManager::Observer implementation.
- virtual void OnSyncCycleCompleted(
- const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
- virtual void OnInitializationComplete(
- const WeakHandle<JsBackend>& js_backend,
- const WeakHandle<DataTypeDebugInfoListener>& debug_listener,
- bool success, ModelTypeSet restored_types) OVERRIDE;
- virtual void OnConnectionStatusChange(
- ConnectionStatus connection_status) OVERRIDE;
- virtual void OnStopSyncingPermanently() OVERRIDE;
- virtual void OnActionableError(
- const SyncProtocolError& sync_error) OVERRIDE;
-
- // SyncEncryptionHandler::Observer implementation.
- virtual void OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) OVERRIDE;
- virtual void OnPassphraseAccepted() OVERRIDE;
- virtual void OnBootstrapTokenUpdated(
- const std::string& bootstrap_token,
- BootstrapTokenType type) OVERRIDE;
- virtual void OnEncryptedTypesChanged(
- ModelTypeSet encrypted_types,
- bool encrypt_everything) OVERRIDE;
- virtual void OnEncryptionComplete() OVERRIDE;
- virtual void OnCryptographerStateChanged(
- Cryptographer* cryptographer) OVERRIDE;
- virtual void OnPassphraseTypeChanged(
- PassphraseType type,
- base::Time explicit_passphrase_time) OVERRIDE;
-
- // Sync manager events.
- void OnNudgeFromDatatype(ModelType datatype);
- void OnIncomingNotification(const ObjectIdInvalidationMap& invalidations);
-
- // DebugInfoGetter implementation.
- virtual void GetDebugInfo(sync_pb::DebugInfo* debug_info) OVERRIDE;
-
- // DebugInfoGetter implementation.
- virtual void ClearDebugInfo() OVERRIDE;
-
- // DataTypeDebugInfoListener implementation.
- virtual void OnDataTypeConfigureComplete(
- const std::vector<DataTypeConfigurationStats>& configuration_stats)
- OVERRIDE;
-
- // Returns a weak pointer to this object.
- base::WeakPtr<DataTypeDebugInfoListener> GetWeakPtr();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyEventsAdded);
- FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyQueueSize);
- FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyGetEvents);
- FRIEND_TEST_ALL_PREFIXES(DebugInfoEventListenerTest, VerifyClearEvents);
-
- void AddEventToQueue(const sync_pb::DebugEventInfo& event_info);
- void CreateAndAddEvent(sync_pb::DebugEventInfo::SingletonEventType type);
-
- typedef std::deque<sync_pb::DebugEventInfo> DebugEventInfoQueue;
- DebugEventInfoQueue events_;
-
- // True indicates we had to drop one or more events to keep our limit of
- // |kMaxEntries|.
- bool events_dropped_;
-
- // Cryptographer has keys that are not yet decrypted.
- bool cryptographer_has_pending_keys_;
-
- // Cryptographer is initialized and does not have pending keys.
- bool cryptographer_ready_;
-
- base::ThreadChecker thread_checker_;
-
- base::WeakPtrFactory<DebugInfoEventListener> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(DebugInfoEventListener);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_DEBUG_INFO_EVENT_LISTENER_H_
diff --git a/chromium/sync/internal_api/debug_info_event_listener_unittest.cc b/chromium/sync/internal_api/debug_info_event_listener_unittest.cc
deleted file mode 100644
index 31f16f87b63..00000000000
--- a/chromium/sync/internal_api/debug_info_event_listener_unittest.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/debug_info_event_listener.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-typedef testing::Test DebugInfoEventListenerTest;
-
-TEST_F(DebugInfoEventListenerTest, VerifyEventsAdded) {
- DebugInfoEventListener debug_info_event_listener;
- debug_info_event_listener.CreateAndAddEvent(
- sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
- ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
- const sync_pb::DebugEventInfo& debug_info =
- debug_info_event_listener.events_.back();
- ASSERT_TRUE(debug_info.has_singleton_event());
- ASSERT_EQ(debug_info.singleton_event(),
- sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
-}
-
-TEST_F(DebugInfoEventListenerTest, VerifyQueueSize) {
- DebugInfoEventListener debug_info_event_listener;
- for (unsigned int i = 0; i < 2*kMaxEntries; ++i) {
- debug_info_event_listener.CreateAndAddEvent(
- sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
- }
- sync_pb::DebugInfo debug_info;
- debug_info_event_listener.GetDebugInfo(&debug_info);
- debug_info_event_listener.ClearDebugInfo();
- ASSERT_TRUE(debug_info.events_dropped());
- ASSERT_EQ(static_cast<int>(kMaxEntries), debug_info.events_size());
-}
-
-TEST_F(DebugInfoEventListenerTest, VerifyGetEvents) {
- DebugInfoEventListener debug_info_event_listener;
- debug_info_event_listener.CreateAndAddEvent(
- sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
- ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
- sync_pb::DebugInfo debug_info;
- debug_info_event_listener.GetDebugInfo(&debug_info);
- ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
- ASSERT_EQ(debug_info.events_size(), 1);
- ASSERT_TRUE(debug_info.events(0).has_singleton_event());
- ASSERT_EQ(debug_info.events(0).singleton_event(),
- sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
-}
-
-TEST_F(DebugInfoEventListenerTest, VerifyClearEvents) {
- DebugInfoEventListener debug_info_event_listener;
- debug_info_event_listener.CreateAndAddEvent(
- sync_pb::DebugEventInfo::ENCRYPTION_COMPLETE);
- ASSERT_EQ(debug_info_event_listener.events_.size(), 1U);
- debug_info_event_listener.ClearDebugInfo();
- ASSERT_EQ(debug_info_event_listener.events_.size(), 0U);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/delete_journal.cc b/chromium/sync/internal_api/delete_journal.cc
deleted file mode 100644
index ebeb5a76126..00000000000
--- a/chromium/sync/internal_api/delete_journal.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/delete_journal.h"
-
-#include "sync/internal_api/public/base_transaction.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/syncable_base_transaction.h"
-
-namespace syncer {
-
-// static
-void DeleteJournal::GetBookmarkDeleteJournals(
- BaseTransaction* trans, BookmarkDeleteJournalList *delete_journal_list) {
- syncer::syncable::EntryKernelSet deleted_entries;
- trans->GetDirectory()->delete_journal()->GetDeleteJournals(
- trans->GetWrappedTrans(), BOOKMARKS, &deleted_entries);
- std::set<int64> undecryptable_journal;
- for (syncer::syncable::EntryKernelSet::const_iterator i =
- deleted_entries.begin(); i != deleted_entries.end(); ++i) {
- delete_journal_list->push_back(BookmarkDeleteJournal());
- delete_journal_list->back().id = (*i)->ref(syncer::syncable::META_HANDLE);
- delete_journal_list->back().is_folder = (*i)->ref(syncer::syncable::IS_DIR);
-
- const sync_pb::EntitySpecifics& specifics = (*i)->ref(
- syncer::syncable::SPECIFICS);
- if (!specifics.has_encrypted()) {
- delete_journal_list->back().specifics = specifics;
- } else {
- std::string plaintext_data = trans->GetCryptographer()->DecryptToString(
- specifics.encrypted());
- sync_pb::EntitySpecifics unencrypted_data;
- if (plaintext_data.length() == 0 ||
- !unencrypted_data.ParseFromString(plaintext_data)) {
- // Fail to decrypt, Add this delete journal to purge.
- undecryptable_journal.insert(delete_journal_list->back().id);
- delete_journal_list->pop_back();
- } else {
- delete_journal_list->back().specifics = unencrypted_data;
- }
- }
- }
-
- if (!undecryptable_journal.empty()) {
- trans->GetDirectory()->delete_journal()->PurgeDeleteJournals(
- trans->GetWrappedTrans(), undecryptable_journal);
- }
-}
-
-// static
-void DeleteJournal::PurgeDeleteJournals(BaseTransaction* trans,
- const std::set<int64>& ids) {
- trans->GetDirectory()->delete_journal()->PurgeDeleteJournals(
- trans->GetWrappedTrans(), ids);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/http_bridge.cc b/chromium/sync/internal_api/http_bridge.cc
deleted file mode 100644
index 5827c9d77b5..00000000000
--- a/chromium/sync/internal_api/http_bridge.cc
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/http_bridge.h"
-
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "base/strings/string_number_conversions.h"
-#include "net/base/load_flags.h"
-#include "net/base/net_errors.h"
-#include "net/cookies/cookie_monster.h"
-#include "net/dns/host_resolver.h"
-#include "net/http/http_cache.h"
-#include "net/http/http_network_layer.h"
-#include "net/http/http_response_headers.h"
-#include "net/proxy/proxy_service.h"
-#include "net/url_request/static_http_user_agent_settings.h"
-#include "net/url_request/url_fetcher.h"
-#include "net/url_request/url_request_context.h"
-#include "net/url_request/url_request_status.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-
-namespace syncer {
-
-HttpBridge::RequestContextGetter::RequestContextGetter(
- net::URLRequestContextGetter* baseline_context_getter,
- const std::string& user_agent)
- : baseline_context_getter_(baseline_context_getter),
- network_task_runner_(
- baseline_context_getter_->GetNetworkTaskRunner()),
- user_agent_(user_agent) {
- DCHECK(baseline_context_getter_.get());
- DCHECK(network_task_runner_.get());
- DCHECK(!user_agent_.empty());
-}
-
-HttpBridge::RequestContextGetter::~RequestContextGetter() {}
-
-net::URLRequestContext*
-HttpBridge::RequestContextGetter::GetURLRequestContext() {
- // Lazily create the context.
- if (!context_) {
- net::URLRequestContext* baseline_context =
- baseline_context_getter_->GetURLRequestContext();
- context_.reset(
- new RequestContext(baseline_context, GetNetworkTaskRunner(),
- user_agent_));
- baseline_context_getter_ = NULL;
- }
-
- return context_.get();
-}
-
-scoped_refptr<base::SingleThreadTaskRunner>
-HttpBridge::RequestContextGetter::GetNetworkTaskRunner() const {
- return network_task_runner_;
-}
-
-HttpBridgeFactory::HttpBridgeFactory(
- net::URLRequestContextGetter* baseline_context_getter,
- const NetworkTimeUpdateCallback& network_time_update_callback,
- CancelationSignal* cancelation_signal)
- : baseline_request_context_getter_(baseline_context_getter),
- network_time_update_callback_(network_time_update_callback),
- cancelation_signal_(cancelation_signal) {
- // Registration should never fail. This should happen on the UI thread during
- // init. It would be impossible for a shutdown to have been requested at this
- // point.
- bool result = cancelation_signal_->TryRegisterHandler(this);
- DCHECK(result);
-}
-
-HttpBridgeFactory::~HttpBridgeFactory() {
- cancelation_signal_->UnregisterHandler(this);
-}
-
-void HttpBridgeFactory::Init(const std::string& user_agent) {
- base::AutoLock lock(context_getter_lock_);
-
- if (!baseline_request_context_getter_.get()) {
- // Uh oh. We've been aborted before we finished initializing. There's no
- // point in initializating further; let's just return right away.
- return;
- }
-
- request_context_getter_ =
- new HttpBridge::RequestContextGetter(
- baseline_request_context_getter_, user_agent);
-}
-
-HttpPostProviderInterface* HttpBridgeFactory::Create() {
- base::AutoLock lock(context_getter_lock_);
-
- // If we've been asked to shut down (something which may happen asynchronously
- // and at pretty much any time), then we won't have a request_context_getter_.
- // Some external mechanism must ensure that this function is not called after
- // we've been asked to shut down.
- CHECK(request_context_getter_.get());
-
- HttpBridge* http = new HttpBridge(request_context_getter_.get(),
- network_time_update_callback_);
- http->AddRef();
- return http;
-}
-
-void HttpBridgeFactory::Destroy(HttpPostProviderInterface* http) {
- static_cast<HttpBridge*>(http)->Release();
-}
-
-void HttpBridgeFactory::OnSignalReceived() {
- base::AutoLock lock(context_getter_lock_);
- // Release |baseline_request_context_getter_| as soon as possible so that it
- // is destroyed in the right order on its network task runner. The
- // |request_context_getter_| has a reference to the baseline, so we must
- // drop our reference to it, too.
- baseline_request_context_getter_ = NULL;
- request_context_getter_ = NULL;
-}
-
-HttpBridge::RequestContext::RequestContext(
- net::URLRequestContext* baseline_context,
- const scoped_refptr<base::SingleThreadTaskRunner>&
- network_task_runner,
- const std::string& user_agent)
- : baseline_context_(baseline_context),
- network_task_runner_(network_task_runner) {
- DCHECK(!user_agent.empty());
-
- // Create empty, in-memory cookie store.
- set_cookie_store(new net::CookieMonster(NULL, NULL));
-
- // We don't use a cache for bridged loads, but we do want to share proxy info.
- set_host_resolver(baseline_context->host_resolver());
- set_proxy_service(baseline_context->proxy_service());
- set_ssl_config_service(baseline_context->ssl_config_service());
-
- // We want to share the HTTP session data with the network layer factory,
- // which includes auth_cache for proxies.
- // Session is not refcounted so we need to be careful to not lose the parent
- // context.
- net::HttpNetworkSession* session =
- baseline_context->http_transaction_factory()->GetSession();
- DCHECK(session);
- set_http_transaction_factory(new net::HttpNetworkLayer(session));
-
- // TODO(timsteele): We don't currently listen for pref changes of these
- // fields or CookiePolicy; I'm not sure we want to strictly follow the
- // default settings, since for example if the user chooses to block all
- // cookies, sync will start failing. Also it seems like accept_lang/charset
- // should be tied to whatever the sync servers expect (if anything). These
- // fields should probably just be settable by sync backend; though we should
- // figure out if we need to give the user explicit control over policies etc.
- http_user_agent_settings_.reset(new net::StaticHttpUserAgentSettings(
- baseline_context->GetAcceptLanguage(),
- user_agent));
- set_http_user_agent_settings(http_user_agent_settings_.get());
-
- set_net_log(baseline_context->net_log());
-}
-
-HttpBridge::RequestContext::~RequestContext() {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- delete http_transaction_factory();
-}
-
-HttpBridge::URLFetchState::URLFetchState() : url_poster(NULL),
- aborted(false),
- request_completed(false),
- request_succeeded(false),
- http_response_code(-1),
- error_code(-1) {}
-HttpBridge::URLFetchState::~URLFetchState() {}
-
-HttpBridge::HttpBridge(
- HttpBridge::RequestContextGetter* context_getter,
- const NetworkTimeUpdateCallback& network_time_update_callback)
- : created_on_loop_(base::MessageLoop::current()),
- http_post_completed_(false, false),
- context_getter_for_request_(context_getter),
- network_task_runner_(
- context_getter_for_request_->GetNetworkTaskRunner()),
- network_time_update_callback_(network_time_update_callback) {
-}
-
-HttpBridge::~HttpBridge() {
-}
-
-void HttpBridge::SetExtraRequestHeaders(const char * headers) {
- DCHECK(extra_headers_.empty())
- << "HttpBridge::SetExtraRequestHeaders called twice.";
- extra_headers_.assign(headers);
-}
-
-void HttpBridge::SetURL(const char* url, int port) {
- DCHECK_EQ(base::MessageLoop::current(), created_on_loop_);
- if (DCHECK_IS_ON()) {
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(!fetch_state_.request_completed);
- }
- DCHECK(url_for_request_.is_empty())
- << "HttpBridge::SetURL called more than once?!";
- GURL temp(url);
- GURL::Replacements replacements;
- std::string port_str = base::IntToString(port);
- replacements.SetPort(port_str.c_str(),
- url_parse::Component(0, port_str.length()));
- url_for_request_ = temp.ReplaceComponents(replacements);
-}
-
-void HttpBridge::SetPostPayload(const char* content_type,
- int content_length,
- const char* content) {
- DCHECK_EQ(base::MessageLoop::current(), created_on_loop_);
- if (DCHECK_IS_ON()) {
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(!fetch_state_.request_completed);
- }
- DCHECK(content_type_.empty()) << "Bridge payload already set.";
- DCHECK_GE(content_length, 0) << "Content length < 0";
- content_type_ = content_type;
- if (!content || (content_length == 0)) {
- DCHECK_EQ(content_length, 0);
- request_content_ = " "; // TODO(timsteele): URLFetcher requires non-empty
- // content for POSTs whereas CURL does not, for now
- // we hack this to support the sync backend.
- } else {
- request_content_.assign(content, content_length);
- }
-}
-
-bool HttpBridge::MakeSynchronousPost(int* error_code, int* response_code) {
- DCHECK_EQ(base::MessageLoop::current(), created_on_loop_);
- if (DCHECK_IS_ON()) {
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(!fetch_state_.request_completed);
- }
- DCHECK(url_for_request_.is_valid()) << "Invalid URL for request";
- DCHECK(!content_type_.empty()) << "Payload not set";
-
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&HttpBridge::CallMakeAsynchronousPost, this))) {
- // This usually happens when we're in a unit test.
- LOG(WARNING) << "Could not post CallMakeAsynchronousPost task";
- return false;
- }
-
- // Block until network request completes or is aborted. See
- // OnURLFetchComplete and Abort.
- http_post_completed_.Wait();
-
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(fetch_state_.request_completed || fetch_state_.aborted);
- *error_code = fetch_state_.error_code;
- *response_code = fetch_state_.http_response_code;
- return fetch_state_.request_succeeded;
-}
-
-void HttpBridge::MakeAsynchronousPost() {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(!fetch_state_.request_completed);
- if (fetch_state_.aborted)
- return;
-
- DCHECK(context_getter_for_request_.get());
- fetch_state_.url_poster = net::URLFetcher::Create(
- url_for_request_, net::URLFetcher::POST, this);
- fetch_state_.url_poster->SetRequestContext(context_getter_for_request_.get());
- fetch_state_.url_poster->SetUploadData(content_type_, request_content_);
- fetch_state_.url_poster->SetExtraRequestHeaders(extra_headers_);
- fetch_state_.url_poster->SetLoadFlags(net::LOAD_DO_NOT_SEND_COOKIES);
- fetch_state_.start_time = base::Time::Now();
- fetch_state_.url_poster->Start();
-}
-
-int HttpBridge::GetResponseContentLength() const {
- DCHECK_EQ(base::MessageLoop::current(), created_on_loop_);
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(fetch_state_.request_completed);
- return fetch_state_.response_content.size();
-}
-
-const char* HttpBridge::GetResponseContent() const {
- DCHECK_EQ(base::MessageLoop::current(), created_on_loop_);
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(fetch_state_.request_completed);
- return fetch_state_.response_content.data();
-}
-
-const std::string HttpBridge::GetResponseHeaderValue(
- const std::string& name) const {
-
- DCHECK_EQ(base::MessageLoop::current(), created_on_loop_);
- base::AutoLock lock(fetch_state_lock_);
- DCHECK(fetch_state_.request_completed);
-
- std::string value;
- fetch_state_.response_headers->EnumerateHeader(NULL, name, &value);
- return value;
-}
-
-void HttpBridge::Abort() {
- base::AutoLock lock(fetch_state_lock_);
-
- // Release |request_context_getter_| as soon as possible so that it is
- // destroyed in the right order on its network task runner.
- context_getter_for_request_ = NULL;
-
- DCHECK(!fetch_state_.aborted);
- if (fetch_state_.aborted || fetch_state_.request_completed)
- return;
-
- fetch_state_.aborted = true;
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&HttpBridge::DestroyURLFetcherOnIOThread, this,
- fetch_state_.url_poster))) {
- // Madness ensues.
- NOTREACHED() << "Could not post task to delete URLFetcher";
- }
-
- fetch_state_.url_poster = NULL;
- fetch_state_.error_code = net::ERR_ABORTED;
- http_post_completed_.Signal();
-}
-
-void HttpBridge::DestroyURLFetcherOnIOThread(net::URLFetcher* fetcher) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- delete fetcher;
-}
-
-void HttpBridge::OnURLFetchComplete(const net::URLFetcher* source) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- base::AutoLock lock(fetch_state_lock_);
- if (fetch_state_.aborted)
- return;
-
- fetch_state_.end_time = base::Time::Now();
- fetch_state_.request_completed = true;
- fetch_state_.request_succeeded =
- (net::URLRequestStatus::SUCCESS == source->GetStatus().status());
- fetch_state_.http_response_code = source->GetResponseCode();
- fetch_state_.error_code = source->GetStatus().error();
-
- // Use a real (non-debug) log to facilitate troubleshooting in the wild.
- VLOG(2) << "HttpBridge::OnURLFetchComplete for: "
- << fetch_state_.url_poster->GetURL().spec();
- VLOG(1) << "HttpBridge received response code: "
- << fetch_state_.http_response_code;
-
- source->GetResponseAsString(&fetch_state_.response_content);
- fetch_state_.response_headers = source->GetResponseHeaders();
- UpdateNetworkTime();
-
- // End of the line for url_poster_. It lives only on the IO loop.
- // We defer deletion because we're inside a callback from a component of the
- // URLFetcher, so it seems most natural / "polite" to let the stack unwind.
- base::MessageLoop::current()->DeleteSoon(FROM_HERE, fetch_state_.url_poster);
- fetch_state_.url_poster = NULL;
-
- // Wake the blocked syncer thread in MakeSynchronousPost.
- // WARNING: DONT DO ANYTHING AFTER THIS CALL! |this| may be deleted!
- http_post_completed_.Signal();
-}
-
-net::URLRequestContextGetter* HttpBridge::GetRequestContextGetterForTest()
- const {
- base::AutoLock lock(fetch_state_lock_);
- return context_getter_for_request_.get();
-}
-
-void HttpBridge::UpdateNetworkTime() {
- std::string sane_time_str;
- if (!fetch_state_.request_succeeded || fetch_state_.start_time.is_null() ||
- fetch_state_.end_time < fetch_state_.start_time ||
- !fetch_state_.response_headers->EnumerateHeader(NULL, "Sane-Time-Millis",
- &sane_time_str)) {
- return;
- }
-
- int64 sane_time_ms = 0;
- if (base::StringToInt64(sane_time_str, &sane_time_ms)) {
- network_time_update_callback_.Run(
- base::Time::FromJsTime(sane_time_ms),
- base::TimeDelta::FromMilliseconds(1),
- fetch_state_.end_time - fetch_state_.start_time);
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/http_bridge_network_resources.cc b/chromium/sync/internal_api/http_bridge_network_resources.cc
deleted file mode 100644
index 72716bd2b34..00000000000
--- a/chromium/sync/internal_api/http_bridge_network_resources.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/http_bridge_network_resources.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "net/url_request/url_request_context_getter.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/http_bridge.h"
-#include "sync/internal_api/public/http_post_provider_factory.h"
-#include "sync/internal_api/public/network_time_update_callback.h"
-
-namespace syncer {
-
-HttpBridgeNetworkResources::~HttpBridgeNetworkResources() {}
-
-scoped_ptr<HttpPostProviderFactory>
- HttpBridgeNetworkResources::GetHttpPostProviderFactory(
- net::URLRequestContextGetter* baseline_context_getter,
- const NetworkTimeUpdateCallback& network_time_update_callback,
- CancelationSignal* cancelation_signal) {
- return make_scoped_ptr<HttpPostProviderFactory>(
- new HttpBridgeFactory(baseline_context_getter,
- network_time_update_callback,
- cancelation_signal));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/http_bridge_unittest.cc b/chromium/sync/internal_api/http_bridge_unittest.cc
deleted file mode 100644
index 5dd39bb2fd0..00000000000
--- a/chromium/sync/internal_api/http_bridge_unittest.cc
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop/message_loop_proxy.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/thread.h"
-#include "net/test/spawned_test_server/spawned_test_server.h"
-#include "net/url_request/test_url_fetcher_factory.h"
-#include "net/url_request/url_fetcher_delegate.h"
-#include "net/url_request/url_request_test_util.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/http_bridge.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-// TODO(timsteele): Should use PathService here. See Chromium Issue 3113.
-const base::FilePath::CharType kDocRoot[] =
- FILE_PATH_LITERAL("chrome/test/data");
-}
-
-class SyncHttpBridgeTest : public testing::Test {
- public:
- SyncHttpBridgeTest()
- : test_server_(net::SpawnedTestServer::TYPE_HTTP,
- net::SpawnedTestServer::kLocalhost,
- base::FilePath(kDocRoot)),
- fake_default_request_context_getter_(NULL),
- bridge_for_race_test_(NULL),
- io_thread_("IO thread") {
- }
-
- virtual void SetUp() {
- base::Thread::Options options;
- options.message_loop_type = base::MessageLoop::TYPE_IO;
- io_thread_.StartWithOptions(options);
- }
-
- virtual void TearDown() {
- if (fake_default_request_context_getter_) {
- GetIOThreadLoop()->ReleaseSoon(FROM_HERE,
- fake_default_request_context_getter_);
- fake_default_request_context_getter_ = NULL;
- }
- io_thread_.Stop();
- }
-
- HttpBridge* BuildBridge() {
- if (!fake_default_request_context_getter_) {
- fake_default_request_context_getter_ =
- new net::TestURLRequestContextGetter(io_thread_.message_loop_proxy());
- fake_default_request_context_getter_->AddRef();
- }
- HttpBridge* bridge = new HttpBridge(
- new HttpBridge::RequestContextGetter(
- fake_default_request_context_getter_,
- "user agent"),
- NetworkTimeUpdateCallback());
- return bridge;
- }
-
- static void Abort(HttpBridge* bridge) {
- bridge->Abort();
- }
-
- // Used by AbortAndReleaseBeforeFetchCompletes to test an interesting race
- // condition.
- void RunSyncThreadBridgeUseTest(base::WaitableEvent* signal_when_created,
- base::WaitableEvent* signal_when_released);
-
- static void TestSameHttpNetworkSession(base::MessageLoop* main_message_loop,
- SyncHttpBridgeTest* test) {
- scoped_refptr<HttpBridge> http_bridge(test->BuildBridge());
- EXPECT_TRUE(test->GetTestRequestContextGetter());
- net::HttpNetworkSession* test_session =
- test->GetTestRequestContextGetter()->GetURLRequestContext()->
- http_transaction_factory()->GetSession();
- EXPECT_EQ(test_session,
- http_bridge->GetRequestContextGetterForTest()->
- GetURLRequestContext()->
- http_transaction_factory()->GetSession());
- main_message_loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
- }
-
- base::MessageLoop* GetIOThreadLoop() { return io_thread_.message_loop(); }
-
- // Note this is lazy created, so don't call this before your bridge.
- net::TestURLRequestContextGetter* GetTestRequestContextGetter() {
- return fake_default_request_context_getter_;
- }
-
- net::SpawnedTestServer test_server_;
-
- base::Thread* io_thread() { return &io_thread_; }
-
- HttpBridge* bridge_for_race_test() { return bridge_for_race_test_; }
-
- private:
- // A make-believe "default" request context, as would be returned by
- // Profile::GetDefaultRequestContext(). Created lazily by BuildBridge.
- net::TestURLRequestContextGetter* fake_default_request_context_getter_;
-
- HttpBridge* bridge_for_race_test_;
-
- // Separate thread for IO used by the HttpBridge.
- base::Thread io_thread_;
- base::MessageLoop loop_;
-};
-
-// An HttpBridge that doesn't actually make network requests and just calls
-// back with dummy response info.
-// TODO(tim): Instead of inheriting here we should inject a component
-// responsible for the MakeAsynchronousPost bit.
-class ShuntedHttpBridge : public HttpBridge {
- public:
- // If |never_finishes| is true, the simulated request never actually
- // returns.
- ShuntedHttpBridge(net::URLRequestContextGetter* baseline_context_getter,
- SyncHttpBridgeTest* test, bool never_finishes)
- : HttpBridge(
- new HttpBridge::RequestContextGetter(
- baseline_context_getter, "user agent"),
- NetworkTimeUpdateCallback()),
- test_(test), never_finishes_(never_finishes) { }
- protected:
- virtual void MakeAsynchronousPost() OVERRIDE {
- ASSERT_TRUE(base::MessageLoop::current() == test_->GetIOThreadLoop());
- if (never_finishes_)
- return;
-
- // We don't actually want to make a request for this test, so just callback
- // as if it completed.
- test_->GetIOThreadLoop()->PostTask(FROM_HERE,
- base::Bind(&ShuntedHttpBridge::CallOnURLFetchComplete, this));
- }
- private:
- virtual ~ShuntedHttpBridge() {}
-
- void CallOnURLFetchComplete() {
- ASSERT_TRUE(base::MessageLoop::current() == test_->GetIOThreadLoop());
- // We return no cookies and a dummy content response.
- net::ResponseCookies cookies;
-
- std::string response_content = "success!";
- net::TestURLFetcher fetcher(0, GURL(), NULL);
- fetcher.set_url(GURL("www.google.com"));
- fetcher.set_response_code(200);
- fetcher.set_cookies(cookies);
- fetcher.SetResponseString(response_content);
- OnURLFetchComplete(&fetcher);
- }
- SyncHttpBridgeTest* test_;
- bool never_finishes_;
-};
-
-void SyncHttpBridgeTest::RunSyncThreadBridgeUseTest(
- base::WaitableEvent* signal_when_created,
- base::WaitableEvent* signal_when_released) {
- scoped_refptr<net::URLRequestContextGetter> ctx_getter(
- new net::TestURLRequestContextGetter(io_thread_.message_loop_proxy()));
- {
- scoped_refptr<ShuntedHttpBridge> bridge(
- new ShuntedHttpBridge(ctx_getter.get(), this, true));
- bridge->SetURL("http://www.google.com", 9999);
- bridge->SetPostPayload("text/plain", 2, " ");
- bridge_for_race_test_ = bridge.get();
- signal_when_created->Signal();
-
- int os_error = 0;
- int response_code = 0;
- bridge->MakeSynchronousPost(&os_error, &response_code);
- bridge_for_race_test_ = NULL;
- }
- signal_when_released->Signal();
-}
-
-TEST_F(SyncHttpBridgeTest, TestUsesSameHttpNetworkSession) {
- // Run this test on the IO thread because we can only call
- // URLRequestContextGetter::GetURLRequestContext on the IO thread.
- io_thread()->message_loop()
- ->PostTask(FROM_HERE,
- base::Bind(&SyncHttpBridgeTest::TestSameHttpNetworkSession,
- base::MessageLoop::current(),
- this));
- base::MessageLoop::current()->Run();
-}
-
-// Test the HttpBridge without actually making any network requests.
-TEST_F(SyncHttpBridgeTest, TestMakeSynchronousPostShunted) {
- scoped_refptr<net::URLRequestContextGetter> ctx_getter(
- new net::TestURLRequestContextGetter(io_thread()->message_loop_proxy()));
- scoped_refptr<HttpBridge> http_bridge(
- new ShuntedHttpBridge(ctx_getter.get(), this, false));
- http_bridge->SetURL("http://www.google.com", 9999);
- http_bridge->SetPostPayload("text/plain", 2, " ");
-
- int os_error = 0;
- int response_code = 0;
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- EXPECT_TRUE(success);
- EXPECT_EQ(200, response_code);
- EXPECT_EQ(0, os_error);
-
- EXPECT_EQ(8, http_bridge->GetResponseContentLength());
- EXPECT_EQ(std::string("success!"),
- std::string(http_bridge->GetResponseContent()));
-}
-
-// Full round-trip test of the HttpBridge, using default UA string and
-// no request cookies.
-TEST_F(SyncHttpBridgeTest, TestMakeSynchronousPostLiveWithPayload) {
- ASSERT_TRUE(test_server_.Start());
-
- scoped_refptr<HttpBridge> http_bridge(BuildBridge());
-
- std::string payload = "this should be echoed back";
- GURL echo = test_server_.GetURL("echo");
- http_bridge->SetURL(echo.spec().c_str(), echo.IntPort());
- http_bridge->SetPostPayload("application/x-www-form-urlencoded",
- payload.length() + 1, payload.c_str());
- int os_error = 0;
- int response_code = 0;
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- EXPECT_TRUE(success);
- EXPECT_EQ(200, response_code);
- EXPECT_EQ(0, os_error);
-
- EXPECT_EQ(payload.length() + 1,
- static_cast<size_t>(http_bridge->GetResponseContentLength()));
- EXPECT_EQ(payload, std::string(http_bridge->GetResponseContent()));
-}
-
-// Full round-trip test of the HttpBridge.
-TEST_F(SyncHttpBridgeTest, TestMakeSynchronousPostLiveComprehensive) {
- ASSERT_TRUE(test_server_.Start());
-
- scoped_refptr<HttpBridge> http_bridge(BuildBridge());
-
- GURL echo_header = test_server_.GetURL("echoall");
- http_bridge->SetURL(echo_header.spec().c_str(), echo_header.IntPort());
-
- std::string test_payload = "###TEST PAYLOAD###";
- http_bridge->SetPostPayload("text/html", test_payload.length() + 1,
- test_payload.c_str());
-
- int os_error = 0;
- int response_code = 0;
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- EXPECT_TRUE(success);
- EXPECT_EQ(200, response_code);
- EXPECT_EQ(0, os_error);
-
- std::string response(http_bridge->GetResponseContent(),
- http_bridge->GetResponseContentLength());
- EXPECT_EQ(std::string::npos, response.find("Cookie:"));
- EXPECT_NE(std::string::npos, response.find("User-Agent: user agent"));
- EXPECT_NE(std::string::npos, response.find(test_payload.c_str()));
-}
-
-TEST_F(SyncHttpBridgeTest, TestExtraRequestHeaders) {
- ASSERT_TRUE(test_server_.Start());
-
- scoped_refptr<HttpBridge> http_bridge(BuildBridge());
-
- GURL echo_header = test_server_.GetURL("echoall");
-
- http_bridge->SetURL(echo_header.spec().c_str(), echo_header.IntPort());
- http_bridge->SetExtraRequestHeaders("test:fnord");
-
- std::string test_payload = "###TEST PAYLOAD###";
- http_bridge->SetPostPayload("text/html", test_payload.length() + 1,
- test_payload.c_str());
-
- int os_error = 0;
- int response_code = 0;
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- EXPECT_TRUE(success);
- EXPECT_EQ(200, response_code);
- EXPECT_EQ(0, os_error);
-
- std::string response(http_bridge->GetResponseContent(),
- http_bridge->GetResponseContentLength());
-
- EXPECT_NE(std::string::npos, response.find("fnord"));
- EXPECT_NE(std::string::npos, response.find(test_payload.c_str()));
-}
-
-TEST_F(SyncHttpBridgeTest, TestResponseHeader) {
- ASSERT_TRUE(test_server_.Start());
-
- scoped_refptr<HttpBridge> http_bridge(BuildBridge());
-
- GURL echo_header = test_server_.GetURL("echoall");
- http_bridge->SetURL(echo_header.spec().c_str(), echo_header.IntPort());
-
- std::string test_payload = "###TEST PAYLOAD###";
- http_bridge->SetPostPayload("text/html", test_payload.length() + 1,
- test_payload.c_str());
-
- int os_error = 0;
- int response_code = 0;
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- EXPECT_TRUE(success);
- EXPECT_EQ(200, response_code);
- EXPECT_EQ(0, os_error);
-
- EXPECT_EQ(http_bridge->GetResponseHeaderValue("Content-type"), "text/html");
- EXPECT_TRUE(http_bridge->GetResponseHeaderValue("invalid-header").empty());
-}
-
-TEST_F(SyncHttpBridgeTest, Abort) {
- scoped_refptr<net::URLRequestContextGetter> ctx_getter(
- new net::TestURLRequestContextGetter(io_thread()->message_loop_proxy()));
- scoped_refptr<ShuntedHttpBridge> http_bridge(
- new ShuntedHttpBridge(ctx_getter.get(), this, true));
- http_bridge->SetURL("http://www.google.com", 9999);
- http_bridge->SetPostPayload("text/plain", 2, " ");
-
- int os_error = 0;
- int response_code = 0;
-
- io_thread()->message_loop_proxy()->PostTask(
- FROM_HERE,
- base::Bind(&SyncHttpBridgeTest::Abort, http_bridge));
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- EXPECT_FALSE(success);
- EXPECT_EQ(net::ERR_ABORTED, os_error);
-}
-
-TEST_F(SyncHttpBridgeTest, AbortLate) {
- scoped_refptr<net::URLRequestContextGetter> ctx_getter(
- new net::TestURLRequestContextGetter(io_thread()->message_loop_proxy()));
- scoped_refptr<ShuntedHttpBridge> http_bridge(
- new ShuntedHttpBridge(ctx_getter.get(), this, false));
- http_bridge->SetURL("http://www.google.com", 9999);
- http_bridge->SetPostPayload("text/plain", 2, " ");
-
- int os_error = 0;
- int response_code = 0;
-
- bool success = http_bridge->MakeSynchronousPost(&os_error, &response_code);
- ASSERT_TRUE(success);
- http_bridge->Abort();
- // Ensures no double-free of URLFetcher, etc.
-}
-
-// Tests an interesting case where code using the HttpBridge aborts the fetch
-// and releases ownership before a pending fetch completed callback is issued by
-// the underlying URLFetcher (and before that URLFetcher is destroyed, which
-// would cancel the callback).
-TEST_F(SyncHttpBridgeTest, AbortAndReleaseBeforeFetchComplete) {
- base::Thread sync_thread("SyncThread");
- sync_thread.Start();
-
- // First, block the sync thread on the post.
- base::WaitableEvent signal_when_created(false, false);
- base::WaitableEvent signal_when_released(false, false);
- sync_thread.message_loop()->PostTask(FROM_HERE,
- base::Bind(&SyncHttpBridgeTest::RunSyncThreadBridgeUseTest,
- base::Unretained(this),
- &signal_when_created,
- &signal_when_released));
-
- // Stop IO so we can control order of operations.
- base::WaitableEvent io_waiter(false, false);
- ASSERT_TRUE(io_thread()->message_loop_proxy()->PostTask(
- FROM_HERE,
- base::Bind(&base::WaitableEvent::Wait, base::Unretained(&io_waiter))));
-
- signal_when_created.Wait(); // Wait till we have a bridge to abort.
- ASSERT_TRUE(bridge_for_race_test());
-
- // Schedule the fetch completion callback (but don't run it yet). Don't take
- // a reference to the bridge to mimic URLFetcher's handling of the delegate.
- net::URLFetcherDelegate* delegate =
- static_cast<net::URLFetcherDelegate*>(bridge_for_race_test());
- net::ResponseCookies cookies;
- std::string response_content = "success!";
- net::TestURLFetcher fetcher(0, GURL(), NULL);
- fetcher.set_url(GURL("www.google.com"));
- fetcher.set_response_code(200);
- fetcher.set_cookies(cookies);
- fetcher.SetResponseString(response_content);
- ASSERT_TRUE(io_thread()->message_loop_proxy()->PostTask(
- FROM_HERE,
- base::Bind(&net::URLFetcherDelegate::OnURLFetchComplete,
- base::Unretained(delegate), &fetcher)));
-
- // Abort the fetch. This should be smart enough to handle the case where
- // the bridge is destroyed before the callback scheduled above completes.
- bridge_for_race_test()->Abort();
-
- // Wait until the sync thread releases its ref on the bridge.
- signal_when_released.Wait();
- ASSERT_FALSE(bridge_for_race_test());
-
- // Unleash the hounds. The fetch completion callback should fire first, and
- // succeed even though we Release()d the bridge above because the call to
- // Abort should have held a reference.
- io_waiter.Signal();
-
- // Done.
- sync_thread.Stop();
- io_thread()->Stop();
-}
-
-void HttpBridgeRunOnSyncThread(
- net::URLRequestContextGetter* baseline_context_getter,
- CancelationSignal* factory_cancelation_signal,
- syncer::HttpPostProviderFactory** bridge_factory_out,
- syncer::HttpPostProviderInterface** bridge_out,
- base::WaitableEvent* signal_when_created,
- base::WaitableEvent* wait_for_shutdown) {
- scoped_ptr<syncer::HttpBridgeFactory> bridge_factory(
- new syncer::HttpBridgeFactory(baseline_context_getter,
- NetworkTimeUpdateCallback(),
- factory_cancelation_signal));
- bridge_factory->Init("test");
- *bridge_factory_out = bridge_factory.get();
-
- HttpPostProviderInterface* bridge = bridge_factory->Create();
- *bridge_out = bridge;
-
- signal_when_created->Signal();
- wait_for_shutdown->Wait();
-
- bridge_factory->Destroy(bridge);
-}
-
-void WaitOnIOThread(base::WaitableEvent* signal_wait_start,
- base::WaitableEvent* wait_done) {
- signal_wait_start->Signal();
- wait_done->Wait();
-}
-
-// Tests RequestContextGetter is properly released on IO thread even when
-// IO thread stops before sync thread.
-TEST_F(SyncHttpBridgeTest, RequestContextGetterReleaseOrder) {
- base::Thread sync_thread("SyncThread");
- sync_thread.Start();
-
- syncer::HttpPostProviderFactory* factory = NULL;
- syncer::HttpPostProviderInterface* bridge = NULL;
-
- scoped_refptr<net::URLRequestContextGetter> baseline_context_getter(
- new net::TestURLRequestContextGetter(io_thread()->message_loop_proxy()));
-
- base::WaitableEvent signal_when_created(false, false);
- base::WaitableEvent wait_for_shutdown(false, false);
-
- CancelationSignal release_request_context_signal;
-
- // Create bridge factory and factory on sync thread and wait for the creation
- // to finish.
- sync_thread.message_loop()->PostTask(FROM_HERE,
- base::Bind(&HttpBridgeRunOnSyncThread,
- base::Unretained(baseline_context_getter.get()),
- &release_request_context_signal ,&factory, &bridge,
- &signal_when_created, &wait_for_shutdown));
- signal_when_created.Wait();
-
- // Simulate sync shutdown by aborting bridge and shutting down factory on
- // frontend.
- bridge->Abort();
- release_request_context_signal.Signal();
-
- // Wait for sync's RequestContextGetter to be cleared on IO thread and
- // check for reference count.
- base::WaitableEvent signal_wait_start(false, false);
- base::WaitableEvent wait_done(false, false);
- io_thread()->message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&WaitOnIOThread, &signal_wait_start, &wait_done));
- signal_wait_start.Wait();
- // |baseline_context_getter| should have only one reference from local
- // variable.
- EXPECT_TRUE(baseline_context_getter->HasOneRef());
- baseline_context_getter = NULL;
-
- // Unblock and stop IO thread before sync thread.
- wait_done.Signal();
- io_thread()->Stop();
-
- // Unblock and stop sync thread.
- wait_for_shutdown.Signal();
- sync_thread.Stop();
-}
-
-// Attempt to release the URLRequestContextGetter before the HttpBridgeFactory
-// is initialized.
-TEST_F(SyncHttpBridgeTest, EarlyAbortFactory) {
- // In a real scenario, the following would happen on many threads. For
- // simplicity, this test uses only one thread.
-
- scoped_refptr<net::URLRequestContextGetter> baseline_context_getter(
- new net::TestURLRequestContextGetter(io_thread()->message_loop_proxy()));
- CancelationSignal release_request_context_signal;
-
- // UI Thread: Initialize the HttpBridgeFactory. The next step would be to
- // post a task to SBH::Core to have it initialized.
- scoped_ptr<syncer::HttpBridgeFactory> factory(new HttpBridgeFactory(
- baseline_context_getter,
- NetworkTimeUpdateCallback(),
- &release_request_context_signal));
-
- // UI Thread: A very early shutdown request arrives and executes on the UI
- // thread before the posted sync thread task is run.
- release_request_context_signal.Signal();
-
- // Sync thread: Finally run the posted task, only to find that our
- // HttpBridgeFactory has been neutered. Should not crash.
- factory->Init("TestUserAgent");
-
- // At this point, attempting to use the factory would trigger a crash. Both
- // this test and the real world code should make sure this never happens.
-};
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/internal_components_factory_impl.cc b/chromium/sync/internal_api/internal_components_factory_impl.cc
deleted file mode 100644
index 6ccb143e5ee..00000000000
--- a/chromium/sync/internal_api/internal_components_factory_impl.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/internal_components_factory_impl.h"
-
-#include "sync/engine/backoff_delay_provider.h"
-#include "sync/engine/syncer.h"
-#include "sync/engine/sync_scheduler_impl.h"
-#include "sync/sessions/sync_session_context.h"
-#include "sync/syncable/on_disk_directory_backing_store.h"
-
-using base::TimeDelta;
-
-namespace syncer {
-
-InternalComponentsFactoryImpl::InternalComponentsFactoryImpl(
- const Switches& switches) : switches_(switches) {
-}
-
-InternalComponentsFactoryImpl::~InternalComponentsFactoryImpl() { }
-
-scoped_ptr<SyncScheduler> InternalComponentsFactoryImpl::BuildScheduler(
- const std::string& name,
- sessions::SyncSessionContext* context,
- CancelationSignal* cancelation_signal) {
-
- scoped_ptr<BackoffDelayProvider> delay(BackoffDelayProvider::FromDefaults());
-
- if (switches_.backoff_override == BACKOFF_SHORT_INITIAL_RETRY_OVERRIDE)
- delay.reset(BackoffDelayProvider::WithShortInitialRetryOverride());
-
- return scoped_ptr<SyncScheduler>(new SyncSchedulerImpl(
- name,
- delay.release(),
- context,
- new Syncer(cancelation_signal)));
-}
-
-scoped_ptr<sessions::SyncSessionContext>
-InternalComponentsFactoryImpl::BuildContext(
- ServerConnectionManager* connection_manager,
- syncable::Directory* directory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- const std::vector<SyncEngineEventListener*>& listeners,
- sessions::DebugInfoGetter* debug_info_getter,
- TrafficRecorder* traffic_recorder,
- const std::string& invalidation_client_id) {
- return scoped_ptr<sessions::SyncSessionContext>(
- new sessions::SyncSessionContext(
- connection_manager, directory, workers, extensions_activity,
- listeners, debug_info_getter,
- traffic_recorder,
- switches_.encryption_method == ENCRYPTION_KEYSTORE,
- switches_.pre_commit_updates_policy ==
- FORCE_ENABLE_PRE_COMMIT_UPDATE_AVOIDANCE,
- invalidation_client_id));
-}
-
-scoped_ptr<syncable::DirectoryBackingStore>
-InternalComponentsFactoryImpl::BuildDirectoryBackingStore(
- const std::string& dir_name, const base::FilePath& backing_filepath) {
- return scoped_ptr<syncable::DirectoryBackingStore>(
- new syncable::OnDiskDirectoryBackingStore(dir_name, backing_filepath));
-}
-
-InternalComponentsFactory::Switches
-InternalComponentsFactoryImpl::GetSwitches() const {
- return switches_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/js_mutation_event_observer.cc b/chromium/sync/internal_api/js_mutation_event_observer.cc
deleted file mode 100644
index 3f1056f9a93..00000000000
--- a/chromium/sync/internal_api/js_mutation_event_observer.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/js_mutation_event_observer.h"
-
-#include <string>
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_event_handler.h"
-
-namespace syncer {
-
-JsMutationEventObserver::JsMutationEventObserver()
- : weak_ptr_factory_(this) {}
-
-JsMutationEventObserver::~JsMutationEventObserver() {
- DCHECK(CalledOnValidThread());
-}
-
-base::WeakPtr<JsMutationEventObserver> JsMutationEventObserver::AsWeakPtr() {
- return weak_ptr_factory_.GetWeakPtr();
-}
-
-void JsMutationEventObserver::InvalidateWeakPtrs() {
- weak_ptr_factory_.InvalidateWeakPtrs();
-}
-
-void JsMutationEventObserver::SetJsEventHandler(
- const WeakHandle<JsEventHandler>& event_handler) {
- event_handler_ = event_handler;
-}
-
-namespace {
-
-// Max number of changes we attempt to convert to values (to avoid
-// running out of memory).
-const size_t kChangeLimit = 100;
-
-} // namespace
-
-void JsMutationEventObserver::OnChangesApplied(
- ModelType model_type,
- int64 write_transaction_id,
- const ImmutableChangeRecordList& changes) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("modelType", ModelTypeToString(model_type));
- details.SetString("writeTransactionId",
- base::Int64ToString(write_transaction_id));
- base::Value* changes_value = NULL;
- const size_t changes_size = changes.Get().size();
- if (changes_size <= kChangeLimit) {
- base::ListValue* changes_list = new base::ListValue();
- for (ChangeRecordList::const_iterator it =
- changes.Get().begin(); it != changes.Get().end(); ++it) {
- changes_list->Append(it->ToValue());
- }
- changes_value = changes_list;
- } else {
- changes_value =
- new base::StringValue(
- base::Uint64ToString(static_cast<uint64>(changes_size)) +
- " changes");
- }
- details.Set("changes", changes_value);
- HandleJsEvent(FROM_HERE, "onChangesApplied", JsEventDetails(&details));
-}
-
-void JsMutationEventObserver::OnChangesComplete(ModelType model_type) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("modelType", ModelTypeToString(model_type));
- HandleJsEvent(FROM_HERE, "onChangesComplete", JsEventDetails(&details));
-}
-
-void JsMutationEventObserver::OnTransactionWrite(
- const syncable::ImmutableWriteTransactionInfo& write_transaction_info,
- ModelTypeSet models_with_changes) {
- DCHECK(CalledOnValidThread());
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.Set("writeTransactionInfo",
- write_transaction_info.Get().ToValue(kChangeLimit));
- details.Set("modelsWithChanges",
- ModelTypeSetToValue(models_with_changes));
- HandleJsEvent(FROM_HERE, "onTransactionWrite", JsEventDetails(&details));
-}
-
-void JsMutationEventObserver::HandleJsEvent(
- const tracked_objects::Location& from_here,
- const std::string& name, const JsEventDetails& details) {
- if (!event_handler_.IsInitialized()) {
- NOTREACHED();
- return;
- }
- event_handler_.Call(from_here,
- &JsEventHandler::HandleJsEvent, name, details);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/js_mutation_event_observer.h b/chromium/sync/internal_api/js_mutation_event_observer.h
deleted file mode 100644
index 6c92646e391..00000000000
--- a/chromium/sync/internal_api/js_mutation_event_observer.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_JS_MUTATION_EVENT_OBSERVER_H_
-#define SYNC_INTERNAL_API_JS_MUTATION_EVENT_OBSERVER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/sync_manager.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/syncable/transaction_observer.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-class JsEventDetails;
-class JsEventHandler;
-
-// Observes all change- and transaction-related events and routes a
-// summarized version to a JsEventHandler.
-class SYNC_EXPORT_PRIVATE JsMutationEventObserver
- : public SyncManager::ChangeObserver,
- public syncable::TransactionObserver,
- public base::NonThreadSafe {
- public:
- JsMutationEventObserver();
-
- virtual ~JsMutationEventObserver();
-
- base::WeakPtr<JsMutationEventObserver> AsWeakPtr();
-
- void InvalidateWeakPtrs();
-
- void SetJsEventHandler(const WeakHandle<JsEventHandler>& event_handler);
-
- // SyncManager::ChangeObserver implementation.
- virtual void OnChangesApplied(
- ModelType model_type,
- int64 write_transaction_id,
- const ImmutableChangeRecordList& changes) OVERRIDE;
- virtual void OnChangesComplete(ModelType model_type) OVERRIDE;
-
- // syncable::TransactionObserver implementation.
- virtual void OnTransactionWrite(
- const syncable::ImmutableWriteTransactionInfo& write_transaction_info,
- ModelTypeSet models_with_changes) OVERRIDE;
-
- private:
- WeakHandle<JsEventHandler> event_handler_;
-
- void HandleJsEvent(
- const tracked_objects::Location& from_here,
- const std::string& name, const JsEventDetails& details);
-
- base::WeakPtrFactory<JsMutationEventObserver> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(JsMutationEventObserver);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_JS_MUTATION_EVENT_OBSERVER_H_
diff --git a/chromium/sync/internal_api/js_mutation_event_observer_unittest.cc b/chromium/sync/internal_api/js_mutation_event_observer_unittest.cc
deleted file mode 100644
index 7db697dc5a9..00000000000
--- a/chromium/sync/internal_api/js_mutation_event_observer_unittest.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/js_mutation_event_observer.h"
-
-#include "base/basictypes.h"
-#include "base/message_loop/message_loop.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_test_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using ::testing::InSequence;
-using ::testing::StrictMock;
-
-class JsMutationEventObserverTest : public testing::Test {
- protected:
- JsMutationEventObserverTest() {
- js_mutation_event_observer_.SetJsEventHandler(
- mock_js_event_handler_.AsWeakHandle());
- }
-
- private:
- // This must be destroyed after the member variables below in order
- // for WeakHandles to be destroyed properly.
- base::MessageLoop message_loop_;
-
- protected:
- StrictMock<MockJsEventHandler> mock_js_event_handler_;
- JsMutationEventObserver js_mutation_event_observer_;
-
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-};
-
-TEST_F(JsMutationEventObserverTest, OnChangesApplied) {
- InSequence dummy;
-
- // We don't test with passwords as that requires additional setup.
-
- // Build a list of example ChangeRecords.
- ChangeRecord changes[MODEL_TYPE_COUNT];
- for (int i = AUTOFILL_PROFILE; i < MODEL_TYPE_COUNT; ++i) {
- changes[i].id = i;
- switch (i % 3) {
- case 0:
- changes[i].action = ChangeRecord::ACTION_ADD;
- break;
- case 1:
- changes[i].action = ChangeRecord::ACTION_UPDATE;
- break;
- default:
- changes[i].action = ChangeRecord::ACTION_DELETE;
- break;
- }
- }
-
- // For each i, we call OnChangesApplied() with the first arg equal
- // to i cast to ModelType and the second argument with the changes
- // starting from changes[i].
-
- // Set expectations for each data type.
- for (int i = AUTOFILL_PROFILE; i < MODEL_TYPE_COUNT; ++i) {
- const std::string& model_type_str =
- ModelTypeToString(ModelTypeFromInt(i));
- base::DictionaryValue expected_details;
- expected_details.SetString("modelType", model_type_str);
- expected_details.SetString("writeTransactionId", "0");
- base::ListValue* expected_changes = new base::ListValue();
- expected_details.Set("changes", expected_changes);
- for (int j = i; j < MODEL_TYPE_COUNT; ++j) {
- expected_changes->Append(changes[j].ToValue());
- }
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onChangesApplied",
- HasDetailsAsDictionary(expected_details)));
- }
-
- // Fire OnChangesApplied() for each data type.
- for (int i = AUTOFILL_PROFILE; i < MODEL_TYPE_COUNT; ++i) {
- ChangeRecordList local_changes(changes + i, changes + arraysize(changes));
- js_mutation_event_observer_.OnChangesApplied(
- ModelTypeFromInt(i),
- 0, ImmutableChangeRecordList(&local_changes));
- }
-
- PumpLoop();
-}
-
-TEST_F(JsMutationEventObserverTest, OnChangesComplete) {
- InSequence dummy;
-
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- base::DictionaryValue expected_details;
- expected_details.SetString(
- "modelType",
- ModelTypeToString(ModelTypeFromInt(i)));
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onChangesComplete",
- HasDetailsAsDictionary(expected_details)));
- }
-
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- js_mutation_event_observer_.OnChangesComplete(
- ModelTypeFromInt(i));
- }
- PumpLoop();
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/js_sync_encryption_handler_observer.cc b/chromium/sync/internal_api/js_sync_encryption_handler_observer.cc
deleted file mode 100644
index d6bd50ecf58..00000000000
--- a/chromium/sync/internal_api/js_sync_encryption_handler_observer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/js_sync_encryption_handler_observer.h"
-
-#include <cstddef>
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/sync_string_conversions.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_event_handler.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/time.h"
-
-namespace syncer {
-
-JsSyncEncryptionHandlerObserver::JsSyncEncryptionHandlerObserver() {}
-
-JsSyncEncryptionHandlerObserver::~JsSyncEncryptionHandlerObserver() {}
-
-void JsSyncEncryptionHandlerObserver::SetJsEventHandler(
- const WeakHandle<JsEventHandler>& event_handler) {
- event_handler_ = event_handler;
-}
-
-void JsSyncEncryptionHandlerObserver::OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("reason",
- PassphraseRequiredReasonToString(reason));
- HandleJsEvent(FROM_HERE, "onPassphraseRequired", JsEventDetails(&details));
-}
-
-void JsSyncEncryptionHandlerObserver::OnPassphraseAccepted() {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- HandleJsEvent(FROM_HERE, "onPassphraseAccepted", JsEventDetails(&details));
-}
-
-void JsSyncEncryptionHandlerObserver::OnBootstrapTokenUpdated(
- const std::string& boostrap_token,
- BootstrapTokenType type) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("bootstrapToken", "<redacted>");
- details.SetString("type", BootstrapTokenTypeToString(type));
- HandleJsEvent(FROM_HERE, "onBootstrapTokenUpdated", JsEventDetails(&details));
-}
-
-void JsSyncEncryptionHandlerObserver::OnEncryptedTypesChanged(
- ModelTypeSet encrypted_types,
- bool encrypt_everything) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.Set("encryptedTypes",
- ModelTypeSetToValue(encrypted_types));
- details.SetBoolean("encryptEverything", encrypt_everything);
- HandleJsEvent(FROM_HERE,
- "onEncryptedTypesChanged", JsEventDetails(&details));
-}
-
-void JsSyncEncryptionHandlerObserver::OnEncryptionComplete() {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- HandleJsEvent(FROM_HERE, "onEncryptionComplete", JsEventDetails());
-}
-
-void JsSyncEncryptionHandlerObserver::OnCryptographerStateChanged(
- Cryptographer* cryptographer) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetBoolean("ready",
- cryptographer->is_ready());
- details.SetBoolean("hasPendingKeys",
- cryptographer->has_pending_keys());
- HandleJsEvent(FROM_HERE,
- "onCryptographerStateChanged",
- JsEventDetails(&details));
-}
-
-void JsSyncEncryptionHandlerObserver::OnPassphraseTypeChanged(
- PassphraseType type,
- base::Time explicit_passphrase_time) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("passphraseType",
- PassphraseTypeToString(type));
- details.SetInteger("explicitPassphraseTime",
- TimeToProtoTime(explicit_passphrase_time));
- HandleJsEvent(FROM_HERE,
- "onPassphraseTypeChanged",
- JsEventDetails(&details));
-}
-
-void JsSyncEncryptionHandlerObserver::HandleJsEvent(
- const tracked_objects::Location& from_here,
- const std::string& name, const JsEventDetails& details) {
- if (!event_handler_.IsInitialized()) {
- NOTREACHED();
- return;
- }
- event_handler_.Call(from_here,
- &JsEventHandler::HandleJsEvent, name, details);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/js_sync_encryption_handler_observer.h b/chromium/sync/internal_api/js_sync_encryption_handler_observer.h
deleted file mode 100644
index 9c741782814..00000000000
--- a/chromium/sync/internal_api/js_sync_encryption_handler_observer.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_JS_SYNC_ENCRYPTION_HANDLER_OBSERVER_H_
-#define SYNC_INTERNAL_API_JS_SYNC_ENCRYPTION_HANDLER_OBSERVER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/sync_encryption_handler.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/protocol/sync_protocol_error.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-class JsEventDetails;
-class JsEventHandler;
-
-// Routes SyncEncryptionHandler events to a JsEventHandler.
-class SYNC_EXPORT_PRIVATE JsSyncEncryptionHandlerObserver
- : public SyncEncryptionHandler::Observer {
- public:
- JsSyncEncryptionHandlerObserver();
- virtual ~JsSyncEncryptionHandlerObserver();
-
- void SetJsEventHandler(const WeakHandle<JsEventHandler>& event_handler);
-
- // SyncEncryptionHandlerObserver::Observer implementation.
- virtual void OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) OVERRIDE;
- virtual void OnPassphraseAccepted() OVERRIDE;
- virtual void OnBootstrapTokenUpdated(
- const std::string& bootstrap_token,
- BootstrapTokenType type) OVERRIDE;
- virtual void OnEncryptedTypesChanged(
- ModelTypeSet encrypted_types,
- bool encrypt_everything) OVERRIDE;
- virtual void OnEncryptionComplete() OVERRIDE;
- virtual void OnCryptographerStateChanged(
- Cryptographer* cryptographer) OVERRIDE;
- virtual void OnPassphraseTypeChanged(
- PassphraseType type,
- base::Time explicit_passphrase_time) OVERRIDE;
-
- private:
- void HandleJsEvent(const tracked_objects::Location& from_here,
- const std::string& name, const JsEventDetails& details);
-
- WeakHandle<JsEventHandler> event_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(JsSyncEncryptionHandlerObserver);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_JS_SYNC_ENCRYPTION_HANDLER_OBSERVER_H_
diff --git a/chromium/sync/internal_api/js_sync_encryption_handler_observer_unittest.cc b/chromium/sync/internal_api/js_sync_encryption_handler_observer_unittest.cc
deleted file mode 100644
index 09fc4bf5213..00000000000
--- a/chromium/sync/internal_api/js_sync_encryption_handler_observer_unittest.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/js_sync_encryption_handler_observer.h"
-
-#include "base/basictypes.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/sync_string_conversions.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_test_util.h"
-#include "sync/test/fake_encryptor.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using ::testing::InSequence;
-using ::testing::StrictMock;
-
-class JsSyncEncryptionHandlerObserverTest : public testing::Test {
- protected:
- JsSyncEncryptionHandlerObserverTest() {
- js_sync_encryption_handler_observer_.SetJsEventHandler(
- mock_js_event_handler_.AsWeakHandle());
- }
-
- private:
- // This must be destroyed after the member variables below in order
- // for WeakHandles to be destroyed properly.
- base::MessageLoop message_loop_;
-
- protected:
- StrictMock<MockJsEventHandler> mock_js_event_handler_;
- JsSyncEncryptionHandlerObserver js_sync_encryption_handler_observer_;
-
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-};
-
-TEST_F(JsSyncEncryptionHandlerObserverTest, NoArgNotifiations) {
- InSequence dummy;
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onEncryptionComplete",
- HasDetails(JsEventDetails())));
-
- js_sync_encryption_handler_observer_.OnEncryptionComplete();
- PumpLoop();
-}
-
-TEST_F(JsSyncEncryptionHandlerObserverTest, OnPassphraseRequired) {
- InSequence dummy;
-
- base::DictionaryValue reason_passphrase_not_required_details;
- base::DictionaryValue reason_encryption_details;
- base::DictionaryValue reason_decryption_details;
-
- reason_passphrase_not_required_details.SetString(
- "reason",
- PassphraseRequiredReasonToString(REASON_PASSPHRASE_NOT_REQUIRED));
- reason_encryption_details.SetString(
- "reason",
- PassphraseRequiredReasonToString(REASON_ENCRYPTION));
- reason_decryption_details.SetString(
- "reason",
- PassphraseRequiredReasonToString(REASON_DECRYPTION));
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onPassphraseRequired",
- HasDetailsAsDictionary(
- reason_passphrase_not_required_details)));
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onPassphraseRequired",
- HasDetailsAsDictionary(reason_encryption_details)));
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onPassphraseRequired",
- HasDetailsAsDictionary(reason_decryption_details)));
-
- js_sync_encryption_handler_observer_.OnPassphraseRequired(
- REASON_PASSPHRASE_NOT_REQUIRED,
- sync_pb::EncryptedData());
- js_sync_encryption_handler_observer_.OnPassphraseRequired(REASON_ENCRYPTION,
- sync_pb::EncryptedData());
- js_sync_encryption_handler_observer_.OnPassphraseRequired(REASON_DECRYPTION,
- sync_pb::EncryptedData());
- PumpLoop();
-}
-
-TEST_F(JsSyncEncryptionHandlerObserverTest, OnBootstrapTokenUpdated) {
- base::DictionaryValue bootstrap_token_details;
- bootstrap_token_details.SetString("bootstrapToken", "<redacted>");
- bootstrap_token_details.SetString("type", "PASSPHRASE_BOOTSTRAP_TOKEN");
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent(
- "onBootstrapTokenUpdated",
- HasDetailsAsDictionary(bootstrap_token_details)));
-
- js_sync_encryption_handler_observer_.OnBootstrapTokenUpdated(
- "sensitive_token", PASSPHRASE_BOOTSTRAP_TOKEN);
- PumpLoop();
-}
-
-TEST_F(JsSyncEncryptionHandlerObserverTest, OnEncryptedTypesChanged) {
- base::DictionaryValue expected_details;
- base::ListValue* encrypted_type_values = new base::ListValue();
- const bool encrypt_everything = false;
- expected_details.Set("encryptedTypes", encrypted_type_values);
- expected_details.SetBoolean("encryptEverything", encrypt_everything);
- ModelTypeSet encrypted_types;
-
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- ModelType type = ModelTypeFromInt(i);
- encrypted_types.Put(type);
- encrypted_type_values->Append(new base::StringValue(
- ModelTypeToString(type)));
- }
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onEncryptedTypesChanged",
- HasDetailsAsDictionary(expected_details)));
-
- js_sync_encryption_handler_observer_.OnEncryptedTypesChanged(
- encrypted_types, encrypt_everything);
- PumpLoop();
-}
-
-
-TEST_F(JsSyncEncryptionHandlerObserverTest, OnCryptographerStateChanged) {
- base::DictionaryValue expected_details;
- bool expected_ready = false;
- bool expected_pending = false;
- expected_details.SetBoolean("ready", expected_ready);
- expected_details.SetBoolean("hasPendingKeys", expected_pending);
- ModelTypeSet encrypted_types;
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onCryptographerStateChanged",
- HasDetailsAsDictionary(expected_details)));
-
- FakeEncryptor encryptor;
- Cryptographer cryptographer(&encryptor);
-
- js_sync_encryption_handler_observer_.OnCryptographerStateChanged(
- &cryptographer);
- PumpLoop();
-}
-
-TEST_F(JsSyncEncryptionHandlerObserverTest, OnPassphraseTypeChanged) {
- InSequence dummy;
-
- base::DictionaryValue passphrase_type_details;
- passphrase_type_details.SetString("passphraseType", "IMPLICIT_PASSPHRASE");
- passphrase_type_details.SetInteger("explicitPassphraseTime", 10);
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onPassphraseTypeChanged",
- HasDetailsAsDictionary(passphrase_type_details)));
-
- js_sync_encryption_handler_observer_.OnPassphraseTypeChanged(
- IMPLICIT_PASSPHRASE, ProtoTimeToTime(10));
- PumpLoop();
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/js_sync_manager_observer.cc b/chromium/sync/internal_api/js_sync_manager_observer.cc
deleted file mode 100644
index 9f8848d8499..00000000000
--- a/chromium/sync/internal_api/js_sync_manager_observer.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/js_sync_manager_observer.h"
-
-#include <cstddef>
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/change_record.h"
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-#include "sync/internal_api/public/util/sync_string_conversions.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_event_handler.h"
-
-namespace syncer {
-
-JsSyncManagerObserver::JsSyncManagerObserver() {}
-
-JsSyncManagerObserver::~JsSyncManagerObserver() {}
-
-void JsSyncManagerObserver::SetJsEventHandler(
- const WeakHandle<JsEventHandler>& event_handler) {
- event_handler_ = event_handler;
-}
-
-void JsSyncManagerObserver::OnSyncCycleCompleted(
- const sessions::SyncSessionSnapshot& snapshot) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.Set("snapshot", snapshot.ToValue());
- HandleJsEvent(FROM_HERE, "onSyncCycleCompleted", JsEventDetails(&details));
-}
-
-void JsSyncManagerObserver::OnConnectionStatusChange(ConnectionStatus status) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.SetString("status", ConnectionStatusToString(status));
- HandleJsEvent(FROM_HERE,
- "onConnectionStatusChange", JsEventDetails(&details));
-}
-
-void JsSyncManagerObserver::OnActionableError(
- const SyncProtocolError& sync_error) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- base::DictionaryValue details;
- details.Set("syncError", sync_error.ToValue());
- HandleJsEvent(FROM_HERE, "onActionableError",
- JsEventDetails(&details));
-}
-
-void JsSyncManagerObserver::OnInitializationComplete(
- const WeakHandle<JsBackend>& js_backend,
- const WeakHandle<DataTypeDebugInfoListener>& debug_info_listener,
- bool success, syncer::ModelTypeSet restored_types) {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- // Ignore the |js_backend| argument; it's not really convertible to
- // JSON anyway.
-
- base::DictionaryValue details;
- details.Set("restoredTypes", ModelTypeSetToValue(restored_types));
-
- HandleJsEvent(FROM_HERE,
- "onInitializationComplete",
- JsEventDetails(&details));
-}
-
-void JsSyncManagerObserver::OnStopSyncingPermanently() {
- if (!event_handler_.IsInitialized()) {
- return;
- }
- HandleJsEvent(FROM_HERE, "onStopSyncingPermanently", JsEventDetails());
-}
-
-void JsSyncManagerObserver::HandleJsEvent(
- const tracked_objects::Location& from_here,
- const std::string& name, const JsEventDetails& details) {
- if (!event_handler_.IsInitialized()) {
- NOTREACHED();
- return;
- }
- event_handler_.Call(from_here,
- &JsEventHandler::HandleJsEvent, name, details);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/js_sync_manager_observer.h b/chromium/sync/internal_api/js_sync_manager_observer.h
deleted file mode 100644
index 17a40e7bd70..00000000000
--- a/chromium/sync/internal_api/js_sync_manager_observer.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_JS_SYNC_MANAGER_OBSERVER_H_
-#define SYNC_INTERNAL_API_JS_SYNC_MANAGER_OBSERVER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/sync_manager.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/protocol/sync_protocol_error.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-class JsEventDetails;
-class JsEventHandler;
-
-// Routes SyncManager events to a JsEventHandler.
-class SYNC_EXPORT_PRIVATE JsSyncManagerObserver : public SyncManager::Observer {
- public:
- JsSyncManagerObserver();
- virtual ~JsSyncManagerObserver();
-
- void SetJsEventHandler(const WeakHandle<JsEventHandler>& event_handler);
-
- // SyncManager::Observer implementation.
- virtual void OnSyncCycleCompleted(
- const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
- virtual void OnConnectionStatusChange(ConnectionStatus status) OVERRIDE;
- virtual void OnInitializationComplete(
- const WeakHandle<JsBackend>& js_backend,
- const WeakHandle<DataTypeDebugInfoListener>& debug_info_listener,
- bool success,
- syncer::ModelTypeSet restored_types) OVERRIDE;
- virtual void OnStopSyncingPermanently() OVERRIDE;
- virtual void OnActionableError(
- const SyncProtocolError& sync_protocol_error) OVERRIDE;
-
- private:
- void HandleJsEvent(const tracked_objects::Location& from_here,
- const std::string& name, const JsEventDetails& details);
-
- WeakHandle<JsEventHandler> event_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(JsSyncManagerObserver);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_JS_SYNC_MANAGER_OBSERVER_H_
diff --git a/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc b/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc
deleted file mode 100644
index e4b8c64966f..00000000000
--- a/chromium/sync/internal_api/js_sync_manager_observer_unittest.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/js_sync_manager_observer.h"
-
-#include "base/basictypes.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-#include "sync/internal_api/public/util/sync_string_conversions.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_test_util.h"
-#include "sync/protocol/sync_protocol_error.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using ::testing::InSequence;
-using ::testing::StrictMock;
-
-class JsSyncManagerObserverTest : public testing::Test {
- protected:
- JsSyncManagerObserverTest() {
- js_sync_manager_observer_.SetJsEventHandler(
- mock_js_event_handler_.AsWeakHandle());
- }
-
- private:
- // This must be destroyed after the member variables below in order
- // for WeakHandles to be destroyed properly.
- base::MessageLoop message_loop_;
-
- protected:
- StrictMock<MockJsEventHandler> mock_js_event_handler_;
- JsSyncManagerObserver js_sync_manager_observer_;
-
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-};
-
-TEST_F(JsSyncManagerObserverTest, NoArgNotifiations) {
- InSequence dummy;
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onStopSyncingPermanently",
- HasDetails(JsEventDetails())));
-
- js_sync_manager_observer_.OnStopSyncingPermanently();
- PumpLoop();
-}
-
-TEST_F(JsSyncManagerObserverTest, OnInitializationComplete) {
- base::DictionaryValue expected_details;
- syncer::ModelTypeSet restored_types;
- restored_types.Put(BOOKMARKS);
- restored_types.Put(NIGORI);
- expected_details.Set("restoredTypes", ModelTypeSetToValue(restored_types));
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onInitializationComplete",
- HasDetailsAsDictionary(expected_details)));
-
- js_sync_manager_observer_.OnInitializationComplete(
- WeakHandle<JsBackend>(),
- WeakHandle<DataTypeDebugInfoListener>(),
- true,
- restored_types);
- PumpLoop();
-}
-
-TEST_F(JsSyncManagerObserverTest, OnSyncCycleCompleted) {
- sessions::SyncSessionSnapshot snapshot(
- sessions::ModelNeutralState(),
- ProgressMarkerMap(),
- false,
- 5,
- 2,
- 7,
- false,
- 0,
- base::Time::Now(),
- std::vector<int>(MODEL_TYPE_COUNT, 0),
- std::vector<int>(MODEL_TYPE_COUNT, 0),
- sync_pb::GetUpdatesCallerInfo::UNKNOWN);
- base::DictionaryValue expected_details;
- expected_details.Set("snapshot", snapshot.ToValue());
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onSyncCycleCompleted",
- HasDetailsAsDictionary(expected_details)));
-
- js_sync_manager_observer_.OnSyncCycleCompleted(snapshot);
- PumpLoop();
-}
-
-TEST_F(JsSyncManagerObserverTest, OnActionableError) {
- SyncProtocolError sync_error;
- sync_error.action = CLEAR_USER_DATA_AND_RESYNC;
- sync_error.error_type = TRANSIENT_ERROR;
- base::DictionaryValue expected_details;
- expected_details.Set("syncError", sync_error.ToValue());
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onActionableError",
- HasDetailsAsDictionary(expected_details)));
-
- js_sync_manager_observer_.OnActionableError(sync_error);
- PumpLoop();
-}
-
-
-TEST_F(JsSyncManagerObserverTest, OnConnectionStatusChange) {
- const ConnectionStatus kStatus = CONNECTION_AUTH_ERROR;
- base::DictionaryValue expected_details;
- expected_details.SetString("status",
- ConnectionStatusToString(kStatus));
-
- EXPECT_CALL(mock_js_event_handler_,
- HandleJsEvent("onConnectionStatusChange",
- HasDetailsAsDictionary(expected_details)));
-
- js_sync_manager_observer_.OnConnectionStatusChange(kStatus);
- PumpLoop();
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/DEPS b/chromium/sync/internal_api/public/DEPS
deleted file mode 100644
index 877a88d8933..00000000000
--- a/chromium/sync/internal_api/public/DEPS
+++ /dev/null
@@ -1,10 +0,0 @@
-include_rules = [
- "-sync",
- "+sync/base",
- "+sync/internal_api/public",
- "+sync/notifier",
- "+sync/protocol",
-
- # TODO(tim): Remove. Bug 131130
- "+sync/util/cryptographer.h"
-]
diff --git a/chromium/sync/internal_api/public/base/DEPS b/chromium/sync/internal_api/public/base/DEPS
deleted file mode 100644
index 9d46a8a6a63..00000000000
--- a/chromium/sync/internal_api/public/base/DEPS
+++ /dev/null
@@ -1,12 +0,0 @@
-include_rules = [
- # Invalidations headers depend on this. We should move them to sync/notifier
- # then remove this rule.
- "+google/cacheinvalidation",
-
- "-sync",
- "+sync/base",
- "+sync/internal_api/public/base",
- "+sync/internal_api/public/util",
- "+sync/notifier",
- "+sync/protocol",
-]
diff --git a/chromium/sync/internal_api/public/base/ack_handle.cc b/chromium/sync/internal_api/public/base/ack_handle.cc
deleted file mode 100644
index f5ddf121e3b..00000000000
--- a/chromium/sync/internal_api/public/base/ack_handle.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/ack_handle.h"
-
-#include <cstddef>
-#include "base/rand_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-
-namespace syncer {
-
-namespace {
-// Hopefully enough bytes for uniqueness.
-const size_t kBytesInHandle = 16;
-} // namespace
-
-AckHandle AckHandle::CreateUnique() {
- // This isn't a valid UUID, so we don't attempt to format it like one.
- uint8 random_bytes[kBytesInHandle];
- base::RandBytes(random_bytes, sizeof(random_bytes));
- return AckHandle(base::HexEncode(random_bytes, sizeof(random_bytes)),
- base::Time::Now());
-}
-
-AckHandle AckHandle::InvalidAckHandle() {
- return AckHandle(std::string(), base::Time());
-}
-
-bool AckHandle::Equals(const AckHandle& other) const {
- return state_ == other.state_ && timestamp_ == other.timestamp_;
-}
-
-scoped_ptr<base::DictionaryValue> AckHandle::ToValue() const {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetString("state", state_);
- value->SetString("timestamp",
- base::Int64ToString(timestamp_.ToInternalValue()));
- return value.Pass();
-}
-
-bool AckHandle::ResetFromValue(const base::DictionaryValue& value) {
- if (!value.GetString("state", &state_))
- return false;
- std::string timestamp_as_string;
- if (!value.GetString("timestamp", &timestamp_as_string))
- return false;
- int64 timestamp_value;
- if (!base::StringToInt64(timestamp_as_string, &timestamp_value))
- return false;
- timestamp_ = base::Time::FromInternalValue(timestamp_value);
- return true;
-}
-
-bool AckHandle::IsValid() const {
- return !state_.empty();
-}
-
-AckHandle::AckHandle(const std::string& state, base::Time timestamp)
- : state_(state), timestamp_(timestamp) {
-}
-
-AckHandle::~AckHandle() {
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/ack_handle.h b/chromium/sync/internal_api/public/base/ack_handle.h
deleted file mode 100644
index 99d03af9eb5..00000000000
--- a/chromium/sync/internal_api/public/base/ack_handle.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
-#define SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-
-namespace base {
-class DictionaryValue;
-}
-
-namespace syncer {
-
-// Opaque class that represents a local ack handle. We don't reuse the
-// invalidation ack handles to avoid unnecessary dependencies.
-class SYNC_EXPORT AckHandle {
- public:
- static AckHandle CreateUnique();
- static AckHandle InvalidAckHandle();
-
- bool Equals(const AckHandle& other) const;
-
- scoped_ptr<base::DictionaryValue> ToValue() const;
- bool ResetFromValue(const base::DictionaryValue& value);
-
- bool IsValid() const;
-
- ~AckHandle();
-
- private:
- // Explicitly copyable and assignable for STL containers.
- AckHandle(const std::string& state, base::Time timestamp);
-
- std::string state_;
- base::Time timestamp_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_ACK_HANDLE_H
diff --git a/chromium/sync/internal_api/public/base/cancelation_observer.cc b/chromium/sync/internal_api/public/base/cancelation_observer.cc
deleted file mode 100644
index f50b6a3c077..00000000000
--- a/chromium/sync/internal_api/public/base/cancelation_observer.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/cancelation_observer.h"
-
-namespace syncer {
-
-CancelationObserver::CancelationObserver() {}
-
-CancelationObserver::~CancelationObserver() {}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/cancelation_observer.h b/chromium/sync/internal_api/public/base/cancelation_observer.h
deleted file mode 100644
index 7e677871b34..00000000000
--- a/chromium/sync/internal_api/public/base/cancelation_observer.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_CANCELATION_OBSERVER_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_CANCELATION_OBSERVER_H_
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// Interface for classes that handle signals from the CancelationSignal.
-class SYNC_EXPORT CancelationObserver {
- public:
- CancelationObserver();
- virtual ~CancelationObserver() = 0;
-
- // This may be called from a foreign thread while the CancelationSignal's lock
- // is held. The callee should avoid performing slow or blocking operations.
- virtual void OnSignalReceived() = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_CANCELATION_OBSERVER_H_
diff --git a/chromium/sync/internal_api/public/base/cancelation_signal.cc b/chromium/sync/internal_api/public/base/cancelation_signal.cc
deleted file mode 100644
index 94a479ba5cb..00000000000
--- a/chromium/sync/internal_api/public/base/cancelation_signal.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/cancelation_signal.h"
-
-#include "base/logging.h"
-#include "sync/internal_api/public/base/cancelation_observer.h"
-
-namespace syncer {
-
-CancelationSignal::CancelationSignal()
- : signalled_(false),
- handler_(NULL) { }
-
-CancelationSignal::~CancelationSignal() {
- DCHECK(!handler_);
-}
-
-bool CancelationSignal::TryRegisterHandler(CancelationObserver* handler) {
- base::AutoLock lock(signal_lock_);
- DCHECK(!handler_);
-
- if (signalled_)
- return false;
-
- handler_ = handler;
- return true;
-}
-
-void CancelationSignal::UnregisterHandler(CancelationObserver* handler) {
- base::AutoLock lock(signal_lock_);
- DCHECK_EQ(handler_, handler);
- handler_ = NULL;
-}
-
-bool CancelationSignal::IsSignalled() {
- base::AutoLock lock(signal_lock_);
- return signalled_;
-}
-
-void CancelationSignal::Signal() {
- base::AutoLock lock(signal_lock_);
- DCHECK(!signalled_);
-
- signalled_ = true;
- if (handler_) {
- handler_->OnSignalReceived();
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/cancelation_signal.h b/chromium/sync/internal_api/public/base/cancelation_signal.h
deleted file mode 100644
index a074b62df6d..00000000000
--- a/chromium/sync/internal_api/public/base/cancelation_signal.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_CANCELATION_SIGNAL_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_CANCELATION_SIGNAL_H_
-
-#include "base/synchronization/lock.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class CancelationObserver;
-
-// This class is used to allow one thread to request that another abort and
-// return early.
-//
-// The signalling thread owns this class and my call Signal() at any time.
-// After that call, this class' IsSignalled() will always return true. The
-// intended use case is that the task intending to support early exit will
-// periodically check the value of IsSignalled() to see if it should return
-// early.
-//
-// The receiving task may also choose to register an CancelationObserver whose
-// OnSignalReceived() method will be executed on the signaller's thread when
-// Signal() is called. This may be used for sending an early Signal() to a
-// WaitableEvent. The registration of the handler is necessarily racy. If
-// Signal() is executes before TryRegisterHandler(), TryRegisterHandler() will
-// not perform any registration and return false. That function's caller must
-// handle this case.
-//
-// This class supports only one handler, though it could easily support multiple
-// observers if we found a use case for such a feature.
-class SYNC_EXPORT_PRIVATE CancelationSignal {
- public:
- CancelationSignal();
- ~CancelationSignal();
-
- // Tries to register a handler to be invoked when Signal() is called.
- //
- // If Signal() has already been called, returns false without registering
- // the handler. Returns true when the registration is successful.
- //
- // If the registration was successful, the handler must be unregistered with
- // UnregisterHandler before this CancelationSignal is destroyed.
- bool TryRegisterHandler(CancelationObserver* handler);
-
- // Unregisters the abort handler.
- void UnregisterHandler(CancelationObserver* handler);
-
- // Returns true if Signal() has been called.
- bool IsSignalled();
-
- // Sets the stop_requested_ flag and calls the OnSignalReceived() method of
- // the registered handler, if there is one registered at the time.
- // SignalReceived() will be called with the |signal_lock_| held.
- void Signal();
-
- private:
- // Protects all members of this class.
- base::Lock signal_lock_;
-
- // True if Signal() has been invoked.
- bool signalled_;
-
- // The registered abort handler. May be NULL.
- CancelationObserver* handler_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_CANCELATION_SIGNAL_H_
diff --git a/chromium/sync/internal_api/public/base/cancelation_signal_unittest.cc b/chromium/sync/internal_api/public/base/cancelation_signal_unittest.cc
deleted file mode 100644
index 613e7560ede..00000000000
--- a/chromium/sync/internal_api/public/base/cancelation_signal_unittest.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/cancelation_signal.h"
-
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/thread.h"
-#include "sync/internal_api/public/base/cancelation_observer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-class BlockingTask : public CancelationObserver {
- public:
- BlockingTask(CancelationSignal* cancel_signal);
- virtual ~BlockingTask();
-
- // Starts the |exec_thread_| and uses it to execute DoRun().
- void RunAsync(base::WaitableEvent* task_done_signal);
-
- // Blocks until canceled. Signals |task_done_signal| when finished.
- void Run(base::WaitableEvent* task_done_signal);
-
- // Implementation of CancelationObserver.
- // Wakes up the thread blocked in Run().
- virtual void OnSignalReceived() OVERRIDE;
-
- // Checks if we ever did successfully start waiting for |event_|. Be careful
- // with this. The flag itself is thread-unsafe, and the event that flips it
- // is racy.
- bool WasStarted();
-
- private:
- base::WaitableEvent event_;
- base::Thread exec_thread_;
- CancelationSignal* cancel_signal_;
- bool was_started_;
-};
-
-BlockingTask::BlockingTask(CancelationSignal* cancel_signal)
- : event_(true, false),
- exec_thread_("BlockingTaskBackgroundThread"),
- cancel_signal_(cancel_signal),
- was_started_(false) { }
-
-BlockingTask::~BlockingTask() {}
-
-void BlockingTask::RunAsync(base::WaitableEvent* task_done_signal) {
- exec_thread_.Start();
- exec_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&BlockingTask::Run,
- base::Unretained(this),
- base::Unretained(task_done_signal)));
-}
-
-void BlockingTask::Run(base::WaitableEvent* task_done_signal) {
- if (cancel_signal_->TryRegisterHandler(this)) {
- DCHECK(!event_.IsSignaled());
- was_started_ = true;
- event_.Wait();
- }
- task_done_signal->Signal();
-}
-
-void BlockingTask::OnSignalReceived() {
- event_.Signal();
-}
-
-bool BlockingTask::WasStarted() {
- return was_started_;
-}
-
-class CancelationSignalTest : public ::testing::Test {
- public:
- CancelationSignalTest();
- virtual ~CancelationSignalTest();
-
- // Starts the blocking task on a background thread.
- void StartBlockingTask();
-
- // Cancels the blocking task.
- void CancelBlocking();
-
- // Verifies that the background task is not running. This could be beacause
- // it was canceled early or because it was canceled after it was started.
- //
- // This method may block for a brief period of time while waiting for the
- // background thread to make progress.
- bool VerifyTaskDone();
-
- // Verifies that the background task was canceled early.
- //
- // This method may block for a brief period of time while waiting for the
- // background thread to make progress.
- bool VerifyTaskNotStarted();
-
- private:
- base::MessageLoop main_loop_;
-
- CancelationSignal signal_;
- base::WaitableEvent task_done_event_;
- BlockingTask blocking_task_;
-};
-
-CancelationSignalTest::CancelationSignalTest()
- : task_done_event_(false, false), blocking_task_(&signal_) {}
-
-CancelationSignalTest::~CancelationSignalTest() {}
-
-void CancelationSignalTest::StartBlockingTask() {
- blocking_task_.RunAsync(&task_done_event_);
-}
-
-void CancelationSignalTest::CancelBlocking() {
- signal_.Signal();
-}
-
-bool CancelationSignalTest::VerifyTaskDone() {
- // Wait until BlockingTask::Run() has finished.
- task_done_event_.Wait();
- return true;
-}
-
-bool CancelationSignalTest::VerifyTaskNotStarted() {
- // Wait until BlockingTask::Run() has finished.
- task_done_event_.Wait();
-
- // Verify the background thread never started blocking.
- return !blocking_task_.WasStarted();
-}
-
-class FakeCancelationObserver : public CancelationObserver {
- virtual void OnSignalReceived() OVERRIDE { }
-};
-
-TEST(CancelationSignalTest_SingleThread, CheckFlags) {
- FakeCancelationObserver observer;
- CancelationSignal signal;
-
- EXPECT_FALSE(signal.IsSignalled());
- signal.Signal();
- EXPECT_TRUE(signal.IsSignalled());
- EXPECT_FALSE(signal.TryRegisterHandler(&observer));
-}
-
-// Send the cancelation signal before the task is started. This will ensure
-// that the task will never be attempted.
-TEST_F(CancelationSignalTest, CancelEarly) {
- CancelBlocking();
- StartBlockingTask();
- EXPECT_TRUE(VerifyTaskNotStarted());
-}
-
-// Send the cancelation signal after the request to start the task has been
-// posted. This is racy. The signal to stop may arrive before the signal to
-// run the task. If that happens, we end up with another instance of the
-// CancelEarly test defined earlier. If the signal requesting a stop arrives
-// after the task has been started, it should end up stopping the task.
-TEST_F(CancelationSignalTest, Cancel) {
- StartBlockingTask();
- CancelBlocking();
- EXPECT_TRUE(VerifyTaskDone());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/enum_set.h b/chromium/sync/internal_api/public/base/enum_set.h
deleted file mode 100644
index 85d9a548ece..00000000000
--- a/chromium/sync/internal_api/public/base/enum_set.h
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_ENUM_SET_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_ENUM_SET_H_
-
-#include <bitset>
-#include <cstddef>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-
-namespace syncer {
-
-// Forward declarations needed for friend declarations.
-template <typename E, E MinEnumValue, E MaxEnumValue>
-class EnumSet;
-
-template <typename E, E Min, E Max>
-EnumSet<E, Min, Max> Union(EnumSet<E, Min, Max> set1,
- EnumSet<E, Min, Max> set2);
-
-template <typename E, E Min, E Max>
-EnumSet<E, Min, Max> Intersection(EnumSet<E, Min, Max> set1,
- EnumSet<E, Min, Max> set2);
-
-template <typename E, E Min, E Max>
-EnumSet<E, Min, Max> Difference(EnumSet<E, Min, Max> set1,
- EnumSet<E, Min, Max> set2);
-
-// An EnumSet is a set that can hold enum values between a min and a
-// max value (inclusive of both). It's essentially a wrapper around
-// std::bitset<> with stronger type enforcement, more descriptive
-// member function names, and an iterator interface.
-//
-// If you're working with enums with a small number of possible values
-// (say, fewer than 64), you can efficiently pass around an EnumSet
-// for that enum around by value.
-
-template <typename E, E MinEnumValue, E MaxEnumValue>
-class EnumSet {
- public:
- typedef E EnumType;
- static const E kMinValue = MinEnumValue;
- static const E kMaxValue = MaxEnumValue;
- static const size_t kValueCount = kMaxValue - kMinValue + 1;
- COMPILE_ASSERT(kMinValue < kMaxValue,
- min_value_must_be_less_than_max_value);
-
- private:
- // Declaration needed by Iterator.
- typedef std::bitset<kValueCount> EnumBitSet;
-
- public:
- // Iterator is a forward-only read-only iterator for EnumSet. Its
- // interface is deliberately distinct from an STL iterator as its
- // semantics are substantially different.
- //
- // Example usage:
- //
- // for (EnumSet<...>::Iterator it = enums.First(); it.Good(); it.Inc()) {
- // Process(it.Get());
- // }
- //
- // The iterator must not be outlived by the set. In particular, the
- // following is an error:
- //
- // EnumSet<...> SomeFn() { ... }
- //
- // /* ERROR */
- // for (EnumSet<...>::Iterator it = SomeFun().First(); ...
- //
- // Also, there are no guarantees as to what will happen if you
- // modify an EnumSet while traversing it with an iterator.
- class Iterator {
- public:
- // A default-constructed iterator can't do anything except check
- // Good(). You need to call First() on an EnumSet to get a usable
- // iterator.
- Iterator() : enums_(NULL), i_(kValueCount) {}
- ~Iterator() {}
-
- // Copy constructor and assignment welcome.
-
- // Returns true iff the iterator points to an EnumSet and it
- // hasn't yet traversed the EnumSet entirely.
- bool Good() const {
- return enums_ && i_ < kValueCount && enums_->test(i_);
- }
-
- // Returns the value the iterator currently points to. Good()
- // must hold.
- E Get() const {
- CHECK(Good());
- return FromIndex(i_);
- }
-
- // Moves the iterator to the next value in the EnumSet. Good()
- // must hold. Takes linear time.
- void Inc() {
- CHECK(Good());
- i_ = FindNext(i_ + 1);
- }
-
- private:
- friend Iterator EnumSet::First() const;
-
- explicit Iterator(const EnumBitSet& enums)
- : enums_(&enums), i_(FindNext(0)) {}
-
- size_t FindNext(size_t i) {
- while ((i < kValueCount) && !enums_->test(i)) {
- ++i;
- }
- return i;
- }
-
- const EnumBitSet* enums_;
- size_t i_;
- };
-
- // You can construct an EnumSet with 0, 1, 2, or 3 initial values.
-
- EnumSet() {}
-
- explicit EnumSet(E value) {
- Put(value);
- }
-
- EnumSet(E value1, E value2) {
- Put(value1);
- Put(value2);
- }
-
- EnumSet(E value1, E value2, E value3) {
- Put(value1);
- Put(value2);
- Put(value3);
- }
-
- // Returns an EnumSet with all possible values.
- static EnumSet All() {
- EnumBitSet enums;
- enums.set();
- return EnumSet(enums);
- }
-
- ~EnumSet() {}
-
- // Copy constructor and assignment welcome.
-
- // Set operations. Put, Retain, and Remove are basically
- // self-mutating versions of Union, Intersection, and Difference
- // (defined below).
-
- // Adds the given value (which must be in range) to our set.
- void Put(E value) {
- enums_.set(ToIndex(value));
- }
-
- // Adds all values in the given set to our set.
- void PutAll(EnumSet other) {
- enums_ |= other.enums_;
- }
-
- // There's no real need for a Retain(E) member function.
-
- // Removes all values not in the given set from our set.
- void RetainAll(EnumSet other) {
- enums_ &= other.enums_;
- }
-
- // If the given value is in range, removes it from our set.
- void Remove(E value) {
- if (InRange(value)) {
- enums_.reset(ToIndex(value));
- }
- }
-
- // Removes all values in the given set from our set.
- void RemoveAll(EnumSet other) {
- enums_ &= ~other.enums_;
- }
-
- // Removes all values from our set.
- void Clear() {
- enums_.reset();
- }
-
- // Returns true iff the given value is in range and a member of our
- // set.
- bool Has(E value) const {
- return InRange(value) && enums_.test(ToIndex(value));
- }
-
- // Returns true iff the given set is a subset of our set.
- bool HasAll(EnumSet other) const {
- return (enums_ & other.enums_) == other.enums_;
- }
-
- // Returns true iff our set and the given set contain exactly the
- // same values.
- bool Equals(const EnumSet& other) const {
- return enums_ == other.enums_;
- }
-
- // Returns true iff our set is empty.
- bool Empty() const {
- return !enums_.any();
- }
-
- // Returns how many values our set has.
- size_t Size() const {
- return enums_.count();
- }
-
- // Returns an iterator pointing to the first element (if any).
- Iterator First() const {
- return Iterator(enums_);
- }
-
- private:
- friend EnumSet Union<E, MinEnumValue, MaxEnumValue>(
- EnumSet set1, EnumSet set2);
- friend EnumSet Intersection<E, MinEnumValue, MaxEnumValue>(
- EnumSet set1, EnumSet set2);
- friend EnumSet Difference<E, MinEnumValue, MaxEnumValue>(
- EnumSet set1, EnumSet set2);
-
- explicit EnumSet(EnumBitSet enums) : enums_(enums) {}
-
- static bool InRange(E value) {
- return (value >= MinEnumValue) && (value <= MaxEnumValue);
- }
-
- // Converts a value to/from an index into |enums_|.
-
- static size_t ToIndex(E value) {
- DCHECK_GE(value, MinEnumValue);
- DCHECK_LE(value, MaxEnumValue);
- return value - MinEnumValue;
- }
-
- static E FromIndex(size_t i) {
- DCHECK_LT(i, kValueCount);
- return static_cast<E>(MinEnumValue + i);
- }
-
- EnumBitSet enums_;
-};
-
-template <typename E, E MinEnumValue, E MaxEnumValue>
-const E EnumSet<E, MinEnumValue, MaxEnumValue>::kMinValue;
-
-template <typename E, E MinEnumValue, E MaxEnumValue>
-const E EnumSet<E, MinEnumValue, MaxEnumValue>::kMaxValue;
-
-template <typename E, E MinEnumValue, E MaxEnumValue>
-const size_t EnumSet<E, MinEnumValue, MaxEnumValue>::kValueCount;
-
-// The usual set operations.
-
-template <typename E, E Min, E Max>
-EnumSet<E, Min, Max> Union(EnumSet<E, Min, Max> set1,
- EnumSet<E, Min, Max> set2) {
- return EnumSet<E, Min, Max>(set1.enums_ | set2.enums_);
-}
-
-template <typename E, E Min, E Max>
-EnumSet<E, Min, Max> Intersection(EnumSet<E, Min, Max> set1,
- EnumSet<E, Min, Max> set2) {
- return EnumSet<E, Min, Max>(set1.enums_ & set2.enums_);
-}
-
-template <typename E, E Min, E Max>
-EnumSet<E, Min, Max> Difference(EnumSet<E, Min, Max> set1,
- EnumSet<E, Min, Max> set2) {
- return EnumSet<E, Min, Max>(set1.enums_ & ~set2.enums_);
-}
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_ENUM_SET_H_
diff --git a/chromium/sync/internal_api/public/base/enum_set_unittest.cc b/chromium/sync/internal_api/public/base/enum_set_unittest.cc
deleted file mode 100644
index d4692a60c75..00000000000
--- a/chromium/sync/internal_api/public/base/enum_set_unittest.cc
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/enum_set.h"
-
-#include "base/basictypes.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-enum TestEnum {
- TEST_0,
- TEST_MIN = TEST_0,
- TEST_1,
- TEST_2,
- TEST_3,
- TEST_4,
- TEST_MAX = TEST_4,
- TEST_5
-};
-
-typedef EnumSet<TestEnum, TEST_MIN, TEST_MAX> TestEnumSet;
-
-class EnumSetTest : public ::testing::Test {};
-
-TEST_F(EnumSetTest, ClassConstants) {
- TestEnumSet enums;
- EXPECT_EQ(TEST_MIN, TestEnumSet::kMinValue);
- EXPECT_EQ(TEST_MAX, TestEnumSet::kMaxValue);
- EXPECT_EQ(static_cast<size_t>(5), TestEnumSet::kValueCount);
-}
-
-TEST_F(EnumSetTest, DefaultConstructor) {
- const TestEnumSet enums;
- EXPECT_TRUE(enums.Empty());
- EXPECT_EQ(static_cast<size_t>(0), enums.Size());
- EXPECT_FALSE(enums.Has(TEST_0));
- EXPECT_FALSE(enums.Has(TEST_1));
- EXPECT_FALSE(enums.Has(TEST_2));
- EXPECT_FALSE(enums.Has(TEST_3));
- EXPECT_FALSE(enums.Has(TEST_4));
-}
-
-TEST_F(EnumSetTest, OneArgConstructor) {
- const TestEnumSet enums(TEST_3);
- EXPECT_FALSE(enums.Empty());
- EXPECT_EQ(static_cast<size_t>(1), enums.Size());
- EXPECT_FALSE(enums.Has(TEST_0));
- EXPECT_FALSE(enums.Has(TEST_1));
- EXPECT_FALSE(enums.Has(TEST_2));
- EXPECT_TRUE(enums.Has(TEST_3));
- EXPECT_FALSE(enums.Has(TEST_4));
-}
-
-TEST_F(EnumSetTest, TwoArgConstructor) {
- const TestEnumSet enums(TEST_3, TEST_1);
- EXPECT_FALSE(enums.Empty());
- EXPECT_EQ(static_cast<size_t>(2), enums.Size());
- EXPECT_FALSE(enums.Has(TEST_0));
- EXPECT_TRUE(enums.Has(TEST_1));
- EXPECT_FALSE(enums.Has(TEST_2));
- EXPECT_TRUE(enums.Has(TEST_3));
- EXPECT_FALSE(enums.Has(TEST_4));
-}
-
-TEST_F(EnumSetTest, ThreeArgConstructor) {
- const TestEnumSet enums(TEST_3, TEST_1, TEST_0);
- EXPECT_FALSE(enums.Empty());
- EXPECT_EQ(static_cast<size_t>(3), enums.Size());
- EXPECT_TRUE(enums.Has(TEST_0));
- EXPECT_TRUE(enums.Has(TEST_1));
- EXPECT_FALSE(enums.Has(TEST_2));
- EXPECT_TRUE(enums.Has(TEST_3));
- EXPECT_FALSE(enums.Has(TEST_4));
-}
-
-TEST_F(EnumSetTest, All) {
- const TestEnumSet enums(TestEnumSet::All());
- EXPECT_FALSE(enums.Empty());
- EXPECT_EQ(static_cast<size_t>(5), enums.Size());
- EXPECT_TRUE(enums.Has(TEST_0));
- EXPECT_TRUE(enums.Has(TEST_1));
- EXPECT_TRUE(enums.Has(TEST_2));
- EXPECT_TRUE(enums.Has(TEST_3));
- EXPECT_TRUE(enums.Has(TEST_4));
-}
-
-TEST_F(EnumSetTest, Put) {
- TestEnumSet enums(TEST_3);
- enums.Put(TEST_2);
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_2, TEST_3)));
- enums.Put(TEST_4);
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_2, TEST_3, TEST_4)));
-}
-
-TEST_F(EnumSetTest, PutAll) {
- TestEnumSet enums(TEST_3, TEST_4);
- enums.PutAll(TestEnumSet(TEST_2, TEST_3));
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_2, TEST_3, TEST_4)));
-}
-
-TEST_F(EnumSetTest, RetainAll) {
- TestEnumSet enums(TEST_3, TEST_4);
- enums.RetainAll(TestEnumSet(TEST_2, TEST_3));
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_3)));
-}
-
-TEST_F(EnumSetTest, Remove) {
- TestEnumSet enums(TEST_3, TEST_4);
- enums.Remove(TEST_0);
- enums.Remove(TEST_2);
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_3, TEST_4)));
- enums.Remove(TEST_3);
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_4)));
- enums.Remove(TEST_4);
- enums.Remove(TEST_5);
- EXPECT_TRUE(enums.Empty());
-}
-
-TEST_F(EnumSetTest, RemoveAll) {
- TestEnumSet enums(TEST_3, TEST_4);
- enums.RemoveAll(TestEnumSet(TEST_2, TEST_3));
- EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_4)));
-}
-
-TEST_F(EnumSetTest, Clear) {
- TestEnumSet enums(TEST_3, TEST_4);
- enums.Clear();
- EXPECT_TRUE(enums.Empty());
-}
-
-TEST_F(EnumSetTest, Has) {
- const TestEnumSet enums(TEST_3, TEST_4);
- EXPECT_FALSE(enums.Has(TEST_0));
- EXPECT_FALSE(enums.Has(TEST_1));
- EXPECT_FALSE(enums.Has(TEST_2));
- EXPECT_TRUE(enums.Has(TEST_3));
- EXPECT_TRUE(enums.Has(TEST_4));
- EXPECT_FALSE(enums.Has(TEST_5));
-}
-
-TEST_F(EnumSetTest, HasAll) {
- const TestEnumSet enums1(TEST_3, TEST_4);
- const TestEnumSet enums2(TEST_2, TEST_3);
- const TestEnumSet enums3 = Union(enums1, enums2);
- EXPECT_TRUE(enums1.HasAll(enums1));
- EXPECT_FALSE(enums1.HasAll(enums2));
- EXPECT_FALSE(enums1.HasAll(enums3));
-
- EXPECT_FALSE(enums2.HasAll(enums1));
- EXPECT_TRUE(enums2.HasAll(enums2));
- EXPECT_FALSE(enums2.HasAll(enums3));
-
- EXPECT_TRUE(enums3.HasAll(enums1));
- EXPECT_TRUE(enums3.HasAll(enums2));
- EXPECT_TRUE(enums3.HasAll(enums3));
-}
-
-TEST_F(EnumSetTest, Iterators) {
- const TestEnumSet enums1(TEST_3, TEST_4);
- TestEnumSet enums2;
- for (TestEnumSet::Iterator it = enums1.First(); it.Good(); it.Inc()) {
- enums2.Put(it.Get());
- }
- EXPECT_TRUE(enums1.Equals(enums2));
-}
-
-TEST_F(EnumSetTest, Union) {
- const TestEnumSet enums1(TEST_3, TEST_4);
- const TestEnumSet enums2(TEST_2, TEST_3);
- const TestEnumSet enums3 = Union(enums1, enums2);
-
- EXPECT_TRUE(enums3.Equals(TestEnumSet(TEST_2, TEST_3, TEST_4)));
-}
-
-TEST_F(EnumSetTest, Intersection) {
- const TestEnumSet enums1(TEST_3, TEST_4);
- const TestEnumSet enums2(TEST_2, TEST_3);
- const TestEnumSet enums3 = Intersection(enums1, enums2);
-
- EXPECT_TRUE(enums3.Equals(TestEnumSet(TEST_3)));
-}
-
-TEST_F(EnumSetTest, Difference) {
- const TestEnumSet enums1(TEST_3, TEST_4);
- const TestEnumSet enums2(TEST_2, TEST_3);
- const TestEnumSet enums3 = Difference(enums1, enums2);
-
- EXPECT_TRUE(enums3.Equals(TestEnumSet(TEST_4)));
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/invalidation.cc b/chromium/sync/internal_api/public/base/invalidation.cc
deleted file mode 100644
index ff7a5a78fd4..00000000000
--- a/chromium/sync/internal_api/public/base/invalidation.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/invalidation.h"
-
-#include <cstddef>
-
-#include "base/json/json_string_value_serializer.h"
-#include "base/rand_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/notifier/ack_handler.h"
-#include "sync/notifier/dropped_invalidation_tracker.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace syncer {
-
-namespace {
-const char kObjectIdKey[] = "objectId";
-const char kIsUnknownVersionKey[] = "isUnknownVersion";
-const char kVersionKey[] = "version";
-const char kPayloadKey[] = "payload";
-const int64 kInvalidVersion = -1;
-}
-
-Invalidation Invalidation::Init(
- const invalidation::ObjectId& id,
- int64 version,
- const std::string& payload) {
- return Invalidation(id, false, version, payload, AckHandle::CreateUnique());
-}
-
-Invalidation Invalidation::InitUnknownVersion(
- const invalidation::ObjectId& id) {
- return Invalidation(id, true, kInvalidVersion,
- std::string(), AckHandle::CreateUnique());
-}
-
-Invalidation Invalidation::InitFromDroppedInvalidation(
- const Invalidation& dropped) {
- return Invalidation(dropped.id_, true, kInvalidVersion,
- std::string(), dropped.ack_handle_);
-}
-
-scoped_ptr<Invalidation> Invalidation::InitFromValue(
- const base::DictionaryValue& value) {
- invalidation::ObjectId id;
-
- const base::DictionaryValue* object_id_dict;
- if (!value.GetDictionary(kObjectIdKey, &object_id_dict)
- || !ObjectIdFromValue(*object_id_dict, &id)) {
- DLOG(WARNING) << "Failed to parse id";
- return scoped_ptr<Invalidation>();
- }
- bool is_unknown_version;
- if (!value.GetBoolean(kIsUnknownVersionKey, &is_unknown_version)) {
- DLOG(WARNING) << "Failed to parse is_unknown_version flag";
- return scoped_ptr<Invalidation>();
- }
- if (is_unknown_version) {
- return scoped_ptr<Invalidation>(new Invalidation(
- id,
- true,
- kInvalidVersion,
- std::string(),
- AckHandle::CreateUnique()));
- } else {
- int64 version;
- std::string version_as_string;
- if (!value.GetString(kVersionKey, &version_as_string)
- || !base::StringToInt64(version_as_string, &version)) {
- DLOG(WARNING) << "Failed to parse version";
- return scoped_ptr<Invalidation>();
- }
- std::string payload;
- if (!value.GetString(kPayloadKey, &payload)) {
- DLOG(WARNING) << "Failed to parse payload";
- return scoped_ptr<Invalidation>();
- }
- return scoped_ptr<Invalidation>(new Invalidation(
- id,
- false,
- version,
- payload,
- AckHandle::CreateUnique()));
- }
-}
-
-Invalidation::~Invalidation() {}
-
-invalidation::ObjectId Invalidation::object_id() const {
- return id_;
-}
-
-bool Invalidation::is_unknown_version() const {
- return is_unknown_version_;
-}
-
-int64 Invalidation::version() const {
- DCHECK(!is_unknown_version_);
- return version_;
-}
-
-const std::string& Invalidation::payload() const {
- DCHECK(!is_unknown_version_);
- return payload_;
-}
-
-const AckHandle& Invalidation::ack_handle() const {
- return ack_handle_;
-}
-
-void Invalidation::set_ack_handler(syncer::WeakHandle<AckHandler> handler) {
- ack_handler_ = handler;
-}
-
-bool Invalidation::SupportsAcknowledgement() const {
- return ack_handler_.IsInitialized();
-}
-
-void Invalidation::Acknowledge() const {
- if (SupportsAcknowledgement()) {
- ack_handler_.Call(FROM_HERE,
- &AckHandler::Acknowledge,
- id_,
- ack_handle_);
- }
-}
-
-void Invalidation::Drop(DroppedInvalidationTracker* tracker) const {
- DCHECK(tracker->object_id() == object_id());
- tracker->RecordDropEvent(ack_handler_, ack_handle_);
- if (SupportsAcknowledgement()) {
- ack_handler_.Call(FROM_HERE,
- &AckHandler::Drop,
- id_,
- ack_handle_);
- }
-}
-
-bool Invalidation::Equals(const Invalidation& other) const {
- return id_ == other.id_
- && is_unknown_version_ == other.is_unknown_version_
- && version_ == other.version_
- && payload_ == other.payload_;
-}
-
-scoped_ptr<base::DictionaryValue> Invalidation::ToValue() const {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->Set(kObjectIdKey, ObjectIdToValue(id_).release());
- if (is_unknown_version_) {
- value->SetBoolean(kIsUnknownVersionKey, true);
- } else {
- value->SetBoolean(kIsUnknownVersionKey, false);
- value->SetString(kVersionKey, base::Int64ToString(version_));
- value->SetString(kPayloadKey, payload_);
- }
- return value.Pass();
-}
-
-std::string Invalidation::ToString() const {
- std::string output;
- JSONStringValueSerializer serializer(&output);
- serializer.set_pretty_print(true);
- serializer.Serialize(*ToValue().get());
- return output;
-}
-
-Invalidation::Invalidation(
- const invalidation::ObjectId& id,
- bool is_unknown_version,
- int64 version,
- const std::string& payload,
- AckHandle ack_handle)
- : id_(id),
- is_unknown_version_(is_unknown_version),
- version_(version),
- payload_(payload),
- ack_handle_(ack_handle) {}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/invalidation.h b/chromium/sync/internal_api/public/base/invalidation.h
deleted file mode 100644
index cf26112e224..00000000000
--- a/chromium/sync/internal_api/public/base/invalidation.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_INVALIDATION_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_INVALIDATION_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/ack_handle.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-
-namespace syncer {
-
-class DroppedInvalidationTracker;
-class AckHandler;
-
-// Represents a local invalidation, and is roughly analogous to
-// invalidation::Invalidation. Unlike invalidation::Invalidation, this class
-// supports "local" ack-tracking and simple serialization to pref values.
-class SYNC_EXPORT Invalidation {
- public:
- // Factory functions.
- static Invalidation Init(
- const invalidation::ObjectId& id,
- int64 version,
- const std::string& payload);
- static Invalidation InitUnknownVersion(const invalidation::ObjectId& id);
- static Invalidation InitFromDroppedInvalidation(const Invalidation& dropped);
- static scoped_ptr<Invalidation> InitFromValue(
- const base::DictionaryValue& value);
-
- ~Invalidation();
-
- // Compares two invalidations. The comparison ignores ack-tracking state.
- bool Equals(const Invalidation& other) const;
-
- invalidation::ObjectId object_id() const;
- bool is_unknown_version() const;
-
- // Safe to call only if is_unknown_version() returns false.
- int64 version() const;
-
- // Safe to call only if is_unknown_version() returns false.
- const std::string& payload() const;
-
- const AckHandle& ack_handle() const;
-
- // Sets the AckHandler to be used to track this Invalidation.
- //
- // This should be set by the class that generates the invalidation. Clients
- // of the Invalidations API should not need to call this.
- //
- // Note that some sources of invalidations do not support ack tracking, and do
- // not set the ack_handler. This will be hidden from users of this class.
- void set_ack_handler(syncer::WeakHandle<AckHandler> ack_handler);
-
- // Returns whether or not this instance supports ack tracking. This will
- // depend on whether or not the source of invaliadations supports
- // invalidations.
- //
- // Clients can safely ignore this flag. They can assume that all
- // invalidations support ack tracking. If they're wrong, then invalidations
- // will be less reliable, but their behavior will be no less correct.
- bool SupportsAcknowledgement() const;
-
- // Acknowledges the receipt of this invalidation.
- //
- // Clients should call this on a received invalidation when they have fully
- // processed the invalidation and persisted the results to disk. Once this
- // function is called, the invalidations system is under no obligation to
- // re-deliver this invalidation in the event of a crash or restart.
- void Acknowledge() const;
-
- // Informs the ack tracker that this invalidation will not be serviced.
- //
- // If a client's buffer reaches its limit and it is forced to start dropping
- // invalidations, it should call this function before dropping its
- // invalidations in order to allow the ack tracker to drop the invalidation,
- // too.
- //
- // The drop record will be tracked by the specified
- // DroppedInvalidationTracker. The caller should hang on to this tracker. It
- // will need to use it when it recovers from this drop event, or if it needs
- // to record another drop event for the same ObjectID. Refer to the
- // documentation of DroppedInvalidationTracker for more details.
- void Drop(DroppedInvalidationTracker* tracker) const;
-
- scoped_ptr<base::DictionaryValue> ToValue() const;
- std::string ToString() const;
-
- private:
- Invalidation(const invalidation::ObjectId& id,
- bool is_unknown_version,
- int64 version,
- const std::string& payload,
- AckHandle ack_handle);
-
- // The ObjectId to which this invalidation belongs.
- invalidation::ObjectId id_;
-
- // This flag is set to true if this is an unknown version invalidation.
- bool is_unknown_version_;
-
- // The version number of this invalidation. Should not be accessed if this is
- // an unkown version invalidation.
- int64 version_;
-
- // The payaload associated with this invalidation. Should not be accessed if
- // this is an unknown version invalidation.
- std::string payload_;
-
- // A locally generated unique ID used to manage local acknowledgements.
- AckHandle ack_handle_;
- syncer::WeakHandle<AckHandler> ack_handler_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_INVALIDATION_H_
diff --git a/chromium/sync/internal_api/public/base/invalidation_test_util.cc b/chromium/sync/internal_api/public/base/invalidation_test_util.cc
deleted file mode 100644
index 3c610dadedc..00000000000
--- a/chromium/sync/internal_api/public/base/invalidation_test_util.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/invalidation_test_util.h"
-
-#include "base/basictypes.h"
-#include "base/json/json_writer.h"
-#include "base/json/string_escape.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/invalidation.h"
-
-namespace syncer {
-
-using ::testing::MakeMatcher;
-using ::testing::MatchResultListener;
-using ::testing::Matcher;
-using ::testing::MatcherInterface;
-using ::testing::PrintToString;
-
-namespace {
-
-class AckHandleEqMatcher
- : public MatcherInterface<const AckHandle&> {
- public:
- explicit AckHandleEqMatcher(const AckHandle& expected);
-
- virtual bool MatchAndExplain(const AckHandle& actual,
- MatchResultListener* listener) const;
- virtual void DescribeTo(::std::ostream* os) const;
- virtual void DescribeNegationTo(::std::ostream* os) const;
-
- private:
- const AckHandle expected_;
-
- DISALLOW_COPY_AND_ASSIGN(AckHandleEqMatcher);
-};
-
-AckHandleEqMatcher::AckHandleEqMatcher(const AckHandle& expected)
- : expected_(expected) {
-}
-
-bool AckHandleEqMatcher::MatchAndExplain(const AckHandle& actual,
- MatchResultListener* listener) const {
- return expected_.Equals(actual);
-}
-
-void AckHandleEqMatcher::DescribeTo(::std::ostream* os) const {
- *os << " is equal to " << PrintToString(expected_);
-}
-
-void AckHandleEqMatcher::DescribeNegationTo(::std::ostream* os) const {
- *os << " isn't equal to " << PrintToString(expected_);
-}
-
-class InvalidationEqMatcher
- : public MatcherInterface<const Invalidation&> {
- public:
- explicit InvalidationEqMatcher(const Invalidation& expected);
-
- virtual bool MatchAndExplain(const Invalidation& actual,
- MatchResultListener* listener) const;
- virtual void DescribeTo(::std::ostream* os) const;
- virtual void DescribeNegationTo(::std::ostream* os) const;
-
- private:
- const Invalidation expected_;
-
- DISALLOW_COPY_AND_ASSIGN(InvalidationEqMatcher);
-};
-
-InvalidationEqMatcher::InvalidationEqMatcher(
- const Invalidation& expected) : expected_(expected) {
-}
-
-bool InvalidationEqMatcher::MatchAndExplain(
- const Invalidation& actual, MatchResultListener* listener) const {
- if (!(expected_.object_id() == actual.object_id())) {
- return false;
- }
- if (expected_.is_unknown_version() && actual.is_unknown_version()) {
- return true;
- } else if (expected_.is_unknown_version() != actual.is_unknown_version()) {
- return false;
- } else {
- // Neither is unknown version.
- return expected_.payload() == actual.payload()
- && expected_.version() == actual.version();
- }
-}
-
-void InvalidationEqMatcher::DescribeTo(::std::ostream* os) const {
- *os << " is equal to " << PrintToString(expected_);
-}
-
-void InvalidationEqMatcher::DescribeNegationTo(::std::ostream* os) const {
- *os << " isn't equal to " << PrintToString(expected_);
-}
-
-} // namespace
-
-void PrintTo(const AckHandle& ack_handle, ::std::ostream* os ) {
- scoped_ptr<base::Value> value(ack_handle.ToValue());
- std::string printable_ack_handle;
- base::JSONWriter::Write(value.get(), &printable_ack_handle);
- *os << "{ ack_handle: " << printable_ack_handle << " }";
-}
-
-Matcher<const AckHandle&> Eq(const AckHandle& expected) {
- return MakeMatcher(new AckHandleEqMatcher(expected));
-}
-
-void PrintTo(const Invalidation& inv, ::std::ostream* os) {
- *os << "{ payload: " << inv.ToString() << " }";
-}
-
-Matcher<const Invalidation&> Eq(const Invalidation& expected) {
- return MakeMatcher(new InvalidationEqMatcher(expected));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/invalidation_test_util.h b/chromium/sync/internal_api/public/base/invalidation_test_util.h
deleted file mode 100644
index e7c08caae0d..00000000000
--- a/chromium/sync/internal_api/public/base/invalidation_test_util.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_INVALIDATION_TEST_UTIL_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_INVALIDATION_TEST_UTIL_H_
-
-#include <iosfwd>
-
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace syncer {
-
-class AckHandle;
-class Invalidation;
-
-void PrintTo(const AckHandle& ack_handle, ::std::ostream* os);
-::testing::Matcher<const AckHandle&> Eq(const AckHandle& expected);
-
-void PrintTo(const Invalidation& invalidation, ::std::ostream* os);
-
-::testing::Matcher<const Invalidation&> Eq(const Invalidation& expected);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_INVALIDATION_TEST_UTIL_H_
diff --git a/chromium/sync/internal_api/public/base/model_type.h b/chromium/sync/internal_api/public/base/model_type.h
deleted file mode 100644
index c618c45043d..00000000000
--- a/chromium/sync/internal_api/public/base/model_type.h
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Enumerate the various item subtypes that are supported by sync.
-// Each sync object is expected to have an immutable object type.
-// An object's type is inferred from the type of data it holds.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_MODEL_TYPE_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_MODEL_TYPE_H_
-
-#include <set>
-#include <string>
-
-#include "base/logging.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/enum_set.h"
-
-namespace base {
-class ListValue;
-class StringValue;
-class Value;
-}
-
-namespace sync_pb {
-class EntitySpecifics;
-class SyncEntity;
-}
-
-namespace syncer {
-
-// TODO(akalin): Move the non-exported functions in this file to a
-// private header.
-
-enum ModelType {
- // Object type unknown. Objects may transition through
- // the unknown state during their initial creation, before
- // their properties are set. After deletion, object types
- // are generally preserved.
- UNSPECIFIED,
- // A permanent folder whose children may be of mixed
- // datatypes (e.g. the "Google Chrome" folder).
- TOP_LEVEL_FOLDER,
-
- // ------------------------------------ Start of "real" model types.
- // The model types declared before here are somewhat special, as they
- // they do not correspond to any browser data model. The remaining types
- // are bona fide model types; all have a related browser data model and
- // can be represented in the protocol using a specific Message type in the
- // EntitySpecifics protocol buffer.
- //
- // A bookmark folder or a bookmark URL object.
- BOOKMARKS,
- FIRST_USER_MODEL_TYPE = BOOKMARKS, // Declared 2nd, for debugger prettiness.
- FIRST_REAL_MODEL_TYPE = FIRST_USER_MODEL_TYPE,
-
- // A preference object.
- PREFERENCES,
- // A password object.
- PASSWORDS,
- // An AutofillProfile Object
- AUTOFILL_PROFILE,
- // An autofill object.
- AUTOFILL,
- // A themes object.
- THEMES,
- // A typed_url object.
- TYPED_URLS,
- // An extension object.
- EXTENSIONS,
- // An object representing a custom search engine.
- SEARCH_ENGINES,
- // An object representing a browser session.
- SESSIONS,
- // An app object.
- APPS,
- // An app setting from the extension settings API.
- APP_SETTINGS,
- // An extension setting from the extension settings API.
- EXTENSION_SETTINGS,
- // App notifications.
- APP_NOTIFICATIONS,
- // History delete directives.
- HISTORY_DELETE_DIRECTIVES,
- // Synced push notifications.
- SYNCED_NOTIFICATIONS,
- // Custom spelling dictionary.
- DICTIONARY,
- // Favicon images.
- FAVICON_IMAGES,
- // Favicon tracking information.
- FAVICON_TRACKING,
- // These preferences are synced before other user types and are never
- // encrypted.
- PRIORITY_PREFERENCES,
- // Managed user settings.
- MANAGED_USER_SETTINGS,
- // Managed users. Every managed user is a profile that is configured remotely
- // by this user and can have restrictions applied. MANAGED_USERS and
- // MANAGED_USER_SETTINGS can not be encrypted.
- MANAGED_USERS,
- // Distilled articles.
- ARTICLES,
- // App List items
- APP_LIST,
-
- // ---- Proxy types ----
- // Proxy types are excluded from the sync protocol, but are still considered
- // real user types. By convention, we prefix them with 'PROXY_' to distinguish
- // them from normal protocol types.
-
- // Tab sync. This is a placeholder type, so that Sessions can be implicitly
- // enabled for history sync and tabs sync.
- PROXY_TABS,
-
- FIRST_PROXY_TYPE = PROXY_TABS,
- LAST_PROXY_TYPE = PROXY_TABS,
-
- LAST_USER_MODEL_TYPE = PROXY_TABS,
-
- // ---- Control Types ----
- // An object representing a set of Nigori keys.
- NIGORI,
- FIRST_CONTROL_MODEL_TYPE = NIGORI,
- // Client-specific metadata.
- DEVICE_INFO,
- // Flags to enable experimental features.
- EXPERIMENTS,
- LAST_CONTROL_MODEL_TYPE = EXPERIMENTS,
-
- LAST_REAL_MODEL_TYPE = LAST_CONTROL_MODEL_TYPE,
-
- // If you are adding a new sync datatype that is exposed to the user via the
- // sync preferences UI, be sure to update the list in
- // chrome/browser/sync/user_selectable_sync_type.h so that the UMA histograms
- // for sync include your new type.
- // In this case, be sure to also update the UserSelectableTypes() definition
- // in sync/syncable/model_type.cc.
-
- MODEL_TYPE_COUNT,
-};
-
-typedef EnumSet<ModelType, FIRST_REAL_MODEL_TYPE, LAST_REAL_MODEL_TYPE>
- ModelTypeSet;
-typedef EnumSet<ModelType, UNSPECIFIED, LAST_REAL_MODEL_TYPE>
- FullModelTypeSet;
-
-inline ModelType ModelTypeFromInt(int i) {
- DCHECK_GE(i, 0);
- DCHECK_LT(i, MODEL_TYPE_COUNT);
- return static_cast<ModelType>(i);
-}
-
-// Used by tests outside of sync/.
-SYNC_EXPORT void AddDefaultFieldValue(ModelType datatype,
- sync_pb::EntitySpecifics* specifics);
-
-// Extract the model type of a SyncEntity protocol buffer. ModelType is a
-// local concept: the enum is not in the protocol. The SyncEntity's ModelType
-// is inferred from the presence of particular datatype field in the
-// entity specifics.
-SYNC_EXPORT_PRIVATE ModelType GetModelType(
- const sync_pb::SyncEntity& sync_entity);
-
-// Extract the model type from an EntitySpecifics field. Note that there
-// are some ModelTypes (like TOP_LEVEL_FOLDER) that can't be inferred this way;
-// prefer using GetModelType where possible.
-SYNC_EXPORT ModelType GetModelTypeFromSpecifics(
- const sync_pb::EntitySpecifics& specifics);
-
-// Protocol types are those types that have actual protocol buffer
-// representations. This distinguishes them from Proxy types, which have no
-// protocol representation and are never sent to the server.
-SYNC_EXPORT ModelTypeSet ProtocolTypes();
-
-// These are the normal user-controlled types. This is to distinguish from
-// ControlTypes which are always enabled. Note that some of these share a
-// preference flag, so not all of them are individually user-selectable.
-SYNC_EXPORT ModelTypeSet UserTypes();
-
-// These are the user-selectable data types.
-SYNC_EXPORT ModelTypeSet UserSelectableTypes();
-SYNC_EXPORT bool IsUserSelectableType(ModelType model_type);
-
-// This is the subset of UserTypes() that can be encrypted.
-SYNC_EXPORT_PRIVATE ModelTypeSet EncryptableUserTypes();
-
-// This is the subset of UserTypes() that have priority over other types. These
-// types are synced before other user types and are never encrypted.
-SYNC_EXPORT ModelTypeSet PriorityUserTypes();
-
-// Proxy types are placeholder types for handling implicitly enabling real
-// types. They do not exist at the server, and are simply used for
-// UI/Configuration logic.
-SYNC_EXPORT ModelTypeSet ProxyTypes();
-
-// Returns a list of all control types.
-//
-// The control types are intended to contain metadata nodes that are essential
-// for the normal operation of the syncer. As such, they have the following
-// special properties:
-// - They are downloaded early during SyncBackend initialization.
-// - They are always enabled. Users may not disable these types.
-// - Their contents are not encrypted automatically.
-// - They support custom update application and conflict resolution logic.
-// - All change processing occurs on the sync thread (GROUP_PASSIVE).
-SYNC_EXPORT ModelTypeSet ControlTypes();
-
-// Returns true if this is a control type.
-//
-// See comment above for more information on what makes these types special.
-SYNC_EXPORT bool IsControlType(ModelType model_type);
-
-// Core types are those data types used by sync's core functionality (i.e. not
-// user data types). These types are always enabled, and include ControlTypes().
-//
-// The set of all core types.
-SYNC_EXPORT ModelTypeSet CoreTypes();
-// Those core types that have high priority (includes ControlTypes()).
-SYNC_EXPORT ModelTypeSet PriorityCoreTypes();
-
-// Determine a model type from the field number of its associated
-// EntitySpecifics field. Returns UNSPECIFIED if the field number is
-// not recognized.
-//
-// If you're putting the result in a ModelTypeSet, you should use the
-// following pattern:
-//
-// ModelTypeSet model_types;
-// // Say we're looping through a list of items, each of which has a
-// // field number.
-// for (...) {
-// int field_number = ...;
-// ModelType model_type =
-// GetModelTypeFromSpecificsFieldNumber(field_number);
-// if (!IsRealDataType(model_type)) {
-// DLOG(WARNING) << "Unknown field number " << field_number;
-// continue;
-// }
-// model_types.Put(model_type);
-// }
-SYNC_EXPORT_PRIVATE ModelType GetModelTypeFromSpecificsFieldNumber(
- int field_number);
-
-// Return the field number of the EntitySpecifics field associated with
-// a model type.
-//
-// Used by tests outside of sync.
-SYNC_EXPORT int GetSpecificsFieldNumberFromModelType(
- ModelType model_type);
-
-FullModelTypeSet ToFullModelTypeSet(ModelTypeSet in);
-
-// TODO(sync): The functions below badly need some cleanup.
-
-// Returns a pointer to a string with application lifetime that represents
-// the name of |model_type|.
-SYNC_EXPORT const char* ModelTypeToString(ModelType model_type);
-
-// Some histograms take an integer parameter that represents a model type.
-// The mapping from ModelType to integer is defined here. It should match
-// the mapping from integer to labels defined in histograms.xml.
-SYNC_EXPORT int ModelTypeToHistogramInt(ModelType model_type);
-
-// Handles all model types, and not just real ones.
-//
-// Caller takes ownership of returned value.
-SYNC_EXPORT_PRIVATE base::StringValue* ModelTypeToValue(ModelType model_type);
-
-// Converts a Value into a ModelType - complement to ModelTypeToValue().
-SYNC_EXPORT_PRIVATE ModelType ModelTypeFromValue(const base::Value& value);
-
-// Returns the ModelType corresponding to the name |model_type_string|.
-SYNC_EXPORT ModelType ModelTypeFromString(
- const std::string& model_type_string);
-
-SYNC_EXPORT std::string ModelTypeSetToString(ModelTypeSet model_types);
-
-// Caller takes ownership of returned list.
-SYNC_EXPORT base::ListValue* ModelTypeSetToValue(ModelTypeSet model_types);
-
-SYNC_EXPORT ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value);
-
-// Returns a string corresponding to the syncable tag for this datatype.
-SYNC_EXPORT std::string ModelTypeToRootTag(ModelType type);
-
-// Convert a real model type to a notification type (used for
-// subscribing to server-issued notifications). Returns true iff
-// |model_type| was a real model type and |notification_type| was
-// filled in.
-bool RealModelTypeToNotificationType(ModelType model_type,
- std::string* notification_type);
-
-// Converts a notification type to a real model type. Returns true
-// iff |notification_type| was the notification type of a real model
-// type and |model_type| was filled in.
-SYNC_EXPORT bool NotificationTypeToRealModelType(
- const std::string& notification_type,
- ModelType* model_type);
-
-// Returns true if |model_type| is a real datatype
-SYNC_EXPORT bool IsRealDataType(ModelType model_type);
-
-// Returns true if |model_type| is an act-once type. Act once types drop
-// entities after applying them. Drops are deletes that are not synced to other
-// clients.
-// TODO(haitaol): Make entries of act-once data types immutable.
-SYNC_EXPORT bool IsActOnceDataType(ModelType model_type);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_MODEL_TYPE_H_
diff --git a/chromium/sync/internal_api/public/base/model_type_test_util.cc b/chromium/sync/internal_api/public/base/model_type_test_util.cc
deleted file mode 100644
index d9621bbbab9..00000000000
--- a/chromium/sync/internal_api/public/base/model_type_test_util.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/internal_api/public/base/ack_handle.h"
-
-namespace syncer {
-
-ObjectIdInvalidationMap BuildInvalidationMap(
- ModelType type,
- int version,
- const std::string& payload) {
- ObjectIdInvalidationMap map;
- invalidation::ObjectId id;
- bool result = RealModelTypeToObjectId(type, &id);
- DCHECK(result);
- map.Insert(Invalidation::Init(id, version, payload));
- return map;
-}
-
-void PrintTo(ModelTypeSet model_types, ::std::ostream* os) {
- *os << ModelTypeSetToString(model_types);
-}
-
-namespace {
-
-// Matcher implementation for HasModelTypes().
-class HasModelTypesMatcher
- : public ::testing::MatcherInterface<ModelTypeSet> {
- public:
- explicit HasModelTypesMatcher(ModelTypeSet expected_types)
- : expected_types_(expected_types) {}
-
- virtual ~HasModelTypesMatcher() {}
-
- virtual bool MatchAndExplain(
- ModelTypeSet model_types,
- ::testing::MatchResultListener* listener) const {
- // No need to annotate listener since we already define PrintTo().
- return model_types.Equals(expected_types_);
- }
-
- virtual void DescribeTo(::std::ostream* os) const {
- *os << "has model types " << ModelTypeSetToString(expected_types_);
- }
-
- virtual void DescribeNegationTo(::std::ostream* os) const {
- *os << "doesn't have model types "
- << ModelTypeSetToString(expected_types_);
- }
-
- private:
- const ModelTypeSet expected_types_;
-
- DISALLOW_COPY_AND_ASSIGN(HasModelTypesMatcher);
-};
-
-} // namespace
-
-::testing::Matcher<ModelTypeSet> HasModelTypes(ModelTypeSet expected_types) {
- return ::testing::MakeMatcher(new HasModelTypesMatcher(expected_types));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/model_type_test_util.h b/chromium/sync/internal_api/public/base/model_type_test_util.h
deleted file mode 100644
index c724bf53687..00000000000
--- a/chromium/sync/internal_api/public/base/model_type_test_util.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_PUBLIC_API_BASE_MODEL_TYPE_TEST_UTIL_H_
-#define SYNC_INTERNAL_PUBLIC_API_BASE_MODEL_TYPE_TEST_UTIL_H_
-
-#include <ostream>
-
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace syncer {
-
-// Builds an invaliation map that contains a single invalidation with specified
-// type, version, and payload. The type will be converted from a ModelType to
-// and ObjectId.
-ObjectIdInvalidationMap BuildInvalidationMap(
- ModelType type,
- int version,
- const std::string& payload);
-
-// Defined for googletest. Forwards to ModelTypeSetToString().
-void PrintTo(ModelTypeSet model_types, ::std::ostream* os);
-
-// A gmock matcher for ModelTypeSet. Use like:
-//
-// EXPECT_CALL(mock, ProcessModelTypes(HasModelTypes(expected_types)));
-::testing::Matcher<ModelTypeSet> HasModelTypes(ModelTypeSet expected_types);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_PUBLIC_API_BASE_MODEL_TYPE_TEST_UTIL_H_
diff --git a/chromium/sync/internal_api/public/base/node_ordinal.cc b/chromium/sync/internal_api/public/base/node_ordinal.cc
deleted file mode 100644
index 1b5c42cd132..00000000000
--- a/chromium/sync/internal_api/public/base/node_ordinal.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/node_ordinal.h"
-
-#include <algorithm>
-
-namespace syncer {
-
-NodeOrdinal Int64ToNodeOrdinal(int64 x) {
- uint64 y = static_cast<uint64>(x);
- y ^= 0x8000000000000000ULL;
- std::string bytes(NodeOrdinal::kMinLength, '\x00');
- if (y == 0) {
- // 0 is a special case since |bytes| must not be all zeros.
- bytes.push_back('\x80');
- } else {
- for (int i = 7; i >= 0; --i) {
- bytes[i] = static_cast<uint8>(y);
- y >>= 8;
- }
- }
- NodeOrdinal ordinal(bytes);
- DCHECK(ordinal.IsValid());
- return ordinal;
-}
-
-int64 NodeOrdinalToInt64(const NodeOrdinal& ordinal) {
- uint64 y = 0;
- const std::string& s = ordinal.ToInternalValue();
- size_t l = NodeOrdinal::kMinLength;
- if (s.length() < l) {
- NOTREACHED();
- l = s.length();
- }
- for (size_t i = 0; i < l; ++i) {
- const uint8 byte = s[l - i - 1];
- y |= static_cast<uint64>(byte) << (i * 8);
- }
- y ^= 0x8000000000000000ULL;
- // This is technically implementation-defined if y > INT64_MAX, so
- // we're assuming that we're on a twos-complement machine.
- return static_cast<int64>(y);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/node_ordinal.h b/chromium/sync/internal_api/public/base/node_ordinal.h
deleted file mode 100644
index bc9a04b1799..00000000000
--- a/chromium/sync/internal_api/public/base/node_ordinal.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_NODE_ORDINAL_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_NODE_ORDINAL_H_
-
-#include "base/basictypes.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/ordinal.h"
-
-namespace syncer {
-
-// A NodeOrdinal is an Ordinal whose internal value comes from the
-// ordinal_in_parent field of SyncEntity (see sync.proto). It uses
-// the entire uint8 range for backwards compatibility with the old
-// int64-based positioning.
-
-struct NodeOrdinalTraits {
- static const uint8 kZeroDigit = 0;
- static const uint8 kMaxDigit = kuint8max;
- static const size_t kMinLength = 8;
-};
-
-typedef Ordinal<NodeOrdinalTraits> NodeOrdinal;
-
-COMPILE_ASSERT(static_cast<char>(NodeOrdinal::kZeroDigit) == '\x00',
- NodeOrdinalHasCorrectZeroDigit);
-COMPILE_ASSERT(static_cast<char>(NodeOrdinal::kOneDigit) == '\x01',
- NodeOrdinalHasCorrectOneDigit);
-COMPILE_ASSERT(static_cast<char>(NodeOrdinal::kMidDigit) == '\x80',
- NodeOrdinalHasCorrectMidDigit);
-COMPILE_ASSERT(static_cast<char>(NodeOrdinal::kMaxDigit) == '\xff',
- NodeOrdinalHasCorrectMaxDigit);
-COMPILE_ASSERT(NodeOrdinal::kMidDigitValue == 128,
- NodeOrdinalHasCorrectMidDigitValue);
-COMPILE_ASSERT(NodeOrdinal::kMaxDigitValue == 255,
- NodeOrdinalHasCorrectMaxDigitValue);
-COMPILE_ASSERT(NodeOrdinal::kRadix == 256,
- NodeOrdinalHasCorrectRadix);
-
-// Converts an int64 position (usually from the position_in_parent
-// field of SyncEntity) to a NodeOrdinal. This transformation
-// preserves the ordering relation: a < b under integer ordering if
-// and only if Int64ToNodeOrdinal(a) < Int64ToNodeOrdinal(b).
-SYNC_EXPORT_PRIVATE NodeOrdinal Int64ToNodeOrdinal(int64 x);
-
-// The inverse of Int64ToNodeOrdinal. This conversion is, in general,
-// lossy: NodeOrdinals can have arbitrary fidelity, while numeric
-// positions contain only 64 bits of information (in fact, this is the
-// reason we've moved away from them).
-SYNC_EXPORT_PRIVATE int64 NodeOrdinalToInt64(const NodeOrdinal& ordinal);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_NODE_ORDINAL_H_
diff --git a/chromium/sync/internal_api/public/base/node_ordinal_unittest.cc b/chromium/sync/internal_api/public/base/node_ordinal_unittest.cc
deleted file mode 100644
index d951cb76b5b..00000000000
--- a/chromium/sync/internal_api/public/base/node_ordinal_unittest.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include <algorithm>
-#include <cstddef>
-
-namespace syncer {
-
-namespace {
-
-const int64 kTestValues[] = {
- 0LL,
- 1LL, -1LL,
- 2LL, -2LL,
- 3LL, -3LL,
- 0x79LL, -0x79LL,
- 0x80LL, -0x80LL,
- 0x81LL, -0x81LL,
- 0xFELL, -0xFELL,
- 0xFFLL, -0xFFLL,
- 0x100LL, -0x100LL,
- 0x101LL, -0x101LL,
- 0xFA1AFELL, -0xFA1AFELL,
- 0xFFFFFFFELL, -0xFFFFFFFELL,
- 0xFFFFFFFFLL, -0xFFFFFFFFLL,
- 0x100000000LL, -0x100000000LL,
- 0x100000001LL, -0x100000001LL,
- 0xFFFFFFFFFFLL, -0xFFFFFFFFFFLL,
- 0x112358132134LL, -0x112358132134LL,
- 0xFEFFBEEFABC1234LL, -0xFEFFBEEFABC1234LL,
- kint64max,
- kint64min,
- kint64min + 1,
- kint64max - 1
-};
-
-const size_t kNumTestValues = arraysize(kTestValues);
-
-// Convert each test value to an ordinal. All ordinals should be
-// valid.
-TEST(NodeOrdinalTest, IsValid) {
- for (size_t i = 0; i < kNumTestValues; ++i) {
- const NodeOrdinal ordinal = Int64ToNodeOrdinal(kTestValues[i]);
- EXPECT_TRUE(ordinal.IsValid()) << "i = " << i;
- }
-}
-
-// Convert each test value to an ordinal. All ordinals should have
-// 8-byte strings, except for kint64min, which should have a 9-byte
-// string.
-TEST(NodeOrdinalTest, Size) {
- EXPECT_EQ(9U, Int64ToNodeOrdinal(kint64min).ToInternalValue().size());
-
- for (size_t i = 0; i < kNumTestValues; ++i) {
- if (kTestValues[i] == kint64min) {
- continue;
- }
- const NodeOrdinal ordinal = Int64ToNodeOrdinal(kTestValues[i]);
- EXPECT_EQ(8U, ordinal.ToInternalValue().size()) << "i = " << i;
- }
-}
-
-// Convert each test value to an ordinal and back. That resulting
-// value should be equal to the original value.
-TEST(NodeOrdinalTest, PositionToOrdinalToPosition) {
- for (size_t i = 0; i < kNumTestValues; ++i) {
- const int64 expected_value = kTestValues[i];
- const NodeOrdinal ordinal = Int64ToNodeOrdinal(expected_value);
- const int64 value = NodeOrdinalToInt64(ordinal);
- EXPECT_EQ(expected_value, value) << "i = " << i;
- }
-}
-
-template <typename T, typename LessThan = std::less<T> >
-class IndexedLessThan {
- public:
- IndexedLessThan(const T* values) : values_(values) {}
-
- bool operator()(int i1, int i2) {
- return less_than_(values_[i1], values_[i2]);
- }
-
- private:
- const T* values_;
- LessThan less_than_;
-};
-
-// Sort kTestValues by int64 value and then sort it by NodeOrdinal
-// value. kTestValues should not already be sorted (by either
-// comparator) and the two orderings should be the same.
-TEST(NodeOrdinalTest, ConsistentOrdering) {
- NodeOrdinal ordinals[kNumTestValues];
- std::vector<int> original_ordering(kNumTestValues);
- std::vector<int> int64_ordering(kNumTestValues);
- std::vector<int> ordinal_ordering(kNumTestValues);
- for (size_t i = 0; i < kNumTestValues; ++i) {
- ordinals[i] = Int64ToNodeOrdinal(kTestValues[i]);
- original_ordering[i] = int64_ordering[i] = ordinal_ordering[i] = i;
- }
-
- std::sort(int64_ordering.begin(), int64_ordering.end(),
- IndexedLessThan<int64>(kTestValues));
- std::sort(ordinal_ordering.begin(), ordinal_ordering.end(),
- IndexedLessThan<NodeOrdinal, NodeOrdinal::LessThanFn>(ordinals));
- EXPECT_NE(original_ordering, int64_ordering);
- EXPECT_EQ(int64_ordering, ordinal_ordering);
-}
-
-// Create two NodeOrdinals and create another one between them. It
-// should lie halfway between them.
-TEST(NodeOrdinalTest, CreateBetween) {
- const NodeOrdinal ordinal1("\1\1\1\1\1\1\1\1");
- const NodeOrdinal ordinal2("\1\1\1\1\1\1\1\3");
- EXPECT_EQ("\1\1\1\1\1\1\1\2",
- ordinal1.CreateBetween(ordinal2).ToInternalValue());
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc b/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc
deleted file mode 100644
index 777fc69f74b..00000000000
--- a/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/object_id_invalidation_map_test_util.h"
-
-#include <algorithm>
-
-#include "base/basictypes.h"
-
-namespace syncer {
-
-using ::testing::MakeMatcher;
-using ::testing::MatchResultListener;
-using ::testing::Matcher;
-using ::testing::MatcherInterface;
-using ::testing::PrintToString;
-
-namespace {
-
-class ObjectIdInvalidationMapEqMatcher
- : public MatcherInterface<const ObjectIdInvalidationMap&> {
- public:
- explicit ObjectIdInvalidationMapEqMatcher(
- const ObjectIdInvalidationMap& expected);
-
- virtual bool MatchAndExplain(const ObjectIdInvalidationMap& lhs,
- MatchResultListener* listener) const;
- virtual void DescribeTo(::std::ostream* os) const;
- virtual void DescribeNegationTo(::std::ostream* os) const;
-
- private:
- const ObjectIdInvalidationMap expected_;
-
- DISALLOW_COPY_AND_ASSIGN(ObjectIdInvalidationMapEqMatcher);
-};
-
-ObjectIdInvalidationMapEqMatcher::ObjectIdInvalidationMapEqMatcher(
- const ObjectIdInvalidationMap& expected) : expected_(expected) {
-}
-
-namespace {
-
-struct InvalidationEqPredicate {
- InvalidationEqPredicate(const Invalidation& inv1)
- : inv1_(inv1) { }
-
- bool operator()(const Invalidation& inv2) {
- return inv1_.Equals(inv2);
- }
-
- const Invalidation& inv1_;
-};
-
-}
-
-bool ObjectIdInvalidationMapEqMatcher::MatchAndExplain(
- const ObjectIdInvalidationMap& actual,
- MatchResultListener* listener) const {
-
- std::vector<syncer::Invalidation> expected_invalidations;
- std::vector<syncer::Invalidation> actual_invalidations;
-
- expected_.GetAllInvalidations(&expected_invalidations);
- actual.GetAllInvalidations(&actual_invalidations);
-
- std::vector<syncer::Invalidation> expected_only;
- std::vector<syncer::Invalidation> actual_only;
-
- for (std::vector<syncer::Invalidation>::iterator it =
- expected_invalidations.begin();
- it != expected_invalidations.end(); ++it) {
- if (std::find_if(actual_invalidations.begin(),
- actual_invalidations.end(),
- InvalidationEqPredicate(*it))
- == actual_invalidations.end()) {
- expected_only.push_back(*it);
- }
- }
-
- for (std::vector<syncer::Invalidation>::iterator it =
- actual_invalidations.begin();
- it != actual_invalidations.end(); ++it) {
- if (std::find_if(expected_invalidations.begin(),
- expected_invalidations.end(),
- InvalidationEqPredicate(*it))
- == expected_invalidations.end()) {
- actual_only.push_back(*it);
- }
- }
-
- if (expected_only.empty() && actual_only.empty())
- return true;
-
- bool printed_header = false;
- if (!actual_only.empty()) {
- *listener << " which has these unexpected elements: "
- << PrintToString(actual_only);
- printed_header = true;
- }
-
- if (!expected_only.empty()) {
- *listener << (printed_header ? ",\nand" : "which")
- << " doesn't have these expected elements: "
- << PrintToString(expected_only);
- printed_header = true;
- }
-
- return false;
-}
-
-void ObjectIdInvalidationMapEqMatcher::DescribeTo(::std::ostream* os) const {
- *os << " is equal to " << PrintToString(expected_);
-}
-
-void ObjectIdInvalidationMapEqMatcher::DescribeNegationTo(
- ::std::ostream* os) const {
- *os << " isn't equal to " << PrintToString(expected_);
-}
-
-} // namespace
-
-Matcher<const ObjectIdInvalidationMap&> Eq(
- const ObjectIdInvalidationMap& expected) {
- return MakeMatcher(new ObjectIdInvalidationMapEqMatcher(expected));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h b/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h
deleted file mode 100644
index 5d71979d1fd..00000000000
--- a/chromium/sync/internal_api/public/base/object_id_invalidation_map_test_util.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
-
-// Convince googletest to use the correct overload for PrintTo().
-#include "sync/internal_api/public/base/invalidation_test_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace syncer {
-
-::testing::Matcher<const ObjectIdInvalidationMap&> Eq(
- const ObjectIdInvalidationMap& expected);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_OBJECT_ID_INVALIDATION_MAP_TEST_UTIL_H_
diff --git a/chromium/sync/internal_api/public/base/ordinal.h b/chromium/sync/internal_api/public/base/ordinal.h
deleted file mode 100644
index cb67b4518b1..00000000000
--- a/chromium/sync/internal_api/public/base/ordinal.h
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_ORDINAL_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_ORDINAL_H_
-
-#include <algorithm>
-#include <cstddef>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/json/string_escape.h"
-#include "base/logging.h"
-
-namespace syncer {
-
-// An Ordinal<T> is an object that can be used for ordering. The
-// Ordinal<T> class has an unbounded dense strict total order, which
-// mean for any Ordinal<T>s a, b and c:
-//
-// - a < b and b < c implies a < c (transitivity);
-// - exactly one of a < b, b < a and a = b holds (trichotomy);
-// - if a < b, there is a Ordinal<T> x such that a < x < b (density);
-// - there are Ordinals<T> x and y such that x < a < y (unboundedness).
-//
-// This means that when Ordinal<T> is used for sorting a list, if any
-// item changes its position in the list, only its Ordinal<T> value
-// has to change to represent the new order, and all the other values
-// can stay the same.
-//
-// An Ordinal<T> is internally represented as an array of bytes, so it
-// can be serialized to and deserialized from disk.
-//
-// The Traits class should look like the following:
-//
-// // Don't forget to #include "base/basictypes.h".
-// struct MyOrdinalTraits {
-// // There must be at least two distinct values greater than kZeroDigit
-// // and less than kMaxDigit.
-// static const uint8 kZeroDigit = '0';
-// static const uint8 kMaxDigit = '9';
-// // kMinLength must be positive.
-// static const size_t kMinLength = 1;
-// };
-//
-// An Ordinal<T> is valid iff its corresponding string has at least
-// kMinLength characters, does not contain any characters less than
-// kZeroDigit or greater than kMaxDigit, is not all zero digits, and
-// does not have any unnecessary trailing zero digits.
-//
-// Note that even if the native char type is signed, strings still
-// compare as if their they are unsigned. (This is explicitly in
-// C++11 but not in C++98, even though all implementations do so
-// anyway in practice.) Thus, it is safe to use any byte range for
-// Ordinal<T>s.
-template <typename Traits>
-class Ordinal {
- public:
- // Functors for use with STL algorithms and containers.
- class LessThanFn {
- public:
- LessThanFn();
-
- bool operator()(const Ordinal<Traits>& lhs,
- const Ordinal<Traits>& rhs) const;
- };
-
- class EqualsFn {
- public:
- EqualsFn();
-
- bool operator()(const Ordinal<Traits>& lhs,
- const Ordinal<Traits>& rhs) const;
- };
-
- // Creates an Ordinal from the given string of bytes. The Ordinal
- // may be valid or invalid.
- explicit Ordinal(const std::string& bytes);
-
- // Creates an invalid Ordinal.
- Ordinal();
-
- // Creates a valid initial Ordinal. This is called to create the first
- // element of Ordinal list (i.e. before we have any other values we can
- // generate from).
- static Ordinal CreateInitialOrdinal();
-
- // Returns true iff this Ordinal is valid. This takes constant
- // time.
- bool IsValid() const;
-
- // Returns true iff |*this| == |other| or |*this| and |other|
- // are both invalid.
- bool EqualsOrBothInvalid(const Ordinal& other) const;
-
- // Returns a printable string representation of the Ordinal suitable
- // for logging.
- std::string ToDebugString() const;
-
- // All remaining functions can only be called if IsValid() holds.
- // It is an error to call them if IsValid() is false.
-
- // Order-related functions.
-
- // Returns true iff |*this| < |other|.
- bool LessThan(const Ordinal& other) const;
-
- // Returns true iff |*this| > |other|.
- bool GreaterThan(const Ordinal& other) const;
-
- // Returns true iff |*this| == |other| (i.e. |*this| < |other| and
- // |other| < |*this| are both false).
- bool Equals(const Ordinal& other) const;
-
- // Given |*this| != |other|, returns a Ordinal x such that
- // min(|*this|, |other|) < x < max(|*this|, |other|). It is an error
- // to call this function when |*this| == |other|.
- Ordinal CreateBetween(const Ordinal& other) const;
-
- // Returns a Ordinal |x| such that |x| < |*this|.
- Ordinal CreateBefore() const;
-
- // Returns a Ordinal |x| such that |*this| < |x|.
- Ordinal CreateAfter() const;
-
- // Returns the string of bytes representing the Ordinal. It is
- // guaranteed that an Ordinal constructed from the returned string
- // will be valid.
- std::string ToInternalValue() const;
-
- // Use of copy constructor and default assignment for this class is allowed.
-
- // Constants for Ordinal digits.
- static const uint8 kZeroDigit = Traits::kZeroDigit;
- static const uint8 kMaxDigit = Traits::kMaxDigit;
- static const size_t kMinLength = Traits::kMinLength;
- static const uint8 kOneDigit = kZeroDigit + 1;
- static const uint8 kMidDigit = kOneDigit + (kMaxDigit - kOneDigit) / 2;
- static const unsigned int kMidDigitValue = kMidDigit - kZeroDigit;
- static const unsigned int kMaxDigitValue = kMaxDigit - kZeroDigit;
- static const unsigned int kRadix = kMaxDigitValue + 1;
-
- COMPILE_ASSERT(kOneDigit > kZeroDigit, OrdinalOneDigitGreaterThanMinDigit);
- COMPILE_ASSERT(kMidDigit > kOneDigit, OrdinalMidDigitGreaterThanOneDigit);
- COMPILE_ASSERT(kMaxDigit > kMidDigit, OrdinalMaxDigitGreaterThanMidDigit);
- COMPILE_ASSERT(kMinLength > 0, OrdinalMinLengthIsPositive);
- COMPILE_ASSERT(kMidDigitValue > 1, OrdinalMidDigitValueGreaterThanOne);
- COMPILE_ASSERT(kMaxDigitValue > kMidDigitValue,
- OrdinalMaxDigitValueGreaterThanMidDigitValue);
- COMPILE_ASSERT(kRadix == kMaxDigitValue + 1,
- OrdinalRadixIsMaxDigitValuePlusOne);
-
- private:
- // Returns true iff the given byte string satisfies the criteria for
- // a valid Ordinal.
- static bool IsValidOrdinalBytes(const std::string& bytes);
-
- // Returns the length that bytes.substr(0, length) would be with
- // trailing zero digits removed.
- static size_t GetLengthWithoutTrailingZeroDigits(
- const std::string& bytes,
- size_t length);
-
- // Returns the digit at position i, padding with zero digits if
- // required.
- static uint8 GetDigit(const std::string& bytes, size_t i);
-
- // Returns the digit value at position i, padding with 0 if required.
- static int GetDigitValue(const std::string& bytes, size_t i);
-
- // Adds the given value to |bytes| at position i, carrying when
- // necessary. Returns the left-most carry.
- static int AddDigitValue(std::string* bytes, size_t i, int digit_value);
-
- // Returns the proper length |bytes| should be resized to, i.e. the
- // smallest length such that |bytes| is still greater than
- // |lower_bound| and is still valid. |bytes| should be greater than
- // |lower_bound|.
- static size_t GetProperLength(const std::string& lower_bound,
- const std::string& bytes);
-
- // Compute the midpoint ordinal byte string that is between |start|
- // and |end|.
- static std::string ComputeMidpoint(const std::string& start,
- const std::string& end);
-
- // Create a Ordinal that is lexigraphically greater than |start| and
- // lexigraphically less than |end|. The returned Ordinal will be roughly
- // between |start| and |end|.
- static Ordinal<Traits> CreateOrdinalBetween(const Ordinal<Traits>& start,
- const Ordinal<Traits>& end);
-
- // The internal byte string representation of the Ordinal. Never
- // changes after construction except for assignment.
- std::string bytes_;
-
- // A cache of the result of IsValidOrdinalBytes(bytes_).
- bool is_valid_;
-};
-
-template <typename Traits> const uint8 Ordinal<Traits>::kZeroDigit;
-template <typename Traits> const uint8 Ordinal<Traits>::kMaxDigit;
-template <typename Traits> const size_t Ordinal<Traits>::kMinLength;
-template <typename Traits> const uint8 Ordinal<Traits>::kOneDigit;
-template <typename Traits> const uint8 Ordinal<Traits>::kMidDigit;
-template <typename Traits> const unsigned int Ordinal<Traits>::kMidDigitValue;
-template <typename Traits> const unsigned int Ordinal<Traits>::kMaxDigitValue;
-template <typename Traits> const unsigned int Ordinal<Traits>::kRadix;
-
-template <typename Traits>
-Ordinal<Traits>::LessThanFn::LessThanFn() {}
-
-template <typename Traits>
-bool Ordinal<Traits>::LessThanFn::operator()(const Ordinal<Traits>& lhs,
- const Ordinal<Traits>& rhs) const {
- return lhs.LessThan(rhs);
-}
-
-template <typename Traits>
-Ordinal<Traits>::EqualsFn::EqualsFn() {}
-
-template <typename Traits>
-bool Ordinal<Traits>::EqualsFn::operator()(const Ordinal<Traits>& lhs,
- const Ordinal<Traits>& rhs) const {
- return lhs.Equals(rhs);
-}
-
-template <typename Traits>
-Ordinal<Traits>::Ordinal(const std::string& bytes)
- : bytes_(bytes),
- is_valid_(IsValidOrdinalBytes(bytes_)) {}
-
-template <typename Traits>
-Ordinal<Traits>::Ordinal() : is_valid_(false) {}
-
-template <typename Traits>
-Ordinal<Traits> Ordinal<Traits>::CreateInitialOrdinal() {
- std::string bytes(Traits::kMinLength, kZeroDigit);
- bytes[0] = kMidDigit;
- return Ordinal(bytes);
-}
-
-template <typename Traits>
-bool Ordinal<Traits>::IsValid() const {
- DCHECK_EQ(IsValidOrdinalBytes(bytes_), is_valid_);
- return is_valid_;
-}
-
-template <typename Traits>
-bool Ordinal<Traits>::EqualsOrBothInvalid(const Ordinal& other) const {
- if (!IsValid() && !other.IsValid())
- return true;
-
- if (!IsValid() || !other.IsValid())
- return false;
-
- return Equals(other);
-}
-
-template <typename Traits>
-std::string Ordinal<Traits>::ToDebugString() const {
- std::string debug_string =
- base::EscapeBytesAsInvalidJSONString(bytes_, false /* put_in_quotes */);
- if (!is_valid_) {
- debug_string = "INVALID[" + debug_string + "]";
- }
- return debug_string;
-}
-
-template <typename Traits>
-bool Ordinal<Traits>::LessThan(const Ordinal& other) const {
- CHECK(IsValid());
- CHECK(other.IsValid());
- return bytes_ < other.bytes_;
-}
-
-template <typename Traits>
-bool Ordinal<Traits>::GreaterThan(const Ordinal& other) const {
- CHECK(IsValid());
- CHECK(other.IsValid());
- return bytes_ > other.bytes_;
-}
-
-template <typename Traits>
-bool Ordinal<Traits>::Equals(const Ordinal& other) const {
- CHECK(IsValid());
- CHECK(other.IsValid());
- return bytes_ == other.bytes_;
-}
-
-template <typename Traits>
-Ordinal<Traits> Ordinal<Traits>::CreateBetween(const Ordinal& other) const {
- CHECK(IsValid());
- CHECK(other.IsValid());
- CHECK(!Equals(other));
-
- if (LessThan(other)) {
- return CreateOrdinalBetween(*this, other);
- } else {
- return CreateOrdinalBetween(other, *this);
- }
-}
-
-template <typename Traits>
-Ordinal<Traits> Ordinal<Traits>::CreateBefore() const {
- CHECK(IsValid());
- // Create the smallest valid Ordinal of the appropriate length
- // to be the minimum boundary.
- const size_t length = bytes_.length();
- std::string start(length, kZeroDigit);
- start[length - 1] = kOneDigit;
- if (start == bytes_) {
- start[length - 1] = kZeroDigit;
- start += kOneDigit;
- }
-
- // Even though |start| is already a valid Ordinal that is less
- // than |*this|, we don't return it because we wouldn't have much space in
- // front of it to insert potential future values.
- return CreateBetween(Ordinal(start));
-}
-
-template <typename Traits>
-Ordinal<Traits> Ordinal<Traits>::CreateAfter() const {
- CHECK(IsValid());
- // Create the largest valid Ordinal of the appropriate length to be
- // the maximum boundary.
- std::string end(bytes_.length(), kMaxDigit);
- if (end == bytes_)
- end += kMaxDigit;
-
- // Even though |end| is already a valid Ordinal that is greater than
- // |*this|, we don't return it because we wouldn't have much space after
- // it to insert potential future values.
- return CreateBetween(Ordinal(end));
-}
-
-template <typename Traits>
-std::string Ordinal<Traits>::ToInternalValue() const {
- CHECK(IsValid());
- return bytes_;
-}
-
-template <typename Traits>
-bool Ordinal<Traits>::IsValidOrdinalBytes(const std::string& bytes) {
- const size_t length = bytes.length();
- if (length < kMinLength)
- return false;
-
- bool found_non_zero = false;
- for (size_t i = 0; i < length; ++i) {
- const uint8 byte = bytes[i];
- if (byte < kZeroDigit || byte > kMaxDigit)
- return false;
- if (byte > kZeroDigit)
- found_non_zero = true;
- }
- if (!found_non_zero)
- return false;
-
- if (length > kMinLength) {
- const uint8 last_byte = bytes[length - 1];
- if (last_byte == kZeroDigit)
- return false;
- }
-
- return true;
-}
-
-template <typename Traits>
-size_t Ordinal<Traits>::GetLengthWithoutTrailingZeroDigits(
- const std::string& bytes, size_t length) {
- DCHECK(!bytes.empty());
- DCHECK_GT(length, 0U);
-
- size_t end_position =
- bytes.find_last_not_of(static_cast<char>(kZeroDigit), length - 1);
-
- // If no non kZeroDigit is found then the string is a string of all zeros
- // digits so we return 0 as the correct length.
- if (end_position == std::string::npos)
- return 0;
-
- return end_position + 1;
-}
-
-template <typename Traits>
-uint8 Ordinal<Traits>::GetDigit(const std::string& bytes, size_t i) {
- return (i < bytes.length()) ? bytes[i] : kZeroDigit;
-}
-
-template <typename Traits>
-int Ordinal<Traits>::GetDigitValue(const std::string& bytes, size_t i) {
- return GetDigit(bytes, i) - kZeroDigit;
-}
-
-template <typename Traits>
-int Ordinal<Traits>::AddDigitValue(std::string* bytes,
- size_t i, int digit_value) {
- DCHECK_GE(i, 0U);
- DCHECK_LT(i, bytes->length());
-
- for (int j = static_cast<int>(i); j >= 0 && digit_value > 0; --j) {
- int byte_j_value = GetDigitValue(*bytes, j) + digit_value;
- digit_value = byte_j_value / kRadix;
- DCHECK_LE(digit_value, 1);
- byte_j_value %= kRadix;
- (*bytes)[j] = static_cast<char>(kZeroDigit + byte_j_value);
- }
- return digit_value;
-}
-
-template <typename Traits>
-size_t Ordinal<Traits>::GetProperLength(const std::string& lower_bound,
- const std::string& bytes) {
- CHECK_GT(bytes, lower_bound);
-
- size_t drop_length =
- GetLengthWithoutTrailingZeroDigits(bytes, bytes.length());
- // See if the |ordinal| can be truncated after its last non-zero
- // digit without affecting the ordering.
- if (drop_length > kMinLength) {
- size_t truncated_length =
- GetLengthWithoutTrailingZeroDigits(bytes, drop_length - 1);
-
- if (truncated_length > 0 &&
- bytes.compare(0, truncated_length, lower_bound) > 0)
- drop_length = truncated_length;
- }
- return std::max(drop_length, kMinLength);
-}
-
-template <typename Traits>
-std::string Ordinal<Traits>::ComputeMidpoint(
- const std::string& start,
- const std::string& end) {
- size_t max_size = std::max(start.length(), end.length()) + 1;
- std::string midpoint(max_size, kZeroDigit);
-
- // Perform the operation (start + end) / 2 left-to-right by
- // maintaining a "forward carry" which is either 0 or
- // kMidDigitValue. AddDigitValue() is in general O(n), but this
- // operation is still O(n) despite that; calls to AddDigitValue()
- // will overflow at most to the last position where AddDigitValue()
- // last overflowed.
- int forward_carry = 0;
- for (size_t i = 0; i < max_size; ++i) {
- const int sum_value = GetDigitValue(start, i) + GetDigitValue(end, i);
- const int digit_value = sum_value / 2 + forward_carry;
- // AddDigitValue returning a non-zero carry would imply that
- // midpoint[0] >= kMaxDigit, which one can show is impossible.
- CHECK_EQ(AddDigitValue(&midpoint, i, digit_value), 0);
- forward_carry = (sum_value % 2 == 1) ? kMidDigitValue : 0;
- }
- DCHECK_EQ(forward_carry, 0);
-
- return midpoint;
-}
-
-template <typename Traits>
-Ordinal<Traits> Ordinal<Traits>::CreateOrdinalBetween(
- const Ordinal<Traits>& start,
- const Ordinal<Traits>& end) {
- CHECK(start.IsValid());
- CHECK(end.IsValid());
- CHECK(start.LessThan(end));
- const std::string& start_bytes = start.ToInternalValue();
- const std::string& end_bytes = end.ToInternalValue();
- DCHECK_LT(start_bytes, end_bytes);
-
- std::string midpoint = ComputeMidpoint(start_bytes, end_bytes);
- const size_t proper_length = GetProperLength(start_bytes, midpoint);
- midpoint.resize(proper_length, kZeroDigit);
-
- DCHECK_GT(midpoint, start_bytes);
- DCHECK_LT(midpoint, end_bytes);
-
- Ordinal<Traits> midpoint_ordinal(midpoint);
- DCHECK(midpoint_ordinal.IsValid());
- return midpoint_ordinal;
-}
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_ORDINAL_H_
diff --git a/chromium/sync/internal_api/public/base/ordinal_unittest.cc b/chromium/sync/internal_api/public/base/ordinal_unittest.cc
deleted file mode 100644
index 8c77d6d6582..00000000000
--- a/chromium/sync/internal_api/public/base/ordinal_unittest.cc
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "sync/internal_api/public/base/ordinal.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include <algorithm>
-#include <cctype>
-#include <cstddef>
-#include <string>
-#include <vector>
-
-namespace syncer {
-
-namespace {
-
-struct TestOrdinalTraits {
- static const uint8 kZeroDigit = '0';
- static const uint8 kMaxDigit = '3';
- static const size_t kMinLength = 1;
-};
-
-struct LongOrdinalTraits {
- static const uint8 kZeroDigit = '0';
- static const uint8 kMaxDigit = '9';
- static const size_t kMinLength = 5;
-};
-
-struct LargeOrdinalTraits {
- static const uint8 kZeroDigit = 0;
- static const uint8 kMaxDigit = kuint8max;
- static const size_t kMinLength = 1;
-};
-
-typedef Ordinal<TestOrdinalTraits> TestOrdinal;
-typedef Ordinal<LongOrdinalTraits> LongOrdinal;
-typedef Ordinal<LargeOrdinalTraits> LargeOrdinal;
-
-COMPILE_ASSERT(TestOrdinal::kZeroDigit == '0',
- TestOrdinalHasCorrectZeroDigit);
-COMPILE_ASSERT(TestOrdinal::kOneDigit == '1',
- TestOrdinalHasCorrectOneDigit);
-COMPILE_ASSERT(TestOrdinal::kMidDigit == '2',
- TestOrdinalHasCorrectMidDigit);
-COMPILE_ASSERT(TestOrdinal::kMaxDigit == '3',
- TestOrdinalHasCorrectMaxDigit);
-COMPILE_ASSERT(TestOrdinal::kMidDigitValue == 2,
- TestOrdinalHasCorrectMidDigitValue);
-COMPILE_ASSERT(TestOrdinal::kMaxDigitValue == 3,
- TestOrdinalHasCorrectMaxDigitValue);
-COMPILE_ASSERT(TestOrdinal::kRadix == 4,
- TestOrdinalHasCorrectRadix);
-
-COMPILE_ASSERT(LongOrdinal::kZeroDigit == '0',
- LongOrdinalkZeroDigit_incorrect);
-COMPILE_ASSERT(LongOrdinal::kOneDigit == '1',
- LongOrdinalkOneDigit_incorrect);
-COMPILE_ASSERT(LongOrdinal::kMidDigit == '5',
- LongOrdinalkMidDigit_incorrect);
-COMPILE_ASSERT(LongOrdinal::kMaxDigit == '9',
- LongOrdinalkMaxDigit_incorrect);
-COMPILE_ASSERT(LongOrdinal::kMidDigitValue == 5,
- LongOrdinalkMidDigitValue_incorrect);
-COMPILE_ASSERT(LongOrdinal::kMaxDigitValue == 9,
- LongOrdinalkMaxDigitValue_incorrect);
-COMPILE_ASSERT(LongOrdinal::kRadix == 10,
- LongOrdinalkRadix_incorrect);
-
-COMPILE_ASSERT(static_cast<char>(LargeOrdinal::kZeroDigit) == '\x00',
- LargeOrdinalkZeroDigit_incorrect);
-COMPILE_ASSERT(static_cast<char>(LargeOrdinal::kOneDigit) == '\x01',
- LargeOrdinalkOneDigit_incorrect);
-COMPILE_ASSERT(static_cast<char>(LargeOrdinal::kMidDigit) == '\x80',
- LargeOrdinalkMidDigit_incorrect);
-COMPILE_ASSERT(static_cast<char>(LargeOrdinal::kMaxDigit) == '\xff',
- LargeOrdinalkMaxDigit_incorrect);
-COMPILE_ASSERT(LargeOrdinal::kMidDigitValue == 128,
- LargeOrdinalkMidDigitValue_incorrect);
-COMPILE_ASSERT(LargeOrdinal::kMaxDigitValue == 255,
- LargeOrdinalkMaxDigitValue_incorrect);
-COMPILE_ASSERT(LargeOrdinal::kRadix == 256,
- LargeOrdinalkRadix_incorrect);
-
-// Create Ordinals that satisfy all but one criterion for validity.
-// IsValid() should return false for all of them.
-TEST(Ordinal, Invalid) {
- // Length criterion.
- EXPECT_FALSE(TestOrdinal(std::string()).IsValid());
- EXPECT_FALSE(LongOrdinal("0001").IsValid());
-
- const char kBeforeZero[] = { '0' - 1, '\0' };
- const char kAfterNine[] = { '9' + 1, '\0' };
-
- // Character criterion.
- EXPECT_FALSE(TestOrdinal(kBeforeZero).IsValid());
- EXPECT_FALSE(TestOrdinal("4").IsValid());
- EXPECT_FALSE(LongOrdinal(std::string("0000") + kBeforeZero).IsValid());
- EXPECT_FALSE(LongOrdinal(std::string("0000") + kAfterNine).IsValid());
-
- // Zero criterion.
- EXPECT_FALSE(TestOrdinal("0").IsValid());
- EXPECT_FALSE(TestOrdinal("00000").IsValid());
-
- // Trailing zero criterion.
- EXPECT_FALSE(TestOrdinal("10").IsValid());
- EXPECT_FALSE(TestOrdinal("111110").IsValid());
-}
-
-// Create Ordinals that satisfy all criteria for validity.
-// IsValid() should return true for all of them.
-TEST(Ordinal, Valid) {
- // Length criterion.
- EXPECT_TRUE(TestOrdinal("1").IsValid());
- EXPECT_TRUE(LongOrdinal("10000").IsValid());
-}
-
-// Create Ordinals from CreateInitialOrdinal. They should be valid
-// and close to the middle of the range.
-TEST(Ordinal, CreateInitialOrdinal) {
- const TestOrdinal& ordinal1 = TestOrdinal::CreateInitialOrdinal();
- const LongOrdinal& ordinal2 = LongOrdinal::CreateInitialOrdinal();
- ASSERT_TRUE(ordinal1.IsValid());
- ASSERT_TRUE(ordinal2.IsValid());
- EXPECT_TRUE(ordinal1.Equals(TestOrdinal("2")));
- EXPECT_TRUE(ordinal2.Equals(LongOrdinal("50000")));
-}
-
-// Create an invalid and a valid Ordinal. EqualsOrBothInvalid should
-// return true if called reflexively and false otherwise.
-TEST(Ordinal, EqualsOrBothInvalid) {
- const TestOrdinal& valid_ordinal = TestOrdinal::CreateInitialOrdinal();
- const TestOrdinal invalid_ordinal;
-
- EXPECT_TRUE(valid_ordinal.EqualsOrBothInvalid(valid_ordinal));
- EXPECT_TRUE(invalid_ordinal.EqualsOrBothInvalid(invalid_ordinal));
- EXPECT_FALSE(invalid_ordinal.EqualsOrBothInvalid(valid_ordinal));
- EXPECT_FALSE(valid_ordinal.EqualsOrBothInvalid(invalid_ordinal));
-}
-
-// Create three Ordinals in order. LessThan should return values
-// consistent with that order.
-TEST(Ordinal, LessThan) {
- const TestOrdinal small_ordinal("1");
- const TestOrdinal middle_ordinal("2");
- const TestOrdinal big_ordinal("3");
-
- EXPECT_FALSE(small_ordinal.LessThan(small_ordinal));
- EXPECT_TRUE(small_ordinal.LessThan(middle_ordinal));
- EXPECT_TRUE(small_ordinal.LessThan(big_ordinal));
-
- EXPECT_FALSE(middle_ordinal.LessThan(small_ordinal));
- EXPECT_FALSE(middle_ordinal.LessThan(middle_ordinal));
- EXPECT_TRUE(middle_ordinal.LessThan(big_ordinal));
-
- EXPECT_FALSE(big_ordinal.LessThan(small_ordinal));
- EXPECT_FALSE(big_ordinal.LessThan(middle_ordinal));
- EXPECT_FALSE(big_ordinal.LessThan(big_ordinal));
-}
-
-// Create two single-digit ordinals with byte values 0 and 255. The
-// former should compare as less than the latter, even though the
-// native char type may be signed.
-TEST(Ordinal, LessThanLarge) {
- const LargeOrdinal small_ordinal("\x01");
- const LargeOrdinal big_ordinal("\xff");
-
- EXPECT_TRUE(small_ordinal.LessThan(big_ordinal));
-}
-
-// Create three Ordinals in order. GreaterThan should return values
-// consistent with that order.
-TEST(Ordinal, GreaterThan) {
- const LongOrdinal small_ordinal("10000");
- const LongOrdinal middle_ordinal("55555");
- const LongOrdinal big_ordinal("99999");
-
- EXPECT_FALSE(small_ordinal.GreaterThan(small_ordinal));
- EXPECT_FALSE(small_ordinal.GreaterThan(middle_ordinal));
- EXPECT_FALSE(small_ordinal.GreaterThan(big_ordinal));
-
- EXPECT_TRUE(middle_ordinal.GreaterThan(small_ordinal));
- EXPECT_FALSE(middle_ordinal.GreaterThan(middle_ordinal));
- EXPECT_FALSE(middle_ordinal.GreaterThan(big_ordinal));
-
- EXPECT_TRUE(big_ordinal.GreaterThan(small_ordinal));
- EXPECT_TRUE(big_ordinal.GreaterThan(middle_ordinal));
- EXPECT_FALSE(big_ordinal.GreaterThan(big_ordinal));
-}
-
-// Create two valid Ordinals. Equals should return true only when
-// called reflexively.
-TEST(Ordinal, Equals) {
- const TestOrdinal ordinal1("1");
- const TestOrdinal ordinal2("2");
-
- EXPECT_TRUE(ordinal1.Equals(ordinal1));
- EXPECT_FALSE(ordinal1.Equals(ordinal2));
-
- EXPECT_FALSE(ordinal2.Equals(ordinal1));
- EXPECT_TRUE(ordinal2.Equals(ordinal2));
-}
-
-// Create some valid ordinals from some byte strings.
-// ToInternalValue() should return the original byte string.
-TEST(OrdinalTest, ToInternalValue) {
- EXPECT_EQ("2", TestOrdinal("2").ToInternalValue());
- EXPECT_EQ("12345", LongOrdinal("12345").ToInternalValue());
- EXPECT_EQ("\1\2\3\4\5", LargeOrdinal("\1\2\3\4\5").ToInternalValue());
-}
-
-bool IsNonEmptyPrintableString(const std::string& str) {
- if (str.empty())
- return false;
- for (size_t i = 0; i < str.length(); ++i) {
- if (!isprint(str[i]))
- return false;
- }
- return true;
-}
-
-// Create some invalid/valid ordinals. ToDebugString() should always
-// return a non-empty printable string.
-TEST(OrdinalTest, ToDebugString) {
- EXPECT_TRUE(
- IsNonEmptyPrintableString(TestOrdinal().ToDebugString()));
- EXPECT_TRUE(
- IsNonEmptyPrintableString(TestOrdinal("invalid string").ToDebugString()));
- EXPECT_TRUE(
- IsNonEmptyPrintableString(TestOrdinal("2").ToDebugString()));
- EXPECT_TRUE(
- IsNonEmptyPrintableString(LongOrdinal("12345").ToDebugString()));
- EXPECT_TRUE(
- IsNonEmptyPrintableString(LargeOrdinal("\1\2\3\4\5").ToDebugString()));
-}
-
-// Create three Ordinals in order. LessThanFn should return values
-// consistent with that order.
-TEST(Ordinal, LessThanFn) {
- const TestOrdinal small_ordinal("1");
- const TestOrdinal middle_ordinal("2");
- const TestOrdinal big_ordinal("3");
-
- const TestOrdinal::LessThanFn less_than;
-
- EXPECT_FALSE(less_than(small_ordinal, small_ordinal));
- EXPECT_TRUE(less_than(small_ordinal, middle_ordinal));
- EXPECT_TRUE(less_than(small_ordinal, big_ordinal));
-
- EXPECT_FALSE(less_than(middle_ordinal, small_ordinal));
- EXPECT_FALSE(less_than(middle_ordinal, middle_ordinal));
- EXPECT_TRUE(less_than(middle_ordinal, big_ordinal));
-
- EXPECT_FALSE(less_than(big_ordinal, small_ordinal));
- EXPECT_FALSE(less_than(big_ordinal, middle_ordinal));
- EXPECT_FALSE(less_than(big_ordinal, big_ordinal));
-}
-
-template <typename Traits>
-std::string GetBetween(const std::string& ordinal_string1,
- const std::string& ordinal_string2) {
- const Ordinal<Traits> ordinal1(ordinal_string1);
- const Ordinal<Traits> ordinal2(ordinal_string2);
- const Ordinal<Traits> between1 = ordinal1.CreateBetween(ordinal2);
- const Ordinal<Traits> between2 = ordinal2.CreateBetween(ordinal1);
- EXPECT_TRUE(between1.Equals(between2));
- return between1.ToInternalValue();
-}
-
-// Create some Ordinals from single-digit strings. Given two strings
-// from this set, CreateBetween should return an Ordinal roughly between
-// them that are also single-digit when possible.
-TEST(Ordinal, CreateBetweenSingleDigit) {
- EXPECT_EQ("2", GetBetween<TestOrdinal>("1", "3"));
- EXPECT_EQ("12", GetBetween<TestOrdinal>("1", "2"));
- EXPECT_EQ("22", GetBetween<TestOrdinal>("2", "3"));
-}
-
-// Create some Ordinals from strings of various lengths. Given two
-// strings from this set, CreateBetween should return an Ordinal roughly
-// between them that have as few digits as possible.
-TEST(Ordinal, CreateBetweenDifferentLengths) {
- EXPECT_EQ("102", GetBetween<TestOrdinal>("1", "11"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("1", "31"));
- EXPECT_EQ("132", GetBetween<TestOrdinal>("13", "2"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("10001", "3"));
- EXPECT_EQ("20000", GetBetween<LongOrdinal>("10001", "30000"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("10002", "3"));
- EXPECT_EQ("20001", GetBetween<LongOrdinal>("10002", "30000"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("1", "30002"));
- EXPECT_EQ("20001", GetBetween<LongOrdinal>("10000", "30002"));
-}
-
-// Create some Ordinals specifically designed to trigger overflow
-// cases. Given two strings from this set, CreateBetween should
-// return an Ordinal roughly between them that have as few digits as
-// possible.
-TEST(Ordinal, CreateBetweenOverflow) {
- EXPECT_EQ("03", GetBetween<TestOrdinal>("01", "11"));
- EXPECT_EQ("13", GetBetween<TestOrdinal>("11", "21"));
- EXPECT_EQ("113", GetBetween<TestOrdinal>("111", "121"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("001", "333"));
- EXPECT_EQ("31", GetBetween<TestOrdinal>("222", "333"));
- EXPECT_EQ("3", GetBetween<TestOrdinal>("201", "333"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("003", "333"));
- EXPECT_EQ("2", GetBetween<TestOrdinal>("2223", "1113"));
-}
-
-// Create some Ordinals specifically designed to trigger digit
-// overflow cases. Given two strings from this set, CreateBetween
-// should return an Ordinal roughly between them that have as few digits
-// as possible.
-TEST(Ordinal, CreateBetweenOverflowLarge) {
- EXPECT_EQ("\x80", GetBetween<LargeOrdinal>("\x01\xff", "\xff\xff"));
- EXPECT_EQ("\xff\xfe\x80", GetBetween<LargeOrdinal>("\xff\xfe", "\xff\xff"));
-}
-
-// Create some Ordinals. CreateBefore should return an Ordinal
-// roughly halfway towards 0.
-TEST(Ordinal, CreateBefore) {
- EXPECT_EQ("02", TestOrdinal("1").CreateBefore().ToInternalValue());
- EXPECT_EQ("03", TestOrdinal("11").CreateBefore().ToInternalValue());
- EXPECT_EQ("03", TestOrdinal("12").CreateBefore().ToInternalValue());
- EXPECT_EQ("1", TestOrdinal("13").CreateBefore().ToInternalValue());
-}
-
-// Create some Ordinals. CreateAfter should return an Ordinal
-// roughly halfway towards 0.
-TEST(Ordinal, CreateAfter) {
- EXPECT_EQ("31", TestOrdinal("3").CreateAfter().ToInternalValue());
- EXPECT_EQ("322", TestOrdinal("32").CreateAfter().ToInternalValue());
- EXPECT_EQ("33322", TestOrdinal("3332").CreateAfter().ToInternalValue());
- EXPECT_EQ("3", TestOrdinal("22").CreateAfter().ToInternalValue());
- EXPECT_EQ("3", TestOrdinal("23").CreateAfter().ToInternalValue());
-}
-
-// Create two valid Ordinals. EqualsFn should return true only when
-// called reflexively.
-TEST(Ordinal, EqualsFn) {
- const TestOrdinal ordinal1("1");
- const TestOrdinal ordinal2("2");
-
- const TestOrdinal::EqualsFn equals;
-
- EXPECT_TRUE(equals(ordinal1, ordinal1));
- EXPECT_FALSE(equals(ordinal1, ordinal2));
-
- EXPECT_FALSE(equals(ordinal2, ordinal1));
- EXPECT_TRUE(equals(ordinal2,ordinal2));
-}
-
-// Create some Ordinals and shuffle them. Sorting them using
-// LessThanFn should produce the correct order.
-TEST(Ordinal, Sort) {
- const LongOrdinal ordinal1("12345");
- const LongOrdinal ordinal2("54321");
- const LongOrdinal ordinal3("87654");
- const LongOrdinal ordinal4("98765");
-
- std::vector<LongOrdinal> sorted_ordinals;
- sorted_ordinals.push_back(ordinal1);
- sorted_ordinals.push_back(ordinal2);
- sorted_ordinals.push_back(ordinal3);
- sorted_ordinals.push_back(ordinal4);
-
- std::vector<LongOrdinal> ordinals = sorted_ordinals;
- std::random_shuffle(ordinals.begin(), ordinals.end());
- std::sort(ordinals.begin(), ordinals.end(), LongOrdinal::LessThanFn());
- EXPECT_TRUE(std::equal(ordinals.begin(), ordinals.end(),
- sorted_ordinals.begin(), LongOrdinal::EqualsFn()));
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/progress_marker_map.cc b/chromium/sync/internal_api/public/base/progress_marker_map.cc
deleted file mode 100644
index ea1f177b7d3..00000000000
--- a/chromium/sync/internal_api/public/base/progress_marker_map.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/progress_marker_map.h"
-
-#include "base/json/json_writer.h"
-#include "base/json/string_escape.h"
-#include "base/values.h"
-
-namespace syncer {
-
-scoped_ptr<base::DictionaryValue> ProgressMarkerMapToValue(
- const ProgressMarkerMap& marker_map) {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- for (ProgressMarkerMap::const_iterator it = marker_map.begin();
- it != marker_map.end(); ++it) {
- std::string printable_payload;
- base::EscapeJSONString(
- it->second, false /* put_in_quotes */, &printable_payload);
- value->SetString(ModelTypeToString(it->first), printable_payload);
- }
- return value.Pass();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/progress_marker_map.h b/chromium/sync/internal_api/public/base/progress_marker_map.h
deleted file mode 100644
index cf022e2726b..00000000000
--- a/chromium/sync/internal_api/public/base/progress_marker_map.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Definition of ProgressMarkerMap and various utility functions.
-
-#ifndef SYNC_INTERNAL_PUBLIC_API_BASE_PROGRESS_MARKER_MAP_H_
-#define SYNC_INTERNAL_PUBLIC_API_BASE_PROGRESS_MARKER_MAP_H_
-
-#include <map>
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-// TODO(akalin,mmontgomery): Move the non-exported functions in this file to a
-// private header.
-
-namespace base {
-class DictionaryValue;
-}
-
-namespace syncer {
-
-// A container that maps ModelType to serialized
-// DataTypeProgressMarkers.
-typedef std::map<ModelType, std::string> ProgressMarkerMap;
-
-SYNC_EXPORT_PRIVATE scoped_ptr<base::DictionaryValue> ProgressMarkerMapToValue(
- const ProgressMarkerMap& marker_map);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_PUBLIC_API_BASE_PROGRESS_MARKER_MAP_H_
diff --git a/chromium/sync/internal_api/public/base/unique_position.cc b/chromium/sync/internal_api/public/base/unique_position.cc
deleted file mode 100644
index 40bab6e175d..00000000000
--- a/chromium/sync/internal_api/public/base/unique_position.cc
+++ /dev/null
@@ -1,615 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/unique_position.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "sync/protocol/unique_position.pb.h"
-#include "third_party/zlib/zlib.h"
-
-namespace syncer {
-
-const size_t UniquePosition::kSuffixLength = 28;
-const size_t UniquePosition::kCompressBytesThreshold = 128;
-
-// static.
-bool UniquePosition::IsValidSuffix(const std::string& suffix) {
- // The suffix must be exactly the specified length, otherwise unique suffixes
- // are not sufficient to guarantee unique positions (because prefix + suffix
- // == p + refixsuffix).
- return suffix.length() == kSuffixLength;
-}
-
-// static.
-bool UniquePosition::IsValidBytes(const std::string& bytes) {
- // The first condition ensures that our suffix uniqueness is sufficient to
- // guarantee position uniqueness. Otherwise, it's possible the end of some
- // prefix + some short suffix == some long suffix.
- // The second condition ensures that FindSmallerWithSuffix can always return a
- // result.
- return bytes.length() >= kSuffixLength
- && bytes[bytes.length()-1] != 0;
-}
-
-// static.
-UniquePosition UniquePosition::CreateInvalid() {
- UniquePosition pos;
- DCHECK(!pos.IsValid());
- return pos;
-}
-
-// static.
-UniquePosition UniquePosition::FromProto(const sync_pb::UniquePosition& proto) {
- if (proto.has_custom_compressed_v1()) {
- return UniquePosition(proto.custom_compressed_v1());
- } else if (proto.has_value() && !proto.value().empty()) {
- return UniquePosition(Compress(proto.value()));
- } else if (proto.has_compressed_value() && proto.has_uncompressed_length()) {
- uLongf uncompressed_len = proto.uncompressed_length();
- std::string un_gzipped;
-
- un_gzipped.resize(uncompressed_len);
- int result = uncompress(
- reinterpret_cast<Bytef*>(string_as_array(&un_gzipped)),
- &uncompressed_len,
- reinterpret_cast<const Bytef*>(proto.compressed_value().data()),
- proto.compressed_value().size());
- if (result != Z_OK) {
- DLOG(ERROR) << "Unzip failed " << result;
- return UniquePosition::CreateInvalid();
- }
- if (uncompressed_len != proto.uncompressed_length()) {
- DLOG(ERROR)
- << "Uncompressed length " << uncompressed_len
- << " did not match specified length " << proto.uncompressed_length();
- return UniquePosition::CreateInvalid();
- }
- return UniquePosition(Compress(un_gzipped));
- } else {
- return UniquePosition::CreateInvalid();
- }
-}
-
-// static.
-UniquePosition UniquePosition::FromInt64(
- int64 x, const std::string& suffix) {
- uint64 y = static_cast<uint64>(x);
- y ^= 0x8000000000000000ULL; // Make it non-negative.
- std::string bytes(8, 0);
- for (int i = 7; i >= 0; --i) {
- bytes[i] = static_cast<uint8>(y);
- y >>= 8;
- }
- return UniquePosition(bytes + suffix, suffix);
-}
-
-// static.
-UniquePosition UniquePosition::InitialPosition(
- const std::string& suffix) {
- DCHECK(IsValidSuffix(suffix));
- return UniquePosition(suffix, suffix);
-}
-
-// static.
-UniquePosition UniquePosition::Before(
- const UniquePosition& x,
- const std::string& suffix) {
- DCHECK(IsValidSuffix(suffix));
- DCHECK(x.IsValid());
- const std::string& before = FindSmallerWithSuffix(
- Uncompress(x.compressed_), suffix);
- return UniquePosition(before + suffix, suffix);
-}
-
-// static.
-UniquePosition UniquePosition::After(
- const UniquePosition& x,
- const std::string& suffix) {
- DCHECK(IsValidSuffix(suffix));
- DCHECK(x.IsValid());
- const std::string& after = FindGreaterWithSuffix(
- Uncompress(x.compressed_), suffix);
- return UniquePosition(after + suffix, suffix);
-}
-
-// static.
-UniquePosition UniquePosition::Between(
- const UniquePosition& before,
- const UniquePosition& after,
- const std::string& suffix) {
- DCHECK(before.IsValid());
- DCHECK(after.IsValid());
- DCHECK(before.LessThan(after));
- DCHECK(IsValidSuffix(suffix));
- const std::string& mid = FindBetweenWithSuffix(
- Uncompress(before.compressed_),
- Uncompress(after.compressed_),
- suffix);
- return UniquePosition(mid + suffix, suffix);
-}
-
-UniquePosition::UniquePosition() : is_valid_(false) {}
-
-bool UniquePosition::LessThan(const UniquePosition& other) const {
- DCHECK(this->IsValid());
- DCHECK(other.IsValid());
-
- return compressed_ < other.compressed_;
-}
-
-bool UniquePosition::Equals(const UniquePosition& other) const {
- if (!this->IsValid() && !other.IsValid())
- return true;
-
- return compressed_ == other.compressed_;
-}
-
-void UniquePosition::ToProto(sync_pb::UniquePosition* proto) const {
- proto->Clear();
-
- // This is the current preferred foramt.
- proto->set_custom_compressed_v1(compressed_);
-
- // Older clients used to write other formats. We don't bother doing that
- // anymore because that form of backwards compatibility is expensive. We no
- // longer want to pay that price just too support clients that have been
- // obsolete for a long time. See the proto definition for details.
-}
-
-void UniquePosition::SerializeToString(std::string* blob) const {
- DCHECK(blob);
- sync_pb::UniquePosition proto;
- ToProto(&proto);
- proto.SerializeToString(blob);
-}
-
-int64 UniquePosition::ToInt64() const {
- uint64 y = 0;
- const std::string& s = Uncompress(compressed_);
- size_t l = sizeof(int64);
- if (s.length() < l) {
- NOTREACHED();
- l = s.length();
- }
- for (size_t i = 0; i < l; ++i) {
- const uint8 byte = s[l - i - 1];
- y |= static_cast<uint64>(byte) << (i * 8);
- }
- y ^= 0x8000000000000000ULL;
- // This is technically implementation-defined if y > INT64_MAX, so
- // we're assuming that we're on a twos-complement machine.
- return static_cast<int64>(y);
-}
-
-bool UniquePosition::IsValid() const {
- return is_valid_;
-}
-
-std::string UniquePosition::ToDebugString() const {
- const std::string bytes = Uncompress(compressed_);
- if (bytes.empty())
- return std::string("INVALID[]");
-
- std::string debug_string = base::HexEncode(bytes.data(), bytes.length());
- if (!IsValid()) {
- debug_string = "INVALID[" + debug_string + "]";
- }
-
- std::string compressed_string =
- base::HexEncode(compressed_.data(), compressed_.length());
- debug_string.append(", compressed: " + compressed_string);
- return debug_string;
-}
-
-std::string UniquePosition::GetSuffixForTest() const {
- const std::string bytes = Uncompress(compressed_);
- const size_t prefix_len = bytes.length() - kSuffixLength;
- return bytes.substr(prefix_len, std::string::npos);
-}
-
-std::string UniquePosition::FindSmallerWithSuffix(
- const std::string& reference,
- const std::string& suffix) {
- size_t ref_zeroes = reference.find_first_not_of('\0');
- size_t suffix_zeroes = suffix.find_first_not_of('\0');
-
- // Neither of our inputs are allowed to have trailing zeroes, so the following
- // must be true.
- DCHECK_NE(ref_zeroes, std::string::npos);
- DCHECK_NE(suffix_zeroes, std::string::npos);
-
- if (suffix_zeroes > ref_zeroes) {
- // Implies suffix < ref.
- return std::string();
- }
-
- if (suffix.substr(suffix_zeroes) < reference.substr(ref_zeroes)) {
- // Prepend zeroes so the result has as many zero digits as |reference|.
- return std::string(ref_zeroes - suffix_zeroes, '\0');
- } else if (suffix_zeroes > 1) {
- // Prepend zeroes so the result has one more zero digit than |reference|.
- // We could also take the "else" branch below, but taking this branch will
- // give us a smaller result.
- return std::string(ref_zeroes - suffix_zeroes + 1, '\0');
- } else {
- // Prepend zeroes to match those in the |reference|, then something smaller
- // than the first non-zero digit in |reference|.
- char lt_digit = static_cast<uint8>(reference[ref_zeroes])/2;
- return std::string(ref_zeroes, '\0') + lt_digit;
- }
-}
-
-// static
-std::string UniquePosition::FindGreaterWithSuffix(
- const std::string& reference,
- const std::string& suffix) {
- size_t ref_FFs = reference.find_first_not_of(kuint8max);
- size_t suffix_FFs = suffix.find_first_not_of(kuint8max);
-
- if (ref_FFs == std::string::npos) {
- ref_FFs = reference.length();
- }
- if (suffix_FFs == std::string::npos) {
- suffix_FFs = suffix.length();
- }
-
- if (suffix_FFs > ref_FFs) {
- // Implies suffix > reference.
- return std::string();
- }
-
- if (suffix.substr(suffix_FFs) > reference.substr(ref_FFs)) {
- // Prepend FF digits to match those in |reference|.
- return std::string(ref_FFs - suffix_FFs, kuint8max);
- } else if (suffix_FFs > 1) {
- // Prepend enough leading FF digits so result has one more of them than
- // |reference| does. We could also take the "else" branch below, but this
- // gives us a smaller result.
- return std::string(ref_FFs - suffix_FFs + 1, kuint8max);
- } else {
- // Prepend FF digits to match those in |reference|, then something larger
- // than the first non-FF digit in |reference|.
- char gt_digit = static_cast<uint8>(reference[ref_FFs]) +
- (kuint8max - static_cast<uint8>(reference[ref_FFs]) + 1) / 2;
- return std::string(ref_FFs, kuint8max) + gt_digit;
- }
-}
-
-// static
-std::string UniquePosition::FindBetweenWithSuffix(
- const std::string& before,
- const std::string& after,
- const std::string& suffix) {
- DCHECK(IsValidSuffix(suffix));
- DCHECK_NE(before, after);
- DCHECK_LT(before, after);
-
- std::string mid;
-
- // Sometimes our suffix puts us where we want to be.
- if (before < suffix && suffix < after) {
- return std::string();
- }
-
- size_t i = 0;
- for ( ; i < std::min(before.length(), after.length()); ++i) {
- uint8 a_digit = before[i];
- uint8 b_digit = after[i];
-
- if (b_digit - a_digit >= 2) {
- mid.push_back(a_digit + (b_digit - a_digit)/2);
- return mid;
- } else if (a_digit == b_digit) {
- mid.push_back(a_digit);
-
- // Both strings are equal so far. Will appending the suffix at this point
- // give us the comparison we're looking for?
- if (before.substr(i+1) < suffix && suffix < after.substr(i+1)) {
- return mid;
- }
- } else {
- DCHECK_EQ(b_digit - a_digit, 1); // Implied by above if branches.
-
- // The two options are off by one digit. The choice of whether to round
- // up or down here will have consequences on what we do with the remaining
- // digits. Exploring both options is an optimization and is not required
- // for the correctness of this algorithm.
-
- // Option A: Round down the current digit. This makes our |mid| <
- // |after|, no matter what we append afterwards. We then focus on
- // appending digits until |mid| > |before|.
- std::string mid_a = mid;
- mid_a.push_back(a_digit);
- mid_a.append(FindGreaterWithSuffix(before.substr(i+1), suffix));
-
- // Option B: Round up the current digit. This makes our |mid| > |before|,
- // no matter what we append afterwards. We then focus on appending digits
- // until |mid| < |after|. Note that this option may not be viable if the
- // current digit is the last one in |after|, so we skip the option in that
- // case.
- if (after.length() > i+1) {
- std::string mid_b = mid;
- mid_b.push_back(b_digit);
- mid_b.append(FindSmallerWithSuffix(after.substr(i+1), suffix));
-
- // Does this give us a shorter position value? If so, use it.
- if (mid_b.length() < mid_a.length()) {
- return mid_b;
- }
- }
- return mid_a;
- }
- }
-
- // If we haven't found a midpoint yet, the following must be true:
- DCHECK_EQ(before.substr(0, i), after.substr(0, i));
- DCHECK_EQ(before, mid);
- DCHECK_LT(before.length(), after.length());
-
- // We know that we'll need to append at least one more byte to |mid| in the
- // process of making it < |after|. Appending any digit, regardless of the
- // value, will make |before| < |mid|. Therefore, the following will get us a
- // valid position.
-
- mid.append(FindSmallerWithSuffix(after.substr(i), suffix));
- return mid;
-}
-
-UniquePosition::UniquePosition(const std::string& internal_rep)
- : compressed_(internal_rep),
- is_valid_(IsValidBytes(Uncompress(internal_rep))) {
-}
-
-UniquePosition::UniquePosition(
- const std::string& uncompressed,
- const std::string& suffix)
- : compressed_(Compress(uncompressed)),
- is_valid_(IsValidBytes(uncompressed)) {
- DCHECK(uncompressed.rfind(suffix) + kSuffixLength == uncompressed.length());
- DCHECK(IsValidSuffix(suffix));
- DCHECK(IsValid());
-}
-
-// On custom compression:
-//
-// Let C(x) be the compression function and U(x) be the uncompression function.
-//
-// This compression scheme has a few special properties. For one, it is
-// order-preserving. For any two valid position strings x and y:
-// x < y <=> C(x) < C(y)
-// This allows us keep the position strings compressed as we sort them.
-//
-// The compressed format and the decode algorithm:
-//
-// The compressed string is a series of blocks, almost all of which are 8 bytes
-// in length. The only exception is the last block in the compressed string,
-// which may be a remainder block, which has length no greater than 7. The
-// full-length blocks are either repeated character blocks or plain data blocks.
-// All blocks are entirely self-contained. Their decoded values are independent
-// from that of their neighbours.
-//
-// A repeated character block is encoded into eight bytes and represents between
-// 4 and 2^31 repeated instances of a given character in the unencoded stream.
-// The encoding consists of a single character repeated four times, followed by
-// an encoded count. The encoded count is stored as a big-endian 32 bit
-// integer. There are 2^31 possible count values, and two encodings for each.
-// The high encoding is 'enc = kuint32max - count'; the low encoding is 'enc =
-// count'. At compression time, the algorithm will choose between the two
-// encodings based on which of the two will maintain the appropriate sort
-// ordering (by a process which will be described below). The decompression
-// algorithm need not concern itself with which encoding was used; it needs only
-// to decode it. The decoded value of this block is "count" instances of the
-// character that was repeated four times in the first half of this block.
-//
-// A plain data block is encoded into eight bytes and represents exactly eight
-// bytes of data in the unencoded stream. The plain data block must not begin
-// with the same character repeated four times. It is allowed to contain such a
-// four-character sequence, just not at the start of the block. The decoded
-// value of a plain data block is identical to its encoded value.
-//
-// A remainder block has length of at most seven. It is a shorter version of
-// the plain data block. It occurs only at the end of the encoded stream and
-// represents exactly as many bytes of unencoded data as its own length. Like a
-// plain data block, the remainder block never begins with the same character
-// repeated four times. The decoded value of this block is identical to its
-// encoded value.
-//
-// The encode algorithm:
-//
-// From the above description, it can be seen that there may be more than one
-// way to encode a given input string. The encoder must be careful to choose
-// the encoding that guarantees sort ordering.
-//
-// The rules for the encoder are as follows:
-// 1. Iterate through the input string and produce output blocks one at a time.
-// 2. Where possible (ie. where the next four bytes of input consist of the
-// same character repeated four times), produce a repeated data block of
-// maximum possible length.
-// 3. If there is at least 8 bytes of data remaining and it is not possible
-// to produce a repeated character block, produce a plain data block.
-// 4. If there are less than 8 bytes of data remaining and it is not possible
-// to produce a repeated character block, produce a remainder block.
-// 5. When producing a repeated character block, the count encoding must be
-// chosen in such a way that the sort ordering is maintained. The choice is
-// best illustrated by way of example:
-//
-// When comparing two strings, the first of which begins with of 8
-// instances of the letter 'B' and the second with 10 instances of the
-// letter 'B', which of the two should compare lower? The result depends
-// on the 9th character of the first string, since it will be compared
-// against the 9th 'B' in the second string. If that character is an 'A',
-// then the first string will compare lower. If it is a 'C', then the
-// first string will compare higher.
-//
-// The key insight is that the comparison value of a repeated character block
-// depends on the value of the character that follows it. If the character
-// follows the repeated character has a value greater than the repeated
-// character itself, then a shorter run length should translate to a higher
-// comparison value. Therefore, we encode its count using the low encoding.
-// Similarly, if the following character is lower, we use the high encoding.
-
-namespace {
-
-// Appends an encoded run length to |output_str|.
-static void WriteEncodedRunLength(uint32 length,
- bool high_encoding,
- std::string* output_str) {
- CHECK_GE(length, 4U);
- CHECK_LT(length, 0x80000000);
-
- // Step 1: Invert the count, if necessary, to account for the following digit.
- uint32 encoded_length;
- if (high_encoding) {
- encoded_length = 0xffffffff - length;
- } else {
- encoded_length = length;
- }
-
- // Step 2: Write it as big-endian so it compares correctly with memcmp(3).
- output_str->append(1, 0xff & (encoded_length >> 24U));
- output_str->append(1, 0xff & (encoded_length >> 16U));
- output_str->append(1, 0xff & (encoded_length >> 8U));
- output_str->append(1, 0xff & (encoded_length >> 0U));
-}
-
-// Reads an encoded run length for |str| at position |i|.
-static uint32 ReadEncodedRunLength(const std::string& str, size_t i) {
- DCHECK_LE(i + 4, str.length());
-
- // Step 1: Extract the big-endian count.
- uint32 encoded_length =
- ((uint8)(str[i+3]) << 0) |
- ((uint8)(str[i+2]) << 8) |
- ((uint8)(str[i+1]) << 16) |
- ((uint8)(str[i+0]) << 24);
-
- // Step 2: If this was an inverted count, un-invert it.
- uint32 length;
- if (encoded_length & 0x80000000) {
- length = 0xffffffff - encoded_length;
- } else {
- length = encoded_length;
- }
-
- return length;
-}
-
-// A series of four identical chars at the beginning of a block indicates
-// the beginning of a repeated character block.
-static bool IsRepeatedCharPrefix(const std::string& chars, size_t start_index) {
- return chars[start_index] == chars[start_index+1]
- && chars[start_index] == chars[start_index+2]
- && chars[start_index] == chars[start_index+3];
-}
-
-} // namespace
-
-// static
-// Wraps the CompressImpl function with a bunch of DCHECKs.
-std::string UniquePosition::Compress(const std::string& str) {
- DCHECK(IsValidBytes(str));
- std::string compressed = CompressImpl(str);
- DCHECK(IsValidCompressed(compressed));
- DCHECK_EQ(str, Uncompress(compressed));
- return compressed;
-}
-
-// static
-// Performs the order preserving run length compression of a given input string.
-std::string UniquePosition::CompressImpl(const std::string& str) {
- std::string output;
-
- // The compressed length will usually be at least as long as the suffix (28),
- // since the suffix bytes are mostly random. Most are a few bytes longer; a
- // small few are tens of bytes longer. Some early tests indicated that
- // roughly 99% had length 40 or smaller. We guess that pre-sizing for 48 is a
- // good trade-off, but that has not been confirmed through profiling.
- output.reserve(48);
-
- // Each loop iteration will consume 8, or N bytes, where N >= 4 and is the
- // length of a string of identical digits starting at i.
- for (size_t i = 0; i < str.length(); ) {
- if (i + 4 <= str.length() && IsRepeatedCharPrefix(str, i)) {
- // Four identical bytes in a row at this position means that we must start
- // a repeated character block. Begin by outputting those four bytes.
- output.append(str, i, 4);
-
- // Determine the size of the run.
- const char rep_digit = str[i];
- const size_t runs_until = str.find_first_not_of(rep_digit, i+4);
-
- // Handle the 'runs until end' special case specially.
- size_t run_length;
- bool encode_high; // True if the next byte is greater than |rep_digit|.
- if (runs_until == std::string::npos) {
- run_length = str.length() - i;
- encode_high = false;
- } else {
- run_length = runs_until - i;
- encode_high = static_cast<uint8>(str[runs_until]) >
- static_cast<uint8>(rep_digit);
- }
- DCHECK_LT(run_length, static_cast<size_t>(kint32max))
- << "This implementation can't encode run-lengths greater than 2^31.";
-
- WriteEncodedRunLength(run_length, encode_high, &output);
- i += run_length; // Jump forward by the size of the run length.
- } else {
- // Output up to eight bytes without any encoding.
- const size_t len = std::min(static_cast<size_t>(8), str.length() - i);
- output.append(str, i, len);
- i += len; // Jump forward by the amount of input consumed (usually 8).
- }
- }
-
- return output;
-}
-
-// static
-// Uncompresses strings that were compresed with UniquePosition::Compress.
-std::string UniquePosition::Uncompress(const std::string& str) {
- std::string output;
- size_t i = 0;
- // Iterate through the compressed string one block at a time.
- for (i = 0; i + 8 <= str.length(); i += 8) {
- if (IsRepeatedCharPrefix(str, i)) {
- // Found a repeated character block. Expand it.
- const char rep_digit = str[i];
- uint32 length = ReadEncodedRunLength(str, i+4);
- output.append(length, rep_digit);
- } else {
- // Found a regular block. Copy it.
- output.append(str, i, 8);
- }
- }
- // Copy the remaining bytes that were too small to form a block.
- output.append(str, i, std::string::npos);
- return output;
-}
-
-bool UniquePosition::IsValidCompressed(const std::string& str) {
- for (size_t i = 0; i + 8 <= str.length(); i += 8) {
- if (IsRepeatedCharPrefix(str, i)) {
- uint32 count = ReadEncodedRunLength(str, i+4);
- if (count < 4) {
- // A repeated character block should at least represent the four
- // characters that started it.
- return false;
- }
- if (str[i] == str[i+4]) {
- // Does the next digit after a count match the repeated character? Then
- // this is not the highest possible count.
- return false;
- }
- }
- }
- // We don't bother looking for the existence or checking the validity of
- // any partial blocks. There's no way they could be invalid anyway.
- return true;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base/unique_position.h b/chromium/sync/internal_api/public/base/unique_position.h
deleted file mode 100644
index eee53240ab1..00000000000
--- a/chromium/sync/internal_api/public/base/unique_position.h
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_UNIQUE_POSITION_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_UNIQUE_POSITION_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "sync/base/sync_export.h"
-
-namespace sync_pb {
-class UniquePosition;
-}
-
-namespace syncer {
-
-// A class to represent positions.
-//
-// Valid UniquePosition objects have the following properties:
-//
-// - a < b and b < c implies a < c (transitivity);
-// - exactly one of a < b, b < a and a = b holds (trichotomy);
-// - if a < b, there is a UniquePosition such that a < x < b (density);
-// - there are UniquePositions x and y such that x < a < y (unboundedness);
-// - if a and b were constructed with different unique suffixes, then a != b.
-//
-// As long as all UniquePositions used to sort a list were created with unique
-// suffixes, then if any item changes its position in the list, only its
-// UniquePosition value has to change to represent the new order, and all other
-// values can stay the same.
-//
-// Note that the unique suffixes must be exactly |kSuffixLength| bytes long.
-//
-// The cost for all these features is potentially unbounded space usage. In
-// practice, however, most ordinals should be not much longer than the suffix.
-//
-// This class currently has several bookmarks-related assumptions built in,
-// though it could be adapted to be more generally useful.
-class SYNC_EXPORT_PRIVATE UniquePosition {
- public:
- static const size_t kSuffixLength;
- static const size_t kCompressBytesThreshold;
-
- static bool IsValidSuffix(const std::string& suffix);
- static bool IsValidBytes(const std::string& bytes);
-
- // Returns an invalid position.
- static UniquePosition CreateInvalid();
-
- // Converts from a 'sync_pb::UniquePosition' protobuf to a UniquePosition.
- // This may return an invalid position if the parsing fails.
- static UniquePosition FromProto(const sync_pb::UniquePosition& proto);
-
- // Creates a position with the given suffix. Ordering among positions created
- // from this function is the same as that of the integer parameters that were
- // passed in.
- static UniquePosition FromInt64(int64 i, const std::string& suffix);
-
- // Returns a valid position. Its ordering is not defined.
- static UniquePosition InitialPosition(const std::string& suffix);
-
- // Returns positions compare smaller than, greater than, or between the input
- // positions.
- static UniquePosition Before(const UniquePosition& x,
- const std::string& suffix);
- static UniquePosition After(const UniquePosition& x,
- const std::string& suffix);
- static UniquePosition Between(const UniquePosition& before,
- const UniquePosition& after,
- const std::string& suffix);
-
- // This constructor creates an invalid value.
- UniquePosition();
-
- bool LessThan(const UniquePosition& other) const;
- bool Equals(const UniquePosition& other) const;
-
- // Serializes the position's internal state to a protobuf.
- void ToProto(sync_pb::UniquePosition* proto) const;
-
- // Serializes the protobuf representation of this object as a string.
- void SerializeToString(std::string* blob) const;
-
- // Returns a human-readable representation of this item's internal state.
- std::string ToDebugString() const;
-
- // Returns the suffix.
- std::string GetSuffixForTest() const;
-
- // Performs a lossy conversion to an int64 position. Positions converted to
- // and from int64s using this and the FromInt64 function should maintain their
- // relative orderings unless the int64 values conflict.
- int64 ToInt64() const;
-
- bool IsValid() const;
-
- private:
- friend class UniquePositionTest;
-
- // Returns a string X such that (X ++ |suffix|) < |str|.
- // |str| must be a trailing substring of a valid ordinal.
- // |suffix| must be a valid unique suffix.
- static std::string FindSmallerWithSuffix(const std::string& str,
- const std::string& suffix);
- // Returns a string X such that (X ++ |suffix|) > |str|.
- // |str| must be a trailing substring of a valid ordinal.
- // |suffix| must be a valid unique suffix.
- static std::string FindGreaterWithSuffix(const std::string& str,
- const std::string& suffix);
- // Returns a string X such that |before| < (X ++ |suffix|) < |after|.
- // |before| and after must be a trailing substrings of valid ordinals.
- // |suffix| must be a valid unique suffix.
- static std::string FindBetweenWithSuffix(const std::string& before,
- const std::string& after,
- const std::string& suffix);
-
- // Expects a run-length compressed string as input. For internal use only.
- explicit UniquePosition(const std::string& internal_rep);
-
- // Expects an uncompressed prefix and suffix as input. The |suffix| parameter
- // must be a suffix of |uncompressed|. For internal use only.
- UniquePosition(const std::string& uncompressed, const std::string& suffix);
-
- // Implementation of an order-preserving run-length compression scheme.
- static std::string Compress(const std::string& input);
- static std::string CompressImpl(const std::string& input);
- static std::string Uncompress(const std::string& compressed);
- static bool IsValidCompressed(const std::string& str);
-
- // The position value after it has been run through the custom compression
- // algorithm. See Compress() and Uncompress() functions above.
- std::string compressed_;
- bool is_valid_;
-};
-
-} // namespace syncer;
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_UNIQUE_POSITION_H_
diff --git a/chromium/sync/internal_api/public/base/unique_position_unittest.cc b/chromium/sync/internal_api/public/base/unique_position_unittest.cc
deleted file mode 100644
index 4c0a66047f1..00000000000
--- a/chromium/sync/internal_api/public/base/unique_position_unittest.cc
+++ /dev/null
@@ -1,680 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/unique_position.h"
-
-#include <algorithm>
-#include <string>
-
-#include "base/base64.h"
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/sha1.h"
-#include "base/strings/string_number_conversions.h"
-#include "sync/protocol/unique_position.pb.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class UniquePositionTest : public ::testing::Test {
- protected:
- // Accessor to fetch the length of the position's internal representation
- // We try to avoid having any test expectations on it because this is an
- // implementation detail.
- //
- // If you run the tests with --v=1, we'll print out some of the lengths
- // so you can see how well the algorithm performs in various insertion
- // scenarios.
- size_t GetLength(const UniquePosition& pos) {
- sync_pb::UniquePosition proto;
- pos.ToProto(&proto);
- return proto.ByteSize();
- }
-};
-
-// This function exploits internal knowledge of how the protobufs are serialized
-// to help us build UniquePositions from strings described in this file.
-static UniquePosition FromBytes(const std::string& bytes) {
- sync_pb::UniquePosition proto;
- proto.set_value(bytes);
- return UniquePosition::FromProto(proto);
-}
-
-const size_t kMinLength = UniquePosition::kSuffixLength;
-const size_t kGenericPredecessorLength = kMinLength + 2;
-const size_t kGenericSuccessorLength = kMinLength + 1;
-const size_t kBigPositionLength = kMinLength;
-const size_t kSmallPositionLength = kMinLength;
-
-// Be careful when adding more prefixes to this list.
-// We have to manually ensure each has a unique suffix.
-const UniquePosition kGenericPredecessor = FromBytes(
- (std::string(kGenericPredecessorLength, '\x23') + '\xFF'));
-const UniquePosition kGenericSuccessor = FromBytes(
- std::string(kGenericSuccessorLength, '\xAB') + '\xFF');
-const UniquePosition kBigPosition = FromBytes(
- std::string(kBigPositionLength - 1, '\xFF') + '\xFE' + '\xFF');
-const UniquePosition kBigPositionLessTwo = FromBytes(
- std::string(kBigPositionLength - 1, '\xFF') + '\xFC' + '\xFF');
-const UniquePosition kBiggerPosition = FromBytes(
- std::string(kBigPositionLength, '\xFF') + '\xFF');
-const UniquePosition kSmallPosition = FromBytes(
- std::string(kSmallPositionLength - 1, '\x00') + '\x01' + '\xFF');
-const UniquePosition kSmallPositionPlusOne = FromBytes(
- std::string(kSmallPositionLength - 1, '\x00') + '\x02' + '\xFF');
-const UniquePosition kHugePosition = FromBytes(
- std::string(UniquePosition::kCompressBytesThreshold, '\xFF') + '\xAB');
-
-const std::string kMinSuffix =
- std::string(UniquePosition::kSuffixLength - 1, '\x00') + '\x01';
-const std::string kMaxSuffix(UniquePosition::kSuffixLength, '\xFF');
-const std::string kNormalSuffix(
- "\x68\x44\x6C\x6B\x32\x58\x78\x34\x69\x70\x46\x34\x79\x49"
- "\x44\x4F\x66\x4C\x58\x41\x31\x34\x68\x59\x56\x43\x6F\x3D");
-
-::testing::AssertionResult LessThan(const char* m_expr,
- const char* n_expr,
- const UniquePosition &m,
- const UniquePosition &n) {
- if (m.LessThan(n))
- return ::testing::AssertionSuccess();
-
- return ::testing::AssertionFailure()
- << m_expr << " is not less than " << n_expr
- << " (" << m.ToDebugString() << " and " << n.ToDebugString() << ")";
-}
-
-::testing::AssertionResult Equals(const char* m_expr,
- const char* n_expr,
- const UniquePosition &m,
- const UniquePosition &n) {
- if (m.Equals(n))
- return ::testing::AssertionSuccess();
-
- return ::testing::AssertionFailure()
- << m_expr << " is not equal to " << n_expr
- << " (" << m.ToDebugString() << " != " << n.ToDebugString() << ")";
-}
-
-// Test that the code can read the uncompressed serialization format.
-TEST_F(UniquePositionTest, DeserializeObsoleteUncompressedPosition) {
- // We no longer support the encoding data in this format. This hard-coded
- // input is a serialization of kGenericPredecessor created by an older version
- // of this code.
- const char kSerializedCstr[] = {
- '\x0a', '\x1f', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23',
- '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23',
- '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23', '\x23',
- '\x23', '\x23', '\x23', '\x23', '\x23', '\xff' };
- const std::string serialized(kSerializedCstr, sizeof(kSerializedCstr));
-
- sync_pb::UniquePosition proto;
- proto.ParseFromString(serialized);
-
- // Double-check that this test is testing what we think it tests.
- EXPECT_TRUE(proto.has_value());
- EXPECT_FALSE(proto.has_compressed_value());
- EXPECT_FALSE(proto.has_uncompressed_length());
-
- UniquePosition pos = UniquePosition::FromProto(proto);
- EXPECT_PRED_FORMAT2(Equals, kGenericPredecessor, pos);
-}
-
-// Test that the code can read the gzip serialization format.
-TEST_F(UniquePositionTest, DeserializeObsoleteGzippedPosition) {
- // We no longer support the encoding data in this format. This hard-coded
- // input is a serialization of kHugePosition created by an older version of
- // this code.
- const char kSerializedCstr[] = {
- '\x12', '\x0d', '\x78', '\x9c', '\xfb', '\xff', '\x7f', '\x60', '\xc1',
- '\x6a', '\x00', '\xa2', '\x4c', '\x80', '\x2c', '\x18', '\x81', '\x01' };
- const std::string serialized(kSerializedCstr, sizeof(kSerializedCstr));
-
- sync_pb::UniquePosition proto;
- proto.ParseFromString(serialized);
-
- // Double-check that this test is testing what we think it tests.
- EXPECT_FALSE(proto.has_value());
- EXPECT_TRUE(proto.has_compressed_value());
- EXPECT_TRUE(proto.has_uncompressed_length());
-
- UniquePosition pos = UniquePosition::FromProto(proto);
- EXPECT_PRED_FORMAT2(Equals, kHugePosition, pos);
-}
-
-class RelativePositioningTest : public UniquePositionTest { };
-
-const UniquePosition kPositionArray[] = {
- kGenericPredecessor,
- kGenericSuccessor,
- kBigPosition,
- kBigPositionLessTwo,
- kBiggerPosition,
- kSmallPosition,
- kSmallPositionPlusOne,
-};
-
-const UniquePosition kSortedPositionArray[] = {
- kSmallPosition,
- kSmallPositionPlusOne,
- kGenericPredecessor,
- kGenericSuccessor,
- kBigPositionLessTwo,
- kBigPosition,
- kBiggerPosition,
-};
-
-static const size_t kNumPositions = arraysize(kPositionArray);
-static const size_t kNumSortedPositions = arraysize(kSortedPositionArray);
-
-struct PositionLessThan {
- bool operator()(const UniquePosition& a, const UniquePosition& b) {
- return a.LessThan(b);
- }
-};
-
-// Returns true iff the given position's suffix matches the input parameter.
-static bool IsSuffixInUse(
- const UniquePosition& pos, const std::string& suffix) {
- return pos.GetSuffixForTest() == suffix;
-}
-
-// Test some basic properties of comparison and equality.
-TEST_F(RelativePositioningTest, ComparisonSanityTest1) {
- const UniquePosition& a = kPositionArray[0];
- ASSERT_TRUE(a.IsValid());
-
- // Necessarily true for any non-invalid positions.
- EXPECT_TRUE(a.Equals(a));
- EXPECT_FALSE(a.LessThan(a));
-}
-
-// Test some more properties of comparison and equality.
-TEST_F(RelativePositioningTest, ComparisonSanityTest2) {
- const UniquePosition& a = kPositionArray[0];
- const UniquePosition& b = kPositionArray[1];
-
- // These should pass for the specific a and b we have chosen (a < b).
- EXPECT_FALSE(a.Equals(b));
- EXPECT_TRUE(a.LessThan(b));
- EXPECT_FALSE(b.LessThan(a));
-}
-
-// Exercise comparision functions by sorting and re-sorting the list.
-TEST_F(RelativePositioningTest, SortPositions) {
- ASSERT_EQ(kNumPositions, kNumSortedPositions);
- UniquePosition positions[arraysize(kPositionArray)];
- for (size_t i = 0; i < kNumPositions; ++i) {
- positions[i] = kPositionArray[i];
- }
-
- std::sort(&positions[0], &positions[kNumPositions], PositionLessThan());
- for (size_t i = 0; i < kNumPositions; ++i) {
- EXPECT_TRUE(positions[i].Equals(kSortedPositionArray[i]))
- << "i: " << i << ", "
- << positions[i].ToDebugString() << " != "
- << kSortedPositionArray[i].ToDebugString();
- }
-
-}
-
-// Some more exercise for the comparison function.
-TEST_F(RelativePositioningTest, ReverseSortPositions) {
- ASSERT_EQ(kNumPositions, kNumSortedPositions);
- UniquePosition positions[arraysize(kPositionArray)];
- for (size_t i = 0; i < kNumPositions; ++i) {
- positions[i] = kPositionArray[i];
- }
-
- std::reverse(&positions[0], &positions[kNumPositions]);
- std::sort(&positions[0], &positions[kNumPositions], PositionLessThan());
- for (size_t i = 0; i < kNumPositions; ++i) {
- EXPECT_TRUE(positions[i].Equals(kSortedPositionArray[i]))
- << "i: " << i << ", "
- << positions[i].ToDebugString() << " != "
- << kSortedPositionArray[i].ToDebugString();
- }
-}
-
-class PositionInsertTest :
- public RelativePositioningTest,
- public ::testing::WithParamInterface<std::string> { };
-
-// Exercise InsertBetween with various insertion operations.
-TEST_P(PositionInsertTest, InsertBetween) {
- const std::string suffix = GetParam();
- ASSERT_TRUE(UniquePosition::IsValidSuffix(suffix));
-
- for (size_t i = 0; i < kNumSortedPositions; ++i) {
- const UniquePosition& predecessor = kSortedPositionArray[i];
- // Verify our suffixes are unique before we continue.
- if (IsSuffixInUse(predecessor, suffix))
- continue;
-
- for (size_t j = i + 1; j < kNumSortedPositions; ++j) {
- const UniquePosition& successor = kSortedPositionArray[j];
-
- // Another guard against non-unique suffixes.
- if (IsSuffixInUse(successor, suffix))
- continue;
-
- UniquePosition midpoint =
- UniquePosition::Between(predecessor, successor, suffix);
-
- EXPECT_PRED_FORMAT2(LessThan, predecessor, midpoint);
- EXPECT_PRED_FORMAT2(LessThan, midpoint, successor);
- }
- }
-}
-
-TEST_P(PositionInsertTest, InsertBefore) {
- const std::string suffix = GetParam();
- for (size_t i = 0; i < kNumSortedPositions; ++i) {
- const UniquePosition& successor = kSortedPositionArray[i];
- // Verify our suffixes are unique before we continue.
- if (IsSuffixInUse(successor, suffix))
- continue;
-
- UniquePosition before = UniquePosition::Before(successor, suffix);
-
- EXPECT_PRED_FORMAT2(LessThan, before, successor);
- }
-}
-
-TEST_P(PositionInsertTest, InsertAfter) {
- const std::string suffix = GetParam();
- for (size_t i = 0; i < kNumSortedPositions; ++i) {
- const UniquePosition& predecessor = kSortedPositionArray[i];
- // Verify our suffixes are unique before we continue.
- if (IsSuffixInUse(predecessor, suffix))
- continue;
-
- UniquePosition after = UniquePosition::After(predecessor, suffix);
-
- EXPECT_PRED_FORMAT2(LessThan, predecessor, after);
- }
-}
-
-TEST_P(PositionInsertTest, StressInsertAfter) {
- // Use two different suffixes to not violate our suffix uniqueness guarantee.
- const std::string& suffix_a = GetParam();
- std::string suffix_b = suffix_a;
- suffix_b[10] = suffix_b[10] ^ 0xff;
-
- UniquePosition pos = UniquePosition::InitialPosition(suffix_a);
- for (int i = 0; i < 1024; i++) {
- const std::string& suffix = (i % 2 == 0) ? suffix_b : suffix_a;
- UniquePosition next_pos = UniquePosition::After(pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, pos, next_pos);
- pos = next_pos;
- }
-
- VLOG(1) << "Length: " << GetLength(pos);
-}
-
-TEST_P(PositionInsertTest, StressInsertBefore) {
- // Use two different suffixes to not violate our suffix uniqueness guarantee.
- const std::string& suffix_a = GetParam();
- std::string suffix_b = suffix_a;
- suffix_b[10] = suffix_b[10] ^ 0xff;
-
- UniquePosition pos = UniquePosition::InitialPosition(suffix_a);
- for (int i = 0; i < 1024; i++) {
- const std::string& suffix = (i % 2 == 0) ? suffix_b : suffix_a;
- UniquePosition prev_pos = UniquePosition::Before(pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, prev_pos, pos);
- pos = prev_pos;
- }
-
- VLOG(1) << "Length: " << GetLength(pos);
-}
-
-TEST_P(PositionInsertTest, StressLeftInsertBetween) {
- // Use different suffixes to not violate our suffix uniqueness guarantee.
- const std::string& suffix_a = GetParam();
- std::string suffix_b = suffix_a;
- suffix_b[10] = suffix_b[10] ^ 0xff;
- std::string suffix_c = suffix_a;
- suffix_c[10] = suffix_c[10] ^ 0xf0;
-
- UniquePosition right_pos = UniquePosition::InitialPosition(suffix_c);
- UniquePosition left_pos = UniquePosition::Before(right_pos, suffix_a);
- for (int i = 0; i < 1024; i++) {
- const std::string& suffix = (i % 2 == 0) ? suffix_b : suffix_a;
- UniquePosition new_pos =
- UniquePosition::Between(left_pos, right_pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, left_pos, new_pos);
- ASSERT_PRED_FORMAT2(LessThan, new_pos, right_pos);
- left_pos = new_pos;
- }
-
- VLOG(1) << "Lengths: " << GetLength(left_pos) << ", " << GetLength(right_pos);
-}
-
-TEST_P(PositionInsertTest, StressRightInsertBetween) {
- // Use different suffixes to not violate our suffix uniqueness guarantee.
- const std::string& suffix_a = GetParam();
- std::string suffix_b = suffix_a;
- suffix_b[10] = suffix_b[10] ^ 0xff;
- std::string suffix_c = suffix_a;
- suffix_c[10] = suffix_c[10] ^ 0xf0;
-
- UniquePosition right_pos = UniquePosition::InitialPosition(suffix_a);
- UniquePosition left_pos = UniquePosition::Before(right_pos, suffix_c);
- for (int i = 0; i < 1024; i++) {
- const std::string& suffix = (i % 2 == 0) ? suffix_b : suffix_a;
- UniquePosition new_pos =
- UniquePosition::Between(left_pos, right_pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, left_pos, new_pos);
- ASSERT_PRED_FORMAT2(LessThan, new_pos, right_pos);
- right_pos = new_pos;
- }
-
- VLOG(1) << "Lengths: " << GetLength(left_pos) << ", " << GetLength(right_pos);
-}
-
-// Generates suffixes similar to those generated by the directory.
-// This may become obsolete if the suffix generation code is modified.
-class SuffixGenerator {
- public:
- explicit SuffixGenerator(const std::string& cache_guid)
- : cache_guid_(cache_guid),
- next_id_(-65535) {
- }
-
- std::string NextSuffix() {
- // This is not entirely realistic, but that should be OK. The current
- // suffix format is a base64'ed SHA1 hash, which should be fairly close to
- // random anyway.
- std::string input = cache_guid_ + base::Int64ToString(next_id_--);
- std::string output;
- base::Base64Encode(base::SHA1HashString(input), &output);
- return output;
- }
-
- private:
- const std::string cache_guid_;
- int64 next_id_;
-};
-
-// Cache guids generated in the same style as real clients.
-static const char kCacheGuidStr1[] = "tuiWdG8hV+8y4RT9N5Aikg==";
-static const char kCacheGuidStr2[] = "yaKb7zHtY06aue9a0vlZgw==";
-
-class PositionScenariosTest : public UniquePositionTest {
- public:
- PositionScenariosTest()
- : generator1_(std::string(kCacheGuidStr1, arraysize(kCacheGuidStr1)-1)),
- generator2_(std::string(kCacheGuidStr2, arraysize(kCacheGuidStr2)-1)) {
- }
-
- std::string NextClient1Suffix() {
- return generator1_.NextSuffix();
- }
-
- std::string NextClient2Suffix() {
- return generator2_.NextSuffix();
- }
-
- private:
- SuffixGenerator generator1_;
- SuffixGenerator generator2_;
-};
-
-// One client creating new bookmarks, always inserting at the end.
-TEST_F(PositionScenariosTest, OneClientInsertAtEnd) {
- UniquePosition pos =
- UniquePosition::InitialPosition(NextClient1Suffix());
- for (int i = 0; i < 1024; i++) {
- const std::string suffix = NextClient1Suffix();
- UniquePosition new_pos = UniquePosition::After(pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, pos, new_pos);
- pos = new_pos;
- }
-
- VLOG(1) << "Length: " << GetLength(pos);
-
- // Normally we wouldn't want to make an assertion about the internal
- // representation of our data, but we make an exception for this case.
- // If this scenario causes lengths to explode, we have a big problem.
- EXPECT_LT(GetLength(pos), 500U);
-}
-
-// Two clients alternately inserting entries at the end, with a strong
-// bias towards insertions by the first client.
-TEST_F(PositionScenariosTest, TwoClientsInsertAtEnd_A) {
- UniquePosition pos =
- UniquePosition::InitialPosition(NextClient1Suffix());
- for (int i = 0; i < 1024; i++) {
- std::string suffix;
- if (i % 5 == 0) {
- suffix = NextClient2Suffix();
- } else {
- suffix = NextClient1Suffix();
- }
-
- UniquePosition new_pos = UniquePosition::After(pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, pos, new_pos);
- pos = new_pos;
- }
-
- VLOG(1) << "Length: " << GetLength(pos);
- EXPECT_LT(GetLength(pos), 500U);
-}
-
-// Two clients alternately inserting entries at the end.
-TEST_F(PositionScenariosTest, TwoClientsInsertAtEnd_B) {
- UniquePosition pos =
- UniquePosition::InitialPosition(NextClient1Suffix());
- for (int i = 0; i < 1024; i++) {
- std::string suffix;
- if (i % 2 == 0) {
- suffix = NextClient1Suffix();
- } else {
- suffix = NextClient2Suffix();
- }
-
- UniquePosition new_pos = UniquePosition::After(pos, suffix);
- ASSERT_PRED_FORMAT2(LessThan, pos, new_pos);
- pos = new_pos;
- }
-
- VLOG(1) << "Length: " << GetLength(pos);
- EXPECT_LT(GetLength(pos), 500U);
-}
-
-INSTANTIATE_TEST_CASE_P(MinSuffix, PositionInsertTest,
- ::testing::Values(kMinSuffix));
-INSTANTIATE_TEST_CASE_P(MaxSuffix, PositionInsertTest,
- ::testing::Values(kMaxSuffix));
-INSTANTIATE_TEST_CASE_P(NormalSuffix, PositionInsertTest,
- ::testing::Values(kNormalSuffix));
-
-class PositionFromIntTest : public UniquePositionTest {
- public:
- PositionFromIntTest()
- : generator_(std::string(kCacheGuidStr1, arraysize(kCacheGuidStr1)-1)) {
- }
-
- protected:
- static const int64 kTestValues[];
- static const size_t kNumTestValues;
-
- std::string NextSuffix() {
- return generator_.NextSuffix();
- }
-
- private:
- SuffixGenerator generator_;
-};
-
-const int64 PositionFromIntTest::kTestValues[] = {
- 0LL,
- 1LL, -1LL,
- 2LL, -2LL,
- 3LL, -3LL,
- 0x79LL, -0x79LL,
- 0x80LL, -0x80LL,
- 0x81LL, -0x81LL,
- 0xFELL, -0xFELL,
- 0xFFLL, -0xFFLL,
- 0x100LL, -0x100LL,
- 0x101LL, -0x101LL,
- 0xFA1AFELL, -0xFA1AFELL,
- 0xFFFFFFFELL, -0xFFFFFFFELL,
- 0xFFFFFFFFLL, -0xFFFFFFFFLL,
- 0x100000000LL, -0x100000000LL,
- 0x100000001LL, -0x100000001LL,
- 0xFFFFFFFFFFLL, -0xFFFFFFFFFFLL,
- 0x112358132134LL, -0x112358132134LL,
- 0xFEFFBEEFABC1234LL, -0xFEFFBEEFABC1234LL,
- kint64max,
- kint64min,
- kint64min + 1,
- kint64max - 1
-};
-
-const size_t PositionFromIntTest::kNumTestValues =
-arraysize(PositionFromIntTest::kTestValues);
-
-TEST_F(PositionFromIntTest, IsValid) {
- for (size_t i = 0; i < kNumTestValues; ++i) {
- const UniquePosition pos =
- UniquePosition::FromInt64(kTestValues[i], NextSuffix());
- EXPECT_TRUE(pos.IsValid()) << "i = " << i << "; " << pos.ToDebugString();
- }
-}
-
-TEST_F(PositionFromIntTest, RoundTripConversion) {
- for (size_t i = 0; i < kNumTestValues; ++i) {
- const int64 expected_value = kTestValues[i];
- const UniquePosition pos =
- UniquePosition::FromInt64(kTestValues[i], NextSuffix());
- const int64 value = pos.ToInt64();
- EXPECT_EQ(expected_value, value) << "i = " << i;
- }
-}
-
-template <typename T, typename LessThan = std::less<T> >
-class IndexedLessThan {
- public:
- IndexedLessThan(const T* values) : values_(values) {}
-
- bool operator()(int i1, int i2) {
- return less_than_(values_[i1], values_[i2]);
- }
-
- private:
- const T* values_;
- LessThan less_than_;
-};
-
-TEST_F(PositionFromIntTest, ConsistentOrdering) {
- UniquePosition positions[kNumTestValues];
- std::vector<int> original_ordering(kNumTestValues);
- std::vector<int> int64_ordering(kNumTestValues);
- std::vector<int> position_ordering(kNumTestValues);
- for (size_t i = 0; i < kNumTestValues; ++i) {
- positions[i] = UniquePosition::FromInt64(
- kTestValues[i], NextSuffix());
- original_ordering[i] = int64_ordering[i] = position_ordering[i] = i;
- }
-
- std::sort(int64_ordering.begin(), int64_ordering.end(),
- IndexedLessThan<int64>(kTestValues));
- std::sort(position_ordering.begin(), position_ordering.end(),
- IndexedLessThan<UniquePosition, PositionLessThan>(positions));
- EXPECT_NE(original_ordering, int64_ordering);
- EXPECT_EQ(int64_ordering, position_ordering);
-}
-
-class CompressedPositionTest : public UniquePositionTest {
- public:
- CompressedPositionTest() {
- positions_.push_back(
- MakePosition( // Prefix starts with 256 0x00s
- std::string("\x00\x00\x00\x00\xFF\xFF\xFE\xFF" "\x01", 9),
- MakeSuffix('\x04')));
- positions_.push_back(
- MakePosition( // Prefix starts with four 0x00s
- std::string("\x00\x00\x00\x00\xFF\xFF\xFF\xFB" "\x01", 9),
- MakeSuffix('\x03')));
- positions_.push_back(
- MakePosition( // Prefix starts with four 0xFFs
- std::string("\xFF\xFF\xFF\xFF\x00\x00\x00\x04" "\x01", 9),
- MakeSuffix('\x01')));
- positions_.push_back(
- MakePosition( // Prefix starts with 256 0xFFs
- std::string("\xFF\xFF\xFF\xFF\x00\x00\x01\x00" "\x01", 9),
- MakeSuffix('\x02')));
- }
-
- private:
- UniquePosition MakePosition(const std::string& compressed_prefix,
- const std::string& compressed_suffix);
- std::string MakeSuffix(char unique_value);
-
- protected:
- std::vector<UniquePosition> positions_;
-};
-
-UniquePosition CompressedPositionTest::MakePosition(
- const std::string& compressed_prefix,
- const std::string& compressed_suffix) {
- sync_pb::UniquePosition proto;
- proto.set_custom_compressed_v1(
- std::string(compressed_prefix + compressed_suffix));
- return UniquePosition::FromProto(proto);
-}
-
-std::string CompressedPositionTest::MakeSuffix(char unique_value) {
- // We're dealing in compressed positions in this test. That means the
- // suffix should be compressed, too. To avoid complication, we use suffixes
- // that don't have any repeating digits, and therefore are identical in
- // compressed and uncompressed form.
- std::string suffix;
- for (size_t i = 0; i < UniquePosition::kSuffixLength; ++i) {
- suffix.push_back(static_cast<char>(i));
- }
- suffix[UniquePosition::kSuffixLength-1] = unique_value;
- return suffix;
-}
-
-// Make sure that serialization and deserialization routines are correct.
-TEST_F(CompressedPositionTest, SerializeAndDeserialize) {
- for (std::vector<UniquePosition>::const_iterator it = positions_.begin();
- it != positions_.end(); ++it) {
- SCOPED_TRACE("iteration: " + it->ToDebugString());
-
- sync_pb::UniquePosition proto;
- it->ToProto(&proto);
- UniquePosition deserialized = UniquePosition::FromProto(proto);
-
- EXPECT_PRED_FORMAT2(Equals, *it, deserialized);
- }
-}
-
-// Test that deserialization failures of protobufs where we know none of its
-// fields is not catastrophic. This may happen if all the fields currently
-// known to this client become deprecated in the future.
-TEST_F(CompressedPositionTest, DeserializeProtobufFromTheFuture) {
- sync_pb::UniquePosition proto;
- UniquePosition deserialized = UniquePosition::FromProto(proto);
- EXPECT_FALSE(deserialized.IsValid());
-}
-
-// Make sure the comparison functions are working correctly.
-// This requires values in the test harness to be hard-coded in ascending order.
-TEST_F(CompressedPositionTest, OrderingTest) {
- for (size_t i = 0; i < positions_.size()-1; ++i) {
- EXPECT_PRED_FORMAT2(LessThan, positions_[i], positions_[i+1]);
- }
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/base_node.h b/chromium/sync/internal_api/public/base_node.h
deleted file mode 100644
index 4f285dc68e3..00000000000
--- a/chromium/sync/internal_api/public/base_node.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_NODE_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_NODE_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync.pb.h"
-#include "url/gurl.h"
-
-// Forward declarations of internal class types so that sync API objects
-// may have opaque pointers to these types.
-namespace base {
-class DictionaryValue;
-}
-
-namespace sync_pb {
-class AppSpecifics;
-class AutofillSpecifics;
-class AutofillProfileSpecifics;
-class BookmarkSpecifics;
-class EntitySpecifics;
-class ExtensionSpecifics;
-class SessionSpecifics;
-class NigoriSpecifics;
-class PreferenceSpecifics;
-class PasswordSpecificsData;
-class ThemeSpecifics;
-class TypedUrlSpecifics;
-}
-
-namespace syncer {
-
-class BaseTransaction;
-
-namespace syncable {
-class BaseTransaction;
-class Entry;
-}
-
-// A valid BaseNode will never have an ID of zero.
-static const int64 kInvalidId = 0;
-
-// BaseNode wraps syncable::Entry, and corresponds to a single object's state.
-// This, like syncable::Entry, is intended for use on the stack. A valid
-// transaction is necessary to create a BaseNode or any of its children.
-// Unlike syncable::Entry, a sync API BaseNode is identified primarily by its
-// int64 metahandle, which we call an ID here.
-class SYNC_EXPORT BaseNode {
- public:
- // Enumerates the possible outcomes of trying to initialize a sync node.
- enum InitByLookupResult {
- INIT_OK,
- // Could not find an entry matching the lookup criteria.
- INIT_FAILED_ENTRY_NOT_GOOD,
- // Found an entry, but it is already deleted.
- INIT_FAILED_ENTRY_IS_DEL,
- // Found an entry, but was unable to decrypt.
- INIT_FAILED_DECRYPT_IF_NECESSARY,
- // A precondition was not met for calling init, such as legal input
- // arguments.
- INIT_FAILED_PRECONDITION,
- };
-
- // All subclasses of BaseNode must provide a way to initialize themselves by
- // doing an ID lookup. Returns false on failure. An invalid or deleted
- // ID will result in failure.
- virtual InitByLookupResult InitByIdLookup(int64 id) = 0;
-
- // All subclasses of BaseNode must also provide a way to initialize themselves
- // by doing a client tag lookup. Returns false on failure. A deleted node
- // will return FALSE.
- virtual InitByLookupResult InitByClientTagLookup(
- ModelType model_type,
- const std::string& tag) = 0;
-
- // Each object is identified by a 64-bit id (internally, the syncable
- // metahandle). These ids are strictly local handles. They will persist
- // on this client, but the same object on a different client may have a
- // different ID value.
- virtual int64 GetId() const;
-
- // Returns the modification time of the object.
- base::Time GetModificationTime() const;
-
- // Nodes are hierarchically arranged into a single-rooted tree.
- // InitByRootLookup on ReadNode allows access to the root. GetParentId is
- // how you find a node's parent.
- int64 GetParentId() const;
-
- // Nodes are either folders or not. This corresponds to the IS_DIR property
- // of syncable::Entry.
- bool GetIsFolder() const;
-
- // Returns the title of the object.
- // Uniqueness of the title is not enforced on siblings -- it is not an error
- // for two children to share a title.
- std::string GetTitle() const;
-
- // Returns the model type of this object. The model type is set at node
- // creation time and is expected never to change.
- ModelType GetModelType() const;
-
- // Getter specific to the BOOKMARK datatype. Returns protobuf
- // data. Can only be called if GetModelType() == BOOKMARK.
- const sync_pb::BookmarkSpecifics& GetBookmarkSpecifics() const;
-
- // Getter specific to the APPS datatype. Returns protobuf
- // data. Can only be called if GetModelType() == APPS.
- const sync_pb::AppSpecifics& GetAppSpecifics() const;
-
- // Getter specific to the AUTOFILL datatype. Returns protobuf
- // data. Can only be called if GetModelType() == AUTOFILL.
- const sync_pb::AutofillSpecifics& GetAutofillSpecifics() const;
-
- virtual const sync_pb::AutofillProfileSpecifics&
- GetAutofillProfileSpecifics() const;
-
- // Getter specific to the NIGORI datatype. Returns protobuf
- // data. Can only be called if GetModelType() == NIGORI.
- const sync_pb::NigoriSpecifics& GetNigoriSpecifics() const;
-
- // Getter specific to the PASSWORD datatype. Returns protobuf
- // data. Can only be called if GetModelType() == PASSWORD.
- const sync_pb::PasswordSpecificsData& GetPasswordSpecifics() const;
-
- // Getter specific to the PREFERENCE datatype. Returns protobuf
- // data. Can only be called if GetModelType() == PREFERENCE.
- const sync_pb::PreferenceSpecifics& GetPreferenceSpecifics() const;
-
- // Getter specific to the THEME datatype. Returns protobuf
- // data. Can only be called if GetModelType() == THEME.
- const sync_pb::ThemeSpecifics& GetThemeSpecifics() const;
-
- // Getter specific to the TYPED_URLS datatype. Returns protobuf
- // data. Can only be called if GetModelType() == TYPED_URLS.
- const sync_pb::TypedUrlSpecifics& GetTypedUrlSpecifics() const;
-
- // Getter specific to the EXTENSIONS datatype. Returns protobuf
- // data. Can only be called if GetModelType() == EXTENSIONS.
- const sync_pb::ExtensionSpecifics& GetExtensionSpecifics() const;
-
- // Getter specific to the SESSIONS datatype. Returns protobuf
- // data. Can only be called if GetModelType() == SESSIONS.
- const sync_pb::SessionSpecifics& GetSessionSpecifics() const;
-
- // Getter specific to the MANAGED_USER_SETTINGS datatype. Returns protobuf
- // data. Can only be called if GetModelType() == MANAGED_USER_SETTINGS.
- const sync_pb::ManagedUserSettingSpecifics&
- GetManagedUserSettingSpecifics() const;
-
- // Getter specific to the MANAGED_USERS datatype. Returns protobuf data.
- // Can only be called if GetModelType() == MANAGED_USERS.
- const sync_pb::ManagedUserSpecifics& GetManagedUserSpecifics() const;
-
- // Getter specific to the DEVICE_INFO datatype. Returns protobuf
- // data. Can only be called if GetModelType() == DEVICE_INFO.
- const sync_pb::DeviceInfoSpecifics& GetDeviceInfoSpecifics() const;
-
- // Getter specific to the EXPERIMENTS datatype. Returns protobuf
- // data. Can only be called if GetModelType() == EXPERIMENTS.
- const sync_pb::ExperimentsSpecifics& GetExperimentsSpecifics() const;
-
- // Getter specific to the PRIORITY_PREFERENCE datatype. Returns protobuf
- // data. Can only be called if GetModelType() == PRIORITY_PREFERENCE.
- const sync_pb::PriorityPreferenceSpecifics&
- GetPriorityPreferenceSpecifics() const;
-
- const sync_pb::EntitySpecifics& GetEntitySpecifics() const;
-
- // Returns the local external ID associated with the node.
- int64 GetExternalId() const;
-
- // Returns true iff this node has children.
- bool HasChildren() const;
-
- // Return the ID of the node immediately before this in the sibling order.
- // For the first node in the ordering, return 0.
- int64 GetPredecessorId() const;
-
- // Return the ID of the node immediately after this in the sibling order.
- // For the last node in the ordering, return 0.
- int64 GetSuccessorId() const;
-
- // Return the ID of the first child of this node. If this node has no
- // children, return 0.
- int64 GetFirstChildId() const;
-
- // Returns the IDs of the children of this node.
- // If this type supports user-defined positions the returned IDs will be in
- // the correct order.
- void GetChildIds(std::vector<int64>* result) const;
-
- // Returns the total number of nodes including and beneath this node.
- // Recursively iterates through all children.
- int GetTotalNodeCount() const;
-
- // Returns this item's position within its parent.
- // Do not call this function on items that do not support positioning
- // (ie. non-bookmarks).
- int GetPositionIndex() const;
-
- // These virtual accessors provide access to data members of derived classes.
- virtual const syncable::Entry* GetEntry() const = 0;
- virtual const BaseTransaction* GetTransaction() const = 0;
-
- // Dumps a summary of node info into a DictionaryValue and returns it.
- // Transfers ownership of the DictionaryValue to the caller.
- base::DictionaryValue* GetSummaryAsValue() const;
-
- // Dumps all node details into a DictionaryValue and returns it.
- // Transfers ownership of the DictionaryValue to the caller.
- base::DictionaryValue* GetDetailsAsValue() const;
-
- protected:
- BaseNode();
- virtual ~BaseNode();
-
- // Determines whether part of the entry is encrypted, and if so attempts to
- // decrypt it. Unless decryption is necessary and fails, this will always
- // return |true|. If the contents are encrypted, the decrypted data will be
- // stored in |unencrypted_data_|.
- // This method is invoked once when the BaseNode is initialized.
- bool DecryptIfNecessary();
-
- // Returns the unencrypted specifics associated with |entry|. If |entry| was
- // not encrypted, it directly returns |entry|'s EntitySpecifics. Otherwise,
- // returns |unencrypted_data_|.
- const sync_pb::EntitySpecifics& GetUnencryptedSpecifics(
- const syncable::Entry* entry) const;
-
- // Copy |specifics| into |unencrypted_data_|.
- void SetUnencryptedSpecifics(const sync_pb::EntitySpecifics& specifics);
-
- private:
- // Have to friend the test class as well to allow member functions to access
- // protected/private BaseNode methods.
- friend class SyncManagerTest;
- FRIEND_TEST_ALL_PREFIXES(SyncApiTest, GenerateSyncableHash);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, UpdateEntryWithEncryption);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest,
- UpdatePasswordSetEntitySpecificsNoChange);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, UpdatePasswordSetPasswordSpecifics);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, UpdatePasswordNewPassphrase);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, UpdatePasswordReencryptEverything);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, SetBookmarkTitle);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, SetBookmarkTitleWithEncryption);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, SetNonBookmarkTitle);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, SetNonBookmarkTitleWithEncryption);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, SetPreviouslyEncryptedSpecifics);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, IncrementTransactionVersion);
-
- void* operator new(size_t size); // Node is meant for stack use only.
-
- // A holder for the unencrypted data stored in an encrypted node.
- sync_pb::EntitySpecifics unencrypted_data_;
-
- // Same as |unencrypted_data_|, but for legacy password encryption.
- scoped_ptr<sync_pb::PasswordSpecificsData> password_data_;
-
- DISALLOW_COPY_AND_ASSIGN(BaseNode);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_NODE_H_
diff --git a/chromium/sync/internal_api/public/base_transaction.h b/chromium/sync/internal_api/public/base_transaction.h
deleted file mode 100644
index 2f4fa3c91b1..00000000000
--- a/chromium/sync/internal_api/public/base_transaction.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_TRANSACTION_H_
-#define SYNC_INTERNAL_API_PUBLIC_BASE_TRANSACTION_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/user_share.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-namespace syncable {
-class BaseTransaction;
-class Directory;
-}
-
-// Sync API's BaseTransaction, ReadTransaction, and WriteTransaction allow for
-// batching of several read and/or write operations. The read and write
-// operations are performed by creating ReadNode and WriteNode instances using
-// the transaction. These transaction classes wrap identically named classes in
-// syncable, and are used in a similar way. Unlike syncable::BaseTransaction,
-// whose construction requires an explicit syncable::Directory, a sync
-// API BaseTransaction is created from a UserShare object.
-class SYNC_EXPORT BaseTransaction {
- public:
- // Provide access to the underlying syncable objects from BaseNode.
- virtual syncable::BaseTransaction* GetWrappedTrans() const = 0;
- Cryptographer* GetCryptographer() const;
- ModelTypeSet GetEncryptedTypes() const;
-
- syncable::Directory* GetDirectory() const {
- if (!user_share_) {
- return NULL;
- } else {
- return user_share_->directory.get();
- }
- }
-
- UserShare* GetUserShare() const {
- return user_share_;
- }
-
- protected:
- explicit BaseTransaction(UserShare* share);
- virtual ~BaseTransaction();
-
- BaseTransaction() : user_share_(NULL) { }
-
- private:
- UserShare* user_share_;
-
- DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_BASE_TRANSACTION_H_
diff --git a/chromium/sync/internal_api/public/change_record.h b/chromium/sync/internal_api/public/change_record.h
deleted file mode 100644
index 062d6fae0dd..00000000000
--- a/chromium/sync/internal_api/public/change_record.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_PUBLIC_API_CHANGE_RECORD_H_
-#define SYNC_INTERNAL_PUBLIC_API_CHANGE_RECORD_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/linked_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/immutable.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace base {
-class DictionaryValue;
-} // namespace base
-
-namespace syncer {
-
-// TODO(zea): One day get passwords playing nicely with the rest of encryption
-// and get rid of this.
-class SYNC_EXPORT ExtraPasswordChangeRecordData {
- public:
- ExtraPasswordChangeRecordData();
- explicit ExtraPasswordChangeRecordData(
- const sync_pb::PasswordSpecificsData& data);
- virtual ~ExtraPasswordChangeRecordData();
-
- // Transfers ownership of the DictionaryValue to the caller.
- virtual base::DictionaryValue* ToValue() const;
-
- const sync_pb::PasswordSpecificsData& unencrypted() const;
- private:
- sync_pb::PasswordSpecificsData unencrypted_;
-};
-
-// ChangeRecord indicates a single item that changed as a result of a sync
-// operation. This gives the sync id of the node that changed, and the type
-// of change. To get the actual property values after an ADD or UPDATE, the
-// client should get the node with InitByIdLookup(), using the provided id.
-struct SYNC_EXPORT_PRIVATE ChangeRecord {
- enum Action {
- ACTION_ADD,
- ACTION_DELETE,
- ACTION_UPDATE,
- };
- ChangeRecord();
- ~ChangeRecord();
-
- // Transfers ownership of the DictionaryValue to the caller.
- base::DictionaryValue* ToValue() const;
-
- int64 id;
- Action action;
- sync_pb::EntitySpecifics specifics;
- linked_ptr<ExtraPasswordChangeRecordData> extra;
-};
-
-typedef std::vector<ChangeRecord> ChangeRecordList;
-
-typedef Immutable<ChangeRecordList> ImmutableChangeRecordList;
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_CHANGE_RECORD_H_
diff --git a/chromium/sync/internal_api/public/change_record_unittest.cc b/chromium/sync/internal_api/public/change_record_unittest.cc
deleted file mode 100644
index 201ed655da1..00000000000
--- a/chromium/sync/internal_api/public/change_record_unittest.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/change_record.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using base::ExpectDictDictionaryValue;
-using base::ExpectDictStringValue;
-using testing::Invoke;
-using testing::StrictMock;
-
-class ChangeRecordTest : public testing::Test {};
-
-void ExpectChangeRecordActionValue(ChangeRecord::Action expected_value,
- const base::DictionaryValue& value,
- const std::string& key) {
- std::string str_value;
- EXPECT_TRUE(value.GetString(key, &str_value));
- switch (expected_value) {
- case ChangeRecord::ACTION_ADD:
- EXPECT_EQ("Add", str_value);
- break;
- case ChangeRecord::ACTION_UPDATE:
- EXPECT_EQ("Update", str_value);
- break;
- case ChangeRecord::ACTION_DELETE:
- EXPECT_EQ("Delete", str_value);
- break;
- default:
- NOTREACHED();
- break;
- }
-}
-
-void CheckChangeRecordValue(
- const ChangeRecord& record,
- const base::DictionaryValue& value) {
- ExpectChangeRecordActionValue(record.action, value, "action");
- ExpectDictStringValue(base::Int64ToString(record.id), value, "id");
- if (record.action == ChangeRecord::ACTION_DELETE) {
- scoped_ptr<base::DictionaryValue> expected_extra_value;
- if (record.extra.get()) {
- expected_extra_value.reset(record.extra->ToValue());
- }
- const base::Value* extra_value = NULL;
- EXPECT_EQ(record.extra.get() != NULL,
- value.Get("extra", &extra_value));
- EXPECT_TRUE(Value::Equals(extra_value, expected_extra_value.get()));
-
- scoped_ptr<base::DictionaryValue> expected_specifics_value(
- EntitySpecificsToValue(record.specifics));
- ExpectDictDictionaryValue(*expected_specifics_value,
- value, "specifics");
- }
-}
-
-class MockExtraChangeRecordData
- : public ExtraPasswordChangeRecordData {
- public:
- MOCK_CONST_METHOD0(ToValue, base::DictionaryValue*());
-};
-
-TEST_F(ChangeRecordTest, ChangeRecordToValue) {
- sync_pb::EntitySpecifics old_specifics;
- old_specifics.mutable_extension()->set_id("old");
- sync_pb::EntitySpecifics new_specifics;
- old_specifics.mutable_extension()->set_id("new");
-
- const int64 kTestId = 5;
-
- // Add
- {
- ChangeRecord record;
- record.action = ChangeRecord::ACTION_ADD;
- record.id = kTestId;
- record.specifics = old_specifics;
- record.extra.reset(new StrictMock<MockExtraChangeRecordData>());
- scoped_ptr<base::DictionaryValue> value(record.ToValue());
- CheckChangeRecordValue(record, *value);
- }
-
- // Update
- {
- ChangeRecord record;
- record.action = ChangeRecord::ACTION_UPDATE;
- record.id = kTestId;
- record.specifics = old_specifics;
- record.extra.reset(new StrictMock<MockExtraChangeRecordData>());
- scoped_ptr<base::DictionaryValue> value(record.ToValue());
- CheckChangeRecordValue(record, *value);
- }
-
- // Delete (no extra)
- {
- ChangeRecord record;
- record.action = ChangeRecord::ACTION_DELETE;
- record.id = kTestId;
- record.specifics = old_specifics;
- scoped_ptr<base::DictionaryValue> value(record.ToValue());
- CheckChangeRecordValue(record, *value);
- }
-
- // Delete (with extra)
- {
- ChangeRecord record;
- record.action = ChangeRecord::ACTION_DELETE;
- record.id = kTestId;
- record.specifics = old_specifics;
-
- base::DictionaryValue extra_value;
- extra_value.SetString("foo", "bar");
- scoped_ptr<StrictMock<MockExtraChangeRecordData> > extra(
- new StrictMock<MockExtraChangeRecordData>());
- EXPECT_CALL(*extra, ToValue()).Times(2).WillRepeatedly(
- Invoke(&extra_value, &base::DictionaryValue::DeepCopy));
-
- record.extra.reset(extra.release());
- scoped_ptr<base::DictionaryValue> value(record.ToValue());
- CheckChangeRecordValue(record, *value);
- }
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/configure_reason.h b/chromium/sync/internal_api/public/configure_reason.h
deleted file mode 100644
index 28a99f61cf9..00000000000
--- a/chromium/sync/internal_api/public/configure_reason.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_CONFIGURE_REASON_H_
-#define SYNC_INTERNAL_API_PUBLIC_CONFIGURE_REASON_H_
-
-namespace syncer {
-
-// Note: This should confirm with the enums in sync.proto for
-// GetUpdatesCallerInfo. They will have 1:1 mapping but this will only map
-// to a subset of the GetUpdatesCallerInfo enum values.
-enum ConfigureReason {
- // We should never be here during actual configure. This is for setting
- // default values.
- CONFIGURE_REASON_UNKNOWN,
-
- // The client is configuring because the user opted to sync a different set
- // of datatypes.
- CONFIGURE_REASON_RECONFIGURATION,
-
- // The client is configuring because the client is being asked to migrate.
- CONFIGURE_REASON_MIGRATION,
-
- // Setting up sync performs an initial config to download NIGORI data, and
- // also a config to download initial data once the user selects types.
- CONFIGURE_REASON_NEW_CLIENT,
-
- // A new datatype is enabled for syncing due to a client upgrade.
- CONFIGURE_REASON_NEWLY_ENABLED_DATA_TYPE,
-
- // A configuration due to enabling or disabling encrypted types due to
- // cryptographer errors/resolutions.
- CONFIGURE_REASON_CRYPTO,
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_CONFIGURE_REASON_H_
diff --git a/chromium/sync/internal_api/public/data_type_association_stats.cc b/chromium/sync/internal_api/public/data_type_association_stats.cc
deleted file mode 100644
index 4988de14ad7..00000000000
--- a/chromium/sync/internal_api/public/data_type_association_stats.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/data_type_association_stats.h"
-
-namespace syncer {
-
-DataTypeAssociationStats::DataTypeAssociationStats()
- : num_local_items_before_association(0),
- num_sync_items_before_association(0),
- num_local_items_after_association(0),
- num_sync_items_after_association(0),
- num_local_items_added(0),
- num_local_items_deleted(0),
- num_local_items_modified(0),
- num_sync_items_added(0),
- num_sync_items_deleted(0),
- num_sync_items_modified(0),
- local_version_pre_association(0),
- sync_version_pre_association(0),
- had_error(false) {
-}
-
-DataTypeAssociationStats::~DataTypeAssociationStats() {
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/data_type_association_stats.h b/chromium/sync/internal_api/public/data_type_association_stats.h
deleted file mode 100644
index ba38f65603f..00000000000
--- a/chromium/sync/internal_api/public/data_type_association_stats.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_DATA_TYPE_ASSOCIATION_STATS_H_
-#define SYNC_INTERNAL_API_PUBLIC_DATA_TYPE_ASSOCIATION_STATS_H_
-
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// Container for datatype association results.
-struct SYNC_EXPORT DataTypeAssociationStats {
- DataTypeAssociationStats();
- ~DataTypeAssociationStats();
-
- // The state of the world before association.
- int num_local_items_before_association;
- int num_sync_items_before_association;
-
- // The state of the world after association.
- int num_local_items_after_association;
- int num_sync_items_after_association;
-
- // The changes that took place during association. In a correctly working
- // system these should be the deltas between before and after.
- int num_local_items_added;
- int num_local_items_deleted;
- int num_local_items_modified;
- int num_sync_items_added;
- int num_sync_items_deleted;
- int num_sync_items_modified;
-
- // Model versions before association.
- int64 local_version_pre_association;
- int64 sync_version_pre_association;
-
- // Whether a datatype unrecoverable error was encountered during association.
- bool had_error;
-
- // Waiting time within association manager for loading local models and
- // associating other types.
- base::TimeDelta association_wait_time;
-
- // Time spent on association.
- base::TimeDelta association_time;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_DATA_TYPE_ASSOCIATION_STATS_H_
diff --git a/chromium/sync/internal_api/public/data_type_debug_info_listener.cc b/chromium/sync/internal_api/public/data_type_debug_info_listener.cc
deleted file mode 100644
index 08355dd83b4..00000000000
--- a/chromium/sync/internal_api/public/data_type_debug_info_listener.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/data_type_debug_info_listener.h"
-
-namespace syncer {
-
-DataTypeConfigurationStats::DataTypeConfigurationStats()
- : model_type(UNSPECIFIED) {}
-
-DataTypeConfigurationStats::~DataTypeConfigurationStats() {}
-
-} // namespace syncer
-
diff --git a/chromium/sync/internal_api/public/data_type_debug_info_listener.h b/chromium/sync/internal_api/public/data_type_debug_info_listener.h
deleted file mode 100644
index 6395a0450a8..00000000000
--- a/chromium/sync/internal_api/public/data_type_debug_info_listener.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_DATA_TYPE_DEBUG_INFO_LISTENER_H_
-#define SYNC_INTERNAL_API_PUBLIC_DATA_TYPE_DEBUG_INFO_LISTENER_H_
-
-#include <vector>
-
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/data_type_association_stats.h"
-
-namespace syncer {
-
-struct SYNC_EXPORT DataTypeConfigurationStats {
- DataTypeConfigurationStats();
- ~DataTypeConfigurationStats();
-
- // The datatype that was configured.
- ModelType model_type;
-
- // Waiting time before downloading starts.
- base::TimeDelta download_wait_time;
-
- // Time spent on downloading data for first-sync data types.
- base::TimeDelta download_time;
-
- // Waiting time for association of higher priority types to finish before
- // asking association manager to associate.
- base::TimeDelta association_wait_time_for_high_priority;
-
- // Types configured before this type.
- ModelTypeSet high_priority_types_configured_before;
- ModelTypeSet same_priority_types_configured_before;
-
- // Association stats.
- DataTypeAssociationStats association_stats;
-};
-
-// Interface for the sync internals to listen to external sync events.
-class DataTypeDebugInfoListener {
- public:
- // Notify the listener that configuration of data types has completed.
- virtual void OnDataTypeConfigureComplete(
- const std::vector<DataTypeConfigurationStats>& configuration_stats) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_DATA_TYPE_DEBUG_INFO_LISTENER_H_
diff --git a/chromium/sync/internal_api/public/delete_journal.h b/chromium/sync/internal_api/public/delete_journal.h
deleted file mode 100644
index 3152b021779..00000000000
--- a/chromium/sync/internal_api/public/delete_journal.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_DELETE_JOURNAL_H_
-#define SYNC_INTERNAL_API_PUBLIC_DELETE_JOURNAL_H_
-
-#include <vector>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-class BaseTransaction;
-
-struct BookmarkDeleteJournal {
- int64 id; // Metahandle of delete journal entry.
- bool is_folder;
- sync_pb::EntitySpecifics specifics;
-};
-typedef std::vector<BookmarkDeleteJournal> BookmarkDeleteJournalList;
-
-// Static APIs for passing delete journals between syncer::syncable namspace
-// and syncer namespace.
-class SYNC_EXPORT DeleteJournal {
- public:
- // Return info about deleted bookmark entries stored in the delete journal
- // of |trans|'s directory.
- // TODO(haitaol): remove this after SyncData supports bookmarks and
- // all types of delete journals can be returned as
- // SyncDataList.
- static void GetBookmarkDeleteJournals(
- BaseTransaction* trans, BookmarkDeleteJournalList *delete_journals);
-
- // Purge delete journals of given IDs from |trans|'s directory.
- static void PurgeDeleteJournals(BaseTransaction* trans,
- const std::set<int64>& ids);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_DELETE_JOURNAL_H_
diff --git a/chromium/sync/internal_api/public/engine/model_safe_worker.cc b/chromium/sync/internal_api/public/engine/model_safe_worker.cc
deleted file mode 100644
index eb3fc1f1c3e..00000000000
--- a/chromium/sync/internal_api/public/engine/model_safe_worker.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-
-#include "base/bind.h"
-#include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-
-namespace syncer {
-
-base::DictionaryValue* ModelSafeRoutingInfoToValue(
- const ModelSafeRoutingInfo& routing_info) {
- base::DictionaryValue* dict = new base::DictionaryValue();
- for (ModelSafeRoutingInfo::const_iterator it = routing_info.begin();
- it != routing_info.end(); ++it) {
- dict->SetString(ModelTypeToString(it->first),
- ModelSafeGroupToString(it->second));
- }
- return dict;
-}
-
-std::string ModelSafeRoutingInfoToString(
- const ModelSafeRoutingInfo& routing_info) {
- scoped_ptr<base::DictionaryValue> dict(
- ModelSafeRoutingInfoToValue(routing_info));
- std::string json;
- base::JSONWriter::Write(dict.get(), &json);
- return json;
-}
-
-ModelTypeSet GetRoutingInfoTypes(const ModelSafeRoutingInfo& routing_info) {
- ModelTypeSet types;
- for (ModelSafeRoutingInfo::const_iterator it = routing_info.begin();
- it != routing_info.end(); ++it) {
- types.Put(it->first);
- }
- return types;
-}
-
-ModelSafeGroup GetGroupForModelType(const ModelType type,
- const ModelSafeRoutingInfo& routes) {
- ModelSafeRoutingInfo::const_iterator it = routes.find(type);
- if (it == routes.end()) {
- if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER)
- DVLOG(1) << "Entry does not belong to active ModelSafeGroup!";
- return GROUP_PASSIVE;
- }
- return it->second;
-}
-
-std::string ModelSafeGroupToString(ModelSafeGroup group) {
- switch (group) {
- case GROUP_UI:
- return "GROUP_UI";
- case GROUP_DB:
- return "GROUP_DB";
- case GROUP_FILE:
- return "GROUP_FILE";
- case GROUP_HISTORY:
- return "GROUP_HISTORY";
- case GROUP_PASSIVE:
- return "GROUP_PASSIVE";
- case GROUP_PASSWORD:
- return "GROUP_PASSWORD";
- default:
- NOTREACHED();
- return "INVALID";
- }
-}
-
-ModelSafeWorker::ModelSafeWorker(WorkerLoopDestructionObserver* observer)
- : stopped_(false),
- work_done_or_stopped_(false, false),
- observer_(observer),
- working_loop_(NULL),
- working_loop_set_wait_(true, false) {}
-
-ModelSafeWorker::~ModelSafeWorker() {}
-
-void ModelSafeWorker::RequestStop() {
- base::AutoLock al(stopped_lock_);
-
- // Set stop flag but don't signal work_done_or_stopped_ to unblock sync loop
- // because the worker may be working and depending on sync command object
- // living on sync thread. his prevents any *further* tasks from being posted
- // to worker threads (see DoWorkAndWaitUntilDone below), but note that one
- // may already be posted.
- stopped_ = true;
-}
-
-SyncerError ModelSafeWorker::DoWorkAndWaitUntilDone(const WorkCallback& work) {
- {
- base::AutoLock al(stopped_lock_);
- if (stopped_)
- return CANNOT_DO_WORK;
-
- CHECK(!work_done_or_stopped_.IsSignaled());
- }
-
- return DoWorkAndWaitUntilDoneImpl(work);
-}
-
-bool ModelSafeWorker::IsStopped() {
- base::AutoLock al(stopped_lock_);
- return stopped_;
-}
-
-void ModelSafeWorker::WillDestroyCurrentMessageLoop() {
- {
- base::AutoLock al(stopped_lock_);
- stopped_ = true;
-
- // Must signal to unblock syncer if it's waiting for a posted task to
- // finish. At this point, all pending tasks posted to the loop have been
- // destroyed (see MessageLoop::~MessageLoop). So syncer will be blocked
- // indefinitely without signaling here.
- work_done_or_stopped_.Signal();
-
- DVLOG(1) << ModelSafeGroupToString(GetModelSafeGroup())
- << " worker stops on destruction of its working thread.";
- }
-
- {
- base::AutoLock l(working_loop_lock_);
- working_loop_ = NULL;
- }
-
- if (observer_)
- observer_->OnWorkerLoopDestroyed(GetModelSafeGroup());
-}
-
-void ModelSafeWorker::SetWorkingLoopToCurrent() {
- base::AutoLock l(working_loop_lock_);
- DCHECK(!working_loop_);
- working_loop_ = base::MessageLoop::current();
- working_loop_set_wait_.Signal();
-}
-
-void ModelSafeWorker::UnregisterForLoopDestruction(
- base::Callback<void(ModelSafeGroup)> unregister_done_callback) {
- // Ok to wait until |working_loop_| is set because this is called on sync
- // loop.
- working_loop_set_wait_.Wait();
-
- {
- base::AutoLock l(working_loop_lock_);
- if (working_loop_ != NULL) {
- // Should be called on sync loop.
- DCHECK_NE(base::MessageLoop::current(), working_loop_);
- working_loop_->PostTask(
- FROM_HERE,
- base::Bind(&ModelSafeWorker::UnregisterForLoopDestructionAsync,
- this, unregister_done_callback));
- }
- }
-}
-
-void ModelSafeWorker::UnregisterForLoopDestructionAsync(
- base::Callback<void(ModelSafeGroup)> unregister_done_callback) {
- {
- base::AutoLock l(working_loop_lock_);
- if (!working_loop_)
- return;
- DCHECK_EQ(base::MessageLoop::current(), working_loop_);
- }
-
- DCHECK(stopped_);
- base::MessageLoop::current()->RemoveDestructionObserver(this);
- unregister_done_callback.Run(GetModelSafeGroup());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/engine/model_safe_worker.h b/chromium/sync/internal_api/public/engine/model_safe_worker.h
deleted file mode 100644
index 9a523e973de..00000000000
--- a/chromium/sync/internal_api/public/engine/model_safe_worker.h
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_ENGINE_MODEL_SAFE_WORKER_H_
-#define SYNC_INTERNAL_API_PUBLIC_ENGINE_MODEL_SAFE_WORKER_H_
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-
-namespace base {
-class DictionaryValue;
-} // namespace
-
-namespace syncer {
-
-// TODO(akalin): Move the non-exported functions in this file to a
-// private header.
-
-typedef base::Callback<enum SyncerError(void)> WorkCallback;
-
-enum ModelSafeGroup {
- GROUP_PASSIVE = 0, // Models that are just "passively" being synced; e.g.
- // changes to these models don't need to be pushed to a
- // native model.
- GROUP_UI, // Models that live on UI thread and are being synced.
- GROUP_DB, // Models that live on DB thread and are being synced.
- GROUP_FILE, // Models that live on FILE thread and are being synced.
- GROUP_HISTORY, // Models that live on history thread and are being
- // synced.
- GROUP_PASSWORD, // Models that live on the password thread and are
- // being synced. On windows and linux, this runs on the
- // DB thread.
- MODEL_SAFE_GROUP_COUNT,
-};
-
-SYNC_EXPORT std::string ModelSafeGroupToString(ModelSafeGroup group);
-
-// WorkerLoopDestructionObserver is notified when the thread where it works
-// is going to be destroyed.
-class WorkerLoopDestructionObserver {
- public:
- virtual void OnWorkerLoopDestroyed(ModelSafeGroup group) = 0;
-};
-
-// The Syncer uses a ModelSafeWorker for all tasks that could potentially
-// modify syncable entries (e.g under a WriteTransaction). The ModelSafeWorker
-// only knows how to do one thing, and that is take some work (in a fully
-// pre-bound callback) and have it performed (as in Run()) from a thread which
-// is guaranteed to be "model-safe", where "safe" refers to not allowing us to
-// cause an embedding application model to fall out of sync with the
-// syncable::Directory due to a race. Each ModelSafeWorker is affiliated with
-// a thread and does actual work on that thread. On the destruction of that
-// thread, the affiliated worker is effectively disabled to do more
-// work and will notify its observer.
-class SYNC_EXPORT ModelSafeWorker
- : public base::RefCountedThreadSafe<ModelSafeWorker>,
- public base::MessageLoop::DestructionObserver {
- public:
- // Subclass should implement to observe destruction of the loop where
- // it actually does work. Called on UI thread immediately after worker is
- // created.
- virtual void RegisterForLoopDestruction() = 0;
-
- // Called on sync loop from SyncBackendRegistrar::ShutDown(). Post task to
- // working loop to stop observing loop destruction and invoke
- // |unregister_done_callback|.
- virtual void UnregisterForLoopDestruction(
- base::Callback<void(ModelSafeGroup)> unregister_done_callback);
-
- // If not stopped, call DoWorkAndWaitUntilDoneImpl() to do work. Otherwise
- // return CANNOT_DO_WORK.
- SyncerError DoWorkAndWaitUntilDone(const WorkCallback& work);
-
- // Soft stop worker by setting stopped_ flag. Called when sync is disabled
- // or browser is shutting down. Called on UI loop.
- virtual void RequestStop();
-
- virtual ModelSafeGroup GetModelSafeGroup() = 0;
-
- // MessageLoop::DestructionObserver implementation.
- virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
-
- protected:
- friend class base::RefCountedThreadSafe<ModelSafeWorker>;
-
- explicit ModelSafeWorker(WorkerLoopDestructionObserver* observer);
- virtual ~ModelSafeWorker();
-
- // Any time the Syncer performs model modifications (e.g employing a
- // WriteTransaction), it should be done by this method to ensure it is done
- // from a model-safe thread.
- virtual SyncerError DoWorkAndWaitUntilDoneImpl(const WorkCallback& work) = 0;
-
- base::WaitableEvent* work_done_or_stopped() {
- return &work_done_or_stopped_;
- }
-
- // Return true if the worker was stopped. Thread safe.
- bool IsStopped();
-
- // Subclass should call this in RegisterForLoopDestruction() from the loop
- // where work is done.
- void SetWorkingLoopToCurrent();
-
- private:
- void UnregisterForLoopDestructionAsync(
- base::Callback<void(ModelSafeGroup)> unregister_done_callback);
-
- // Whether the worker should/can do more work. Set when sync is disabled or
- // when the worker's working thread is to be destroyed.
- base::Lock stopped_lock_;
- bool stopped_;
-
- // Signal set when work on native thread is finished or when native thread
- // is to be destroyed so no more work can be done.
- base::WaitableEvent work_done_or_stopped_;
-
- // Notified when working thread of the worker is to be destroyed.
- WorkerLoopDestructionObserver* observer_;
-
- // Remember working loop for posting task to unregister destruction
- // observation from sync thread when shutting down sync.
- base::Lock working_loop_lock_;
- base::MessageLoop* working_loop_;
- base::WaitableEvent working_loop_set_wait_;
-};
-
-// A map that details which ModelSafeGroup each ModelType
-// belongs to. Routing info can change in response to the user enabling /
-// disabling sync for certain types, as well as model association completions.
-typedef std::map<ModelType, ModelSafeGroup> ModelSafeRoutingInfo;
-
-// Caller takes ownership of return value.
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ModelSafeRoutingInfoToValue(
- const ModelSafeRoutingInfo& routing_info);
-
-SYNC_EXPORT std::string ModelSafeRoutingInfoToString(
- const ModelSafeRoutingInfo& routing_info);
-
-SYNC_EXPORT ModelTypeSet GetRoutingInfoTypes(
- const ModelSafeRoutingInfo& routing_info);
-
-SYNC_EXPORT ModelSafeGroup GetGroupForModelType(
- const ModelType type,
- const ModelSafeRoutingInfo& routes);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_ENGINE_MODEL_SAFE_WORKER_H_
diff --git a/chromium/sync/internal_api/public/engine/model_safe_worker_unittest.cc b/chromium/sync/internal_api/public/engine/model_safe_worker_unittest.cc
deleted file mode 100644
index f2e8c0ee525..00000000000
--- a/chromium/sync/internal_api/public/engine/model_safe_worker_unittest.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class ModelSafeWorkerTest : public ::testing::Test {
-};
-
-TEST_F(ModelSafeWorkerTest, ModelSafeRoutingInfoToValue) {
- ModelSafeRoutingInfo routing_info;
- routing_info[BOOKMARKS] = GROUP_PASSIVE;
- routing_info[NIGORI] = GROUP_UI;
- routing_info[PREFERENCES] = GROUP_DB;
- base::DictionaryValue expected_value;
- expected_value.SetString("Bookmarks", "GROUP_PASSIVE");
- expected_value.SetString("Encryption keys", "GROUP_UI");
- expected_value.SetString("Preferences", "GROUP_DB");
- scoped_ptr<base::DictionaryValue> value(
- ModelSafeRoutingInfoToValue(routing_info));
- EXPECT_TRUE(value->Equals(&expected_value));
-}
-
-TEST_F(ModelSafeWorkerTest, ModelSafeRoutingInfoToString) {
- ModelSafeRoutingInfo routing_info;
- routing_info[BOOKMARKS] = GROUP_PASSIVE;
- routing_info[NIGORI] = GROUP_UI;
- routing_info[PREFERENCES] = GROUP_DB;
- EXPECT_EQ(
- "{\"Bookmarks\":\"GROUP_PASSIVE\",\"Encryption keys\":\"GROUP_UI\","
- "\"Preferences\":\"GROUP_DB\"}",
- ModelSafeRoutingInfoToString(routing_info));
-}
-
-TEST_F(ModelSafeWorkerTest, GetRoutingInfoTypes) {
- ModelSafeRoutingInfo routing_info;
- routing_info[BOOKMARKS] = GROUP_PASSIVE;
- routing_info[NIGORI] = GROUP_UI;
- routing_info[PREFERENCES] = GROUP_DB;
- const ModelTypeSet expected_types(BOOKMARKS, NIGORI, PREFERENCES);
- EXPECT_TRUE(GetRoutingInfoTypes(routing_info).Equals(expected_types));
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/engine/passive_model_worker.cc b/chromium/sync/internal_api/public/engine/passive_model_worker.cc
deleted file mode 100644
index 02036d500f8..00000000000
--- a/chromium/sync/internal_api/public/engine/passive_model_worker.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/engine/passive_model_worker.h"
-
-#include "base/message_loop/message_loop.h"
-
-namespace syncer {
-
-PassiveModelWorker::PassiveModelWorker(const base::MessageLoop* sync_loop,
- WorkerLoopDestructionObserver* observer)
- : ModelSafeWorker(observer),
- sync_loop_(sync_loop) {
-}
-
-PassiveModelWorker::~PassiveModelWorker() {
-}
-
-void PassiveModelWorker::RegisterForLoopDestruction() {
- base::MessageLoop::current()->AddDestructionObserver(this);
- SetWorkingLoopToCurrent();
-}
-
-SyncerError PassiveModelWorker::DoWorkAndWaitUntilDoneImpl(
- const WorkCallback& work) {
- DCHECK_EQ(base::MessageLoop::current(), sync_loop_);
- // Simply do the work on the current thread.
- return work.Run();
-}
-
-ModelSafeGroup PassiveModelWorker::GetModelSafeGroup() {
- return GROUP_PASSIVE;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/engine/passive_model_worker.h b/chromium/sync/internal_api/public/engine/passive_model_worker.h
deleted file mode 100644
index 783731b9dd6..00000000000
--- a/chromium/sync/internal_api/public/engine/passive_model_worker.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_ENGINE_PASSIVE_MODEL_WORKER_H_
-#define SYNC_INTERNAL_API_PUBLIC_ENGINE_PASSIVE_MODEL_WORKER_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-
-namespace syncer {
-
-// Implementation of ModelSafeWorker for passive types. All work is
-// done on the same thread DoWorkAndWaitUntilDone (i.e., the sync
-// thread).
-class SYNC_EXPORT PassiveModelWorker : public ModelSafeWorker {
- public:
- explicit PassiveModelWorker(const base::MessageLoop* sync_loop,
- WorkerLoopDestructionObserver* observer);
-
- // ModelSafeWorker implementation. Called on the sync thread.
- virtual void RegisterForLoopDestruction() OVERRIDE;
- virtual ModelSafeGroup GetModelSafeGroup() OVERRIDE;
-
- protected:
- virtual SyncerError DoWorkAndWaitUntilDoneImpl(
- const WorkCallback& work) OVERRIDE;
-
- private:
- virtual ~PassiveModelWorker();
-
- const base::MessageLoop* const sync_loop_;
-
- DISALLOW_COPY_AND_ASSIGN(PassiveModelWorker);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_ENGINE_PASSIVE_MODEL_WORKER_H_
diff --git a/chromium/sync/internal_api/public/engine/polling_constants.cc b/chromium/sync/internal_api/public/engine/polling_constants.cc
deleted file mode 100644
index 9a1d3d1f012..00000000000
--- a/chromium/sync/internal_api/public/engine/polling_constants.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-
-namespace syncer {
-
-// Server can overwrite these values via client commands.
-// Standard short poll. This is used when XMPP is off.
-// We use high values here to ensure that failure to receive poll updates from
-// the server doesn't result in rapid-fire polling from the client due to low
-// local limits.
-const int64 kDefaultShortPollIntervalSeconds = 3600 * 8;
-// Long poll is used when XMPP is on.
-const int64 kDefaultLongPollIntervalSeconds = 3600 * 12;
-
-// Maximum interval for exponential backoff.
-const int64 kMaxBackoffSeconds = 60 * 60 * 4; // 4 hours.
-
-// Backoff interval randomization factor.
-const int kBackoffRandomizationFactor = 2;
-
-// After a failure contacting sync servers, specifies how long to wait before
-// reattempting and entering exponential backoff if consecutive failures
-// occur.
-const int kInitialBackoffRetrySeconds = 60 * 5; // 5 minutes.
-
-// A dangerously short retry value that would not actually protect servers from
-// DDoS if it were used as a seed for exponential backoff, although the client
-// would still follow exponential backoff. Useful for debugging and tests (when
-// you don't want to wait 5 minutes).
-const int kInitialBackoffShortRetrySeconds = 1;
-
-// Similar to kInitialBackoffRetrySeconds above, but only to be used in
-// certain exceptional error cases, such as MIGRATION_DONE.
-const int kInitialBackoffImmediateRetrySeconds = 0;
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/engine/polling_constants.h b/chromium/sync/internal_api/public/engine/polling_constants.h
deleted file mode 100644
index 8f4f100b0ae..00000000000
--- a/chromium/sync/internal_api/public/engine/polling_constants.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Constants used by SyncScheduler when polling servers for updates.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_ENGINE_POLLING_CONSTANTS_H_
-#define SYNC_INTERNAL_API_PUBLIC_ENGINE_POLLING_CONSTANTS_H_
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-SYNC_EXPORT_PRIVATE extern const int64 kDefaultShortPollIntervalSeconds;
-SYNC_EXPORT_PRIVATE extern const int64 kDefaultLongPollIntervalSeconds;
-SYNC_EXPORT extern const int64 kMaxBackoffSeconds;
-SYNC_EXPORT extern const int kBackoffRandomizationFactor;
-SYNC_EXPORT_PRIVATE extern const int kInitialBackoffRetrySeconds;
-SYNC_EXPORT_PRIVATE extern const int kInitialBackoffShortRetrySeconds;
-SYNC_EXPORT_PRIVATE extern const int kInitialBackoffImmediateRetrySeconds;
-
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_ENGINE_POLLING_CONSTANTS_H_
diff --git a/chromium/sync/internal_api/public/engine/sync_status.cc b/chromium/sync/internal_api/public/engine/sync_status.cc
deleted file mode 100644
index 6291b290a4a..00000000000
--- a/chromium/sync/internal_api/public/engine/sync_status.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/engine/sync_status.h"
-
-namespace syncer {
-
-SyncStatus::SyncStatus()
- : notifications_enabled(false),
- notifications_received(0),
- encryption_conflicts(0),
- hierarchy_conflicts(0),
- server_conflicts(0),
- committed_count(0),
- syncing(false),
- updates_available(0),
- updates_received(0),
- reflected_updates_received(0),
- tombstone_updates_received(0),
- num_commits_total(0),
- num_local_overwrites_total(0),
- num_server_overwrites_total(0),
- nonempty_get_updates(0),
- empty_get_updates(0),
- sync_cycles_with_commits(0),
- sync_cycles_without_commits(0),
- useless_sync_cycles(0),
- useful_sync_cycles(0),
- nudge_source_notification(0),
- nudge_source_local(0),
- nudge_source_local_refresh(0),
- cryptographer_ready(false),
- crypto_has_pending_keys(false),
- has_keystore_key(false),
- passphrase_type(IMPLICIT_PASSPHRASE),
- num_entries_by_type(MODEL_TYPE_COUNT, 0),
- num_to_delete_entries_by_type(MODEL_TYPE_COUNT, 0){
-}
-
-SyncStatus::~SyncStatus() {
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/engine/sync_status.h b/chromium/sync/internal_api/public/engine/sync_status.h
deleted file mode 100644
index 124dac6b0e0..00000000000
--- a/chromium/sync/internal_api/public/engine/sync_status.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_ENGINE_STATUS_SUMMARY_H_
-#define SYNC_INTERNAL_API_PUBLIC_ENGINE_STATUS_SUMMARY_H_
-
-#include <string>
-
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/sync_encryption_handler.h"
-#include "sync/protocol/sync_protocol_error.h"
-
-namespace syncer {
-
-// Status encapsulates detailed state about the internals of the SyncManager.
-//
-// This struct is closely tied to the AllStatus object which uses instances of
-// it to track and report on the sync engine's internal state, and the functions
-// in sync_ui_util.cc which convert the contents of this struct into a
-// DictionaryValue used to populate the about:sync summary tab.
-struct SYNC_EXPORT SyncStatus {
- SyncStatus();
- ~SyncStatus();
-
- // TODO(akalin): Replace this with a NotificationsDisabledReason
- // variable.
- bool notifications_enabled; // True only if subscribed for notifications.
-
- // Notifications counters updated by the actions in synapi.
- int notifications_received;
-
- SyncProtocolError sync_protocol_error;
-
- // Number of encryption conflicts counted during most recent sync cycle.
- int encryption_conflicts;
-
- // Number of hierarchy conflicts counted during most recent sync cycle.
- int hierarchy_conflicts;
-
- // Number of items the server refused to commit due to conflict during most
- // recent sync cycle.
- int server_conflicts;
-
- // Number of items successfully committed during most recent sync cycle.
- int committed_count;
-
- bool syncing;
-
- // Total updates available. If zero, nothing left to download.
- int64 updates_available;
- // Total updates received by the syncer since browser start.
- int updates_received;
- // Total updates received that are echoes of our own changes.
- int reflected_updates_received;
- // Of updates_received, how many were tombstones.
- int tombstone_updates_received;
-
- // Total successful commits.
- int num_commits_total;
-
- // Total number of overwrites due to conflict resolver since browser start.
- int num_local_overwrites_total;
- int num_server_overwrites_total;
-
- // Count of empty and non empty getupdates;
- int nonempty_get_updates;
- int empty_get_updates;
-
- // Count of sync cycles that successfully committed items;
- int sync_cycles_with_commits;
- int sync_cycles_without_commits;
-
- // Count of useless and useful syncs we perform.
- int useless_sync_cycles;
- int useful_sync_cycles;
-
- // Nudge counts for each possible source
- int nudge_source_notification;
- int nudge_source_local;
- int nudge_source_local_refresh;
-
- // Encryption related.
- ModelTypeSet encrypted_types;
- bool cryptographer_ready;
- bool crypto_has_pending_keys;
- bool has_keystore_key;
- base::Time keystore_migration_time;
- PassphraseType passphrase_type;
-
- // Per-datatype throttled status.
- ModelTypeSet throttled_types;
-
- // The unique identifer for the sync store.
- std::string sync_id;
-
- // The unique identifier for the invalidation client.
- std::string invalidator_client_id;
-
- // Counters grouped by model type
- std::vector<int> num_entries_by_type;
- std::vector<int> num_to_delete_entries_by_type;
-
- // Time of next retry if sync scheduler is throttled or in backoff.
- base::Time retry_time;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_ENGINE_STATUS_SUMMARY_H_
diff --git a/chromium/sync/internal_api/public/http_bridge.h b/chromium/sync/internal_api/public/http_bridge.h
deleted file mode 100644
index 74c7005fcea..00000000000
--- a/chromium/sync/internal_api/public/http_bridge.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_H_
-#define SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "net/base/network_time_notifier.h"
-#include "net/url_request/url_fetcher_delegate.h"
-#include "net/url_request/url_request_context.h"
-#include "net/url_request/url_request_context_getter.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/cancelation_observer.h"
-#include "sync/internal_api/public/http_post_provider_factory.h"
-#include "sync/internal_api/public/http_post_provider_interface.h"
-#include "sync/internal_api/public/network_time_update_callback.h"
-#include "url/gurl.h"
-
-class HttpBridgeTest;
-
-namespace base {
-class MessageLoop;
-}
-
-namespace net {
-class HttpResponseHeaders;
-class HttpUserAgentSettings;
-class URLFetcher;
-}
-
-namespace syncer {
-
-class CancelationSignal;
-
-// A bridge between the syncer and Chromium HTTP layers.
-// Provides a way for the sync backend to use Chromium directly for HTTP
-// requests rather than depending on a third party provider (e.g libcurl).
-// This is a one-time use bridge. Create one for each request you want to make.
-// It is RefCountedThreadSafe because it can PostTask to the io loop, and thus
-// needs to stick around across context switches, etc.
-class SYNC_EXPORT_PRIVATE HttpBridge
- : public base::RefCountedThreadSafe<HttpBridge>,
- public HttpPostProviderInterface,
- public net::URLFetcherDelegate {
- public:
- // A request context used for HTTP requests bridged from the sync backend.
- // A bridged RequestContext has a dedicated in-memory cookie store and does
- // not use a cache. Thus the same type can be used for incognito mode.
- class RequestContext : public net::URLRequestContext {
- public:
- // |baseline_context| is used to obtain the accept-language
- // and proxy service information for bridged requests.
- // Typically |baseline_context| should be the net::URLRequestContext of the
- // currently active profile.
- RequestContext(
- net::URLRequestContext* baseline_context,
- const scoped_refptr<base::SingleThreadTaskRunner>&
- network_task_runner,
- const std::string& user_agent);
-
- // The destructor MUST be called on the IO thread.
- virtual ~RequestContext();
-
- private:
- net::URLRequestContext* const baseline_context_;
- const scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
- scoped_ptr<net::HttpUserAgentSettings> http_user_agent_settings_;
-
- DISALLOW_COPY_AND_ASSIGN(RequestContext);
- };
-
- // Lazy-getter for RequestContext objects.
- class SYNC_EXPORT_PRIVATE RequestContextGetter
- : public net::URLRequestContextGetter {
- public:
- RequestContextGetter(
- net::URLRequestContextGetter* baseline_context_getter,
- const std::string& user_agent);
-
- // net::URLRequestContextGetter implementation.
- virtual net::URLRequestContext* GetURLRequestContext() OVERRIDE;
- virtual scoped_refptr<base::SingleThreadTaskRunner>
- GetNetworkTaskRunner() const OVERRIDE;
-
- protected:
- virtual ~RequestContextGetter();
-
- private:
- scoped_refptr<net::URLRequestContextGetter> baseline_context_getter_;
- const scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
- // User agent to apply to the net::URLRequestContext.
- const std::string user_agent_;
-
- // Lazily initialized by GetURLRequestContext().
- scoped_ptr<RequestContext> context_;
-
- DISALLOW_COPY_AND_ASSIGN(RequestContextGetter);
- };
-
- HttpBridge(RequestContextGetter* context,
- const NetworkTimeUpdateCallback& network_time_update_callback);
-
- // HttpPostProvider implementation.
- virtual void SetExtraRequestHeaders(const char* headers) OVERRIDE;
- virtual void SetURL(const char* url, int port) OVERRIDE;
- virtual void SetPostPayload(const char* content_type, int content_length,
- const char* content) OVERRIDE;
- virtual bool MakeSynchronousPost(int* error_code,
- int* response_code) OVERRIDE;
- virtual void Abort() OVERRIDE;
-
- // WARNING: these response content methods are used to extract plain old data
- // and not null terminated strings, so you should make sure you have read
- // GetResponseContentLength() characters when using GetResponseContent. e.g
- // string r(b->GetResponseContent(), b->GetResponseContentLength()).
- virtual int GetResponseContentLength() const OVERRIDE;
- virtual const char* GetResponseContent() const OVERRIDE;
- virtual const std::string GetResponseHeaderValue(
- const std::string& name) const OVERRIDE;
-
- // net::URLFetcherDelegate implementation.
- virtual void OnURLFetchComplete(const net::URLFetcher* source) OVERRIDE;
-
- net::URLRequestContextGetter* GetRequestContextGetterForTest() const;
-
- protected:
- friend class base::RefCountedThreadSafe<HttpBridge>;
-
- virtual ~HttpBridge();
-
- // Protected virtual so the unit test can override to shunt network requests.
- virtual void MakeAsynchronousPost();
-
- private:
- friend class SyncHttpBridgeTest;
- friend class ::HttpBridgeTest;
-
- // Called on the IO loop to issue the network request. The extra level
- // of indirection is so that the unit test can override this behavior but we
- // still have a function to statically pass to PostTask.
- void CallMakeAsynchronousPost() { MakeAsynchronousPost(); }
-
- // Used to destroy a fetcher when the bridge is Abort()ed, to ensure that
- // a reference to |this| is held while flushing any pending fetch completion
- // callbacks coming from the IO thread en route to finally destroying the
- // fetcher.
- void DestroyURLFetcherOnIOThread(net::URLFetcher* fetcher);
-
- void UpdateNetworkTime();
-
- // The message loop of the thread we were created on. This is the thread that
- // will block on MakeSynchronousPost while the IO thread fetches data from
- // the network.
- // This should be the main syncer thread (SyncerThread) which is what blocks
- // on network IO through curl_easy_perform.
- base::MessageLoop* const created_on_loop_;
-
- // The URL to POST to.
- GURL url_for_request_;
-
- // POST payload information.
- std::string content_type_;
- std::string request_content_;
- std::string extra_headers_;
-
- // A waitable event we use to provide blocking semantics to
- // MakeSynchronousPost. We block created_on_loop_ while the IO loop fetches
- // network request.
- base::WaitableEvent http_post_completed_;
-
- struct URLFetchState {
- URLFetchState();
- ~URLFetchState();
- // Our hook into the network layer is a URLFetcher. USED ONLY ON THE IO
- // LOOP, so we can block created_on_loop_ while the fetch is in progress.
- // NOTE: This is not a scoped_ptr for a reason. It must be deleted on the
- // same thread that created it, which isn't the same thread |this| gets
- // deleted on. We must manually delete url_poster_ on the IO loop.
- net::URLFetcher* url_poster;
-
- // Start and finish time of request. Set immediately before sending
- // request and after receiving response.
- base::Time start_time;
- base::Time end_time;
-
- // Used to support 'Abort' functionality.
- bool aborted;
-
- // Cached response data.
- bool request_completed;
- bool request_succeeded;
- int http_response_code;
- int error_code;
- std::string response_content;
- scoped_refptr<net::HttpResponseHeaders> response_headers;
- };
-
- // This lock synchronizes use of state involved in the flow to fetch a URL
- // using URLFetcher, including |fetch_state_| and
- // |context_getter_for_request_| on any thread, for example, this flow needs
- // to be synchronized to gracefully clean up URLFetcher and return
- // appropriate values in |error_code|.
- mutable base::Lock fetch_state_lock_;
- URLFetchState fetch_state_;
-
- // Gets a customized net::URLRequestContext for bridged requests. See
- // RequestContext definition for details.
- scoped_refptr<RequestContextGetter> context_getter_for_request_;
-
- const scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
-
- // Callback for updating network time.
- NetworkTimeUpdateCallback network_time_update_callback_;
-
- DISALLOW_COPY_AND_ASSIGN(HttpBridge);
-};
-
-class SYNC_EXPORT HttpBridgeFactory : public HttpPostProviderFactory,
- public CancelationObserver {
- public:
- HttpBridgeFactory(
- net::URLRequestContextGetter* baseline_context_getter,
- const NetworkTimeUpdateCallback& network_time_update_callback,
- CancelationSignal* cancelation_signal);
- virtual ~HttpBridgeFactory();
-
- // HttpPostProviderFactory:
- virtual void Init(const std::string& user_agent) OVERRIDE;
- virtual HttpPostProviderInterface* Create() OVERRIDE;
- virtual void Destroy(HttpPostProviderInterface* http) OVERRIDE;
-
- // CancelationObserver implementation:
- virtual void OnSignalReceived() OVERRIDE;
-
- private:
- // Protects |request_context_getter_| and |baseline_request_context_getter_|.
- base::Lock context_getter_lock_;
-
- // This request context is the starting point for the request_context_getter_
- // that we eventually use to make requests. During shutdown we must drop all
- // references to it before the ProfileSyncService's Shutdown() call is
- // complete.
- scoped_refptr<net::URLRequestContextGetter> baseline_request_context_getter_;
-
- // This request context is built on top of the baseline context and shares
- // common components. Takes a reference to the
- // baseline_request_context_getter_. It's mostly used on sync thread when
- // creating connection but is released as soon as possible during shutdown.
- // Protected by |context_getter_lock_|.
- scoped_refptr<HttpBridge::RequestContextGetter> request_context_getter_;
-
- NetworkTimeUpdateCallback network_time_update_callback_;
-
- CancelationSignal* const cancelation_signal_;
-
- DISALLOW_COPY_AND_ASSIGN(HttpBridgeFactory);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_H_
diff --git a/chromium/sync/internal_api/public/http_bridge_network_resources.h b/chromium/sync/internal_api/public/http_bridge_network_resources.h
deleted file mode 100644
index 06d8284ba5c..00000000000
--- a/chromium/sync/internal_api/public/http_bridge_network_resources.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_NETWORK_RESOURCES_H_
-#define SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_NETWORK_RESOURCES_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/network_resources.h"
-#include "sync/internal_api/public/network_time_update_callback.h"
-
-namespace net {
-class URLRequestContextGetter;
-} // namespace net
-
-namespace syncer {
-
-class CancelationSignal;
-class HttpPostProviderFactory;
-
-class SYNC_EXPORT HttpBridgeNetworkResources : public NetworkResources {
- public:
- virtual ~HttpBridgeNetworkResources();
-
- // NetworkResources
- virtual scoped_ptr<HttpPostProviderFactory> GetHttpPostProviderFactory(
- net::URLRequestContextGetter* baseline_context_getter,
- const NetworkTimeUpdateCallback& network_time_update_callback,
- CancelationSignal* cancelation_signal) OVERRIDE;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_HTTP_BRIDGE_NETWORK_RESOURCES_H_
diff --git a/chromium/sync/internal_api/public/http_post_provider_factory.h b/chromium/sync/internal_api/public/http_post_provider_factory.h
deleted file mode 100644
index 93466514ffb..00000000000
--- a/chromium/sync/internal_api/public/http_post_provider_factory.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_HTTP_POST_PROVIDER_FACTORY_H_
-#define SYNC_INTERNAL_API_PUBLIC_HTTP_POST_PROVIDER_FACTORY_H_
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class HttpPostProviderInterface;
-
-// A factory to create HttpPostProviders to hide details about the
-// implementations and dependencies.
-// A factory instance itself should be owned by whomever uses it to create
-// HttpPostProviders.
-class SYNC_EXPORT HttpPostProviderFactory {
- public:
- virtual ~HttpPostProviderFactory() {}
-
- virtual void Init(const std::string& user_agent) = 0;
-
- // Obtain a new HttpPostProviderInterface instance, owned by caller.
- virtual HttpPostProviderInterface* Create() = 0;
-
- // When the interface is no longer needed (ready to be cleaned up), clients
- // must call Destroy().
- // This allows actual HttpPostProvider subclass implementations to be
- // reference counted, which is useful if a particular implementation uses
- // multiple threads to serve network requests.
- virtual void Destroy(HttpPostProviderInterface* http) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_HTTP_POST_PROVIDER_FACTORY_H_
diff --git a/chromium/sync/internal_api/public/http_post_provider_interface.h b/chromium/sync/internal_api/public/http_post_provider_interface.h
deleted file mode 100644
index 7e7ba3c965e..00000000000
--- a/chromium/sync/internal_api/public/http_post_provider_interface.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_HTTP_POST_PROVIDER_INTERFACE_H_
-#define SYNC_INTERNAL_API_PUBLIC_HTTP_POST_PROVIDER_INTERFACE_H_
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// An interface the embedding application (e.g. Chromium) implements to provide
-// required HTTP POST functionality to the syncer backend. This interface is
-// designed for one-time use. You create one, use it, and create another if you
-// want to make a subsequent POST.
-class SYNC_EXPORT_PRIVATE HttpPostProviderInterface {
- public:
- // Add additional headers to the request.
- virtual void SetExtraRequestHeaders(const char* headers) = 0;
-
- // Set the URL to POST to.
- virtual void SetURL(const char* url, int port) = 0;
-
- // Set the type, length and content of the POST payload.
- // |content_type| is a null-terminated MIME type specifier.
- // |content| is a data buffer; Do not interpret as a null-terminated string.
- // |content_length| is the total number of chars in |content|. It is used to
- // assign/copy |content| data.
- virtual void SetPostPayload(const char* content_type,
- int content_length,
- const char* content) = 0;
-
- // Returns true if the URL request succeeded. If the request failed,
- // error() may be non-zero and hence contain more information.
- virtual bool MakeSynchronousPost(int* error_code, int* response_code) = 0;
-
- // Get the length of the content returned in the HTTP response.
- // This does not count the trailing null-terminating character returned
- // by GetResponseContent, so it is analogous to calling string.length.
- virtual int GetResponseContentLength() const = 0;
-
- // Get the content returned in the HTTP response.
- // This is a null terminated string of characters.
- // Value should be copied.
- virtual const char* GetResponseContent() const = 0;
-
- // Get the value of a header returned in the HTTP response.
- // If the header is not present, returns the empty string.
- virtual const std::string GetResponseHeaderValue(
- const std::string& name) const = 0;
-
- // Abandon any pending POST and unblock caller in MakeSynchronousPost.
- // This must be safe to call from any thread.
- virtual void Abort() = 0;
-
- protected:
- virtual ~HttpPostProviderInterface() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_HTTP_POST_PROVIDER_INTERFACE_H_
diff --git a/chromium/sync/internal_api/public/internal_components_factory.h b/chromium/sync/internal_api/public/internal_components_factory.h
deleted file mode 100644
index 616457d98a2..00000000000
--- a/chromium/sync/internal_api/public/internal_components_factory.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// InternalComponentsFactory exists so that tests can override creation of
-// components used by the SyncManager that are not exposed across the sync
-// API boundary.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_INTERNAL_COMPONENTS_FACTORY_H_
-#define SYNC_INTERNAL_API_PUBLIC_INTERNAL_COMPONENTS_FACTORY_H_
-
-#include <string>
-#include <vector>
-
-#include "base/files/file_path.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-
-namespace syncer {
-
-class ExtensionsActivity;
-class ServerConnectionManager;
-class SyncEngineEventListener;
-class CancelationSignal;
-class SyncScheduler;
-class TrafficRecorder;
-
-namespace sessions {
-class DebugInfoGetter;
-class SyncSessionContext;
-}
-
-namespace syncable {
-class Directory;
-class DirectoryBackingStore;
-}
-
-class SYNC_EXPORT InternalComponentsFactory {
- public:
- enum EncryptionMethod {
- ENCRYPTION_LEGACY,
- // Option to enable support for keystore key based encryption.
- ENCRYPTION_KEYSTORE
- };
-
- enum BackoffOverride {
- BACKOFF_NORMAL,
- // Use this value for integration testing to avoid long delays /
- // timing out tests. Uses kInitialBackoffShortRetrySeconds (see
- // polling_constants.h) for all initial retries.
- BACKOFF_SHORT_INITIAL_RETRY_OVERRIDE
- };
-
- enum PreCommitUpdatesPolicy {
- // By default, the server will enable or disable this experiment through the
- // sync protocol's experiments data type.
- SERVER_CONTROLLED_PRE_COMMIT_UPDATE_AVOIANCE,
-
- // This flag overrides the server's decision and enables the pre-commit
- // update avoidance experiment.
- FORCE_ENABLE_PRE_COMMIT_UPDATE_AVOIDANCE,
- };
-
- // Configuration options for internal components. This struct is expected
- // to grow and shrink over time with transient features / experiments,
- // roughly following command line flags in chrome. Implementations of
- // InternalComponentsFactory can use this information to build components
- // with appropriate bells and whistles.
- struct Switches {
- EncryptionMethod encryption_method;
- BackoffOverride backoff_override;
- PreCommitUpdatesPolicy pre_commit_updates_policy;
- };
-
- virtual ~InternalComponentsFactory() {}
-
- virtual scoped_ptr<SyncScheduler> BuildScheduler(
- const std::string& name,
- sessions::SyncSessionContext* context,
- CancelationSignal* cancelation_signal) = 0;
-
- virtual scoped_ptr<sessions::SyncSessionContext> BuildContext(
- ServerConnectionManager* connection_manager,
- syncable::Directory* directory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- const std::vector<SyncEngineEventListener*>& listeners,
- sessions::DebugInfoGetter* debug_info_getter,
- TrafficRecorder* traffic_recorder,
- const std::string& invalidator_client_id) = 0;
-
- virtual scoped_ptr<syncable::DirectoryBackingStore>
- BuildDirectoryBackingStore(
- const std::string& dir_name,
- const base::FilePath& backing_filepath) = 0;
-
- // Returns the Switches struct that this object is using as configuration, if
- // the implementation is making use of one.
- virtual Switches GetSwitches() const = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_INTERNAL_COMPONENTS_FACTORY_H_
diff --git a/chromium/sync/internal_api/public/internal_components_factory_impl.h b/chromium/sync/internal_api/public/internal_components_factory_impl.h
deleted file mode 100644
index 7b5c6978ed5..00000000000
--- a/chromium/sync/internal_api/public/internal_components_factory_impl.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// An InternalComponentsFactory implementation designed for real production /
-// normal use.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_INTERNAL_COMPONENTS_FACTORY_IMPL_H_
-#define SYNC_INTERNAL_API_PUBLIC_INTERNAL_COMPONENTS_FACTORY_IMPL_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/internal_components_factory.h"
-
-namespace syncer {
-
-class SYNC_EXPORT InternalComponentsFactoryImpl
- : public InternalComponentsFactory {
- public:
- InternalComponentsFactoryImpl(const Switches& switches);
- virtual ~InternalComponentsFactoryImpl();
-
- virtual scoped_ptr<SyncScheduler> BuildScheduler(
- const std::string& name,
- sessions::SyncSessionContext* context,
- syncer::CancelationSignal* cancelation_signal) OVERRIDE;
-
- virtual scoped_ptr<sessions::SyncSessionContext> BuildContext(
- ServerConnectionManager* connection_manager,
- syncable::Directory* directory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- const std::vector<SyncEngineEventListener*>& listeners,
- sessions::DebugInfoGetter* debug_info_getter,
- TrafficRecorder* traffic_recorder,
- const std::string& invalidator_client_id) OVERRIDE;
-
- virtual scoped_ptr<syncable::DirectoryBackingStore>
- BuildDirectoryBackingStore(
- const std::string& dir_name,
- const base::FilePath& backing_filepath) OVERRIDE;
-
- virtual Switches GetSwitches() const OVERRIDE;
-
- private:
- const Switches switches_;
- DISALLOW_COPY_AND_ASSIGN(InternalComponentsFactoryImpl);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_INTERNAL_COMPONENTS_FACTORY_IMPL_H_
diff --git a/chromium/sync/internal_api/public/network_resources.h b/chromium/sync/internal_api/public/network_resources.h
deleted file mode 100644
index 448188c61a4..00000000000
--- a/chromium/sync/internal_api/public/network_resources.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_NETWORK_RESOURCES_H_
-#define SYNC_INTERNAL_API_PUBLIC_NETWORK_RESOURCES_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/network_time_update_callback.h"
-
-namespace net {
-class URLRequestContextGetter;
-} // namespace net
-
-namespace syncer {
-
-class CancelationSignal;
-class HttpPostProviderFactory;
-
-class SYNC_EXPORT NetworkResources {
- public:
- virtual ~NetworkResources() {}
-
- virtual scoped_ptr<HttpPostProviderFactory> GetHttpPostProviderFactory(
- net::URLRequestContextGetter* baseline_context_getter,
- const NetworkTimeUpdateCallback& network_time_update_callback,
- CancelationSignal* cancelation_signal) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_NETWORK_RESOURCES_H_
diff --git a/chromium/sync/internal_api/public/network_time_update_callback.h b/chromium/sync/internal_api/public/network_time_update_callback.h
deleted file mode 100644
index 1efa2414982..00000000000
--- a/chromium/sync/internal_api/public/network_time_update_callback.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_NETWORK_TIME_UPDATE_CALLBACK_H_
-#define SYNC_INTERNAL_API_PUBLIC_NETWORK_TIME_UPDATE_CALLBACK_H_
-
-#include "base/callback.h"
-#include "base/time/time.h"
-
-namespace syncer {
-
-// TODO(pvalenzuela): Avoid duplication of this typedef by defining it in a
-// common location. This is duplicated here because its original definition in
-// NetworkTimeTracker cannot be depended on.
-//
-// Callback for updating the network time.
-// Params:
-// const base::Time& network_time - the new network time.
-// const base::TimeDelta& resolution - how precise the reading is.
-// const base::TimeDelta& latency - the http request's latency.
-typedef base::Callback<void(const base::Time&,
- const base::TimeDelta&,
- const base::TimeDelta&)> NetworkTimeUpdateCallback;
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_NETWORK_TIME_UPDATE_CALLBACK_H_
diff --git a/chromium/sync/internal_api/public/read_node.h b/chromium/sync/internal_api/public/read_node.h
deleted file mode 100644
index 50e7639e29b..00000000000
--- a/chromium/sync/internal_api/public/read_node.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_READ_NODE_H_
-#define SYNC_INTERNAL_API_PUBLIC_READ_NODE_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base_node.h"
-
-namespace syncer {
-
-// ReadNode wraps a syncable::Entry to provide the functionality of a
-// read-only BaseNode.
-class SYNC_EXPORT ReadNode : public BaseNode {
- public:
- // Create an unpopulated ReadNode on the given transaction. Call some flavor
- // of Init to populate the ReadNode with a database entry.
- explicit ReadNode(const BaseTransaction* transaction);
- virtual ~ReadNode();
-
- // A client must use one (and only one) of the following Init variants to
- // populate the node.
-
- // BaseNode implementation.
- virtual InitByLookupResult InitByIdLookup(int64 id) OVERRIDE;
- virtual InitByLookupResult InitByClientTagLookup(
- ModelType model_type,
- const std::string& tag) OVERRIDE;
-
- // There is always a root node, so this can't fail. The root node is
- // never mutable, so root lookup is only possible on a ReadNode.
- void InitByRootLookup();
-
- // Each server-created permanent node is tagged with a unique string.
- // Look up the node with the particular tag. If it does not exist,
- // return false.
- InitByLookupResult InitByTagLookup(const std::string& tag);
-
- // Implementation of BaseNode's abstract virtual accessors.
- virtual const syncable::Entry* GetEntry() const OVERRIDE;
- virtual const BaseTransaction* GetTransaction() const OVERRIDE;
-
- protected:
- ReadNode();
-
- private:
- void* operator new(size_t size); // Node is meant for stack use only.
-
- // The underlying syncable object which this class wraps.
- syncable::Entry* entry_;
-
- // The sync API transaction that is the parent of this node.
- const BaseTransaction* transaction_;
-
- DISALLOW_COPY_AND_ASSIGN(ReadNode);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_READ_NODE_H_
diff --git a/chromium/sync/internal_api/public/read_transaction.h b/chromium/sync/internal_api/public/read_transaction.h
deleted file mode 100644
index 02e2633d8ef..00000000000
--- a/chromium/sync/internal_api/public/read_transaction.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_READ_TRANSACTION_H_
-#define SYNC_INTERNAL_API_PUBLIC_READ_TRANSACTION_H_
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base_transaction.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-struct UserShare;
-
-// Sync API's ReadTransaction is a read-only BaseTransaction. It wraps
-// a syncable::ReadTransaction.
-class SYNC_EXPORT ReadTransaction : public BaseTransaction {
- public:
- // Start a new read-only transaction on the specified repository.
- ReadTransaction(const tracked_objects::Location& from_here,
- UserShare* share);
-
- // Resume the middle of a transaction. Will not close transaction.
- ReadTransaction(UserShare* share, syncable::BaseTransaction* trans);
-
- virtual ~ReadTransaction();
-
- // BaseTransaction override.
- virtual syncable::BaseTransaction* GetWrappedTrans() const OVERRIDE;
-
- // Return |transaction_version| of |type| stored in sync directory's
- // persisted info.
- int64 GetModelVersion(ModelType type);
-
- private:
- void* operator new(size_t size); // Transaction is meant for stack use only.
-
- // The underlying syncable object which this class wraps.
- syncable::BaseTransaction* transaction_;
- bool close_transaction_;
-
- DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_READ_TRANSACTION_H_
diff --git a/chromium/sync/internal_api/public/sessions/model_neutral_state.cc b/chromium/sync/internal_api/public/sessions/model_neutral_state.cc
deleted file mode 100644
index fa2b0192ff1..00000000000
--- a/chromium/sync/internal_api/public/sessions/model_neutral_state.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/sessions/model_neutral_state.h"
-
-namespace syncer {
-namespace sessions {
-
-ModelNeutralState::ModelNeutralState()
- : num_successful_commits(0),
- num_successful_bookmark_commits(0),
- num_updates_downloaded_total(0),
- num_tombstone_updates_downloaded_total(0),
- num_reflected_updates_downloaded_total(0),
- num_updates_applied(0),
- num_encryption_conflicts(0),
- num_server_conflicts(0),
- num_hierarchy_conflicts(0),
- num_local_overwrites(0),
- num_server_overwrites(0),
- last_get_key_result(UNSET),
- last_download_updates_result(UNSET),
- commit_result(UNSET),
- items_committed(false),
- num_server_changes_remaining(0) {
-}
-
-ModelNeutralState::~ModelNeutralState() {}
-
-bool HasSyncerError(const ModelNeutralState& state) {
- const bool get_key_error = SyncerErrorIsError(state.last_get_key_result);
- const bool download_updates_error =
- SyncerErrorIsError(state.last_download_updates_result);
- const bool commit_error = SyncerErrorIsError(state.commit_result);
- return get_key_error || download_updates_error || commit_error;
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/sessions/model_neutral_state.h b/chromium/sync/internal_api/public/sessions/model_neutral_state.h
deleted file mode 100644
index 4979d3ad101..00000000000
--- a/chromium/sync/internal_api/public/sessions/model_neutral_state.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SESSIONS_MODEL_NEUTRAL_STATE_H
-#define SYNC_SESSIONS_MODEL_NEUTRAL_STATE_H
-
-#include "base/basictypes.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/syncer_error.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/protocol/sync_protocol_error.h"
-
-namespace syncer {
-namespace sessions {
-
-// Grouping of all state that applies to all model types. Note that some
-// components of the global grouping can internally implement finer grained
-// scope control, but the top level entity is still a singleton with respect to
-// model types.
-struct SYNC_EXPORT ModelNeutralState {
- ModelNeutralState();
- ~ModelNeutralState();
-
- // The set of types for which commits were sent to the server.
- ModelTypeSet commit_request_types;
-
- int num_successful_commits;
-
- // This is needed for monitoring extensions activity.
- int num_successful_bookmark_commits;
-
- // Download event counters.
- int num_updates_downloaded_total;
- int num_tombstone_updates_downloaded_total;
- int num_reflected_updates_downloaded_total;
-
- // If the syncer encountered a MIGRATION_DONE code, these are the types that
- // the client must now "migrate", by purging and re-downloading all updates.
- ModelTypeSet types_needing_local_migration;
-
- // Update application and conflicts.
- int num_updates_applied;
- int num_encryption_conflicts;
- int num_server_conflicts;
- int num_hierarchy_conflicts;
-
- // Overwrites due to conflict resolution counters.
- int num_local_overwrites;
- int num_server_overwrites;
-
- // Any protocol errors that we received during this sync session.
- SyncProtocolError sync_protocol_error;
-
- // Records the most recent results of GetKey, PostCommit and GetUpdates
- // commands.
- SyncerError last_get_key_result;
- SyncerError last_download_updates_result;
- SyncerError commit_result;
-
- // Set to true by PostCommitMessageCommand if any commits were successful.
- bool items_committed;
-
- // Number of changes remaining, according to the server.
- // Take it as an estimate unless it's value is zero, in which case there
- // really is nothing more to download.
- int64 num_server_changes_remaining;
-};
-
-bool HasSyncerError(const ModelNeutralState& state);
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_MODEL_NEUTRAL_STATE_H_
diff --git a/chromium/sync/internal_api/public/sessions/sync_session_snapshot.cc b/chromium/sync/internal_api/public/sessions/sync_session_snapshot.cc
deleted file mode 100644
index 97bc094ddda..00000000000
--- a/chromium/sync/internal_api/public/sessions/sync_session_snapshot.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-
-#include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-#include "sync/protocol/proto_enum_conversions.h"
-
-namespace syncer {
-namespace sessions {
-
-SyncSessionSnapshot::SyncSessionSnapshot()
- : is_silenced_(false),
- num_encryption_conflicts_(0),
- num_hierarchy_conflicts_(0),
- num_server_conflicts_(0),
- notifications_enabled_(false),
- num_entries_(0),
- num_entries_by_type_(MODEL_TYPE_COUNT, 0),
- num_to_delete_entries_by_type_(MODEL_TYPE_COUNT, 0),
- is_initialized_(false) {
-}
-
-SyncSessionSnapshot::SyncSessionSnapshot(
- const ModelNeutralState& model_neutral_state,
- const ProgressMarkerMap& download_progress_markers,
- bool is_silenced,
- int num_encryption_conflicts,
- int num_hierarchy_conflicts,
- int num_server_conflicts,
- bool notifications_enabled,
- size_t num_entries,
- base::Time sync_start_time,
- const std::vector<int>& num_entries_by_type,
- const std::vector<int>& num_to_delete_entries_by_type,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource legacy_updates_source)
- : model_neutral_state_(model_neutral_state),
- download_progress_markers_(download_progress_markers),
- is_silenced_(is_silenced),
- num_encryption_conflicts_(num_encryption_conflicts),
- num_hierarchy_conflicts_(num_hierarchy_conflicts),
- num_server_conflicts_(num_server_conflicts),
- notifications_enabled_(notifications_enabled),
- num_entries_(num_entries),
- sync_start_time_(sync_start_time),
- num_entries_by_type_(num_entries_by_type),
- num_to_delete_entries_by_type_(num_to_delete_entries_by_type),
- legacy_updates_source_(legacy_updates_source),
- is_initialized_(true) {
-}
-
-SyncSessionSnapshot::~SyncSessionSnapshot() {}
-
-base::DictionaryValue* SyncSessionSnapshot::ToValue() const {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetInteger("numSuccessfulCommits",
- model_neutral_state_.num_successful_commits);
- value->SetInteger("numSuccessfulBookmarkCommits",
- model_neutral_state_.num_successful_bookmark_commits);
- value->SetInteger("numUpdatesDownloadedTotal",
- model_neutral_state_.num_updates_downloaded_total);
- value->SetInteger("numTombstoneUpdatesDownloadedTotal",
- model_neutral_state_.num_tombstone_updates_downloaded_total);
- value->SetInteger("numReflectedUpdatesDownloadedTotal",
- model_neutral_state_.num_reflected_updates_downloaded_total);
- value->SetInteger("numLocalOverwrites",
- model_neutral_state_.num_local_overwrites);
- value->SetInteger("numServerOverwrites",
- model_neutral_state_.num_server_overwrites);
- value->SetInteger(
- "numServerChangesRemaining",
- static_cast<int>(model_neutral_state_.num_server_changes_remaining));
- value->Set("downloadProgressMarkers",
- ProgressMarkerMapToValue(download_progress_markers_).release());
- value->SetBoolean("isSilenced", is_silenced_);
- // We don't care too much if we lose precision here, also.
- value->SetInteger("numEncryptionConflicts",
- num_encryption_conflicts_);
- value->SetInteger("numHierarchyConflicts",
- num_hierarchy_conflicts_);
- value->SetInteger("numServerConflicts",
- num_server_conflicts_);
- value->SetInteger("numEntries", num_entries_);
- value->SetString("legacySource",
- GetUpdatesSourceString(legacy_updates_source_));
- value->SetBoolean("notificationsEnabled", notifications_enabled_);
-
- scoped_ptr<base::DictionaryValue> counter_entries(
- new base::DictionaryValue());
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; i++) {
- scoped_ptr<base::DictionaryValue> type_entries(new base::DictionaryValue());
- type_entries->SetInteger("numEntries", num_entries_by_type_[i]);
- type_entries->SetInteger("numToDeleteEntries",
- num_to_delete_entries_by_type_[i]);
-
- const std::string model_type = ModelTypeToString(static_cast<ModelType>(i));
- counter_entries->Set(model_type, type_entries.release());
- }
- value->Set("counter_entries", counter_entries.release());
- return value.release();
-}
-
-std::string SyncSessionSnapshot::ToString() const {
- scoped_ptr<base::DictionaryValue> value(ToValue());
- std::string json;
- base::JSONWriter::WriteWithOptions(value.get(),
- base::JSONWriter::OPTIONS_PRETTY_PRINT,
- &json);
- return json;
-}
-
-int64 SyncSessionSnapshot::num_server_changes_remaining() const {
- return model_neutral_state().num_server_changes_remaining;
-}
-
-const ProgressMarkerMap&
- SyncSessionSnapshot::download_progress_markers() const {
- return download_progress_markers_;
-}
-
-bool SyncSessionSnapshot::is_silenced() const {
- return is_silenced_;
-}
-
-int SyncSessionSnapshot::num_encryption_conflicts() const {
- return num_encryption_conflicts_;
-}
-
-int SyncSessionSnapshot::num_hierarchy_conflicts() const {
- return num_hierarchy_conflicts_;
-}
-
-int SyncSessionSnapshot::num_server_conflicts() const {
- return num_server_conflicts_;
-}
-
-bool SyncSessionSnapshot::notifications_enabled() const {
- return notifications_enabled_;
-}
-
-size_t SyncSessionSnapshot::num_entries() const {
- return num_entries_;
-}
-
-base::Time SyncSessionSnapshot::sync_start_time() const {
- return sync_start_time_;
-}
-
-bool SyncSessionSnapshot::is_initialized() const {
- return is_initialized_;
-}
-
-const std::vector<int>& SyncSessionSnapshot::num_entries_by_type() const {
- return num_entries_by_type_;
-}
-
-const std::vector<int>&
-SyncSessionSnapshot::num_to_delete_entries_by_type() const {
- return num_to_delete_entries_by_type_;
-}
-
-sync_pb::GetUpdatesCallerInfo::GetUpdatesSource
-SyncSessionSnapshot::legacy_updates_source() const {
- return legacy_updates_source_;
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/sessions/sync_session_snapshot.h b/chromium/sync/internal_api/public/sessions/sync_session_snapshot.h
deleted file mode 100644
index 97a97aa5965..00000000000
--- a/chromium/sync/internal_api/public/sessions/sync_session_snapshot.h
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_SESSIONS_SYNC_SESSION_SNAPSHOT_H_
-#define SYNC_INTERNAL_API_PUBLIC_SESSIONS_SYNC_SESSION_SNAPSHOT_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/progress_marker_map.h"
-#include "sync/internal_api/public/sessions/model_neutral_state.h"
-
-namespace base {
-class DictionaryValue;
-}
-
-namespace syncer {
-namespace sessions {
-
-// An immutable snapshot of state from a SyncSession. Convenient to use as
-// part of notifications as it is inherently thread-safe.
-// TODO(zea): if copying this all over the place starts getting expensive,
-// consider passing around immutable references instead of values.
-// Default copy and assign welcome.
-class SYNC_EXPORT SyncSessionSnapshot {
- public:
- SyncSessionSnapshot();
- SyncSessionSnapshot(
- const ModelNeutralState& model_neutral_state,
- const ProgressMarkerMap& download_progress_markers,
- bool is_silenced,
- int num_encryption_conflicts,
- int num_hierarchy_conflicts,
- int num_server_conflicts,
- bool notifications_enabled,
- size_t num_entries,
- base::Time sync_start_time,
- const std::vector<int>& num_entries_by_type,
- const std::vector<int>& num_to_delete_entries_by_type,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource legacy_updates_source);
- ~SyncSessionSnapshot();
-
- // Caller takes ownership of the returned dictionary.
- base::DictionaryValue* ToValue() const;
-
- std::string ToString() const;
-
- ModelNeutralState model_neutral_state() const {
- return model_neutral_state_;
- }
- int64 num_server_changes_remaining() const;
- const ProgressMarkerMap& download_progress_markers() const;
- bool is_silenced() const;
- int num_encryption_conflicts() const;
- int num_hierarchy_conflicts() const;
- int num_server_conflicts() const;
- bool notifications_enabled() const;
- size_t num_entries() const;
- base::Time sync_start_time() const;
- const std::vector<int>& num_entries_by_type() const;
- const std::vector<int>& num_to_delete_entries_by_type() const;
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource legacy_updates_source() const;
-
- // Set iff this snapshot was not built using the default constructor.
- bool is_initialized() const;
-
- private:
- ModelNeutralState model_neutral_state_;
- ProgressMarkerMap download_progress_markers_;
- bool is_silenced_;
- int num_encryption_conflicts_;
- int num_hierarchy_conflicts_;
- int num_server_conflicts_;
- bool notifications_enabled_;
- size_t num_entries_;
- base::Time sync_start_time_;
-
- std::vector<int> num_entries_by_type_;
- std::vector<int> num_to_delete_entries_by_type_;
-
- // This enum value used to be an important part of the sync protocol, but is
- // now deprecated. We continue to use it in the snapshot because there is
- // still some value in displaying it on the about:sync page.
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource legacy_updates_source_;
-
- bool is_initialized_;
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_SESSIONS_SYNC_SESSION_SNAPSHOT_H_
diff --git a/chromium/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc b/chromium/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
deleted file mode 100644
index 881ab0193ef..00000000000
--- a/chromium/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace sessions {
-namespace {
-
-using base::ExpectDictBooleanValue;
-using base::ExpectDictDictionaryValue;
-using base::ExpectDictIntegerValue;
-using base::ExpectDictListValue;
-using base::ExpectDictStringValue;
-
-class SyncSessionSnapshotTest : public testing::Test {};
-
-TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
- ModelNeutralState model_neutral;
- model_neutral.num_server_changes_remaining = 105;
- model_neutral.num_successful_commits = 5;
- model_neutral.num_successful_bookmark_commits = 10;
- model_neutral.num_updates_downloaded_total = 100;
- model_neutral.num_tombstone_updates_downloaded_total = 200;
- model_neutral.num_reflected_updates_downloaded_total = 50;
- model_neutral.num_local_overwrites = 15;
- model_neutral.num_server_overwrites = 18;
-
- ProgressMarkerMap download_progress_markers;
- download_progress_markers[BOOKMARKS] = "test";
- download_progress_markers[APPS] = "apps";
- scoped_ptr<base::DictionaryValue> expected_download_progress_markers_value(
- ProgressMarkerMapToValue(download_progress_markers));
-
- const bool kIsSilenced = true;
- const int kNumEncryptionConflicts = 1054;
- const int kNumHierarchyConflicts = 1055;
- const int kNumServerConflicts = 1057;
-
- SyncSessionSnapshot snapshot(model_neutral,
- download_progress_markers,
- kIsSilenced,
- kNumEncryptionConflicts,
- kNumHierarchyConflicts,
- kNumServerConflicts,
- false,
- 0,
- base::Time::Now(),
- std::vector<int>(MODEL_TYPE_COUNT,0),
- std::vector<int>(MODEL_TYPE_COUNT, 0),
- sync_pb::GetUpdatesCallerInfo::UNKNOWN);
- scoped_ptr<base::DictionaryValue> value(snapshot.ToValue());
- EXPECT_EQ(17u, value->size());
- ExpectDictIntegerValue(model_neutral.num_successful_commits,
- *value, "numSuccessfulCommits");
- ExpectDictIntegerValue(model_neutral.num_successful_bookmark_commits,
- *value, "numSuccessfulBookmarkCommits");
- ExpectDictIntegerValue(model_neutral.num_updates_downloaded_total,
- *value, "numUpdatesDownloadedTotal");
- ExpectDictIntegerValue(model_neutral.num_tombstone_updates_downloaded_total,
- *value, "numTombstoneUpdatesDownloadedTotal");
- ExpectDictIntegerValue(model_neutral.num_reflected_updates_downloaded_total,
- *value, "numReflectedUpdatesDownloadedTotal");
- ExpectDictIntegerValue(model_neutral.num_local_overwrites,
- *value, "numLocalOverwrites");
- ExpectDictIntegerValue(model_neutral.num_server_overwrites,
- *value, "numServerOverwrites");
- ExpectDictIntegerValue(model_neutral.num_server_changes_remaining,
- *value, "numServerChangesRemaining");
- ExpectDictDictionaryValue(*expected_download_progress_markers_value,
- *value, "downloadProgressMarkers");
- ExpectDictBooleanValue(kIsSilenced, *value, "isSilenced");
- ExpectDictIntegerValue(kNumEncryptionConflicts, *value,
- "numEncryptionConflicts");
- ExpectDictIntegerValue(kNumHierarchyConflicts, *value,
- "numHierarchyConflicts");
- ExpectDictIntegerValue(kNumServerConflicts, *value,
- "numServerConflicts");
- ExpectDictBooleanValue(false, *value, "notificationsEnabled");
-}
-
-} // namespace
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/sync_encryption_handler.cc b/chromium/sync/internal_api/public/sync_encryption_handler.cc
deleted file mode 100644
index e967600d65d..00000000000
--- a/chromium/sync/internal_api/public/sync_encryption_handler.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/sync_encryption_handler.h"
-
-namespace syncer {
-
-SyncEncryptionHandler::Observer::Observer() {}
-SyncEncryptionHandler::Observer::~Observer() {}
-
-SyncEncryptionHandler::SyncEncryptionHandler() {}
-SyncEncryptionHandler::~SyncEncryptionHandler() {}
-
-// Static.
-ModelTypeSet SyncEncryptionHandler::SensitiveTypes() {
- // It has its own encryption scheme, but we include it anyway.
- ModelTypeSet types;
- types.Put(PASSWORDS);
- return types;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/sync_encryption_handler.h b/chromium/sync/internal_api/public/sync_encryption_handler.h
deleted file mode 100644
index cd1cfc04f02..00000000000
--- a/chromium/sync/internal_api/public/sync_encryption_handler.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_SYNC_ENCRYPTION_HANDLER_H_
-#define SYNC_INTERNAL_API_PUBLIC_SYNC_ENCRYPTION_HANDLER_H_
-
-#include <string>
-
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace sync_pb {
-class EncryptedData;
-}
-
-namespace syncer {
-
-class Cryptographer;
-
-// Reasons due to which Cryptographer might require a passphrase.
-enum PassphraseRequiredReason {
- REASON_PASSPHRASE_NOT_REQUIRED = 0, // Initial value.
- REASON_ENCRYPTION = 1, // The cryptographer requires a
- // passphrase for its first attempt at
- // encryption. Happens only during
- // migration or upgrade.
- REASON_DECRYPTION = 2, // The cryptographer requires a
- // passphrase for its first attempt at
- // decryption.
-};
-
-// The different states for the encryption passphrase. These control if and how
-// the user should be prompted for a decryption passphrase.
-enum PassphraseType {
- IMPLICIT_PASSPHRASE = 0, // GAIA-based passphrase (deprecated).
- KEYSTORE_PASSPHRASE = 1, // Keystore passphrase.
- FROZEN_IMPLICIT_PASSPHRASE = 2, // Frozen GAIA passphrase.
- CUSTOM_PASSPHRASE = 3, // User-provided passphrase.
-};
-
-// Enum used to distinguish which bootstrap encryption token is being updated.
-enum BootstrapTokenType {
- PASSPHRASE_BOOTSTRAP_TOKEN,
- KEYSTORE_BOOTSTRAP_TOKEN
-};
-
-// Sync's encryption handler. Handles tracking encrypted types, ensuring the
-// cryptographer encrypts with the proper key and has the most recent keybag,
-// and keeps the nigori node up to date.
-// Implementations of this class must be assumed to be non-thread-safe. All
-// methods must be invoked on the sync thread.
-class SYNC_EXPORT SyncEncryptionHandler {
- public:
- // All Observer methods are done synchronously from within a transaction and
- // on the sync thread.
- class SYNC_EXPORT Observer {
- public:
- Observer();
-
- // Called when user interaction is required to obtain a valid passphrase.
- // - If the passphrase is required for encryption, |reason| will be
- // REASON_ENCRYPTION.
- // - If the passphrase is required for the decryption of data that has
- // already been encrypted, |reason| will be REASON_DECRYPTION.
- // - If the passphrase is required because decryption failed, and a new
- // passphrase is required, |reason| will be REASON_SET_PASSPHRASE_FAILED.
- //
- // |pending_keys| is a copy of the cryptographer's pending keys, that may be
- // cached by the frontend for subsequent use by the UI.
- virtual void OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) = 0;
- // Called when the passphrase provided by the user has been accepted and is
- // now used to encrypt sync data.
-
- virtual void OnPassphraseAccepted() = 0;
- // |bootstrap_token| is an opaque base64 encoded representation of the key
- // generated by the current passphrase, and is provided to the observer for
- // persistence purposes and use in a future initialization of sync (e.g.
- // after restart). The boostrap token will always be derived from the most
- // recent GAIA password (for accounts with implicit passphrases), even if
- // the data is still encrypted with an older GAIA password. For accounts
- // with explicit passphrases, it will be the most recently seen custom
- // passphrase.
- virtual void OnBootstrapTokenUpdated(
- const std::string& bootstrap_token,
- BootstrapTokenType type) = 0;
-
- // Called when the set of encrypted types or the encrypt
- // everything flag has been changed. Note that encryption isn't
- // complete until the OnEncryptionComplete() notification has been
- // sent (see below).
- //
- // |encrypted_types| will always be a superset of
- // Cryptographer::SensitiveTypes(). If |encrypt_everything| is
- // true, |encrypted_types| will be the set of all known types.
- //
- // Until this function is called, observers can assume that the
- // set of encrypted types is Cryptographer::SensitiveTypes() and
- // that the encrypt everything flag is false.
- virtual void OnEncryptedTypesChanged(
- ModelTypeSet encrypted_types,
- bool encrypt_everything) = 0;
-
- // Called after we finish encrypting the current set of encrypted
- // types.
- virtual void OnEncryptionComplete() = 0;
-
- // The cryptographer has been updated. Listeners should check that their
- // own state matches the cryptographer.
- // Used primarily for debugging.
- virtual void OnCryptographerStateChanged(Cryptographer* cryptographer) = 0;
-
- // The passphrase type has changed. |type| is the new type,
- // |passphrase_time| is the time the passphrase was set (unset if |type|
- // is KEYSTORE_PASSPHRASE or the passphrase was set before we started
- // recording the time).
- virtual void OnPassphraseTypeChanged(PassphraseType type,
- base::Time passphrase_time) = 0;
-
- protected:
- virtual ~Observer();
- };
-
- SyncEncryptionHandler();
- virtual ~SyncEncryptionHandler();
-
- // Add/Remove SyncEncryptionHandler::Observers.
- virtual void AddObserver(Observer* observer) = 0;
- virtual void RemoveObserver(Observer* observer) = 0;
-
- // Reads the nigori node, updates internal state as needed, and, if an
- // empty/stale nigori node is detected, overwrites the existing
- // nigori node. Upon completion, if the cryptographer is still ready
- // attempts to re-encrypt all sync data.
- // Note: This method is expensive (it iterates through all encrypted types),
- // so should only be used sparingly (e.g. on startup).
- virtual void Init() = 0;
-
- // Attempts to re-encrypt encrypted data types using the passphrase provided.
- // Notifies observers of the result of the operation via OnPassphraseAccepted
- // or OnPassphraseRequired, updates the nigori node, and does re-encryption as
- // appropriate. If an explicit password has been set previously, we drop
- // subsequent requests to set a passphrase. If the cryptographer has pending
- // keys, and a new implicit passphrase is provided, we try decrypting the
- // pending keys with it, and if that fails, we cache the passphrase for
- // re-encryption once the pending keys are decrypted.
- virtual void SetEncryptionPassphrase(const std::string& passphrase,
- bool is_explicit) = 0;
-
- // Provides a passphrase for decrypting the user's existing sync data.
- // Notifies observers of the result of the operation via OnPassphraseAccepted
- // or OnPassphraseRequired, updates the nigori node, and does re-encryption as
- // appropriate if there is a previously cached encryption passphrase. It is an
- // error to call this when we don't have pending keys.
- virtual void SetDecryptionPassphrase(const std::string& passphrase) = 0;
-
- // Enables encryption of all datatypes.
- virtual void EnableEncryptEverything() = 0;
-
- // Whether encryption of all datatypes is enabled. If false, only sensitive
- // types are encrypted.
- virtual bool EncryptEverythingEnabled() const = 0;
-
- // Returns the current state of the passphrase needed to decrypt the
- // bag of encryption keys in the nigori node.
- virtual PassphraseType GetPassphraseType() const = 0;
-
- // The set of types that are always encrypted.
- static ModelTypeSet SensitiveTypes();
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_SYNC_ENCRYPTION_HANDLER_H_
diff --git a/chromium/sync/internal_api/public/sync_manager.cc b/chromium/sync/internal_api/public/sync_manager.cc
deleted file mode 100644
index 331a8af6067..00000000000
--- a/chromium/sync/internal_api/public/sync_manager.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/sync_manager.h"
-
-namespace syncer {
-
-SyncManager::ChangeDelegate::~ChangeDelegate() {}
-
-SyncManager::ChangeObserver::~ChangeObserver() {}
-
-SyncManager::Observer::~Observer() {}
-
-SyncManager::SyncManager() {}
-
-SyncManager::~SyncManager() {}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/sync_manager.h b/chromium/sync/internal_api/public/sync_manager.h
deleted file mode 100644
index a5e6926d695..00000000000
--- a/chromium/sync/internal_api/public/sync_manager.h
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_SYNC_MANAGER_H_
-#define SYNC_INTERNAL_API_PUBLIC_SYNC_MANAGER_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/callback_forward.h"
-#include "base/files/file_path.h"
-#include "base/memory/ref_counted.h"
-#include "base/task_runner.h"
-#include "base/threading/thread_checker.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/change_record.h"
-#include "sync/internal_api/public/configure_reason.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/engine/sync_status.h"
-#include "sync/internal_api/public/sync_encryption_handler.h"
-#include "sync/internal_api/public/util/report_unrecoverable_error_function.h"
-#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/protocol/sync_protocol_error.h"
-
-namespace sync_pb {
-class EncryptedData;
-} // namespace sync_pb
-
-namespace syncer {
-
-class BaseTransaction;
-class DataTypeDebugInfoListener;
-class Encryptor;
-struct Experiments;
-class ExtensionsActivity;
-class HttpPostProviderFactory;
-class InternalComponentsFactory;
-class JsBackend;
-class JsEventHandler;
-class SyncEncryptionHandler;
-class SyncScheduler;
-struct UserShare;
-class CancelationSignal;
-
-namespace sessions {
-class SyncSessionSnapshot;
-} // namespace sessions
-
-// Used by SyncManager::OnConnectionStatusChange().
-enum ConnectionStatus {
- CONNECTION_NOT_ATTEMPTED,
- CONNECTION_OK,
- CONNECTION_AUTH_ERROR,
- CONNECTION_SERVER_ERROR
-};
-
-// Contains everything needed to talk to and identify a user account.
-struct SyncCredentials {
- // The email associated with this account.
- std::string email;
- // The raw authentication token's bytes.
- std::string sync_token;
-};
-
-// SyncManager encapsulates syncable::Directory and serves as the parent of all
-// other objects in the sync API. If multiple threads interact with the same
-// local sync repository (i.e. the same sqlite database), they should share a
-// single SyncManager instance. The caller should typically create one
-// SyncManager for the lifetime of a user session.
-//
-// Unless stated otherwise, all methods of SyncManager should be called on the
-// same thread.
-class SYNC_EXPORT SyncManager : public syncer::InvalidationHandler {
- public:
- // An interface the embedding application implements to be notified
- // on change events. Note that these methods may be called on *any*
- // thread.
- class SYNC_EXPORT ChangeDelegate {
- public:
- // Notify the delegate that changes have been applied to the sync model.
- //
- // This will be invoked on the same thread as on which ApplyChanges was
- // called. |changes| is an array of size |change_count|, and contains the
- // ID of each individual item that was changed. |changes| exists only for
- // the duration of the call. If items of multiple data types change at
- // the same time, this method is invoked once per data type and |changes|
- // is restricted to items of the ModelType indicated by |model_type|.
- // Because the observer is passed a |trans|, the observer can assume a
- // read lock on the sync model that will be released after the function
- // returns.
- //
- // The SyncManager constructs |changes| in the following guaranteed order:
- //
- // 1. Deletions, from leaves up to parents.
- // 2. Updates to existing items with synced parents & predecessors.
- // 3. New items with synced parents & predecessors.
- // 4. Items with parents & predecessors in |changes|.
- // 5. Repeat #4 until all items are in |changes|.
- //
- // Thus, an implementation of OnChangesApplied should be able to
- // process the change records in the order without having to worry about
- // forward dependencies. But since deletions come before reparent
- // operations, a delete may temporarily orphan a node that is
- // updated later in the list.
- virtual void OnChangesApplied(
- ModelType model_type,
- int64 model_version,
- const BaseTransaction* trans,
- const ImmutableChangeRecordList& changes) = 0;
-
- // OnChangesComplete gets called when the TransactionComplete event is
- // posted (after OnChangesApplied finishes), after the transaction lock
- // and the change channel mutex are released.
- //
- // The purpose of this function is to support processors that require
- // split-transactions changes. For example, if a model processor wants to
- // perform blocking I/O due to a change, it should calculate the changes
- // while holding the transaction lock (from within OnChangesApplied), buffer
- // those changes, let the transaction fall out of scope, and then commit
- // those changes from within OnChangesComplete (postponing the blocking
- // I/O to when it no longer holds any lock).
- virtual void OnChangesComplete(ModelType model_type) = 0;
-
- protected:
- virtual ~ChangeDelegate();
- };
-
- // Like ChangeDelegate, except called only on the sync thread and
- // not while a transaction is held. For objects that want to know
- // when changes happen, but don't need to process them.
- class SYNC_EXPORT_PRIVATE ChangeObserver {
- public:
- // Ids referred to in |changes| may or may not be in the write
- // transaction specified by |write_transaction_id|. If they're
- // not, that means that the node didn't actually change, but we
- // marked them as changed for some other reason (e.g., siblings of
- // re-ordered nodes).
- //
- // TODO(sync, long-term): Ideally, ChangeDelegate/Observer would
- // be passed a transformed version of EntryKernelMutation instead
- // of a transaction that would have to be used to look up the
- // changed nodes. That is, ChangeDelegate::OnChangesApplied()
- // would still be called under the transaction, but all the needed
- // data will be passed down.
- //
- // Even more ideally, we would have sync semantics such that we'd
- // be able to apply changes without being under a transaction.
- // But that's a ways off...
- virtual void OnChangesApplied(
- ModelType model_type,
- int64 write_transaction_id,
- const ImmutableChangeRecordList& changes) = 0;
-
- virtual void OnChangesComplete(ModelType model_type) = 0;
-
- protected:
- virtual ~ChangeObserver();
- };
-
- // An interface the embedding application implements to receive
- // notifications from the SyncManager. Register an observer via
- // SyncManager::AddObserver. All methods are called only on the
- // sync thread.
- class SYNC_EXPORT Observer {
- public:
- // A round-trip sync-cycle took place and the syncer has resolved any
- // conflicts that may have arisen.
- virtual void OnSyncCycleCompleted(
- const sessions::SyncSessionSnapshot& snapshot) = 0;
-
- // Called when the status of the connection to the sync server has
- // changed.
- virtual void OnConnectionStatusChange(ConnectionStatus status) = 0;
-
- // Called when initialization is complete to the point that SyncManager can
- // process changes. This does not necessarily mean authentication succeeded
- // or that the SyncManager is online.
- // IMPORTANT: Creating any type of transaction before receiving this
- // notification is illegal!
- // WARNING: Calling methods on the SyncManager before receiving this
- // message, unless otherwise specified, produces undefined behavior.
- //
- // |js_backend| is what about:sync interacts with. It can emit
- // the following events:
-
- /**
- * @param {{ enabled: boolean }} details A dictionary containing:
- * - enabled: whether or not notifications are enabled.
- */
- // function onNotificationStateChange(details);
-
- /**
- * @param {{ changedTypes: Array.<string> }} details A dictionary
- * containing:
- * - changedTypes: a list of types (as strings) for which there
- are new updates.
- */
- // function onIncomingNotification(details);
-
- // Also, it responds to the following messages (all other messages
- // are ignored):
-
- /**
- * Gets the current notification state.
- *
- * @param {function(boolean)} callback Called with whether or not
- * notifications are enabled.
- */
- // function getNotificationState(callback);
-
- /**
- * Gets details about the root node.
- *
- * @param {function(!Object)} callback Called with details about the
- * root node.
- */
- // TODO(akalin): Change this to getRootNodeId or eliminate it
- // entirely.
- // function getRootNodeDetails(callback);
-
- /**
- * Gets summary information for a list of ids.
- *
- * @param {Array.<string>} idList List of 64-bit ids in decimal
- * string form.
- * @param {Array.<{id: string, title: string, isFolder: boolean}>}
- * callback Called with summaries for the nodes in idList that
- * exist.
- */
- // function getNodeSummariesById(idList, callback);
-
- /**
- * Gets detailed information for a list of ids.
- *
- * @param {Array.<string>} idList List of 64-bit ids in decimal
- * string form.
- * @param {Array.<!Object>} callback Called with detailed
- * information for the nodes in idList that exist.
- */
- // function getNodeDetailsById(idList, callback);
-
- /**
- * Gets child ids for a given id.
- *
- * @param {string} id 64-bit id in decimal string form of the parent
- * node.
- * @param {Array.<string>} callback Called with the (possibly empty)
- * list of child ids.
- */
- // function getChildNodeIds(id);
-
- virtual void OnInitializationComplete(
- const WeakHandle<JsBackend>& js_backend,
- const WeakHandle<DataTypeDebugInfoListener>& debug_info_listener,
- bool success,
- ModelTypeSet restored_types) = 0;
-
- // We are no longer permitted to communicate with the server. Sync should
- // be disabled and state cleaned up at once. This can happen for a number
- // of reasons, e.g. swapping from a test instance to production, or a
- // global stop syncing operation has wiped the store.
- virtual void OnStopSyncingPermanently() = 0;
-
- virtual void OnActionableError(
- const SyncProtocolError& sync_protocol_error) = 0;
-
- protected:
- virtual ~Observer();
- };
-
- SyncManager();
- virtual ~SyncManager();
-
- // Initialize the sync manager. |database_location| specifies the path of
- // the directory in which to locate a sqlite repository storing the syncer
- // backend state. Initialization will open the database, or create it if it
- // does not already exist. Returns false on failure.
- // |event_handler| is the JsEventHandler used to propagate events to
- // chrome://sync-internals. |event_handler| may be uninitialized.
- // |sync_server_and_path| and |sync_server_port| represent the Chrome sync
- // server to use, and |use_ssl| specifies whether to communicate securely;
- // the default is false.
- // |post_factory| will be owned internally and used to create
- // instances of an HttpPostProvider.
- // |model_safe_worker| ownership is given to the SyncManager.
- // |user_agent| is a 7-bit ASCII string suitable for use as the User-Agent
- // HTTP header. Used internally when collecting stats to classify clients.
- // |invalidator| is owned and used to listen for invalidations.
- // |invalidator_client_id| is used to unqiuely identify this client to the
- // invalidation notification server.
- // |restored_key_for_bootstrapping| is the key used to boostrap the
- // cryptographer
- // |keystore_encryption_enabled| determines whether we enable the keystore
- // encryption functionality in the cryptographer/nigori.
- // |report_unrecoverable_error_function| may be NULL.
- // |cancelation_signal| carries shutdown requests across threads. This one
- // will be used to cut short any network I/O and tell the syncer to exit
- // early.
- //
- // TODO(akalin): Replace the |post_factory| parameter with a
- // URLFetcher parameter.
- virtual void Init(
- const base::FilePath& database_location,
- const WeakHandle<JsEventHandler>& event_handler,
- const std::string& sync_server_and_path,
- int sync_server_port,
- bool use_ssl,
- scoped_ptr<HttpPostProviderFactory> post_factory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- ChangeDelegate* change_delegate,
- const SyncCredentials& credentials,
- const std::string& invalidator_client_id,
- const std::string& restored_key_for_bootstrapping,
- const std::string& restored_keystore_key_for_bootstrapping,
- InternalComponentsFactory* internal_components_factory,
- Encryptor* encryptor,
- scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler,
- ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
- CancelationSignal* cancelation_signal) = 0;
-
- // Throw an unrecoverable error from a transaction (mostly used for
- // testing).
- virtual void ThrowUnrecoverableError() = 0;
-
- virtual ModelTypeSet InitialSyncEndedTypes() = 0;
-
- // Returns those types within |types| that have an empty progress marker
- // token.
- virtual ModelTypeSet GetTypesWithEmptyProgressMarkerToken(
- ModelTypeSet types) = 0;
-
- // Purge from the directory those types with non-empty progress markers
- // but without initial synced ended set.
- // Returns false if an error occurred, true otherwise.
- virtual bool PurgePartiallySyncedTypes() = 0;
-
- // Update tokens that we're using in Sync. Email must stay the same.
- virtual void UpdateCredentials(const SyncCredentials& credentials) = 0;
-
- // Put the syncer in normal mode ready to perform nudges and polls.
- virtual void StartSyncingNormally(
- const ModelSafeRoutingInfo& routing_info) = 0;
-
- // Switches the mode of operation to CONFIGURATION_MODE and performs
- // any configuration tasks needed as determined by the params. Once complete,
- // syncer will remain in CONFIGURATION_MODE until StartSyncingNormally is
- // called.
- // Data whose types are not in |new_routing_info| are purged from sync
- // directory, unless they're part of |to_ignore|, in which case they're left
- // untouched. The purged data is backed up in delete journal for recovery in
- // next session if its type is in |to_journal|. If in |to_unapply|
- // only the local data is removed; the server data is preserved.
- // |ready_task| is invoked when the configuration completes.
- // |retry_task| is invoked if the configuration job could not immediately
- // execute. |ready_task| will still be called when it eventually
- // does finish.
- virtual void ConfigureSyncer(
- ConfigureReason reason,
- ModelTypeSet to_download,
- ModelTypeSet to_purge,
- ModelTypeSet to_journal,
- ModelTypeSet to_unapply,
- const ModelSafeRoutingInfo& new_routing_info,
- const base::Closure& ready_task,
- const base::Closure& retry_task) = 0;
-
- // Inform the syncer of a change in the invalidator's state.
- virtual void OnInvalidatorStateChange(InvalidatorState state) = 0;
-
- // Inform the syncer that its cached information about a type is obsolete.
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) = 0;
-
- // Adds a listener to be notified of sync events.
- // NOTE: It is OK (in fact, it's probably a good idea) to call this before
- // having received OnInitializationCompleted.
- virtual void AddObserver(Observer* observer) = 0;
-
- // Remove the given observer. Make sure to call this if the
- // Observer is being destroyed so the SyncManager doesn't
- // potentially dereference garbage.
- virtual void RemoveObserver(Observer* observer) = 0;
-
- // Status-related getter. May be called on any thread.
- virtual SyncStatus GetDetailedStatus() const = 0;
-
- // Call periodically from a database-safe thread to persist recent changes
- // to the syncapi model.
- virtual void SaveChanges() = 0;
-
- // Issue a final SaveChanges, and close sqlite handles.
- virtual void ShutdownOnSyncThread() = 0;
-
- // May be called from any thread.
- virtual UserShare* GetUserShare() = 0;
-
- // Returns the cache_guid of the currently open database.
- // Requires that the SyncManager be initialized.
- virtual const std::string cache_guid() = 0;
-
- // Reads the nigori node to determine if any experimental features should
- // be enabled.
- // Note: opens a transaction. May be called on any thread.
- virtual bool ReceivedExperiment(Experiments* experiments) = 0;
-
- // Uses a read-only transaction to determine if the directory being synced has
- // any remaining unsynced items. May be called on any thread.
- virtual bool HasUnsyncedItems() = 0;
-
- // Returns the SyncManager's encryption handler.
- virtual SyncEncryptionHandler* GetEncryptionHandler() = 0;
-
- // Ask the SyncManager to fetch updates for the given types.
- virtual void RefreshTypes(ModelTypeSet types) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_SYNC_MANAGER_H_
diff --git a/chromium/sync/internal_api/public/sync_manager_factory.h b/chromium/sync/internal_api/public/sync_manager_factory.h
deleted file mode 100644
index 12fa52fdef8..00000000000
--- a/chromium/sync/internal_api/public/sync_manager_factory.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_SYNC_MANAGER_FACTORY_H_
-#define SYNC_INTERNAL_API_PUBLIC_SYNC_MANAGER_FACTORY_H_
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class SyncManager;
-
-// Helper class to allow dependency injection of the SyncManager.
-class SYNC_EXPORT SyncManagerFactory {
- public:
- SyncManagerFactory();
- virtual ~SyncManagerFactory();
-
- virtual scoped_ptr<SyncManager> CreateSyncManager(std::string name);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SyncManagerFactory);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_SYNC_MANAGER_FACTORY_H_
diff --git a/chromium/sync/internal_api/public/user_share.h b/chromium/sync/internal_api/public/user_share.h
deleted file mode 100644
index bdd6c2f4de7..00000000000
--- a/chromium/sync/internal_api/public/user_share.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_USER_SHARE_H_
-#define SYNC_INTERNAL_API_PUBLIC_USER_SHARE_H_
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-namespace syncable {
-class Directory;
-}
-
-// A UserShare encapsulates the syncable pieces that represent an authenticated
-// user and their data (share).
-// This encompasses all pieces required to build transaction objects on the
-// syncable share.
-struct SYNC_EXPORT_PRIVATE UserShare {
- UserShare();
- ~UserShare();
-
- // The Directory itself, which is the parent of Transactions.
- scoped_ptr<syncable::Directory> directory;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_USER_SHARE_H_
diff --git a/chromium/sync/internal_api/public/util/experiments.h b/chromium/sync/internal_api/public/util/experiments.h
deleted file mode 100644
index 144687d9689..00000000000
--- a/chromium/sync/internal_api/public/util/experiments.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_EXPERIMENTS_
-#define SYNC_UTIL_EXPERIMENTS_
-
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-
-const char kAutofillCullingTag[] = "autofill_culling";
-const char kFaviconSyncTag[] = "favicon_sync";
-const char kPreCommitUpdateAvoidanceTag[] = "pre_commit_update_avoidance";
-
-// A structure to hold the enable status of experimental sync features.
-struct Experiments {
- Experiments() : autofill_culling(false),
- favicon_sync_limit(200) {}
-
- bool Matches(const Experiments& rhs) {
- return (autofill_culling == rhs.autofill_culling &&
- favicon_sync_limit == rhs.favicon_sync_limit);
- }
-
- // Enable deletion of expired autofill entries (if autofill sync is enabled).
- bool autofill_culling;
-
- // The number of favicons that a client is permitted to sync.
- int favicon_sync_limit;
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_EXPERIMENTS_
diff --git a/chromium/sync/internal_api/public/util/immutable.h b/chromium/sync/internal_api/public/util/immutable.h
deleted file mode 100644
index 683a5e612d3..00000000000
--- a/chromium/sync/internal_api/public/util/immutable.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Immutable<T> provides an easy, cheap, and thread-safe way to pass
-// large immutable data around.
-//
-// For example, consider the following code:
-//
-// typedef std::vector<LargeObject> LargeObjectList;
-//
-// void ProcessStuff(const LargeObjectList& stuff) {
-// for (LargeObjectList::const_iterator it = stuff.begin();
-// it != stuff.end(); ++it) {
-// ... process it ...
-// }
-// }
-//
-// ...
-//
-// LargeObjectList my_stuff;
-// ... fill my_stuff with lots of LargeObjects ...
-// some_loop->PostTask(FROM_HERE, base::Bind(&ProcessStuff, my_stuff));
-//
-// The last line incurs the cost of copying my_stuff, which is
-// undesirable. Here's the above code re-written using Immutable<T>:
-//
-// void ProcessStuff(const Immutable<LargeObjectList>& stuff) {
-// for (LargeObjectList::const_iterator it = stuff.Get().begin();
-// it != stuff.Get().end(); ++it) {
-// ... process it ...
-// }
-// }
-//
-// ...
-//
-// LargeObjectList my_stuff;
-// ... fill my_stuff with lots of LargeObjects ...
-// some_loop->PostTask(
-// FROM_HERE, base::Bind(&ProcessStuff, MakeImmutable(&my_stuff)));
-//
-// The last line, which resets my_stuff to a default-initialized
-// state, incurs only the cost of a swap of LargeObjectLists, which is
-// O(1) for most STL container implementations. The data in my_stuff
-// is ref-counted (thread-safely), so it is freed as soon as
-// ProcessStuff is finished.
-//
-// NOTE: By default, Immutable<T> relies on ADL
-// (http://en.wikipedia.org/wiki/Argument-dependent_name_lookup) to
-// find a swap() function for T, falling back to std::swap() when
-// necessary. If you overload swap() for your type in its namespace,
-// or if you specialize std::swap() for your type, (see
-// http://stackoverflow.com/questions/11562/how-to-overload-stdswap
-// for discussion) Immutable<T> should be able to find it.
-//
-// Alternatively, you could explicitly control which swap function is
-// used by providing your own traits class or using one of the
-// pre-defined ones below. See comments on traits below for details.
-//
-// NOTE: Some complexity is necessary in order to use Immutable<T>
-// with forward-declared types. See comments on traits below for
-// details.
-
-#ifndef SYNC_UTIL_IMMUTABLE_H_
-#define SYNC_UTIL_IMMUTABLE_H_
-
-// For std::swap().
-#include <algorithm>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-
-namespace syncer {
-
-namespace internal {
-// This class is part of the Immutable implementation. DO NOT USE
-// THIS CLASS DIRECTLY YOURSELF.
-
-template <typename T, typename Traits>
-class ImmutableCore
- : public base::RefCountedThreadSafe<ImmutableCore<T, Traits> > {
- public:
- // wrapper_ is always explicitly default-initialized to handle
- // primitive types and the case where Traits::Wrapper == T.
-
- ImmutableCore() : wrapper_() {
- Traits::InitializeWrapper(&wrapper_);
- }
-
- explicit ImmutableCore(T* t) : wrapper_() {
- Traits::InitializeWrapper(&wrapper_);
- Traits::Swap(Traits::UnwrapMutable(&wrapper_), t);
- }
-
- const T& Get() const {
- return Traits::Unwrap(wrapper_);
- }
-
- private:
- ~ImmutableCore() {
- Traits::DestroyWrapper(&wrapper_);
- }
- friend class base::RefCountedThreadSafe<ImmutableCore<T, Traits> >;
-
- // This is semantically const, but we can't mark it a such as we
- // modify it in the constructor.
- typename Traits::Wrapper wrapper_;
-
- DISALLOW_COPY_AND_ASSIGN(ImmutableCore);
-};
-
-} // namespace internal
-
-// Traits usage notes
-// ------------------
-// The most common reason to use your own traits class is to provide
-// your own swap method. First, consider the pre-defined traits
-// classes HasSwapMemFn{ByRef,ByPtr} below. If neither of those work,
-// then define your own traits class inheriting from
-// DefaultImmutableTraits<YourType> (to pick up the defaults for
-// everything else) and provide your own Swap() method.
-//
-// Another reason to use your own traits class is to be able to use
-// Immutable<T> with a forward-declared type (important for protobuf
-// classes, when you want to avoid headers pulling in generated
-// headers). (This is why the Traits::Wrapper type exists; normally,
-// Traits::Wrapper is just T itself, but that needs to be changed for
-// forward-declared types.)
-//
-// For example, if you want to do this:
-//
-// my_class.h
-// ----------
-// #include ".../immutable.h"
-//
-// // Forward declaration.
-// class SomeOtherType;
-//
-// class MyClass {
-// ...
-// private:
-// // Doesn't work, as defaults traits class needs SomeOtherType's
-// // definition to be visible.
-// Immutable<SomeOtherType> foo_;
-// };
-//
-// You'll have to do this:
-//
-// my_class.h
-// ----------
-// #include ".../immutable.h"
-//
-// // Forward declaration.
-// class SomeOtherType;
-//
-// class MyClass {
-// ...
-// private:
-// struct ImmutableSomeOtherTypeTraits {
-// // scoped_ptr<SomeOtherType> won't work here, either.
-// typedef SomeOtherType* Wrapper;
-//
-// static void InitializeWrapper(Wrapper* wrapper);
-//
-// static void DestroyWrapper(Wrapper* wrapper);
-// ...
-// };
-//
-// typedef Immutable<SomeOtherType, ImmutableSomeOtherTypeTraits>
-// ImmutableSomeOtherType;
-//
-// ImmutableSomeOtherType foo_;
-// };
-//
-// my_class.cc
-// -----------
-// #include ".../some_other_type.h"
-//
-// void MyClass::ImmutableSomeOtherTypeTraits::InitializeWrapper(
-// Wrapper* wrapper) {
-// *wrapper = new SomeOtherType();
-// }
-//
-// void MyClass::ImmutableSomeOtherTypeTraits::DestroyWrapper(
-// Wrapper* wrapper) {
-// delete *wrapper;
-// }
-//
-// ...
-//
-// Also note that this incurs an additional memory allocation when you
-// create an Immutable<SomeOtherType>.
-
-template <typename T>
-struct DefaultImmutableTraits {
- typedef T Wrapper;
-
- static void InitializeWrapper(Wrapper* wrapper) {}
-
- static void DestroyWrapper(Wrapper* wrapper) {}
-
- static const T& Unwrap(const Wrapper& wrapper) { return wrapper; }
-
- static T* UnwrapMutable(Wrapper* wrapper) { return wrapper; }
-
- static void Swap(T* t1, T* t2) {
- // Uses ADL (see
- // http://en.wikipedia.org/wiki/Argument-dependent_name_lookup).
- using std::swap;
- swap(*t1, *t2);
- }
-};
-
-// Most STL containers have by-reference swap() member functions,
-// although they usually already overload std::swap() to use those.
-template <typename T>
-struct HasSwapMemFnByRef : public DefaultImmutableTraits<T> {
- static void Swap(T* t1, T* t2) {
- t1->swap(*t2);
- }
-};
-
-// Most Google-style objects have by-pointer Swap() member functions
-// (for example, generated protocol buffer classes).
-template <typename T>
-struct HasSwapMemFnByPtr : public DefaultImmutableTraits<T> {
- static void Swap(T* t1, T* t2) {
- t1->Swap(t2);
- }
-};
-
-template <typename T, typename Traits = DefaultImmutableTraits<T> >
-class Immutable {
- public:
- // Puts the underlying object in a default-initialized state.
- Immutable() : core_(new internal::ImmutableCore<T, Traits>()) {}
-
- // Copy constructor and assignment welcome.
-
- // Resets |t| to a default-initialized state.
- explicit Immutable(T* t)
- : core_(new internal::ImmutableCore<T, Traits>(t)) {}
-
- const T& Get() const {
- return core_->Get();
- }
-
- private:
- scoped_refptr<const internal::ImmutableCore<T, Traits> > core_;
-};
-
-// Helper function to avoid having to write out template arguments.
-template <typename T>
-Immutable<T> MakeImmutable(T* t) {
- return Immutable<T>(t);
-}
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_IMMUTABLE_H_
diff --git a/chromium/sync/internal_api/public/util/immutable_unittest.cc b/chromium/sync/internal_api/public/util/immutable_unittest.cc
deleted file mode 100644
index f8494b60971..00000000000
--- a/chromium/sync/internal_api/public/util/immutable_unittest.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/util/immutable.h"
-
-#include <algorithm>
-#include <cstddef>
-#include <deque>
-#include <list>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-// Helper class that keeps track of the token passed in at
-// construction and how many times that token is copied.
-class TokenCore : public base::RefCounted<TokenCore> {
- public:
- explicit TokenCore(const char* token) : token_(token), copy_count_(0) {}
-
- const char* GetToken() const { return token_; }
-
- void RecordCopy() { ++copy_count_; }
-
- int GetCopyCount() const { return copy_count_; }
-
- private:
- friend class base::RefCounted<TokenCore>;
-
- ~TokenCore() {}
-
- const char* const token_;
- int copy_count_;
-};
-
-enum SwapBehavior {
- USE_DEFAULT_SWAP,
- USE_FAST_SWAP_VIA_ADL,
- USE_FAST_SWAP_VIA_SPECIALIZATION
-};
-
-const char kEmptyToken[] = "<empty token>";
-
-// Base class for various token classes, differing in swap behavior.
-template <SwapBehavior>
-class TokenBase {
- public:
- TokenBase() : core_(new TokenCore(kEmptyToken)) {}
-
- explicit TokenBase(const char* token) : core_(new TokenCore(token)) {}
-
- TokenBase(const TokenBase& other) : core_(other.core_) {
- core_->RecordCopy();
- }
-
- TokenBase& operator=(const TokenBase& other) {
- core_ = other.core_;
- core_->RecordCopy();
- return *this;
- }
-
- const char* GetToken() const {
- return core_->GetToken();
- }
-
- int GetCopyCount() const {
- return core_->GetCopyCount();
- }
-
- // For associative containers.
- bool operator<(const TokenBase& other) const {
- return std::string(GetToken()) < std::string(other.GetToken());
- }
-
- // STL-style swap.
- void swap(TokenBase& other) {
- using std::swap;
- swap(other.core_, core_);
- }
-
- // Google-style swap.
- void Swap(TokenBase* other) {
- using std::swap;
- swap(other->core_, core_);
- }
-
- private:
- scoped_refptr<TokenCore> core_;
-};
-
-typedef TokenBase<USE_DEFAULT_SWAP> Token;
-typedef TokenBase<USE_FAST_SWAP_VIA_ADL> ADLToken;
-typedef TokenBase<USE_FAST_SWAP_VIA_SPECIALIZATION> SpecializationToken;
-
-void swap(ADLToken& t1, ADLToken& t2) {
- t1.Swap(&t2);
-}
-
-} // namespace syncer
-
-// Allowed by the standard (17.4.3.1/1).
-namespace std {
-
-template <>
-void swap(syncer::SpecializationToken& t1,
- syncer::SpecializationToken& t2) {
- t1.Swap(&t2);
-}
-
-} // namespace
-
-namespace syncer {
-namespace {
-
-class ImmutableTest : public ::testing::Test {};
-
-TEST_F(ImmutableTest, Int) {
- int x = 5;
- Immutable<int> ix(&x);
- EXPECT_EQ(5, ix.Get());
- EXPECT_EQ(0, x);
-}
-
-TEST_F(ImmutableTest, IntCopy) {
- int x = 5;
- Immutable<int> ix = Immutable<int>(&x);
- EXPECT_EQ(5, ix.Get());
- EXPECT_EQ(0, x);
-}
-
-TEST_F(ImmutableTest, IntAssign) {
- int x = 5;
- Immutable<int> ix;
- EXPECT_EQ(0, ix.Get());
- ix = Immutable<int>(&x);
- EXPECT_EQ(5, ix.Get());
- EXPECT_EQ(0, x);
-}
-
-TEST_F(ImmutableTest, IntMakeImmutable) {
- int x = 5;
- Immutable<int> ix = MakeImmutable(&x);
- EXPECT_EQ(5, ix.Get());
- EXPECT_EQ(0, x);
-}
-
-template <typename T, typename ImmutableT>
-void RunTokenTest(const char* token, bool expect_copies) {
- SCOPED_TRACE(token);
- T t(token);
- EXPECT_EQ(token, t.GetToken());
- EXPECT_EQ(0, t.GetCopyCount());
-
- ImmutableT immutable_t(&t);
- EXPECT_EQ(token, immutable_t.Get().GetToken());
- EXPECT_EQ(kEmptyToken, t.GetToken());
- EXPECT_EQ(expect_copies, immutable_t.Get().GetCopyCount() > 0);
- EXPECT_EQ(expect_copies, t.GetCopyCount() > 0);
-}
-
-TEST_F(ImmutableTest, Token) {
- RunTokenTest<Token, Immutable<Token> >("Token", true /* expect_copies */);
-}
-
-TEST_F(ImmutableTest, TokenSwapMemFnByRef) {
- RunTokenTest<Token, Immutable<Token, HasSwapMemFnByRef<Token> > >(
- "TokenSwapMemFnByRef", false /* expect_copies */);
-}
-
-TEST_F(ImmutableTest, TokenSwapMemFnByPtr) {
- RunTokenTest<Token, Immutable<Token, HasSwapMemFnByPtr<Token> > >(
- "TokenSwapMemFnByPtr", false /* expect_copies */);
-}
-
-TEST_F(ImmutableTest, ADLToken) {
- RunTokenTest<ADLToken, Immutable<ADLToken> >(
- "ADLToken", false /* expect_copies */);
-}
-
-TEST_F(ImmutableTest, SpecializationToken) {
- RunTokenTest<SpecializationToken, Immutable<SpecializationToken> >(
- "SpecializationToken", false /* expect_copies */);
-}
-
-template <typename C, typename ImmutableC>
-void RunTokenContainerTest(const char* token) {
- SCOPED_TRACE(token);
- const Token tokens[] = { Token(), Token(token) };
- const size_t token_count = arraysize(tokens);
- C c(tokens, tokens + token_count);
- const int copy_count = c.begin()->GetCopyCount();
- EXPECT_GT(copy_count, 0);
- for (typename C::const_iterator it = c.begin(); it != c.end(); ++it) {
- EXPECT_EQ(copy_count, it->GetCopyCount());
- }
-
- // Make sure that making the container immutable doesn't incur any
- // copies of the tokens.
- ImmutableC immutable_c(&c);
- EXPECT_TRUE(c.empty());
- ASSERT_EQ(token_count, immutable_c.Get().size());
- int i = 0;
- for (typename C::const_iterator it = c.begin(); it != c.end(); ++it) {
- EXPECT_EQ(tokens[i].GetToken(), it->GetToken());
- EXPECT_EQ(copy_count, it->GetCopyCount());
- ++i;
- }
-}
-
-TEST_F(ImmutableTest, Vector) {
- RunTokenContainerTest<std::vector<Token>, Immutable<std::vector<Token> > >(
- "Vector");
-}
-
-TEST_F(ImmutableTest, VectorSwapMemFnByRef) {
- RunTokenContainerTest<
- std::vector<Token>,
- Immutable<std::vector<Token>, HasSwapMemFnByRef<std::vector<Token> > > >(
- "VectorSwapMemFnByRef");
-}
-
-// http://crbug.com/129128
-#if defined(OS_WIN)
-#define MAYBE_Deque DISABLED_Deque
-#else
-#define MAYBE_Deque Deque
-#endif
-TEST_F(ImmutableTest, MAYBE_Deque) {
- RunTokenContainerTest<std::deque<Token>, Immutable<std::deque<Token> > >(
- "Deque");
-}
-
-TEST_F(ImmutableTest, List) {
- RunTokenContainerTest<std::list<Token>, Immutable<std::list<Token> > >(
- "List");
-}
-
-TEST_F(ImmutableTest, Set) {
- RunTokenContainerTest<std::set<Token>, Immutable<std::set<Token> > >(
- "Set");
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/util/report_unrecoverable_error_function.h b/chromium/sync/internal_api/public/util/report_unrecoverable_error_function.h
deleted file mode 100644
index c0686cafdd1..00000000000
--- a/chromium/sync/internal_api/public/util/report_unrecoverable_error_function.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_REPORT_UNRECOVERABLE_ERROR_FUNCTION_H_
-#define SYNC_UTIL_REPORT_UNRECOVERABLE_ERROR_FUNCTION_H_
-
-namespace syncer {
-
-// A ReportUnrecoverableErrorFunction is a function that is called
-// immediately when an unrecoverable error is encountered. Unlike
-// UnrecoverableErrorHandler, it should just log the error and any
-// context surrounding it.
-typedef void (*ReportUnrecoverableErrorFunction)(void);
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_REPORT_UNRECOVERABLE_ERROR_FUNCTION_H_
diff --git a/chromium/sync/internal_api/public/util/sync_string_conversions.cc b/chromium/sync/internal_api/public/util/sync_string_conversions.cc
deleted file mode 100644
index 495fadfb3c4..00000000000
--- a/chromium/sync/internal_api/public/util/sync_string_conversions.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/util/sync_string_conversions.h"
-
-#define ENUM_CASE(x) case x: return #x
-
-namespace syncer {
-
-const char* ConnectionStatusToString(ConnectionStatus status) {
- switch (status) {
- ENUM_CASE(CONNECTION_OK);
- ENUM_CASE(CONNECTION_AUTH_ERROR);
- ENUM_CASE(CONNECTION_SERVER_ERROR);
- default:
- NOTREACHED();
- return "INVALID_CONNECTION_STATUS";
- }
-}
-
-// Helper function that converts a PassphraseRequiredReason value to a string.
-const char* PassphraseRequiredReasonToString(
- PassphraseRequiredReason reason) {
- switch (reason) {
- ENUM_CASE(REASON_PASSPHRASE_NOT_REQUIRED);
- ENUM_CASE(REASON_ENCRYPTION);
- ENUM_CASE(REASON_DECRYPTION);
- default:
- NOTREACHED();
- return "INVALID_REASON";
- }
-}
-
-const char* PassphraseTypeToString(PassphraseType type) {
- switch (type) {
- ENUM_CASE(IMPLICIT_PASSPHRASE);
- ENUM_CASE(KEYSTORE_PASSPHRASE);
- ENUM_CASE(FROZEN_IMPLICIT_PASSPHRASE);
- ENUM_CASE(CUSTOM_PASSPHRASE);
- default:
- NOTREACHED();
- return "INVALID_PASSPHRASE_TYPE";
- }
-}
-
-const char* BootstrapTokenTypeToString(BootstrapTokenType type) {
- switch (type) {
- ENUM_CASE(PASSPHRASE_BOOTSTRAP_TOKEN);
- ENUM_CASE(KEYSTORE_BOOTSTRAP_TOKEN);
- default:
- NOTREACHED();
- return "INVALID_BOOTSTRAP_TOKEN_TYPE";
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/util/sync_string_conversions.h b/chromium/sync/internal_api/public/util/sync_string_conversions.h
deleted file mode 100644
index 0b33a7dac3d..00000000000
--- a/chromium/sync/internal_api/public/util/sync_string_conversions.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_UTIL_SYNC_STRING_CONVERSIONS_H_
-#define SYNC_INTERNAL_API_PUBLIC_UTIL_SYNC_STRING_CONVERSIONS_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/sync_encryption_handler.h"
-#include "sync/internal_api/public/sync_manager.h"
-
-namespace syncer {
-
-SYNC_EXPORT const char* ConnectionStatusToString(ConnectionStatus status);
-
-// Returns the string representation of a PassphraseRequiredReason value.
-SYNC_EXPORT const char* PassphraseRequiredReasonToString(
- PassphraseRequiredReason reason);
-
-SYNC_EXPORT const char* PassphraseTypeToString(PassphraseType type);
-
-const char* BootstrapTokenTypeToString(BootstrapTokenType type);
-}
-
-#endif // SYNC_INTERNAL_API_PUBLIC_UTIL_SYNC_STRING_CONVERSIONS_H_
diff --git a/chromium/sync/internal_api/public/util/syncer_error.cc b/chromium/sync/internal_api/public/util/syncer_error.cc
deleted file mode 100644
index e7cb66fbf48..00000000000
--- a/chromium/sync/internal_api/public/util/syncer_error.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/util/syncer_error.h"
-
-#include "base/logging.h"
-
-namespace syncer {
-
-#define ENUM_CASE(x) case x: return #x; break;
-const char* GetSyncerErrorString(SyncerError value) {
- switch (value) {
- ENUM_CASE(UNSET);
- ENUM_CASE(CANNOT_DO_WORK);
- ENUM_CASE(NETWORK_CONNECTION_UNAVAILABLE);
- ENUM_CASE(NETWORK_IO_ERROR);
- ENUM_CASE(SYNC_SERVER_ERROR);
- ENUM_CASE(SYNC_AUTH_ERROR);
- ENUM_CASE(SERVER_RETURN_INVALID_CREDENTIAL);
- ENUM_CASE(SERVER_RETURN_UNKNOWN_ERROR);
- ENUM_CASE(SERVER_RETURN_THROTTLED);
- ENUM_CASE(SERVER_RETURN_TRANSIENT_ERROR);
- ENUM_CASE(SERVER_RETURN_MIGRATION_DONE);
- ENUM_CASE(SERVER_RETURN_CLEAR_PENDING);
- ENUM_CASE(SERVER_RETURN_NOT_MY_BIRTHDAY);
- ENUM_CASE(SERVER_RETURN_CONFLICT);
- ENUM_CASE(SERVER_RESPONSE_VALIDATION_FAILED);
- ENUM_CASE(SERVER_RETURN_DISABLED_BY_ADMIN);
- ENUM_CASE(SERVER_MORE_TO_DOWNLOAD);
- ENUM_CASE(SYNCER_OK);
- }
- NOTREACHED();
- return "INVALID";
-}
-#undef ENUM_CASE
-
-bool SyncerErrorIsError(SyncerError error) {
- return error != UNSET
- && error != SYNCER_OK
- && error != SERVER_MORE_TO_DOWNLOAD;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/util/syncer_error.h b/chromium/sync/internal_api/public/util/syncer_error.h
deleted file mode 100644
index 02da22c1935..00000000000
--- a/chromium/sync/internal_api/public/util/syncer_error.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_UTIL_SYNCER_ERROR_H_
-#define SYNC_INTERNAL_API_PUBLIC_UTIL_SYNCER_ERROR_H_
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// This enum describes all the possible results of a sync cycle.
-enum SYNC_EXPORT_PRIVATE SyncerError {
- UNSET = 0, // Default value.
- CANNOT_DO_WORK, // A model worker could not process a work item.
-
- NETWORK_CONNECTION_UNAVAILABLE, // Connectivity failure.
- NETWORK_IO_ERROR, // Response buffer read error.
- SYNC_SERVER_ERROR, // Non auth HTTP error.
- SYNC_AUTH_ERROR, // HTTP auth error.
-
- // Based on values returned by server. Most are defined in sync.proto.
- SERVER_RETURN_INVALID_CREDENTIAL,
- SERVER_RETURN_UNKNOWN_ERROR,
- SERVER_RETURN_THROTTLED,
- SERVER_RETURN_TRANSIENT_ERROR,
- SERVER_RETURN_MIGRATION_DONE,
- SERVER_RETURN_CLEAR_PENDING,
- SERVER_RETURN_NOT_MY_BIRTHDAY,
- SERVER_RETURN_CONFLICT,
- SERVER_RESPONSE_VALIDATION_FAILED,
- SERVER_RETURN_DISABLED_BY_ADMIN,
-
- SERVER_MORE_TO_DOWNLOAD,
-
- SYNCER_OK
-};
-
-SYNC_EXPORT const char* GetSyncerErrorString(SyncerError);
-
-// Helper to check that |error| is set to something (not UNSET) and is not
-// SYNCER_OK.
-bool SyncerErrorIsError(SyncerError error);
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_UTIL_SYNCER_ERROR_H_
diff --git a/chromium/sync/internal_api/public/util/unrecoverable_error_handler.h b/chromium/sync/internal_api/public/util/unrecoverable_error_handler.h
deleted file mode 100644
index 2bd24759939..00000000000
--- a/chromium/sync/internal_api/public/util/unrecoverable_error_handler.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_UNRECOVERABLE_ERROR_HANDLER_H_
-#define SYNC_UTIL_UNRECOVERABLE_ERROR_HANDLER_H_
-
-#include <string>
-
-#include "base/location.h"
-
-namespace syncer {
-
-class UnrecoverableErrorHandler {
- public:
- // Call this when normal operation detects that the chrome model and the
- // syncer model are inconsistent, or similar. The ProfileSyncService will
- // try to avoid doing any work to avoid crashing or corrupting things
- // further, and will report an error status if queried.
- virtual void OnUnrecoverableError(const tracked_objects::Location& from_here,
- const std::string& message) = 0;
- virtual ~UnrecoverableErrorHandler() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_UNRECOVERABLE_ERROR_HANDLER_H_
diff --git a/chromium/sync/internal_api/public/util/unrecoverable_error_info.cc b/chromium/sync/internal_api/public/util/unrecoverable_error_info.cc
deleted file mode 100644
index 22346503301..00000000000
--- a/chromium/sync/internal_api/public/util/unrecoverable_error_info.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/util/unrecoverable_error_info.h"
-
-namespace syncer {
-
-UnrecoverableErrorInfo::UnrecoverableErrorInfo()
- : is_set_(false) {
-}
-
-UnrecoverableErrorInfo::UnrecoverableErrorInfo(
- const tracked_objects::Location& location,
- const std::string& message)
- : location_(location),
- message_(message),
- is_set_(true) {
-}
-
-UnrecoverableErrorInfo::~UnrecoverableErrorInfo() {
-}
-
-void UnrecoverableErrorInfo::Reset(
- const tracked_objects::Location& location,
- const std::string& message) {
- location_ = location;
- message_ = message;
- is_set_ = true;
-}
-
-bool UnrecoverableErrorInfo::IsSet() const {
- return is_set_;
-}
-
-const tracked_objects::Location& UnrecoverableErrorInfo::location() const {
- return location_;
-}
-
-const std::string& UnrecoverableErrorInfo::message() const {
- return message_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/util/unrecoverable_error_info.h b/chromium/sync/internal_api/public/util/unrecoverable_error_info.h
deleted file mode 100644
index 1fb9832572d..00000000000
--- a/chromium/sync/internal_api/public/util/unrecoverable_error_info.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_UNRECOVERABLE_ERROR_INFO_H_
-#define SYNC_UTIL_UNRECOVERABLE_ERROR_INFO_H_
-// TODO(lipalani): Figure out the right location for this class so it is
-// accessible outside of sync engine as well.
-
-#include <string>
-
-#include "base/location.h"
-
-namespace syncer {
-
-class UnrecoverableErrorInfo {
- public:
- UnrecoverableErrorInfo();
- UnrecoverableErrorInfo(
- const tracked_objects::Location& location,
- const std::string& message);
- ~UnrecoverableErrorInfo();
-
- void Reset(const tracked_objects::Location& location,
- const std::string& message);
-
- bool IsSet() const;
-
- const tracked_objects::Location& location() const;
- const std::string& message() const;
-
- private:
- tracked_objects::Location location_;
- std::string message_;
- bool is_set_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_UNRECOVERABLE_ERROR_INFO_H_
diff --git a/chromium/sync/internal_api/public/util/weak_handle.cc b/chromium/sync/internal_api/public/util/weak_handle.cc
deleted file mode 100644
index 0ed142ee35b..00000000000
--- a/chromium/sync/internal_api/public/util/weak_handle.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/util/weak_handle.h"
-
-#include <sstream>
-
-#include "base/callback.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
-
-namespace syncer {
-
-namespace internal {
-
-WeakHandleCoreBase::WeakHandleCoreBase()
- : owner_loop_proxy_(base::MessageLoopProxy::current()) {}
-
-bool WeakHandleCoreBase::IsOnOwnerThread() const {
- return owner_loop_proxy_->BelongsToCurrentThread();
-}
-
-WeakHandleCoreBase::~WeakHandleCoreBase() {}
-
-void WeakHandleCoreBase::PostToOwnerThread(
- const tracked_objects::Location& from_here,
- const base::Closure& fn) const {
- if (!owner_loop_proxy_->PostTask(from_here, fn)) {
- DVLOG(1) << "Could not post task from " << from_here.ToString();
- }
-}
-
-} // namespace internal
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/util/weak_handle.h b/chromium/sync/internal_api/public/util/weak_handle.h
deleted file mode 100644
index c299be719c9..00000000000
--- a/chromium/sync/internal_api/public/util/weak_handle.h
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Weak handles provides a way to refer to weak pointers from another
-// thread. This is useful because it is not safe to reference a weak
-// pointer from a thread other than the thread on which it was
-// created.
-//
-// Weak handles can be passed across threads, so for example, you can
-// use them to do the "real" work on one thread and get notified on
-// another thread:
-//
-// class FooIOWorker {
-// public:
-// FooIOWorker(const WeakHandle<Foo>& foo) : foo_(foo) {}
-//
-// void OnIOStart() {
-// foo_.Call(FROM_HERE, &Foo::OnIOStart);
-// }
-//
-// void OnIOEvent(IOEvent e) {
-// foo_.Call(FROM_HERE, &Foo::OnIOEvent, e);
-// }
-//
-// void OnIOError(IOError err) {
-// foo_.Call(FROM_HERE, &Foo::OnIOError, err);
-// }
-//
-// private:
-// const WeakHandle<Foo> foo_;
-// };
-//
-// class Foo : public SupportsWeakPtr<Foo>, public NonThreadSafe {
-// public:
-// Foo() {
-// SpawnFooIOWorkerOnIOThread(base::MakeWeakHandle(AsWeakPtr()));
-// }
-//
-// /* Will always be called on the correct thread, and only if this
-// object hasn't been destroyed. */
-// void OnIOStart() { DCHECK(CalledOnValidThread(); ... }
-// void OnIOEvent(IOEvent e) { DCHECK(CalledOnValidThread(); ... }
-// void OnIOError(IOError err) { DCHECK(CalledOnValidThread(); ... }
-// };
-
-#ifndef SYNC_UTIL_WEAK_HANDLE_H_
-#define SYNC_UTIL_WEAK_HANDLE_H_
-
-#include <cstddef>
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/callback_forward.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "sync/base/sync_export.h"
-
-namespace base {
-class MessageLoopProxy;
-} // namespace base
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-template <typename T> class WeakHandle;
-
-namespace internal {
-// These classes are part of the WeakHandle implementation. DO NOT
-// USE THESE CLASSES DIRECTLY YOURSELF.
-
-// Adapted from base/callback_internal.h.
-
-template <typename T>
-struct ParamTraits {
- typedef const T& ForwardType;
-};
-
-template <typename T>
-struct ParamTraits<T&> {
- typedef T& ForwardType;
-};
-
-template <typename T, size_t n>
-struct ParamTraits<T[n]> {
- typedef const T* ForwardType;
-};
-
-template <typename T>
-struct ParamTraits<T[]> {
- typedef const T* ForwardType;
-};
-
-// Base class for WeakHandleCore<T> to avoid template bloat. Handles
-// the interaction with the owner thread and its message loop.
-class SYNC_EXPORT WeakHandleCoreBase {
- public:
- // Assumes the current thread is the owner thread.
- WeakHandleCoreBase();
-
- // May be called on any thread.
- bool IsOnOwnerThread() const;
-
- protected:
- // May be destroyed on any thread.
- ~WeakHandleCoreBase();
-
- // May be called on any thread.
- void PostToOwnerThread(const tracked_objects::Location& from_here,
- const base::Closure& fn) const;
-
- private:
- // May be used on any thread.
- const scoped_refptr<base::MessageLoopProxy> owner_loop_proxy_;
-
- DISALLOW_COPY_AND_ASSIGN(WeakHandleCoreBase);
-};
-
-// WeakHandleCore<T> contains all the logic for WeakHandle<T>.
-template <typename T>
-class WeakHandleCore
- : public WeakHandleCoreBase,
- public base::RefCountedThreadSafe<WeakHandleCore<T> > {
- public:
- // Must be called on |ptr|'s owner thread, which is assumed to be
- // the current thread.
- explicit WeakHandleCore(const base::WeakPtr<T>& ptr) : ptr_(ptr) {}
-
- // Must be called on |ptr_|'s owner thread.
- base::WeakPtr<T> Get() const {
- CHECK(IsOnOwnerThread());
- return ptr_;
- }
-
- // Call(...) may be called on any thread, but all its arguments
- // should be safe to be bound and copied across threads.
-
- template <typename U>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(void)) const {
- PostToOwnerThread(
- from_here,
- Bind(&WeakHandleCore::template DoCall0<U>, this, fn));
- }
-
- template <typename U, typename A1>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1),
- typename ParamTraits<A1>::ForwardType a1) const {
- PostToOwnerThread(
- from_here,
- Bind(&WeakHandleCore::template DoCall1<U, A1>,
- this, fn, a1));
- }
-
- template <typename U, typename A1, typename A2>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1, A2),
- typename ParamTraits<A1>::ForwardType a1,
- typename ParamTraits<A2>::ForwardType a2) const {
- PostToOwnerThread(
- from_here,
- Bind(&WeakHandleCore::template DoCall2<U, A1, A2>,
- this, fn, a1, a2));
- }
-
- template <typename U, typename A1, typename A2, typename A3>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1, A2, A3),
- typename ParamTraits<A1>::ForwardType a1,
- typename ParamTraits<A2>::ForwardType a2,
- typename ParamTraits<A3>::ForwardType a3) const {
- PostToOwnerThread(
- from_here,
- Bind(&WeakHandleCore::template DoCall3<U, A1, A2, A3>,
- this, fn, a1, a2, a3));
- }
-
- template <typename U, typename A1, typename A2, typename A3, typename A4>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1, A2, A3, A4),
- typename ParamTraits<A1>::ForwardType a1,
- typename ParamTraits<A2>::ForwardType a2,
- typename ParamTraits<A3>::ForwardType a3,
- typename ParamTraits<A4>::ForwardType a4) const {
- PostToOwnerThread(
- from_here,
- Bind(&WeakHandleCore::template DoCall4<U, A1, A2, A3, A4>,
- this, fn, a1, a2, a3, a4));
- }
-
- private:
- friend class base::RefCountedThreadSafe<WeakHandleCore<T> >;
-
- // May be destroyed on any thread.
- ~WeakHandleCore() {}
-
- // GCC 4.2.1 on OS X gets confused if all the DoCall functions are
- // named the same, so we distinguish them.
-
- template <typename U>
- void DoCall0(void (U::*fn)(void)) const {
- CHECK(IsOnOwnerThread());
- if (!Get()) {
- return;
- }
- (Get().get()->*fn)();
- }
-
- template <typename U, typename A1>
- void DoCall1(void (U::*fn)(A1),
- typename ParamTraits<A1>::ForwardType a1) const {
- CHECK(IsOnOwnerThread());
- if (!Get()) {
- return;
- }
- (Get().get()->*fn)(a1);
- }
-
- template <typename U, typename A1, typename A2>
- void DoCall2(void (U::*fn)(A1, A2),
- typename ParamTraits<A1>::ForwardType a1,
- typename ParamTraits<A2>::ForwardType a2) const {
- CHECK(IsOnOwnerThread());
- if (!Get()) {
- return;
- }
- (Get().get()->*fn)(a1, a2);
- }
-
- template <typename U, typename A1, typename A2, typename A3>
- void DoCall3(void (U::*fn)(A1, A2, A3),
- typename ParamTraits<A1>::ForwardType a1,
- typename ParamTraits<A2>::ForwardType a2,
- typename ParamTraits<A3>::ForwardType a3) const {
- CHECK(IsOnOwnerThread());
- if (!Get()) {
- return;
- }
- (Get().get()->*fn)(a1, a2, a3);
- }
-
- template <typename U, typename A1, typename A2, typename A3, typename A4>
- void DoCall4(void (U::*fn)(A1, A2, A3, A4),
- typename ParamTraits<A1>::ForwardType a1,
- typename ParamTraits<A2>::ForwardType a2,
- typename ParamTraits<A3>::ForwardType a3,
- typename ParamTraits<A4>::ForwardType a4) const {
- CHECK(IsOnOwnerThread());
- if (!Get()) {
- return;
- }
- (Get().get()->*fn)(a1, a2, a3, a4);
- }
-
- // Must be dereferenced only on the owner thread. May be destroyed
- // from any thread.
- base::WeakPtr<T> ptr_;
-
- DISALLOW_COPY_AND_ASSIGN(WeakHandleCore);
-};
-
-} // namespace internal
-
-// May be destroyed on any thread.
-// Copying and assignment are welcome.
-template <typename T>
-class WeakHandle {
- public:
- // Creates an uninitialized WeakHandle.
- WeakHandle() {}
-
- // Creates an initialized WeakHandle from |ptr|.
- explicit WeakHandle(const base::WeakPtr<T>& ptr)
- : core_(new internal::WeakHandleCore<T>(ptr)) {}
-
- // Allow conversion from WeakHandle<U> to WeakHandle<T> if U is
- // convertible to T, but we *must* be on |other|'s owner thread.
- // Note that this doesn't override the regular copy constructor, so
- // that one can be called on any thread.
- template <typename U>
- WeakHandle(const WeakHandle<U>& other) // NOLINT
- : core_(
- other.IsInitialized() ?
- new internal::WeakHandleCore<T>(other.Get()) :
- NULL) {}
-
- // Returns true iff this WeakHandle is initialized. Note that being
- // initialized isn't a guarantee that the underlying object is still
- // alive.
- bool IsInitialized() const {
- return core_.get() != NULL;
- }
-
- // Resets to an uninitialized WeakHandle.
- void Reset() {
- core_ = NULL;
- }
-
- // Must be called only on the underlying object's owner thread.
- base::WeakPtr<T> Get() const {
- CHECK(IsInitialized());
- CHECK(core_->IsOnOwnerThread());
- return core_->Get();
- }
-
- // Call(...) may be called on any thread, but all its arguments
- // should be safe to be bound and copied across threads.
-
- template <typename U>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(void)) const {
- CHECK(IsInitialized());
- core_->Call(from_here, fn);
- }
-
- template <typename U, typename A1>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1),
- typename internal::ParamTraits<A1>::ForwardType a1) const {
- CHECK(IsInitialized());
- core_->Call(from_here, fn, a1);
- }
-
- template <typename U, typename A1, typename A2>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1, A2),
- typename internal::ParamTraits<A1>::ForwardType a1,
- typename internal::ParamTraits<A2>::ForwardType a2) const {
- CHECK(IsInitialized());
- core_->Call(from_here, fn, a1, a2);
- }
-
- template <typename U, typename A1, typename A2, typename A3>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1, A2, A3),
- typename internal::ParamTraits<A1>::ForwardType a1,
- typename internal::ParamTraits<A2>::ForwardType a2,
- typename internal::ParamTraits<A3>::ForwardType a3) const {
- CHECK(IsInitialized());
- core_->Call(from_here, fn, a1, a2, a3);
- }
-
- template <typename U, typename A1, typename A2, typename A3, typename A4>
- void Call(const tracked_objects::Location& from_here,
- void (U::*fn)(A1, A2, A3, A4),
- typename internal::ParamTraits<A1>::ForwardType a1,
- typename internal::ParamTraits<A2>::ForwardType a2,
- typename internal::ParamTraits<A3>::ForwardType a3,
- typename internal::ParamTraits<A4>::ForwardType a4) const {
- CHECK(IsInitialized());
- core_->Call(from_here, fn, a1, a2, a3, a4);
- }
-
- private:
- FRIEND_TEST_ALL_PREFIXES(WeakHandleTest,
- TypeConversionConstructor);
- FRIEND_TEST_ALL_PREFIXES(WeakHandleTest,
- TypeConversionConstructorAssignment);
-
- scoped_refptr<internal::WeakHandleCore<T> > core_;
-};
-
-// Makes a WeakHandle from a WeakPtr.
-template <typename T>
-WeakHandle<T> MakeWeakHandle(const base::WeakPtr<T>& ptr) {
- return WeakHandle<T>(ptr);
-}
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_WEAK_HANDLE_H_
diff --git a/chromium/sync/internal_api/public/util/weak_handle_unittest.cc b/chromium/sync/internal_api/public/util/weak_handle_unittest.cc
deleted file mode 100644
index 72003291e19..00000000000
--- a/chromium/sync/internal_api/public/util/weak_handle_unittest.cc
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/util/weak_handle.h"
-
-#include "base/bind.h"
-#include "base/compiler_specific.h"
-#include "base/location.h"
-#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/threading/thread.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using ::testing::_;
-using ::testing::SaveArg;
-using ::testing::StrictMock;
-
-class Base {
- public:
- Base() : weak_ptr_factory_(this) {}
-
- WeakHandle<Base> AsWeakHandle() {
- return MakeWeakHandle(weak_ptr_factory_.GetWeakPtr());
- }
-
- void Kill() {
- weak_ptr_factory_.InvalidateWeakPtrs();
- }
-
- MOCK_METHOD0(Test, void());
- MOCK_METHOD1(Test1, void(const int&));
- MOCK_METHOD2(Test2, void(const int&, Base*));
- MOCK_METHOD3(Test3, void(const int&, Base*, float));
- MOCK_METHOD4(Test4, void(const int&, Base*, float, const char*));
-
- MOCK_METHOD1(TestWithSelf, void(const WeakHandle<Base>&));
-
- private:
- base::WeakPtrFactory<Base> weak_ptr_factory_;
-};
-
-class Derived : public Base, public base::SupportsWeakPtr<Derived> {};
-
-class WeakHandleTest : public ::testing::Test {
- protected:
- virtual void TearDown() {
- // Process any last-minute posted tasks.
- PumpLoop();
- }
-
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-
- static void CallTestFromOtherThread(tracked_objects::Location from_here,
- const WeakHandle<Base>& h) {
- base::Thread t("Test thread");
- ASSERT_TRUE(t.Start());
- t.message_loop()->PostTask(
- from_here, base::Bind(&WeakHandleTest::CallTest, from_here, h));
- }
-
- private:
- static void CallTest(tracked_objects::Location from_here,
- const WeakHandle<Base>& h) {
- h.Call(from_here, &Base::Test);
- }
-
- base::MessageLoop message_loop_;
-};
-
-TEST_F(WeakHandleTest, Uninitialized) {
- // Default.
- WeakHandle<int> h;
- EXPECT_FALSE(h.IsInitialized());
- // Copy.
- {
- WeakHandle<int> h2(h);
- EXPECT_FALSE(h2.IsInitialized());
- }
- // Assign.
- {
- WeakHandle<int> h2;
- h2 = h;
- EXPECT_FALSE(h.IsInitialized());
- }
-}
-
-TEST_F(WeakHandleTest, InitializedAfterDestroy) {
- WeakHandle<Base> h;
- {
- StrictMock<Base> b;
- h = b.AsWeakHandle();
- }
- EXPECT_TRUE(h.IsInitialized());
- EXPECT_FALSE(h.Get());
-}
-
-TEST_F(WeakHandleTest, InitializedAfterInvalidate) {
- StrictMock<Base> b;
- WeakHandle<Base> h = b.AsWeakHandle();
- b.Kill();
- EXPECT_TRUE(h.IsInitialized());
- EXPECT_FALSE(h.Get());
-}
-
-TEST_F(WeakHandleTest, Call) {
- StrictMock<Base> b;
- const char test_str[] = "test";
- EXPECT_CALL(b, Test());
- EXPECT_CALL(b, Test1(5));
- EXPECT_CALL(b, Test2(5, &b));
- EXPECT_CALL(b, Test3(5, &b, 5));
- EXPECT_CALL(b, Test4(5, &b, 5, test_str));
-
- WeakHandle<Base> h = b.AsWeakHandle();
- EXPECT_TRUE(h.IsInitialized());
-
- // Should run.
- h.Call(FROM_HERE, &Base::Test);
- h.Call(FROM_HERE, &Base::Test1, 5);
- h.Call(FROM_HERE, &Base::Test2, 5, &b);
- h.Call(FROM_HERE, &Base::Test3, 5, &b, 5);
- h.Call(FROM_HERE, &Base::Test4, 5, &b, 5, test_str);
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, CallAfterDestroy) {
- {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test()).Times(0);
-
- WeakHandle<Base> h = b.AsWeakHandle();
- EXPECT_TRUE(h.IsInitialized());
-
- // Should not run.
- h.Call(FROM_HERE, &Base::Test);
- }
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, CallAfterInvalidate) {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test()).Times(0);
-
- WeakHandle<Base> h = b.AsWeakHandle();
- EXPECT_TRUE(h.IsInitialized());
-
- // Should not run.
- h.Call(FROM_HERE, &Base::Test);
-
- b.Kill();
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, CallThreaded) {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test());
-
- WeakHandle<Base> h = b.AsWeakHandle();
- // Should run.
- CallTestFromOtherThread(FROM_HERE, h);
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, CallAfterDestroyThreaded) {
- WeakHandle<Base> h;
- {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test()).Times(0);
- h = b.AsWeakHandle();
- }
-
- // Should not run.
- CallTestFromOtherThread(FROM_HERE, h);
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, CallAfterInvalidateThreaded) {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test()).Times(0);
-
- WeakHandle<Base> h = b.AsWeakHandle();
- b.Kill();
- // Should not run.
- CallTestFromOtherThread(FROM_HERE, h);
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, DeleteOnOtherThread) {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test()).Times(0);
-
- WeakHandle<Base>* h = new WeakHandle<Base>(b.AsWeakHandle());
-
- {
- base::Thread t("Test thread");
- ASSERT_TRUE(t.Start());
- t.message_loop()->DeleteSoon(FROM_HERE, h);
- }
-
- PumpLoop();
-}
-
-void CallTestWithSelf(const WeakHandle<Base>& b1) {
- StrictMock<Base> b2;
- b1.Call(FROM_HERE, &Base::TestWithSelf, b2.AsWeakHandle());
-}
-
-TEST_F(WeakHandleTest, WithDestroyedThread) {
- StrictMock<Base> b1;
- WeakHandle<Base> b2;
- EXPECT_CALL(b1, TestWithSelf(_)).WillOnce(SaveArg<0>(&b2));
-
- {
- base::Thread t("Test thread");
- ASSERT_TRUE(t.Start());
- t.message_loop()->PostTask(FROM_HERE,
- base::Bind(&CallTestWithSelf,
- b1.AsWeakHandle()));
- }
-
- // Calls b1.TestWithSelf().
- PumpLoop();
-
- // Shouldn't do anything, since the thread is gone.
- b2.Call(FROM_HERE, &Base::Test);
-
- // |b2| shouldn't leak when it's destroyed, even if the original
- // thread is gone.
-}
-
-TEST_F(WeakHandleTest, InitializedAcrossCopyAssign) {
- StrictMock<Base> b;
- EXPECT_CALL(b, Test()).Times(3);
-
- EXPECT_TRUE(b.AsWeakHandle().IsInitialized());
- b.AsWeakHandle().Call(FROM_HERE, &Base::Test);
-
- {
- WeakHandle<Base> h(b.AsWeakHandle());
- EXPECT_TRUE(h.IsInitialized());
- h.Call(FROM_HERE, &Base::Test);
- h.Reset();
- EXPECT_FALSE(h.IsInitialized());
- }
-
- {
- WeakHandle<Base> h;
- h = b.AsWeakHandle();
- EXPECT_TRUE(h.IsInitialized());
- h.Call(FROM_HERE, &Base::Test);
- h.Reset();
- EXPECT_FALSE(h.IsInitialized());
- }
-
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, TypeConversionConstructor) {
- StrictMock<Derived> d;
- EXPECT_CALL(d, Test()).Times(2);
-
- const WeakHandle<Derived> weak_handle = MakeWeakHandle(d.AsWeakPtr());
-
- // Should trigger type conversion constructor.
- const WeakHandle<Base> base_weak_handle(weak_handle);
- // Should trigger regular copy constructor.
- const WeakHandle<Derived> derived_weak_handle(weak_handle);
-
- EXPECT_TRUE(base_weak_handle.IsInitialized());
- base_weak_handle.Call(FROM_HERE, &Base::Test);
-
- EXPECT_TRUE(derived_weak_handle.IsInitialized());
- // Copy constructor shouldn't construct a new |core_|.
- EXPECT_EQ(weak_handle.core_.get(), derived_weak_handle.core_.get());
- derived_weak_handle.Call(FROM_HERE, &Base::Test);
-
- PumpLoop();
-}
-
-TEST_F(WeakHandleTest, TypeConversionConstructorMakeWeakHandle) {
- const base::WeakPtr<Derived> weak_ptr;
-
- // Should trigger type conversion constructor after MakeWeakHandle.
- WeakHandle<Base> base_weak_handle(MakeWeakHandle(weak_ptr));
- // Should trigger regular copy constructor after MakeWeakHandle.
- const WeakHandle<Derived> derived_weak_handle(MakeWeakHandle(weak_ptr));
-
- EXPECT_TRUE(base_weak_handle.IsInitialized());
- EXPECT_TRUE(derived_weak_handle.IsInitialized());
-}
-
-TEST_F(WeakHandleTest, TypeConversionConstructorAssignment) {
- const WeakHandle<Derived> weak_handle =
- MakeWeakHandle(Derived().AsWeakPtr());
-
- // Should trigger type conversion constructor before the assignment.
- WeakHandle<Base> base_weak_handle;
- base_weak_handle = weak_handle;
- // Should trigger regular copy constructor before the assignment.
- WeakHandle<Derived> derived_weak_handle;
- derived_weak_handle = weak_handle;
-
- EXPECT_TRUE(base_weak_handle.IsInitialized());
- EXPECT_TRUE(derived_weak_handle.IsInitialized());
- // Copy constructor shouldn't construct a new |core_|.
- EXPECT_EQ(weak_handle.core_.get(), derived_weak_handle.core_.get());
-}
-
-TEST_F(WeakHandleTest, TypeConversionConstructorUninitialized) {
- const WeakHandle<Base> base_weak_handle = WeakHandle<Derived>();
- EXPECT_FALSE(base_weak_handle.IsInitialized());
-}
-
-TEST_F(WeakHandleTest, TypeConversionConstructorUninitializedAssignment) {
- WeakHandle<Base> base_weak_handle;
- base_weak_handle = WeakHandle<Derived>();
- EXPECT_FALSE(base_weak_handle.IsInitialized());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/public/write_node.h b/chromium/sync/internal_api/public/write_node.h
deleted file mode 100644
index d3781e8b9e1..00000000000
--- a/chromium/sync/internal_api/public/write_node.h
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_WRITE_NODE_H_
-#define SYNC_INTERNAL_API_PUBLIC_WRITE_NODE_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base_node.h"
-
-namespace sync_pb {
-class AppSpecifics;
-class AutofillSpecifics;
-class AutofillProfileSpecifics;
-class BookmarkSpecifics;
-class EntitySpecifics;
-class ExtensionSpecifics;
-class SessionSpecifics;
-class NigoriSpecifics;
-class PasswordSpecificsData;
-class ThemeSpecifics;
-class TypedUrlSpecifics;
-}
-
-namespace syncer {
-
-class Cryptographer;
-class WriteTransaction;
-
-namespace syncable {
-class Entry;
-class MutableEntry;
-}
-
-// WriteNode extends BaseNode to add mutation, and wraps
-// syncable::MutableEntry. A WriteTransaction is needed to create a WriteNode.
-class SYNC_EXPORT WriteNode : public BaseNode {
- public:
- enum InitUniqueByCreationResult {
- INIT_SUCCESS,
- // The tag passed into this method was empty.
- INIT_FAILED_EMPTY_TAG,
- // An entry with this tag already exists.
- INIT_FAILED_ENTRY_ALREADY_EXISTS,
- // The constructor for a new MutableEntry with the specified data failed.
- INIT_FAILED_COULD_NOT_CREATE_ENTRY,
- // Setting the predecessor failed
- INIT_FAILED_SET_PREDECESSOR,
- };
-
- // Create a WriteNode using the given transaction.
- explicit WriteNode(WriteTransaction* transaction);
- virtual ~WriteNode();
-
- // A client must use one (and only one) of the following Init variants to
- // populate the node.
-
- // BaseNode implementation.
- virtual InitByLookupResult InitByIdLookup(int64 id) OVERRIDE;
- virtual InitByLookupResult InitByClientTagLookup(
- ModelType model_type,
- const std::string& tag) OVERRIDE;
-
- // Create a new bookmark node with the specified parent and predecessor. Use
- // a NULL |predecessor| to indicate that this is to be the first child.
- // |predecessor| must be a child of |new_parent| or NULL. Returns false on
- // failure.
- bool InitBookmarkByCreation(const BaseNode& parent,
- const BaseNode* predecessor);
-
- // Create nodes using this function if they're unique items that
- // you want to fetch using client_tag. Note that the behavior of these
- // items is slightly different than that of normal items.
- // Most importantly, if it exists locally, this function will
- // actually undelete it
- // Client unique tagged nodes must NOT be folders.
- InitUniqueByCreationResult InitUniqueByCreation(
- ModelType model_type,
- const BaseNode& parent,
- const std::string& client_tag);
-
- // Each server-created permanent node is tagged with a unique string.
- // Look up the node with the particular tag. If it does not exist,
- // return false.
- InitByLookupResult InitByTagLookup(const std::string& tag);
-
- // These Set() functions correspond to the Get() functions of BaseNode.
- void SetIsFolder(bool folder);
- void SetTitle(const std::wstring& title);
-
- // External ID is a client-only field, so setting it doesn't cause the item to
- // be synced again.
- void SetExternalId(int64 external_id);
-
- // Remove this node and its children and sync deletion to server.
- void Tombstone();
-
- // If the node is known by server, remove it and its children but don't sync
- // deletion to server. Do nothing if the node is not known by server so that
- // server can have a record of the node.
- void Drop();
-
- // Set a new parent and position. Position is specified by |predecessor|; if
- // it is NULL, the node is moved to the first position. |predecessor| must
- // be a child of |new_parent| or NULL. Returns false on failure..
- bool SetPosition(const BaseNode& new_parent, const BaseNode* predecessor);
-
- // Set the bookmark specifics (url and favicon).
- // Should only be called if GetModelType() == BOOKMARK.
- void SetBookmarkSpecifics(const sync_pb::BookmarkSpecifics& specifics);
-
- // Generic set specifics method. Will extract the model type from |specifics|.
- void SetEntitySpecifics(const sync_pb::EntitySpecifics& specifics);
-
- // Resets the EntitySpecifics for this node based on the unencrypted data.
- // Will encrypt if necessary.
- void ResetFromSpecifics();
-
- // TODO(sync): Remove the setters below when the corresponding data
- // types are ported to the new sync service API.
-
- // Set the app specifics (id, update url, enabled state, etc).
- // Should only be called if GetModelType() == APPS.
- void SetAppSpecifics(const sync_pb::AppSpecifics& specifics);
-
- // Set the autofill specifics (name and value).
- // Should only be called if GetModelType() == AUTOFILL.
- void SetAutofillSpecifics(const sync_pb::AutofillSpecifics& specifics);
-
- void SetAutofillProfileSpecifics(
- const sync_pb::AutofillProfileSpecifics& specifics);
-
- // Set the nigori specifics.
- // Should only be called if GetModelType() == NIGORI.
- void SetNigoriSpecifics(const sync_pb::NigoriSpecifics& specifics);
-
- // Set the password specifics.
- // Should only be called if GetModelType() == PASSWORD.
- void SetPasswordSpecifics(const sync_pb::PasswordSpecificsData& specifics);
-
- // Set the theme specifics (name and value).
- // Should only be called if GetModelType() == THEME.
- void SetThemeSpecifics(const sync_pb::ThemeSpecifics& specifics);
-
- // Set the typed_url specifics (url, title, typed_count, etc).
- // Should only be called if GetModelType() == TYPED_URLS.
- void SetTypedUrlSpecifics(const sync_pb::TypedUrlSpecifics& specifics);
-
- // Set the extension specifics (id, update url, enabled state, etc).
- // Should only be called if GetModelType() == EXTENSIONS.
- void SetExtensionSpecifics(const sync_pb::ExtensionSpecifics& specifics);
-
- // Set the session specifics (windows, tabs, navigations etc.).
- // Should only be called if GetModelType() == SESSIONS.
- void SetSessionSpecifics(const sync_pb::SessionSpecifics& specifics);
-
- // Set the managed user setting specifics (name and value).
- // Should only be called if GetModelType() == MANAGED_USER_SETTINGS.
- void SetManagedUserSettingSpecifics(
- const sync_pb::ManagedUserSettingSpecifics& specifics);
-
- // Set the managed user setting specifics (name and value).
- // Should only be called if GetModelType() == MANAGED_USERS.
- void SetManagedUserSpecifics(const sync_pb::ManagedUserSpecifics& specifics);
-
- // Set the device info specifics.
- // Should only be called if GetModelType() == DEVICE_INFO.
- void SetDeviceInfoSpecifics(const sync_pb::DeviceInfoSpecifics& specifics);
-
- // Set the experiments specifics.
- // Should only be called if GetModelType() == EXPERIMENTS.
- void SetExperimentsSpecifics(const sync_pb::ExperimentsSpecifics& specifics);
-
- // Set the priority preference specifics.
- // Should only be called if GetModelType() == PRIORITY_PREFERENCE.
- void SetPriorityPreferenceSpecifics(
- const sync_pb::PriorityPreferenceSpecifics& specifics);
-
- // Implementation of BaseNode's abstract virtual accessors.
- virtual const syncable::Entry* GetEntry() const OVERRIDE;
-
- virtual const BaseTransaction* GetTransaction() const OVERRIDE;
-
- syncable::MutableEntry* GetMutableEntryForTest();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, EncryptBookmarksWithLegacyData);
-
- void* operator new(size_t size); // Node is meant for stack use only.
-
- // Helper to set the previous node.
- bool PutPredecessor(const BaseNode* predecessor) WARN_UNUSED_RESULT;
-
- // Sets IS_UNSYNCED and SYNCING to ensure this entry is considered in an
- // upcoming commit pass.
- void MarkForSyncing();
-
- // The underlying syncable object which this class wraps.
- syncable::MutableEntry* entry_;
-
- // The sync API transaction that is the parent of this node.
- WriteTransaction* transaction_;
-
- DISALLOW_COPY_AND_ASSIGN(WriteNode);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_WRITE_NODE_H_
diff --git a/chromium/sync/internal_api/public/write_transaction.h b/chromium/sync/internal_api/public/write_transaction.h
deleted file mode 100644
index 9008b4f2b8a..00000000000
--- a/chromium/sync/internal_api/public/write_transaction.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_PUBLIC_WRITE_TRANSACTION_H_
-#define SYNC_INTERNAL_API_PUBLIC_WRITE_TRANSACTION_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base_transaction.h"
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-namespace syncable {
-class BaseTransaction;
-class WriteTransaction;
-} // namespace syncable
-
-// Sync API's WriteTransaction is a read/write BaseTransaction. It wraps
-// a syncable::WriteTransaction.
-//
-// NOTE: Only a single model type can be mutated for a given
-// WriteTransaction.
-class SYNC_EXPORT WriteTransaction : public BaseTransaction {
- public:
- // Start a new read/write transaction.
- WriteTransaction(const tracked_objects::Location& from_here,
- UserShare* share);
- // |transaction_version| stores updated model and nodes version if model
- // is changed by the transaction, or syncer::syncable::kInvalidTransaction
- // if not after transaction is closed. This constructor is used for model
- // types that support embassy data.
- WriteTransaction(const tracked_objects::Location& from_here,
- UserShare* share, int64* transaction_version);
- virtual ~WriteTransaction();
-
- // Provide access to the syncable transaction from the API WriteNode.
- virtual syncable::BaseTransaction* GetWrappedTrans() const OVERRIDE;
- syncable::WriteTransaction* GetWrappedWriteTrans() { return transaction_; }
-
- protected:
- WriteTransaction() {}
-
- void SetTransaction(syncable::WriteTransaction* trans) {
- transaction_ = trans;
- }
-
- private:
- void* operator new(size_t size); // Transaction is meant for stack use only.
-
- // The underlying syncable object which this class wraps.
- syncable::WriteTransaction* transaction_;
-
- DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_WRITE_TRANSACTION_H_
diff --git a/chromium/sync/internal_api/read_node.cc b/chromium/sync/internal_api/read_node.cc
deleted file mode 100644
index ec85af64121..00000000000
--- a/chromium/sync/internal_api/read_node.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/read_node.h"
-
-#include "base/logging.h"
-#include "sync/internal_api/public/base_transaction.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_util.h"
-
-namespace syncer {
-
-//////////////////////////////////////////////////////////////////////////
-// ReadNode member definitions
-ReadNode::ReadNode(const BaseTransaction* transaction)
- : entry_(NULL), transaction_(transaction) {
- DCHECK(transaction);
-}
-
-ReadNode::ReadNode() {
- entry_ = NULL;
- transaction_ = NULL;
-}
-
-ReadNode::~ReadNode() {
- delete entry_;
-}
-
-void ReadNode::InitByRootLookup() {
- DCHECK(!entry_) << "Init called twice";
- syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
- entry_ = new syncable::Entry(trans, syncable::GET_BY_ID, trans->root_id());
- if (!entry_->good())
- DCHECK(false) << "Could not lookup root node for reading.";
-}
-
-BaseNode::InitByLookupResult ReadNode::InitByIdLookup(int64 id) {
- DCHECK(!entry_) << "Init called twice";
- DCHECK_NE(id, kInvalidId);
- syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
- entry_ = new syncable::Entry(trans, syncable::GET_BY_HANDLE, id);
- if (!entry_->good())
- return INIT_FAILED_ENTRY_NOT_GOOD;
- if (entry_->GetIsDel())
- return INIT_FAILED_ENTRY_IS_DEL;
- ModelType model_type = GetModelType();
- LOG_IF(WARNING, model_type == UNSPECIFIED || model_type == TOP_LEVEL_FOLDER)
- << "SyncAPI InitByIdLookup referencing unusual object.";
- return DecryptIfNecessary() ? INIT_OK : INIT_FAILED_DECRYPT_IF_NECESSARY;
-}
-
-BaseNode::InitByLookupResult ReadNode::InitByClientTagLookup(
- ModelType model_type,
- const std::string& tag) {
- DCHECK(!entry_) << "Init called twice";
- if (tag.empty())
- return INIT_FAILED_PRECONDITION;
-
- const std::string hash = syncable::GenerateSyncableHash(model_type, tag);
-
- entry_ = new syncable::Entry(transaction_->GetWrappedTrans(),
- syncable::GET_BY_CLIENT_TAG, hash);
- if (!entry_->good())
- return INIT_FAILED_ENTRY_NOT_GOOD;
- if (entry_->GetIsDel())
- return INIT_FAILED_ENTRY_IS_DEL;
- return DecryptIfNecessary() ? INIT_OK : INIT_FAILED_DECRYPT_IF_NECESSARY;
-}
-
-const syncable::Entry* ReadNode::GetEntry() const {
- return entry_;
-}
-
-const BaseTransaction* ReadNode::GetTransaction() const {
- return transaction_;
-}
-
-BaseNode::InitByLookupResult ReadNode::InitByTagLookup(
- const std::string& tag) {
- DCHECK(!entry_) << "Init called twice";
- if (tag.empty())
- return INIT_FAILED_PRECONDITION;
- syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
- entry_ = new syncable::Entry(trans, syncable::GET_BY_SERVER_TAG, tag);
- if (!entry_->good())
- return INIT_FAILED_ENTRY_NOT_GOOD;
- if (entry_->GetIsDel())
- return INIT_FAILED_ENTRY_IS_DEL;
- ModelType model_type = GetModelType();
- LOG_IF(WARNING, model_type == UNSPECIFIED || model_type == TOP_LEVEL_FOLDER)
- << "SyncAPI InitByTagLookup referencing unusually typed object.";
- return DecryptIfNecessary() ? INIT_OK : INIT_FAILED_DECRYPT_IF_NECESSARY;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/read_transaction.cc b/chromium/sync/internal_api/read_transaction.cc
deleted file mode 100644
index 81e53400d2a..00000000000
--- a/chromium/sync/internal_api/read_transaction.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/read_transaction.h"
-
-#include "sync/syncable/directory.h"
-#include "sync/syncable/syncable_read_transaction.h"
-
-namespace syncer {
-
-//////////////////////////////////////////////////////////////////////////
-// ReadTransaction member definitions
-ReadTransaction::ReadTransaction(const tracked_objects::Location& from_here,
- UserShare* share)
- : BaseTransaction(share),
- transaction_(NULL),
- close_transaction_(true) {
- transaction_ = new syncable::ReadTransaction(from_here,
- share->directory.get());
-}
-
-ReadTransaction::ReadTransaction(UserShare* share,
- syncable::BaseTransaction* trans)
- : BaseTransaction(share),
- transaction_(trans),
- close_transaction_(false) {}
-
-ReadTransaction::~ReadTransaction() {
- if (close_transaction_) {
- delete transaction_;
- }
-}
-
-syncable::BaseTransaction* ReadTransaction::GetWrappedTrans() const {
- return transaction_;
-}
-
-int64 ReadTransaction::GetModelVersion(ModelType type) {
- return transaction_->directory()->GetTransactionVersion(type);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/sync_encryption_handler_impl.cc b/chromium/sync/internal_api/sync_encryption_handler_impl.cc
deleted file mode 100644
index 34bf0335c8f..00000000000
--- a/chromium/sync/internal_api/sync_encryption_handler_impl.cc
+++ /dev/null
@@ -1,1651 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/sync_encryption_handler_impl.h"
-
-#include <queue>
-#include <string>
-
-#include "base/base64.h"
-#include "base/bind.h"
-#include "base/json/json_string_value_serializer.h"
-#include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram.h"
-#include "base/time/time.h"
-#include "base/tracked_objects.h"
-#include "sync/internal_api/public/read_node.h"
-#include "sync/internal_api/public/read_transaction.h"
-#include "sync/internal_api/public/user_share.h"
-#include "sync/internal_api/public/util/experiments.h"
-#include "sync/internal_api/public/util/sync_string_conversions.h"
-#include "sync/internal_api/public/write_node.h"
-#include "sync/internal_api/public/write_transaction.h"
-#include "sync/protocol/encryption.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/encryptor.h"
-#include "sync/util/time.h"
-
-namespace syncer {
-
-namespace {
-
-// The maximum number of times we will automatically overwrite the nigori node
-// because the encryption keys don't match (per chrome instantiation).
-// We protect ourselves against nigori rollbacks, but it's possible two
-// different clients might have contrasting view of what the nigori node state
-// should be, in which case they might ping pong (see crbug.com/119207).
-static const int kNigoriOverwriteLimit = 10;
-
-// Enumeration of nigori keystore migration results (for use in UMA stats).
-enum NigoriMigrationResult {
- FAILED_TO_SET_DEFAULT_KEYSTORE,
- FAILED_TO_SET_NONDEFAULT_KEYSTORE,
- FAILED_TO_EXTRACT_DECRYPTOR,
- FAILED_TO_EXTRACT_KEYBAG,
- MIGRATION_SUCCESS_KEYSTORE_NONDEFAULT,
- MIGRATION_SUCCESS_KEYSTORE_DEFAULT,
- MIGRATION_SUCCESS_FROZEN_IMPLICIT,
- MIGRATION_SUCCESS_CUSTOM,
- MIGRATION_RESULT_SIZE,
-};
-
-enum NigoriMigrationState {
- MIGRATED,
- NOT_MIGRATED_CRYPTO_NOT_READY,
- NOT_MIGRATED_NO_KEYSTORE_KEY,
- NOT_MIGRATED_UNKNOWN_REASON,
- MIGRATION_STATE_SIZE,
-};
-
-// The new passphrase state is sufficient to determine whether a nigori node
-// is migrated to support keystore encryption. In addition though, we also
-// want to verify the conditions for proper keystore encryption functionality.
-// 1. Passphrase state is set.
-// 2. Migration time is set.
-// 3. Frozen keybag is true
-// 4. If passphrase state is keystore, keystore_decryptor_token is set.
-bool IsNigoriMigratedToKeystore(const sync_pb::NigoriSpecifics& nigori) {
- if (!nigori.has_passphrase_type())
- return false;
- if (!nigori.has_keystore_migration_time())
- return false;
- if (!nigori.keybag_is_frozen())
- return false;
- if (nigori.passphrase_type() ==
- sync_pb::NigoriSpecifics::IMPLICIT_PASSPHRASE)
- return false;
- if (nigori.passphrase_type() ==
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE &&
- nigori.keystore_decryptor_token().blob().empty())
- return false;
- if (!nigori.has_keystore_migration_time())
- return false;
- return true;
-}
-
-PassphraseType ProtoPassphraseTypeToEnum(
- sync_pb::NigoriSpecifics::PassphraseType type) {
- switch(type) {
- case sync_pb::NigoriSpecifics::IMPLICIT_PASSPHRASE:
- return IMPLICIT_PASSPHRASE;
- case sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE:
- return KEYSTORE_PASSPHRASE;
- case sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE:
- return CUSTOM_PASSPHRASE;
- case sync_pb::NigoriSpecifics::FROZEN_IMPLICIT_PASSPHRASE:
- return FROZEN_IMPLICIT_PASSPHRASE;
- default:
- NOTREACHED();
- return IMPLICIT_PASSPHRASE;
- };
-}
-
-sync_pb::NigoriSpecifics::PassphraseType
-EnumPassphraseTypeToProto(PassphraseType type) {
- switch(type) {
- case IMPLICIT_PASSPHRASE:
- return sync_pb::NigoriSpecifics::IMPLICIT_PASSPHRASE;
- case KEYSTORE_PASSPHRASE:
- return sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE;
- case CUSTOM_PASSPHRASE:
- return sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE;
- case FROZEN_IMPLICIT_PASSPHRASE:
- return sync_pb::NigoriSpecifics::FROZEN_IMPLICIT_PASSPHRASE;
- default:
- NOTREACHED();
- return sync_pb::NigoriSpecifics::IMPLICIT_PASSPHRASE;
- };
-}
-
-bool IsExplicitPassphrase(PassphraseType type) {
- return type == CUSTOM_PASSPHRASE || type == FROZEN_IMPLICIT_PASSPHRASE;
-}
-
-// Keystore Bootstrap Token helper methods.
-// The bootstrap is a base64 encoded, encrypted, ListValue of keystore key
-// strings, with the current keystore key as the last value in the list.
-std::string PackKeystoreBootstrapToken(
- const std::vector<std::string>& old_keystore_keys,
- const std::string& current_keystore_key,
- Encryptor* encryptor) {
- if (current_keystore_key.empty())
- return std::string();
-
- base::ListValue keystore_key_values;
- for (size_t i = 0; i < old_keystore_keys.size(); ++i)
- keystore_key_values.AppendString(old_keystore_keys[i]);
- keystore_key_values.AppendString(current_keystore_key);
-
- // Update the bootstrap token.
- // The bootstrap is a base64 encoded, encrypted, ListValue of keystore key
- // strings, with the current keystore key as the last value in the list.
- std::string serialized_keystores;
- JSONStringValueSerializer json(&serialized_keystores);
- json.Serialize(keystore_key_values);
- std::string encrypted_keystores;
- encryptor->EncryptString(serialized_keystores,
- &encrypted_keystores);
- std::string keystore_bootstrap;
- base::Base64Encode(encrypted_keystores, &keystore_bootstrap);
- return keystore_bootstrap;
-}
-
-bool UnpackKeystoreBootstrapToken(
- const std::string& keystore_bootstrap_token,
- Encryptor* encryptor,
- std::vector<std::string>* old_keystore_keys,
- std::string* current_keystore_key) {
- if (keystore_bootstrap_token.empty())
- return false;
- std::string base64_decoded_keystore_bootstrap;
- if (!base::Base64Decode(keystore_bootstrap_token,
- &base64_decoded_keystore_bootstrap)) {
- return false;
- }
- std::string decrypted_keystore_bootstrap;
- if (!encryptor->DecryptString(base64_decoded_keystore_bootstrap,
- &decrypted_keystore_bootstrap)) {
- return false;
- }
- JSONStringValueSerializer json(&decrypted_keystore_bootstrap);
- scoped_ptr<base::Value> deserialized_keystore_keys(
- json.Deserialize(NULL, NULL));
- if (!deserialized_keystore_keys)
- return false;
- base::ListValue* internal_list_value = NULL;
- if (!deserialized_keystore_keys->GetAsList(&internal_list_value))
- return false;
- int number_of_keystore_keys = internal_list_value->GetSize();
- if (!internal_list_value->GetString(number_of_keystore_keys - 1,
- current_keystore_key)) {
- return false;
- }
- old_keystore_keys->resize(number_of_keystore_keys - 1);
- for (int i = 0; i < number_of_keystore_keys - 1; ++i)
- internal_list_value->GetString(i, &(*old_keystore_keys)[i]);
- return true;
-}
-
-} // namespace
-
-SyncEncryptionHandlerImpl::Vault::Vault(
- Encryptor* encryptor,
- ModelTypeSet encrypted_types)
- : cryptographer(encryptor),
- encrypted_types(encrypted_types) {
-}
-
-SyncEncryptionHandlerImpl::Vault::~Vault() {
-}
-
-SyncEncryptionHandlerImpl::SyncEncryptionHandlerImpl(
- UserShare* user_share,
- Encryptor* encryptor,
- const std::string& restored_key_for_bootstrapping,
- const std::string& restored_keystore_key_for_bootstrapping)
- : user_share_(user_share),
- vault_unsafe_(encryptor, SensitiveTypes()),
- encrypt_everything_(false),
- passphrase_type_(IMPLICIT_PASSPHRASE),
- nigori_overwrite_count_(0),
- weak_ptr_factory_(this) {
- // Restore the cryptographer's previous keys. Note that we don't add the
- // keystore keys into the cryptographer here, in case a migration was pending.
- vault_unsafe_.cryptographer.Bootstrap(restored_key_for_bootstrapping);
-
- // If this fails, we won't have a valid keystore key, and will simply request
- // new ones from the server on the next DownloadUpdates.
- UnpackKeystoreBootstrapToken(
- restored_keystore_key_for_bootstrapping,
- encryptor,
- &old_keystore_keys_,
- &keystore_key_);
-}
-
-SyncEncryptionHandlerImpl::~SyncEncryptionHandlerImpl() {}
-
-void SyncEncryptionHandlerImpl::AddObserver(Observer* observer) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!observers_.HasObserver(observer));
- observers_.AddObserver(observer);
-}
-
-void SyncEncryptionHandlerImpl::RemoveObserver(Observer* observer) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(observers_.HasObserver(observer));
- observers_.RemoveObserver(observer);
-}
-
-void SyncEncryptionHandlerImpl::Init() {
- DCHECK(thread_checker_.CalledOnValidThread());
- WriteTransaction trans(FROM_HERE, user_share_);
- WriteNode node(&trans);
-
- if (node.InitByTagLookup(kNigoriTag) != BaseNode::INIT_OK)
- return;
- if (!ApplyNigoriUpdateImpl(node.GetNigoriSpecifics(),
- trans.GetWrappedTrans())) {
- WriteEncryptionStateToNigori(&trans);
- }
-
- bool has_pending_keys = UnlockVault(
- trans.GetWrappedTrans()).cryptographer.has_pending_keys();
- bool is_ready = UnlockVault(
- trans.GetWrappedTrans()).cryptographer.is_ready();
- // Log the state of the cryptographer regardless of migration state.
- UMA_HISTOGRAM_BOOLEAN("Sync.CryptographerReady", is_ready);
- UMA_HISTOGRAM_BOOLEAN("Sync.CryptographerPendingKeys", has_pending_keys);
- if (IsNigoriMigratedToKeystore(node.GetNigoriSpecifics())) {
- // This account has a nigori node that has been migrated to support
- // keystore.
- UMA_HISTOGRAM_ENUMERATION("Sync.NigoriMigrationState",
- MIGRATED,
- MIGRATION_STATE_SIZE);
- if (has_pending_keys && passphrase_type_ == KEYSTORE_PASSPHRASE) {
- // If this is happening, it means the keystore decryptor is either
- // undecryptable with the available keystore keys or does not match the
- // nigori keybag's encryption key. Otherwise we're simply missing the
- // keystore key.
- UMA_HISTOGRAM_BOOLEAN("Sync.KeystoreDecryptionFailed",
- !keystore_key_.empty());
- }
- } else if (!is_ready) {
- // Migration cannot occur until the cryptographer is ready (initialized
- // with GAIA password and any pending keys resolved).
- UMA_HISTOGRAM_ENUMERATION("Sync.NigoriMigrationState",
- NOT_MIGRATED_CRYPTO_NOT_READY,
- MIGRATION_STATE_SIZE);
- } else if (keystore_key_.empty()) {
- // The client has no keystore key, either because it is not yet enabled or
- // the server is not sending a valid keystore key.
- UMA_HISTOGRAM_ENUMERATION("Sync.NigoriMigrationState",
- NOT_MIGRATED_NO_KEYSTORE_KEY,
- MIGRATION_STATE_SIZE);
- } else {
- // If the above conditions have been met and the nigori node is still not
- // migrated, something failed in the migration process.
- UMA_HISTOGRAM_ENUMERATION("Sync.NigoriMigrationState",
- NOT_MIGRATED_UNKNOWN_REASON,
- MIGRATION_STATE_SIZE);
- }
-
-
- // Always trigger an encrypted types and cryptographer state change event at
- // init time so observers get the initial values.
- FOR_EACH_OBSERVER(
- Observer, observers_,
- OnEncryptedTypesChanged(
- UnlockVault(trans.GetWrappedTrans()).encrypted_types,
- encrypt_everything_));
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnCryptographerStateChanged(
- &UnlockVaultMutable(trans.GetWrappedTrans())->cryptographer));
-
- // If the cryptographer is not ready (either it has pending keys or we
- // failed to initialize it), we don't want to try and re-encrypt the data.
- // If we had encrypted types, the DataTypeManager will block, preventing
- // sync from happening until the the passphrase is provided.
- if (UnlockVault(trans.GetWrappedTrans()).cryptographer.is_ready())
- ReEncryptEverything(&trans);
-}
-
-void SyncEncryptionHandlerImpl::SetEncryptionPassphrase(
- const std::string& passphrase,
- bool is_explicit) {
- DCHECK(thread_checker_.CalledOnValidThread());
- // We do not accept empty passphrases.
- if (passphrase.empty()) {
- NOTREACHED() << "Cannot encrypt with an empty passphrase.";
- return;
- }
-
- // All accesses to the cryptographer are protected by a transaction.
- WriteTransaction trans(FROM_HERE, user_share_);
- KeyParams key_params = {"localhost", "dummy", passphrase};
- WriteNode node(&trans);
- if (node.InitByTagLookup(kNigoriTag) != BaseNode::INIT_OK) {
- NOTREACHED();
- return;
- }
-
- Cryptographer* cryptographer =
- &UnlockVaultMutable(trans.GetWrappedTrans())->cryptographer;
-
- // Once we've migrated to keystore, the only way to set a passphrase for
- // encryption is to set a custom passphrase.
- if (IsNigoriMigratedToKeystore(node.GetNigoriSpecifics())) {
- if (!is_explicit) {
- DCHECK(cryptographer->is_ready());
- // The user is setting a new implicit passphrase. At this point we don't
- // care, so drop it on the floor. This is safe because if we have a
- // migrated nigori node, then we don't need to create an initial
- // encryption key.
- LOG(WARNING) << "Ignoring new implicit passphrase. Keystore migration "
- << "already performed.";
- return;
- }
- // Will fail if we already have an explicit passphrase or we have pending
- // keys.
- SetCustomPassphrase(passphrase, &trans, &node);
-
- // When keystore migration occurs, the "CustomEncryption" UMA stat must be
- // logged as true.
- UMA_HISTOGRAM_BOOLEAN("Sync.CustomEncryption", true);
- return;
- }
-
- std::string bootstrap_token;
- sync_pb::EncryptedData pending_keys;
- if (cryptographer->has_pending_keys())
- pending_keys = cryptographer->GetPendingKeys();
- bool success = false;
-
- // There are six cases to handle here:
- // 1. The user has no pending keys and is setting their current GAIA password
- // as the encryption passphrase. This happens either during first time sync
- // with a clean profile, or after re-authenticating on a profile that was
- // already signed in with the cryptographer ready.
- // 2. The user has no pending keys, and is overwriting an (already provided)
- // implicit passphrase with an explicit (custom) passphrase.
- // 3. The user has pending keys for an explicit passphrase that is somehow set
- // to their current GAIA passphrase.
- // 4. The user has pending keys encrypted with their current GAIA passphrase
- // and the caller passes in the current GAIA passphrase.
- // 5. The user has pending keys encrypted with an older GAIA passphrase
- // and the caller passes in the current GAIA passphrase.
- // 6. The user has previously done encryption with an explicit passphrase.
- // Furthermore, we enforce the fact that the bootstrap encryption token will
- // always be derived from the newest GAIA password if the account is using
- // an implicit passphrase (even if the data is encrypted with an old GAIA
- // password). If the account is using an explicit (custom) passphrase, the
- // bootstrap token will be derived from the most recently provided explicit
- // passphrase (that was able to decrypt the data).
- if (!IsExplicitPassphrase(passphrase_type_)) {
- if (!cryptographer->has_pending_keys()) {
- if (cryptographer->AddKey(key_params)) {
- // Case 1 and 2. We set a new GAIA passphrase when there are no pending
- // keys (1), or overwriting an implicit passphrase with a new explicit
- // one (2) when there are no pending keys.
- if (is_explicit) {
- DVLOG(1) << "Setting explicit passphrase for encryption.";
- passphrase_type_ = CUSTOM_PASSPHRASE;
- custom_passphrase_time_ = base::Time::Now();
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseTypeChanged(
- passphrase_type_,
- GetExplicitPassphraseTime()));
- } else {
- DVLOG(1) << "Setting implicit passphrase for encryption.";
- }
- cryptographer->GetBootstrapToken(&bootstrap_token);
-
- // With M26, sync accounts can be in only one of two encryption states:
- // 1) Encrypt only passwords with an implicit passphrase.
- // 2) Encrypt all sync datatypes with an explicit passphrase.
- // We deprecate the "EncryptAllData" and "CustomPassphrase" histograms,
- // and keep track of an account's encryption state via the
- // "CustomEncryption" histogram. See http://crbug.com/131478.
- UMA_HISTOGRAM_BOOLEAN("Sync.CustomEncryption", is_explicit);
-
- success = true;
- } else {
- NOTREACHED() << "Failed to add key to cryptographer.";
- success = false;
- }
- } else { // cryptographer->has_pending_keys() == true
- if (is_explicit) {
- // This can only happen if the nigori node is updated with a new
- // implicit passphrase while a client is attempting to set a new custom
- // passphrase (race condition).
- DVLOG(1) << "Failing because an implicit passphrase is already set.";
- success = false;
- } else { // is_explicit == false
- if (cryptographer->DecryptPendingKeys(key_params)) {
- // Case 4. We successfully decrypted with the implicit GAIA passphrase
- // passed in.
- DVLOG(1) << "Implicit internal passphrase accepted for decryption.";
- cryptographer->GetBootstrapToken(&bootstrap_token);
- success = true;
- } else {
- // Case 5. Encryption was done with an old GAIA password, but we were
- // provided with the current GAIA password. We need to generate a new
- // bootstrap token to preserve it. We build a temporary cryptographer
- // to allow us to extract these params without polluting our current
- // cryptographer.
- DVLOG(1) << "Implicit internal passphrase failed to decrypt, adding "
- << "anyways as default passphrase and persisting via "
- << "bootstrap token.";
- Cryptographer temp_cryptographer(cryptographer->encryptor());
- temp_cryptographer.AddKey(key_params);
- temp_cryptographer.GetBootstrapToken(&bootstrap_token);
- // We then set the new passphrase as the default passphrase of the
- // real cryptographer, even though we have pending keys. This is safe,
- // as although Cryptographer::is_initialized() will now be true,
- // is_ready() will remain false due to having pending keys.
- cryptographer->AddKey(key_params);
- success = false;
- }
- } // is_explicit
- } // cryptographer->has_pending_keys()
- } else { // IsExplicitPassphrase(passphrase_type_) == true.
- // Case 6. We do not want to override a previously set explicit passphrase,
- // so we return a failure.
- DVLOG(1) << "Failing because an explicit passphrase is already set.";
- success = false;
- }
-
- DVLOG_IF(1, !success)
- << "Failure in SetEncryptionPassphrase; notifying and returning.";
- DVLOG_IF(1, success)
- << "Successfully set encryption passphrase; updating nigori and "
- "reencrypting.";
-
- FinishSetPassphrase(success, bootstrap_token, &trans, &node);
-}
-
-void SyncEncryptionHandlerImpl::SetDecryptionPassphrase(
- const std::string& passphrase) {
- DCHECK(thread_checker_.CalledOnValidThread());
- // We do not accept empty passphrases.
- if (passphrase.empty()) {
- NOTREACHED() << "Cannot decrypt with an empty passphrase.";
- return;
- }
-
- // All accesses to the cryptographer are protected by a transaction.
- WriteTransaction trans(FROM_HERE, user_share_);
- KeyParams key_params = {"localhost", "dummy", passphrase};
- WriteNode node(&trans);
- if (node.InitByTagLookup(kNigoriTag) != BaseNode::INIT_OK) {
- NOTREACHED();
- return;
- }
-
- // Once we've migrated to keystore, we're only ever decrypting keys derived
- // from an explicit passphrase. But, for clients without a keystore key yet
- // (either not on by default or failed to download one), we still support
- // decrypting with a gaia passphrase, and therefore bypass the
- // DecryptPendingKeysWithExplicitPassphrase logic.
- if (IsNigoriMigratedToKeystore(node.GetNigoriSpecifics()) &&
- IsExplicitPassphrase(passphrase_type_)) {
- DecryptPendingKeysWithExplicitPassphrase(passphrase, &trans, &node);
- return;
- }
-
- Cryptographer* cryptographer =
- &UnlockVaultMutable(trans.GetWrappedTrans())->cryptographer;
- if (!cryptographer->has_pending_keys()) {
- // Note that this *can* happen in a rare situation where data is
- // re-encrypted on another client while a SetDecryptionPassphrase() call is
- // in-flight on this client. It is rare enough that we choose to do nothing.
- NOTREACHED() << "Attempt to set decryption passphrase failed because there "
- << "were no pending keys.";
- return;
- }
-
- std::string bootstrap_token;
- sync_pb::EncryptedData pending_keys;
- pending_keys = cryptographer->GetPendingKeys();
- bool success = false;
-
- // There are three cases to handle here:
- // 7. We're using the current GAIA password to decrypt the pending keys. This
- // happens when signing in to an account with a previously set implicit
- // passphrase, where the data is already encrypted with the newest GAIA
- // password.
- // 8. The user is providing an old GAIA password to decrypt the pending keys.
- // In this case, the user is using an implicit passphrase, but has changed
- // their password since they last encrypted their data, and therefore
- // their current GAIA password was unable to decrypt the data. This will
- // happen when the user is setting up a new profile with a previously
- // encrypted account (after changing passwords).
- // 9. The user is providing a previously set explicit passphrase to decrypt
- // the pending keys.
- if (!IsExplicitPassphrase(passphrase_type_)) {
- if (cryptographer->is_initialized()) {
- // We only want to change the default encryption key to the pending
- // one if the pending keybag already contains the current default.
- // This covers the case where a different client re-encrypted
- // everything with a newer gaia passphrase (and hence the keybag
- // contains keys from all previously used gaia passphrases).
- // Otherwise, we're in a situation where the pending keys are
- // encrypted with an old gaia passphrase, while the default is the
- // current gaia passphrase. In that case, we preserve the default.
- Cryptographer temp_cryptographer(cryptographer->encryptor());
- temp_cryptographer.SetPendingKeys(cryptographer->GetPendingKeys());
- if (temp_cryptographer.DecryptPendingKeys(key_params)) {
- // Check to see if the pending bag of keys contains the current
- // default key.
- sync_pb::EncryptedData encrypted;
- cryptographer->GetKeys(&encrypted);
- if (temp_cryptographer.CanDecrypt(encrypted)) {
- DVLOG(1) << "Implicit user provided passphrase accepted for "
- << "decryption, overwriting default.";
- // Case 7. The pending keybag contains the current default. Go ahead
- // and update the cryptographer, letting the default change.
- cryptographer->DecryptPendingKeys(key_params);
- cryptographer->GetBootstrapToken(&bootstrap_token);
- success = true;
- } else {
- // Case 8. The pending keybag does not contain the current default
- // encryption key. We decrypt the pending keys here, and in
- // FinishSetPassphrase, re-encrypt everything with the current GAIA
- // passphrase instead of the passphrase just provided by the user.
- DVLOG(1) << "Implicit user provided passphrase accepted for "
- << "decryption, restoring implicit internal passphrase "
- << "as default.";
- std::string bootstrap_token_from_current_key;
- cryptographer->GetBootstrapToken(
- &bootstrap_token_from_current_key);
- cryptographer->DecryptPendingKeys(key_params);
- // Overwrite the default from the pending keys.
- cryptographer->AddKeyFromBootstrapToken(
- bootstrap_token_from_current_key);
- success = true;
- }
- } else { // !temp_cryptographer.DecryptPendingKeys(..)
- DVLOG(1) << "Implicit user provided passphrase failed to decrypt.";
- success = false;
- } // temp_cryptographer.DecryptPendingKeys(...)
- } else { // cryptographer->is_initialized() == false
- if (cryptographer->DecryptPendingKeys(key_params)) {
- // This can happpen in two cases:
- // - First time sync on android, where we'll never have a
- // !user_provided passphrase.
- // - This is a restart for a client that lost their bootstrap token.
- // In both cases, we should go ahead and initialize the cryptographer
- // and persist the new bootstrap token.
- //
- // Note: at this point, we cannot distinguish between cases 7 and 8
- // above. This user provided passphrase could be the current or the
- // old. But, as long as we persist the token, there's nothing more
- // we can do.
- cryptographer->GetBootstrapToken(&bootstrap_token);
- DVLOG(1) << "Implicit user provided passphrase accepted, initializing"
- << " cryptographer.";
- success = true;
- } else {
- DVLOG(1) << "Implicit user provided passphrase failed to decrypt.";
- success = false;
- }
- } // cryptographer->is_initialized()
- } else { // nigori_has_explicit_passphrase == true
- // Case 9. Encryption was done with an explicit passphrase, and we decrypt
- // with the passphrase provided by the user.
- if (cryptographer->DecryptPendingKeys(key_params)) {
- DVLOG(1) << "Explicit passphrase accepted for decryption.";
- cryptographer->GetBootstrapToken(&bootstrap_token);
- success = true;
- } else {
- DVLOG(1) << "Explicit passphrase failed to decrypt.";
- success = false;
- }
- } // nigori_has_explicit_passphrase
-
- DVLOG_IF(1, !success)
- << "Failure in SetDecryptionPassphrase; notifying and returning.";
- DVLOG_IF(1, success)
- << "Successfully set decryption passphrase; updating nigori and "
- "reencrypting.";
-
- FinishSetPassphrase(success, bootstrap_token, &trans, &node);
-}
-
-void SyncEncryptionHandlerImpl::EnableEncryptEverything() {
- DCHECK(thread_checker_.CalledOnValidThread());
- WriteTransaction trans(FROM_HERE, user_share_);
- DVLOG(1) << "Enabling encrypt everything.";
- if (encrypt_everything_)
- return;
- EnableEncryptEverythingImpl(trans.GetWrappedTrans());
- WriteEncryptionStateToNigori(&trans);
- if (UnlockVault(trans.GetWrappedTrans()).cryptographer.is_ready())
- ReEncryptEverything(&trans);
-}
-
-bool SyncEncryptionHandlerImpl::EncryptEverythingEnabled() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return encrypt_everything_;
-}
-
-PassphraseType SyncEncryptionHandlerImpl::GetPassphraseType() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return passphrase_type_;
-}
-
-// Note: this is called from within a syncable transaction, so we need to post
-// tasks if we want to do any work that creates a new sync_api transaction.
-void SyncEncryptionHandlerImpl::ApplyNigoriUpdate(
- const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(trans);
- if (!ApplyNigoriUpdateImpl(nigori, trans)) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&SyncEncryptionHandlerImpl::RewriteNigori,
- weak_ptr_factory_.GetWeakPtr()));
- }
-
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnCryptographerStateChanged(
- &UnlockVaultMutable(trans)->cryptographer));
-}
-
-void SyncEncryptionHandlerImpl::UpdateNigoriFromEncryptedTypes(
- sync_pb::NigoriSpecifics* nigori,
- syncable::BaseTransaction* const trans) const {
- DCHECK(thread_checker_.CalledOnValidThread());
- syncable::UpdateNigoriFromEncryptedTypes(UnlockVault(trans).encrypted_types,
- encrypt_everything_,
- nigori);
-}
-
-bool SyncEncryptionHandlerImpl::NeedKeystoreKey(
- syncable::BaseTransaction* const trans) const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return keystore_key_.empty();
-}
-
-bool SyncEncryptionHandlerImpl::SetKeystoreKeys(
- const google::protobuf::RepeatedPtrField<google::protobuf::string>& keys,
- syncable::BaseTransaction* const trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (keys.size() == 0)
- return false;
- // The last key in the vector is the current keystore key. The others are kept
- // around for decryption only.
- const std::string& raw_keystore_key = keys.Get(keys.size() - 1);
- if (raw_keystore_key.empty())
- return false;
-
- // Note: in order to Pack the keys, they must all be base64 encoded (else
- // JSON serialization fails).
- base::Base64Encode(raw_keystore_key, &keystore_key_);
-
- // Go through and save the old keystore keys. We always persist all keystore
- // keys the server sends us.
- old_keystore_keys_.resize(keys.size() - 1);
- for (int i = 0; i < keys.size() - 1; ++i)
- base::Base64Encode(keys.Get(i), &old_keystore_keys_[i]);
-
- Cryptographer* cryptographer = &UnlockVaultMutable(trans)->cryptographer;
-
- // Update the bootstrap token. If this fails, we persist an empty string,
- // which will force us to download the keystore keys again on the next
- // restart.
- std::string keystore_bootstrap = PackKeystoreBootstrapToken(
- old_keystore_keys_,
- keystore_key_,
- cryptographer->encryptor());
- DCHECK_EQ(keystore_bootstrap.empty(), keystore_key_.empty());
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnBootstrapTokenUpdated(keystore_bootstrap,
- KEYSTORE_BOOTSTRAP_TOKEN));
- DVLOG(1) << "Keystore bootstrap token updated.";
-
- // If this is a first time sync, we get the encryption keys before we process
- // the nigori node. Just return for now, ApplyNigoriUpdate will be invoked
- // once we have the nigori node.
- syncable::Entry entry(trans, syncable::GET_BY_SERVER_TAG, kNigoriTag);
- if (!entry.good())
- return true;
-
- const sync_pb::NigoriSpecifics& nigori =
- entry.GetSpecifics().nigori();
- if (cryptographer->has_pending_keys() &&
- IsNigoriMigratedToKeystore(nigori) &&
- !nigori.keystore_decryptor_token().blob().empty()) {
- // If the nigori is already migrated and we have pending keys, we might
- // be able to decrypt them using either the keystore decryptor token
- // or the existing keystore keys.
- DecryptPendingKeysWithKeystoreKey(keystore_key_,
- nigori.keystore_decryptor_token(),
- cryptographer);
- }
-
- // Note that triggering migration will have no effect if we're already
- // properly migrated with the newest keystore keys.
- if (ShouldTriggerMigration(nigori, *cryptographer)) {
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&SyncEncryptionHandlerImpl::RewriteNigori,
- weak_ptr_factory_.GetWeakPtr()));
- }
-
- return true;
-}
-
-ModelTypeSet SyncEncryptionHandlerImpl::GetEncryptedTypes(
- syncable::BaseTransaction* const trans) const {
- return UnlockVault(trans).encrypted_types;
-}
-
-Cryptographer* SyncEncryptionHandlerImpl::GetCryptographerUnsafe() {
- DCHECK(thread_checker_.CalledOnValidThread());
- return &vault_unsafe_.cryptographer;
-}
-
-ModelTypeSet SyncEncryptionHandlerImpl::GetEncryptedTypesUnsafe() {
- DCHECK(thread_checker_.CalledOnValidThread());
- return vault_unsafe_.encrypted_types;
-}
-
-bool SyncEncryptionHandlerImpl::MigratedToKeystore() {
- DCHECK(thread_checker_.CalledOnValidThread());
- ReadTransaction trans(FROM_HERE, user_share_);
- ReadNode nigori_node(&trans);
- if (nigori_node.InitByTagLookup(kNigoriTag) != BaseNode::INIT_OK)
- return false;
- return IsNigoriMigratedToKeystore(nigori_node.GetNigoriSpecifics());
-}
-
-base::Time SyncEncryptionHandlerImpl::migration_time() const {
- return migration_time_;
-}
-
-base::Time SyncEncryptionHandlerImpl::custom_passphrase_time() const {
- return custom_passphrase_time_;
-}
-
-// This function iterates over all encrypted types. There are many scenarios in
-// which data for some or all types is not currently available. In that case,
-// the lookup of the root node will fail and we will skip encryption for that
-// type.
-void SyncEncryptionHandlerImpl::ReEncryptEverything(
- WriteTransaction* trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(UnlockVault(trans->GetWrappedTrans()).cryptographer.is_ready());
- for (ModelTypeSet::Iterator iter =
- UnlockVault(trans->GetWrappedTrans()).encrypted_types.First();
- iter.Good(); iter.Inc()) {
- if (iter.Get() == PASSWORDS || IsControlType(iter.Get()))
- continue; // These types handle encryption differently.
-
- ReadNode type_root(trans);
- std::string tag = ModelTypeToRootTag(iter.Get());
- if (type_root.InitByTagLookup(tag) != BaseNode::INIT_OK)
- continue; // Don't try to reencrypt if the type's data is unavailable.
-
- // Iterate through all children of this datatype.
- std::queue<int64> to_visit;
- int64 child_id = type_root.GetFirstChildId();
- to_visit.push(child_id);
- while (!to_visit.empty()) {
- child_id = to_visit.front();
- to_visit.pop();
- if (child_id == kInvalidId)
- continue;
-
- WriteNode child(trans);
- if (child.InitByIdLookup(child_id) != BaseNode::INIT_OK)
- continue; // Possible for locally deleted items.
- if (child.GetIsFolder()) {
- to_visit.push(child.GetFirstChildId());
- }
- if (child.GetEntry()->GetUniqueServerTag().empty()) {
- // Rewrite the specifics of the node with encrypted data if necessary
- // (only rewrite the non-unique folders).
- child.ResetFromSpecifics();
- }
- to_visit.push(child.GetSuccessorId());
- }
- }
-
- // Passwords are encrypted with their own legacy scheme. Passwords are always
- // encrypted so we don't need to check GetEncryptedTypes() here.
- ReadNode passwords_root(trans);
- std::string passwords_tag = ModelTypeToRootTag(PASSWORDS);
- if (passwords_root.InitByTagLookup(passwords_tag) ==
- BaseNode::INIT_OK) {
- int64 child_id = passwords_root.GetFirstChildId();
- while (child_id != kInvalidId) {
- WriteNode child(trans);
- if (child.InitByIdLookup(child_id) != BaseNode::INIT_OK) {
- NOTREACHED();
- return;
- }
- child.SetPasswordSpecifics(child.GetPasswordSpecifics());
- child_id = child.GetSuccessorId();
- }
- }
-
- DVLOG(1) << "Re-encrypt everything complete.";
-
- // NOTE: We notify from within a transaction.
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnEncryptionComplete());
-}
-
-bool SyncEncryptionHandlerImpl::ApplyNigoriUpdateImpl(
- const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(1) << "Applying nigori node update.";
- bool nigori_types_need_update = !UpdateEncryptedTypesFromNigori(nigori,
- trans);
-
- if (nigori.custom_passphrase_time() != 0) {
- custom_passphrase_time_ =
- ProtoTimeToTime(nigori.custom_passphrase_time());
- }
- bool is_nigori_migrated = IsNigoriMigratedToKeystore(nigori);
- if (is_nigori_migrated) {
- DCHECK(nigori.has_keystore_migration_time());
- migration_time_ = ProtoTimeToTime(nigori.keystore_migration_time());
- PassphraseType nigori_passphrase_type =
- ProtoPassphraseTypeToEnum(nigori.passphrase_type());
-
- // Only update the local passphrase state if it's a valid transition:
- // - implicit -> keystore
- // - implicit -> frozen implicit
- // - implicit -> custom
- // - keystore -> custom
- // Note: frozen implicit -> custom is not technically a valid transition,
- // but we let it through here as well in case future versions do add support
- // for this transition.
- if (passphrase_type_ != nigori_passphrase_type &&
- nigori_passphrase_type != IMPLICIT_PASSPHRASE &&
- (passphrase_type_ == IMPLICIT_PASSPHRASE ||
- nigori_passphrase_type == CUSTOM_PASSPHRASE)) {
- DVLOG(1) << "Changing passphrase state from "
- << PassphraseTypeToString(passphrase_type_)
- << " to "
- << PassphraseTypeToString(nigori_passphrase_type);
- passphrase_type_ = nigori_passphrase_type;
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseTypeChanged(
- passphrase_type_,
- GetExplicitPassphraseTime()));
- }
- if (passphrase_type_ == KEYSTORE_PASSPHRASE && encrypt_everything_) {
- // This is the case where another client that didn't support keystore
- // encryption attempted to enable full encryption. We detect it
- // and switch the passphrase type to frozen implicit passphrase instead
- // due to full encryption not being compatible with keystore passphrase.
- // Because the local passphrase type will not match the nigori passphrase
- // type, we will trigger a rewrite and subsequently a re-migration.
- DVLOG(1) << "Changing passphrase state to FROZEN_IMPLICIT_PASSPHRASE "
- << "due to full encryption.";
- passphrase_type_ = FROZEN_IMPLICIT_PASSPHRASE;
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseTypeChanged(
- passphrase_type_,
- GetExplicitPassphraseTime()));
- }
- } else {
- // It's possible that while we're waiting for migration a client that does
- // not have keystore encryption enabled switches to a custom passphrase.
- if (nigori.keybag_is_frozen() &&
- passphrase_type_ != CUSTOM_PASSPHRASE) {
- passphrase_type_ = CUSTOM_PASSPHRASE;
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseTypeChanged(
- passphrase_type_,
- GetExplicitPassphraseTime()));
- }
- }
-
- Cryptographer* cryptographer = &UnlockVaultMutable(trans)->cryptographer;
- bool nigori_needs_new_keys = false;
- if (!nigori.encryption_keybag().blob().empty()) {
- // We only update the default key if this was a new explicit passphrase.
- // Else, since it was decryptable, it must not have been a new key.
- bool need_new_default_key = false;
- if (is_nigori_migrated) {
- need_new_default_key = IsExplicitPassphrase(
- ProtoPassphraseTypeToEnum(nigori.passphrase_type()));
- } else {
- need_new_default_key = nigori.keybag_is_frozen();
- }
- if (!AttemptToInstallKeybag(nigori.encryption_keybag(),
- need_new_default_key,
- cryptographer)) {
- // Check to see if we can decrypt the keybag using the keystore decryptor
- // token.
- cryptographer->SetPendingKeys(nigori.encryption_keybag());
- if (!nigori.keystore_decryptor_token().blob().empty() &&
- !keystore_key_.empty()) {
- if (DecryptPendingKeysWithKeystoreKey(keystore_key_,
- nigori.keystore_decryptor_token(),
- cryptographer)) {
- nigori_needs_new_keys =
- cryptographer->KeybagIsStale(nigori.encryption_keybag());
- } else {
- LOG(ERROR) << "Failed to decrypt pending keys using keystore "
- << "bootstrap key.";
- }
- }
- } else {
- // Keybag was installed. We write back our local keybag into the nigori
- // node if the nigori node's keybag either contains less keys or
- // has a different default key.
- nigori_needs_new_keys =
- cryptographer->KeybagIsStale(nigori.encryption_keybag());
- }
- } else {
- // The nigori node has an empty encryption keybag. Attempt to write our
- // local encryption keys into it.
- LOG(WARNING) << "Nigori had empty encryption keybag.";
- nigori_needs_new_keys = true;
- }
-
- // If we've completed a sync cycle and the cryptographer isn't ready
- // yet or has pending keys, prompt the user for a passphrase.
- if (cryptographer->has_pending_keys()) {
- DVLOG(1) << "OnPassphraseRequired Sent";
- sync_pb::EncryptedData pending_keys = cryptographer->GetPendingKeys();
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseRequired(REASON_DECRYPTION,
- pending_keys));
- } else if (!cryptographer->is_ready()) {
- DVLOG(1) << "OnPassphraseRequired sent because cryptographer is not "
- << "ready";
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseRequired(REASON_ENCRYPTION,
- sync_pb::EncryptedData()));
- }
-
- // Check if the current local encryption state is stricter/newer than the
- // nigori state. If so, we need to overwrite the nigori node with the local
- // state.
- bool passphrase_type_matches = true;
- if (!is_nigori_migrated) {
- DCHECK(passphrase_type_ == CUSTOM_PASSPHRASE ||
- passphrase_type_ == IMPLICIT_PASSPHRASE);
- passphrase_type_matches =
- nigori.keybag_is_frozen() == IsExplicitPassphrase(passphrase_type_);
- } else {
- passphrase_type_matches =
- (ProtoPassphraseTypeToEnum(nigori.passphrase_type()) ==
- passphrase_type_);
- }
- if (!passphrase_type_matches ||
- nigori.encrypt_everything() != encrypt_everything_ ||
- nigori_types_need_update ||
- nigori_needs_new_keys) {
- DVLOG(1) << "Triggering nigori rewrite.";
- return false;
- }
- return true;
-}
-
-void SyncEncryptionHandlerImpl::RewriteNigori() {
- DVLOG(1) << "Writing local encryption state into nigori.";
- DCHECK(thread_checker_.CalledOnValidThread());
- WriteTransaction trans(FROM_HERE, user_share_);
- WriteEncryptionStateToNigori(&trans);
-}
-
-void SyncEncryptionHandlerImpl::WriteEncryptionStateToNigori(
- WriteTransaction* trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
- WriteNode nigori_node(trans);
- // This can happen in tests that don't have nigori nodes.
- if (nigori_node.InitByTagLookup(kNigoriTag) != BaseNode::INIT_OK)
- return;
-
- sync_pb::NigoriSpecifics nigori = nigori_node.GetNigoriSpecifics();
- const Cryptographer& cryptographer =
- UnlockVault(trans->GetWrappedTrans()).cryptographer;
-
- // Will not do anything if we shouldn't or can't migrate. Otherwise
- // migrates, writing the full encryption state as it does.
- if (!AttemptToMigrateNigoriToKeystore(trans, &nigori_node)) {
- if (cryptographer.is_ready() &&
- nigori_overwrite_count_ < kNigoriOverwriteLimit) {
- // Does not modify the encrypted blob if the unencrypted data already
- // matches what is about to be written.
- sync_pb::EncryptedData original_keys = nigori.encryption_keybag();
- if (!cryptographer.GetKeys(nigori.mutable_encryption_keybag()))
- NOTREACHED();
-
- if (nigori.encryption_keybag().SerializeAsString() !=
- original_keys.SerializeAsString()) {
- // We've updated the nigori node's encryption keys. In order to prevent
- // a possible looping of two clients constantly overwriting each other,
- // we limit the absolute number of overwrites per client instantiation.
- nigori_overwrite_count_++;
- UMA_HISTOGRAM_COUNTS("Sync.AutoNigoriOverwrites",
- nigori_overwrite_count_);
- }
-
- // Note: we don't try to set keybag_is_frozen here since if that
- // is lost the user can always set it again (and we don't want to clobber
- // any migration state). The main goal at this point is to preserve
- // the encryption keys so all data remains decryptable.
- }
- syncable::UpdateNigoriFromEncryptedTypes(
- UnlockVault(trans->GetWrappedTrans()).encrypted_types,
- encrypt_everything_,
- &nigori);
- if (!custom_passphrase_time_.is_null()) {
- nigori.set_custom_passphrase_time(
- TimeToProtoTime(custom_passphrase_time_));
- }
-
- // If nothing has changed, this is a no-op.
- nigori_node.SetNigoriSpecifics(nigori);
- }
-}
-
-bool SyncEncryptionHandlerImpl::UpdateEncryptedTypesFromNigori(
- const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
- ModelTypeSet* encrypted_types = &UnlockVaultMutable(trans)->encrypted_types;
- if (nigori.encrypt_everything()) {
- EnableEncryptEverythingImpl(trans);
- DCHECK(encrypted_types->Equals(EncryptableUserTypes()));
- return true;
- } else if (encrypt_everything_) {
- DCHECK(encrypted_types->Equals(EncryptableUserTypes()));
- return false;
- }
-
- ModelTypeSet nigori_encrypted_types;
- nigori_encrypted_types = syncable::GetEncryptedTypesFromNigori(nigori);
- nigori_encrypted_types.PutAll(SensitiveTypes());
-
- // If anything more than the sensitive types were encrypted, and
- // encrypt_everything is not explicitly set to false, we assume it means
- // a client intended to enable encrypt everything.
- if (!nigori.has_encrypt_everything() &&
- !Difference(nigori_encrypted_types, SensitiveTypes()).Empty()) {
- if (!encrypt_everything_) {
- encrypt_everything_ = true;
- *encrypted_types = EncryptableUserTypes();
- FOR_EACH_OBSERVER(
- Observer, observers_,
- OnEncryptedTypesChanged(*encrypted_types, encrypt_everything_));
- }
- DCHECK(encrypted_types->Equals(EncryptableUserTypes()));
- return false;
- }
-
- MergeEncryptedTypes(nigori_encrypted_types, trans);
- return encrypted_types->Equals(nigori_encrypted_types);
-}
-
-void SyncEncryptionHandlerImpl::SetCustomPassphrase(
- const std::string& passphrase,
- WriteTransaction* trans,
- WriteNode* nigori_node) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(IsNigoriMigratedToKeystore(nigori_node->GetNigoriSpecifics()));
- KeyParams key_params = {"localhost", "dummy", passphrase};
-
- if (passphrase_type_ != KEYSTORE_PASSPHRASE) {
- DVLOG(1) << "Failing to set a custom passphrase because one has already "
- << "been set.";
- FinishSetPassphrase(false, std::string(), trans, nigori_node);
- return;
- }
-
- Cryptographer* cryptographer =
- &UnlockVaultMutable(trans->GetWrappedTrans())->cryptographer;
- if (cryptographer->has_pending_keys()) {
- // This theoretically shouldn't happen, because the only way to have pending
- // keys after migrating to keystore support is if a custom passphrase was
- // set, which should update passpshrase_state_ and should be caught by the
- // if statement above. For the sake of safety though, we check for it in
- // case a client is misbehaving.
- LOG(ERROR) << "Failing to set custom passphrase because of pending keys.";
- FinishSetPassphrase(false, std::string(), trans, nigori_node);
- return;
- }
-
- std::string bootstrap_token;
- if (cryptographer->AddKey(key_params)) {
- DVLOG(1) << "Setting custom passphrase.";
- cryptographer->GetBootstrapToken(&bootstrap_token);
- passphrase_type_ = CUSTOM_PASSPHRASE;
- custom_passphrase_time_ = base::Time::Now();
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseTypeChanged(
- passphrase_type_,
- GetExplicitPassphraseTime()));
- } else {
- NOTREACHED() << "Failed to add key to cryptographer.";
- return;
- }
- FinishSetPassphrase(true, bootstrap_token, trans, nigori_node);
-}
-
-void SyncEncryptionHandlerImpl::DecryptPendingKeysWithExplicitPassphrase(
- const std::string& passphrase,
- WriteTransaction* trans,
- WriteNode* nigori_node) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(IsExplicitPassphrase(passphrase_type_));
- KeyParams key_params = {"localhost", "dummy", passphrase};
-
- Cryptographer* cryptographer =
- &UnlockVaultMutable(trans->GetWrappedTrans())->cryptographer;
- if (!cryptographer->has_pending_keys()) {
- // Note that this *can* happen in a rare situation where data is
- // re-encrypted on another client while a SetDecryptionPassphrase() call is
- // in-flight on this client. It is rare enough that we choose to do nothing.
- NOTREACHED() << "Attempt to set decryption passphrase failed because there "
- << "were no pending keys.";
- return;
- }
-
- DCHECK(IsExplicitPassphrase(passphrase_type_));
- bool success = false;
- std::string bootstrap_token;
- if (cryptographer->DecryptPendingKeys(key_params)) {
- DVLOG(1) << "Explicit passphrase accepted for decryption.";
- cryptographer->GetBootstrapToken(&bootstrap_token);
- success = true;
- } else {
- DVLOG(1) << "Explicit passphrase failed to decrypt.";
- success = false;
- }
- if (success && !keystore_key_.empty()) {
- // Should already be part of the encryption keybag, but we add it just
- // in case.
- KeyParams key_params = {"localhost", "dummy", keystore_key_};
- cryptographer->AddNonDefaultKey(key_params);
- }
- FinishSetPassphrase(success, bootstrap_token, trans, nigori_node);
-}
-
-void SyncEncryptionHandlerImpl::FinishSetPassphrase(
- bool success,
- const std::string& bootstrap_token,
- WriteTransaction* trans,
- WriteNode* nigori_node) {
- DCHECK(thread_checker_.CalledOnValidThread());
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnCryptographerStateChanged(
- &UnlockVaultMutable(trans->GetWrappedTrans())->cryptographer));
-
- // It's possible we need to change the bootstrap token even if we failed to
- // set the passphrase (for example if we need to preserve the new GAIA
- // passphrase).
- if (!bootstrap_token.empty()) {
- DVLOG(1) << "Passphrase bootstrap token updated.";
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnBootstrapTokenUpdated(bootstrap_token,
- PASSPHRASE_BOOTSTRAP_TOKEN));
- }
-
- const Cryptographer& cryptographer =
- UnlockVault(trans->GetWrappedTrans()).cryptographer;
- if (!success) {
- if (cryptographer.is_ready()) {
- LOG(ERROR) << "Attempt to change passphrase failed while cryptographer "
- << "was ready.";
- } else if (cryptographer.has_pending_keys()) {
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseRequired(REASON_DECRYPTION,
- cryptographer.GetPendingKeys()));
- } else {
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseRequired(REASON_ENCRYPTION,
- sync_pb::EncryptedData()));
- }
- return;
- }
- DCHECK(success);
- DCHECK(cryptographer.is_ready());
-
- // Will do nothing if we're already properly migrated or unable to migrate
- // (in otherwords, if ShouldTriggerMigration is false).
- // Otherwise will update the nigori node with the current migrated state,
- // writing all encryption state as it does.
- if (!AttemptToMigrateNigoriToKeystore(trans, nigori_node)) {
- sync_pb::NigoriSpecifics nigori(nigori_node->GetNigoriSpecifics());
- // Does not modify nigori.encryption_keybag() if the original decrypted
- // data was the same.
- if (!cryptographer.GetKeys(nigori.mutable_encryption_keybag()))
- NOTREACHED();
- if (IsNigoriMigratedToKeystore(nigori)) {
- DCHECK(keystore_key_.empty() || IsExplicitPassphrase(passphrase_type_));
- DVLOG(1) << "Leaving nigori migration state untouched after setting"
- << " passphrase.";
- } else {
- nigori.set_keybag_is_frozen(
- IsExplicitPassphrase(passphrase_type_));
- }
- // If we set a new custom passphrase, store the timestamp.
- if (!custom_passphrase_time_.is_null()) {
- nigori.set_custom_passphrase_time(
- TimeToProtoTime(custom_passphrase_time_));
- }
- nigori_node->SetNigoriSpecifics(nigori);
- }
-
- // Must do this after OnPassphraseTypeChanged, in order to ensure the PSS
- // checks the passphrase state after it has been set.
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseAccepted());
-
- // Does nothing if everything is already encrypted.
- // TODO(zea): If we just migrated and enabled encryption, this will be
- // redundant. Figure out a way to not do this unnecessarily.
- ReEncryptEverything(trans);
-}
-
-void SyncEncryptionHandlerImpl::MergeEncryptedTypes(
- ModelTypeSet new_encrypted_types,
- syncable::BaseTransaction* const trans) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // Only UserTypes may be encrypted.
- DCHECK(EncryptableUserTypes().HasAll(new_encrypted_types));
-
- ModelTypeSet* encrypted_types = &UnlockVaultMutable(trans)->encrypted_types;
- if (!encrypted_types->HasAll(new_encrypted_types)) {
- *encrypted_types = new_encrypted_types;
- FOR_EACH_OBSERVER(
- Observer, observers_,
- OnEncryptedTypesChanged(*encrypted_types, encrypt_everything_));
- }
-}
-
-SyncEncryptionHandlerImpl::Vault* SyncEncryptionHandlerImpl::UnlockVaultMutable(
- syncable::BaseTransaction* const trans) {
- DCHECK_EQ(user_share_->directory.get(), trans->directory());
- return &vault_unsafe_;
-}
-
-const SyncEncryptionHandlerImpl::Vault& SyncEncryptionHandlerImpl::UnlockVault(
- syncable::BaseTransaction* const trans) const {
- DCHECK_EQ(user_share_->directory.get(), trans->directory());
- return vault_unsafe_;
-}
-
-bool SyncEncryptionHandlerImpl::ShouldTriggerMigration(
- const sync_pb::NigoriSpecifics& nigori,
- const Cryptographer& cryptographer) const {
- DCHECK(thread_checker_.CalledOnValidThread());
- // Don't migrate if there are pending encryption keys (because data
- // encrypted with the pending keys will not be decryptable).
- if (cryptographer.has_pending_keys())
- return false;
- if (IsNigoriMigratedToKeystore(nigori)) {
- // If the nigori is already migrated but does not reflect the explicit
- // passphrase state, remigrate. Similarly, if the nigori has an explicit
- // passphrase but does not have full encryption, or the nigori has an
- // implicit passphrase but does have full encryption, re-migrate.
- // Note that this is to defend against other clients without keystore
- // encryption enabled transitioning to states that are no longer valid.
- if (passphrase_type_ != KEYSTORE_PASSPHRASE &&
- nigori.passphrase_type() ==
- sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE) {
- return true;
- } else if (IsExplicitPassphrase(passphrase_type_) &&
- !encrypt_everything_) {
- return true;
- } else if (passphrase_type_ == KEYSTORE_PASSPHRASE &&
- encrypt_everything_) {
- return true;
- } else if (
- cryptographer.is_ready() &&
- !cryptographer.CanDecryptUsingDefaultKey(nigori.encryption_keybag())) {
- // We need to overwrite the keybag. This might involve overwriting the
- // keystore decryptor too.
- return true;
- } else if (old_keystore_keys_.size() > 0 && !keystore_key_.empty()) {
- // Check to see if a server key rotation has happened, but the nigori
- // node's keys haven't been rotated yet, and hence we should re-migrate.
- // Note that once a key rotation has been performed, we no longer
- // preserve backwards compatibility, and the keybag will therefore be
- // encrypted with the current keystore key.
- Cryptographer temp_cryptographer(cryptographer.encryptor());
- KeyParams keystore_params = {"localhost", "dummy", keystore_key_};
- temp_cryptographer.AddKey(keystore_params);
- if (!temp_cryptographer.CanDecryptUsingDefaultKey(
- nigori.encryption_keybag())) {
- return true;
- }
- }
- return false;
- } else if (keystore_key_.empty()) {
- // If we haven't already migrated, we don't want to do anything unless
- // a keystore key is available (so that those clients without keystore
- // encryption enabled aren't forced into new states, e.g. frozen implicit
- // passphrase).
- return false;
- }
- return true;
-}
-
-bool SyncEncryptionHandlerImpl::AttemptToMigrateNigoriToKeystore(
- WriteTransaction* trans,
- WriteNode* nigori_node) {
- DCHECK(thread_checker_.CalledOnValidThread());
- const sync_pb::NigoriSpecifics& old_nigori =
- nigori_node->GetNigoriSpecifics();
- Cryptographer* cryptographer =
- &UnlockVaultMutable(trans->GetWrappedTrans())->cryptographer;
-
- if (!ShouldTriggerMigration(old_nigori, *cryptographer))
- return false;
-
- DVLOG(1) << "Starting nigori migration to keystore support.";
- sync_pb::NigoriSpecifics migrated_nigori(old_nigori);
-
- PassphraseType new_passphrase_type = passphrase_type_;
- bool new_encrypt_everything = encrypt_everything_;
- if (encrypt_everything_ && !IsExplicitPassphrase(passphrase_type_)) {
- DVLOG(1) << "Switching to frozen implicit passphrase due to already having "
- << "full encryption.";
- new_passphrase_type = FROZEN_IMPLICIT_PASSPHRASE;
- migrated_nigori.clear_keystore_decryptor_token();
- } else if (IsExplicitPassphrase(passphrase_type_)) {
- DVLOG_IF(1, !encrypt_everything_) << "Enabling encrypt everything due to "
- << "explicit passphrase";
- new_encrypt_everything = true;
- migrated_nigori.clear_keystore_decryptor_token();
- } else {
- DCHECK(!encrypt_everything_);
- new_passphrase_type = KEYSTORE_PASSPHRASE;
- DVLOG(1) << "Switching to keystore passphrase state.";
- }
- migrated_nigori.set_encrypt_everything(new_encrypt_everything);
- migrated_nigori.set_passphrase_type(
- EnumPassphraseTypeToProto(new_passphrase_type));
- migrated_nigori.set_keybag_is_frozen(true);
-
- if (!keystore_key_.empty()) {
- KeyParams key_params = {"localhost", "dummy", keystore_key_};
- if ((old_keystore_keys_.size() > 0 &&
- new_passphrase_type == KEYSTORE_PASSPHRASE) ||
- !cryptographer->is_initialized()) {
- // Either at least one key rotation has been performed, so we no longer
- // care about backwards compatibility, or we're generating keystore-based
- // encryption keys without knowing the GAIA password (and therefore the
- // cryptographer is not initialized), so we can't support backwards
- // compatibility. Ensure the keystore key is the default key.
- DVLOG(1) << "Migrating keybag to keystore key.";
- bool cryptographer_was_ready = cryptographer->is_ready();
- if (!cryptographer->AddKey(key_params)) {
- LOG(ERROR) << "Failed to add keystore key as default key";
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- FAILED_TO_SET_DEFAULT_KEYSTORE,
- MIGRATION_RESULT_SIZE);
- return false;
- }
- if (!cryptographer_was_ready && cryptographer->is_ready()) {
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnPassphraseAccepted());
- }
- } else {
- // We're in backwards compatible mode -- either the account has an
- // explicit passphrase, or we want to preserve the current GAIA-based key
- // as the default because we can (there have been no key rotations since
- // the migration).
- DVLOG(1) << "Migrating keybag while preserving old key";
- if (!cryptographer->AddNonDefaultKey(key_params)) {
- LOG(ERROR) << "Failed to add keystore key as non-default key.";
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- FAILED_TO_SET_NONDEFAULT_KEYSTORE,
- MIGRATION_RESULT_SIZE);
- return false;
- }
- }
- }
- if (!old_keystore_keys_.empty()) {
- // Go through and add all the old keystore keys as non default keys, so
- // they'll be preserved in the encryption_keybag when we next write the
- // nigori node.
- for (std::vector<std::string>::const_iterator iter =
- old_keystore_keys_.begin(); iter != old_keystore_keys_.end();
- ++iter) {
- KeyParams key_params = {"localhost", "dummy", *iter};
- cryptographer->AddNonDefaultKey(key_params);
- }
- }
- if (new_passphrase_type == KEYSTORE_PASSPHRASE &&
- !GetKeystoreDecryptor(
- *cryptographer,
- keystore_key_,
- migrated_nigori.mutable_keystore_decryptor_token())) {
- LOG(ERROR) << "Failed to extract keystore decryptor token.";
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- FAILED_TO_EXTRACT_DECRYPTOR,
- MIGRATION_RESULT_SIZE);
- return false;
- }
- if (!cryptographer->GetKeys(migrated_nigori.mutable_encryption_keybag())) {
- LOG(ERROR) << "Failed to extract encryption keybag.";
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- FAILED_TO_EXTRACT_KEYBAG,
- MIGRATION_RESULT_SIZE);
- return false;
- }
-
- if (migration_time_.is_null())
- migration_time_ = base::Time::Now();
- migrated_nigori.set_keystore_migration_time(TimeToProtoTime(migration_time_));
-
- if (!custom_passphrase_time_.is_null()) {
- migrated_nigori.set_custom_passphrase_time(
- TimeToProtoTime(custom_passphrase_time_));
- }
-
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnCryptographerStateChanged(cryptographer));
- if (passphrase_type_ != new_passphrase_type) {
- passphrase_type_ = new_passphrase_type;
- FOR_EACH_OBSERVER(SyncEncryptionHandler::Observer, observers_,
- OnPassphraseTypeChanged(
- passphrase_type_,
- GetExplicitPassphraseTime()));
- }
-
- if (new_encrypt_everything && !encrypt_everything_) {
- EnableEncryptEverythingImpl(trans->GetWrappedTrans());
- ReEncryptEverything(trans);
- } else if (!cryptographer->CanDecryptUsingDefaultKey(
- old_nigori.encryption_keybag())) {
- DVLOG(1) << "Rencrypting everything due to key rotation.";
- ReEncryptEverything(trans);
- }
-
- DVLOG(1) << "Completing nigori migration to keystore support.";
- nigori_node->SetNigoriSpecifics(migrated_nigori);
-
- switch (new_passphrase_type) {
- case KEYSTORE_PASSPHRASE:
- if (old_keystore_keys_.size() > 0) {
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- MIGRATION_SUCCESS_KEYSTORE_NONDEFAULT,
- MIGRATION_RESULT_SIZE);
- } else {
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- MIGRATION_SUCCESS_KEYSTORE_DEFAULT,
- MIGRATION_RESULT_SIZE);
- }
- break;
- case FROZEN_IMPLICIT_PASSPHRASE:
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- MIGRATION_SUCCESS_FROZEN_IMPLICIT,
- MIGRATION_RESULT_SIZE);
- break;
- case CUSTOM_PASSPHRASE:
- UMA_HISTOGRAM_ENUMERATION("Sync.AttemptNigoriMigration",
- MIGRATION_SUCCESS_CUSTOM,
- MIGRATION_RESULT_SIZE);
- break;
- default:
- NOTREACHED();
- break;
- }
- return true;
-}
-
-bool SyncEncryptionHandlerImpl::GetKeystoreDecryptor(
- const Cryptographer& cryptographer,
- const std::string& keystore_key,
- sync_pb::EncryptedData* encrypted_blob) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!keystore_key.empty());
- DCHECK(cryptographer.is_ready());
- std::string serialized_nigori;
- serialized_nigori = cryptographer.GetDefaultNigoriKey();
- if (serialized_nigori.empty()) {
- LOG(ERROR) << "Failed to get cryptographer bootstrap token.";
- return false;
- }
- Cryptographer temp_cryptographer(cryptographer.encryptor());
- KeyParams key_params = {"localhost", "dummy", keystore_key};
- if (!temp_cryptographer.AddKey(key_params))
- return false;
- if (!temp_cryptographer.EncryptString(serialized_nigori, encrypted_blob))
- return false;
- return true;
-}
-
-bool SyncEncryptionHandlerImpl::AttemptToInstallKeybag(
- const sync_pb::EncryptedData& keybag,
- bool update_default,
- Cryptographer* cryptographer) {
- if (!cryptographer->CanDecrypt(keybag))
- return false;
- cryptographer->InstallKeys(keybag);
- if (update_default)
- cryptographer->SetDefaultKey(keybag.key_name());
- return true;
-}
-
-void SyncEncryptionHandlerImpl::EnableEncryptEverythingImpl(
- syncable::BaseTransaction* const trans) {
- ModelTypeSet* encrypted_types = &UnlockVaultMutable(trans)->encrypted_types;
- if (encrypt_everything_) {
- DCHECK(encrypted_types->Equals(EncryptableUserTypes()));
- return;
- }
- encrypt_everything_ = true;
- *encrypted_types = EncryptableUserTypes();
- FOR_EACH_OBSERVER(
- Observer, observers_,
- OnEncryptedTypesChanged(*encrypted_types, encrypt_everything_));
-}
-
-bool SyncEncryptionHandlerImpl::DecryptPendingKeysWithKeystoreKey(
- const std::string& keystore_key,
- const sync_pb::EncryptedData& keystore_decryptor_token,
- Cryptographer* cryptographer) {
- DCHECK(cryptographer->has_pending_keys());
- if (keystore_decryptor_token.blob().empty())
- return false;
- Cryptographer temp_cryptographer(cryptographer->encryptor());
-
- // First, go through and all all the old keystore keys to the temporary
- // cryptographer.
- for (size_t i = 0; i < old_keystore_keys_.size(); ++i) {
- KeyParams old_key_params = {"localhost", "dummy", old_keystore_keys_[i]};
- temp_cryptographer.AddKey(old_key_params);
- }
-
- // Then add the current keystore key as the default key and see if we can
- // decrypt.
- KeyParams keystore_params = {"localhost", "dummy", keystore_key_};
- if (temp_cryptographer.AddKey(keystore_params) &&
- temp_cryptographer.CanDecrypt(keystore_decryptor_token)) {
- // Someone else migrated the nigori for us! How generous! Go ahead and
- // install both the keystore key and the new default encryption key
- // (i.e. the one provided by the keystore decryptor token) into the
- // cryptographer.
- // The keystore decryptor token is a keystore key encrypted blob containing
- // the current serialized default encryption key (and as such should be
- // able to decrypt the nigori node's encryption keybag).
- // Note: it's possible a key rotation has happened since the migration, and
- // we're decrypting using an old keystore key. In that case we need to
- // ensure we re-encrypt using the newest key.
- DVLOG(1) << "Attempting to decrypt pending keys using "
- << "keystore decryptor token.";
- std::string serialized_nigori =
- temp_cryptographer.DecryptToString(keystore_decryptor_token);
-
- // This will decrypt the pending keys and add them if possible. The key
- // within |serialized_nigori| will be the default after.
- cryptographer->ImportNigoriKey(serialized_nigori);
-
- if (!temp_cryptographer.CanDecryptUsingDefaultKey(
- keystore_decryptor_token)) {
- // The keystore decryptor token was derived from an old keystore key.
- // A key rotation is necessary, so set the current keystore key as the
- // default key (which will trigger a re-migration).
- DVLOG(1) << "Pending keys based on old keystore key. Setting newest "
- << "keystore key as default.";
- cryptographer->AddKey(keystore_params);
- } else {
- // Theoretically the encryption keybag should already contain the keystore
- // key. We explicitly add it as a safety measure.
- DVLOG(1) << "Pending keys based on newest keystore key.";
- cryptographer->AddNonDefaultKey(keystore_params);
- }
- if (cryptographer->is_ready()) {
- std::string bootstrap_token;
- cryptographer->GetBootstrapToken(&bootstrap_token);
- DVLOG(1) << "Keystore decryptor token decrypted pending keys.";
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnPassphraseAccepted());
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnBootstrapTokenUpdated(bootstrap_token,
- PASSPHRASE_BOOTSTRAP_TOKEN));
- FOR_EACH_OBSERVER(
- SyncEncryptionHandler::Observer,
- observers_,
- OnCryptographerStateChanged(cryptographer));
- return true;
- }
- }
- return false;
-}
-
-base::Time SyncEncryptionHandlerImpl::GetExplicitPassphraseTime() const {
- if (passphrase_type_ == FROZEN_IMPLICIT_PASSPHRASE)
- return migration_time();
- else if (passphrase_type_ == CUSTOM_PASSPHRASE)
- return custom_passphrase_time();
- return base::Time();
-}
-
-} // namespace browser_sync
diff --git a/chromium/sync/internal_api/sync_encryption_handler_impl.h b/chromium/sync/internal_api/sync_encryption_handler_impl.h
deleted file mode 100644
index 89621c74058..00000000000
--- a/chromium/sync/internal_api/sync_encryption_handler_impl.h
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_SYNC_ENCRYPTION_HANDLER_IMPL_H_
-#define SYNC_INTERNAL_API_SYNC_ENCRYPTION_HANDLER_IMPL_H_
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/observer_list.h"
-#include "base/threading/thread_checker.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/sync_encryption_handler.h"
-#include "sync/syncable/nigori_handler.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-class Encryptor;
-struct UserShare;
-class WriteNode;
-class WriteTransaction;
-
-// Sync encryption handler implementation.
-//
-// This class acts as the respository of all sync encryption state, and handles
-// encryption related changes/queries coming from both the chrome side and
-// the sync side (via NigoriHandler). It is capable of modifying all sync data
-// (re-encryption), updating the encrypted types, changing the encryption keys,
-// and creating/receiving nigori node updates.
-//
-// The class should live as long as the directory itself in order to ensure
-// any data read/written is properly decrypted/encrypted.
-//
-// Note: See sync_encryption_handler.h for a description of the chrome visible
-// methods and what they do, and nigori_handler.h for a description of the
-// sync methods.
-// All methods are non-thread-safe and should only be called from the sync
-// thread unless explicitly noted otherwise.
-class SYNC_EXPORT_PRIVATE SyncEncryptionHandlerImpl
- : public SyncEncryptionHandler,
- public syncable::NigoriHandler {
- public:
- SyncEncryptionHandlerImpl(
- UserShare* user_share,
- Encryptor* encryptor,
- const std::string& restored_key_for_bootstrapping,
- const std::string& restored_keystore_key_for_bootstrapping);
- virtual ~SyncEncryptionHandlerImpl();
-
- // SyncEncryptionHandler implementation.
- virtual void AddObserver(Observer* observer) OVERRIDE;
- virtual void RemoveObserver(Observer* observer) OVERRIDE;
- virtual void Init() OVERRIDE;
- virtual void SetEncryptionPassphrase(const std::string& passphrase,
- bool is_explicit) OVERRIDE;
- virtual void SetDecryptionPassphrase(const std::string& passphrase) OVERRIDE;
- virtual void EnableEncryptEverything() OVERRIDE;
- virtual bool EncryptEverythingEnabled() const OVERRIDE;
- virtual PassphraseType GetPassphraseType() const OVERRIDE;
-
- // NigoriHandler implementation.
- // Note: all methods are invoked while the caller holds a transaction.
- virtual void ApplyNigoriUpdate(
- const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans) OVERRIDE;
- virtual void UpdateNigoriFromEncryptedTypes(
- sync_pb::NigoriSpecifics* nigori,
- syncable::BaseTransaction* const trans) const OVERRIDE;
- virtual bool NeedKeystoreKey(
- syncable::BaseTransaction* const trans) const OVERRIDE;
- virtual bool SetKeystoreKeys(
- const google::protobuf::RepeatedPtrField<google::protobuf::string>& keys,
- syncable::BaseTransaction* const trans) OVERRIDE;
- // Can be called from any thread.
- virtual ModelTypeSet GetEncryptedTypes(
- syncable::BaseTransaction* const trans) const OVERRIDE;
-
- // Unsafe getters. Use only if sync is not up and running and there is no risk
- // of other threads calling this.
- Cryptographer* GetCryptographerUnsafe();
- ModelTypeSet GetEncryptedTypesUnsafe();
-
- bool MigratedToKeystore();
- base::Time migration_time() const;
- base::Time custom_passphrase_time() const;
-
- private:
- friend class SyncEncryptionHandlerImplTest;
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- NigoriEncryptionTypes);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- EncryptEverythingExplicit);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- EncryptEverythingImplicit);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- UnknownSensitiveTypes);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- GetKeystoreDecryptor);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- ReceiveMigratedNigoriKeystorePass);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- ReceiveUmigratedNigoriAfterMigration);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- ReceiveOldMigratedNigori);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- SetKeystoreAfterReceivingMigratedNigori);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- SetCustomPassAfterMigration);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- SetCustomPassAfterMigrationNoKeystoreKey);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- SetImplicitPassAfterMigrationNoKeystoreKey);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- MigrateOnEncryptEverythingKeystorePassphrase);
- FRIEND_TEST_ALL_PREFIXES(SyncEncryptionHandlerImplTest,
- ReceiveMigratedNigoriWithOldPassphrase);
-
- // Container for members that require thread safety protection. All members
- // that can be accessed from more than one thread should be held here and
- // accessed via UnlockVault(..) and UnlockVaultMutable(..), which enforce
- // that a transaction is held.
- struct Vault {
- Vault(Encryptor* encryptor, ModelTypeSet encrypted_types);
- ~Vault();
-
- // Sync's cryptographer. Used for encrypting and decrypting sync data.
- Cryptographer cryptographer;
- // The set of types that require encryption.
- ModelTypeSet encrypted_types;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Vault);
- };
-
- // Iterate over all encrypted types ensuring each entry is properly encrypted.
- void ReEncryptEverything(WriteTransaction* trans);
-
- // Apply a nigori update. Updates internal and cryptographer state.
- // Returns true on success, false if |nigori| was incompatible, and the
- // nigori node must be corrected.
- // Note: must be called from within a transaction.
- bool ApplyNigoriUpdateImpl(const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans);
-
- // Wrapper around WriteEncryptionStateToNigori that creates a new write
- // transaction.
- void RewriteNigori();
-
- // Write the current encryption state into the nigori node. This includes
- // the encrypted types/encrypt everything state, as well as the keybag/
- // explicit passphrase state (if the cryptographer is ready).
- void WriteEncryptionStateToNigori(WriteTransaction* trans);
-
- // Updates local encrypted types from |nigori|.
- // Returns true if the local set of encrypted types either matched or was
- // a subset of that in |nigori|. Returns false if the local state already
- // had stricter encryption than |nigori|, and the nigori node needs to be
- // updated with the newer encryption state.
- // Note: must be called from within a transaction.
- bool UpdateEncryptedTypesFromNigori(
- const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans);
-
- // TODO(zea): make these public and have them replace SetEncryptionPassphrase
- // and SetDecryptionPassphrase.
- // Helper methods for handling passphrases once keystore migration has taken
- // place.
- //
- // Sets a new custom passphrase. Should only be called if a custom passphrase
- // is not already set.
- // Triggers OnPassphraseAccepted on success, OnPassphraseRequired if a custom
- // passphrase already existed.
- void SetCustomPassphrase(const std::string& passphrase,
- WriteTransaction* trans,
- WriteNode* nigori_node);
- // Decrypt the encryption keybag using a user provided passphrase.
- // Should only be called if the current passphrase is a frozen implicit
- // passphrase or a custom passphrase.
- // Triggers OnPassphraseAccepted on success, OnPassphraseRequired on failure.
- void DecryptPendingKeysWithExplicitPassphrase(const std::string& passphrase,
- WriteTransaction* trans,
- WriteNode* nigori_node);
-
- // The final step of SetEncryptionPassphrase and SetDecryptionPassphrase that
- // notifies observers of the result of the set passphrase operation, updates
- // the nigori node, and does re-encryption.
- // |success|: true if the operation was successful and false otherwise. If
- // success == false, we send an OnPassphraseRequired notification.
- // |bootstrap_token|: used to inform observers if the cryptographer's
- // bootstrap token was updated.
- // |is_explicit|: used to differentiate between a custom passphrase (true) and
- // a GAIA passphrase that is implicitly used for encryption
- // (false).
- // |trans| and |nigori_node|: used to access data in the cryptographer.
- void FinishSetPassphrase(bool success,
- const std::string& bootstrap_token,
- WriteTransaction* trans,
- WriteNode* nigori_node);
-
- // Merges the given set of encrypted types with the existing set and emits a
- // notification if necessary.
- // Note: must be called from within a transaction.
- void MergeEncryptedTypes(ModelTypeSet new_encrypted_types,
- syncable::BaseTransaction* const trans);
-
- // Helper methods for ensuring transactions are held when accessing
- // |vault_unsafe_|.
- Vault* UnlockVaultMutable(syncable::BaseTransaction* const trans);
- const Vault& UnlockVault(syncable::BaseTransaction* const trans) const;
-
- // Helper method for determining if migration of a nigori node should be
- // triggered or not.
- // Conditions for triggering migration:
- // 1. Cryptographer has no pending keys
- // 2. Nigori node isn't already properly migrated or we need to rotate keys.
- // 3. Keystore key is available.
- // Note: if the nigori node is migrated but has an invalid state, will return
- // true (e.g. node has KEYSTORE_PASSPHRASE, local is CUSTOM_PASSPHRASE).
- bool ShouldTriggerMigration(const sync_pb::NigoriSpecifics& nigori,
- const Cryptographer& cryptographer) const;
-
- // Performs the actual migration of the |nigori_node| to support keystore
- // encryption iff ShouldTriggerMigration(..) returns true.
- bool AttemptToMigrateNigoriToKeystore(WriteTransaction* trans,
- WriteNode* nigori_node);
-
- // Fill |encrypted_blob| with the keystore decryptor token if
- // |encrypted_blob|'s contents didn't already contain the key.
- // The keystore decryptor token is the serialized current default encryption
- // key, encrypted with the keystore key.
- bool GetKeystoreDecryptor(
- const Cryptographer& cryptographer,
- const std::string& keystore_key,
- sync_pb::EncryptedData* encrypted_blob);
-
- // Helper method for installing the keys encrypted in |encryption_keybag|
- // into |cryptographer|.
- // Returns true on success, false if we were unable to install the keybag.
- // Will not update the default key.
- bool AttemptToInstallKeybag(const sync_pb::EncryptedData& keybag,
- bool update_default,
- Cryptographer* cryptographer);
-
- // Helper method for decrypting pending keys with the keystore bootstrap.
- // If successful, the default will become the key encrypted in the keystore
- // bootstrap, and will return true. Else will return false.
- bool DecryptPendingKeysWithKeystoreKey(
- const std::string& keystore_key,
- const sync_pb::EncryptedData& keystore_bootstrap,
- Cryptographer* cryptographer);
-
- // Helper to enable encrypt everything, notifying observers if necessary.
- // Will not perform re-encryption.
- void EnableEncryptEverythingImpl(syncable::BaseTransaction* const trans);
-
- // If an explicit passphrase is in use, returns the time at which it was set
- // (if known). Else return base::Time().
- base::Time GetExplicitPassphraseTime() const;
-
- base::ThreadChecker thread_checker_;
-
- ObserverList<SyncEncryptionHandler::Observer> observers_;
-
- // The current user share (for creating transactions).
- UserShare* user_share_;
-
- // Container for all data that can be accessed from multiple threads. Do not
- // access this object directly. Instead access it via UnlockVault(..) and
- // UnlockVaultMutable(..).
- Vault vault_unsafe_;
-
- // Sync encryption state that is only modified and accessed from the sync
- // thread.
- // Whether all current and future types should be encrypted.
- bool encrypt_everything_;
- // The current state of the passphrase required to decrypt the encryption
- // keys stored in the nigori node.
- PassphraseType passphrase_type_;
-
- // The current keystore key provided by the server.
- std::string keystore_key_;
-
- // The set of old keystore keys. Every time a key rotation occurs, the server
- // sends down all previous keystore keys as well as the new key. We preserve
- // the old keys so that when we re-encrypt we can ensure they're all added to
- // the keybag (and to detect that a key rotation has occurred).
- std::vector<std::string> old_keystore_keys_;
-
- // The number of times we've automatically (i.e. not via SetPassphrase or
- // conflict resolver) updated the nigori's encryption keys in this chrome
- // instantiation.
- int nigori_overwrite_count_;
-
- // The time the nigori was migrated to support keystore encryption.
- base::Time migration_time_;
-
- // The time the custom passphrase was set for this account. Not valid
- // if there is no custom passphrase or the custom passphrase was set
- // before support for this field was added.
- base::Time custom_passphrase_time_;
-
- base::WeakPtrFactory<SyncEncryptionHandlerImpl> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncEncryptionHandlerImpl);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_PUBLIC_SYNC_ENCRYPTION_HANDLER_IMPL_H_
diff --git a/chromium/sync/internal_api/sync_encryption_handler_impl_unittest.cc b/chromium/sync/internal_api/sync_encryption_handler_impl_unittest.cc
deleted file mode 100644
index 2f09cf91013..00000000000
--- a/chromium/sync/internal_api/sync_encryption_handler_impl_unittest.cc
+++ /dev/null
@@ -1,2282 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/sync_encryption_handler_impl.h"
-
-#include <string>
-
-#include "base/base64.h"
-#include "base/json/json_string_value_serializer.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/tracked_objects.h"
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/internal_api/public/read_node.h"
-#include "sync/internal_api/public/read_transaction.h"
-#include "sync/internal_api/public/test/test_user_share.h"
-#include "sync/internal_api/public/write_node.h"
-#include "sync/internal_api/public/write_transaction.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/fake_encryptor.h"
-#include "sync/util/cryptographer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::AtLeast;
-using ::testing::Mock;
-using ::testing::SaveArg;
-using ::testing::StrictMock;
-
-// The raw keystore key the server sends.
-static const char kRawKeystoreKey[] = "keystore_key";
-// Base64 encoded version of |kRawKeystoreKey|.
-static const char kKeystoreKey[] = "a2V5c3RvcmVfa2V5";
-
-class SyncEncryptionHandlerObserverMock
- : public SyncEncryptionHandler::Observer {
- public:
- MOCK_METHOD2(OnPassphraseRequired,
- void(PassphraseRequiredReason,
- const sync_pb::EncryptedData&)); // NOLINT
- MOCK_METHOD0(OnPassphraseAccepted, void()); // NOLINT
- MOCK_METHOD2(OnBootstrapTokenUpdated,
- void(const std::string&, BootstrapTokenType type)); // NOLINT
- MOCK_METHOD2(OnEncryptedTypesChanged,
- void(ModelTypeSet, bool)); // NOLINT
- MOCK_METHOD0(OnEncryptionComplete, void()); // NOLINT
- MOCK_METHOD1(OnCryptographerStateChanged, void(Cryptographer*)); // NOLINT
- MOCK_METHOD2(OnPassphraseTypeChanged, void(PassphraseType,
- base::Time)); // NOLINT
-};
-
-google::protobuf::RepeatedPtrField<google::protobuf::string>
-BuildEncryptionKeyProto(std::string encryption_key) {
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(encryption_key);
- return keys;
-}
-
-} // namespace
-
-class SyncEncryptionHandlerImplTest : public ::testing::Test {
- public:
- SyncEncryptionHandlerImplTest() {}
- virtual ~SyncEncryptionHandlerImplTest() {}
-
- virtual void SetUp() {
- test_user_share_.SetUp();
- SetUpEncryption();
- CreateRootForType(NIGORI);
- }
-
- virtual void TearDown() {
- PumpLoop();
- test_user_share_.TearDown();
- }
-
- protected:
- void SetUpEncryption() {
- encryption_handler_.reset(
- new SyncEncryptionHandlerImpl(user_share(),
- &encryptor_,
- std::string(),
- std::string() /* bootstrap tokens */));
- encryption_handler_->AddObserver(&observer_);
- }
-
- void CreateRootForType(ModelType model_type) {
- syncer::syncable::Directory* directory = user_share()->directory.get();
-
- std::string tag_name = ModelTypeToRootTag(model_type);
-
- syncable::WriteTransaction wtrans(FROM_HERE, syncable::UNITTEST, directory);
- syncable::MutableEntry node(&wtrans,
- syncable::CREATE,
- model_type,
- wtrans.root_id(),
- tag_name);
- node.PutUniqueServerTag(tag_name);
- node.PutIsDir(true);
- node.PutServerIsDir(false);
- node.PutIsUnsynced(false);
- node.PutIsUnappliedUpdate(false);
- node.PutServerVersion(20);
- node.PutBaseVersion(20);
- node.PutIsDel(false);
- node.PutId(ids_.MakeServer(tag_name));
- sync_pb::EntitySpecifics specifics;
- syncer::AddDefaultFieldValue(model_type, &specifics);
- node.PutSpecifics(specifics);
- }
-
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-
- // Getters for tests.
- UserShare* user_share() { return test_user_share_.user_share(); }
- SyncEncryptionHandlerImpl* encryption_handler() {
- return encryption_handler_.get();
- }
- SyncEncryptionHandlerObserverMock* observer() { return &observer_; }
- Cryptographer* GetCryptographer() {
- return encryption_handler_->GetCryptographerUnsafe();
- }
-
- void VerifyMigratedNigori(PassphraseType passphrase_type,
- const std::string& passphrase) {
- VerifyMigratedNigoriWithTimestamp(0, passphrase_type, passphrase);
- }
-
- void VerifyMigratedNigoriWithTimestamp(
- int64 migration_time,
- PassphraseType passphrase_type,
- const std::string& passphrase) {
- ReadTransaction trans(FROM_HERE, user_share());
- ReadNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- const sync_pb::NigoriSpecifics& nigori = nigori_node.GetNigoriSpecifics();
- if (migration_time > 0)
- EXPECT_EQ(migration_time, nigori.keystore_migration_time());
- else
- EXPECT_TRUE(nigori.has_keystore_migration_time());
- EXPECT_TRUE(nigori.keybag_is_frozen());
- if (passphrase_type == CUSTOM_PASSPHRASE ||
- passphrase_type == FROZEN_IMPLICIT_PASSPHRASE) {
- EXPECT_TRUE(nigori.encrypt_everything());
- EXPECT_TRUE(nigori.keystore_decryptor_token().blob().empty());
- if (passphrase_type == CUSTOM_PASSPHRASE) {
- EXPECT_EQ(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE,
- nigori.passphrase_type());
- if (!encryption_handler()->custom_passphrase_time().is_null()) {
- EXPECT_EQ(nigori.custom_passphrase_time(),
- TimeToProtoTime(
- encryption_handler()->custom_passphrase_time()));
- }
- } else {
- EXPECT_EQ(sync_pb::NigoriSpecifics::FROZEN_IMPLICIT_PASSPHRASE,
- nigori.passphrase_type());
- }
- } else {
- EXPECT_FALSE(nigori.encrypt_everything());
- EXPECT_FALSE(nigori.keystore_decryptor_token().blob().empty());
- EXPECT_EQ(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE,
- nigori.passphrase_type());
- Cryptographer keystore_cryptographer(&encryptor_);
- KeyParams params = {"localhost", "dummy", kKeystoreKey};
- keystore_cryptographer.AddKey(params);
- EXPECT_TRUE(keystore_cryptographer.CanDecryptUsingDefaultKey(
- nigori.keystore_decryptor_token()));
- }
-
- Cryptographer temp_cryptographer(&encryptor_);
- KeyParams params = {"localhost", "dummy", passphrase};
- temp_cryptographer.AddKey(params);
- EXPECT_TRUE(temp_cryptographer.CanDecryptUsingDefaultKey(
- nigori.encryption_keybag()));
- }
-
- sync_pb::NigoriSpecifics BuildMigratedNigori(
- PassphraseType passphrase_type,
- int64 migration_time,
- const std::string& default_passphrase,
- const std::string& keystore_key) {
- DCHECK_NE(passphrase_type, IMPLICIT_PASSPHRASE);
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
-
- std::string default_key = default_passphrase;
- if (default_key.empty()) {
- default_key = keystore_key;
- } else {
- KeyParams keystore_params = {"localhost", "dummy", keystore_key};
- other_cryptographer.AddKey(keystore_params);
- }
- KeyParams params = {"localhost", "dummy", default_key};
- other_cryptographer.AddKey(params);
- EXPECT_TRUE(other_cryptographer.is_ready());
-
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(migration_time);
-
- if (passphrase_type == KEYSTORE_PASSPHRASE) {
- sync_pb::EncryptedData keystore_decryptor_token;
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- keystore_key,
- &keystore_decryptor_token));
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- } else {
- nigori.set_encrypt_everything(true);
- nigori.set_passphrase_type(
- passphrase_type == CUSTOM_PASSPHRASE ?
- sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE :
- sync_pb::NigoriSpecifics::FROZEN_IMPLICIT_PASSPHRASE);
- }
- return nigori;
- }
-
- // Build a migrated nigori node with the specified default passphrase
- // and keystore key and initialize the encryption handler with it.
- void InitKeystoreMigratedNigori(int64 migration_time,
- const std::string& default_passphrase,
- const std::string& keystore_key) {
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori = BuildMigratedNigori(
- KEYSTORE_PASSPHRASE,
- migration_time,
- default_passphrase,
- keystore_key);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AtLeast(1));
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(AtLeast(1));
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
- }
-
- // Build a migrated nigori node with the specified default passphrase
- // as a custom passphrase.
- void InitCustomPassMigratedNigori(int64 migration_time,
- const std::string& default_passphrase) {
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori = BuildMigratedNigori(
- CUSTOM_PASSPHRASE,
- migration_time,
- default_passphrase,
- kKeystoreKey);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AtLeast(1));
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true)).Times(AtLeast(1));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(AtLeast(1));
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
- }
-
- // Build an unmigrated nigori node with the specified passphrase and type and
- // initialize the encryption handler with it.
- void InitUnmigratedNigori(const std::string& default_passphrase,
- PassphraseType passphrase_type) {
- DCHECK_NE(passphrase_type, FROZEN_IMPLICIT_PASSPHRASE);
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams default_key = {"localhost", "dummy", default_passphrase};
- other_cryptographer.AddKey(default_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(passphrase_type == CUSTOM_PASSPHRASE);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- if (passphrase_type != IMPLICIT_PASSPHRASE) {
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(passphrase_type, _));
- }
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AtLeast(1));
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- encryption_handler()->Init();
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), passphrase_type);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
- }
-
- protected:
- TestUserShare test_user_share_;
- FakeEncryptor encryptor_;
- scoped_ptr<SyncEncryptionHandlerImpl> encryption_handler_;
- StrictMock<SyncEncryptionHandlerObserverMock> observer_;
- TestIdFactory ids_;
- base::MessageLoop message_loop_;
-};
-
-// Verify that the encrypted types are being written to and read from the
-// nigori node properly.
-TEST_F(SyncEncryptionHandlerImplTest, NigoriEncryptionTypes) {
- sync_pb::NigoriSpecifics nigori;
-
- StrictMock<SyncEncryptionHandlerObserverMock> observer2;
- SyncEncryptionHandlerImpl handler2(user_share(),
- &encryptor_,
- std::string(),
- std::string() /* bootstrap tokens */);
- handler2.AddObserver(&observer2);
-
- // Just set the sensitive types (shouldn't trigger any notifications).
- ModelTypeSet encrypted_types(SyncEncryptionHandler::SensitiveTypes());
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->MergeEncryptedTypes(
- encrypted_types,
- trans.GetWrappedTrans());
- encryption_handler()->UpdateNigoriFromEncryptedTypes(
- &nigori,
- trans.GetWrappedTrans());
- handler2.UpdateEncryptedTypesFromNigori(nigori, trans.GetWrappedTrans());
- }
- EXPECT_TRUE(encrypted_types.Equals(
- encryption_handler()->GetEncryptedTypesUnsafe()));
- EXPECT_TRUE(encrypted_types.Equals(
- handler2.GetEncryptedTypesUnsafe()));
-
- Mock::VerifyAndClearExpectations(observer());
- Mock::VerifyAndClearExpectations(&observer2);
-
- ModelTypeSet encrypted_user_types = EncryptableUserTypes();
-
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(
- HasModelTypes(encrypted_user_types), false));
- EXPECT_CALL(observer2,
- OnEncryptedTypesChanged(
- HasModelTypes(encrypted_user_types), false));
-
- // Set all encrypted types
- encrypted_types = EncryptableUserTypes();
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->MergeEncryptedTypes(
- encrypted_types,
- trans.GetWrappedTrans());
- encryption_handler()->UpdateNigoriFromEncryptedTypes(
- &nigori,
- trans.GetWrappedTrans());
- handler2.UpdateEncryptedTypesFromNigori(nigori, trans.GetWrappedTrans());
- }
- EXPECT_TRUE(encrypted_types.Equals(
- encryption_handler()->GetEncryptedTypesUnsafe()));
- EXPECT_TRUE(encrypted_types.Equals(handler2.GetEncryptedTypesUnsafe()));
-
- // Receiving an empty nigori should not reset any encrypted types or trigger
- // an observer notification.
- Mock::VerifyAndClearExpectations(observer());
- Mock::VerifyAndClearExpectations(&observer2);
- nigori = sync_pb::NigoriSpecifics();
- {
- WriteTransaction trans(FROM_HERE, user_share());
- handler2.UpdateEncryptedTypesFromNigori(nigori, trans.GetWrappedTrans());
- }
- EXPECT_TRUE(encrypted_types.Equals(
- encryption_handler()->GetEncryptedTypesUnsafe()));
-}
-
-// Verify the encryption handler processes the encrypt everything field
-// properly.
-TEST_F(SyncEncryptionHandlerImplTest, EncryptEverythingExplicit) {
- sync_pb::NigoriSpecifics nigori;
- nigori.set_encrypt_everything(true);
-
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
-
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- ModelTypeSet encrypted_types =
- encryption_handler()->GetEncryptedTypesUnsafe();
- EXPECT_TRUE(encrypted_types.Equals(ModelTypeSet(PASSWORDS)));
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->UpdateEncryptedTypesFromNigori(
- nigori,
- trans.GetWrappedTrans());
- }
-
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- encrypted_types = encryption_handler()->GetEncryptedTypesUnsafe();
- EXPECT_TRUE(encrypted_types.HasAll(EncryptableUserTypes()));
-
- // Receiving the nigori node again shouldn't trigger another notification.
- Mock::VerifyAndClearExpectations(observer());
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->UpdateEncryptedTypesFromNigori(
- nigori,
- trans.GetWrappedTrans());
- }
-}
-
-// Verify the encryption handler can detect an implicit encrypt everything state
-// (from clients that failed to write the encrypt everything field).
-TEST_F(SyncEncryptionHandlerImplTest, EncryptEverythingImplicit) {
- sync_pb::NigoriSpecifics nigori;
- nigori.set_encrypt_bookmarks(true); // Non-passwords = encrypt everything
-
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
-
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- ModelTypeSet encrypted_types =
- encryption_handler()->GetEncryptedTypesUnsafe();
- EXPECT_TRUE(encrypted_types.Equals(ModelTypeSet(PASSWORDS)));
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->UpdateEncryptedTypesFromNigori(
- nigori,
- trans.GetWrappedTrans());
- }
-
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- encrypted_types = encryption_handler()->GetEncryptedTypesUnsafe();
- EXPECT_TRUE(encrypted_types.HasAll(EncryptableUserTypes()));
-
- // Receiving a nigori node with encrypt everything explicitly set shouldn't
- // trigger another notification.
- Mock::VerifyAndClearExpectations(observer());
- nigori.set_encrypt_everything(true);
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->UpdateEncryptedTypesFromNigori(
- nigori,
- trans.GetWrappedTrans());
- }
-}
-
-// Verify the encryption handler can deal with new versions treating new types
-// as Sensitive, and that it does not consider this an implicit encrypt
-// everything case.
-TEST_F(SyncEncryptionHandlerImplTest, UnknownSensitiveTypes) {
- sync_pb::NigoriSpecifics nigori;
- nigori.set_encrypt_everything(false);
- nigori.set_encrypt_bookmarks(true);
-
- ModelTypeSet expected_encrypted_types =
- SyncEncryptionHandler::SensitiveTypes();
- expected_encrypted_types.Put(BOOKMARKS);
-
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(
- HasModelTypes(expected_encrypted_types), false));
-
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- ModelTypeSet encrypted_types =
- encryption_handler()->GetEncryptedTypesUnsafe();
- EXPECT_TRUE(encrypted_types.Equals(ModelTypeSet(PASSWORDS)));
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->UpdateEncryptedTypesFromNigori(
- nigori,
- trans.GetWrappedTrans());
- }
-
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- encrypted_types = encryption_handler()->GetEncryptedTypesUnsafe();
- EXPECT_TRUE(encrypted_types.Equals(ModelTypeSet(BOOKMARKS, PASSWORDS)));
-}
-
-// Receive an old nigori with old encryption keys and encrypted types. We should
-// not revert our default key or encrypted types, and should post a task to
-// overwrite the existing nigori with the correct data.
-TEST_F(SyncEncryptionHandlerImplTest, ReceiveOldNigori) {
- KeyParams old_key = {"localhost", "dummy", "old"};
- KeyParams current_key = {"localhost", "dummy", "cur"};
-
- // Data for testing encryption/decryption.
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- other_cryptographer.AddKey(old_key);
- sync_pb::EntitySpecifics other_encrypted_specifics;
- other_encrypted_specifics.mutable_bookmark()->set_title("title");
- other_cryptographer.Encrypt(
- other_encrypted_specifics,
- other_encrypted_specifics.mutable_encrypted());
- sync_pb::EntitySpecifics our_encrypted_specifics;
- our_encrypted_specifics.mutable_bookmark()->set_title("title2");
- ModelTypeSet encrypted_types = EncryptableUserTypes();
-
- // Set up the current encryption state (containing both keys and encrypt
- // everything).
- sync_pb::NigoriSpecifics current_nigori_specifics;
- GetCryptographer()->AddKey(old_key);
- GetCryptographer()->AddKey(current_key);
- GetCryptographer()->Encrypt(
- our_encrypted_specifics,
- our_encrypted_specifics.mutable_encrypted());
- GetCryptographer()->GetKeys(
- current_nigori_specifics.mutable_encryption_keybag());
- current_nigori_specifics.set_encrypt_everything(true);
-
- EXPECT_CALL(*observer(), OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(), OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- {
- // Update the encryption handler.
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->ApplyNigoriUpdate(
- current_nigori_specifics,
- trans.GetWrappedTrans());
- }
- Mock::VerifyAndClearExpectations(observer());
-
- // Now set up the old nigori specifics and apply it on top.
- // Has an old set of keys, and no encrypted types.
- sync_pb::NigoriSpecifics old_nigori;
- other_cryptographer.GetKeys(old_nigori.mutable_encryption_keybag());
-
- EXPECT_CALL(*observer(), OnCryptographerStateChanged(_)).Times(AnyNumber());
- {
- // Update the encryption handler.
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->ApplyNigoriUpdate(
- old_nigori,
- trans.GetWrappedTrans());
- }
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_FALSE(GetCryptographer()->has_pending_keys());
-
- // Encryption handler should have posted a task to overwrite the old
- // specifics.
- PumpLoop();
-
- {
- // The cryptographer should be able to decrypt both sets of keys and still
- // be encrypting with the newest, and the encrypted types should be the
- // most recent.
- // In addition, the nigori node should match the current encryption state.
- ReadTransaction trans(FROM_HERE, user_share());
- ReadNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- const sync_pb::NigoriSpecifics& nigori = nigori_node.GetNigoriSpecifics();
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(
- our_encrypted_specifics.encrypted()));
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(
- other_encrypted_specifics.encrypted()));
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(nigori.encryption_keybag()));
- EXPECT_TRUE(nigori.encrypt_everything());
- EXPECT_TRUE(
- GetCryptographer()->CanDecryptUsingDefaultKey(
- nigori.encryption_keybag()));
- }
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
-}
-
-// Ensure setting the keystore key works, updates the bootstrap token, and
-// triggers a non-backwards compatible migration. Then verify that the
-// bootstrap token can be correctly parsed by the encryption handler at startup
-// time.
-TEST_F(SyncEncryptionHandlerImplTest, SetKeystoreMigratesAndUpdatesBootstrap) {
- // Passing no keys should do nothing.
- EXPECT_CALL(*observer(), OnBootstrapTokenUpdated(_, _)).Times(0);
- {
- WriteTransaction trans(FROM_HERE, user_share());
- EXPECT_FALSE(GetCryptographer()->is_initialized());
- EXPECT_TRUE(encryption_handler()->NeedKeystoreKey(trans.GetWrappedTrans()));
- EXPECT_FALSE(encryption_handler()->SetKeystoreKeys(
- BuildEncryptionKeyProto(std::string()), trans.GetWrappedTrans()));
- EXPECT_TRUE(encryption_handler()->NeedKeystoreKey(trans.GetWrappedTrans()));
- }
- Mock::VerifyAndClearExpectations(observer());
-
- // Build a set of keystore keys.
- const char kRawOldKeystoreKey[] = "old_keystore_key";
- std::string old_keystore_key;
- base::Base64Encode(kRawOldKeystoreKey, &old_keystore_key);
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
-
- // Pass them to the encryption handler, triggering a migration and bootstrap
- // token update.
- std::string encoded_key;
- std::string keystore_bootstrap;
- EXPECT_CALL(*observer(), OnEncryptionComplete());
- EXPECT_CALL(*observer(), OnCryptographerStateChanged(_));
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- EXPECT_CALL(*observer(), OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_,
- KEYSTORE_BOOTSTRAP_TOKEN)).
- WillOnce(SaveArg<0>(&keystore_bootstrap));
- {
- WriteTransaction trans(FROM_HERE, user_share());
- EXPECT_TRUE(
- encryption_handler()->SetKeystoreKeys(
- keys,
- trans.GetWrappedTrans()));
- EXPECT_FALSE(
- encryption_handler()->NeedKeystoreKey(trans.GetWrappedTrans()));
- EXPECT_FALSE(GetCryptographer()->is_initialized());
- }
- PumpLoop();
- EXPECT_TRUE(GetCryptographer()->is_initialized());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kKeystoreKey);
-
- // Ensure the bootstrap is encoded properly (a base64 encoded encrypted blob
- // of list values containing the keystore keys).
- std::string decoded_bootstrap;
- ASSERT_TRUE(base::Base64Decode(keystore_bootstrap, &decoded_bootstrap));
- std::string decrypted_bootstrap;
- ASSERT_TRUE(
- GetCryptographer()->encryptor()->DecryptString(decoded_bootstrap,
- &decrypted_bootstrap));
- JSONStringValueSerializer json(decrypted_bootstrap);
- scoped_ptr<base::Value> deserialized_keystore_keys(
- json.Deserialize(NULL, NULL));
- ASSERT_TRUE(deserialized_keystore_keys.get());
- base::ListValue* keystore_list = NULL;
- deserialized_keystore_keys->GetAsList(&keystore_list);
- ASSERT_TRUE(keystore_list);
- ASSERT_EQ(2U, keystore_list->GetSize());
- std::string test_string;
- keystore_list->GetString(0, &test_string);
- ASSERT_EQ(old_keystore_key, test_string);
- keystore_list->GetString(1, &test_string);
- ASSERT_EQ(kKeystoreKey, test_string);
-
-
- // Now make sure a new encryption handler can correctly parse the bootstrap
- // token.
- SyncEncryptionHandlerImpl handler2(user_share(),
- &encryptor_,
- std::string(), // Cryptographer bootstrap.
- keystore_bootstrap);
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- EXPECT_FALSE(handler2.NeedKeystoreKey(trans.GetWrappedTrans()));
- }
-}
-
-// Ensure GetKeystoreDecryptor only updates the keystore decryptor token if it
-// wasn't already set properly. Otherwise, the decryptor should remain the
-// same.
-TEST_F(SyncEncryptionHandlerImplTest, GetKeystoreDecryptor) {
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData encrypted;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- other_cryptographer.AddKey(cur_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &encrypted));
- std::string serialized = encrypted.SerializeAsString();
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &encrypted));
- EXPECT_EQ(serialized, encrypted.SerializeAsString());
-}
-
-// Test that we don't attempt to migrate while an implicit passphrase is pending
-// and that once we do decrypt pending keys we migrate the nigori. Once
-// migrated, we should be in keystore passphrase state.
-TEST_F(SyncEncryptionHandlerImplTest, MigrateOnDecryptImplicitPass) {
- const char kOtherKey[] = "other";
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- Mock::VerifyAndClearExpectations(observer());
- }
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams other_key = {"localhost", "dummy", kOtherKey};
- other_cryptographer.AddKey(other_key);
-
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(false);
- nigori.set_encrypt_everything(false);
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- // Run any tasks posted via AppplyNigoriUpdate.
- PumpLoop();
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- encryption_handler()->SetDecryptionPassphrase(kOtherKey);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(KEYSTORE_PASSPHRASE, encryption_handler()->GetPassphraseType());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kOtherKey);
-}
-
-// Test that we don't attempt to migrate while a custom passphrase is pending,
-// and that once we do decrypt pending keys we migrate the nigori. Once
-// migrated, we should be in custom passphrase state with encrypt everything.
-TEST_F(SyncEncryptionHandlerImplTest, MigrateOnDecryptCustomPass) {
- const char kOtherKey[] = "other";
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- Mock::VerifyAndClearExpectations(observer());
- }
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
-
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams other_key = {"localhost", "dummy", kOtherKey};
- other_cryptographer.AddKey(other_key);
-
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_encrypt_everything(false);
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- // Run any tasks posted via AppplyNigoriUpdate.
- PumpLoop();
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(2);
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- encryption_handler()->SetDecryptionPassphrase(kOtherKey);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(CUSTOM_PASSPHRASE, encryption_handler()->GetPassphraseType());
- VerifyMigratedNigori(CUSTOM_PASSPHRASE, kOtherKey);
-}
-
-// Test that we trigger a migration when we set the keystore key, had an
-// implicit passphrase, and did not have encrypt everything. We should switch
-// to KEYSTORE_PASSPHRASE.
-TEST_F(SyncEncryptionHandlerImplTest, MigrateOnKeystoreKeyAvailableImplicit) {
- const char kCurKey[] = "cur";
- KeyParams current_key = {"localhost", "dummy", kCurKey};
- GetCryptographer()->AddKey(current_key);
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->Init();
- Mock::VerifyAndClearExpectations(observer());
-
- {
- ReadTransaction trans(FROM_HERE, user_share());
- // Once we provide a keystore key, we should perform the migration.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- // The actual migration gets posted, so run all pending tasks.
- PumpLoop();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(KEYSTORE_PASSPHRASE,
- encryption_handler()->GetPassphraseType());
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kCurKey);
-}
-
-// Test that we trigger a migration when we set the keystore key, had an
-// implicit passphrase, and encrypt everything enabled. We should switch to
-// FROZEN_IMPLICIT_PASSPHRASE.
-TEST_F(SyncEncryptionHandlerImplTest,
- MigrateOnKeystoreKeyAvailableFrozenImplicit) {
- const char kCurKey[] = "cur";
- KeyParams current_key = {"localhost", "dummy", kCurKey};
- GetCryptographer()->AddKey(current_key);
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->Init();
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->EnableEncryptEverything();
-
- {
- ReadTransaction trans(FROM_HERE, user_share());
- // Once we provide a keystore key, we should perform the migration.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(FROZEN_IMPLICIT_PASSPHRASE, _));
- // The actual migration gets posted, so run all pending tasks.
- PumpLoop();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(FROZEN_IMPLICIT_PASSPHRASE,
- encryption_handler()->GetPassphraseType());
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(FROZEN_IMPLICIT_PASSPHRASE, kCurKey);
-}
-
-// Test that we trigger a migration when we set the keystore key, had a
-// custom passphrase, and encrypt everything enabled. The passphrase state
-// should remain as CUSTOM_PASSPHRASE, and encrypt everything stay the same.
-TEST_F(SyncEncryptionHandlerImplTest,
- MigrateOnKeystoreKeyAvailableCustomWithEncryption) {
- const char kCurKey[] = "cur";
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- encryption_handler()->Init();
- encryption_handler()->SetEncryptionPassphrase(kCurKey, true);
- EXPECT_FALSE(encryption_handler()->custom_passphrase_time().is_null());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->EnableEncryptEverything();
- Mock::VerifyAndClearExpectations(observer());
-
- {
- ReadTransaction trans(FROM_HERE, user_share());
- // Once we provide a keystore key, we should perform the migration.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- // The actual migration gets posted, so run all pending tasks.
- PumpLoop();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(CUSTOM_PASSPHRASE,
- encryption_handler()->GetPassphraseType());
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(CUSTOM_PASSPHRASE, kCurKey);
-}
-
-// Test that we trigger a migration when we set the keystore key, had a
-// custom passphrase, and did not have encrypt everything. The passphrase state
-// should remain as CUSTOM_PASSPHRASE, and encrypt everything should be enabled.
-TEST_F(SyncEncryptionHandlerImplTest,
- MigrateOnKeystoreKeyAvailableCustomNoEncryption) {
- const char kCurKey[] = "cur";
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- encryption_handler()->Init();
- encryption_handler()->SetEncryptionPassphrase(kCurKey, true);
- EXPECT_FALSE(encryption_handler()->custom_passphrase_time().is_null());
- Mock::VerifyAndClearExpectations(observer());
-
- {
- ReadTransaction trans(FROM_HERE, user_share());
- // Once we provide a keystore key, we should perform the migration.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- // The actual migration gets posted, so run all pending tasks.
- PumpLoop();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(CUSTOM_PASSPHRASE,
- encryption_handler()->GetPassphraseType());
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(CUSTOM_PASSPHRASE, kCurKey);
-}
-
-// Test that we can handle receiving a migrated nigori node in the
-// KEYSTORE_PASS state, and use the keystore decryptor token to decrypt the
-// keybag.
-TEST_F(SyncEncryptionHandlerImplTest, ReceiveMigratedNigoriKeystorePass) {
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData keystore_decryptor_token;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- other_cryptographer.AddKey(cur_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &keystore_decryptor_token));
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- EXPECT_FALSE(GetCryptographer()->is_ready());
- EXPECT_NE(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
-
- // Now build a nigori node with the generated keystore decryptor token and
- // initialize the encryption handler with it. The cryptographer should be
- // initialized properly to decrypt both kCurKey and kKeystoreKey.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
-
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- // Run any tasks posted via AppplyNigoriUpdate.
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, KEYSTORE_PASSPHRASE, kCurKey);
-
- // Check that the cryptographer still encrypts with the current key.
- sync_pb::EncryptedData current_encrypted;
- other_cryptographer.EncryptString("string", &current_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(current_encrypted));
-
- // Check that the cryptographer can decrypt keystore key based encryption.
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-}
-
-// Test that we handle receiving migrated nigori's with
-// FROZEN_IMPLICIT_PASSPHRASE state. We should be in a pending key state until
-// we supply the pending frozen implicit passphrase key.
-TEST_F(SyncEncryptionHandlerImplTest, ReceiveMigratedNigoriFrozenImplicitPass) {
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData encrypted;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- other_cryptographer.AddKey(cur_key);
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
-
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
-
- {
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(FROZEN_IMPLICIT_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.set_keybag_is_frozen(true);
- nigori.set_passphrase_type(
- sync_pb::NigoriSpecifics::FROZEN_IMPLICIT_PASSPHRASE);
- nigori.set_keystore_migration_time(1);
- nigori.set_encrypt_everything(true);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- // Run any tasks posted via AppplyNigoriUpdate.
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(FROZEN_IMPLICIT_PASSPHRASE,
- encryption_handler()->GetPassphraseType());
- EXPECT_TRUE(GetCryptographer()->has_pending_keys());
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
-
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- encryption_handler()->SetDecryptionPassphrase(kCurKey);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- VerifyMigratedNigoriWithTimestamp(1, FROZEN_IMPLICIT_PASSPHRASE, kCurKey);
-
- // Check that the cryptographer still encrypts with the current key.
- sync_pb::EncryptedData current_encrypted;
- other_cryptographer.EncryptString("string", &current_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(current_encrypted));
-
- // Check that the cryptographer can decrypt keystore key based encryption.
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-}
-
-// Test that we handle receiving migrated nigori's with
-// CUSTOM_PASSPHRASE state. We should be in a pending key state until we
-// provide the custom passphrase key.
-TEST_F(SyncEncryptionHandlerImplTest, ReceiveMigratedNigoriCustomPass) {
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData encrypted;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- other_cryptographer.AddKey(cur_key);
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
-
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
-
- {
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.set_keybag_is_frozen(true);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- nigori.set_keystore_migration_time(1);
- nigori.set_encrypt_everything(true);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- // Run any tasks posted via AppplyNigoriUpdate.
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(CUSTOM_PASSPHRASE, encryption_handler()->GetPassphraseType());
- EXPECT_TRUE(GetCryptographer()->has_pending_keys());
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
-
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- encryption_handler()->SetDecryptionPassphrase(kCurKey);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCurKey);
-
- // Check that the cryptographer still encrypts with the current key.
- sync_pb::EncryptedData current_encrypted;
- other_cryptographer.EncryptString("string", &current_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(current_encrypted));
-
- // Check that the cryptographer can decrypt keystore key based encryption.
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-}
-
-// Test that if we have a migrated nigori with a custom passphrase, then receive
-// and old implicit passphrase nigori, we properly overwrite it with the current
-// state.
-TEST_F(SyncEncryptionHandlerImplTest, ReceiveUnmigratedNigoriAfterMigration) {
- const char kOldKey[] = "old";
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData encrypted;
- KeyParams old_key = {"localhost", "dummy", kOldKey};
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- GetCryptographer()->AddKey(old_key);
- GetCryptographer()->AddKey(cur_key);
-
- // Build a migrated nigori with full encryption.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- GetCryptographer()->GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- nigori.set_encrypt_everything(true);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true)).Times(2);
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCurKey);
-
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- Mock::VerifyAndClearExpectations(observer());
-
- // Now build an old unmigrated nigori node with old encrypted types. We should
- // properly overwrite it with the migrated + encrypt everything state.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(), OnEncryptionComplete());
- {
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- other_cryptographer.AddKey(old_key);
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(false);
- nigori.set_encrypt_everything(false);
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- PumpLoop();
-
- // Verify we're still migrated and have proper encryption state.
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCurKey);
-}
-
-// Test that if we have a migrated nigori with a custom passphrase, then receive
-// a migrated nigori with a keystore passphrase, we properly overwrite it with
-// the current state.
-TEST_F(SyncEncryptionHandlerImplTest, ReceiveOldMigratedNigori) {
- const char kOldKey[] = "old";
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData encrypted;
- KeyParams old_key = {"localhost", "dummy", kOldKey};
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- GetCryptographer()->AddKey(old_key);
- GetCryptographer()->AddKey(cur_key);
-
- // Build a migrated nigori with full encryption.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- GetCryptographer()->GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::CUSTOM_PASSPHRASE);
- nigori.set_encrypt_everything(true);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true)).Times(2);
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCurKey);
-
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- Mock::VerifyAndClearExpectations(observer());
-
- // Now build an old keystore nigori node with old encrypted types. We should
- // properly overwrite it with the migrated + encrypt everything state.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(), OnEncryptionComplete());
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- other_cryptographer.AddKey(old_key);
- encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- nigori.mutable_keystore_decryptor_token());
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_encrypt_everything(false);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- nigori.set_keystore_migration_time(1);
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- PumpLoop();
-
- // Verify we're still migrated and have proper encryption state.
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCurKey);
-}
-
-// Test that if we receive the keystore key after receiving a migrated nigori
-// node, we properly use the keystore decryptor token to decrypt the keybag.
-TEST_F(SyncEncryptionHandlerImplTest, SetKeystoreAfterReceivingMigratedNigori) {
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData keystore_decryptor_token;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- other_cryptographer.AddKey(cur_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &keystore_decryptor_token));
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- EXPECT_FALSE(GetCryptographer()->is_ready());
- EXPECT_NE(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
-
- // Now build a nigori node with the generated keystore decryptor token and
- // initialize the encryption handler with it. The cryptographer should be
- // initialized properly to decrypt both kCurKey and kKeystoreKey.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
-
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- // Run any tasks posted via AppplyNigoriUpdate.
- PumpLoop();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->has_pending_keys());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- PumpLoop();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, KEYSTORE_PASSPHRASE, kCurKey);
-
- // Check that the cryptographer still encrypts with the current key.
- sync_pb::EncryptedData current_encrypted;
- other_cryptographer.EncryptString("string", &current_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(current_encrypted));
-
- // Check that the cryptographer can decrypt keystore key based encryption.
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-}
-
-// Test that after receiving a migrated nigori and decrypting it using the
-// keystore key, we can then switch to a custom passphrase. The nigori should
-// remain migrated and encrypt everything should be enabled.
-TEST_F(SyncEncryptionHandlerImplTest, SetCustomPassAfterMigration) {
- const char kOldKey[] = "old";
- sync_pb::EncryptedData keystore_decryptor_token;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kOldKey};
- other_cryptographer.AddKey(cur_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &keystore_decryptor_token));
-
- // Build a nigori node with the generated keystore decryptor token and
- // initialize the encryption handler with it. The cryptographer should be
- // initialized properly to decrypt both kOldKey and kKeystoreKey.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- nigori_node.SetNigoriSpecifics(nigori);
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
-
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- const char kNewKey[] = "new_key";
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(2);
- encryption_handler()->SetEncryptionPassphrase(kNewKey, true);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- EXPECT_FALSE(encryption_handler()->custom_passphrase_time().is_null());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kNewKey);
-
- // Check that the cryptographer can decrypt the old key.
- sync_pb::EncryptedData old_encrypted;
- other_cryptographer.EncryptString("string", &old_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(old_encrypted));
-
- // Check that the cryptographer can decrypt keystore key based encryption.
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-
- // Check the the cryptographer is encrypting with the new key.
- KeyParams new_key = {"localhost", "dummy", kNewKey};
- Cryptographer new_cryptographer(GetCryptographer()->encryptor());
- new_cryptographer.AddKey(new_key);
- sync_pb::EncryptedData new_encrypted;
- new_cryptographer.EncryptString("string", &new_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(new_encrypted));
-}
-
-// Test that if a client without a keystore key (e.g. one without keystore
-// encryption enabled) receives a migrated nigori and then attempts to set a
-// custom passphrase, it also enables encrypt everything. The nigori node
-// should remain migrated.
-TEST_F(SyncEncryptionHandlerImplTest,
- SetCustomPassAfterMigrationNoKeystoreKey) {
- const char kOldKey[] = "old";
- sync_pb::EncryptedData keystore_decryptor_token;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kOldKey};
- other_cryptographer.AddKey(cur_key);
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- other_cryptographer.AddNonDefaultKey(keystore_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &keystore_decryptor_token));
-
- // Build a nigori node with the generated keystore decryptor token and
- // initialize the encryption handler with it. The cryptographer will have
- // pending keys until we provide the decryption passphrase.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->has_pending_keys());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->SetDecryptionPassphrase(kOldKey);
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- const char kNewKey[] = "new_key";
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(2);
- encryption_handler()->SetEncryptionPassphrase(kNewKey, true);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), CUSTOM_PASSPHRASE);
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- EXPECT_FALSE(encryption_handler()->custom_passphrase_time().is_null());
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kNewKey);
-
- // Check that the cryptographer can decrypt the old key.
- sync_pb::EncryptedData old_encrypted;
- other_cryptographer.EncryptString("string", &old_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(old_encrypted));
-
- // Check that the cryptographer can still decrypt keystore key based
- // encryption (should have been extracted from the encryption keybag).
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-
- // Check the the cryptographer is encrypting with the new key.
- KeyParams new_key = {"localhost", "dummy", kNewKey};
- Cryptographer new_cryptographer(GetCryptographer()->encryptor());
- new_cryptographer.AddKey(new_key);
- sync_pb::EncryptedData new_encrypted;
- new_cryptographer.EncryptString("string", &new_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(new_encrypted));
-}
-
-// Test that if a client without a keystore key (e.g. one without keystore
-// encryption enabled) receives a migrated nigori and then attempts to set a
-// new implicit passphrase, we do not modify the nigori node (the implicit
-// passphrase is dropped).
-TEST_F(SyncEncryptionHandlerImplTest,
- SetImplicitPassAfterMigrationNoKeystoreKey) {
- const char kOldKey[] = "old";
- sync_pb::EncryptedData keystore_decryptor_token;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kOldKey};
- other_cryptographer.AddKey(cur_key);
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- other_cryptographer.AddNonDefaultKey(keystore_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &keystore_decryptor_token));
-
- // Build a nigori node with the generated keystore decryptor token and
- // initialize the encryption handler with it. The cryptographer will have
- // pending keys until we provide the decryption passphrase.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- nigori_node.SetNigoriSpecifics(nigori);
- }
-
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->has_pending_keys());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->SetDecryptionPassphrase(kOldKey);
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- // Should get dropped on the floor silently.
- const char kNewKey[] = "new_key";
- encryption_handler()->SetEncryptionPassphrase(kNewKey, false);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, KEYSTORE_PASSPHRASE, kOldKey);
-
- // Check that the cryptographer can decrypt the old key.
- sync_pb::EncryptedData old_encrypted;
- other_cryptographer.EncryptString("string", &old_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(old_encrypted));
-
- // Check that the cryptographer can still decrypt keystore key based
- // encryption (due to extracting the keystore key from the encryption keybag).
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-
- // Check the the cryptographer does not have the new key.
- KeyParams new_key = {"localhost", "dummy", kNewKey};
- Cryptographer new_cryptographer(GetCryptographer()->encryptor());
- new_cryptographer.AddKey(new_key);
- sync_pb::EncryptedData new_encrypted;
- new_cryptographer.EncryptString("string", &new_encrypted);
- EXPECT_FALSE(GetCryptographer()->CanDecryptUsingDefaultKey(new_encrypted));
-}
-
-// Test that if a client without a keystore key (e.g. one without keystore
-// encryption enabled) receives a migrated nigori in keystore passphrase state
-// and then attempts to enable encrypt everything, we switch to a custom
-// passphrase. The nigori should remain migrated.
-TEST_F(SyncEncryptionHandlerImplTest,
- MigrateOnEncryptEverythingKeystorePassphrase) {
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData keystore_decryptor_token;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- other_cryptographer.AddKey(cur_key);
- KeyParams keystore_key = {"localhost", "dummy", kKeystoreKey};
- other_cryptographer.AddNonDefaultKey(keystore_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
- EXPECT_TRUE(encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- &keystore_decryptor_token));
-
- // Build a nigori node with the generated keystore decryptor token and
- // initialize the encryption handler with it. The cryptographer will have
- // pending keys until we provide the decryption passphrase.
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- nigori.mutable_keystore_decryptor_token()->CopyFrom(
- keystore_decryptor_token);
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_keystore_migration_time(1);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- nigori_node.SetNigoriSpecifics(nigori);
- }
- EXPECT_CALL(*observer(),
- OnPassphraseRequired(_, _));
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- encryption_handler()->Init();
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->has_pending_keys());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->SetDecryptionPassphrase(kCurKey);
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(FROZEN_IMPLICIT_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- encryption_handler()->EnableEncryptEverything();
- Mock::VerifyAndClearExpectations(observer());
-
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(FROZEN_IMPLICIT_PASSPHRASE,
- encryption_handler()->GetPassphraseType());
- EXPECT_TRUE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigoriWithTimestamp(1, FROZEN_IMPLICIT_PASSPHRASE, kCurKey);
-
- // Check that the cryptographer is encrypting using the frozen current key.
- sync_pb::EncryptedData current_encrypted;
- other_cryptographer.EncryptString("string", &current_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecryptUsingDefaultKey(current_encrypted));
-
- // Check that the cryptographer can still decrypt keystore key based
- // encryption (due to extracting the keystore key from the encryption keybag).
- Cryptographer keystore_cryptographer(GetCryptographer()->encryptor());
- keystore_cryptographer.AddKey(keystore_key);
- sync_pb::EncryptedData keystore_encrypted;
- keystore_cryptographer.EncryptString("string", &keystore_encrypted);
- EXPECT_TRUE(GetCryptographer()->CanDecrypt(keystore_encrypted));
-}
-
-// If we receive a nigori migrated and with a KEYSTORE_PASSPHRASE type, but
-// using an old default key (i.e. old GAIA password), we should overwrite the
-// nigori, updating the keybag and keystore decryptor.
-TEST_F(SyncEncryptionHandlerImplTest,
- ReceiveMigratedNigoriWithOldPassphrase) {
- const char kOldKey[] = "old";
- const char kCurKey[] = "cur";
- sync_pb::EncryptedData encrypted;
- KeyParams old_key = {"localhost", "dummy", kOldKey};
- KeyParams cur_key = {"localhost", "dummy", kCurKey};
- GetCryptographer()->AddKey(old_key);
- GetCryptographer()->AddKey(cur_key);
-
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- other_cryptographer.AddKey(old_key);
- EXPECT_TRUE(other_cryptographer.is_ready());
-
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, false));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete());
- encryption_handler()->Init();
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
-
- {
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawKeystoreKey),
- trans.GetWrappedTrans());
- }
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kCurKey);
-
- // Now build an old keystore passphrase nigori node.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(), OnEncryptionComplete());
- {
- WriteTransaction trans(FROM_HERE, user_share());
- WriteNode nigori_node(&trans);
- ASSERT_EQ(nigori_node.InitByTagLookup(kNigoriTag), BaseNode::INIT_OK);
- sync_pb::NigoriSpecifics nigori;
- Cryptographer other_cryptographer(GetCryptographer()->encryptor());
- other_cryptographer.AddKey(old_key);
- encryption_handler()->GetKeystoreDecryptor(
- other_cryptographer,
- kKeystoreKey,
- nigori.mutable_keystore_decryptor_token());
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- nigori.set_keybag_is_frozen(true);
- nigori.set_encrypt_everything(false);
- nigori.set_passphrase_type(sync_pb::NigoriSpecifics::KEYSTORE_PASSPHRASE);
- nigori.set_keystore_migration_time(1);
- encryption_handler()->ApplyNigoriUpdate(nigori, trans.GetWrappedTrans());
- nigori_node.SetNigoriSpecifics(nigori);
- }
- PumpLoop();
-
- // Verify we're still migrated and have proper encryption state.
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kCurKey);
-}
-
-// Trigger a key rotation upon receiving new keys if we already had a keystore
-// migrated nigori with the gaia key as the default (still in backwards
-// compatible mode).
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysGaiaDefault) {
- // Destroy the existing nigori node so we init without a nigori node.
- TearDown();
- test_user_share_.SetUp();
- SetUpEncryption();
-
- const char kOldGaiaKey[] = "old_gaia_key";
- const char kRawOldKeystoreKey[] = "old_keystore_key";
- std::string old_keystore_key;
- base::Base64Encode(kRawOldKeystoreKey, &old_keystore_key);
- {
- ReadTransaction trans(FROM_HERE, user_share());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawOldKeystoreKey),
- trans.GetWrappedTrans());
- }
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Then init the nigori node with a backwards compatible set of keys.
- CreateRootForType(NIGORI);
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- InitKeystoreMigratedNigori(1, kOldGaiaKey, old_keystore_key);
-
- // Now set some new keystore keys.
- EXPECT_CALL(*observer(), OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(), OnEncryptionComplete());
- {
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
- // Pump for any posted tasks.
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Verify we're still migrated and have proper encryption state. We should
- // have rotated the keybag so that it's now encrypted with the newest keystore
- // key (instead of the old gaia key).
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kKeystoreKey);
-}
-
-// Trigger a key rotation upon receiving new keys if we already had a keystore
-// migrated nigori with the keystore key as the default.
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysKeystoreDefault) {
- // Destroy the existing nigori node so we init without a nigori node.
- TearDown();
- test_user_share_.SetUp();
- SetUpEncryption();
-
- const char kRawOldKeystoreKey[] = "old_keystore_key";
- std::string old_keystore_key;
- base::Base64Encode(kRawOldKeystoreKey, &old_keystore_key);
- {
- ReadTransaction trans(FROM_HERE, user_share());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(BuildEncryptionKeyProto(
- kRawOldKeystoreKey),
- trans.GetWrappedTrans());
- }
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Then init the nigori node with a non-backwards compatible set of keys.
- CreateRootForType(NIGORI);
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- InitKeystoreMigratedNigori(1, old_keystore_key, old_keystore_key);
-
- // Now set some new keystore keys.
- EXPECT_CALL(*observer(), OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(), OnEncryptionComplete());
- {
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
- // Pump for any posted tasks.
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Verify we're still migrated and have proper encryption state. We should
- // have rotated the keybag so that it's now encrypted with the newest keystore
- // key (instead of the old gaia key).
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kKeystoreKey);
-}
-
-// Trigger a key rotation upon when a pending gaia passphrase is resolved.
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysAfterPendingGaiaResolved) {
- const char kOldGaiaKey[] = "old_gaia_key";
- const char kRawOldKeystoreKey[] = "old_keystore_key";
-
- EXPECT_CALL(*observer(), OnPassphraseRequired(_, _));
- InitUnmigratedNigori(kOldGaiaKey, IMPLICIT_PASSPHRASE);
-
- {
- // Pass multiple keystore keys, signaling a rotation has happened.
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- ReadTransaction trans(FROM_HERE, user_share());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Resolve the pending keys. This should trigger the key rotation.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(AtLeast(1));
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- encryption_handler()->SetDecryptionPassphrase(kOldGaiaKey);
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(KEYSTORE_PASSPHRASE, encryption_handler()->GetPassphraseType());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kKeystoreKey);
-}
-
-// When signing in for the first time, make sure we can rotate keys if we
-// already have a keystore migrated nigori.
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysGaiaDefaultOnInit) {
- // Destroy the existing nigori node so we init without a nigori node.
- TearDown();
- test_user_share_.SetUp();
- SetUpEncryption();
-
- const char kOldGaiaKey[] = "old_gaia_key";
- const char kRawOldKeystoreKey[] = "old_keystore_key";
- std::string old_keystore_key;
- base::Base64Encode(kRawOldKeystoreKey, &old_keystore_key);
-
- // Set two keys, signaling that a rotation has been performed. No nigori
- // node is present yet, so we can't rotate.
- {
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- ReadTransaction trans(FROM_HERE, user_share());
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
-
- // Then init the nigori node with an old set of keys.
- CreateRootForType(NIGORI);
- EXPECT_CALL(*observer(), OnPassphraseAccepted());
- InitKeystoreMigratedNigori(1, kOldGaiaKey, old_keystore_key);
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Verify we're still migrated and have proper encryption state. We should
- // have rotated the keybag so that it's now encrypted with the newest keystore
- // key (instead of the old gaia key).
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_TRUE(GetCryptographer()->is_ready());
- EXPECT_EQ(encryption_handler()->GetPassphraseType(), KEYSTORE_PASSPHRASE);
- EXPECT_FALSE(encryption_handler()->EncryptEverythingEnabled());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kKeystoreKey);
-}
-
-// Trigger a key rotation when a migrated nigori (with an old keystore key) is
-// applied.
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysWhenMigratedNigoriArrives) {
- const char kOldGaiaKey[] = "old_gaia_key";
- const char kRawOldKeystoreKey[] = "old_keystore_key";
- std::string old_keystore_key;
- base::Base64Encode(kRawOldKeystoreKey, &old_keystore_key);
-
- EXPECT_CALL(*observer(), OnPassphraseRequired(_, _));
- InitUnmigratedNigori(kOldGaiaKey, IMPLICIT_PASSPHRASE);
-
- {
- // Pass multiple keystore keys, signaling a rotation has happened.
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- ReadTransaction trans(FROM_HERE, user_share());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Now simulate downloading a nigori node that was migrated before the
- // keys were rotated, and hence still encrypt with the old gaia key.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnPassphraseTypeChanged(KEYSTORE_PASSPHRASE, _));
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(AtLeast(1));
- {
- sync_pb::NigoriSpecifics nigori = BuildMigratedNigori(
- KEYSTORE_PASSPHRASE,
- 1,
- kOldGaiaKey,
- old_keystore_key);
- // Update the encryption handler.
- WriteTransaction trans(FROM_HERE, user_share());
- encryption_handler()->ApplyNigoriUpdate(
- nigori,
- trans.GetWrappedTrans());
- }
- EXPECT_FALSE(encryption_handler()->MigratedToKeystore());
- PumpLoop();
-
- EXPECT_TRUE(encryption_handler()->MigratedToKeystore());
- EXPECT_EQ(KEYSTORE_PASSPHRASE, encryption_handler()->GetPassphraseType());
- VerifyMigratedNigori(KEYSTORE_PASSPHRASE, kKeystoreKey);
-}
-
-// Verify that performing a migration while having more than one keystore key
-// preserves a custom passphrase.
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysUnmigratedCustomPassphrase) {
- const char kCustomPass[] = "custom_passphrase";
- const char kRawOldKeystoreKey[] = "old_keystore_key";
-
- EXPECT_CALL(*observer(), OnPassphraseRequired(_, _));
- InitUnmigratedNigori(kCustomPass, CUSTOM_PASSPHRASE);
-
- {
- // Pass multiple keystore keys, signaling a rotation has happened.
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- ReadTransaction trans(FROM_HERE, user_share());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- // Pass the decryption passphrase. This will also trigger the migration,
- // but should not overwrite the default key.
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnPassphraseAccepted());
- EXPECT_CALL(*observer(),
- OnEncryptedTypesChanged(_, true));
- EXPECT_CALL(*observer(),
- OnEncryptionComplete()).Times(AnyNumber());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- encryption_handler()->SetDecryptionPassphrase(kCustomPass);
- Mock::VerifyAndClearExpectations(observer());
-
- VerifyMigratedNigori(CUSTOM_PASSPHRASE, kCustomPass);
-}
-
-// Verify that a key rotation done after we've migrated a custom passphrase
-// nigori node preserves the custom passphrase.
-TEST_F(SyncEncryptionHandlerImplTest, RotateKeysMigratedCustomPassphrase) {
- const char kCustomPass[] = "custom_passphrase";
- const char kRawOldKeystoreKey[] = "old_keystore_key";
-
- KeyParams custom_key = {"localhost", "dummy", kCustomPass};
- GetCryptographer()->AddKey(custom_key);
-
- InitCustomPassMigratedNigori(1, kCustomPass);
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCustomPass);
-
- {
- // Pass multiple keystore keys, signaling a rotation has happened.
- google::protobuf::RepeatedPtrField<google::protobuf::string> keys;
- keys.Add()->assign(kRawOldKeystoreKey);
- keys.Add()->assign(kRawKeystoreKey);
- ReadTransaction trans(FROM_HERE, user_share());
- EXPECT_CALL(*observer(),
- OnBootstrapTokenUpdated(_, KEYSTORE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(*observer(),
- OnCryptographerStateChanged(_)).Times(AnyNumber());
- encryption_handler()->SetKeystoreKeys(keys,
- trans.GetWrappedTrans());
- }
- PumpLoop();
- Mock::VerifyAndClearExpectations(observer());
-
- VerifyMigratedNigoriWithTimestamp(1, CUSTOM_PASSPHRASE, kCustomPass);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/sync_manager_factory.cc b/chromium/sync/internal_api/sync_manager_factory.cc
deleted file mode 100644
index 9d784b46ada..00000000000
--- a/chromium/sync/internal_api/sync_manager_factory.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/sync_manager_factory.h"
-
-#include "sync/internal_api/sync_manager_impl.h"
-
-namespace syncer {
-
-SyncManagerFactory::SyncManagerFactory() {
-}
-
-SyncManagerFactory::~SyncManagerFactory() {
-}
-
-scoped_ptr<SyncManager> SyncManagerFactory::CreateSyncManager(
- const std::string name) {
- return scoped_ptr<SyncManager>(new SyncManagerImpl(name));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/sync_manager_impl.cc b/chromium/sync/internal_api/sync_manager_impl.cc
deleted file mode 100644
index 7e2d34bc2de..00000000000
--- a/chromium/sync/internal_api/sync_manager_impl.cc
+++ /dev/null
@@ -1,1347 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/sync_manager_impl.h"
-
-#include <string>
-
-#include "base/base64.h"
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/json/json_writer.h"
-#include "base/memory/ref_counted.h"
-#include "base/metrics/histogram.h"
-#include "base/observer_list.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/engine/sync_scheduler.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/change_reorder_buffer.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base_node.h"
-#include "sync/internal_api/public/configure_reason.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-#include "sync/internal_api/public/http_post_provider_factory.h"
-#include "sync/internal_api/public/internal_components_factory.h"
-#include "sync/internal_api/public/read_node.h"
-#include "sync/internal_api/public/read_transaction.h"
-#include "sync/internal_api/public/user_share.h"
-#include "sync/internal_api/public/util/experiments.h"
-#include "sync/internal_api/public/write_node.h"
-#include "sync/internal_api/public/write_transaction.h"
-#include "sync/internal_api/syncapi_internal.h"
-#include "sync/internal_api/syncapi_server_connection_manager.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_event_handler.h"
-#include "sync/js/js_reply_handler.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/in_memory_directory_backing_store.h"
-#include "sync/syncable/on_disk_directory_backing_store.h"
-
-using base::TimeDelta;
-using sync_pb::GetUpdatesCallerInfo;
-
-namespace syncer {
-
-using sessions::SyncSessionContext;
-using syncable::ImmutableWriteTransactionInfo;
-using syncable::SPECIFICS;
-using syncable::UNIQUE_POSITION;
-
-namespace {
-
-// Delays for syncer nudges.
-static const int kDefaultNudgeDelayMilliseconds = 200;
-static const int kPreferencesNudgeDelayMilliseconds = 2000;
-static const int kSyncRefreshDelayMsec = 500;
-static const int kSyncSchedulerDelayMsec = 250;
-
-// Maximum count and size for traffic recorder.
-static const unsigned int kMaxMessagesToRecord = 10;
-static const unsigned int kMaxMessageSizeToRecord = 5 * 1024;
-
-GetUpdatesCallerInfo::GetUpdatesSource GetSourceFromReason(
- ConfigureReason reason) {
- switch (reason) {
- case CONFIGURE_REASON_RECONFIGURATION:
- return GetUpdatesCallerInfo::RECONFIGURATION;
- case CONFIGURE_REASON_MIGRATION:
- return GetUpdatesCallerInfo::MIGRATION;
- case CONFIGURE_REASON_NEW_CLIENT:
- return GetUpdatesCallerInfo::NEW_CLIENT;
- case CONFIGURE_REASON_NEWLY_ENABLED_DATA_TYPE:
- case CONFIGURE_REASON_CRYPTO:
- return GetUpdatesCallerInfo::NEWLY_SUPPORTED_DATATYPE;
- default:
- NOTREACHED();
- }
- return GetUpdatesCallerInfo::UNKNOWN;
-}
-
-} // namespace
-
-// A class to calculate nudge delays for types.
-class NudgeStrategy {
- public:
- static TimeDelta GetNudgeDelayTimeDelta(const ModelType& model_type,
- SyncManagerImpl* core) {
- NudgeDelayStrategy delay_type = GetNudgeDelayStrategy(model_type);
- return GetNudgeDelayTimeDeltaFromType(delay_type,
- model_type,
- core);
- }
-
- private:
- // Possible types of nudge delay for datatypes.
- // Note: These are just hints. If a sync happens then all dirty entries
- // would be committed as part of the sync.
- enum NudgeDelayStrategy {
- // Sync right away.
- IMMEDIATE,
-
- // Sync this change while syncing another change.
- ACCOMPANY_ONLY,
-
- // The datatype does not use one of the predefined wait times but defines
- // its own wait time logic for nudge.
- CUSTOM,
- };
-
- static NudgeDelayStrategy GetNudgeDelayStrategy(const ModelType& type) {
- switch (type) {
- case AUTOFILL:
- return ACCOMPANY_ONLY;
- case PREFERENCES:
- case SESSIONS:
- case FAVICON_IMAGES:
- case FAVICON_TRACKING:
- return CUSTOM;
- default:
- return IMMEDIATE;
- }
- }
-
- static TimeDelta GetNudgeDelayTimeDeltaFromType(
- const NudgeDelayStrategy& delay_type, const ModelType& model_type,
- const SyncManagerImpl* core) {
- CHECK(core);
- TimeDelta delay = TimeDelta::FromMilliseconds(
- kDefaultNudgeDelayMilliseconds);
- switch (delay_type) {
- case IMMEDIATE:
- delay = TimeDelta::FromMilliseconds(
- kDefaultNudgeDelayMilliseconds);
- break;
- case ACCOMPANY_ONLY:
- delay = TimeDelta::FromSeconds(kDefaultShortPollIntervalSeconds);
- break;
- case CUSTOM:
- switch (model_type) {
- case PREFERENCES:
- delay = TimeDelta::FromMilliseconds(
- kPreferencesNudgeDelayMilliseconds);
- break;
- case SESSIONS:
- case FAVICON_IMAGES:
- case FAVICON_TRACKING:
- delay = core->scheduler()->GetSessionsCommitDelay();
- break;
- default:
- NOTREACHED();
- }
- break;
- default:
- NOTREACHED();
- }
- return delay;
- }
-};
-
-SyncManagerImpl::SyncManagerImpl(const std::string& name)
- : name_(name),
- change_delegate_(NULL),
- initialized_(false),
- observing_network_connectivity_changes_(false),
- invalidator_state_(DEFAULT_INVALIDATION_ERROR),
- traffic_recorder_(kMaxMessagesToRecord, kMaxMessageSizeToRecord),
- encryptor_(NULL),
- report_unrecoverable_error_function_(NULL),
- weak_ptr_factory_(this) {
- // Pre-fill |notification_info_map_|.
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- notification_info_map_.insert(
- std::make_pair(ModelTypeFromInt(i), NotificationInfo()));
- }
-
- // Bind message handlers.
- BindJsMessageHandler(
- "getNotificationState",
- &SyncManagerImpl::GetNotificationState);
- BindJsMessageHandler(
- "getNotificationInfo",
- &SyncManagerImpl::GetNotificationInfo);
- BindJsMessageHandler(
- "getRootNodeDetails",
- &SyncManagerImpl::GetRootNodeDetails);
- BindJsMessageHandler(
- "getNodeSummariesById",
- &SyncManagerImpl::GetNodeSummariesById);
- BindJsMessageHandler(
- "getNodeDetailsById",
- &SyncManagerImpl::GetNodeDetailsById);
- BindJsMessageHandler(
- "getAllNodes",
- &SyncManagerImpl::GetAllNodes);
- BindJsMessageHandler(
- "getChildNodeIds",
- &SyncManagerImpl::GetChildNodeIds);
- BindJsMessageHandler(
- "getClientServerTraffic",
- &SyncManagerImpl::GetClientServerTraffic);
-}
-
-SyncManagerImpl::~SyncManagerImpl() {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(!initialized_);
-}
-
-SyncManagerImpl::NotificationInfo::NotificationInfo() : total_count(0) {}
-SyncManagerImpl::NotificationInfo::~NotificationInfo() {}
-
-base::DictionaryValue* SyncManagerImpl::NotificationInfo::ToValue() const {
- base::DictionaryValue* value = new base::DictionaryValue();
- value->SetInteger("totalCount", total_count);
- value->SetString("payload", payload);
- return value;
-}
-
-bool SyncManagerImpl::VisiblePositionsDiffer(
- const syncable::EntryKernelMutation& mutation) const {
- const syncable::EntryKernel& a = mutation.original;
- const syncable::EntryKernel& b = mutation.mutated;
- if (!b.ShouldMaintainPosition())
- return false;
- if (!a.ref(UNIQUE_POSITION).Equals(b.ref(UNIQUE_POSITION)))
- return true;
- if (a.ref(syncable::PARENT_ID) != b.ref(syncable::PARENT_ID))
- return true;
- return false;
-}
-
-bool SyncManagerImpl::VisiblePropertiesDiffer(
- const syncable::EntryKernelMutation& mutation,
- Cryptographer* cryptographer) const {
- const syncable::EntryKernel& a = mutation.original;
- const syncable::EntryKernel& b = mutation.mutated;
- const sync_pb::EntitySpecifics& a_specifics = a.ref(SPECIFICS);
- const sync_pb::EntitySpecifics& b_specifics = b.ref(SPECIFICS);
- DCHECK_EQ(GetModelTypeFromSpecifics(a_specifics),
- GetModelTypeFromSpecifics(b_specifics));
- ModelType model_type = GetModelTypeFromSpecifics(b_specifics);
- // Suppress updates to items that aren't tracked by any browser model.
- if (model_type < FIRST_REAL_MODEL_TYPE ||
- !a.ref(syncable::UNIQUE_SERVER_TAG).empty()) {
- return false;
- }
- if (a.ref(syncable::IS_DIR) != b.ref(syncable::IS_DIR))
- return true;
- if (!AreSpecificsEqual(cryptographer,
- a.ref(syncable::SPECIFICS),
- b.ref(syncable::SPECIFICS))) {
- return true;
- }
- // We only care if the name has changed if neither specifics is encrypted
- // (encrypted nodes blow away the NON_UNIQUE_NAME).
- if (!a_specifics.has_encrypted() && !b_specifics.has_encrypted() &&
- a.ref(syncable::NON_UNIQUE_NAME) != b.ref(syncable::NON_UNIQUE_NAME))
- return true;
- if (VisiblePositionsDiffer(mutation))
- return true;
- return false;
-}
-
-void SyncManagerImpl::ThrowUnrecoverableError() {
- DCHECK(thread_checker_.CalledOnValidThread());
- ReadTransaction trans(FROM_HERE, GetUserShare());
- trans.GetWrappedTrans()->OnUnrecoverableError(
- FROM_HERE, "Simulating unrecoverable error for testing purposes.");
-}
-
-ModelTypeSet SyncManagerImpl::InitialSyncEndedTypes() {
- return directory()->InitialSyncEndedTypes();
-}
-
-ModelTypeSet SyncManagerImpl::GetTypesWithEmptyProgressMarkerToken(
- ModelTypeSet types) {
- ModelTypeSet result;
- for (ModelTypeSet::Iterator i = types.First(); i.Good(); i.Inc()) {
- sync_pb::DataTypeProgressMarker marker;
- directory()->GetDownloadProgress(i.Get(), &marker);
-
- if (marker.token().empty())
- result.Put(i.Get());
- }
- return result;
-}
-
-void SyncManagerImpl::ConfigureSyncer(
- ConfigureReason reason,
- ModelTypeSet to_download,
- ModelTypeSet to_purge,
- ModelTypeSet to_journal,
- ModelTypeSet to_unapply,
- const ModelSafeRoutingInfo& new_routing_info,
- const base::Closure& ready_task,
- const base::Closure& retry_task) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!ready_task.is_null());
- DCHECK(!retry_task.is_null());
-
- DVLOG(1) << "Configuring -"
- << "\n\t" << "current types: "
- << ModelTypeSetToString(GetRoutingInfoTypes(new_routing_info))
- << "\n\t" << "types to download: "
- << ModelTypeSetToString(to_download)
- << "\n\t" << "types to purge: "
- << ModelTypeSetToString(to_purge)
- << "\n\t" << "types to journal: "
- << ModelTypeSetToString(to_journal)
- << "\n\t" << "types to unapply: "
- << ModelTypeSetToString(to_unapply);
- if (!PurgeDisabledTypes(to_purge,
- to_journal,
- to_unapply)) {
- // We failed to cleanup the types. Invoke the ready task without actually
- // configuring any types. The caller should detect this as a configuration
- // failure and act appropriately.
- ready_task.Run();
- return;
- }
-
- ConfigurationParams params(GetSourceFromReason(reason),
- to_download,
- new_routing_info,
- ready_task,
- retry_task);
-
- scheduler_->Start(SyncScheduler::CONFIGURATION_MODE);
- scheduler_->ScheduleConfiguration(params);
-}
-
-void SyncManagerImpl::Init(
- const base::FilePath& database_location,
- const WeakHandle<JsEventHandler>& event_handler,
- const std::string& sync_server_and_path,
- int port,
- bool use_ssl,
- scoped_ptr<HttpPostProviderFactory> post_factory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- SyncManager::ChangeDelegate* change_delegate,
- const SyncCredentials& credentials,
- const std::string& invalidator_client_id,
- const std::string& restored_key_for_bootstrapping,
- const std::string& restored_keystore_key_for_bootstrapping,
- InternalComponentsFactory* internal_components_factory,
- Encryptor* encryptor,
- scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler,
- ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
- CancelationSignal* cancelation_signal) {
- CHECK(!initialized_);
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(post_factory.get());
- DCHECK(!credentials.email.empty());
- DCHECK(!credentials.sync_token.empty());
- DCHECK(cancelation_signal);
- DVLOG(1) << "SyncManager starting Init...";
-
- weak_handle_this_ = MakeWeakHandle(weak_ptr_factory_.GetWeakPtr());
-
- change_delegate_ = change_delegate;
-
- AddObserver(&js_sync_manager_observer_);
- SetJsEventHandler(event_handler);
-
- AddObserver(&debug_info_event_listener_);
-
- database_path_ = database_location.Append(
- syncable::Directory::kSyncDatabaseFilename);
- encryptor_ = encryptor;
- unrecoverable_error_handler_ = unrecoverable_error_handler.Pass();
- report_unrecoverable_error_function_ = report_unrecoverable_error_function;
-
- allstatus_.SetHasKeystoreKey(
- !restored_keystore_key_for_bootstrapping.empty());
- sync_encryption_handler_.reset(new SyncEncryptionHandlerImpl(
- &share_,
- encryptor,
- restored_key_for_bootstrapping,
- restored_keystore_key_for_bootstrapping));
- sync_encryption_handler_->AddObserver(this);
- sync_encryption_handler_->AddObserver(&debug_info_event_listener_);
- sync_encryption_handler_->AddObserver(&js_sync_encryption_handler_observer_);
-
- base::FilePath absolute_db_path = database_path_;
- DCHECK(absolute_db_path.IsAbsolute());
-
- scoped_ptr<syncable::DirectoryBackingStore> backing_store =
- internal_components_factory->BuildDirectoryBackingStore(
- credentials.email, absolute_db_path).Pass();
-
- DCHECK(backing_store.get());
- const std::string& username = credentials.email;
- share_.directory.reset(
- new syncable::Directory(
- backing_store.release(),
- unrecoverable_error_handler_.get(),
- report_unrecoverable_error_function_,
- sync_encryption_handler_.get(),
- sync_encryption_handler_->GetCryptographerUnsafe()));
-
- DVLOG(1) << "Username: " << username;
- if (!OpenDirectory(username)) {
- NotifyInitializationFailure();
- LOG(ERROR) << "Sync manager initialization failed!";
- return;
- }
-
- connection_manager_.reset(new SyncAPIServerConnectionManager(
- sync_server_and_path, port, use_ssl,
- post_factory.release(), cancelation_signal));
- connection_manager_->set_client_id(directory()->cache_guid());
- connection_manager_->AddListener(this);
-
- std::string sync_id = directory()->cache_guid();
-
- allstatus_.SetSyncId(sync_id);
- allstatus_.SetInvalidatorClientId(invalidator_client_id);
-
- DVLOG(1) << "Setting sync client ID: " << sync_id;
- DVLOG(1) << "Setting invalidator client ID: " << invalidator_client_id;
-
- // Build a SyncSessionContext and store the worker in it.
- DVLOG(1) << "Sync is bringing up SyncSessionContext.";
- std::vector<SyncEngineEventListener*> listeners;
- listeners.push_back(&allstatus_);
- listeners.push_back(this);
- session_context_ = internal_components_factory->BuildContext(
- connection_manager_.get(),
- directory(),
- workers,
- extensions_activity,
- listeners,
- &debug_info_event_listener_,
- &traffic_recorder_,
- invalidator_client_id).Pass();
- session_context_->set_account_name(credentials.email);
- scheduler_ = internal_components_factory->BuildScheduler(
- name_, session_context_.get(), cancelation_signal).Pass();
-
- scheduler_->Start(SyncScheduler::CONFIGURATION_MODE);
-
- initialized_ = true;
-
- net::NetworkChangeNotifier::AddIPAddressObserver(this);
- net::NetworkChangeNotifier::AddConnectionTypeObserver(this);
- observing_network_connectivity_changes_ = true;
-
- UpdateCredentials(credentials);
-
- NotifyInitializationSuccess();
-}
-
-void SyncManagerImpl::NotifyInitializationSuccess() {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnInitializationComplete(
- MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()),
- MakeWeakHandle(debug_info_event_listener_.GetWeakPtr()),
- true, InitialSyncEndedTypes()));
-}
-
-void SyncManagerImpl::NotifyInitializationFailure() {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnInitializationComplete(
- MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()),
- MakeWeakHandle(debug_info_event_listener_.GetWeakPtr()),
- false, ModelTypeSet()));
-}
-
-void SyncManagerImpl::OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) {
- // Does nothing.
-}
-
-void SyncManagerImpl::OnPassphraseAccepted() {
- // Does nothing.
-}
-
-void SyncManagerImpl::OnBootstrapTokenUpdated(
- const std::string& bootstrap_token,
- BootstrapTokenType type) {
- if (type == KEYSTORE_BOOTSTRAP_TOKEN)
- allstatus_.SetHasKeystoreKey(true);
-}
-
-void SyncManagerImpl::OnEncryptedTypesChanged(ModelTypeSet encrypted_types,
- bool encrypt_everything) {
- allstatus_.SetEncryptedTypes(encrypted_types);
-}
-
-void SyncManagerImpl::OnEncryptionComplete() {
- // Does nothing.
-}
-
-void SyncManagerImpl::OnCryptographerStateChanged(
- Cryptographer* cryptographer) {
- allstatus_.SetCryptographerReady(cryptographer->is_ready());
- allstatus_.SetCryptoHasPendingKeys(cryptographer->has_pending_keys());
- allstatus_.SetKeystoreMigrationTime(
- sync_encryption_handler_->migration_time());
-}
-
-void SyncManagerImpl::OnPassphraseTypeChanged(
- PassphraseType type,
- base::Time explicit_passphrase_time) {
- allstatus_.SetPassphraseType(type);
- allstatus_.SetKeystoreMigrationTime(
- sync_encryption_handler_->migration_time());
-}
-
-void SyncManagerImpl::StartSyncingNormally(
- const ModelSafeRoutingInfo& routing_info) {
- // Start the sync scheduler.
- // TODO(sync): We always want the newest set of routes when we switch back
- // to normal mode. Figure out how to enforce set_routing_info is always
- // appropriately set and that it's only modified when switching to normal
- // mode.
- DCHECK(thread_checker_.CalledOnValidThread());
- session_context_->set_routing_info(routing_info);
- scheduler_->Start(SyncScheduler::NORMAL_MODE);
-}
-
-syncable::Directory* SyncManagerImpl::directory() {
- return share_.directory.get();
-}
-
-const SyncScheduler* SyncManagerImpl::scheduler() const {
- return scheduler_.get();
-}
-
-bool SyncManagerImpl::GetHasInvalidAuthTokenForTest() const {
- return connection_manager_->HasInvalidAuthToken();
-}
-
-bool SyncManagerImpl::OpenDirectory(const std::string& username) {
- DCHECK(!initialized_) << "Should only happen once";
-
- // Set before Open().
- change_observer_ = MakeWeakHandle(js_mutation_event_observer_.AsWeakPtr());
- WeakHandle<syncable::TransactionObserver> transaction_observer(
- MakeWeakHandle(js_mutation_event_observer_.AsWeakPtr()));
-
- syncable::DirOpenResult open_result = syncable::NOT_INITIALIZED;
- open_result = directory()->Open(username, this, transaction_observer);
- if (open_result != syncable::OPENED) {
- LOG(ERROR) << "Could not open share for:" << username;
- return false;
- }
-
- // Unapplied datatypes (those that do not have initial sync ended set) get
- // re-downloaded during any configuration. But, it's possible for a datatype
- // to have a progress marker but not have initial sync ended yet, making
- // it a candidate for migration. This is a problem, as the DataTypeManager
- // does not support a migration while it's already in the middle of a
- // configuration. As a result, any partially synced datatype can stall the
- // DTM, waiting for the configuration to complete, which it never will due
- // to the migration error. In addition, a partially synced nigori will
- // trigger the migration logic before the backend is initialized, resulting
- // in crashes. We therefore detect and purge any partially synced types as
- // part of initialization.
- if (!PurgePartiallySyncedTypes())
- return false;
-
- return true;
-}
-
-bool SyncManagerImpl::PurgePartiallySyncedTypes() {
- ModelTypeSet partially_synced_types = ModelTypeSet::All();
- partially_synced_types.RemoveAll(InitialSyncEndedTypes());
- partially_synced_types.RemoveAll(GetTypesWithEmptyProgressMarkerToken(
- ModelTypeSet::All()));
-
- DVLOG(1) << "Purging partially synced types "
- << ModelTypeSetToString(partially_synced_types);
- UMA_HISTOGRAM_COUNTS("Sync.PartiallySyncedTypes",
- partially_synced_types.Size());
- if (partially_synced_types.Empty())
- return true;
- return directory()->PurgeEntriesWithTypeIn(partially_synced_types,
- ModelTypeSet(),
- ModelTypeSet());
-}
-
-bool SyncManagerImpl::PurgeDisabledTypes(
- ModelTypeSet to_purge,
- ModelTypeSet to_journal,
- ModelTypeSet to_unapply) {
- if (to_purge.Empty())
- return true;
- DVLOG(1) << "Purging disabled types " << ModelTypeSetToString(to_purge);
- DCHECK(to_purge.HasAll(to_journal));
- DCHECK(to_purge.HasAll(to_unapply));
- return directory()->PurgeEntriesWithTypeIn(to_purge, to_journal, to_unapply);
-}
-
-void SyncManagerImpl::UpdateCredentials(const SyncCredentials& credentials) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(initialized_);
- DCHECK(!credentials.email.empty());
- DCHECK(!credentials.sync_token.empty());
-
- observing_network_connectivity_changes_ = true;
- if (!connection_manager_->SetAuthToken(credentials.sync_token))
- return; // Auth token is known to be invalid, so exit early.
-
- scheduler_->OnCredentialsUpdated();
-
- // TODO(zea): pass the credential age to the debug info event listener.
-}
-
-void SyncManagerImpl::AddObserver(SyncManager::Observer* observer) {
- DCHECK(thread_checker_.CalledOnValidThread());
- observers_.AddObserver(observer);
-}
-
-void SyncManagerImpl::RemoveObserver(SyncManager::Observer* observer) {
- DCHECK(thread_checker_.CalledOnValidThread());
- observers_.RemoveObserver(observer);
-}
-
-void SyncManagerImpl::ShutdownOnSyncThread() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // Prevent any in-flight method calls from running. Also
- // invalidates |weak_handle_this_| and |change_observer_|.
- weak_ptr_factory_.InvalidateWeakPtrs();
- js_mutation_event_observer_.InvalidateWeakPtrs();
-
- scheduler_.reset();
- session_context_.reset();
-
- if (sync_encryption_handler_) {
- sync_encryption_handler_->RemoveObserver(&debug_info_event_listener_);
- sync_encryption_handler_->RemoveObserver(this);
- }
-
- SetJsEventHandler(WeakHandle<JsEventHandler>());
- RemoveObserver(&js_sync_manager_observer_);
-
- RemoveObserver(&debug_info_event_listener_);
-
- // |connection_manager_| may end up being NULL here in tests (in synchronous
- // initialization mode).
- //
- // TODO(akalin): Fix this behavior.
- if (connection_manager_)
- connection_manager_->RemoveListener(this);
- connection_manager_.reset();
-
- net::NetworkChangeNotifier::RemoveIPAddressObserver(this);
- net::NetworkChangeNotifier::RemoveConnectionTypeObserver(this);
- observing_network_connectivity_changes_ = false;
-
- if (initialized_ && directory()) {
- directory()->SaveChanges();
- }
-
- share_.directory.reset();
-
- change_delegate_ = NULL;
-
- initialized_ = false;
-
- // We reset these here, since only now we know they will not be
- // accessed from other threads (since we shut down everything).
- change_observer_.Reset();
- weak_handle_this_.Reset();
-}
-
-void SyncManagerImpl::OnIPAddressChanged() {
- if (!observing_network_connectivity_changes_) {
- DVLOG(1) << "IP address change dropped.";
- return;
- }
- DVLOG(1) << "IP address change detected.";
- OnNetworkConnectivityChangedImpl();
-}
-
-void SyncManagerImpl::OnConnectionTypeChanged(
- net::NetworkChangeNotifier::ConnectionType) {
- if (!observing_network_connectivity_changes_) {
- DVLOG(1) << "Connection type change dropped.";
- return;
- }
- DVLOG(1) << "Connection type change detected.";
- OnNetworkConnectivityChangedImpl();
-}
-
-void SyncManagerImpl::OnNetworkConnectivityChangedImpl() {
- DCHECK(thread_checker_.CalledOnValidThread());
- scheduler_->OnConnectionStatusChange();
-}
-
-void SyncManagerImpl::OnServerConnectionEvent(
- const ServerConnectionEvent& event) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (event.connection_code ==
- HttpResponse::SERVER_CONNECTION_OK) {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnConnectionStatusChange(CONNECTION_OK));
- }
-
- if (event.connection_code == HttpResponse::SYNC_AUTH_ERROR) {
- observing_network_connectivity_changes_ = false;
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnConnectionStatusChange(CONNECTION_AUTH_ERROR));
- }
-
- if (event.connection_code == HttpResponse::SYNC_SERVER_ERROR) {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnConnectionStatusChange(CONNECTION_SERVER_ERROR));
- }
-}
-
-void SyncManagerImpl::HandleTransactionCompleteChangeEvent(
- ModelTypeSet models_with_changes) {
- // This notification happens immediately after the transaction mutex is
- // released. This allows work to be performed without blocking other threads
- // from acquiring a transaction.
- if (!change_delegate_)
- return;
-
- // Call commit.
- for (ModelTypeSet::Iterator it = models_with_changes.First();
- it.Good(); it.Inc()) {
- change_delegate_->OnChangesComplete(it.Get());
- change_observer_.Call(
- FROM_HERE,
- &SyncManager::ChangeObserver::OnChangesComplete,
- it.Get());
- }
-}
-
-ModelTypeSet
-SyncManagerImpl::HandleTransactionEndingChangeEvent(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- syncable::BaseTransaction* trans) {
- // This notification happens immediately before a syncable WriteTransaction
- // falls out of scope. It happens while the channel mutex is still held,
- // and while the transaction mutex is held, so it cannot be re-entrant.
- if (!change_delegate_ || change_records_.empty())
- return ModelTypeSet();
-
- // This will continue the WriteTransaction using a read only wrapper.
- // This is the last chance for read to occur in the WriteTransaction
- // that's closing. This special ReadTransaction will not close the
- // underlying transaction.
- ReadTransaction read_trans(GetUserShare(), trans);
-
- ModelTypeSet models_with_changes;
- for (ChangeRecordMap::const_iterator it = change_records_.begin();
- it != change_records_.end(); ++it) {
- DCHECK(!it->second.Get().empty());
- ModelType type = ModelTypeFromInt(it->first);
- change_delegate_->
- OnChangesApplied(type, trans->directory()->GetTransactionVersion(type),
- &read_trans, it->second);
- change_observer_.Call(FROM_HERE,
- &SyncManager::ChangeObserver::OnChangesApplied,
- type, write_transaction_info.Get().id, it->second);
- models_with_changes.Put(type);
- }
- change_records_.clear();
- return models_with_changes;
-}
-
-void SyncManagerImpl::HandleCalculateChangesChangeEventFromSyncApi(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- syncable::BaseTransaction* trans,
- std::vector<int64>* entries_changed) {
- // We have been notified about a user action changing a sync model.
- LOG_IF(WARNING, !change_records_.empty()) <<
- "CALCULATE_CHANGES called with unapplied old changes.";
-
- // The mutated model type, or UNSPECIFIED if nothing was mutated.
- ModelTypeSet mutated_model_types;
-
- const syncable::ImmutableEntryKernelMutationMap& mutations =
- write_transaction_info.Get().mutations;
- for (syncable::EntryKernelMutationMap::const_iterator it =
- mutations.Get().begin(); it != mutations.Get().end(); ++it) {
- if (!it->second.mutated.ref(syncable::IS_UNSYNCED)) {
- continue;
- }
-
- ModelType model_type =
- GetModelTypeFromSpecifics(it->second.mutated.ref(SPECIFICS));
- if (model_type < FIRST_REAL_MODEL_TYPE) {
- NOTREACHED() << "Permanent or underspecified item changed via syncapi.";
- continue;
- }
-
- // Found real mutation.
- if (model_type != UNSPECIFIED) {
- mutated_model_types.Put(model_type);
- entries_changed->push_back(it->second.mutated.ref(syncable::META_HANDLE));
- }
- }
-
- // Nudge if necessary.
- if (!mutated_model_types.Empty()) {
- if (weak_handle_this_.IsInitialized()) {
- weak_handle_this_.Call(FROM_HERE,
- &SyncManagerImpl::RequestNudgeForDataTypes,
- FROM_HERE,
- mutated_model_types);
- } else {
- NOTREACHED();
- }
- }
-}
-
-void SyncManagerImpl::SetExtraChangeRecordData(int64 id,
- ModelType type, ChangeReorderBuffer* buffer,
- Cryptographer* cryptographer, const syncable::EntryKernel& original,
- bool existed_before, bool exists_now) {
- // If this is a deletion and the datatype was encrypted, we need to decrypt it
- // and attach it to the buffer.
- if (!exists_now && existed_before) {
- sync_pb::EntitySpecifics original_specifics(original.ref(SPECIFICS));
- if (type == PASSWORDS) {
- // Passwords must use their own legacy ExtraPasswordChangeRecordData.
- scoped_ptr<sync_pb::PasswordSpecificsData> data(
- DecryptPasswordSpecifics(original_specifics, cryptographer));
- if (!data) {
- NOTREACHED();
- return;
- }
- buffer->SetExtraDataForId(id, new ExtraPasswordChangeRecordData(*data));
- } else if (original_specifics.has_encrypted()) {
- // All other datatypes can just create a new unencrypted specifics and
- // attach it.
- const sync_pb::EncryptedData& encrypted = original_specifics.encrypted();
- if (!cryptographer->Decrypt(encrypted, &original_specifics)) {
- NOTREACHED();
- return;
- }
- }
- buffer->SetSpecificsForId(id, original_specifics);
- }
-}
-
-void SyncManagerImpl::HandleCalculateChangesChangeEventFromSyncer(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- syncable::BaseTransaction* trans,
- std::vector<int64>* entries_changed) {
- // We only expect one notification per sync step, so change_buffers_ should
- // contain no pending entries.
- LOG_IF(WARNING, !change_records_.empty()) <<
- "CALCULATE_CHANGES called with unapplied old changes.";
-
- ChangeReorderBuffer change_buffers[MODEL_TYPE_COUNT];
-
- Cryptographer* crypto = directory()->GetCryptographer(trans);
- const syncable::ImmutableEntryKernelMutationMap& mutations =
- write_transaction_info.Get().mutations;
- for (syncable::EntryKernelMutationMap::const_iterator it =
- mutations.Get().begin(); it != mutations.Get().end(); ++it) {
- bool existed_before = !it->second.original.ref(syncable::IS_DEL);
- bool exists_now = !it->second.mutated.ref(syncable::IS_DEL);
-
- // Omit items that aren't associated with a model.
- ModelType type =
- GetModelTypeFromSpecifics(it->second.mutated.ref(SPECIFICS));
- if (type < FIRST_REAL_MODEL_TYPE)
- continue;
-
- int64 handle = it->first;
- if (exists_now && !existed_before)
- change_buffers[type].PushAddedItem(handle);
- else if (!exists_now && existed_before)
- change_buffers[type].PushDeletedItem(handle);
- else if (exists_now && existed_before &&
- VisiblePropertiesDiffer(it->second, crypto)) {
- change_buffers[type].PushUpdatedItem(handle);
- }
-
- SetExtraChangeRecordData(handle, type, &change_buffers[type], crypto,
- it->second.original, existed_before, exists_now);
- }
-
- ReadTransaction read_trans(GetUserShare(), trans);
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- if (!change_buffers[i].IsEmpty()) {
- if (change_buffers[i].GetAllChangesInTreeOrder(&read_trans,
- &(change_records_[i]))) {
- for (size_t j = 0; j < change_records_[i].Get().size(); ++j)
- entries_changed->push_back((change_records_[i].Get())[j].id);
- }
- if (change_records_[i].Get().empty())
- change_records_.erase(i);
- }
- }
-}
-
-TimeDelta SyncManagerImpl::GetNudgeDelayTimeDelta(
- const ModelType& model_type) {
- return NudgeStrategy::GetNudgeDelayTimeDelta(model_type, this);
-}
-
-void SyncManagerImpl::RequestNudgeForDataTypes(
- const tracked_objects::Location& nudge_location,
- ModelTypeSet types) {
- debug_info_event_listener_.OnNudgeFromDatatype(types.First().Get());
-
- // TODO(lipalani) : Calculate the nudge delay based on all types.
- base::TimeDelta nudge_delay = NudgeStrategy::GetNudgeDelayTimeDelta(
- types.First().Get(),
- this);
- allstatus_.IncrementNudgeCounter(NUDGE_SOURCE_LOCAL);
- scheduler_->ScheduleLocalNudge(nudge_delay,
- types,
- nudge_location);
-}
-
-void SyncManagerImpl::OnSyncEngineEvent(const SyncEngineEvent& event) {
- DCHECK(thread_checker_.CalledOnValidThread());
- // Only send an event if this is due to a cycle ending and this cycle
- // concludes a canonical "sync" process; that is, based on what is known
- // locally we are "all happy" and up-to-date. There may be new changes on
- // the server, but we'll get them on a subsequent sync.
- //
- // Notifications are sent at the end of every sync cycle, regardless of
- // whether we should sync again.
- if (event.what_happened == SyncEngineEvent::SYNC_CYCLE_ENDED) {
- if (!initialized_) {
- DVLOG(1) << "OnSyncCycleCompleted not sent because sync api is not "
- << "initialized";
- return;
- }
-
- DVLOG(1) << "Sending OnSyncCycleCompleted";
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnSyncCycleCompleted(event.snapshot));
- }
-
- if (event.what_happened == SyncEngineEvent::STOP_SYNCING_PERMANENTLY) {
- FOR_EACH_OBSERVER(SyncManager::Observer, observers_,
- OnStopSyncingPermanently());
- return;
- }
-
- if (event.what_happened == SyncEngineEvent::ACTIONABLE_ERROR) {
- FOR_EACH_OBSERVER(
- SyncManager::Observer, observers_,
- OnActionableError(
- event.snapshot.model_neutral_state().sync_protocol_error));
- return;
- }
-}
-
-void SyncManagerImpl::SetJsEventHandler(
- const WeakHandle<JsEventHandler>& event_handler) {
- js_event_handler_ = event_handler;
- js_sync_manager_observer_.SetJsEventHandler(js_event_handler_);
- js_mutation_event_observer_.SetJsEventHandler(js_event_handler_);
- js_sync_encryption_handler_observer_.SetJsEventHandler(js_event_handler_);
-}
-
-void SyncManagerImpl::ProcessJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) {
- if (!initialized_) {
- NOTREACHED();
- return;
- }
-
- if (!reply_handler.IsInitialized()) {
- DVLOG(1) << "Uninitialized reply handler; dropping unknown message "
- << name << " with args " << args.ToString();
- return;
- }
-
- JsMessageHandler js_message_handler = js_message_handlers_[name];
- if (js_message_handler.is_null()) {
- DVLOG(1) << "Dropping unknown message " << name
- << " with args " << args.ToString();
- return;
- }
-
- reply_handler.Call(FROM_HERE,
- &JsReplyHandler::HandleJsReply,
- name, js_message_handler.Run(args));
-}
-
-void SyncManagerImpl::BindJsMessageHandler(
- const std::string& name,
- UnboundJsMessageHandler unbound_message_handler) {
- js_message_handlers_[name] =
- base::Bind(unbound_message_handler, base::Unretained(this));
-}
-
-base::DictionaryValue* SyncManagerImpl::NotificationInfoToValue(
- const NotificationInfoMap& notification_info) {
- base::DictionaryValue* value = new base::DictionaryValue();
-
- for (NotificationInfoMap::const_iterator it = notification_info.begin();
- it != notification_info.end(); ++it) {
- const std::string model_type_str = ModelTypeToString(it->first);
- value->Set(model_type_str, it->second.ToValue());
- }
-
- return value;
-}
-
-std::string SyncManagerImpl::NotificationInfoToString(
- const NotificationInfoMap& notification_info) {
- scoped_ptr<base::DictionaryValue> value(
- NotificationInfoToValue(notification_info));
- std::string str;
- base::JSONWriter::Write(value.get(), &str);
- return str;
-}
-
-JsArgList SyncManagerImpl::GetNotificationState(
- const JsArgList& args) {
- const std::string& notification_state =
- InvalidatorStateToString(invalidator_state_);
- DVLOG(1) << "GetNotificationState: " << notification_state;
- base::ListValue return_args;
- return_args.Append(new base::StringValue(notification_state));
- return JsArgList(&return_args);
-}
-
-JsArgList SyncManagerImpl::GetNotificationInfo(
- const JsArgList& args) {
- DVLOG(1) << "GetNotificationInfo: "
- << NotificationInfoToString(notification_info_map_);
- base::ListValue return_args;
- return_args.Append(NotificationInfoToValue(notification_info_map_));
- return JsArgList(&return_args);
-}
-
-JsArgList SyncManagerImpl::GetRootNodeDetails(
- const JsArgList& args) {
- ReadTransaction trans(FROM_HERE, GetUserShare());
- ReadNode root(&trans);
- root.InitByRootLookup();
- base::ListValue return_args;
- return_args.Append(root.GetDetailsAsValue());
- return JsArgList(&return_args);
-}
-
-JsArgList SyncManagerImpl::GetClientServerTraffic(
- const JsArgList& args) {
- base::ListValue return_args;
- base::ListValue* value = traffic_recorder_.ToValue();
- if (value != NULL)
- return_args.Append(value);
- return JsArgList(&return_args);
-}
-
-namespace {
-
-int64 GetId(const base::ListValue& ids, int i) {
- std::string id_str;
- if (!ids.GetString(i, &id_str)) {
- return kInvalidId;
- }
- int64 id = kInvalidId;
- if (!base::StringToInt64(id_str, &id)) {
- return kInvalidId;
- }
- return id;
-}
-
-JsArgList GetNodeInfoById(
- const JsArgList& args,
- UserShare* user_share,
- base::DictionaryValue* (BaseNode::*info_getter)() const) {
- CHECK(info_getter);
- base::ListValue return_args;
- base::ListValue* node_summaries = new base::ListValue();
- return_args.Append(node_summaries);
- const base::ListValue* id_list = NULL;
- ReadTransaction trans(FROM_HERE, user_share);
- if (args.Get().GetList(0, &id_list)) {
- CHECK(id_list);
- for (size_t i = 0; i < id_list->GetSize(); ++i) {
- int64 id = GetId(*id_list, i);
- if (id == kInvalidId) {
- continue;
- }
- ReadNode node(&trans);
- if (node.InitByIdLookup(id) != BaseNode::INIT_OK) {
- continue;
- }
- node_summaries->Append((node.*info_getter)());
- }
- }
- return JsArgList(&return_args);
-}
-
-} // namespace
-
-JsArgList SyncManagerImpl::GetNodeSummariesById(const JsArgList& args) {
- return GetNodeInfoById(args, GetUserShare(), &BaseNode::GetSummaryAsValue);
-}
-
-JsArgList SyncManagerImpl::GetNodeDetailsById(const JsArgList& args) {
- return GetNodeInfoById(args, GetUserShare(), &BaseNode::GetDetailsAsValue);
-}
-
-JsArgList SyncManagerImpl::GetAllNodes(const JsArgList& args) {
- base::ListValue return_args;
- base::ListValue* result = new base::ListValue();
- return_args.Append(result);
-
- ReadTransaction trans(FROM_HERE, GetUserShare());
- std::vector<const syncable::EntryKernel*> entry_kernels;
- trans.GetDirectory()->GetAllEntryKernels(trans.GetWrappedTrans(),
- &entry_kernels);
-
- for (std::vector<const syncable::EntryKernel*>::const_iterator it =
- entry_kernels.begin(); it != entry_kernels.end(); ++it) {
- result->Append((*it)->ToValue(trans.GetCryptographer()));
- }
-
- return JsArgList(&return_args);
-}
-
-JsArgList SyncManagerImpl::GetChildNodeIds(const JsArgList& args) {
- base::ListValue return_args;
- base::ListValue* child_ids = new base::ListValue();
- return_args.Append(child_ids);
- int64 id = GetId(args.Get(), 0);
- if (id != kInvalidId) {
- ReadTransaction trans(FROM_HERE, GetUserShare());
- syncable::Directory::Metahandles child_handles;
- trans.GetDirectory()->GetChildHandlesByHandle(trans.GetWrappedTrans(),
- id, &child_handles);
- for (syncable::Directory::Metahandles::const_iterator it =
- child_handles.begin(); it != child_handles.end(); ++it) {
- child_ids->Append(new base::StringValue(base::Int64ToString(*it)));
- }
- }
- return JsArgList(&return_args);
-}
-
-void SyncManagerImpl::UpdateNotificationInfo(
- const ObjectIdInvalidationMap& invalidation_map) {
- ObjectIdSet ids = invalidation_map.GetObjectIds();
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- ModelType type = UNSPECIFIED;
- if (!ObjectIdToRealModelType(*it, &type)) {
- continue;
- }
- const SingleObjectInvalidationSet& type_invalidations =
- invalidation_map.ForObject(*it);
- for (SingleObjectInvalidationSet::const_iterator inv_it =
- type_invalidations.begin(); inv_it != type_invalidations.end();
- ++inv_it) {
- NotificationInfo* info = &notification_info_map_[type];
- info->total_count++;
- std::string payload =
- inv_it->is_unknown_version() ? "UNKNOWN" : inv_it->payload();
- info->payload = payload;
- }
- }
-}
-
-void SyncManagerImpl::OnInvalidatorStateChange(InvalidatorState state) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- const std::string& state_str = InvalidatorStateToString(state);
- invalidator_state_ = state;
- DVLOG(1) << "Invalidator state changed to: " << state_str;
- const bool notifications_enabled =
- (invalidator_state_ == INVALIDATIONS_ENABLED);
- allstatus_.SetNotificationsEnabled(notifications_enabled);
- scheduler_->SetNotificationsEnabled(notifications_enabled);
-
- if (js_event_handler_.IsInitialized()) {
- base::DictionaryValue details;
- details.SetString("state", state_str);
- js_event_handler_.Call(FROM_HERE,
- &JsEventHandler::HandleJsEvent,
- "onNotificationStateChange",
- JsEventDetails(&details));
- }
-}
-
-void SyncManagerImpl::OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // We should never receive IDs from non-sync objects.
- ObjectIdSet ids = invalidation_map.GetObjectIds();
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- ModelType type;
- if (!ObjectIdToRealModelType(*it, &type)) {
- DLOG(WARNING) << "Notification has invalid id: " << ObjectIdToString(*it);
- }
- }
-
- if (invalidation_map.Empty()) {
- LOG(WARNING) << "Sync received invalidation without any type information.";
- } else {
- allstatus_.IncrementNudgeCounter(NUDGE_SOURCE_NOTIFICATION);
- scheduler_->ScheduleInvalidationNudge(
- TimeDelta::FromMilliseconds(kSyncSchedulerDelayMsec),
- invalidation_map, FROM_HERE);
- allstatus_.IncrementNotificationsReceived();
- UpdateNotificationInfo(invalidation_map);
- debug_info_event_listener_.OnIncomingNotification(invalidation_map);
- }
-
- if (js_event_handler_.IsInitialized()) {
- base::DictionaryValue details;
- base::ListValue* changed_types = new base::ListValue();
- details.Set("changedTypes", changed_types);
-
- ObjectIdSet id_set = invalidation_map.GetObjectIds();
- ModelTypeSet nudged_types = ObjectIdSetToModelTypeSet(id_set);
- DCHECK(!nudged_types.Empty());
- for (ModelTypeSet::Iterator it = nudged_types.First();
- it.Good(); it.Inc()) {
- const std::string model_type_str = ModelTypeToString(it.Get());
- changed_types->Append(new base::StringValue(model_type_str));
- }
- details.SetString("source", "REMOTE_INVALIDATION");
- js_event_handler_.Call(FROM_HERE,
- &JsEventHandler::HandleJsEvent,
- "onIncomingNotification",
- JsEventDetails(&details));
- }
-}
-
-void SyncManagerImpl::RefreshTypes(ModelTypeSet types) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (types.Empty()) {
- LOG(WARNING) << "Sync received refresh request with no types specified.";
- } else {
- allstatus_.IncrementNudgeCounter(NUDGE_SOURCE_LOCAL_REFRESH);
- scheduler_->ScheduleLocalRefreshRequest(
- TimeDelta::FromMilliseconds(kSyncRefreshDelayMsec),
- types, FROM_HERE);
- }
-
- if (js_event_handler_.IsInitialized()) {
- base::DictionaryValue details;
- base::ListValue* changed_types = new base::ListValue();
- details.Set("changedTypes", changed_types);
- for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
- const std::string& model_type_str =
- ModelTypeToString(it.Get());
- changed_types->Append(new base::StringValue(model_type_str));
- }
- details.SetString("source", "LOCAL_INVALIDATION");
- js_event_handler_.Call(FROM_HERE,
- &JsEventHandler::HandleJsEvent,
- "onIncomingNotification",
- JsEventDetails(&details));
- }
-}
-
-SyncStatus SyncManagerImpl::GetDetailedStatus() const {
- return allstatus_.status();
-}
-
-void SyncManagerImpl::SaveChanges() {
- directory()->SaveChanges();
-}
-
-UserShare* SyncManagerImpl::GetUserShare() {
- DCHECK(initialized_);
- return &share_;
-}
-
-const std::string SyncManagerImpl::cache_guid() {
- DCHECK(initialized_);
- return directory()->cache_guid();
-}
-
-bool SyncManagerImpl::ReceivedExperiment(Experiments* experiments) {
- ReadTransaction trans(FROM_HERE, GetUserShare());
- ReadNode nigori_node(&trans);
- if (nigori_node.InitByTagLookup(kNigoriTag) != BaseNode::INIT_OK) {
- DVLOG(1) << "Couldn't find Nigori node.";
- return false;
- }
- bool found_experiment = false;
-
- ReadNode autofill_culling_node(&trans);
- if (autofill_culling_node.InitByClientTagLookup(
- syncer::EXPERIMENTS,
- syncer::kAutofillCullingTag) == BaseNode::INIT_OK &&
- autofill_culling_node.GetExperimentsSpecifics().
- autofill_culling().enabled()) {
- experiments->autofill_culling = true;
- found_experiment = true;
- }
-
- ReadNode favicon_sync_node(&trans);
- if (favicon_sync_node.InitByClientTagLookup(
- syncer::EXPERIMENTS,
- syncer::kFaviconSyncTag) == BaseNode::INIT_OK) {
- experiments->favicon_sync_limit =
- favicon_sync_node.GetExperimentsSpecifics().favicon_sync().
- favicon_sync_limit();
- found_experiment = true;
- }
-
- ReadNode pre_commit_update_avoidance_node(&trans);
- if (pre_commit_update_avoidance_node.InitByClientTagLookup(
- syncer::EXPERIMENTS,
- syncer::kPreCommitUpdateAvoidanceTag) == BaseNode::INIT_OK) {
- session_context_->set_server_enabled_pre_commit_update_avoidance(
- pre_commit_update_avoidance_node.GetExperimentsSpecifics().
- pre_commit_update_avoidance().enabled());
- // We don't bother setting found_experiment. The frontend doesn't need to
- // know about this.
- }
-
- return found_experiment;
-}
-
-bool SyncManagerImpl::HasUnsyncedItems() {
- ReadTransaction trans(FROM_HERE, GetUserShare());
- return (trans.GetWrappedTrans()->directory()->unsynced_entity_count() != 0);
-}
-
-SyncEncryptionHandler* SyncManagerImpl::GetEncryptionHandler() {
- return sync_encryption_handler_.get();
-}
-
-// static.
-int SyncManagerImpl::GetDefaultNudgeDelay() {
- return kDefaultNudgeDelayMilliseconds;
-}
-
-// static.
-int SyncManagerImpl::GetPreferencesNudgeDelay() {
- return kPreferencesNudgeDelayMilliseconds;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/sync_manager_impl.h b/chromium/sync/internal_api/sync_manager_impl.h
deleted file mode 100644
index 5d5cfaab552..00000000000
--- a/chromium/sync/internal_api/sync_manager_impl.h
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_SYNC_MANAGER_H_
-#define SYNC_INTERNAL_API_SYNC_MANAGER_H_
-
-#include <string>
-#include <vector>
-
-#include "net/base/network_change_notifier.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/all_status.h"
-#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/sync_engine_event.h"
-#include "sync/engine/traffic_recorder.h"
-#include "sync/internal_api/change_reorder_buffer.h"
-#include "sync/internal_api/debug_info_event_listener.h"
-#include "sync/internal_api/js_mutation_event_observer.h"
-#include "sync/internal_api/js_sync_encryption_handler_observer.h"
-#include "sync/internal_api/js_sync_manager_observer.h"
-#include "sync/internal_api/public/sync_manager.h"
-#include "sync/internal_api/public/user_share.h"
-#include "sync/internal_api/sync_encryption_handler_impl.h"
-#include "sync/js/js_backend.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/invalidator_state.h"
-#include "sync/syncable/directory_change_delegate.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/time.h"
-
-namespace syncer {
-
-class SyncAPIServerConnectionManager;
-class WriteNode;
-class WriteTransaction;
-
-namespace sessions {
-class SyncSessionContext;
-}
-
-// SyncManager encapsulates syncable::Directory and serves as the parent of all
-// other objects in the sync API. If multiple threads interact with the same
-// local sync repository (i.e. the same sqlite database), they should share a
-// single SyncManager instance. The caller should typically create one
-// SyncManager for the lifetime of a user session.
-//
-// Unless stated otherwise, all methods of SyncManager should be called on the
-// same thread.
-class SYNC_EXPORT_PRIVATE SyncManagerImpl :
- public SyncManager,
- public net::NetworkChangeNotifier::IPAddressObserver,
- public net::NetworkChangeNotifier::ConnectionTypeObserver,
- public JsBackend,
- public SyncEngineEventListener,
- public ServerConnectionEventListener,
- public syncable::DirectoryChangeDelegate,
- public SyncEncryptionHandler::Observer {
- public:
- // Create an uninitialized SyncManager. Callers must Init() before using.
- explicit SyncManagerImpl(const std::string& name);
- virtual ~SyncManagerImpl();
-
- // SyncManager implementation.
- virtual void Init(
- const base::FilePath& database_location,
- const WeakHandle<JsEventHandler>& event_handler,
- const std::string& sync_server_and_path,
- int sync_server_port,
- bool use_ssl,
- scoped_ptr<HttpPostProviderFactory> post_factory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- SyncManager::ChangeDelegate* change_delegate,
- const SyncCredentials& credentials,
- const std::string& invalidator_client_id,
- const std::string& restored_key_for_bootstrapping,
- const std::string& restored_keystore_key_for_bootstrapping,
- InternalComponentsFactory* internal_components_factory,
- Encryptor* encryptor,
- scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler,
- ReportUnrecoverableErrorFunction
- report_unrecoverable_error_function,
- CancelationSignal* cancelation_signal) OVERRIDE;
- virtual void ThrowUnrecoverableError() OVERRIDE;
- virtual ModelTypeSet InitialSyncEndedTypes() OVERRIDE;
- virtual ModelTypeSet GetTypesWithEmptyProgressMarkerToken(
- ModelTypeSet types) OVERRIDE;
- virtual bool PurgePartiallySyncedTypes() OVERRIDE;
- virtual void UpdateCredentials(const SyncCredentials& credentials) OVERRIDE;
- virtual void StartSyncingNormally(
- const ModelSafeRoutingInfo& routing_info) OVERRIDE;
- virtual void ConfigureSyncer(
- ConfigureReason reason,
- ModelTypeSet to_download,
- ModelTypeSet to_purge,
- ModelTypeSet to_journal,
- ModelTypeSet to_unapply,
- const ModelSafeRoutingInfo& new_routing_info,
- const base::Closure& ready_task,
- const base::Closure& retry_task) OVERRIDE;
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE;
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE;
- virtual void AddObserver(SyncManager::Observer* observer) OVERRIDE;
- virtual void RemoveObserver(SyncManager::Observer* observer) OVERRIDE;
- virtual SyncStatus GetDetailedStatus() const OVERRIDE;
- virtual void SaveChanges() OVERRIDE;
- virtual void ShutdownOnSyncThread() OVERRIDE;
- virtual UserShare* GetUserShare() OVERRIDE;
- virtual const std::string cache_guid() OVERRIDE;
- virtual bool ReceivedExperiment(Experiments* experiments) OVERRIDE;
- virtual bool HasUnsyncedItems() OVERRIDE;
- virtual SyncEncryptionHandler* GetEncryptionHandler() OVERRIDE;
-
- // SyncEncryptionHandler::Observer implementation.
- virtual void OnPassphraseRequired(
- PassphraseRequiredReason reason,
- const sync_pb::EncryptedData& pending_keys) OVERRIDE;
- virtual void OnPassphraseAccepted() OVERRIDE;
- virtual void OnBootstrapTokenUpdated(
- const std::string& bootstrap_token,
- BootstrapTokenType type) OVERRIDE;
- virtual void OnEncryptedTypesChanged(
- ModelTypeSet encrypted_types,
- bool encrypt_everything) OVERRIDE;
- virtual void OnEncryptionComplete() OVERRIDE;
- virtual void OnCryptographerStateChanged(
- Cryptographer* cryptographer) OVERRIDE;
- virtual void OnPassphraseTypeChanged(
- PassphraseType type,
- base::Time explicit_passphrase_time) OVERRIDE;
-
- static int GetDefaultNudgeDelay();
- static int GetPreferencesNudgeDelay();
-
- // SyncEngineEventListener implementation.
- virtual void OnSyncEngineEvent(const SyncEngineEvent& event) OVERRIDE;
-
- // ServerConnectionEventListener implementation.
- virtual void OnServerConnectionEvent(
- const ServerConnectionEvent& event) OVERRIDE;
-
- // JsBackend implementation.
- virtual void SetJsEventHandler(
- const WeakHandle<JsEventHandler>& event_handler) OVERRIDE;
- virtual void ProcessJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) OVERRIDE;
-
- // DirectoryChangeDelegate implementation.
- // This listener is called upon completion of a syncable transaction, and
- // builds the list of sync-engine initiated changes that will be forwarded to
- // the SyncManager's Observers.
- virtual void HandleTransactionCompleteChangeEvent(
- ModelTypeSet models_with_changes) OVERRIDE;
- virtual ModelTypeSet HandleTransactionEndingChangeEvent(
- const syncable::ImmutableWriteTransactionInfo& write_transaction_info,
- syncable::BaseTransaction* trans) OVERRIDE;
- virtual void HandleCalculateChangesChangeEventFromSyncApi(
- const syncable::ImmutableWriteTransactionInfo& write_transaction_info,
- syncable::BaseTransaction* trans,
- std::vector<int64>* entries_changed) OVERRIDE;
- virtual void HandleCalculateChangesChangeEventFromSyncer(
- const syncable::ImmutableWriteTransactionInfo& write_transaction_info,
- syncable::BaseTransaction* trans,
- std::vector<int64>* entries_changed) OVERRIDE;
-
- // Handle explicit requests to fetch updates for the given types.
- virtual void RefreshTypes(ModelTypeSet types) OVERRIDE;
-
- // These OnYYYChanged() methods are only called by our NetworkChangeNotifier.
- // Called when IP address of primary interface changes.
- virtual void OnIPAddressChanged() OVERRIDE;
- // Called when the connection type of the system has changed.
- virtual void OnConnectionTypeChanged(
- net::NetworkChangeNotifier::ConnectionType) OVERRIDE;
-
- const SyncScheduler* scheduler() const;
-
- bool GetHasInvalidAuthTokenForTest() const;
-
- protected:
- // Helper functions. Virtual for testing.
- virtual void NotifyInitializationSuccess();
- virtual void NotifyInitializationFailure();
-
- private:
- friend class SyncManagerTest;
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, NudgeDelayTest);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, OnNotificationStateChange);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, OnIncomingNotification);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, PurgeDisabledTypes);
- FRIEND_TEST_ALL_PREFIXES(SyncManagerTest, PurgeUnappliedTypes);
-
- struct NotificationInfo {
- NotificationInfo();
- ~NotificationInfo();
-
- int total_count;
- std::string payload;
-
- // Returned pointer owned by the caller.
- base::DictionaryValue* ToValue() const;
- };
-
- base::TimeDelta GetNudgeDelayTimeDelta(const ModelType& model_type);
-
- typedef std::map<ModelType, NotificationInfo> NotificationInfoMap;
- typedef JsArgList (SyncManagerImpl::*UnboundJsMessageHandler)(
- const JsArgList&);
- typedef base::Callback<JsArgList(const JsArgList&)> JsMessageHandler;
- typedef std::map<std::string, JsMessageHandler> JsMessageHandlerMap;
-
- // Determine if the parents or predecessors differ between the old and new
- // versions of an entry. Note that a node's index may change without its
- // UNIQUE_POSITION changing if its sibling nodes were changed. To handle such
- // cases, we rely on the caller to treat a position update on any sibling as
- // updating the positions of all siblings.
- bool VisiblePositionsDiffer(
- const syncable::EntryKernelMutation& mutation) const;
-
- // Determine if any of the fields made visible to clients of the Sync API
- // differ between the versions of an entry stored in |a| and |b|. A return
- // value of false means that it should be OK to ignore this change.
- bool VisiblePropertiesDiffer(
- const syncable::EntryKernelMutation& mutation,
- Cryptographer* cryptographer) const;
-
- // Open the directory named with |username|.
- bool OpenDirectory(const std::string& username);
-
- // Purge those disabled types as specified by |to_purge|. |to_journal| and
- // |to_unapply| specify subsets that require special handling. |to_journal|
- // types are saved into the delete journal, while |to_unapply| have only
- // their local data deleted, while their server data is preserved.
- bool PurgeDisabledTypes(ModelTypeSet to_purge,
- ModelTypeSet to_journal,
- ModelTypeSet to_unapply);
-
- void RequestNudgeForDataTypes(
- const tracked_objects::Location& nudge_location,
- ModelTypeSet type);
-
- // If this is a deletion for a password, sets the legacy
- // ExtraPasswordChangeRecordData field of |buffer|. Otherwise sets
- // |buffer|'s specifics field to contain the unencrypted data.
- void SetExtraChangeRecordData(int64 id,
- ModelType type,
- ChangeReorderBuffer* buffer,
- Cryptographer* cryptographer,
- const syncable::EntryKernel& original,
- bool existed_before,
- bool exists_now);
-
- // Called for every notification. This updates the notification statistics
- // to be displayed in about:sync.
- void UpdateNotificationInfo(const ObjectIdInvalidationMap& invalidation_map);
-
- // Checks for server reachabilty and requests a nudge.
- void OnNetworkConnectivityChangedImpl();
-
- // Helper function used only by the constructor.
- void BindJsMessageHandler(
- const std::string& name, UnboundJsMessageHandler unbound_message_handler);
-
- // Returned pointer is owned by the caller.
- static base::DictionaryValue* NotificationInfoToValue(
- const NotificationInfoMap& notification_info);
-
- static std::string NotificationInfoToString(
- const NotificationInfoMap& notification_info);
-
- // JS message handlers.
- JsArgList GetNotificationState(const JsArgList& args);
- JsArgList GetNotificationInfo(const JsArgList& args);
- JsArgList GetRootNodeDetails(const JsArgList& args);
- JsArgList GetAllNodes(const JsArgList& args);
- JsArgList GetNodeSummariesById(const JsArgList& args);
- JsArgList GetNodeDetailsById(const JsArgList& args);
- JsArgList GetChildNodeIds(const JsArgList& args);
- JsArgList GetClientServerTraffic(const JsArgList& args);
-
- syncable::Directory* directory();
-
- base::FilePath database_path_;
-
- const std::string name_;
-
- base::ThreadChecker thread_checker_;
-
- // Thread-safe handle used by
- // HandleCalculateChangesChangeEventFromSyncApi(), which can be
- // called from any thread. Valid only between between calls to
- // Init() and Shutdown().
- //
- // TODO(akalin): Ideally, we wouldn't need to store this; instead,
- // we'd have another worker class which implements
- // HandleCalculateChangesChangeEventFromSyncApi() and we'd pass it a
- // WeakHandle when we construct it.
- WeakHandle<SyncManagerImpl> weak_handle_this_;
-
- // We give a handle to share_ to clients of the API for use when constructing
- // any transaction type.
- UserShare share_;
-
- // This can be called from any thread, but only between calls to
- // OpenDirectory() and ShutdownOnSyncThread().
- WeakHandle<SyncManager::ChangeObserver> change_observer_;
-
- ObserverList<SyncManager::Observer> observers_;
-
- // The ServerConnectionManager used to abstract communication between the
- // client (the Syncer) and the sync server.
- scoped_ptr<SyncAPIServerConnectionManager> connection_manager_;
-
- // A container of various bits of information used by the SyncScheduler to
- // create SyncSessions. Must outlive the SyncScheduler.
- scoped_ptr<sessions::SyncSessionContext> session_context_;
-
- // The scheduler that runs the Syncer. Needs to be explicitly
- // Start()ed.
- scoped_ptr<SyncScheduler> scheduler_;
-
- // A multi-purpose status watch object that aggregates stats from various
- // sync components.
- AllStatus allstatus_;
-
- // Each element of this map is a store of change records produced by
- // HandleChangeEventFromSyncer during the CALCULATE_CHANGES step. The changes
- // are grouped by model type, and are stored here in tree order to be
- // forwarded to the observer slightly later, at the TRANSACTION_ENDING step
- // by HandleTransactionEndingChangeEvent. The list is cleared after observer
- // finishes processing.
- typedef std::map<int, ImmutableChangeRecordList> ChangeRecordMap;
- ChangeRecordMap change_records_;
-
- SyncManager::ChangeDelegate* change_delegate_;
-
- // Set to true once Init has been called.
- bool initialized_;
-
- bool observing_network_connectivity_changes_;
-
- InvalidatorState invalidator_state_;
-
- // Map used to store the notification info to be displayed in
- // about:sync page.
- NotificationInfoMap notification_info_map_;
-
- // These are for interacting with chrome://sync-internals.
- JsMessageHandlerMap js_message_handlers_;
- WeakHandle<JsEventHandler> js_event_handler_;
- JsSyncManagerObserver js_sync_manager_observer_;
- JsMutationEventObserver js_mutation_event_observer_;
- JsSyncEncryptionHandlerObserver js_sync_encryption_handler_observer_;
-
- // This is for keeping track of client events to send to the server.
- DebugInfoEventListener debug_info_event_listener_;
-
- TrafficRecorder traffic_recorder_;
-
- Encryptor* encryptor_;
- scoped_ptr<UnrecoverableErrorHandler> unrecoverable_error_handler_;
- ReportUnrecoverableErrorFunction report_unrecoverable_error_function_;
-
- // Sync's encryption handler. It tracks the set of encrypted types, manages
- // changing passphrases, and in general handles sync-specific interactions
- // with the cryptographer.
- scoped_ptr<SyncEncryptionHandlerImpl> sync_encryption_handler_;
-
- base::WeakPtrFactory<SyncManagerImpl> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncManagerImpl);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_SYNC_MANAGER_H_
diff --git a/chromium/sync/internal_api/sync_manager_impl_unittest.cc b/chromium/sync/internal_api/sync_manager_impl_unittest.cc
deleted file mode 100644
index 91be3b80dfd..00000000000
--- a/chromium/sync/internal_api/sync_manager_impl_unittest.cc
+++ /dev/null
@@ -1,3520 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Unit tests for the SyncApi. Note that a lot of the underlying
-// functionality is provided by the Syncable layer, which has its own
-// unit tests. We'll test SyncApi specific things in this harness.
-
-#include <cstddef>
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/format_macros.h"
-#include "base/location.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/stringprintf.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "sync/engine/sync_scheduler.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/internal_api/public/change_record.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/engine/polling_constants.h"
-#include "sync/internal_api/public/http_post_provider_factory.h"
-#include "sync/internal_api/public/http_post_provider_interface.h"
-#include "sync/internal_api/public/read_node.h"
-#include "sync/internal_api/public/read_transaction.h"
-#include "sync/internal_api/public/test/test_entry_factory.h"
-#include "sync/internal_api/public/test/test_internal_components_factory.h"
-#include "sync/internal_api/public/test/test_user_share.h"
-#include "sync/internal_api/public/write_node.h"
-#include "sync/internal_api/public/write_transaction.h"
-#include "sync/internal_api/sync_encryption_handler_impl.h"
-#include "sync/internal_api/sync_manager_impl.h"
-#include "sync/internal_api/syncapi_internal.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_backend.h"
-#include "sync/js/js_event_handler.h"
-#include "sync/js/js_reply_handler.h"
-#include "sync/js/js_test_util.h"
-#include "sync/notifier/fake_invalidation_handler.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/encryption.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/sync_session.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/callback_counter.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/fake_sync_scheduler.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/fake_encryptor.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/extensions_activity.h"
-#include "sync/util/test_unrecoverable_error_handler.h"
-#include "sync/util/time.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::ExpectDictStringValue;
-using testing::_;
-using testing::DoAll;
-using testing::InSequence;
-using testing::Return;
-using testing::SaveArg;
-using testing::StrictMock;
-
-namespace syncer {
-
-using sessions::SyncSessionSnapshot;
-using syncable::GET_BY_HANDLE;
-using syncable::IS_DEL;
-using syncable::IS_UNSYNCED;
-using syncable::NON_UNIQUE_NAME;
-using syncable::SPECIFICS;
-using syncable::kEncryptedString;
-
-namespace {
-
-void ExpectInt64Value(int64 expected_value,
- const base::DictionaryValue& value,
- const std::string& key) {
- std::string int64_str;
- EXPECT_TRUE(value.GetString(key, &int64_str));
- int64 val = 0;
- EXPECT_TRUE(base::StringToInt64(int64_str, &val));
- EXPECT_EQ(expected_value, val);
-}
-
-void ExpectTimeValue(const base::Time& expected_value,
- const base::DictionaryValue& value,
- const std::string& key) {
- std::string time_str;
- EXPECT_TRUE(value.GetString(key, &time_str));
- EXPECT_EQ(GetTimeDebugString(expected_value), time_str);
-}
-
-// Makes a non-folder child of the root node. Returns the id of the
-// newly-created node.
-int64 MakeNode(UserShare* share,
- ModelType model_type,
- const std::string& client_tag) {
- WriteTransaction trans(FROM_HERE, share);
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
- WriteNode node(&trans);
- WriteNode::InitUniqueByCreationResult result =
- node.InitUniqueByCreation(model_type, root_node, client_tag);
- EXPECT_EQ(WriteNode::INIT_SUCCESS, result);
- node.SetIsFolder(false);
- return node.GetId();
-}
-
-// Makes a folder child of a non-root node. Returns the id of the
-// newly-created node.
-int64 MakeFolderWithParent(UserShare* share,
- ModelType model_type,
- int64 parent_id,
- BaseNode* predecessor) {
- WriteTransaction trans(FROM_HERE, share);
- ReadNode parent_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, parent_node.InitByIdLookup(parent_id));
- WriteNode node(&trans);
- EXPECT_TRUE(node.InitBookmarkByCreation(parent_node, predecessor));
- node.SetIsFolder(true);
- return node.GetId();
-}
-
-int64 MakeBookmarkWithParent(UserShare* share,
- int64 parent_id,
- BaseNode* predecessor) {
- WriteTransaction trans(FROM_HERE, share);
- ReadNode parent_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, parent_node.InitByIdLookup(parent_id));
- WriteNode node(&trans);
- EXPECT_TRUE(node.InitBookmarkByCreation(parent_node, predecessor));
- return node.GetId();
-}
-
-// Creates the "synced" root node for a particular datatype. We use the syncable
-// methods here so that the syncer treats these nodes as if they were already
-// received from the server.
-int64 MakeServerNodeForType(UserShare* share,
- ModelType model_type) {
- sync_pb::EntitySpecifics specifics;
- AddDefaultFieldValue(model_type, &specifics);
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::UNITTEST, share->directory.get());
- // Attempt to lookup by nigori tag.
- std::string type_tag = ModelTypeToRootTag(model_type);
- syncable::Id node_id = syncable::Id::CreateFromServerId(type_tag);
- syncable::MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
- node_id);
- EXPECT_TRUE(entry.good());
- entry.PutBaseVersion(1);
- entry.PutServerVersion(1);
- entry.PutIsUnappliedUpdate(false);
- entry.PutServerParentId(syncable::GetNullId());
- entry.PutServerIsDir(true);
- entry.PutIsDir(true);
- entry.PutServerSpecifics(specifics);
- entry.PutUniqueServerTag(type_tag);
- entry.PutNonUniqueName(type_tag);
- entry.PutIsDel(false);
- entry.PutSpecifics(specifics);
- return entry.GetMetahandle();
-}
-
-// Simulates creating a "synced" node as a child of the root datatype node.
-int64 MakeServerNode(UserShare* share, ModelType model_type,
- const std::string& client_tag,
- const std::string& hashed_tag,
- const sync_pb::EntitySpecifics& specifics) {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::UNITTEST, share->directory.get());
- syncable::Entry root_entry(&trans, syncable::GET_BY_SERVER_TAG,
- ModelTypeToRootTag(model_type));
- EXPECT_TRUE(root_entry.good());
- syncable::Id root_id = root_entry.GetId();
- syncable::Id node_id = syncable::Id::CreateFromServerId(client_tag);
- syncable::MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
- node_id);
- EXPECT_TRUE(entry.good());
- entry.PutBaseVersion(1);
- entry.PutServerVersion(1);
- entry.PutIsUnappliedUpdate(false);
- entry.PutServerParentId(root_id);
- entry.PutParentId(root_id);
- entry.PutServerIsDir(false);
- entry.PutIsDir(false);
- entry.PutServerSpecifics(specifics);
- entry.PutNonUniqueName(client_tag);
- entry.PutUniqueClientTag(hashed_tag);
- entry.PutIsDel(false);
- entry.PutSpecifics(specifics);
- return entry.GetMetahandle();
-}
-
-} // namespace
-
-class SyncApiTest : public testing::Test {
- public:
- virtual void SetUp() {
- test_user_share_.SetUp();
- }
-
- virtual void TearDown() {
- test_user_share_.TearDown();
- }
-
- protected:
- base::MessageLoop message_loop_;
- TestUserShare test_user_share_;
-};
-
-TEST_F(SyncApiTest, SanityCheckTest) {
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- EXPECT_TRUE(trans.GetWrappedTrans());
- }
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- EXPECT_TRUE(trans.GetWrappedTrans());
- }
- {
- // No entries but root should exist
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- // Metahandle 1 can be root, sanity check 2
- EXPECT_EQ(BaseNode::INIT_FAILED_ENTRY_NOT_GOOD, node.InitByIdLookup(2));
- }
-}
-
-TEST_F(SyncApiTest, BasicTagWrite) {
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
- EXPECT_EQ(root_node.GetFirstChildId(), 0);
- }
-
- ignore_result(MakeNode(test_user_share_.user_share(),
- BOOKMARKS, "testtag"));
-
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, "testtag"));
-
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
- EXPECT_NE(node.GetId(), 0);
- EXPECT_EQ(node.GetId(), root_node.GetFirstChildId());
- }
-}
-
-TEST_F(SyncApiTest, ModelTypesSiloed) {
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
- EXPECT_EQ(root_node.GetFirstChildId(), 0);
- }
-
- ignore_result(MakeNode(test_user_share_.user_share(),
- BOOKMARKS, "collideme"));
- ignore_result(MakeNode(test_user_share_.user_share(),
- PREFERENCES, "collideme"));
- ignore_result(MakeNode(test_user_share_.user_share(),
- AUTOFILL, "collideme"));
-
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
-
- ReadNode bookmarknode(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- bookmarknode.InitByClientTagLookup(BOOKMARKS,
- "collideme"));
-
- ReadNode prefnode(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- prefnode.InitByClientTagLookup(PREFERENCES,
- "collideme"));
-
- ReadNode autofillnode(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- autofillnode.InitByClientTagLookup(AUTOFILL,
- "collideme"));
-
- EXPECT_NE(bookmarknode.GetId(), prefnode.GetId());
- EXPECT_NE(autofillnode.GetId(), prefnode.GetId());
- EXPECT_NE(bookmarknode.GetId(), autofillnode.GetId());
- }
-}
-
-TEST_F(SyncApiTest, ReadMissingTagsFails) {
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_FAILED_ENTRY_NOT_GOOD,
- node.InitByClientTagLookup(BOOKMARKS,
- "testtag"));
- }
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_FAILED_ENTRY_NOT_GOOD,
- node.InitByClientTagLookup(BOOKMARKS,
- "testtag"));
- }
-}
-
-// TODO(chron): Hook this all up to the server and write full integration tests
-// for update->undelete behavior.
-TEST_F(SyncApiTest, TestDeleteBehavior) {
- int64 node_id;
- int64 folder_id;
- std::string test_title("test1");
-
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- // we'll use this spare folder later
- WriteNode folder_node(&trans);
- EXPECT_TRUE(folder_node.InitBookmarkByCreation(root_node, NULL));
- folder_id = folder_node.GetId();
-
- WriteNode wnode(&trans);
- WriteNode::InitUniqueByCreationResult result =
- wnode.InitUniqueByCreation(BOOKMARKS, root_node, "testtag");
- EXPECT_EQ(WriteNode::INIT_SUCCESS, result);
- wnode.SetIsFolder(false);
- wnode.SetTitle(UTF8ToWide(test_title));
-
- node_id = wnode.GetId();
- }
-
- // Ensure we can delete something with a tag.
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- WriteNode wnode(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- wnode.InitByClientTagLookup(BOOKMARKS,
- "testtag"));
- EXPECT_FALSE(wnode.GetIsFolder());
- EXPECT_EQ(wnode.GetTitle(), test_title);
-
- wnode.Tombstone();
- }
-
- // Lookup of a node which was deleted should return failure,
- // but have found some data about the node.
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_FAILED_ENTRY_IS_DEL,
- node.InitByClientTagLookup(BOOKMARKS,
- "testtag"));
- // Note that for proper function of this API this doesn't need to be
- // filled, we're checking just to make sure the DB worked in this test.
- EXPECT_EQ(node.GetTitle(), test_title);
- }
-
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode folder_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, folder_node.InitByIdLookup(folder_id));
-
- WriteNode wnode(&trans);
- // This will undelete the tag.
- WriteNode::InitUniqueByCreationResult result =
- wnode.InitUniqueByCreation(BOOKMARKS, folder_node, "testtag");
- EXPECT_EQ(WriteNode::INIT_SUCCESS, result);
- EXPECT_EQ(wnode.GetIsFolder(), false);
- EXPECT_EQ(wnode.GetParentId(), folder_node.GetId());
- EXPECT_EQ(wnode.GetId(), node_id);
- EXPECT_NE(wnode.GetTitle(), test_title); // Title should be cleared
- wnode.SetTitle(UTF8ToWide(test_title));
- }
-
- // Now look up should work.
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS,
- "testtag"));
- EXPECT_EQ(node.GetTitle(), test_title);
- EXPECT_EQ(node.GetModelType(), BOOKMARKS);
- }
-}
-
-TEST_F(SyncApiTest, WriteAndReadPassword) {
- KeyParams params = {"localhost", "username", "passphrase"};
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- trans.GetCryptographer()->AddKey(params);
- }
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- WriteNode password_node(&trans);
- WriteNode::InitUniqueByCreationResult result =
- password_node.InitUniqueByCreation(PASSWORDS,
- root_node, "foo");
- EXPECT_EQ(WriteNode::INIT_SUCCESS, result);
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret");
- password_node.SetPasswordSpecifics(data);
- }
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- ReadNode password_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- password_node.InitByClientTagLookup(PASSWORDS, "foo"));
- const sync_pb::PasswordSpecificsData& data =
- password_node.GetPasswordSpecifics();
- EXPECT_EQ("secret", data.password_value());
- }
-}
-
-TEST_F(SyncApiTest, WriteEncryptedTitle) {
- KeyParams params = {"localhost", "username", "passphrase"};
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- trans.GetCryptographer()->AddKey(params);
- }
- test_user_share_.encryption_handler()->EnableEncryptEverything();
- int bookmark_id;
- {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- WriteNode bookmark_node(&trans);
- ASSERT_TRUE(bookmark_node.InitBookmarkByCreation(root_node, NULL));
- bookmark_id = bookmark_node.GetId();
- bookmark_node.SetTitle(UTF8ToWide("foo"));
-
- WriteNode pref_node(&trans);
- WriteNode::InitUniqueByCreationResult result =
- pref_node.InitUniqueByCreation(PREFERENCES, root_node, "bar");
- ASSERT_EQ(WriteNode::INIT_SUCCESS, result);
- pref_node.SetTitle(UTF8ToWide("bar"));
- }
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- ReadNode bookmark_node(&trans);
- ASSERT_EQ(BaseNode::INIT_OK, bookmark_node.InitByIdLookup(bookmark_id));
- EXPECT_EQ("foo", bookmark_node.GetTitle());
- EXPECT_EQ(kEncryptedString,
- bookmark_node.GetEntry()->GetNonUniqueName());
-
- ReadNode pref_node(&trans);
- ASSERT_EQ(BaseNode::INIT_OK,
- pref_node.InitByClientTagLookup(PREFERENCES,
- "bar"));
- EXPECT_EQ(kEncryptedString, pref_node.GetTitle());
- }
-}
-
-TEST_F(SyncApiTest, BaseNodeSetSpecifics) {
- int64 child_id = MakeNode(test_user_share_.user_share(),
- BOOKMARKS, "testtag");
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(child_id));
-
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url("http://www.google.com");
-
- EXPECT_NE(entity_specifics.SerializeAsString(),
- node.GetEntitySpecifics().SerializeAsString());
- node.SetEntitySpecifics(entity_specifics);
- EXPECT_EQ(entity_specifics.SerializeAsString(),
- node.GetEntitySpecifics().SerializeAsString());
-}
-
-TEST_F(SyncApiTest, BaseNodeSetSpecificsPreservesUnknownFields) {
- int64 child_id = MakeNode(test_user_share_.user_share(),
- BOOKMARKS, "testtag");
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(child_id));
- EXPECT_TRUE(node.GetEntitySpecifics().unknown_fields().empty());
-
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url("http://www.google.com");
- entity_specifics.mutable_unknown_fields()->AddFixed32(5, 100);
- node.SetEntitySpecifics(entity_specifics);
- EXPECT_FALSE(node.GetEntitySpecifics().unknown_fields().empty());
-
- entity_specifics.mutable_unknown_fields()->Clear();
- node.SetEntitySpecifics(entity_specifics);
- EXPECT_FALSE(node.GetEntitySpecifics().unknown_fields().empty());
-}
-
-namespace {
-
-void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
- bool is_detailed) {
- size_t expected_field_count = 4;
-
- ExpectInt64Value(node.GetId(), value, "id");
- {
- bool is_folder = false;
- EXPECT_TRUE(value.GetBoolean("isFolder", &is_folder));
- EXPECT_EQ(node.GetIsFolder(), is_folder);
- }
- ExpectDictStringValue(node.GetTitle(), value, "title");
-
- ModelType expected_model_type = node.GetModelType();
- std::string type_str;
- EXPECT_TRUE(value.GetString("type", &type_str));
- if (expected_model_type >= FIRST_REAL_MODEL_TYPE) {
- ModelType model_type = ModelTypeFromString(type_str);
- EXPECT_EQ(expected_model_type, model_type);
- } else if (expected_model_type == TOP_LEVEL_FOLDER) {
- EXPECT_EQ("Top-level folder", type_str);
- } else if (expected_model_type == UNSPECIFIED) {
- EXPECT_EQ("Unspecified", type_str);
- } else {
- ADD_FAILURE();
- }
-
- if (is_detailed) {
- {
- scoped_ptr<base::DictionaryValue> expected_entry(
- node.GetEntry()->ToValue(NULL));
- const base::Value* entry = NULL;
- EXPECT_TRUE(value.Get("entry", &entry));
- EXPECT_TRUE(base::Value::Equals(entry, expected_entry.get()));
- }
-
- ExpectInt64Value(node.GetParentId(), value, "parentId");
- ExpectTimeValue(node.GetModificationTime(), value, "modificationTime");
- ExpectInt64Value(node.GetExternalId(), value, "externalId");
- expected_field_count += 4;
-
- if (value.HasKey("predecessorId")) {
- ExpectInt64Value(node.GetPredecessorId(), value, "predecessorId");
- expected_field_count++;
- }
- if (value.HasKey("successorId")) {
- ExpectInt64Value(node.GetSuccessorId(), value, "successorId");
- expected_field_count++;
- }
- if (value.HasKey("firstChildId")) {
- ExpectInt64Value(node.GetFirstChildId(), value, "firstChildId");
- expected_field_count++;
- }
- }
-
- EXPECT_EQ(expected_field_count, value.size());
-}
-
-} // namespace
-
-TEST_F(SyncApiTest, BaseNodeGetSummaryAsValue) {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- node.InitByRootLookup();
- scoped_ptr<base::DictionaryValue> details(node.GetSummaryAsValue());
- if (details) {
- CheckNodeValue(node, *details, false);
- } else {
- ADD_FAILURE();
- }
-}
-
-TEST_F(SyncApiTest, BaseNodeGetDetailsAsValue) {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode node(&trans);
- node.InitByRootLookup();
- scoped_ptr<base::DictionaryValue> details(node.GetDetailsAsValue());
- if (details) {
- CheckNodeValue(node, *details, true);
- } else {
- ADD_FAILURE();
- }
-}
-
-TEST_F(SyncApiTest, EmptyTags) {
- WriteTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
- WriteNode node(&trans);
- std::string empty_tag;
- WriteNode::InitUniqueByCreationResult result =
- node.InitUniqueByCreation(TYPED_URLS, root_node, empty_tag);
- EXPECT_NE(WriteNode::INIT_SUCCESS, result);
- EXPECT_EQ(BaseNode::INIT_FAILED_PRECONDITION,
- node.InitByTagLookup(empty_tag));
-}
-
-// Test counting nodes when the type's root node has no children.
-TEST_F(SyncApiTest, GetTotalNodeCountEmpty) {
- int64 type_root = MakeServerNodeForType(test_user_share_.user_share(),
- BOOKMARKS);
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode type_root_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- type_root_node.InitByIdLookup(type_root));
- EXPECT_EQ(1, type_root_node.GetTotalNodeCount());
- }
-}
-
-// Test counting nodes when there is one child beneath the type's root.
-TEST_F(SyncApiTest, GetTotalNodeCountOneChild) {
- int64 type_root = MakeServerNodeForType(test_user_share_.user_share(),
- BOOKMARKS);
- int64 parent = MakeFolderWithParent(test_user_share_.user_share(),
- BOOKMARKS,
- type_root,
- NULL);
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode type_root_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- type_root_node.InitByIdLookup(type_root));
- EXPECT_EQ(2, type_root_node.GetTotalNodeCount());
- ReadNode parent_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- parent_node.InitByIdLookup(parent));
- EXPECT_EQ(1, parent_node.GetTotalNodeCount());
- }
-}
-
-// Test counting nodes when there are multiple children beneath the type root,
-// and one of those children has children of its own.
-TEST_F(SyncApiTest, GetTotalNodeCountMultipleChildren) {
- int64 type_root = MakeServerNodeForType(test_user_share_.user_share(),
- BOOKMARKS);
- int64 parent = MakeFolderWithParent(test_user_share_.user_share(),
- BOOKMARKS,
- type_root,
- NULL);
- ignore_result(MakeFolderWithParent(test_user_share_.user_share(),
- BOOKMARKS,
- type_root,
- NULL));
- int64 child1 = MakeFolderWithParent(
- test_user_share_.user_share(),
- BOOKMARKS,
- parent,
- NULL);
- ignore_result(MakeBookmarkWithParent(
- test_user_share_.user_share(),
- parent,
- NULL));
- ignore_result(MakeBookmarkWithParent(
- test_user_share_.user_share(),
- child1,
- NULL));
-
- {
- ReadTransaction trans(FROM_HERE, test_user_share_.user_share());
- ReadNode type_root_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- type_root_node.InitByIdLookup(type_root));
- EXPECT_EQ(6, type_root_node.GetTotalNodeCount());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByIdLookup(parent));
- EXPECT_EQ(4, node.GetTotalNodeCount());
- }
-}
-
-namespace {
-
-class TestHttpPostProviderInterface : public HttpPostProviderInterface {
- public:
- virtual ~TestHttpPostProviderInterface() {}
-
- virtual void SetExtraRequestHeaders(const char* headers) OVERRIDE {}
- virtual void SetURL(const char* url, int port) OVERRIDE {}
- virtual void SetPostPayload(const char* content_type,
- int content_length,
- const char* content) OVERRIDE {}
- virtual bool MakeSynchronousPost(int* error_code, int* response_code)
- OVERRIDE {
- return false;
- }
- virtual int GetResponseContentLength() const OVERRIDE {
- return 0;
- }
- virtual const char* GetResponseContent() const OVERRIDE {
- return "";
- }
- virtual const std::string GetResponseHeaderValue(
- const std::string& name) const OVERRIDE {
- return std::string();
- }
- virtual void Abort() OVERRIDE {}
-};
-
-class TestHttpPostProviderFactory : public HttpPostProviderFactory {
- public:
- virtual ~TestHttpPostProviderFactory() {}
- virtual void Init(const std::string& user_agent) OVERRIDE { }
- virtual HttpPostProviderInterface* Create() OVERRIDE {
- return new TestHttpPostProviderInterface();
- }
- virtual void Destroy(HttpPostProviderInterface* http) OVERRIDE {
- delete static_cast<TestHttpPostProviderInterface*>(http);
- }
-};
-
-class SyncManagerObserverMock : public SyncManager::Observer {
- public:
- MOCK_METHOD1(OnSyncCycleCompleted,
- void(const SyncSessionSnapshot&)); // NOLINT
- MOCK_METHOD4(OnInitializationComplete,
- void(const WeakHandle<JsBackend>&,
- const WeakHandle<DataTypeDebugInfoListener>&,
- bool,
- syncer::ModelTypeSet)); // NOLINT
- MOCK_METHOD1(OnConnectionStatusChange, void(ConnectionStatus)); // NOLINT
- MOCK_METHOD0(OnStopSyncingPermanently, void()); // NOLINT
- MOCK_METHOD1(OnUpdatedToken, void(const std::string&)); // NOLINT
- MOCK_METHOD1(OnActionableError,
- void(const SyncProtocolError&)); // NOLINT
-};
-
-class SyncEncryptionHandlerObserverMock
- : public SyncEncryptionHandler::Observer {
- public:
- MOCK_METHOD2(OnPassphraseRequired,
- void(PassphraseRequiredReason,
- const sync_pb::EncryptedData&)); // NOLINT
- MOCK_METHOD0(OnPassphraseAccepted, void()); // NOLINT
- MOCK_METHOD2(OnBootstrapTokenUpdated,
- void(const std::string&, BootstrapTokenType type)); // NOLINT
- MOCK_METHOD2(OnEncryptedTypesChanged,
- void(ModelTypeSet, bool)); // NOLINT
- MOCK_METHOD0(OnEncryptionComplete, void()); // NOLINT
- MOCK_METHOD1(OnCryptographerStateChanged, void(Cryptographer*)); // NOLINT
- MOCK_METHOD2(OnPassphraseTypeChanged, void(PassphraseType,
- base::Time)); // NOLINT
-};
-
-} // namespace
-
-class SyncManagerTest : public testing::Test,
- public SyncManager::ChangeDelegate {
- protected:
- enum NigoriStatus {
- DONT_WRITE_NIGORI,
- WRITE_TO_NIGORI
- };
-
- enum EncryptionStatus {
- UNINITIALIZED,
- DEFAULT_ENCRYPTION,
- FULL_ENCRYPTION
- };
-
- SyncManagerTest()
- : sync_manager_("Test sync manager") {
- switches_.encryption_method =
- InternalComponentsFactory::ENCRYPTION_KEYSTORE;
- }
-
- virtual ~SyncManagerTest() {
- }
-
- // Test implementation.
- void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
-
- extensions_activity_ = new ExtensionsActivity();
-
- SyncCredentials credentials;
- credentials.email = "foo@bar.com";
- credentials.sync_token = "sometoken";
-
- sync_manager_.AddObserver(&manager_observer_);
- EXPECT_CALL(manager_observer_, OnInitializationComplete(_, _, _, _)).
- WillOnce(DoAll(SaveArg<0>(&js_backend_),
- SaveArg<2>(&initialization_succeeded_)));
-
- EXPECT_FALSE(js_backend_.IsInitialized());
-
- std::vector<ModelSafeWorker*> workers;
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
-
- // This works only because all routing info types are GROUP_PASSIVE.
- // If we had types in other groups, we would need additional workers
- // to support them.
- scoped_refptr<ModelSafeWorker> worker = new FakeModelWorker(GROUP_PASSIVE);
- workers.push_back(worker.get());
-
- // Takes ownership of |fake_invalidator_|.
- sync_manager_.Init(
- temp_dir_.path(),
- WeakHandle<JsEventHandler>(),
- "bogus",
- 0,
- false,
- scoped_ptr<HttpPostProviderFactory>(new TestHttpPostProviderFactory()),
- workers,
- extensions_activity_.get(),
- this,
- credentials,
- "fake_invalidator_client_id",
- std::string(),
- std::string(), // bootstrap tokens
- scoped_ptr<InternalComponentsFactory>(GetFactory()).get(),
- &encryptor_,
- scoped_ptr<UnrecoverableErrorHandler>(
- new TestUnrecoverableErrorHandler).Pass(),
- NULL,
- &cancelation_signal_);
-
- sync_manager_.GetEncryptionHandler()->AddObserver(&encryption_observer_);
-
- EXPECT_TRUE(js_backend_.IsInitialized());
-
- if (initialization_succeeded_) {
- for (ModelSafeRoutingInfo::iterator i = routing_info.begin();
- i != routing_info.end(); ++i) {
- type_roots_[i->first] = MakeServerNodeForType(
- sync_manager_.GetUserShare(), i->first);
- }
- }
- PumpLoop();
- }
-
- void TearDown() {
- sync_manager_.RemoveObserver(&manager_observer_);
- sync_manager_.ShutdownOnSyncThread();
- PumpLoop();
- }
-
- void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) {
- (*out)[NIGORI] = GROUP_PASSIVE;
- (*out)[DEVICE_INFO] = GROUP_PASSIVE;
- (*out)[EXPERIMENTS] = GROUP_PASSIVE;
- (*out)[BOOKMARKS] = GROUP_PASSIVE;
- (*out)[THEMES] = GROUP_PASSIVE;
- (*out)[SESSIONS] = GROUP_PASSIVE;
- (*out)[PASSWORDS] = GROUP_PASSIVE;
- (*out)[PREFERENCES] = GROUP_PASSIVE;
- (*out)[PRIORITY_PREFERENCES] = GROUP_PASSIVE;
- }
-
- virtual void OnChangesApplied(
- ModelType model_type,
- int64 model_version,
- const BaseTransaction* trans,
- const ImmutableChangeRecordList& changes) OVERRIDE {}
-
- virtual void OnChangesComplete(ModelType model_type) OVERRIDE {}
-
- // Helper methods.
- bool SetUpEncryption(NigoriStatus nigori_status,
- EncryptionStatus encryption_status) {
- UserShare* share = sync_manager_.GetUserShare();
-
- // We need to create the nigori node as if it were an applied server update.
- int64 nigori_id = GetIdForDataType(NIGORI);
- if (nigori_id == kInvalidId)
- return false;
-
- // Set the nigori cryptographer information.
- if (encryption_status == FULL_ENCRYPTION)
- sync_manager_.GetEncryptionHandler()->EnableEncryptEverything();
-
- WriteTransaction trans(FROM_HERE, share);
- Cryptographer* cryptographer = trans.GetCryptographer();
- if (!cryptographer)
- return false;
- if (encryption_status != UNINITIALIZED) {
- KeyParams params = {"localhost", "dummy", "foobar"};
- cryptographer->AddKey(params);
- } else {
- DCHECK_NE(nigori_status, WRITE_TO_NIGORI);
- }
- if (nigori_status == WRITE_TO_NIGORI) {
- sync_pb::NigoriSpecifics nigori;
- cryptographer->GetKeys(nigori.mutable_encryption_keybag());
- share->directory->GetNigoriHandler()->UpdateNigoriFromEncryptedTypes(
- &nigori,
- trans.GetWrappedTrans());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(nigori_id));
- node.SetNigoriSpecifics(nigori);
- }
- return cryptographer->is_ready();
- }
-
- int64 GetIdForDataType(ModelType type) {
- if (type_roots_.count(type) == 0)
- return 0;
- return type_roots_[type];
- }
-
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-
- void SendJsMessage(const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) {
- js_backend_.Call(FROM_HERE, &JsBackend::ProcessJsMessage,
- name, args, reply_handler);
- PumpLoop();
- }
-
- void SetJsEventHandler(const WeakHandle<JsEventHandler>& event_handler) {
- js_backend_.Call(FROM_HERE, &JsBackend::SetJsEventHandler,
- event_handler);
- PumpLoop();
- }
-
- // Looks up an entry by client tag and resets IS_UNSYNCED value to false.
- // Returns true if entry was previously unsynced, false if IS_UNSYNCED was
- // already false.
- bool ResetUnsyncedEntry(ModelType type,
- const std::string& client_tag) {
- UserShare* share = sync_manager_.GetUserShare();
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::UNITTEST, share->directory.get());
- const std::string hash = syncable::GenerateSyncableHash(type, client_tag);
- syncable::MutableEntry entry(&trans, syncable::GET_BY_CLIENT_TAG,
- hash);
- EXPECT_TRUE(entry.good());
- if (!entry.GetIsUnsynced())
- return false;
- entry.PutIsUnsynced(false);
- return true;
- }
-
- virtual InternalComponentsFactory* GetFactory() {
- return new TestInternalComponentsFactory(GetSwitches(), STORAGE_IN_MEMORY);
- }
-
- // Returns true if we are currently encrypting all sync data. May
- // be called on any thread.
- bool EncryptEverythingEnabledForTest() {
- return sync_manager_.GetEncryptionHandler()->EncryptEverythingEnabled();
- }
-
- // Gets the set of encrypted types from the cryptographer
- // Note: opens a transaction. May be called from any thread.
- ModelTypeSet GetEncryptedTypes() {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- return GetEncryptedTypesWithTrans(&trans);
- }
-
- ModelTypeSet GetEncryptedTypesWithTrans(BaseTransaction* trans) {
- return trans->GetDirectory()->GetNigoriHandler()->
- GetEncryptedTypes(trans->GetWrappedTrans());
- }
-
- void SimulateInvalidatorStateChangeForTest(InvalidatorState state) {
- DCHECK(sync_manager_.thread_checker_.CalledOnValidThread());
- sync_manager_.OnInvalidatorStateChange(state);
- }
-
- void TriggerOnIncomingNotificationForTest(ModelTypeSet model_types) {
- DCHECK(sync_manager_.thread_checker_.CalledOnValidThread());
- ObjectIdSet id_set = ModelTypeSetToObjectIdSet(model_types);
- ObjectIdInvalidationMap invalidation_map =
- ObjectIdInvalidationMap::InvalidateAll(id_set);
- sync_manager_.OnIncomingInvalidation(invalidation_map);
- }
-
- void SetProgressMarkerForType(ModelType type, bool set) {
- if (set) {
- sync_pb::DataTypeProgressMarker marker;
- marker.set_token("token");
- marker.set_data_type_id(GetSpecificsFieldNumberFromModelType(type));
- sync_manager_.directory()->SetDownloadProgress(type, marker);
- } else {
- sync_pb::DataTypeProgressMarker marker;
- sync_manager_.directory()->SetDownloadProgress(type, marker);
- }
- }
-
- InternalComponentsFactory::Switches GetSwitches() const {
- return switches_;
- }
-
- private:
- // Needed by |sync_manager_|.
- base::MessageLoop message_loop_;
- // Needed by |sync_manager_|.
- base::ScopedTempDir temp_dir_;
- // Sync Id's for the roots of the enabled datatypes.
- std::map<ModelType, int64> type_roots_;
- scoped_refptr<ExtensionsActivity> extensions_activity_;
-
- protected:
- FakeEncryptor encryptor_;
- SyncManagerImpl sync_manager_;
- CancelationSignal cancelation_signal_;
- WeakHandle<JsBackend> js_backend_;
- bool initialization_succeeded_;
- StrictMock<SyncManagerObserverMock> manager_observer_;
- StrictMock<SyncEncryptionHandlerObserverMock> encryption_observer_;
- InternalComponentsFactory::Switches switches_;
-};
-
-TEST_F(SyncManagerTest, ProcessJsMessage) {
- const JsArgList kNoArgs;
-
- StrictMock<MockJsReplyHandler> reply_handler;
-
- base::ListValue disabled_args;
- disabled_args.Append(new base::StringValue("TRANSIENT_INVALIDATION_ERROR"));
-
- EXPECT_CALL(reply_handler,
- HandleJsReply("getNotificationState",
- HasArgsAsList(disabled_args)));
-
- // This message should be dropped.
- SendJsMessage("unknownMessage", kNoArgs, reply_handler.AsWeakHandle());
-
- SendJsMessage("getNotificationState", kNoArgs, reply_handler.AsWeakHandle());
-}
-
-TEST_F(SyncManagerTest, ProcessJsMessageGetRootNodeDetails) {
- const JsArgList kNoArgs;
-
- StrictMock<MockJsReplyHandler> reply_handler;
-
- JsArgList return_args;
-
- EXPECT_CALL(reply_handler,
- HandleJsReply("getRootNodeDetails", _))
- .WillOnce(SaveArg<1>(&return_args));
-
- SendJsMessage("getRootNodeDetails", kNoArgs, reply_handler.AsWeakHandle());
-
- EXPECT_EQ(1u, return_args.Get().GetSize());
- const base::DictionaryValue* node_info = NULL;
- EXPECT_TRUE(return_args.Get().GetDictionary(0, &node_info));
- if (node_info) {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- node.InitByRootLookup();
- CheckNodeValue(node, *node_info, true);
- } else {
- ADD_FAILURE();
- }
-}
-
-void CheckGetNodesByIdReturnArgs(SyncManager* sync_manager,
- const JsArgList& return_args,
- int64 id,
- bool is_detailed) {
- EXPECT_EQ(1u, return_args.Get().GetSize());
- const base::ListValue* nodes = NULL;
- ASSERT_TRUE(return_args.Get().GetList(0, &nodes));
- ASSERT_TRUE(nodes);
- EXPECT_EQ(1u, nodes->GetSize());
- const base::DictionaryValue* node_info = NULL;
- EXPECT_TRUE(nodes->GetDictionary(0, &node_info));
- ASSERT_TRUE(node_info);
- ReadTransaction trans(FROM_HERE, sync_manager->GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(id));
- CheckNodeValue(node, *node_info, is_detailed);
-}
-
-class SyncManagerGetNodesByIdTest : public SyncManagerTest {
- protected:
- virtual ~SyncManagerGetNodesByIdTest() {}
-
- void RunGetNodesByIdTest(const char* message_name, bool is_detailed) {
- int64 root_id = kInvalidId;
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
- root_id = root_node.GetId();
- }
-
- int64 child_id =
- MakeNode(sync_manager_.GetUserShare(), BOOKMARKS, "testtag");
-
- StrictMock<MockJsReplyHandler> reply_handler;
-
- JsArgList return_args;
-
- const int64 ids[] = { root_id, child_id };
-
- EXPECT_CALL(reply_handler,
- HandleJsReply(message_name, _))
- .Times(arraysize(ids)).WillRepeatedly(SaveArg<1>(&return_args));
-
- for (size_t i = 0; i < arraysize(ids); ++i) {
- base::ListValue args;
- base::ListValue* id_values = new base::ListValue();
- args.Append(id_values);
- id_values->Append(new base::StringValue(base::Int64ToString(ids[i])));
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
-
- CheckGetNodesByIdReturnArgs(&sync_manager_, return_args,
- ids[i], is_detailed);
- }
- }
-
- void RunGetNodesByIdFailureTest(const char* message_name) {
- StrictMock<MockJsReplyHandler> reply_handler;
-
- base::ListValue empty_list_args;
- empty_list_args.Append(new base::ListValue());
-
- EXPECT_CALL(reply_handler,
- HandleJsReply(message_name,
- HasArgsAsList(empty_list_args)))
- .Times(6);
-
- {
- base::ListValue args;
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- args.Append(new base::ListValue());
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- base::ListValue* ids = new base::ListValue();
- args.Append(ids);
- ids->Append(new base::StringValue(std::string()));
- SendJsMessage(
- message_name, JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- base::ListValue* ids = new base::ListValue();
- args.Append(ids);
- ids->Append(new base::StringValue("nonsense"));
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- base::ListValue* ids = new base::ListValue();
- args.Append(ids);
- ids->Append(new base::StringValue("0"));
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- base::ListValue* ids = new base::ListValue();
- args.Append(ids);
- ids->Append(new base::StringValue("9999"));
- SendJsMessage(message_name,
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
- }
-};
-
-TEST_F(SyncManagerGetNodesByIdTest, GetNodeSummariesById) {
- RunGetNodesByIdTest("getNodeSummariesById", false);
-}
-
-TEST_F(SyncManagerGetNodesByIdTest, GetNodeDetailsById) {
- RunGetNodesByIdTest("getNodeDetailsById", true);
-}
-
-TEST_F(SyncManagerGetNodesByIdTest, GetNodeSummariesByIdFailure) {
- RunGetNodesByIdFailureTest("getNodeSummariesById");
-}
-
-TEST_F(SyncManagerGetNodesByIdTest, GetNodeDetailsByIdFailure) {
- RunGetNodesByIdFailureTest("getNodeDetailsById");
-}
-
-TEST_F(SyncManagerTest, GetChildNodeIds) {
- StrictMock<MockJsReplyHandler> reply_handler;
-
- JsArgList return_args;
-
- EXPECT_CALL(reply_handler,
- HandleJsReply("getChildNodeIds", _))
- .Times(1).WillRepeatedly(SaveArg<1>(&return_args));
-
- {
- base::ListValue args;
- args.Append(new base::StringValue("1"));
- SendJsMessage("getChildNodeIds",
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- EXPECT_EQ(1u, return_args.Get().GetSize());
- const base::ListValue* nodes = NULL;
- ASSERT_TRUE(return_args.Get().GetList(0, &nodes));
- ASSERT_TRUE(nodes);
- EXPECT_EQ(9u, nodes->GetSize());
-}
-
-TEST_F(SyncManagerTest, GetChildNodeIdsFailure) {
- StrictMock<MockJsReplyHandler> reply_handler;
-
- base::ListValue empty_list_args;
- empty_list_args.Append(new base::ListValue());
-
- EXPECT_CALL(reply_handler,
- HandleJsReply("getChildNodeIds",
- HasArgsAsList(empty_list_args)))
- .Times(5);
-
- {
- base::ListValue args;
- SendJsMessage("getChildNodeIds",
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- args.Append(new base::StringValue(std::string()));
- SendJsMessage(
- "getChildNodeIds", JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- args.Append(new base::StringValue("nonsense"));
- SendJsMessage("getChildNodeIds",
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- args.Append(new base::StringValue("0"));
- SendJsMessage("getChildNodeIds",
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- {
- base::ListValue args;
- args.Append(new base::StringValue("9999"));
- SendJsMessage("getChildNodeIds",
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-}
-
-TEST_F(SyncManagerTest, GetAllNodesTest) {
- StrictMock<MockJsReplyHandler> reply_handler;
- JsArgList return_args;
-
- EXPECT_CALL(reply_handler,
- HandleJsReply("getAllNodes", _))
- .Times(1).WillRepeatedly(SaveArg<1>(&return_args));
-
- {
- base::ListValue args;
- SendJsMessage("getAllNodes",
- JsArgList(&args), reply_handler.AsWeakHandle());
- }
-
- // There's not much value in verifying every attribute on every node here.
- // Most of the value of this test has already been achieved: we've verified we
- // can call the above function without crashing or leaking memory.
- //
- // Let's just check the list size and a few of its elements. Anything more
- // would make this test brittle without greatly increasing our chances of
- // catching real bugs.
-
- const base::ListValue* node_list;
- const base::DictionaryValue* first_result;
-
- // The resulting argument list should have one argument, a list of nodes.
- ASSERT_EQ(1U, return_args.Get().GetSize());
- ASSERT_TRUE(return_args.Get().GetList(0, &node_list));
-
- // The database creation logic depends on the routing info.
- // Refer to setup methods for more information.
- ModelSafeRoutingInfo routes;
- GetModelSafeRoutingInfo(&routes);
- size_t directory_size = routes.size() + 1;
-
- ASSERT_EQ(directory_size, node_list->GetSize());
- ASSERT_TRUE(node_list->GetDictionary(0, &first_result));
- EXPECT_TRUE(first_result->HasKey("ID"));
- EXPECT_TRUE(first_result->HasKey("NON_UNIQUE_NAME"));
-}
-
-// Simulate various invalidator state changes. Those should propagate
-// JS events.
-TEST_F(SyncManagerTest, OnInvalidatorStateChangeJsEvents) {
- StrictMock<MockJsEventHandler> event_handler;
-
- base::DictionaryValue enabled_details;
- enabled_details.SetString("state", "INVALIDATIONS_ENABLED");
- base::DictionaryValue credentials_rejected_details;
- credentials_rejected_details.SetString(
- "state", "INVALIDATION_CREDENTIALS_REJECTED");
- base::DictionaryValue transient_error_details;
- transient_error_details.SetString("state", "TRANSIENT_INVALIDATION_ERROR");
- base::DictionaryValue auth_error_details;
- auth_error_details.SetString("status", "CONNECTION_AUTH_ERROR");
-
- EXPECT_CALL(event_handler,
- HandleJsEvent("onNotificationStateChange",
- HasDetailsAsDictionary(enabled_details)));
-
- EXPECT_CALL(
- event_handler,
- HandleJsEvent("onNotificationStateChange",
- HasDetailsAsDictionary(credentials_rejected_details)))
- .Times(2);
-
- EXPECT_CALL(event_handler,
- HandleJsEvent("onNotificationStateChange",
- HasDetailsAsDictionary(transient_error_details)));
-
- // Test needs to simulate INVALIDATION_CREDENTIALS_REJECTED with event handler
- // attached because this is the only time when CONNECTION_AUTH_ERROR
- // notification will be generated, therefore the only chance to verify that
- // "onConnectionStatusChange" event is delivered
- SetJsEventHandler(event_handler.AsWeakHandle());
- SimulateInvalidatorStateChangeForTest(INVALIDATION_CREDENTIALS_REJECTED);
- SetJsEventHandler(WeakHandle<JsEventHandler>());
-
- SimulateInvalidatorStateChangeForTest(INVALIDATIONS_ENABLED);
- SimulateInvalidatorStateChangeForTest(INVALIDATION_CREDENTIALS_REJECTED);
- SimulateInvalidatorStateChangeForTest(TRANSIENT_INVALIDATION_ERROR);
-
- SetJsEventHandler(event_handler.AsWeakHandle());
- SimulateInvalidatorStateChangeForTest(INVALIDATIONS_ENABLED);
- SimulateInvalidatorStateChangeForTest(INVALIDATION_CREDENTIALS_REJECTED);
- SimulateInvalidatorStateChangeForTest(TRANSIENT_INVALIDATION_ERROR);
- SetJsEventHandler(WeakHandle<JsEventHandler>());
-
- SimulateInvalidatorStateChangeForTest(INVALIDATIONS_ENABLED);
- SimulateInvalidatorStateChangeForTest(INVALIDATION_CREDENTIALS_REJECTED);
- SimulateInvalidatorStateChangeForTest(TRANSIENT_INVALIDATION_ERROR);
-
- // Should trigger the replies.
- PumpLoop();
-}
-
-TEST_F(SyncManagerTest, OnIncomingNotification) {
- StrictMock<MockJsEventHandler> event_handler;
-
- const ModelTypeSet empty_model_types;
- const ModelTypeSet model_types(
- BOOKMARKS, THEMES);
-
- // Build expected_args to have a single argument with the string
- // equivalents of model_types.
- base::DictionaryValue expected_details;
- {
- base::ListValue* model_type_list = new base::ListValue();
- expected_details.SetString("source", "REMOTE_INVALIDATION");
- expected_details.Set("changedTypes", model_type_list);
- for (ModelTypeSet::Iterator it = model_types.First();
- it.Good(); it.Inc()) {
- model_type_list->Append(
- new base::StringValue(ModelTypeToString(it.Get())));
- }
- }
-
- EXPECT_CALL(event_handler,
- HandleJsEvent("onIncomingNotification",
- HasDetailsAsDictionary(expected_details)));
-
- TriggerOnIncomingNotificationForTest(empty_model_types);
- TriggerOnIncomingNotificationForTest(model_types);
-
- SetJsEventHandler(event_handler.AsWeakHandle());
- TriggerOnIncomingNotificationForTest(model_types);
- SetJsEventHandler(WeakHandle<JsEventHandler>());
-
- TriggerOnIncomingNotificationForTest(empty_model_types);
- TriggerOnIncomingNotificationForTest(model_types);
-
- // Should trigger the replies.
- PumpLoop();
-}
-
-TEST_F(SyncManagerTest, RefreshEncryptionReady) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, false));
-
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
-
- const ModelTypeSet encrypted_types = GetEncryptedTypes();
- EXPECT_TRUE(encrypted_types.Has(PASSWORDS));
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByIdLookup(GetIdForDataType(NIGORI)));
- sync_pb::NigoriSpecifics nigori = node.GetNigoriSpecifics();
- EXPECT_TRUE(nigori.has_encryption_keybag());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecrypt(nigori.encryption_keybag()));
- }
-}
-
-// Attempt to refresh encryption when nigori not downloaded.
-TEST_F(SyncManagerTest, RefreshEncryptionNotReady) {
- // Don't set up encryption (no nigori node created).
-
- // Should fail. Triggers an OnPassphraseRequired because the cryptographer
- // is not ready.
- EXPECT_CALL(encryption_observer_, OnPassphraseRequired(_, _)).Times(1);
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, false));
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
-
- const ModelTypeSet encrypted_types = GetEncryptedTypes();
- EXPECT_TRUE(encrypted_types.Has(PASSWORDS)); // Hardcoded.
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
-}
-
-// Attempt to refresh encryption when nigori is empty.
-TEST_F(SyncManagerTest, RefreshEncryptionEmptyNigori) {
- EXPECT_TRUE(SetUpEncryption(DONT_WRITE_NIGORI, DEFAULT_ENCRYPTION));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete()).Times(1);
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, false));
-
- // Should write to nigori.
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
-
- const ModelTypeSet encrypted_types = GetEncryptedTypes();
- EXPECT_TRUE(encrypted_types.Has(PASSWORDS)); // Hardcoded.
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByIdLookup(GetIdForDataType(NIGORI)));
- sync_pb::NigoriSpecifics nigori = node.GetNigoriSpecifics();
- EXPECT_TRUE(nigori.has_encryption_keybag());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecrypt(nigori.encryption_keybag()));
- }
-}
-
-TEST_F(SyncManagerTest, EncryptDataTypesWithNoData) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- EXPECT_CALL(encryption_observer_,
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- sync_manager_.GetEncryptionHandler()->EnableEncryptEverything();
- EXPECT_TRUE(EncryptEverythingEnabledForTest());
-}
-
-TEST_F(SyncManagerTest, EncryptDataTypesWithData) {
- size_t batch_size = 5;
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
-
- // Create some unencrypted unsynced data.
- int64 folder = MakeFolderWithParent(sync_manager_.GetUserShare(),
- BOOKMARKS,
- GetIdForDataType(BOOKMARKS),
- NULL);
- // First batch_size nodes are children of folder.
- size_t i;
- for (i = 0; i < batch_size; ++i) {
- MakeBookmarkWithParent(sync_manager_.GetUserShare(), folder, NULL);
- }
- // Next batch_size nodes are a different type and on their own.
- for (; i < 2*batch_size; ++i) {
- MakeNode(sync_manager_.GetUserShare(), SESSIONS,
- base::StringPrintf("%" PRIuS "", i));
- }
- // Last batch_size nodes are a third type that will not need encryption.
- for (; i < 3*batch_size; ++i) {
- MakeNode(sync_manager_.GetUserShare(), THEMES,
- base::StringPrintf("%" PRIuS "", i));
- }
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- EXPECT_TRUE(GetEncryptedTypesWithTrans(&trans).Equals(
- SyncEncryptionHandler::SensitiveTypes()));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- BOOKMARKS,
- false /* not encrypted */));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- SESSIONS,
- false /* not encrypted */));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- THEMES,
- false /* not encrypted */));
- }
-
- EXPECT_CALL(encryption_observer_,
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- sync_manager_.GetEncryptionHandler()->EnableEncryptEverything();
- EXPECT_TRUE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- EXPECT_TRUE(GetEncryptedTypesWithTrans(&trans).Equals(
- EncryptableUserTypes()));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- BOOKMARKS,
- true /* is encrypted */));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- SESSIONS,
- true /* is encrypted */));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- THEMES,
- true /* is encrypted */));
- }
-
- // Trigger's a ReEncryptEverything with new passphrase.
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_,
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase", true);
- EXPECT_TRUE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- EXPECT_TRUE(GetEncryptedTypesWithTrans(&trans).Equals(
- EncryptableUserTypes()));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- BOOKMARKS,
- true /* is encrypted */));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- SESSIONS,
- true /* is encrypted */));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- THEMES,
- true /* is encrypted */));
- }
- // Calling EncryptDataTypes with an empty encrypted types should not trigger
- // a reencryption and should just notify immediately.
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN)).Times(0);
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted()).Times(0);
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete()).Times(0);
- sync_manager_.GetEncryptionHandler()->EnableEncryptEverything();
-}
-
-// Test that when there are no pending keys and the cryptographer is not
-// initialized, we add a key based on the current GAIA password.
-// (case 1 in SyncManager::SyncInternal::SetEncryptionPassphrase)
-TEST_F(SyncManagerTest, SetInitialGaiaPass) {
- EXPECT_FALSE(SetUpEncryption(DONT_WRITE_NIGORI, UNINITIALIZED));
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase",
- false);
- EXPECT_EQ(IMPLICIT_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByTagLookup(kNigoriTag));
- sync_pb::NigoriSpecifics nigori = node.GetNigoriSpecifics();
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecrypt(nigori.encryption_keybag()));
- }
-}
-
-// Test that when there are no pending keys and we have on the old GAIA
-// password, we update and re-encrypt everything with the new GAIA password.
-// (case 1 in SyncManager::SyncInternal::SetEncryptionPassphrase)
-TEST_F(SyncManagerTest, UpdateGaiaPass) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- Cryptographer verifier(&encryptor_);
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- std::string bootstrap_token;
- cryptographer->GetBootstrapToken(&bootstrap_token);
- verifier.Bootstrap(bootstrap_token);
- }
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase",
- false);
- EXPECT_EQ(IMPLICIT_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- // Verify the default key has changed.
- sync_pb::EncryptedData encrypted;
- cryptographer->GetKeys(&encrypted);
- EXPECT_FALSE(verifier.CanDecrypt(encrypted));
- }
-}
-
-// Sets a new explicit passphrase. This should update the bootstrap token
-// and re-encrypt everything.
-// (case 2 in SyncManager::SyncInternal::SetEncryptionPassphrase)
-TEST_F(SyncManagerTest, SetPassphraseWithPassword) {
- Cryptographer verifier(&encryptor_);
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- // Store the default (soon to be old) key.
- Cryptographer* cryptographer = trans.GetCryptographer();
- std::string bootstrap_token;
- cryptographer->GetBootstrapToken(&bootstrap_token);
- verifier.Bootstrap(bootstrap_token);
-
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- WriteNode password_node(&trans);
- WriteNode::InitUniqueByCreationResult result =
- password_node.InitUniqueByCreation(PASSWORDS,
- root_node, "foo");
- EXPECT_EQ(WriteNode::INIT_SUCCESS, result);
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret");
- password_node.SetPasswordSpecifics(data);
- }
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_,
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase",
- true);
- EXPECT_EQ(CUSTOM_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- // Verify the default key has changed.
- sync_pb::EncryptedData encrypted;
- cryptographer->GetKeys(&encrypted);
- EXPECT_FALSE(verifier.CanDecrypt(encrypted));
-
- ReadNode password_node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- password_node.InitByClientTagLookup(PASSWORDS,
- "foo"));
- const sync_pb::PasswordSpecificsData& data =
- password_node.GetPasswordSpecifics();
- EXPECT_EQ("secret", data.password_value());
- }
-}
-
-// Manually set the pending keys in the cryptographer/nigori to reflect the data
-// being encrypted with a new (unprovided) GAIA password, then supply the
-// password.
-// (case 7 in SyncManager::SyncInternal::SetDecryptionPassphrase)
-TEST_F(SyncManagerTest, SupplyPendingGAIAPass) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- Cryptographer other_cryptographer(&encryptor_);
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- std::string bootstrap_token;
- cryptographer->GetBootstrapToken(&bootstrap_token);
- other_cryptographer.Bootstrap(bootstrap_token);
-
- // Now update the nigori to reflect the new keys, and update the
- // cryptographer to have pending keys.
- KeyParams params = {"localhost", "dummy", "passphrase2"};
- other_cryptographer.AddKey(params);
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByTagLookup(kNigoriTag));
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- cryptographer->SetPendingKeys(nigori.encryption_keybag());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- node.SetNigoriSpecifics(nigori);
- }
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetDecryptionPassphrase("passphrase2");
- EXPECT_EQ(IMPLICIT_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- // Verify we're encrypting with the new key.
- sync_pb::EncryptedData encrypted;
- cryptographer->GetKeys(&encrypted);
- EXPECT_TRUE(other_cryptographer.CanDecrypt(encrypted));
- }
-}
-
-// Manually set the pending keys in the cryptographer/nigori to reflect the data
-// being encrypted with an old (unprovided) GAIA password. Attempt to supply
-// the current GAIA password and verify the bootstrap token is updated. Then
-// supply the old GAIA password, and verify we re-encrypt all data with the
-// new GAIA password.
-// (cases 4 and 5 in SyncManager::SyncInternal::SetEncryptionPassphrase)
-TEST_F(SyncManagerTest, SupplyPendingOldGAIAPass) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- Cryptographer other_cryptographer(&encryptor_);
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- std::string bootstrap_token;
- cryptographer->GetBootstrapToken(&bootstrap_token);
- other_cryptographer.Bootstrap(bootstrap_token);
-
- // Now update the nigori to reflect the new keys, and update the
- // cryptographer to have pending keys.
- KeyParams params = {"localhost", "dummy", "old_gaia"};
- other_cryptographer.AddKey(params);
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByTagLookup(kNigoriTag));
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- node.SetNigoriSpecifics(nigori);
- cryptographer->SetPendingKeys(nigori.encryption_keybag());
-
- // other_cryptographer now contains all encryption keys, and is encrypting
- // with the newest gaia.
- KeyParams new_params = {"localhost", "dummy", "new_gaia"};
- other_cryptographer.AddKey(new_params);
- }
- // The bootstrap token should have been updated. Save it to ensure it's based
- // on the new GAIA password.
- std::string bootstrap_token;
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN))
- .WillOnce(SaveArg<0>(&bootstrap_token));
- EXPECT_CALL(encryption_observer_, OnPassphraseRequired(_,_));
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_gaia",
- false);
- EXPECT_EQ(IMPLICIT_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_initialized());
- EXPECT_FALSE(cryptographer->is_ready());
- // Verify we're encrypting with the new key, even though we have pending
- // keys.
- sync_pb::EncryptedData encrypted;
- other_cryptographer.GetKeys(&encrypted);
- EXPECT_TRUE(cryptographer->CanDecrypt(encrypted));
- }
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "old_gaia",
- false);
- EXPECT_EQ(IMPLICIT_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
-
- // Verify we're encrypting with the new key.
- sync_pb::EncryptedData encrypted;
- other_cryptographer.GetKeys(&encrypted);
- EXPECT_TRUE(cryptographer->CanDecrypt(encrypted));
-
- // Verify the saved bootstrap token is based on the new gaia password.
- Cryptographer temp_cryptographer(&encryptor_);
- temp_cryptographer.Bootstrap(bootstrap_token);
- EXPECT_TRUE(temp_cryptographer.CanDecrypt(encrypted));
- }
-}
-
-// Manually set the pending keys in the cryptographer/nigori to reflect the data
-// being encrypted with an explicit (unprovided) passphrase, then supply the
-// passphrase.
-// (case 9 in SyncManager::SyncInternal::SetDecryptionPassphrase)
-TEST_F(SyncManagerTest, SupplyPendingExplicitPass) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- Cryptographer other_cryptographer(&encryptor_);
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- std::string bootstrap_token;
- cryptographer->GetBootstrapToken(&bootstrap_token);
- other_cryptographer.Bootstrap(bootstrap_token);
-
- // Now update the nigori to reflect the new keys, and update the
- // cryptographer to have pending keys.
- KeyParams params = {"localhost", "dummy", "explicit"};
- other_cryptographer.AddKey(params);
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByTagLookup(kNigoriTag));
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- cryptographer->SetPendingKeys(nigori.encryption_keybag());
- EXPECT_TRUE(cryptographer->has_pending_keys());
- nigori.set_keybag_is_frozen(true);
- node.SetNigoriSpecifics(nigori);
- }
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_,
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- EXPECT_CALL(encryption_observer_, OnPassphraseRequired(_, _));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, false));
- sync_manager_.GetEncryptionHandler()->Init();
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetDecryptionPassphrase("explicit");
- EXPECT_EQ(CUSTOM_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- // Verify we're encrypting with the new key.
- sync_pb::EncryptedData encrypted;
- cryptographer->GetKeys(&encrypted);
- EXPECT_TRUE(other_cryptographer.CanDecrypt(encrypted));
- }
-}
-
-// Manually set the pending keys in the cryptographer/nigori to reflect the data
-// being encrypted with a new (unprovided) GAIA password, then supply the
-// password as a user-provided password.
-// This is the android case 7/8.
-TEST_F(SyncManagerTest, SupplyPendingGAIAPassUserProvided) {
- EXPECT_FALSE(SetUpEncryption(DONT_WRITE_NIGORI, UNINITIALIZED));
- Cryptographer other_cryptographer(&encryptor_);
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- // Now update the nigori to reflect the new keys, and update the
- // cryptographer to have pending keys.
- KeyParams params = {"localhost", "dummy", "passphrase"};
- other_cryptographer.AddKey(params);
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByTagLookup(kNigoriTag));
- sync_pb::NigoriSpecifics nigori;
- other_cryptographer.GetKeys(nigori.mutable_encryption_keybag());
- node.SetNigoriSpecifics(nigori);
- cryptographer->SetPendingKeys(nigori.encryption_keybag());
- EXPECT_FALSE(cryptographer->is_ready());
- }
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "passphrase",
- false);
- EXPECT_EQ(IMPLICIT_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- }
-}
-
-TEST_F(SyncManagerTest, SetPassphraseWithEmptyPasswordNode) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- int64 node_id = 0;
- std::string tag = "foo";
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode root_node(&trans);
- root_node.InitByRootLookup();
-
- WriteNode password_node(&trans);
- WriteNode::InitUniqueByCreationResult result =
- password_node.InitUniqueByCreation(PASSWORDS, root_node, tag);
- EXPECT_EQ(WriteNode::INIT_SUCCESS, result);
- node_id = password_node.GetId();
- }
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_,
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase",
- true);
- EXPECT_EQ(CUSTOM_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_FALSE(EncryptEverythingEnabledForTest());
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode password_node(&trans);
- EXPECT_EQ(BaseNode::INIT_FAILED_DECRYPT_IF_NECESSARY,
- password_node.InitByClientTagLookup(PASSWORDS,
- tag));
- }
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode password_node(&trans);
- EXPECT_EQ(BaseNode::INIT_FAILED_DECRYPT_IF_NECESSARY,
- password_node.InitByIdLookup(node_id));
- }
-}
-
-TEST_F(SyncManagerTest, NudgeDelayTest) {
- EXPECT_EQ(sync_manager_.GetNudgeDelayTimeDelta(BOOKMARKS),
- base::TimeDelta::FromMilliseconds(
- SyncManagerImpl::GetDefaultNudgeDelay()));
-
- EXPECT_EQ(sync_manager_.GetNudgeDelayTimeDelta(AUTOFILL),
- base::TimeDelta::FromSeconds(
- kDefaultShortPollIntervalSeconds));
-
- EXPECT_EQ(sync_manager_.GetNudgeDelayTimeDelta(PREFERENCES),
- base::TimeDelta::FromMilliseconds(
- SyncManagerImpl::GetPreferencesNudgeDelay()));
-}
-
-// Friended by WriteNode, so can't be in an anonymouse namespace.
-TEST_F(SyncManagerTest, EncryptBookmarksWithLegacyData) {
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- std::string title;
- SyncAPINameToServerName("Google", &title);
- std::string url = "http://www.google.com";
- std::string raw_title2 = ".."; // An invalid cosmo title.
- std::string title2;
- SyncAPINameToServerName(raw_title2, &title2);
- std::string url2 = "http://www.bla.com";
-
- // Create a bookmark using the legacy format.
- int64 node_id1 = MakeNode(sync_manager_.GetUserShare(),
- BOOKMARKS,
- "testtag");
- int64 node_id2 = MakeNode(sync_manager_.GetUserShare(),
- BOOKMARKS,
- "testtag2");
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(node_id1));
-
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url(url);
- node.SetEntitySpecifics(entity_specifics);
-
- // Set the old style title.
- syncable::MutableEntry* node_entry = node.entry_;
- node_entry->PutNonUniqueName(title);
-
- WriteNode node2(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node2.InitByIdLookup(node_id2));
-
- sync_pb::EntitySpecifics entity_specifics2;
- entity_specifics2.mutable_bookmark()->set_url(url2);
- node2.SetEntitySpecifics(entity_specifics2);
-
- // Set the old style title.
- syncable::MutableEntry* node_entry2 = node2.entry_;
- node_entry2->PutNonUniqueName(title2);
- }
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(node_id1));
- EXPECT_EQ(BOOKMARKS, node.GetModelType());
- EXPECT_EQ(title, node.GetTitle());
- EXPECT_EQ(title, node.GetBookmarkSpecifics().title());
- EXPECT_EQ(url, node.GetBookmarkSpecifics().url());
-
- ReadNode node2(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node2.InitByIdLookup(node_id2));
- EXPECT_EQ(BOOKMARKS, node2.GetModelType());
- // We should de-canonicalize the title in GetTitle(), but the title in the
- // specifics should be stored in the server legal form.
- EXPECT_EQ(raw_title2, node2.GetTitle());
- EXPECT_EQ(title2, node2.GetBookmarkSpecifics().title());
- EXPECT_EQ(url2, node2.GetBookmarkSpecifics().url());
- }
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- BOOKMARKS,
- false /* not encrypted */));
- }
-
- EXPECT_CALL(encryption_observer_,
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- sync_manager_.GetEncryptionHandler()->EnableEncryptEverything();
- EXPECT_TRUE(EncryptEverythingEnabledForTest());
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- EXPECT_TRUE(GetEncryptedTypesWithTrans(&trans).Equals(
- EncryptableUserTypes()));
- EXPECT_TRUE(syncable::VerifyDataTypeEncryptionForTest(
- trans.GetWrappedTrans(),
- BOOKMARKS,
- true /* is encrypted */));
-
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(node_id1));
- EXPECT_EQ(BOOKMARKS, node.GetModelType());
- EXPECT_EQ(title, node.GetTitle());
- EXPECT_EQ(title, node.GetBookmarkSpecifics().title());
- EXPECT_EQ(url, node.GetBookmarkSpecifics().url());
-
- ReadNode node2(&trans);
- EXPECT_EQ(BaseNode::INIT_OK, node2.InitByIdLookup(node_id2));
- EXPECT_EQ(BOOKMARKS, node2.GetModelType());
- // We should de-canonicalize the title in GetTitle(), but the title in the
- // specifics should be stored in the server legal form.
- EXPECT_EQ(raw_title2, node2.GetTitle());
- EXPECT_EQ(title2, node2.GetBookmarkSpecifics().title());
- EXPECT_EQ(url2, node2.GetBookmarkSpecifics().url());
- }
-}
-
-// Create a bookmark and set the title/url, then verify the data was properly
-// set. This replicates the unique way bookmarks have of creating sync nodes.
-// See BookmarkChangeProcessor::PlaceSyncNode(..).
-TEST_F(SyncManagerTest, CreateLocalBookmark) {
- std::string title = "title";
- std::string url = "url";
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode bookmark_root(&trans);
- ASSERT_EQ(BaseNode::INIT_OK,
- bookmark_root.InitByTagLookup(ModelTypeToRootTag(BOOKMARKS)));
- WriteNode node(&trans);
- ASSERT_TRUE(node.InitBookmarkByCreation(bookmark_root, NULL));
- node.SetIsFolder(false);
- node.SetTitle(UTF8ToWide(title));
-
- sync_pb::BookmarkSpecifics bookmark_specifics(node.GetBookmarkSpecifics());
- bookmark_specifics.set_url(url);
- node.SetBookmarkSpecifics(bookmark_specifics);
- }
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode bookmark_root(&trans);
- ASSERT_EQ(BaseNode::INIT_OK,
- bookmark_root.InitByTagLookup(ModelTypeToRootTag(BOOKMARKS)));
- int64 child_id = bookmark_root.GetFirstChildId();
-
- ReadNode node(&trans);
- ASSERT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(child_id));
- EXPECT_FALSE(node.GetIsFolder());
- EXPECT_EQ(title, node.GetTitle());
- EXPECT_EQ(url, node.GetBookmarkSpecifics().url());
- }
-}
-
-// Verifies WriteNode::UpdateEntryWithEncryption does not make unnecessary
-// changes.
-TEST_F(SyncManagerTest, UpdateEntryWithEncryption) {
- std::string client_tag = "title";
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url("url");
- entity_specifics.mutable_bookmark()->set_title("title");
- MakeServerNode(sync_manager_.GetUserShare(), BOOKMARKS, client_tag,
- syncable::GenerateSyncableHash(BOOKMARKS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
- // Manually change to the same data. Should not set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetEntitySpecifics(entity_specifics);
- }
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Encrypt the datatatype, should set is_unsynced.
- EXPECT_CALL(encryption_observer_,
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, FULL_ENCRYPTION));
-
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, true));
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- specifics.encrypted()));
- }
- EXPECT_TRUE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Set a new passphrase. Should set is_unsynced.
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_,
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase",
- true);
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->is_ready());
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- specifics.encrypted()));
- }
- EXPECT_TRUE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Force a re-encrypt everything. Should not set is_unsynced.
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, true));
-
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
-
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- specifics.encrypted()));
- }
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Manually change to the same data. Should not set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetEntitySpecifics(entity_specifics);
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_FALSE(node_entry->GetIsUnsynced());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- specifics.encrypted()));
- }
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Manually change to different data. Should set is_unsynced.
- {
- entity_specifics.mutable_bookmark()->set_url("url2");
- entity_specifics.mutable_bookmark()->set_title("title2");
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetEntitySpecifics(entity_specifics);
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_TRUE(node_entry->GetIsUnsynced());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- Cryptographer* cryptographer = trans.GetCryptographer();
- EXPECT_TRUE(cryptographer->CanDecryptUsingDefaultKey(
- specifics.encrypted()));
- }
-}
-
-// Passwords have their own handling for encryption. Verify it does not result
-// in unnecessary writes via SetEntitySpecifics.
-TEST_F(SyncManagerTest, UpdatePasswordSetEntitySpecificsNoChange) {
- std::string client_tag = "title";
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- sync_pb::EntitySpecifics entity_specifics;
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret");
- cryptographer->Encrypt(
- data,
- entity_specifics.mutable_password()->
- mutable_encrypted());
- }
- MakeServerNode(sync_manager_.GetUserShare(), PASSWORDS, client_tag,
- syncable::GenerateSyncableHash(PASSWORDS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-
- // Manually change to the same data via SetEntitySpecifics. Should not set
- // is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PASSWORDS, client_tag));
- node.SetEntitySpecifics(entity_specifics);
- }
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-}
-
-// Passwords have their own handling for encryption. Verify it does not result
-// in unnecessary writes via SetPasswordSpecifics.
-TEST_F(SyncManagerTest, UpdatePasswordSetPasswordSpecifics) {
- std::string client_tag = "title";
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- sync_pb::EntitySpecifics entity_specifics;
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret");
- cryptographer->Encrypt(
- data,
- entity_specifics.mutable_password()->
- mutable_encrypted());
- }
- MakeServerNode(sync_manager_.GetUserShare(), PASSWORDS, client_tag,
- syncable::GenerateSyncableHash(PASSWORDS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-
- // Manually change to the same data via SetPasswordSpecifics. Should not set
- // is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PASSWORDS, client_tag));
- node.SetPasswordSpecifics(node.GetPasswordSpecifics());
- }
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-
- // Manually change to different data. Should set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PASSWORDS, client_tag));
- Cryptographer* cryptographer = trans.GetCryptographer();
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret2");
- cryptographer->Encrypt(
- data,
- entity_specifics.mutable_password()->mutable_encrypted());
- node.SetPasswordSpecifics(data);
- const syncable::Entry* node_entry = node.GetEntry();
- EXPECT_TRUE(node_entry->GetIsUnsynced());
- }
-}
-
-// Passwords have their own handling for encryption. Verify setting a new
-// passphrase updates the data.
-TEST_F(SyncManagerTest, UpdatePasswordNewPassphrase) {
- std::string client_tag = "title";
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- sync_pb::EntitySpecifics entity_specifics;
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret");
- cryptographer->Encrypt(
- data,
- entity_specifics.mutable_password()->mutable_encrypted());
- }
- MakeServerNode(sync_manager_.GetUserShare(), PASSWORDS, client_tag,
- syncable::GenerateSyncableHash(PASSWORDS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-
- // Set a new passphrase. Should set is_unsynced.
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- EXPECT_CALL(encryption_observer_,
- OnBootstrapTokenUpdated(_, PASSPHRASE_BOOTSTRAP_TOKEN));
- EXPECT_CALL(encryption_observer_, OnPassphraseAccepted());
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_,
- OnPassphraseTypeChanged(CUSTOM_PASSPHRASE, _));
- sync_manager_.GetEncryptionHandler()->SetEncryptionPassphrase(
- "new_passphrase",
- true);
- EXPECT_EQ(CUSTOM_PASSPHRASE,
- sync_manager_.GetEncryptionHandler()->GetPassphraseType());
- EXPECT_TRUE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-}
-
-// Passwords have their own handling for encryption. Verify it does not result
-// in unnecessary writes via ReencryptEverything.
-TEST_F(SyncManagerTest, UpdatePasswordReencryptEverything) {
- std::string client_tag = "title";
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- sync_pb::EntitySpecifics entity_specifics;
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* cryptographer = trans.GetCryptographer();
- sync_pb::PasswordSpecificsData data;
- data.set_password_value("secret");
- cryptographer->Encrypt(
- data,
- entity_specifics.mutable_password()->mutable_encrypted());
- }
- MakeServerNode(sync_manager_.GetUserShare(), PASSWORDS, client_tag,
- syncable::GenerateSyncableHash(PASSWORDS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-
- // Force a re-encrypt everything. Should not set is_unsynced.
- testing::Mock::VerifyAndClearExpectations(&encryption_observer_);
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, false));
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
- EXPECT_FALSE(ResetUnsyncedEntry(PASSWORDS, client_tag));
-}
-
-// Verify SetTitle(..) doesn't unnecessarily set IS_UNSYNCED for bookmarks
-// when we write the same data, but does set it when we write new data.
-TEST_F(SyncManagerTest, SetBookmarkTitle) {
- std::string client_tag = "title";
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url("url");
- entity_specifics.mutable_bookmark()->set_title("title");
- MakeServerNode(sync_manager_.GetUserShare(), BOOKMARKS, client_tag,
- syncable::GenerateSyncableHash(BOOKMARKS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Manually change to the same title. Should not set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetTitle(UTF8ToWide(client_tag));
- }
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Manually change to new title. Should set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetTitle(UTF8ToWide("title2"));
- }
- EXPECT_TRUE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-}
-
-// Verify SetTitle(..) doesn't unnecessarily set IS_UNSYNCED for encrypted
-// bookmarks when we write the same data, but does set it when we write new
-// data.
-TEST_F(SyncManagerTest, SetBookmarkTitleWithEncryption) {
- std::string client_tag = "title";
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url("url");
- entity_specifics.mutable_bookmark()->set_title("title");
- MakeServerNode(sync_manager_.GetUserShare(), BOOKMARKS, client_tag,
- syncable::GenerateSyncableHash(BOOKMARKS,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Encrypt the datatatype, should set is_unsynced.
- EXPECT_CALL(encryption_observer_,
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, FULL_ENCRYPTION));
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, true));
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
- EXPECT_TRUE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Manually change to the same title. Should not set is_unsynced.
- // NON_UNIQUE_NAME should be kEncryptedString.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetTitle(UTF8ToWide(client_tag));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- }
- EXPECT_FALSE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-
- // Manually change to new title. Should set is_unsynced. NON_UNIQUE_NAME
- // should still be kEncryptedString.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- node.SetTitle(UTF8ToWide("title2"));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- }
- EXPECT_TRUE(ResetUnsyncedEntry(BOOKMARKS, client_tag));
-}
-
-// Verify SetTitle(..) doesn't unnecessarily set IS_UNSYNCED for non-bookmarks
-// when we write the same data, but does set it when we write new data.
-TEST_F(SyncManagerTest, SetNonBookmarkTitle) {
- std::string client_tag = "title";
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_preference()->set_name("name");
- entity_specifics.mutable_preference()->set_value("value");
- MakeServerNode(sync_manager_.GetUserShare(),
- PREFERENCES,
- client_tag,
- syncable::GenerateSyncableHash(PREFERENCES,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, client_tag));
-
- // Manually change to the same title. Should not set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, client_tag));
- node.SetTitle(UTF8ToWide(client_tag));
- }
- EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, client_tag));
-
- // Manually change to new title. Should set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, client_tag));
- node.SetTitle(UTF8ToWide("title2"));
- }
- EXPECT_TRUE(ResetUnsyncedEntry(PREFERENCES, client_tag));
-}
-
-// Verify SetTitle(..) doesn't unnecessarily set IS_UNSYNCED for encrypted
-// non-bookmarks when we write the same data or when we write new data
-// data (should remained kEncryptedString).
-TEST_F(SyncManagerTest, SetNonBookmarkTitleWithEncryption) {
- std::string client_tag = "title";
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_preference()->set_name("name");
- entity_specifics.mutable_preference()->set_value("value");
- MakeServerNode(sync_manager_.GetUserShare(),
- PREFERENCES,
- client_tag,
- syncable::GenerateSyncableHash(PREFERENCES,
- client_tag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, client_tag));
-
- // Encrypt the datatatype, should set is_unsynced.
- EXPECT_CALL(encryption_observer_,
- OnEncryptedTypesChanged(
- HasModelTypes(EncryptableUserTypes()), true));
- EXPECT_CALL(encryption_observer_, OnEncryptionComplete());
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, FULL_ENCRYPTION));
- EXPECT_CALL(encryption_observer_, OnCryptographerStateChanged(_));
- EXPECT_CALL(encryption_observer_, OnEncryptedTypesChanged(_, true));
- sync_manager_.GetEncryptionHandler()->Init();
- PumpLoop();
- EXPECT_TRUE(ResetUnsyncedEntry(PREFERENCES, client_tag));
-
- // Manually change to the same title. Should not set is_unsynced.
- // NON_UNIQUE_NAME should be kEncryptedString.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, client_tag));
- node.SetTitle(UTF8ToWide(client_tag));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- }
- EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, client_tag));
-
- // Manually change to new title. Should not set is_unsynced because the
- // NON_UNIQUE_NAME should still be kEncryptedString.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, client_tag));
- node.SetTitle(UTF8ToWide("title2"));
- const syncable::Entry* node_entry = node.GetEntry();
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- EXPECT_FALSE(node_entry->GetIsUnsynced());
- }
-}
-
-// Ensure that titles are truncated to 255 bytes, and attempting to reset
-// them to their longer version does not set IS_UNSYNCED.
-TEST_F(SyncManagerTest, SetLongTitle) {
- const int kNumChars = 512;
- const std::string kClientTag = "tag";
- std::string title(kNumChars, '0');
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_preference()->set_name("name");
- entity_specifics.mutable_preference()->set_value("value");
- MakeServerNode(sync_manager_.GetUserShare(),
- PREFERENCES,
- "short_title",
- syncable::GenerateSyncableHash(PREFERENCES,
- kClientTag),
- entity_specifics);
- // New node shouldn't start off unsynced.
- EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
-
- // Manually change to the long title. Should set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, kClientTag));
- node.SetTitle(UTF8ToWide(title));
- EXPECT_EQ(node.GetTitle(), title.substr(0, 255));
- }
- EXPECT_TRUE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
-
- // Manually change to the same title. Should not set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, kClientTag));
- node.SetTitle(UTF8ToWide(title));
- EXPECT_EQ(node.GetTitle(), title.substr(0, 255));
- }
- EXPECT_FALSE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
-
- // Manually change to new title. Should set is_unsynced.
- {
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(PREFERENCES, kClientTag));
- node.SetTitle(UTF8ToWide("title2"));
- }
- EXPECT_TRUE(ResetUnsyncedEntry(PREFERENCES, kClientTag));
-}
-
-// Create an encrypted entry when the cryptographer doesn't think the type is
-// marked for encryption. Ensure reads/writes don't break and don't unencrypt
-// the data.
-TEST_F(SyncManagerTest, SetPreviouslyEncryptedSpecifics) {
- std::string client_tag = "tag";
- std::string url = "url";
- std::string url2 = "new_url";
- std::string title = "title";
- sync_pb::EntitySpecifics entity_specifics;
- EXPECT_TRUE(SetUpEncryption(WRITE_TO_NIGORI, DEFAULT_ENCRYPTION));
- {
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- Cryptographer* crypto = trans.GetCryptographer();
- sync_pb::EntitySpecifics bm_specifics;
- bm_specifics.mutable_bookmark()->set_title("title");
- bm_specifics.mutable_bookmark()->set_url("url");
- sync_pb::EncryptedData encrypted;
- crypto->Encrypt(bm_specifics, &encrypted);
- entity_specifics.mutable_encrypted()->CopyFrom(encrypted);
- AddDefaultFieldValue(BOOKMARKS, &entity_specifics);
- }
- MakeServerNode(sync_manager_.GetUserShare(), BOOKMARKS, client_tag,
- syncable::GenerateSyncableHash(BOOKMARKS,
- client_tag),
- entity_specifics);
-
- {
- // Verify the data.
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- EXPECT_EQ(title, node.GetTitle());
- EXPECT_EQ(url, node.GetBookmarkSpecifics().url());
- }
-
- {
- // Overwrite the url (which overwrites the specifics).
- WriteTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- WriteNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
-
- sync_pb::BookmarkSpecifics bookmark_specifics(node.GetBookmarkSpecifics());
- bookmark_specifics.set_url(url2);
- node.SetBookmarkSpecifics(bookmark_specifics);
- }
-
- {
- // Verify it's still encrypted and it has the most recent url.
- ReadTransaction trans(FROM_HERE, sync_manager_.GetUserShare());
- ReadNode node(&trans);
- EXPECT_EQ(BaseNode::INIT_OK,
- node.InitByClientTagLookup(BOOKMARKS, client_tag));
- EXPECT_EQ(title, node.GetTitle());
- EXPECT_EQ(url2, node.GetBookmarkSpecifics().url());
- const syncable::Entry* node_entry = node.GetEntry();
- EXPECT_EQ(kEncryptedString, node_entry->GetNonUniqueName());
- const sync_pb::EntitySpecifics& specifics = node_entry->GetSpecifics();
- EXPECT_TRUE(specifics.has_encrypted());
- }
-}
-
-// Verify transaction version of a model type is incremented when node of
-// that type is updated.
-TEST_F(SyncManagerTest, IncrementTransactionVersion) {
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
-
- {
- ReadTransaction read_trans(FROM_HERE, sync_manager_.GetUserShare());
- for (ModelSafeRoutingInfo::iterator i = routing_info.begin();
- i != routing_info.end(); ++i) {
- // Transaction version is incremented when SyncManagerTest::SetUp()
- // creates a node of each type.
- EXPECT_EQ(1,
- sync_manager_.GetUserShare()->directory->
- GetTransactionVersion(i->first));
- }
- }
-
- // Create bookmark node to increment transaction version of bookmark model.
- std::string client_tag = "title";
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->set_url("url");
- entity_specifics.mutable_bookmark()->set_title("title");
- MakeServerNode(sync_manager_.GetUserShare(), BOOKMARKS, client_tag,
- syncable::GenerateSyncableHash(BOOKMARKS,
- client_tag),
- entity_specifics);
-
- {
- ReadTransaction read_trans(FROM_HERE, sync_manager_.GetUserShare());
- for (ModelSafeRoutingInfo::iterator i = routing_info.begin();
- i != routing_info.end(); ++i) {
- EXPECT_EQ(i->first == BOOKMARKS ? 2 : 1,
- sync_manager_.GetUserShare()->directory->
- GetTransactionVersion(i->first));
- }
- }
-}
-
-class MockSyncScheduler : public FakeSyncScheduler {
- public:
- MockSyncScheduler() : FakeSyncScheduler() {}
- virtual ~MockSyncScheduler() {}
-
- MOCK_METHOD1(Start, void(SyncScheduler::Mode));
- MOCK_METHOD1(ScheduleConfiguration, void(const ConfigurationParams&));
-};
-
-class ComponentsFactory : public TestInternalComponentsFactory {
- public:
- ComponentsFactory(const Switches& switches,
- SyncScheduler* scheduler_to_use,
- sessions::SyncSessionContext** session_context)
- : TestInternalComponentsFactory(switches, syncer::STORAGE_IN_MEMORY),
- scheduler_to_use_(scheduler_to_use),
- session_context_(session_context) {}
- virtual ~ComponentsFactory() {}
-
- virtual scoped_ptr<SyncScheduler> BuildScheduler(
- const std::string& name,
- sessions::SyncSessionContext* context,
- CancelationSignal* stop_handle) OVERRIDE {
- *session_context_ = context;
- return scheduler_to_use_.Pass();
- }
-
- private:
- scoped_ptr<SyncScheduler> scheduler_to_use_;
- sessions::SyncSessionContext** session_context_;
-};
-
-class SyncManagerTestWithMockScheduler : public SyncManagerTest {
- public:
- SyncManagerTestWithMockScheduler() : scheduler_(NULL) {}
- virtual InternalComponentsFactory* GetFactory() OVERRIDE {
- scheduler_ = new MockSyncScheduler();
- return new ComponentsFactory(GetSwitches(), scheduler_, &session_context_);
- }
-
- MockSyncScheduler* scheduler() { return scheduler_; }
- sessions::SyncSessionContext* session_context() {
- return session_context_;
- }
-
- private:
- MockSyncScheduler* scheduler_;
- sessions::SyncSessionContext* session_context_;
-};
-
-// Test that the configuration params are properly created and sent to
-// ScheduleConfigure. No callback should be invoked. Any disabled datatypes
-// should be purged.
-TEST_F(SyncManagerTestWithMockScheduler, BasicConfiguration) {
- ConfigureReason reason = CONFIGURE_REASON_RECONFIGURATION;
- ModelTypeSet types_to_download(BOOKMARKS, PREFERENCES);
- ModelSafeRoutingInfo new_routing_info;
- GetModelSafeRoutingInfo(&new_routing_info);
- ModelTypeSet enabled_types = GetRoutingInfoTypes(new_routing_info);
- ModelTypeSet disabled_types = Difference(ModelTypeSet::All(), enabled_types);
-
- ConfigurationParams params;
- EXPECT_CALL(*scheduler(), Start(SyncScheduler::CONFIGURATION_MODE));
- EXPECT_CALL(*scheduler(), ScheduleConfiguration(_)).
- WillOnce(SaveArg<0>(&params));
-
- // Set data for all types.
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- SetProgressMarkerForType(iter.Get(), true);
- }
-
- CallbackCounter ready_task_counter, retry_task_counter;
- sync_manager_.ConfigureSyncer(
- reason,
- types_to_download,
- disabled_types,
- ModelTypeSet(),
- ModelTypeSet(),
- new_routing_info,
- base::Bind(&CallbackCounter::Callback,
- base::Unretained(&ready_task_counter)),
- base::Bind(&CallbackCounter::Callback,
- base::Unretained(&retry_task_counter)));
- EXPECT_EQ(0, ready_task_counter.times_called());
- EXPECT_EQ(0, retry_task_counter.times_called());
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- params.source);
- EXPECT_TRUE(types_to_download.Equals(params.types_to_download));
- EXPECT_EQ(new_routing_info, params.routing_info);
-
- // Verify all the disabled types were purged.
- EXPECT_TRUE(sync_manager_.InitialSyncEndedTypes().Equals(
- enabled_types));
- EXPECT_TRUE(sync_manager_.GetTypesWithEmptyProgressMarkerToken(
- ModelTypeSet::All()).Equals(disabled_types));
-}
-
-// Test that on a reconfiguration (configuration where the session context
-// already has routing info), only those recently disabled types are purged.
-TEST_F(SyncManagerTestWithMockScheduler, ReConfiguration) {
- ConfigureReason reason = CONFIGURE_REASON_RECONFIGURATION;
- ModelTypeSet types_to_download(BOOKMARKS, PREFERENCES);
- ModelTypeSet disabled_types = ModelTypeSet(THEMES, SESSIONS);
- ModelSafeRoutingInfo old_routing_info;
- ModelSafeRoutingInfo new_routing_info;
- GetModelSafeRoutingInfo(&old_routing_info);
- new_routing_info = old_routing_info;
- new_routing_info.erase(THEMES);
- new_routing_info.erase(SESSIONS);
- ModelTypeSet enabled_types = GetRoutingInfoTypes(new_routing_info);
-
- ConfigurationParams params;
- EXPECT_CALL(*scheduler(), Start(SyncScheduler::CONFIGURATION_MODE));
- EXPECT_CALL(*scheduler(), ScheduleConfiguration(_)).
- WillOnce(SaveArg<0>(&params));
-
- // Set data for all types except those recently disabled (so we can verify
- // only those recently disabled are purged) .
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- if (!disabled_types.Has(iter.Get())) {
- SetProgressMarkerForType(iter.Get(), true);
- } else {
- SetProgressMarkerForType(iter.Get(), false);
- }
- }
-
- // Set the context to have the old routing info.
- session_context()->set_routing_info(old_routing_info);
-
- CallbackCounter ready_task_counter, retry_task_counter;
- sync_manager_.ConfigureSyncer(
- reason,
- types_to_download,
- ModelTypeSet(),
- ModelTypeSet(),
- ModelTypeSet(),
- new_routing_info,
- base::Bind(&CallbackCounter::Callback,
- base::Unretained(&ready_task_counter)),
- base::Bind(&CallbackCounter::Callback,
- base::Unretained(&retry_task_counter)));
- EXPECT_EQ(0, ready_task_counter.times_called());
- EXPECT_EQ(0, retry_task_counter.times_called());
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::RECONFIGURATION,
- params.source);
- EXPECT_TRUE(types_to_download.Equals(params.types_to_download));
- EXPECT_EQ(new_routing_info, params.routing_info);
-
- // Verify only the recently disabled types were purged.
- EXPECT_TRUE(sync_manager_.GetTypesWithEmptyProgressMarkerToken(
- ProtocolTypes()).Equals(disabled_types));
-}
-
-// Test that PurgePartiallySyncedTypes purges only those types that have not
-// fully completed their initial download and apply.
-TEST_F(SyncManagerTest, PurgePartiallySyncedTypes) {
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
- ModelTypeSet enabled_types = GetRoutingInfoTypes(routing_info);
-
- UserShare* share = sync_manager_.GetUserShare();
-
- // The test harness automatically initializes all types in the routing info.
- // Check that autofill is not among them.
- ASSERT_FALSE(enabled_types.Has(AUTOFILL));
-
- // Further ensure that the test harness did not create its root node.
- {
- syncable::ReadTransaction trans(FROM_HERE, share->directory.get());
- syncable::Entry autofill_root_node(&trans, syncable::GET_BY_SERVER_TAG,
- ModelTypeToRootTag(AUTOFILL));
- ASSERT_FALSE(autofill_root_node.good());
- }
-
- // One more redundant check.
- ASSERT_FALSE(sync_manager_.InitialSyncEndedTypes().Has(AUTOFILL));
-
- // Give autofill a progress marker.
- sync_pb::DataTypeProgressMarker autofill_marker;
- autofill_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(AUTOFILL));
- autofill_marker.set_token("token");
- share->directory->SetDownloadProgress(AUTOFILL, autofill_marker);
-
- // Also add a pending autofill root node update from the server.
- TestEntryFactory factory_(share->directory.get());
- int autofill_meta = factory_.CreateUnappliedRootNode(AUTOFILL);
-
- // Preferences is an enabled type. Check that the harness initialized it.
- ASSERT_TRUE(enabled_types.Has(PREFERENCES));
- ASSERT_TRUE(sync_manager_.InitialSyncEndedTypes().Has(PREFERENCES));
-
- // Give preferencse a progress marker.
- sync_pb::DataTypeProgressMarker prefs_marker;
- prefs_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(PREFERENCES));
- prefs_marker.set_token("token");
- share->directory->SetDownloadProgress(PREFERENCES, prefs_marker);
-
- // Add a fully synced preferences node under the root.
- std::string pref_client_tag = "prefABC";
- std::string pref_hashed_tag = "hashXYZ";
- sync_pb::EntitySpecifics pref_specifics;
- AddDefaultFieldValue(PREFERENCES, &pref_specifics);
- int pref_meta = MakeServerNode(
- share, PREFERENCES, pref_client_tag, pref_hashed_tag, pref_specifics);
-
- // And now, the purge.
- EXPECT_TRUE(sync_manager_.PurgePartiallySyncedTypes());
-
- // Ensure that autofill lost its progress marker, but preferences did not.
- ModelTypeSet empty_tokens =
- sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All());
- EXPECT_TRUE(empty_tokens.Has(AUTOFILL));
- EXPECT_FALSE(empty_tokens.Has(PREFERENCES));
-
- // Ensure that autofill lots its node, but preferences did not.
- {
- syncable::ReadTransaction trans(FROM_HERE, share->directory.get());
- syncable::Entry autofill_node(&trans, GET_BY_HANDLE, autofill_meta);
- syncable::Entry pref_node(&trans, GET_BY_HANDLE, pref_meta);
- EXPECT_FALSE(autofill_node.good());
- EXPECT_TRUE(pref_node.good());
- }
-}
-
-// Test CleanupDisabledTypes properly purges all disabled types as specified
-// by the previous and current enabled params.
-TEST_F(SyncManagerTest, PurgeDisabledTypes) {
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
- ModelTypeSet enabled_types = GetRoutingInfoTypes(routing_info);
- ModelTypeSet disabled_types = Difference(ModelTypeSet::All(), enabled_types);
-
- // The harness should have initialized the enabled_types for us.
- EXPECT_TRUE(enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
-
- // Set progress markers for all types.
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- SetProgressMarkerForType(iter.Get(), true);
- }
-
- // Verify all the enabled types remain after cleanup, and all the disabled
- // types were purged.
- sync_manager_.PurgeDisabledTypes(disabled_types,
- ModelTypeSet(),
- ModelTypeSet());
- EXPECT_TRUE(enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
- EXPECT_TRUE(disabled_types.Equals(
- sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All())));
-
- // Disable some more types.
- disabled_types.Put(BOOKMARKS);
- disabled_types.Put(PREFERENCES);
- ModelTypeSet new_enabled_types =
- Difference(ModelTypeSet::All(), disabled_types);
-
- // Verify only the non-disabled types remain after cleanup.
- sync_manager_.PurgeDisabledTypes(disabled_types,
- ModelTypeSet(),
- ModelTypeSet());
- EXPECT_TRUE(new_enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
- EXPECT_TRUE(disabled_types.Equals(
- sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All())));
-}
-
-// Test PurgeDisabledTypes properly unapplies types by deleting their local data
-// and preserving their server data and progress marker.
-TEST_F(SyncManagerTest, PurgeUnappliedTypes) {
- ModelSafeRoutingInfo routing_info;
- GetModelSafeRoutingInfo(&routing_info);
- ModelTypeSet unapplied_types = ModelTypeSet(BOOKMARKS, PREFERENCES);
- ModelTypeSet enabled_types = GetRoutingInfoTypes(routing_info);
- ModelTypeSet disabled_types = Difference(ModelTypeSet::All(), enabled_types);
-
- // The harness should have initialized the enabled_types for us.
- EXPECT_TRUE(enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
-
- // Set progress markers for all types.
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- SetProgressMarkerForType(iter.Get(), true);
- }
-
- // Add the following kinds of items:
- // 1. Fully synced preference.
- // 2. Locally created preference, server unknown, unsynced
- // 3. Locally deleted preference, server known, unsynced
- // 4. Server deleted preference, locally known.
- // 5. Server created preference, locally unknown, unapplied.
- // 6. A fully synced bookmark (no unique_client_tag).
- UserShare* share = sync_manager_.GetUserShare();
- sync_pb::EntitySpecifics pref_specifics;
- AddDefaultFieldValue(PREFERENCES, &pref_specifics);
- sync_pb::EntitySpecifics bm_specifics;
- AddDefaultFieldValue(BOOKMARKS, &bm_specifics);
- int pref1_meta = MakeServerNode(
- share, PREFERENCES, "pref1", "hash1", pref_specifics);
- int64 pref2_meta = MakeNode(share, PREFERENCES, "pref2");
- int pref3_meta = MakeServerNode(
- share, PREFERENCES, "pref3", "hash3", pref_specifics);
- int pref4_meta = MakeServerNode(
- share, PREFERENCES, "pref4", "hash4", pref_specifics);
- int pref5_meta = MakeServerNode(
- share, PREFERENCES, "pref5", "hash5", pref_specifics);
- int bookmark_meta = MakeServerNode(
- share, BOOKMARKS, "bookmark", "", bm_specifics);
-
- {
- syncable::WriteTransaction trans(FROM_HERE,
- syncable::SYNCER,
- share->directory.get());
- // Pref's 1 and 2 are already set up properly.
- // Locally delete pref 3.
- syncable::MutableEntry pref3(&trans, GET_BY_HANDLE, pref3_meta);
- pref3.PutIsDel(true);
- pref3.PutIsUnsynced(true);
- // Delete pref 4 at the server.
- syncable::MutableEntry pref4(&trans, GET_BY_HANDLE, pref4_meta);
- pref4.PutServerIsDel(true);
- pref4.PutIsUnappliedUpdate(true);
- pref4.PutServerVersion(2);
- // Pref 5 is an new unapplied update.
- syncable::MutableEntry pref5(&trans, GET_BY_HANDLE, pref5_meta);
- pref5.PutIsUnappliedUpdate(true);
- pref5.PutIsDel(true);
- pref5.PutBaseVersion(-1);
- // Bookmark is already set up properly
- }
-
- // Take a snapshot to clear all the dirty bits.
- share->directory.get()->SaveChanges();
-
- // Now request a purge for the unapplied types.
- disabled_types.PutAll(unapplied_types);
- sync_manager_.PurgeDisabledTypes(disabled_types,
- ModelTypeSet(),
- unapplied_types);
-
- // Verify the unapplied types still have progress markers and initial sync
- // ended after cleanup.
- EXPECT_TRUE(sync_manager_.InitialSyncEndedTypes().HasAll(unapplied_types));
- EXPECT_TRUE(
- sync_manager_.GetTypesWithEmptyProgressMarkerToken(unapplied_types).
- Empty());
-
- // Ensure the items were unapplied as necessary.
- {
- syncable::ReadTransaction trans(FROM_HERE, share->directory.get());
- syncable::Entry pref_node(&trans, GET_BY_HANDLE, pref1_meta);
- ASSERT_TRUE(pref_node.good());
- EXPECT_TRUE(pref_node.GetKernelCopy().is_dirty());
- EXPECT_FALSE(pref_node.GetIsUnsynced());
- EXPECT_TRUE(pref_node.GetIsUnappliedUpdate());
- EXPECT_TRUE(pref_node.GetIsDel());
- EXPECT_GT(pref_node.GetServerVersion(), 0);
- EXPECT_EQ(pref_node.GetBaseVersion(), -1);
-
- // Pref 2 should just be locally deleted.
- syncable::Entry pref2_node(&trans, GET_BY_HANDLE, pref2_meta);
- ASSERT_TRUE(pref2_node.good());
- EXPECT_TRUE(pref2_node.GetKernelCopy().is_dirty());
- EXPECT_FALSE(pref2_node.GetIsUnsynced());
- EXPECT_TRUE(pref2_node.GetIsDel());
- EXPECT_FALSE(pref2_node.GetIsUnappliedUpdate());
- EXPECT_TRUE(pref2_node.GetIsDel());
- EXPECT_EQ(pref2_node.GetServerVersion(), 0);
- EXPECT_EQ(pref2_node.GetBaseVersion(), -1);
-
- syncable::Entry pref3_node(&trans, GET_BY_HANDLE, pref3_meta);
- ASSERT_TRUE(pref3_node.good());
- EXPECT_TRUE(pref3_node.GetKernelCopy().is_dirty());
- EXPECT_FALSE(pref3_node.GetIsUnsynced());
- EXPECT_TRUE(pref3_node.GetIsUnappliedUpdate());
- EXPECT_TRUE(pref3_node.GetIsDel());
- EXPECT_GT(pref3_node.GetServerVersion(), 0);
- EXPECT_EQ(pref3_node.GetBaseVersion(), -1);
-
- syncable::Entry pref4_node(&trans, GET_BY_HANDLE, pref4_meta);
- ASSERT_TRUE(pref4_node.good());
- EXPECT_TRUE(pref4_node.GetKernelCopy().is_dirty());
- EXPECT_FALSE(pref4_node.GetIsUnsynced());
- EXPECT_TRUE(pref4_node.GetIsUnappliedUpdate());
- EXPECT_TRUE(pref4_node.GetIsDel());
- EXPECT_GT(pref4_node.GetServerVersion(), 0);
- EXPECT_EQ(pref4_node.GetBaseVersion(), -1);
-
- // Pref 5 should remain untouched.
- syncable::Entry pref5_node(&trans, GET_BY_HANDLE, pref5_meta);
- ASSERT_TRUE(pref5_node.good());
- EXPECT_FALSE(pref5_node.GetKernelCopy().is_dirty());
- EXPECT_FALSE(pref5_node.GetIsUnsynced());
- EXPECT_TRUE(pref5_node.GetIsUnappliedUpdate());
- EXPECT_TRUE(pref5_node.GetIsDel());
- EXPECT_GT(pref5_node.GetServerVersion(), 0);
- EXPECT_EQ(pref5_node.GetBaseVersion(), -1);
-
- syncable::Entry bookmark_node(&trans, GET_BY_HANDLE, bookmark_meta);
- ASSERT_TRUE(bookmark_node.good());
- EXPECT_TRUE(bookmark_node.GetKernelCopy().is_dirty());
- EXPECT_FALSE(bookmark_node.GetIsUnsynced());
- EXPECT_TRUE(bookmark_node.GetIsUnappliedUpdate());
- EXPECT_TRUE(bookmark_node.GetIsDel());
- EXPECT_GT(bookmark_node.GetServerVersion(), 0);
- EXPECT_EQ(bookmark_node.GetBaseVersion(), -1);
- }
-}
-
-// A test harness to exercise the code that processes and passes changes from
-// the "SYNCER"-WriteTransaction destructor, through the SyncManager, to the
-// ChangeProcessor.
-class SyncManagerChangeProcessingTest : public SyncManagerTest {
- public:
- virtual void OnChangesApplied(
- ModelType model_type,
- int64 model_version,
- const BaseTransaction* trans,
- const ImmutableChangeRecordList& changes) OVERRIDE {
- last_changes_ = changes;
- }
-
- virtual void OnChangesComplete(ModelType model_type) OVERRIDE {}
-
- const ImmutableChangeRecordList& GetRecentChangeList() {
- return last_changes_;
- }
-
- UserShare* share() {
- return sync_manager_.GetUserShare();
- }
-
- // Set some flags so our nodes reasonably approximate the real world scenario
- // and can get past CheckTreeInvariants.
- //
- // It's never going to be truly accurate, since we're squashing update
- // receipt, processing and application into a single transaction.
- void SetNodeProperties(syncable::MutableEntry *entry) {
- entry->PutId(id_factory_.NewServerId());
- entry->PutBaseVersion(10);
- entry->PutServerVersion(10);
- }
-
- // Looks for the given change in the list. Returns the index at which it was
- // found. Returns -1 on lookup failure.
- size_t FindChangeInList(int64 id, ChangeRecord::Action action) {
- SCOPED_TRACE(id);
- for (size_t i = 0; i < last_changes_.Get().size(); ++i) {
- if (last_changes_.Get()[i].id == id
- && last_changes_.Get()[i].action == action) {
- return i;
- }
- }
- ADD_FAILURE() << "Failed to find specified change";
- return -1;
- }
-
- // Returns the current size of the change list.
- //
- // Note that spurious changes do not necessarily indicate a problem.
- // Assertions on change list size can help detect problems, but it may be
- // necessary to reduce their strictness if the implementation changes.
- size_t GetChangeListSize() {
- return last_changes_.Get().size();
- }
-
- protected:
- ImmutableChangeRecordList last_changes_;
- TestIdFactory id_factory_;
-};
-
-// Test creation of a folder and a bookmark.
-TEST_F(SyncManagerChangeProcessingTest, AddBookmarks) {
- int64 type_root = GetIdForDataType(BOOKMARKS);
- int64 folder_id = kInvalidId;
- int64 child_id = kInvalidId;
-
- // Create a folder and a bookmark under it.
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
- syncable::Entry root(&trans, syncable::GET_BY_HANDLE, type_root);
- ASSERT_TRUE(root.good());
-
- syncable::MutableEntry folder(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folder");
- ASSERT_TRUE(folder.good());
- SetNodeProperties(&folder);
- folder.PutIsDir(true);
- folder_id = folder.GetMetahandle();
-
- syncable::MutableEntry child(&trans, syncable::CREATE,
- BOOKMARKS, folder.GetId(), "child");
- ASSERT_TRUE(child.good());
- SetNodeProperties(&child);
- child_id = child.GetMetahandle();
- }
-
- // The closing of the above scope will delete the transaction. Its processed
- // changes should be waiting for us in a member of the test harness.
- EXPECT_EQ(2UL, GetChangeListSize());
-
- // We don't need to check these return values here. The function will add a
- // non-fatal failure if these changes are not found.
- size_t folder_change_pos =
- FindChangeInList(folder_id, ChangeRecord::ACTION_ADD);
- size_t child_change_pos =
- FindChangeInList(child_id, ChangeRecord::ACTION_ADD);
-
- // Parents are delivered before children.
- EXPECT_LT(folder_change_pos, child_change_pos);
-}
-
-// Test moving a bookmark into an empty folder.
-TEST_F(SyncManagerChangeProcessingTest, MoveBookmarkIntoEmptyFolder) {
- int64 type_root = GetIdForDataType(BOOKMARKS);
- int64 folder_b_id = kInvalidId;
- int64 child_id = kInvalidId;
-
- // Create two folders. Place a child under folder A.
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
- syncable::Entry root(&trans, syncable::GET_BY_HANDLE, type_root);
- ASSERT_TRUE(root.good());
-
- syncable::MutableEntry folder_a(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folderA");
- ASSERT_TRUE(folder_a.good());
- SetNodeProperties(&folder_a);
- folder_a.PutIsDir(true);
-
- syncable::MutableEntry folder_b(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folderB");
- ASSERT_TRUE(folder_b.good());
- SetNodeProperties(&folder_b);
- folder_b.PutIsDir(true);
- folder_b_id = folder_b.GetMetahandle();
-
- syncable::MutableEntry child(&trans, syncable::CREATE,
- BOOKMARKS, folder_a.GetId(),
- "child");
- ASSERT_TRUE(child.good());
- SetNodeProperties(&child);
- child_id = child.GetMetahandle();
- }
-
- // Close that transaction. The above was to setup the initial scenario. The
- // real test starts now.
-
- // Move the child from folder A to folder B.
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
-
- syncable::Entry folder_b(&trans, syncable::GET_BY_HANDLE, folder_b_id);
- syncable::MutableEntry child(&trans, syncable::GET_BY_HANDLE, child_id);
-
- child.PutParentId(folder_b.GetId());
- }
-
- EXPECT_EQ(1UL, GetChangeListSize());
-
- // Verify that this was detected as a real change. An early version of the
- // UniquePosition code had a bug where moves from one folder to another were
- // ignored unless the moved node's UniquePosition value was also changed in
- // some way.
- FindChangeInList(child_id, ChangeRecord::ACTION_UPDATE);
-}
-
-// Test moving a bookmark into a non-empty folder.
-TEST_F(SyncManagerChangeProcessingTest, MoveIntoPopulatedFolder) {
- int64 type_root = GetIdForDataType(BOOKMARKS);
- int64 child_a_id = kInvalidId;
- int64 child_b_id = kInvalidId;
-
- // Create two folders. Place one child each under folder A and folder B.
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
- syncable::Entry root(&trans, syncable::GET_BY_HANDLE, type_root);
- ASSERT_TRUE(root.good());
-
- syncable::MutableEntry folder_a(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folderA");
- ASSERT_TRUE(folder_a.good());
- SetNodeProperties(&folder_a);
- folder_a.PutIsDir(true);
-
- syncable::MutableEntry folder_b(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folderB");
- ASSERT_TRUE(folder_b.good());
- SetNodeProperties(&folder_b);
- folder_b.PutIsDir(true);
-
- syncable::MutableEntry child_a(&trans, syncable::CREATE,
- BOOKMARKS, folder_a.GetId(),
- "childA");
- ASSERT_TRUE(child_a.good());
- SetNodeProperties(&child_a);
- child_a_id = child_a.GetMetahandle();
-
- syncable::MutableEntry child_b(&trans, syncable::CREATE,
- BOOKMARKS, folder_b.GetId(),
- "childB");
- SetNodeProperties(&child_b);
- child_b_id = child_b.GetMetahandle();
-
- }
-
- // Close that transaction. The above was to setup the initial scenario. The
- // real test starts now.
-
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
-
- syncable::MutableEntry child_a(&trans, syncable::GET_BY_HANDLE, child_a_id);
- syncable::MutableEntry child_b(&trans, syncable::GET_BY_HANDLE, child_b_id);
-
- // Move child A from folder A to folder B and update its position.
- child_a.PutParentId(child_b.GetParentId());
- child_a.PutPredecessor(child_b.GetId());
- }
-
- EXPECT_EQ(1UL, GetChangeListSize());
-
- // Verify that only child a is in the change list.
- // (This function will add a failure if the lookup fails.)
- FindChangeInList(child_a_id, ChangeRecord::ACTION_UPDATE);
-}
-
-// Tests the ordering of deletion changes.
-TEST_F(SyncManagerChangeProcessingTest, DeletionsAndChanges) {
- int64 type_root = GetIdForDataType(BOOKMARKS);
- int64 folder_a_id = kInvalidId;
- int64 folder_b_id = kInvalidId;
- int64 child_id = kInvalidId;
-
- // Create two folders. Place a child under folder A.
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
- syncable::Entry root(&trans, syncable::GET_BY_HANDLE, type_root);
- ASSERT_TRUE(root.good());
-
- syncable::MutableEntry folder_a(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folderA");
- ASSERT_TRUE(folder_a.good());
- SetNodeProperties(&folder_a);
- folder_a.PutIsDir(true);
- folder_a_id = folder_a.GetMetahandle();
-
- syncable::MutableEntry folder_b(&trans, syncable::CREATE,
- BOOKMARKS, root.GetId(), "folderB");
- ASSERT_TRUE(folder_b.good());
- SetNodeProperties(&folder_b);
- folder_b.PutIsDir(true);
- folder_b_id = folder_b.GetMetahandle();
-
- syncable::MutableEntry child(&trans, syncable::CREATE,
- BOOKMARKS, folder_a.GetId(),
- "child");
- ASSERT_TRUE(child.good());
- SetNodeProperties(&child);
- child_id = child.GetMetahandle();
- }
-
- // Close that transaction. The above was to setup the initial scenario. The
- // real test starts now.
-
- {
- syncable::WriteTransaction trans(
- FROM_HERE, syncable::SYNCER, share()->directory.get());
-
- syncable::MutableEntry folder_a(
- &trans, syncable::GET_BY_HANDLE, folder_a_id);
- syncable::MutableEntry folder_b(
- &trans, syncable::GET_BY_HANDLE, folder_b_id);
- syncable::MutableEntry child(&trans, syncable::GET_BY_HANDLE, child_id);
-
- // Delete folder B and its child.
- child.PutIsDel(true);
- folder_b.PutIsDel(true);
-
- // Make an unrelated change to folder A.
- folder_a.PutNonUniqueName("NewNameA");
- }
-
- EXPECT_EQ(3UL, GetChangeListSize());
-
- size_t folder_a_pos =
- FindChangeInList(folder_a_id, ChangeRecord::ACTION_UPDATE);
- size_t folder_b_pos =
- FindChangeInList(folder_b_id, ChangeRecord::ACTION_DELETE);
- size_t child_pos = FindChangeInList(child_id, ChangeRecord::ACTION_DELETE);
-
- // Deletes should appear before updates.
- EXPECT_LT(child_pos, folder_a_pos);
- EXPECT_LT(folder_b_pos, folder_a_pos);
-}
-
-// During initialization SyncManagerImpl loads sqlite database. If it fails to
-// do so it should fail initialization. This test verifies this behavior.
-// Test reuses SyncManagerImpl initialization from SyncManagerTest but overrides
-// InternalComponentsFactory to return DirectoryBackingStore that always fails
-// to load.
-class SyncManagerInitInvalidStorageTest : public SyncManagerTest {
- public:
- SyncManagerInitInvalidStorageTest() {
- }
-
- virtual InternalComponentsFactory* GetFactory() OVERRIDE {
- return new TestInternalComponentsFactory(GetSwitches(), STORAGE_INVALID);
- }
-};
-
-// SyncManagerInitInvalidStorageTest::GetFactory will return
-// DirectoryBackingStore that ensures that SyncManagerImpl::OpenDirectory fails.
-// SyncManagerImpl initialization is done in SyncManagerTest::SetUp. This test's
-// task is to ensure that SyncManagerImpl reported initialization failure in
-// OnInitializationComplete callback.
-TEST_F(SyncManagerInitInvalidStorageTest, FailToOpenDatabase) {
- EXPECT_FALSE(initialization_succeeded_);
-}
-
-} // namespace
diff --git a/chromium/sync/internal_api/syncapi_internal.cc b/chromium/sync/internal_api/syncapi_internal.cc
deleted file mode 100644
index a6530ac87b5..00000000000
--- a/chromium/sync/internal_api/syncapi_internal.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/syncapi_internal.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-
-sync_pb::PasswordSpecificsData* DecryptPasswordSpecifics(
- const sync_pb::EntitySpecifics& specifics, Cryptographer* crypto) {
- if (!specifics.has_password())
- return NULL;
- const sync_pb::PasswordSpecifics& password_specifics = specifics.password();
- if (!password_specifics.has_encrypted())
- return NULL;
- const sync_pb::EncryptedData& encrypted = password_specifics.encrypted();
- scoped_ptr<sync_pb::PasswordSpecificsData> data(
- new sync_pb::PasswordSpecificsData);
- if (!crypto->Decrypt(encrypted, data.get()))
- return NULL;
- return data.release();
-}
-
-// The list of names which are reserved for use by the server.
-static const char* kForbiddenServerNames[] = { "", ".", ".." };
-
-// When taking a name from the syncapi, append a space if it matches the
-// pattern of a server-illegal name followed by zero or more spaces.
-void SyncAPINameToServerName(const std::string& syncer_name,
- std::string* out) {
- *out = syncer_name;
- if (IsNameServerIllegalAfterTrimming(*out))
- out->append(" ");
-}
-
-// Checks whether |name| is a server-illegal name followed by zero or more space
-// characters. The three server-illegal names are the empty string, dot, and
-// dot-dot. Very long names (>255 bytes in UTF-8 Normalization Form C) are
-// also illegal, but are not considered here.
-bool IsNameServerIllegalAfterTrimming(const std::string& name) {
- size_t untrimmed_count = name.find_last_not_of(' ') + 1;
- for (size_t i = 0; i < arraysize(kForbiddenServerNames); ++i) {
- if (name.compare(0, untrimmed_count, kForbiddenServerNames[i]) == 0)
- return true;
- }
- return false;
-}
-
-// Compare the values of two EntitySpecifics, accounting for encryption.
-bool AreSpecificsEqual(const Cryptographer* cryptographer,
- const sync_pb::EntitySpecifics& left,
- const sync_pb::EntitySpecifics& right) {
- // Note that we can't compare encrypted strings directly as they are seeded
- // with a random value.
- std::string left_plaintext, right_plaintext;
- if (left.has_encrypted()) {
- if (!cryptographer->CanDecrypt(left.encrypted())) {
- NOTREACHED() << "Attempting to compare undecryptable data.";
- return false;
- }
- left_plaintext = cryptographer->DecryptToString(left.encrypted());
- } else {
- left_plaintext = left.SerializeAsString();
- }
- if (right.has_encrypted()) {
- if (!cryptographer->CanDecrypt(right.encrypted())) {
- NOTREACHED() << "Attempting to compare undecryptable data.";
- return false;
- }
- right_plaintext = cryptographer->DecryptToString(right.encrypted());
- } else {
- right_plaintext = right.SerializeAsString();
- }
- if (left_plaintext == right_plaintext) {
- return true;
- }
- return false;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/syncapi_internal.h b/chromium/sync/internal_api/syncapi_internal.h
deleted file mode 100644
index 06689aad9cc..00000000000
--- a/chromium/sync/internal_api/syncapi_internal.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_SYNCAPI_INTERNAL_H_
-#define SYNC_INTERNAL_API_SYNCAPI_INTERNAL_H_
-
-// The functions defined are shared among some of the classes that implement
-// the internal sync API. They are not to be used by clients of the API.
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace sync_pb {
-class EntitySpecifics;
-class PasswordSpecificsData;
-}
-
-namespace syncer {
-
-class Cryptographer;
-
-sync_pb::PasswordSpecificsData* DecryptPasswordSpecifics(
- const sync_pb::EntitySpecifics& specifics,
- Cryptographer* crypto);
-
-SYNC_EXPORT_PRIVATE void SyncAPINameToServerName(const std::string& syncer_name,
- std::string* out);
-
-bool IsNameServerIllegalAfterTrimming(const std::string& name);
-
-bool AreSpecificsEqual(const Cryptographer* cryptographer,
- const sync_pb::EntitySpecifics& left,
- const sync_pb::EntitySpecifics& right);
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_SYNCAPI_INTERNAL_H_
diff --git a/chromium/sync/internal_api/syncapi_server_connection_manager.cc b/chromium/sync/internal_api/syncapi_server_connection_manager.cc
deleted file mode 100644
index 35ca1e2a5ec..00000000000
--- a/chromium/sync/internal_api/syncapi_server_connection_manager.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/syncapi_server_connection_manager.h"
-
-#include "net/base/net_errors.h"
-#include "net/http/http_status_code.h"
-#include "sync/internal_api/public/http_post_provider_factory.h"
-#include "sync/internal_api/public/http_post_provider_interface.h"
-
-namespace syncer {
-
-SyncAPIBridgedConnection::SyncAPIBridgedConnection(
- ServerConnectionManager* scm,
- HttpPostProviderFactory* factory)
- : Connection(scm), factory_(factory) {
- post_provider_ = factory_->Create();
-}
-
-SyncAPIBridgedConnection::~SyncAPIBridgedConnection() {
- DCHECK(post_provider_);
- factory_->Destroy(post_provider_);
- post_provider_ = NULL;
-}
-
-bool SyncAPIBridgedConnection::Init(const char* path,
- const std::string& auth_token,
- const std::string& payload,
- HttpResponse* response) {
- std::string sync_server;
- int sync_server_port = 0;
- bool use_ssl = false;
- GetServerParams(&sync_server, &sync_server_port, &use_ssl);
- std::string connection_url = MakeConnectionURL(sync_server, path, use_ssl);
-
- HttpPostProviderInterface* http = post_provider_;
- http->SetURL(connection_url.c_str(), sync_server_port);
-
- if (!auth_token.empty()) {
- std::string headers;
- headers = "Authorization: Bearer " + auth_token;
- http->SetExtraRequestHeaders(headers.c_str());
- }
-
- // Must be octet-stream, or the payload may be parsed for a cookie.
- http->SetPostPayload("application/octet-stream", payload.length(),
- payload.data());
-
- // Issue the POST, blocking until it finishes.
- int error_code = 0;
- int response_code = 0;
- if (!http->MakeSynchronousPost(&error_code, &response_code)) {
- DVLOG(1) << "Http POST failed, error returns: " << error_code;
- response->server_status = HttpResponse::ServerConnectionCodeFromNetError(
- error_code);
- return false;
- }
-
- // We got a server response, copy over response codes and content.
- response->response_code = response_code;
- response->content_length =
- static_cast<int64>(http->GetResponseContentLength());
- response->payload_length =
- static_cast<int64>(http->GetResponseContentLength());
- if (response->response_code < 400)
- response->server_status = HttpResponse::SERVER_CONNECTION_OK;
- else if (response->response_code == net::HTTP_UNAUTHORIZED)
- response->server_status = HttpResponse::SYNC_AUTH_ERROR;
- else
- response->server_status = HttpResponse::SYNC_SERVER_ERROR;
-
- // Write the content into our buffer.
- buffer_.assign(http->GetResponseContent(), http->GetResponseContentLength());
- return true;
-}
-
-void SyncAPIBridgedConnection::Abort() {
- DCHECK(post_provider_);
- post_provider_->Abort();
-}
-
-SyncAPIServerConnectionManager::SyncAPIServerConnectionManager(
- const std::string& server,
- int port,
- bool use_ssl,
- HttpPostProviderFactory* factory,
- CancelationSignal* cancelation_signal)
- : ServerConnectionManager(server,
- port,
- use_ssl,
- cancelation_signal),
- post_provider_factory_(factory) {
- DCHECK(post_provider_factory_.get());
-}
-
-SyncAPIServerConnectionManager::~SyncAPIServerConnectionManager() {}
-
-ServerConnectionManager::Connection*
-SyncAPIServerConnectionManager::MakeConnection() {
- return new SyncAPIBridgedConnection(this, post_provider_factory_.get());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/syncapi_server_connection_manager.h b/chromium/sync/internal_api/syncapi_server_connection_manager.h
deleted file mode 100644
index 118d31458ca..00000000000
--- a/chromium/sync/internal_api/syncapi_server_connection_manager.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
-#define SYNC_INTERNAL_API_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/net/server_connection_manager.h"
-
-namespace syncer {
-
-class HttpPostProviderFactory;
-class HttpPostProviderInterface;
-
-// This provides HTTP Post functionality through the interface provided
-// to the sync API by the application hosting the syncer backend.
-class SyncAPIBridgedConnection : public ServerConnectionManager::Connection {
- public:
- SyncAPIBridgedConnection(ServerConnectionManager* scm,
- HttpPostProviderFactory* factory);
-
- virtual ~SyncAPIBridgedConnection();
-
- virtual bool Init(const char* path,
- const std::string& auth_token,
- const std::string& payload,
- HttpResponse* response) OVERRIDE;
-
- virtual void Abort() OVERRIDE;
-
- private:
- // Pointer to the factory we use for creating HttpPostProviders. We do not
- // own |factory_|.
- HttpPostProviderFactory* factory_;
-
- HttpPostProviderInterface* post_provider_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncAPIBridgedConnection);
-};
-
-// A ServerConnectionManager subclass used by the syncapi layer. We use a
-// subclass so that we can override MakePost() to generate a POST object using
-// an instance of the HttpPostProviderFactory class.
-class SYNC_EXPORT_PRIVATE SyncAPIServerConnectionManager
- : public ServerConnectionManager {
- public:
- // Takes ownership of factory.
- SyncAPIServerConnectionManager(const std::string& server,
- int port,
- bool use_ssl,
- HttpPostProviderFactory* factory,
- CancelationSignal* cancelation_signal);
- virtual ~SyncAPIServerConnectionManager();
-
- // ServerConnectionManager overrides.
- virtual Connection* MakeConnection() OVERRIDE;
-
- private:
- FRIEND_TEST_ALL_PREFIXES(SyncAPIServerConnectionManagerTest,
- VeryEarlyAbortPost);
- FRIEND_TEST_ALL_PREFIXES(SyncAPIServerConnectionManagerTest, EarlyAbortPost);
- FRIEND_TEST_ALL_PREFIXES(SyncAPIServerConnectionManagerTest, AbortPost);
-
- // A factory creating concrete HttpPostProviders for use whenever we need to
- // issue a POST to sync servers.
- scoped_ptr<HttpPostProviderFactory> post_provider_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncAPIServerConnectionManager);
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
diff --git a/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc b/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc
deleted file mode 100644
index 2cb3dff3c78..00000000000
--- a/chromium/sync/internal_api/syncapi_server_connection_manager_unittest.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/syncapi_server_connection_manager.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/compiler_specific.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/thread.h"
-#include "base/time/time.h"
-#include "net/base/net_errors.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/http_post_provider_factory.h"
-#include "sync/internal_api/public/http_post_provider_interface.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using base::TimeDelta;
-
-class BlockingHttpPost : public HttpPostProviderInterface {
- public:
- BlockingHttpPost() : wait_for_abort_(false, false) {}
- virtual ~BlockingHttpPost() {}
-
- virtual void SetExtraRequestHeaders(const char* headers) OVERRIDE {}
- virtual void SetURL(const char* url, int port) OVERRIDE {}
- virtual void SetPostPayload(const char* content_type,
- int content_length,
- const char* content) OVERRIDE {}
- virtual bool MakeSynchronousPost(int* error_code, int* response_code)
- OVERRIDE {
- wait_for_abort_.TimedWait(TestTimeouts::action_max_timeout());
- *error_code = net::ERR_ABORTED;
- return false;
- }
- virtual int GetResponseContentLength() const OVERRIDE {
- return 0;
- }
- virtual const char* GetResponseContent() const OVERRIDE {
- return "";
- }
- virtual const std::string GetResponseHeaderValue(
- const std::string& name) const OVERRIDE {
- return std::string();
- }
- virtual void Abort() OVERRIDE {
- wait_for_abort_.Signal();
- }
- private:
- base::WaitableEvent wait_for_abort_;
-};
-
-class BlockingHttpPostFactory : public HttpPostProviderFactory {
- public:
- virtual ~BlockingHttpPostFactory() {}
- virtual void Init(const std::string& user_agent) OVERRIDE {}
- virtual HttpPostProviderInterface* Create() OVERRIDE {
- return new BlockingHttpPost();
- }
- virtual void Destroy(HttpPostProviderInterface* http) OVERRIDE {
- delete static_cast<BlockingHttpPost*>(http);
- }
-};
-
-} // namespace
-
-// Ask the ServerConnectionManager to stop before it is created.
-TEST(SyncAPIServerConnectionManagerTest, VeryEarlyAbortPost) {
- CancelationSignal signal;
- signal.Signal();
- SyncAPIServerConnectionManager server(
- "server", 0, true, new BlockingHttpPostFactory(), &signal);
-
- ServerConnectionManager::PostBufferParams params;
- ScopedServerStatusWatcher watcher(&server, &params.response);
-
- bool result = server.PostBufferToPath(
- &params, "/testpath", "testauth", &watcher);
-
- EXPECT_FALSE(result);
- EXPECT_EQ(HttpResponse::CONNECTION_UNAVAILABLE,
- params.response.server_status);
-}
-
-// Ask the ServerConnectionManager to stop before its first request is made.
-TEST(SyncAPIServerConnectionManagerTest, EarlyAbortPost) {
- CancelationSignal signal;
- SyncAPIServerConnectionManager server(
- "server", 0, true, new BlockingHttpPostFactory(), &signal);
-
- ServerConnectionManager::PostBufferParams params;
- ScopedServerStatusWatcher watcher(&server, &params.response);
-
- signal.Signal();
- bool result = server.PostBufferToPath(
- &params, "/testpath", "testauth", &watcher);
-
- EXPECT_FALSE(result);
- EXPECT_EQ(HttpResponse::CONNECTION_UNAVAILABLE,
- params.response.server_status);
-}
-
-// Ask the ServerConnectionManager to stop during a request.
-TEST(SyncAPIServerConnectionManagerTest, AbortPost) {
- CancelationSignal signal;
- SyncAPIServerConnectionManager server(
- "server", 0, true, new BlockingHttpPostFactory(), &signal);
-
- ServerConnectionManager::PostBufferParams params;
- ScopedServerStatusWatcher watcher(&server, &params.response);
-
- base::Thread abort_thread("Test_AbortThread");
- ASSERT_TRUE(abort_thread.Start());
- abort_thread.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&CancelationSignal::Signal,
- base::Unretained(&signal)),
- TestTimeouts::tiny_timeout());
-
- bool result = server.PostBufferToPath(
- &params, "/testpath", "testauth", &watcher);
-
- EXPECT_FALSE(result);
- EXPECT_EQ(HttpResponse::CONNECTION_UNAVAILABLE,
- params.response.server_status);
- abort_thread.Stop();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/user_share.cc b/chromium/sync/internal_api/user_share.cc
deleted file mode 100644
index 1a3736d2e44..00000000000
--- a/chromium/sync/internal_api/user_share.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/user_share.h"
-
-#include "sync/syncable/directory.h"
-
-namespace syncer {
-
-UserShare::UserShare() {}
-
-UserShare::~UserShare() {}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/write_node.cc b/chromium/sync/internal_api/write_node.cc
deleted file mode 100644
index 55f56b78d3f..00000000000
--- a/chromium/sync/internal_api/write_node.cc
+++ /dev/null
@@ -1,522 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/write_node.h"
-
-#include "base/strings/string_util.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base_transaction.h"
-#include "sync/internal_api/public/write_transaction.h"
-#include "sync/internal_api/syncapi_internal.h"
-#include "sync/protocol/app_specifics.pb.h"
-#include "sync/protocol/autofill_specifics.pb.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/theme_specifics.pb.h"
-#include "sync/protocol/typed_url_specifics.pb.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/util/cryptographer.h"
-
-using std::string;
-using std::vector;
-
-namespace syncer {
-
-using syncable::kEncryptedString;
-using syncable::SPECIFICS;
-
-static const char kDefaultNameForNewNodes[] = " ";
-
-void WriteNode::SetIsFolder(bool folder) {
- if (entry_->GetIsDir() == folder)
- return; // Skip redundant changes.
-
- entry_->PutIsDir(folder);
- MarkForSyncing();
-}
-
-void WriteNode::SetTitle(const std::wstring& title) {
- DCHECK_NE(GetModelType(), UNSPECIFIED);
- ModelType type = GetModelType();
- // It's possible the nigori lost the set of encrypted types. If the current
- // specifics are already encrypted, we want to ensure we continue encrypting.
- bool needs_encryption = GetTransaction()->GetEncryptedTypes().Has(type) ||
- entry_->GetSpecifics().has_encrypted();
-
- // If this datatype is encrypted and is not a bookmark, we disregard the
- // specified title in favor of kEncryptedString. For encrypted bookmarks the
- // NON_UNIQUE_NAME will still be kEncryptedString, but we store the real title
- // into the specifics. All strings compared are server legal strings.
- std::string new_legal_title;
- if (type != BOOKMARKS && needs_encryption) {
- new_legal_title = kEncryptedString;
- } else {
- SyncAPINameToServerName(WideToUTF8(title), &new_legal_title);
- base::TruncateUTF8ToByteSize(new_legal_title, 255, &new_legal_title);
- }
-
- std::string current_legal_title;
- if (BOOKMARKS == type &&
- entry_->GetSpecifics().has_encrypted()) {
- // Encrypted bookmarks only have their title in the unencrypted specifics.
- current_legal_title = GetBookmarkSpecifics().title();
- } else {
- // Non-bookmarks and legacy bookmarks (those with no title in their
- // specifics) store their title in NON_UNIQUE_NAME. Non-legacy bookmarks
- // store their title in specifics as well as NON_UNIQUE_NAME.
- current_legal_title = entry_->GetNonUniqueName();
- }
-
- bool title_matches = (current_legal_title == new_legal_title);
- bool encrypted_without_overwriting_name = (needs_encryption &&
- entry_->GetNonUniqueName() != kEncryptedString);
-
- // If the title matches and the NON_UNIQUE_NAME is properly overwritten as
- // necessary, nothing needs to change.
- if (title_matches && !encrypted_without_overwriting_name) {
- DVLOG(2) << "Title matches, dropping change.";
- return;
- }
-
- // For bookmarks, we also set the title field in the specifics.
- // TODO(zea): refactor bookmarks to not need this functionality.
- if (GetModelType() == BOOKMARKS) {
- sync_pb::EntitySpecifics specifics = GetEntitySpecifics();
- specifics.mutable_bookmark()->set_title(new_legal_title);
- SetEntitySpecifics(specifics); // Does it's own encryption checking.
- }
-
- // For bookmarks, this has to happen after we set the title in the specifics,
- // because the presence of a title in the NON_UNIQUE_NAME is what controls
- // the logic deciding whether this is an empty node or a legacy bookmark.
- // See BaseNode::GetUnencryptedSpecific(..).
- if (needs_encryption)
- entry_->PutNonUniqueName(kEncryptedString);
- else
- entry_->PutNonUniqueName(new_legal_title);
-
- DVLOG(1) << "Overwriting title of type "
- << ModelTypeToString(type)
- << " and marking for syncing.";
- MarkForSyncing();
-}
-
-void WriteNode::SetAppSpecifics(
- const sync_pb::AppSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_app()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetAutofillSpecifics(
- const sync_pb::AutofillSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_autofill()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetAutofillProfileSpecifics(
- const sync_pb::AutofillProfileSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_autofill_profile()->
- CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetBookmarkSpecifics(
- const sync_pb::BookmarkSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_bookmark()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetNigoriSpecifics(
- const sync_pb::NigoriSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_nigori()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetPasswordSpecifics(
- const sync_pb::PasswordSpecificsData& data) {
- DCHECK_EQ(GetModelType(), PASSWORDS);
-
- Cryptographer* cryptographer = GetTransaction()->GetCryptographer();
-
- // We have to do the idempotency check here (vs in UpdateEntryWithEncryption)
- // because Passwords have their encrypted data within the PasswordSpecifics,
- // vs within the EntitySpecifics like all the other types.
- const sync_pb::EntitySpecifics& old_specifics = GetEntry()->GetSpecifics();
- sync_pb::EntitySpecifics entity_specifics;
- // Copy over the old specifics if they exist.
- if (GetModelTypeFromSpecifics(old_specifics) == PASSWORDS) {
- entity_specifics.CopyFrom(old_specifics);
- } else {
- AddDefaultFieldValue(PASSWORDS, &entity_specifics);
- }
- sync_pb::PasswordSpecifics* password_specifics =
- entity_specifics.mutable_password();
- // This will only update password_specifics if the underlying unencrypted blob
- // was different from |data| or was not encrypted with the proper passphrase.
- if (!cryptographer->Encrypt(data, password_specifics->mutable_encrypted())) {
- NOTREACHED() << "Failed to encrypt password, possibly due to sync node "
- << "corruption";
- return;
- }
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetThemeSpecifics(
- const sync_pb::ThemeSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_theme()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetSessionSpecifics(
- const sync_pb::SessionSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_session()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetManagedUserSettingSpecifics(
- const sync_pb::ManagedUserSettingSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_managed_user_setting()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetManagedUserSpecifics(
- const sync_pb::ManagedUserSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_managed_user()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetDeviceInfoSpecifics(
- const sync_pb::DeviceInfoSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_device_info()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetExperimentsSpecifics(
- const sync_pb::ExperimentsSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_experiments()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetPriorityPreferenceSpecifics(
- const sync_pb::PriorityPreferenceSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_priority_preference()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetEntitySpecifics(
- const sync_pb::EntitySpecifics& new_value) {
- ModelType new_specifics_type =
- GetModelTypeFromSpecifics(new_value);
- CHECK(!new_value.password().has_client_only_encrypted_data());
- DCHECK_NE(new_specifics_type, UNSPECIFIED);
- DVLOG(1) << "Writing entity specifics of type "
- << ModelTypeToString(new_specifics_type);
- DCHECK_EQ(new_specifics_type, GetModelType());
-
- // Preserve unknown fields.
- const sync_pb::EntitySpecifics& old_specifics = entry_->GetSpecifics();
- sync_pb::EntitySpecifics new_specifics;
- new_specifics.CopyFrom(new_value);
- new_specifics.mutable_unknown_fields()->MergeFrom(
- old_specifics.unknown_fields());
-
- // Will update the entry if encryption was necessary.
- if (!UpdateEntryWithEncryption(GetTransaction()->GetWrappedTrans(),
- new_specifics,
- entry_)) {
- return;
- }
- if (entry_->GetSpecifics().has_encrypted()) {
- // EncryptIfNecessary already updated the entry for us and marked for
- // syncing if it was needed. Now we just make a copy of the unencrypted
- // specifics so that if this node is updated, we do not have to decrypt the
- // old data. Note that this only modifies the node's local data, not the
- // entry itself.
- SetUnencryptedSpecifics(new_value);
- }
-
- DCHECK_EQ(new_specifics_type, GetModelType());
-}
-
-void WriteNode::ResetFromSpecifics() {
- SetEntitySpecifics(GetEntitySpecifics());
-}
-
-void WriteNode::SetTypedUrlSpecifics(
- const sync_pb::TypedUrlSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_typed_url()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetExtensionSpecifics(
- const sync_pb::ExtensionSpecifics& new_value) {
- sync_pb::EntitySpecifics entity_specifics;
- entity_specifics.mutable_extension()->CopyFrom(new_value);
- SetEntitySpecifics(entity_specifics);
-}
-
-void WriteNode::SetExternalId(int64 id) {
- if (GetExternalId() != id)
- entry_->PutLocalExternalId(id);
-}
-
-WriteNode::WriteNode(WriteTransaction* transaction)
- : entry_(NULL), transaction_(transaction) {
- DCHECK(transaction);
-}
-
-WriteNode::~WriteNode() {
- delete entry_;
-}
-
-// Find an existing node matching the ID |id|, and bind this WriteNode to it.
-// Return true on success.
-BaseNode::InitByLookupResult WriteNode::InitByIdLookup(int64 id) {
- DCHECK(!entry_) << "Init called twice";
- DCHECK_NE(id, kInvalidId);
- entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
- syncable::GET_BY_HANDLE, id);
- if (!entry_->good())
- return INIT_FAILED_ENTRY_NOT_GOOD;
- if (entry_->GetIsDel())
- return INIT_FAILED_ENTRY_IS_DEL;
- return DecryptIfNecessary() ? INIT_OK : INIT_FAILED_DECRYPT_IF_NECESSARY;
-}
-
-// Find a node by client tag, and bind this WriteNode to it.
-// Return true if the write node was found, and was not deleted.
-// Undeleting a deleted node is possible by ClientTag.
-BaseNode::InitByLookupResult WriteNode::InitByClientTagLookup(
- ModelType model_type,
- const std::string& tag) {
- DCHECK(!entry_) << "Init called twice";
- if (tag.empty())
- return INIT_FAILED_PRECONDITION;
-
- const std::string hash = syncable::GenerateSyncableHash(model_type, tag);
-
- entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
- syncable::GET_BY_CLIENT_TAG, hash);
- if (!entry_->good())
- return INIT_FAILED_ENTRY_NOT_GOOD;
- if (entry_->GetIsDel())
- return INIT_FAILED_ENTRY_IS_DEL;
- return DecryptIfNecessary() ? INIT_OK : INIT_FAILED_DECRYPT_IF_NECESSARY;
-}
-
-BaseNode::InitByLookupResult WriteNode::InitByTagLookup(
- const std::string& tag) {
- DCHECK(!entry_) << "Init called twice";
- if (tag.empty())
- return INIT_FAILED_PRECONDITION;
- entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
- syncable::GET_BY_SERVER_TAG, tag);
- if (!entry_->good())
- return INIT_FAILED_ENTRY_NOT_GOOD;
- if (entry_->GetIsDel())
- return INIT_FAILED_ENTRY_IS_DEL;
- ModelType model_type = GetModelType();
- DCHECK_EQ(model_type, NIGORI);
- return INIT_OK;
-}
-
-// Create a new node with default properties, and bind this WriteNode to it.
-// Return true on success.
-bool WriteNode::InitBookmarkByCreation(const BaseNode& parent,
- const BaseNode* predecessor) {
- DCHECK(!entry_) << "Init called twice";
- // |predecessor| must be a child of |parent| or NULL.
- if (predecessor && predecessor->GetParentId() != parent.GetId()) {
- DCHECK(false);
- return false;
- }
-
- syncable::Id parent_id = parent.GetEntry()->GetId();
-
- // Start out with a dummy name. We expect
- // the caller to set a meaningful name after creation.
- string dummy(kDefaultNameForNewNodes);
-
- entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
- syncable::CREATE, BOOKMARKS,
- parent_id, dummy);
-
- if (!entry_->good())
- return false;
-
- // Entries are untitled folders by default.
- entry_->PutIsDir(true);
-
- // Now set the predecessor, which sets IS_UNSYNCED as necessary.
- return PutPredecessor(predecessor);
-}
-
-// Create a new node with default properties and a client defined unique tag,
-// and bind this WriteNode to it.
-// Return true on success. If the tag exists in the database, then
-// we will attempt to undelete the node.
-// TODO(chron): Code datatype into hash tag.
-// TODO(chron): Is model type ever lost?
-WriteNode::InitUniqueByCreationResult WriteNode::InitUniqueByCreation(
- ModelType model_type,
- const BaseNode& parent,
- const std::string& tag) {
- // This DCHECK will only fail if init is called twice.
- DCHECK(!entry_);
- if (tag.empty()) {
- LOG(WARNING) << "InitUniqueByCreation failed due to empty tag.";
- return INIT_FAILED_EMPTY_TAG;
- }
-
- const std::string hash = syncable::GenerateSyncableHash(model_type, tag);
-
- syncable::Id parent_id = parent.GetEntry()->GetId();
-
- // Start out with a dummy name. We expect
- // the caller to set a meaningful name after creation.
- string dummy(kDefaultNameForNewNodes);
-
- // Check if we have this locally and need to undelete it.
- scoped_ptr<syncable::MutableEntry> existing_entry(
- new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
- syncable::GET_BY_CLIENT_TAG, hash));
-
- if (existing_entry->good()) {
- if (existing_entry->GetIsDel()) {
- // Rules for undelete:
- // BASE_VERSION: Must keep the same.
- // ID: Essential to keep the same.
- // META_HANDLE: Must be the same, so we can't "split" the entry.
- // IS_DEL: Must be set to false, will cause reindexing.
- // This one is weird because IS_DEL is true for "update only"
- // items. It should be OK to undelete an update only.
- // MTIME/CTIME: Seems reasonable to just leave them alone.
- // IS_UNSYNCED: Must set this to true or face database insurrection.
- // We do this below this block.
- // IS_UNAPPLIED_UPDATE: Either keep it the same or also set BASE_VERSION
- // to SERVER_VERSION. We keep it the same here.
- // IS_DIR: We'll leave it the same.
- // SPECIFICS: Reset it.
-
- existing_entry->PutIsDel(false);
-
- // Client tags are immutable and must be paired with the ID.
- // If a server update comes down with an ID and client tag combo,
- // and it already exists, always overwrite it and store only one copy.
- // We have to undelete entries because we can't disassociate IDs from
- // tags and updates.
-
- existing_entry->PutNonUniqueName(dummy);
- existing_entry->PutParentId(parent_id);
- entry_ = existing_entry.release();
- } else {
- return INIT_FAILED_ENTRY_ALREADY_EXISTS;
- }
- } else {
- entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
- syncable::CREATE,
- model_type, parent_id, dummy);
- if (!entry_->good())
- return INIT_FAILED_COULD_NOT_CREATE_ENTRY;
-
- // Only set IS_DIR for new entries. Don't bitflip undeleted ones.
- entry_->PutUniqueClientTag(hash);
- }
-
- // We don't support directory and tag combinations.
- entry_->PutIsDir(false);
-
- // Now set the predecessor, which sets IS_UNSYNCED as necessary.
- bool success = PutPredecessor(NULL);
- if (!success)
- return INIT_FAILED_SET_PREDECESSOR;
-
- return INIT_SUCCESS;
-}
-
-bool WriteNode::SetPosition(const BaseNode& new_parent,
- const BaseNode* predecessor) {
- // |predecessor| must be a child of |new_parent| or NULL.
- if (predecessor && predecessor->GetParentId() != new_parent.GetId()) {
- DCHECK(false);
- return false;
- }
-
- syncable::Id new_parent_id = new_parent.GetEntry()->GetId();
-
- // Filter out redundant changes if both the parent and the predecessor match.
- if (new_parent_id == entry_->GetParentId()) {
- const syncable::Id& old = entry_->GetPredecessorId();
- if ((!predecessor && old.IsRoot()) ||
- (predecessor && (old == predecessor->GetEntry()->GetId()))) {
- return true;
- }
- }
-
- entry_->PutParentId(new_parent_id);
-
- // Now set the predecessor, which sets IS_UNSYNCED as necessary.
- return PutPredecessor(predecessor);
-}
-
-const syncable::Entry* WriteNode::GetEntry() const {
- return entry_;
-}
-
-const BaseTransaction* WriteNode::GetTransaction() const {
- return transaction_;
-}
-
-syncable::MutableEntry* WriteNode::GetMutableEntryForTest() {
- return entry_;
-}
-
-void WriteNode::Tombstone() {
- // These lines must be in this order. The call to Put(IS_DEL) might choose to
- // unset the IS_UNSYNCED bit if the item was not known to the server at the
- // time of deletion. It's important that the bit not be reset in that case.
- MarkForSyncing();
- entry_->PutIsDel(true);
-}
-
-void WriteNode::Drop() {
- if (entry_->GetId().ServerKnows()) {
- entry_->PutIsDel(true);
- }
-}
-
-bool WriteNode::PutPredecessor(const BaseNode* predecessor) {
- syncable::Id predecessor_id = predecessor ?
- predecessor->GetEntry()->GetId() : syncable::Id();
- if (!entry_->PutPredecessor(predecessor_id))
- return false;
- // Mark this entry as unsynced, to wake up the syncer.
- MarkForSyncing();
-
- return true;
-}
-
-void WriteNode::MarkForSyncing() {
- syncable::MarkForSyncing(entry_);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/internal_api/write_transaction.cc b/chromium/sync/internal_api/write_transaction.cc
deleted file mode 100644
index d33093dac32..00000000000
--- a/chromium/sync/internal_api/write_transaction.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/write_transaction.h"
-
-#include "sync/syncable/syncable_write_transaction.h"
-
-namespace syncer {
-
-//////////////////////////////////////////////////////////////////////////
-// WriteTransaction member definitions
-WriteTransaction::WriteTransaction(const tracked_objects::Location& from_here,
- UserShare* share)
- : BaseTransaction(share),
- transaction_(NULL) {
- transaction_ = new syncable::WriteTransaction(from_here, syncable::SYNCAPI,
- share->directory.get());
-}
-
-WriteTransaction::WriteTransaction(const tracked_objects::Location& from_here,
- UserShare* share,
- int64* new_model_version)
- : BaseTransaction(share),
- transaction_(NULL) {
- transaction_ = new syncable::WriteTransaction(from_here,
- share->directory.get(),
- new_model_version);
-}
-
-WriteTransaction::~WriteTransaction() {
- delete transaction_;
-}
-
-syncable::BaseTransaction* WriteTransaction::GetWrappedTrans() const {
- return transaction_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/js/DEPS b/chromium/sync/js/DEPS
deleted file mode 100644
index aae40c0a6d0..00000000000
--- a/chromium/sync/js/DEPS
+++ /dev/null
@@ -1,5 +0,0 @@
-include_rules = [
- "+sync/base",
- "+sync/internal_api/public/util",
- "+sync/util",
-]
diff --git a/chromium/sync/js/README.js b/chromium/sync/js/README.js
deleted file mode 100644
index 0fbfa66bc0e..00000000000
--- a/chromium/sync/js/README.js
+++ /dev/null
@@ -1,48 +0,0 @@
-Overview of chrome://sync-internals
------------------------------------
-
-This note explains how chrome://sync-internals (also known as
-about:sync) interacts with the sync service/backend.
-
-Basically, chrome://sync-internals sends messages to the sync backend
-and the sync backend sends the reply asynchronously. The sync backend
-also asynchronously raises events which chrome://sync-internals listen
-to.
-
-A message and its reply has a name and a list of arguments, which is
-basically a wrapper around an immutable ListValue.
-
-An event has a name and a details object, which is represented by a
-JsEventDetails (js_event_details.h) object, which is basically a
-wrapper around an immutable DictionaryValue.
-
-Message/event flow
-------------------
-
-chrome://sync-internals is represented by SyncInternalsUI
-(chrome/browser/ui/webui/sync_internals_ui.h). SyncInternalsUI
-interacts with the sync service via a JsController (js_controller.h)
-object, which has a ProcessJsMessage() method that just delegates to
-an underlying JsBackend instance (js_backend.h). The SyncInternalsUI
-object also registers itself (as a JsEventHandler
-[js_event_handler.h]) to the JsController object, and any events
-raised by the JsBackend are propagated to the JsController and then to
-the registered JsEventHandlers.
-
-The ProcessJsMessage() takes a WeakHandle (weak_handle.h) to a
-JsReplyHandler (js_reply_handler.h), which the backend uses to send
-replies safely across threads. SyncInternalsUI implements
-JsReplyHandler, so it simply passes itself as the reply handler when
-it calls ProcessJsMessage() on the JsController.
-
-The following objects live on the UI thread:
-
-- SyncInternalsUI (implements JsEventHandler, JsReplyHandler)
-- SyncJsController (implements JsController, JsEventHandler)
-
-The following objects live on the sync thread:
-
-- SyncManager::SyncInternal (implements JsBackend)
-
-Of course, none of these objects need to know where the other objects
-live, since they interact via WeakHandles.
diff --git a/chromium/sync/js/js_arg_list.cc b/chromium/sync/js/js_arg_list.cc
deleted file mode 100644
index e3317e5f574..00000000000
--- a/chromium/sync/js/js_arg_list.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/js_arg_list.h"
-
-#include "base/json/json_writer.h"
-
-namespace syncer {
-
-JsArgList::JsArgList() {}
-
-JsArgList::JsArgList(base::ListValue* args) : args_(args) {}
-
-JsArgList::~JsArgList() {}
-
-const base::ListValue& JsArgList::Get() const {
- return args_.Get();
-}
-
-std::string JsArgList::ToString() const {
- std::string str;
- base::JSONWriter::Write(&Get(), &str);
- return str;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/js/js_arg_list.h b/chromium/sync/js/js_arg_list.h
deleted file mode 100644
index 34a0cf69669..00000000000
--- a/chromium/sync/js/js_arg_list.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_ARG_LIST_H_
-#define SYNC_JS_JS_ARG_LIST_H_
-
-// See README.js for design comments.
-
-#include <string>
-
-#include "base/values.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/immutable.h"
-
-namespace syncer {
-
-// A thin wrapper around Immutable<ListValue>. Used for passing
-// around argument lists to different threads.
-class SYNC_EXPORT JsArgList {
- public:
- // Uses an empty argument list.
- JsArgList();
-
- // Takes over the data in |args|, leaving |args| empty.
- explicit JsArgList(base::ListValue* args);
-
- ~JsArgList();
-
- const base::ListValue& Get() const;
-
- std::string ToString() const;
-
- // Copy constructor and assignment operator welcome.
-
- private:
- typedef Immutable<base::ListValue, HasSwapMemFnByPtr<base::ListValue> >
- ImmutableListValue;
- ImmutableListValue args_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_ARG_LIST_H_
diff --git a/chromium/sync/js/js_arg_list_unittest.cc b/chromium/sync/js/js_arg_list_unittest.cc
deleted file mode 100644
index 2a10286751b..00000000000
--- a/chromium/sync/js/js_arg_list_unittest.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/js_arg_list.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class JsArgListTest : public testing::Test {};
-
-TEST_F(JsArgListTest, EmptyList) {
- JsArgList arg_list;
- EXPECT_TRUE(arg_list.Get().empty());
- EXPECT_EQ("[]", arg_list.ToString());
-}
-
-TEST_F(JsArgListTest, FromList) {
- scoped_ptr<base::ListValue> list(new base::ListValue());
- list->Append(new base::FundamentalValue(false));
- list->Append(new base::FundamentalValue(5));
- base::DictionaryValue* dict = new base::DictionaryValue();
- list->Append(dict);
- dict->SetString("foo", "bar");
- dict->Set("baz", new base::ListValue());
-
- scoped_ptr<base::ListValue> list_copy(list->DeepCopy());
-
- JsArgList arg_list(list.get());
-
- // |arg_list| should take over |list|'s data.
- EXPECT_TRUE(list->empty());
- EXPECT_TRUE(arg_list.Get().Equals(list_copy.get()));
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/js/js_backend.h b/chromium/sync/js/js_backend.h
deleted file mode 100644
index e39600a8bcb..00000000000
--- a/chromium/sync/js/js_backend.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_BACKEND_H_
-#define SYNC_JS_JS_BACKEND_H_
-
-// See README.js for design comments.
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class JsArgList;
-class JsEventHandler;
-class JsReplyHandler;
-template <typename T> class WeakHandle;
-
-// Interface representing the backend of chrome://sync-internals. A
-// JsBackend can handle messages and can emit events to a
-// JsEventHandler.
-class SYNC_EXPORT_PRIVATE JsBackend {
- public:
- // Starts emitting events to the given handler, if initialized.
- virtual void SetJsEventHandler(
- const WeakHandle<JsEventHandler>& event_handler) = 0;
-
- // Processes the given message and replies via the given handler, if
- // initialized.
- virtual void ProcessJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) = 0;
-
- protected:
- virtual ~JsBackend() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_BACKEND_H_
diff --git a/chromium/sync/js/js_controller.h b/chromium/sync/js/js_controller.h
deleted file mode 100644
index 08432bf004e..00000000000
--- a/chromium/sync/js/js_controller.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_CONTROLLER_H_
-#define SYNC_JS_JS_CONTROLLER_H_
-
-// See README.js for design comments.
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class JsArgList;
-class JsEventHandler;
-class JsReplyHandler;
-template <typename T> class WeakHandle;
-
-// An interface for objects that JsEventHandlers directly interact
-// with. JsEventHandlers can add themselves to receive events and
-// also send messages which will eventually reach the backend.
-class SYNC_EXPORT JsController {
- public:
- // Adds an event handler which will start receiving JS events (not
- // immediately, so this can be called in the handler's constructor).
- // Multiple event handlers are supported, but each event handler
- // must be added at most once.
- //
- // Ideally, we'd take WeakPtrs, but we need the raw pointer values
- // to be able to look them up for removal.
- virtual void AddJsEventHandler(JsEventHandler* event_handler) = 0;
-
- // Removes the given event handler if it has been added. It will
- // immediately stop receiving any JS events.
- virtual void RemoveJsEventHandler(JsEventHandler* event_handler) = 0;
-
- // Processes a JS message. The reply (if any) will be sent to
- // |reply_handler| if it is initialized.
- virtual void ProcessJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) = 0;
-
- protected:
- virtual ~JsController() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_CONTROLLER_H_
diff --git a/chromium/sync/js/js_event_details.cc b/chromium/sync/js/js_event_details.cc
deleted file mode 100644
index 0517b39bb9f..00000000000
--- a/chromium/sync/js/js_event_details.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/js_event_details.h"
-
-#include "base/json/json_writer.h"
-
-namespace syncer {
-
-JsEventDetails::JsEventDetails() {}
-
-JsEventDetails::JsEventDetails(base::DictionaryValue* details)
- : details_(details) {}
-
-JsEventDetails::~JsEventDetails() {}
-
-const base::DictionaryValue& JsEventDetails::Get() const {
- return details_.Get();
-}
-
-std::string JsEventDetails::ToString() const {
- std::string str;
- base::JSONWriter::Write(&Get(), &str);
- return str;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/js/js_event_details.h b/chromium/sync/js/js_event_details.h
deleted file mode 100644
index ae970cceb88..00000000000
--- a/chromium/sync/js/js_event_details.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_EVENT_DETAILS_H_
-#define SYNC_JS_JS_EVENT_DETAILS_H_
-
-// See README.js for design comments.
-
-#include <string>
-
-#include "base/values.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/immutable.h"
-
-namespace syncer {
-
-// A thin wrapper around Immutable<DictionaryValue>. Used for passing
-// around event details to different threads.
-class SYNC_EXPORT JsEventDetails {
- public:
- // Uses an empty dictionary.
- JsEventDetails();
-
- // Takes over the data in |details|, leaving |details| empty.
- explicit JsEventDetails(base::DictionaryValue* details);
-
- ~JsEventDetails();
-
- const base::DictionaryValue& Get() const;
-
- std::string ToString() const;
-
- // Copy constructor and assignment operator welcome.
-
- private:
- typedef Immutable<base::DictionaryValue,
- HasSwapMemFnByPtr<base::DictionaryValue> >
- ImmutableDictionaryValue;
-
- ImmutableDictionaryValue details_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_EVENT_DETAILS_H_
diff --git a/chromium/sync/js/js_event_details_unittest.cc b/chromium/sync/js/js_event_details_unittest.cc
deleted file mode 100644
index d93d5219a19..00000000000
--- a/chromium/sync/js/js_event_details_unittest.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/js_event_details.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class JsEventDetailsTest : public testing::Test {};
-
-TEST_F(JsEventDetailsTest, EmptyList) {
- JsEventDetails details;
- EXPECT_TRUE(details.Get().empty());
- EXPECT_EQ("{}", details.ToString());
-}
-
-TEST_F(JsEventDetailsTest, FromDictionary) {
- base::DictionaryValue dict;
- dict.SetString("foo", "bar");
- dict.Set("baz", new base::ListValue());
-
- scoped_ptr<base::DictionaryValue> dict_copy(dict.DeepCopy());
-
- JsEventDetails details(&dict);
-
- // |details| should take over |dict|'s data.
- EXPECT_TRUE(dict.empty());
- EXPECT_TRUE(details.Get().Equals(dict_copy.get()));
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/js/js_event_handler.h b/chromium/sync/js/js_event_handler.h
deleted file mode 100644
index 2e2213efefb..00000000000
--- a/chromium/sync/js/js_event_handler.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_EVENT_HANDLER_H_
-#define SYNC_JS_JS_EVENT_HANDLER_H_
-
-// See README.js for design comments.
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class JsEventDetails;
-
-// An interface for objects that handle Javascript events (e.g.,
-// WebUIs).
-class SYNC_EXPORT JsEventHandler {
- public:
- virtual void HandleJsEvent(
- const std::string& name, const JsEventDetails& details) = 0;
-
- protected:
- virtual ~JsEventHandler() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_EVENT_HANDLER_H_
diff --git a/chromium/sync/js/js_reply_handler.h b/chromium/sync/js/js_reply_handler.h
deleted file mode 100644
index 3026b7b4da0..00000000000
--- a/chromium/sync/js/js_reply_handler.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_REPLY_HANDLER_H_
-#define SYNC_JS_JS_REPLY_HANDLER_H_
-
-// See README.js for design comments.
-
-#include <string>
-
-namespace syncer {
-
-class JsArgList;
-
-// An interface for objects that handle Javascript message replies
-// (e.g., WebUIs).
-class JsReplyHandler {
- public:
- virtual void HandleJsReply(
- const std::string& name, const JsArgList& args) = 0;
-
- protected:
- virtual ~JsReplyHandler() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_REPLY_HANDLER_H_
diff --git a/chromium/sync/js/js_test_util.cc b/chromium/sync/js/js_test_util.cc
deleted file mode 100644
index 331efccbb1e..00000000000
--- a/chromium/sync/js/js_test_util.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/js_test_util.h"
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_event_details.h"
-
-namespace syncer {
-
-void PrintTo(const JsArgList& args, ::std::ostream* os) {
- *os << args.ToString();
-}
-
-void PrintTo(const JsEventDetails& details, ::std::ostream* os) {
- *os << details.ToString();
-}
-
-namespace {
-
-// Matcher implementation for HasArgs().
-class HasArgsMatcher
- : public ::testing::MatcherInterface<const JsArgList&> {
- public:
- explicit HasArgsMatcher(const JsArgList& expected_args)
- : expected_args_(expected_args) {}
-
- virtual ~HasArgsMatcher() {}
-
- virtual bool MatchAndExplain(
- const JsArgList& args,
- ::testing::MatchResultListener* listener) const {
- // No need to annotate listener since we already define PrintTo().
- return args.Get().Equals(&expected_args_.Get());
- }
-
- virtual void DescribeTo(::std::ostream* os) const {
- *os << "has args " << expected_args_.ToString();
- }
-
- virtual void DescribeNegationTo(::std::ostream* os) const {
- *os << "doesn't have args " << expected_args_.ToString();
- }
-
- private:
- const JsArgList expected_args_;
-
- DISALLOW_COPY_AND_ASSIGN(HasArgsMatcher);
-};
-
-// Matcher implementation for HasDetails().
-class HasDetailsMatcher
- : public ::testing::MatcherInterface<const JsEventDetails&> {
- public:
- explicit HasDetailsMatcher(const JsEventDetails& expected_details)
- : expected_details_(expected_details) {}
-
- virtual ~HasDetailsMatcher() {}
-
- virtual bool MatchAndExplain(
- const JsEventDetails& details,
- ::testing::MatchResultListener* listener) const {
- // No need to annotate listener since we already define PrintTo().
- return details.Get().Equals(&expected_details_.Get());
- }
-
- virtual void DescribeTo(::std::ostream* os) const {
- *os << "has details " << expected_details_.ToString();
- }
-
- virtual void DescribeNegationTo(::std::ostream* os) const {
- *os << "doesn't have details " << expected_details_.ToString();
- }
-
- private:
- const JsEventDetails expected_details_;
-
- DISALLOW_COPY_AND_ASSIGN(HasDetailsMatcher);
-};
-
-} // namespace
-
-::testing::Matcher<const JsArgList&> HasArgs(const JsArgList& expected_args) {
- return ::testing::MakeMatcher(new HasArgsMatcher(expected_args));
-}
-
-::testing::Matcher<const JsArgList&> HasArgsAsList(
- const base::ListValue& expected_args) {
- scoped_ptr<base::ListValue> expected_args_copy(expected_args.DeepCopy());
- return HasArgs(JsArgList(expected_args_copy.get()));
-}
-
-::testing::Matcher<const JsEventDetails&> HasDetails(
- const JsEventDetails& expected_details) {
- return ::testing::MakeMatcher(new HasDetailsMatcher(expected_details));
-}
-
-::testing::Matcher<const JsEventDetails&> HasDetailsAsDictionary(
- const base::DictionaryValue& expected_details) {
- scoped_ptr<base::DictionaryValue> expected_details_copy(
- expected_details.DeepCopy());
- return HasDetails(JsEventDetails(expected_details_copy.get()));
-}
-
-MockJsBackend::MockJsBackend() {}
-
-MockJsBackend::~MockJsBackend() {}
-
-WeakHandle<JsBackend> MockJsBackend::AsWeakHandle() {
- return MakeWeakHandle(AsWeakPtr());
-}
-
-MockJsController::MockJsController() {}
-
-MockJsController::~MockJsController() {}
-
-MockJsEventHandler::MockJsEventHandler() {}
-
-WeakHandle<JsEventHandler> MockJsEventHandler::AsWeakHandle() {
- return MakeWeakHandle(AsWeakPtr());
-}
-
-MockJsEventHandler::~MockJsEventHandler() {}
-
-MockJsReplyHandler::MockJsReplyHandler() {}
-
-MockJsReplyHandler::~MockJsReplyHandler() {}
-
-WeakHandle<JsReplyHandler> MockJsReplyHandler::AsWeakHandle() {
- return MakeWeakHandle(AsWeakPtr());
-}
-
-} // namespace syncer
-
diff --git a/chromium/sync/js/js_test_util.h b/chromium/sync/js/js_test_util.h
deleted file mode 100644
index 9cf91fbcfb7..00000000000
--- a/chromium/sync/js/js_test_util.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_JS_TEST_UTIL_H_
-#define SYNC_JS_JS_TEST_UTIL_H_
-
-#include <ostream>
-#include <string>
-
-#include "base/memory/weak_ptr.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_backend.h"
-#include "sync/js/js_controller.h"
-#include "sync/js/js_event_handler.h"
-#include "sync/js/js_reply_handler.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace base {
-class DictionaryValue;
-class ListValue;
-}
-
-namespace syncer {
-
-class JsArgList;
-class JsEventDetails;
-
-// Defined for googletest. Equivalent to "*os << args.ToString()".
-void PrintTo(const JsArgList& args, ::std::ostream* os);
-void PrintTo(const JsEventDetails& details, ::std::ostream* os);
-
-// A gmock matcher for JsArgList. Use like:
-//
-// EXPECT_CALL(mock, HandleJsReply("foo", HasArgs(expected_args)));
-::testing::Matcher<const JsArgList&> HasArgs(const JsArgList& expected_args);
-
-// Like HasArgs() but takes a ListValue instead.
-::testing::Matcher<const JsArgList&> HasArgsAsList(
- const base::ListValue& expected_args);
-
-// A gmock matcher for JsEventDetails. Use like:
-//
-// EXPECT_CALL(mock, HandleJsEvent("foo", HasArgs(expected_details)));
-::testing::Matcher<const JsEventDetails&> HasDetails(
- const JsEventDetails& expected_details);
-
-// Like HasDetails() but takes a DictionaryValue instead.
-::testing::Matcher<const JsEventDetails&> HasDetailsAsDictionary(
- const base::DictionaryValue& expected_details);
-
-// Mocks.
-
-class MockJsBackend : public JsBackend,
- public base::SupportsWeakPtr<MockJsBackend> {
- public:
- MockJsBackend();
- virtual ~MockJsBackend();
-
- WeakHandle<JsBackend> AsWeakHandle();
-
- MOCK_METHOD1(SetJsEventHandler, void(const WeakHandle<JsEventHandler>&));
- MOCK_METHOD3(ProcessJsMessage, void(const ::std::string&, const JsArgList&,
- const WeakHandle<JsReplyHandler>&));
-};
-
-class MockJsController : public JsController,
- public base::SupportsWeakPtr<MockJsController> {
- public:
- MockJsController();
- virtual ~MockJsController();
-
- MOCK_METHOD1(AddJsEventHandler, void(JsEventHandler*));
- MOCK_METHOD1(RemoveJsEventHandler, void(JsEventHandler*));
- MOCK_METHOD3(ProcessJsMessage,
- void(const ::std::string&, const JsArgList&,
- const WeakHandle<JsReplyHandler>&));
-};
-
-class MockJsEventHandler
- : public JsEventHandler,
- public base::SupportsWeakPtr<MockJsEventHandler> {
- public:
- MockJsEventHandler();
- virtual ~MockJsEventHandler();
-
- WeakHandle<JsEventHandler> AsWeakHandle();
-
- MOCK_METHOD2(HandleJsEvent,
- void(const ::std::string&, const JsEventDetails&));
-};
-
-class MockJsReplyHandler
- : public JsReplyHandler,
- public base::SupportsWeakPtr<MockJsReplyHandler> {
- public:
- MockJsReplyHandler();
- virtual ~MockJsReplyHandler();
-
- WeakHandle<JsReplyHandler> AsWeakHandle();
-
- MOCK_METHOD2(HandleJsReply,
- void(const ::std::string&, const JsArgList&));
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_JS_TEST_UTIL_H_
diff --git a/chromium/sync/js/sync_js_controller.cc b/chromium/sync/js/sync_js_controller.cc
deleted file mode 100644
index 4d3148fdd03..00000000000
--- a/chromium/sync/js/sync_js_controller.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/sync_js_controller.h"
-
-#include "base/location.h"
-#include "sync/js/js_backend.h"
-#include "sync/js/js_event_details.h"
-
-namespace syncer {
-
-SyncJsController::PendingJsMessage::PendingJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler)
- : name(name), args(args), reply_handler(reply_handler) {}
-
-SyncJsController::PendingJsMessage::~PendingJsMessage() {}
-
-SyncJsController::SyncJsController() {}
-
-SyncJsController::~SyncJsController() {
- AttachJsBackend(WeakHandle<JsBackend>());
-}
-
-void SyncJsController::AddJsEventHandler(JsEventHandler* event_handler) {
- js_event_handlers_.AddObserver(event_handler);
- UpdateBackendEventHandler();
-}
-
-void SyncJsController::RemoveJsEventHandler(JsEventHandler* event_handler) {
- js_event_handlers_.RemoveObserver(event_handler);
- UpdateBackendEventHandler();
-}
-
-void SyncJsController::AttachJsBackend(
- const WeakHandle<JsBackend>& js_backend) {
- js_backend_ = js_backend;
- UpdateBackendEventHandler();
-
- if (js_backend_.IsInitialized()) {
- // Process any queued messages.
- for (PendingJsMessageList::const_iterator it =
- pending_js_messages_.begin();
- it != pending_js_messages_.end(); ++it) {
- js_backend_.Call(FROM_HERE, &JsBackend::ProcessJsMessage,
- it->name, it->args, it->reply_handler);
- }
- }
-}
-
-void SyncJsController::ProcessJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) {
- if (js_backend_.IsInitialized()) {
- js_backend_.Call(FROM_HERE, &JsBackend::ProcessJsMessage,
- name, args, reply_handler);
- } else {
- pending_js_messages_.push_back(
- PendingJsMessage(name, args, reply_handler));
- }
-}
-
-void SyncJsController::HandleJsEvent(const std::string& name,
- const JsEventDetails& details) {
- FOR_EACH_OBSERVER(JsEventHandler, js_event_handlers_,
- HandleJsEvent(name, details));
-}
-
-void SyncJsController::UpdateBackendEventHandler() {
- if (js_backend_.IsInitialized()) {
- // To avoid making the backend send useless events, we clear the
- // event handler we pass to it if we don't have any event
- // handlers.
- WeakHandle<JsEventHandler> backend_event_handler =
- js_event_handlers_.might_have_observers() ?
- MakeWeakHandle(AsWeakPtr()) : WeakHandle<SyncJsController>();
- js_backend_.Call(FROM_HERE, &JsBackend::SetJsEventHandler,
- backend_event_handler);
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/js/sync_js_controller.h b/chromium/sync/js/sync_js_controller.h
deleted file mode 100644
index 0e259f860af..00000000000
--- a/chromium/sync/js/sync_js_controller.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_JS_SYNC_JS_CONTROLLER_H_
-#define SYNC_JS_SYNC_JS_CONTROLLER_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "base/observer_list.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_controller.h"
-#include "sync/js/js_event_handler.h"
-
-namespace syncer {
-
-class JsBackend;
-
-// A class that mediates between the sync JsEventHandlers and the sync
-// JsBackend.
-class SYNC_EXPORT SyncJsController
- : public JsController, public JsEventHandler,
- public base::SupportsWeakPtr<SyncJsController> {
- public:
- SyncJsController();
-
- virtual ~SyncJsController();
-
- // Sets the backend to route all messages to (if initialized).
- // Sends any queued-up messages if |backend| is initialized.
- void AttachJsBackend(const WeakHandle<JsBackend>& js_backend);
-
- // JsController implementation.
- virtual void AddJsEventHandler(JsEventHandler* event_handler) OVERRIDE;
- virtual void RemoveJsEventHandler(JsEventHandler* event_handler) OVERRIDE;
- // Queues up any messages that are sent when there is no attached
- // initialized backend.
- virtual void ProcessJsMessage(
- const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler) OVERRIDE;
-
- // JsEventHandler implementation.
- virtual void HandleJsEvent(const std::string& name,
- const JsEventDetails& details) OVERRIDE;
-
- private:
- // A struct used to hold the arguments to ProcessJsMessage() for
- // future invocation.
- struct PendingJsMessage {
- std::string name;
- JsArgList args;
- WeakHandle<JsReplyHandler> reply_handler;
-
- PendingJsMessage(const std::string& name, const JsArgList& args,
- const WeakHandle<JsReplyHandler>& reply_handler);
-
- ~PendingJsMessage();
- };
-
- typedef std::vector<PendingJsMessage> PendingJsMessageList;
-
- // Sets |js_backend_|'s event handler depending on how many
- // underlying event handlers we have.
- void UpdateBackendEventHandler();
-
- WeakHandle<JsBackend> js_backend_;
- ObserverList<JsEventHandler> js_event_handlers_;
- PendingJsMessageList pending_js_messages_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncJsController);
-};
-
-} // namespace syncer
-
-#endif // SYNC_JS_SYNC_JS_CONTROLLER_H_
diff --git a/chromium/sync/js/sync_js_controller_unittest.cc b/chromium/sync/js/sync_js_controller_unittest.cc
deleted file mode 100644
index eca617c2d45..00000000000
--- a/chromium/sync/js/sync_js_controller_unittest.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/js/sync_js_controller.h"
-
-#include "base/message_loop/message_loop.h"
-#include "base/values.h"
-#include "sync/js/js_arg_list.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_test_util.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using ::testing::_;
-using ::testing::InSequence;
-using ::testing::Mock;
-using ::testing::StrictMock;
-
-class SyncJsControllerTest : public testing::Test {
- protected:
- void PumpLoop() {
- message_loop_.RunUntilIdle();
- }
-
- private:
- base::MessageLoop message_loop_;
-};
-
-ACTION_P(ReplyToMessage, reply_name) {
- arg2.Call(FROM_HERE, &JsReplyHandler::HandleJsReply, reply_name, JsArgList());
-}
-
-TEST_F(SyncJsControllerTest, Messages) {
- InSequence dummy;
- // |mock_backend| needs to outlive |sync_js_controller|.
- StrictMock<MockJsBackend> mock_backend;
- StrictMock<MockJsReplyHandler> mock_reply_handler;
- SyncJsController sync_js_controller;
-
- base::ListValue arg_list1, arg_list2;
- arg_list1.Append(new base::FundamentalValue(false));
- arg_list2.Append(new base::FundamentalValue(5));
- JsArgList args1(&arg_list1), args2(&arg_list2);
-
- EXPECT_CALL(mock_backend, SetJsEventHandler(_));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _))
- .WillOnce(ReplyToMessage("test1_reply"));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _))
- .WillOnce(ReplyToMessage("test2_reply"));
-
- sync_js_controller.AttachJsBackend(mock_backend.AsWeakHandle());
- sync_js_controller.ProcessJsMessage("test1",
- args2,
- mock_reply_handler.AsWeakHandle());
- sync_js_controller.ProcessJsMessage("test2",
- args1,
- mock_reply_handler.AsWeakHandle());
-
- // The replies should be waiting on our message loop.
- EXPECT_CALL(mock_reply_handler, HandleJsReply("test1_reply", _));
- EXPECT_CALL(mock_reply_handler, HandleJsReply("test2_reply", _));
- PumpLoop();
-
- // Let destructor of |sync_js_controller| call RemoveBackend().
-}
-
-TEST_F(SyncJsControllerTest, QueuedMessages) {
- // |mock_backend| needs to outlive |sync_js_controller|.
- StrictMock<MockJsBackend> mock_backend;
- StrictMock<MockJsReplyHandler> mock_reply_handler;
- SyncJsController sync_js_controller;
-
- base::ListValue arg_list1, arg_list2;
- arg_list1.Append(new base::FundamentalValue(false));
- arg_list2.Append(new base::FundamentalValue(5));
- JsArgList args1(&arg_list1), args2(&arg_list2);
-
- // Should queue messages.
- sync_js_controller.ProcessJsMessage(
- "test1",
- args2,
- mock_reply_handler.AsWeakHandle());
- sync_js_controller.ProcessJsMessage(
- "test2",
- args1,
- mock_reply_handler.AsWeakHandle());
-
- // Should do nothing.
- PumpLoop();
- Mock::VerifyAndClearExpectations(&mock_backend);
-
-
- // Should call the queued messages.
- EXPECT_CALL(mock_backend, SetJsEventHandler(_));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _))
- .WillOnce(ReplyToMessage("test1_reply"));
- EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _))
- .WillOnce(ReplyToMessage("test2_reply"));
- EXPECT_CALL(mock_reply_handler, HandleJsReply("test1_reply", _));
- EXPECT_CALL(mock_reply_handler, HandleJsReply("test2_reply", _));
-
- sync_js_controller.AttachJsBackend(mock_backend.AsWeakHandle());
- PumpLoop();
-
- // Should do nothing.
- sync_js_controller.AttachJsBackend(WeakHandle<JsBackend>());
- PumpLoop();
-
- // Should also do nothing.
- sync_js_controller.AttachJsBackend(WeakHandle<JsBackend>());
- PumpLoop();
-}
-
-TEST_F(SyncJsControllerTest, Events) {
- InSequence dummy;
- SyncJsController sync_js_controller;
-
- base::DictionaryValue details_dict1, details_dict2;
- details_dict1.SetString("foo", "bar");
- details_dict2.SetInteger("baz", 5);
- JsEventDetails details1(&details_dict1), details2(&details_dict2);
-
- StrictMock<MockJsEventHandler> event_handler1, event_handler2;
- EXPECT_CALL(event_handler1, HandleJsEvent("event", HasDetails(details1)));
- EXPECT_CALL(event_handler2, HandleJsEvent("event", HasDetails(details1)));
- EXPECT_CALL(event_handler1,
- HandleJsEvent("anotherevent", HasDetails(details2)));
- EXPECT_CALL(event_handler2,
- HandleJsEvent("anotherevent", HasDetails(details2)));
-
- sync_js_controller.AddJsEventHandler(&event_handler1);
- sync_js_controller.AddJsEventHandler(&event_handler2);
- sync_js_controller.HandleJsEvent("event", details1);
- sync_js_controller.HandleJsEvent("anotherevent", details2);
- sync_js_controller.RemoveJsEventHandler(&event_handler1);
- sync_js_controller.RemoveJsEventHandler(&event_handler2);
- sync_js_controller.HandleJsEvent("droppedevent", details2);
-
- PumpLoop();
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/notifier/DEPS b/chromium/sync/notifier/DEPS
deleted file mode 100644
index e02d522377d..00000000000
--- a/chromium/sync/notifier/DEPS
+++ /dev/null
@@ -1,19 +0,0 @@
-include_rules = [
- "+google/cacheinvalidation",
- "+jingle/notifier",
- "+net/base/backoff_entry.h",
- "+net/base/mock_host_resolver.h",
- "+net/url_request/url_request_context.h",
- "+net/url_request/url_request_test_util.h",
-
- "+sync/base",
- "+sync/internal_api/public/base",
- "+sync/internal_api/public/util",
- "+sync/protocol/service_constants.h",
- "+sync/util",
-
- # unit tests depend on talk/base.
- "+talk/base",
- # sync_notifier depends on the xmpp part of libjingle.
- "+talk/xmpp",
-]
diff --git a/chromium/sync/notifier/ack_handler.cc b/chromium/sync/notifier/ack_handler.cc
deleted file mode 100644
index 3b31b2b7b2e..00000000000
--- a/chromium/sync/notifier/ack_handler.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/ack_handler.h"
-
-#include "sync/internal_api/public/base/invalidation.h"
-
-namespace syncer {
-
-AckHandler::AckHandler() {}
-
-AckHandler::~AckHandler() {}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/ack_handler.h b/chromium/sync/notifier/ack_handler.h
deleted file mode 100644
index f1fc16fa702..00000000000
--- a/chromium/sync/notifier/ack_handler.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_ACK_HANDLER_H_
-#define SYNC_NOTIFIER_ACK_HANDLER_H_
-
-#include <vector>
-
-#include "sync/base/sync_export.h"
-
-namespace invalidation {
-class ObjectId;
-} // namespace invalidation
-
-namespace syncer {
-
-class AckHandle;
-
-// An interface for classes that keep track of invalidation acknowledgements.
-//
-// We don't expect to support more than one "real" implementation of AckHandler,
-// but this interface is very useful for testing and implementation hiding.
-class SYNC_EXPORT AckHandler {
- public:
- AckHandler();
- virtual ~AckHandler() = 0;
-
- // Record the local acknowledgement of an invalidation identified by |handle|.
- virtual void Acknowledge(
- const invalidation::ObjectId& id,
- const AckHandle& handle) = 0;
-
- // Record the drop of an invalidation identified by |handle|.
- virtual void Drop(
- const invalidation::ObjectId& id,
- const AckHandle& handle) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_ACK_HANDLER_H_
diff --git a/chromium/sync/notifier/dropped_invalidation_tracker.cc b/chromium/sync/notifier/dropped_invalidation_tracker.cc
deleted file mode 100644
index 8599cc258c0..00000000000
--- a/chromium/sync/notifier/dropped_invalidation_tracker.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/dropped_invalidation_tracker.h"
-
-#include "sync/internal_api/public/base/invalidation.h"
-
-namespace syncer {
-
-DroppedInvalidationTracker::DroppedInvalidationTracker(
- const invalidation::ObjectId& id)
- : id_(id),
- drop_ack_handle_(AckHandle::InvalidAckHandle()) {}
-
-DroppedInvalidationTracker::~DroppedInvalidationTracker() {}
-
-const invalidation::ObjectId& DroppedInvalidationTracker::object_id() const {
- return id_;
-}
-
-void DroppedInvalidationTracker::RecordDropEvent(
- WeakHandle<AckHandler> handler, AckHandle handle) {
- drop_ack_handler_ = handler;
- drop_ack_handle_ = handle;
-}
-
-void DroppedInvalidationTracker::RecordRecoveryFromDropEvent() {
- if (drop_ack_handler_.IsInitialized()) {
- drop_ack_handler_.Call(FROM_HERE,
- &AckHandler::Acknowledge,
- id_,
- drop_ack_handle_);
- }
- drop_ack_handler_ = syncer::WeakHandle<AckHandler>();
-}
-
-bool DroppedInvalidationTracker::IsRecoveringFromDropEvent() const {
- return drop_ack_handler_.IsInitialized();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/dropped_invalidation_tracker.h b/chromium/sync/notifier/dropped_invalidation_tracker.h
deleted file mode 100644
index 877187ed0bc..00000000000
--- a/chromium/sync/notifier/dropped_invalidation_tracker.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_DROPPED_INVALIDATION_TRACKER_H_
-#define SYNC_NOTIFIER_DROPPED_INVALIDATION_TRACKER_H_
-
-#include "google/cacheinvalidation/include/types.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/ack_handle.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/ack_handler.h"
-
-namespace syncer {
-
-class Invalidation;
-
-// Helps InvalidationHandlers keep track of dropped invalidations for a given
-// ObjectId.
-//
-// The intent of this class is to hide some of the implementation details around
-// how the invalidations system manages dropping and drop recovery. Any
-// invalidation handler that intends to buffer and occasionally drop
-// invalidations should keep one instance of it per registered ObjectId.
-//
-// When an invalidation handler wishes to drop an invalidation, it must provide
-// an instance of this class to that Invalidation's Drop() method. In order to
-// indicate recovery from a drop, the handler can call this class'
-// RecordRecoveryFromDropEvent().
-class SYNC_EXPORT DroppedInvalidationTracker {
- public:
- explicit DroppedInvalidationTracker(const invalidation::ObjectId& id);
- ~DroppedInvalidationTracker();
-
- const invalidation::ObjectId& object_id() const;
-
- // Called by Invalidation::Drop() to keep track of a drop event.
- //
- // Takes ownership of the internals belonging to a soon to be discarded
- // dropped invalidation. See also the comment for this class'
- // |drop_ack_handler_| member.
- void RecordDropEvent(WeakHandle<AckHandler> handler, AckHandle handle);
-
- // Returns true if we're still recovering from a drop event.
- bool IsRecoveringFromDropEvent() const;
-
- // Called by the InvalidationHandler when it recovers from the drop event.
- void RecordRecoveryFromDropEvent();
-
- private:
- invalidation::ObjectId id_;
- AckHandle drop_ack_handle_;
-
- // A WeakHandle to the enitity responsible for persisting invalidation
- // acknowledgement state on disk. We can get away with using a WeakHandle
- // because we don't care if our drop recovery message doesn't gets delivered
- // in some shutdown cases. If that happens, we'll have to process the
- // invalidation state again on the next restart. It would be a waste of time
- // and resources, but otherwise not particularly harmful.
- WeakHandle<AckHandler> drop_ack_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(DroppedInvalidationTracker);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_DROPPED_INVALIDATION_TRACKER_H_
diff --git a/chromium/sync/notifier/fake_invalidation_handler.cc b/chromium/sync/notifier/fake_invalidation_handler.cc
deleted file mode 100644
index 30cb2fd3495..00000000000
--- a/chromium/sync/notifier/fake_invalidation_handler.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/fake_invalidation_handler.h"
-
-namespace syncer {
-
-FakeInvalidationHandler::FakeInvalidationHandler()
- : state_(DEFAULT_INVALIDATION_ERROR),
- invalidation_count_(0) {}
-
-FakeInvalidationHandler::~FakeInvalidationHandler() {}
-
-InvalidatorState FakeInvalidationHandler::GetInvalidatorState() const {
- return state_;
-}
-
-const ObjectIdInvalidationMap&
-FakeInvalidationHandler::GetLastInvalidationMap() const {
- return last_invalidation_map_;
-}
-
-int FakeInvalidationHandler::GetInvalidationCount() const {
- return invalidation_count_;
-}
-
-void FakeInvalidationHandler::OnInvalidatorStateChange(InvalidatorState state) {
- state_ = state;
-}
-
-void FakeInvalidationHandler::OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- last_invalidation_map_ = invalidation_map;
- ++invalidation_count_;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/fake_invalidation_handler.h b/chromium/sync/notifier/fake_invalidation_handler.h
deleted file mode 100644
index 81b877a5bcd..00000000000
--- a/chromium/sync/notifier/fake_invalidation_handler.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_FAKE_SYNC_NOTIFIER_OBSERVER_H_
-#define SYNC_NOTIFIER_FAKE_SYNC_NOTIFIER_OBSERVER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-
-namespace syncer {
-
-class FakeInvalidationHandler : public InvalidationHandler {
- public:
- FakeInvalidationHandler();
- virtual ~FakeInvalidationHandler();
-
- InvalidatorState GetInvalidatorState() const;
- const ObjectIdInvalidationMap& GetLastInvalidationMap() const;
- int GetInvalidationCount() const;
-
- // InvalidationHandler implementation.
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE;
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE;
-
- private:
- InvalidatorState state_;
- ObjectIdInvalidationMap last_invalidation_map_;
- int invalidation_count_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeInvalidationHandler);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_FAKE_SYNC_NOTIFIER_OBSERVER_H_
diff --git a/chromium/sync/notifier/fake_invalidation_state_tracker.cc b/chromium/sync/notifier/fake_invalidation_state_tracker.cc
deleted file mode 100644
index 47e2f0f0dc7..00000000000
--- a/chromium/sync/notifier/fake_invalidation_state_tracker.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/fake_invalidation_state_tracker.h"
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/location.h"
-#include "base/task_runner.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-const int64 FakeInvalidationStateTracker::kMinVersion = kint64min;
-
-FakeInvalidationStateTracker::FakeInvalidationStateTracker() {}
-
-FakeInvalidationStateTracker::~FakeInvalidationStateTracker() {}
-
-void FakeInvalidationStateTracker::SetInvalidatorClientId(
- const std::string& client_id) {
- Clear();
- invalidator_client_id_ = client_id;
-}
-
-std::string FakeInvalidationStateTracker::GetInvalidatorClientId() const {
- return invalidator_client_id_;
-}
-
-void FakeInvalidationStateTracker::SetBootstrapData(
- const std::string& data) {
- bootstrap_data_ = data;
-}
-
-std::string FakeInvalidationStateTracker::GetBootstrapData() const {
- return bootstrap_data_;
-}
-
-void FakeInvalidationStateTracker::SetSavedInvalidations(
- const UnackedInvalidationsMap& states) {
- unacked_invalidations_map_ = states;
-}
-
-UnackedInvalidationsMap
-FakeInvalidationStateTracker::GetSavedInvalidations() const {
- return unacked_invalidations_map_;
-}
-
-void FakeInvalidationStateTracker::Clear() {
- invalidator_client_id_.clear();
- bootstrap_data_.clear();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/fake_invalidation_state_tracker.h b/chromium/sync/notifier/fake_invalidation_state_tracker.h
deleted file mode 100644
index d1daaba121f..00000000000
--- a/chromium/sync/notifier/fake_invalidation_state_tracker.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_FAKE_INVALIDATION_STATE_TRACKER_H_
-#define SYNC_NOTIFIER_FAKE_INVALIDATION_STATE_TRACKER_H_
-
-#include "base/memory/weak_ptr.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-
-namespace syncer {
-
-// InvalidationStateTracker implementation that simply keeps track of
-// the max versions and invalidation state in memory.
-class FakeInvalidationStateTracker
- : public InvalidationStateTracker,
- public base::SupportsWeakPtr<FakeInvalidationStateTracker> {
- public:
- FakeInvalidationStateTracker();
- virtual ~FakeInvalidationStateTracker();
-
- // InvalidationStateTracker implementation.
- virtual void SetInvalidatorClientId(const std::string& client_id) OVERRIDE;
- virtual std::string GetInvalidatorClientId() const OVERRIDE;
- virtual void SetBootstrapData(const std::string& data) OVERRIDE;
- virtual std::string GetBootstrapData() const OVERRIDE;
- virtual void SetSavedInvalidations(
- const UnackedInvalidationsMap& states) OVERRIDE;
- virtual UnackedInvalidationsMap GetSavedInvalidations() const OVERRIDE;
- virtual void Clear() OVERRIDE;
-
- static const int64 kMinVersion;
-
- private:
- std::string invalidator_client_id_;
- std::string bootstrap_data_;
- UnackedInvalidationsMap unacked_invalidations_map_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_FAKE_INVALIDATION_STATE_TRACKER_H_
diff --git a/chromium/sync/notifier/fake_invalidator.cc b/chromium/sync/notifier/fake_invalidator.cc
deleted file mode 100644
index 3e1ce32250b..00000000000
--- a/chromium/sync/notifier/fake_invalidator.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/fake_invalidator.h"
-
-#include "sync/notifier/object_id_invalidation_map.h"
-
-namespace syncer {
-
-FakeInvalidator::FakeInvalidator() {}
-
-FakeInvalidator::~FakeInvalidator() {}
-
-bool FakeInvalidator::IsHandlerRegistered(InvalidationHandler* handler) const {
- return registrar_.IsHandlerRegisteredForTest(handler);
-}
-
-ObjectIdSet FakeInvalidator::GetRegisteredIds(
- InvalidationHandler* handler) const {
- return registrar_.GetRegisteredIds(handler);
-}
-
-const std::string& FakeInvalidator::GetCredentialsEmail() const {
- return email_;
-}
-
-const std::string& FakeInvalidator::GetCredentialsToken() const {
- return token_;
-}
-
-void FakeInvalidator::EmitOnInvalidatorStateChange(InvalidatorState state) {
- registrar_.UpdateInvalidatorState(state);
-}
-
-void FakeInvalidator::EmitOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- registrar_.DispatchInvalidationsToHandlers(invalidation_map);
-}
-
-void FakeInvalidator::RegisterHandler(InvalidationHandler* handler) {
- registrar_.RegisterHandler(handler);
-}
-
-void FakeInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- registrar_.UpdateRegisteredIds(handler, ids);
-}
-
-void FakeInvalidator::UnregisterHandler(InvalidationHandler* handler) {
- registrar_.UnregisterHandler(handler);
-}
-
-InvalidatorState FakeInvalidator::GetInvalidatorState() const {
- return registrar_.GetInvalidatorState();
-}
-
-void FakeInvalidator::UpdateCredentials(
- const std::string& email, const std::string& token) {
- email_ = email;
- token_ = token;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/fake_invalidator.h b/chromium/sync/notifier/fake_invalidator.h
deleted file mode 100644
index b2173eef4ac..00000000000
--- a/chromium/sync/notifier/fake_invalidator.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_FAKE_INVALIDATOR_H_
-#define SYNC_NOTIFIER_FAKE_INVALIDATOR_H_
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/notifier/invalidator_registrar.h"
-
-namespace syncer {
-
-class FakeInvalidator : public Invalidator {
- public:
- FakeInvalidator();
- virtual ~FakeInvalidator();
-
- bool IsHandlerRegistered(InvalidationHandler* handler) const;
- ObjectIdSet GetRegisteredIds(InvalidationHandler* handler) const;
- const std::string& GetUniqueId() const;
- const std::string& GetCredentialsEmail() const;
- const std::string& GetCredentialsToken() const;
-
- void EmitOnInvalidatorStateChange(InvalidatorState state);
- void EmitOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map);
-
- virtual void RegisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) OVERRIDE;
- virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void UpdateCredentials(
- const std::string& email, const std::string& token) OVERRIDE;
-
- private:
- InvalidatorRegistrar registrar_;
- std::string state_;
- std::string email_;
- std::string token_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_FAKE_INVALIDATOR_H_
diff --git a/chromium/sync/notifier/fake_invalidator_unittest.cc b/chromium/sync/notifier/fake_invalidator_unittest.cc
deleted file mode 100644
index d8cae840b25..00000000000
--- a/chromium/sync/notifier/fake_invalidator_unittest.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "sync/notifier/fake_invalidator.h"
-#include "sync/notifier/invalidator_test_template.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class FakeInvalidatorTestDelegate {
- public:
- FakeInvalidatorTestDelegate() {}
-
- ~FakeInvalidatorTestDelegate() {
- DestroyInvalidator();
- }
-
- void CreateInvalidator(
- const std::string& invalidator_client_id,
- const std::string& initial_state,
- const base::WeakPtr<InvalidationStateTracker>&
- invalidation_state_tracker) {
- DCHECK(!invalidator_.get());
- invalidator_.reset(new FakeInvalidator());
- }
-
- FakeInvalidator* GetInvalidator() {
- return invalidator_.get();
- }
-
- void DestroyInvalidator() {
- invalidator_.reset();
- }
-
- void WaitForInvalidator() {
- // Do Nothing.
- }
-
- void TriggerOnInvalidatorStateChange(InvalidatorState state) {
- invalidator_->EmitOnInvalidatorStateChange(state);
- }
-
- void TriggerOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- invalidator_->EmitOnIncomingInvalidation(invalidation_map);
- }
-
- private:
- scoped_ptr<FakeInvalidator> invalidator_;
-};
-
-INSTANTIATE_TYPED_TEST_CASE_P(
- FakeInvalidatorTest, InvalidatorTest,
- FakeInvalidatorTestDelegate);
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidation_handler.h b/chromium/sync/notifier/invalidation_handler.h
deleted file mode 100644
index 2f5149f262f..00000000000
--- a/chromium/sync/notifier/invalidation_handler.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_INVALIDATION_HANDLER_H_
-#define SYNC_NOTIFIER_INVALIDATION_HANDLER_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/notifier/invalidator_state.h"
-
-namespace syncer {
-
-class ObjectIdInvalidationMap;
-
-class SYNC_EXPORT InvalidationHandler {
- public:
- // Called when the invalidator state changes.
- virtual void OnInvalidatorStateChange(InvalidatorState state) = 0;
-
- // Called when a invalidation is received. The per-id states are in
- // |id_state_map| and the source is in |source|. Note that this may be
- // called regardless of the current invalidator state.
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) = 0;
-
- protected:
- virtual ~InvalidationHandler() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATION_HANDLER_H_
diff --git a/chromium/sync/notifier/invalidation_notifier.cc b/chromium/sync/notifier/invalidation_notifier.cc
deleted file mode 100644
index a509409c76d..00000000000
--- a/chromium/sync/notifier/invalidation_notifier.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidation_notifier.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "base/metrics/histogram.h"
-#include "google/cacheinvalidation/include/invalidation-client-factory.h"
-#include "jingle/notifier/listener/push_client.h"
-#include "net/url_request/url_request_context.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "talk/xmpp/jid.h"
-#include "talk/xmpp/xmppclientsettings.h"
-
-namespace syncer {
-
-InvalidationNotifier::InvalidationNotifier(
- scoped_ptr<notifier::PushClient> push_client,
- const std::string& invalidator_client_id,
- const UnackedInvalidationsMap& saved_invalidations,
- const std::string& invalidation_bootstrap_data,
- const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
- const std::string& client_info)
- : state_(STOPPED),
- saved_invalidations_(saved_invalidations),
- invalidation_state_tracker_(invalidation_state_tracker),
- client_info_(client_info),
- invalidator_client_id_(invalidator_client_id),
- invalidation_bootstrap_data_(invalidation_bootstrap_data),
- invalidation_listener_(push_client.Pass()) {
-}
-
-InvalidationNotifier::~InvalidationNotifier() {
- DCHECK(CalledOnValidThread());
-}
-
-void InvalidationNotifier::RegisterHandler(InvalidationHandler* handler) {
- DCHECK(CalledOnValidThread());
- registrar_.RegisterHandler(handler);
-}
-
-void InvalidationNotifier::UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- DCHECK(CalledOnValidThread());
- registrar_.UpdateRegisteredIds(handler, ids);
- invalidation_listener_.UpdateRegisteredIds(registrar_.GetAllRegisteredIds());
-}
-
-void InvalidationNotifier::UnregisterHandler(InvalidationHandler* handler) {
- DCHECK(CalledOnValidThread());
- registrar_.UnregisterHandler(handler);
-}
-
-InvalidatorState InvalidationNotifier::GetInvalidatorState() const {
- DCHECK(CalledOnValidThread());
- return registrar_.GetInvalidatorState();
-}
-
-void InvalidationNotifier::UpdateCredentials(
- const std::string& email, const std::string& token) {
- if (state_ == STOPPED) {
- invalidation_listener_.Start(
- base::Bind(&invalidation::CreateInvalidationClient),
- invalidator_client_id_, client_info_, invalidation_bootstrap_data_,
- saved_invalidations_,
- invalidation_state_tracker_,
- this);
- state_ = STARTED;
- }
- invalidation_listener_.UpdateCredentials(email, token);
-}
-
-void InvalidationNotifier::OnInvalidate(
- const ObjectIdInvalidationMap& invalidation_map) {
- DCHECK(CalledOnValidThread());
- registrar_.DispatchInvalidationsToHandlers(invalidation_map);
-}
-
-void InvalidationNotifier::OnInvalidatorStateChange(InvalidatorState state) {
- DCHECK(CalledOnValidThread());
- registrar_.UpdateInvalidatorState(state);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidation_notifier.h b/chromium/sync/notifier/invalidation_notifier.h
deleted file mode 100644
index a11608c1611..00000000000
--- a/chromium/sync/notifier/invalidation_notifier.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// An implementation of Invalidator that wraps an invalidation
-// client. Handles the details of connecting to XMPP and hooking it
-// up to the invalidation client.
-//
-// You probably don't want to use this directly; use
-// NonBlockingInvalidator.
-
-#ifndef SYNC_NOTIFIER_INVALIDATION_NOTIFIER_H_
-#define SYNC_NOTIFIER_INVALIDATION_NOTIFIER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/notifier/invalidator_registrar.h"
-#include "sync/notifier/sync_invalidation_listener.h"
-
-namespace notifier {
-class PushClient;
-} // namespace notifier
-
-namespace syncer {
-
-// This class must live on the IO thread.
-class SYNC_EXPORT_PRIVATE InvalidationNotifier
- : public Invalidator,
- public SyncInvalidationListener::Delegate,
- public base::NonThreadSafe {
- public:
- // |invalidation_state_tracker| must be initialized.
- InvalidationNotifier(
- scoped_ptr<notifier::PushClient> push_client,
- const std::string& invalidator_client_id,
- const UnackedInvalidationsMap& saved_invalidations,
- const std::string& invalidation_bootstrap_data,
- const WeakHandle<InvalidationStateTracker>&
- invalidation_state_tracker,
- const std::string& client_info);
-
- virtual ~InvalidationNotifier();
-
- // Invalidator implementation.
- virtual void RegisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) OVERRIDE;
- virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void UpdateCredentials(
- const std::string& email, const std::string& token) OVERRIDE;
-
- // SyncInvalidationListener::Delegate implementation.
- virtual void OnInvalidate(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE;
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE;
-
- private:
- // We start off in the STOPPED state. When we get our initial
- // credentials, we connect and move to the CONNECTING state. When
- // we're connected we start the invalidation client and move to the
- // STARTED state. We never go back to a previous state.
- enum State {
- STOPPED,
- CONNECTING,
- STARTED
- };
- State state_;
-
- InvalidatorRegistrar registrar_;
-
- // Passed to |invalidation_listener_|.
- const UnackedInvalidationsMap saved_invalidations_;
-
- // Passed to |invalidation_listener_|.
- const WeakHandle<InvalidationStateTracker>
- invalidation_state_tracker_;
-
- // Passed to |invalidation_listener_|.
- const std::string client_info_;
-
- // The client ID to pass to |invalidation_listener_|.
- const std::string invalidator_client_id_;
-
- // The initial bootstrap data to pass to |invalidation_listener_|.
- const std::string invalidation_bootstrap_data_;
-
- // The invalidation listener.
- SyncInvalidationListener invalidation_listener_;
-
- DISALLOW_COPY_AND_ASSIGN(InvalidationNotifier);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATION_NOTIFIER_H_
diff --git a/chromium/sync/notifier/invalidation_notifier_unittest.cc b/chromium/sync/notifier/invalidation_notifier_unittest.cc
deleted file mode 100644
index bc92e23f9fd..00000000000
--- a/chromium/sync/notifier/invalidation_notifier_unittest.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidation_notifier.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "jingle/notifier/base/fake_base_task.h"
-#include "jingle/notifier/base/notifier_options.h"
-#include "jingle/notifier/listener/fake_push_client.h"
-#include "net/url_request/url_request_test_util.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/fake_invalidation_handler.h"
-#include "sync/notifier/fake_invalidation_state_tracker.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/invalidator_test_template.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class InvalidationNotifierTestDelegate {
- public:
- InvalidationNotifierTestDelegate() {}
-
- ~InvalidationNotifierTestDelegate() {
- DestroyInvalidator();
- }
-
- void CreateInvalidator(
- const std::string& invalidator_client_id,
- const std::string& initial_state,
- const base::WeakPtr<InvalidationStateTracker>&
- invalidation_state_tracker) {
- DCHECK(!invalidator_.get());
- invalidator_.reset(
- new InvalidationNotifier(
- scoped_ptr<notifier::PushClient>(new notifier::FakePushClient()),
- invalidator_client_id,
- UnackedInvalidationsMap(),
- initial_state,
- MakeWeakHandle(invalidation_state_tracker),
- "fake_client_info"));
- }
-
- Invalidator* GetInvalidator() {
- return invalidator_.get();
- }
-
- void DestroyInvalidator() {
- // Stopping the invalidation notifier stops its scheduler, which deletes
- // any pending tasks without running them. Some tasks "run and delete"
- // another task, so they must be run in order to avoid leaking the inner
- // task. Stopping does not schedule any tasks, so it's both necessary and
- // sufficient to drain the task queue before stopping the notifier.
- message_loop_.RunUntilIdle();
- invalidator_.reset();
- }
-
- void WaitForInvalidator() {
- message_loop_.RunUntilIdle();
- }
-
- void TriggerOnInvalidatorStateChange(InvalidatorState state) {
- invalidator_->OnInvalidatorStateChange(state);
- }
-
- void TriggerOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- invalidator_->OnInvalidate(invalidation_map);
- }
-
- private:
- base::MessageLoop message_loop_;
- scoped_ptr<InvalidationNotifier> invalidator_;
-};
-
-INSTANTIATE_TYPED_TEST_CASE_P(
- InvalidationNotifierTest, InvalidatorTest,
- InvalidationNotifierTestDelegate);
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidation_state_tracker.h b/chromium/sync/notifier/invalidation_state_tracker.h
deleted file mode 100644
index 81a07eae5c5..00000000000
--- a/chromium/sync/notifier/invalidation_state_tracker.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// An InvalidationStateTracker is an interface that handles persisting state
-// needed for invalidations. Currently, it is responsible for managing the
-// following information:
-// - Max version seen from the invalidation server to help dedupe invalidations.
-// - Bootstrap data for the invalidation client.
-// - Payloads and locally generated ack handles, to support local acking.
-
-#ifndef SYNC_NOTIFIER_INVALIDATION_STATE_TRACKER_H_
-#define SYNC_NOTIFIER_INVALIDATION_STATE_TRACKER_H_
-
-#include <map>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/callback_forward.h"
-#include "base/memory/ref_counted.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/invalidation.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/unacked_invalidation_set.h"
-
-namespace base {
-class TaskRunner;
-} // namespace base
-
-namespace syncer {
-
-class InvalidationStateTracker {
- public:
- InvalidationStateTracker() {}
-
- // The per-client unique ID used to register the invalidation client with the
- // server. This is used to squelch invalidation notifications that originate
- // from changes made by this client.
- virtual void SetInvalidatorClientId(const std::string& data) = 0;
- virtual std::string GetInvalidatorClientId() const = 0;
-
- // Used by invalidation::InvalidationClient for persistence. |data| is an
- // opaque blob that an invalidation client can use after a restart to
- // bootstrap itself. |data| is binary data (not valid UTF8, embedded nulls,
- // etc).
- virtual void SetBootstrapData(const std::string& data) = 0;
- virtual std::string GetBootstrapData() const = 0;
-
- // Used to store invalidations that have been acked to the server, but not yet
- // handled by our clients. We store these invalidations on disk so we won't
- // lose them if we need to restart.
- virtual void SetSavedInvalidations(const UnackedInvalidationsMap& states) = 0;
- virtual UnackedInvalidationsMap GetSavedInvalidations() const = 0;
-
- // Erases invalidation versions, client ID, and state stored on disk.
- virtual void Clear() = 0;
-
- protected:
- virtual ~InvalidationStateTracker() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATION_STATE_TRACKER_H_
diff --git a/chromium/sync/notifier/invalidation_util.cc b/chromium/sync/notifier/invalidation_util.cc
deleted file mode 100644
index 27acd38c8d4..00000000000
--- a/chromium/sync/notifier/invalidation_util.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidation_util.h"
-
-#include <ostream>
-#include <sstream>
-
-#include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "google/cacheinvalidation/types.pb.h"
-#include "sync/internal_api/public/base/invalidation.h"
-
-namespace invalidation {
-void PrintTo(const invalidation::ObjectId& id, std::ostream* os) {
- *os << syncer::ObjectIdToString(id);
-}
-} // namespace invalidation
-
-namespace syncer {
-
-bool ObjectIdLessThan::operator()(const invalidation::ObjectId& lhs,
- const invalidation::ObjectId& rhs) const {
- return (lhs.source() < rhs.source()) ||
- (lhs.source() == rhs.source() && lhs.name() < rhs.name());
-}
-
-bool InvalidationVersionLessThan::operator()(
- const Invalidation& a,
- const Invalidation& b) const {
- DCHECK(a.object_id() == b.object_id())
- << "a: " << ObjectIdToString(a.object_id()) << ", "
- << "b: " << ObjectIdToString(a.object_id());
-
- if (a.is_unknown_version() && !b.is_unknown_version())
- return true;
-
- if (!a.is_unknown_version() && b.is_unknown_version())
- return false;
-
- if (a.is_unknown_version() && b.is_unknown_version())
- return false;
-
- return a.version() < b.version();
-}
-
-bool RealModelTypeToObjectId(ModelType model_type,
- invalidation::ObjectId* object_id) {
- std::string notification_type;
- if (!RealModelTypeToNotificationType(model_type, &notification_type)) {
- return false;
- }
- object_id->Init(ipc::invalidation::ObjectSource::CHROME_SYNC,
- notification_type);
- return true;
-}
-
-bool ObjectIdToRealModelType(const invalidation::ObjectId& object_id,
- ModelType* model_type) {
- return NotificationTypeToRealModelType(object_id.name(), model_type);
-}
-
-scoped_ptr<base::DictionaryValue> ObjectIdToValue(
- const invalidation::ObjectId& object_id) {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
- value->SetInteger("source", object_id.source());
- value->SetString("name", object_id.name());
- return value.Pass();
-}
-
-bool ObjectIdFromValue(const base::DictionaryValue& value,
- invalidation::ObjectId* out) {
- *out = invalidation::ObjectId();
- std::string name;
- int source = 0;
- if (!value.GetInteger("source", &source) ||
- !value.GetString("name", &name)) {
- return false;
- }
- *out = invalidation::ObjectId(source, name);
- return true;
-}
-
-std::string ObjectIdToString(
- const invalidation::ObjectId& object_id) {
- scoped_ptr<base::DictionaryValue> value(ObjectIdToValue(object_id));
- std::string str;
- base::JSONWriter::Write(value.get(), &str);
- return str;
-}
-
-ObjectIdSet ModelTypeSetToObjectIdSet(ModelTypeSet model_types) {
- ObjectIdSet ids;
- for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
- invalidation::ObjectId model_type_as_id;
- if (!RealModelTypeToObjectId(it.Get(), &model_type_as_id)) {
- DLOG(WARNING) << "Invalid model type " << it.Get();
- continue;
- }
- ids.insert(model_type_as_id);
- }
- return ids;
-}
-
-ModelTypeSet ObjectIdSetToModelTypeSet(const ObjectIdSet& ids) {
- ModelTypeSet model_types;
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- ModelType model_type;
- if (!ObjectIdToRealModelType(*it, &model_type)) {
- DLOG(WARNING) << "Invalid object ID " << ObjectIdToString(*it);
- continue;
- }
- model_types.Put(model_type);
- }
- return model_types;
-}
-
-std::string InvalidationToString(
- const invalidation::Invalidation& invalidation) {
- std::stringstream ss;
- ss << "{ ";
- ss << "object_id: " << ObjectIdToString(invalidation.object_id()) << ", ";
- ss << "version: " << invalidation.version();
- ss << " }";
- return ss.str();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidation_util.h b/chromium/sync/notifier/invalidation_util.h
deleted file mode 100644
index 699550e7a69..00000000000
--- a/chromium/sync/notifier/invalidation_util.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Various utilities for dealing with invalidation data types.
-
-#ifndef SYNC_NOTIFIER_INVALIDATION_UTIL_H_
-#define SYNC_NOTIFIER_INVALIDATION_UTIL_H_
-
-#include <iosfwd>
-#include <set>
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace base {
-class DictionaryValue;
-} // namespace
-
-namespace invalidation {
-
-class Invalidation;
-class ObjectId;
-
-// Gmock print helper
-SYNC_EXPORT_PRIVATE void PrintTo(const invalidation::ObjectId& id,
- std::ostream* os);
-
-} // namespace invalidation
-
-namespace syncer {
-
-class Invalidation;
-
-struct SYNC_EXPORT ObjectIdLessThan {
- bool operator()(const invalidation::ObjectId& lhs,
- const invalidation::ObjectId& rhs) const;
-};
-
-struct InvalidationVersionLessThan {
- bool operator()(const syncer::Invalidation& a,
- const syncer::Invalidation& b) const;
-};
-
-typedef std::set<invalidation::ObjectId, ObjectIdLessThan> ObjectIdSet;
-
-SYNC_EXPORT bool RealModelTypeToObjectId(ModelType model_type,
- invalidation::ObjectId* object_id);
-
-bool ObjectIdToRealModelType(const invalidation::ObjectId& object_id,
- ModelType* model_type);
-
-// Caller owns the returned DictionaryValue.
-scoped_ptr<base::DictionaryValue> ObjectIdToValue(
- const invalidation::ObjectId& object_id);
-
-bool ObjectIdFromValue(const base::DictionaryValue& value,
- invalidation::ObjectId* out);
-
-SYNC_EXPORT_PRIVATE std::string ObjectIdToString(
- const invalidation::ObjectId& object_id);
-
-SYNC_EXPORT_PRIVATE ObjectIdSet ModelTypeSetToObjectIdSet(ModelTypeSet models);
-ModelTypeSet ObjectIdSetToModelTypeSet(const ObjectIdSet& ids);
-
-std::string InvalidationToString(
- const invalidation::Invalidation& invalidation);
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATION_UTIL_H_
diff --git a/chromium/sync/notifier/invalidator.h b/chromium/sync/notifier/invalidator.h
deleted file mode 100644
index b6a7467ac9f..00000000000
--- a/chromium/sync/notifier/invalidator.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Interface to the invalidator, which is an object that receives
-// invalidations for registered object IDs. The corresponding
-// InvalidationHandler is notifier when such an event occurs.
-
-#ifndef SYNC_NOTIFIER_INVALIDATOR_H_
-#define SYNC_NOTIFIER_INVALIDATOR_H_
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/invalidator_state.h"
-
-namespace syncer {
-class InvalidationHandler;
-
-class SYNC_EXPORT Invalidator {
- public:
- Invalidator() {}
- virtual ~Invalidator() {}
-
- // Clients should follow the pattern below:
- //
- // When starting the client:
- //
- // invalidator->RegisterHandler(client_handler);
- //
- // When the set of IDs to register changes for the client during its lifetime
- // (i.e., between calls to RegisterHandler(client_handler) and
- // UnregisterHandler(client_handler):
- //
- // invalidator->UpdateRegisteredIds(client_handler, client_ids);
- //
- // When shutting down the client for profile shutdown:
- //
- // invalidator->UnregisterHandler(client_handler);
- //
- // Note that there's no call to UpdateRegisteredIds() -- this is because the
- // invalidation API persists registrations across browser restarts.
- //
- // When permanently shutting down the client, e.g. when disabling the related
- // feature:
- //
- // invalidator->UpdateRegisteredIds(client_handler, ObjectIdSet());
- // invalidator->UnregisterHandler(client_handler);
- //
- // It is an error to have registered handlers when an invalidator is
- // destroyed; clients must ensure that they unregister themselves
- // before then.
-
- // Starts sending notifications to |handler|. |handler| must not be NULL,
- // and it must not already be registered.
- virtual void RegisterHandler(InvalidationHandler* handler) = 0;
-
- // Updates the set of ObjectIds associated with |handler|. |handler| must
- // not be NULL, and must already be registered. An ID must be registered for
- // at most one handler.
- virtual void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) = 0;
-
- // Stops sending notifications to |handler|. |handler| must not be NULL, and
- // it must already be registered. Note that this doesn't unregister the IDs
- // associated with |handler|.
- virtual void UnregisterHandler(InvalidationHandler* handler) = 0;
-
- // Returns the current invalidator state. When called from within
- // InvalidationHandler::OnInvalidatorStateChange(), this must return
- // the updated state.
- virtual InvalidatorState GetInvalidatorState() const = 0;
-
- // The observers won't be notified of any notifications until
- // UpdateCredentials is called at least once. It can be called more than
- // once.
- virtual void UpdateCredentials(
- const std::string& email, const std::string& token) = 0;
-};
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATOR_H_
diff --git a/chromium/sync/notifier/invalidator_registrar.cc b/chromium/sync/notifier/invalidator_registrar.cc
deleted file mode 100644
index 1c9c50cbbf2..00000000000
--- a/chromium/sync/notifier/invalidator_registrar.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidator_registrar.h"
-
-#include <cstddef>
-#include <iterator>
-#include <utility>
-
-#include "base/logging.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-
-namespace syncer {
-
-InvalidatorRegistrar::InvalidatorRegistrar()
- : state_(DEFAULT_INVALIDATION_ERROR) {}
-
-InvalidatorRegistrar::~InvalidatorRegistrar() {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(!handlers_.might_have_observers());
- CHECK(handler_to_ids_map_.empty());
-}
-
-void InvalidatorRegistrar::RegisterHandler(InvalidationHandler* handler) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(handler);
- CHECK(!handlers_.HasObserver(handler));
- handlers_.AddObserver(handler);
-}
-
-void InvalidatorRegistrar::UpdateRegisteredIds(
- InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(handler);
- CHECK(handlers_.HasObserver(handler));
-
- for (HandlerIdsMap::const_iterator it = handler_to_ids_map_.begin();
- it != handler_to_ids_map_.end(); ++it) {
- if (it->first == handler) {
- continue;
- }
-
- std::vector<invalidation::ObjectId> intersection;
- std::set_intersection(
- it->second.begin(), it->second.end(),
- ids.begin(), ids.end(),
- std::inserter(intersection, intersection.end()),
- ObjectIdLessThan());
- CHECK(intersection.empty())
- << "Duplicate registration: trying to register "
- << ObjectIdToString(*intersection.begin()) << " for "
- << handler << " when it's already registered for "
- << it->first;
- }
-
- if (ids.empty()) {
- handler_to_ids_map_.erase(handler);
- } else {
- handler_to_ids_map_[handler] = ids;
- }
-}
-
-void InvalidatorRegistrar::UnregisterHandler(InvalidationHandler* handler) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(handler);
- CHECK(handlers_.HasObserver(handler));
- handlers_.RemoveObserver(handler);
- handler_to_ids_map_.erase(handler);
-}
-
-ObjectIdSet InvalidatorRegistrar::GetRegisteredIds(
- InvalidationHandler* handler) const {
- DCHECK(thread_checker_.CalledOnValidThread());
- HandlerIdsMap::const_iterator lookup = handler_to_ids_map_.find(handler);
- if (lookup != handler_to_ids_map_.end()) {
- return lookup->second;
- } else {
- return ObjectIdSet();
- }
-}
-
-ObjectIdSet InvalidatorRegistrar::GetAllRegisteredIds() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- ObjectIdSet registered_ids;
- for (HandlerIdsMap::const_iterator it = handler_to_ids_map_.begin();
- it != handler_to_ids_map_.end(); ++it) {
- registered_ids.insert(it->second.begin(), it->second.end());
- }
- return registered_ids;
-}
-
-void InvalidatorRegistrar::DispatchInvalidationsToHandlers(
- const ObjectIdInvalidationMap& invalidation_map) {
- DCHECK(thread_checker_.CalledOnValidThread());
- // If we have no handlers, there's nothing to do.
- if (!handlers_.might_have_observers()) {
- return;
- }
-
- for (HandlerIdsMap::iterator it = handler_to_ids_map_.begin();
- it != handler_to_ids_map_.end(); ++it) {
- ObjectIdInvalidationMap to_emit =
- invalidation_map.GetSubsetWithObjectIds(it->second);
- if (!to_emit.Empty()) {
- it->first->OnIncomingInvalidation(to_emit);
- }
- }
-}
-
-void InvalidatorRegistrar::UpdateInvalidatorState(InvalidatorState state) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(1) << "New invalidator state: " << InvalidatorStateToString(state_)
- << " -> " << InvalidatorStateToString(state);
- state_ = state;
- FOR_EACH_OBSERVER(InvalidationHandler, handlers_,
- OnInvalidatorStateChange(state));
-}
-
-InvalidatorState InvalidatorRegistrar::GetInvalidatorState() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return state_;
-}
-
-bool InvalidatorRegistrar::IsHandlerRegisteredForTest(
- InvalidationHandler* handler) const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return handlers_.HasObserver(handler);
-}
-
-void InvalidatorRegistrar::DetachFromThreadForTest() {
- DCHECK(thread_checker_.CalledOnValidThread());
- thread_checker_.DetachFromThread();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidator_registrar.h b/chromium/sync/notifier/invalidator_registrar.h
deleted file mode 100644
index fb6b3881c3c..00000000000
--- a/chromium/sync/notifier/invalidator_registrar.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_INVALIDATOR_REGISTRAR_H_
-#define SYNC_NOTIFIER_INVALIDATOR_REGISTRAR_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/observer_list.h"
-#include "base/threading/thread_checker.h"
-#include "sync/base/sync_export.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace invalidation {
-class ObjectId;
-} // namespace invalidation
-
-namespace syncer {
-
-class ObjectIdInvalidationMap;
-
-// A helper class for implementations of the Invalidator interface. It helps
-// keep track of registered handlers and which object ID registrations are
-// associated with which handlers, so implementors can just reuse the logic
-// here to dispatch invalidations and other interesting notifications.
-class SYNC_EXPORT InvalidatorRegistrar {
- public:
- InvalidatorRegistrar();
-
- // It is an error to have registered handlers on destruction.
- ~InvalidatorRegistrar();
-
- // Starts sending notifications to |handler|. |handler| must not be NULL,
- // and it must already be registered.
- void RegisterHandler(InvalidationHandler* handler);
-
- // Updates the set of ObjectIds associated with |handler|. |handler| must
- // not be NULL, and must already be registered. An ID must be registered for
- // at most one handler.
- void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids);
-
- // Stops sending notifications to |handler|. |handler| must not be NULL, and
- // it must already be registered. Note that this doesn't unregister the IDs
- // associated with |handler|.
- void UnregisterHandler(InvalidationHandler* handler);
-
- ObjectIdSet GetRegisteredIds(InvalidationHandler* handler) const;
-
- // Returns the set of all IDs that are registered to some handler (even
- // handlers that have been unregistered).
- ObjectIdSet GetAllRegisteredIds() const;
-
- // Sorts incoming invalidations into a bucket for each handler and then
- // dispatches the batched invalidations to the corresponding handler.
- // Invalidations for IDs with no corresponding handler are dropped, as are
- // invalidations for handlers that are not added.
- void DispatchInvalidationsToHandlers(
- const ObjectIdInvalidationMap& invalidation_map);
-
- // Updates the invalidator state to the given one and then notifies
- // all handlers. Note that the order is important; handlers that
- // call GetInvalidatorState() when notified will see the new state.
- void UpdateInvalidatorState(InvalidatorState state);
-
- // Returns the current invalidator state. When called from within
- // InvalidationHandler::OnInvalidatorStateChange(), this returns the
- // updated state.
- InvalidatorState GetInvalidatorState() const;
-
- bool IsHandlerRegisteredForTest(InvalidationHandler* handler) const;
-
- // Needed for death tests.
- void DetachFromThreadForTest();
-
- private:
- typedef std::map<InvalidationHandler*, ObjectIdSet> HandlerIdsMap;
-
- base::ThreadChecker thread_checker_;
- ObserverList<InvalidationHandler> handlers_;
- HandlerIdsMap handler_to_ids_map_;
- InvalidatorState state_;
-
- DISALLOW_COPY_AND_ASSIGN(InvalidatorRegistrar);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATOR_REGISTRAR_H_
diff --git a/chromium/sync/notifier/invalidator_registrar_unittest.cc b/chromium/sync/notifier/invalidator_registrar_unittest.cc
deleted file mode 100644
index c527bc5e023..00000000000
--- a/chromium/sync/notifier/invalidator_registrar_unittest.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "google/cacheinvalidation/types.pb.h"
-#include "sync/notifier/fake_invalidation_handler.h"
-#include "sync/notifier/invalidator_registrar.h"
-#include "sync/notifier/invalidator_test_template.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-// We test InvalidatorRegistrar by wrapping it in an Invalidator and
-// running the usual Invalidator tests.
-
-// Thin Invalidator wrapper around InvalidatorRegistrar.
-class RegistrarInvalidator : public Invalidator {
- public:
- RegistrarInvalidator() {}
- virtual ~RegistrarInvalidator() {}
-
- InvalidatorRegistrar* GetRegistrar() {
- return &registrar_;
- }
-
- // Invalidator implementation.
- virtual void RegisterHandler(InvalidationHandler* handler) OVERRIDE {
- registrar_.RegisterHandler(handler);
- }
-
- virtual void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) OVERRIDE {
- registrar_.UpdateRegisteredIds(handler, ids);
- }
-
- virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE {
- registrar_.UnregisterHandler(handler);
- }
-
- virtual InvalidatorState GetInvalidatorState() const OVERRIDE {
- return registrar_.GetInvalidatorState();
- }
-
- virtual void UpdateCredentials(
- const std::string& email, const std::string& token) OVERRIDE {
- // Do nothing.
- }
-
- private:
- InvalidatorRegistrar registrar_;
-
- DISALLOW_COPY_AND_ASSIGN(RegistrarInvalidator);
-};
-
-class RegistrarInvalidatorTestDelegate {
- public:
- RegistrarInvalidatorTestDelegate() {}
-
- ~RegistrarInvalidatorTestDelegate() {
- DestroyInvalidator();
- }
-
- void CreateInvalidator(
- const std::string& invalidator_client_id,
- const std::string& initial_state,
- const base::WeakPtr<InvalidationStateTracker>&
- invalidation_state_tracker) {
- DCHECK(!invalidator_.get());
- invalidator_.reset(new RegistrarInvalidator());
- }
-
- RegistrarInvalidator* GetInvalidator() {
- return invalidator_.get();
- }
-
- void DestroyInvalidator() {
- invalidator_.reset();
- }
-
- void WaitForInvalidator() {
- // Do nothing.
- }
-
- void TriggerOnInvalidatorStateChange(InvalidatorState state) {
- invalidator_->GetRegistrar()->UpdateInvalidatorState(state);
- }
-
- void TriggerOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- invalidator_->GetRegistrar()->DispatchInvalidationsToHandlers(
- invalidation_map);
- }
-
- private:
- scoped_ptr<RegistrarInvalidator> invalidator_;
-};
-
-INSTANTIATE_TYPED_TEST_CASE_P(
- RegistrarInvalidatorTest, InvalidatorTest,
- RegistrarInvalidatorTestDelegate);
-
-class InvalidatorRegistrarTest : public testing::Test {};
-
-// Technically the tests below can be part of InvalidatorTest, but we
-// want to keep the number of death tests down.
-
-// When we expect a death via CHECK(), we can't match against the
-// CHECK() message since they are removed in official builds.
-
-#if GTEST_HAS_DEATH_TEST
-// Having registered handlers on destruction should cause a CHECK.
-TEST_F(InvalidatorRegistrarTest, RegisteredHandlerOnDestruction) {
- scoped_ptr<InvalidatorRegistrar> registrar(new InvalidatorRegistrar());
- FakeInvalidationHandler handler;
-
- registrar->RegisterHandler(&handler);
-
- EXPECT_DEATH({ registrar.reset(); }, "");
-
- ASSERT_TRUE(registrar.get());
- registrar->UnregisterHandler(&handler);
-}
-
-// Multiple registrations by different handlers on the same object ID should
-// cause a CHECK.
-TEST_F(InvalidatorRegistrarTest, MultipleRegistration) {
- const invalidation::ObjectId id1(ipc::invalidation::ObjectSource::TEST, "a");
- const invalidation::ObjectId id2(ipc::invalidation::ObjectSource::TEST, "a");
-
- InvalidatorRegistrar registrar;
-
- FakeInvalidationHandler handler1;
- registrar.RegisterHandler(&handler1);
-
- FakeInvalidationHandler handler2;
- registrar.RegisterHandler(&handler2);
-
- ObjectIdSet ids;
- ids.insert(id1);
- ids.insert(id2);
- registrar.UpdateRegisteredIds(&handler1, ids);
-
- registrar.DetachFromThreadForTest();
- EXPECT_DEATH({ registrar.UpdateRegisteredIds(&handler2, ids); }, "");
-
- registrar.UnregisterHandler(&handler2);
- registrar.UnregisterHandler(&handler1);
-}
-#endif // GTEST_HAS_DEATH_TEST
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidator_state.cc b/chromium/sync/notifier/invalidator_state.cc
deleted file mode 100644
index 0e19222d239..00000000000
--- a/chromium/sync/notifier/invalidator_state.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidator_state.h"
-
-#include "base/logging.h"
-
-namespace syncer {
-
-const char* InvalidatorStateToString(InvalidatorState state) {
- switch (state) {
- case TRANSIENT_INVALIDATION_ERROR:
- return "TRANSIENT_INVALIDATION_ERROR";
- case INVALIDATION_CREDENTIALS_REJECTED:
- return "INVALIDATION_CREDENTIALS_REJECTED";
- case INVALIDATIONS_ENABLED:
- return "INVALIDATIONS_ENABLED";
- default:
- NOTREACHED();
- return "UNKNOWN_INVALIDATOR_STATE";
- }
-}
-
-InvalidatorState FromNotifierReason(
- notifier::NotificationsDisabledReason reason) {
- switch (reason) {
- case notifier::NO_NOTIFICATION_ERROR:
- return INVALIDATIONS_ENABLED;
- case notifier::TRANSIENT_NOTIFICATION_ERROR:
- return TRANSIENT_INVALIDATION_ERROR;
- case notifier::NOTIFICATION_CREDENTIALS_REJECTED:
- return INVALIDATION_CREDENTIALS_REJECTED;
- default:
- NOTREACHED();
- return DEFAULT_INVALIDATION_ERROR;
- }
-}
-
-notifier::NotificationsDisabledReason ToNotifierReasonForTest(
- InvalidatorState state) {
- switch (state) {
- case TRANSIENT_INVALIDATION_ERROR:
- return notifier::TRANSIENT_NOTIFICATION_ERROR;
- case INVALIDATION_CREDENTIALS_REJECTED:
- return notifier::NOTIFICATION_CREDENTIALS_REJECTED;
- case INVALIDATIONS_ENABLED:
- // Fall through.
- default:
- NOTREACHED();
- return notifier::TRANSIENT_NOTIFICATION_ERROR;
- }
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidator_state.h b/chromium/sync/notifier/invalidator_state.h
deleted file mode 100644
index bb4b3d61555..00000000000
--- a/chromium/sync/notifier/invalidator_state.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_INVALIDATOR_STATE_H_
-#define SYNC_NOTIFIER_INVALIDATOR_STATE_H_
-
-#include "jingle/notifier/listener/push_client_observer.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-enum InvalidatorState {
- // Failure states
- // --------------
- // There is an underlying transient problem (e.g., network- or
- // XMPP-related).
- TRANSIENT_INVALIDATION_ERROR,
- DEFAULT_INVALIDATION_ERROR = TRANSIENT_INVALIDATION_ERROR,
- // Our credentials have been rejected.
- INVALIDATION_CREDENTIALS_REJECTED,
-
- // Invalidations are fully working.
- INVALIDATIONS_ENABLED
-};
-
-SYNC_EXPORT const char* InvalidatorStateToString(InvalidatorState state);
-
-InvalidatorState FromNotifierReason(
- notifier::NotificationsDisabledReason reason);
-
-// Should not be called when |state| == INVALIDATIONS_ENABLED.
-SYNC_EXPORT_PRIVATE notifier::NotificationsDisabledReason
- ToNotifierReasonForTest(InvalidatorState state);
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATOR_STATE_H_
diff --git a/chromium/sync/notifier/invalidator_test_template.cc b/chromium/sync/notifier/invalidator_test_template.cc
deleted file mode 100644
index 7fc838547eb..00000000000
--- a/chromium/sync/notifier/invalidator_test_template.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/invalidator_test_template.h"
-
-namespace syncer {
-namespace internal {
-
-BoundFakeInvalidationHandler::BoundFakeInvalidationHandler(
- const Invalidator& invalidator)
- : invalidator_(invalidator),
- last_retrieved_state_(DEFAULT_INVALIDATION_ERROR) {}
-
-BoundFakeInvalidationHandler::~BoundFakeInvalidationHandler() {}
-
-InvalidatorState BoundFakeInvalidationHandler::GetLastRetrievedState() const {
- return last_retrieved_state_;
-}
-
-void BoundFakeInvalidationHandler::OnInvalidatorStateChange(
- InvalidatorState state) {
- FakeInvalidationHandler::OnInvalidatorStateChange(state);
- last_retrieved_state_ = invalidator_.GetInvalidatorState();
-}
-
-} // namespace internal
-} // namespace syncer
diff --git a/chromium/sync/notifier/invalidator_test_template.h b/chromium/sync/notifier/invalidator_test_template.h
deleted file mode 100644
index 67cd0536eb7..00000000000
--- a/chromium/sync/notifier/invalidator_test_template.h
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This class defines tests that implementations of Invalidator should pass in
-// order to be conformant. Here's how you use it to test your implementation.
-//
-// Say your class is called MyInvalidator. Then you need to define a class
-// called MyInvalidatorTestDelegate in my_sync_notifier_unittest.cc like this:
-//
-// class MyInvalidatorTestDelegate {
-// public:
-// MyInvalidatorTestDelegate() ...
-//
-// ~MyInvalidatorTestDelegate() {
-// // DestroyInvalidator() may not be explicitly called by tests.
-// DestroyInvalidator();
-// }
-//
-// // Create the Invalidator implementation with the given parameters.
-// void CreateInvalidator(
-// const std::string& initial_state,
-// const base::WeakPtr<InvalidationStateTracker>&
-// invalidation_state_tracker) {
-// ...
-// }
-//
-// // Should return the Invalidator implementation. Only called after
-// // CreateInvalidator and before DestroyInvalidator.
-// MyInvalidator* GetInvalidator() {
-// ...
-// }
-//
-// // Destroy the Invalidator implementation.
-// void DestroyInvalidator() {
-// ...
-// }
-//
-// // Called after a call to SetUniqueId(), or UpdateCredentials() on the
-// // Invalidator implementation. Should block until the effects of the
-// // call are visible on the current thread.
-// void WaitForInvalidator() {
-// ...
-// }
-//
-// // The Trigger* functions below should block until the effects of
-// // the call are visible on the current thread.
-//
-// // Should cause OnInvalidatorStateChange() to be called on all
-// // observers of the Invalidator implementation with the given
-// // parameters.
-// void TriggerOnInvalidatorStateChange(InvalidatorState state) {
-// ...
-// }
-//
-// // Should cause OnIncomingInvalidation() to be called on all
-// // observers of the Invalidator implementation with the given
-// // parameters.
-// void TriggerOnIncomingInvalidation(
-// const ObjectIdInvalidationMap& invalidation_map) {
-// ...
-// }
-// };
-//
-// The InvalidatorTest test harness will have a member variable of
-// this delegate type and will call its functions in the various
-// tests.
-//
-// Then you simply #include this file as well as gtest.h and add the
-// following statement to my_sync_notifier_unittest.cc:
-//
-// INSTANTIATE_TYPED_TEST_CASE_P(
-// MyInvalidator, InvalidatorTest, MyInvalidatorTestDelegate);
-//
-// Easy!
-
-#ifndef SYNC_NOTIFIER_INVALIDATOR_TEST_TEMPLATE_H_
-#define SYNC_NOTIFIER_INVALIDATOR_TEST_TEMPLATE_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "google/cacheinvalidation/types.pb.h"
-#include "sync/internal_api/public/base/object_id_invalidation_map_test_util.h"
-#include "sync/notifier/fake_invalidation_handler.h"
-#include "sync/notifier/fake_invalidation_state_tracker.h"
-#include "sync/notifier/invalidator.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-template <typename InvalidatorTestDelegate>
-class InvalidatorTest : public testing::Test {
- protected:
- InvalidatorTest()
- : id1(ipc::invalidation::ObjectSource::TEST, "a"),
- id2(ipc::invalidation::ObjectSource::TEST, "b"),
- id3(ipc::invalidation::ObjectSource::TEST, "c"),
- id4(ipc::invalidation::ObjectSource::TEST, "d") {
- }
-
- Invalidator* CreateAndInitializeInvalidator() {
- this->delegate_.CreateInvalidator("fake_invalidator_client_id",
- "fake_initial_state",
- this->fake_tracker_.AsWeakPtr());
- Invalidator* const invalidator = this->delegate_.GetInvalidator();
-
- this->delegate_.WaitForInvalidator();
- invalidator->UpdateCredentials("foo@bar.com", "fake_token");
- this->delegate_.WaitForInvalidator();
-
- return invalidator;
- }
-
- FakeInvalidationStateTracker fake_tracker_;
- InvalidatorTestDelegate delegate_;
-
- const invalidation::ObjectId id1;
- const invalidation::ObjectId id2;
- const invalidation::ObjectId id3;
- const invalidation::ObjectId id4;
-};
-
-TYPED_TEST_CASE_P(InvalidatorTest);
-
-// Initialize the invalidator, register a handler, register some IDs for that
-// handler, and then unregister the handler, dispatching invalidations in
-// between. The handler should only see invalidations when its registered and
-// its IDs are registered.
-TYPED_TEST_P(InvalidatorTest, Basic) {
- Invalidator* const invalidator = this->CreateAndInitializeInvalidator();
-
- FakeInvalidationHandler handler;
-
- invalidator->RegisterHandler(&handler);
-
- ObjectIdInvalidationMap invalidation_map;
- invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
- invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
- invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
-
- // Should be ignored since no IDs are registered to |handler|.
- this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
- EXPECT_EQ(0, handler.GetInvalidationCount());
-
- ObjectIdSet ids;
- ids.insert(this->id1);
- ids.insert(this->id2);
- invalidator->UpdateRegisteredIds(&handler, ids);
-
- this->delegate_.TriggerOnInvalidatorStateChange(INVALIDATIONS_ENABLED);
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler.GetInvalidatorState());
-
- ObjectIdInvalidationMap expected_invalidations;
- expected_invalidations.Insert(Invalidation::Init(this->id1, 1, "1"));
- expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
-
- this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
- EXPECT_EQ(1, handler.GetInvalidationCount());
- EXPECT_THAT(expected_invalidations, Eq(handler.GetLastInvalidationMap()));
-
- ids.erase(this->id1);
- ids.insert(this->id3);
- invalidator->UpdateRegisteredIds(&handler, ids);
-
- expected_invalidations = ObjectIdInvalidationMap();
- expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
- expected_invalidations.Insert(Invalidation::Init(this->id3, 3, "3"));
-
- // Removed object IDs should not be notified, newly-added ones should.
- this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
- EXPECT_EQ(2, handler.GetInvalidationCount());
- EXPECT_THAT(expected_invalidations, Eq(handler.GetLastInvalidationMap()));
-
- this->delegate_.TriggerOnInvalidatorStateChange(TRANSIENT_INVALIDATION_ERROR);
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR,
- handler.GetInvalidatorState());
-
- this->delegate_.TriggerOnInvalidatorStateChange(
- INVALIDATION_CREDENTIALS_REJECTED);
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED,
- handler.GetInvalidatorState());
-
- invalidator->UnregisterHandler(&handler);
-
- // Should be ignored since |handler| isn't registered anymore.
- this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
- EXPECT_EQ(2, handler.GetInvalidationCount());
-}
-
-// Register handlers and some IDs for those handlers, register a handler with
-// no IDs, and register a handler with some IDs but unregister it. Then,
-// dispatch some invalidations and invalidations. Handlers that are registered
-// should get invalidations, and the ones that have registered IDs should
-// receive invalidations for those IDs.
-TYPED_TEST_P(InvalidatorTest, MultipleHandlers) {
- Invalidator* const invalidator = this->CreateAndInitializeInvalidator();
-
- FakeInvalidationHandler handler1;
- FakeInvalidationHandler handler2;
- FakeInvalidationHandler handler3;
- FakeInvalidationHandler handler4;
-
- invalidator->RegisterHandler(&handler1);
- invalidator->RegisterHandler(&handler2);
- invalidator->RegisterHandler(&handler3);
- invalidator->RegisterHandler(&handler4);
-
- {
- ObjectIdSet ids;
- ids.insert(this->id1);
- ids.insert(this->id2);
- invalidator->UpdateRegisteredIds(&handler1, ids);
- }
-
- {
- ObjectIdSet ids;
- ids.insert(this->id3);
- invalidator->UpdateRegisteredIds(&handler2, ids);
- }
-
- // Don't register any IDs for handler3.
-
- {
- ObjectIdSet ids;
- ids.insert(this->id4);
- invalidator->UpdateRegisteredIds(&handler4, ids);
- }
-
- invalidator->UnregisterHandler(&handler4);
-
- this->delegate_.TriggerOnInvalidatorStateChange(INVALIDATIONS_ENABLED);
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler1.GetInvalidatorState());
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler2.GetInvalidatorState());
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler3.GetInvalidatorState());
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler4.GetInvalidatorState());
-
- {
- ObjectIdInvalidationMap invalidation_map;
- invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
- invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
- invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
- invalidation_map.Insert(Invalidation::Init(this->id4, 4, "4"));
-
- this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
-
- ObjectIdInvalidationMap expected_invalidations;
- expected_invalidations.Insert(Invalidation::Init(this->id1, 1, "1"));
- expected_invalidations.Insert(Invalidation::Init(this->id2, 2, "2"));
-
- EXPECT_EQ(1, handler1.GetInvalidationCount());
- EXPECT_THAT(expected_invalidations, Eq(handler1.GetLastInvalidationMap()));
-
- expected_invalidations = ObjectIdInvalidationMap();
- expected_invalidations.Insert(Invalidation::Init(this->id3, 3, "3"));
-
- EXPECT_EQ(1, handler2.GetInvalidationCount());
- EXPECT_THAT(expected_invalidations, Eq(handler2.GetLastInvalidationMap()));
-
- EXPECT_EQ(0, handler3.GetInvalidationCount());
- EXPECT_EQ(0, handler4.GetInvalidationCount());
- }
-
- this->delegate_.TriggerOnInvalidatorStateChange(TRANSIENT_INVALIDATION_ERROR);
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler1.GetInvalidatorState());
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler2.GetInvalidatorState());
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler3.GetInvalidatorState());
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler4.GetInvalidatorState());
-
- invalidator->UnregisterHandler(&handler3);
- invalidator->UnregisterHandler(&handler2);
- invalidator->UnregisterHandler(&handler1);
-}
-
-// Make sure that passing an empty set to UpdateRegisteredIds clears the
-// corresponding entries for the handler.
-TYPED_TEST_P(InvalidatorTest, EmptySetUnregisters) {
- Invalidator* const invalidator = this->CreateAndInitializeInvalidator();
-
- FakeInvalidationHandler handler1;
-
- // Control observer.
- FakeInvalidationHandler handler2;
-
- invalidator->RegisterHandler(&handler1);
- invalidator->RegisterHandler(&handler2);
-
- {
- ObjectIdSet ids;
- ids.insert(this->id1);
- ids.insert(this->id2);
- invalidator->UpdateRegisteredIds(&handler1, ids);
- }
-
- {
- ObjectIdSet ids;
- ids.insert(this->id3);
- invalidator->UpdateRegisteredIds(&handler2, ids);
- }
-
- // Unregister the IDs for the first observer. It should not receive any
- // further invalidations.
- invalidator->UpdateRegisteredIds(&handler1, ObjectIdSet());
-
- this->delegate_.TriggerOnInvalidatorStateChange(INVALIDATIONS_ENABLED);
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler1.GetInvalidatorState());
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler2.GetInvalidatorState());
-
- {
- ObjectIdInvalidationMap invalidation_map;
- invalidation_map.Insert(Invalidation::Init(this->id1, 1, "1"));
- invalidation_map.Insert(Invalidation::Init(this->id2, 2, "2"));
- invalidation_map.Insert(Invalidation::Init(this->id3, 3, "3"));
- this->delegate_.TriggerOnIncomingInvalidation(invalidation_map);
- EXPECT_EQ(0, handler1.GetInvalidationCount());
- EXPECT_EQ(1, handler2.GetInvalidationCount());
- }
-
- this->delegate_.TriggerOnInvalidatorStateChange(TRANSIENT_INVALIDATION_ERROR);
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler1.GetInvalidatorState());
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler2.GetInvalidatorState());
-
- invalidator->UnregisterHandler(&handler2);
- invalidator->UnregisterHandler(&handler1);
-}
-
-namespace internal {
-
-// A FakeInvalidationHandler that is "bound" to a specific
-// Invalidator. This is for cross-referencing state information with
-// the bound Invalidator.
-class BoundFakeInvalidationHandler : public FakeInvalidationHandler {
- public:
- explicit BoundFakeInvalidationHandler(const Invalidator& invalidator);
- virtual ~BoundFakeInvalidationHandler();
-
- // Returns the last return value of GetInvalidatorState() on the
- // bound invalidator from the last time the invalidator state
- // changed.
- InvalidatorState GetLastRetrievedState() const;
-
- // InvalidationHandler implementation.
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE;
-
- private:
- const Invalidator& invalidator_;
- InvalidatorState last_retrieved_state_;
-
- DISALLOW_COPY_AND_ASSIGN(BoundFakeInvalidationHandler);
-};
-
-} // namespace internal
-
-TYPED_TEST_P(InvalidatorTest, GetInvalidatorStateAlwaysCurrent) {
- Invalidator* const invalidator = this->CreateAndInitializeInvalidator();
-
- internal::BoundFakeInvalidationHandler handler(*invalidator);
- invalidator->RegisterHandler(&handler);
-
- this->delegate_.TriggerOnInvalidatorStateChange(INVALIDATIONS_ENABLED);
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler.GetInvalidatorState());
- EXPECT_EQ(INVALIDATIONS_ENABLED, handler.GetLastRetrievedState());
-
- this->delegate_.TriggerOnInvalidatorStateChange(TRANSIENT_INVALIDATION_ERROR);
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler.GetInvalidatorState());
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, handler.GetLastRetrievedState());
-
- invalidator->UnregisterHandler(&handler);
-}
-
-REGISTER_TYPED_TEST_CASE_P(InvalidatorTest,
- Basic, MultipleHandlers, EmptySetUnregisters,
- GetInvalidatorStateAlwaysCurrent);
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_INVALIDATOR_TEST_TEMPLATE_H_
diff --git a/chromium/sync/notifier/mock_ack_handler.cc b/chromium/sync/notifier/mock_ack_handler.cc
deleted file mode 100644
index 6a4c834e27f..00000000000
--- a/chromium/sync/notifier/mock_ack_handler.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/mock_ack_handler.h"
-
-#include "sync/internal_api/public/base/ack_handle.h"
-#include "sync/internal_api/public/base/invalidation.h"
-
-namespace syncer {
-
-namespace {
-
-struct AckHandleMatcher {
- AckHandleMatcher(const AckHandle& handle);
- bool operator()(const syncer::Invalidation& invalidation) const;
-
- syncer::AckHandle handle_;
-};
-
-AckHandleMatcher::AckHandleMatcher(const AckHandle& handle)
- : handle_(handle) {}
-
-bool AckHandleMatcher::operator()(
- const syncer::Invalidation& invalidation) const {
- return handle_.Equals(invalidation.ack_handle());
-}
-
-} // namespace
-
-MockAckHandler::MockAckHandler() {}
-
-MockAckHandler::~MockAckHandler() {}
-
-void MockAckHandler::RegisterInvalidation(Invalidation* invalidation) {
- unacked_invalidations_.push_back(*invalidation);
- invalidation->set_ack_handler(WeakHandleThis());
-}
-
-void MockAckHandler::RegisterUnsentInvalidation(Invalidation* invalidation) {
- unsent_invalidations_.push_back(*invalidation);
-}
-
-bool MockAckHandler::IsUnacked(const Invalidation& invalidation) const {
- AckHandleMatcher matcher(invalidation.ack_handle());
- InvalidationVector::const_iterator it = std::find_if(
- unacked_invalidations_.begin(),
- unacked_invalidations_.end(),
- matcher);
- return it != unacked_invalidations_.end();
-}
-
-bool MockAckHandler::IsUnsent(const Invalidation& invalidation) const {
- AckHandleMatcher matcher(invalidation.ack_handle());
- InvalidationVector::const_iterator it1 = std::find_if(
- unsent_invalidations_.begin(),
- unsent_invalidations_.end(),
- matcher);
- return it1 != unsent_invalidations_.end();
-}
-
-void MockAckHandler::Acknowledge(
- const invalidation::ObjectId& id,
- const AckHandle& handle) {
- AckHandleMatcher matcher(handle);
- InvalidationVector::iterator it = std::find_if(
- unacked_invalidations_.begin(),
- unacked_invalidations_.end(),
- matcher);
- if (it != unacked_invalidations_.end()) {
- acked_invalidations_.push_back(*it);
- unacked_invalidations_.erase(it);
- }
-}
-
-void MockAckHandler::Drop(
- const invalidation::ObjectId& id,
- const AckHandle& handle) {
-}
-
-WeakHandle<AckHandler> MockAckHandler::WeakHandleThis() {
- return WeakHandle<AckHandler>(AsWeakPtr());
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/mock_ack_handler.h b/chromium/sync/notifier/mock_ack_handler.h
deleted file mode 100644
index bf6ecc939a1..00000000000
--- a/chromium/sync/notifier/mock_ack_handler.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_MOCK_ACK_HANDLER_H_
-#define SYNC_NOTIFIER_MOCK_ACK_HANDLER_H_
-
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/ack_handler.h"
-
-namespace syncer {
-
-class Invalidation;
-
-// This AckHandler implementation colaborates with the FakeInvalidationService
-// to enable unit tests to assert that invalidations are being acked properly.
-class SYNC_EXPORT MockAckHandler
- : public AckHandler,
- public base::SupportsWeakPtr<MockAckHandler> {
- public:
- MockAckHandler();
- virtual ~MockAckHandler();
-
- // Sets up some internal state to track this invalidation, and modifies it so
- // that its Acknowledge() and Drop() methods will route back to us.
- void RegisterInvalidation(Invalidation* invalidation);
-
- // No one was listening for this invalidation, so no one will receive it or
- // ack it. We keep track of it anyway to let tests make assertions about it.
- void RegisterUnsentInvalidation(Invalidation* invalidation);
-
- // Returns true if the specified invalidaition has been delivered, but has not
- // been acknowledged yet.
- bool IsUnacked(const Invalidation& invalidation) const;
-
- // Returns true if the specified invalidation was never delivered.
- bool IsUnsent(const Invalidation& invalidation) const;
-
- // Implementation of AckHandler.
- virtual void Acknowledge(
- const invalidation::ObjectId& id,
- const AckHandle& handle) OVERRIDE;
- virtual void Drop(
- const invalidation::ObjectId& id,
- const AckHandle& handle) OVERRIDE;
-
- private:
- typedef std::vector<syncer::Invalidation> InvalidationVector;
-
- WeakHandle<AckHandler> WeakHandleThis();
-
- InvalidationVector unsent_invalidations_;
- InvalidationVector unacked_invalidations_;
- InvalidationVector acked_invalidations_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_MOCK_ACK_HANDLER_H_
diff --git a/chromium/sync/notifier/non_blocking_invalidator.cc b/chromium/sync/notifier/non_blocking_invalidator.cc
deleted file mode 100644
index bd0596722b0..00000000000
--- a/chromium/sync/notifier/non_blocking_invalidator.cc
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/non_blocking_invalidator.h"
-
-#include <cstddef>
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/threading/thread.h"
-#include "jingle/notifier/listener/push_client.h"
-#include "sync/notifier/invalidation_notifier.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-
-namespace syncer {
-
-class NonBlockingInvalidator::Core
- : public base::RefCountedThreadSafe<NonBlockingInvalidator::Core>,
- // InvalidationHandler to observe the InvalidationNotifier we create.
- public InvalidationHandler {
- public:
- // Called on parent thread. |delegate_observer| should be
- // initialized.
- explicit Core(
- const WeakHandle<InvalidationHandler>& delegate_observer);
-
- // Helpers called on I/O thread.
- void Initialize(
- const notifier::NotifierOptions& notifier_options,
- const std::string& invalidator_client_id,
- const UnackedInvalidationsMap& saved_invalidations,
- const std::string& invalidation_bootstrap_data,
- const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
- const std::string& client_info);
- void Teardown();
- void UpdateRegisteredIds(const ObjectIdSet& ids);
- void UpdateCredentials(const std::string& email, const std::string& token);
-
- // InvalidationHandler implementation (all called on I/O thread by
- // InvalidationNotifier).
- virtual void OnInvalidatorStateChange(InvalidatorState reason) OVERRIDE;
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE;
-
- private:
- friend class
- base::RefCountedThreadSafe<NonBlockingInvalidator::Core>;
- // Called on parent or I/O thread.
- virtual ~Core();
-
- // The variables below should be used only on the I/O thread.
- const WeakHandle<InvalidationHandler> delegate_observer_;
- scoped_ptr<InvalidationNotifier> invalidation_notifier_;
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
-
- DISALLOW_COPY_AND_ASSIGN(Core);
-};
-
-NonBlockingInvalidator::Core::Core(
- const WeakHandle<InvalidationHandler>& delegate_observer)
- : delegate_observer_(delegate_observer) {
- DCHECK(delegate_observer_.IsInitialized());
-}
-
-NonBlockingInvalidator::Core::~Core() {
-}
-
-void NonBlockingInvalidator::Core::Initialize(
- const notifier::NotifierOptions& notifier_options,
- const std::string& invalidator_client_id,
- const UnackedInvalidationsMap& saved_invalidations,
- const std::string& invalidation_bootstrap_data,
- const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
- const std::string& client_info) {
- DCHECK(notifier_options.request_context_getter.get());
- DCHECK_EQ(notifier::NOTIFICATION_SERVER,
- notifier_options.notification_method);
- network_task_runner_ = notifier_options.request_context_getter->
- GetNetworkTaskRunner();
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- invalidation_notifier_.reset(
- new InvalidationNotifier(
- notifier::PushClient::CreateDefaultOnIOThread(notifier_options),
- invalidator_client_id,
- saved_invalidations,
- invalidation_bootstrap_data,
- invalidation_state_tracker,
- client_info));
- invalidation_notifier_->RegisterHandler(this);
-}
-
-void NonBlockingInvalidator::Core::Teardown() {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- invalidation_notifier_->UnregisterHandler(this);
- invalidation_notifier_.reset();
- network_task_runner_ = NULL;
-}
-
-void NonBlockingInvalidator::Core::UpdateRegisteredIds(const ObjectIdSet& ids) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- invalidation_notifier_->UpdateRegisteredIds(this, ids);
-}
-
-void NonBlockingInvalidator::Core::UpdateCredentials(const std::string& email,
- const std::string& token) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- invalidation_notifier_->UpdateCredentials(email, token);
-}
-
-void NonBlockingInvalidator::Core::OnInvalidatorStateChange(
- InvalidatorState reason) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- delegate_observer_.Call(
- FROM_HERE, &InvalidationHandler::OnInvalidatorStateChange, reason);
-}
-
-void NonBlockingInvalidator::Core::OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- DCHECK(network_task_runner_->BelongsToCurrentThread());
- delegate_observer_.Call(FROM_HERE,
- &InvalidationHandler::OnIncomingInvalidation,
- invalidation_map);
-}
-
-NonBlockingInvalidator::NonBlockingInvalidator(
- const notifier::NotifierOptions& notifier_options,
- const std::string& invalidator_client_id,
- const UnackedInvalidationsMap& saved_invalidations,
- const std::string& invalidation_bootstrap_data,
- const WeakHandle<InvalidationStateTracker>&
- invalidation_state_tracker,
- const std::string& client_info)
- : parent_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- network_task_runner_(
- notifier_options.request_context_getter->GetNetworkTaskRunner()),
- weak_ptr_factory_(this) {
- core_ = new Core(MakeWeakHandle(weak_ptr_factory_.GetWeakPtr()));
-
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(
- &NonBlockingInvalidator::Core::Initialize,
- core_.get(),
- notifier_options,
- invalidator_client_id,
- saved_invalidations,
- invalidation_bootstrap_data,
- invalidation_state_tracker,
- client_info))) {
- NOTREACHED();
- }
-}
-
-NonBlockingInvalidator::~NonBlockingInvalidator() {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&NonBlockingInvalidator::Core::Teardown,
- core_.get()))) {
- DVLOG(1) << "Network thread stopped before invalidator is destroyed.";
- }
-}
-
-void NonBlockingInvalidator::RegisterHandler(InvalidationHandler* handler) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- registrar_.RegisterHandler(handler);
-}
-
-void NonBlockingInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- registrar_.UpdateRegisteredIds(handler, ids);
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(
- &NonBlockingInvalidator::Core::UpdateRegisteredIds,
- core_.get(),
- registrar_.GetAllRegisteredIds()))) {
- NOTREACHED();
- }
-}
-
-void NonBlockingInvalidator::UnregisterHandler(InvalidationHandler* handler) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- registrar_.UnregisterHandler(handler);
-}
-
-InvalidatorState NonBlockingInvalidator::GetInvalidatorState() const {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- return registrar_.GetInvalidatorState();
-}
-
-void NonBlockingInvalidator::UpdateCredentials(const std::string& email,
- const std::string& token) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- if (!network_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&NonBlockingInvalidator::Core::UpdateCredentials,
- core_.get(), email, token))) {
- NOTREACHED();
- }
-}
-
-void NonBlockingInvalidator::OnInvalidatorStateChange(InvalidatorState state) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- registrar_.UpdateInvalidatorState(state);
-}
-
-void NonBlockingInvalidator::OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
- registrar_.DispatchInvalidationsToHandlers(invalidation_map);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/non_blocking_invalidator.h b/chromium/sync/notifier/non_blocking_invalidator.h
deleted file mode 100644
index d40166adfc6..00000000000
--- a/chromium/sync/notifier/non_blocking_invalidator.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// An implementation of SyncNotifier that wraps InvalidationNotifier
-// on its own thread.
-
-#ifndef SYNC_NOTIFIER_NON_BLOCKING_INVALIDATOR_H_
-#define SYNC_NOTIFIER_NON_BLOCKING_INVALIDATOR_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "jingle/notifier/base/notifier_options.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/notifier/invalidator_registrar.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-} // namespace base
-
-namespace syncer {
-
-class SYNC_EXPORT_PRIVATE NonBlockingInvalidator
- : public Invalidator,
- // InvalidationHandler to "observe" our Core via WeakHandle.
- public InvalidationHandler {
- public:
- // |invalidation_state_tracker| must be initialized.
- NonBlockingInvalidator(
- const notifier::NotifierOptions& notifier_options,
- const std::string& invalidator_client_id,
- const UnackedInvalidationsMap& saved_invalidations,
- const std::string& invalidation_bootstrap_data,
- const WeakHandle<InvalidationStateTracker>&
- invalidation_state_tracker,
- const std::string& client_info);
-
- virtual ~NonBlockingInvalidator();
-
- // Invalidator implementation.
- virtual void RegisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) OVERRIDE;
- virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void UpdateCredentials(
- const std::string& email, const std::string& token) OVERRIDE;
-
- // InvalidationHandler implementation.
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE;
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE;
-
- private:
- class Core;
-
- InvalidatorRegistrar registrar_;
-
- // The real guts of NonBlockingInvalidator, which allows this class to live
- // completely on the parent thread.
- scoped_refptr<Core> core_;
- scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
-
- base::WeakPtrFactory<NonBlockingInvalidator> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(NonBlockingInvalidator);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_NON_BLOCKING_INVALIDATOR_H_
diff --git a/chromium/sync/notifier/non_blocking_invalidator_unittest.cc b/chromium/sync/notifier/non_blocking_invalidator_unittest.cc
deleted file mode 100644
index 0c439c502c6..00000000000
--- a/chromium/sync/notifier/non_blocking_invalidator_unittest.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/non_blocking_invalidator.h"
-
-#include "base/bind_helpers.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "base/threading/thread.h"
-#include "google/cacheinvalidation/types.pb.h"
-#include "jingle/notifier/base/fake_base_task.h"
-#include "net/url_request/url_request_test_util.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/fake_invalidation_handler.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/invalidator_test_template.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class NonBlockingInvalidatorTestDelegate {
- public:
- NonBlockingInvalidatorTestDelegate() : io_thread_("IO thread") {}
-
- ~NonBlockingInvalidatorTestDelegate() {
- DestroyInvalidator();
- }
-
- void CreateInvalidator(
- const std::string& invalidator_client_id,
- const std::string& initial_state,
- const base::WeakPtr<InvalidationStateTracker>&
- invalidation_state_tracker) {
- DCHECK(!invalidator_.get());
- base::Thread::Options options;
- options.message_loop_type = base::MessageLoop::TYPE_IO;
- io_thread_.StartWithOptions(options);
- request_context_getter_ =
- new net::TestURLRequestContextGetter(io_thread_.message_loop_proxy());
- notifier::NotifierOptions invalidator_options;
- invalidator_options.request_context_getter = request_context_getter_;
- invalidator_.reset(
- new NonBlockingInvalidator(
- invalidator_options,
- invalidator_client_id,
- UnackedInvalidationsMap(),
- initial_state,
- MakeWeakHandle(invalidation_state_tracker),
- "fake_client_info"));
- }
-
- Invalidator* GetInvalidator() {
- return invalidator_.get();
- }
-
- void DestroyInvalidator() {
- invalidator_.reset();
- request_context_getter_ = NULL;
- io_thread_.Stop();
- message_loop_.RunUntilIdle();
- }
-
- void WaitForInvalidator() {
- base::RunLoop run_loop;
- ASSERT_TRUE(
- io_thread_.message_loop_proxy()->PostTaskAndReply(
- FROM_HERE,
- base::Bind(&base::DoNothing),
- run_loop.QuitClosure()));
- run_loop.Run();
- }
-
- void TriggerOnInvalidatorStateChange(InvalidatorState state) {
- invalidator_->OnInvalidatorStateChange(state);
- }
-
- void TriggerOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- invalidator_->OnIncomingInvalidation(invalidation_map);
- }
-
- private:
- base::MessageLoop message_loop_;
- base::Thread io_thread_;
- scoped_refptr<net::URLRequestContextGetter> request_context_getter_;
- scoped_ptr<NonBlockingInvalidator> invalidator_;
-};
-
-INSTANTIATE_TYPED_TEST_CASE_P(
- NonBlockingInvalidatorTest, InvalidatorTest,
- NonBlockingInvalidatorTestDelegate);
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/object_id_invalidation_map.cc b/chromium/sync/notifier/object_id_invalidation_map.cc
deleted file mode 100644
index 1082eaa29be..00000000000
--- a/chromium/sync/notifier/object_id_invalidation_map.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/object_id_invalidation_map.h"
-
-#include "base/json/json_string_value_serializer.h"
-
-namespace syncer {
-
-// static
-ObjectIdInvalidationMap ObjectIdInvalidationMap::InvalidateAll(
- const ObjectIdSet& ids) {
- ObjectIdInvalidationMap invalidate_all;
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- invalidate_all.Insert(Invalidation::InitUnknownVersion(*it));
- }
- return invalidate_all;
-}
-
-ObjectIdInvalidationMap::ObjectIdInvalidationMap() {}
-
-ObjectIdInvalidationMap::~ObjectIdInvalidationMap() {}
-
-ObjectIdSet ObjectIdInvalidationMap::GetObjectIds() const {
- ObjectIdSet ret;
- for (IdToListMap::const_iterator it = map_.begin(); it != map_.end(); ++it) {
- ret.insert(it->first);
- }
- return ret;
-}
-
-bool ObjectIdInvalidationMap::Empty() const {
- return map_.empty();
-}
-
-void ObjectIdInvalidationMap::Insert(const Invalidation& invalidation) {
- map_[invalidation.object_id()].Insert(invalidation);
-}
-
-ObjectIdInvalidationMap ObjectIdInvalidationMap::GetSubsetWithObjectIds(
- const ObjectIdSet& ids) const {
- IdToListMap new_map;
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- IdToListMap::const_iterator lookup = map_.find(*it);
- if (lookup != map_.end()) {
- new_map[*it] = lookup->second;
- }
- }
- return ObjectIdInvalidationMap(new_map);
-}
-
-const SingleObjectInvalidationSet& ObjectIdInvalidationMap::ForObject(
- invalidation::ObjectId id) const {
- IdToListMap::const_iterator lookup = map_.find(id);
- DCHECK(lookup != map_.end());
- DCHECK(!lookup->second.IsEmpty());
- return lookup->second;
-}
-
-void ObjectIdInvalidationMap::GetAllInvalidations(
- std::vector<syncer::Invalidation>* out) const {
- for (IdToListMap::const_iterator it = map_.begin(); it != map_.end(); ++it) {
- out->insert(out->begin(), it->second.begin(), it->second.end());
- }
-}
-void ObjectIdInvalidationMap::AcknowledgeAll() const {
- for (IdToListMap::const_iterator it1 = map_.begin();
- it1 != map_.end(); ++it1) {
- for (SingleObjectInvalidationSet::const_iterator it2 = it1->second.begin();
- it2 != it1->second.end(); ++it2) {
- it2->Acknowledge();
- }
- }
-}
-
-bool ObjectIdInvalidationMap::operator==(
- const ObjectIdInvalidationMap& other) const {
- return map_ == other.map_;
-}
-
-scoped_ptr<base::ListValue> ObjectIdInvalidationMap::ToValue() const {
- scoped_ptr<base::ListValue> value(new base::ListValue());
- for (IdToListMap::const_iterator it1 = map_.begin();
- it1 != map_.end(); ++it1) {
- for (SingleObjectInvalidationSet::const_iterator it2 =
- it1->second.begin(); it2 != it1->second.end(); ++it2) {
- value->Append(it2->ToValue().release());
- }
- }
- return value.Pass();
-}
-
-bool ObjectIdInvalidationMap::ResetFromValue(const base::ListValue& value) {
- map_.clear();
- for (size_t i = 0; i < value.GetSize(); ++i) {
- const DictionaryValue* dict;
- if (!value.GetDictionary(i, &dict)) {
- return false;
- }
- scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
- if (!invalidation) {
- return false;
- }
- Insert(*invalidation.get());
- }
- return true;
-}
-
-std::string ObjectIdInvalidationMap::ToString() const {
- std::string output;
- JSONStringValueSerializer serializer(&output);
- serializer.set_pretty_print(true);
- serializer.Serialize(*ToValue().get());
- return output;
-}
-
-ObjectIdInvalidationMap::ObjectIdInvalidationMap(const IdToListMap& map)
- : map_(map) {}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/object_id_invalidation_map.h b/chromium/sync/notifier/object_id_invalidation_map.h
deleted file mode 100644
index 3494a62aa2a..00000000000
--- a/chromium/sync/notifier/object_id_invalidation_map.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_H_
-#define SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_H_
-
-#include <map>
-#include <vector>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/invalidation.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/single_object_invalidation_set.h"
-
-namespace syncer {
-
-// A set of notifications with some helper methods to organize them by object ID
-// and version number.
-class SYNC_EXPORT ObjectIdInvalidationMap {
- public:
- // Creates an invalidation map that includes an 'unknown version'
- // invalidation for each specified ID in |ids|.
- static ObjectIdInvalidationMap InvalidateAll(const ObjectIdSet& ids);
-
- ObjectIdInvalidationMap();
- ~ObjectIdInvalidationMap();
-
- // Returns set of ObjectIds for which at least one invalidation is present.
- ObjectIdSet GetObjectIds() const;
-
- // Returns true if this map contains no invalidations.
- bool Empty() const;
-
- // Returns true if both maps contain the same set of invalidations.
- bool operator==(const ObjectIdInvalidationMap& other) const;
-
- // Inserts a new invalidation into this map.
- void Insert(const Invalidation& invalidation);
-
- // Returns a new map containing the subset of invaliations from this map
- // whose IDs were in the specified |ids| set.
- ObjectIdInvalidationMap GetSubsetWithObjectIds(const ObjectIdSet& ids) const;
-
- // Returns the subset of invalidations with IDs matching |id|.
- const SingleObjectInvalidationSet& ForObject(
- invalidation::ObjectId id) const;
-
- // Returns the contents of this map in a single vector.
- void GetAllInvalidations(std::vector<syncer::Invalidation>* out) const;
-
- // Call Acknowledge() on all contained Invalidations.
- void AcknowledgeAll() const;
-
- // Serialize this map to a value.
- scoped_ptr<base::ListValue> ToValue() const;
-
- // Deserialize the value into a map and use it to re-initialize this object.
- bool ResetFromValue(const base::ListValue& value);
-
- // Prints the contentes of this map as a human-readable string.
- std::string ToString() const;
-
- private:
- typedef std::map<invalidation::ObjectId,
- SingleObjectInvalidationSet,
- ObjectIdLessThan> IdToListMap;
-
- ObjectIdInvalidationMap(const IdToListMap& map);
-
- IdToListMap map_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_OBJECT_ID_INVALIDATION_MAP_H_
diff --git a/chromium/sync/notifier/object_id_invalidation_map_unittest.cc b/chromium/sync/notifier/object_id_invalidation_map_unittest.cc
deleted file mode 100644
index 1acd920b799..00000000000
--- a/chromium/sync/notifier/object_id_invalidation_map_unittest.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/object_id_invalidation_map.h"
-
-#include "google/cacheinvalidation/types.pb.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class ObjectIdInvalidationMapTest : public testing::Test {
- public:
- ObjectIdInvalidationMapTest()
- : kIdOne(ipc::invalidation::ObjectSource::TEST, "one"),
- kIdTwo(ipc::invalidation::ObjectSource::TEST, "two"),
- kInv1(Invalidation::Init(kIdOne, 10, "ten")) {
- set1.insert(kIdOne);
- set2.insert(kIdTwo);
- all_set.insert(kIdOne);
- all_set.insert(kIdTwo);
-
- one_invalidation.Insert(kInv1);
- invalidate_all = ObjectIdInvalidationMap::InvalidateAll(all_set);
- }
-
- protected:
- const invalidation::ObjectId kIdOne;
- const invalidation::ObjectId kIdTwo;
- const Invalidation kInv1;
-
- ObjectIdSet set1;
- ObjectIdSet set2;
- ObjectIdSet all_set;
- ObjectIdInvalidationMap empty;
- ObjectIdInvalidationMap one_invalidation;
- ObjectIdInvalidationMap invalidate_all;
-};
-
-TEST_F(ObjectIdInvalidationMapTest, Empty) {
- EXPECT_TRUE(empty.Empty());
- EXPECT_FALSE(one_invalidation.Empty());
- EXPECT_FALSE(invalidate_all.Empty());
-}
-
-TEST_F(ObjectIdInvalidationMapTest, Equality) {
- ObjectIdInvalidationMap empty2;
- EXPECT_TRUE(empty == empty2);
-
- ObjectIdInvalidationMap one_invalidation2;
- one_invalidation2.Insert(kInv1);
- EXPECT_TRUE(one_invalidation == one_invalidation2);
-
- EXPECT_FALSE(empty == invalidate_all);
-}
-
-TEST_F(ObjectIdInvalidationMapTest, GetObjectIds) {
- EXPECT_EQ(ObjectIdSet(), empty.GetObjectIds());
- EXPECT_EQ(set1, one_invalidation.GetObjectIds());
- EXPECT_EQ(all_set, invalidate_all.GetObjectIds());
-}
-
-TEST_F(ObjectIdInvalidationMapTest, GetSubsetWithObjectIds) {
- EXPECT_TRUE(empty.GetSubsetWithObjectIds(set1).Empty());
-
- EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(set1) ==
- one_invalidation);
- EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(all_set) ==
- one_invalidation);
- EXPECT_TRUE(one_invalidation.GetSubsetWithObjectIds(set2).Empty());
-
- EXPECT_TRUE(invalidate_all.GetSubsetWithObjectIds(ObjectIdSet()).Empty());
-}
-
-TEST_F(ObjectIdInvalidationMapTest, SerializeEmpty) {
- scoped_ptr<base::ListValue> value = empty.ToValue();
- ASSERT_TRUE(value.get());
- ObjectIdInvalidationMap deserialized;
- deserialized.ResetFromValue(*value.get());
- EXPECT_TRUE(empty == deserialized);
-}
-
-TEST_F(ObjectIdInvalidationMapTest, SerializeOneInvalidation) {
- scoped_ptr<base::ListValue> value = one_invalidation.ToValue();
- ASSERT_TRUE(value.get());
- ObjectIdInvalidationMap deserialized;
- deserialized.ResetFromValue(*value.get());
- EXPECT_TRUE(one_invalidation == deserialized);
-}
-
-TEST_F(ObjectIdInvalidationMapTest, SerializeInvalidateAll) {
- scoped_ptr<base::ListValue> value = invalidate_all.ToValue();
- ASSERT_TRUE(value.get());
- ObjectIdInvalidationMap deserialized;
- deserialized.ResetFromValue(*value.get());
- EXPECT_TRUE(invalidate_all == deserialized);
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/p2p_invalidator.cc b/chromium/sync/notifier/p2p_invalidator.cc
deleted file mode 100644
index cd82e6145e8..00000000000
--- a/chromium/sync/notifier/p2p_invalidator.cc
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/p2p_invalidator.h"
-
-#include <algorithm>
-#include <iterator>
-
-#include "base/json/json_reader.h"
-#include "base/json/json_writer.h"
-#include "base/logging.h"
-#include "base/values.h"
-#include "jingle/notifier/listener/push_client.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-
-namespace syncer {
-
-const char kSyncP2PNotificationChannel[] = "http://www.google.com/chrome/sync";
-
-namespace {
-
-const char kNotifySelf[] = "notifySelf";
-const char kNotifyOthers[] = "notifyOthers";
-const char kNotifyAll[] = "notifyAll";
-
-const char kSenderIdKey[] = "senderId";
-const char kNotificationTypeKey[] = "notificationType";
-const char kInvalidationsKey[] = "invalidations";
-
-} // namespace
-
-std::string P2PNotificationTargetToString(P2PNotificationTarget target) {
- switch (target) {
- case NOTIFY_SELF:
- return kNotifySelf;
- case NOTIFY_OTHERS:
- return kNotifyOthers;
- case NOTIFY_ALL:
- return kNotifyAll;
- default:
- NOTREACHED();
- return std::string();
- }
-}
-
-P2PNotificationTarget P2PNotificationTargetFromString(
- const std::string& target_str) {
- if (target_str == kNotifySelf) {
- return NOTIFY_SELF;
- }
- if (target_str == kNotifyOthers) {
- return NOTIFY_OTHERS;
- }
- if (target_str == kNotifyAll) {
- return NOTIFY_ALL;
- }
- LOG(WARNING) << "Could not parse " << target_str;
- return NOTIFY_SELF;
-}
-
-P2PNotificationData::P2PNotificationData()
- : target_(NOTIFY_SELF) {}
-
-P2PNotificationData::P2PNotificationData(
- const std::string& sender_id,
- P2PNotificationTarget target,
- const ObjectIdInvalidationMap& invalidation_map)
- : sender_id_(sender_id),
- target_(target),
- invalidation_map_(invalidation_map) {}
-
-P2PNotificationData::~P2PNotificationData() {}
-
-bool P2PNotificationData::IsTargeted(const std::string& id) const {
- switch (target_) {
- case NOTIFY_SELF:
- return sender_id_ == id;
- case NOTIFY_OTHERS:
- return sender_id_ != id;
- case NOTIFY_ALL:
- return true;
- default:
- NOTREACHED();
- return false;
- }
-}
-
-const ObjectIdInvalidationMap&
-P2PNotificationData::GetIdInvalidationMap() const {
- return invalidation_map_;
-}
-
-bool P2PNotificationData::Equals(const P2PNotificationData& other) const {
- return
- (sender_id_ == other.sender_id_) &&
- (target_ == other.target_) &&
- (invalidation_map_ == other.invalidation_map_);
-}
-
-std::string P2PNotificationData::ToString() const {
- scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
- dict->SetString(kSenderIdKey, sender_id_);
- dict->SetString(kNotificationTypeKey,
- P2PNotificationTargetToString(target_));
- dict->Set(kInvalidationsKey, invalidation_map_.ToValue().release());
- std::string json;
- base::JSONWriter::Write(dict.get(), &json);
- return json;
-}
-
-bool P2PNotificationData::ResetFromString(const std::string& str) {
- scoped_ptr<base::Value> data_value(base::JSONReader::Read(str));
- const base::DictionaryValue* data_dict = NULL;
- if (!data_value.get() || !data_value->GetAsDictionary(&data_dict)) {
- LOG(WARNING) << "Could not parse " << str << " as a dictionary";
- return false;
- }
- if (!data_dict->GetString(kSenderIdKey, &sender_id_)) {
- LOG(WARNING) << "Could not find string value for " << kSenderIdKey;
- }
- std::string target_str;
- if (!data_dict->GetString(kNotificationTypeKey, &target_str)) {
- LOG(WARNING) << "Could not find string value for "
- << kNotificationTypeKey;
- }
- target_ = P2PNotificationTargetFromString(target_str);
- const base::ListValue* invalidation_map_list = NULL;
- if (!data_dict->GetList(kInvalidationsKey, &invalidation_map_list) ||
- !invalidation_map_.ResetFromValue(*invalidation_map_list)) {
- LOG(WARNING) << "Could not parse " << kInvalidationsKey;
- }
- return true;
-}
-
-P2PInvalidator::P2PInvalidator(scoped_ptr<notifier::PushClient> push_client,
- const std::string& invalidator_client_id,
- P2PNotificationTarget send_notification_target)
- : push_client_(push_client.Pass()),
- invalidator_client_id_(invalidator_client_id),
- logged_in_(false),
- notifications_enabled_(false),
- send_notification_target_(send_notification_target) {
- DCHECK(send_notification_target_ == NOTIFY_OTHERS ||
- send_notification_target_ == NOTIFY_ALL);
- push_client_->AddObserver(this);
-}
-
-P2PInvalidator::~P2PInvalidator() {
- DCHECK(thread_checker_.CalledOnValidThread());
- push_client_->RemoveObserver(this);
-}
-
-void P2PInvalidator::RegisterHandler(InvalidationHandler* handler) {
- DCHECK(thread_checker_.CalledOnValidThread());
- registrar_.RegisterHandler(handler);
-}
-
-void P2PInvalidator::UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) {
- DCHECK(thread_checker_.CalledOnValidThread());
- ObjectIdSet new_ids;
- const ObjectIdSet& old_ids = registrar_.GetRegisteredIds(handler);
- std::set_difference(ids.begin(), ids.end(),
- old_ids.begin(), old_ids.end(),
- std::inserter(new_ids, new_ids.end()),
- ObjectIdLessThan());
- registrar_.UpdateRegisteredIds(handler, ids);
- const P2PNotificationData notification_data(
- invalidator_client_id_,
- send_notification_target_,
- ObjectIdInvalidationMap::InvalidateAll(ids));
- SendNotificationData(notification_data);
-}
-
-void P2PInvalidator::UnregisterHandler(InvalidationHandler* handler) {
- DCHECK(thread_checker_.CalledOnValidThread());
- registrar_.UnregisterHandler(handler);
-}
-
-InvalidatorState P2PInvalidator::GetInvalidatorState() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return registrar_.GetInvalidatorState();
-}
-
-void P2PInvalidator::UpdateCredentials(
- const std::string& email, const std::string& token) {
- DCHECK(thread_checker_.CalledOnValidThread());
- notifier::Subscription subscription;
- subscription.channel = kSyncP2PNotificationChannel;
- // There may be some subtle issues around case sensitivity of the
- // from field, but it doesn't matter too much since this is only
- // used in p2p mode (which is only used in testing).
- subscription.from = email;
- push_client_->UpdateSubscriptions(
- notifier::SubscriptionList(1, subscription));
- // If already logged in, the new credentials will take effect on the
- // next reconnection.
- push_client_->UpdateCredentials(email, token);
- logged_in_ = true;
-}
-
-void P2PInvalidator::SendInvalidation(const ObjectIdSet& ids) {
- DCHECK(thread_checker_.CalledOnValidThread());
- ObjectIdInvalidationMap invalidation_map =
- ObjectIdInvalidationMap::InvalidateAll(ids);
- const P2PNotificationData notification_data(
- invalidator_client_id_, send_notification_target_, invalidation_map);
- SendNotificationData(notification_data);
-}
-
-void P2PInvalidator::OnNotificationsEnabled() {
- DCHECK(thread_checker_.CalledOnValidThread());
- bool just_turned_on = (notifications_enabled_ == false);
- notifications_enabled_ = true;
- registrar_.UpdateInvalidatorState(INVALIDATIONS_ENABLED);
- if (just_turned_on) {
- const P2PNotificationData notification_data(
- invalidator_client_id_,
- NOTIFY_SELF,
- ObjectIdInvalidationMap::InvalidateAll(
- registrar_.GetAllRegisteredIds()));
- SendNotificationData(notification_data);
- }
-}
-
-void P2PInvalidator::OnNotificationsDisabled(
- notifier::NotificationsDisabledReason reason) {
- DCHECK(thread_checker_.CalledOnValidThread());
- registrar_.UpdateInvalidatorState(FromNotifierReason(reason));
-}
-
-void P2PInvalidator::OnIncomingNotification(
- const notifier::Notification& notification) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(1) << "Received notification " << notification.ToString();
- if (!logged_in_) {
- DVLOG(1) << "Not logged in yet -- not emitting notification";
- return;
- }
- if (!notifications_enabled_) {
- DVLOG(1) << "Notifications not on -- not emitting notification";
- return;
- }
- if (notification.channel != kSyncP2PNotificationChannel) {
- LOG(WARNING) << "Notification from unexpected source "
- << notification.channel;
- }
- P2PNotificationData notification_data;
- if (!notification_data.ResetFromString(notification.data)) {
- LOG(WARNING) << "Could not parse notification data from "
- << notification.data;
- notification_data = P2PNotificationData(
- invalidator_client_id_,
- NOTIFY_ALL,
- ObjectIdInvalidationMap::InvalidateAll(
- registrar_.GetAllRegisteredIds()));
- }
- if (!notification_data.IsTargeted(invalidator_client_id_)) {
- DVLOG(1) << "Not a target of the notification -- "
- << "not emitting notification";
- return;
- }
- registrar_.DispatchInvalidationsToHandlers(
- notification_data.GetIdInvalidationMap());
-}
-
-void P2PInvalidator::SendNotificationDataForTest(
- const P2PNotificationData& notification_data) {
- DCHECK(thread_checker_.CalledOnValidThread());
- SendNotificationData(notification_data);
-}
-
-void P2PInvalidator::SendNotificationData(
- const P2PNotificationData& notification_data) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (notification_data.GetIdInvalidationMap().Empty()) {
- DVLOG(1) << "Not sending XMPP notification with empty state map: "
- << notification_data.ToString();
- return;
- }
- notifier::Notification notification;
- notification.channel = kSyncP2PNotificationChannel;
- notification.data = notification_data.ToString();
- DVLOG(1) << "Sending XMPP notification: " << notification.ToString();
- push_client_->SendNotification(notification);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/p2p_invalidator.h b/chromium/sync/notifier/p2p_invalidator.h
deleted file mode 100644
index e0bd5bc2316..00000000000
--- a/chromium/sync/notifier/p2p_invalidator.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// An invalidator that uses p2p invalidations based on XMPP push
-// notifications. Used only for sync integration tests.
-
-#ifndef SYNC_NOTIFIER_P2P_INVALIDATOR_H_
-#define SYNC_NOTIFIER_P2P_INVALIDATOR_H_
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/observer_list.h"
-#include "base/threading/thread_checker.h"
-#include "jingle/notifier/base/notifier_options.h"
-#include "jingle/notifier/listener/push_client.h"
-#include "jingle/notifier/listener/push_client_observer.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/notifier/invalidator_registrar.h"
-#include "sync/notifier/invalidator_state.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-
-namespace notifier {
-class PushClient;
-} // namespace notifier
-
-namespace syncer {
-
-// The channel to use for sync notifications.
-SYNC_EXPORT extern const char kSyncP2PNotificationChannel[];
-
-// The intended recipient(s) of a P2P notification.
-enum P2PNotificationTarget {
- NOTIFY_SELF,
- FIRST_NOTIFICATION_TARGET = NOTIFY_SELF,
- NOTIFY_OTHERS,
- NOTIFY_ALL,
- LAST_NOTIFICATION_TARGET = NOTIFY_ALL
-};
-
-SYNC_EXPORT_PRIVATE std::string P2PNotificationTargetToString(
- P2PNotificationTarget target);
-
-// If |target_str| can't be parsed, assumes NOTIFY_SELF.
-SYNC_EXPORT_PRIVATE P2PNotificationTarget P2PNotificationTargetFromString(
- const std::string& target_str);
-
-// Helper notification data class that can be serialized to and
-// deserialized from a string.
-class SYNC_EXPORT_PRIVATE P2PNotificationData {
- public:
- // Initializes with an empty sender ID, target set to NOTIFY_SELF,
- // and empty changed types.
- P2PNotificationData();
- P2PNotificationData(const std::string& sender_id,
- P2PNotificationTarget target,
- const ObjectIdInvalidationMap& invalidation_map);
-
- ~P2PNotificationData();
-
- // Returns true if the given ID is targeted by this notification.
- bool IsTargeted(const std::string& id) const;
-
- const ObjectIdInvalidationMap& GetIdInvalidationMap() const;
-
- bool Equals(const P2PNotificationData& other) const;
-
- std::string ToString() const;
-
- // Returns whether parsing |str| was successful. If parsing was
- // unsuccessful, the state of the notification is undefined.
- bool ResetFromString(const std::string& str);
-
- private:
- // The unique ID of the client that sent the notification.
- std::string sender_id_;
- // The intendent recipient(s) of the notification.
- P2PNotificationTarget target_;
- // The invalidation map for the notification.
- ObjectIdInvalidationMap invalidation_map_;
-};
-
-class SYNC_EXPORT_PRIVATE P2PInvalidator
- : public Invalidator,
- public NON_EXPORTED_BASE(notifier::PushClientObserver) {
- public:
- // The |send_notification_target| parameter was added to allow us to send
- // self-notifications in some cases, but not others. The value should be
- // either NOTIFY_ALL to send notifications to all clients, or NOTIFY_OTHERS
- // to send notifications to all clients except for the one that triggered the
- // notification. See crbug.com/97780.
- P2PInvalidator(scoped_ptr<notifier::PushClient> push_client,
- const std::string& invalidator_client_id,
- P2PNotificationTarget send_notification_target);
-
- virtual ~P2PInvalidator();
-
- // Invalidator implementation.
- virtual void RegisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual void UpdateRegisteredIds(InvalidationHandler* handler,
- const ObjectIdSet& ids) OVERRIDE;
- virtual void UnregisterHandler(InvalidationHandler* handler) OVERRIDE;
- virtual InvalidatorState GetInvalidatorState() const OVERRIDE;
- virtual void UpdateCredentials(
- const std::string& email, const std::string& token) OVERRIDE;
-
- // PushClientObserver implementation.
- virtual void OnNotificationsEnabled() OVERRIDE;
- virtual void OnNotificationsDisabled(
- notifier::NotificationsDisabledReason reason) OVERRIDE;
- virtual void OnIncomingNotification(
- const notifier::Notification& notification) OVERRIDE;
-
- void SendInvalidation(const ObjectIdSet& ids);
-
- void SendNotificationDataForTest(
- const P2PNotificationData& notification_data);
-
- private:
- void SendNotificationData(const P2PNotificationData& notification_data);
-
- base::ThreadChecker thread_checker_;
-
- InvalidatorRegistrar registrar_;
-
- // The push client.
- scoped_ptr<notifier::PushClient> push_client_;
- // Our unique ID.
- std::string invalidator_client_id_;
- // Whether we have called UpdateCredentials() yet.
- bool logged_in_;
- bool notifications_enabled_;
- // Which set of clients should be sent notifications.
- P2PNotificationTarget send_notification_target_;
-
- DISALLOW_COPY_AND_ASSIGN(P2PInvalidator);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_P2P_INVALIDATOR_H_
diff --git a/chromium/sync/notifier/p2p_invalidator_unittest.cc b/chromium/sync/notifier/p2p_invalidator_unittest.cc
deleted file mode 100644
index 7898fee4c58..00000000000
--- a/chromium/sync/notifier/p2p_invalidator_unittest.cc
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/p2p_invalidator.h"
-
-#include <cstddef>
-
-#include "jingle/notifier/listener/fake_push_client.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/notifier/fake_invalidation_handler.h"
-#include "sync/notifier/invalidator_test_template.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class P2PInvalidatorTestDelegate {
- public:
- P2PInvalidatorTestDelegate() : fake_push_client_(NULL) {}
-
- ~P2PInvalidatorTestDelegate() {
- DestroyInvalidator();
- }
-
- void CreateInvalidator(
- const std::string& invalidator_client_id,
- const std::string& initial_state,
- const base::WeakPtr<InvalidationStateTracker>&
- invalidation_state_tracker) {
- DCHECK(!fake_push_client_);
- DCHECK(!invalidator_.get());
- fake_push_client_ = new notifier::FakePushClient();
- invalidator_.reset(
- new P2PInvalidator(
- scoped_ptr<notifier::PushClient>(fake_push_client_),
- invalidator_client_id,
- NOTIFY_OTHERS));
- }
-
- P2PInvalidator* GetInvalidator() {
- return invalidator_.get();
- }
-
- notifier::FakePushClient* GetPushClient() {
- return fake_push_client_;
- }
-
- void DestroyInvalidator() {
- invalidator_.reset();
- fake_push_client_ = NULL;
- }
-
- void WaitForInvalidator() {
- // Do Nothing.
- }
-
- void TriggerOnInvalidatorStateChange(InvalidatorState state) {
- if (state == INVALIDATIONS_ENABLED) {
- fake_push_client_->EnableNotifications();
- } else {
- fake_push_client_->DisableNotifications(ToNotifierReasonForTest(state));
- }
- }
-
- void TriggerOnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- const P2PNotificationData notification_data(
- std::string(), NOTIFY_ALL, invalidation_map);
- notifier::Notification notification;
- notification.channel = kSyncP2PNotificationChannel;
- notification.data = notification_data.ToString();
- fake_push_client_->SimulateIncomingNotification(notification);
- }
-
- private:
- // Owned by |invalidator_|.
- notifier::FakePushClient* fake_push_client_;
- scoped_ptr<P2PInvalidator> invalidator_;
-};
-
-class P2PInvalidatorTest : public testing::Test {
- protected:
- P2PInvalidatorTest()
- : next_sent_notification_to_reflect_(0) {
- delegate_.CreateInvalidator("sender",
- "fake_state",
- base::WeakPtr<InvalidationStateTracker>());
- delegate_.GetInvalidator()->RegisterHandler(&fake_handler_);
- }
-
- virtual ~P2PInvalidatorTest() {
- delegate_.GetInvalidator()->UnregisterHandler(&fake_handler_);
- }
-
- ObjectIdInvalidationMap MakeInvalidationMap(ModelTypeSet types) {
- ObjectIdInvalidationMap invalidations;
- ObjectIdSet ids = ModelTypeSetToObjectIdSet(types);
- return ObjectIdInvalidationMap::InvalidateAll(ids);
- }
-
- // Simulate receiving all the notifications we sent out since last
- // time this was called.
- void ReflectSentNotifications() {
- const std::vector<notifier::Notification>& sent_notifications =
- delegate_.GetPushClient()->sent_notifications();
- for(size_t i = next_sent_notification_to_reflect_;
- i < sent_notifications.size(); ++i) {
- delegate_.GetInvalidator()->OnIncomingNotification(sent_notifications[i]);
- }
- next_sent_notification_to_reflect_ = sent_notifications.size();
- }
-
- FakeInvalidationHandler fake_handler_;
- P2PInvalidatorTestDelegate delegate_;
-
- private:
- size_t next_sent_notification_to_reflect_;
-};
-
-// Make sure the P2PNotificationTarget <-> string conversions work.
-TEST_F(P2PInvalidatorTest, P2PNotificationTarget) {
- for (int i = FIRST_NOTIFICATION_TARGET;
- i <= LAST_NOTIFICATION_TARGET; ++i) {
- P2PNotificationTarget target = static_cast<P2PNotificationTarget>(i);
- const std::string& target_str = P2PNotificationTargetToString(target);
- EXPECT_FALSE(target_str.empty());
- EXPECT_EQ(target, P2PNotificationTargetFromString(target_str));
- }
- EXPECT_EQ(NOTIFY_SELF, P2PNotificationTargetFromString("unknown"));
-}
-
-// Make sure notification targeting works correctly.
-TEST_F(P2PInvalidatorTest, P2PNotificationDataIsTargeted) {
- {
- const P2PNotificationData notification_data(
- "sender", NOTIFY_SELF, ObjectIdInvalidationMap());
- EXPECT_TRUE(notification_data.IsTargeted("sender"));
- EXPECT_FALSE(notification_data.IsTargeted("other1"));
- EXPECT_FALSE(notification_data.IsTargeted("other2"));
- }
- {
- const P2PNotificationData notification_data(
- "sender", NOTIFY_OTHERS, ObjectIdInvalidationMap());
- EXPECT_FALSE(notification_data.IsTargeted("sender"));
- EXPECT_TRUE(notification_data.IsTargeted("other1"));
- EXPECT_TRUE(notification_data.IsTargeted("other2"));
- }
- {
- const P2PNotificationData notification_data(
- "sender", NOTIFY_ALL, ObjectIdInvalidationMap());
- EXPECT_TRUE(notification_data.IsTargeted("sender"));
- EXPECT_TRUE(notification_data.IsTargeted("other1"));
- EXPECT_TRUE(notification_data.IsTargeted("other2"));
- }
-}
-
-// Make sure the P2PNotificationData <-> string conversions work for a
-// default-constructed P2PNotificationData.
-TEST_F(P2PInvalidatorTest, P2PNotificationDataDefault) {
- const P2PNotificationData notification_data;
- EXPECT_TRUE(notification_data.IsTargeted(std::string()));
- EXPECT_FALSE(notification_data.IsTargeted("other1"));
- EXPECT_FALSE(notification_data.IsTargeted("other2"));
- EXPECT_TRUE(notification_data.GetIdInvalidationMap().Empty());
- const std::string& notification_data_str = notification_data.ToString();
- EXPECT_EQ(
- "{\"invalidations\":[],\"notificationType\":\"notifySelf\","
- "\"senderId\":\"\"}", notification_data_str);
-
- P2PNotificationData notification_data_parsed;
- EXPECT_TRUE(notification_data_parsed.ResetFromString(notification_data_str));
- EXPECT_TRUE(notification_data.Equals(notification_data_parsed));
-}
-
-// Make sure the P2PNotificationData <-> string conversions work for a
-// non-default-constructed P2PNotificationData.
-TEST_F(P2PInvalidatorTest, P2PNotificationDataNonDefault) {
- ObjectIdInvalidationMap invalidation_map =
- ObjectIdInvalidationMap::InvalidateAll(
- ModelTypeSetToObjectIdSet(ModelTypeSet(BOOKMARKS, THEMES)));
- const P2PNotificationData notification_data("sender",
- NOTIFY_ALL,
- invalidation_map);
- EXPECT_TRUE(notification_data.IsTargeted("sender"));
- EXPECT_TRUE(notification_data.IsTargeted("other1"));
- EXPECT_TRUE(notification_data.IsTargeted("other2"));
- EXPECT_EQ(invalidation_map, notification_data.GetIdInvalidationMap());
- const std::string& notification_data_str = notification_data.ToString();
- EXPECT_EQ(
- "{\"invalidations\":["
- "{\"isUnknownVersion\":true,"
- "\"objectId\":{\"name\":\"BOOKMARK\",\"source\":1004}},"
- "{\"isUnknownVersion\":true,"
- "\"objectId\":{\"name\":\"THEME\",\"source\":1004}}"
- "],\"notificationType\":\"notifyAll\","
- "\"senderId\":\"sender\"}", notification_data_str);
-
- P2PNotificationData notification_data_parsed;
- EXPECT_TRUE(notification_data_parsed.ResetFromString(notification_data_str));
- EXPECT_TRUE(notification_data.Equals(notification_data_parsed));
-}
-
-// Set up the P2PInvalidator, simulate a successful connection, and send
-// a notification with the default target (NOTIFY_OTHERS). The
-// observer should receive only a notification from the call to
-// UpdateEnabledTypes().
-TEST_F(P2PInvalidatorTest, NotificationsBasic) {
- const ModelTypeSet enabled_types(BOOKMARKS, PREFERENCES);
-
- P2PInvalidator* const invalidator = delegate_.GetInvalidator();
- notifier::FakePushClient* const push_client = delegate_.GetPushClient();
-
- invalidator->UpdateRegisteredIds(&fake_handler_,
- ModelTypeSetToObjectIdSet(enabled_types));
-
- const char kEmail[] = "foo@bar.com";
- const char kToken[] = "token";
- invalidator->UpdateCredentials(kEmail, kToken);
- {
- notifier::Subscription expected_subscription;
- expected_subscription.channel = kSyncP2PNotificationChannel;
- expected_subscription.from = kEmail;
- EXPECT_TRUE(notifier::SubscriptionListsEqual(
- push_client->subscriptions(),
- notifier::SubscriptionList(1, expected_subscription)));
- }
- EXPECT_EQ(kEmail, push_client->email());
- EXPECT_EQ(kToken, push_client->token());
-
- ReflectSentNotifications();
- push_client->EnableNotifications();
- EXPECT_EQ(INVALIDATIONS_ENABLED, fake_handler_.GetInvalidatorState());
-
- ReflectSentNotifications();
- EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
- EXPECT_THAT(
- MakeInvalidationMap(enabled_types),
- Eq(fake_handler_.GetLastInvalidationMap()));
-
- // Sent with target NOTIFY_OTHERS so should not be propagated to
- // |fake_handler_|.
- invalidator->SendInvalidation(
- ModelTypeSetToObjectIdSet(ModelTypeSet(THEMES, APPS)));
-
- ReflectSentNotifications();
- EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
-}
-
-// Set up the P2PInvalidator and send out notifications with various
-// target settings. The notifications received by the observer should
-// be consistent with the target settings.
-TEST_F(P2PInvalidatorTest, SendNotificationData) {
- const ModelTypeSet enabled_types(BOOKMARKS, PREFERENCES, THEMES);
- const ModelTypeSet changed_types(THEMES, APPS);
- const ModelTypeSet expected_types(THEMES);
-
- const ObjectIdInvalidationMap& invalidation_map =
- MakeInvalidationMap(changed_types);
-
- P2PInvalidator* const invalidator = delegate_.GetInvalidator();
- notifier::FakePushClient* const push_client = delegate_.GetPushClient();
-
- invalidator->UpdateRegisteredIds(&fake_handler_,
- ModelTypeSetToObjectIdSet(enabled_types));
-
- invalidator->UpdateCredentials("foo@bar.com", "fake_token");
-
- ReflectSentNotifications();
- push_client->EnableNotifications();
- EXPECT_EQ(INVALIDATIONS_ENABLED, fake_handler_.GetInvalidatorState());
-
- ReflectSentNotifications();
- EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
- EXPECT_EQ(ModelTypeSetToObjectIdSet(enabled_types),
- fake_handler_.GetLastInvalidationMap().GetObjectIds());
-
- // Should be dropped.
- invalidator->SendNotificationDataForTest(P2PNotificationData());
- ReflectSentNotifications();
- EXPECT_EQ(1, fake_handler_.GetInvalidationCount());
-
- const ObjectIdSet& expected_ids = ModelTypeSetToObjectIdSet(expected_types);
-
- // Should be propagated.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender", NOTIFY_SELF, invalidation_map));
- ReflectSentNotifications();
- EXPECT_EQ(2, fake_handler_.GetInvalidationCount());
- EXPECT_EQ(expected_ids,
- fake_handler_.GetLastInvalidationMap().GetObjectIds());
-
- // Should be dropped.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender2", NOTIFY_SELF, invalidation_map));
- ReflectSentNotifications();
- EXPECT_EQ(2, fake_handler_.GetInvalidationCount());
-
- // Should be dropped.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender", NOTIFY_SELF, ObjectIdInvalidationMap()));
- ReflectSentNotifications();
- EXPECT_EQ(2, fake_handler_.GetInvalidationCount());
-
- // Should be dropped.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender", NOTIFY_OTHERS, invalidation_map));
- ReflectSentNotifications();
- EXPECT_EQ(2, fake_handler_.GetInvalidationCount());
-
- // Should be propagated.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender2", NOTIFY_OTHERS, invalidation_map));
- ReflectSentNotifications();
- EXPECT_EQ(3, fake_handler_.GetInvalidationCount());
- EXPECT_EQ(expected_ids,
- fake_handler_.GetLastInvalidationMap().GetObjectIds());
-
- // Should be dropped.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender2", NOTIFY_OTHERS, ObjectIdInvalidationMap()));
- ReflectSentNotifications();
- EXPECT_EQ(3, fake_handler_.GetInvalidationCount());
-
- // Should be propagated.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender", NOTIFY_ALL, invalidation_map));
- ReflectSentNotifications();
- EXPECT_EQ(4, fake_handler_.GetInvalidationCount());
- EXPECT_EQ(expected_ids,
- fake_handler_.GetLastInvalidationMap().GetObjectIds());
-
- // Should be propagated.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender2", NOTIFY_ALL, invalidation_map));
- ReflectSentNotifications();
- EXPECT_EQ(5, fake_handler_.GetInvalidationCount());
- EXPECT_EQ(expected_ids,
- fake_handler_.GetLastInvalidationMap().GetObjectIds());
-
- // Should be dropped.
- invalidator->SendNotificationDataForTest(
- P2PNotificationData("sender2", NOTIFY_ALL, ObjectIdInvalidationMap()));
- ReflectSentNotifications();
- EXPECT_EQ(5, fake_handler_.GetInvalidationCount());
-}
-
-INSTANTIATE_TYPED_TEST_CASE_P(
- P2PInvalidatorTest, InvalidatorTest,
- P2PInvalidatorTestDelegate);
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/push_client_channel.cc b/chromium/sync/notifier/push_client_channel.cc
deleted file mode 100644
index a067a215d64..00000000000
--- a/chromium/sync/notifier/push_client_channel.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/push_client_channel.h"
-
-#include "base/stl_util.h"
-#include "google/cacheinvalidation/client_gateway.pb.h"
-#include "jingle/notifier/listener/push_client.h"
-
-namespace syncer {
-
-namespace {
-
-const char kBotJid[] = "tango@bot.talk.google.com";
-const char kChannelName[] = "tango_raw";
-
-} // namespace
-
-PushClientChannel::PushClientChannel(
- scoped_ptr<notifier::PushClient> push_client)
- : push_client_(push_client.Pass()) {
- push_client_->AddObserver(this);
- notifier::Subscription subscription;
- subscription.channel = kChannelName;
- subscription.from = "";
- notifier::SubscriptionList subscriptions;
- subscriptions.push_back(subscription);
- push_client_->UpdateSubscriptions(subscriptions);
-}
-
-PushClientChannel::~PushClientChannel() {
- push_client_->RemoveObserver(this);
-}
-
-void PushClientChannel::UpdateCredentials(
- const std::string& email, const std::string& token) {
- push_client_->UpdateCredentials(email, token);
-}
-
-void PushClientChannel::SendEncodedMessage(const std::string& encoded_message) {
- notifier::Recipient recipient;
- recipient.to = kBotJid;
- notifier::Notification notification;
- notification.channel = kChannelName;
- notification.recipients.push_back(recipient);
- notification.data = encoded_message;
- push_client_->SendNotification(notification);
-}
-
-void PushClientChannel::OnNotificationsEnabled() {
- NotifyStateChange(INVALIDATIONS_ENABLED);
-}
-
-void PushClientChannel::OnNotificationsDisabled(
- notifier::NotificationsDisabledReason reason) {
- NotifyStateChange(FromNotifierReason(reason));
-}
-
-void PushClientChannel::OnIncomingNotification(
- const notifier::Notification& notification) {
- DeliverIncomingMessage(notification.data);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/push_client_channel.h b/chromium/sync/notifier/push_client_channel.h
deleted file mode 100644
index fa029ab91cd..00000000000
--- a/chromium/sync/notifier/push_client_channel.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_PUSH_CLIENT_CHANNEL_H_
-#define SYNC_NOTIFIER_PUSH_CLIENT_CHANNEL_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "jingle/notifier/listener/push_client_observer.h"
-#include "sync/base/sync_export.h"
-#include "sync/notifier/sync_system_resources.h"
-
-namespace notifier {
-class PushClient;
-} // namespace notifier
-
-namespace syncer {
-
-// A PushClientChannel is an implementation of NetworkChannel that
-// routes messages through a PushClient.
-class SYNC_EXPORT_PRIVATE PushClientChannel
- : public SyncNetworkChannel,
- public NON_EXPORTED_BASE(notifier::PushClientObserver) {
- public:
- // |push_client| is guaranteed to be destroyed only when this object
- // is destroyed.
- explicit PushClientChannel(scoped_ptr<notifier::PushClient> push_client);
-
- virtual ~PushClientChannel();
-
- // If not connected, connects with the given credentials. If
- // already connected, the next connection attempt will use the given
- // credentials.
- void UpdateCredentials(const std::string& email, const std::string& token);
-
- // SyncNetworkChannel implementation.
- virtual void SendEncodedMessage(const std::string& encoded_message) OVERRIDE;
-
- // notifier::PushClient::Observer implementation.
- virtual void OnNotificationsEnabled() OVERRIDE;
- virtual void OnNotificationsDisabled(
- notifier::NotificationsDisabledReason reason) OVERRIDE;
- virtual void OnIncomingNotification(
- const notifier::Notification& notification) OVERRIDE;
-
- private:
- scoped_ptr<notifier::PushClient> push_client_;
-
- DISALLOW_COPY_AND_ASSIGN(PushClientChannel);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_PUSH_CLIENT_CHANNEL_H_
diff --git a/chromium/sync/notifier/push_client_channel_unittest.cc b/chromium/sync/notifier/push_client_channel_unittest.cc
deleted file mode 100644
index d0b75e41776..00000000000
--- a/chromium/sync/notifier/push_client_channel_unittest.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/push_client_channel.h"
-
-#include <cstddef>
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "jingle/notifier/listener/fake_push_client.h"
-#include "jingle/notifier/listener/notification_defines.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class PushClientChannelTest
- : public ::testing::Test,
- public SyncNetworkChannel::Observer {
- protected:
- PushClientChannelTest()
- : fake_push_client_(new notifier::FakePushClient()),
- push_client_channel_(
- scoped_ptr<notifier::PushClient>(fake_push_client_)),
- last_invalidator_state_(DEFAULT_INVALIDATION_ERROR) {
- push_client_channel_.AddObserver(this);
- push_client_channel_.SetMessageReceiver(
- invalidation::NewPermanentCallback(
- this, &PushClientChannelTest::OnIncomingMessage));
- push_client_channel_.SetSystemResources(NULL);
- }
-
- virtual ~PushClientChannelTest() {
- push_client_channel_.RemoveObserver(this);
- }
-
- virtual void OnNetworkChannelStateChanged(
- InvalidatorState invalidator_state) OVERRIDE {
- last_invalidator_state_ = invalidator_state;
- }
-
- void OnIncomingMessage(std::string incoming_message) {
- last_message_ = incoming_message;
- }
-
- notifier::FakePushClient* fake_push_client_;
- PushClientChannel push_client_channel_;
- std::string last_message_;
- InvalidatorState last_invalidator_state_;
-};
-
-const char kMessage[] = "message";
-const char kServiceContext[] = "service context";
-const int64 kSchedulingHash = 100;
-
-// Make sure the channel subscribes to the correct notifications
-// channel on construction.
-TEST_F(PushClientChannelTest, Subscriptions) {
- notifier::Subscription expected_subscription;
- expected_subscription.channel = "tango_raw";
- EXPECT_TRUE(notifier::SubscriptionListsEqual(
- fake_push_client_->subscriptions(),
- notifier::SubscriptionList(1, expected_subscription)));
-}
-
-// Call UpdateCredentials on the channel. It should propagate it to
-// the push client.
-TEST_F(PushClientChannelTest, UpdateCredentials) {
- const char kEmail[] = "foo@bar.com";
- const char kToken[] = "token";
- EXPECT_TRUE(fake_push_client_->email().empty());
- EXPECT_TRUE(fake_push_client_->token().empty());
- push_client_channel_.UpdateCredentials(kEmail, kToken);
- EXPECT_EQ(kEmail, fake_push_client_->email());
- EXPECT_EQ(kToken, fake_push_client_->token());
-}
-
-// Simulate push client state changes on the push client. It should
-// propagate to the channel.
-TEST_F(PushClientChannelTest, OnPushClientStateChange) {
- EXPECT_EQ(DEFAULT_INVALIDATION_ERROR, last_invalidator_state_);
- fake_push_client_->EnableNotifications();
- EXPECT_EQ(INVALIDATIONS_ENABLED, last_invalidator_state_);
- fake_push_client_->DisableNotifications(
- notifier::TRANSIENT_NOTIFICATION_ERROR);
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, last_invalidator_state_);
- fake_push_client_->DisableNotifications(
- notifier::NOTIFICATION_CREDENTIALS_REJECTED);
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, last_invalidator_state_);
-}
-
-// Call SendMessage on the channel. It should propagate it to the
-// push client.
-TEST_F(PushClientChannelTest, SendMessage) {
- EXPECT_TRUE(fake_push_client_->sent_notifications().empty());
- push_client_channel_.SendMessage(kMessage);
- ASSERT_EQ(1u, fake_push_client_->sent_notifications().size());
-}
-
-// Simulate an incoming notification. It should be decoded properly
-// by the channel.
-TEST_F(PushClientChannelTest, OnIncomingNotification) {
- notifier::Notification notification;
- notification.data =
- PushClientChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
-
- fake_push_client_->SimulateIncomingNotification(notification);
- EXPECT_EQ(kServiceContext,
- push_client_channel_.GetServiceContextForTest());
- EXPECT_EQ(kSchedulingHash,
- push_client_channel_.GetSchedulingHashForTest());
- EXPECT_EQ(kMessage, last_message_);
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/notifier/registration_manager.cc b/chromium/sync/notifier/registration_manager.cc
deleted file mode 100644
index 4c59e012f8b..00000000000
--- a/chromium/sync/notifier/registration_manager.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/registration_manager.h"
-
-#include <algorithm>
-#include <cstddef>
-#include <iterator>
-#include <string>
-#include <utility>
-
-#include "base/rand_util.h"
-#include "base/stl_util.h"
-#include "google/cacheinvalidation/include/invalidation-client.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace syncer {
-
-RegistrationManager::PendingRegistrationInfo::PendingRegistrationInfo() {}
-
-RegistrationManager::RegistrationStatus::RegistrationStatus(
- const invalidation::ObjectId& id, RegistrationManager* manager)
- : id(id),
- registration_manager(manager),
- enabled(true),
- state(invalidation::InvalidationListener::UNREGISTERED) {
- DCHECK(registration_manager);
-}
-
-RegistrationManager::RegistrationStatus::~RegistrationStatus() {}
-
-void RegistrationManager::RegistrationStatus::DoRegister() {
- CHECK(enabled);
- // We might be called explicitly, so stop the timer manually and
- // reset the delay.
- registration_timer.Stop();
- delay = base::TimeDelta();
- registration_manager->DoRegisterId(id);
- DCHECK(!last_registration_request.is_null());
-}
-
-void RegistrationManager::RegistrationStatus::Disable() {
- enabled = false;
- state = invalidation::InvalidationListener::UNREGISTERED;
- registration_timer.Stop();
- delay = base::TimeDelta();
-}
-
-const int RegistrationManager::kInitialRegistrationDelaySeconds = 5;
-const int RegistrationManager::kRegistrationDelayExponent = 2;
-const double RegistrationManager::kRegistrationDelayMaxJitter = 0.5;
-const int RegistrationManager::kMinRegistrationDelaySeconds = 1;
-// 1 hour.
-const int RegistrationManager::kMaxRegistrationDelaySeconds = 60 * 60;
-
-RegistrationManager::RegistrationManager(
- invalidation::InvalidationClient* invalidation_client)
- : invalidation_client_(invalidation_client) {
- DCHECK(invalidation_client_);
-}
-
-RegistrationManager::~RegistrationManager() {
- DCHECK(CalledOnValidThread());
- STLDeleteValues(&registration_statuses_);
-}
-
-ObjectIdSet RegistrationManager::UpdateRegisteredIds(const ObjectIdSet& ids) {
- DCHECK(CalledOnValidThread());
-
- const ObjectIdSet& old_ids = GetRegisteredIds();
- const ObjectIdSet& to_register = ids;
- ObjectIdSet to_unregister;
- std::set_difference(old_ids.begin(), old_ids.end(),
- ids.begin(), ids.end(),
- std::inserter(to_unregister, to_unregister.begin()),
- ObjectIdLessThan());
-
- for (ObjectIdSet::const_iterator it = to_unregister.begin();
- it != to_unregister.end(); ++it) {
- UnregisterId(*it);
- }
-
- for (ObjectIdSet::const_iterator it = to_register.begin();
- it != to_register.end(); ++it) {
- if (!ContainsKey(registration_statuses_, *it)) {
- registration_statuses_.insert(
- std::make_pair(*it, new RegistrationStatus(*it, this)));
- }
- if (!IsIdRegistered(*it)) {
- TryRegisterId(*it, false /* is-retry */);
- }
- }
-
- return to_unregister;
-}
-
-void RegistrationManager::MarkRegistrationLost(
- const invalidation::ObjectId& id) {
- DCHECK(CalledOnValidThread());
- RegistrationStatusMap::const_iterator it = registration_statuses_.find(id);
- if (it == registration_statuses_.end()) {
- DLOG(WARNING) << "Attempt to mark non-existent registration for "
- << ObjectIdToString(id) << " as lost";
- return;
- }
- if (!it->second->enabled) {
- return;
- }
- it->second->state = invalidation::InvalidationListener::UNREGISTERED;
- bool is_retry = !it->second->last_registration_request.is_null();
- TryRegisterId(id, is_retry);
-}
-
-void RegistrationManager::MarkAllRegistrationsLost() {
- DCHECK(CalledOnValidThread());
- for (RegistrationStatusMap::const_iterator it =
- registration_statuses_.begin();
- it != registration_statuses_.end(); ++it) {
- if (IsIdRegistered(it->first)) {
- MarkRegistrationLost(it->first);
- }
- }
-}
-
-void RegistrationManager::DisableId(const invalidation::ObjectId& id) {
- DCHECK(CalledOnValidThread());
- RegistrationStatusMap::const_iterator it = registration_statuses_.find(id);
- if (it == registration_statuses_.end()) {
- DLOG(WARNING) << "Attempt to disable non-existent registration for "
- << ObjectIdToString(id);
- return;
- }
- it->second->Disable();
-}
-
-// static
-double RegistrationManager::CalculateBackoff(
- double retry_interval,
- double initial_retry_interval,
- double min_retry_interval,
- double max_retry_interval,
- double backoff_exponent,
- double jitter,
- double max_jitter) {
- // scaled_jitter lies in [-max_jitter, max_jitter].
- double scaled_jitter = jitter * max_jitter;
- double new_retry_interval =
- (retry_interval == 0.0) ?
- (initial_retry_interval * (1.0 + scaled_jitter)) :
- (retry_interval * (backoff_exponent + scaled_jitter));
- return std::max(min_retry_interval,
- std::min(max_retry_interval, new_retry_interval));
-}
-
-ObjectIdSet RegistrationManager::GetRegisteredIdsForTest() const {
- return GetRegisteredIds();
-}
-
-RegistrationManager::PendingRegistrationMap
- RegistrationManager::GetPendingRegistrationsForTest() const {
- DCHECK(CalledOnValidThread());
- PendingRegistrationMap pending_registrations;
- for (RegistrationStatusMap::const_iterator it =
- registration_statuses_.begin();
- it != registration_statuses_.end(); ++it) {
- const invalidation::ObjectId& id = it->first;
- RegistrationStatus* status = it->second;
- if (status->registration_timer.IsRunning()) {
- pending_registrations[id].last_registration_request =
- status->last_registration_request;
- pending_registrations[id].registration_attempt =
- status->last_registration_attempt;
- pending_registrations[id].delay = status->delay;
- pending_registrations[id].actual_delay =
- status->registration_timer.GetCurrentDelay();
- }
- }
- return pending_registrations;
-}
-
-void RegistrationManager::FirePendingRegistrationsForTest() {
- DCHECK(CalledOnValidThread());
- for (RegistrationStatusMap::const_iterator it =
- registration_statuses_.begin();
- it != registration_statuses_.end(); ++it) {
- if (it->second->registration_timer.IsRunning()) {
- it->second->DoRegister();
- }
- }
-}
-
-double RegistrationManager::GetJitter() {
- // |jitter| lies in [-1.0, 1.0), which is low-biased, but only
- // barely.
- //
- // TODO(akalin): Fix the bias.
- return 2.0 * base::RandDouble() - 1.0;
-}
-
-void RegistrationManager::TryRegisterId(const invalidation::ObjectId& id,
- bool is_retry) {
- DCHECK(CalledOnValidThread());
- RegistrationStatusMap::const_iterator it = registration_statuses_.find(id);
- if (it == registration_statuses_.end()) {
- DLOG(FATAL) << "TryRegisterId called on " << ObjectIdToString(id)
- << " which is not in the registration map";
- return;
- }
- RegistrationStatus* status = it->second;
- if (!status->enabled) {
- // Disabled, so do nothing.
- return;
- }
- status->last_registration_attempt = base::Time::Now();
- if (is_retry) {
- // If we're a retry, we must have tried at least once before.
- DCHECK(!status->last_registration_request.is_null());
- // delay = max(0, (now - last request) + next_delay)
- status->delay =
- (status->last_registration_request -
- status->last_registration_attempt) +
- status->next_delay;
- base::TimeDelta delay =
- (status->delay <= base::TimeDelta()) ?
- base::TimeDelta() : status->delay;
- DVLOG(2) << "Registering "
- << ObjectIdToString(id) << " in "
- << delay.InMilliseconds() << " ms";
- status->registration_timer.Stop();
- status->registration_timer.Start(FROM_HERE,
- delay, status, &RegistrationManager::RegistrationStatus::DoRegister);
- double next_delay_seconds =
- CalculateBackoff(static_cast<double>(status->next_delay.InSeconds()),
- kInitialRegistrationDelaySeconds,
- kMinRegistrationDelaySeconds,
- kMaxRegistrationDelaySeconds,
- kRegistrationDelayExponent,
- GetJitter(),
- kRegistrationDelayMaxJitter);
- status->next_delay =
- base::TimeDelta::FromSeconds(static_cast<int64>(next_delay_seconds));
- DVLOG(2) << "New next delay for "
- << ObjectIdToString(id) << " is "
- << status->next_delay.InSeconds() << " seconds";
- } else {
- DVLOG(2) << "Not a retry -- registering "
- << ObjectIdToString(id) << " immediately";
- status->delay = base::TimeDelta();
- status->next_delay = base::TimeDelta();
- status->DoRegister();
- }
-}
-
-void RegistrationManager::DoRegisterId(const invalidation::ObjectId& id) {
- DCHECK(CalledOnValidThread());
- invalidation_client_->Register(id);
- RegistrationStatusMap::const_iterator it = registration_statuses_.find(id);
- if (it == registration_statuses_.end()) {
- DLOG(FATAL) << "DoRegisterId called on " << ObjectIdToString(id)
- << " which is not in the registration map";
- return;
- }
- it->second->state = invalidation::InvalidationListener::REGISTERED;
- it->second->last_registration_request = base::Time::Now();
-}
-
-void RegistrationManager::UnregisterId(const invalidation::ObjectId& id) {
- DCHECK(CalledOnValidThread());
- invalidation_client_->Unregister(id);
- RegistrationStatusMap::iterator it = registration_statuses_.find(id);
- if (it == registration_statuses_.end()) {
- DLOG(FATAL) << "UnregisterId called on " << ObjectIdToString(id)
- << " which is not in the registration map";
- return;
- }
- delete it->second;
- registration_statuses_.erase(it);
-}
-
-
-ObjectIdSet RegistrationManager::GetRegisteredIds() const {
- DCHECK(CalledOnValidThread());
- ObjectIdSet ids;
- for (RegistrationStatusMap::const_iterator it =
- registration_statuses_.begin();
- it != registration_statuses_.end(); ++it) {
- if (IsIdRegistered(it->first)) {
- ids.insert(it->first);
- }
- }
- return ids;
-}
-
-bool RegistrationManager::IsIdRegistered(
- const invalidation::ObjectId& id) const {
- DCHECK(CalledOnValidThread());
- RegistrationStatusMap::const_iterator it =
- registration_statuses_.find(id);
- return it != registration_statuses_.end() &&
- it->second->state == invalidation::InvalidationListener::REGISTERED;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/registration_manager.h b/chromium/sync/notifier/registration_manager.h
deleted file mode 100644
index e190a7fad55..00000000000
--- a/chromium/sync/notifier/registration_manager.h
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A class that manages the registration of types for server-issued
-// notifications.
-
-#ifndef SYNC_NOTIFIER_REGISTRATION_MANAGER_H_
-#define SYNC_NOTIFIER_REGISTRATION_MANAGER_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-// For invalidation::InvalidationListener::RegistrationState.
-#include "google/cacheinvalidation/include/invalidation-listener.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "sync/base/sync_export.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace syncer {
-
-using ::invalidation::InvalidationListener;
-
-// Manages the details of registering types for invalidation.
-// Implements exponential backoff for repeated registration attempts
-// to the invalidation client.
-//
-// TODO(akalin): Consolidate exponential backoff code. Other
-// implementations include the syncer thread (both versions) and XMPP
-// retries. The most sophisticated one is URLRequestThrottler; making
-// that generic should work for everyone.
-class SYNC_EXPORT_PRIVATE RegistrationManager : public base::NonThreadSafe {
- public:
- // Constants for exponential backoff (used by tests).
- static const int kInitialRegistrationDelaySeconds;
- static const int kRegistrationDelayExponent;
- static const double kRegistrationDelayMaxJitter;
- static const int kMinRegistrationDelaySeconds;
- static const int kMaxRegistrationDelaySeconds;
-
- // Types used by testing functions.
- struct PendingRegistrationInfo {
- PendingRegistrationInfo();
-
- // Last time a registration request was actually sent.
- base::Time last_registration_request;
- // Time the registration was attempted.
- base::Time registration_attempt;
- // The calculated delay of the pending registration (which may be
- // negative).
- base::TimeDelta delay;
- // The delay of the timer, which should be max(delay, 0).
- base::TimeDelta actual_delay;
- };
- // Map of object IDs with pending registrations to info about the
- // pending registration.
- typedef std::map<invalidation::ObjectId,
- PendingRegistrationInfo,
- ObjectIdLessThan>
- PendingRegistrationMap;
-
- // Does not take ownership of |invalidation_client_|.
- explicit RegistrationManager(
- invalidation::InvalidationClient* invalidation_client);
-
- virtual ~RegistrationManager();
-
- // Registers all object IDs included in the given set (that are not
- // already disabled) and unregisters all other object IDs. The return value is
- // the set of IDs that was unregistered.
- ObjectIdSet UpdateRegisteredIds(const ObjectIdSet& ids);
-
- // Marks the registration for the |id| lost and re-registers
- // it (unless it's disabled).
- void MarkRegistrationLost(const invalidation::ObjectId& id);
-
- // Marks registrations lost for all enabled object IDs and re-registers them.
- void MarkAllRegistrationsLost();
-
- // Marks the registration for the |id| permanently lost and blocks any future
- // registration attempts.
- void DisableId(const invalidation::ObjectId& id);
-
- // Calculate exponential backoff. |jitter| must be Uniform[-1.0, 1.0].
- static double CalculateBackoff(double retry_interval,
- double initial_retry_interval,
- double min_retry_interval,
- double max_retry_interval,
- double backoff_exponent,
- double jitter,
- double max_jitter);
-
- // The functions below should only be used in tests.
-
- // Gets all currently registered ids.
- ObjectIdSet GetRegisteredIdsForTest() const;
-
- // Gets all pending registrations and their next min delays.
- PendingRegistrationMap GetPendingRegistrationsForTest() const;
-
- // Run pending registrations immediately.
- void FirePendingRegistrationsForTest();
-
- protected:
- // Overrideable for testing purposes.
- virtual double GetJitter();
-
- private:
- struct RegistrationStatus {
- RegistrationStatus(const invalidation::ObjectId& id,
- RegistrationManager* manager);
- ~RegistrationStatus();
-
- // Calls registration_manager->DoRegister(model_type). (needed by
- // |registration_timer|). Should only be called if |enabled| is
- // true.
- void DoRegister();
-
- // Sets |enabled| to false and resets other variables.
- void Disable();
-
- // The object for which this is the status.
- const invalidation::ObjectId id;
- // The parent registration manager.
- RegistrationManager* const registration_manager;
-
- // Whether this data type should be registered. Set to false if
- // we get a non-transient registration failure.
- bool enabled;
- // The current registration state.
- InvalidationListener::RegistrationState state;
- // When we last sent a registration request.
- base::Time last_registration_request;
- // When we last tried to register.
- base::Time last_registration_attempt;
- // The calculated delay of any pending registration (which may be
- // negative).
- base::TimeDelta delay;
- // The minimum time to wait until any next registration attempt.
- // Increased after each consecutive failure.
- base::TimeDelta next_delay;
- // The actual timer for registration.
- base::OneShotTimer<RegistrationStatus> registration_timer;
-
- DISALLOW_COPY_AND_ASSIGN(RegistrationStatus);
- };
- typedef std::map<invalidation::ObjectId,
- RegistrationStatus*,
- ObjectIdLessThan>
- RegistrationStatusMap;
-
- // Does nothing if the given id is disabled. Otherwise, if
- // |is_retry| is not set, registers the given type immediately and
- // resets all backoff parameters. If |is_retry| is set, registers
- // the given type at some point in the future and increases the
- // delay until the next retry.
- void TryRegisterId(const invalidation::ObjectId& id,
- bool is_retry);
-
- // Registers the given id, which must be valid, immediately.
- // Updates |last_registration| in the appropriate
- // RegistrationStatus. Should only be called by
- // RegistrationStatus::DoRegister().
- void DoRegisterId(const invalidation::ObjectId& id);
-
- // Unregisters the given object ID.
- void UnregisterId(const invalidation::ObjectId& id);
-
- // Gets all currently registered ids.
- ObjectIdSet GetRegisteredIds() const;
-
- // Returns true iff the given object ID is registered.
- bool IsIdRegistered(const invalidation::ObjectId& id) const;
-
- RegistrationStatusMap registration_statuses_;
- // Weak pointer.
- invalidation::InvalidationClient* invalidation_client_;
-
- DISALLOW_COPY_AND_ASSIGN(RegistrationManager);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_REGISTRATION_MANAGER_H_
diff --git a/chromium/sync/notifier/registration_manager_unittest.cc b/chromium/sync/notifier/registration_manager_unittest.cc
deleted file mode 100644
index c8328fcf993..00000000000
--- a/chromium/sync/notifier/registration_manager_unittest.cc
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/registration_manager.h"
-
-#include <algorithm>
-#include <cmath>
-#include <cstddef>
-#include <deque>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/message_loop/message_loop.h"
-#include "base/stl_util.h"
-#include "google/cacheinvalidation/include/invalidation-client.h"
-#include "sync/notifier/invalidation_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-// Fake registration manager that lets you override jitter.
-class FakeRegistrationManager : public RegistrationManager {
- public:
- explicit FakeRegistrationManager(
- invalidation::InvalidationClient* invalidation_client)
- : RegistrationManager(invalidation_client),
- jitter_(0.0) {}
-
- virtual ~FakeRegistrationManager() {}
-
- void SetJitter(double jitter) {
- jitter_ = jitter;
- }
-
- protected:
- virtual double GetJitter() OVERRIDE {
- return jitter_;
- }
-
- private:
- double jitter_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeRegistrationManager);
-};
-
-// Fake invalidation client that just stores the currently-registered
-// object IDs.
-class FakeInvalidationClient : public invalidation::InvalidationClient {
- public:
- FakeInvalidationClient() {}
-
- virtual ~FakeInvalidationClient() {}
-
- void LoseRegistration(const invalidation::ObjectId& oid) {
- EXPECT_TRUE(ContainsKey(registered_ids_, oid));
- registered_ids_.erase(oid);
- }
-
- void LoseAllRegistrations() {
- registered_ids_.clear();
- }
-
- // invalidation::InvalidationClient implementation.
-
- virtual void Start() OVERRIDE {}
- virtual void Stop() OVERRIDE {}
- virtual void Acknowledge(const invalidation::AckHandle& handle) OVERRIDE {}
-
- virtual void Register(const invalidation::ObjectId& oid) OVERRIDE {
- EXPECT_FALSE(ContainsKey(registered_ids_, oid));
- registered_ids_.insert(oid);
- }
-
- virtual void Register(
- const std::vector<invalidation::ObjectId>& oids) OVERRIDE {
- // Unused for now.
- }
-
- virtual void Unregister(const invalidation::ObjectId& oid) OVERRIDE {
- EXPECT_TRUE(ContainsKey(registered_ids_, oid));
- registered_ids_.erase(oid);
- }
-
- virtual void Unregister(
- const std::vector<invalidation::ObjectId>& oids) OVERRIDE {
- // Unused for now.
- }
-
- const ObjectIdSet& GetRegisteredIdsForTest() const {
- return registered_ids_;
- }
-
- private:
- ObjectIdSet registered_ids_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeInvalidationClient);
-};
-
-size_t kObjectIdsCount = 5;
-
-invalidation::ObjectId GetIdForIndex(size_t index) {
- char name[2] = "a";
- name[0] += static_cast<char>(index);
- return invalidation::ObjectId(1 + index, name);
-}
-
-ObjectIdSet GetSequenceOfIdsStartingAt(size_t start, size_t count) {
- ObjectIdSet ids;
- for (size_t i = start; i < start + count; ++i)
- ids.insert(GetIdForIndex(i));
- return ids;
-}
-
-ObjectIdSet GetSequenceOfIds(size_t count) {
- return GetSequenceOfIdsStartingAt(0, count);
-}
-
-void ExpectPendingRegistrations(
- const ObjectIdSet& expected_pending_ids,
- double expected_delay_seconds,
- const RegistrationManager::PendingRegistrationMap& pending_registrations) {
- ObjectIdSet pending_ids;
- for (RegistrationManager::PendingRegistrationMap::const_iterator it =
- pending_registrations.begin(); it != pending_registrations.end();
- ++it) {
- SCOPED_TRACE(ObjectIdToString(it->first));
- pending_ids.insert(it->first);
- base::TimeDelta offset =
- it->second.last_registration_request -
- it->second.registration_attempt;
- base::TimeDelta expected_delay =
- base::TimeDelta::FromSeconds(
- static_cast<int64>(expected_delay_seconds)) + offset;
- // TODO(akalin): Add base::PrintTo() for base::Time and
- // base::TimeDeltas.
- EXPECT_EQ(expected_delay, it->second.delay)
- << expected_delay.InMicroseconds()
- << ", " << it->second.delay.InMicroseconds();
- if (it->second.delay <= base::TimeDelta()) {
- EXPECT_EQ(base::TimeDelta(), it->second.actual_delay);
- } else {
- EXPECT_EQ(it->second.actual_delay, it->second.delay);
- }
- }
- EXPECT_EQ(expected_pending_ids, pending_ids);
-}
-
-class RegistrationManagerTest : public testing::Test {
- protected:
- RegistrationManagerTest()
- : fake_registration_manager_(&fake_invalidation_client_) {}
-
- virtual ~RegistrationManagerTest() {}
-
- void LoseRegistrations(const ObjectIdSet& oids) {
- for (ObjectIdSet::const_iterator it = oids.begin(); it != oids.end();
- ++it) {
- fake_invalidation_client_.LoseRegistration(*it);
- fake_registration_manager_.MarkRegistrationLost(*it);
- }
- }
-
- void DisableIds(const ObjectIdSet& oids) {
- for (ObjectIdSet::const_iterator it = oids.begin(); it != oids.end();
- ++it) {
- fake_invalidation_client_.LoseRegistration(*it);
- fake_registration_manager_.DisableId(*it);
- }
- }
-
- // Used by MarkRegistrationLostBackoff* tests.
- void RunBackoffTest(double jitter) {
- fake_registration_manager_.SetJitter(jitter);
- ObjectIdSet ids = GetSequenceOfIds(kObjectIdsCount);
- fake_registration_manager_.UpdateRegisteredIds(ids);
-
- // Lose some ids.
- ObjectIdSet lost_ids = GetSequenceOfIds(2);
- LoseRegistrations(lost_ids);
- ExpectPendingRegistrations(
- lost_ids, 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Trigger another failure to start delaying.
- fake_registration_manager_.FirePendingRegistrationsForTest();
- LoseRegistrations(lost_ids);
-
- double scaled_jitter =
- jitter * RegistrationManager::kRegistrationDelayMaxJitter;
-
- double expected_delay =
- RegistrationManager::kInitialRegistrationDelaySeconds *
- (1.0 + scaled_jitter);
- expected_delay = std::floor(expected_delay);
- ExpectPendingRegistrations(
- lost_ids, expected_delay,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Trigger another failure.
- fake_registration_manager_.FirePendingRegistrationsForTest();
- LoseRegistrations(lost_ids);
- expected_delay *=
- RegistrationManager::kRegistrationDelayExponent + scaled_jitter;
- expected_delay = std::floor(expected_delay);
- ExpectPendingRegistrations(
- lost_ids, expected_delay,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Trigger enough failures to hit the ceiling.
- while (expected_delay < RegistrationManager::kMaxRegistrationDelaySeconds) {
- fake_registration_manager_.FirePendingRegistrationsForTest();
- LoseRegistrations(lost_ids);
- expected_delay *=
- RegistrationManager::kRegistrationDelayExponent + scaled_jitter;
- expected_delay = std::floor(expected_delay);
- }
- ExpectPendingRegistrations(
- lost_ids,
- RegistrationManager::kMaxRegistrationDelaySeconds,
- fake_registration_manager_.GetPendingRegistrationsForTest());
- }
-
- FakeInvalidationClient fake_invalidation_client_;
- FakeRegistrationManager fake_registration_manager_;
-
- private:
- // Needed by timers in RegistrationManager.
- base::MessageLoop message_loop_;
-
- DISALLOW_COPY_AND_ASSIGN(RegistrationManagerTest);
-};
-
-// Basic test of UpdateRegisteredIds to make sure we properly register
-// new IDs and unregister any IDs no longer in the set.
-TEST_F(RegistrationManagerTest, UpdateRegisteredIds) {
- ObjectIdSet ids = GetSequenceOfIds(kObjectIdsCount - 1);
-
- EXPECT_TRUE(fake_registration_manager_.GetRegisteredIdsForTest().empty());
- EXPECT_TRUE(fake_invalidation_client_.GetRegisteredIdsForTest().empty());
-
- ObjectIdSet expected_unregistered_ids;
-
- ObjectIdSet unregistered_ids =
- fake_registration_manager_.UpdateRegisteredIds(ids);
- EXPECT_EQ(expected_unregistered_ids, unregistered_ids);
- EXPECT_EQ(ids, fake_registration_manager_.GetRegisteredIdsForTest());
- EXPECT_EQ(ids, fake_invalidation_client_.GetRegisteredIdsForTest());
-
- ids.insert(GetIdForIndex(kObjectIdsCount - 1));
- ids.erase(GetIdForIndex(kObjectIdsCount - 2));
- unregistered_ids = fake_registration_manager_.UpdateRegisteredIds(ids);
- expected_unregistered_ids.insert(GetIdForIndex(kObjectIdsCount - 2));
- EXPECT_EQ(expected_unregistered_ids, unregistered_ids);
- EXPECT_EQ(ids, fake_registration_manager_.GetRegisteredIdsForTest());
- EXPECT_EQ(ids, fake_invalidation_client_.GetRegisteredIdsForTest());
-}
-
-int GetRoundedBackoff(double retry_interval, double jitter) {
- const double kInitialRetryInterval = 3.0;
- const double kMinRetryInterval = 2.0;
- const double kMaxRetryInterval = 20.0;
- const double kBackoffExponent = 2.0;
- const double kMaxJitter = 0.5;
-
- return static_cast<int>(
- RegistrationManager::CalculateBackoff(retry_interval,
- kInitialRetryInterval,
- kMinRetryInterval,
- kMaxRetryInterval,
- kBackoffExponent,
- jitter,
- kMaxJitter));
-}
-
-TEST_F(RegistrationManagerTest, CalculateBackoff) {
- // Test initial.
- EXPECT_EQ(2, GetRoundedBackoff(0.0, -1.0));
- EXPECT_EQ(3, GetRoundedBackoff(0.0, 0.0));
- EXPECT_EQ(4, GetRoundedBackoff(0.0, +1.0));
-
- // Test non-initial.
- EXPECT_EQ(4, GetRoundedBackoff(3.0, -1.0));
- EXPECT_EQ(6, GetRoundedBackoff(3.0, 0.0));
- EXPECT_EQ(7, GetRoundedBackoff(3.0, +1.0));
-
- EXPECT_EQ(7, GetRoundedBackoff(5.0, -1.0));
- EXPECT_EQ(10, GetRoundedBackoff(5.0, 0.0));
- EXPECT_EQ(12, GetRoundedBackoff(5.0, +1.0));
-
- // Test ceiling.
- EXPECT_EQ(19, GetRoundedBackoff(13.0, -1.0));
- EXPECT_EQ(20, GetRoundedBackoff(13.0, 0.0));
- EXPECT_EQ(20, GetRoundedBackoff(13.0, +1.0));
-}
-
-// Losing a registration should queue automatic re-registration.
-TEST_F(RegistrationManagerTest, MarkRegistrationLost) {
- ObjectIdSet ids = GetSequenceOfIds(kObjectIdsCount);
-
- fake_registration_manager_.UpdateRegisteredIds(ids);
- EXPECT_TRUE(
- fake_registration_manager_.GetPendingRegistrationsForTest().empty());
-
- // Lose some ids.
- ObjectIdSet lost_ids = GetSequenceOfIds(3);
- ObjectIdSet non_lost_ids = GetSequenceOfIdsStartingAt(3, kObjectIdsCount - 3);
- LoseRegistrations(lost_ids);
- ExpectPendingRegistrations(
- lost_ids, 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
- EXPECT_EQ(non_lost_ids, fake_registration_manager_.GetRegisteredIdsForTest());
- EXPECT_EQ(non_lost_ids, fake_invalidation_client_.GetRegisteredIdsForTest());
-
- // Pretend we waited long enough to re-register.
- fake_registration_manager_.FirePendingRegistrationsForTest();
- EXPECT_EQ(ids, fake_registration_manager_.GetRegisteredIdsForTest());
- EXPECT_EQ(ids, fake_invalidation_client_.GetRegisteredIdsForTest());
-}
-
-TEST_F(RegistrationManagerTest, MarkRegistrationLostBackoffLow) {
- RunBackoffTest(-1.0);
-}
-
-TEST_F(RegistrationManagerTest, MarkRegistrationLostBackoffMid) {
- RunBackoffTest(0.0);
-}
-
-TEST_F(RegistrationManagerTest, MarkRegistrationLostBackoffHigh) {
- RunBackoffTest(+1.0);
-}
-
-// Exponential backoff on lost registrations should be reset to zero if
-// UpdateRegisteredIds is called.
-TEST_F(RegistrationManagerTest, MarkRegistrationLostBackoffReset) {
- ObjectIdSet ids = GetSequenceOfIds(kObjectIdsCount);
-
- fake_registration_manager_.UpdateRegisteredIds(ids);
-
- // Lose some ids.
- ObjectIdSet lost_ids = GetSequenceOfIds(2);
- LoseRegistrations(lost_ids);
- ExpectPendingRegistrations(
- lost_ids, 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Trigger another failure to start delaying.
- fake_registration_manager_.FirePendingRegistrationsForTest();
- LoseRegistrations(lost_ids);
- double expected_delay =
- RegistrationManager::kInitialRegistrationDelaySeconds;
- ExpectPendingRegistrations(
- lost_ids, expected_delay,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Set ids again.
- fake_registration_manager_.UpdateRegisteredIds(ids);
- ExpectPendingRegistrations(
- ObjectIdSet(),
- 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-}
-
-TEST_F(RegistrationManagerTest, MarkAllRegistrationsLost) {
- ObjectIdSet ids = GetSequenceOfIds(kObjectIdsCount);
-
- fake_registration_manager_.UpdateRegisteredIds(ids);
-
- fake_invalidation_client_.LoseAllRegistrations();
- fake_registration_manager_.MarkAllRegistrationsLost();
-
- EXPECT_TRUE(fake_registration_manager_.GetRegisteredIdsForTest().empty());
- EXPECT_TRUE(fake_invalidation_client_.GetRegisteredIdsForTest().empty());
-
- ExpectPendingRegistrations(
- ids, 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Trigger another failure to start delaying.
- fake_registration_manager_.FirePendingRegistrationsForTest();
- fake_invalidation_client_.LoseAllRegistrations();
- fake_registration_manager_.MarkAllRegistrationsLost();
- double expected_delay =
- RegistrationManager::kInitialRegistrationDelaySeconds;
- ExpectPendingRegistrations(
- ids, expected_delay,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- // Pretend we waited long enough to re-register.
- fake_registration_manager_.FirePendingRegistrationsForTest();
- EXPECT_EQ(ids, fake_registration_manager_.GetRegisteredIdsForTest());
- EXPECT_EQ(ids, fake_invalidation_client_.GetRegisteredIdsForTest());
-}
-
-// IDs that are disabled should not be re-registered by UpdateRegisteredIds or
-// automatic re-registration if that registration is lost.
-TEST_F(RegistrationManagerTest, DisableId) {
- ObjectIdSet ids = GetSequenceOfIds(kObjectIdsCount);
-
- fake_registration_manager_.UpdateRegisteredIds(ids);
- EXPECT_TRUE(
- fake_registration_manager_.GetPendingRegistrationsForTest().empty());
-
- // Disable some ids.
- ObjectIdSet disabled_ids = GetSequenceOfIds(3);
- ObjectIdSet enabled_ids = GetSequenceOfIdsStartingAt(3, kObjectIdsCount - 3);
- DisableIds(disabled_ids);
- ExpectPendingRegistrations(
- ObjectIdSet(),
- 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
- EXPECT_EQ(enabled_ids, fake_registration_manager_.GetRegisteredIdsForTest());
- EXPECT_EQ(enabled_ids, fake_invalidation_client_.GetRegisteredIdsForTest());
-
- fake_registration_manager_.UpdateRegisteredIds(ids);
- EXPECT_EQ(enabled_ids, fake_registration_manager_.GetRegisteredIdsForTest());
-
- fake_registration_manager_.MarkRegistrationLost(
- *disabled_ids.begin());
- ExpectPendingRegistrations(
- ObjectIdSet(),
- 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-
- fake_registration_manager_.MarkAllRegistrationsLost();
- ExpectPendingRegistrations(
- enabled_ids, 0.0,
- fake_registration_manager_.GetPendingRegistrationsForTest());
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/notifier/single_object_invalidation_set.cc b/chromium/sync/notifier/single_object_invalidation_set.cc
deleted file mode 100644
index 6da3972fc72..00000000000
--- a/chromium/sync/notifier/single_object_invalidation_set.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/single_object_invalidation_set.h"
-
-#include "base/values.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace syncer {
-
-SingleObjectInvalidationSet::SingleObjectInvalidationSet() {}
-
-SingleObjectInvalidationSet::~SingleObjectInvalidationSet() {}
-
-void SingleObjectInvalidationSet::Insert(const Invalidation& invalidation) {
- invalidations_.insert(invalidation);
-}
-
-void SingleObjectInvalidationSet::InsertAll(
- const SingleObjectInvalidationSet& other) {
- invalidations_.insert(other.begin(), other.end());
-}
-
-void SingleObjectInvalidationSet::Clear() {
- invalidations_.clear();
-}
-
-bool SingleObjectInvalidationSet::StartsWithUnknownVersion() const {
- return !invalidations_.empty() &&
- invalidations_.begin()->is_unknown_version();
-}
-
-size_t SingleObjectInvalidationSet::GetSize() const {
- return invalidations_.size();
-}
-
-bool SingleObjectInvalidationSet::IsEmpty() const {
- return invalidations_.empty();
-}
-
-namespace {
-
-struct InvalidationComparator {
- bool operator()(const Invalidation& inv1, const Invalidation& inv2) {
- return inv1.Equals(inv2);
- }
-};
-
-} // namespace
-
-bool SingleObjectInvalidationSet::operator==(
- const SingleObjectInvalidationSet& other) const {
- return std::equal(invalidations_.begin(),
- invalidations_.end(),
- other.invalidations_.begin(),
- InvalidationComparator());
-}
-
-SingleObjectInvalidationSet::const_iterator
-SingleObjectInvalidationSet::begin() const {
- return invalidations_.begin();
-}
-
-SingleObjectInvalidationSet::const_iterator
-SingleObjectInvalidationSet::end() const {
- return invalidations_.end();
-}
-
-SingleObjectInvalidationSet::const_reverse_iterator
-SingleObjectInvalidationSet::rbegin() const {
- return invalidations_.rbegin();
-}
-
-SingleObjectInvalidationSet::const_reverse_iterator
-SingleObjectInvalidationSet::rend() const {
- return invalidations_.rend();
-}
-
-const Invalidation& SingleObjectInvalidationSet::back() const {
- return *invalidations_.rbegin();
-}
-
-scoped_ptr<base::ListValue> SingleObjectInvalidationSet::ToValue() const {
- scoped_ptr<base::ListValue> value(new ListValue);
- for (InvalidationsSet::const_iterator it = invalidations_.begin();
- it != invalidations_.end(); ++it) {
- value->Append(it->ToValue().release());
- }
- return value.Pass();
-}
-
-bool SingleObjectInvalidationSet::ResetFromValue(
- const base::ListValue& list) {
- for (size_t i = 0; i < list.GetSize(); ++i) {
- const base::DictionaryValue* dict;
- if (!list.GetDictionary(i, &dict)) {
- DLOG(WARNING) << "Could not find invalidation at index " << i;
- return false;
- }
- scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
- if (!invalidation) {
- DLOG(WARNING) << "Failed to parse invalidation at index " << i;
- return false;
- }
- invalidations_.insert(*invalidation);
- }
- return true;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/single_object_invalidation_set.h b/chromium/sync/notifier/single_object_invalidation_set.h
deleted file mode 100644
index e6f4d759aed..00000000000
--- a/chromium/sync/notifier/single_object_invalidation_set.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
-#define SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
-
-#include <set>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/invalidation.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace base {
-class ListValue;
-} // namespace base
-
-namespace syncer {
-
-// Holds a list of invalidations that all share the same Object ID.
-//
-// The list is kept sorted by version to make it easier to perform common
-// operations, like checking for an unknown version invalidation or fetching the
-// highest invalidation with the highest version number.
-class SYNC_EXPORT SingleObjectInvalidationSet {
- public:
- typedef std::set<Invalidation, InvalidationVersionLessThan> InvalidationsSet;
- typedef InvalidationsSet::const_iterator const_iterator;
- typedef InvalidationsSet::const_reverse_iterator const_reverse_iterator;
-
- SingleObjectInvalidationSet();
- ~SingleObjectInvalidationSet();
-
- void Insert(const Invalidation& invalidation);
- void InsertAll(const SingleObjectInvalidationSet& other);
- void Clear();
-
- // Returns true if this list contains an unknown version.
- //
- // Unknown version invalidations always end up at the start of the list,
- // because they have the lowest possible value in the sort ordering.
- bool StartsWithUnknownVersion() const;
- size_t GetSize() const;
- bool IsEmpty() const;
- bool operator==(const SingleObjectInvalidationSet& other) const;
-
- const_iterator begin() const;
- const_iterator end() const;
- const_reverse_iterator rbegin() const;
- const_reverse_iterator rend() const;
- const Invalidation& back() const;
-
- scoped_ptr<base::ListValue> ToValue() const;
- bool ResetFromValue(const base::ListValue& list);
-
- private:
- InvalidationsSet invalidations_;
-};
-
-} // syncer
-
-#endif // SYNC_NOTIFIER_SINGLE_OBJECT_INVALIDATION_SET_H_
diff --git a/chromium/sync/notifier/single_object_invalidation_set_unittest.cc b/chromium/sync/notifier/single_object_invalidation_set_unittest.cc
deleted file mode 100644
index 3fe074e10cf..00000000000
--- a/chromium/sync/notifier/single_object_invalidation_set_unittest.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/single_object_invalidation_set.h"
-
-#include "google/cacheinvalidation/types.pb.h"
-#include "sync/internal_api/public/base/invalidation_test_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-class SingleObjectInvalidationSetTest : public testing::Test {
- public:
- SingleObjectInvalidationSetTest()
- : kId(ipc::invalidation::ObjectSource::TEST, "one") {
- }
- protected:
- const invalidation::ObjectId kId;
-};
-
-TEST_F(SingleObjectInvalidationSetTest, InsertionAndOrdering) {
- SingleObjectInvalidationSet l1;
- SingleObjectInvalidationSet l2;
-
- Invalidation inv0 = Invalidation::InitUnknownVersion(kId);
- Invalidation inv1 = Invalidation::Init(kId, 1, "one");
- Invalidation inv2 = Invalidation::Init(kId, 5, "five");
-
- l1.Insert(inv0);
- l1.Insert(inv1);
- l1.Insert(inv2);
-
- l2.Insert(inv1);
- l2.Insert(inv2);
- l2.Insert(inv0);
-
- ASSERT_EQ(3U, l1.GetSize());
- ASSERT_EQ(3U, l2.GetSize());
-
- SingleObjectInvalidationSet::const_iterator it1 = l1.begin();
- SingleObjectInvalidationSet::const_iterator it2 = l2.begin();
- EXPECT_THAT(inv0, Eq(*it1));
- EXPECT_THAT(inv0, Eq(*it2));
- it1++;
- it2++;
- EXPECT_THAT(inv1, Eq(*it1));
- EXPECT_THAT(inv1, Eq(*it2));
- it1++;
- it2++;
- EXPECT_THAT(inv2, Eq(*it1));
- EXPECT_THAT(inv2, Eq(*it2));
- it1++;
- it2++;
- EXPECT_TRUE(it1 == l1.end());
- EXPECT_TRUE(it2 == l2.end());
-}
-
-TEST_F(SingleObjectInvalidationSetTest, StartWithUnknownVersion) {
- SingleObjectInvalidationSet list;
- EXPECT_FALSE(list.StartsWithUnknownVersion());
-
- list.Insert(Invalidation::Init(kId, 1, "one"));
- EXPECT_FALSE(list.StartsWithUnknownVersion());
-
- list.Insert(Invalidation::InitUnknownVersion(kId));
- EXPECT_TRUE(list.StartsWithUnknownVersion());
-
- list.Clear();
- EXPECT_FALSE(list.StartsWithUnknownVersion());
-}
-
-TEST_F(SingleObjectInvalidationSetTest, SerializeEmpty) {
- SingleObjectInvalidationSet list;
-
- scoped_ptr<base::ListValue> value = list.ToValue();
- ASSERT_TRUE(value.get());
- SingleObjectInvalidationSet deserialized;
- deserialized.ResetFromValue(*value.get());
- EXPECT_TRUE(list == deserialized);
-}
-
-TEST_F(SingleObjectInvalidationSetTest, SerializeOne) {
- SingleObjectInvalidationSet list;
- list.Insert(Invalidation::Init(kId, 1, "one"));
-
- scoped_ptr<base::ListValue> value = list.ToValue();
- ASSERT_TRUE(value.get());
- SingleObjectInvalidationSet deserialized;
- deserialized.ResetFromValue(*value.get());
- EXPECT_TRUE(list == deserialized);
-}
-
-TEST_F(SingleObjectInvalidationSetTest, SerializeMany) {
- SingleObjectInvalidationSet list;
- list.Insert(Invalidation::Init(kId, 1, "one"));
- list.Insert(Invalidation::InitUnknownVersion(kId));
-
- scoped_ptr<base::ListValue> value = list.ToValue();
- ASSERT_TRUE(value.get());
- SingleObjectInvalidationSet deserialized;
- deserialized.ResetFromValue(*value.get());
- EXPECT_TRUE(list == deserialized);
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/state_writer.h b/chromium/sync/notifier/state_writer.h
deleted file mode 100644
index b40bd45a245..00000000000
--- a/chromium/sync/notifier/state_writer.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Simple interface for something that persists state.
-
-#ifndef SYNC_NOTIFIER_STATE_WRITER_H_
-#define SYNC_NOTIFIER_STATE_WRITER_H_
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-class SYNC_EXPORT_PRIVATE StateWriter {
- public:
- virtual ~StateWriter() {}
-
- virtual void WriteState(const std::string& state) = 0;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_STATE_WRITER_H_
diff --git a/chromium/sync/notifier/sync_invalidation_listener.cc b/chromium/sync/notifier/sync_invalidation_listener.cc
deleted file mode 100644
index 9adb897bd1b..00000000000
--- a/chromium/sync/notifier/sync_invalidation_listener.cc
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/sync_invalidation_listener.h"
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/tracked_objects.h"
-#include "google/cacheinvalidation/include/invalidation-client.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "google/cacheinvalidation/types.pb.h"
-#include "jingle/notifier/listener/push_client.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/notifier/registration_manager.h"
-
-namespace {
-
-const char kApplicationName[] = "chrome-sync";
-
-} // namespace
-
-namespace syncer {
-
-SyncInvalidationListener::Delegate::~Delegate() {}
-
-SyncInvalidationListener::SyncInvalidationListener(
- scoped_ptr<notifier::PushClient> push_client)
- : push_client_channel_(push_client.Pass()),
- sync_system_resources_(&push_client_channel_, this),
- delegate_(NULL),
- ticl_state_(DEFAULT_INVALIDATION_ERROR),
- push_client_state_(DEFAULT_INVALIDATION_ERROR),
- weak_ptr_factory_(this) {
- DCHECK(CalledOnValidThread());
- push_client_channel_.AddObserver(this);
-}
-
-SyncInvalidationListener::~SyncInvalidationListener() {
- DCHECK(CalledOnValidThread());
- push_client_channel_.RemoveObserver(this);
- Stop();
- DCHECK(!delegate_);
-}
-
-void SyncInvalidationListener::Start(
- const CreateInvalidationClientCallback&
- create_invalidation_client_callback,
- const std::string& client_id, const std::string& client_info,
- const std::string& invalidation_bootstrap_data,
- const UnackedInvalidationsMap& initial_unacked_invalidations,
- const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
- Delegate* delegate) {
- DCHECK(CalledOnValidThread());
- Stop();
-
- sync_system_resources_.set_platform(client_info);
- sync_system_resources_.Start();
-
- // The Storage resource is implemented as a write-through cache. We populate
- // it with the initial state on startup, so subsequent writes go to disk and
- // update the in-memory cache, while reads just return the cached state.
- sync_system_resources_.storage()->SetInitialState(
- invalidation_bootstrap_data);
-
- unacked_invalidations_map_ = initial_unacked_invalidations;
- invalidation_state_tracker_ = invalidation_state_tracker;
- DCHECK(invalidation_state_tracker_.IsInitialized());
-
- DCHECK(!delegate_);
- DCHECK(delegate);
- delegate_ = delegate;
-
-#if defined(OS_IOS)
- int client_type = ipc::invalidation::ClientType::CHROME_SYNC_IOS;
-#else
- int client_type = ipc::invalidation::ClientType::CHROME_SYNC;
-#endif
- invalidation_client_.reset(
- create_invalidation_client_callback.Run(
- &sync_system_resources_, client_type, client_id,
- kApplicationName, this));
- invalidation_client_->Start();
-
- registration_manager_.reset(
- new RegistrationManager(invalidation_client_.get()));
-}
-
-void SyncInvalidationListener::UpdateCredentials(
- const std::string& email, const std::string& token) {
- DCHECK(CalledOnValidThread());
- push_client_channel_.UpdateCredentials(email, token);
-}
-
-void SyncInvalidationListener::UpdateRegisteredIds(const ObjectIdSet& ids) {
- DCHECK(CalledOnValidThread());
- registered_ids_ = ids;
- // |ticl_state_| can go to INVALIDATIONS_ENABLED even without a
- // working XMPP connection (as observed by us), so check it instead
- // of GetState() (see http://crbug.com/139424).
- if (ticl_state_ == INVALIDATIONS_ENABLED && registration_manager_) {
- DoRegistrationUpdate();
- }
-}
-
-void SyncInvalidationListener::Ready(
- invalidation::InvalidationClient* client) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- ticl_state_ = INVALIDATIONS_ENABLED;
- EmitStateChange();
- DoRegistrationUpdate();
-}
-
-void SyncInvalidationListener::Invalidate(
- invalidation::InvalidationClient* client,
- const invalidation::Invalidation& invalidation,
- const invalidation::AckHandle& ack_handle) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- client->Acknowledge(ack_handle);
-
- const invalidation::ObjectId& id = invalidation.object_id();
-
- std::string payload;
- // payload() CHECK()'s has_payload(), so we must check it ourselves first.
- if (invalidation.has_payload())
- payload = invalidation.payload();
-
- DVLOG(2) << "Received invalidation with version " << invalidation.version()
- << " for " << ObjectIdToString(id);
-
- ObjectIdInvalidationMap invalidations;
- Invalidation inv = Invalidation::Init(id, invalidation.version(), payload);
- inv.set_ack_handler(GetThisAsAckHandler());
- invalidations.Insert(inv);
-
- DispatchInvalidations(invalidations);
-}
-
-void SyncInvalidationListener::InvalidateUnknownVersion(
- invalidation::InvalidationClient* client,
- const invalidation::ObjectId& object_id,
- const invalidation::AckHandle& ack_handle) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- DVLOG(1) << "InvalidateUnknownVersion";
- client->Acknowledge(ack_handle);
-
- ObjectIdInvalidationMap invalidations;
- Invalidation unknown_version = Invalidation::InitUnknownVersion(object_id);
- unknown_version.set_ack_handler(GetThisAsAckHandler());
- invalidations.Insert(unknown_version);
-
- DispatchInvalidations(invalidations);
-}
-
-// This should behave as if we got an invalidation with version
-// UNKNOWN_OBJECT_VERSION for all known data types.
-void SyncInvalidationListener::InvalidateAll(
- invalidation::InvalidationClient* client,
- const invalidation::AckHandle& ack_handle) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- DVLOG(1) << "InvalidateAll";
- client->Acknowledge(ack_handle);
-
- ObjectIdInvalidationMap invalidations;
- for (ObjectIdSet::iterator it = registered_ids_.begin();
- it != registered_ids_.end(); ++it) {
- Invalidation unknown_version = Invalidation::InitUnknownVersion(*it);
- unknown_version.set_ack_handler(GetThisAsAckHandler());
- invalidations.Insert(unknown_version);
- }
-
- DispatchInvalidations(invalidations);
-}
-
-// If a handler is registered, emit right away. Otherwise, save it for later.
-void SyncInvalidationListener::DispatchInvalidations(
- const ObjectIdInvalidationMap& invalidations) {
- DCHECK(CalledOnValidThread());
-
- ObjectIdInvalidationMap to_save = invalidations;
- ObjectIdInvalidationMap to_emit =
- invalidations.GetSubsetWithObjectIds(registered_ids_);
-
- SaveInvalidations(to_save);
- EmitSavedInvalidations(to_emit);
-}
-
-void SyncInvalidationListener::SaveInvalidations(
- const ObjectIdInvalidationMap& to_save) {
- ObjectIdSet objects_to_save = to_save.GetObjectIds();
- for (ObjectIdSet::const_iterator it = objects_to_save.begin();
- it != objects_to_save.end(); ++it) {
- UnackedInvalidationsMap::iterator lookup =
- unacked_invalidations_map_.find(*it);
- if (lookup == unacked_invalidations_map_.end()) {
- lookup = unacked_invalidations_map_.insert(
- std::make_pair(*it, UnackedInvalidationSet(*it))).first;
- }
- lookup->second.AddSet(to_save.ForObject(*it));
- }
-
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::SetSavedInvalidations,
- unacked_invalidations_map_);
-}
-
-void SyncInvalidationListener::EmitSavedInvalidations(
- const ObjectIdInvalidationMap& to_emit) {
- DVLOG(2) << "Emitting invalidations: " << to_emit.ToString();
- delegate_->OnInvalidate(to_emit);
-}
-
-void SyncInvalidationListener::InformRegistrationStatus(
- invalidation::InvalidationClient* client,
- const invalidation::ObjectId& object_id,
- InvalidationListener::RegistrationState new_state) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- DVLOG(1) << "InformRegistrationStatus: "
- << ObjectIdToString(object_id) << " " << new_state;
-
- if (new_state != InvalidationListener::REGISTERED) {
- // Let |registration_manager_| handle the registration backoff policy.
- registration_manager_->MarkRegistrationLost(object_id);
- }
-}
-
-void SyncInvalidationListener::InformRegistrationFailure(
- invalidation::InvalidationClient* client,
- const invalidation::ObjectId& object_id,
- bool is_transient,
- const std::string& error_message) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- DVLOG(1) << "InformRegistrationFailure: "
- << ObjectIdToString(object_id)
- << "is_transient=" << is_transient
- << ", message=" << error_message;
-
- if (is_transient) {
- // We don't care about |unknown_hint|; we let
- // |registration_manager_| handle the registration backoff policy.
- registration_manager_->MarkRegistrationLost(object_id);
- } else {
- // Non-transient failures require an action to resolve. This could happen
- // because:
- // - the server doesn't yet recognize the data type, which could happen for
- // brand-new data types.
- // - the user has changed his password and hasn't updated it yet locally.
- // Either way, block future registration attempts for |object_id|. However,
- // we don't forget any saved invalidation state since we may use it once the
- // error is addressed.
- registration_manager_->DisableId(object_id);
- }
-}
-
-void SyncInvalidationListener::ReissueRegistrations(
- invalidation::InvalidationClient* client,
- const std::string& prefix,
- int prefix_length) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- DVLOG(1) << "AllRegistrationsLost";
- registration_manager_->MarkAllRegistrationsLost();
-}
-
-void SyncInvalidationListener::InformError(
- invalidation::InvalidationClient* client,
- const invalidation::ErrorInfo& error_info) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(client, invalidation_client_.get());
- LOG(ERROR) << "Ticl error " << error_info.error_reason() << ": "
- << error_info.error_message()
- << " (transient = " << error_info.is_transient() << ")";
- if (error_info.error_reason() == invalidation::ErrorReason::AUTH_FAILURE) {
- ticl_state_ = INVALIDATION_CREDENTIALS_REJECTED;
- } else {
- ticl_state_ = TRANSIENT_INVALIDATION_ERROR;
- }
- EmitStateChange();
-}
-
-void SyncInvalidationListener::Acknowledge(
- const invalidation::ObjectId& id,
- const syncer::AckHandle& handle) {
- UnackedInvalidationsMap::iterator lookup =
- unacked_invalidations_map_.find(id);
- if (lookup == unacked_invalidations_map_.end()) {
- DLOG(WARNING) << "Received acknowledgement for untracked object ID";
- return;
- }
- lookup->second.Acknowledge(handle);
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::SetSavedInvalidations,
- unacked_invalidations_map_);
-}
-
-void SyncInvalidationListener::Drop(
- const invalidation::ObjectId& id,
- const syncer::AckHandle& handle) {
- UnackedInvalidationsMap::iterator lookup =
- unacked_invalidations_map_.find(id);
- if (lookup == unacked_invalidations_map_.end()) {
- DLOG(WARNING) << "Received drop for untracked object ID";
- return;
- }
- lookup->second.Drop(handle);
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::SetSavedInvalidations,
- unacked_invalidations_map_);
-}
-
-void SyncInvalidationListener::WriteState(const std::string& state) {
- DCHECK(CalledOnValidThread());
- DVLOG(1) << "WriteState";
- invalidation_state_tracker_.Call(
- FROM_HERE, &InvalidationStateTracker::SetBootstrapData, state);
-}
-
-void SyncInvalidationListener::DoRegistrationUpdate() {
- DCHECK(CalledOnValidThread());
- const ObjectIdSet& unregistered_ids =
- registration_manager_->UpdateRegisteredIds(registered_ids_);
- for (ObjectIdSet::iterator it = unregistered_ids.begin();
- it != unregistered_ids.end(); ++it) {
- unacked_invalidations_map_.erase(*it);
- }
- invalidation_state_tracker_.Call(
- FROM_HERE,
- &InvalidationStateTracker::SetSavedInvalidations,
- unacked_invalidations_map_);
-
- ObjectIdInvalidationMap object_id_invalidation_map;
- for (UnackedInvalidationsMap::iterator map_it =
- unacked_invalidations_map_.begin();
- map_it != unacked_invalidations_map_.end(); ++map_it) {
- if (registered_ids_.find(map_it->first) == registered_ids_.end()) {
- continue;
- }
- map_it->second.ExportInvalidations(
- GetThisAsAckHandler(),
- &object_id_invalidation_map);
- }
-
- // There's no need to run these through DispatchInvalidations(); they've
- // already been saved to storage (that's where we found them) so all we need
- // to do now is emit them.
- EmitSavedInvalidations(object_id_invalidation_map);
-}
-
-void SyncInvalidationListener::StopForTest() {
- DCHECK(CalledOnValidThread());
- Stop();
-}
-
-void SyncInvalidationListener::Stop() {
- DCHECK(CalledOnValidThread());
- if (!invalidation_client_) {
- return;
- }
-
- registration_manager_.reset();
- sync_system_resources_.Stop();
- invalidation_client_->Stop();
-
- invalidation_client_.reset();
- delegate_ = NULL;
-
- ticl_state_ = DEFAULT_INVALIDATION_ERROR;
- push_client_state_ = DEFAULT_INVALIDATION_ERROR;
-}
-
-InvalidatorState SyncInvalidationListener::GetState() const {
- DCHECK(CalledOnValidThread());
- if (ticl_state_ == INVALIDATION_CREDENTIALS_REJECTED ||
- push_client_state_ == INVALIDATION_CREDENTIALS_REJECTED) {
- // If either the ticl or the push client rejected our credentials,
- // return INVALIDATION_CREDENTIALS_REJECTED.
- return INVALIDATION_CREDENTIALS_REJECTED;
- }
- if (ticl_state_ == INVALIDATIONS_ENABLED &&
- push_client_state_ == INVALIDATIONS_ENABLED) {
- // If the ticl is ready and the push client notifications are
- // enabled, return INVALIDATIONS_ENABLED.
- return INVALIDATIONS_ENABLED;
- }
- // Otherwise, we have a transient error.
- return TRANSIENT_INVALIDATION_ERROR;
-}
-
-void SyncInvalidationListener::EmitStateChange() {
- DCHECK(CalledOnValidThread());
- delegate_->OnInvalidatorStateChange(GetState());
-}
-
-WeakHandle<AckHandler> SyncInvalidationListener::GetThisAsAckHandler() {
- DCHECK(CalledOnValidThread());
- return WeakHandle<AckHandler>(weak_ptr_factory_.GetWeakPtr());
-}
-
-void SyncInvalidationListener::OnNetworkChannelStateChanged(
- InvalidatorState invalidator_state) {
- DCHECK(CalledOnValidThread());
- push_client_state_ = invalidator_state;
- EmitStateChange();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/sync_invalidation_listener.h b/chromium/sync/notifier/sync_invalidation_listener.h
deleted file mode 100644
index 2b4632d1da7..00000000000
--- a/chromium/sync/notifier/sync_invalidation_listener.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A simple wrapper around invalidation::InvalidationClient that
-// handles all the startup/shutdown details and hookups.
-
-#ifndef SYNC_NOTIFIER_SYNC_INVALIDATION_LISTENER_H_
-#define SYNC_NOTIFIER_SYNC_INVALIDATION_LISTENER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/callback_forward.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "google/cacheinvalidation/include/invalidation-listener.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/ack_handler.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/invalidator_state.h"
-#include "sync/notifier/push_client_channel.h"
-#include "sync/notifier/state_writer.h"
-#include "sync/notifier/sync_system_resources.h"
-#include "sync/notifier/unacked_invalidation_set.h"
-
-namespace buzz {
-class XmppTaskParentInterface;
-} // namespace buzz
-
-namespace notifier {
-class PushClient;
-} // namespace notifier
-
-namespace syncer {
-
-class ObjectIdInvalidationMap;
-class RegistrationManager;
-
-// SyncInvalidationListener is not thread-safe and lives on the sync
-// thread.
-class SYNC_EXPORT_PRIVATE SyncInvalidationListener
- : public NON_EXPORTED_BASE(invalidation::InvalidationListener),
- public StateWriter,
- public SyncNetworkChannel::Observer,
- public AckHandler,
- public base::NonThreadSafe {
- public:
- typedef base::Callback<invalidation::InvalidationClient*(
- invalidation::SystemResources*,
- int,
- const invalidation::string&,
- const invalidation::string&,
- invalidation::InvalidationListener*)> CreateInvalidationClientCallback;
-
- class SYNC_EXPORT_PRIVATE Delegate {
- public:
- virtual ~Delegate();
-
- virtual void OnInvalidate(
- const ObjectIdInvalidationMap& invalidations) = 0;
-
- virtual void OnInvalidatorStateChange(InvalidatorState state) = 0;
- };
-
- explicit SyncInvalidationListener(
- scoped_ptr<notifier::PushClient> push_client);
-
- // Calls Stop().
- virtual ~SyncInvalidationListener();
-
- // Does not take ownership of |delegate| or |state_writer|.
- // |invalidation_state_tracker| must be initialized.
- void Start(
- const CreateInvalidationClientCallback&
- create_invalidation_client_callback,
- const std::string& client_id, const std::string& client_info,
- const std::string& invalidation_bootstrap_data,
- const UnackedInvalidationsMap& initial_object_states,
- const WeakHandle<InvalidationStateTracker>& invalidation_state_tracker,
- Delegate* delegate);
-
- void UpdateCredentials(const std::string& email, const std::string& token);
-
- // Update the set of object IDs that we're interested in getting
- // notifications for. May be called at any time.
- void UpdateRegisteredIds(const ObjectIdSet& ids);
-
- // invalidation::InvalidationListener implementation.
- virtual void Ready(
- invalidation::InvalidationClient* client) OVERRIDE;
- virtual void Invalidate(
- invalidation::InvalidationClient* client,
- const invalidation::Invalidation& invalidation,
- const invalidation::AckHandle& ack_handle) OVERRIDE;
- virtual void InvalidateUnknownVersion(
- invalidation::InvalidationClient* client,
- const invalidation::ObjectId& object_id,
- const invalidation::AckHandle& ack_handle) OVERRIDE;
- virtual void InvalidateAll(
- invalidation::InvalidationClient* client,
- const invalidation::AckHandle& ack_handle) OVERRIDE;
- virtual void InformRegistrationStatus(
- invalidation::InvalidationClient* client,
- const invalidation::ObjectId& object_id,
- invalidation::InvalidationListener::RegistrationState reg_state) OVERRIDE;
- virtual void InformRegistrationFailure(
- invalidation::InvalidationClient* client,
- const invalidation::ObjectId& object_id,
- bool is_transient,
- const std::string& error_message) OVERRIDE;
- virtual void ReissueRegistrations(
- invalidation::InvalidationClient* client,
- const std::string& prefix,
- int prefix_length) OVERRIDE;
- virtual void InformError(
- invalidation::InvalidationClient* client,
- const invalidation::ErrorInfo& error_info) OVERRIDE;
-
- // AckHandler implementation.
- virtual void Acknowledge(
- const invalidation::ObjectId& id,
- const syncer::AckHandle& handle) OVERRIDE;
- virtual void Drop(
- const invalidation::ObjectId& id,
- const syncer::AckHandle& handle) OVERRIDE;
-
- // StateWriter implementation.
- virtual void WriteState(const std::string& state) OVERRIDE;
-
- // SyncNetworkChannel::Observer implementation.
- virtual void OnNetworkChannelStateChanged(
- InvalidatorState invalidator_state) OVERRIDE;
-
- void DoRegistrationUpdate();
-
- void StopForTest();
-
- private:
- void Stop();
-
- InvalidatorState GetState() const;
-
- void EmitStateChange();
-
- // Sends invalidations to their appropriate destination.
- //
- // If there are no observers registered for them, they will be saved for
- // later.
- //
- // If there are observers registered, they will be saved (to make sure we
- // don't drop them until they've been acted on) and emitted to the observers.
- void DispatchInvalidations(const ObjectIdInvalidationMap& invalidations);
-
- // Saves invalidations.
- //
- // This call isn't synchronous so we can't guarantee these invalidations will
- // be safely on disk by the end of the call, but it should ensure that the
- // data makes it to disk eventually.
- void SaveInvalidations(const ObjectIdInvalidationMap& to_save);
-
- // Emits previously saved invalidations to their registered observers.
- void EmitSavedInvalidations(const ObjectIdInvalidationMap& to_emit);
-
- WeakHandle<AckHandler> GetThisAsAckHandler();
-
- PushClientChannel push_client_channel_;
- SyncSystemResources sync_system_resources_;
- UnackedInvalidationsMap unacked_invalidations_map_;
- WeakHandle<InvalidationStateTracker> invalidation_state_tracker_;
- Delegate* delegate_;
- scoped_ptr<invalidation::InvalidationClient> invalidation_client_;
- scoped_ptr<RegistrationManager> registration_manager_;
- // Stored to pass to |registration_manager_| on start.
- ObjectIdSet registered_ids_;
-
- // The states of the ticl and the push client.
- InvalidatorState ticl_state_;
- InvalidatorState push_client_state_;
-
- base::WeakPtrFactory<SyncInvalidationListener> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncInvalidationListener);
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_SYNC_INVALIDATION_LISTENER_H_
diff --git a/chromium/sync/notifier/sync_invalidation_listener_unittest.cc b/chromium/sync/notifier/sync_invalidation_listener_unittest.cc
deleted file mode 100644
index 2808b9727a7..00000000000
--- a/chromium/sync/notifier/sync_invalidation_listener_unittest.cc
+++ /dev/null
@@ -1,1127 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstddef>
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
-#include "base/stl_util.h"
-#include "google/cacheinvalidation/include/invalidation-client.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "jingle/notifier/listener/fake_push_client.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/dropped_invalidation_tracker.h"
-#include "sync/notifier/fake_invalidation_state_tracker.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/notifier/sync_invalidation_listener.h"
-#include "sync/notifier/unacked_invalidation_set_test_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-using invalidation::AckHandle;
-using invalidation::ObjectId;
-
-const char kClientId[] = "client_id";
-const char kClientInfo[] = "client_info";
-
-const char kState[] = "state";
-const char kNewState[] = "new_state";
-
-const char kPayload1[] = "payload1";
-const char kPayload2[] = "payload2";
-
-const int64 kVersion1 = 1LL;
-const int64 kVersion2 = 2LL;
-
-const int kChromeSyncSourceId = 1004;
-
-struct AckHandleLessThan {
- bool operator()(const AckHandle& lhs, const AckHandle& rhs) const {
- return lhs.handle_data() < rhs.handle_data();
- }
-};
-
-typedef std::set<AckHandle, AckHandleLessThan> AckHandleSet;
-
-// Fake invalidation::InvalidationClient implementation that keeps
-// track of registered IDs and acked handles.
-class FakeInvalidationClient : public invalidation::InvalidationClient {
- public:
- FakeInvalidationClient() : started_(false) {}
- virtual ~FakeInvalidationClient() {}
-
- const ObjectIdSet& GetRegisteredIds() const {
- return registered_ids_;
- }
-
- void ClearAckedHandles() {
- acked_handles_.clear();
- }
-
- bool IsAckedHandle(const AckHandle& ack_handle) const {
- return (acked_handles_.find(ack_handle) != acked_handles_.end());
- }
-
- // invalidation::InvalidationClient implementation.
-
- virtual void Start() OVERRIDE {
- started_ = true;
- }
-
- virtual void Stop() OVERRIDE {
- started_ = false;
- }
-
- virtual void Register(const ObjectId& object_id) OVERRIDE {
- if (!started_) {
- ADD_FAILURE();
- return;
- }
- registered_ids_.insert(object_id);
- }
-
- virtual void Register(
- const invalidation::vector<ObjectId>& object_ids) OVERRIDE {
- if (!started_) {
- ADD_FAILURE();
- return;
- }
- registered_ids_.insert(object_ids.begin(), object_ids.end());
- }
-
- virtual void Unregister(const ObjectId& object_id) OVERRIDE {
- if (!started_) {
- ADD_FAILURE();
- return;
- }
- registered_ids_.erase(object_id);
- }
-
- virtual void Unregister(
- const invalidation::vector<ObjectId>& object_ids) OVERRIDE {
- if (!started_) {
- ADD_FAILURE();
- return;
- }
- for (invalidation::vector<ObjectId>::const_iterator
- it = object_ids.begin(); it != object_ids.end(); ++it) {
- registered_ids_.erase(*it);
- }
- }
-
- virtual void Acknowledge(const AckHandle& ack_handle) OVERRIDE {
- if (!started_) {
- ADD_FAILURE();
- return;
- }
- acked_handles_.insert(ack_handle);
- }
-
- private:
- bool started_;
- ObjectIdSet registered_ids_;
- AckHandleSet acked_handles_;
-};
-
-// Fake delegate tkat keeps track of invalidation counts, payloads,
-// and state.
-class FakeDelegate : public SyncInvalidationListener::Delegate {
- public:
- explicit FakeDelegate(SyncInvalidationListener* listener)
- : state_(TRANSIENT_INVALIDATION_ERROR),
- drop_handlers_deleter_(&drop_handlers_) {}
- virtual ~FakeDelegate() {}
-
- size_t GetInvalidationCount(const ObjectId& id) const {
- Map::const_iterator it = invalidations_.find(id);
- if (it == invalidations_.end()) {
- return 0;
- } else {
- return it->second.size();
- }
- }
-
- int64 GetVersion(const ObjectId& id) const {
- Map::const_iterator it = invalidations_.find(id);
- if (it == invalidations_.end()) {
- ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
- return 0;
- } else {
- return it->second.back().version();
- }
- }
-
- std::string GetPayload(const ObjectId& id) const {
- Map::const_iterator it = invalidations_.find(id);
- if (it == invalidations_.end()) {
- ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
- return 0;
- } else {
- return it->second.back().payload();
- }
- }
-
- bool IsUnknownVersion(const ObjectId& id) const {
- Map::const_iterator it = invalidations_.find(id);
- if (it == invalidations_.end()) {
- ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
- return false;
- } else {
- return it->second.back().is_unknown_version();
- }
- }
-
- bool StartsWithUnknownVersion(const ObjectId& id) const {
- Map::const_iterator it = invalidations_.find(id);
- if (it == invalidations_.end()) {
- ADD_FAILURE() << "No invalidations for ID " << ObjectIdToString(id);
- return false;
- } else {
- return it->second.front().is_unknown_version();
- }
- }
-
- InvalidatorState GetInvalidatorState() const {
- return state_;
- }
-
- DroppedInvalidationTracker* GetDropTrackerForObject(const ObjectId& id) {
- DropHandlers::iterator it = drop_handlers_.find(id);
- if (it == drop_handlers_.end()) {
- drop_handlers_.insert(
- std::make_pair(id, new DroppedInvalidationTracker(id)));
- return drop_handlers_.find(id)->second;
- } else {
- return it->second;
- }
- }
-
- void AcknowledgeNthInvalidation(const ObjectId& id, size_t n) {
- List& list = invalidations_[id];
- List::iterator it = list.begin() + n;
- it->Acknowledge();
- }
-
- void AcknowledgeAll(const ObjectId& id) {
- List& list = invalidations_[id];
- for (List::iterator it = list.begin(); it != list.end(); ++it) {
- it->Acknowledge();
- }
- }
-
- void DropNthInvalidation(const ObjectId& id, size_t n) {
- DroppedInvalidationTracker* drop_tracker = GetDropTrackerForObject(id);
- List& list = invalidations_[id];
- List::iterator it = list.begin() + n;
- it->Drop(drop_tracker);
- }
-
- void RecoverFromDropEvent(const ObjectId& id) {
- DroppedInvalidationTracker* drop_tracker = GetDropTrackerForObject(id);
- drop_tracker->RecordRecoveryFromDropEvent();
- }
-
- // SyncInvalidationListener::Delegate implementation.
- virtual void OnInvalidate(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE {
- ObjectIdSet ids = invalidation_map.GetObjectIds();
- for (ObjectIdSet::iterator it = ids.begin(); it != ids.end(); ++it) {
- const SingleObjectInvalidationSet& incoming =
- invalidation_map.ForObject(*it);
- List& list = invalidations_[*it];
- list.insert(list.end(), incoming.begin(), incoming.end());
- }
- }
-
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE {
- state_ = state;
- }
-
- private:
- typedef std::vector<Invalidation> List;
- typedef std::map<ObjectId, List, ObjectIdLessThan> Map;
- typedef std::map<ObjectId,
- DroppedInvalidationTracker*,
- ObjectIdLessThan> DropHandlers;
-
- Map invalidations_;
- InvalidatorState state_;
- DropHandlers drop_handlers_;
- STLValueDeleter<DropHandlers> drop_handlers_deleter_;
-};
-
-invalidation::InvalidationClient* CreateFakeInvalidationClient(
- FakeInvalidationClient** fake_invalidation_client,
- invalidation::SystemResources* resources,
- int client_type,
- const invalidation::string& client_name,
- const invalidation::string& application_name,
- invalidation::InvalidationListener* listener) {
- *fake_invalidation_client = new FakeInvalidationClient();
- return *fake_invalidation_client;
-}
-
-class SyncInvalidationListenerTest : public testing::Test {
- protected:
- SyncInvalidationListenerTest()
- : kBookmarksId_(kChromeSyncSourceId, "BOOKMARK"),
- kPreferencesId_(kChromeSyncSourceId, "PREFERENCE"),
- kExtensionsId_(kChromeSyncSourceId, "EXTENSION"),
- kAppsId_(kChromeSyncSourceId, "APP"),
- fake_push_client_(new notifier::FakePushClient()),
- fake_invalidation_client_(NULL),
- listener_(scoped_ptr<notifier::PushClient>(fake_push_client_)),
- fake_delegate_(&listener_) {}
-
- virtual void SetUp() {
- StartClient();
-
- registered_ids_.insert(kBookmarksId_);
- registered_ids_.insert(kPreferencesId_);
- listener_.UpdateRegisteredIds(registered_ids_);
- }
-
- virtual void TearDown() {
- StopClient();
- }
-
- // Restart client without re-registering IDs.
- void RestartClient() {
- StopClient();
- StartClient();
- }
-
- void StartClient() {
- fake_invalidation_client_ = NULL;
- listener_.Start(base::Bind(&CreateFakeInvalidationClient,
- &fake_invalidation_client_),
- kClientId, kClientInfo, kState,
- fake_tracker_.GetSavedInvalidations(),
- MakeWeakHandle(fake_tracker_.AsWeakPtr()),
- &fake_delegate_);
- DCHECK(fake_invalidation_client_);
- }
-
- void StopClient() {
- // listener_.StopForTest() stops the invalidation scheduler, which
- // deletes any pending tasks without running them. Some tasks
- // "run and delete" another task, so they must be run in order to
- // avoid leaking the inner task. listener_.StopForTest() does not
- // schedule any tasks, so it's both necessary and sufficient to
- // drain the task queue before calling it.
- FlushPendingWrites();
- fake_invalidation_client_ = NULL;
- listener_.StopForTest();
- }
-
- size_t GetInvalidationCount(const ObjectId& id) const {
- return fake_delegate_.GetInvalidationCount(id);
- }
-
- int64 GetVersion(const ObjectId& id) const {
- return fake_delegate_.GetVersion(id);
- }
-
- std::string GetPayload(const ObjectId& id) const {
- return fake_delegate_.GetPayload(id);
- }
-
- bool IsUnknownVersion(const ObjectId& id) const {
- return fake_delegate_.IsUnknownVersion(id);
- }
-
- bool StartsWithUnknownVersion(const ObjectId& id) const {
- return fake_delegate_.StartsWithUnknownVersion(id);
- }
-
- void AcknowledgeNthInvalidation(const ObjectId& id, size_t n) {
- fake_delegate_.AcknowledgeNthInvalidation(id, n);
- }
-
- void DropNthInvalidation(const ObjectId& id, size_t n) {
- return fake_delegate_.DropNthInvalidation(id, n);
- }
-
- void RecoverFromDropEvent(const ObjectId& id) {
- return fake_delegate_.RecoverFromDropEvent(id);
- }
-
- void AcknowledgeAll(const ObjectId& id) {
- fake_delegate_.AcknowledgeAll(id);
- }
-
- InvalidatorState GetInvalidatorState() const {
- return fake_delegate_.GetInvalidatorState();
- }
-
- std::string GetInvalidatorClientId() const {
- return fake_tracker_.GetInvalidatorClientId();
- }
-
- std::string GetBootstrapData() const {
- return fake_tracker_.GetBootstrapData();
- }
-
- UnackedInvalidationsMap GetSavedInvalidations() {
- // Allow any queued writes to go through first.
- FlushPendingWrites();
- return fake_tracker_.GetSavedInvalidations();
- }
-
- SingleObjectInvalidationSet GetSavedInvalidationsForType(const ObjectId& id) {
- const UnackedInvalidationsMap& saved_state = GetSavedInvalidations();
- UnackedInvalidationsMap::const_iterator it =
- saved_state.find(kBookmarksId_);
- if (it == saved_state.end()) {
- ADD_FAILURE() << "No state saved for ID " << ObjectIdToString(id);
- return SingleObjectInvalidationSet();
- }
- ObjectIdInvalidationMap map;
- it->second.ExportInvalidations(WeakHandle<AckHandler>(), &map);
- if (map.Empty()) {
- return SingleObjectInvalidationSet();
- } else {
- return map.ForObject(id);
- }
- }
-
- ObjectIdSet GetRegisteredIds() const {
- return fake_invalidation_client_->GetRegisteredIds();
- }
-
- // |payload| can be NULL.
- void FireInvalidate(const ObjectId& object_id,
- int64 version, const char* payload) {
- invalidation::Invalidation inv;
- if (payload) {
- inv = invalidation::Invalidation(object_id, version, payload);
- } else {
- inv = invalidation::Invalidation(object_id, version);
- }
- const AckHandle ack_handle("fakedata");
- fake_invalidation_client_->ClearAckedHandles();
- listener_.Invalidate(fake_invalidation_client_, inv, ack_handle);
- EXPECT_TRUE(fake_invalidation_client_->IsAckedHandle(ack_handle));
- }
-
- // |payload| can be NULL, but not |type_name|.
- void FireInvalidateUnknownVersion(const ObjectId& object_id) {
- const AckHandle ack_handle("fakedata_unknown");
- fake_invalidation_client_->ClearAckedHandles();
- listener_.InvalidateUnknownVersion(fake_invalidation_client_,
- object_id,
- ack_handle);
- EXPECT_TRUE(fake_invalidation_client_->IsAckedHandle(ack_handle));
- }
-
- void FireInvalidateAll() {
- const AckHandle ack_handle("fakedata_all");
- fake_invalidation_client_->ClearAckedHandles();
- listener_.InvalidateAll(fake_invalidation_client_, ack_handle);
- EXPECT_TRUE(fake_invalidation_client_->IsAckedHandle(ack_handle));
- }
-
- void WriteState(const std::string& new_state) {
- listener_.WriteState(new_state);
-
- // Pump message loop to trigger
- // InvalidationStateTracker::WriteState().
- FlushPendingWrites();
- }
-
- void FlushPendingWrites() {
- message_loop_.RunUntilIdle();
- }
-
- void EnableNotifications() {
- fake_push_client_->EnableNotifications();
- }
-
- void DisableNotifications(notifier::NotificationsDisabledReason reason) {
- fake_push_client_->DisableNotifications(reason);
- }
-
- const ObjectId kBookmarksId_;
- const ObjectId kPreferencesId_;
- const ObjectId kExtensionsId_;
- const ObjectId kAppsId_;
-
- ObjectIdSet registered_ids_;
-
- private:
- base::MessageLoop message_loop_;
- notifier::FakePushClient* const fake_push_client_;
-
- protected:
- // A derrived test needs direct access to this.
- FakeInvalidationStateTracker fake_tracker_;
-
- // Tests need to access these directly.
- FakeInvalidationClient* fake_invalidation_client_;
- SyncInvalidationListener listener_;
-
- private:
- FakeDelegate fake_delegate_;
-};
-
-// Write a new state to the client. It should propagate to the
-// tracker.
-TEST_F(SyncInvalidationListenerTest, WriteState) {
- WriteState(kNewState);
-
- EXPECT_EQ(kNewState, GetBootstrapData());
-}
-
-// Invalidation tests.
-
-// Fire an invalidation without a payload. It should be processed,
-// the payload should remain empty, and the version should be updated.
-TEST_F(SyncInvalidationListenerTest, InvalidateNoPayload) {
- const ObjectId& id = kBookmarksId_;
-
- FireInvalidate(id, kVersion1, NULL);
-
- ASSERT_EQ(1U, GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(kVersion1, GetVersion(id));
- EXPECT_EQ("", GetPayload(id));
-}
-
-// Fire an invalidation with an empty payload. It should be
-// processed, the payload should remain empty, and the version should
-// be updated.
-TEST_F(SyncInvalidationListenerTest, InvalidateEmptyPayload) {
- const ObjectId& id = kBookmarksId_;
-
- FireInvalidate(id, kVersion1, "");
-
- ASSERT_EQ(1U, GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(kVersion1, GetVersion(id));
- EXPECT_EQ("", GetPayload(id));
-}
-
-// Fire an invalidation with a payload. It should be processed, and
-// both the payload and the version should be updated.
-TEST_F(SyncInvalidationListenerTest, InvalidateWithPayload) {
- const ObjectId& id = kPreferencesId_;
-
- FireInvalidate(id, kVersion1, kPayload1);
-
- ASSERT_EQ(1U, GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(kVersion1, GetVersion(id));
- EXPECT_EQ(kPayload1, GetPayload(id));
-}
-
-// Fire ten invalidations in a row. All should be received.
-TEST_F(SyncInvalidationListenerTest, ManyInvalidations_NoDrop) {
- const int kRepeatCount = 10;
- const ObjectId& id = kPreferencesId_;
- int64 initial_version = kVersion1;
- for (int64 i = initial_version; i < initial_version + kRepeatCount; ++i) {
- FireInvalidate(id, i, kPayload1);
- }
- ASSERT_EQ(static_cast<size_t>(kRepeatCount), GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(kPayload1, GetPayload(id));
- EXPECT_EQ(initial_version + kRepeatCount - 1, GetVersion(id));
-}
-
-// Fire an invalidation for an unregistered object ID with a payload. It should
-// still be processed, and both the payload and the version should be updated.
-TEST_F(SyncInvalidationListenerTest, InvalidateBeforeRegistration_Simple) {
- const ObjectId kUnregisteredId(kChromeSyncSourceId, "unregistered");
- const ObjectId& id = kUnregisteredId;
- ObjectIdSet ids;
- ids.insert(id);
-
- EXPECT_EQ(0U, GetInvalidationCount(id));
-
- FireInvalidate(id, kVersion1, kPayload1);
-
- ASSERT_EQ(0U, GetInvalidationCount(id));
-
- EnableNotifications();
- listener_.Ready(fake_invalidation_client_);
- listener_.UpdateRegisteredIds(ids);
-
- ASSERT_EQ(1U, GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(kVersion1, GetVersion(id));
- EXPECT_EQ(kPayload1, GetPayload(id));
-}
-
-// Fire ten invalidations before an object registers. Some invalidations will
-// be dropped an replaced with an unknown version invalidation.
-TEST_F(SyncInvalidationListenerTest, InvalidateBeforeRegistration_Drop) {
- const int kRepeatCount =
- UnackedInvalidationSet::kMaxBufferedInvalidations + 1;
- const ObjectId kUnregisteredId(kChromeSyncSourceId, "unregistered");
- const ObjectId& id = kUnregisteredId;
- ObjectIdSet ids;
- ids.insert(id);
-
- EXPECT_EQ(0U, GetInvalidationCount(id));
-
- int64 initial_version = kVersion1;
- for (int64 i = initial_version; i < initial_version + kRepeatCount; ++i) {
- FireInvalidate(id, i, kPayload1);
- }
-
- EnableNotifications();
- listener_.Ready(fake_invalidation_client_);
- listener_.UpdateRegisteredIds(ids);
-
- ASSERT_EQ(UnackedInvalidationSet::kMaxBufferedInvalidations,
- GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(initial_version + kRepeatCount - 1, GetVersion(id));
- EXPECT_EQ(kPayload1, GetPayload(id));
- EXPECT_TRUE(StartsWithUnknownVersion(id));
-}
-
-// Fire an invalidation, then fire another one with a lower version. Both
-// should be received.
-TEST_F(SyncInvalidationListenerTest, InvalidateVersion) {
- const ObjectId& id = kPreferencesId_;
-
- FireInvalidate(id, kVersion2, kPayload2);
-
- ASSERT_EQ(1U, GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
- EXPECT_EQ(kVersion2, GetVersion(id));
- EXPECT_EQ(kPayload2, GetPayload(id));
-
- FireInvalidate(id, kVersion1, kPayload1);
-
- ASSERT_EQ(2U, GetInvalidationCount(id));
- ASSERT_FALSE(IsUnknownVersion(id));
-
- EXPECT_EQ(kVersion1, GetVersion(id));
- EXPECT_EQ(kPayload1, GetPayload(id));
-}
-
-// Fire an invalidation with an unknown version.
-TEST_F(SyncInvalidationListenerTest, InvalidateUnknownVersion) {
- const ObjectId& id = kBookmarksId_;
-
- FireInvalidateUnknownVersion(id);
-
- ASSERT_EQ(1U, GetInvalidationCount(id));
- EXPECT_TRUE(IsUnknownVersion(id));
-}
-
-// Fire an invalidation for all enabled IDs.
-TEST_F(SyncInvalidationListenerTest, InvalidateAll) {
- FireInvalidateAll();
-
- for (ObjectIdSet::const_iterator it = registered_ids_.begin();
- it != registered_ids_.end(); ++it) {
- ASSERT_EQ(1U, GetInvalidationCount(*it));
- EXPECT_TRUE(IsUnknownVersion(*it));
- }
-}
-
-// Test a simple scenario for multiple IDs.
-TEST_F(SyncInvalidationListenerTest, InvalidateMultipleIds) {
- FireInvalidate(kBookmarksId_, 3, NULL);
-
- ASSERT_EQ(1U, GetInvalidationCount(kBookmarksId_));
- ASSERT_FALSE(IsUnknownVersion(kBookmarksId_));
- EXPECT_EQ(3, GetVersion(kBookmarksId_));
- EXPECT_EQ("", GetPayload(kBookmarksId_));
-
- // kExtensionId is not registered, so the invalidation should not get through.
- FireInvalidate(kExtensionsId_, 2, NULL);
- ASSERT_EQ(0U, GetInvalidationCount(kExtensionsId_));
-}
-
-// Registration tests.
-
-// With IDs already registered, enable notifications then ready the
-// client. The IDs should be registered only after the client is
-// readied.
-TEST_F(SyncInvalidationListenerTest, RegisterEnableReady) {
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- EnableNotifications();
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// With IDs already registered, ready the client then enable
-// notifications. The IDs should be registered after the client is
-// readied.
-TEST_F(SyncInvalidationListenerTest, RegisterReadyEnable) {
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-
- EnableNotifications();
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// Unregister the IDs, enable notifications, re-register the IDs, then
-// ready the client. The IDs should be registered only after the
-// client is readied.
-TEST_F(SyncInvalidationListenerTest, EnableRegisterReady) {
- listener_.UpdateRegisteredIds(ObjectIdSet());
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- EnableNotifications();
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.UpdateRegisteredIds(registered_ids_);
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// Unregister the IDs, enable notifications, ready the client, then
-// re-register the IDs. The IDs should be registered only after the
-// client is readied.
-TEST_F(SyncInvalidationListenerTest, EnableReadyRegister) {
- listener_.UpdateRegisteredIds(ObjectIdSet());
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- EnableNotifications();
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.UpdateRegisteredIds(registered_ids_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// Unregister the IDs, ready the client, enable notifications, then
-// re-register the IDs. The IDs should be registered only after the
-// client is readied.
-TEST_F(SyncInvalidationListenerTest, ReadyEnableRegister) {
- listener_.UpdateRegisteredIds(ObjectIdSet());
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- EnableNotifications();
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.UpdateRegisteredIds(registered_ids_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// Unregister the IDs, ready the client, re-register the IDs, then
-// enable notifications. The IDs should be registered only after the
-// client is readied.
-//
-// This test is important: see http://crbug.com/139424.
-TEST_F(SyncInvalidationListenerTest, ReadyRegisterEnable) {
- listener_.UpdateRegisteredIds(ObjectIdSet());
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.UpdateRegisteredIds(registered_ids_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-
- EnableNotifications();
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// With IDs already registered, ready the client, restart the client,
-// then re-ready it. The IDs should still be registered.
-TEST_F(SyncInvalidationListenerTest, RegisterTypesPreserved) {
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-
- RestartClient();
-
- EXPECT_TRUE(GetRegisteredIds().empty());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(registered_ids_, GetRegisteredIds());
-}
-
-// Make sure that state is correctly purged from the local invalidation state
-// map cache when an ID is unregistered.
-TEST_F(SyncInvalidationListenerTest, UnregisterCleansUpStateMapCache) {
- const ObjectId& id = kBookmarksId_;
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_TRUE(GetSavedInvalidations().empty());
- FireInvalidate(id, 1, "hello");
- EXPECT_EQ(1U, GetSavedInvalidations().size());
- EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), id));
- FireInvalidate(kPreferencesId_, 2, "world");
- EXPECT_EQ(2U, GetSavedInvalidations().size());
-
- EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), id));
- EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), kPreferencesId_));
-
- ObjectIdSet ids;
- ids.insert(id);
- listener_.UpdateRegisteredIds(ids);
- EXPECT_EQ(1U, GetSavedInvalidations().size());
- EXPECT_TRUE(ContainsKey(GetSavedInvalidations(), id));
-}
-
-TEST_F(SyncInvalidationListenerTest, DuplicateInvaldiations_Simple) {
- const ObjectId& id = kBookmarksId_;
- listener_.Ready(fake_invalidation_client_);
-
- // Send a stream of invalidations, including two copies of the second.
- FireInvalidate(id, 1, "one");
- FireInvalidate(id, 2, "two");
- FireInvalidate(id, 3, "three");
- FireInvalidate(id, 2, "two");
-
- // Expect that the duplicate was discarded.
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- EXPECT_EQ(3U, list.GetSize());
- SingleObjectInvalidationSet::const_iterator it = list.begin();
- EXPECT_EQ(1, it->version());
- it++;
- EXPECT_EQ(2, it->version());
- it++;
- EXPECT_EQ(3, it->version());
-}
-
-TEST_F(SyncInvalidationListenerTest, DuplicateInvalidations_NearBufferLimit) {
- const size_t kPairsToSend = UnackedInvalidationSet::kMaxBufferedInvalidations;
- const ObjectId& id = kBookmarksId_;
- listener_.Ready(fake_invalidation_client_);
-
- // We will have enough buffer space in the state tracker for all these
- // invalidations only if duplicates are ignored.
- for (size_t i = 0; i < kPairsToSend; ++i) {
- FireInvalidate(id, i, "payload");
- FireInvalidate(id, i, "payload");
- }
-
- // Expect that the state map ignored duplicates.
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- EXPECT_EQ(kPairsToSend, list.GetSize());
- EXPECT_FALSE(list.begin()->is_unknown_version());
-
- // Expect that all invalidations (including duplicates) were emitted.
- EXPECT_EQ(kPairsToSend*2, GetInvalidationCount(id));
-
- // Acknowledge all invalidations to clear the internal state.
- AcknowledgeAll(id);
- EXPECT_TRUE(GetSavedInvalidationsForType(id).IsEmpty());
-}
-
-TEST_F(SyncInvalidationListenerTest, DuplicateInvalidations_UnknownVersion) {
- const ObjectId& id = kBookmarksId_;
- listener_.Ready(fake_invalidation_client_);
-
- FireInvalidateUnknownVersion(id);
- FireInvalidateUnknownVersion(id);
-
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- EXPECT_EQ(1U, list.GetSize());
- }
-
- // Acknowledge the second. There should be no effect on the stored list.
- ASSERT_EQ(2U, GetInvalidationCount(id));
- AcknowledgeNthInvalidation(id, 1);
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- EXPECT_EQ(1U, list.GetSize());
- }
-
- // Acknowledge the first. This should remove the invalidation from the list.
- ASSERT_EQ(2U, GetInvalidationCount(id));
- AcknowledgeNthInvalidation(id, 0);
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- EXPECT_EQ(0U, list.GetSize());
- }
-}
-
-// Make sure that acknowledgements erase items from the local store.
-TEST_F(SyncInvalidationListenerTest, AcknowledgementsCleanUpStateMapCache) {
- const ObjectId& id = kBookmarksId_;
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_TRUE(GetSavedInvalidations().empty());
- FireInvalidate(id, 10, "hello");
- FireInvalidate(id, 20, "world");
- FireInvalidateUnknownVersion(id);
-
- // Expect that all three invalidations have been saved to permanent storage.
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- ASSERT_EQ(3U, list.GetSize());
- EXPECT_TRUE(list.begin()->is_unknown_version());
- EXPECT_EQ(20, list.back().version());
- }
-
- // Acknowledge the second sent invaldiation (version 20) and verify it was
- // removed from storage.
- AcknowledgeNthInvalidation(id, 1);
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- ASSERT_EQ(2U, list.GetSize());
- EXPECT_TRUE(list.begin()->is_unknown_version());
- EXPECT_EQ(10, list.back().version());
- }
-
- // Acknowledge the last sent invalidation (unknown version) and verify it was
- // removed from storage.
- AcknowledgeNthInvalidation(id, 2);
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- ASSERT_EQ(1U, list.GetSize());
- EXPECT_FALSE(list.begin()->is_unknown_version());
- EXPECT_EQ(10, list.back().version());
- }
-}
-
-// Make sure that drops erase items from the local store.
-TEST_F(SyncInvalidationListenerTest, DropsCleanUpStateMapCache) {
- const ObjectId& id = kBookmarksId_;
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_TRUE(GetSavedInvalidations().empty());
- FireInvalidate(id, 10, "hello");
- FireInvalidate(id, 20, "world");
- FireInvalidateUnknownVersion(id);
-
- // Expect that all three invalidations have been saved to permanent storage.
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- ASSERT_EQ(3U, list.GetSize());
- EXPECT_TRUE(list.begin()->is_unknown_version());
- EXPECT_EQ(20, list.back().version());
- }
-
- // Drop the second sent invalidation (version 20) and verify it was removed
- // from storage. Also verify we still have an unknown version invalidation.
- DropNthInvalidation(id, 1);
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- ASSERT_EQ(2U, list.GetSize());
- EXPECT_TRUE(list.begin()->is_unknown_version());
- EXPECT_EQ(10, list.back().version());
- }
-
- // Drop the remaining invalidation. Verify an unknown version is all that
- // remains.
- DropNthInvalidation(id, 0);
- {
- SingleObjectInvalidationSet list = GetSavedInvalidationsForType(id);
- ASSERT_EQ(1U, list.GetSize());
- EXPECT_TRUE(list.begin()->is_unknown_version());
- }
-
- // Announce that the delegate has recovered from the drop. Verify no
- // invalidations remain saved.
- RecoverFromDropEvent(id);
- EXPECT_TRUE(GetSavedInvalidationsForType(id).IsEmpty());
-
- RecoverFromDropEvent(id);
-}
-
-// Without readying the client, disable notifications, then enable
-// them. The listener should still think notifications are disabled.
-TEST_F(SyncInvalidationListenerTest, EnableNotificationsNotReady) {
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR,
- GetInvalidatorState());
-
- DisableNotifications(
- notifier::TRANSIENT_NOTIFICATION_ERROR);
-
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, GetInvalidatorState());
-
- DisableNotifications(notifier::NOTIFICATION_CREDENTIALS_REJECTED);
-
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, GetInvalidatorState());
-
- EnableNotifications();
-
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, GetInvalidatorState());
-}
-
-// Enable notifications then Ready the invalidation client. The
-// delegate should then be ready.
-TEST_F(SyncInvalidationListenerTest, EnableNotificationsThenReady) {
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, GetInvalidatorState());
-
- EnableNotifications();
-
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, GetInvalidatorState());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
-}
-
-// Ready the invalidation client then enable notifications. The
-// delegate should then be ready.
-TEST_F(SyncInvalidationListenerTest, ReadyThenEnableNotifications) {
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, GetInvalidatorState());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(TRANSIENT_INVALIDATION_ERROR, GetInvalidatorState());
-
- EnableNotifications();
-
- EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
-}
-
-// Enable notifications and ready the client. Then disable
-// notifications with an auth error and re-enable notifications. The
-// delegate should go into an auth error mode and then back out.
-TEST_F(SyncInvalidationListenerTest, PushClientAuthError) {
- EnableNotifications();
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
-
- DisableNotifications(
- notifier::NOTIFICATION_CREDENTIALS_REJECTED);
-
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, GetInvalidatorState());
-
- EnableNotifications();
-
- EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
-}
-
-// Enable notifications and ready the client. Then simulate an auth
-// error from the invalidation client. Simulate some notification
-// events, then re-ready the client. The delegate should go into an
-// auth error mode and come out of it only after the client is ready.
-TEST_F(SyncInvalidationListenerTest, InvalidationClientAuthError) {
- EnableNotifications();
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
-
- listener_.InformError(
- fake_invalidation_client_,
- invalidation::ErrorInfo(
- invalidation::ErrorReason::AUTH_FAILURE,
- false /* is_transient */,
- "auth error",
- invalidation::ErrorContext()));
-
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, GetInvalidatorState());
-
- DisableNotifications(notifier::TRANSIENT_NOTIFICATION_ERROR);
-
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, GetInvalidatorState());
-
- DisableNotifications(notifier::TRANSIENT_NOTIFICATION_ERROR);
-
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, GetInvalidatorState());
-
- EnableNotifications();
-
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, GetInvalidatorState());
-
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_EQ(INVALIDATIONS_ENABLED, GetInvalidatorState());
-}
-
-// A variant of SyncInvalidationListenerTest that starts with some initial
-// state. We make not attempt to abstract away the contents of this state. The
-// tests that make use of this harness depend on its implementation details.
-class SyncInvalidationListenerTest_WithInitialState
- : public SyncInvalidationListenerTest {
- public:
- virtual void SetUp() {
- UnackedInvalidationSet bm_state(kBookmarksId_);
- UnackedInvalidationSet ext_state(kExtensionsId_);
-
- Invalidation bm_unknown = Invalidation::InitUnknownVersion(kBookmarksId_);
- Invalidation bm_v100 = Invalidation::Init(kBookmarksId_, 100, "hundred");
- bm_state.Add(bm_unknown);
- bm_state.Add(bm_v100);
-
- Invalidation ext_v10 = Invalidation::Init(kExtensionsId_, 10, "ten");
- Invalidation ext_v20 = Invalidation::Init(kExtensionsId_, 20, "twenty");
- ext_state.Add(ext_v10);
- ext_state.Add(ext_v20);
-
- initial_state.insert(std::make_pair(kBookmarksId_, bm_state));
- initial_state.insert(std::make_pair(kExtensionsId_, ext_state));
-
- fake_tracker_.SetSavedInvalidations(initial_state);
-
- SyncInvalidationListenerTest::SetUp();
- }
-
- UnackedInvalidationsMap initial_state;
-};
-
-// Verify that saved invalidations are forwarded when handlers register.
-TEST_F(SyncInvalidationListenerTest_WithInitialState,
- ReceiveSavedInvalidations) {
- EnableNotifications();
- listener_.Ready(fake_invalidation_client_);
-
- EXPECT_THAT(initial_state, test_util::Eq(GetSavedInvalidations()));
-
- ASSERT_EQ(2U, GetInvalidationCount(kBookmarksId_));
- EXPECT_EQ(100, GetVersion(kBookmarksId_));
-
- ASSERT_EQ(0U, GetInvalidationCount(kExtensionsId_));
-
- FireInvalidate(kExtensionsId_, 30, "thirty");
-
- ObjectIdSet ids = GetRegisteredIds();
- ids.insert(kExtensionsId_);
- listener_.UpdateRegisteredIds(ids);
-
- ASSERT_EQ(3U, GetInvalidationCount(kExtensionsId_));
- EXPECT_EQ(30, GetVersion(kExtensionsId_));
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/sync_system_resources.cc b/chromium/sync/notifier/sync_system_resources.cc
deleted file mode 100644
index 99e3ee5ea5d..00000000000
--- a/chromium/sync/notifier/sync_system_resources.cc
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/sync_system_resources.h"
-
-#include <cstdlib>
-#include <cstring>
-#include <string>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/stl_util.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "google/cacheinvalidation/client_gateway.pb.h"
-#include "google/cacheinvalidation/deps/callback.h"
-#include "google/cacheinvalidation/include/types.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace syncer {
-
-SyncLogger::SyncLogger() {}
-SyncLogger::~SyncLogger() {}
-
-void SyncLogger::Log(LogLevel level, const char* file, int line,
- const char* format, ...) {
- logging::LogSeverity log_severity = -2; // VLOG(2)
- bool emit_log = false;
- switch (level) {
- case FINE_LEVEL:
- log_severity = -2; // VLOG(2)
- emit_log = VLOG_IS_ON(2);
- break;
- case INFO_LEVEL:
- log_severity = -1; // VLOG(1)
- emit_log = VLOG_IS_ON(1);
- break;
- case WARNING_LEVEL:
- log_severity = logging::LOG_WARNING;
- emit_log = LOG_IS_ON(WARNING);
- break;
- case SEVERE_LEVEL:
- log_severity = logging::LOG_ERROR;
- emit_log = LOG_IS_ON(ERROR);
- break;
- }
- if (emit_log) {
- va_list ap;
- va_start(ap, format);
- std::string result;
- base::StringAppendV(&result, format, ap);
- logging::LogMessage(file, line, log_severity).stream() << result;
- va_end(ap);
- }
-}
-
-void SyncLogger::SetSystemResources(invalidation::SystemResources* resources) {
- // Do nothing.
-}
-
-SyncInvalidationScheduler::SyncInvalidationScheduler()
- : created_on_loop_(base::MessageLoop::current()),
- is_started_(false),
- is_stopped_(false),
- weak_factory_(this) {
- CHECK(created_on_loop_);
-}
-
-SyncInvalidationScheduler::~SyncInvalidationScheduler() {
- CHECK_EQ(created_on_loop_, base::MessageLoop::current());
- CHECK(is_stopped_);
-}
-
-void SyncInvalidationScheduler::Start() {
- CHECK_EQ(created_on_loop_, base::MessageLoop::current());
- CHECK(!is_started_);
- is_started_ = true;
- is_stopped_ = false;
- weak_factory_.InvalidateWeakPtrs();
-}
-
-void SyncInvalidationScheduler::Stop() {
- CHECK_EQ(created_on_loop_, base::MessageLoop::current());
- is_stopped_ = true;
- is_started_ = false;
- weak_factory_.InvalidateWeakPtrs();
- STLDeleteElements(&posted_tasks_);
- posted_tasks_.clear();
-}
-
-void SyncInvalidationScheduler::Schedule(invalidation::TimeDelta delay,
- invalidation::Closure* task) {
- DCHECK(invalidation::IsCallbackRepeatable(task));
- CHECK_EQ(created_on_loop_, base::MessageLoop::current());
-
- if (!is_started_) {
- delete task;
- return;
- }
-
- posted_tasks_.insert(task);
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE, base::Bind(&SyncInvalidationScheduler::RunPostedTask,
- weak_factory_.GetWeakPtr(), task),
- delay);
-}
-
-bool SyncInvalidationScheduler::IsRunningOnThread() const {
- return created_on_loop_ == base::MessageLoop::current();
-}
-
-invalidation::Time SyncInvalidationScheduler::GetCurrentTime() const {
- CHECK_EQ(created_on_loop_, base::MessageLoop::current());
- return base::Time::Now();
-}
-
-void SyncInvalidationScheduler::SetSystemResources(
- invalidation::SystemResources* resources) {
- // Do nothing.
-}
-
-void SyncInvalidationScheduler::RunPostedTask(invalidation::Closure* task) {
- CHECK_EQ(created_on_loop_, base::MessageLoop::current());
- task->Run();
- posted_tasks_.erase(task);
- delete task;
-}
-
-SyncNetworkChannel::SyncNetworkChannel()
- : invalidator_state_(DEFAULT_INVALIDATION_ERROR),
- scheduling_hash_(0) {
-}
-
-SyncNetworkChannel::~SyncNetworkChannel() {
- STLDeleteElements(&network_status_receivers_);
-}
-
-void SyncNetworkChannel::SendMessage(const std::string& outgoing_message) {
- std::string encoded_message;
- EncodeMessage(&encoded_message, outgoing_message, service_context_,
- scheduling_hash_);
- SendEncodedMessage(encoded_message);
-}
-
-void SyncNetworkChannel::SetMessageReceiver(
- invalidation::MessageCallback* incoming_receiver) {
- incoming_receiver_.reset(incoming_receiver);
-}
-
-void SyncNetworkChannel::AddNetworkStatusReceiver(
- invalidation::NetworkStatusCallback* network_status_receiver) {
- network_status_receiver->Run(invalidator_state_ == INVALIDATIONS_ENABLED);
- network_status_receivers_.push_back(network_status_receiver);
-}
-
-void SyncNetworkChannel::SetSystemResources(
- invalidation::SystemResources* resources) {
- // Do nothing.
-}
-
-void SyncNetworkChannel::AddObserver(Observer* observer) {
- observers_.AddObserver(observer);
-}
-
-void SyncNetworkChannel::RemoveObserver(Observer* observer) {
- observers_.RemoveObserver(observer);
-}
-
-const std::string& SyncNetworkChannel::GetServiceContextForTest() const {
- return service_context_;
-}
-
-int64 SyncNetworkChannel::GetSchedulingHashForTest() const {
- return scheduling_hash_;
-}
-
-std::string SyncNetworkChannel::EncodeMessageForTest(
- const std::string& message, const std::string& service_context,
- int64 scheduling_hash) {
- std::string encoded_message;
- EncodeMessage(&encoded_message, message, service_context, scheduling_hash);
- return encoded_message;
-}
-
-bool SyncNetworkChannel::DecodeMessageForTest(
- const std::string& data,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash) {
- return DecodeMessage(data, message, service_context, scheduling_hash);
-}
-
-void SyncNetworkChannel::NotifyStateChange(InvalidatorState invalidator_state) {
- // Remember state for future NetworkStatusReceivers.
- invalidator_state_ = invalidator_state;
- // Notify NetworkStatusReceivers in cacheinvalidation.
- for (NetworkStatusReceiverList::const_iterator it =
- network_status_receivers_.begin();
- it != network_status_receivers_.end(); ++it) {
- (*it)->Run(invalidator_state_ == INVALIDATIONS_ENABLED);
- }
- // Notify observers.
- FOR_EACH_OBSERVER(Observer, observers_,
- OnNetworkChannelStateChanged(invalidator_state_));
-}
-
-void SyncNetworkChannel::DeliverIncomingMessage(const std::string& data) {
- if (!incoming_receiver_) {
- DLOG(ERROR) << "No receiver for incoming notification";
- return;
- }
- std::string message;
- if (!DecodeMessage(data,
- &message, &service_context_, &scheduling_hash_)) {
- DLOG(ERROR) << "Could not parse ClientGatewayMessage";
- return;
- }
- incoming_receiver_->Run(message);
-}
-
-void SyncNetworkChannel::EncodeMessage(
- std::string* encoded_message,
- const std::string& message,
- const std::string& service_context,
- int64 scheduling_hash) {
- ipc::invalidation::ClientGatewayMessage envelope;
- envelope.set_is_client_to_server(true);
- if (!service_context.empty()) {
- envelope.set_service_context(service_context);
- envelope.set_rpc_scheduling_hash(scheduling_hash);
- }
- envelope.set_network_message(message);
- envelope.SerializeToString(encoded_message);
-}
-
-
-bool SyncNetworkChannel::DecodeMessage(
- const std::string& data,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash) {
- ipc::invalidation::ClientGatewayMessage envelope;
- if (!envelope.ParseFromString(data)) {
- return false;
- }
- *message = envelope.network_message();
- if (envelope.has_service_context()) {
- *service_context = envelope.service_context();
- }
- if (envelope.has_rpc_scheduling_hash()) {
- *scheduling_hash = envelope.rpc_scheduling_hash();
- }
- return true;
-}
-
-
-SyncStorage::SyncStorage(StateWriter* state_writer,
- invalidation::Scheduler* scheduler)
- : state_writer_(state_writer),
- scheduler_(scheduler) {
- DCHECK(state_writer_);
- DCHECK(scheduler_);
-}
-
-SyncStorage::~SyncStorage() {}
-
-void SyncStorage::WriteKey(const std::string& key, const std::string& value,
- invalidation::WriteKeyCallback* done) {
- CHECK(state_writer_);
- // TODO(ghc): actually write key,value associations, and don't invoke the
- // callback until the operation completes.
- state_writer_->WriteState(value);
- cached_state_ = value;
- // According to the cache invalidation API folks, we can do this as
- // long as we make sure to clear the persistent state that we start
- // up the cache invalidation client with. However, we musn't do it
- // right away, as we may be called under a lock that the callback
- // uses.
- scheduler_->Schedule(
- invalidation::Scheduler::NoDelay(),
- invalidation::NewPermanentCallback(
- this, &SyncStorage::RunAndDeleteWriteKeyCallback,
- done));
-}
-
-void SyncStorage::ReadKey(const std::string& key,
- invalidation::ReadKeyCallback* done) {
- DCHECK(scheduler_->IsRunningOnThread()) << "not running on scheduler thread";
- RunAndDeleteReadKeyCallback(done, cached_state_);
-}
-
-void SyncStorage::DeleteKey(const std::string& key,
- invalidation::DeleteKeyCallback* done) {
- // TODO(ghc): Implement.
- LOG(WARNING) << "ignoring call to DeleteKey(" << key << ", callback)";
-}
-
-void SyncStorage::ReadAllKeys(invalidation::ReadAllKeysCallback* done) {
- // TODO(ghc): Implement.
- LOG(WARNING) << "ignoring call to ReadAllKeys(callback)";
-}
-
-void SyncStorage::SetSystemResources(
- invalidation::SystemResources* resources) {
- // Do nothing.
-}
-
-void SyncStorage::RunAndDeleteWriteKeyCallback(
- invalidation::WriteKeyCallback* callback) {
- callback->Run(
- invalidation::Status(invalidation::Status::SUCCESS, std::string()));
- delete callback;
-}
-
-void SyncStorage::RunAndDeleteReadKeyCallback(
- invalidation::ReadKeyCallback* callback, const std::string& value) {
- callback->Run(std::make_pair(
- invalidation::Status(invalidation::Status::SUCCESS, std::string()),
- value));
- delete callback;
-}
-
-SyncSystemResources::SyncSystemResources(
- SyncNetworkChannel* sync_network_channel,
- StateWriter* state_writer)
- : is_started_(false),
- logger_(new SyncLogger()),
- internal_scheduler_(new SyncInvalidationScheduler()),
- listener_scheduler_(new SyncInvalidationScheduler()),
- storage_(new SyncStorage(state_writer, internal_scheduler_.get())),
- sync_network_channel_(sync_network_channel) {
-}
-
-SyncSystemResources::~SyncSystemResources() {
- Stop();
-}
-
-void SyncSystemResources::Start() {
- internal_scheduler_->Start();
- listener_scheduler_->Start();
- is_started_ = true;
-}
-
-void SyncSystemResources::Stop() {
- internal_scheduler_->Stop();
- listener_scheduler_->Stop();
-}
-
-bool SyncSystemResources::IsStarted() const {
- return is_started_;
-}
-
-void SyncSystemResources::set_platform(const std::string& platform) {
- platform_ = platform;
-}
-
-std::string SyncSystemResources::platform() const {
- return platform_;
-}
-
-SyncLogger* SyncSystemResources::logger() {
- return logger_.get();
-}
-
-SyncStorage* SyncSystemResources::storage() {
- return storage_.get();
-}
-
-SyncNetworkChannel* SyncSystemResources::network() {
- return sync_network_channel_;
-}
-
-SyncInvalidationScheduler* SyncSystemResources::internal_scheduler() {
- return internal_scheduler_.get();
-}
-
-SyncInvalidationScheduler* SyncSystemResources::listener_scheduler() {
- return listener_scheduler_.get();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/sync_system_resources.h b/chromium/sync/notifier/sync_system_resources.h
deleted file mode 100644
index 3ddc7087c94..00000000000
--- a/chromium/sync/notifier/sync_system_resources.h
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Simple system resources class that uses the current message loop
-// for scheduling. Assumes the current message loop is already
-// running.
-
-#ifndef SYNC_NOTIFIER_SYNC_SYSTEM_RESOURCES_H_
-#define SYNC_NOTIFIER_SYNC_SYSTEM_RESOURCES_H_
-
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/threading/non_thread_safe.h"
-#include "google/cacheinvalidation/include/system-resources.h"
-#include "sync/base/sync_export.h"
-#include "sync/notifier/invalidator_state.h"
-#include "sync/notifier/state_writer.h"
-
-namespace syncer {
-
-class SyncLogger : public invalidation::Logger {
- public:
- SyncLogger();
-
- virtual ~SyncLogger();
-
- // invalidation::Logger implementation.
- virtual void Log(LogLevel level, const char* file, int line,
- const char* format, ...) OVERRIDE;
-
- virtual void SetSystemResources(
- invalidation::SystemResources* resources) OVERRIDE;
-};
-
-class SyncInvalidationScheduler : public invalidation::Scheduler {
- public:
- SyncInvalidationScheduler();
-
- virtual ~SyncInvalidationScheduler();
-
- // Start and stop the scheduler.
- void Start();
- void Stop();
-
- // invalidation::Scheduler implementation.
- virtual void Schedule(invalidation::TimeDelta delay,
- invalidation::Closure* task) OVERRIDE;
-
- virtual bool IsRunningOnThread() const OVERRIDE;
-
- virtual invalidation::Time GetCurrentTime() const OVERRIDE;
-
- virtual void SetSystemResources(
- invalidation::SystemResources* resources) OVERRIDE;
-
- private:
- // Runs the task, deletes it, and removes it from |posted_tasks_|.
- void RunPostedTask(invalidation::Closure* task);
-
- // Holds all posted tasks that have not yet been run.
- std::set<invalidation::Closure*> posted_tasks_;
-
- const base::MessageLoop* created_on_loop_;
- bool is_started_;
- bool is_stopped_;
-
- base::WeakPtrFactory<SyncInvalidationScheduler> weak_factory_;
-};
-
-// SyncNetworkChannel implements common tasks needed to interact with
-// invalidation library:
-// - registering message and network status callbacks
-// - Encoding/Decoding message to ClientGatewayMessage
-// - notifying observers about network channel state change
-// Implementation of particular network protocol should implement
-// SendEncodedMessage and call NotifyStateChange and DeliverIncomingMessage.
-class SYNC_EXPORT_PRIVATE SyncNetworkChannel
- : public NON_EXPORTED_BASE(invalidation::NetworkChannel) {
- public:
- class Observer {
- public:
- // Called when network channel state changes. Possible states are:
- // - INVALIDATIONS_ENABLED : connection is established and working
- // - TRANSIENT_INVALIDATION_ERROR : no network, connection lost, etc.
- // - INVALIDATION_CREDENTIALS_REJECTED : Issues with auth token
- virtual void OnNetworkChannelStateChanged(
- InvalidatorState invalidator_state) = 0;
- };
-
- SyncNetworkChannel();
-
- virtual ~SyncNetworkChannel();
-
- // invalidation::NetworkChannel implementation.
- virtual void SendMessage(const std::string& outgoing_message) OVERRIDE;
- virtual void SetMessageReceiver(
- invalidation::MessageCallback* incoming_receiver) OVERRIDE;
- virtual void AddNetworkStatusReceiver(
- invalidation::NetworkStatusCallback* network_status_receiver) OVERRIDE;
- virtual void SetSystemResources(
- invalidation::SystemResources* resources) OVERRIDE;
-
- // Subclass should implement SendEncodedMessage to send encoded message to
- // Tango over network.
- virtual void SendEncodedMessage(const std::string& encoded_message) = 0;
-
- // Classes interested in network channel state changes should implement
- // SyncNetworkChannel::Observer and register here.
- void AddObserver(Observer* observer);
- void RemoveObserver(Observer* observer);
-
- const std::string& GetServiceContextForTest() const;
-
- int64 GetSchedulingHashForTest() const;
-
- static std::string EncodeMessageForTest(
- const std::string& message,
- const std::string& service_context,
- int64 scheduling_hash);
-
- static bool DecodeMessageForTest(
- const std::string& notification,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash);
-
- protected:
- // Subclass should notify about connection state through NotifyStateChange.
- void NotifyStateChange(InvalidatorState invalidator_state);
- // Subclass should call DeliverIncomingMessage for message to reach
- // invalidations library.
- void DeliverIncomingMessage(const std::string& message);
-
- private:
- typedef std::vector<invalidation::NetworkStatusCallback*>
- NetworkStatusReceiverList;
-
- static void EncodeMessage(
- std::string* encoded_message,
- const std::string& message,
- const std::string& service_context,
- int64 scheduling_hash);
-
- static bool DecodeMessage(
- const std::string& data,
- std::string* message,
- std::string* service_context,
- int64* scheduling_hash);
-
- // Callbacks into invalidation library
- scoped_ptr<invalidation::MessageCallback> incoming_receiver_;
- NetworkStatusReceiverList network_status_receivers_;
-
- // Last channel state for new network status receivers.
- InvalidatorState invalidator_state_;
-
- ObserverList<Observer> observers_;
-
- std::string service_context_;
- int64 scheduling_hash_;
-};
-
-class SyncStorage : public invalidation::Storage {
- public:
- SyncStorage(StateWriter* state_writer, invalidation::Scheduler* scheduler);
-
- virtual ~SyncStorage();
-
- void SetInitialState(const std::string& value) {
- cached_state_ = value;
- }
-
- // invalidation::Storage implementation.
- virtual void WriteKey(const std::string& key, const std::string& value,
- invalidation::WriteKeyCallback* done) OVERRIDE;
-
- virtual void ReadKey(const std::string& key,
- invalidation::ReadKeyCallback* done) OVERRIDE;
-
- virtual void DeleteKey(const std::string& key,
- invalidation::DeleteKeyCallback* done) OVERRIDE;
-
- virtual void ReadAllKeys(
- invalidation::ReadAllKeysCallback* key_callback) OVERRIDE;
-
- virtual void SetSystemResources(
- invalidation::SystemResources* resources) OVERRIDE;
-
- private:
- // Runs the given storage callback with SUCCESS status and deletes it.
- void RunAndDeleteWriteKeyCallback(
- invalidation::WriteKeyCallback* callback);
-
- // Runs the given callback with the given value and deletes it.
- void RunAndDeleteReadKeyCallback(
- invalidation::ReadKeyCallback* callback, const std::string& value);
-
- StateWriter* state_writer_;
- invalidation::Scheduler* scheduler_;
- std::string cached_state_;
-};
-
-class SYNC_EXPORT_PRIVATE SyncSystemResources
- : public NON_EXPORTED_BASE(invalidation::SystemResources) {
- public:
- SyncSystemResources(SyncNetworkChannel* sync_network_channel,
- StateWriter* state_writer);
-
- virtual ~SyncSystemResources();
-
- // invalidation::SystemResources implementation.
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual bool IsStarted() const OVERRIDE;
- virtual void set_platform(const std::string& platform);
- virtual std::string platform() const OVERRIDE;
- virtual SyncLogger* logger() OVERRIDE;
- virtual SyncStorage* storage() OVERRIDE;
- virtual SyncNetworkChannel* network() OVERRIDE;
- virtual SyncInvalidationScheduler* internal_scheduler() OVERRIDE;
- virtual SyncInvalidationScheduler* listener_scheduler() OVERRIDE;
-
- private:
- bool is_started_;
- std::string platform_;
- scoped_ptr<SyncLogger> logger_;
- scoped_ptr<SyncInvalidationScheduler> internal_scheduler_;
- scoped_ptr<SyncInvalidationScheduler> listener_scheduler_;
- scoped_ptr<SyncStorage> storage_;
- // sync_network_channel_ is owned by SyncInvalidationListener.
- SyncNetworkChannel* sync_network_channel_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_SYNC_SYSTEM_RESOURCES_H_
diff --git a/chromium/sync/notifier/sync_system_resources_unittest.cc b/chromium/sync/notifier/sync_system_resources_unittest.cc
deleted file mode 100644
index f63e90621a0..00000000000
--- a/chromium/sync/notifier/sync_system_resources_unittest.cc
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/sync_system_resources.h"
-
-#include <string>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/callback.h"
-#include "base/message_loop/message_loop.h"
-
-#include "google/cacheinvalidation/include/types.h"
-#include "jingle/notifier/listener/fake_push_client.h"
-#include "sync/notifier/push_client_channel.h"
-#include "sync/notifier/state_writer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-using ::testing::_;
-using ::testing::SaveArg;
-
-class MockStateWriter : public StateWriter {
- public:
- MOCK_METHOD1(WriteState, void(const std::string&));
-};
-
-class MockClosure {
- public:
- MOCK_CONST_METHOD0(Run, void(void));
- base::Closure* CreateClosure() {
- return new base::Closure(
- base::Bind(&MockClosure::Run, base::Unretained(this)));
- }
-};
-
-class MockStorageCallback {
- public:
- MOCK_CONST_METHOD1(Run, void(invalidation::Status));
- base::Callback<void(invalidation::Status)>* CreateCallback() {
- return new base::Callback<void(invalidation::Status)>(
- base::Bind(&MockStorageCallback::Run, base::Unretained(this)));
- }
-};
-
-class SyncSystemResourcesTest : public testing::Test {
- protected:
- SyncSystemResourcesTest()
- : push_client_channel_(
- scoped_ptr<notifier::PushClient>(new notifier::FakePushClient())),
- sync_system_resources_(&push_client_channel_, &mock_state_writer_) {}
-
- virtual ~SyncSystemResourcesTest() {}
-
- void ScheduleShouldNotRun() {
- {
- // Owned by ScheduleImmediately.
- MockClosure mock_closure;
- base::Closure* should_not_run = mock_closure.CreateClosure();
- EXPECT_CALL(mock_closure, Run()).Times(0);
- sync_system_resources_.internal_scheduler()->Schedule(
- invalidation::Scheduler::NoDelay(), should_not_run);
- }
- {
- // Owned by ScheduleOnListenerThread.
- MockClosure mock_closure;
- base::Closure* should_not_run = mock_closure.CreateClosure();
- EXPECT_CALL(mock_closure, Run()).Times(0);
- sync_system_resources_.listener_scheduler()->Schedule(
- invalidation::Scheduler::NoDelay(), should_not_run);
- }
- {
- // Owned by ScheduleWithDelay.
- MockClosure mock_closure;
- base::Closure* should_not_run = mock_closure.CreateClosure();
- EXPECT_CALL(mock_closure, Run()).Times(0);
- sync_system_resources_.internal_scheduler()->Schedule(
- invalidation::TimeDelta::FromSeconds(0), should_not_run);
- }
- }
-
- // Needed by |sync_system_resources_|.
- base::MessageLoop message_loop_;
- MockStateWriter mock_state_writer_;
- PushClientChannel push_client_channel_;
- SyncSystemResources sync_system_resources_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SyncSystemResourcesTest);
-};
-
-// Make sure current_time() doesn't crash or leak.
-TEST_F(SyncSystemResourcesTest, CurrentTime) {
- invalidation::Time current_time =
- sync_system_resources_.internal_scheduler()->GetCurrentTime();
- DVLOG(1) << "current_time returned: " << current_time.ToInternalValue();
-}
-
-// Make sure Log() doesn't crash or leak.
-TEST_F(SyncSystemResourcesTest, Log) {
- sync_system_resources_.logger()->Log(SyncLogger::INFO_LEVEL,
- __FILE__, __LINE__, "%s %d",
- "test string", 5);
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleBeforeStart) {
- ScheduleShouldNotRun();
- sync_system_resources_.Start();
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleAfterStop) {
- sync_system_resources_.Start();
- sync_system_resources_.Stop();
- ScheduleShouldNotRun();
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleAndStop) {
- sync_system_resources_.Start();
- ScheduleShouldNotRun();
- sync_system_resources_.Stop();
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleAndDestroy) {
- sync_system_resources_.Start();
- ScheduleShouldNotRun();
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleImmediately) {
- sync_system_resources_.Start();
- MockClosure mock_closure;
- EXPECT_CALL(mock_closure, Run());
- sync_system_resources_.internal_scheduler()->Schedule(
- invalidation::Scheduler::NoDelay(), mock_closure.CreateClosure());
- message_loop_.RunUntilIdle();
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleOnListenerThread) {
- sync_system_resources_.Start();
- MockClosure mock_closure;
- EXPECT_CALL(mock_closure, Run());
- sync_system_resources_.listener_scheduler()->Schedule(
- invalidation::Scheduler::NoDelay(), mock_closure.CreateClosure());
- EXPECT_TRUE(
- sync_system_resources_.internal_scheduler()->IsRunningOnThread());
- message_loop_.RunUntilIdle();
-}
-
-TEST_F(SyncSystemResourcesTest, ScheduleWithZeroDelay) {
- sync_system_resources_.Start();
- MockClosure mock_closure;
- EXPECT_CALL(mock_closure, Run());
- sync_system_resources_.internal_scheduler()->Schedule(
- invalidation::TimeDelta::FromSeconds(0), mock_closure.CreateClosure());
- message_loop_.RunUntilIdle();
-}
-
-// TODO(akalin): Figure out how to test with a non-zero delay.
-
-TEST_F(SyncSystemResourcesTest, WriteState) {
- sync_system_resources_.Start();
- EXPECT_CALL(mock_state_writer_, WriteState(_));
- // Owned by WriteState.
- MockStorageCallback mock_storage_callback;
- invalidation::Status results(invalidation::Status::PERMANENT_FAILURE,
- "fake-failure");
- EXPECT_CALL(mock_storage_callback, Run(_))
- .WillOnce(SaveArg<0>(&results));
- sync_system_resources_.storage()->WriteKey(
- std::string(), "state", mock_storage_callback.CreateCallback());
- message_loop_.RunUntilIdle();
- EXPECT_EQ(invalidation::Status(invalidation::Status::SUCCESS, std::string()),
- results);
-}
-
-class TestSyncNetworkChannel : public SyncNetworkChannel {
- public:
- TestSyncNetworkChannel() {}
- virtual ~TestSyncNetworkChannel() {}
-
- using SyncNetworkChannel::NotifyStateChange;
- using SyncNetworkChannel::DeliverIncomingMessage;
-
- virtual void SendEncodedMessage(const std::string& encoded_message) OVERRIDE {
- last_encoded_message_ = encoded_message;
- }
-
- std::string last_encoded_message_;
-};
-
-class SyncNetworkChannelTest
- : public testing::Test,
- public SyncNetworkChannel::Observer {
- protected:
- SyncNetworkChannelTest()
- : last_invalidator_state_(DEFAULT_INVALIDATION_ERROR),
- connected_(false) {
- network_channel_.AddObserver(this);
- network_channel_.SetMessageReceiver(
- invalidation::NewPermanentCallback(
- this, &SyncNetworkChannelTest::OnIncomingMessage));
- network_channel_.AddNetworkStatusReceiver(
- invalidation::NewPermanentCallback(
- this, &SyncNetworkChannelTest::OnNetworkStatusChange));
- }
-
- virtual ~SyncNetworkChannelTest() {
- network_channel_.RemoveObserver(this);
- }
-
- virtual void OnNetworkChannelStateChanged(
- InvalidatorState invalidator_state) OVERRIDE {
- last_invalidator_state_ = invalidator_state;
- }
-
- void OnIncomingMessage(std::string incoming_message) {
- last_message_ = incoming_message;
- }
-
- void OnNetworkStatusChange(bool connected) {
- connected_ = connected;
- }
-
- TestSyncNetworkChannel network_channel_;
- InvalidatorState last_invalidator_state_;
- std::string last_message_;
- bool connected_;
-};
-
-const char kMessage[] = "message";
-const char kServiceContext[] = "service context";
-const int64 kSchedulingHash = 100;
-
-// Encode a message with some context and then decode it. The decoded info
-// should match the original info.
-TEST_F(SyncNetworkChannelTest, EncodeDecode) {
- const std::string& data =
- SyncNetworkChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
- std::string message;
- std::string service_context;
- int64 scheduling_hash = 0LL;
- EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
- data, &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
-}
-
-// Encode a message with no context and then decode it. The decoded message
-// should match the original message, but the context and hash should be
-// untouched.
-TEST_F(SyncNetworkChannelTest, EncodeDecodeNoContext) {
- const std::string& data =
- SyncNetworkChannel::EncodeMessageForTest(
- kMessage, std::string(), kSchedulingHash);
- std::string message;
- std::string service_context = kServiceContext;
- int64 scheduling_hash = kSchedulingHash + 1;
- EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
- data, &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash + 1, scheduling_hash);
-}
-
-// Decode an empty notification. It should result in an empty message
-// but should leave the context and hash untouched.
-TEST_F(SyncNetworkChannelTest, DecodeEmpty) {
- std::string message = kMessage;
- std::string service_context = kServiceContext;
- int64 scheduling_hash = kSchedulingHash;
- EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
- std::string(), &message, &service_context, &scheduling_hash));
- EXPECT_TRUE(message.empty());
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
-}
-
-// Try to decode a garbage notification. It should leave all its
-// arguments untouched and return false.
-TEST_F(SyncNetworkChannelTest, DecodeGarbage) {
- std::string data = "garbage";
- std::string message = kMessage;
- std::string service_context = kServiceContext;
- int64 scheduling_hash = kSchedulingHash;
- EXPECT_FALSE(SyncNetworkChannel::DecodeMessageForTest(
- data, &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
-}
-
-// Simulate network channel state change. It should propagate to observer.
-TEST_F(SyncNetworkChannelTest, OnNetworkChannelStateChanged) {
- EXPECT_EQ(DEFAULT_INVALIDATION_ERROR, last_invalidator_state_);
- EXPECT_FALSE(connected_);
- network_channel_.NotifyStateChange(INVALIDATIONS_ENABLED);
- EXPECT_EQ(INVALIDATIONS_ENABLED, last_invalidator_state_);
- EXPECT_TRUE(connected_);
- network_channel_.NotifyStateChange(INVALIDATION_CREDENTIALS_REJECTED);
- EXPECT_EQ(INVALIDATION_CREDENTIALS_REJECTED, last_invalidator_state_);
- EXPECT_FALSE(connected_);
-}
-
-// Call SendMessage on the channel. SendEncodedMessage should be called for it.
-TEST_F(SyncNetworkChannelTest, SendMessage) {
- network_channel_.SendMessage(kMessage);
- std::string expected_encoded_message =
- SyncNetworkChannel::EncodeMessageForTest(
- kMessage,
- network_channel_.GetServiceContextForTest(),
- network_channel_.GetSchedulingHashForTest());
- ASSERT_EQ(expected_encoded_message, network_channel_.last_encoded_message_);
-}
-
-// Simulate an incoming notification. It should be decoded properly
-// by the channel.
-TEST_F(SyncNetworkChannelTest, OnIncomingMessage) {
- const std::string message =
- SyncNetworkChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
-
- network_channel_.DeliverIncomingMessage(message);
- EXPECT_EQ(kServiceContext,
- network_channel_.GetServiceContextForTest());
- EXPECT_EQ(kSchedulingHash,
- network_channel_.GetSchedulingHashForTest());
- EXPECT_EQ(kMessage, last_message_);
-}
-
-// Simulate an incoming notification with no receiver. It should be dropped by
-// the channel.
-TEST_F(SyncNetworkChannelTest, OnIncomingMessageNoReceiver) {
- const std::string message =
- SyncNetworkChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
-
- network_channel_.SetMessageReceiver(NULL);
- network_channel_.DeliverIncomingMessage(message);
- EXPECT_TRUE(network_channel_.GetServiceContextForTest().empty());
- EXPECT_EQ(static_cast<int64>(0),
- network_channel_.GetSchedulingHashForTest());
- EXPECT_TRUE(last_message_.empty());
-}
-
-// Simulate an incoming garbage notification. It should be dropped by
-// the channel.
-TEST_F(SyncNetworkChannelTest, OnIncomingMessageGarbage) {
- std::string message = "garbage";
-
- network_channel_.DeliverIncomingMessage(message);
- EXPECT_TRUE(network_channel_.GetServiceContextForTest().empty());
- EXPECT_EQ(static_cast<int64>(0),
- network_channel_.GetSchedulingHashForTest());
- EXPECT_TRUE(last_message_.empty());
-}
-
-// Send a message, simulate an incoming message with context, and then
-// send the same message again. The first sent message should not
-// have any context, but the second sent message should have the
-// context from the incoming emssage.
-TEST_F(SyncNetworkChannelTest, PersistedMessageState) {
- network_channel_.SendMessage(kMessage);
- ASSERT_FALSE(network_channel_.last_encoded_message_.empty());
- {
- std::string message;
- std::string service_context;
- int64 scheduling_hash = 0LL;
- EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
- network_channel_.last_encoded_message_,
- &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_TRUE(service_context.empty());
- EXPECT_EQ(0LL, scheduling_hash);
- }
-
- const std::string& encoded_message =
- SyncNetworkChannel::EncodeMessageForTest(
- kMessage, kServiceContext, kSchedulingHash);
- network_channel_.DeliverIncomingMessage(encoded_message);
-
- network_channel_.last_encoded_message_.clear();
- network_channel_.SendMessage(kMessage);
- ASSERT_FALSE(network_channel_.last_encoded_message_.empty());
- {
- std::string message;
- std::string service_context;
- int64 scheduling_hash = 0LL;
- EXPECT_TRUE(SyncNetworkChannel::DecodeMessageForTest(
- network_channel_.last_encoded_message_,
- &message, &service_context, &scheduling_hash));
- EXPECT_EQ(kMessage, message);
- EXPECT_EQ(kServiceContext, service_context);
- EXPECT_EQ(kSchedulingHash, scheduling_hash);
- }
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/notifier/unacked_invalidation_set.cc b/chromium/sync/notifier/unacked_invalidation_set.cc
deleted file mode 100644
index 705dbd2ded8..00000000000
--- a/chromium/sync/notifier/unacked_invalidation_set.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/unacked_invalidation_set.h"
-
-#include "base/strings/string_number_conversions.h"
-#include "sync/internal_api/public/base/ack_handle.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/notifier/sync_invalidation_listener.h"
-
-namespace {
-
-const char kSourceKey[] = "source";
-const char kNameKey[] = "name";
-const char kInvalidationListKey[] = "invalidation-list";
-
-} // namespace
-
-namespace syncer {
-
-const size_t UnackedInvalidationSet::kMaxBufferedInvalidations = 5;
-
-// static
-UnackedInvalidationSet::UnackedInvalidationSet(
- invalidation::ObjectId id)
- : registered_(false),
- object_id_(id) {}
-
-UnackedInvalidationSet::~UnackedInvalidationSet() {}
-
-const invalidation::ObjectId& UnackedInvalidationSet::object_id() const {
- return object_id_;
-}
-
-void UnackedInvalidationSet::Add(
- const Invalidation& invalidation) {
- SingleObjectInvalidationSet set;
- set.Insert(invalidation);
- AddSet(set);
- if (!registered_)
- Truncate(kMaxBufferedInvalidations);
-}
-
-void UnackedInvalidationSet::AddSet(
- const SingleObjectInvalidationSet& invalidations) {
- invalidations_.insert(invalidations.begin(), invalidations.end());
- if (!registered_)
- Truncate(kMaxBufferedInvalidations);
-}
-
-void UnackedInvalidationSet::ExportInvalidations(
- WeakHandle<AckHandler> ack_handler,
- ObjectIdInvalidationMap* out) const {
- for (SingleObjectInvalidationSet::const_iterator it = invalidations_.begin();
- it != invalidations_.end(); ++it) {
- // Copy the invalidation and set the copy's ack_handler.
- Invalidation inv(*it);
- inv.set_ack_handler(ack_handler);
- out->Insert(inv);
- }
-}
-
-void UnackedInvalidationSet::Clear() {
- invalidations_.clear();
-}
-
-void UnackedInvalidationSet::SetHandlerIsRegistered() {
- registered_ = true;
-}
-
-void UnackedInvalidationSet::SetHandlerIsUnregistered() {
- registered_ = false;
- Truncate(kMaxBufferedInvalidations);
-}
-
-// Removes the matching ack handle from the list.
-void UnackedInvalidationSet::Acknowledge(const AckHandle& handle) {
- bool handle_found = false;
- for (SingleObjectInvalidationSet::const_iterator it = invalidations_.begin();
- it != invalidations_.end(); ++it) {
- if (it->ack_handle().Equals(handle)) {
- invalidations_.erase(*it);
- handle_found = true;
- break;
- }
- }
- DLOG_IF(WARNING, !handle_found)
- << "Unrecognized to ack for object " << ObjectIdToString(object_id_);
- (void)handle_found; // Silence unused variable warning in release builds.
-}
-
-// Erase the invalidation with matching ack handle from the list. Also creates
-// an 'UnknownVersion' invalidation with the same ack handle and places it at
-// the beginning of the list. If an unknown version invalidation currently
-// exists, it is replaced.
-void UnackedInvalidationSet::Drop(const AckHandle& handle) {
- SingleObjectInvalidationSet::const_iterator it;
- for (it = invalidations_.begin(); it != invalidations_.end(); ++it) {
- if (it->ack_handle().Equals(handle)) {
- break;
- }
- }
- if (it == invalidations_.end()) {
- DLOG(WARNING) << "Unrecognized drop request for object "
- << ObjectIdToString(object_id_);
- return;
- }
-
- Invalidation unknown_version = Invalidation::InitFromDroppedInvalidation(*it);
- invalidations_.erase(*it);
-
- // If an unknown version is in the list, we remove it so we can replace it.
- if (!invalidations_.empty() && invalidations_.begin()->is_unknown_version()) {
- invalidations_.erase(*invalidations_.begin());
- }
-
- invalidations_.insert(unknown_version);
-}
-
-scoped_ptr<base::DictionaryValue> UnackedInvalidationSet::ToValue() const {
- scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue);
- value->SetString(kSourceKey, base::IntToString(object_id_.source()));
- value->SetString(kNameKey, object_id_.name());
-
- scoped_ptr<base::ListValue> list_value(new ListValue);
- for (InvalidationsSet::const_iterator it = invalidations_.begin();
- it != invalidations_.end(); ++it) {
- list_value->Append(it->ToValue().release());
- }
- value->Set(kInvalidationListKey, list_value.release());
-
- return value.Pass();
-}
-
-bool UnackedInvalidationSet::ResetFromValue(
- const base::DictionaryValue& value) {
- std::string source_str;
- if (!value.GetString(kSourceKey, &source_str)) {
- DLOG(WARNING) << "Unable to deserialize source";
- return false;
- }
- int source = 0;
- if (!base::StringToInt(source_str, &source)) {
- DLOG(WARNING) << "Invalid source: " << source_str;
- return false;
- }
- std::string name;
- if (!value.GetString(kNameKey, &name)) {
- DLOG(WARNING) << "Unable to deserialize name";
- return false;
- }
- object_id_ = invalidation::ObjectId(source, name);
- const base::ListValue* invalidation_list = NULL;
- if (!value.GetList(kInvalidationListKey, &invalidation_list)
- || !ResetListFromValue(*invalidation_list)) {
- // Earlier versions of this class did not set this field, so we don't treat
- // parsing errors here as a fatal failure.
- DLOG(WARNING) << "Unable to deserialize invalidation list.";
- }
- return true;
-}
-
-bool UnackedInvalidationSet::ResetListFromValue(
- const base::ListValue& list) {
- for (size_t i = 0; i < list.GetSize(); ++i) {
- const base::DictionaryValue* dict;
- if (!list.GetDictionary(i, &dict)) {
- DLOG(WARNING) << "Failed to get invalidation dictionary at index " << i;
- return false;
- }
- scoped_ptr<Invalidation> invalidation = Invalidation::InitFromValue(*dict);
- if (!invalidation) {
- DLOG(WARNING) << "Failed to parse invalidation at index " << i;
- return false;
- }
- invalidations_.insert(*invalidation.get());
- }
- return true;
-}
-
-void UnackedInvalidationSet::Truncate(size_t max_size) {
- DCHECK_GT(max_size, 0U);
-
- if (invalidations_.size() <= max_size) {
- return;
- }
-
- while (invalidations_.size() > max_size) {
- invalidations_.erase(*invalidations_.begin());
- }
-
- // We dropped some invalidations. We remember the fact that an unknown
- // amount of information has been lost by ensuring this list begins with
- // an UnknownVersion invalidation. We remove the oldest remaining
- // invalidation to make room for it.
- invalidation::ObjectId id = invalidations_.begin()->object_id();
- invalidations_.erase(*invalidations_.begin());
-
- Invalidation unknown_version = Invalidation::InitUnknownVersion(id);
- invalidations_.insert(unknown_version);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/unacked_invalidation_set.h b/chromium/sync/notifier/unacked_invalidation_set.h
deleted file mode 100644
index aae9cdab3eb..00000000000
--- a/chromium/sync/notifier/unacked_invalidation_set.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_NOTIFIER_UNACKED_INVALIDATION_SET_H_
-#define SYNC_NOTIFIER_UNACKED_INVALIDATION_SET_H_
-
-#include <vector>
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/invalidation.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace base {
-class DictionaryValue;
-} // namespace base
-
-namespace syncer {
-
-namespace test_util {
-class UnackedInvalidationSetEqMatcher;
-} // test_util
-
-class SingleObjectInvalidationSet;
-class ObjectIdInvalidationMap;
-class AckHandle;
-
-// Manages the set of invalidations that are awaiting local acknowledgement for
-// a particular ObjectId. This set of invalidations will be persisted across
-// restarts, though this class is not directly responsible for that.
-class SYNC_EXPORT UnackedInvalidationSet {
- public:
- static const size_t kMaxBufferedInvalidations;
-
- UnackedInvalidationSet(invalidation::ObjectId id);
- ~UnackedInvalidationSet();
-
- // Returns the ObjectID of the invalidations this class is tracking.
- const invalidation::ObjectId& object_id() const;
-
- // Adds a new invalidation to the set awaiting acknowledgement.
- void Add(const Invalidation& invalidation);
-
- // Adds many new invalidations to the set awaiting acknowledgement.
- void AddSet(const SingleObjectInvalidationSet& invalidations);
-
- // Exports the set of invalidations awaiting acknowledgement as an
- // ObjectIdInvalidationMap. Each of these invalidations will be associated
- // with the given |ack_handler|.
- //
- // The contents of the UnackedInvalidationSet are not directly modified by
- // this procedure, but the AckHandles stored in those exported invalidations
- // are likely to end up back here in calls to Acknowledge() or Drop().
- void ExportInvalidations(WeakHandle<AckHandler> ack_handler,
- ObjectIdInvalidationMap* out) const;
-
- // Removes all stored invalidations from this object.
- void Clear();
-
- // Indicates that a handler has registered to handle these invalidations.
- //
- // Registrations with the invalidations server persist across restarts, but
- // registrations from InvalidationHandlers to the InvalidationService are not.
- // In the time immediately after a restart, it's possible that the server
- // will send us invalidations, and we won't have a handler to send them to.
- //
- // The SetIsRegistered() call indicates that this period has come to an end.
- // There is now a handler that can receive these invalidations. Once this
- // function has been called, the kMaxBufferedInvalidations limit will be
- // ignored. It is assumed that the handler will manage its own buffer size.
- void SetHandlerIsRegistered();
-
- // Indicates that the handler has now unregistered itself.
- //
- // This causes the object to resume enforcement of the
- // kMaxBufferedInvalidations limit.
- void SetHandlerIsUnregistered();
-
- // Given an AckHandle belonging to one of the contained invalidations, finds
- // the invalidation and drops it from the list. It is considered to be
- // acknowledged, so there is no need to continue maintaining its state.
- void Acknowledge(const AckHandle& handle);
-
- // Given an AckHandle belonging to one of the contained invalidations, finds
- // the invalidation, drops it from the list, and adds additional state to
- // indicate that this invalidation has been lost without being acted on.
- void Drop(const AckHandle& handle);
-
- scoped_ptr<base::DictionaryValue> ToValue() const;
- bool ResetFromValue(const base::DictionaryValue& value);
-
- private:
- // Allow this test helper to have access to our internals.
- friend class test_util::UnackedInvalidationSetEqMatcher;
-
- typedef std::set<Invalidation, InvalidationVersionLessThan> InvalidationsSet;
-
- bool ResetListFromValue(const base::ListValue& value);
-
- // Limits the list size to the given maximum. This function will correctly
- // update this class' internal data to indicate if invalidations have been
- // dropped.
- void Truncate(size_t max_size);
-
- bool registered_;
- invalidation::ObjectId object_id_;
- InvalidationsSet invalidations_;
-};
-
-typedef std::map<invalidation::ObjectId,
- UnackedInvalidationSet,
- ObjectIdLessThan> UnackedInvalidationsMap;
-
-} // namespace syncer
-
-#endif // SYNC_NOTIFIER_UNACKED_INVALIDATION_SET_H_
diff --git a/chromium/sync/notifier/unacked_invalidation_set_test_util.cc b/chromium/sync/notifier/unacked_invalidation_set_test_util.cc
deleted file mode 100644
index 8961574c9f9..00000000000
--- a/chromium/sync/notifier/unacked_invalidation_set_test_util.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/unacked_invalidation_set_test_util.h"
-
-#include "base/json/json_string_value_serializer.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "testing/gmock/include/gmock/gmock-matchers.h"
-
-namespace syncer {
-
-using ::testing::MakeMatcher;
-using ::testing::MatchResultListener;
-using ::testing::Matcher;
-using ::testing::MatcherInterface;
-using ::testing::PrintToString;
-
-namespace test_util {
-
-// This class needs to be declared outside the null namespace so the
-// UnackedInvalidationSet can declare it as a friend. This class needs access
-// to the UnackedInvalidationSet internals to implement its comparispon
-// function.
-class UnackedInvalidationSetEqMatcher
- : public testing::MatcherInterface<const UnackedInvalidationSet&> {
- public:
- explicit UnackedInvalidationSetEqMatcher(
- const UnackedInvalidationSet& expected);
-
- virtual bool MatchAndExplain(
- const UnackedInvalidationSet& actual,
- MatchResultListener* listener) const OVERRIDE;
- virtual void DescribeTo(::std::ostream* os) const OVERRIDE;
- virtual void DescribeNegationTo(::std::ostream* os) const OVERRIDE;
-
- private:
- const UnackedInvalidationSet expected_;
-
- DISALLOW_COPY_AND_ASSIGN(UnackedInvalidationSetEqMatcher);
-};
-
-namespace {
-
-struct InvalidationEq {
- bool operator()(const syncer::Invalidation& a,
- const syncer::Invalidation& b) const {
- return a.Equals(b);
- }
-};
-
-} // namespace
-
-UnackedInvalidationSetEqMatcher::UnackedInvalidationSetEqMatcher(
- const UnackedInvalidationSet& expected)
- : expected_(expected) {}
-
-bool UnackedInvalidationSetEqMatcher::MatchAndExplain(
- const UnackedInvalidationSet& actual,
- MatchResultListener* listener) const {
- // Use our friendship with this class to compare the internals of two
- // instances.
- //
- // Note that the registration status is intentionally not considered
- // when performing this comparison.
- return expected_.object_id_ == actual.object_id_
- && std::equal(expected_.invalidations_.begin(),
- expected_.invalidations_.end(),
- actual.invalidations_.begin(),
- InvalidationEq());
-}
-
-void UnackedInvalidationSetEqMatcher::DescribeTo(::std::ostream* os) const {
- *os << " is equal to " << PrintToString(expected_);
-}
-
-void UnackedInvalidationSetEqMatcher::DescribeNegationTo(
- ::std::ostream* os) const {
- *os << " isn't equal to " << PrintToString(expected_);
-}
-
-// We're done declaring UnackedInvalidationSetEqMatcher. Everything else can
-// go into the null namespace.
-namespace {
-
-ObjectIdInvalidationMap UnackedInvalidationsMapToObjectIdInvalidationMap(
- const UnackedInvalidationsMap& state_map) {
- ObjectIdInvalidationMap object_id_invalidation_map;
- for (UnackedInvalidationsMap::const_iterator it = state_map.begin();
- it != state_map.end(); ++it) {
- it->second.ExportInvalidations(syncer::WeakHandle<AckHandler>(),
- &object_id_invalidation_map);
- }
- return object_id_invalidation_map;
-}
-
-class UnackedInvalidationsMapEqMatcher
- : public testing::MatcherInterface<const UnackedInvalidationsMap&> {
- public:
- explicit UnackedInvalidationsMapEqMatcher(
- const UnackedInvalidationsMap& expected);
-
- virtual bool MatchAndExplain(const UnackedInvalidationsMap& actual,
- MatchResultListener* listener) const;
- virtual void DescribeTo(::std::ostream* os) const;
- virtual void DescribeNegationTo(::std::ostream* os) const;
-
- private:
- const UnackedInvalidationsMap expected_;
-
- DISALLOW_COPY_AND_ASSIGN(UnackedInvalidationsMapEqMatcher);
-};
-
-UnackedInvalidationsMapEqMatcher::UnackedInvalidationsMapEqMatcher(
- const UnackedInvalidationsMap& expected)
- : expected_(expected) {
-}
-
-bool UnackedInvalidationsMapEqMatcher::MatchAndExplain(
- const UnackedInvalidationsMap& actual,
- MatchResultListener* listener) const {
- ObjectIdInvalidationMap expected_inv =
- UnackedInvalidationsMapToObjectIdInvalidationMap(expected_);
- ObjectIdInvalidationMap actual_inv =
- UnackedInvalidationsMapToObjectIdInvalidationMap(actual);
-
- return expected_inv == actual_inv;
-}
-
-void UnackedInvalidationsMapEqMatcher::DescribeTo(
- ::std::ostream* os) const {
- *os << " is equal to " << PrintToString(expected_);
-}
-
-void UnackedInvalidationsMapEqMatcher::DescribeNegationTo(
- ::std::ostream* os) const {
- *os << " isn't equal to " << PrintToString(expected_);
-}
-
-} // namespace
-
-void PrintTo(const UnackedInvalidationSet& invalidations,
- ::std::ostream* os) {
- scoped_ptr<base::DictionaryValue> value = invalidations.ToValue();
-
- std::string output;
- JSONStringValueSerializer serializer(&output);
- serializer.set_pretty_print(true);
- serializer.Serialize(*value.get());
-
- (*os) << output;
-}
-
-void PrintTo(const UnackedInvalidationsMap& map, ::std::ostream* os) {
- scoped_ptr<base::ListValue> list(new base::ListValue);
- for (UnackedInvalidationsMap::const_iterator it = map.begin();
- it != map.end(); ++it) {
- list->Append(it->second.ToValue().release());
- }
-
- std::string output;
- JSONStringValueSerializer serializer(&output);
- serializer.set_pretty_print(true);
- serializer.Serialize(*list.get());
-
- (*os) << output;
-}
-
-Matcher<const UnackedInvalidationSet&> Eq(
- const UnackedInvalidationSet& expected) {
- return MakeMatcher(new UnackedInvalidationSetEqMatcher(expected));
-}
-
-Matcher<const UnackedInvalidationsMap&> Eq(
- const UnackedInvalidationsMap& expected) {
- return MakeMatcher(new UnackedInvalidationsMapEqMatcher(expected));
-}
-
-} // namespace test_util
-
-};
diff --git a/chromium/sync/notifier/unacked_invalidation_set_test_util.h b/chromium/sync/notifier/unacked_invalidation_set_test_util.h
deleted file mode 100644
index e93726b28aa..00000000000
--- a/chromium/sync/notifier/unacked_invalidation_set_test_util.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/unacked_invalidation_set.h"
-
-#include "testing/gmock/include/gmock/gmock-matchers.h"
-
-namespace syncer {
-
-namespace test_util {
-
-void PrintTo(const UnackedInvalidationSet& invalidations, ::std::ostream* os);
-
-void PrintTo(const UnackedInvalidationsMap& map, ::std::ostream* os);
-
-::testing::Matcher<const UnackedInvalidationSet&> Eq(
- const UnackedInvalidationSet& expected);
-
-::testing::Matcher<const UnackedInvalidationsMap&> Eq(
- const UnackedInvalidationsMap& expected);
-
-} // namespace test_util
-
-} // namespace syncer
diff --git a/chromium/sync/notifier/unacked_invalidation_set_unittest.cc b/chromium/sync/notifier/unacked_invalidation_set_unittest.cc
deleted file mode 100644
index d6549ab186d..00000000000
--- a/chromium/sync/notifier/unacked_invalidation_set_unittest.cc
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/notifier/unacked_invalidation_set.h"
-
-#include "base/json/json_string_value_serializer.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/notifier/single_object_invalidation_set.h"
-#include "sync/notifier/unacked_invalidation_set_test_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-class UnackedInvalidationSetTest : public testing::Test {
- public:
- UnackedInvalidationSetTest()
- : kObjectId_(10, "ASDF"),
- unacked_invalidations_(kObjectId_) {}
-
- SingleObjectInvalidationSet GetStoredInvalidations() {
- ObjectIdInvalidationMap map;
- unacked_invalidations_.ExportInvalidations(WeakHandle<AckHandler>(), &map);
- ObjectIdSet ids = map.GetObjectIds();
- if (ids.find(kObjectId_) != ids.end()) {
- return map.ForObject(kObjectId_);
- } else {
- return SingleObjectInvalidationSet();
- }
- }
-
- const invalidation::ObjectId kObjectId_;
- UnackedInvalidationSet unacked_invalidations_;
-};
-
-namespace {
-
-// Test storage and retrieval of zero invalidations.
-TEST_F(UnackedInvalidationSetTest, Empty) {
- EXPECT_EQ(0U, GetStoredInvalidations().GetSize());
-}
-
-// Test storage and retrieval of a single invalidation.
-TEST_F(UnackedInvalidationSetTest, OneInvalidation) {
- Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
- unacked_invalidations_.Add(inv1);
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- ASSERT_EQ(1U, set.GetSize());
- EXPECT_FALSE(set.StartsWithUnknownVersion());
-}
-
-// Test that calling Clear() returns us to the empty state.
-TEST_F(UnackedInvalidationSetTest, Clear) {
- Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
- unacked_invalidations_.Add(inv1);
- unacked_invalidations_.Clear();
-
- EXPECT_EQ(0U, GetStoredInvalidations().GetSize());
-}
-
-// Test that repeated unknown version invalidations are squashed together.
-TEST_F(UnackedInvalidationSetTest, UnknownVersions) {
- Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
- Invalidation inv2 = Invalidation::InitUnknownVersion(kObjectId_);
- Invalidation inv3 = Invalidation::InitUnknownVersion(kObjectId_);
- unacked_invalidations_.Add(inv1);
- unacked_invalidations_.Add(inv2);
- unacked_invalidations_.Add(inv3);
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- ASSERT_EQ(2U, set.GetSize());
- EXPECT_TRUE(set.StartsWithUnknownVersion());
-}
-
-// Tests that no truncation occurs while we're under the limit.
-TEST_F(UnackedInvalidationSetTest, NoTruncation) {
- size_t kMax = UnackedInvalidationSet::kMaxBufferedInvalidations;
-
- for (size_t i = 0; i < kMax; ++i) {
- Invalidation inv = Invalidation::Init(kObjectId_, i, "payload");
- unacked_invalidations_.Add(inv);
- }
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- ASSERT_EQ(kMax, set.GetSize());
- EXPECT_FALSE(set.StartsWithUnknownVersion());
- EXPECT_EQ(0, set.begin()->version());
- EXPECT_EQ(kMax-1, static_cast<size_t>(set.rbegin()->version()));
-}
-
-// Test that truncation happens as we reach the limit.
-TEST_F(UnackedInvalidationSetTest, Truncation) {
- size_t kMax = UnackedInvalidationSet::kMaxBufferedInvalidations;
-
- for (size_t i = 0; i < kMax + 1; ++i) {
- Invalidation inv = Invalidation::Init(kObjectId_, i, "payload");
- unacked_invalidations_.Add(inv);
- }
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- ASSERT_EQ(kMax, set.GetSize());
- EXPECT_TRUE(set.StartsWithUnknownVersion());
- EXPECT_TRUE(set.begin()->is_unknown_version());
- EXPECT_EQ(kMax, static_cast<size_t>(set.rbegin()->version()));
-}
-
-// Test that we don't truncate while a handler is registered.
-TEST_F(UnackedInvalidationSetTest, RegistrationAndTruncation) {
- unacked_invalidations_.SetHandlerIsRegistered();
-
- size_t kMax = UnackedInvalidationSet::kMaxBufferedInvalidations;
-
- for (size_t i = 0; i < kMax + 1; ++i) {
- Invalidation inv = Invalidation::Init(kObjectId_, i, "payload");
- unacked_invalidations_.Add(inv);
- }
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- ASSERT_EQ(kMax+1, set.GetSize());
- EXPECT_FALSE(set.StartsWithUnknownVersion());
- EXPECT_EQ(0, set.begin()->version());
- EXPECT_EQ(kMax, static_cast<size_t>(set.rbegin()->version()));
-
- // Unregistering should re-enable truncation.
- unacked_invalidations_.SetHandlerIsUnregistered();
- SingleObjectInvalidationSet set2 = GetStoredInvalidations();
- ASSERT_EQ(kMax, set2.GetSize());
- EXPECT_TRUE(set2.StartsWithUnknownVersion());
- EXPECT_TRUE(set2.begin()->is_unknown_version());
- EXPECT_EQ(kMax, static_cast<size_t>(set2.rbegin()->version()));
-}
-
-// Test acknowledgement.
-TEST_F(UnackedInvalidationSetTest, Acknowledge) {
- // inv2 is included in this test just to make sure invalidations that
- // are supposed to be unaffected by this operation will be unaffected.
-
- // We don't expect to be receiving acks or drops unless this flag is set.
- // Not that it makes much of a difference in behavior.
- unacked_invalidations_.SetHandlerIsRegistered();
-
- Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
- Invalidation inv2 = Invalidation::InitUnknownVersion(kObjectId_);
- AckHandle inv1_handle = inv1.ack_handle();
-
- unacked_invalidations_.Add(inv1);
- unacked_invalidations_.Add(inv2);
-
- unacked_invalidations_.Acknowledge(inv1_handle);
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- EXPECT_EQ(1U, set.GetSize());
- EXPECT_TRUE(set.StartsWithUnknownVersion());
-}
-
-// Test drops.
-TEST_F(UnackedInvalidationSetTest, Drop) {
- // inv2 is included in this test just to make sure invalidations that
- // are supposed to be unaffected by this operation will be unaffected.
-
- // We don't expect to be receiving acks or drops unless this flag is set.
- // Not that it makes much of a difference in behavior.
- unacked_invalidations_.SetHandlerIsRegistered();
-
- Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
- Invalidation inv2 = Invalidation::Init(kObjectId_, 15, "payload");
- AckHandle inv1_handle = inv1.ack_handle();
-
- unacked_invalidations_.Add(inv1);
- unacked_invalidations_.Add(inv2);
-
- unacked_invalidations_.Drop(inv1_handle);
-
- SingleObjectInvalidationSet set = GetStoredInvalidations();
- ASSERT_EQ(2U, set.GetSize());
- EXPECT_TRUE(set.StartsWithUnknownVersion());
- EXPECT_EQ(15, set.rbegin()->version());
-}
-
-class UnackedInvalidationSetSerializationTest
- : public UnackedInvalidationSetTest {
- public:
- UnackedInvalidationSet SerializeDeserialize() {
- scoped_ptr<base::DictionaryValue> value = unacked_invalidations_.ToValue();
- UnackedInvalidationSet deserialized(kObjectId_);
- deserialized.ResetFromValue(*value.get());
- return deserialized;
- }
-};
-
-TEST_F(UnackedInvalidationSetSerializationTest, Empty) {
- UnackedInvalidationSet deserialized = SerializeDeserialize();
- EXPECT_THAT(unacked_invalidations_, test_util::Eq(deserialized));
-}
-
-TEST_F(UnackedInvalidationSetSerializationTest, OneInvalidation) {
- Invalidation inv = Invalidation::Init(kObjectId_, 10, "payload");
- unacked_invalidations_.Add(inv);
-
- UnackedInvalidationSet deserialized = SerializeDeserialize();
- EXPECT_THAT(unacked_invalidations_, test_util::Eq(deserialized));
-}
-
-TEST_F(UnackedInvalidationSetSerializationTest, WithUnknownVersion) {
- Invalidation inv1 = Invalidation::Init(kObjectId_, 10, "payload");
- Invalidation inv2 = Invalidation::InitUnknownVersion(kObjectId_);
- Invalidation inv3 = Invalidation::InitUnknownVersion(kObjectId_);
- unacked_invalidations_.Add(inv1);
- unacked_invalidations_.Add(inv2);
- unacked_invalidations_.Add(inv3);
-
- UnackedInvalidationSet deserialized = SerializeDeserialize();
- EXPECT_THAT(unacked_invalidations_, test_util::Eq(deserialized));
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/protocol/DEPS b/chromium/sync/protocol/DEPS
deleted file mode 100644
index f8f9de57c0a..00000000000
--- a/chromium/sync/protocol/DEPS
+++ /dev/null
@@ -1,4 +0,0 @@
-include_rules = [
- "+sync/base",
- "+sync/internal_api/public/base",
-]
diff --git a/chromium/sync/protocol/app_list_specifics.proto b/chromium/sync/protocol/app_list_specifics.proto
deleted file mode 100644
index c677a4bd0c7..00000000000
--- a/chromium/sync/protocol/app_list_specifics.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for the app list (aka app launcher).
-
-// Update proto_{value,enum}_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of app list objects.
-message AppListSpecifics {
- // Unique identifier for the item:
- // * TYPE_FOLDER: Folder id (generated)
- // * TYPE_APP: App Id
- // * TYPE_URL: Url
- optional string item_id = 1;
-
- // What type of item this is.
- enum AppListItemType {
- // An extension app.
- TYPE_APP = 1;
- // A request to remove any matching default installed apps.
- TYPE_REMOVE_DEFAULT_APP = 2;
- // A folder containing entries whose |parent_id| matches |item_id|.
- TYPE_FOLDER = 3;
- // A URL shortcut (functionally equivalent to a bookmark).
- TYPE_URL = 4;
- }
- optional AppListItemType item_type = 2;
-
- // Item name (FOLDER or URL).
- optional string item_name = 3;
-
- // Id of the parent (folder) item.
- optional string parent_id = 4;
-
- // Which page this item will appear on in the app list.
- optional string page_ordinal = 5;
-
- // Where on a page this item will appear.
- optional string item_ordinal = 6;
-}
diff --git a/chromium/sync/protocol/app_notification_specifics.proto b/chromium/sync/protocol/app_notification_specifics.proto
deleted file mode 100644
index 58436c7ad00..00000000000
--- a/chromium/sync/protocol/app_notification_specifics.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for app notifications.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of an app notification.
-
-// An App Notification, to be delivered from Chrome Apps to the
-// Chrome browser through the Notification API.
-message AppNotification {
- // Globally unique id. This is more robust for uniquely identifying each
- // notification and hence gives us flexibility in the future. In absence
- // of this, unique id would be (app_id, creation_timestamp_ms). But that
- // relies on creation_timestamp_ms being high resolution and is not
- // globally unique - only unique for a given user.
- optional string guid = 1;
- // Metadata, not shown directly to the user.
- // The unique App Id, as created by the webstore and used to
- // delegate messages to the applications. This is defined as 32 characters
- optional string app_id = 2;
- // Timestamp when the message was created in milliseconds.
- // This is seperate from ctime as this is only set by the application.
- optional int64 creation_timestamp_ms = 3;
-
- // Payload - these fields are visible to the user content is defined by the
- // app. The fields are described in:
- // chrome/browser/extensions/app_notification.h
- optional string title = 4;
- optional string body_text = 5;
- optional string link_url = 6;
- optional string link_text = 7;
-}
-
diff --git a/chromium/sync/protocol/app_setting_specifics.proto b/chromium/sync/protocol/app_setting_specifics.proto
deleted file mode 100644
index d936c668db0..00000000000
--- a/chromium/sync/protocol/app_setting_specifics.proto
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for an app setting.
-// This is the same as for an extension setting, but uses a separate datatype
-// in order to control syncability separately.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "extension_setting_specifics.proto";
-
-// Properties of app setting sync objects; just an extension setting.
-message AppSettingSpecifics {
- optional ExtensionSettingSpecifics extension_setting = 1;
-}
-
diff --git a/chromium/sync/protocol/app_specifics.proto b/chromium/sync/protocol/app_specifics.proto
deleted file mode 100644
index a7f09ec1be1..00000000000
--- a/chromium/sync/protocol/app_specifics.proto
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for apps.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "extension_specifics.proto";
-
-// Settings related to push notifications for apps.
-message AppNotificationSettings {
- // DEPRECATED: Use oauth_client_id below.
- // Whether or not the user has setup notifications at least once.
- // The value for this field will start out false and will be set
- // to true when the user accepts receiving notifications for the
- // first time and then it will always remain true.
- optional bool initial_setup_done = 1;
-
- // Whether or not the user has disabled notifications.
- optional bool disabled = 2;
-
- // OAuth2 client id to which the user granted the notification permission.
- // This field will start out empty.
- // It will be set when the user accepts receiving notifications.
- // This field is used when the user revokes the notifications permission.
- // Note that it is never cleared after it was set once. Hence, the presence
- // of this field can be used to determine if the user has setup notifications
- // at least once for the given app.
- optional string oauth_client_id = 3;
-}
-
-// Properties of app sync objects.
-//
-// For now, an app is just an extension. We keep the two data types
-// separate for future-proofing purposes.
-message AppSpecifics {
- // Extension data.
- optional ExtensionSpecifics extension = 1;
-
- // Notification settings.
- optional AppNotificationSettings notification_settings = 2;
-
- // This controls where on a page this application icon will appear.
- optional string app_launch_ordinal = 3;
-
- // This specifics which page the application icon will appear on in the NTP.
- // This values only provide the order within the application pages, not within
- // all of the panels in the NTP.
- optional string page_ordinal = 4;
-}
-
diff --git a/chromium/sync/protocol/article_specifics.proto b/chromium/sync/protocol/article_specifics.proto
deleted file mode 100644
index 00631fd16ec..00000000000
--- a/chromium/sync/protocol/article_specifics.proto
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for the article.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of Article objects.
-message ArticleSpecifics {
- // Next ID to use: 4
-
- optional string entry_id = 1;
-
- optional string title = 2;
-
- repeated ArticlePage pages = 3;
-}
-
-message ArticlePage {
- // Next ID to use: 2
-
- optional string url = 1;
-}
diff --git a/chromium/sync/protocol/autofill_specifics.proto b/chromium/sync/protocol/autofill_specifics.proto
deleted file mode 100644
index f816e90da6f..00000000000
--- a/chromium/sync/protocol/autofill_specifics.proto
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for autofill.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of autofill sync objects.
-
-// An AutofillProfile.
-message AutofillProfileSpecifics {
- optional string guid = 15;
- optional string origin = 16;
-
- // Contact info.
- repeated string name_first = 2;
- repeated string name_middle = 3;
- repeated string name_last = 4;
- repeated string email_address = 5;
- optional string company_name = 6;
-
- // Home address.
- optional string address_home_line1 = 7;
- optional string address_home_line2 = 8;
- optional string address_home_city = 9;
- optional string address_home_state = 10;
- optional string address_home_zip = 11;
- optional string address_home_country = 12;
-
- // Phone.
- repeated string phone_home_whole_number = 13;
-
- // Deprecated.
- optional string label = 1 [deprecated=true];
- optional string phone_fax_whole_number = 14 [deprecated=true];
-}
-
-message AutofillSpecifics {
- // If any of these 3 fields are present, then all 3 should be, and it implies
- // that this entity represents a classic autofill object. In this case,
- // none of the autofill++ objects below should be present.
- optional string name = 1;
- optional string value = 2;
- repeated int64 usage_timestamp = 3;
-
- // An autofill++ profile object. If present, indicates this entity
- // represents an AutofillProfile exclusively, and no other fields (such as
- // name/value or credit_card) should be present.
- optional AutofillProfileSpecifics profile = 4;
-
- // Obsolete credit card fields.
- // optional bytes deprecated_encrypted_credit_card = 5;
- // optional AutofillCreditCardSpecifics deprecated_credit_card = 6;
-}
diff --git a/chromium/sync/protocol/bookmark_specifics.proto b/chromium/sync/protocol/bookmark_specifics.proto
deleted file mode 100644
index 5ac93ba577a..00000000000
--- a/chromium/sync/protocol/bookmark_specifics.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for bookmarks.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Corresponds to a single meta info key/value pair for a bookmark node.
-message MetaInfo {
- optional string key = 1;
- optional string value = 2;
-}
-
-// Properties of bookmark sync objects.
-message BookmarkSpecifics {
- optional string url = 1;
- optional bytes favicon = 2;
- optional string title = 3;
- // Corresponds to BookmarkNode::date_added() and is the internal value from
- // base::Time.
- optional int64 creation_time_us = 4;
- optional string icon_url = 5;
- repeated MetaInfo meta_info = 6;
-}
-
diff --git a/chromium/sync/protocol/client_commands.proto b/chromium/sync/protocol/client_commands.proto
deleted file mode 100644
index c3c18ef76c0..00000000000
--- a/chromium/sync/protocol/client_commands.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol for communication between sync client and server.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-message ClientCommand {
- // Time to wait before sending any requests to the server.
- optional int32 set_sync_poll_interval = 1; // in seconds
- optional int32 set_sync_long_poll_interval = 2; // in seconds
-
- optional int32 max_commit_batch_size = 3;
-
- // Number of seconds to delay between a sessions
- // action and sending a commit message to the
- // server
- optional int32 sessions_commit_delay_seconds = 4;
-
- // Number of seconds to delay before the throttled client should retry.
- optional int32 throttle_delay_seconds = 5;
-
- // Maximum number of local nudges to buffer per-type.
- optional int32 client_invalidation_hint_buffer_size = 6;
-};
diff --git a/chromium/sync/protocol/client_debug_info.proto b/chromium/sync/protocol/client_debug_info.proto
deleted file mode 100644
index 171909fb0b3..00000000000
--- a/chromium/sync/protocol/client_debug_info.proto
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol for debug info clients can send to the sync server.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "get_updates_caller_info.proto";
-
-// Per-type hint information.
-message TypeHint {
- // The data type this hint applied to.
- optional int32 data_type_id = 1;
-
- // Whether or not a valid hint is provided.
- optional bool has_valid_hint = 2;
-}
-
-// Information about the source that triggered a sync.
-message SourceInfo {
- // An enum indicating the reason for the nudge.
- optional GetUpdatesCallerInfo.GetUpdatesSource source = 1;
-
- // The per-type hint information associated with the nudge.
- repeated TypeHint type_hint = 2;
-}
-
-// The additional info here is from the StatusController. They get sent when
-// the event SYNC_CYCLE_COMPLETED is sent.
-message SyncCycleCompletedEventInfo {
- // optional bool syncer_stuck = 1; // Was always false, now obsolete.
-
- // The client has never set these values correctly. It set
- // num_blocking_conflicts to the total number of conflicts detected and set
- // num_non_blocking_conflicts to the number of blocking (aka. simple)
- // conflicts.
- //
- // These counters have been deprecated to avoid further confusion. The newer
- // counters provide more detail and are less buggy.
- optional int32 num_blocking_conflicts = 2 [deprecated = true];
- optional int32 num_non_blocking_conflicts = 3 [deprecated = true];
-
- // These new conflict counters replace the ones above.
- optional int32 num_encryption_conflicts = 4;
- optional int32 num_hierarchy_conflicts = 5;
- optional int32 num_simple_conflicts = 6; // No longer sent since M24.
- optional int32 num_server_conflicts = 7;
-
- // Counts to track the effective usefulness of our GetUpdate requests.
- optional int32 num_updates_downloaded = 8;
- optional int32 num_reflected_updates_downloaded = 9;
- optional GetUpdatesCallerInfo caller_info = 10;
-
- // A list of all the sources that were merged into this session.
- //
- // Some scenarios, notably mode switches and canary jobs, can spuriously add
- // back-to-back duplicate sources to this list.
- repeated SourceInfo source_info = 11;
-}
-
-// Datatype specifics statistics gathered at association time.
-message DatatypeAssociationStats {
- // The datatype that was associated.
- optional int32 data_type_id = 1;
-
- // The state of the world before association.
- optional int32 num_local_items_before_association = 2;
- optional int32 num_sync_items_before_association = 3;
-
- // The state of the world after association.
- optional int32 num_local_items_after_association = 4;
- optional int32 num_sync_items_after_association = 5;
-
- // The changes that got us from before to after. In a correctly working
- // system these should be the deltas between before and after.
- optional int32 num_local_items_added = 6;
- optional int32 num_local_items_deleted = 7;
- optional int32 num_local_items_modified = 8;
- optional int32 num_sync_items_added = 9;
- optional int32 num_sync_items_deleted = 10;
- optional int32 num_sync_items_modified = 11;
-
- // Model versions before association. Ideally local and sync model should
- // have same version if models were persisted properly in last session.
- // Note: currently version is only set on bookmark model.
- optional int64 local_version_pre_association = 20;
- optional int64 sync_version_pre_association = 21;
-
- // The data type ran into an error during model association.
- optional bool had_error = 12;
-
- // Waiting time before downloading starts. This measures the time between
- // receiving configuration request for a set of data types to starting
- // downloading data of this type.
- optional int64 download_wait_time_us = 15;
-
- // Time spent on downloading sync data for first time sync.
- // Note: This measures the time between asking backend to download data to
- // being notified of download-ready by backend. So it consists of
- // time on data downloading and processing at sync backend. But
- // downloading time should dominate. It's also the total time spent on
- // downloading data of all types in the priority group of
- // |data_type_id| instead of just one data type.
- optional int64 download_time_us = 13;
-
- // Waiting time for higher priority types to finish association. This
- // measures the time between finishing downloading data to requesting
- // association manager to associate this batch of types. High priority types
- // have near zero waiting time.
- optional int64 association_wait_time_for_high_priority_us = 16;
-
- // Waiting time for other types with same priority during association.
- // Data type manger sends types of same priority to association manager to
- // configure as a batch. Association manager configures one type at a time.
- // This measures the time between when a type is sent to association manager
- // (among other types) to when association manager starts configuring the
- // type. Total wait time before association is
- // |association_wait_time_for_high_priority_us| +
- // |association_wait_time_for_same_priority_us|
- optional int64 association_wait_time_for_same_priority_us = 14;
-
- // Time spent on model association.
- optional int64 association_time_us = 17;
-
- // Higher priority type that's configured before this type.
- repeated int32 high_priority_type_configured_before = 18;
-
- // Same priority type that's configured before this type.
- repeated int32 same_priority_type_configured_before = 19;
-}
-
-message DebugEventInfo {
- // Singleton event types. These events have no further information beyond
- // the fact that the event happened.
- enum SingletonEventType {
- CONNECTION_STATUS_CHANGE = 1; // Connection status change. Note this
- // gets generated even during a successful
- // connection.
- UPDATED_TOKEN = 2; // Client received an updated token.
- PASSPHRASE_REQUIRED = 3; // Cryptographer needs passphrase.
- PASSPHRASE_ACCEPTED = 4; // Passphrase was accepted by cryptographer.
- INITIALIZATION_COMPLETE = 5; // Sync Initialization is complete.
-
- // |STOP_SYNCING_PERMANENTLY| event should never be seen by the server in
- // the absence of bugs.
- STOP_SYNCING_PERMANENTLY = 6; // Server sent stop syncing permanently.
-
- ENCRYPTION_COMPLETE = 7; // Client has finished encrypting all data.
- ACTIONABLE_ERROR = 8; // Client received an actionable error.
- ENCRYPTED_TYPES_CHANGED = 9; // Set of encrypted types has changed.
- // NOTE: until m25 bootstrap token updated also
- // shared this field (erroneously).
- PASSPHRASE_TYPE_CHANGED = 10; // The encryption passphrase state changed.
- KEYSTORE_TOKEN_UPDATED = 11; // A new keystore encryption token was
- // persisted.
- CONFIGURE_COMPLETE = 12; // The datatype manager has finished an
- // at least partially successful
- // configuration and is once again syncing
- // with the server.
- BOOTSTRAP_TOKEN_UPDATED = 13; // A new cryptographer bootstrap token was
- // generated.
- }
-
- // Each of the following fields correspond to different kinds of events. as
- // a result, only one is set during any single DebugEventInfo.
- // A singleton event. See enum definition.
- optional SingletonEventType singleton_event = 1;
- // A sync cycle completed.
- optional SyncCycleCompletedEventInfo sync_cycle_completed_event_info = 2;
- // A datatype triggered a nudge.
- optional int32 nudging_datatype = 3;
- // A notification triggered a nudge.
- repeated int32 datatypes_notified_from_server = 4;
- // A datatype finished model association.
- optional DatatypeAssociationStats datatype_association_stats = 5;
-}
-
-message DebugInfo {
- repeated DebugEventInfo events = 1;
-
- // Whether cryptographer is ready to encrypt and decrypt data.
- optional bool cryptographer_ready = 2;
-
- // Cryptographer has pending keys which indicates the correct passphrase
- // has not been provided yet.
- optional bool cryptographer_has_pending_keys = 3;
-
- // Indicates client has dropped some events to save bandwidth.
- optional bool events_dropped = 4;
-}
diff --git a/chromium/sync/protocol/device_info_specifics.proto b/chromium/sync/protocol/device_info_specifics.proto
deleted file mode 100644
index 67eb4698a7a..00000000000
--- a/chromium/sync/protocol/device_info_specifics.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for nigori keys.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "sync_enums.proto";
-
-// Information about a device that is running a sync-enabled Chrome browser. We
-// are mapping the per-client cache guid to more specific information about the
-// device.
-message DeviceInfoSpecifics {
- // The cache_guid created to identify a sync client on this device.
- optional string cache_guid = 1;
-
- // A non-unique but human readable name to describe this client.
- optional string client_name = 2;
-
- // The platform of the device.
- optional SyncEnums.DeviceType device_type = 3;
-
- // The UserAgent used when contacting the Chrome Sync server.
- optional string sync_user_agent = 4;
-
- // The Chrome instance's version. Updated (if necessary) on every startup.
- optional string chrome_version = 5;
-}
diff --git a/chromium/sync/protocol/dictionary_specifics.proto b/chromium/sync/protocol/dictionary_specifics.proto
deleted file mode 100644
index 46e49e83396..00000000000
--- a/chromium/sync/protocol/dictionary_specifics.proto
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for the dictionary.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of Dictionary objects.
-message DictionarySpecifics {
- // A spelling which when typed is treated as a correctly spelled word.
- optional string word = 1;
-}
diff --git a/chromium/sync/protocol/encryption.proto b/chromium/sync/protocol/encryption.proto
deleted file mode 100644
index 202ec3394db..00000000000
--- a/chromium/sync/protocol/encryption.proto
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Common sync protocol for encrypted data.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Encrypted sync data consists of two parts: a key name and a blob. Key name is
-// the name of the key that was used to encrypt blob and blob is encrypted data
-// itself.
-//
-// The reason we need to keep track of the key name is that a sync user can
-// change their passphrase (and thus their encryption key) at any time. When
-// that happens, we make a best effort to reencrypt all nodes with the new
-// passphrase, but since we don't have transactions on the server-side, we
-// cannot guarantee that every node will be reencrypted. As a workaround, we
-// keep track of all keys, assign each key a name (by using that key to encrypt
-// a well known string) and keep track of which key was used to encrypt each
-// node.
-message EncryptedData {
- optional string key_name = 1;
- optional string blob = 2;
-};
diff --git a/chromium/sync/protocol/experiments_specifics.proto b/chromium/sync/protocol/experiments_specifics.proto
deleted file mode 100644
index 1e8732fd028..00000000000
--- a/chromium/sync/protocol/experiments_specifics.proto
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for experimental feature flags.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// A flag to enable support for keystore encryption.
-message KeystoreEncryptionFlags {
- optional bool enabled = 1;
-}
-
-// Whether history delete directives are enabled.
-message HistoryDeleteDirectives {
- optional bool enabled = 1;
-}
-
-// Whether this client should cull (delete) expired autofill
-// entries when autofill sync is enabled.
-message AutofillCullingFlags {
- optional bool enabled = 1;
-}
-
-// Whether the favicon sync datatypes are enabled, and what parameters
-// they should operate under.
-message FaviconSyncFlags {
- optional bool enabled = 1;
- optional int32 favicon_sync_limit = 2 [default = 200];
-}
-
-// Flags for enabling the experimental no-precommit GU feature.
-message PreCommitUpdateAvoidanceFlags {
- optional bool enabled = 1;
-}
-
-// Contains one flag or set of related flags. Each node of the experiments type
-// will have a unique_client_tag identifying which flags it contains. By
-// convention, the tag name should match the sub-message name.
-message ExperimentsSpecifics {
- optional KeystoreEncryptionFlags keystore_encryption = 1;
- optional HistoryDeleteDirectives history_delete_directives = 2;
- optional AutofillCullingFlags autofill_culling = 3;
- optional FaviconSyncFlags favicon_sync = 4;
- optional PreCommitUpdateAvoidanceFlags pre_commit_update_avoidance = 5;
-}
diff --git a/chromium/sync/protocol/extension_setting_specifics.proto b/chromium/sync/protocol/extension_setting_specifics.proto
deleted file mode 100644
index 62947c50cbe..00000000000
--- a/chromium/sync/protocol/extension_setting_specifics.proto
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for an extension setting.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of extension setting sync objects.
-message ExtensionSettingSpecifics {
- // Id of the extension the setting is for.
- optional string extension_id = 1;
-
- // Setting key.
- optional string key = 2;
-
- // Setting value serialized as JSON.
- optional string value = 3;
-}
-
diff --git a/chromium/sync/protocol/extension_specifics.proto b/chromium/sync/protocol/extension_specifics.proto
deleted file mode 100644
index e3e1dfbfa8d..00000000000
--- a/chromium/sync/protocol/extension_specifics.proto
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for extensions.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of extension sync objects.
-//
-// Merge policy: the settings for the higher version number win; in
-// the case of a tie, server wins.
-message ExtensionSpecifics {
- // Globally unique id for this extension.
- optional string id = 1;
- // The known installed version.
- optional string version = 2;
-
- // Auto-update URL to use for this extension. May be blank, in
- // which case the default one (i.e., the one for the Chrome
- // Extensions Gallery) is used.
- optional string update_url = 3;
- // Whether or not this extension is enabled.
- optional bool enabled = 4;
- // Whether or not this extension is enabled in incognito mode.
- optional bool incognito_enabled = 5;
-
- // The name of the extension. Used only for debugging.
- optional string name = 6;
-}
-
diff --git a/chromium/sync/protocol/favicon_image_specifics.proto b/chromium/sync/protocol/favicon_image_specifics.proto
deleted file mode 100644
index c5eb63b2114..00000000000
--- a/chromium/sync/protocol/favicon_image_specifics.proto
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for the favicon image specifics.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-message FaviconData {
- // The image data for the favicon (PNG encoded).
- optional bytes favicon = 1;
- // The favicon dimensions.
- optional int32 width = 2;
- optional int32 height = 3;
-}
-
-// Image data for favicons. Multiple resolutions are supported.
-message FaviconImageSpecifics {
- // The url of the favicon image.
- optional string favicon_url = 1;
-
- // The favicons associated with this source, one per scale type.
- // Favicons images should be stored into the field that best corresponds to
- // their scale. If multiple favicons are appropriate, the highest resolution
- // that doesn't violate the maximum should be stored.
-
- // A normal low-resolution web favicon (max resolution 16x16).
- optional FaviconData favicon_web = 2;
- // A slightly higher-resolution web favicon (max resolution 32x32).
- optional FaviconData favicon_web_32 = 3;
- // A high-resolution touch favicon (not precomposed, max resolution 64x64).
- optional FaviconData favicon_touch_64 = 4;
- // A high-resolution touch favicon (precomposed, max resolution 64x64).
- optional FaviconData favicon_touch_precomposed_64 = 5;
-}
diff --git a/chromium/sync/protocol/favicon_tracking_specifics.proto b/chromium/sync/protocol/favicon_tracking_specifics.proto
deleted file mode 100644
index c3b06fe00a5..00000000000
--- a/chromium/sync/protocol/favicon_tracking_specifics.proto
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for the favicon tracking type.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Tracking info for of favicon images. These control expiration of images
-// from sync based on recency, bookmark state, etc.
-message FaviconTrackingSpecifics {
- // The url of the favicon image.
- optional string favicon_url= 1;
- // The last time a page using this favicon was visited (in milliseconds
- // since linux epoch).
- optional int64 last_visit_time_ms = 3;
- // Whether this favicon is currently bookmarked or not.
- optional bool is_bookmarked = 4;
-}
diff --git a/chromium/sync/protocol/get_updates_caller_info.proto b/chromium/sync/protocol/get_updates_caller_info.proto
deleted file mode 100644
index a54232489f7..00000000000
--- a/chromium/sync/protocol/get_updates_caller_info.proto
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-message GetUpdatesCallerInfo {
- // This message was deprecated in M28. The preferred represenation of this
- // information is now the GetUpdatesOrigin enum, which is defined in
- // sync_enums.proto.
- enum GetUpdatesSource {
- UNKNOWN = 0; // The source was not set by the caller.
- FIRST_UPDATE = 1; // First request after browser restart. Not to
- // be confused with "NEW_CLIENT".
- LOCAL = 2; // The source of the update was a local change.
- NOTIFICATION = 3; // The source of the update was a p2p notification.
- PERIODIC = 4; // The source of the update was periodic polling.
- SYNC_CYCLE_CONTINUATION = 5; // The source of the update was a
- // continuation of a previous sync cycle.
- // No longer sent as of M24.
-
- // This value is deprecated and was never used in production.
- // CLEAR_PRIVATE_DATA = 6;
-
- NEWLY_SUPPORTED_DATATYPE = 7; // The client is in configuration mode
- // because it's syncing all datatypes, and
- // support for a new datatype was recently
- // released via a software auto-update.
- MIGRATION = 8; // The client is in configuration mode because a
- // MIGRATION_DONE error previously returned by the
- // server necessitated resynchronization.
- NEW_CLIENT = 9; // The client is in configuration mode because the
- // user enabled sync for the first time. Not to be
- // confused with FIRST_UPDATE.
- RECONFIGURATION = 10; // The client is in configuration mode because the
- // user opted to sync a different set of datatypes.
- DATATYPE_REFRESH = 11; // A datatype has requested a refresh. This is
- // typically used when datatype's have custom
- // sync UI, e.g. sessions.
- }
-
- required GetUpdatesSource source = 1;
-
- // True only if notifications were enabled for this GetUpdateMessage.
- optional bool notifications_enabled = 2;
-};
-
diff --git a/chromium/sync/protocol/history_delete_directive_specifics.proto b/chromium/sync/protocol/history_delete_directive_specifics.proto
deleted file mode 100644
index d647ddd175a..00000000000
--- a/chromium/sync/protocol/history_delete_directive_specifics.proto
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for history delete directives.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// All timestamps below are from Sane Time (
-// http://www.chromium.org/developers/design-documents/sane-time )
-// and are in microseconds since the Unix epoch.
-
-// Properties of history delete directive sync objects.
-message HistoryDeleteDirectiveSpecifics {
- // Exactly one of the fields below must be filled in. Otherwise, this
- // delete directive must be ignored.
- optional GlobalIdDirective global_id_directive = 1;
- optional TimeRangeDirective time_range_directive = 2;
-}
-
-message GlobalIdDirective {
- // The global IDs of the navigations to delete.
- repeated int64 global_id = 1;
-
- // Time range for searching for navigations to delete. Client should delete
- // all navigations to a URL between [start_time_usec, end_time_usec]
- // if one of them matches a |global_id|.
- optional int64 start_time_usec = 2;
- optional int64 end_time_usec = 3;
-}
-
-message TimeRangeDirective {
- // Both fields below must be filled in. Otherwise, this delete directive
- // must be ignored.
-
- // The time on or after which entries must be deleted.
- optional int64 start_time_usec = 1;
- // The time on or before which entries must be deleted.
- optional int64 end_time_usec = 2;
-}
diff --git a/chromium/sync/protocol/managed_user_setting_specifics.proto b/chromium/sync/protocol/managed_user_setting_specifics.proto
deleted file mode 100644
index fcd1d964267..00000000000
--- a/chromium/sync/protocol/managed_user_setting_specifics.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for managed user settings.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of managed user setting sync objects.
-message ManagedUserSettingSpecifics {
- optional string name = 1;
- optional string value = 2;
-}
diff --git a/chromium/sync/protocol/managed_user_specifics.proto b/chromium/sync/protocol/managed_user_specifics.proto
deleted file mode 100644
index 6c819080803..00000000000
--- a/chromium/sync/protocol/managed_user_specifics.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for managed user settings.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of managed user sync objects.
-message ManagedUserSpecifics {
- // A randomly-generated identifier for the managed user.
- optional string id = 1;
- // The human-visible name of the managed user
- optional string name = 2;
- // This flag is set by the server to acknowledge that it has committed a
- // newly created managed user.
- optional bool acknowledged = 3 [default = false];
- // Master key for managed user cryptohome.
- optional string master_key = 4;
- // A string representing the index of the supervised user avatar on Chrome.
- // It has the following format:
- // "chrome-avatar-index:INDEX" where INDEX is an integer.
- optional string chrome_avatar = 5;
- // A string representing the index of the supervised user avatar on Chrome OS.
- // It has the following format:
- // "chromeos-avatar-index:INDEX" where INDEX is an integer.
- optional string chromeos_avatar = 6;
-}
diff --git a/chromium/sync/protocol/nigori_specifics.proto b/chromium/sync/protocol/nigori_specifics.proto
deleted file mode 100644
index 87e6c3771f0..00000000000
--- a/chromium/sync/protocol/nigori_specifics.proto
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for nigori keys.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "encryption.proto";
-
-message NigoriKey {
- optional string name = 1;
- optional bytes user_key = 2;
- optional bytes encryption_key = 3;
- optional bytes mac_key = 4;
-}
-
-message NigoriKeyBag {
- repeated NigoriKey key = 2;
-}
-
-// Properties of nigori sync object.
-message NigoriSpecifics {
- optional EncryptedData encryption_keybag = 1;
- // Once keystore migration is performed, we have to freeze the keybag so that
- // older clients (that don't support keystore encryption) do not attempt to
- // update the keybag.
- // Previously |using_explicit_passphrase|.
- optional bool keybag_is_frozen = 2;
-
- // Obsolete encryption fields. These were deprecated due to legacy versions
- // that understand their usage but did not perform encryption properly.
- // optional bool deprecated_encrypt_bookmarks = 3;
- // optional bool deprecated_encrypt_preferences = 4;
- // optional bool deprecated_encrypt_autofill_profile = 5;
- // optional bool deprecated_encrypt_autofill = 6;
- // optional bool deprecated_encrypt_themes = 7;
- // optional bool deprecated_encrypt_typed_urls = 8;
- // optional bool deprecated_encrypt_extensions = 9;
- // optional bool deprecated_encrypt_sessions = 10;
- // optional bool deprecated_encrypt_apps = 11;
- // optional bool deprecated_encrypt_search_engines = 12;
-
- // Booleans corresponding to whether a datatype should be encrypted.
- // Passwords are always encrypted, so we don't need a field here.
- // History delete directives need to be consumable by the server, and
- // thus can't be encrypted.
- // Synced Notifications need to be consumed by the server (the read flag)
- // and thus can't be encrypted.
- optional bool encrypt_bookmarks = 13;
- optional bool encrypt_preferences = 14;
- optional bool encrypt_autofill_profile = 15;
- optional bool encrypt_autofill = 16;
- optional bool encrypt_themes = 17;
- optional bool encrypt_typed_urls = 18;
- optional bool encrypt_extensions = 19;
- optional bool encrypt_sessions = 20;
- optional bool encrypt_apps = 21;
- optional bool encrypt_search_engines = 22;
-
- // Deprecated on clients where tab sync is enabled by default.
- // optional bool sync_tabs = 23;
-
- // If true, all current and future datatypes will be encrypted.
- optional bool encrypt_everything = 24;
-
- optional bool encrypt_extension_settings = 25;
- optional bool encrypt_app_notifications = 26;
- optional bool encrypt_app_settings = 27;
-
- // User device information. Contains information about each device that has a
- // sync-enabled Chrome browser connected to the user account.
- // This has been moved to the DeviceInfo message.
- // repeated DeviceInformation deprecated_device_information = 28;
-
- // Enable syncing favicons as part of tab sync.
- optional bool sync_tab_favicons = 29;
-
- // The state of the passphrase required to decrypt |encryption_keybag|.
- enum PassphraseType {
- // Gaia-based encryption passphrase. Deprecated.
- IMPLICIT_PASSPHRASE = 1;
- // Keystore key encryption passphrase. Uses |keystore_bootstrap| to
- // decrypt |encryption_keybag|.
- KEYSTORE_PASSPHRASE = 2;
- // Previous Gaia-based passphrase frozen and treated as a custom passphrase.
- FROZEN_IMPLICIT_PASSPHRASE = 3;
- // User provided custom passphrase.
- CUSTOM_PASSPHRASE = 4;
- }
- optional PassphraseType passphrase_type = 30
- [default = IMPLICIT_PASSPHRASE];
-
- // The keystore decryptor token blob. Encrypted with the keystore key, and
- // contains the encryption key used to decrypt |encryption_keybag|.
- // Only set if passphrase_state == KEYSTORE_PASSPHRASE.
- optional EncryptedData keystore_decryptor_token = 31;
-
- // The time (in epoch milliseconds) at which the keystore migration was
- // performed.
- optional int64 keystore_migration_time = 32;
-
- // The time (in epoch milliseconds) at which a custom passphrase was set.
- // Note: this field may not be set if the custom passphrase was applied before
- // this field was introduced.
- optional int64 custom_passphrase_time = 33;
-
- // Boolean corresponding to whether custom spelling dictionary should be
- // encrypted.
- optional bool encrypt_dictionary = 34;
-
- // Boolean corresponding to Whether to encrypt favicons data or not.
- optional bool encrypt_favicon_images = 35;
- optional bool encrypt_favicon_tracking = 36;
-
- // Boolean corresponding to whether articles should be encrypted.
- optional bool encrypt_articles = 37;
-
- // Boolean corresponding to whether app list items should be encrypted.
- optional bool encrypt_app_list = 38;
-}
-
diff --git a/chromium/sync/protocol/password_specifics.proto b/chromium/sync/protocol/password_specifics.proto
deleted file mode 100644
index ad5a5121cae..00000000000
--- a/chromium/sync/protocol/password_specifics.proto
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for password data.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "encryption.proto";
-
-// These are the properties that get serialized into the |encrypted| field of
-// PasswordSpecifics.
-message PasswordSpecificsData {
- optional int32 scheme = 1;
- optional string signon_realm = 2;
- optional string origin = 3;
- optional string action = 4;
- optional string username_element = 5;
- optional string username_value = 6;
- optional string password_element = 7;
- optional string password_value = 8;
- optional bool ssl_valid = 9;
- optional bool preferred = 10;
- optional int64 date_created = 11;
- optional bool blacklisted = 12;
-}
-
-// Properties of password sync objects.
-message PasswordSpecifics {
- // The actual password data. Contains an encrypted PasswordSpecificsData
- // message.
- optional EncryptedData encrypted = 1;
- // An unsynced field for use internally on the client. This field should
- // never be set in any network-based communications.
- optional PasswordSpecificsData client_only_encrypted_data = 2;
-}
-
diff --git a/chromium/sync/protocol/preference_specifics.proto b/chromium/sync/protocol/preference_specifics.proto
deleted file mode 100644
index 4827c5dc20c..00000000000
--- a/chromium/sync/protocol/preference_specifics.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for preferences.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of preference sync objects.
-message PreferenceSpecifics {
- optional string name = 1;
- optional string value = 2;
-}
-
diff --git a/chromium/sync/protocol/priority_preference_specifics.proto b/chromium/sync/protocol/priority_preference_specifics.proto
deleted file mode 100644
index 97b554e3ed6..00000000000
--- a/chromium/sync/protocol/priority_preference_specifics.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for priority preferences.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "preference_specifics.proto";
-
-// Properties of a synced priority preference.
-message PriorityPreferenceSpecifics {
- optional PreferenceSpecifics preference = 1;
-}
diff --git a/chromium/sync/protocol/proto_enum_conversions.cc b/chromium/sync/protocol/proto_enum_conversions.cc
deleted file mode 100644
index af958222414..00000000000
--- a/chromium/sync/protocol/proto_enum_conversions.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with the .proto files in this directory.
-
-#include "sync/protocol/proto_enum_conversions.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-
-namespace syncer {
-
-#define ASSERT_ENUM_BOUNDS(enum_parent, enum_type, enum_min, enum_max) \
- COMPILE_ASSERT(enum_parent::enum_type##_MIN == enum_parent::enum_min, \
- enum_type##_MIN_not_##enum_min); \
- COMPILE_ASSERT(enum_parent::enum_type##_MAX == enum_parent::enum_max, \
- enum_type##_MAX_not_##enum_max);
-
-#define ENUM_CASE(enum_parent, enum_value) \
- case enum_parent::enum_value: return #enum_value
-
-const char* GetAppListItemTypeString(
- sync_pb::AppListSpecifics::AppListItemType item_type) {
- ASSERT_ENUM_BOUNDS(sync_pb::AppListSpecifics, AppListItemType,
- TYPE_APP, TYPE_URL);
- switch (item_type) {
- ENUM_CASE(sync_pb::AppListSpecifics, TYPE_APP);
- ENUM_CASE(sync_pb::AppListSpecifics, TYPE_REMOVE_DEFAULT_APP);
- ENUM_CASE(sync_pb::AppListSpecifics, TYPE_FOLDER);
- ENUM_CASE(sync_pb::AppListSpecifics, TYPE_URL);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBrowserTypeString(
- sync_pb::SessionWindow::BrowserType browser_type) {
- ASSERT_ENUM_BOUNDS(sync_pb::SessionWindow, BrowserType,
- TYPE_TABBED, TYPE_POPUP);
- switch (browser_type) {
- ENUM_CASE(sync_pb::SessionWindow, TYPE_TABBED);
- ENUM_CASE(sync_pb::SessionWindow, TYPE_POPUP);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetPageTransitionString(
- sync_pb::SyncEnums::PageTransition page_transition) {
- ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, PageTransition,
- LINK, KEYWORD_GENERATED);
- switch (page_transition) {
- ENUM_CASE(sync_pb::SyncEnums, LINK);
- ENUM_CASE(sync_pb::SyncEnums, TYPED);
- ENUM_CASE(sync_pb::SyncEnums, AUTO_BOOKMARK);
- ENUM_CASE(sync_pb::SyncEnums, AUTO_SUBFRAME);
- ENUM_CASE(sync_pb::SyncEnums, MANUAL_SUBFRAME);
- ENUM_CASE(sync_pb::SyncEnums, GENERATED);
- ENUM_CASE(sync_pb::SyncEnums, AUTO_TOPLEVEL);
- ENUM_CASE(sync_pb::SyncEnums, FORM_SUBMIT);
- ENUM_CASE(sync_pb::SyncEnums, RELOAD);
- ENUM_CASE(sync_pb::SyncEnums, KEYWORD);
- ENUM_CASE(sync_pb::SyncEnums, KEYWORD_GENERATED);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetPageTransitionRedirectTypeString(
- sync_pb::SyncEnums::PageTransitionRedirectType
- page_transition_qualifier) {
- ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, PageTransitionRedirectType,
- CLIENT_REDIRECT, SERVER_REDIRECT);
- switch (page_transition_qualifier) {
- ENUM_CASE(sync_pb::SyncEnums, CLIENT_REDIRECT);
- ENUM_CASE(sync_pb::SyncEnums, SERVER_REDIRECT);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetUpdatesSourceString(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source) {
- ASSERT_ENUM_BOUNDS(sync_pb::GetUpdatesCallerInfo, GetUpdatesSource,
- UNKNOWN, DATATYPE_REFRESH);
- switch (updates_source) {
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, UNKNOWN);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, FIRST_UPDATE);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, LOCAL);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, NOTIFICATION);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, PERIODIC);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, SYNC_CYCLE_CONTINUATION);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, NEWLY_SUPPORTED_DATATYPE);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, MIGRATION);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, NEW_CLIENT);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, RECONFIGURATION);
- ENUM_CASE(sync_pb::GetUpdatesCallerInfo, DATATYPE_REFRESH);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetUpdatesOriginString(
- sync_pb::SyncEnums::GetUpdatesOrigin origin) {
- ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, GetUpdatesOrigin,
- UNKNOWN_ORIGIN, GU_TRIGGER);
- switch (origin) {
- ENUM_CASE(sync_pb::SyncEnums, UNKNOWN_ORIGIN);
- ENUM_CASE(sync_pb::SyncEnums, PERIODIC);
- ENUM_CASE(sync_pb::SyncEnums, NEWLY_SUPPORTED_DATATYPE);
- ENUM_CASE(sync_pb::SyncEnums, MIGRATION);
- ENUM_CASE(sync_pb::SyncEnums, NEW_CLIENT);
- ENUM_CASE(sync_pb::SyncEnums, RECONFIGURATION);
- ENUM_CASE(sync_pb::SyncEnums, GU_TRIGGER);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetResponseTypeString(
- sync_pb::CommitResponse::ResponseType response_type) {
- ASSERT_ENUM_BOUNDS(sync_pb::CommitResponse, ResponseType, SUCCESS,
- TRANSIENT_ERROR);
- switch (response_type) {
- ENUM_CASE(sync_pb::CommitResponse, SUCCESS);
- ENUM_CASE(sync_pb::CommitResponse, CONFLICT);
- ENUM_CASE(sync_pb::CommitResponse, RETRY);
- ENUM_CASE(sync_pb::CommitResponse, INVALID_MESSAGE);
- ENUM_CASE(sync_pb::CommitResponse, OVER_QUOTA);
- ENUM_CASE(sync_pb::CommitResponse, TRANSIENT_ERROR);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetErrorTypeString(sync_pb::SyncEnums::ErrorType error_type) {
- ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, ErrorType, SUCCESS, UNKNOWN);
- switch (error_type) {
- ENUM_CASE(sync_pb::SyncEnums, SUCCESS);
- ENUM_CASE(sync_pb::SyncEnums, ACCESS_DENIED);
- ENUM_CASE(sync_pb::SyncEnums, NOT_MY_BIRTHDAY);
- ENUM_CASE(sync_pb::SyncEnums, THROTTLED);
- ENUM_CASE(sync_pb::SyncEnums, AUTH_EXPIRED);
- ENUM_CASE(sync_pb::SyncEnums, USER_NOT_ACTIVATED);
- ENUM_CASE(sync_pb::SyncEnums, AUTH_INVALID);
- ENUM_CASE(sync_pb::SyncEnums, CLEAR_PENDING);
- ENUM_CASE(sync_pb::SyncEnums, TRANSIENT_ERROR);
- ENUM_CASE(sync_pb::SyncEnums, MIGRATION_DONE);
- ENUM_CASE(sync_pb::SyncEnums, DISABLED_BY_ADMIN);
- ENUM_CASE(sync_pb::SyncEnums, UNKNOWN);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetActionString(sync_pb::SyncEnums::Action action) {
- ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, Action,
- UPGRADE_CLIENT, UNKNOWN_ACTION);
- switch (action) {
- ENUM_CASE(sync_pb::SyncEnums, UPGRADE_CLIENT);
- ENUM_CASE(sync_pb::SyncEnums, CLEAR_USER_DATA_AND_RESYNC);
- ENUM_CASE(sync_pb::SyncEnums, ENABLE_SYNC_ON_ACCOUNT);
- ENUM_CASE(sync_pb::SyncEnums, STOP_AND_RESTART_SYNC);
- ENUM_CASE(sync_pb::SyncEnums, DISABLE_SYNC_ON_CLIENT);
- ENUM_CASE(sync_pb::SyncEnums, UNKNOWN_ACTION);
- }
- NOTREACHED();
- return "";
-
-}
-
-const char* GetDeviceTypeString(
- sync_pb::SyncEnums::DeviceType device_type) {
- ASSERT_ENUM_BOUNDS(sync_pb::SyncEnums, DeviceType, TYPE_WIN, TYPE_TABLET);
- switch (device_type) {
- ENUM_CASE(sync_pb::SyncEnums, TYPE_WIN);
- ENUM_CASE(sync_pb::SyncEnums, TYPE_MAC);
- ENUM_CASE(sync_pb::SyncEnums, TYPE_LINUX);
- ENUM_CASE(sync_pb::SyncEnums, TYPE_CROS);
- ENUM_CASE(sync_pb::SyncEnums, TYPE_OTHER);
- ENUM_CASE(sync_pb::SyncEnums, TYPE_PHONE);
- ENUM_CASE(sync_pb::SyncEnums, TYPE_TABLET);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetFaviconTypeString(
- sync_pb::SessionTab::FaviconType favicon_type) {
- ASSERT_ENUM_BOUNDS(sync_pb::SessionTab, FaviconType, TYPE_WEB_FAVICON,
- TYPE_WEB_FAVICON);
- switch (favicon_type) {
- ENUM_CASE(sync_pb::SessionTab, TYPE_WEB_FAVICON);
- }
- NOTREACHED();
- return "";
-}
-
-const char* PassphraseTypeString(
- sync_pb::NigoriSpecifics::PassphraseType type) {
- ASSERT_ENUM_BOUNDS(sync_pb::NigoriSpecifics, PassphraseType,
- IMPLICIT_PASSPHRASE, CUSTOM_PASSPHRASE);
- switch (type) {
- ENUM_CASE(sync_pb::NigoriSpecifics, IMPLICIT_PASSPHRASE);
- ENUM_CASE(sync_pb::NigoriSpecifics, KEYSTORE_PASSPHRASE);
- ENUM_CASE(sync_pb::NigoriSpecifics, FROZEN_IMPLICIT_PASSPHRASE);
- ENUM_CASE(sync_pb::NigoriSpecifics, CUSTOM_PASSPHRASE);
- }
- NOTREACHED();
- return "";
-}
-
-const char* SingletonEventTypeString(
- sync_pb::DebugEventInfo::SingletonEventType type) {
- ASSERT_ENUM_BOUNDS(sync_pb::DebugEventInfo, SingletonEventType,
- CONNECTION_STATUS_CHANGE, BOOTSTRAP_TOKEN_UPDATED);
- switch (type) {
- ENUM_CASE(sync_pb::DebugEventInfo, CONNECTION_STATUS_CHANGE);
- ENUM_CASE(sync_pb::DebugEventInfo, UPDATED_TOKEN);
- ENUM_CASE(sync_pb::DebugEventInfo, PASSPHRASE_REQUIRED);
- ENUM_CASE(sync_pb::DebugEventInfo, PASSPHRASE_ACCEPTED);
- ENUM_CASE(sync_pb::DebugEventInfo, INITIALIZATION_COMPLETE);
- ENUM_CASE(sync_pb::DebugEventInfo, STOP_SYNCING_PERMANENTLY);
- ENUM_CASE(sync_pb::DebugEventInfo, ENCRYPTION_COMPLETE);
- ENUM_CASE(sync_pb::DebugEventInfo, ACTIONABLE_ERROR);
- ENUM_CASE(sync_pb::DebugEventInfo, ENCRYPTED_TYPES_CHANGED);
- ENUM_CASE(sync_pb::DebugEventInfo, PASSPHRASE_TYPE_CHANGED);
- ENUM_CASE(sync_pb::DebugEventInfo, KEYSTORE_TOKEN_UPDATED);
- ENUM_CASE(sync_pb::DebugEventInfo, CONFIGURE_COMPLETE);
- ENUM_CASE(sync_pb::DebugEventInfo, BOOTSTRAP_TOKEN_UPDATED);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBlockedStateString(
- sync_pb::TabNavigation::BlockedState state) {
- ASSERT_ENUM_BOUNDS(sync_pb::TabNavigation, BlockedState,
- STATE_ALLOWED, STATE_BLOCKED);
- switch (state) {
- ENUM_CASE(sync_pb::TabNavigation, STATE_ALLOWED);
- ENUM_CASE(sync_pb::TabNavigation, STATE_BLOCKED);
- }
- NOTREACHED();
- return "";
-}
-
-#undef ASSERT_ENUM_BOUNDS
-#undef ENUM_CASE
-
-} // namespace syncer
diff --git a/chromium/sync/protocol/proto_enum_conversions.h b/chromium/sync/protocol/proto_enum_conversions.h
deleted file mode 100644
index 6812cb7b8d0..00000000000
--- a/chromium/sync/protocol/proto_enum_conversions.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_PROTOCOL_PROTO_ENUM_CONVERSIONS_H_
-#define SYNC_PROTOCOL_PROTO_ENUM_CONVERSIONS_H_
-
-// Keep this file in sync with the .proto files in this directory.
-
-#include "sync/base/sync_export.h"
-#include "sync/protocol/app_list_specifics.pb.h"
-#include "sync/protocol/client_debug_info.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-
-// Utility functions to get the string equivalent for some sync proto
-// enums.
-
-namespace syncer {
-
-// The returned strings (which don't have to be freed) are in ASCII.
-// The result of passing in an invalid enum value is undefined.
-
-SYNC_EXPORT_PRIVATE const char* GetAppListItemTypeString(
- sync_pb::AppListSpecifics::AppListItemType item_type);
-
-SYNC_EXPORT_PRIVATE const char* GetBrowserTypeString(
- sync_pb::SessionWindow::BrowserType browser_type);
-
-SYNC_EXPORT_PRIVATE const char* GetPageTransitionString(
- sync_pb::SyncEnums::PageTransition page_transition);
-
-SYNC_EXPORT_PRIVATE const char* GetPageTransitionRedirectTypeString(
- sync_pb::SyncEnums::PageTransitionRedirectType
- redirect_type);
-
-SYNC_EXPORT const char* GetUpdatesSourceString(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source);
-
-SYNC_EXPORT const char* GetUpdatesOriginString(
- sync_pb::SyncEnums::GetUpdatesOrigin origin);
-
-SYNC_EXPORT_PRIVATE const char* GetResponseTypeString(
- sync_pb::CommitResponse::ResponseType response_type);
-
-SYNC_EXPORT_PRIVATE const char* GetErrorTypeString(
- sync_pb::SyncEnums::ErrorType error_type);
-
-SYNC_EXPORT_PRIVATE const char* GetActionString(
- sync_pb::SyncEnums::Action action);
-
-const char* GetDeviceTypeString(sync_pb::SyncEnums::DeviceType device_type);
-
-const char* GetFaviconTypeString(sync_pb::SessionTab::FaviconType favicon_type);
-
-const char* PassphraseTypeString(sync_pb::NigoriSpecifics::PassphraseType type);
-
-const char* SingletonEventTypeString(
- sync_pb::DebugEventInfo::SingletonEventType type);
-
-const char* GetBlockedStateString(sync_pb::TabNavigation::BlockedState state);
-
-} // namespace syncer
-
-#endif // SYNC_PROTOCOL_PROTO_ENUM_CONVERSIONS_H_
diff --git a/chromium/sync/protocol/proto_enum_conversions_unittest.cc b/chromium/sync/protocol/proto_enum_conversions_unittest.cc
deleted file mode 100644
index 7b323a53a8f..00000000000
--- a/chromium/sync/protocol/proto_enum_conversions_unittest.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with the .proto files in this directory.
-
-#include "sync/protocol/proto_enum_conversions.h"
-
-#include <string>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class ProtoEnumConversionsTest : public testing::Test {
-};
-
-template <class T>
-void TestEnumStringFunction(const char* (*enum_string_fn)(T),
- int enum_min, int enum_max) {
- for (int i = enum_min; i <= enum_max; ++i) {
- const std::string& str = enum_string_fn(static_cast<T>(i));
- EXPECT_FALSE(str.empty());
- }
-}
-
-TEST_F(ProtoEnumConversionsTest, GetAppListItemTypeString) {
- TestEnumStringFunction(
- GetAppListItemTypeString,
- sync_pb::AppListSpecifics::AppListItemType_MIN,
- sync_pb::AppListSpecifics::AppListItemType_MAX);
-}
-
-TEST_F(ProtoEnumConversionsTest, GetBrowserTypeString) {
- TestEnumStringFunction(
- GetBrowserTypeString,
- sync_pb::SessionWindow::BrowserType_MIN,
- sync_pb::SessionWindow::BrowserType_MAX);
-}
-
-TEST_F(ProtoEnumConversionsTest, GetPageTransitionString) {
- TestEnumStringFunction(
- GetPageTransitionString,
- sync_pb::SyncEnums::PageTransition_MIN,
- sync_pb::SyncEnums::PageTransition_MAX);
-}
-
-TEST_F(ProtoEnumConversionsTest, GetPageTransitionQualifierString) {
- TestEnumStringFunction(
- GetPageTransitionRedirectTypeString,
- sync_pb::SyncEnums::PageTransitionRedirectType_MIN,
- sync_pb::SyncEnums::PageTransitionRedirectType_MAX);
-}
-
-TEST_F(ProtoEnumConversionsTest, GetUpdatesSourceString) {
- TestEnumStringFunction(
- GetUpdatesSourceString,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource_MIN,
- sync_pb::GetUpdatesCallerInfo::PERIODIC);
- TestEnumStringFunction(
- GetUpdatesSourceString,
- sync_pb::GetUpdatesCallerInfo::NEWLY_SUPPORTED_DATATYPE,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource_MAX);
-}
-
-TEST_F(ProtoEnumConversionsTest, GetResponseTypeString) {
- TestEnumStringFunction(
- GetResponseTypeString,
- sync_pb::CommitResponse::ResponseType_MIN,
- sync_pb::CommitResponse::ResponseType_MAX);
-}
-
-TEST_F(ProtoEnumConversionsTest, GetErrorTypeString) {
- // We have a gap, so we need to do two ranges.
- TestEnumStringFunction(
- GetErrorTypeString,
- sync_pb::SyncEnums::ErrorType_MIN,
- sync_pb::SyncEnums::MIGRATION_DONE);
- TestEnumStringFunction(
- GetErrorTypeString,
- sync_pb::SyncEnums::UNKNOWN,
- sync_pb::SyncEnums::ErrorType_MAX);
-
-}
-
-TEST_F(ProtoEnumConversionsTest, GetActionString) {
- TestEnumStringFunction(
- GetActionString,
- sync_pb::SyncEnums::Action_MIN,
- sync_pb::SyncEnums::Action_MAX);
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/protocol/proto_value_conversions.cc b/chromium/sync/protocol/proto_value_conversions.cc
deleted file mode 100644
index 5c8a7ab6457..00000000000
--- a/chromium/sync/protocol/proto_value_conversions.cc
+++ /dev/null
@@ -1,1028 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with the .proto files in this directory.
-
-#include "sync/protocol/proto_value_conversions.h"
-
-#include <string>
-
-#include "base/base64.h"
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/protocol/app_list_specifics.pb.h"
-#include "sync/protocol/app_notification_specifics.pb.h"
-#include "sync/protocol/app_setting_specifics.pb.h"
-#include "sync/protocol/app_specifics.pb.h"
-#include "sync/protocol/autofill_specifics.pb.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/dictionary_specifics.pb.h"
-#include "sync/protocol/encryption.pb.h"
-#include "sync/protocol/experiments_specifics.pb.h"
-#include "sync/protocol/extension_setting_specifics.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/favicon_image_specifics.pb.h"
-#include "sync/protocol/favicon_tracking_specifics.pb.h"
-#include "sync/protocol/history_delete_directive_specifics.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/priority_preference_specifics.pb.h"
-#include "sync/protocol/proto_enum_conversions.h"
-#include "sync/protocol/search_engine_specifics.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/protocol/synced_notification_specifics.pb.h"
-#include "sync/protocol/theme_specifics.pb.h"
-#include "sync/protocol/typed_url_specifics.pb.h"
-#include "sync/protocol/unique_position.pb.h"
-
-namespace syncer {
-
-namespace {
-
-// Basic Type -> Value functions.
-
-base::StringValue* MakeInt64Value(int64 x) {
- return new base::StringValue(base::Int64ToString(x));
-}
-
-// TODO(akalin): Perhaps make JSONWriter support BinaryValue and use
-// that instead of a StringValue.
-base::StringValue* MakeBytesValue(const std::string& bytes) {
- std::string bytes_base64;
- base::Base64Encode(bytes, &bytes_base64);
- return new base::StringValue(bytes_base64);
-}
-
-base::StringValue* MakeStringValue(const std::string& str) {
- return new base::StringValue(str);
-}
-
-// T is the enum type.
-template <class T>
-base::StringValue* MakeEnumValue(T t, const char* (*converter_fn)(T)) {
- return new base::StringValue(converter_fn(t));
-}
-
-// T is the field type, F is either RepeatedField or RepeatedPtrField,
-// and V is a subclass of Value.
-template <class T, class F, class V>
-base::ListValue* MakeRepeatedValue(const F& fields, V* (*converter_fn)(T)) {
- base::ListValue* list = new base::ListValue();
- for (typename F::const_iterator it = fields.begin(); it != fields.end();
- ++it) {
- list->Append(converter_fn(*it));
- }
- return list;
-}
-
-} // namespace
-
-// Helper macros to reduce the amount of boilerplate.
-
-#define SET(field, fn) \
- if (proto.has_##field()) { \
- value->Set(#field, fn(proto.field())); \
- }
-#define SET_REP(field, fn) \
- value->Set(#field, MakeRepeatedValue(proto.field(), fn))
-#define SET_ENUM(field, fn) \
- value->Set(#field, MakeEnumValue(proto.field(), fn))
-
-#define SET_BOOL(field) SET(field, new base::FundamentalValue)
-#define SET_BYTES(field) SET(field, MakeBytesValue)
-#define SET_INT32(field) SET(field, MakeInt64Value)
-#define SET_INT32_REP(field) SET_REP(field, MakeInt64Value)
-#define SET_INT64(field) SET(field, MakeInt64Value)
-#define SET_INT64_REP(field) SET_REP(field, MakeInt64Value)
-#define SET_STR(field) SET(field, new base::StringValue)
-#define SET_STR_REP(field) \
- value->Set(#field, \
- MakeRepeatedValue<const std::string&, \
- google::protobuf::RepeatedPtrField< \
- std::string >, \
- base::StringValue>(proto.field(), \
- MakeStringValue))
-#define SET_EXPERIMENT_ENABLED_FIELD(field) \
- do { \
- if (proto.has_##field() && \
- proto.field().has_enabled()) { \
- value->Set(#field, \
- new base::FundamentalValue( \
- proto.field().enabled())); \
- } \
- } while (0)
-
-#define SET_FIELD(field, fn) \
- do { \
- if (specifics.has_##field()) { \
- value->Set(#field, fn(specifics.field())); \
- } \
- } while (0)
-
-// If you add another macro, don't forget to add an #undef at the end
-// of this file, too.
-
-base::DictionaryValue* EncryptedDataToValue(
- const sync_pb::EncryptedData& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(key_name);
- // TODO(akalin): Shouldn't blob be of type bytes instead of string?
- SET_BYTES(blob);
- return value;
-}
-
-base::DictionaryValue* AppSettingsToValue(
- const sync_pb::AppNotificationSettings& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_BOOL(initial_setup_done);
- SET_BOOL(disabled);
- SET_STR(oauth_client_id);
- return value;
-}
-
-base::DictionaryValue* SessionHeaderToValue(
- const sync_pb::SessionHeader& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_REP(window, SessionWindowToValue);
- SET_STR(client_name);
- SET_ENUM(device_type, GetDeviceTypeString);
- return value;
-}
-
-base::DictionaryValue* SessionTabToValue(const sync_pb::SessionTab& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(tab_id);
- SET_INT32(window_id);
- SET_INT32(tab_visual_index);
- SET_INT32(current_navigation_index);
- SET_BOOL(pinned);
- SET_STR(extension_app_id);
- SET_REP(navigation, TabNavigationToValue);
- SET_BYTES(favicon);
- SET_ENUM(favicon_type, GetFaviconTypeString);
- SET_STR(favicon_source);
- return value;
-}
-
-base::DictionaryValue* SessionWindowToValue(
- const sync_pb::SessionWindow& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(window_id);
- SET_INT32(selected_tab_index);
- SET_INT32_REP(tab);
- SET_ENUM(browser_type, GetBrowserTypeString);
- return value;
-}
-
-base::DictionaryValue* TabNavigationToValue(
- const sync_pb::TabNavigation& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(virtual_url);
- SET_STR(referrer);
- SET_STR(title);
- SET_STR(state);
- SET_ENUM(page_transition, GetPageTransitionString);
- SET_ENUM(redirect_type, GetPageTransitionRedirectTypeString);
- SET_INT32(unique_id);
- SET_INT64(timestamp_msec);
- SET_BOOL(navigation_forward_back);
- SET_BOOL(navigation_from_address_bar);
- SET_BOOL(navigation_home_page);
- SET_BOOL(navigation_chain_start);
- SET_BOOL(navigation_chain_end);
- SET_INT64(global_id);
- SET_STR(search_terms);
- SET_STR(favicon_url);
- SET_ENUM(blocked_state, GetBlockedStateString);
- SET_STR_REP(content_pack_categories);
- SET_INT32(http_status_code);
- return value;
-}
-
-base::DictionaryValue* PasswordSpecificsDataToValue(
- const sync_pb::PasswordSpecificsData& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(scheme);
- SET_STR(signon_realm);
- SET_STR(origin);
- SET_STR(action);
- SET_STR(username_element);
- SET_STR(username_value);
- SET_STR(password_element);
- value->SetString("password_value", "<redacted>");
- SET_BOOL(ssl_valid);
- SET_BOOL(preferred);
- SET_INT64(date_created);
- SET_BOOL(blacklisted);
- return value;
-}
-
-base::DictionaryValue* GlobalIdDirectiveToValue(
- const sync_pb::GlobalIdDirective& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT64_REP(global_id);
- SET_INT64(start_time_usec);
- SET_INT64(end_time_usec);
- return value;
-}
-
-base::DictionaryValue* TimeRangeDirectiveToValue(
- const sync_pb::TimeRangeDirective& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT64(start_time_usec);
- SET_INT64(end_time_usec);
- return value;
-}
-
-base::DictionaryValue* SyncedNotificationImageToValue(
- const sync_pb::SyncedNotificationImage& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(url);
- SET_STR(alt_text);
- SET_INT32(preferred_width);
- SET_INT32(preferred_height);
- return value;
-}
-
-base::DictionaryValue* SyncedNotificationProfileImageToValue(
- const sync_pb::SyncedNotificationProfileImage& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(image_url);
- SET_STR(oid);
- SET_STR(display_name);
- return value;
-}
-
-base::DictionaryValue* MediaToValue(
- const sync_pb::Media& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(image, SyncedNotificationImageToValue);
- return value;
-}
-
-base::DictionaryValue* SyncedNotificationActionToValue(
- const sync_pb::SyncedNotificationAction& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(text);
- SET(icon, SyncedNotificationImageToValue);
- SET_STR(url);
- SET_STR(request_data);
- SET_STR(accessibility_label);
- return value;
-}
-
-base::DictionaryValue* SyncedNotificationDestiationToValue(
- const sync_pb::SyncedNotificationDestination& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(text);
- SET(icon, SyncedNotificationImageToValue);
- SET_STR(url);
- SET_STR(accessibility_label);
- return value;
-}
-
-base::DictionaryValue* TargetToValue(
- const sync_pb::Target& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(destination, SyncedNotificationDestiationToValue);
- SET(action, SyncedNotificationActionToValue);
- SET_STR(target_key);
- return value;
-}
-
-base::DictionaryValue* SimpleCollapsedLayoutToValue(
- const sync_pb::SimpleCollapsedLayout& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(app_icon, SyncedNotificationImageToValue);
- SET_REP(profile_image, SyncedNotificationProfileImageToValue);
- SET_STR(heading);
- SET_STR(description);
- SET_STR(annotation);
- SET_REP(media, MediaToValue);
- return value;
-}
-
-base::DictionaryValue* CollapsedInfoToValue(
- const sync_pb::CollapsedInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(simple_collapsed_layout, SimpleCollapsedLayoutToValue);
- SET_INT64(creation_timestamp_usec);
- SET(default_destination, SyncedNotificationDestiationToValue);
- SET_REP(target, TargetToValue);
- return value;
-}
-
-base::DictionaryValue* SyncedNotificationToValue(
- const sync_pb::SyncedNotification& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(type);
- SET_STR(external_id);
- // TODO(petewil) Add SyncedNotificationCreator here if we ever need it.
- return value;
-}
-
-base::DictionaryValue* RenderInfoToValue(
- const sync_pb::SyncedNotificationRenderInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- // TODO(petewil): Add the expanded info values once we start using them.
- SET(collapsed_info, CollapsedInfoToValue);
- return value;
-}
-
-base::DictionaryValue* CoalescedNotificationToValue(
- const sync_pb::CoalescedSyncedNotification& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(key);
- SET_STR(app_id);
- SET_REP(notification, SyncedNotificationToValue);
- SET(render_info, RenderInfoToValue);
- SET_INT32(read_state);
- SET_INT64(creation_time_msec);
- SET_INT32(priority);
- return value;
-}
-
-base::DictionaryValue* AppListSpecificsToValue(
- const sync_pb::AppListSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(item_id);
- SET_ENUM(item_type, GetAppListItemTypeString);
- SET_STR(item_name);
- SET_STR(parent_id);
- SET_STR(page_ordinal);
- SET_STR(item_ordinal);
-
- return value;
-}
-
-base::DictionaryValue* AppNotificationToValue(
- const sync_pb::AppNotification& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(guid);
- SET_STR(app_id);
- SET_INT64(creation_timestamp_ms);
- SET_STR(title);
- SET_STR(body_text);
- SET_STR(link_url);
- SET_STR(link_text);
- return value;
-}
-
-base::DictionaryValue* AppSettingSpecificsToValue(
- const sync_pb::AppSettingSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(extension_setting, ExtensionSettingSpecificsToValue);
- return value;
-}
-
-base::DictionaryValue* AppSpecificsToValue(
- const sync_pb::AppSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(extension, ExtensionSpecificsToValue);
- SET(notification_settings, AppSettingsToValue);
- SET_STR(app_launch_ordinal);
- SET_STR(page_ordinal);
-
- return value;
-}
-
-base::DictionaryValue* AutofillSpecificsToValue(
- const sync_pb::AutofillSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(name);
- SET_STR(value);
- SET_INT64_REP(usage_timestamp);
- SET(profile, AutofillProfileSpecificsToValue);
- return value;
-}
-
-base::DictionaryValue* AutofillProfileSpecificsToValue(
- const sync_pb::AutofillProfileSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(guid);
- SET_STR(origin);
-
- SET_STR_REP(name_first);
- SET_STR_REP(name_middle);
- SET_STR_REP(name_last);
- SET_STR_REP(email_address);
- SET_STR(company_name);
-
- SET_STR(address_home_line1);
- SET_STR(address_home_line2);
- SET_STR(address_home_city);
- SET_STR(address_home_state);
- SET_STR(address_home_zip);
- SET_STR(address_home_country);
-
- SET_STR_REP(phone_home_whole_number);
- return value;
-}
-
-base::DictionaryValue* MetaInfoToValue(
- const sync_pb::MetaInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(key);
- SET_STR(value);
- return value;
-}
-
-base::DictionaryValue* BookmarkSpecificsToValue(
- const sync_pb::BookmarkSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(url);
- SET_BYTES(favicon);
- SET_STR(title);
- SET_INT64(creation_time_us);
- SET_STR(icon_url);
- SET_REP(meta_info, &MetaInfoToValue);
- return value;
-}
-
-base::DictionaryValue* DeviceInfoSpecificsToValue(
- const sync_pb::DeviceInfoSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(cache_guid);
- SET_STR(client_name);
- SET_ENUM(device_type, GetDeviceTypeString);
- SET_STR(sync_user_agent);
- SET_STR(chrome_version);
- return value;
-}
-
-base::DictionaryValue* DictionarySpecificsToValue(
- const sync_pb::DictionarySpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(word);
- return value;
-}
-
-namespace {
-
-base::DictionaryValue* FaviconSyncFlagsToValue(
- const sync_pb::FaviconSyncFlags& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_BOOL(enabled);
- SET_INT32(favicon_sync_limit);
- return value;
-}
-
-} // namespace
-
-base::DictionaryValue* ExperimentsSpecificsToValue(
- const sync_pb::ExperimentsSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_EXPERIMENT_ENABLED_FIELD(keystore_encryption);
- SET_EXPERIMENT_ENABLED_FIELD(history_delete_directives);
- SET_EXPERIMENT_ENABLED_FIELD(autofill_culling);
- SET_EXPERIMENT_ENABLED_FIELD(pre_commit_update_avoidance);
- if (proto.has_favicon_sync())
- SET(favicon_sync, FaviconSyncFlagsToValue);
- return value;
-}
-
-base::DictionaryValue* ExtensionSettingSpecificsToValue(
- const sync_pb::ExtensionSettingSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(extension_id);
- SET_STR(key);
- SET_STR(value);
- return value;
-}
-
-base::DictionaryValue* ExtensionSpecificsToValue(
- const sync_pb::ExtensionSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(id);
- SET_STR(version);
- SET_STR(update_url);
- SET_BOOL(enabled);
- SET_BOOL(incognito_enabled);
- SET_STR(name);
- return value;
-}
-
-namespace {
-base::DictionaryValue* FaviconDataToValue(
- const sync_pb::FaviconData& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_BYTES(favicon);
- SET_INT32(width);
- SET_INT32(height);
- return value;
-}
-} // namespace
-
-base::DictionaryValue* FaviconImageSpecificsToValue(
- const sync_pb::FaviconImageSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(favicon_url);
- SET(favicon_web, FaviconDataToValue);
- SET(favicon_web_32, FaviconDataToValue);
- SET(favicon_touch_64, FaviconDataToValue);
- SET(favicon_touch_precomposed_64, FaviconDataToValue);
- return value;
-}
-
-base::DictionaryValue* FaviconTrackingSpecificsToValue(
- const sync_pb::FaviconTrackingSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(favicon_url);
- SET_INT64(last_visit_time_ms)
- SET_BOOL(is_bookmarked);
- return value;
-}
-
-base::DictionaryValue* HistoryDeleteDirectiveSpecificsToValue(
- const sync_pb::HistoryDeleteDirectiveSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(global_id_directive, GlobalIdDirectiveToValue);
- SET(time_range_directive, TimeRangeDirectiveToValue);
- return value;
-}
-
-base::DictionaryValue* ManagedUserSettingSpecificsToValue(
- const sync_pb::ManagedUserSettingSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(name);
- SET_STR(value);
- return value;
-}
-
-base::DictionaryValue* ManagedUserSpecificsToValue(
- const sync_pb::ManagedUserSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(id);
- SET_STR(name);
- SET_BOOL(acknowledged);
- SET_STR(master_key);
- SET_STR(chrome_avatar);
- SET_STR(chromeos_avatar);
- return value;
-}
-
-base::DictionaryValue* NigoriSpecificsToValue(
- const sync_pb::NigoriSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(encryption_keybag, EncryptedDataToValue);
- SET_BOOL(keybag_is_frozen);
- SET_BOOL(encrypt_bookmarks);
- SET_BOOL(encrypt_preferences);
- SET_BOOL(encrypt_autofill_profile);
- SET_BOOL(encrypt_autofill);
- SET_BOOL(encrypt_themes);
- SET_BOOL(encrypt_typed_urls);
- SET_BOOL(encrypt_extension_settings);
- SET_BOOL(encrypt_extensions);
- SET_BOOL(encrypt_sessions);
- SET_BOOL(encrypt_app_settings);
- SET_BOOL(encrypt_apps);
- SET_BOOL(encrypt_search_engines);
- SET_BOOL(encrypt_dictionary);
- SET_BOOL(encrypt_articles);
- SET_BOOL(encrypt_app_list);
- SET_BOOL(encrypt_everything);
- SET_BOOL(sync_tab_favicons);
- SET_ENUM(passphrase_type, PassphraseTypeString);
- SET(keystore_decryptor_token, EncryptedDataToValue);
- SET_INT64(keystore_migration_time);
- SET_INT64(custom_passphrase_time);
- return value;
-}
-
-base::DictionaryValue* ArticlePageToValue(
- const sync_pb::ArticlePage& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(url);
- return value;
-}
-
-base::DictionaryValue* ArticleSpecificsToValue(
- const sync_pb::ArticleSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(entry_id);
- SET_STR(title);
- SET_REP(pages, ArticlePageToValue);
- return value;
-}
-
-base::DictionaryValue* PasswordSpecificsToValue(
- const sync_pb::PasswordSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(encrypted, EncryptedDataToValue);
- return value;
-}
-
-base::DictionaryValue* PreferenceSpecificsToValue(
- const sync_pb::PreferenceSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(name);
- SET_STR(value);
- return value;
-}
-
-base::DictionaryValue* PriorityPreferenceSpecificsToValue(
- const sync_pb::PriorityPreferenceSpecifics& specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_FIELD(preference, PreferenceSpecificsToValue);
- return value;
-}
-
-base::DictionaryValue* SyncedNotificationSpecificsToValue(
- const sync_pb::SyncedNotificationSpecifics& proto) {
- // There is a lot of data, for now just use heading, description, key, and
- // the read state.
- // TODO(petewil): Eventually add more data here.
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(coalesced_notification, CoalescedNotificationToValue);
- return value;
-}
-
-base::DictionaryValue* SearchEngineSpecificsToValue(
- const sync_pb::SearchEngineSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(short_name);
- SET_STR(keyword);
- SET_STR(favicon_url);
- SET_STR(url);
- SET_BOOL(safe_for_autoreplace);
- SET_STR(originating_url);
- SET_INT64(date_created);
- SET_STR(input_encodings);
- SET_BOOL(show_in_default_list);
- SET_STR(suggestions_url);
- SET_INT32(prepopulate_id);
- SET_BOOL(autogenerate_keyword);
- SET_STR(instant_url);
- SET_INT64(last_modified);
- SET_STR(sync_guid);
- SET_STR_REP(alternate_urls);
- SET_STR(search_terms_replacement_key);
- SET_STR(image_url);
- SET_STR(search_url_post_params);
- SET_STR(suggestions_url_post_params);
- SET_STR(instant_url_post_params);
- SET_STR(image_url_post_params);
- SET_STR(new_tab_url);
- return value;
-}
-
-base::DictionaryValue* SessionSpecificsToValue(
- const sync_pb::SessionSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(session_tag);
- SET(header, SessionHeaderToValue);
- SET(tab, SessionTabToValue);
- SET_INT32(tab_node_id);
- return value;
-}
-
-base::DictionaryValue* ThemeSpecificsToValue(
- const sync_pb::ThemeSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_BOOL(use_custom_theme);
- SET_BOOL(use_system_theme_by_default);
- SET_STR(custom_theme_name);
- SET_STR(custom_theme_id);
- SET_STR(custom_theme_update_url);
- return value;
-}
-
-base::DictionaryValue* TypedUrlSpecificsToValue(
- const sync_pb::TypedUrlSpecifics& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(url);
- SET_STR(title);
- SET_BOOL(hidden);
- SET_INT64_REP(visits);
- SET_INT32_REP(visit_transitions);
- return value;
-}
-
-base::DictionaryValue* EntitySpecificsToValue(
- const sync_pb::EntitySpecifics& specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_FIELD(app, AppSpecificsToValue);
- SET_FIELD(app_list, AppListSpecificsToValue);
- SET_FIELD(app_notification, AppNotificationToValue);
- SET_FIELD(app_setting, AppSettingSpecificsToValue);
- SET_FIELD(article, ArticleSpecificsToValue);
- SET_FIELD(autofill, AutofillSpecificsToValue);
- SET_FIELD(autofill_profile, AutofillProfileSpecificsToValue);
- SET_FIELD(bookmark, BookmarkSpecificsToValue);
- SET_FIELD(device_info, DeviceInfoSpecificsToValue);
- SET_FIELD(dictionary, DictionarySpecificsToValue);
- SET_FIELD(experiments, ExperimentsSpecificsToValue);
- SET_FIELD(extension, ExtensionSpecificsToValue);
- SET_FIELD(extension_setting, ExtensionSettingSpecificsToValue);
- SET_FIELD(favicon_image, FaviconImageSpecificsToValue);
- SET_FIELD(favicon_tracking, FaviconTrackingSpecificsToValue);
- SET_FIELD(history_delete_directive, HistoryDeleteDirectiveSpecificsToValue);
- SET_FIELD(managed_user_setting, ManagedUserSettingSpecificsToValue);
- SET_FIELD(managed_user, ManagedUserSpecificsToValue);
- SET_FIELD(nigori, NigoriSpecificsToValue);
- SET_FIELD(password, PasswordSpecificsToValue);
- SET_FIELD(preference, PreferenceSpecificsToValue);
- SET_FIELD(priority_preference, PriorityPreferenceSpecificsToValue);
- SET_FIELD(search_engine, SearchEngineSpecificsToValue);
- SET_FIELD(session, SessionSpecificsToValue);
- SET_FIELD(synced_notification, SyncedNotificationSpecificsToValue);
- SET_FIELD(theme, ThemeSpecificsToValue);
- SET_FIELD(typed_url, TypedUrlSpecificsToValue);
- return value;
-}
-
-namespace {
-
-base::StringValue* UniquePositionToStringValue(
- const sync_pb::UniquePosition& proto) {
- UniquePosition pos = UniquePosition::FromProto(proto);
- return new base::StringValue(pos.ToDebugString());
-}
-
-base::DictionaryValue* SyncEntityToValue(const sync_pb::SyncEntity& proto,
- bool include_specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(id_string);
- SET_STR(parent_id_string);
- SET_STR(old_parent_id);
- SET_INT64(version);
- SET_INT64(mtime);
- SET_INT64(ctime);
- SET_STR(name);
- SET_STR(non_unique_name);
- SET_INT64(sync_timestamp);
- SET_STR(server_defined_unique_tag);
- SET_INT64(position_in_parent);
- SET(unique_position, UniquePositionToStringValue);
- SET_STR(insert_after_item_id);
- SET_BOOL(deleted);
- SET_STR(originator_cache_guid);
- SET_STR(originator_client_item_id);
- if (include_specifics)
- SET(specifics, EntitySpecificsToValue);
- SET_BOOL(folder);
- SET_STR(client_defined_unique_tag);
- return value;
-}
-
-base::ListValue* SyncEntitiesToValue(
- const ::google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>& entities,
- bool include_specifics) {
- base::ListValue* list = new base::ListValue();
- ::google::protobuf::RepeatedPtrField<sync_pb::SyncEntity>::const_iterator it;
- for (it = entities.begin(); it != entities.end(); ++it) {
- list->Append(SyncEntityToValue(*it, include_specifics));
- }
-
- return list;
-}
-
-base::DictionaryValue* ChromiumExtensionActivityToValue(
- const sync_pb::ChromiumExtensionsActivity& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(extension_id);
- SET_INT32(bookmark_writes_since_last_commit);
- return value;
-}
-
-base::DictionaryValue* CommitMessageToValue(
- const sync_pb::CommitMessage& proto,
- bool include_specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- value->Set("entries",
- SyncEntitiesToValue(proto.entries(), include_specifics));
- SET_STR(cache_guid);
- SET_REP(extensions_activity, ChromiumExtensionActivityToValue);
- SET(config_params, ClientConfigParamsToValue);
- return value;
-}
-
-base::DictionaryValue* GetUpdateTriggersToValue(
- const sync_pb::GetUpdateTriggers& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR_REP(notification_hint);
- SET_BOOL(client_dropped_hints);
- SET_BOOL(invalidations_out_of_sync);
- SET_INT64(local_modification_nudges);
- SET_INT64(datatype_refresh_nudges);
- return value;
-}
-
-base::DictionaryValue* DataTypeProgressMarkerToValue(
- const sync_pb::DataTypeProgressMarker& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(data_type_id);
- SET_BYTES(token);
- SET_INT64(timestamp_token_for_migration);
- SET_STR(notification_hint);
- SET(get_update_triggers, GetUpdateTriggersToValue);
- return value;
-}
-
-base::DictionaryValue* GetUpdatesCallerInfoToValue(
- const sync_pb::GetUpdatesCallerInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_ENUM(source, GetUpdatesSourceString);
- SET_BOOL(notifications_enabled);
- return value;
-}
-
-base::DictionaryValue* GetUpdatesMessageToValue(
- const sync_pb::GetUpdatesMessage& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(caller_info, GetUpdatesCallerInfoToValue);
- SET_BOOL(fetch_folders);
- SET_INT32(batch_size);
- SET_REP(from_progress_marker, DataTypeProgressMarkerToValue);
- SET_BOOL(streaming);
- SET_BOOL(need_encryption_key);
- SET_BOOL(create_mobile_bookmarks_folder);
- SET_ENUM(get_updates_origin, GetUpdatesOriginString);
- return value;
-}
-
-base::DictionaryValue* ClientStatusToValue(const sync_pb::ClientStatus& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_BOOL(hierarchy_conflict_detected);
- return value;
-}
-
-base::DictionaryValue* EntryResponseToValue(
- const sync_pb::CommitResponse::EntryResponse& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_ENUM(response_type, GetResponseTypeString);
- SET_STR(id_string);
- SET_STR(parent_id_string);
- SET_INT64(position_in_parent);
- SET_INT64(version);
- SET_STR(name);
- SET_STR(error_message);
- SET_INT64(mtime);
- return value;
-}
-
-base::DictionaryValue* CommitResponseToValue(
- const sync_pb::CommitResponse& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_REP(entryresponse, EntryResponseToValue);
- return value;
-}
-
-base::DictionaryValue* GetUpdatesResponseToValue(
- const sync_pb::GetUpdatesResponse& proto,
- bool include_specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- value->Set("entries",
- SyncEntitiesToValue(proto.entries(), include_specifics));
- SET_INT64(changes_remaining);
- SET_REP(new_progress_marker, DataTypeProgressMarkerToValue);
- return value;
-}
-
-base::DictionaryValue* ClientCommandToValue(
- const sync_pb::ClientCommand& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(set_sync_poll_interval);
- SET_INT32(set_sync_long_poll_interval);
- SET_INT32(max_commit_batch_size);
- SET_INT32(sessions_commit_delay_seconds);
- SET_INT32(throttle_delay_seconds);
- SET_INT32(client_invalidation_hint_buffer_size);
- return value;
-}
-
-base::DictionaryValue* ErrorToValue(
- const sync_pb::ClientToServerResponse::Error& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_ENUM(error_type, GetErrorTypeString);
- SET_STR(error_description);
- SET_STR(url);
- SET_ENUM(action, GetActionString);
- return value;
-}
-
-} // namespace
-
-base::DictionaryValue* ClientToServerResponseToValue(
- const sync_pb::ClientToServerResponse& proto,
- bool include_specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET(commit, CommitResponseToValue);
- if (proto.has_get_updates()) {
- value->Set("get_updates", GetUpdatesResponseToValue(proto.get_updates(),
- include_specifics));
- }
-
- SET(error, ErrorToValue);
- SET_ENUM(error_code, GetErrorTypeString);
- SET_STR(error_message);
- SET_STR(store_birthday);
- SET(client_command, ClientCommandToValue);
- SET_INT32_REP(migrated_data_type_id);
- return value;
-}
-
-base::DictionaryValue* ClientToServerMessageToValue(
- const sync_pb::ClientToServerMessage& proto,
- bool include_specifics) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_STR(share);
- SET_INT32(protocol_version);
- if (proto.has_commit()) {
- value->Set("commit",
- CommitMessageToValue(proto.commit(), include_specifics));
- }
-
- SET(get_updates, GetUpdatesMessageToValue);
- SET_STR(store_birthday);
- SET_BOOL(sync_problem_detected);
- SET(debug_info, DebugInfoToValue);
- SET(client_status, ClientStatusToValue);
- return value;
-}
-
-base::DictionaryValue* DatatypeAssociationStatsToValue(
- const sync_pb::DatatypeAssociationStats& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(data_type_id);
- SET_INT32(num_local_items_before_association);
- SET_INT32(num_sync_items_before_association);
- SET_INT32(num_local_items_after_association);
- SET_INT32(num_sync_items_after_association);
- SET_INT32(num_local_items_added);
- SET_INT32(num_local_items_deleted);
- SET_INT32(num_local_items_modified);
- SET_INT32(num_sync_items_added);
- SET_INT32(num_sync_items_deleted);
- SET_INT32(num_sync_items_modified);
- SET_INT64(local_version_pre_association);
- SET_INT64(sync_version_pre_association)
- SET_BOOL(had_error);
- SET_INT64(download_wait_time_us);
- SET_INT64(download_time_us);
- SET_INT64(association_wait_time_for_high_priority_us);
- SET_INT64(association_wait_time_for_same_priority_us);
- return value;
-}
-
-base::DictionaryValue* DebugEventInfoToValue(
- const sync_pb::DebugEventInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_ENUM(singleton_event, SingletonEventTypeString);
- SET(sync_cycle_completed_event_info, SyncCycleCompletedEventInfoToValue);
- SET_INT32(nudging_datatype);
- SET_INT32_REP(datatypes_notified_from_server);
- SET(datatype_association_stats, DatatypeAssociationStatsToValue);
- return value;
-}
-
-base::DictionaryValue* DebugInfoToValue(const sync_pb::DebugInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_REP(events, DebugEventInfoToValue);
- SET_BOOL(cryptographer_ready);
- SET_BOOL(cryptographer_has_pending_keys);
- SET_BOOL(events_dropped);
- return value;
-}
-
-base::DictionaryValue* SyncCycleCompletedEventInfoToValue(
- const sync_pb::SyncCycleCompletedEventInfo& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32(num_encryption_conflicts);
- SET_INT32(num_hierarchy_conflicts);
- SET_INT32(num_server_conflicts);
- SET_INT32(num_updates_downloaded);
- SET_INT32(num_reflected_updates_downloaded);
- SET(caller_info, GetUpdatesCallerInfoToValue);
- return value;
-}
-
-base::DictionaryValue* ClientConfigParamsToValue(
- const sync_pb::ClientConfigParams& proto) {
- base::DictionaryValue* value = new base::DictionaryValue();
- SET_INT32_REP(enabled_type_ids);
- SET_BOOL(tabs_datatype_enabled);
- return value;
-}
-
-#undef SET
-#undef SET_REP
-
-#undef SET_BOOL
-#undef SET_BYTES
-#undef SET_INT32
-#undef SET_INT64
-#undef SET_INT64_REP
-#undef SET_STR
-#undef SET_STR_REP
-
-#undef SET_FIELD
-
-} // namespace syncer
diff --git a/chromium/sync/protocol/proto_value_conversions.h b/chromium/sync/protocol/proto_value_conversions.h
deleted file mode 100644
index 9bf45e3267e..00000000000
--- a/chromium/sync/protocol/proto_value_conversions.h
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with the .proto files in this directory.
-
-#ifndef SYNC_PROTOCOL_PROTO_VALUE_CONVERSIONS_H_
-#define SYNC_PROTOCOL_PROTO_VALUE_CONVERSIONS_H_
-
-#include "sync/base/sync_export.h"
-
-namespace base {
-class DictionaryValue;
-}
-
-namespace sync_pb {
-class AppListSpecifics;
-class AppNotification;
-class AppNotificationSettings;
-class AppSettingSpecifics;
-class AppSpecifics;
-class ArticleSpecifics;
-class AutofillProfileSpecifics;
-class AutofillSpecifics;
-class BookmarkSpecifics;
-class ClientConfigParams;
-class ClientToServerMessage;
-class ClientToServerResponse;
-class CoalescedSyncedNotification;
-class CollapsedInfo;
-class DatatypeAssociationStats;
-class DebugEventInfo;
-class DebugInfo;
-class DeviceInfoSpecifics;
-class DeviceInformation;
-class DictionarySpecifics;
-class EncryptedData;
-class EntitySpecifics;
-class EverythingDirective;
-class ExperimentsSpecifics;
-class ExtensionSettingSpecifics;
-class ExtensionSpecifics;
-class FaviconImageSpecifics;
-class FaviconTrackingSpecifics;
-class GlobalIdDirective;
-class HistoryDeleteDirectiveSpecifics;
-class KeystoreEncryptionFlagsSpecifics;
-class Media;
-class ManagedUserSettingSpecifics;
-class ManagedUserSpecifics;
-class NigoriSpecifics;
-class PasswordSpecifics;
-class PasswordSpecificsData;
-class PreferenceSpecifics;
-class PriorityPreferenceSpecifics;
-class SearchEngineSpecifics;
-class SessionHeader;
-class SessionSpecifics;
-class SessionTab;
-class SessionWindow;
-class SimpleCollapsedLayout;
-class SyncCycleCompletedEventInfo;
-class SyncedNotification;
-class SyncedNotificationAction;
-class SyncedNotificationDestination;
-class SyncedNotificationImage;
-class SyncedNotificationProfileImage;
-class SyncedNotificationRenderInfo;
-class SyncedNotificationSpecifics;
-class TabNavigation;
-class Target;
-class ThemeSpecifics;
-class TimeRangeDirective;
-class TypedUrlSpecifics;
-} // namespace sync_pb
-
-// Utility functions to convert sync protocol buffers to dictionaries.
-// Each protocol field is mapped to a key of the same name. Repeated
-// fields are mapped to array values and sub-messages are mapped to
-// sub-dictionary values.
-//
-// TODO(akalin): Add has_* information.
-//
-// TODO(akalin): Improve enum support.
-
-namespace syncer {
-
-// Ownership of all returned DictionaryValues are transferred to the
-// caller.
-
-// TODO(akalin): Perhaps extend this to decrypt?
-SYNC_EXPORT_PRIVATE base::DictionaryValue* EncryptedDataToValue(
- const sync_pb::EncryptedData& encrypted_data);
-
-// Sub-protocol of AppListSpecifics.
-SYNC_EXPORT_PRIVATE base::DictionaryValue* AppListSpecificsToValue(
- const sync_pb::AppListSpecifics& proto);
-
-// Sub-protocol of AppSpecifics.
-SYNC_EXPORT_PRIVATE base::DictionaryValue* AppSettingsToValue(
- const sync_pb::AppNotificationSettings& app_notification_settings);
-
-// Sub-protocols of SessionSpecifics.
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionHeaderToValue(
- const sync_pb::SessionHeader& session_header);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionTabToValue(
- const sync_pb::SessionTab& session_tab);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionWindowToValue(
- const sync_pb::SessionWindow& session_window);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* TabNavigationToValue(
- const sync_pb::TabNavigation& tab_navigation);
-
-// Sub-protocol of PasswordSpecifics.
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* PasswordSpecificsDataToValue(
- const sync_pb::PasswordSpecificsData& password_specifics_data);
-
-// Sub-protocol of NigoriSpecifics.
-
-base::DictionaryValue* DeviceInformationToValue(
- const sync_pb::DeviceInformation& device_information);
-
-// Sub-protocol of HistoryDeleteDirectiveSpecifics.
-
-base::DictionaryValue* GlobalIdDirectiveToValue(
- const sync_pb::GlobalIdDirective& global_id_directive);
-
-base::DictionaryValue* TimeRangeDirectiveToValue(
- const sync_pb::TimeRangeDirective& time_range_directive);
-
-// Sub-protocol of Experiments.
-
-base::DictionaryValue* KeystoreEncryptionToValue(
- const sync_pb::KeystoreEncryptionFlagsSpecifics& proto);
-
-// Sub protocols of SyncedNotifications.
-
-base::DictionaryValue* SimpleCollapsedLayoutToValue(
- const sync_pb::SimpleCollapsedLayout& proto);
-
-base::DictionaryValue* CollapsedInfoToValue(
- const sync_pb::CollapsedInfo& proto);
-
-base::DictionaryValue* RenderInfoToValue(
- const sync_pb::SyncedNotificationRenderInfo& proto);
-
-base::DictionaryValue* CoalescedNotificationToValue(
- const sync_pb::CoalescedSyncedNotification& proto);
-
-base::DictionaryValue* SyncedNotificationActionToValue(
- const sync_pb::SyncedNotificationAction& action);
-
-base::DictionaryValue* SyncedNotificationDestinationToValue(
- const sync_pb::SyncedNotificationDestination& destination);
-
-base::DictionaryValue* SyncedNotificationToValue(
- const sync_pb::SyncedNotification& notification);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SessionSpecificsToValue(
- const sync_pb::SessionSpecifics& session_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationImageToValue(
- const sync_pb::SyncedNotificationImage& image);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue*
- SyncedNotificationProfileImageToValue(
- const sync_pb::SyncedNotificationProfileImage& image);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* TargetToValue(
- const sync_pb::Target& target);
-
-// Main *SpecificsToValue functions.
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* AppNotificationToValue(
- const sync_pb::AppNotification& app_notification_specifics);
-
-base::DictionaryValue* AppSettingSpecificsToValue(
- const sync_pb::AppSettingSpecifics& app_setting_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* AppSpecificsToValue(
- const sync_pb::AppSpecifics& app_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ArticleSpecificsToValue(
- const sync_pb::ArticleSpecifics& article_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* AutofillSpecificsToValue(
- const sync_pb::AutofillSpecifics& autofill_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* AutofillProfileSpecificsToValue(
- const sync_pb::AutofillProfileSpecifics& autofill_profile_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* BookmarkSpecificsToValue(
- const sync_pb::BookmarkSpecifics& bookmark_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* DeviceInfoSpecificsToValue(
- const sync_pb::DeviceInfoSpecifics& device_info_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* DictionarySpecificsToValue(
- const sync_pb::DictionarySpecifics& dictionary_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ExperimentsSpecificsToValue(
- const sync_pb::ExperimentsSpecifics& proto);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* PriorityPreferenceSpecificsToValue(
- const sync_pb::PriorityPreferenceSpecifics& proto);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ExtensionSettingSpecificsToValue(
- const sync_pb::ExtensionSettingSpecifics& extension_setting_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ExtensionSpecificsToValue(
- const sync_pb::ExtensionSpecifics& extension_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* FaviconImageSpecificsToValue(
- const sync_pb::FaviconImageSpecifics& favicon_image_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* FaviconTrackingSpecificsToValue(
- const sync_pb::FaviconTrackingSpecifics& favicon_tracking_specifics);
-
-SYNC_EXPORT base::DictionaryValue* HistoryDeleteDirectiveSpecificsToValue(
- const sync_pb::HistoryDeleteDirectiveSpecifics&
- history_delete_directive_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ManagedUserSettingSpecificsToValue(
- const sync_pb::ManagedUserSettingSpecifics& managed_user_setting_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ManagedUserSpecificsToValue(
- const sync_pb::ManagedUserSpecifics& managed_user_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* MediaToValue(
- const sync_pb::Media& media);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* NigoriSpecificsToValue(
- const sync_pb::NigoriSpecifics& nigori_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* PasswordSpecificsToValue(
- const sync_pb::PasswordSpecifics& password_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* PreferenceSpecificsToValue(
- const sync_pb::PreferenceSpecifics& password_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SyncedNotificationSpecificsToValue(
- const sync_pb::SyncedNotificationSpecifics&
- synced_notification_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* SearchEngineSpecificsToValue(
- const sync_pb::SearchEngineSpecifics& search_engine_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ThemeSpecificsToValue(
- const sync_pb::ThemeSpecifics& theme_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* TypedUrlSpecificsToValue(
- const sync_pb::TypedUrlSpecifics& typed_url_specifics);
-
-// Any present extensions are mapped to sub-dictionary values with the
-// key equal to the extension name.
-SYNC_EXPORT_PRIVATE base::DictionaryValue* EntitySpecificsToValue(
- const sync_pb::EntitySpecifics& specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ClientToServerMessageToValue(
- const sync_pb::ClientToServerMessage& proto,
- bool include_specifics);
-
-SYNC_EXPORT_PRIVATE base::DictionaryValue* ClientToServerResponseToValue(
- const sync_pb::ClientToServerResponse& proto,
- bool include_specifics);
-
-base::DictionaryValue* DatatypeAssociationStatsToValue(
- const sync_pb::DatatypeAssociationStats& proto);
-
-base::DictionaryValue* DebugEventInfoToValue(
- const sync_pb::DebugEventInfo& proto);
-
-base::DictionaryValue* DebugInfoToValue(
- const sync_pb::DebugInfo& proto);
-
-base::DictionaryValue* SyncCycleCompletedEventInfoToValue(
- const sync_pb::SyncCycleCompletedEventInfo& proto);
-
-base::DictionaryValue* ClientConfigParamsToValue(
- const sync_pb::ClientConfigParams& proto);
-
-} // namespace syncer
-
-#endif // SYNC_PROTOCOL_PROTO_VALUE_CONVERSIONS_H_
diff --git a/chromium/sync/protocol/proto_value_conversions_unittest.cc b/chromium/sync/protocol/proto_value_conversions_unittest.cc
deleted file mode 100644
index 1366dd58900..00000000000
--- a/chromium/sync/protocol/proto_value_conversions_unittest.cc
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with the .proto files in this directory.
-
-#include "sync/protocol/proto_value_conversions.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/time/time.h"
-#include "base/values.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/app_notification_specifics.pb.h"
-#include "sync/protocol/app_setting_specifics.pb.h"
-#include "sync/protocol/app_specifics.pb.h"
-#include "sync/protocol/autofill_specifics.pb.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/device_info_specifics.pb.h"
-#include "sync/protocol/encryption.pb.h"
-#include "sync/protocol/experiments_specifics.pb.h"
-#include "sync/protocol/extension_setting_specifics.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/favicon_image_specifics.pb.h"
-#include "sync/protocol/favicon_tracking_specifics.pb.h"
-#include "sync/protocol/managed_user_setting_specifics.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/priority_preference_specifics.pb.h"
-#include "sync/protocol/search_engine_specifics.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/protocol/theme_specifics.pb.h"
-#include "sync/protocol/typed_url_specifics.pb.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class ProtoValueConversionsTest : public testing::Test {
- protected:
- template <class T>
- void TestSpecificsToValue(
- base::DictionaryValue* (*specifics_to_value)(const T&)) {
- const T& specifics(T::default_instance());
- scoped_ptr<base::DictionaryValue> value(specifics_to_value(specifics));
- // We can't do much but make sure that this doesn't crash.
- }
-};
-
-TEST_F(ProtoValueConversionsTest, ProtoChangeCheck) {
- // If this number changes, that means we added or removed a data
- // type. Don't forget to add a unit test for {New
- // type}SpecificsToValue below.
- EXPECT_EQ(30, MODEL_TYPE_COUNT);
-
- // We'd also like to check if we changed any field in our messages.
- // However, that's hard to do: sizeof could work, but it's
- // platform-dependent. default_instance().ByteSize() won't change
- // for most changes, since most of our fields are optional. So we
- // just settle for comments in the proto files.
-}
-
-TEST_F(ProtoValueConversionsTest, EncryptedDataToValue) {
- TestSpecificsToValue(EncryptedDataToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, SessionHeaderToValue) {
- TestSpecificsToValue(SessionHeaderToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, SessionTabToValue) {
- TestSpecificsToValue(SessionTabToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, SessionWindowToValue) {
- TestSpecificsToValue(SessionWindowToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, TabNavigationToValue) {
- TestSpecificsToValue(TabNavigationToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, PasswordSpecificsData) {
- sync_pb::PasswordSpecificsData specifics;
- specifics.set_password_value("secret");
- scoped_ptr<base::DictionaryValue> value(
- PasswordSpecificsDataToValue(specifics));
- EXPECT_FALSE(value->empty());
- std::string password_value;
- EXPECT_TRUE(value->GetString("password_value", &password_value));
- EXPECT_EQ("<redacted>", password_value);
-}
-
-TEST_F(ProtoValueConversionsTest, AppListSpecificsToValue) {
- TestSpecificsToValue(AppListSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, AppNotificationToValue) {
- TestSpecificsToValue(AppNotificationToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, AppSettingSpecificsToValue) {
- sync_pb::AppNotificationSettings specifics;
- specifics.set_disabled(true);
- specifics.set_oauth_client_id("some_id_value");
- scoped_ptr<base::DictionaryValue> value(AppSettingsToValue(specifics));
- EXPECT_FALSE(value->empty());
- bool disabled_value = false;
- std::string oauth_client_id_value;
- EXPECT_TRUE(value->GetBoolean("disabled", &disabled_value));
- EXPECT_EQ(true, disabled_value);
- EXPECT_TRUE(value->GetString("oauth_client_id", &oauth_client_id_value));
- EXPECT_EQ("some_id_value", oauth_client_id_value);
-}
-
-TEST_F(ProtoValueConversionsTest, AppSpecificsToValue) {
- TestSpecificsToValue(AppSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, AutofillSpecificsToValue) {
- TestSpecificsToValue(AutofillSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, AutofillProfileSpecificsToValue) {
- TestSpecificsToValue(AutofillProfileSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, BookmarkSpecificsToValue) {
- TestSpecificsToValue(BookmarkSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, BookmarkSpecificsData) {
- const base::Time creation_time(base::Time::Now());
- const std::string icon_url = "http://www.google.com/favicon.ico";
- sync_pb::BookmarkSpecifics specifics;
- specifics.set_creation_time_us(creation_time.ToInternalValue());
- specifics.set_icon_url(icon_url);
- sync_pb::MetaInfo* meta_1 = specifics.add_meta_info();
- meta_1->set_key("key1");
- meta_1->set_value("value1");
- sync_pb::MetaInfo* meta_2 = specifics.add_meta_info();
- meta_2->set_key("key2");
- meta_2->set_value("value2");
-
- scoped_ptr<base::DictionaryValue> value(BookmarkSpecificsToValue(specifics));
- EXPECT_FALSE(value->empty());
- std::string encoded_time;
- EXPECT_TRUE(value->GetString("creation_time_us", &encoded_time));
- EXPECT_EQ(base::Int64ToString(creation_time.ToInternalValue()), encoded_time);
- std::string encoded_icon_url;
- EXPECT_TRUE(value->GetString("icon_url", &encoded_icon_url));
- EXPECT_EQ(icon_url, encoded_icon_url);
- base::ListValue* meta_info_list;
- ASSERT_TRUE(value->GetList("meta_info", &meta_info_list));
- EXPECT_EQ(2u, meta_info_list->GetSize());
- base::DictionaryValue* meta_info;
- std::string meta_key;
- std::string meta_value;
- ASSERT_TRUE(meta_info_list->GetDictionary(0, &meta_info));
- EXPECT_TRUE(meta_info->GetString("key", &meta_key));
- EXPECT_TRUE(meta_info->GetString("value", &meta_value));
- EXPECT_EQ("key1", meta_key);
- EXPECT_EQ("value1", meta_value);
- ASSERT_TRUE(meta_info_list->GetDictionary(1, &meta_info));
- EXPECT_TRUE(meta_info->GetString("key", &meta_key));
- EXPECT_TRUE(meta_info->GetString("value", &meta_value));
- EXPECT_EQ("key2", meta_key);
- EXPECT_EQ("value2", meta_value);
-}
-
-TEST_F(ProtoValueConversionsTest, PriorityPreferenceSpecificsToValue) {
- TestSpecificsToValue(PriorityPreferenceSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, DeviceInfoSpecificsToValue) {
- TestSpecificsToValue(DeviceInfoSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ExperimentsSpecificsToValue) {
- TestSpecificsToValue(ExperimentsSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ExtensionSettingSpecificsToValue) {
- TestSpecificsToValue(ExtensionSettingSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ExtensionSpecificsToValue) {
- TestSpecificsToValue(ExtensionSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, FaviconImageSpecificsToValue) {
- TestSpecificsToValue(FaviconImageSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, FaviconTrackingSpecificsToValue) {
- TestSpecificsToValue(FaviconTrackingSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, HistoryDeleteDirectiveSpecificsToValue) {
- TestSpecificsToValue(HistoryDeleteDirectiveSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ManagedUserSettingSpecificsToValue) {
- TestSpecificsToValue(ManagedUserSettingSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ManagedUserSpecificsToValue) {
- TestSpecificsToValue(ManagedUserSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, NigoriSpecificsToValue) {
- TestSpecificsToValue(NigoriSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, PasswordSpecificsToValue) {
- TestSpecificsToValue(PasswordSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, PreferenceSpecificsToValue) {
- TestSpecificsToValue(PreferenceSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, SearchEngineSpecificsToValue) {
- TestSpecificsToValue(SearchEngineSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, SessionSpecificsToValue) {
- TestSpecificsToValue(SessionSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, SyncedNotificationSpecificsToValue) {
- TestSpecificsToValue(SyncedNotificationSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ThemeSpecificsToValue) {
- TestSpecificsToValue(ThemeSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, TypedUrlSpecificsToValue) {
- TestSpecificsToValue(TypedUrlSpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, DictionarySpecificsToValue) {
- TestSpecificsToValue(DictionarySpecificsToValue);
-}
-
-TEST_F(ProtoValueConversionsTest, ArticleSpecificsToValue) {
- TestSpecificsToValue(ArticleSpecificsToValue);
-}
-
-// TODO(akalin): Figure out how to better test EntitySpecificsToValue.
-
-TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
- sync_pb::EntitySpecifics specifics;
- // Touch the extensions to make sure it shows up in the generated
- // value.
-#define SET_FIELD(key) (void)specifics.mutable_##key()
-
- SET_FIELD(app);
- SET_FIELD(app_list);
- SET_FIELD(app_notification);
- SET_FIELD(app_setting);
- SET_FIELD(article);
- SET_FIELD(autofill);
- SET_FIELD(autofill_profile);
- SET_FIELD(bookmark);
- SET_FIELD(device_info);
- SET_FIELD(dictionary);
- SET_FIELD(experiments);
- SET_FIELD(extension);
- SET_FIELD(extension_setting);
- SET_FIELD(favicon_image);
- SET_FIELD(favicon_tracking);
- SET_FIELD(history_delete_directive);
- SET_FIELD(managed_user_setting);
- SET_FIELD(managed_user);
- SET_FIELD(nigori);
- SET_FIELD(password);
- SET_FIELD(preference);
- SET_FIELD(priority_preference);
- SET_FIELD(search_engine);
- SET_FIELD(session);
- SET_FIELD(synced_notification);
- SET_FIELD(theme);
- SET_FIELD(typed_url);
-
-#undef SET_FIELD
-
- scoped_ptr<base::DictionaryValue> value(EntitySpecificsToValue(specifics));
- EXPECT_EQ(MODEL_TYPE_COUNT - FIRST_REAL_MODEL_TYPE -
- (LAST_PROXY_TYPE - FIRST_PROXY_TYPE + 1),
- static_cast<int>(value->size()));
-}
-
-namespace {
-// Returns whether the given value has specifics under the entries in the given
-// path.
-bool ValueHasSpecifics(const base::DictionaryValue& value,
- const std::string& path) {
- const base::ListValue* entities_list = NULL;
- const base::DictionaryValue* entry_dictionary = NULL;
- const base::DictionaryValue* specifics_dictionary = NULL;
-
- if (!value.GetList(path, &entities_list))
- return false;
-
- if (!entities_list->GetDictionary(0, &entry_dictionary))
- return false;
-
- return entry_dictionary->GetDictionary("specifics",
- &specifics_dictionary);
-}
-} // namespace
-
-// Create a ClientToServerMessage with an EntitySpecifics. Converting it to
-// a value should respect the |include_specifics| flag.
-TEST_F(ProtoValueConversionsTest, ClientToServerMessageToValue) {
- sync_pb::ClientToServerMessage message;
- sync_pb::CommitMessage* commit_message = message.mutable_commit();
- sync_pb::SyncEntity* entity = commit_message->add_entries();
- entity->mutable_specifics();
-
- scoped_ptr<base::DictionaryValue> value_with_specifics(
- ClientToServerMessageToValue(message, true /* include_specifics */));
- EXPECT_FALSE(value_with_specifics->empty());
- EXPECT_TRUE(ValueHasSpecifics(*(value_with_specifics.get()),
- "commit.entries"));
-
- scoped_ptr<base::DictionaryValue> value_without_specifics(
- ClientToServerMessageToValue(message, false /* include_specifics */));
- EXPECT_FALSE(value_without_specifics->empty());
- EXPECT_FALSE(ValueHasSpecifics(*(value_without_specifics.get()),
- "commit.entries"));
-}
-
-// Create a ClientToServerResponse with an EntitySpecifics. Converting it to
-// a value should respect the |include_specifics| flag.
-TEST_F(ProtoValueConversionsTest, ClientToServerResponseToValue) {
- sync_pb::ClientToServerResponse message;
- sync_pb::GetUpdatesResponse* response = message.mutable_get_updates();
- sync_pb::SyncEntity* entity = response->add_entries();
- entity->mutable_specifics();
-
- scoped_ptr<base::DictionaryValue> value_with_specifics(
- ClientToServerResponseToValue(message, true /* include_specifics */));
- EXPECT_FALSE(value_with_specifics->empty());
- EXPECT_TRUE(ValueHasSpecifics(*(value_with_specifics.get()),
- "get_updates.entries"));
-
- scoped_ptr<base::DictionaryValue> value_without_specifics(
- ClientToServerResponseToValue(message, false /* include_specifics */));
- EXPECT_FALSE(value_without_specifics->empty());
- EXPECT_FALSE(ValueHasSpecifics(*(value_without_specifics.get()),
- "get_updates.entries"));
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/protocol/search_engine_specifics.proto b/chromium/sync/protocol/search_engine_specifics.proto
deleted file mode 100644
index 8db631c8073..00000000000
--- a/chromium/sync/protocol/search_engine_specifics.proto
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for custom search engines.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of custom search engine sync objects.
-message SearchEngineSpecifics {
- // The description of the search engine.
- optional string short_name = 1;
- // The search engine keyword for omnibox access.
- optional string keyword = 2;
- // A URL to the favicon to show in the search engines options page.
- optional string favicon_url = 3;
- // The actual parameterized search engine query URL.
- optional string url = 4;
- // A flag signifying whether it is safe to automatically modify this search
- // engine entry or not.
- optional bool safe_for_autoreplace = 5;
- // The URL to the OSD file this search engine entry came from.
- optional string originating_url = 6;
- // The date this search engine entry was created. A UTC timestamp with units
- // in microseconds.
- optional int64 date_created = 7;
- // A list of supported input encodings.
- optional string input_encodings = 8;
- // Whether this entry is shown in the list of default search engines or not.
- optional bool show_in_default_list = 9;
- // The parameterized URL that provides suggestions as the user types.
- optional string suggestions_url = 10;
- // The ID associated with the prepopulate data this search engine comes from.
- // Set to zero if it was not prepopulated.
- optional int32 prepopulate_id = 11;
- // DEPRECATED: Whether to autogenerate a keyword for the search engine or not.
- // Do not write to this field in the future. We preserve this for now so we
- // can read the field in order to migrate existing data that sets this bit.
- optional bool autogenerate_keyword = 12;
- // ID 13 reserved - previously used by |logo_id|, now deprecated.
- // Obsolete field. This used to represent whether or not this search engine
- // entry was created automatically by an administrator via group policy. This
- // notion no longer exists amongst synced search engines as we do not want to
- // sync managed search engines.
- // optional bool deprecated_created_by_policy = 14;
- // The parameterized URL that is used for Instant results.
- optional string instant_url = 15;
- // ID 16 reserved - previously used by |id|, now deprecated.
- // The last time this entry was modified by a user. A UTC timestamp with units
- // in microseconds.
- optional int64 last_modified = 17;
- // The primary identifier of this search engine entry for Sync.
- optional string sync_guid = 18;
- // A list of URL patterns that can be used, in addition to |url| and
- // |instant_url|, to extract search terms from a URL.
- repeated string alternate_urls = 19;
- // Parameter that, if present in a search URL, indicates that Chrome should
- // perform search terms replacement in the omnibox.
- optional string search_terms_replacement_key = 20;
- // The parameterized URL that provides image results according to the image
- // content or image URL provided by user.
- optional string image_url = 21;
-
- // The following post_params are comma-separated lists used to specify the
- // post parameters for the corresponding search URL.
- optional string search_url_post_params = 22;
- optional string suggestions_url_post_params = 23;
- optional string instant_url_post_params = 24;
- optional string image_url_post_params = 25;
-
- // The parameterized URL for a search provider specified new tab page.
- optional string new_tab_url = 26;
-}
-
diff --git a/chromium/sync/protocol/session_specifics.proto b/chromium/sync/protocol/session_specifics.proto
deleted file mode 100644
index 4bb8b3e11a6..00000000000
--- a/chromium/sync/protocol/session_specifics.proto
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for sessions.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "sync_enums.proto";
-
-message SessionSpecifics {
- // Unique id for the client.
- optional string session_tag = 1;
- optional SessionHeader header = 2;
- optional SessionTab tab = 3;
-
- // The local tab id used by sync. Unique across all nodes for that client.
- optional int32 tab_node_id = 4 [default = -1];
-}
-
-// Properties of session sync objects.
-message SessionHeader {
- // Each session is composed of windows.
- repeated SessionWindow window = 2;
- // A non-unique but human-readable name to describe this client.
- optional string client_name = 3;
- // The type of device.
- optional SyncEnums.DeviceType device_type = 4;
-}
-
-message SessionWindow {
- // Unique (to the owner) id for this window.
- optional int32 window_id = 1;
- // Index of the selected tab in tabs; -1 if no tab is selected.
- optional int32 selected_tab_index = 2 [default = -1];
- // Type of the browser. Currently we only store browsers of type
- // TYPE_TABBED and TYPE_POPUP.
- enum BrowserType {
- TYPE_TABBED = 1;
- TYPE_POPUP = 2;
- }
- optional BrowserType browser_type = 3 [default = TYPE_TABBED];
- // The tabs that compose a window (correspond to tab id's).
- repeated int32 tab = 4;
-}
-
-message SessionTab {
- // Unique (to the owner) id for this tab.
- optional int32 tab_id = 1;
- // The unique id for the window this tab belongs to.
- optional int32 window_id = 2;
- // Visual index of the tab within its window. There may be gaps in these
- // values.
- optional int32 tab_visual_index = 3 [default = -1];
- // Identifies the index of the current navigation in navigations. For
- // example, if this is 2 it means the current navigation is navigations[2].
- optional int32 current_navigation_index = 4 [default = -1];
- // True if the tab is pinned.
- optional bool pinned = 5 [default = false];
- // If non-empty, this tab is an app tab and this is the id of the extension.
- optional string extension_app_id = 6;
- // Tabs are navigated, and the navigation data is here.
- repeated TabNavigation navigation = 7;
-
- // Fields 8 through 11 are deprecated.
- // The favicon for the current url the tab is displaying. Either empty
- // or a valid PNG encoded favicon.
- optional bytes favicon = 8;
- // The type of favicon. For now only normal web favicons are supported.
- enum FaviconType {
- TYPE_WEB_FAVICON = 1;
- }
- optional FaviconType favicon_type = 9;
- // The url of the actual favicon (as opposed to the page using the favicon).
- optional string favicon_source = 11;
-}
-
-message TabNavigation {
- // The index in the NavigationController. If this is -1, it means this
- // TabNavigation is bogus.
- // optional int32 index = 1 [default = -1]; // obsolete.
- // The virtual URL, when nonempty, will override the actual URL of the page
- // when we display it to the user.
- optional string virtual_url = 2;
- // The referring URL, which can be empty.
- optional string referrer = 3;
- // The title of the page.
- optional string title = 4;
- // Content state is an opaque blob created by WebKit that represents the
- // state of the page. This includes form entries and scroll position for each
- // frame.
- optional string state = 5;
- // The core transition type.
- optional SyncEnums.PageTransition page_transition = 6 [default = TYPED];
- // If this transition was triggered by a redirect, the redirect type.
- optional SyncEnums.PageTransitionRedirectType redirect_type = 7;
- // The unique navigation id (within this client).
- optional int32 unique_id = 8;
- // Timestamp for when this navigation last occurred (in client time).
- // If the user goes back/foward in history the timestamp may refresh.
- optional int64 timestamp_msec = 9;
- // User used the Forward or Back button to navigate among browsing history.
- optional bool navigation_forward_back = 10;
- // User used the address bar to trigger this navigation.
- optional bool navigation_from_address_bar = 11;
- // User is navigating to the home page.
- optional bool navigation_home_page = 12;
- // The beginning of a navigation chain.
- optional bool navigation_chain_start = 13;
- // The last transition in a redirect chain.
- optional bool navigation_chain_end = 14;
- // The id for this navigation, which is globally unique with high
- // probability.
- optional int64 global_id = 15;
- // Search terms extracted from the URL.
- optional string search_terms = 16;
- // The favicon url associated with this page.
- optional string favicon_url = 17;
- enum BlockedState {
- STATE_ALLOWED = 1;
- STATE_BLOCKED = 2;
- }
- // Whether access to the URL was allowed or blocked.
- optional BlockedState blocked_state = 18 [default=STATE_ALLOWED];
- // A list of category identifiers for the URL.
- repeated string content_pack_categories = 19;
- // The status code from the last navigation.
- optional int32 http_status_code = 20;
-}
diff --git a/chromium/sync/protocol/sync.proto b/chromium/sync/protocol/sync.proto
deleted file mode 100644
index ecaceefbf6a..00000000000
--- a/chromium/sync/protocol/sync.proto
+++ /dev/null
@@ -1,895 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol for communication between sync client and server.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "app_list_specifics.proto";
-import "app_notification_specifics.proto";
-import "app_setting_specifics.proto";
-import "app_specifics.proto";
-import "article_specifics.proto";
-import "autofill_specifics.proto";
-import "bookmark_specifics.proto";
-import "client_commands.proto";
-import "client_debug_info.proto";
-import "device_info_specifics.proto";
-import "dictionary_specifics.proto";
-import "encryption.proto";
-import "experiments_specifics.proto";
-import "extension_setting_specifics.proto";
-import "extension_specifics.proto";
-import "favicon_image_specifics.proto";
-import "favicon_tracking_specifics.proto";
-import "get_updates_caller_info.proto";
-import "history_delete_directive_specifics.proto";
-import "nigori_specifics.proto";
-import "managed_user_setting_specifics.proto";
-import "managed_user_specifics.proto";
-import "password_specifics.proto";
-import "preference_specifics.proto";
-import "priority_preference_specifics.proto";
-import "search_engine_specifics.proto";
-import "session_specifics.proto";
-import "sync_enums.proto";
-import "synced_notification_specifics.proto";
-import "theme_specifics.proto";
-import "typed_url_specifics.proto";
-import "unique_position.proto";
-
-// Used for inspecting how long we spent performing operations in different
-// backends. All times must be in millis.
-message ProfilingData {
- optional int64 meta_data_write_time = 1;
- optional int64 file_data_write_time = 2;
- optional int64 user_lookup_time = 3;
- optional int64 meta_data_read_time = 4;
- optional int64 file_data_read_time = 5;
- optional int64 total_request_time = 6;
-}
-
-message EntitySpecifics {
- // If a datatype is encrypted, this field will contain the encrypted
- // original EntitySpecifics. The extension for the datatype will continue
- // to exist, but contain only the default values.
- // Note that currently passwords employ their own legacy encryption scheme and
- // do not use this field.
- optional EncryptedData encrypted = 1;
-
- // To add new datatype-specific fields to the protocol, extend
- // EntitySpecifics. First, pick a non-colliding tag number by
- // picking a revision number of one of your past commits
- // to src.chromium.org. Then, in a different protocol buffer
- // definition, define your message type, and add an optional field
- // to the list below using the unique tag value you selected.
- //
- // optional MyDatatypeSpecifics my_datatype = 32222;
- //
- // where:
- // - 32222 is the non-colliding tag number you picked earlier.
- // - MyDatatypeSpecifics is the type (probably a message type defined
- // in your new .proto file) that you want to associate with each
- // object of the new datatype.
- // - my_datatype is the field identifier you'll use to access the
- // datatype specifics from the code.
- //
- // Server implementations are obligated to preserve the contents of
- // EntitySpecifics when it contains unrecognized fields. In this
- // way, it is possible to add new datatype fields without having
- // to update the server.
- //
- // Note: The tag selection process is based on legacy versions of the
- // protocol which used protobuf extensions. We have kept the process
- // consistent as the old values cannot change. The 5+ digit nature of the
- // tags also makes them recognizable (individually and collectively) from
- // noise in logs and debugging contexts, and creating a divergent subset of
- // tags would only make things a bit more confusing.
-
- optional AutofillSpecifics autofill = 31729;
- optional BookmarkSpecifics bookmark = 32904;
- optional PreferenceSpecifics preference = 37702;
- optional TypedUrlSpecifics typed_url = 40781;
- optional ThemeSpecifics theme = 41210;
- optional AppNotification app_notification = 45184;
- optional PasswordSpecifics password = 45873;
- optional NigoriSpecifics nigori = 47745;
- optional ExtensionSpecifics extension = 48119;
- optional AppSpecifics app = 48364;
- optional SessionSpecifics session = 50119;
- optional AutofillProfileSpecifics autofill_profile = 63951;
- optional SearchEngineSpecifics search_engine = 88610;
- optional ExtensionSettingSpecifics extension_setting = 96159;
- optional AppSettingSpecifics app_setting = 103656;
- optional HistoryDeleteDirectiveSpecifics history_delete_directive = 150251;
- optional SyncedNotificationSpecifics synced_notification = 153108;
- optional DeviceInfoSpecifics device_info = 154522;
- optional ExperimentsSpecifics experiments = 161496;
- optional PriorityPreferenceSpecifics priority_preference = 163425;
- optional DictionarySpecifics dictionary = 170540;
- optional FaviconTrackingSpecifics favicon_tracking = 181534;
- optional FaviconImageSpecifics favicon_image = 182019;
- optional ManagedUserSettingSpecifics managed_user_setting = 186662;
- optional ManagedUserSpecifics managed_user = 194582;
- optional ArticleSpecifics article = 223759;
- optional AppListSpecifics app_list = 229170;
-}
-
-message SyncEntity {
- // This item's identifier. In a commit of a new item, this will be a
- // client-generated ID. If the commit succeeds, the server will generate
- // a globally unique ID and return it to the committing client in the
- // CommitResponse.EntryResponse. In the context of a GetUpdatesResponse,
- // |id_string| is always the server generated ID. The original
- // client-generated ID is preserved in the |originator_client_id| field.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional string id_string = 1;
-
- // An id referencing this item's parent in the hierarchy. In a
- // CommitMessage, it is accepted for this to be a client-generated temporary
- // ID if there was a new created item with that ID appearing earlier
- // in the message. In all other situations, it is a server ID.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional string parent_id_string = 2;
-
- // old_parent_id is only set in commits and indicates the old server
- // parent(s) to remove. When omitted, the old parent is the same as
- // the new.
- // Present only in CommitMessage.
- optional string old_parent_id = 3;
-
- // The version of this item -- a monotonically increasing value that is
- // maintained by for each item. If zero in a CommitMessage, the server
- // will interpret this entity as a newly-created item and generate a
- // new server ID and an initial version number. If nonzero in a
- // CommitMessage, this item is treated as an update to an existing item, and
- // the server will use |id_string| to locate the item. Then, if the item's
- // current version on the server does not match |version|, the commit will
- // fail for that item. The server will not update it, and will return
- // a result code of CONFLICT. In a GetUpdatesResponse, |version| is
- // always positive and indentifies the revision of the item data being sent
- // to the client.
- // Present in both GetUpdatesResponse and CommitMessage.
- required int64 version = 4;
-
- // Last modification time (in java time milliseconds)
- // Present in both GetUpdatesResponse and CommitMessage.
- optional int64 mtime = 5;
-
- // Creation time.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional int64 ctime = 6;
-
- // The name of this item.
- // Historical note:
- // Since November 2010, this value is no different from non_unique_name.
- // Before then, server implementations would maintain a unique-within-parent
- // value separate from its base, "non-unique" value. Clients had not
- // depended on the uniqueness of the property since November 2009; it was
- // removed from Chromium by http://codereview.chromium.org/371029 .
- // Present in both GetUpdatesResponse and CommitMessage.
- required string name = 7;
-
- // The name of this item. Same as |name|.
- // |non_unique_name| should take precedence over the |name| value if both
- // are supplied. For efficiency, clients and servers should avoid setting
- // this redundant value.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional string non_unique_name = 8;
-
- // A value from a monotonically increasing sequence that indicates when
- // this item was last updated on the server. This is now equivalent
- // to version. This is now deprecated in favor of version.
- // Present only in GetUpdatesResponse.
- optional int64 sync_timestamp = 9;
-
- // If present, this tag identifies this item as being a uniquely
- // instanced item. The server ensures that there is never more
- // than one entity in a user's store with the same tag value.
- // This value is used to identify and find e.g. the "Google Chrome" settings
- // folder without relying on it existing at a particular path, or having
- // a particular name, in the data store.
- //
- // This variant of the tag is created by the server, so clients can't create
- // an item with a tag using this field.
- //
- // Use client_defined_unique_tag if you want to create one from the client.
- //
- // An item can't have both a client_defined_unique_tag and
- // a server_defined_unique_tag.
- //
- // Present only in GetUpdatesResponse.
- optional string server_defined_unique_tag = 10;
-
- // If this group is present, it implies that this SyncEntity corresponds to
- // a bookmark or a bookmark folder.
- //
- // This group is deprecated; clients should use the bookmark EntitySpecifics
- // protocol buffer extension instead.
- optional group BookmarkData = 11 {
- // We use a required field to differentiate between a bookmark and a
- // bookmark folder.
- // Present in both GetUpdatesMessage and CommitMessage.
- required bool bookmark_folder = 12;
-
- // For bookmark objects, contains the bookmark's URL.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional string bookmark_url = 13;
-
- // For bookmark objects, contains the bookmark's favicon. The favicon is
- // represented as a 16X16 PNG image.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional bytes bookmark_favicon = 14;
- }
-
- // Supplies a numeric position for this item, relative to other items with the
- // same parent. Deprecated in M26, though clients are still required to set
- // it.
- //
- // Present in both GetUpdatesResponse and CommitMessage.
- //
- // At one point this was used as an alternative / supplement to
- // the deprecated |insert_after_item_id|, but now it, too, has been
- // deprecated.
- //
- // In order to maintain compatibility with older clients, newer clients should
- // still set this field. Its value should be based on the first 8 bytes of
- // this item's |unique_position|.
- //
- // Newer clients must also support the receipt of items that contain
- // |position_in_parent| but no |unique_position|. They should locally convert
- // the given int64 position to a UniquePosition.
- //
- // The conversion from int64 to UniquePosition is as follows:
- // The int64 value will have its sign bit flipped then placed in big endian
- // order as the first 8 bytes of the UniquePosition. The subsequent bytes of
- // the UniquePosition will consist of the item's unique suffix.
- //
- // Conversion from UniquePosition to int64 reverses this process: the first 8
- // bytes of the position are to be interpreted as a big endian int64 value
- // with its sign bit flipped.
- optional int64 position_in_parent = 15;
-
- // Contains the ID of the element (under the same parent) after which this
- // element resides. An empty string indicates that the element is the first
- // element in the parent. This value is used during commits to specify
- // a relative position for a position change. In the context of
- // a GetUpdatesMessage, |position_in_parent| is used instead to
- // communicate position.
- //
- // Present only in CommitMessage.
- //
- // This is deprecated. Clients are allowed to omit this as long as they
- // include |position_in_parent| instead.
- optional string insert_after_item_id = 16;
-
- // Arbitrary key/value pairs associated with this item.
- // Present in both GetUpdatesResponse and CommitMessage.
- // Deprecated.
- // optional ExtendedAttributes extended_attributes = 17;
-
- // If true, indicates that this item has been (or should be) deleted.
- // Present in both GetUpdatesResponse and CommitMessage.
- optional bool deleted = 18 [default = false];
-
- // A GUID that identifies the sync client who initially committed
- // this entity. This value corresponds to |cache_guid| in CommitMessage.
- // This field, along with |originator_client_item_id|, can be used to
- // reunite the original with its official committed version in the case
- // where a client does not receive or process the commit response for
- // some reason.
- //
- // Present only in GetUpdatesResponse.
- //
- // This field is also used in determining the unique identifier used in
- // bookmarks' unique_position field.
- optional string originator_cache_guid = 19;
-
- // The local item id of this entry from the client that initially
- // committed this entity. Typically a negative integer.
- // Present only in GetUpdatesResponse.
- //
- // This field is also used in determining the unique identifier used in
- // bookmarks' unique_position field.
- optional string originator_client_item_id = 20;
-
- // Extensible container for datatype-specific data.
- // This became available in version 23 of the protocol.
- optional EntitySpecifics specifics = 21;
-
- // Indicate whether this is a folder or not. Available in version 23+.
- optional bool folder = 22 [default = false];
-
- // A client defined unique hash for this entity.
- // Similar to server_defined_unique_tag.
- //
- // When initially committing an entity, a client can request that the entity
- // is unique per that account. To do so, the client should specify a
- // client_defined_unique_tag. At most one entity per tag value may exist
- // per account. The server will enforce uniqueness on this tag
- // and fail attempts to create duplicates of this tag.
- // Will be returned in any updates for this entity.
- //
- // The difference between server_defined_unique_tag and
- // client_defined_unique_tag is the creator of the entity. Server defined
- // tags are entities created by the server at account creation,
- // while client defined tags are entities created by the client at any time.
- //
- // During GetUpdates, a sync entity update will come back with ONE of:
- // a) Originator and cache id - If client committed the item as non "unique"
- // b) Server tag - If server committed the item as unique
- // c) Client tag - If client committed the item as unique
- //
- // May be present in CommitMessages for the initial creation of an entity.
- // If present in Commit updates for the entity, it will be ignored.
- //
- // Available in version 24+.
- //
- // May be returned in GetUpdatesMessage and sent up in CommitMessage.
- //
- optional string client_defined_unique_tag = 23;
-
- // This positioning system had a relatively short life. It was made obsolete
- // by |unique_position| before either the client or server made much of an
- // attempt to support it. In fact, no client ever read or set this field.
- //
- // Deprecated in M26.
- optional bytes ordinal_in_parent = 24;
-
- // This is the fourth attempt at positioning.
- //
- // This field is present in both GetUpdatesResponse and CommitMessage, if the
- // item's type requires it and the client that wrote the item supports it (M26
- // or higher). Clients must also be prepared to handle updates from clients
- // that do not set this field. See the comments on
- // |server_position_in_parent| for more information on how this is handled.
- //
- // This field will not be set for items whose type ignores positioning.
- // Clients should not attempt to read this field on the receipt of an item of
- // a type that ignores positioning.
- //
- // Refer to its definition in unique_position.proto for more information about
- // its internal representation.
- optional UniquePosition unique_position = 25;
-};
-
-// This message contains diagnostic information used to correlate
-// commit-related traffic with extensions-related mutations to the
-// data models in chromium. It plays no functional role in
-// processing this CommitMessage.
-message ChromiumExtensionsActivity {
- // The human-readable ID identifying the extension responsible
- // for the traffic reported in this ChromiumExtensionsActivity.
- optional string extension_id = 1;
-
- // How many times the extension successfully invoked a write
- // operation through the bookmarks API since the last CommitMessage.
- optional uint32 bookmark_writes_since_last_commit = 2;
-};
-
-// Client specific configuration information.
-message ClientConfigParams {
- // The set of data types this client has enabled. Note that this does not
- // include proxy types, as they do not have protocol field numbers and are
- // placeholder types that implicitly enable protocol types.
- repeated int32 enabled_type_ids = 1;
-
- // Whether the PROXY_TABS proxy datatype is enabled on this client.
- optional bool tabs_datatype_enabled = 2;
-};
-
-message CommitMessage {
- repeated SyncEntity entries = 1;
-
- // A GUID that identifies the committing sync client. This value will be
- // returned as originator_cache_guid for any new items.
- optional string cache_guid = 2;
-
- repeated ChromiumExtensionsActivity extensions_activity = 3;
-
- // The configuration of this client at commit time. Used by the server to
- // make commit-time decisions about how to process datatypes that might
- // involve server-side interaction, and e.g. require explicit user intent for
- // syncing a particular data type regardless of whether a commit for that
- // datatype is currently being sent up.
- optional ClientConfigParams config_params = 4;
-};
-
-// This message communicates additional per-type information related to
-// requests with origin GU_TRIGGER. This message is not relevant when any
-// other origin value is used.
-// Introduced in M29.
-message GetUpdateTriggers {
- // An opaque-to-the-client string of bytes, received through a notification,
- // that the server may interpret as a hint about the location of the latest
- // version of the data for this type.
- //
- // Note that this will eventually replace the 'optional' field of the same
- // name defined in the progress marker, but the client and server should
- // support both until it's safe to deprecate the old one.
- //
- // This field was introduced in M29.
- repeated string notification_hint = 1;
-
- // This flag is set if the client was forced to drop hints because the number
- // of queued hints exceeded its limit. The oldest hints will be discarded
- // first. Introduced in M29.
- optional bool client_dropped_hints = 2;
-
- // This flag is set if the invalidation server reports that it may have
- // dropped some invalidations at some point. The client will also drop any
- // locally cached hints that are older than the server-did-drop notification.
- //
- // TODO(sync): Determine the format for this.
- //
- // optional bool server_dropped_hints = 6;
-
- // This flag is set when the client suspects that its list of invalidation
- // hints may be incomplete. This may be the case if:
- // - The client is syncing for the first time.
- // - The client has just restarted and it was unable to keep track of
- // invalidations that were received prior to the restart.
- // - The client's connection to the invalidation server is currently or
- // was recently broken.
- //
- // It's difficult to provide more details here. This is implemented by
- // setting the flag to false whenever anything that might adversely affect
- // notifications happens (eg. a crash, restart on a platform that doesn't
- // support invalidation ack-tracking, transient invalidation error) and is
- // unset only after we've experienced one successful sync cycle while
- // notifications were enabled.
- //
- // This flag was introduced in M29.
- optional bool invalidations_out_of_sync = 3;
-
- // This counts the number of times the syncer has been asked to commit
- // changes for this type since the last successful sync cycle. The number of
- // nudges may not be related to the actual number of items modified. It
- // often correlates with the number of user actions, but that's not always
- // the case.
- // Introduced in M29.
- optional int64 local_modification_nudges = 4;
-
- // This counts the number of times the syncer has been explicitly asked to
- // fetch updates for this type since the last successful sync cycle. These
- // explicit refresh requests should be relatively rare on most platforms, and
- // associated with user actions. For example, at the time of this writing
- // the most common (only?) source of refresh requests is when a user opens
- // the new tab page on a platform that does not support sessions
- // invalidations.
- // Introduced in M29.
- optional int64 datatype_refresh_nudges = 5;
-}
-
-message DataTypeProgressMarker {
- // An integer identifying the data type whose progress is tracked by this
- // marker. The legitimate values of this field correspond to the protobuf
- // field numbers of all EntitySpecifics fields supported by the server.
- // These values are externally declared in per-datatype .proto files.
- optional int32 data_type_id = 1;
-
- // An opaque-to-the-client sequence of bytes that the server may interpret
- // as an indicator of the client's knowledge state. If this is empty or
- // omitted by the client, it indicates that the client is initiating a
- // first-time sync of this datatype. Otherwise, clients must supply a
- // value previously returned by the server in an earlier GetUpdatesResponse.
- // These values are not comparable or generable on the client.
- //
- // The opaque semantics of this field are to afford server implementations
- // some flexibility in implementing progress tracking. For instance,
- // a server implementation built on top of a distributed storage service --
- // or multiple heterogeneous such services -- might need to supply a vector
- // of totally ordered monotonic update timestamps, rather than a single
- // monotonically increasing value. Other optimizations may also be
- // possible if the server is allowed to embed arbitrary information in
- // the progress token.
- //
- // Server implementations should keep the size of these tokens relatively
- // small, on the order of tens of bytes, and they should remain small
- // regardless of the number of items synchronized. (A possible bad server
- // implementation would be for progress_token to contain a list of all the
- // items ever sent to the client. Servers shouldn't do this.)
- optional bytes token = 2;
-
- // Clients that previously downloaded updates synced using the timestamp based
- // progress tracking mechanism, but which wish to switch over to the opaque
- // token mechanism can set this field in a GetUpdatesMessage. The server
- // will perform a get updates operation as normal from the indicated
- // timestamp, and return only an opaque progress token.
- optional int64 timestamp_token_for_migration = 3;
-
- // An opaque-to-the-client string of bytes, received through a notification,
- // that the server may interpret as a hint about the location of the latest
- // version of the data for this type.
- //
- // Deprecated in M29. We should use the repeated field version in the
- // PerClientTypeState instead.
- optional string notification_hint = 4;
-
- // This field will be included only in GetUpdates with origin GU_TRIGGER.
- optional GetUpdateTriggers get_update_triggers = 5;
-}
-
-message GetUpdatesMessage {
- // Indicates the client's current progress in downloading updates. A
- // from_timestamp value of zero means that the client is requesting a first-
- // time sync. After that point, clients should fill in this value with the
- // value returned in the last-seen GetUpdatesResponse.new_timestamp.
- //
- // from_timestamp has been deprecated; clients should use
- // |from_progress_marker| instead, which allows more flexibility.
- optional int64 from_timestamp = 1;
-
- // Indicates the reason for the GetUpdatesMessage.
- // Deprecated in M29. We should eventually rely on GetUpdatesOrigin instead.
- // Newer clients will support both systems during the transition period.
- optional GetUpdatesCallerInfo caller_info = 2;
-
- // Indicates whether related folders should be fetched.
- optional bool fetch_folders = 3 [default = true];
-
- // The presence of an individual EntitySpecifics field indicates that the
- // client requests sync object types associated with that field. This
- // determination depends only on the presence of the field, not its
- // contents -- thus clients should send empty messages as the field value.
- // For backwards compatibility only bookmark objects will be sent to the
- // client should requested_types not be present.
- //
- // requested_types may contain multiple EntitySpecifics fields -- in this
- // event, the server will return items of all the indicated types.
- //
- // requested_types has been deprecated; clients should use
- // |from_progress_marker| instead, which allows more flexibility.
- optional EntitySpecifics requested_types = 4;
-
- // Client-requested limit on the maximum number of updates to return at once.
- // The server may opt to return fewer updates than this amount, but it should
- // not return more.
- optional int32 batch_size = 5;
-
- // Per-datatype progress marker. If present, the server will ignore
- // the values of requested_types and from_timestamp, using this instead.
- //
- // With the exception of certain configuration or initial sync requests, the
- // client should include one instance of this field for each enabled data
- // type.
- repeated DataTypeProgressMarker from_progress_marker = 6;
-
- // Indicates whether the response should be sent in chunks. This may be
- // needed for devices with limited memory resources. If true, the response
- // will include one or more ClientToServerResponses, with the first one
- // containing GetUpdatesMetadataResponse, and the remaining ones, if any,
- // containing GetUpdatesStreamingResponse. These ClientToServerResponses are
- // delimited by a length prefix, which is encoded as a varint.
- optional bool streaming = 7 [default = false];
-
- // Whether the client needs the server to provide an encryption key for this
- // account.
- // Note: this should typically only be set on the first GetUpdates a client
- // requests. Clients are expected to persist the encryption key from then on.
- // The allowed frequency for requesting encryption keys is much lower than
- // other datatypes, so repeated usage will likely result in throttling.
- optional bool need_encryption_key = 8 [default = false];
-
- // Whether to create the mobile bookmarks folder if it's not
- // already created. Should be set to true only by mobile clients.
- optional bool create_mobile_bookmarks_folder = 1000 [default = false];
-
- // This value is an updated version of the GetUpdatesCallerInfo's
- // GetUpdatesSource. It describes the reason for the GetUpdate request.
- // Introduced in M29.
- optional SyncEnums.GetUpdatesOrigin get_updates_origin = 9;
-};
-
-message AuthenticateMessage {
- required string auth_token = 1;
-};
-
-message ClearUserDataMessage {
-};
-
-message ClearUserDataResponse {
-};
-
-// The client must preserve, store, and resend the chip bag with
-// every request. The server depends on the chip bag in order
- // to precisely choreograph client-server state machines.
-//
-// Because the client stores and sends this data on every request,
-// the contents of the chip bag should be kept relatively small.
-//
-// If the server does not return a chip bag, the client must assume
-// that there has been no change to the chip bag. The client must
-// resend the bag of chips it had prior on the next request.
-//
-// The client must make the chip bag durable if and only if it
-// processes the response from the server.
-message ChipBag {
- // Server chips are deliberately opaque, allowing the server
- // to encapsulate its state machine logic.
- optional bytes server_chips = 1;
-}
-
-// Information about the syncer's state.
-message ClientStatus {
- // Flag to indicate if the client has detected hierarchy conflicts. The flag
- // is left unset if update application has not been attempted yet.
- //
- // The server should attempt to resolve any hierarchy conflicts when this flag
- // is set. The client may not assume that any particular action will be
- // taken. There is no guarantee the problem will be addressed in a reasonable
- // amount of time.
- optional bool hierarchy_conflict_detected = 1;
-}
-
-message ClientToServerMessage {
- required string share = 1;
- optional int32 protocol_version = 2 [default = 31];
- enum Contents {
- COMMIT = 1;
- GET_UPDATES = 2;
- AUTHENTICATE = 3;
- CLEAR_DATA = 4;
- }
-
- required Contents message_contents = 3;
- optional CommitMessage commit = 4;
- optional GetUpdatesMessage get_updates = 5;
- optional AuthenticateMessage authenticate = 6;
-
- // Request to clear all Chromium data from the server.
- optional ClearUserDataMessage clear_user_data = 9;
-
- optional string store_birthday = 7; // Opaque store ID; if it changes, duck!
- // The client sets this if it detects a sync issue. The server will tell it
- // if it should perform a refresh.
- optional bool sync_problem_detected = 8 [default = false];
-
- // Client side state information for debugging purpose.
- // This is only sent on the first getupdates of every sync cycle,
- // as an optimization to save bandwidth.
- optional DebugInfo debug_info = 10;
-
- // Per-client state for use by the server. Sent with every message sent to the
- // server.
- optional ChipBag bag_of_chips = 11;
-
- // Google API key.
- optional string api_key = 12;
-
- // Client's self-reported state.
- // The client should set this on every message sent to the server, though its
- // member fields may often be unset.
- optional ClientStatus client_status = 13;
-
- // The ID that our invalidation client used to identify itself to the server.
- // Sending the ID here allows the server to not send notifications of our own
- // changes to our invalidator.
- optional string invalidator_client_id = 14;
-};
-
-// This request allows the client to convert a specific crash identifier
-// into more general information (e.g. hash of the crashing call stack)
-// suitable for upload in an (authenticated) DebugInfo event.
-message GetCrashInfoRequest {
- // Id of the uploaded crash.
- optional string crash_id = 1;
-
- // Time that the crash occurred.
- optional int64 crash_time_millis = 2;
-}
-
-// Proto to be written in its entirety to the debug info log.
-message GetCrashInfoResponse {
- // Hash of the crashing call stack.
- optional string stack_id = 1;
-
- // Time of the crash, potentially rounded to remove
- // significant bits.
- optional int64 crash_time_millis = 2;
-}
-
-message CommitResponse {
- enum ResponseType {
- SUCCESS = 1;
- CONFLICT = 2; // You're out of date; update and check your data
- // TODO(ncarter): What's the difference between RETRY and TRANSIENT_ERROR?
- RETRY = 3; // Someone has a conflicting, non-expired session open
- INVALID_MESSAGE = 4; // What the client sent was invalid, and trying again
- // won't help.
- OVER_QUOTA = 5; // This operation would put you, or you are, over quota
- TRANSIENT_ERROR = 6; // Something went wrong; try again in a bit
- }
- repeated group EntryResponse = 1 {
- required ResponseType response_type = 2;
-
- // Sync servers may also return a new ID for an existing item, indicating
- // a new entry's been created to hold the data the client's sending up.
- optional string id_string = 3;
-
- // should be filled if our parent was assigned a new ID.
- optional string parent_id_string = 4;
-
- // This value is the same as the position_in_parent value returned within
- // the SyncEntity message in GetUpdatesResponse. There was a time when the
- // client would attempt to honor this position, but nowadays the server
- // should ensure it is no different from the position_in_parent sent up in
- // the commit request and the client should not read it.
- optional int64 position_in_parent = 5;
-
- // The item's current version.
- optional int64 version = 6;
-
- // Allows the server to move-aside an entry as it's being committed.
- // This name is the same as the name field returned within the SyncEntity
- // message in GetUpdatesResponse.
- optional string name = 7;
-
- // This name is the same as the non_unique_name field returned within the
- // SyncEntity message in GetUpdatesResponse.
- optional string non_unique_name = 8;
-
- optional string error_message = 9;
-
- // Last modification time (in java time milliseconds). Allows the server
- // to override the client-supplied mtime during a commit operation.
- optional int64 mtime = 10;
- }
-};
-
-message GetUpdatesResponse {
- // New sync entries that the client should apply.
- repeated SyncEntity entries = 1;
-
- // If there are more changes on the server that weren't processed during this
- // GetUpdates request, the client should send another GetUpdates request and
- // use new_timestamp as the from_timestamp value within GetUpdatesMessage.
- //
- // This field has been deprecated and will be returned only to clients
- // that set the also-deprecated |from_timestamp| field in the update request.
- // Clients should use |from_progress_marker| and |new_progress_marker|
- // instead.
- optional int64 new_timestamp = 2;
-
- // DEPRECATED FIELD - server does not set this anymore.
- optional int64 deprecated_newest_timestamp = 3;
-
- // Approximate count of changes remaining - use this for UI feedback.
- // If present and zero, this estimate is firm: the server has no changes
- // after the current batch.
- optional int64 changes_remaining = 4;
-
- // Opaque, per-datatype timestamp-like tokens. A client should use this
- // field in lieu of new_timestamp, which is deprecated in newer versions
- // of the protocol. Clients should retain and persist the values returned
- // in this field, and present them back to the server to indicate the
- // starting point for future update requests.
- //
- // This will be sent only if the client provided |from_progress_marker|
- // in the update request.
- //
- // The server may provide a new progress marker even if this is the end of
- // the batch, or if there were no new updates on the server; and the client
- // must save these. If the server does not provide a |new_progress_marker|
- // value for a particular datatype, when the request provided a
- // |from_progress_marker| value for that datatype, the client should
- // interpret this to mean "no change from the previous state" and retain its
- // previous progress-marker value for that datatype.
- //
- // Progress markers in the context of a response will never have the
- // |timestamp_token_for_migration| field set.
- repeated DataTypeProgressMarker new_progress_marker = 5;
-
- // The current encryption keys associated with this account. Will be set if
- // the GetUpdatesMessage in the request had need_encryption_key == true or
- // the server has updated the set of encryption keys (e.g. due to a key
- // rotation).
- repeated bytes encryption_keys = 6;
-};
-
-// The metadata response for GetUpdatesMessage. This response is sent when
-// streaming is set to true in the request. It is prefixed with a length
-// delimiter, which is encoded in varint.
-message GetUpdatesMetadataResponse {
- // Approximate count of changes remaining. Detailed comment is available in
- // GetUpdatesResponse.
- optional int64 changes_remaining = 1;
-
- // Opaque, per-datatype timestamp-like tokens. Detailed comment is available
- // in GetUpdatesResponse.
- repeated DataTypeProgressMarker new_progress_marker = 2;
-};
-
-// The streaming response message for GetUpdatesMessage. This message is sent
-// when streaming is set to true in the request. There may be multiple
-// GetUpdatesStreamingResponse messages in a response. This type of messages
-// is preceded by GetUpdatesMetadataResponse. It is prefixed with a length
-// delimiter, which is encoded in varint.
-message GetUpdatesStreamingResponse {
- // New sync entries that the client should apply.
- repeated SyncEntity entries = 1;
-};
-
-// A user-identifying struct. For a given Google account the email and display
-// name can change, but obfuscated_id should be constant.
-// The obfuscated id is optional because at least one planned use of the proto
-// (sharing) does not require it.
-message UserIdentification {
- required string email = 1; // the user's full primary email address.
- optional string display_name = 2; // the user's display name.
- optional string obfuscated_id = 3; // an obfuscated, opaque user id.
-};
-
-message AuthenticateResponse {
- // Optional only for backward compatibility.
- optional UserIdentification user = 1;
-};
-
-message ThrottleParameters {
- // Deprecated. Remove this from the server side.
- required int32 min_measure_payload_size = 1;
- required double target_utilization = 2;
- required double measure_interval_max = 3;
- required double measure_interval_min = 4;
- required double observation_window = 5;
-};
-
-message ClientToServerResponse {
- optional CommitResponse commit = 1;
- optional GetUpdatesResponse get_updates = 2;
- optional AuthenticateResponse authenticate = 3;
-
- // Up until protocol_version 24, the default was SUCCESS which made it
- // impossible to add new enum values since older clients would parse any
- // out-of-range value as SUCCESS. Starting with 25, unless explicitly set,
- // the error_code will be UNKNOWN so that clients know when they're
- // out-of-date. Note also that when using protocol_version < 25,
- // TRANSIENT_ERROR is not supported. Instead, the server sends back a HTTP
- // 400 error code. This is deprecated now.
- optional SyncEnums.ErrorType error_code = 4 [default = UNKNOWN];
- optional string error_message = 5;
-
- // Opaque store ID; if it changes, the contents of the client's cache
- // is meaningless to this server. This happens most typically when
- // you switch from one storage backend instance (say, a test instance)
- // to another (say, the official instance).
- optional string store_birthday = 6;
-
- optional ClientCommand client_command = 7;
- optional ProfilingData profiling_data = 8;
- optional ClearUserDataResponse clear_user_data = 9;
- optional GetUpdatesMetadataResponse stream_metadata = 10;
- // If GetUpdatesStreamingResponse is contained in the ClientToServerResponse,
- // none of the other fields (error_code and etc) will be set.
- optional GetUpdatesStreamingResponse stream_data = 11;
-
- // The data types whose storage has been migrated. Present when the value of
- // error_code is MIGRATION_DONE.
- repeated int32 migrated_data_type_id = 12;
-
- message Error {
- optional SyncEnums.ErrorType error_type = 1 [default = UNKNOWN];
- optional string error_description = 2;
- optional string url = 3;
- optional SyncEnums.Action action = 4 [default = UNKNOWN_ACTION];
-
- // Currently only meaningful if |error_type| is throttled. If this field
- // is absent then the whole client (all datatypes) is throttled.
- repeated int32 error_data_type_ids = 5;
- }
- optional Error error = 13;
-
- // The new per-client state for this client. If set, should be persisted and
- // sent with any subsequent ClientToServerMessages.
- optional ChipBag new_bag_of_chips = 14;
-};
-
diff --git a/chromium/sync/protocol/sync_enums.proto b/chromium/sync/protocol/sync_enums.proto
deleted file mode 100644
index 348d6d1d354..00000000000
--- a/chromium/sync/protocol/sync_enums.proto
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol for communication between sync client and server.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-message SyncEnums {
- // These events are sent by |SyncManager| class. Note: In the code they each
- // of these events have some additional info but we are not sending them to
- // server.
- enum EventType {
- AUTH_ERROR = 1; // Auth error. Note this gets generated even during
- // successful auth with the error set to none.
- UPDATED_TOKEN = 2; // Client received an updated token.
- PASSPHRASE_REQUIRED = 3; // Cryptographer needs passphrase.
- PASSPHRASE_ACCEPTED = 4; // Passphrase was accepted by cryptographer.
- INITIALIZATION_COMPLETE = 5; // Sync Initialization is complete.
-
- // |STOP_SYNCING_PERMANENTLY| event should never be seen by the server in
- // the absence of bugs.
- STOP_SYNCING_PERMANENTLY = 6; // Server sent stop syncing permanently.
-
- ENCRYPTED_TYPES_CHANGED = 9; // Set of encrypted types has changed.
- ENCRYPTION_COMPLETE = 7; // Client has finished encrypting all data.
- ACTIONABLE_ERROR = 8; // Client received an actionable error.
- }
-
- // See content/public/common/page_transition_types.h for detailed
- // information on the values of PageTransition and
- // PageTransitionRedirectType below.
-
- // Types of transitions between pages.
- enum PageTransition {
- LINK = 0;
- TYPED = 1;
- AUTO_BOOKMARK = 2;
- AUTO_SUBFRAME = 3;
- MANUAL_SUBFRAME = 4;
- GENERATED = 5;
- AUTO_TOPLEVEL = 6;
- FORM_SUBMIT = 7;
- RELOAD = 8;
- KEYWORD = 9;
- KEYWORD_GENERATED = 10;
- // The below two were mistakenly added but never properly used. They are
- // actually transition qualifiers, and are set independently of other
- // qualifiers and of the main transitions. See session_specifics.proto for
- // the list of synced transition qualifiers.
- // CHAIN_START = 12; Deprecated.
- // CHAIN_END = 13; Deprecated.
- }
-
- // Types of redirects that triggered a transition.
- enum PageTransitionRedirectType {
- CLIENT_REDIRECT = 1;
- SERVER_REDIRECT = 2;
- }
-
- enum ErrorType {
- SUCCESS = 0;
- ACCESS_DENIED = 1; // Returned when the user doesn't have access to
- // store (instead of HTTP 401).
- NOT_MY_BIRTHDAY = 2; // Returned when the server and client disagree on
- // the store birthday.
- THROTTLED = 3; // Returned when the store has exceeded the
- // allowed bandwidth utilization.
- AUTH_EXPIRED = 4; // Auth token or cookie has expired.
- USER_NOT_ACTIVATED = 5; // User doesn't have the Chrome bit set on that
- // Google Account.
- AUTH_INVALID = 6; // Auth token or cookie is otherwise invalid.
- CLEAR_PENDING = 7; // A clear of the user data is pending (e.g.
- // initiated by privacy request). Client should
- // come back later.
- TRANSIENT_ERROR = 8; // A transient error occured (eg. backend
- // timeout). Client should try again later.
- MIGRATION_DONE = 9; // Migration has finished for one or more data
- // types. Client should clear the cache for
- // these data types only and then re-sync with
- // a server.
- DISABLED_BY_ADMIN = 10; // An administrator disabled sync for this domain.
- UNKNOWN = 100; // Unknown value. This should never be explicitly
- // used; it is the default value when an
- // out-of-date client parses a value it doesn't
- // recognize.
- }
-
- enum Action {
- UPGRADE_CLIENT = 0; // Upgrade the client to latest version.
- CLEAR_USER_DATA_AND_RESYNC = 1; // Clear user data from dashboard and
- // setup sync again.
- ENABLE_SYNC_ON_ACCOUNT = 2; // The administrator needs to enable sync
- // on the account.
- STOP_AND_RESTART_SYNC = 3; // Stop sync and set up sync again.
- DISABLE_SYNC_ON_CLIENT = 4; // Wipe the client of all sync data and
- // stop syncing.
- UNKNOWN_ACTION = 5; // This is the default.
- }
-
- enum DeviceType {
- TYPE_WIN = 1;
- TYPE_MAC = 2;
- TYPE_LINUX = 3;
- TYPE_CROS = 4;
- TYPE_OTHER = 5;
- TYPE_PHONE = 6;
- TYPE_TABLET = 7;
- }
-
- // This is the successor to GetUpdatesSource. It merges the "normal mode"
- // values (LOCAL, NOTIFICATION and DATATYPE_REFRESH), which were never really
- // mutually exclusive to being with, into the GU_TRIGGER value. It also
- // drops support for some old values that are not supported by newer clients.
- //
- // Mind the gaps: Some values are intentionally unused because we want to
- // keep the values in sync with GetUpdatesSource as much as possible. Please
- // don't add any values < 12 unless there's a good reason for it.
- //
- // Introduced in M28.
- enum GetUpdatesOrigin {
- UNKNOWN_ORIGIN = 0; // The source was not set by the caller.
- PERIODIC = 4; // The source of the update was periodic polling.
- NEWLY_SUPPORTED_DATATYPE = 7; // The client is in configuration mode
- // because it's syncing all datatypes, and
- // support for a new datatype was recently
- // released via a software auto-update.
- MIGRATION = 8; // The client is in configuration mode because a
- // MIGRATION_DONE error previously returned by the
- // server necessitated resynchronization.
- NEW_CLIENT = 9; // The client is in configuration mode because the
- // user enabled sync for the first time. Not to be
- // confused with FIRST_UPDATE.
- RECONFIGURATION = 10; // The client is in configuration mode because the
- // user opted to sync a different set of datatypes.
- GU_TRIGGER = 12; // The client is in 'normal' mode. It may have several
- // reasons for requesting an update. See the per-type
- // GetUpdateTriggers message for more details.
- }
-}
diff --git a/chromium/sync/protocol/sync_protocol_error.cc b/chromium/sync/protocol/sync_protocol_error.cc
deleted file mode 100644
index cd22e9a46f6..00000000000
--- a/chromium/sync/protocol/sync_protocol_error.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/protocol/sync_protocol_error.h"
-
-#include <string>
-
-#include "base/logging.h"
-#include "base/values.h"
-
-namespace syncer {
-#define ENUM_CASE(x) case x: return #x; break;
-
-const char* GetSyncErrorTypeString(SyncProtocolErrorType type) {
- switch (type) {
- ENUM_CASE(SYNC_SUCCESS);
- ENUM_CASE(NOT_MY_BIRTHDAY);
- ENUM_CASE(THROTTLED);
- ENUM_CASE(CLEAR_PENDING);
- ENUM_CASE(TRANSIENT_ERROR);
- ENUM_CASE(NON_RETRIABLE_ERROR);
- ENUM_CASE(MIGRATION_DONE);
- ENUM_CASE(INVALID_CREDENTIAL);
- ENUM_CASE(DISABLED_BY_ADMIN);
- ENUM_CASE(UNKNOWN_ERROR);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetClientActionString(ClientAction action) {
- switch (action) {
- ENUM_CASE(UPGRADE_CLIENT);
- ENUM_CASE(CLEAR_USER_DATA_AND_RESYNC);
- ENUM_CASE(ENABLE_SYNC_ON_ACCOUNT);
- ENUM_CASE(STOP_AND_RESTART_SYNC);
- ENUM_CASE(DISABLE_SYNC_ON_CLIENT);
- ENUM_CASE(STOP_SYNC_FOR_DISABLED_ACCOUNT);
- ENUM_CASE(UNKNOWN_ACTION);
- }
- NOTREACHED();
- return "";
-}
-
-SyncProtocolError::SyncProtocolError()
- : error_type(UNKNOWN_ERROR),
- action(UNKNOWN_ACTION) {
-}
-
-SyncProtocolError::~SyncProtocolError() {
-}
-
-base::DictionaryValue* SyncProtocolError::ToValue() const {
- base::DictionaryValue* value = new base::DictionaryValue();
- value->SetString("ErrorType",
- GetSyncErrorTypeString(error_type));
- value->SetString("ErrorDescription", error_description);
- value->SetString("url", url);
- value->SetString("action", GetClientActionString(action));
- return value;
-}
-
-} // namespace syncer
-
diff --git a/chromium/sync/protocol/sync_protocol_error.h b/chromium/sync/protocol/sync_protocol_error.h
deleted file mode 100644
index be9232d2198..00000000000
--- a/chromium/sync/protocol/sync_protocol_error.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef SYNC_PROTOCOL_SYNC_PROTOCOL_ERROR_H_
-#define SYNC_PROTOCOL_SYNC_PROTOCOL_ERROR_H_
-
-#include <string>
-
-#include "base/values.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer{
-
-enum SyncProtocolErrorType {
- // Success case.
- SYNC_SUCCESS,
-
- // Birthday does not match that of the server.
- NOT_MY_BIRTHDAY,
-
- // Server is busy. Try later.
- THROTTLED,
-
- // Clear user data is being currently executed by the server.
- CLEAR_PENDING,
-
- // Server cannot service the request now.
- TRANSIENT_ERROR,
-
- // Server does not wish the client to retry any more until the action has
- // been taken.
- NON_RETRIABLE_ERROR,
-
- // Indicates the datatypes have been migrated and the client should resync
- // them to get the latest progress markers.
- MIGRATION_DONE,
-
- // Invalid Credential.
- INVALID_CREDENTIAL,
-
- // An administrator disabled sync for this domain.
- DISABLED_BY_ADMIN,
-
- // The default value.
- UNKNOWN_ERROR
-};
-
-enum ClientAction {
- // Upgrade the client to latest version.
- UPGRADE_CLIENT,
-
- // Clear user data and setup sync again.
- CLEAR_USER_DATA_AND_RESYNC,
-
- // Set the bit on the account to enable sync.
- ENABLE_SYNC_ON_ACCOUNT,
-
- // Stop sync and restart sync.
- STOP_AND_RESTART_SYNC,
-
- // Wipe this client of any sync data.
- DISABLE_SYNC_ON_CLIENT,
-
- // Account is disabled by admin. Stop sync, clear prefs and show message on
- // settings page that account is disabled.
- STOP_SYNC_FOR_DISABLED_ACCOUNT,
-
- // The default. No action.
- UNKNOWN_ACTION
-};
-
-struct SYNC_EXPORT SyncProtocolError {
- SyncProtocolErrorType error_type;
- std::string error_description;
- std::string url;
- ClientAction action;
- ModelTypeSet error_data_types;
- SyncProtocolError();
- ~SyncProtocolError();
- base::DictionaryValue* ToValue() const;
-};
-
-SYNC_EXPORT const char* GetSyncErrorTypeString(SyncProtocolErrorType type);
-SYNC_EXPORT const char* GetClientActionString(ClientAction action);
-} // namespace syncer
-#endif // SYNC_PROTOCOL_SYNC_PROTOCOL_ERROR_H_
-
diff --git a/chromium/sync/protocol/synced_notification_data.proto b/chromium/sync/protocol/synced_notification_data.proto
deleted file mode 100644
index 72c612d725b..00000000000
--- a/chromium/sync/protocol/synced_notification_data.proto
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for push notifications..
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "synced_notification_render.proto";
-
-// This message allows clients to identify a notification they have created.
-message SyncedNotificationIdentifier {
- // The application that the notification is a part of.
- optional string app_id = 1;
-
- // Notifications with the same coalescing key (isolated to the same app_id)
- // will be grouped together when fetched.
- optional string coalescing_key = 2;
-}
-
-message SyncedNotificationCreator {
- // The gaia id of the creator. If a notification does not have a clear
- // creator, skip this and follow the directions below to use a system creator.
- optional int64 gaia_id = 1;
-
- // Indicates that the creator is a "system" creator. Example of these are
- // notifications sent to the user where the addressee is "Google", such as the
- // "You have violated our TOS, and have 3 days to fix it or you'll lose your
- // account" notifications. If is_system is set, gaia_id must not be set and
- // instead the app_id field must be set.
- optional bool is_system = 2;
-
- // Only set this in the system-creator case.
- optional string app_id = 3;
-}
-
-message SyncedNotificationRecipients {
- repeated int64 gaia_id = 1;
-
- // For now, only support gaia id recipients. Add more recipient types via
- // 'repeated Type other_type = X' when necessary.
-}
-
-message SyncedNotification {
- // A secondary type that is isolated within the same app_id.
- //
- // NOTE: For ASBE support purposes this must be in the format [A-Za-z_]+.
- optional string type = 1;
-
- // Whatever string the client entered during creation. If no external_id is
- // specified, the notification can no longer be identified individually for
- // fetching/deleting, etc...
- optional string external_id = 2;
-
- // The creator of the notification.
- optional SyncedNotificationCreator creator = 3;
-
- // Client specific data.
- optional MapData client_data = 4;
-}
-
-message CoalescedSyncedNotification {
- // An opaque string key used to identify individual coalesced notifications.
- optional string key = 1;
-
- optional string app_id = 2;
-
- // All the notifications that are grouped together.
- repeated SyncedNotification notification = 3;
-
- // Data that is used directly by endpoints to render notifications in the case
- // where no "native" app can handle the notification.
- optional SyncedNotificationRenderInfo render_info = 4;
-
- // Read state will be per coalesced notification.
- enum ReadState {
- UNREAD = 1;
- READ = 2;
- DISMISSED = 3;
- }
- optional ReadState read_state = 5;
-
- // The time when the LATEST notification of the coalesced notification is
- // created (in milliseconds since the linux epoch).
- optional uint64 creation_time_msec = 6;
-
- enum Priority {
- LOW = 1;
- STANDARD = 2;
- HIGH = 3;
- // We will most likely add at least one more priority in the near future.
- };
- optional Priority priority = 7;
-}
-
-message SyncedNotificationList {
- repeated CoalescedSyncedNotification coalesced_notification = 1;
-}
-
-// MapData, Data, and ListData are used to sending aribitrary payloads
-// between instances of applications using Synced Notifications. The
-// schema atop MapData will be defined by the client application.
-message MapData {
- message Entry {
- optional string key = 1;
- optional Data value = 2;
- };
- repeated Entry entry = 1;
-};
-
-message Data {
- optional bool boolean_value = 1;
- optional int32 int_value = 2;
- optional double float_value = 3;
- optional string string_value = 4;
- optional ListData list_value = 5;
- optional MapData map_value = 6;
-};
-
-message ListData {
- repeated Data value = 1;
-}; \ No newline at end of file
diff --git a/chromium/sync/protocol/synced_notification_render.proto b/chromium/sync/protocol/synced_notification_render.proto
deleted file mode 100644
index 5ee073ed4f1..00000000000
--- a/chromium/sync/protocol/synced_notification_render.proto
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Data that is used directly by endpoints to render notifications in the case
-// where no "native" app can handle the notification.
-message SyncedNotificationRenderInfo {
- // Render information for the collapsed (summary) view of a notification.
- optional CollapsedInfo collapsed_info = 1;
-
- // Render information for the expanded view of a notification.
- optional ExpandedInfo expanded_info = 2;
-}
-
-// Render information for the collapsed (summary) view of a coalesced
-// notification.
-message CollapsedInfo {
- optional SimpleCollapsedLayout simple_collapsed_layout = 1;
-
- // The creation time of the notification in microseconds since the UNIX
- // epoch.
- optional uint64 creation_timestamp_usec = 2;
-
- // The default destination target.
- optional SyncedNotificationDestination default_destination = 3;
-
- repeated Target target = 4;
-
- // Defines a repeated list of meta tags that provide some context on what
- // this collapsed info is describing. Nothing about the display of this
- // collapsed info is defined by the meta tags.
- repeated string meta_tag = 5;
-}
-
-// Render information for the expanded (detail) view of a coalesced
-// notification.
-message ExpandedInfo {
- optional SimpleExpandedLayout simple_expanded_layout = 1;
-
- // Collapsed information for each notification in the coalesced group.
- repeated CollapsedInfo collapsed_info = 2;
-
- // A set of targets for actions the user can take, or destinations the
- // viewer can be taken to. These relate to the coalesced notification.
- repeated Target target = 3;
-
- // Enhanced context for the expanded view.
- repeated string meta_tag = 4;
-}
-
-message SimpleCollapsedLayout {
- // Application icon.
- optional SyncedNotificationImage app_icon = 1;
-
- // Profile image(s) of the notification creator(s) to show in the
- // collapsed UI.
- repeated SyncedNotificationProfileImage profile_image = 2;
-
- // Heading - often the name(s) of the notification creator(s).
- optional string heading = 3;
-
- // Description - often the action that generated the notification.
- optional string description = 4;
-
- // Media - one or more shared media items.
- repeated Media media = 5;
-
- // Annotation - often the annotation of the entity generating the
- // notification.
- optional string annotation = 6;
-}
-
-message SimpleExpandedLayout {
- // Title - often the title of the underlying entity referred to by the
- // notification(s).
- optional string title = 1;
-
- // Text content - often a snippet of text from the underlying entity
- // reference or the notification.
- optional string text = 2;
-
- repeated Media media = 3;
-
- // Profile image, usually this is the creator of the referenced entity.
- optional SyncedNotificationProfileImage profile_image = 4;
-
- // A set of targets for actions the user can take, or destinations the
- // viewer can be taken to. Usually these relate to the referenced entity.
- repeated Target target = 5;
-}
-
-// Media.
-message Media {
- // TOOD(jro): Do we need other media types?
- optional SyncedNotificationImage image = 1;
-}
-
-// Secondary destinations and actions grouped into a message to account for
-// ordering.
-message Target {
- // URL that the user will be taken to by clicking on the notification.
- optional SyncedNotificationDestination destination = 1;
- // URI to POST if the user clicks on a button.
- optional SyncedNotificationAction action = 2;
-
- // A key to identify this target within a group of targets.
- optional string target_key = 3;
-}
-
-// A Destination is a target URL that the user can be taken to by clicking on or
-// selecting the notification or part thereof.
-message SyncedNotificationDestination {
- // The description for the link to the destination.
- optional string text = 1;
-
- // The icon to use for the link to the destination.
- optional SyncedNotificationImage icon = 2;
-
- // The destination URL.
- optional string url = 3;
-
- // Optional label to aid accessibility.
- optional string accessibility_label = 4;
-}
-
-// An Action encapsulates an UI component that trigger certain programmable
-// actions. Depending on the endpoint, this may show up as a HTML button, an
-// action button associated with the notification on native mobile, a link, or
-// even the notification card itself.
-message SyncedNotificationAction {
- // The description for the Action.
- optional string text = 1;
-
- // The icon to use for the Action.
- optional SyncedNotificationImage icon = 2;
-
- // The URL that performs the action.
- optional string url = 3;
-
- // Additional request data.
- optional string request_data = 4;
-
- // Optional label to aid accessibility.
- optional string accessibility_label= 5;
-
- // Defines a repeated list of meta tags that provide some context on this
- // action. Nothing about the display of this action is defined by the tags.
- repeated string meta_tag = 6;
-}
-
-message SyncedNotificationImage {
- // Note that the image may be from any source. Clients wishing to resize the
- // image should ensure the image is proxied appropriately.
- optional string url = 1;
- optional string alt_text = 2;
- optional int32 preferred_width = 3;
- optional int32 preferred_height = 4;
-}
-
-message SyncedNotificationProfileImage {
- // Url for the image.
- optional string image_url = 1;
- // Object id for the image.
- optional string oid = 2;
- // Name to display for this image.
- optional string display_name = 3;
-}
diff --git a/chromium/sync/protocol/synced_notification_specifics.proto b/chromium/sync/protocol/synced_notification_specifics.proto
deleted file mode 100644
index 373ac576320..00000000000
--- a/chromium/sync/protocol/synced_notification_specifics.proto
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for synced notifications.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-import "synced_notification_data.proto";
-import "synced_notification_render.proto";
-
-// Properties of SyncedNotificationSpecifics objects.
-message SyncedNotificationSpecifics {
- // The notification from the server.
- optional CoalescedSyncedNotification coalesced_notification = 1;
-}
diff --git a/chromium/sync/protocol/test.proto b/chromium/sync/protocol/test.proto
deleted file mode 100644
index 7453cee8c9e..00000000000
--- a/chromium/sync/protocol/test.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Protocol messages used only for testing.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-message UnknownFieldsTestA {
- required bool foo = 1;
-}
-
-message UnknownFieldsTestB {
- required bool foo = 1;
- required bool bar = 2;
-}
diff --git a/chromium/sync/protocol/theme_specifics.proto b/chromium/sync/protocol/theme_specifics.proto
deleted file mode 100644
index f8318e89837..00000000000
--- a/chromium/sync/protocol/theme_specifics.proto
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for themes.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of theme sync objects.
-message ThemeSpecifics {
- // If set, we're using a custom theme and all custom_* fields should be
- // present. If not set, we use the default or system theme (see below)
- // and all custom_* fields should be omitted.
- optional bool use_custom_theme = 1;
- // This field is only set (i.e., not cleared) on platforms that have
- // a distinction between the system theme and the default theme, but
- // other platforms must be careful to pass through the set state (not
- // just the value) of this flag.
- //
- // If true, we use the system theme by default (i.e., when we don't use
- // a custom theme) for platforms that make a distinction between the
- // default theme and the system theme. Has no effect if use_custom_theme
- // is set.
- optional bool use_system_theme_by_default = 2;
-
- // Custom-theme-specific fields.
- optional string custom_theme_name = 3;
- optional string custom_theme_id = 4;
- optional string custom_theme_update_url = 5;
-}
-
diff --git a/chromium/sync/protocol/typed_url_specifics.proto b/chromium/sync/protocol/typed_url_specifics.proto
deleted file mode 100644
index 1f8a4b81ffd..00000000000
--- a/chromium/sync/protocol/typed_url_specifics.proto
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Sync protocol datatype extension for typed urls.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// Properties of typed_url sync objects - fields correspond to similarly named
-// fields in history::URLRow.
-message TypedUrlSpecifics {
- // Actual URL that was typed.
- optional string url = 1;
- // Title of the page corresponding to this URL.
- optional string title = 2;
- // The number of times this URL was typed. Deprecated since we no longer sync
- // this information (it's inferred by the history code from the transition
- // type for each visit).
- // optional int32 typed_count = 3; (deprecated)
- // True if the URL should be used for auto-complete.
- optional bool hidden = 4;
- // The number of visits (includes page reloads).
- // repeated int64 visit = 5; (deprecated)
- // The number of explicit navigations to this URL (excludes page reloads) -
- // deprecated as we no longer sync this (history code infers this from the
- // |visits| array).
- // optional int32 visited_count = 6; (deprecated)
- // Timestamps for all visits to this URL.
- repeated int64 visits = 7 [packed=true];
- // The PageTransition::Type for each of the visits in the |visit| array. Both
- // arrays must be the same length.
- repeated int32 visit_transitions = 8 [packed=true];
-}
diff --git a/chromium/sync/protocol/unique_position.proto b/chromium/sync/protocol/unique_position.proto
deleted file mode 100644
index 4864f27ae0e..00000000000
--- a/chromium/sync/protocol/unique_position.proto
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Protobuf representation of the UniquePosition class.
-
-// Update proto_value_conversions{.h,.cc,_unittest.cc} if you change
-// any fields in this file.
-
-syntax = "proto2";
-
-option optimize_for = LITE_RUNTIME;
-option retain_unknown_fields = true;
-
-package sync_pb;
-
-// A UniquePosition is a string of bytes.
-//
-// Unique positions are unique per-item, since they are guaranteed to end with a
-// fixed-length suffix that is unique per-item. The position string may not end
-// with a '\0' byte.
-//
-// Prior to the suffix is a series of arbitrary bytes of arbitrary length.
-// Items under the same parent are positioned relative to each other by a
-// lexicographic comparison of their UniquePosition values.
-message UniquePosition {
- // History:
- //
- // Unique positions were first introduced in M28. This change was rolled out
- // in such a way that it would try to maintain backwards compatibilty with
- // clients that understood only the old int64-based positions.
- //
- // At first, clients supported only the 'value' field. This version never
- // made it to stable. We later added support for the 'compressed_value'
- // field, and clients would populate either one or the other.
- //
- // In M30, we added the custom_compressed_v1 representation. This
- // representation was better than the previous implementations in almost every
- // way. However, we could not use it right away, since older clients would
- // not understand it. We decided to write both the old-style ('value' or
- // 'custom_compressed') representation and the 'custom_compressed_v1'
- // repersentations to every protobuf during the transition period. Protobufs
- // written during this transition period would be readable by clients who
- // understand at least one of the two formats.
- //
- // In M33, we dropped support for writing the backwards-compatibility fields.
- // Protobufs written by this version or later are not be intelligible by
- // clients with version M29 or older. Those clients will end up making use of
- // the old int64 position fallback mechanism.
-
- // The uncompressed string of bytes representing the position.
- //
- // Deprecated. See history note above.
- optional bytes value = 1;
-
- // The client may choose to write a compressed position to this field instead
- // of populating the 'value' above. If it chooses to use compression, the
- // 'value' field above must be empty. The position value will be compressed
- // with gzip and stored in the compressed_value field. The position's
- // uncompressed length must be specified and written to the
- // uncompressed_length field.
- //
- // Deprecated. See history note above.
- optional bytes compressed_value = 2;
- optional uint64 uncompressed_length = 3;
-
- // This encoding uses compression scheme designed especially for unique
- // positions. It has the property that X < Y precisely when Compressed(X) <
- // Compressed(Y), which is very useful when the most common operation is to
- // compare these positions against each other. Their values may remain
- // compressed in memory.
- //
- // The compression scheme is implemented and documented in
- // sync/internal_api/base/unique_position.cc.
- //
- // As of M30, this is the preferred encoding. Newer clients may continue to
- // populate the 'value' and 'compressed_value' fields to ensure backwards
- // compatibility, but they will always try to read from this field first.
- optional bytes custom_compressed_v1 = 4;
-}
diff --git a/chromium/sync/sessions/DEPS b/chromium/sync/sessions/DEPS
deleted file mode 100644
index b4042e3ae9a..00000000000
--- a/chromium/sync/sessions/DEPS
+++ /dev/null
@@ -1,12 +0,0 @@
-include_rules = [
- "+sync/base",
- "+sync/engine",
- "+sync/internal_api/public/base",
- "+sync/internal_api/public/engine",
- "+sync/internal_api/public/sessions",
- "+sync/notifier",
- "+sync/protocol",
- "+sync/syncable",
- "+sync/test",
- "+sync/util",
-]
diff --git a/chromium/sync/sessions/data_type_tracker.cc b/chromium/sync/sessions/data_type_tracker.cc
deleted file mode 100644
index b0b464923ce..00000000000
--- a/chromium/sync/sessions/data_type_tracker.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/data_type_tracker.h"
-
-#include "base/logging.h"
-#include "sync/internal_api/public/base/invalidation.h"
-#include "sync/notifier/single_object_invalidation_set.h"
-#include "sync/sessions/nudge_tracker.h"
-
-namespace syncer {
-namespace sessions {
-
-DataTypeTracker::DataTypeTracker()
- : local_nudge_count_(0),
- local_refresh_request_count_(0),
- local_payload_overflow_(false),
- server_payload_overflow_(false),
- payload_buffer_size_(NudgeTracker::kDefaultMaxPayloadsPerType) { }
-
-DataTypeTracker::~DataTypeTracker() { }
-
-void DataTypeTracker::RecordLocalChange() {
- local_nudge_count_++;
-}
-
-void DataTypeTracker::RecordLocalRefreshRequest() {
- local_refresh_request_count_++;
-}
-
-void DataTypeTracker::RecordRemoteInvalidations(
- const SingleObjectInvalidationSet& invalidations) {
- for (SingleObjectInvalidationSet::const_iterator it =
- invalidations.begin(); it != invalidations.end(); ++it) {
- if (it->is_unknown_version()) {
- server_payload_overflow_ = true;
- } else {
- pending_payloads_.push_back(it->payload());
- if (pending_payloads_.size() > payload_buffer_size_) {
- // Drop the oldest payload if we've overflowed.
- pending_payloads_.pop_front();
- local_payload_overflow_ = true;
- }
- }
- }
-}
-
-void DataTypeTracker::RecordSuccessfulSyncCycle() {
- // If we were throttled, then we would have been excluded from this cycle's
- // GetUpdates and Commit actions. Our state remains unchanged.
- if (IsThrottled())
- return;
-
- local_nudge_count_ = 0;
- local_refresh_request_count_ = 0;
- pending_payloads_.clear();
- local_payload_overflow_ = false;
- server_payload_overflow_ = false;
-}
-
-// This limit will take effect on all future invalidations received.
-void DataTypeTracker::UpdatePayloadBufferSize(size_t new_size) {
- payload_buffer_size_ = new_size;
-}
-
-bool DataTypeTracker::IsSyncRequired() const {
- return !IsThrottled() &&
- (local_nudge_count_ > 0 ||
- local_refresh_request_count_ > 0 ||
- HasPendingInvalidation() ||
- local_payload_overflow_ ||
- server_payload_overflow_);
-}
-
-bool DataTypeTracker::IsGetUpdatesRequired() const {
- return !IsThrottled() &&
- (local_refresh_request_count_ > 0 ||
- HasPendingInvalidation() ||
- local_payload_overflow_ ||
- server_payload_overflow_);
-}
-
-bool DataTypeTracker::HasLocalChangePending() const {
- return local_nudge_count_ > 0;
-}
-
-bool DataTypeTracker::HasPendingInvalidation() const {
- return !pending_payloads_.empty();
-}
-
-std::string DataTypeTracker::GetMostRecentInvalidationPayload() const {
- return pending_payloads_.back();
-}
-
-void DataTypeTracker::SetLegacyNotificationHint(
- sync_pb::DataTypeProgressMarker* progress) const {
- DCHECK(!IsThrottled())
- << "We should not make requests if the type is throttled.";
-
- if (HasPendingInvalidation()) {
- // The old-style source info can contain only one hint per type. We grab
- // the most recent, to mimic the old coalescing behaviour.
- progress->set_notification_hint(GetMostRecentInvalidationPayload());
- } else if (HasLocalChangePending()) {
- // The old-style source info sent up an empty string (as opposed to
- // nothing at all) when the type was locally nudged, but had not received
- // any invalidations.
- progress->set_notification_hint("");
- }
-}
-
-void DataTypeTracker::FillGetUpdatesTriggersMessage(
- sync_pb::GetUpdateTriggers* msg) const {
- // Fill the list of payloads, if applicable. The payloads must be ordered
- // oldest to newest, so we insert them in the same order as we've been storing
- // them internally.
- for (PayloadList::const_iterator payload_it = pending_payloads_.begin();
- payload_it != pending_payloads_.end(); ++payload_it) {
- msg->add_notification_hint(*payload_it);
- }
-
- msg->set_client_dropped_hints(local_payload_overflow_);
- msg->set_local_modification_nudges(local_nudge_count_);
- msg->set_datatype_refresh_nudges(local_refresh_request_count_);
-
- // TODO(rlarocque): Support Tango trickles. See crbug.com/223437.
- // msg->set_server_dropped_hints(server_payload_oveflow_);
-}
-
-bool DataTypeTracker::IsThrottled() const {
- return !unthrottle_time_.is_null();
-}
-
-base::TimeDelta DataTypeTracker::GetTimeUntilUnthrottle(
- base::TimeTicks now) const {
- if (!IsThrottled()) {
- NOTREACHED();
- return base::TimeDelta::FromSeconds(0);
- }
- return std::max(base::TimeDelta::FromSeconds(0),
- unthrottle_time_ - now);
-}
-
-void DataTypeTracker::ThrottleType(base::TimeDelta duration,
- base::TimeTicks now) {
- unthrottle_time_ = std::max(unthrottle_time_, now + duration);
-}
-
-void DataTypeTracker::UpdateThrottleState(base::TimeTicks now) {
- if (now >= unthrottle_time_) {
- unthrottle_time_ = base::TimeTicks();
- }
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/data_type_tracker.h b/chromium/sync/sessions/data_type_tracker.h
deleted file mode 100644
index 6ecaa0eb7c8..00000000000
--- a/chromium/sync/sessions/data_type_tracker.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A class to track the per-type scheduling data.
-#ifndef SYNC_SESSIONS_DATA_TYPE_TRACKER_H_
-#define SYNC_SESSIONS_DATA_TYPE_TRACKER_H_
-
-#include <deque>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/time/time.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-class Invalidation;
-class SingleObjectInvalidationSet;
-
-namespace sessions {
-
-typedef std::deque<std::string> PayloadList;
-
-class DataTypeTracker {
- public:
- DataTypeTracker();
- ~DataTypeTracker();
-
- // For STL compatibility, we do not forbid the creation of a default copy
- // constructor and assignment operator.
-
- // Tracks that a local change has been made to this type.
- void RecordLocalChange();
-
- // Tracks that a local refresh request has been made for this type.
- void RecordLocalRefreshRequest();
-
- // Tracks that we received invalidation notifications for this type.
- void RecordRemoteInvalidations(
- const SingleObjectInvalidationSet& invalidations);
-
- // Records that a sync cycle has been performed successfully.
- // Generally, this means that all local changes have been committed and all
- // remote changes have been downloaded, so we can clear any flags related to
- // pending work.
- void RecordSuccessfulSyncCycle();
-
- // Updates the size of the invalidations payload buffer.
- void UpdatePayloadBufferSize(size_t new_size);
-
- // Returns true if there is a good reason to perform a sync cycle. This does
- // not take into account whether or not now is a good time to perform a sync
- // cycle. That's for the scheduler to decide.
- bool IsSyncRequired() const;
-
- // Returns true if there is a good reason to fetch updates for this type as
- // part of the next sync cycle.
- bool IsGetUpdatesRequired() const;
-
- // Returns true if there is an uncommitted local change.
- bool HasLocalChangePending() const;
-
- // Returns true if we've received an invalidation since we last fetched
- // updates.
- bool HasPendingInvalidation() const;
-
- // Returns the most recent invalidation payload.
- std::string GetMostRecentInvalidationPayload() const;
-
- // Fills in the legacy invalidaiton payload information fields.
- void SetLegacyNotificationHint(
- sync_pb::DataTypeProgressMarker* progress) const;
-
- // Fills some type-specific contents of a GetUpdates request protobuf. These
- // messages provide the server with the information it needs to decide how to
- // handle a request.
- void FillGetUpdatesTriggersMessage(sync_pb::GetUpdateTriggers* msg) const;
-
- // Returns true if the type is currently throttled.
- bool IsThrottled() const;
-
- // Returns the time until this type's throttling interval expires. Should not
- // be called unless IsThrottled() returns true. The returned value will be
- // increased to zero if it would otherwise have been negative.
- base::TimeDelta GetTimeUntilUnthrottle(base::TimeTicks now) const;
-
- // Throttles the type from |now| until |now| + |duration|.
- void ThrottleType(base::TimeDelta duration, base::TimeTicks now);
-
- // Unthrottles the type if |now| >= the throttle expiry time.
- void UpdateThrottleState(base::TimeTicks now);
-
- private:
- // Number of local change nudges received for this type since the last
- // successful sync cycle.
- int local_nudge_count_;
-
- // Number of local refresh requests received for this type since the last
- // successful sync cycle.
- int local_refresh_request_count_;
-
- // The list of invalidation payloads received since the last successful sync
- // cycle. This list may be incomplete. See also: local_payload_overflow_ and
- // server_payload_overflow_.
- PayloadList pending_payloads_;
-
- // This flag is set if the the local buffer space was been exhausted, causing
- // us to prematurely discard the invalidation payloads stored locally.
- bool local_payload_overflow_;
-
- // This flag is set if the server buffer space was exchauseted, causing the
- // server to prematurely discard some invalidation payloads.
- bool server_payload_overflow_;
-
- size_t payload_buffer_size_;
-
- // If !unthrottle_time_.is_null(), this type is throttled and may not download
- // or commit data until the specified time.
- base::TimeTicks unthrottle_time_;
-};
-
-} // namespace syncer
-} // namespace sessions
-
-#endif // SYNC_SESSIONS_DATA_TYPE_TRACKER_H_
diff --git a/chromium/sync/sessions/debug_info_getter.h b/chromium/sync/sessions/debug_info_getter.h
deleted file mode 100644
index 7efe0cb649f..00000000000
--- a/chromium/sync/sessions/debug_info_getter.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
-#define SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-namespace sessions {
-
-// This is the interface that needs to be implemented by the event listener
-// to communicate the debug info data to the syncer.
-class SYNC_EXPORT_PRIVATE DebugInfoGetter {
- public:
- // Gets the client debug info. Be sure to clear the info to ensure the data
- // isn't sent multiple times.
- virtual void GetDebugInfo(sync_pb::DebugInfo* debug_info) = 0;
-
- // Clears the debug info.
- virtual void ClearDebugInfo() = 0;
-
- virtual ~DebugInfoGetter() {}
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
diff --git a/chromium/sync/sessions/nudge_tracker.cc b/chromium/sync/sessions/nudge_tracker.cc
deleted file mode 100644
index 94bef81a350..00000000000
--- a/chromium/sync/sessions/nudge_tracker.cc
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/nudge_tracker.h"
-
-#include "base/basictypes.h"
-#include "sync/internal_api/public/base/invalidation.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-namespace sessions {
-
-size_t NudgeTracker::kDefaultMaxPayloadsPerType = 10;
-
-NudgeTracker::NudgeTracker()
- : updates_source_(sync_pb::GetUpdatesCallerInfo::UNKNOWN),
- invalidations_enabled_(false),
- invalidations_out_of_sync_(true) {
- ModelTypeSet protocol_types = ProtocolTypes();
- // Default initialize all the type trackers.
- for (ModelTypeSet::Iterator it = protocol_types.First(); it.Good();
- it.Inc()) {
- type_trackers_[it.Get()] = DataTypeTracker();
- }
-}
-
-NudgeTracker::~NudgeTracker() { }
-
-bool NudgeTracker::IsSyncRequired() const {
- for (TypeTrackerMap::const_iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- if (it->second.IsSyncRequired()) {
- return true;
- }
- }
- return false;
-}
-
-bool NudgeTracker::IsGetUpdatesRequired() const {
- if (invalidations_out_of_sync_)
- return true;
- for (TypeTrackerMap::const_iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- if (it->second.IsGetUpdatesRequired()) {
- return true;
- }
- }
- return false;
-}
-
-void NudgeTracker::RecordSuccessfulSyncCycle() {
- updates_source_ = sync_pb::GetUpdatesCallerInfo::UNKNOWN;
-
- // A successful cycle while invalidations are enabled puts us back into sync.
- invalidations_out_of_sync_ = !invalidations_enabled_;
-
- for (TypeTrackerMap::iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- it->second.RecordSuccessfulSyncCycle();
- }
-}
-
-void NudgeTracker::RecordLocalChange(ModelTypeSet types) {
- // Don't overwrite an NOTIFICATION or DATATYPE_REFRESH source. The server
- // makes some assumptions about the source; overriding these sources with
- // LOCAL could lead to incorrect behaviour. This is part of the reason why
- // we're deprecating 'source' in favor of 'origin'.
- if (updates_source_ != sync_pb::GetUpdatesCallerInfo::NOTIFICATION
- && updates_source_ != sync_pb::GetUpdatesCallerInfo::DATATYPE_REFRESH) {
- updates_source_ = sync_pb::GetUpdatesCallerInfo::LOCAL;
- }
-
- for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
- DCHECK(type_trackers_.find(it.Get()) != type_trackers_.end());
- type_trackers_[it.Get()].RecordLocalChange();
- }
-}
-
-void NudgeTracker::RecordLocalRefreshRequest(ModelTypeSet types) {
- // Don't overwrite an NOTIFICATION source. The server makes some assumptions
- // about the source. Overriding this source with LOCAL could lead to
- // incorrect behaviour. This is part of the reason why we're deprecating
- // 'source' in favor of 'origin'.
- if (updates_source_ != sync_pb::GetUpdatesCallerInfo::NOTIFICATION) {
- updates_source_ = sync_pb::GetUpdatesCallerInfo::DATATYPE_REFRESH;
- }
-
- for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
- DCHECK(type_trackers_.find(it.Get()) != type_trackers_.end());
- type_trackers_[it.Get()].RecordLocalRefreshRequest();
- }
-}
-
-void NudgeTracker::RecordRemoteInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) {
- updates_source_ = sync_pb::GetUpdatesCallerInfo::NOTIFICATION;
-
- ObjectIdSet ids = invalidation_map.GetObjectIds();
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- ModelType type;
- if (!ObjectIdToRealModelType(*it, &type)) {
- NOTREACHED()
- << "Object ID " << ObjectIdToString(*it)
- << " does not map to valid model type";
- }
- DCHECK(type_trackers_.find(type) != type_trackers_.end());
- type_trackers_[type].RecordRemoteInvalidations(
- invalidation_map.ForObject(*it));
- }
-}
-
-void NudgeTracker::OnInvalidationsEnabled() {
- invalidations_enabled_ = true;
-}
-
-void NudgeTracker::OnInvalidationsDisabled() {
- invalidations_enabled_ = false;
- invalidations_out_of_sync_ = true;
-}
-
-void NudgeTracker::SetTypesThrottledUntil(
- ModelTypeSet types,
- base::TimeDelta length,
- base::TimeTicks now) {
- for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) {
- type_trackers_[it.Get()].ThrottleType(length, now);
- }
-}
-
-void NudgeTracker::UpdateTypeThrottlingState(base::TimeTicks now) {
- for (TypeTrackerMap::iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- it->second.UpdateThrottleState(now);
- }
-}
-
-bool NudgeTracker::IsAnyTypeThrottled() const {
- for (TypeTrackerMap::const_iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- if (it->second.IsThrottled()) {
- return true;
- }
- }
- return false;
-}
-
-bool NudgeTracker::IsTypeThrottled(ModelType type) const {
- DCHECK(type_trackers_.find(type) != type_trackers_.end());
- return type_trackers_.find(type)->second.IsThrottled();
-}
-
-base::TimeDelta NudgeTracker::GetTimeUntilNextUnthrottle(
- base::TimeTicks now) const {
- DCHECK(IsAnyTypeThrottled()) << "This function requires a pending unthrottle";
- const base::TimeDelta kMaxTimeDelta =
- base::TimeDelta::FromInternalValue(kint64max);
-
- // Return min of GetTimeUntilUnthrottle() values for all IsThrottled() types.
- base::TimeDelta time_until_next_unthrottle = kMaxTimeDelta;
- for (TypeTrackerMap::const_iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- if (it->second.IsThrottled()) {
- time_until_next_unthrottle =
- std::min(time_until_next_unthrottle,
- it->second.GetTimeUntilUnthrottle(now));
- }
- }
- DCHECK(kMaxTimeDelta != time_until_next_unthrottle);
-
- return time_until_next_unthrottle;
-}
-
-ModelTypeSet NudgeTracker::GetThrottledTypes() const {
- ModelTypeSet result;
- for (TypeTrackerMap::const_iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- if (it->second.IsThrottled()) {
- result.Put(it->first);
- }
- }
- return result;
-}
-
-void NudgeTracker::SetLegacyNotificationHint(
- ModelType type,
- sync_pb::DataTypeProgressMarker* progress) const {
- DCHECK(type_trackers_.find(type) != type_trackers_.end());
- type_trackers_.find(type)->second.SetLegacyNotificationHint(progress);
-}
-
-sync_pb::GetUpdatesCallerInfo::GetUpdatesSource NudgeTracker::updates_source()
- const {
- return updates_source_;
-}
-
-void NudgeTracker::FillProtoMessage(
- ModelType type,
- sync_pb::GetUpdateTriggers* msg) const {
- DCHECK(type_trackers_.find(type) != type_trackers_.end());
-
- // Fill what we can from the global data.
- msg->set_invalidations_out_of_sync(invalidations_out_of_sync_);
-
- // Delegate the type-specific work to the DataTypeTracker class.
- type_trackers_.find(type)->second.FillGetUpdatesTriggersMessage(msg);
-}
-
-void NudgeTracker::SetHintBufferSize(size_t size) {
- for (TypeTrackerMap::iterator it = type_trackers_.begin();
- it != type_trackers_.end(); ++it) {
- it->second.UpdatePayloadBufferSize(size);
- }
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/nudge_tracker.h b/chromium/sync/sessions/nudge_tracker.h
deleted file mode 100644
index fcd01503410..00000000000
--- a/chromium/sync/sessions/nudge_tracker.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A class to track the outstanding work required to bring the client back into
-// sync with the server.
-#ifndef SYNC_SESSIONS_NUDGE_TRACKER_H_
-#define SYNC_SESSIONS_NUDGE_TRACKER_H_
-
-#include <list>
-#include <map>
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/data_type_tracker.h"
-
-namespace syncer {
-
-class ObjectIdInvalidationMap;
-
-namespace sessions {
-
-class SYNC_EXPORT_PRIVATE NudgeTracker {
- public:
- static size_t kDefaultMaxPayloadsPerType;
-
- NudgeTracker();
- ~NudgeTracker();
-
- // Returns true if there is a good reason for performing a sync cycle.
- // This does not take into account whether or not this is a good *time* to
- // perform a sync cycle; that's the scheduler's job.
- bool IsSyncRequired() const;
-
- // Returns true if there is a good reason for performing a get updates
- // request as part of the next sync cycle.
- bool IsGetUpdatesRequired() const;
-
- // Tells this class that all required update fetching and committing has
- // completed successfully.
- void RecordSuccessfulSyncCycle();
-
- // Takes note of a local change.
- void RecordLocalChange(ModelTypeSet types);
-
- // Takes note of a locally issued request to refresh a data type.
- void RecordLocalRefreshRequest(ModelTypeSet types);
-
- // Takes note of the receipt of an invalidation notice from the server.
- void RecordRemoteInvalidation(
- const ObjectIdInvalidationMap& invalidation_map);
-
- // These functions should be called to keep this class informed of the status
- // of the connection to the invalidations server.
- void OnInvalidationsEnabled();
- void OnInvalidationsDisabled();
-
- // Marks |types| as being throttled from |now| until |now| + |length|.
- void SetTypesThrottledUntil(ModelTypeSet types,
- base::TimeDelta length,
- base::TimeTicks now);
-
- // Removes any throttling that have expired by time |now|.
- void UpdateTypeThrottlingState(base::TimeTicks now);
-
- // Returns the time of the next type unthrottling, relative to
- // the input |now| value.
- base::TimeDelta GetTimeUntilNextUnthrottle(base::TimeTicks now) const;
-
- // Returns true if any type is currenlty throttled.
- bool IsAnyTypeThrottled() const;
-
- // Returns true if |type| is currently throttled.
- bool IsTypeThrottled(ModelType type) const;
-
- // Returns the set of currently throttled types.
- ModelTypeSet GetThrottledTypes() const;
-
- // Returns the 'source' of the GetUpdate request.
- //
- // This flag is deprecated, but still used by the server. There can be more
- // than one reason to perform a particular sync cycle. The GetUpdatesTrigger
- // message will contain more reliable information about the reasons for
- // performing a sync.
- //
- // See the implementation for important information about the coalesce logic.
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source() const;
-
- // Fills a GetUpdatesTrigger message for the next GetUpdates request. This is
- // used by the DownloadUpdatesCommand to dump lots of useful per-type state
- // information into the GetUpdate request before sending it off to the server.
- void FillProtoMessage(
- ModelType type,
- sync_pb::GetUpdateTriggers* msg) const;
-
- // Fills a ProgressMarker with single legacy notification hint expected by the
- // sync server. Newer servers will rely on the data set by FillProtoMessage()
- // instead of this.
- void SetLegacyNotificationHint(
- ModelType type,
- sync_pb::DataTypeProgressMarker* progress) const;
-
- // Adjusts the number of hints that can be stored locally.
- void SetHintBufferSize(size_t size);
-
- private:
- typedef std::map<ModelType, DataTypeTracker> TypeTrackerMap;
-
- TypeTrackerMap type_trackers_;
-
- // Merged updates source. This should be obsolete, but the server still
- // relies on it for some heuristics.
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source_;
-
- // Tracks whether or not invalidations are currently enabled.
- bool invalidations_enabled_;
-
- // This flag is set if suspect that some technical malfunction or known bug
- // may have left us with some unserviced invalidations.
- //
- // Keeps track of whether or not we're fully in sync with the invalidation
- // server. This can be false even if invalidations are enabled and working
- // correctly. For example, until we get ack-tracking working properly, we
- // won't persist invalidations between restarts, so we may be out of sync when
- // we restart. The only way to get back into sync is to have invalidations
- // enabled, then complete a sync cycle to make sure we're fully up to date.
- bool invalidations_out_of_sync_;
-
- size_t num_payloads_per_type_;
-
- DISALLOW_COPY_AND_ASSIGN(NudgeTracker);
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_NUDGE_TRACKER_H_
diff --git a/chromium/sync/sessions/nudge_tracker_unittest.cc b/chromium/sync/sessions/nudge_tracker_unittest.cc
deleted file mode 100644
index 450d17fe3da..00000000000
--- a/chromium/sync/sessions/nudge_tracker_unittest.cc
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/model_type_test_util.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/sessions/nudge_tracker.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-testing::AssertionResult ModelTypeSetEquals(ModelTypeSet a, ModelTypeSet b) {
- if (a.Equals(b)) {
- return testing::AssertionSuccess();
- } else {
- return testing::AssertionFailure()
- << "Left side " << ModelTypeSetToString(a)
- << ", does not match rigth side: " << ModelTypeSetToString(b);
- }
-}
-
-} // namespace
-
-namespace sessions {
-
-class NudgeTrackerTest : public ::testing::Test {
- public:
- NudgeTrackerTest() {
- SetInvalidationsInSync();
- }
-
- static size_t GetHintBufferSize() {
- // Assumes that no test has adjusted this size.
- return NudgeTracker::kDefaultMaxPayloadsPerType;
- }
-
- bool InvalidationsOutOfSync() const {
- // We don't currently track invalidations out of sync on a per-type basis.
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
- return gu_trigger.invalidations_out_of_sync();
- }
-
- int ProtoLocallyModifiedCount(ModelType type) const {
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(type, &gu_trigger);
- return gu_trigger.local_modification_nudges();
- }
-
- int ProtoRefreshRequestedCount(ModelType type) const {
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(type, &gu_trigger);
- return gu_trigger.datatype_refresh_nudges();
- }
-
- void SetInvalidationsInSync() {
- nudge_tracker_.OnInvalidationsEnabled();
- nudge_tracker_.RecordSuccessfulSyncCycle();
- }
-
- protected:
- NudgeTracker nudge_tracker_;
-};
-
-// Exercise an empty NudgeTracker.
-// Use with valgrind to detect uninitialized members.
-TEST_F(NudgeTrackerTest, EmptyNudgeTracker) {
- // Now we're at the normal, "idle" state.
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::UNKNOWN,
- nudge_tracker_.updates_source());
-
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
-
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::UNKNOWN,
- nudge_tracker_.updates_source());
-}
-
-// Verify that nudges override each other based on a priority order.
-// LOCAL < DATATYPE_REFRESH < NOTIFICATION
-TEST_F(NudgeTrackerTest, SourcePriorities) {
- // Track a local nudge.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::LOCAL,
- nudge_tracker_.updates_source());
-
- // A refresh request will override it.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(TYPED_URLS));
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::DATATYPE_REFRESH,
- nudge_tracker_.updates_source());
-
- // Another local nudge will not be enough to change it.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::DATATYPE_REFRESH,
- nudge_tracker_.updates_source());
-
- // An invalidation will override the refresh request source.
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(PREFERENCES, 1, "hint");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- nudge_tracker_.updates_source());
-
- // Neither local nudges nor refresh requests will override it.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- nudge_tracker_.updates_source());
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(TYPED_URLS));
- EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::NOTIFICATION,
- nudge_tracker_.updates_source());
-}
-
-TEST_F(NudgeTrackerTest, HintCoalescing) {
- // Easy case: record one hint.
- {
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(BOOKMARKS, 1, "bm_hint_1");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
-
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
- ASSERT_EQ(1, gu_trigger.notification_hint_size());
- EXPECT_EQ("bm_hint_1", gu_trigger.notification_hint(0));
- EXPECT_FALSE(gu_trigger.client_dropped_hints());
- }
-
- // Record a second hint for the same type.
- {
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(BOOKMARKS, 2, "bm_hint_2");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
-
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
- ASSERT_EQ(2, gu_trigger.notification_hint_size());
-
- // Expect the most hint recent is last in the list.
- EXPECT_EQ("bm_hint_1", gu_trigger.notification_hint(0));
- EXPECT_EQ("bm_hint_2", gu_trigger.notification_hint(1));
- EXPECT_FALSE(gu_trigger.client_dropped_hints());
- }
-
- // Record a hint for a different type.
- {
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(PASSWORDS, 1, "pw_hint_1");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
-
- // Re-verify the bookmarks to make sure they're unaffected.
- sync_pb::GetUpdateTriggers bm_gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &bm_gu_trigger);
- ASSERT_EQ(2, bm_gu_trigger.notification_hint_size());
- EXPECT_EQ("bm_hint_1", bm_gu_trigger.notification_hint(0));
- EXPECT_EQ("bm_hint_2",
- bm_gu_trigger.notification_hint(1)); // most recent last.
- EXPECT_FALSE(bm_gu_trigger.client_dropped_hints());
-
- // Verify the new type, too.
- sync_pb::GetUpdateTriggers pw_gu_trigger;
- nudge_tracker_.FillProtoMessage(PASSWORDS, &pw_gu_trigger);
- ASSERT_EQ(1, pw_gu_trigger.notification_hint_size());
- EXPECT_EQ("pw_hint_1", pw_gu_trigger.notification_hint(0));
- EXPECT_FALSE(pw_gu_trigger.client_dropped_hints());
- }
-}
-
-TEST_F(NudgeTrackerTest, DropHintsLocally) {
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(BOOKMARKS, 1, "hint");
-
- for (size_t i = 0; i < GetHintBufferSize(); ++i) {
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
- }
- {
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
- EXPECT_EQ(GetHintBufferSize(),
- static_cast<size_t>(gu_trigger.notification_hint_size()));
- EXPECT_FALSE(gu_trigger.client_dropped_hints());
- }
-
- // Force an overflow.
- ObjectIdInvalidationMap invalidation_map2 =
- BuildInvalidationMap(BOOKMARKS, 1000, "new_hint");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map2);
-
- {
- sync_pb::GetUpdateTriggers gu_trigger;
- nudge_tracker_.FillProtoMessage(BOOKMARKS, &gu_trigger);
- EXPECT_EQ(GetHintBufferSize(),
- static_cast<size_t>(gu_trigger.notification_hint_size()));
- EXPECT_TRUE(gu_trigger.client_dropped_hints());
-
- // Verify the newest hint was not dropped and is the last in the list.
- EXPECT_EQ("new_hint", gu_trigger.notification_hint(GetHintBufferSize()-1));
-
- // Verify the oldest hint, too.
- EXPECT_EQ("hint", gu_trigger.notification_hint(0));
- }
-}
-
-// TODO(rlarocque): Add trickles support. See crbug.com/223437.
-// TEST_F(NudgeTrackerTest, DropHintsAtServer);
-
-// Checks the behaviour of the invalidations-out-of-sync flag.
-TEST_F(NudgeTrackerTest, EnableDisableInvalidations) {
- // Start with invalidations offline.
- nudge_tracker_.OnInvalidationsDisabled();
- EXPECT_TRUE(InvalidationsOutOfSync());
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-
- // Simply enabling invalidations does not bring us back into sync.
- nudge_tracker_.OnInvalidationsEnabled();
- EXPECT_TRUE(InvalidationsOutOfSync());
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-
- // We must successfully complete a sync cycle while invalidations are enabled
- // to be sure that we're in sync.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(InvalidationsOutOfSync());
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // If the invalidator malfunctions, we go become unsynced again.
- nudge_tracker_.OnInvalidationsDisabled();
- EXPECT_TRUE(InvalidationsOutOfSync());
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-
- // A sync cycle while invalidations are disabled won't reset the flag.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_TRUE(InvalidationsOutOfSync());
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-
- // Nor will the re-enabling of invalidations be sufficient, even now that
- // we've had a successful sync cycle.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_TRUE(InvalidationsOutOfSync());
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-}
-
-// Tests that locally modified types are correctly written out to the
-// GetUpdateTriggers proto.
-TEST_F(NudgeTrackerTest, WriteLocallyModifiedTypesToProto) {
- // Should not be locally modified by default.
- EXPECT_EQ(0, ProtoLocallyModifiedCount(PREFERENCES));
-
- // Record a local bookmark change. Verify it was registered correctly.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(PREFERENCES));
- EXPECT_EQ(1, ProtoLocallyModifiedCount(PREFERENCES));
-
- // Record a successful sync cycle. Verify the count is cleared.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_EQ(0, ProtoLocallyModifiedCount(PREFERENCES));
-}
-
-// Tests that refresh requested types are correctly written out to the
-// GetUpdateTriggers proto.
-TEST_F(NudgeTrackerTest, WriteRefreshRequestedTypesToProto) {
- // There should be no refresh requested by default.
- EXPECT_EQ(0, ProtoRefreshRequestedCount(SESSIONS));
-
- // Record a local refresh request. Verify it was registered correctly.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_EQ(1, ProtoRefreshRequestedCount(SESSIONS));
-
- // Record a successful sync cycle. Verify the count is cleared.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_EQ(0, ProtoRefreshRequestedCount(SESSIONS));
-}
-
-// Basic tests for the IsSyncRequired() flag.
-TEST_F(NudgeTrackerTest, IsSyncRequired) {
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-
- // Local changes.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-
- // Refresh requests.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-
- // Invalidations.
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(PREFERENCES, 1, "hint");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
- EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-}
-
-// Basic tests for the IsGetUpdatesRequired() flag.
-TEST_F(NudgeTrackerTest, IsGetUpdatesRequired) {
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // Local changes.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(SESSIONS));
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // Refresh requests.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // Invalidations.
- ObjectIdInvalidationMap invalidation_map =
- BuildInvalidationMap(PREFERENCES, 1, "hint");
- nudge_tracker_.RecordRemoteInvalidation(invalidation_map);
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-}
-
-// Test IsSyncRequired() responds correctly to data type throttling.
-TEST_F(NudgeTrackerTest, IsSyncRequired_Throttling) {
- const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
- const base::TimeDelta throttle_length = base::TimeDelta::FromMinutes(10);
- const base::TimeTicks t1 = t0 + throttle_length;
-
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-
- // A local change to sessions enables the flag.
- nudge_tracker_.RecordLocalChange(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
-
- // But the throttling of sessions unsets it.
- nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS),
- throttle_length,
- t0);
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-
- // A refresh request for bookmarks means we have reason to sync again.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(BOOKMARKS));
- EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
-
- // A successful sync cycle means we took care of bookmarks.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsSyncRequired());
-
- // But we still haven't dealt with sessions. We'll need to remember
- // that sessions are out of sync and re-enable the flag when their
- // throttling interval expires.
- nudge_tracker_.UpdateTypeThrottlingState(t1);
- EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsSyncRequired());
-}
-
-// Test IsGetUpdatesRequired() responds correctly to data type throttling.
-TEST_F(NudgeTrackerTest, IsGetUpdatesRequired_Throttling) {
- const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
- const base::TimeDelta throttle_length = base::TimeDelta::FromMinutes(10);
- const base::TimeTicks t1 = t0 + throttle_length;
-
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // A refresh request to sessions enables the flag.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-
- // But the throttling of sessions unsets it.
- nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS),
- throttle_length,
- t0);
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // A refresh request for bookmarks means we have reason to sync again.
- nudge_tracker_.RecordLocalRefreshRequest(ModelTypeSet(BOOKMARKS));
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-
- // A successful sync cycle means we took care of bookmarks.
- nudge_tracker_.RecordSuccessfulSyncCycle();
- EXPECT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
-
- // But we still haven't dealt with sessions. We'll need to remember
- // that sessions are out of sync and re-enable the flag when their
- // throttling interval expires.
- nudge_tracker_.UpdateTypeThrottlingState(t1);
- EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsGetUpdatesRequired());
-}
-
-// Tests throttling-related getter functions when no types are throttled.
-TEST_F(NudgeTrackerTest, NoTypesThrottled) {
- EXPECT_FALSE(nudge_tracker_.IsAnyTypeThrottled());
- EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.GetThrottledTypes().Empty());
-}
-
-// Tests throttling-related getter functions when some types are throttled.
-TEST_F(NudgeTrackerTest, ThrottleAndUnthrottle) {
- const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
- const base::TimeDelta throttle_length = base::TimeDelta::FromMinutes(10);
- const base::TimeTicks t1 = t0 + throttle_length;
-
- nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, PREFERENCES),
- throttle_length,
- t0);
-
- EXPECT_TRUE(nudge_tracker_.IsAnyTypeThrottled());
- EXPECT_TRUE(nudge_tracker_.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.IsTypeThrottled(PREFERENCES));
- EXPECT_FALSE(nudge_tracker_.GetThrottledTypes().Empty());
- EXPECT_EQ(throttle_length, nudge_tracker_.GetTimeUntilNextUnthrottle(t0));
-
- nudge_tracker_.UpdateTypeThrottlingState(t1);
-
- EXPECT_FALSE(nudge_tracker_.IsAnyTypeThrottled());
- EXPECT_FALSE(nudge_tracker_.IsTypeThrottled(SESSIONS));
- EXPECT_TRUE(nudge_tracker_.GetThrottledTypes().Empty());
-}
-
-TEST_F(NudgeTrackerTest, OverlappingThrottleIntervals) {
- const base::TimeTicks t0 = base::TimeTicks::FromInternalValue(1234);
- const base::TimeDelta throttle1_length = base::TimeDelta::FromMinutes(10);
- const base::TimeDelta throttle2_length = base::TimeDelta::FromMinutes(20);
- const base::TimeTicks t1 = t0 + throttle1_length;
- const base::TimeTicks t2 = t0 + throttle2_length;
-
- // Setup the longer of two intervals.
- nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, PREFERENCES),
- throttle2_length,
- t0);
- EXPECT_TRUE(ModelTypeSetEquals(
- ModelTypeSet(SESSIONS, PREFERENCES),
- nudge_tracker_.GetThrottledTypes()));
- EXPECT_EQ(throttle2_length,
- nudge_tracker_.GetTimeUntilNextUnthrottle(t0));
-
- // Setup the shorter interval.
- nudge_tracker_.SetTypesThrottledUntil(ModelTypeSet(SESSIONS, BOOKMARKS),
- throttle1_length,
- t0);
- EXPECT_TRUE(ModelTypeSetEquals(
- ModelTypeSet(SESSIONS, PREFERENCES, BOOKMARKS),
- nudge_tracker_.GetThrottledTypes()));
- EXPECT_EQ(throttle1_length,
- nudge_tracker_.GetTimeUntilNextUnthrottle(t0));
-
- // Expire the first interval.
- nudge_tracker_.UpdateTypeThrottlingState(t1);
-
- // SESSIONS appeared in both intervals. We expect it will be throttled for
- // the longer of the two, so it's still throttled at time t1.
- EXPECT_TRUE(ModelTypeSetEquals(
- ModelTypeSet(SESSIONS, PREFERENCES),
- nudge_tracker_.GetThrottledTypes()));
- EXPECT_EQ(throttle2_length - throttle1_length,
- nudge_tracker_.GetTimeUntilNextUnthrottle(t1));
-
- // Expire the second interval.
- nudge_tracker_.UpdateTypeThrottlingState(t2);
- EXPECT_TRUE(nudge_tracker_.GetThrottledTypes().Empty());
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/status_controller.cc b/chromium/sync/sessions/status_controller.cc
deleted file mode 100644
index 752b9ab47f6..00000000000
--- a/chromium/sync/sessions/status_controller.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/status_controller.h"
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/sync_protocol_error.h"
-
-namespace syncer {
-namespace sessions {
-
-StatusController::StatusController() {
-}
-
-StatusController::~StatusController() {}
-
-void StatusController::increment_num_updates_downloaded_by(int value) {
- model_neutral_.num_updates_downloaded_total += value;
-}
-
-void StatusController::set_types_needing_local_migration(ModelTypeSet types) {
- model_neutral_.types_needing_local_migration = types;
-}
-
-void StatusController::increment_num_tombstone_updates_downloaded_by(
- int value) {
- model_neutral_.num_tombstone_updates_downloaded_total += value;
-}
-
-void StatusController::increment_num_reflected_updates_downloaded_by(
- int value) {
- model_neutral_.num_reflected_updates_downloaded_total += value;
-}
-
-void StatusController::set_num_server_changes_remaining(
- int64 changes_remaining) {
- model_neutral_.num_server_changes_remaining = changes_remaining;
-}
-
-void StatusController::UpdateStartTime() {
- sync_start_time_ = base::Time::Now();
-}
-
-void StatusController::set_num_successful_bookmark_commits(int value) {
- model_neutral_.num_successful_bookmark_commits = value;
-}
-
-void StatusController::increment_num_successful_bookmark_commits() {
- model_neutral_.num_successful_bookmark_commits++;
-}
-
-void StatusController::increment_num_successful_commits() {
- model_neutral_.num_successful_commits++;
-}
-
-void StatusController::increment_num_updates_applied_by(int value) {
- model_neutral_.num_updates_applied += value;
-}
-
-void StatusController::increment_num_encryption_conflicts_by(int value) {
- model_neutral_.num_encryption_conflicts += value;
-}
-
-void StatusController::increment_num_hierarchy_conflicts_by(int value) {
- model_neutral_.num_hierarchy_conflicts += value;
-}
-
-void StatusController::increment_num_server_conflicts() {
- model_neutral_.num_server_conflicts++;
-}
-
-void StatusController::increment_num_local_overwrites() {
- model_neutral_.num_local_overwrites++;
-}
-
-void StatusController::increment_num_server_overwrites() {
- model_neutral_.num_server_overwrites++;
-}
-
-void StatusController::set_sync_protocol_error(
- const SyncProtocolError& error) {
- model_neutral_.sync_protocol_error = error;
-}
-
-void StatusController::set_last_get_key_result(const SyncerError result) {
- model_neutral_.last_get_key_result = result;
-}
-
-void StatusController::set_last_download_updates_result(
- const SyncerError result) {
- model_neutral_.last_download_updates_result = result;
-}
-
-void StatusController::set_commit_result(const SyncerError result) {
- model_neutral_.commit_result = result;
-}
-
-SyncerError StatusController::last_get_key_result() const {
- return model_neutral_.last_get_key_result;
-}
-
-int StatusController::num_updates_applied() const {
- return model_neutral_.num_updates_applied;
-}
-
-int StatusController::num_server_overwrites() const {
- return model_neutral_.num_server_overwrites;
-}
-
-int StatusController::num_encryption_conflicts() const {
- return model_neutral_.num_encryption_conflicts;
-}
-
-int StatusController::num_hierarchy_conflicts() const {
- return model_neutral_.num_hierarchy_conflicts;
-}
-
-int StatusController::num_server_conflicts() const {
- return model_neutral_.num_server_conflicts;
-}
-
-int StatusController::TotalNumConflictingItems() const {
- int sum = 0;
- sum += num_encryption_conflicts();
- sum += num_hierarchy_conflicts();
- sum += num_server_conflicts();
- return sum;
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/status_controller.h b/chromium/sync/sessions/status_controller.h
deleted file mode 100644
index 005f158a81e..00000000000
--- a/chromium/sync/sessions/status_controller.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// StatusController handles all counter and status related number crunching and
-// state tracking on behalf of a SyncSession.
-//
-// This object may be accessed from many different threads. It will be accessed
-// most often from the syncer thread. However, when update application is in
-// progress it may also be accessed from the worker threads. This is safe
-// because only one of them will run at a time, and the syncer thread will be
-// blocked until update application completes.
-//
-// This object contains only global state. None of its members are per model
-// type counters.
-
-#ifndef SYNC_SESSIONS_STATUS_CONTROLLER_H_
-#define SYNC_SESSIONS_STATUS_CONTROLLER_H_
-
-#include <map>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/sessions/model_neutral_state.h"
-
-namespace syncer {
-namespace sessions {
-
-class SYNC_EXPORT_PRIVATE StatusController {
- public:
- explicit StatusController();
- ~StatusController();
-
- // ClientToServer messages.
- const ModelTypeSet commit_request_types() const {
- return model_neutral_.commit_request_types;
- }
- void set_commit_request_types(ModelTypeSet value) {
- model_neutral_.commit_request_types = value;
- }
-
- // Changelog related state.
- int64 num_server_changes_remaining() const {
- return model_neutral_.num_server_changes_remaining;
- }
-
- // Various conflict counters.
- int num_encryption_conflicts() const;
- int num_hierarchy_conflicts() const;
- int num_server_conflicts() const;
-
- // Aggregate sum of all conflicting items over all conflict types.
- int TotalNumConflictingItems() const;
-
- // Number of successfully applied updates.
- int num_updates_applied() const;
-
- int num_server_overwrites() const;
-
- base::Time sync_start_time() const {
- // The time at which we sent the first GetUpdates command for this sync.
- return sync_start_time_;
- }
-
- const ModelNeutralState& model_neutral_state() const {
- return model_neutral_;
- }
-
- SyncerError last_get_key_result() const;
-
- // Download counters.
- void set_num_server_changes_remaining(int64 changes_remaining);
- void increment_num_updates_downloaded_by(int value);
- void increment_num_tombstone_updates_downloaded_by(int value);
- void increment_num_reflected_updates_downloaded_by(int value);
-
- // Update application and conflict resolution counters.
- void increment_num_updates_applied_by(int value);
- void increment_num_encryption_conflicts_by(int value);
- void increment_num_hierarchy_conflicts_by(int value);
- void increment_num_server_conflicts();
- void increment_num_local_overwrites();
- void increment_num_server_overwrites();
-
- // Commit counters.
- void increment_num_successful_commits();
- void increment_num_successful_bookmark_commits();
- void set_num_successful_bookmark_commits(int value);
-
- // Server communication status tracking.
- void set_sync_protocol_error(const SyncProtocolError& error);
- void set_last_get_key_result(const SyncerError result);
- void set_last_download_updates_result(const SyncerError result);
- void set_commit_result(const SyncerError result);
-
- // A very important flag used to inform frontend of need to migrate.
- void set_types_needing_local_migration(ModelTypeSet types);
-
- void UpdateStartTime();
-
- private:
- ModelNeutralState model_neutral_;
-
- base::Time sync_start_time_;
-
- DISALLOW_COPY_AND_ASSIGN(StatusController);
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_STATUS_CONTROLLER_H_
diff --git a/chromium/sync/sessions/status_controller_unittest.cc b/chromium/sync/sessions/status_controller_unittest.cc
deleted file mode 100644
index c29bc5f717a..00000000000
--- a/chromium/sync/sessions/status_controller_unittest.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/sync_session.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace sessions {
-
-class StatusControllerTest : public testing::Test { };
-
-// This test is useful, as simple as it sounds, due to the copy-paste prone
-// nature of status_controller.cc (we have had bugs in the past where a set_foo
-// method was actually setting |bar_| instead!).
-TEST_F(StatusControllerTest, ReadYourWrites) {
- StatusController status;
- status.set_num_server_changes_remaining(13);
- EXPECT_EQ(13, status.num_server_changes_remaining());
-
- status.set_last_download_updates_result(SYNCER_OK);
- EXPECT_EQ(SYNCER_OK,
- status.model_neutral_state().last_download_updates_result);
-
- status.set_commit_result(SYNC_AUTH_ERROR);
- EXPECT_EQ(SYNC_AUTH_ERROR, status.model_neutral_state().commit_result);
-
- for (int i = 0; i < 14; i++)
- status.increment_num_successful_commits();
- EXPECT_EQ(14, status.model_neutral_state().num_successful_commits);
-}
-
-// Test TotalNumConflictingItems
-TEST_F(StatusControllerTest, TotalNumConflictingItems) {
- StatusController status;
- EXPECT_EQ(0, status.TotalNumConflictingItems());
-
- status.increment_num_server_conflicts();
- status.increment_num_hierarchy_conflicts_by(3);
- status.increment_num_encryption_conflicts_by(2);
- EXPECT_EQ(6, status.TotalNumConflictingItems());
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/sync_session.cc b/chromium/sync/sessions/sync_session.cc
deleted file mode 100644
index 030ef0f0f58..00000000000
--- a/chromium/sync/sessions/sync_session.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/sync_session.h"
-
-#include <algorithm>
-#include <iterator>
-
-#include "base/logging.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/syncable/directory.h"
-
-namespace syncer {
-namespace sessions {
-
-// static
-SyncSession* SyncSession::Build(SyncSessionContext* context,
- Delegate* delegate) {
- return new SyncSession(context, delegate);
-}
-
-SyncSession::SyncSession(
- SyncSessionContext* context,
- Delegate* delegate)
- : context_(context),
- delegate_(delegate) {
- status_controller_.reset(new StatusController());
-}
-
-SyncSession::~SyncSession() {}
-
-SyncSessionSnapshot SyncSession::TakeSnapshot() const {
- return TakeSnapshotWithSource(sync_pb::GetUpdatesCallerInfo::UNKNOWN);
-}
-
-SyncSessionSnapshot SyncSession::TakeSnapshotWithSource(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource legacy_updates_source) const {
- syncable::Directory* dir = context_->directory();
-
- ProgressMarkerMap download_progress_markers;
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- ModelType type(ModelTypeFromInt(i));
- dir->GetDownloadProgressAsString(type, &download_progress_markers[type]);
- }
-
- std::vector<int> num_entries_by_type(MODEL_TYPE_COUNT, 0);
- std::vector<int> num_to_delete_entries_by_type(MODEL_TYPE_COUNT, 0);
- dir->CollectMetaHandleCounts(&num_entries_by_type,
- &num_to_delete_entries_by_type);
-
- SyncSessionSnapshot snapshot(
- status_controller_->model_neutral_state(),
- download_progress_markers,
- delegate_->IsCurrentlyThrottled(),
- status_controller_->num_encryption_conflicts(),
- status_controller_->num_hierarchy_conflicts(),
- status_controller_->num_server_conflicts(),
- context_->notifications_enabled(),
- dir->GetEntriesCount(),
- status_controller_->sync_start_time(),
- num_entries_by_type,
- num_to_delete_entries_by_type,
- legacy_updates_source);
-
- return snapshot;
-}
-
-void SyncSession::SendSyncCycleEndEventNotification(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source) {
- SyncEngineEvent event(SyncEngineEvent::SYNC_CYCLE_ENDED);
- event.snapshot = TakeSnapshotWithSource(source);
-
- DVLOG(1) << "Sending cycle end event with snapshot: "
- << event.snapshot.ToString();
- context()->NotifyListeners(event);
-}
-
-void SyncSession::SendEventNotification(SyncEngineEvent::EventCause cause) {
- SyncEngineEvent event(cause);
- event.snapshot = TakeSnapshot();
-
- DVLOG(1) << "Sending event with snapshot: " << event.snapshot.ToString();
- context()->NotifyListeners(event);
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/sync_session.h b/chromium/sync/sessions/sync_session.h
deleted file mode 100644
index f5767206d2a..00000000000
--- a/chromium/sync/sessions/sync_session.h
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// A class representing an attempt to synchronize the local syncable data
-// store with a sync server. A SyncSession instance is passed as a stateful
-// bundle throughout the sync cycle. The SyncSession is not reused across
-// sync cycles; each cycle starts with a new one.
-
-#ifndef SYNC_SESSIONS_SYNC_SESSION_H_
-#define SYNC_SESSIONS_SYNC_SESSION_H_
-
-#include <map>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/sessions/sync_session_snapshot.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/sessions/sync_session_context.h"
-
-namespace syncer {
-class ModelSafeWorker;
-
-namespace sessions {
-
-class NudgeTracker;
-
-class SYNC_EXPORT_PRIVATE SyncSession {
- public:
- // The Delegate services events that occur during the session requiring an
- // explicit (and session-global) action, as opposed to events that are simply
- // recorded in per-session state.
- class SYNC_EXPORT_PRIVATE Delegate {
- public:
- // The client was throttled and should cease-and-desist syncing activity
- // until the specified time.
- virtual void OnThrottled(const base::TimeDelta& throttle_duration) = 0;
-
- // Some of the client's types were throttled.
- virtual void OnTypesThrottled(
- ModelTypeSet types,
- const base::TimeDelta& throttle_duration) = 0;
-
- // Silenced intervals can be out of phase with individual sessions, so the
- // delegate is the only thing that can give an authoritative answer for
- // "is syncing silenced right now". This shouldn't be necessary very often
- // as the delegate ensures no session is started if syncing is silenced.
- // ** Note ** This will return true if silencing commenced during this
- // session and the interval has not yet elapsed, but the contract here is
- // solely based on absolute time values. So, this cannot be used to infer
- // that any given session _instance_ is silenced. An example of reasonable
- // use is for UI reporting.
- virtual bool IsCurrentlyThrottled() = 0;
-
- // The client has been instructed to change its short poll interval.
- virtual void OnReceivedShortPollIntervalUpdate(
- const base::TimeDelta& new_interval) = 0;
-
- // The client has been instructed to change its long poll interval.
- virtual void OnReceivedLongPollIntervalUpdate(
- const base::TimeDelta& new_interval) = 0;
-
- // The client has been instructed to change its sessions commit
- // delay.
- virtual void OnReceivedSessionsCommitDelay(
- const base::TimeDelta& new_delay) = 0;
-
- // Called for the syncer to respond to the error sent by the server.
- virtual void OnSyncProtocolError(
- const sessions::SyncSessionSnapshot& snapshot) = 0;
-
- // Called when the server wants to change the number of hints the client
- // will buffer locally.
- virtual void OnReceivedClientInvalidationHintBufferSize(int size) = 0;
-
- protected:
- virtual ~Delegate() {}
- };
-
- // Build a session without a nudge tracker. Used for poll or configure type
- // sync cycles.
- static SyncSession* Build(SyncSessionContext* context,
- Delegate* delegate);
- ~SyncSession();
-
- // Builds a thread-safe and read-only copy of the current session state.
- SyncSessionSnapshot TakeSnapshot() const;
- SyncSessionSnapshot TakeSnapshotWithSource(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource legacy_updates_source)
- const;
-
- // Builds and sends a snapshot to the session context's listeners.
- void SendSyncCycleEndEventNotification(
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source);
- void SendEventNotification(SyncEngineEvent::EventCause cause);
-
- // TODO(akalin): Split this into context() and mutable_context().
- SyncSessionContext* context() const { return context_; }
- Delegate* delegate() const { return delegate_; }
- const StatusController& status_controller() const {
- return *status_controller_.get();
- }
- StatusController* mutable_status_controller() {
- return status_controller_.get();
- }
-
- private:
- SyncSession(SyncSessionContext* context, Delegate* delegate);
-
- // The context for this session, guaranteed to outlive |this|.
- SyncSessionContext* const context_;
-
- // The delegate for this session, must never be NULL.
- Delegate* const delegate_;
-
- // Our controller for various status and error counters.
- scoped_ptr<StatusController> status_controller_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncSession);
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_SYNC_SESSION_H_
diff --git a/chromium/sync/sessions/sync_session_context.cc b/chromium/sync/sessions/sync_session_context.cc
deleted file mode 100644
index aa5dfa54044..00000000000
--- a/chromium/sync/sessions/sync_session_context.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/sync_session_context.h"
-
-#include "sync/sessions/debug_info_getter.h"
-#include "sync/util/extensions_activity.h"
-
-namespace syncer {
-namespace sessions {
-
-SyncSessionContext::SyncSessionContext(
- ServerConnectionManager* connection_manager,
- syncable::Directory* directory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- const std::vector<SyncEngineEventListener*>& listeners,
- DebugInfoGetter* debug_info_getter,
- TrafficRecorder* traffic_recorder,
- bool keystore_encryption_enabled,
- bool client_enabled_pre_commit_update_avoidance,
- const std::string& invalidator_client_id)
- : connection_manager_(connection_manager),
- directory_(directory),
- update_handler_deleter_(&update_handler_map_),
- commit_contributor_deleter_(&commit_contributor_map_),
- extensions_activity_(extensions_activity),
- notifications_enabled_(false),
- max_commit_batch_size_(kDefaultMaxCommitBatchSize),
- debug_info_getter_(debug_info_getter),
- traffic_recorder_(traffic_recorder),
- keystore_encryption_enabled_(keystore_encryption_enabled),
- invalidator_client_id_(invalidator_client_id),
- server_enabled_pre_commit_update_avoidance_(false),
- client_enabled_pre_commit_update_avoidance_(
- client_enabled_pre_commit_update_avoidance) {
- for (size_t i = 0u; i < workers.size(); ++i) {
- workers_.insert(
- std::make_pair(workers[i]->GetModelSafeGroup(), workers[i]));
- }
-
- std::vector<SyncEngineEventListener*>::const_iterator it;
- for (it = listeners.begin(); it != listeners.end(); ++it)
- listeners_.AddObserver(*it);
-}
-
-SyncSessionContext::~SyncSessionContext() {
-}
-
-void SyncSessionContext::set_routing_info(
- const ModelSafeRoutingInfo& routing_info) {
- enabled_types_ = GetRoutingInfoTypes(routing_info);
-
- // TODO(rlarocque): This is not a good long-term solution. We must find a
- // better way to initialize the set of CommitContributors and UpdateHandlers.
- STLDeleteValues<UpdateHandlerMap>(&update_handler_map_);
- STLDeleteValues<CommitContributorMap>(&commit_contributor_map_);
- for (ModelSafeRoutingInfo::const_iterator routing_iter = routing_info.begin();
- routing_iter != routing_info.end(); ++routing_iter) {
- ModelType type = routing_iter->first;
- ModelSafeGroup group = routing_iter->second;
- std::map<ModelSafeGroup, scoped_refptr<ModelSafeWorker> >::iterator
- worker_it = workers_.find(group);
- DCHECK(worker_it != workers_.end());
- scoped_refptr<ModelSafeWorker> worker = worker_it->second;
-
- SyncDirectoryUpdateHandler* handler =
- new SyncDirectoryUpdateHandler(directory(), type, worker);
- update_handler_map_.insert(std::make_pair(type, handler));
-
- SyncDirectoryCommitContributor* contributor =
- new SyncDirectoryCommitContributor(directory(), type);
- commit_contributor_map_.insert(std::make_pair(type, contributor));
- }
-}
-
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/sync_session_context.h b/chromium/sync/sessions/sync_session_context.h
deleted file mode 100644
index 5995ab151db..00000000000
--- a/chromium/sync/sessions/sync_session_context.h
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// SyncSessionContext encapsulates the contextual information and engine
-// components specific to a SyncSession. Unlike the SyncSession, the context
-// can be reused across several sync cycles.
-//
-// The context does not take ownership of its pointer members. It's up to
-// the surrounding classes to ensure those members remain valid while the
-// context is in use.
-//
-// It can only be used from the SyncerThread.
-
-#ifndef SYNC_SESSIONS_SYNC_SESSION_CONTEXT_H_
-#define SYNC_SESSIONS_SYNC_SESSION_CONTEXT_H_
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "base/stl_util.h"
-#include "sync/base/sync_export.h"
-#include "sync/engine/sync_directory_commit_contributor.h"
-#include "sync/engine/sync_directory_update_handler.h"
-#include "sync/engine/sync_engine_event.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/engine/traffic_recorder.h"
-#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/sessions/debug_info_getter.h"
-
-namespace syncer {
-
-class ExtensionsActivity;
-class ServerConnectionManager;
-
-namespace syncable {
-class Directory;
-}
-
-// Default number of items a client can commit in a single message.
-static const int kDefaultMaxCommitBatchSize = 25;
-
-namespace sessions {
-class TestScopedSessionEventListener;
-
-class SYNC_EXPORT_PRIVATE SyncSessionContext {
- public:
- SyncSessionContext(ServerConnectionManager* connection_manager,
- syncable::Directory* directory,
- const std::vector<ModelSafeWorker*>& workers,
- ExtensionsActivity* extensions_activity,
- const std::vector<SyncEngineEventListener*>& listeners,
- DebugInfoGetter* debug_info_getter,
- TrafficRecorder* traffic_recorder,
- bool keystore_encryption_enabled,
- bool client_enabled_pre_commit_update_avoidance,
- const std::string& invalidator_client_id);
-
- ~SyncSessionContext();
-
- ServerConnectionManager* connection_manager() {
- return connection_manager_;
- }
- syncable::Directory* directory() {
- return directory_;
- }
-
- ModelTypeSet enabled_types() const {
- return enabled_types_;
- }
-
- void set_routing_info(const ModelSafeRoutingInfo& routing_info);
-
- UpdateHandlerMap* update_handler_map() {
- return &update_handler_map_;
- }
-
- CommitContributorMap* commit_contributor_map() {
- return &commit_contributor_map_;
- }
-
- ExtensionsActivity* extensions_activity() {
- return extensions_activity_.get();
- }
-
- DebugInfoGetter* debug_info_getter() {
- return debug_info_getter_;
- }
-
- // Talk notification status.
- void set_notifications_enabled(bool enabled) {
- notifications_enabled_ = enabled;
- }
- bool notifications_enabled() { return notifications_enabled_; }
-
- // Account name, set once a directory has been opened.
- void set_account_name(const std::string& name) {
- DCHECK(account_name_.empty());
- account_name_ = name;
- }
- const std::string& account_name() const { return account_name_; }
-
- void set_max_commit_batch_size(int batch_size) {
- max_commit_batch_size_ = batch_size;
- }
- int32 max_commit_batch_size() const { return max_commit_batch_size_; }
-
- void NotifyListeners(const SyncEngineEvent& event) {
- FOR_EACH_OBSERVER(SyncEngineEventListener, listeners_,
- OnSyncEngineEvent(event));
- }
-
- TrafficRecorder* traffic_recorder() {
- return traffic_recorder_;
- }
-
- bool keystore_encryption_enabled() const {
- return keystore_encryption_enabled_;
- }
-
- void set_hierarchy_conflict_detected(bool value) {
- client_status_.set_hierarchy_conflict_detected(value);
- }
-
- const sync_pb::ClientStatus& client_status() const {
- return client_status_;
- }
-
- const std::string& invalidator_client_id() const {
- return invalidator_client_id_;
- }
-
- bool ShouldFetchUpdatesBeforeCommit() const {
- return !(server_enabled_pre_commit_update_avoidance_ ||
- client_enabled_pre_commit_update_avoidance_);
- }
-
- void set_server_enabled_pre_commit_update_avoidance(bool value) {
- server_enabled_pre_commit_update_avoidance_ = value;
- }
-
- private:
- // Rather than force clients to set and null-out various context members, we
- // extend our encapsulation boundary to scoped helpers that take care of this
- // once they are allocated. See definitions of these below.
- friend class TestScopedSessionEventListener;
-
- ObserverList<SyncEngineEventListener> listeners_;
-
- ServerConnectionManager* const connection_manager_;
- syncable::Directory* const directory_;
-
- // The set of enabled types. Derrived from the routing info set with
- // set_routing_info().
- ModelTypeSet enabled_types_;
-
- // A map of 'update handlers', one for each enabled type.
- // This must be kept in sync with the routing info. Our temporary solution to
- // that problem is to initialize this map in set_routing_info().
- UpdateHandlerMap update_handler_map_;
-
- // Deleter for the |update_handler_map_|.
- STLValueDeleter<UpdateHandlerMap> update_handler_deleter_;
-
- // A map of 'commit contributors', one for each enabled type.
- // This must be kept in sync with the routing info. Our temporary solution to
- // that problem is to initialize this map in set_routing_info().
- CommitContributorMap commit_contributor_map_;
-
- // Deleter for the |commit_contributor_map_|.
- STLValueDeleter<CommitContributorMap> commit_contributor_deleter_;
-
- // The set of ModelSafeWorkers. Used to execute tasks of various threads.
- std::map<ModelSafeGroup, scoped_refptr<ModelSafeWorker> > workers_;
-
- // We use this to stuff extensions activity into CommitMessages so the server
- // can correlate commit traffic with extension-related bookmark mutations.
- scoped_refptr<ExtensionsActivity> extensions_activity_;
-
- // Kept up to date with talk events to determine whether notifications are
- // enabled. True only if the notification channel is authorized and open.
- bool notifications_enabled_;
-
- // The name of the account being synced.
- std::string account_name_;
-
- // The server limits the number of items a client can commit in one batch.
- int max_commit_batch_size_;
-
- // We use this to get debug info to send to the server for debugging
- // client behavior on server side.
- DebugInfoGetter* const debug_info_getter_;
-
- TrafficRecorder* traffic_recorder_;
-
- // Satus information to be sent up to the server.
- sync_pb::ClientStatus client_status_;
-
- // Temporary variable while keystore encryption is behind a flag. True if
- // we should attempt performing keystore encryption related work, false if
- // the experiment is not enabled.
- bool keystore_encryption_enabled_;
-
- // This is a copy of the identifier the that the invalidations client used to
- // register itself with the invalidations server during startup. We need to
- // provide this to the sync server when we make changes to enable it to
- // prevent us from receiving notifications of changes we make ourselves.
- const std::string invalidator_client_id_;
-
- // Flag to enable or disable the no pre-commit GetUpdates experiment. When
- // this flag is set to false, the syncer has the option of not performing at
- // GetUpdates request when there is nothing to fetch.
- bool server_enabled_pre_commit_update_avoidance_;
-
- // If true, indicates that we've been passed a command-line flag to force
- // enable the pre-commit update avoidance experiment described above.
- const bool client_enabled_pre_commit_update_avoidance_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncSessionContext);
-};
-
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_SYNC_SESSION_CONTEXT_H_
diff --git a/chromium/sync/sessions/sync_session_unittest.cc b/chromium/sync/sessions/sync_session_unittest.cc
deleted file mode 100644
index e712552f580..00000000000
--- a/chromium/sync/sessions/sync_session_unittest.cc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/sync_session.h"
-
-#include "base/compiler_specific.h"
-#include "base/location.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/sessions/status_controller.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/fake_model_worker.h"
-#include "sync/test/engine/test_directory_setter_upper.h"
-#include "sync/util/extensions_activity.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-using syncable::WriteTransaction;
-
-namespace sessions {
-namespace {
-
-class SyncSessionTest : public testing::Test,
- public SyncSession::Delegate {
- public:
- SyncSessionTest() : controller_invocations_allowed_(false) {}
-
- SyncSession* MakeSession() {
- return SyncSession::Build(context_.get(), this);
- }
-
- virtual void SetUp() {
- extensions_activity_ = new ExtensionsActivity();
-
- routes_.clear();
- routes_[BOOKMARKS] = GROUP_UI;
- routes_[AUTOFILL] = GROUP_DB;
- scoped_refptr<ModelSafeWorker> passive_worker(
- new FakeModelWorker(GROUP_PASSIVE));
- scoped_refptr<ModelSafeWorker> ui_worker(
- new FakeModelWorker(GROUP_UI));
- scoped_refptr<ModelSafeWorker> db_worker(
- new FakeModelWorker(GROUP_DB));
- workers_.clear();
- workers_.push_back(passive_worker);
- workers_.push_back(ui_worker);
- workers_.push_back(db_worker);
-
- std::vector<ModelSafeWorker*> workers;
- GetWorkers(&workers);
-
- context_.reset(
- new SyncSessionContext(
- NULL,
- NULL,
- workers,
- extensions_activity_.get(),
- std::vector<SyncEngineEventListener*>(),
- NULL,
- NULL,
- true, // enable keystore encryption
- false, // force enable pre-commit GU avoidance experiment
- "fake_invalidator_client_id"));
- context_->set_routing_info(routes_);
-
- session_.reset(MakeSession());
- }
- virtual void TearDown() {
- session_.reset();
- context_.reset();
- }
-
- virtual void OnThrottled(const base::TimeDelta& throttle_duration) OVERRIDE {
- FailControllerInvocationIfDisabled("OnThrottled");
- }
- virtual void OnTypesThrottled(
- ModelTypeSet types,
- const base::TimeDelta& throttle_duration) OVERRIDE {
- FailControllerInvocationIfDisabled("OnTypesThrottled");
- }
- virtual bool IsCurrentlyThrottled() OVERRIDE {
- FailControllerInvocationIfDisabled("IsSyncingCurrentlySilenced");
- return false;
- }
- virtual void OnReceivedLongPollIntervalUpdate(
- const base::TimeDelta& new_interval) OVERRIDE {
- FailControllerInvocationIfDisabled("OnReceivedLongPollIntervalUpdate");
- }
- virtual void OnReceivedShortPollIntervalUpdate(
- const base::TimeDelta& new_interval) OVERRIDE {
- FailControllerInvocationIfDisabled("OnReceivedShortPollIntervalUpdate");
- }
- virtual void OnReceivedSessionsCommitDelay(
- const base::TimeDelta& new_delay) OVERRIDE {
- FailControllerInvocationIfDisabled("OnReceivedSessionsCommitDelay");
- }
- virtual void OnReceivedClientInvalidationHintBufferSize(
- int size) OVERRIDE {
- FailControllerInvocationIfDisabled(
- "OnReceivedClientInvalidationHintBufferSize");
- }
- virtual void OnSyncProtocolError(
- const sessions::SyncSessionSnapshot& snapshot) OVERRIDE {
- FailControllerInvocationIfDisabled("SyncProtocolError");
- }
-
- void GetWorkers(std::vector<ModelSafeWorker*>* out) const {
- out->clear();
- for (std::vector<scoped_refptr<ModelSafeWorker> >::const_iterator it =
- workers_.begin(); it != workers_.end(); ++it) {
- out->push_back(it->get());
- }
- }
- void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) const {
- *out = routes_;
- }
-
- StatusController* status() { return session_->mutable_status_controller(); }
- protected:
- void FailControllerInvocationIfDisabled(const std::string& msg) {
- if (!controller_invocations_allowed_)
- FAIL() << msg;
- }
-
- ModelTypeSet ParamsMeaningAllEnabledTypes() {
- ModelTypeSet request_params(BOOKMARKS, AUTOFILL);
- return request_params;
- }
-
- ModelTypeSet ParamsMeaningJustOneEnabledType() {
- return ModelTypeSet(AUTOFILL);
- }
-
- base::MessageLoop message_loop_;
- bool controller_invocations_allowed_;
- scoped_ptr<SyncSession> session_;
- scoped_ptr<SyncSessionContext> context_;
- std::vector<scoped_refptr<ModelSafeWorker> > workers_;
- ModelSafeRoutingInfo routes_;
- scoped_refptr<ExtensionsActivity> extensions_activity_;
-};
-
-} // namespace
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/test_util.cc b/chromium/sync/sessions/test_util.cc
deleted file mode 100644
index 538ed0f88c0..00000000000
--- a/chromium/sync/sessions/test_util.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/sessions/test_util.h"
-
-namespace syncer {
-namespace sessions {
-namespace test_util {
-
-void SimulateGetEncryptionKeyFailed(
- ModelTypeSet requsted_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session) {
- session->mutable_status_controller()->set_last_get_key_result(
- SERVER_RESPONSE_VALIDATION_FAILED);
- session->mutable_status_controller()->set_last_download_updates_result(
- SYNCER_OK);
-}
-
-void SimulateConfigureSuccess(
- ModelTypeSet requsted_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session) {
- ASSERT_EQ(0U, session->status_controller().num_server_changes_remaining());
- session->mutable_status_controller()->set_last_get_key_result(SYNCER_OK);
- session->mutable_status_controller()->set_last_download_updates_result(
- SYNCER_OK);
-}
-
-void SimulateConfigureFailed(
- ModelTypeSet requsted_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session) {
- session->mutable_status_controller()->set_last_get_key_result(SYNCER_OK);
- session->mutable_status_controller()->set_last_download_updates_result(
- SERVER_RETURN_TRANSIENT_ERROR);
-}
-
-void SimulateConfigureConnectionFailure(
- ModelTypeSet requsted_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session) {
- session->mutable_status_controller()->set_last_get_key_result(SYNCER_OK);
- session->mutable_status_controller()->set_last_download_updates_result(
- NETWORK_CONNECTION_UNAVAILABLE);
-}
-
-void SimulateNormalSuccess(ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session) {
- ASSERT_EQ(0U, session->status_controller().num_server_changes_remaining());
- session->mutable_status_controller()->set_commit_result(SYNCER_OK);
- session->mutable_status_controller()->set_last_download_updates_result(
- SYNCER_OK);
-}
-
-void SimulateDownloadUpdatesFailed(
- ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session) {
- session->mutable_status_controller()->set_last_download_updates_result(
- SERVER_RETURN_TRANSIENT_ERROR);
-}
-
-void SimulateCommitFailed(ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session) {
- session->mutable_status_controller()->set_last_get_key_result(SYNCER_OK);
- session->mutable_status_controller()->set_last_download_updates_result(
- SYNCER_OK);
- session->mutable_status_controller()->set_commit_result(
- SERVER_RETURN_TRANSIENT_ERROR);
-}
-
-void SimulateConnectionFailure(
- ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session) {
- session->mutable_status_controller()->set_last_download_updates_result(
- NETWORK_CONNECTION_UNAVAILABLE);
-}
-
-void SimulatePollSuccess(ModelTypeSet requested_types,
- sessions::SyncSession* session) {
- ASSERT_EQ(0U, session->status_controller().num_server_changes_remaining());
- session->mutable_status_controller()->set_last_download_updates_result(
- SYNCER_OK);
-}
-
-void SimulatePollFailed(ModelTypeSet requested_types,
- sessions::SyncSession* session) {
- ASSERT_EQ(0U, session->status_controller().num_server_changes_remaining());
- session->mutable_status_controller()->set_last_download_updates_result(
- SERVER_RETURN_TRANSIENT_ERROR);
-}
-
-void SimulateThrottledImpl(
- sessions::SyncSession* session,
- const base::TimeDelta& delta) {
- session->mutable_status_controller()->set_last_download_updates_result(
- SERVER_RETURN_THROTTLED);
- session->delegate()->OnThrottled(delta);
-}
-
-void SimulateTypesThrottledImpl(
- sessions::SyncSession* session,
- ModelTypeSet types,
- const base::TimeDelta& delta) {
- session->mutable_status_controller()->set_last_download_updates_result(
- SERVER_RETURN_THROTTLED);
- session->delegate()->OnTypesThrottled(types, delta);
-}
-
-void SimulatePollIntervalUpdateImpl(
- ModelTypeSet requested_types,
- sessions::SyncSession* session,
- const base::TimeDelta& new_poll) {
- SimulatePollSuccess(requested_types, session);
- session->delegate()->OnReceivedLongPollIntervalUpdate(new_poll);
-}
-
-void SimulateSessionsCommitDelayUpdateImpl(
- ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session,
- const base::TimeDelta& new_delay) {
- SimulateNormalSuccess(requested_types, nudge_tracker, session);
- session->delegate()->OnReceivedSessionsCommitDelay(new_delay);
-}
-
-} // namespace test_util
-} // namespace sessions
-} // namespace syncer
diff --git a/chromium/sync/sessions/test_util.h b/chromium/sync/sessions/test_util.h
deleted file mode 100644
index c670128c6d0..00000000000
--- a/chromium/sync/sessions/test_util.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Utils to simulate various outcomes of a sync session.
-#ifndef SYNC_SESSIONS_TEST_UTIL_H_
-#define SYNC_SESSIONS_TEST_UTIL_H_
-
-#include "sync/engine/syncer.h"
-#include "sync/sessions/sync_session.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace sessions {
-namespace test_util {
-
-// Configure sync cycle successes and failures.
-void SimulateGetEncryptionKeyFailed(
- ModelTypeSet requested_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session);
-void SimulateConfigureSuccess(
- ModelTypeSet requested_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session);
-void SimulateConfigureFailed(
- ModelTypeSet requested_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session);
-void SimulateConfigureConnectionFailure(
- ModelTypeSet requested_types,
- sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
- sessions::SyncSession* session);
-
-// Normal mode sync cycle successes and failures.
-void SimulateNormalSuccess(ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session);
-void SimulateDownloadUpdatesFailed(ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session);
-void SimulateCommitFailed(ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session);
-void SimulateConnectionFailure(ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session);
-
-// Poll successes and failures.
-void SimulatePollSuccess(ModelTypeSet requested_types,
- sessions::SyncSession* session);
-void SimulatePollFailed(ModelTypeSet requested_types,
- sessions::SyncSession* session);
-
-void SimulateThrottledImpl(sessions::SyncSession* session,
- const base::TimeDelta& delta);
-
-void SimulateTypesThrottledImpl(
- sessions::SyncSession* session,
- ModelTypeSet types,
- const base::TimeDelta& delta);
-
-// Works with poll cycles.
-void SimulatePollIntervalUpdateImpl(
- ModelTypeSet requested_types,
- sessions::SyncSession* session,
- const base::TimeDelta& new_poll);
-
-// Works with normal cycles.
-void SimulateSessionsCommitDelayUpdateImpl(
- ModelTypeSet requested_types,
- const sessions::NudgeTracker& nudge_tracker,
- sessions::SyncSession* session,
- const base::TimeDelta& new_delay);
-
-ACTION_P(SimulateThrottled, throttle) {
- SimulateThrottledImpl(arg0, throttle);
-}
-
-ACTION_P2(SimulateTypesThrottled, types, throttle) {
- SimulateTypesThrottledImpl(arg0, types, throttle);
-}
-
-ACTION_P(SimulatePollIntervalUpdate, poll) {
- SimulatePollIntervalUpdateImpl(arg0, arg1, poll);
-}
-
-ACTION_P(SimulateSessionsCommitDelayUpdate, poll) {
- SimulateSessionsCommitDelayUpdateImpl(arg0, arg1, arg2, poll);
-}
-
-} // namespace test_util
-} // namespace sessions
-} // namespace syncer
-
-#endif // SYNC_SESSIONS_TEST_UTIL_H_
diff --git a/chromium/sync/sync.gyp b/chromium/sync/sync.gyp
index f71a872cde9..6a14e84f0fb 100644
--- a/chromium/sync/sync.gyp
+++ b/chromium/sync/sync.gyp
@@ -104,6 +104,7 @@
# The sync protocol buffer library.
{
+ # GN version: //sync/protocol
'target_name': 'sync_proto',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
diff --git a/chromium/sync/sync_android.gypi b/chromium/sync/sync_android.gypi
index bdeb2e4d787..40e4be28395 100644
--- a/chromium/sync/sync_android.gypi
+++ b/chromium/sync/sync_android.gypi
@@ -11,7 +11,6 @@
'type': 'none',
'variables': {
'java_in_dir': '../sync/android/java',
- 'jni_generator_ptr_type': 'long',
},
'dependencies': [
'../base/base.gyp:base_java',
diff --git a/chromium/sync/sync_api.gypi b/chromium/sync/sync_api.gypi
index 188fc324224..8aaf7c7cdf1 100644
--- a/chromium/sync/sync_api.gypi
+++ b/chromium/sync/sync_api.gypi
@@ -13,21 +13,39 @@
'../base/base.gyp:base',
],
'sources': [
+ 'api/attachments/attachment.cc',
+ 'api/attachments/attachment.h',
+ 'api/attachments/attachment_downloader.cc',
+ 'api/attachments/attachment_downloader.h',
+ 'api/attachments/attachment_id.cc',
+ 'api/attachments/attachment_id.h',
+ 'api/attachments/attachment_service.cc',
+ 'api/attachments/attachment_service.h',
+ 'api/attachments/attachment_service_impl.cc',
+ 'api/attachments/attachment_service_impl.h',
+ 'api/attachments/attachment_service_proxy.cc',
+ 'api/attachments/attachment_service_proxy.h',
+ 'api/attachments/attachment_service_proxy_for_test.cc',
+ 'api/attachments/attachment_service_proxy_for_test.h',
+ 'api/attachments/attachment_store.cc',
+ 'api/attachments/attachment_store.h',
+ 'api/attachments/attachment_uploader.cc',
+ 'api/attachments/attachment_uploader.h',
'api/string_ordinal.h',
'api/syncable_service.cc',
'api/syncable_service.h',
- 'api/sync_data.h',
'api/sync_data.cc',
- 'api/sync_change.h',
+ 'api/sync_data.h',
'api/sync_change.cc',
- 'api/sync_change_processor.h',
+ 'api/sync_change.h',
'api/sync_change_processor.cc',
- 'api/sync_error.h',
+ 'api/sync_change_processor.h',
'api/sync_error.cc',
- 'api/sync_error_factory.h',
+ 'api/sync_error.h',
'api/sync_error_factory.cc',
- 'api/sync_merge_result.h',
+ 'api/sync_error_factory.h',
'api/sync_merge_result.cc',
+ 'api/sync_merge_result.h',
'api/time.h',
],
}
diff --git a/chromium/sync/sync_core.gypi b/chromium/sync/sync_core.gypi
index f4912fe055c..8772d43db5e 100644
--- a/chromium/sync/sync_core.gypi
+++ b/chromium/sync/sync_core.gypi
@@ -34,38 +34,64 @@
'engine/apply_control_data_updates.h',
'engine/backoff_delay_provider.cc',
'engine/backoff_delay_provider.h',
- 'engine/commit_util.cc',
- 'engine/commit_util.h',
'engine/commit.cc',
'engine/commit.h',
- 'engine/sync_directory_update_handler.cc',
- 'engine/sync_directory_update_handler.h',
- 'engine/sync_directory_commit_contribution.cc',
- 'engine/sync_directory_commit_contribution.h',
- 'engine/sync_directory_commit_contributor.cc',
- 'engine/sync_directory_commit_contributor.h',
+ 'engine/commit_contribution.cc',
+ 'engine/commit_contribution.h',
+ 'engine/commit_contributor.cc',
+ 'engine/commit_contributor.h',
+ 'engine/commit_processor.cc',
+ 'engine/commit_processor.h',
+ 'engine/commit_util.cc',
+ 'engine/commit_util.h',
'engine/conflict_resolver.cc',
'engine/conflict_resolver.h',
'engine/conflict_util.cc',
'engine/conflict_util.h',
- 'engine/download.cc',
- 'engine/download.h',
+ 'engine/directory_commit_contribution.cc',
+ 'engine/directory_commit_contribution.h',
+ 'engine/directory_commit_contributor.cc',
+ 'engine/directory_commit_contributor.h',
+ 'engine/directory_update_handler.cc',
+ 'engine/directory_update_handler.h',
'engine/get_commit_ids.cc',
'engine/get_commit_ids.h',
+ 'engine/get_updates_delegate.cc',
+ 'engine/get_updates_delegate.h',
+ 'engine/get_updates_processor.cc',
+ 'engine/get_updates_processor.h',
+ 'engine/model_thread_sync_entity.cc',
+ 'engine/model_thread_sync_entity.h',
'engine/net/server_connection_manager.cc',
'engine/net/server_connection_manager.h',
'engine/net/url_translator.cc',
'engine/net/url_translator.h',
+ 'engine/non_blocking_sync_common.cc',
+ 'engine/non_blocking_sync_common.h',
+ 'engine/non_blocking_type_commit_contribution.cc',
+ 'engine/non_blocking_type_commit_contribution.h',
+ 'engine/non_blocking_type_processor.cc',
+ 'engine/non_blocking_type_processor.h',
+ 'engine/non_blocking_type_processor_core.cc',
+ 'engine/non_blocking_type_processor_core.h',
+ 'engine/non_blocking_type_processor_core_interface.cc',
+ 'engine/non_blocking_type_processor_core_interface.h',
+ 'engine/non_blocking_type_processor_interface.cc',
+ 'engine/non_blocking_type_processor_interface.h',
'engine/nudge_source.cc',
'engine/nudge_source.h',
'engine/process_updates_util.cc',
'engine/process_updates_util.h',
- 'engine/sync_engine_event.cc',
- 'engine/sync_engine_event.h',
+ 'engine/sync_cycle_event.cc',
+ 'engine/sync_cycle_event.h',
+ 'engine/sync_engine_event_listener.cc',
+ 'engine/sync_engine_event_listener.h',
'engine/sync_scheduler.cc',
'engine/sync_scheduler.h',
'engine/sync_scheduler_impl.cc',
'engine/sync_scheduler_impl.h',
+ 'engine/sync_thread_sync_entity.cc',
+ 'engine/sync_thread_sync_entity.h',
'engine/syncer.cc',
'engine/syncer.h',
'engine/syncer_proto_util.cc',
@@ -75,18 +101,15 @@
'engine/syncer_util.h',
'engine/traffic_logger.cc',
'engine/traffic_logger.h',
- 'engine/traffic_recorder.cc',
- 'engine/traffic_recorder.h',
'engine/update_applicator.cc',
'engine/update_applicator.h',
- 'js/js_arg_list.cc',
- 'js/js_arg_list.h',
+ 'engine/update_handler.cc',
+ 'engine/update_handler.h',
'js/js_backend.h',
'js/js_controller.h',
'js/js_event_details.cc',
'js/js_event_details.h',
'js/js_event_handler.h',
- 'js/js_reply_handler.h',
'js/sync_js_controller.cc',
'js/sync_js_controller.h',
'protocol/proto_enum_conversions.cc',
@@ -98,6 +121,10 @@
'sessions/data_type_tracker.cc',
'sessions/data_type_tracker.h',
'sessions/debug_info_getter.h',
+ 'sessions/directory_type_debug_info_emitter.cc',
+ 'sessions/directory_type_debug_info_emitter.h',
+ 'sessions/model_type_registry.cc',
+ 'sessions/model_type_registry.h',
'sessions/nudge_tracker.cc',
'sessions/nudge_tracker.h',
'sessions/status_controller.cc',
@@ -173,12 +200,12 @@
'util/extensions_activity.h',
'util/get_session_name.cc',
'util/get_session_name.h',
- 'util/get_session_name_ios.mm',
'util/get_session_name_ios.h',
+ 'util/get_session_name_ios.mm',
'util/get_session_name_linux.cc',
'util/get_session_name_linux.h',
- 'util/get_session_name_mac.mm',
'util/get_session_name_mac.h',
+ 'util/get_session_name_mac.mm',
'util/get_session_name_win.cc',
'util/get_session_name_win.h',
'util/logging.cc',
diff --git a/chromium/sync/sync_internal_api.gypi b/chromium/sync/sync_internal_api.gypi
index abcfa4f1b58..f0ae375a917 100644
--- a/chromium/sync/sync_internal_api.gypi
+++ b/chromium/sync/sync_internal_api.gypi
@@ -16,14 +16,26 @@
'../url/url.gyp:url_lib',
],
'sources': [
+ 'internal_api/attachments/attachment_downloader_impl.cc',
+ 'internal_api/attachments/attachment_uploader_impl.cc',
+ 'internal_api/attachments/fake_attachment_downloader.cc',
+ 'internal_api/attachments/fake_attachment_store.cc',
+ 'internal_api/attachments/fake_attachment_uploader.cc',
'internal_api/base_node.cc',
'internal_api/base_transaction.cc',
'internal_api/change_record.cc',
'internal_api/change_reorder_buffer.cc',
'internal_api/change_reorder_buffer.h',
- 'internal_api/delete_journal.cc',
'internal_api/debug_info_event_listener.cc',
'internal_api/debug_info_event_listener.h',
+ 'internal_api/delete_journal.cc',
+ 'internal_api/events/commit_request_event.cc',
+ 'internal_api/events/commit_response_event.cc',
+ 'internal_api/events/configure_get_updates_request_event.cc',
+ 'internal_api/events/get_updates_response_event.cc',
+ 'internal_api/events/normal_get_updates_request_event.cc',
+ 'internal_api/events/poll_get_updates_request_event.cc',
+ 'internal_api/events/protocol_event.cc',
'internal_api/http_bridge.cc',
'internal_api/http_bridge_network_resources.cc',
'internal_api/internal_components_factory_impl.cc',
@@ -33,16 +45,27 @@
'internal_api/js_sync_encryption_handler_observer.h',
'internal_api/js_sync_manager_observer.cc',
'internal_api/js_sync_manager_observer.h',
- 'internal_api/public/base/enum_set.h',
+ 'internal_api/protocol_event_buffer.cc',
+ 'internal_api/protocol_event_buffer.h',
+ 'internal_api/public/attachments/attachment_downloader_impl.h',
+ 'internal_api/public/attachments/attachment_uploader_impl.h',
+ 'internal_api/public/attachments/fake_attachment_downloader.h',
+ 'internal_api/public/attachments/fake_attachment_store.h',
+ 'internal_api/public/attachments/fake_attachment_uploader.h',
'internal_api/public/base/ack_handle.cc',
'internal_api/public/base/ack_handle.h',
+ 'internal_api/public/base/attachment_id_proto.cc',
+ 'internal_api/public/base/attachment_id_proto.h',
'internal_api/public/base/cancelation_observer.cc',
'internal_api/public/base/cancelation_observer.h',
'internal_api/public/base/cancelation_signal.cc',
'internal_api/public/base/cancelation_signal.h',
'internal_api/public/base/enum_set.h',
+ 'internal_api/public/base/enum_set.h',
'internal_api/public/base/invalidation.cc',
'internal_api/public/base/invalidation.h',
+ 'internal_api/public/base/invalidator_state.cc',
+ 'internal_api/public/base/invalidator_state.h',
'internal_api/public/base/model_type.h',
'internal_api/public/base/node_ordinal.cc',
'internal_api/public/base/node_ordinal.h',
@@ -68,6 +91,13 @@
'internal_api/public/engine/polling_constants.h',
'internal_api/public/engine/sync_status.cc',
'internal_api/public/engine/sync_status.h',
+ 'internal_api/public/events/commit_request_event.h',
+ 'internal_api/public/events/commit_response_event.h',
+ 'internal_api/public/events/configure_get_updates_request_event.h',
+ 'internal_api/public/events/get_updates_response_event.h',
+ 'internal_api/public/events/normal_get_updates_request_event.h',
+ 'internal_api/public/events/poll_get_updates_request_event.h',
+ 'internal_api/public/events/protocol_event.h',
'internal_api/public/http_bridge.h',
'internal_api/public/http_bridge_network_resources.h',
'internal_api/public/http_post_provider_factory.h',
@@ -77,10 +107,20 @@
'internal_api/public/network_resources.h',
'internal_api/public/read_node.h',
'internal_api/public/read_transaction.h',
+ 'internal_api/public/sessions/commit_counters.cc',
+ 'internal_api/public/sessions/commit_counters.h',
'internal_api/public/sessions/model_neutral_state.cc',
'internal_api/public/sessions/model_neutral_state.h',
+ 'internal_api/public/sessions/status_counters.cc',
+ 'internal_api/public/sessions/status_counters.h',
'internal_api/public/sessions/sync_session_snapshot.cc',
'internal_api/public/sessions/sync_session_snapshot.h',
+ 'internal_api/public/sessions/type_debug_info_observer.cc',
+ 'internal_api/public/sessions/type_debug_info_observer.h',
+ 'internal_api/public/sessions/update_counters.cc',
+ 'internal_api/public/sessions/update_counters.h',
+ 'internal_api/public/sync_auth_provider.h',
+ 'internal_api/public/sync_core_proxy.h',
'internal_api/public/sync_encryption_handler.cc',
'internal_api/public/sync_encryption_handler.h',
'internal_api/public/sync_manager.cc',
@@ -103,11 +143,22 @@
'internal_api/public/write_transaction.h',
'internal_api/read_node.cc',
'internal_api/read_transaction.cc',
+ 'internal_api/sync_backup_manager.cc',
+ 'internal_api/sync_backup_manager.h',
+ 'internal_api/sync_core.cc',
+ 'internal_api/sync_core.h',
+ 'internal_api/sync_core_proxy.cc',
+ 'internal_api/sync_core_proxy_impl.cc',
+ 'internal_api/sync_core_proxy_impl.h',
'internal_api/sync_encryption_handler_impl.cc',
'internal_api/sync_encryption_handler_impl.h',
'internal_api/sync_manager_factory.cc',
'internal_api/sync_manager_impl.cc',
'internal_api/sync_manager_impl.h',
+ 'internal_api/sync_rollback_manager.cc',
+ 'internal_api/sync_rollback_manager.h',
+ 'internal_api/sync_rollback_manager_base.cc',
+ 'internal_api/sync_rollback_manager_base.h',
'internal_api/syncapi_internal.cc',
'internal_api/syncapi_internal.h',
'internal_api/syncapi_server_connection_manager.cc',
diff --git a/chromium/sync/sync_notifier.gypi b/chromium/sync/sync_notifier.gypi
index 7986bd70562..9c6dc38d974 100644
--- a/chromium/sync/sync_notifier.gypi
+++ b/chromium/sync/sync_notifier.gypi
@@ -28,16 +28,13 @@
'notifier/dropped_invalidation_tracker.cc',
'notifier/dropped_invalidation_tracker.h',
'notifier/invalidation_handler.h',
+ 'notifier/invalidation_state_tracker.cc',
'notifier/invalidation_state_tracker.h',
'notifier/invalidation_util.cc',
'notifier/invalidation_util.h',
'notifier/unacked_invalidation_set.cc',
'notifier/unacked_invalidation_set.h',
'notifier/invalidator.h',
- 'notifier/invalidator_registrar.cc',
- 'notifier/invalidator_registrar.h',
- 'notifier/invalidator_state.cc',
- 'notifier/invalidator_state.h',
'notifier/mock_ack_handler.cc',
'notifier/mock_ack_handler.h',
'notifier/object_id_invalidation_map.cc',
@@ -48,21 +45,8 @@
'conditions': [
['OS != "android"', {
'sources': [
- 'notifier/invalidation_notifier.cc',
- 'notifier/invalidation_notifier.h',
- 'notifier/non_blocking_invalidator.cc',
- 'notifier/non_blocking_invalidator.h',
- 'notifier/p2p_invalidator.cc',
- 'notifier/p2p_invalidator.h',
- 'notifier/push_client_channel.cc',
- 'notifier/push_client_channel.h',
'notifier/registration_manager.cc',
'notifier/registration_manager.h',
- 'notifier/state_writer.h',
- 'notifier/sync_invalidation_listener.cc',
- 'notifier/sync_invalidation_listener.h',
- 'notifier/sync_system_resources.cc',
- 'notifier/sync_system_resources.h',
],
}],
],
diff --git a/chromium/sync/sync_proto.gypi b/chromium/sync/sync_proto.gypi
index 968ee9dd486..fd002bf3a94 100644
--- a/chromium/sync/sync_proto.gypi
+++ b/chromium/sync/sync_proto.gypi
@@ -10,11 +10,14 @@
'SYNC_IMPLEMENTATION',
],
'sources': [
+ # NOTE: If you add a file to this list, also add it to
+ # sync/protocol/BUILD.gn
'protocol/app_notification_specifics.proto',
'protocol/app_setting_specifics.proto',
'protocol/app_specifics.proto',
'protocol/app_list_specifics.proto',
'protocol/article_specifics.proto',
+ 'protocol/attachments.proto',
'protocol/autofill_specifics.proto',
'protocol/bookmark_specifics.proto',
'protocol/client_commands.proto',
@@ -31,6 +34,7 @@
'protocol/history_delete_directive_specifics.proto',
'protocol/nigori_specifics.proto',
'protocol/managed_user_setting_specifics.proto',
+ 'protocol/managed_user_shared_setting_specifics.proto',
'protocol/managed_user_specifics.proto',
'protocol/password_specifics.proto',
'protocol/preference_specifics.proto',
@@ -39,6 +43,7 @@
'protocol/session_specifics.proto',
'protocol/sync.proto',
'protocol/sync_enums.proto',
+ 'protocol/synced_notification_app_info_specifics.proto',
'protocol/synced_notification_data.proto',
'protocol/synced_notification_render.proto',
'protocol/synced_notification_specifics.proto',
diff --git a/chromium/sync/sync_tests.gypi b/chromium/sync/sync_tests.gypi
index 683ca52204b..6212dd4707e 100644
--- a/chromium/sync/sync_tests.gypi
+++ b/chromium/sync/sync_tests.gypi
@@ -40,11 +40,21 @@
'test/engine/fake_sync_scheduler.h',
'test/engine/mock_connection_manager.cc',
'test/engine/mock_connection_manager.h',
+ 'test/engine/mock_non_blocking_type_processor_core.cc',
+ 'test/engine/mock_non_blocking_type_processor_core.h',
+ 'test/engine/mock_non_blocking_type_processor.cc',
+ 'test/engine/mock_non_blocking_type_processor.h',
+ 'test/engine/mock_update_handler.cc',
+ 'test/engine/mock_update_handler.h',
+ 'test/engine/single_type_mock_server.cc',
+ 'test/engine/single_type_mock_server.h',
'test/engine/test_directory_setter_upper.cc',
'test/engine/test_directory_setter_upper.h',
'test/engine/test_id_factory.h',
'test/engine/test_syncable_utils.cc',
'test/engine/test_syncable_utils.h',
+ 'test/engine/injectable_sync_core_proxy.cc',
+ 'test/engine/injectable_sync_core_proxy.h',
'test/fake_encryptor.cc',
'test/fake_encryptor.h',
'test/fake_sync_encryption_handler.cc',
@@ -90,6 +100,52 @@
],
},
+ # Test support files for the fake sync server.
+ {
+ 'target_name': 'test_support_sync_fake_server',
+ 'type': 'static_library',
+ 'variables': { 'enable_wexit_time_destructors': 1, },
+ 'include_dirs': [
+ '..',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../net/net.gyp:net',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/protobuf/protobuf.gyp:protobuf_lite',
+ 'sync',
+ ],
+ 'export_dependent_settings': [
+ 'sync',
+ ],
+ 'sources': [
+ 'test/fake_server/bookmark_entity.cc',
+ 'test/fake_server/bookmark_entity.h',
+ 'test/fake_server/bookmark_entity_builder.cc',
+ 'test/fake_server/bookmark_entity_builder.h',
+ 'test/fake_server/entity_builder.cc',
+ 'test/fake_server/entity_builder.h',
+ 'test/fake_server/entity_builder_factory.cc',
+ 'test/fake_server/entity_builder_factory.h',
+ 'test/fake_server/fake_server.cc',
+ 'test/fake_server/fake_server.h',
+ 'test/fake_server/fake_server_entity.cc',
+ 'test/fake_server/fake_server_entity.h',
+ 'test/fake_server/fake_server_http_post_provider.cc',
+ 'test/fake_server/fake_server_http_post_provider.h',
+ 'test/fake_server/fake_server_network_resources.cc',
+ 'test/fake_server/fake_server_network_resources.h',
+ 'test/fake_server/fake_server_verifier.cc',
+ 'test/fake_server/fake_server_verifier.h',
+ 'test/fake_server/permanent_entity.cc',
+ 'test/fake_server/permanent_entity.h',
+ 'test/fake_server/tombstone_entity.cc',
+ 'test/fake_server/tombstone_entity.h',
+ 'test/fake_server/unique_client_entity.cc',
+ 'test/fake_server/unique_client_entity.h',
+ ],
+ },
+
# Test support files for the 'sync_notifier' target.
{
'target_name': 'test_support_sync_notifier',
@@ -111,14 +167,6 @@
'sync',
],
'sources': [
- 'notifier/fake_invalidation_handler.cc',
- 'notifier/fake_invalidation_handler.h',
- 'notifier/fake_invalidation_state_tracker.cc',
- 'notifier/fake_invalidation_state_tracker.h',
- 'notifier/fake_invalidator.cc',
- 'notifier/fake_invalidator.h',
- 'notifier/invalidator_test_template.cc',
- 'notifier/invalidator_test_template.h',
'notifier/unacked_invalidation_set_test_util.cc',
'notifier/unacked_invalidation_set_test_util.h',
'internal_api/public/base/object_id_invalidation_map_test_util.h',
@@ -152,11 +200,13 @@
'internal_api/public/base/invalidation_test_util.cc',
'internal_api/public/base/invalidation_test_util.h',
'internal_api/public/test/fake_sync_manager.h',
+ 'internal_api/public/test/null_sync_core_proxy.h',
'internal_api/public/test/sync_manager_factory_for_profile_sync_test.h',
'internal_api/public/test/test_entry_factory.h',
'internal_api/public/test/test_internal_components_factory.h',
'internal_api/public/test/test_user_share.h',
'internal_api/test/fake_sync_manager.cc',
+ 'internal_api/test/null_sync_core_proxy.cc',
'internal_api/test/sync_manager_factory_for_profile_sync_test.cc',
'internal_api/test/sync_manager_for_profile_sync_test.cc',
'internal_api/test/sync_manager_for_profile_sync_test.h',
@@ -187,6 +237,10 @@
'sources': [
'api/fake_syncable_service.cc',
'api/fake_syncable_service.h',
+ 'api/fake_sync_change_processor.cc',
+ 'api/fake_sync_change_processor.h',
+ 'api/sync_change_processor_wrapper_for_test.cc',
+ 'api/sync_change_processor_wrapper_for_test.h',
'api/sync_error_factory_mock.cc',
'api/sync_error_factory_mock.h',
],
@@ -242,22 +296,28 @@
'internal_api/public/util/weak_handle_unittest.cc',
'engine/apply_control_data_updates_unittest.cc',
'engine/backoff_delay_provider_unittest.cc',
- 'engine/download_unittest.cc',
+ 'engine/directory_commit_contribution_unittest.cc',
+ 'engine/directory_update_handler_unittest.cc',
+ 'engine/get_updates_processor_unittest.cc',
+ 'engine/model_thread_sync_entity_unittest.cc',
+ 'engine/non_blocking_type_processor_core_unittest.cc',
+ 'engine/non_blocking_type_processor_unittest.cc',
'engine/sync_scheduler_unittest.cc',
+ 'engine/sync_thread_sync_entity_unittest.cc',
'engine/syncer_proto_util_unittest.cc',
'engine/syncer_unittest.cc',
- 'engine/sync_directory_commit_contribution_unittest.cc',
- 'engine/sync_directory_update_handler_unittest.cc',
- 'engine/traffic_recorder_unittest.cc',
- 'js/js_arg_list_unittest.cc',
+ 'engine/syncer_util_unittest.cc',
'js/js_event_details_unittest.cc',
'js/sync_js_controller_unittest.cc',
'protocol/proto_enum_conversions_unittest.cc',
'protocol/proto_value_conversions_unittest.cc',
+ 'sessions/model_type_registry_unittest.cc',
'sessions/nudge_tracker_unittest.cc',
'sessions/status_controller_unittest.cc',
- 'sessions/sync_session_unittest.cc',
+ 'syncable/directory_unittest.cc',
+ 'syncable/directory_unittest.h',
'syncable/directory_backing_store_unittest.cc',
+ 'syncable/entry_kernel_unittest.cc',
'syncable/model_type_unittest.cc',
'syncable/nigori_util_unittest.cc',
'syncable/parent_child_index_unittest.cc',
@@ -285,6 +345,7 @@
'suppress_wildcard': 1,
'dependencies': [
'../base/base.gyp:base',
+ '../google_apis/google_apis.gyp:google_apis',
'../jingle/jingle.gyp:notifier_test_util',
'../net/net.gyp:net_test_support',
'../testing/gmock.gyp:gmock',
@@ -298,6 +359,7 @@
# happens in the dependents.
'export_dependent_settings': [
'../base/base.gyp:base',
+ '../google_apis/google_apis.gyp:google_apis',
'../jingle/jingle.gyp:notifier_test_util',
'../net/net.gyp:net_test_support',
'../testing/gmock.gyp:gmock',
@@ -314,17 +376,9 @@
'conditions': [
['OS != "android"', {
'sources': [
- 'notifier/fake_invalidator_unittest.cc',
- 'notifier/invalidation_notifier_unittest.cc',
- 'notifier/invalidator_registrar_unittest.cc',
- 'notifier/non_blocking_invalidator_unittest.cc',
'notifier/object_id_invalidation_map_unittest.cc',
- 'notifier/p2p_invalidator_unittest.cc',
- 'notifier/push_client_channel_unittest.cc',
'notifier/registration_manager_unittest.cc',
'notifier/single_object_invalidation_set_unittest.cc',
- 'notifier/sync_invalidation_listener_unittest.cc',
- 'notifier/sync_system_resources_unittest.cc',
'notifier/unacked_invalidation_set_unittest.cc',
],
}],
@@ -343,6 +397,7 @@
'suppress_wildcard': 1,
'dependencies': [
'../base/base.gyp:base',
+ '../google_apis/google_apis.gyp:google_apis_test_support',
'../net/net.gyp:net',
'../net/net.gyp:net_test_support',
'../testing/gmock.gyp:gmock',
@@ -366,16 +421,26 @@
'..',
],
'sources': [
+ 'internal_api/attachments/attachment_downloader_impl_unittest.cc',
+ 'internal_api/attachments/attachment_uploader_impl_unittest.cc',
+ 'internal_api/attachments/fake_attachment_downloader_unittest.cc',
+ 'internal_api/attachments/fake_attachment_store_unittest.cc',
+ 'internal_api/attachments/fake_attachment_uploader_unittest.cc',
'internal_api/debug_info_event_listener_unittest.cc',
'internal_api/http_bridge_unittest.cc',
'internal_api/js_mutation_event_observer_unittest.cc',
'internal_api/js_sync_encryption_handler_observer_unittest.cc',
'internal_api/js_sync_manager_observer_unittest.cc',
+ 'internal_api/protocol_event_buffer_unittest.cc',
'internal_api/public/change_record_unittest.cc',
'internal_api/public/sessions/sync_session_snapshot_unittest.cc',
- 'internal_api/syncapi_server_connection_manager_unittest.cc',
+ 'internal_api/sync_backup_manager_unittest.cc',
+ 'internal_api/sync_core_proxy_impl_unittest.cc',
'internal_api/sync_encryption_handler_impl_unittest.cc',
'internal_api/sync_manager_impl_unittest.cc',
+ 'internal_api/sync_rollback_manager_base_unittest.cc',
+ 'internal_api/sync_rollback_manager_unittest.cc',
+ 'internal_api/syncapi_server_connection_manager_unittest.cc',
],
'conditions': [
['OS == "ios"', {
@@ -415,7 +480,12 @@
'..',
],
'sources': [
+ 'api/attachments/attachment_unittest.cc',
+ 'api/attachments/attachment_id_unittest.cc',
+ 'api/attachments/attachment_service_impl_unittest.cc',
+ 'api/attachments/attachment_service_proxy_unittest.cc',
'api/sync_change_unittest.cc',
+ 'api/sync_data_unittest.cc',
'api/sync_error_unittest.cc',
'api/sync_merge_result_unittest.cc',
],
@@ -442,12 +512,12 @@
# TODO(akalin): This is needed because histogram.cc uses
# leak_annotations.h, which pulls this in. Make 'base'
# propagate this dependency.
- ['OS=="linux" and linux_use_tcmalloc==1', {
+ ['OS=="linux" and use_allocator!="none"', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
- ['OS == "android" and gtest_target_type == "shared_library"', {
+ ['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
@@ -496,29 +566,6 @@
'conditions': [
['OS != "ios"', {
'targets': [
- {
- 'target_name': 'sync_tools_helper',
- 'type': 'static_library',
- 'defines': [
- 'SYNC_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- 'sync',
- ],
- 'export_dependent_settings': [
- '../base/base.gyp:base',
- 'sync',
- ],
- 'sources': [
- 'tools/null_invalidation_state_tracker.cc',
- 'tools/null_invalidation_state_tracker.h',
- ],
- },
-
# A tool that can be used to launch a python sync server instance.
{
'target_name': 'run_sync_testserver',
@@ -534,47 +581,6 @@
'tools/testserver/run_sync_testserver.cc',
],
},
-
- # A tool to listen to sync notifications and print them out.
- {
- 'target_name': 'sync_listen_notifications',
- 'type': 'executable',
- 'defines': [
- 'SYNC_TEST',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- '../jingle/jingle.gyp:notifier',
- '../net/net.gyp:net',
- '../net/net.gyp:net_test_support',
- 'sync',
- 'sync_tools_helper',
- ],
- 'sources': [
- 'tools/sync_listen_notifications.cc',
- ],
- },
-
- # A standalone command-line sync client.
- {
- 'target_name': 'sync_client',
- 'type': 'executable',
- 'defines': [
- 'SYNC_TEST',
- ],
- 'dependencies': [
- '../base/base.gyp:base',
- '../jingle/jingle.gyp:notifier',
- '../net/net.gyp:net',
- '../net/net.gyp:net_test_support',
- 'sync',
- 'sync_tools_helper',
- 'test_support_sync_core'
- ],
- 'sources': [
- 'tools/sync_client.cc',
- ],
- },
],
}],
['OS == "android"', {
@@ -606,9 +612,7 @@
},
],
}],
- # Special target to wrap a gtest_target_type==shared_library
- # sync_unit_tests into an android apk for execution.
- ['OS == "android" and gtest_target_type == "shared_library"', {
+ ['OS == "android"', {
'targets': [
{
'target_name': 'sync_unit_tests_apk',
@@ -618,7 +622,6 @@
],
'variables': {
'test_suite_name': 'sync_unit_tests',
- 'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)sync_unit_tests<(SHARED_LIB_SUFFIX)',
},
'includes': [ '../build/apk_test.gypi' ],
},
diff --git a/chromium/sync/syncable/DEPS b/chromium/sync/syncable/DEPS
deleted file mode 100644
index 0ea66104d92..00000000000
--- a/chromium/sync/syncable/DEPS
+++ /dev/null
@@ -1,11 +0,0 @@
-include_rules = [
- "+net/base/escape.h",
- "+sql",
- "+sync/base",
- "+sync/internal_api/public/base",
- "+sync/internal_api/public/engine",
- "+sync/internal_api/public/util",
- "+sync/protocol",
- "+sync/test",
- "+sync/util",
-]
diff --git a/chromium/sync/syncable/blob.h b/chromium/sync/syncable/blob.h
deleted file mode 100644
index 0f68a2418b5..00000000000
--- a/chromium/sync/syncable/blob.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_BLOB_H_
-#define SYNC_SYNCABLE_BLOB_H_
-
-#include <vector>
-
-#include "base/basictypes.h" // For uint8.
-
-namespace syncer {
-namespace syncable {
-
-typedef std::vector<uint8> Blob;
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_BLOB_H_
diff --git a/chromium/sync/syncable/dir_open_result.h b/chromium/sync/syncable/dir_open_result.h
deleted file mode 100644
index 0df3c6ca119..00000000000
--- a/chromium/sync/syncable/dir_open_result.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
-#define SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
-
-namespace syncer {
-namespace syncable {
-enum DirOpenResult { NOT_INITIALIZED,
- OPENED, // success.
- FAILED_NEWER_VERSION, // DB version is too new.
- FAILED_MAKE_REPOSITORY, // Couldn't create subdir.
- FAILED_OPEN_DATABASE, // sqlite_open() failed.
- FAILED_DISK_FULL, // The disk is full.
- FAILED_DATABASE_CORRUPT, // Something is wrong with the DB
- FAILED_LOGICAL_CORRUPTION, // Invalid database contents
- FAILED_IN_UNITTEST, // For tests.
- FAILED_INITIAL_WRITE, // Early write to DB failed.
-};
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
diff --git a/chromium/sync/syncable/directory.cc b/chromium/sync/syncable/directory.cc
deleted file mode 100644
index 33b7e15be6c..00000000000
--- a/chromium/sync/syncable/directory.cc
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/directory.h"
-
-#include <iterator>
-
-#include "base/base64.h"
-#include "base/debug/trace_event.h"
-#include "base/stl_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/in_memory_directory_backing_store.h"
-#include "sync/syncable/on_disk_directory_backing_store.h"
-#include "sync/syncable/scoped_kernel_lock.h"
-#include "sync/syncable/scoped_parent_child_index_updater.h"
-#include "sync/syncable/syncable-inl.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-using std::string;
-
-namespace syncer {
-namespace syncable {
-
-// static
-const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
- FILE_PATH_LITERAL("SyncData.sqlite3");
-
-Directory::PersistedKernelInfo::PersistedKernelInfo()
- : next_id(0) {
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- reset_download_progress(iter.Get());
- transaction_version[iter.Get()] = 0;
- }
-}
-
-Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
-
-void Directory::PersistedKernelInfo::reset_download_progress(
- ModelType model_type) {
- download_progress[model_type].set_data_type_id(
- GetSpecificsFieldNumberFromModelType(model_type));
- // An empty-string token indicates no prior knowledge.
- download_progress[model_type].set_token(std::string());
-}
-
-Directory::SaveChangesSnapshot::SaveChangesSnapshot()
- : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
-}
-
-Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
- STLDeleteElements(&dirty_metas);
- STLDeleteElements(&delete_journals);
-}
-
-Directory::Kernel::Kernel(
- const std::string& name,
- const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>& transaction_observer)
- : next_write_transaction_id(0),
- name(name),
- info_status(Directory::KERNEL_SHARE_INFO_VALID),
- persisted_info(info.kernel_info),
- cache_guid(info.cache_guid),
- next_metahandle(info.max_metahandle + 1),
- delegate(delegate),
- transaction_observer(transaction_observer) {
- DCHECK(delegate);
- DCHECK(transaction_observer.IsInitialized());
-}
-
-Directory::Kernel::~Kernel() {
- STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
- metahandles_map.end());
-}
-
-Directory::Directory(
- DirectoryBackingStore* store,
- UnrecoverableErrorHandler* unrecoverable_error_handler,
- ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
- NigoriHandler* nigori_handler,
- Cryptographer* cryptographer)
- : kernel_(NULL),
- store_(store),
- unrecoverable_error_handler_(unrecoverable_error_handler),
- report_unrecoverable_error_function_(
- report_unrecoverable_error_function),
- unrecoverable_error_set_(false),
- nigori_handler_(nigori_handler),
- cryptographer_(cryptographer),
- invariant_check_level_(VERIFY_CHANGES) {
-}
-
-Directory::~Directory() {
- Close();
-}
-
-DirOpenResult Directory::Open(
- const string& name,
- DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>& transaction_observer) {
- TRACE_EVENT0("sync", "SyncDatabaseOpen");
-
- const DirOpenResult result =
- OpenImpl(name, delegate, transaction_observer);
-
- if (OPENED != result)
- Close();
- return result;
-}
-
-void Directory::InitializeIndices(MetahandlesMap* handles_map) {
- kernel_->metahandles_map.swap(*handles_map);
- for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
- it != kernel_->metahandles_map.end(); ++it) {
- EntryKernel* entry = it->second;
- if (ParentChildIndex::ShouldInclude(entry))
- kernel_->parent_child_index.Insert(entry);
- const int64 metahandle = entry->ref(META_HANDLE);
- if (entry->ref(IS_UNSYNCED))
- kernel_->unsynced_metahandles.insert(metahandle);
- if (entry->ref(IS_UNAPPLIED_UPDATE)) {
- const ModelType type = entry->GetServerModelType();
- kernel_->unapplied_update_metahandles[type].insert(metahandle);
- }
- if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
- DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
- kernel_->server_tags_map.end())
- << "Unexpected duplicate use of client tag";
- kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
- }
- if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
- DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
- kernel_->server_tags_map.end())
- << "Unexpected duplicate use of server tag";
- kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
- }
- DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
- kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
- kernel_->ids_map[entry->ref(ID).value()] = entry;
- DCHECK(!entry->is_dirty());
- }
-}
-
-DirOpenResult Directory::OpenImpl(
- const string& name,
- DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>&
- transaction_observer) {
- KernelLoadInfo info;
- // Temporary indices before kernel_ initialized in case Load fails. We 0(1)
- // swap these later.
- Directory::MetahandlesMap tmp_handles_map;
- JournalIndex delete_journals;
-
- DirOpenResult result =
- store_->Load(&tmp_handles_map, &delete_journals, &info);
- if (OPENED != result)
- return result;
-
- kernel_ = new Kernel(name, info, delegate, transaction_observer);
- delete_journal_.reset(new DeleteJournal(&delete_journals));
- InitializeIndices(&tmp_handles_map);
-
- // Write back the share info to reserve some space in 'next_id'. This will
- // prevent local ID reuse in the case of an early crash. See the comments in
- // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
- if (!SaveChanges())
- return FAILED_INITIAL_WRITE;
-
- return OPENED;
-}
-
-DeleteJournal* Directory::delete_journal() {
- DCHECK(delete_journal_.get());
- return delete_journal_.get();
-}
-
-void Directory::Close() {
- store_.reset();
- if (kernel_) {
- delete kernel_;
- kernel_ = NULL;
- }
-}
-
-void Directory::OnUnrecoverableError(const BaseTransaction* trans,
- const tracked_objects::Location& location,
- const std::string & message) {
- DCHECK(trans != NULL);
- unrecoverable_error_set_ = true;
- unrecoverable_error_handler_->OnUnrecoverableError(location,
- message);
-}
-
-EntryKernel* Directory::GetEntryById(const Id& id) {
- ScopedKernelLock lock(this);
- return GetEntryById(id, &lock);
-}
-
-EntryKernel* Directory::GetEntryById(const Id& id,
- ScopedKernelLock* const lock) {
- DCHECK(kernel_);
- // Find it in the in memory ID index.
- IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
- if (id_found != kernel_->ids_map.end()) {
- return id_found->second;
- }
- return NULL;
-}
-
-EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
- ScopedKernelLock lock(this);
- DCHECK(kernel_);
-
- TagsMap::iterator it = kernel_->client_tags_map.find(tag);
- if (it != kernel_->client_tags_map.end()) {
- return it->second;
- }
- return NULL;
-}
-
-EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
- ScopedKernelLock lock(this);
- DCHECK(kernel_);
- TagsMap::iterator it = kernel_->server_tags_map.find(tag);
- if (it != kernel_->server_tags_map.end()) {
- return it->second;
- }
- return NULL;
-}
-
-EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
- ScopedKernelLock lock(this);
- return GetEntryByHandle(metahandle, &lock);
-}
-
-EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
- ScopedKernelLock* lock) {
- // Look up in memory
- MetahandlesMap::iterator found =
- kernel_->metahandles_map.find(metahandle);
- if (found != kernel_->metahandles_map.end()) {
- // Found it in memory. Easy.
- return found->second;
- }
- return NULL;
-}
-
-bool Directory::GetChildHandlesById(
- BaseTransaction* trans, const Id& parent_id,
- Directory::Metahandles* result) {
- if (!SyncAssert(this == trans->directory(), FROM_HERE,
- "Directories don't match", trans))
- return false;
- result->clear();
-
- ScopedKernelLock lock(this);
- AppendChildHandles(lock, parent_id, result);
- return true;
-}
-
-bool Directory::GetChildHandlesByHandle(
- BaseTransaction* trans, int64 handle,
- Directory::Metahandles* result) {
- if (!SyncAssert(this == trans->directory(), FROM_HERE,
- "Directories don't match", trans))
- return false;
-
- result->clear();
-
- ScopedKernelLock lock(this);
- EntryKernel* kernel = GetEntryByHandle(handle, &lock);
- if (!kernel)
- return true;
-
- AppendChildHandles(lock, kernel->ref(ID), result);
- return true;
-}
-
-int Directory::GetTotalNodeCount(
- BaseTransaction* trans,
- EntryKernel* kernel) const {
- if (!SyncAssert(this == trans->directory(), FROM_HERE,
- "Directories don't match", trans))
- return false;
-
- int count = 1;
- std::deque<const OrderedChildSet*> child_sets;
-
- GetChildSetForKernel(trans, kernel, &child_sets);
- while (!child_sets.empty()) {
- const OrderedChildSet* set = child_sets.front();
- child_sets.pop_front();
- for (OrderedChildSet::const_iterator it = set->begin();
- it != set->end(); ++it) {
- count++;
- GetChildSetForKernel(trans, *it, &child_sets);
- }
- }
-
- return count;
-}
-
-void Directory::GetChildSetForKernel(
- BaseTransaction* trans,
- EntryKernel* kernel,
- std::deque<const OrderedChildSet*>* child_sets) const {
- if (!kernel->ref(IS_DIR))
- return; // Not a directory => no children.
-
- const OrderedChildSet* descendants =
- kernel_->parent_child_index.GetChildren(kernel->ref(ID));
- if (!descendants)
- return; // This directory has no children.
-
- // Add our children to the list of items to be traversed.
- child_sets->push_back(descendants);
-}
-
-int Directory::GetPositionIndex(
- BaseTransaction* trans,
- EntryKernel* kernel) const {
- const OrderedChildSet* siblings =
- kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));
-
- OrderedChildSet::const_iterator it = siblings->find(kernel);
- return std::distance(siblings->begin(), it);
-}
-
-EntryKernel* Directory::GetRootEntry() {
- return GetEntryById(Id());
-}
-
-bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
- ScopedKernelLock lock(this);
- return InsertEntry(trans, entry, &lock);
-}
-
-bool Directory::InsertEntry(BaseWriteTransaction* trans,
- EntryKernel* entry,
- ScopedKernelLock* lock) {
- DCHECK(NULL != lock);
- if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
- return false;
-
- static const char error[] = "Entry already in memory index.";
-
- if (!SyncAssert(
- kernel_->metahandles_map.insert(
- std::make_pair(entry->ref(META_HANDLE), entry)).second,
- FROM_HERE,
- error,
- trans)) {
- return false;
- }
- if (!SyncAssert(
- kernel_->ids_map.insert(
- std::make_pair(entry->ref(ID).value(), entry)).second,
- FROM_HERE,
- error,
- trans)) {
- return false;
- }
- if (ParentChildIndex::ShouldInclude(entry)) {
- if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
- FROM_HERE,
- error,
- trans)) {
- return false;
- }
- }
-
- // Should NEVER be created with a client tag or server tag.
- if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
- "Server tag should be empty", trans)) {
- return false;
- }
- if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
- "Client tag should be empty", trans))
- return false;
-
- return true;
-}
-
-bool Directory::ReindexId(BaseWriteTransaction* trans,
- EntryKernel* const entry,
- const Id& new_id) {
- ScopedKernelLock lock(this);
- if (NULL != GetEntryById(new_id, &lock))
- return false;
-
- {
- // Update the indices that depend on the ID field.
- ScopedParentChildIndexUpdater updater_b(lock, entry,
- &kernel_->parent_child_index);
- size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
- DCHECK_EQ(1U, num_erased);
- entry->put(ID, new_id);
- kernel_->ids_map[entry->ref(ID).value()] = entry;
- }
- return true;
-}
-
-bool Directory::ReindexParentId(BaseWriteTransaction* trans,
- EntryKernel* const entry,
- const Id& new_parent_id) {
- ScopedKernelLock lock(this);
-
- {
- // Update the indices that depend on the PARENT_ID field.
- ScopedParentChildIndexUpdater index_updater(lock, entry,
- &kernel_->parent_child_index);
- entry->put(PARENT_ID, new_parent_id);
- }
- return true;
-}
-
-bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
- DCHECK(trans != NULL);
- return unrecoverable_error_set_;
-}
-
-void Directory::ClearDirtyMetahandles() {
- kernel_->transaction_mutex.AssertAcquired();
- kernel_->dirty_metahandles.clear();
-}
-
-bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
- const EntryKernel* const entry) const {
- bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
- !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
- !entry->ref(IS_UNSYNCED);
-
- if (safe) {
- int64 handle = entry->ref(META_HANDLE);
- const ModelType type = entry->GetServerModelType();
- if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
- FROM_HERE,
- "Dirty metahandles should be empty", trans))
- return false;
- // TODO(tim): Bug 49278.
- if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
- FROM_HERE,
- "Unsynced handles should be empty",
- trans))
- return false;
- if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
- FROM_HERE,
- "Unapplied metahandles should be empty",
- trans))
- return false;
- }
-
- return safe;
-}
-
-void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
- ReadTransaction trans(FROM_HERE, this);
- ScopedKernelLock lock(this);
-
- // If there is an unrecoverable error then just bail out.
- if (unrecoverable_error_set(&trans))
- return;
-
- // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
- // clear dirty flags.
- for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
- i != kernel_->dirty_metahandles.end(); ++i) {
- EntryKernel* entry = GetEntryByHandle(*i, &lock);
- if (!entry)
- continue;
- // Skip over false positives; it happens relatively infrequently.
- if (!entry->is_dirty())
- continue;
- snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
- new EntryKernel(*entry));
- DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
- // We don't bother removing from the index here as we blow the entire thing
- // in a moment, and it unnecessarily complicates iteration.
- entry->clear_dirty(NULL);
- }
- ClearDirtyMetahandles();
-
- // Set purged handles.
- DCHECK(snapshot->metahandles_to_purge.empty());
- snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);
-
- // Fill kernel_info_status and kernel_info.
- snapshot->kernel_info = kernel_->persisted_info;
- // To avoid duplicates when the process crashes, we record the next_id to be
- // greater magnitude than could possibly be reached before the next save
- // changes. In other words, it's effectively impossible for the user to
- // generate 65536 new bookmarks in 3 seconds.
- snapshot->kernel_info.next_id -= 65536;
- snapshot->kernel_info_status = kernel_->info_status;
- // This one we reset on failure.
- kernel_->info_status = KERNEL_SHARE_INFO_VALID;
-
- delete_journal_->TakeSnapshotAndClear(
- &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
-}
-
-bool Directory::SaveChanges() {
- bool success = false;
-
- base::AutoLock scoped_lock(kernel_->save_changes_mutex);
-
- // Snapshot and save.
- SaveChangesSnapshot snapshot;
- TakeSnapshotForSaveChanges(&snapshot);
- success = store_->SaveChanges(snapshot);
-
- // Handle success or failure.
- if (success)
- success = VacuumAfterSaveChanges(snapshot);
- else
- HandleSaveChangesFailure(snapshot);
- return success;
-}
-
-bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
- if (snapshot.dirty_metas.empty())
- return true;
-
- // Need a write transaction as we are about to permanently purge entries.
- WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
- ScopedKernelLock lock(this);
- // Now drop everything we can out of memory.
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- MetahandlesMap::iterator found =
- kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
- EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
- NULL : found->second);
- if (entry && SafeToPurgeFromMemory(&trans, entry)) {
- // We now drop deleted metahandles that are up to date on both the client
- // and the server.
- size_t num_erased = 0;
- num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
- DCHECK_EQ(1u, num_erased);
- num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
- DCHECK_EQ(1u, num_erased);
- if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
- num_erased =
- kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
- DCHECK_EQ(1u, num_erased);
- }
- if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
- num_erased =
- kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
- DCHECK_EQ(1u, num_erased);
- }
- if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
- FROM_HERE,
- "Deleted entry still present",
- (&trans)))
- return false;
- delete entry;
- }
- if (trans.unrecoverable_error_set())
- return false;
- }
- return true;
-}
-
-void Directory::UnapplyEntry(EntryKernel* entry) {
- int64 handle = entry->ref(META_HANDLE);
- ModelType server_type = GetModelTypeFromSpecifics(
- entry->ref(SERVER_SPECIFICS));
-
- // Clear enough so that on the next sync cycle all local data will
- // be overwritten.
- // Note: do not modify the root node in order to preserve the
- // initial sync ended bit for this type (else on the next restart
- // this type will be treated as disabled and therefore fully purged).
- if (IsRealDataType(server_type) &&
- ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
- return;
- }
-
- // Set the unapplied bit if this item has server data.
- if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
- entry->put(IS_UNAPPLIED_UPDATE, true);
- kernel_->unapplied_update_metahandles[server_type].insert(handle);
- entry->mark_dirty(&kernel_->dirty_metahandles);
- }
-
- // Unset the unsynced bit.
- if (entry->ref(IS_UNSYNCED)) {
- kernel_->unsynced_metahandles.erase(handle);
- entry->put(IS_UNSYNCED, false);
- entry->mark_dirty(&kernel_->dirty_metahandles);
- }
-
- // Mark the item as locally deleted. No deleted items are allowed in the
- // parent child index.
- if (!entry->ref(IS_DEL)) {
- kernel_->parent_child_index.Remove(entry);
- entry->put(IS_DEL, true);
- entry->mark_dirty(&kernel_->dirty_metahandles);
- }
-
- // Set the version to the "newly created" version.
- if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
- entry->put(BASE_VERSION, CHANGES_VERSION);
- entry->mark_dirty(&kernel_->dirty_metahandles);
- }
-
- // At this point locally created items that aren't synced will become locally
- // deleted items, and purged on the next snapshot. All other items will match
- // the state they would have had if they were just created via a server
- // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
-}
-
-void Directory::DeleteEntry(bool save_to_journal,
- EntryKernel* entry,
- EntryKernelSet* entries_to_journal) {
- int64 handle = entry->ref(META_HANDLE);
- ModelType server_type = GetModelTypeFromSpecifics(
- entry->ref(SERVER_SPECIFICS));
-
- kernel_->metahandles_to_purge.insert(handle);
-
- size_t num_erased = 0;
- num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
- DCHECK_EQ(1u, num_erased);
- num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
- DCHECK_EQ(1u, num_erased);
- num_erased = kernel_->unsynced_metahandles.erase(handle);
- DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
- num_erased =
- kernel_->unapplied_update_metahandles[server_type].erase(handle);
- DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
- if (kernel_->parent_child_index.Contains(entry))
- kernel_->parent_child_index.Remove(entry);
-
- if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
- num_erased =
- kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
- DCHECK_EQ(1u, num_erased);
- }
- if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
- num_erased =
- kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
- DCHECK_EQ(1u, num_erased);
- }
-
- if (save_to_journal) {
- entries_to_journal->insert(entry);
- } else {
- delete entry;
- }
-}
-
-bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
- ModelTypeSet types_to_journal,
- ModelTypeSet types_to_unapply) {
- disabled_types.RemoveAll(ProxyTypes());
-
- if (disabled_types.Empty())
- return true;
-
- {
- WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);
-
- EntryKernelSet entries_to_journal;
- STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);
-
- {
- ScopedKernelLock lock(this);
-
- // We iterate in two passes to avoid a bug in STLport (which is used in
- // the Android build). There are some versions of that library where a
- // hash_map's iterators can be invalidated when an item is erased from the
- // hash_map.
- // See http://sourceforge.net/p/stlport/bugs/239/.
-
- std::set<EntryKernel*> to_purge;
- for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
- it != kernel_->metahandles_map.end(); ++it) {
- const sync_pb::EntitySpecifics& local_specifics =
- it->second->ref(SPECIFICS);
- const sync_pb::EntitySpecifics& server_specifics =
- it->second->ref(SERVER_SPECIFICS);
- ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
- ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
-
- if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
- (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
- to_purge.insert(it->second);
- }
- }
-
- for (std::set<EntryKernel*>::iterator it = to_purge.begin();
- it != to_purge.end(); ++it) {
- EntryKernel* entry = *it;
-
- const sync_pb::EntitySpecifics& local_specifics =
- (*it)->ref(SPECIFICS);
- const sync_pb::EntitySpecifics& server_specifics =
- (*it)->ref(SERVER_SPECIFICS);
- ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
- ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
-
- if (types_to_unapply.Has(local_type) ||
- types_to_unapply.Has(server_type)) {
- UnapplyEntry(entry);
- } else {
- bool save_to_journal =
- (types_to_journal.Has(local_type) ||
- types_to_journal.Has(server_type)) &&
- (delete_journal_->IsDeleteJournalEnabled(local_type) ||
- delete_journal_->IsDeleteJournalEnabled(server_type));
- DeleteEntry(save_to_journal, entry, &entries_to_journal);
- }
- }
-
- delete_journal_->AddJournalBatch(&trans, entries_to_journal);
-
- // Ensure meta tracking for these data types reflects the purged state.
- for (ModelTypeSet::Iterator it = disabled_types.First();
- it.Good(); it.Inc()) {
- kernel_->persisted_info.transaction_version[it.Get()] = 0;
-
- // Don't discard progress markers for unapplied types.
- if (!types_to_unapply.Has(it.Get()))
- kernel_->persisted_info.reset_download_progress(it.Get());
- }
- }
- }
- return true;
-}
-
-void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
- WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
- ScopedKernelLock lock(this);
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-
- // Because we optimistically cleared the dirty bit on the real entries when
- // taking the snapshot, we must restore it on failure. Not doing this could
- // cause lost data, if no other changes are made to the in-memory entries
- // that would cause the dirty bit to get set again. Setting the bit ensures
- // that SaveChanges will at least try again later.
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- MetahandlesMap::iterator found =
- kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
- if (found != kernel_->metahandles_map.end()) {
- found->second->mark_dirty(&kernel_->dirty_metahandles);
- }
- }
-
- kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
- snapshot.metahandles_to_purge.end());
-
- // Restore delete journals.
- delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
- delete_journal_->PurgeDeleteJournals(&trans,
- snapshot.delete_journals_to_purge);
-}
-
-void Directory::GetDownloadProgress(
- ModelType model_type,
- sync_pb::DataTypeProgressMarker* value_out) const {
- ScopedKernelLock lock(this);
- return value_out->CopyFrom(
- kernel_->persisted_info.download_progress[model_type]);
-}
-
-void Directory::GetDownloadProgressAsString(
- ModelType model_type,
- std::string* value_out) const {
- ScopedKernelLock lock(this);
- kernel_->persisted_info.download_progress[model_type].SerializeToString(
- value_out);
-}
-
-size_t Directory::GetEntriesCount() const {
- ScopedKernelLock lock(this);
- return kernel_->metahandles_map.size();
-}
-
-void Directory::SetDownloadProgress(
- ModelType model_type,
- const sync_pb::DataTypeProgressMarker& new_progress) {
- ScopedKernelLock lock(this);
- kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-int64 Directory::GetTransactionVersion(ModelType type) const {
- kernel_->transaction_mutex.AssertAcquired();
- return kernel_->persisted_info.transaction_version[type];
-}
-
-void Directory::IncrementTransactionVersion(ModelType type) {
- kernel_->transaction_mutex.AssertAcquired();
- kernel_->persisted_info.transaction_version[type]++;
-}
-
-ModelTypeSet Directory::InitialSyncEndedTypes() {
- syncable::ReadTransaction trans(FROM_HERE, this);
- ModelTypeSet protocol_types = ProtocolTypes();
- ModelTypeSet initial_sync_ended_types;
- for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
- if (InitialSyncEndedForType(&trans, i.Get())) {
- initial_sync_ended_types.Put(i.Get());
- }
- }
- return initial_sync_ended_types;
-}
-
-bool Directory::InitialSyncEndedForType(ModelType type) {
- syncable::ReadTransaction trans(FROM_HERE, this);
- return InitialSyncEndedForType(&trans, type);
-}
-
-bool Directory::InitialSyncEndedForType(
- BaseTransaction* trans, ModelType type) {
- // True iff the type's root node has been received and applied.
- syncable::Entry entry(trans,
- syncable::GET_BY_SERVER_TAG,
- ModelTypeToRootTag(type));
- return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
-}
-
-string Directory::store_birthday() const {
- ScopedKernelLock lock(this);
- return kernel_->persisted_info.store_birthday;
-}
-
-void Directory::set_store_birthday(const string& store_birthday) {
- ScopedKernelLock lock(this);
- if (kernel_->persisted_info.store_birthday == store_birthday)
- return;
- kernel_->persisted_info.store_birthday = store_birthday;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-string Directory::bag_of_chips() const {
- ScopedKernelLock lock(this);
- return kernel_->persisted_info.bag_of_chips;
-}
-
-void Directory::set_bag_of_chips(const string& bag_of_chips) {
- ScopedKernelLock lock(this);
- if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
- return;
- kernel_->persisted_info.bag_of_chips = bag_of_chips;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-
-string Directory::cache_guid() const {
- // No need to lock since nothing ever writes to it after load.
- return kernel_->cache_guid;
-}
-
-NigoriHandler* Directory::GetNigoriHandler() {
- return nigori_handler_;
-}
-
-Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
- DCHECK_EQ(this, trans->directory());
- return cryptographer_;
-}
-
-void Directory::GetAllMetaHandles(BaseTransaction* trans,
- MetahandleSet* result) {
- result->clear();
- ScopedKernelLock lock(this);
- for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
- i != kernel_->metahandles_map.end(); ++i) {
- result->insert(i->first);
- }
-}
-
-void Directory::GetAllEntryKernels(BaseTransaction* trans,
- std::vector<const EntryKernel*>* result) {
- result->clear();
- ScopedKernelLock lock(this);
- for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
- i != kernel_->metahandles_map.end(); ++i) {
- result->push_back(i->second);
- }
-}
-
-void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
- Metahandles* result) {
- result->clear();
- ScopedKernelLock lock(this);
- copy(kernel_->unsynced_metahandles.begin(),
- kernel_->unsynced_metahandles.end(), back_inserter(*result));
-}
-
-int64 Directory::unsynced_entity_count() const {
- ScopedKernelLock lock(this);
- return kernel_->unsynced_metahandles.size();
-}
-
-bool Directory::TypeHasUnappliedUpdates(ModelType type) {
- ScopedKernelLock lock(this);
- return !kernel_->unapplied_update_metahandles[type].empty();
-}
-
-void Directory::GetUnappliedUpdateMetaHandles(
- BaseTransaction* trans,
- FullModelTypeSet server_types,
- std::vector<int64>* result) {
- result->clear();
- ScopedKernelLock lock(this);
- for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
- const ModelType type = ModelTypeFromInt(i);
- if (server_types.Has(type)) {
- std::copy(kernel_->unapplied_update_metahandles[type].begin(),
- kernel_->unapplied_update_metahandles[type].end(),
- back_inserter(*result));
- }
- }
-}
-
-void Directory::CollectMetaHandleCounts(
- std::vector<int>* num_entries_by_type,
- std::vector<int>* num_to_delete_entries_by_type) {
- syncable::ReadTransaction trans(FROM_HERE, this);
- ScopedKernelLock lock(this);
-
- for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
- it != kernel_->metahandles_map.end(); ++it) {
- EntryKernel* entry = it->second;
- const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
- (*num_entries_by_type)[type]++;
- if (entry->ref(IS_DEL))
- (*num_to_delete_entries_by_type)[type]++;
- }
-}
-
-bool Directory::CheckInvariantsOnTransactionClose(
- syncable::BaseTransaction* trans,
- const MetahandleSet& modified_handles) {
- // NOTE: The trans may be in the process of being destructed. Be careful if
- // you wish to call any of its virtual methods.
- switch (invariant_check_level_) {
- case FULL_DB_VERIFICATION: {
- MetahandleSet all_handles;
- GetAllMetaHandles(trans, &all_handles);
- return CheckTreeInvariants(trans, all_handles);
- }
- case VERIFY_CHANGES: {
- return CheckTreeInvariants(trans, modified_handles);
- }
- case OFF: {
- return true;
- }
- }
- NOTREACHED();
- return false;
-}
-
-bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
- MetahandleSet handles;
- GetAllMetaHandles(trans, &handles);
- return CheckTreeInvariants(trans, handles);
-}
-
-bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
- const MetahandleSet& handles) {
- MetahandleSet::const_iterator i;
- for (i = handles.begin() ; i != handles.end() ; ++i) {
- int64 metahandle = *i;
- Entry e(trans, GET_BY_HANDLE, metahandle);
- if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
- return false;
- syncable::Id id = e.GetId();
- syncable::Id parentid = e.GetParentId();
-
- if (id.IsRoot()) {
- if (!SyncAssert(e.GetIsDir(), FROM_HERE,
- "Entry should be a directory",
- trans))
- return false;
- if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
- "Entry should be root",
- trans))
- return false;
- if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
- "Entry should be sycned",
- trans))
- return false;
- continue;
- }
-
- if (!e.GetIsDel()) {
- if (!SyncAssert(id != parentid, FROM_HERE,
- "Id should be different from parent id.",
- trans))
- return false;
- if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
- "Non unique name should not be empty.",
- trans))
- return false;
- int safety_count = handles.size() + 1;
- while (!parentid.IsRoot()) {
- Entry parent(trans, GET_BY_ID, parentid);
- if (!SyncAssert(parent.good(), FROM_HERE,
- "Parent entry is not valid.",
- trans))
- return false;
- if (handles.end() == handles.find(parent.GetMetahandle()))
- break; // Skip further checking if parent was unmodified.
- if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
- "Parent should be a directory",
- trans))
- return false;
- if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
- "Parent should not have been marked for deletion.",
- trans))
- return false;
- if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
- FROM_HERE,
- "Parent should be in the index.",
- trans))
- return false;
- parentid = parent.GetParentId();
- if (!SyncAssert(--safety_count > 0, FROM_HERE,
- "Count should be greater than zero.",
- trans))
- return false;
- }
- }
- int64 base_version = e.GetBaseVersion();
- int64 server_version = e.GetServerVersion();
- bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
- if (CHANGES_VERSION == base_version || 0 == base_version) {
- if (e.GetIsUnappliedUpdate()) {
- // Must be a new item, or a de-duplicated unique client tag
- // that was created both locally and remotely.
- if (!using_unique_client_tag) {
- if (!SyncAssert(e.GetIsDel(), FROM_HERE,
- "The entry should not have been deleted.",
- trans))
- return false;
- }
- // It came from the server, so it must have a server ID.
- if (!SyncAssert(id.ServerKnows(), FROM_HERE,
- "The id should be from a server.",
- trans))
- return false;
- } else {
- if (e.GetIsDir()) {
- // TODO(chron): Implement this mode if clients ever need it.
- // For now, you can't combine a client tag and a directory.
- if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
- "Directory cannot have a client tag.",
- trans))
- return false;
- }
- // Should be an uncomitted item, or a successfully deleted one.
- if (!e.GetIsDel()) {
- if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
- "The item should be unsynced.",
- trans))
- return false;
- }
- // If the next check failed, it would imply that an item exists
- // on the server, isn't waiting for application locally, but either
- // is an unsynced create or a sucessful delete in the local copy.
- // Either way, that's a mismatch.
- if (!SyncAssert(0 == server_version, FROM_HERE,
- "Server version should be zero.",
- trans))
- return false;
- // Items that aren't using the unique client tag should have a zero
- // base version only if they have a local ID. Items with unique client
- // tags are allowed to use the zero base version for undeletion and
- // de-duplication; the unique client tag trumps the server ID.
- if (!using_unique_client_tag) {
- if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
- "Should be a client only id.",
- trans))
- return false;
- }
- }
- } else {
- if (!SyncAssert(id.ServerKnows(),
- FROM_HERE,
- "Should be a server id.",
- trans))
- return false;
- }
- // Server-unknown items that are locally deleted should not be sent up to
- // the server. They must be !IS_UNSYNCED.
- if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
- FROM_HERE,
- "Locally deleted item must not be unsynced.",
- trans)) {
- return false;
- }
- }
- return true;
-}
-
-void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
- invariant_check_level_ = check_level;
-}
-
-int64 Directory::NextMetahandle() {
- ScopedKernelLock lock(this);
- int64 metahandle = (kernel_->next_metahandle)++;
- return metahandle;
-}
-
-// Always returns a client ID that is the string representation of a negative
-// number.
-Id Directory::NextId() {
- int64 result;
- {
- ScopedKernelLock lock(this);
- result = (kernel_->persisted_info.next_id)--;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
- }
- DCHECK_LT(result, 0);
- return Id::CreateFromClientString(base::Int64ToString(result));
-}
-
-bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
- ScopedKernelLock lock(this);
- return kernel_->parent_child_index.GetChildren(id) != NULL;
-}
-
-Id Directory::GetFirstChildId(BaseTransaction* trans,
- const EntryKernel* parent) {
- DCHECK(parent);
- DCHECK(parent->ref(IS_DIR));
-
- ScopedKernelLock lock(this);
- const OrderedChildSet* children =
- kernel_->parent_child_index.GetChildren(parent->ref(ID));
-
- // We're expected to return root if there are no children.
- if (!children)
- return Id();
-
- return (*children->begin())->ref(ID);
-}
-
-syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
- ScopedKernelLock lock(this);
-
- DCHECK(ParentChildIndex::ShouldInclude(e));
- const OrderedChildSet* children =
- kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
- DCHECK(children && !children->empty());
- OrderedChildSet::const_iterator i = children->find(e);
- DCHECK(i != children->end());
-
- if (i == children->begin()) {
- return Id();
- } else {
- i--;
- return (*i)->ref(ID);
- }
-}
-
-syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
- ScopedKernelLock lock(this);
-
- DCHECK(ParentChildIndex::ShouldInclude(e));
- const OrderedChildSet* children =
- kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
- DCHECK(children && !children->empty());
- OrderedChildSet::const_iterator i = children->find(e);
- DCHECK(i != children->end());
-
- i++;
- if (i == children->end()) {
- return Id();
- } else {
- return (*i)->ref(ID);
- }
-}
-
-// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
-// items as siblings of items that do not maintain postions. It is required
-// only for tests. See crbug.com/178282.
-void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
- DCHECK(!e->ref(IS_DEL));
- if (!e->ShouldMaintainPosition()) {
- DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
- return;
- }
- std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
- DCHECK(!suffix.empty());
-
- // Remove our item from the ParentChildIndex and remember to re-add it later.
- ScopedKernelLock lock(this);
- ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);
-
- // Note: The ScopedParentChildIndexUpdater will update this set for us as we
- // leave this function.
- const OrderedChildSet* siblings =
- kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
-
- if (!siblings) {
- // This parent currently has no other children.
- DCHECK(predecessor->ref(ID).IsRoot());
- UniquePosition pos = UniquePosition::InitialPosition(suffix);
- e->put(UNIQUE_POSITION, pos);
- return;
- }
-
- if (predecessor->ref(ID).IsRoot()) {
- // We have at least one sibling, and we're inserting to the left of them.
- UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
-
- UniquePosition pos;
- if (!successor_pos.IsValid()) {
- // If all our successors are of non-positionable types, just create an
- // initial position. We arbitrarily choose to sort invalid positions to
- // the right of the valid positions.
- //
- // We really shouldn't need to support this. See TODO above.
- pos = UniquePosition::InitialPosition(suffix);
- } else {
- DCHECK(!siblings->empty());
- pos = UniquePosition::Before(successor_pos, suffix);
- }
-
- e->put(UNIQUE_POSITION, pos);
- return;
- }
-
- // We can't support placing an item after an invalid position. Fortunately,
- // the tests don't exercise this particular case. We should not support
- // siblings with invalid positions at all. See TODO above.
- DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
-
- OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
- DCHECK(neighbour != siblings->end());
-
- ++neighbour;
- if (neighbour == siblings->end()) {
- // Inserting at the end of the list.
- UniquePosition pos = UniquePosition::After(
- predecessor->ref(UNIQUE_POSITION),
- suffix);
- e->put(UNIQUE_POSITION, pos);
- return;
- }
-
- EntryKernel* successor = *neighbour;
-
- // Another mixed valid and invalid position case. This one could be supported
- // in theory, but we're trying to deprecate support for siblings with and
- // without valid positions. See TODO above.
- DCHECK(successor->ref(UNIQUE_POSITION).IsValid());
-
- // Finally, the normal case: inserting between two elements.
- UniquePosition pos = UniquePosition::Between(
- predecessor->ref(UNIQUE_POSITION),
- successor->ref(UNIQUE_POSITION),
- suffix);
- e->put(UNIQUE_POSITION, pos);
- return;
-}
-
-// TODO(rlarocque): Avoid this indirection. Just return the set.
-void Directory::AppendChildHandles(const ScopedKernelLock& lock,
- const Id& parent_id,
- Directory::Metahandles* result) {
- const OrderedChildSet* children =
- kernel_->parent_child_index.GetChildren(parent_id);
- if (!children)
- return;
-
- for (OrderedChildSet::const_iterator i = children->begin();
- i != children->end(); ++i) {
- DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
- result->push_back((*i)->ref(META_HANDLE));
- }
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/directory.h b/chromium/sync/syncable/directory.h
deleted file mode 100644
index 0206dbb458b..00000000000
--- a/chromium/sync/syncable/directory.h
+++ /dev/null
@@ -1,557 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_DIRECTORY_H_
-#define SYNC_SYNCABLE_DIRECTORY_H_
-
-#include <deque>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/containers/hash_tables.h"
-#include "base/file_util.h"
-#include "base/gtest_prod_util.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/util/report_unrecoverable_error_function.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/syncable/dir_open_result.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/parent_child_index.h"
-#include "sync/syncable/syncable_delete_journal.h"
-
-namespace syncer {
-
-class Cryptographer;
-class TestUserShare;
-class UnrecoverableErrorHandler;
-
-namespace syncable {
-
-class BaseTransaction;
-class BaseWriteTransaction;
-class DirectoryChangeDelegate;
-class DirectoryBackingStore;
-class NigoriHandler;
-class ScopedKernelLock;
-class TransactionObserver;
-class WriteTransaction;
-
-enum InvariantCheckLevel {
- OFF = 0, // No checking.
- VERIFY_CHANGES = 1, // Checks only mutated entries. Does not check hierarchy.
- FULL_DB_VERIFICATION = 2 // Check every entry. This can be expensive.
-};
-
-class SYNC_EXPORT Directory {
- friend class BaseTransaction;
- friend class Entry;
- friend class ModelNeutralMutableEntry;
- friend class MutableEntry;
- friend class ReadTransaction;
- friend class ScopedKernelLock;
- friend class WriteTransaction;
- friend class SyncableDirectoryTest;
- friend class syncer::TestUserShare;
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, ManageDeleteJournals);
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
- TakeSnapshotGetsAllDirtyHandlesTest);
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
- TakeSnapshotGetsOnlyDirtyHandlesTest);
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
- TakeSnapshotGetsMetahandlesToPurge);
-
- public:
- typedef std::vector<int64> Metahandles;
-
- // Be careful when using these hash_map containers. According to the spec,
- // inserting into them may invalidate all iterators.
- //
- // It gets worse, though. The Anroid STL library has a bug that means it may
- // invalidate all iterators when you erase from the map, too. That means that
- // you can't iterate while erasing. STLDeleteElements(), std::remove_if(),
- // and other similar functions are off-limits too, until this bug is fixed.
- //
- // See http://sourceforge.net/p/stlport/bugs/239/.
- typedef base::hash_map<int64, EntryKernel*> MetahandlesMap;
- typedef base::hash_map<std::string, EntryKernel*> IdsMap;
- typedef base::hash_map<std::string, EntryKernel*> TagsMap;
-
- static const base::FilePath::CharType kSyncDatabaseFilename[];
-
- // The dirty/clean state of kernel fields backed by the share_info table.
- // This is public so it can be used in SaveChangesSnapshot for persistence.
- enum KernelShareInfoStatus {
- KERNEL_SHARE_INFO_INVALID,
- KERNEL_SHARE_INFO_VALID,
- KERNEL_SHARE_INFO_DIRTY
- };
-
- // Various data that the Directory::Kernel we are backing (persisting data
- // for) needs saved across runs of the application.
- struct SYNC_EXPORT_PRIVATE PersistedKernelInfo {
- PersistedKernelInfo();
- ~PersistedKernelInfo();
-
- // Set the |download_progress| entry for the given model to a
- // "first sync" start point. When such a value is sent to the server,
- // a full download of all objects of the model will be initiated.
- void reset_download_progress(ModelType model_type);
-
- // Last sync timestamp fetched from the server.
- sync_pb::DataTypeProgressMarker download_progress[MODEL_TYPE_COUNT];
- // Sync-side transaction version per data type. Monotonically incremented
- // when updating native model. A copy is also saved in native model.
- // Later out-of-sync models can be detected and fixed by comparing
- // transaction versions of sync model and native model.
- // TODO(hatiaol): implement detection and fixing of out-of-sync models.
- // Bug 154858.
- int64 transaction_version[MODEL_TYPE_COUNT];
- // The store birthday we were given by the server. Contents are opaque to
- // the client.
- std::string store_birthday;
- // The next local ID that has not been used with this cache-GUID.
- int64 next_id;
- // The serialized bag of chips we were given by the server. Contents are
- // opaque to the client. This is the serialization of a message of type
- // ChipBag defined in sync.proto. It can contains NULL characters.
- std::string bag_of_chips;
- };
-
- // What the Directory needs on initialization to create itself and its Kernel.
- // Filled by DirectoryBackingStore::Load.
- struct KernelLoadInfo {
- PersistedKernelInfo kernel_info;
- std::string cache_guid; // Created on first initialization, never changes.
- int64 max_metahandle; // Computed (using sql MAX aggregate) on init.
- KernelLoadInfo() : max_metahandle(0) {
- }
- };
-
- // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
- // constructed and forms a consistent snapshot of what needs to be sent to
- // the backing store.
- struct SYNC_EXPORT_PRIVATE SaveChangesSnapshot {
- SaveChangesSnapshot();
- ~SaveChangesSnapshot();
-
- KernelShareInfoStatus kernel_info_status;
- PersistedKernelInfo kernel_info;
- EntryKernelSet dirty_metas;
- MetahandleSet metahandles_to_purge;
- EntryKernelSet delete_journals;
- MetahandleSet delete_journals_to_purge;
- };
-
- // Does not take ownership of |encryptor|.
- // |report_unrecoverable_error_function| may be NULL.
- // Takes ownership of |store|.
- Directory(
- DirectoryBackingStore* store,
- UnrecoverableErrorHandler* unrecoverable_error_handler,
- ReportUnrecoverableErrorFunction
- report_unrecoverable_error_function,
- NigoriHandler* nigori_handler,
- Cryptographer* cryptographer);
- virtual ~Directory();
-
- // Does not take ownership of |delegate|, which must not be NULL.
- // Starts sending events to |delegate| if the returned result is
- // OPENED. Note that events to |delegate| may be sent from *any*
- // thread. |transaction_observer| must be initialized.
- DirOpenResult Open(const std::string& name,
- DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>&
- transaction_observer);
-
- // Stops sending events to the delegate and the transaction
- // observer.
- void Close();
-
- int64 NextMetahandle();
- // Returns a negative integer unique to this client.
- syncable::Id NextId();
-
- bool good() const { return NULL != kernel_; }
-
- // The download progress is an opaque token provided by the sync server
- // to indicate the continuation state of the next GetUpdates operation.
- void GetDownloadProgress(
- ModelType type,
- sync_pb::DataTypeProgressMarker* value_out) const;
- void GetDownloadProgressAsString(
- ModelType type,
- std::string* value_out) const;
- size_t GetEntriesCount() const;
- void SetDownloadProgress(
- ModelType type,
- const sync_pb::DataTypeProgressMarker& value);
-
- // Gets/Increments transaction version of a model type. Must be called when
- // holding kernel mutex.
- int64 GetTransactionVersion(ModelType type) const;
- void IncrementTransactionVersion(ModelType type);
-
- ModelTypeSet InitialSyncEndedTypes();
- bool InitialSyncEndedForType(ModelType type);
- bool InitialSyncEndedForType(BaseTransaction* trans, ModelType type);
-
- const std::string& name() const { return kernel_->name; }
-
- // (Account) Store birthday is opaque to the client, so we keep it in the
- // format it is in the proto buffer in case we switch to a binary birthday
- // later.
- std::string store_birthday() const;
- void set_store_birthday(const std::string& store_birthday);
-
- // (Account) Bag of chip is an opaque state used by the server to track the
- // client.
- std::string bag_of_chips() const;
- void set_bag_of_chips(const std::string& bag_of_chips);
-
- // Unique to each account / client pair.
- std::string cache_guid() const;
-
- // Returns a pointer to our Nigori node handler.
- NigoriHandler* GetNigoriHandler();
-
- // Returns a pointer to our cryptographer. Does not transfer ownership.
- // Not thread safe, so should only be accessed while holding a transaction.
- Cryptographer* GetCryptographer(const BaseTransaction* trans);
-
- // Returns true if the directory had encountered an unrecoverable error.
- // Note: Any function in |Directory| that can be called without holding a
- // transaction need to check if the Directory already has an unrecoverable
- // error on it.
- bool unrecoverable_error_set(const BaseTransaction* trans) const;
-
- // Called to immediately report an unrecoverable error (but don't
- // propagate it up).
- void ReportUnrecoverableError() {
- if (report_unrecoverable_error_function_) {
- report_unrecoverable_error_function_();
- }
- }
-
- // Called to set the unrecoverable error on the directory and to propagate
- // the error to upper layers.
- void OnUnrecoverableError(const BaseTransaction* trans,
- const tracked_objects::Location& location,
- const std::string & message);
-
- DeleteJournal* delete_journal();
-
- // Returns the child meta handles (even those for deleted/unlinked
- // nodes) for given parent id. Clears |result| if there are no
- // children.
- bool GetChildHandlesById(BaseTransaction*, const Id& parent_id,
- Metahandles* result);
-
- // Returns the child meta handles (even those for deleted/unlinked
- // nodes) for given meta handle. Clears |result| if there are no
- // children.
- bool GetChildHandlesByHandle(BaseTransaction*, int64 handle,
- Metahandles* result);
-
- // Counts all items under the given node, including the node itself.
- int GetTotalNodeCount(BaseTransaction*, EntryKernel* kernel_) const;
-
- // Returns this item's position within its parent folder.
- // The left-most item is 0, second left-most is 1, etc.
- int GetPositionIndex(BaseTransaction*, EntryKernel* kernel_) const;
-
- // Returns true iff |id| has children.
- bool HasChildren(BaseTransaction* trans, const Id& id);
-
- // Find the first child in the positional ordering under a parent,
- // and fill in |*first_child_id| with its id. Fills in a root Id if
- // parent has no children. Returns true if the first child was
- // successfully found, or false if an error was encountered.
- Id GetFirstChildId(BaseTransaction* trans, const EntryKernel* parent);
-
- // These functions allow one to fetch the next or previous item under
- // the same folder. Returns the "root" ID if there is no predecessor
- // or successor.
- //
- // TODO(rlarocque): These functions are used mainly for tree traversal. We
- // should replace these with an iterator API. See crbug.com/178275.
- syncable::Id GetPredecessorId(EntryKernel*);
- syncable::Id GetSuccessorId(EntryKernel*);
-
- // Places |e| as a successor to |predecessor|. If |predecessor| is NULL,
- // |e| will be placed as the left-most item in its folder.
- //
- // Both |e| and |predecessor| must be valid entries under the same parent.
- //
- // TODO(rlarocque): This function includes limited support for placing items
- // with valid positions (ie. Bookmarks) as siblings of items that have no set
- // ordering (ie. Autofill items). This support is required only for tests,
- // and should be removed. See crbug.com/178282.
- void PutPredecessor(EntryKernel* e, EntryKernel* predecessor);
-
- // SaveChanges works by taking a consistent snapshot of the current Directory
- // state and indices (by deep copy) under a ReadTransaction, passing this
- // snapshot to the backing store under no transaction, and finally cleaning
- // up by either purging entries no longer needed (this part done under a
- // WriteTransaction) or rolling back the dirty bits. It also uses
- // internal locking to enforce SaveChanges operations are mutually exclusive.
- //
- // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
- bool SaveChanges();
-
- // Fill in |result| with all entry kernels.
- void GetAllEntryKernels(BaseTransaction* trans,
- std::vector<const EntryKernel*>* result);
-
- // Returns the number of entities with the unsynced bit set.
- int64 unsynced_entity_count() const;
-
- // Get GetUnsyncedMetaHandles should only be called after SaveChanges and
- // before any new entries have been created. The intention is that the
- // syncer should call it from its PerformSyncQueries member.
- void GetUnsyncedMetaHandles(BaseTransaction* trans,
- Metahandles* result);
-
- // Returns whether or not this |type| has unapplied updates.
- bool TypeHasUnappliedUpdates(ModelType type);
-
- // Get all the metahandles for unapplied updates for a given set of
- // server types.
- void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
- FullModelTypeSet server_types,
- std::vector<int64>* result);
-
- // Get metahandle counts for various criteria to show on the
- // about:sync page. The information is computed on the fly
- // each time. If this results in a significant performance hit,
- // additional data structures can be added to cache results.
- void CollectMetaHandleCounts(std::vector<int>* num_entries_by_type,
- std::vector<int>* num_to_delete_entries_by_type);
-
- // Sets the level of invariant checking performed after transactions.
- void SetInvariantCheckLevel(InvariantCheckLevel check_level);
-
- // Checks tree metadata consistency following a transaction. It is intended
- // to provide a reasonable tradeoff between performance and comprehensiveness
- // and may be used in release code.
- bool CheckInvariantsOnTransactionClose(
- syncable::BaseTransaction* trans,
- const MetahandleSet& modified_handles);
-
- // Forces a full check of the directory. This operation may be slow and
- // should not be invoked outside of tests.
- bool FullyCheckTreeInvariants(BaseTransaction *trans);
-
- // Purges data associated with any entries whose ModelType or ServerModelType
- // is found in |disabled_types|, from sync directory _both_ in memory and on
- // disk. Only valid, "real" model types are allowed in |disabled_types| (see
- // model_type.h for definitions).
- // 1. Data associated with |types_to_journal| is saved in the delete journal
- // to help prevent back-from-dead problem due to offline delete in the next
- // sync session. |types_to_journal| must be a subset of |disabled_types|.
- // 2. Data associated with |types_to_unapply| is reset to an "unapplied"
- // state, wherein all local data is deleted and IS_UNAPPLIED is set to true.
- // This is useful when there's no benefit in discarding the currently
- // downloaded state, such as when there are cryptographer errors.
- // |types_to_unapply| must be a subset of |disabled_types|.
- // 3. All other data is purged entirely.
- // Note: "Purge" is just meant to distinguish from "deleting" entries, which
- // means something different in the syncable namespace.
- // WARNING! This can be real slow, as it iterates over all entries.
- // WARNING! Performs synchronous I/O.
- // Returns: true on success, false if an error was encountered.
- virtual bool PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
- ModelTypeSet types_to_journal,
- ModelTypeSet types_to_unapply);
-
- protected: // for friends, mainly used by Entry constructors
- virtual EntryKernel* GetEntryByHandle(int64 handle);
- virtual EntryKernel* GetEntryByHandle(int64 metahandle,
- ScopedKernelLock* lock);
- virtual EntryKernel* GetEntryById(const Id& id);
- EntryKernel* GetEntryByServerTag(const std::string& tag);
- virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
- EntryKernel* GetRootEntry();
- bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
- const Id& new_id);
- bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
- const Id& new_parent_id);
- void ClearDirtyMetahandles();
-
- DirOpenResult OpenImpl(
- const std::string& name,
- DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>& transaction_observer);
-
- private:
- struct Kernel {
- // |delegate| must not be NULL. |transaction_observer| must be
- // initialized.
- Kernel(const std::string& name, const KernelLoadInfo& info,
- DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>& transaction_observer);
-
- ~Kernel();
-
- // Implements ReadTransaction / WriteTransaction using a simple lock.
- base::Lock transaction_mutex;
-
- // Protected by transaction_mutex. Used by WriteTransactions.
- int64 next_write_transaction_id;
-
- // The name of this directory.
- std::string const name;
-
- // Protects all members below.
- // The mutex effectively protects all the indices, but not the
- // entries themselves. So once a pointer to an entry is pulled
- // from the index, the mutex can be unlocked and entry read or written.
- //
- // Never hold the mutex and do anything with the database or any
- // other buffered IO. Violating this rule will result in deadlock.
- base::Lock mutex;
-
- // Entries indexed by metahandle. This container is considered to be the
- // owner of all EntryKernels, which may be referened by the other
- // containers. If you remove an EntryKernel from this map, you probably
- // want to remove it from all other containers and delete it, too.
- MetahandlesMap metahandles_map;
-
- // Entries indexed by id
- IdsMap ids_map;
-
- // Entries indexed by server tag.
- // This map does not include any entries with non-existent server tags.
- TagsMap server_tags_map;
-
- // Entries indexed by client tag.
- // This map does not include any entries with non-existent client tags.
- // IS_DEL items are included.
- TagsMap client_tags_map;
-
- // Contains non-deleted items, indexed according to parent and position
- // within parent. Protected by the ScopedKernelLock.
- ParentChildIndex parent_child_index;
-
- // 3 in-memory indices on bits used extremely frequently by the syncer.
- // |unapplied_update_metahandles| is keyed by the server model type.
- MetahandleSet unapplied_update_metahandles[MODEL_TYPE_COUNT];
- MetahandleSet unsynced_metahandles;
- // Contains metahandles that are most likely dirty (though not
- // necessarily). Dirtyness is confirmed in TakeSnapshotForSaveChanges().
- MetahandleSet dirty_metahandles;
-
- // When a purge takes place, we remove items from all our indices and stash
- // them in here so that SaveChanges can persist their permanent deletion.
- MetahandleSet metahandles_to_purge;
-
- KernelShareInfoStatus info_status;
-
- // These 3 members are backed in the share_info table, and
- // their state is marked by the flag above.
-
- // A structure containing the Directory state that is written back into the
- // database on SaveChanges.
- PersistedKernelInfo persisted_info;
-
- // A unique identifier for this account's cache db, used to generate
- // unique server IDs. No need to lock, only written at init time.
- const std::string cache_guid;
-
- // It doesn't make sense for two threads to run SaveChanges at the same
- // time; this mutex protects that activity.
- base::Lock save_changes_mutex;
-
- // The next metahandle is protected by kernel mutex.
- int64 next_metahandle;
-
- // The delegate for directory change events. Must not be NULL.
- DirectoryChangeDelegate* const delegate;
-
- // The transaction observer.
- const WeakHandle<TransactionObserver> transaction_observer;
- };
-
- // These private versions expect the kernel lock to already be held
- // before calling.
- EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
-
- // A helper that implements the logic of checking tree invariants.
- bool CheckTreeInvariants(syncable::BaseTransaction* trans,
- const MetahandleSet& handles);
-
- // Helper to prime metahandles_map, ids_map, parent_child_index,
- // unsynced_metahandles, unapplied_update_metahandles, server_tags_map and
- // client_tags_map from metahandles_index. The input |handles_map| will be
- // cleared during the initialization process.
- void InitializeIndices(MetahandlesMap* handles_map);
-
- // Constructs a consistent snapshot of the current Directory state and
- // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
- // See SaveChanges() for more information.
- void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);
-
- // Purges from memory any unused, safe to remove entries that were
- // successfully deleted on disk as a result of the SaveChanges that processed
- // |snapshot|. See SaveChanges() for more information.
- bool VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);
-
- // Rolls back dirty bits in the event that the SaveChanges that
- // processed |snapshot| failed, for example, due to no disk space.
- void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
-
- // For new entry creation only
- bool InsertEntry(BaseWriteTransaction* trans,
- EntryKernel* entry, ScopedKernelLock* lock);
- bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);
-
- // Used by CheckTreeInvariants
- void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
- bool SafeToPurgeFromMemory(WriteTransaction* trans,
- const EntryKernel* const entry) const;
-
- // A helper used by GetTotalNodeCount.
- void GetChildSetForKernel(
- BaseTransaction*,
- EntryKernel* kernel_,
- std::deque<const OrderedChildSet*>* child_sets) const;
-
- // Append the handles of the children of |parent_id| to |result|.
- void AppendChildHandles(
- const ScopedKernelLock& lock,
- const Id& parent_id, Directory::Metahandles* result);
-
- // Helper methods used by PurgeDisabledTypes.
- void UnapplyEntry(EntryKernel* entry);
- void DeleteEntry(bool save_to_journal,
- EntryKernel* entry,
- EntryKernelSet* entries_to_journal);
-
- Kernel* kernel_;
-
- scoped_ptr<DirectoryBackingStore> store_;
-
- UnrecoverableErrorHandler* const unrecoverable_error_handler_;
- const ReportUnrecoverableErrorFunction report_unrecoverable_error_function_;
- bool unrecoverable_error_set_;
-
- // Not owned.
- NigoriHandler* const nigori_handler_;
- Cryptographer* const cryptographer_;
-
- InvariantCheckLevel invariant_check_level_;
-
- // Maintain deleted entries not in |kernel_| until it's verified that they
- // are deleted in native models as well.
- scoped_ptr<DeleteJournal> delete_journal_;
-
- DISALLOW_COPY_AND_ASSIGN(Directory);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_DIRECTORY_H_
diff --git a/chromium/sync/syncable/directory_backing_store.cc b/chromium/sync/syncable/directory_backing_store.cc
deleted file mode 100644
index 417a4d10e33..00000000000
--- a/chromium/sync/syncable/directory_backing_store.cc
+++ /dev/null
@@ -1,1504 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/directory_backing_store.h"
-
-#include "build/build_config.h"
-
-#include <limits>
-
-#include "base/base64.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-#include "sql/connection.h"
-#include "sql/statement.h"
-#include "sql/transaction.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/syncable-inl.h"
-#include "sync/syncable/syncable_columns.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/util/time.h"
-
-using std::string;
-
-namespace syncer {
-namespace syncable {
-
-// This just has to be big enough to hold an UPDATE or INSERT statement that
-// modifies all the columns in the entry table.
-static const string::size_type kUpdateStatementBufferSize = 2048;
-
-// Increment this version whenever updating DB tables.
-const int32 kCurrentDBVersion = 86;
-
-// Iterate over the fields of |entry| and bind each to |statement| for
-// updating. Returns the number of args bound.
-void BindFields(const EntryKernel& entry,
- sql::Statement* statement) {
- int index = 0;
- int i = 0;
- for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
- statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
- }
- for ( ; i < TIME_FIELDS_END; ++i) {
- statement->BindInt64(index++,
- TimeToProtoTime(
- entry.ref(static_cast<TimeField>(i))));
- }
- for ( ; i < ID_FIELDS_END; ++i) {
- statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
- }
- for ( ; i < BIT_FIELDS_END; ++i) {
- statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
- }
- for ( ; i < STRING_FIELDS_END; ++i) {
- statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- std::string temp;
- entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
- statement->BindBlob(index++, temp.data(), temp.length());
- }
- for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
- std::string temp;
- entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
- statement->BindBlob(index++, temp.data(), temp.length());
- }
-}
-
-// The caller owns the returned EntryKernel*. Assumes the statement currently
-// points to a valid row in the metas table. Returns NULL to indicate that
-// it detected a corruption in the data on unpacking.
-scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
- scoped_ptr<EntryKernel> kernel(new EntryKernel());
- DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
- int i = 0;
- for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
- kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
- }
- for ( ; i < TIME_FIELDS_END; ++i) {
- kernel->put(static_cast<TimeField>(i),
- ProtoTimeToTime(statement->ColumnInt64(i)));
- }
- for ( ; i < ID_FIELDS_END; ++i) {
- kernel->mutable_ref(static_cast<IdField>(i)).s_ =
- statement->ColumnString(i);
- }
- for ( ; i < BIT_FIELDS_END; ++i) {
- kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
- }
- for ( ; i < STRING_FIELDS_END; ++i) {
- kernel->put(static_cast<StringField>(i),
- statement->ColumnString(i));
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
- statement->ColumnBlob(i), statement->ColumnByteLength(i));
- }
- for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
- std::string temp;
- statement->ColumnBlobAsString(i, &temp);
-
- sync_pb::UniquePosition proto;
- if (!proto.ParseFromString(temp)) {
- DVLOG(1) << "Unpacked invalid position. Assuming the DB is corrupt";
- return scoped_ptr<EntryKernel>();
- }
-
- kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
- UniquePosition::FromProto(proto);
- }
- return kernel.Pass();
-}
-
-namespace {
-
-string ComposeCreateTableColumnSpecs() {
- const ColumnSpec* begin = g_metas_columns;
- const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
- string query;
- query.reserve(kUpdateStatementBufferSize);
- char separator = '(';
- for (const ColumnSpec* column = begin; column != end; ++column) {
- query.push_back(separator);
- separator = ',';
- query.append(column->name);
- query.push_back(' ');
- query.append(column->spec);
- }
- query.push_back(')');
- return query;
-}
-
-void AppendColumnList(std::string* output) {
- const char* joiner = " ";
- // Be explicit in SELECT order to match up with UnpackEntry.
- for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
- output->append(joiner);
- output->append(ColumnName(i));
- joiner = ", ";
- }
-}
-
-} // namespace
-
-///////////////////////////////////////////////////////////////////////////////
-// DirectoryBackingStore implementation.
-
-DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
- : db_(new sql::Connection()),
- dir_name_(dir_name),
- needs_column_refresh_(false) {
- db_->set_histogram_tag("SyncDirectory");
- db_->set_page_size(4096);
- db_->set_cache_size(32);
-}
-
-DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
- sql::Connection* db)
- : db_(db),
- dir_name_(dir_name),
- needs_column_refresh_(false) {
-}
-
-DirectoryBackingStore::~DirectoryBackingStore() {
-}
-
-bool DirectoryBackingStore::DeleteEntries(EntryTable from,
- const MetahandleSet& handles) {
- if (handles.empty())
- return true;
-
- sql::Statement statement;
- // Call GetCachedStatement() separately to get different statements for
- // different tables.
- switch (from) {
- case METAS_TABLE:
- statement.Assign(db_->GetCachedStatement(
- SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
- break;
- case DELETE_JOURNAL_TABLE:
- statement.Assign(db_->GetCachedStatement(
- SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
- break;
- }
-
- for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
- ++i) {
- statement.BindInt64(0, *i);
- if (!statement.Run())
- return false;
- statement.Reset(true);
- }
- return true;
-}
-
-bool DirectoryBackingStore::SaveChanges(
- const Directory::SaveChangesSnapshot& snapshot) {
- DCHECK(CalledOnValidThread());
- DCHECK(db_->is_open());
-
- // Back out early if there is nothing to write.
- bool save_info =
- (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
- if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
- snapshot.delete_journals.empty() &&
- snapshot.delete_journals_to_purge.empty() && !save_info) {
- return true;
- }
-
- sql::Transaction transaction(db_.get());
- if (!transaction.Begin())
- return false;
-
- PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- DCHECK((*i)->is_dirty());
- if (!SaveEntryToDB(&save_meta_statment_, **i))
- return false;
- }
-
- if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
- return false;
-
- PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
- &save_delete_journal_statment_);
- for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
- i != snapshot.delete_journals.end(); ++i) {
- if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
- return false;
- }
-
- if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
- return false;
-
- if (save_info) {
- const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
- sql::Statement s1(db_->GetCachedStatement(
- SQL_FROM_HERE,
- "UPDATE share_info "
- "SET store_birthday = ?, "
- "next_id = ?, "
- "bag_of_chips = ?"));
- s1.BindString(0, info.store_birthday);
- s1.BindInt64(1, info.next_id);
- s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());
-
- if (!s1.Run())
- return false;
- DCHECK_EQ(db_->GetLastChangeCount(), 1);
-
- sql::Statement s2(db_->GetCachedStatement(
- SQL_FROM_HERE,
- "INSERT OR REPLACE "
- "INTO models (model_id, progress_marker, transaction_version) "
- "VALUES (?, ?, ?)"));
-
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- ModelType type = iter.Get();
- // We persist not ModelType but rather a protobuf-derived ID.
- string model_id = ModelTypeEnumToModelId(type);
- string progress_marker;
- info.download_progress[type].SerializeToString(&progress_marker);
- s2.BindBlob(0, model_id.data(), model_id.length());
- s2.BindBlob(1, progress_marker.data(), progress_marker.length());
- s2.BindInt64(2, info.transaction_version[type]);
- if (!s2.Run())
- return false;
- DCHECK_EQ(db_->GetLastChangeCount(), 1);
- s2.Reset(true);
- }
- }
-
- return transaction.Commit();
-}
-
-bool DirectoryBackingStore::InitializeTables() {
- sql::Transaction transaction(db_.get());
- if (!transaction.Begin())
- return false;
-
- int version_on_disk = GetVersion();
-
- // Upgrade from version 67. Version 67 was widely distributed as the original
- // Bookmark Sync release. Version 68 removed unique naming.
- if (version_on_disk == 67) {
- if (MigrateVersion67To68())
- version_on_disk = 68;
- }
- // Version 69 introduced additional datatypes.
- if (version_on_disk == 68) {
- if (MigrateVersion68To69())
- version_on_disk = 69;
- }
-
- if (version_on_disk == 69) {
- if (MigrateVersion69To70())
- version_on_disk = 70;
- }
-
- // Version 71 changed the sync progress information to be per-datatype.
- if (version_on_disk == 70) {
- if (MigrateVersion70To71())
- version_on_disk = 71;
- }
-
- // Version 72 removed extended attributes, a legacy way to do extensible
- // key/value information, stored in their own table.
- if (version_on_disk == 71) {
- if (MigrateVersion71To72())
- version_on_disk = 72;
- }
-
- // Version 73 added a field for notification state.
- if (version_on_disk == 72) {
- if (MigrateVersion72To73())
- version_on_disk = 73;
- }
-
- // Version 74 added state for the autofill migration.
- if (version_on_disk == 73) {
- if (MigrateVersion73To74())
- version_on_disk = 74;
- }
-
- // Version 75 migrated from int64-based timestamps to per-datatype tokens.
- if (version_on_disk == 74) {
- if (MigrateVersion74To75())
- version_on_disk = 75;
- }
-
- // Version 76 removed all (5) autofill migration related columns.
- if (version_on_disk == 75) {
- if (MigrateVersion75To76())
- version_on_disk = 76;
- }
-
- // Version 77 standardized all time fields to ms since the Unix
- // epoch.
- if (version_on_disk == 76) {
- if (MigrateVersion76To77())
- version_on_disk = 77;
- }
-
- // Version 78 added the column base_server_specifics to the metas table.
- if (version_on_disk == 77) {
- if (MigrateVersion77To78())
- version_on_disk = 78;
- }
-
- // Version 79 migration is a one-time fix for some users in a bad state.
- if (version_on_disk == 78) {
- if (MigrateVersion78To79())
- version_on_disk = 79;
- }
-
- // Version 80 migration is adding the bag_of_chips column.
- if (version_on_disk == 79) {
- if (MigrateVersion79To80())
- version_on_disk = 80;
- }
-
- // Version 81 replaces the int64 server_position_in_parent_field
- // with a blob server_ordinal_in_parent field.
- if (version_on_disk == 80) {
- if (MigrateVersion80To81())
- version_on_disk = 81;
- }
-
- // Version 82 migration added transaction_version column per data type.
- if (version_on_disk == 81) {
- if (MigrateVersion81To82())
- version_on_disk = 82;
- }
-
- // Version 83 migration added transaction_version column per sync entry.
- if (version_on_disk == 82) {
- if (MigrateVersion82To83())
- version_on_disk = 83;
- }
-
- // Version 84 migration added deleted_metas table.
- if (version_on_disk == 83) {
- if (MigrateVersion83To84())
- version_on_disk = 84;
- }
-
- // Version 85 migration removes the initial_sync_ended bits.
- if (version_on_disk == 84) {
- if (MigrateVersion84To85())
- version_on_disk = 85;
- }
-
- // Version 86 migration converts bookmarks to the unique positioning system.
- // It also introduces a new field to store a unique ID for each bookmark.
- if (version_on_disk == 85) {
- if (MigrateVersion85To86())
- version_on_disk = 86;
- }
-
- // If one of the migrations requested it, drop columns that aren't current.
- // It's only safe to do this after migrating all the way to the current
- // version.
- if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
- if (!RefreshColumns())
- version_on_disk = 0;
- }
-
- // A final, alternative catch-all migration to simply re-sync everything.
- if (version_on_disk != kCurrentDBVersion) {
- if (version_on_disk > kCurrentDBVersion)
- return false;
-
- // Fallback (re-sync everything) migration path.
- DVLOG(1) << "Old/null sync database, version " << version_on_disk;
- // Delete the existing database (if any), and create a fresh one.
- DropAllTables();
- if (!CreateTables())
- return false;
- }
-
- sql::Statement s(db_->GetUniqueStatement(
- "SELECT db_create_version, db_create_time FROM share_info"));
- if (!s.Step())
- return false;
- string db_create_version = s.ColumnString(0);
- int db_create_time = s.ColumnInt(1);
- DVLOG(1) << "DB created at " << db_create_time << " by version " <<
- db_create_version;
-
- return transaction.Commit();
-}
-
-// This function drops unused columns by creating a new table that contains only
-// the currently used columns then copying all rows from the old tables into
-// this new one. The tables are then rearranged so the new replaces the old.
-bool DirectoryBackingStore::RefreshColumns() {
- DCHECK(needs_column_refresh_);
-
- // Create a new table named temp_metas.
- SafeDropTable("temp_metas");
- if (!CreateMetasTable(true))
- return false;
-
- // Populate temp_metas from metas.
- //
- // At this point, the metas table may contain columns belonging to obsolete
- // schema versions. This statement explicitly lists only the columns that
- // belong to the current schema version, so the obsolete columns will be
- // effectively dropped once we rename temp_metas over top of metas.
- std::string query = "INSERT INTO temp_metas (";
- AppendColumnList(&query);
- query.append(") SELECT ");
- AppendColumnList(&query);
- query.append(" FROM metas");
- if (!db_->Execute(query.c_str()))
- return false;
-
- // Drop metas.
- SafeDropTable("metas");
-
- // Rename temp_metas -> metas.
- if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
- return false;
-
- // Repeat the process for share_info.
- SafeDropTable("temp_share_info");
- if (!CreateShareInfoTable(true))
- return false;
-
- // TODO(rlarocque, 124140): Remove notification_state.
- if (!db_->Execute(
- "INSERT INTO temp_share_info (id, name, store_birthday, "
- "db_create_version, db_create_time, next_id, cache_guid,"
- "notification_state, bag_of_chips) "
- "SELECT id, name, store_birthday, db_create_version, "
- "db_create_time, next_id, cache_guid, notification_state, "
- "bag_of_chips "
- "FROM share_info"))
- return false;
-
- SafeDropTable("share_info");
- if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
- return false;
-
- needs_column_refresh_ = false;
- return true;
-}
-
-bool DirectoryBackingStore::LoadEntries(
- Directory::MetahandlesMap* handles_map) {
- string select;
- select.reserve(kUpdateStatementBufferSize);
- select.append("SELECT ");
- AppendColumnList(&select);
- select.append(" FROM metas");
-
- sql::Statement s(db_->GetUniqueStatement(select.c_str()));
-
- while (s.Step()) {
- scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
- // A null kernel is evidence of external data corruption.
- if (!kernel)
- return false;
-
- int64 handle = kernel->ref(META_HANDLE);
- (*handles_map)[handle] = kernel.release();
- }
- return s.Succeeded();
-}
-
-bool DirectoryBackingStore::LoadDeleteJournals(
- JournalIndex* delete_journals) {
- string select;
- select.reserve(kUpdateStatementBufferSize);
- select.append("SELECT ");
- AppendColumnList(&select);
- select.append(" FROM deleted_metas");
-
- sql::Statement s(db_->GetUniqueStatement(select.c_str()));
-
- while (s.Step()) {
- scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
- // A null kernel is evidence of external data corruption.
- if (!kernel)
- return false;
- delete_journals->insert(kernel.release());
- }
- return s.Succeeded();
-}
-
-bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
- {
- sql::Statement s(
- db_->GetUniqueStatement(
- "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
- "FROM share_info"));
- if (!s.Step())
- return false;
-
- info->kernel_info.store_birthday = s.ColumnString(0);
- info->kernel_info.next_id = s.ColumnInt64(1);
- info->cache_guid = s.ColumnString(2);
- s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));
-
- // Verify there was only one row returned.
- DCHECK(!s.Step());
- DCHECK(s.Succeeded());
- }
-
- {
- sql::Statement s(
- db_->GetUniqueStatement(
- "SELECT model_id, progress_marker, "
- "transaction_version FROM models"));
-
- while (s.Step()) {
- ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
- s.ColumnByteLength(0));
- if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
- info->kernel_info.download_progress[type].ParseFromArray(
- s.ColumnBlob(1), s.ColumnByteLength(1));
- info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
- }
- }
- if (!s.Succeeded())
- return false;
- }
- {
- sql::Statement s(
- db_->GetUniqueStatement(
- "SELECT MAX(metahandle) FROM metas"));
- if (!s.Step())
- return false;
-
- info->max_metahandle = s.ColumnInt64(0);
-
- // Verify only one row was returned.
- DCHECK(!s.Step());
- DCHECK(s.Succeeded());
- }
- return true;
-}
-
-/* static */
-bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
- const EntryKernel& entry) {
- save_statement->Reset(true);
- BindFields(entry, save_statement);
- return save_statement->Run();
-}
-
-bool DirectoryBackingStore::DropDeletedEntries() {
- if (!db_->Execute("DELETE FROM metas "
- "WHERE is_del > 0 "
- "AND is_unsynced < 1 "
- "AND is_unapplied_update < 1")) {
- return false;
- }
- if (!db_->Execute("DELETE FROM metas "
- "WHERE is_del > 0 "
- "AND id LIKE 'c%'")) {
- return false;
- }
- return true;
-}
-
-bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
- string query = "DROP TABLE IF EXISTS ";
- query.append(table_name);
- return db_->Execute(query.c_str());
-}
-
-void DirectoryBackingStore::DropAllTables() {
- SafeDropTable("metas");
- SafeDropTable("temp_metas");
- SafeDropTable("share_info");
- SafeDropTable("temp_share_info");
- SafeDropTable("share_version");
- SafeDropTable("extended_attributes");
- SafeDropTable("models");
- SafeDropTable("temp_models");
- needs_column_refresh_ = false;
-}
-
-// static
-ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
- const void* data, int size) {
- sync_pb::EntitySpecifics specifics;
- if (!specifics.ParseFromArray(data, size))
- return UNSPECIFIED;
- return GetModelTypeFromSpecifics(specifics);
-}
-
-// static
-string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
- sync_pb::EntitySpecifics specifics;
- AddDefaultFieldValue(model_type, &specifics);
- return specifics.SerializeAsString();
-}
-
-// static
-std::string DirectoryBackingStore::GenerateCacheGUID() {
- // Generate a GUID with 128 bits of randomness.
- const int kGuidBytes = 128 / 8;
- std::string guid;
- base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
- return guid;
-}
-
-bool DirectoryBackingStore::MigrateToSpecifics(
- const char* old_columns,
- const char* specifics_column,
- void (*handler_function)(sql::Statement* old_value_query,
- int old_value_column,
- sync_pb::EntitySpecifics* mutable_new_value)) {
- std::string query_sql = base::StringPrintf(
- "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
- std::string update_sql = base::StringPrintf(
- "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);
-
- sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
- sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));
-
- while (query.Step()) {
- int64 metahandle = query.ColumnInt64(0);
- std::string new_value_bytes;
- query.ColumnBlobAsString(1, &new_value_bytes);
- sync_pb::EntitySpecifics new_value;
- new_value.ParseFromString(new_value_bytes);
- handler_function(&query, 2, &new_value);
- new_value.SerializeToString(&new_value_bytes);
-
- update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
- update.BindInt64(1, metahandle);
- if (!update.Run())
- return false;
- update.Reset(true);
- }
- return query.Succeeded();
-}
-
-bool DirectoryBackingStore::SetVersion(int version) {
- sql::Statement s(db_->GetCachedStatement(
- SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
- s.BindInt(0, version);
-
- return s.Run();
-}
-
-int DirectoryBackingStore::GetVersion() {
- if (!db_->DoesTableExist("share_version"))
- return 0;
-
- sql::Statement statement(db_->GetUniqueStatement(
- "SELECT data FROM share_version"));
- if (statement.Step()) {
- return statement.ColumnInt(0);
- } else {
- return 0;
- }
-}
-
-bool DirectoryBackingStore::MigrateVersion67To68() {
- // This change simply removed three columns:
- // string NAME
- // string UNSANITIZED_NAME
- // string SERVER_NAME
- // No data migration is necessary, but we should do a column refresh.
- SetVersion(68);
- needs_column_refresh_ = true;
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion69To70() {
- // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
- SetVersion(70);
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
- return false;
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
- return false;
- needs_column_refresh_ = true;
-
- if (!db_->Execute(
- "UPDATE metas SET unique_server_tag = singleton_tag"))
- return false;
-
- return true;
-}
-
-namespace {
-
-// Callback passed to MigrateToSpecifics for the v68->v69 migration. See
-// MigrateVersion68To69().
-void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
- int old_value_column,
- sync_pb::EntitySpecifics* mutable_new_value) {
- // Extract data from the column trio we expect.
- bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
- std::string old_url = old_value_query->ColumnString(old_value_column + 1);
- std::string old_favicon;
- old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
- bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);
-
- if (old_is_bookmark_object) {
- sync_pb::BookmarkSpecifics* bookmark_data =
- mutable_new_value->mutable_bookmark();
- if (!old_is_dir) {
- bookmark_data->set_url(old_url);
- bookmark_data->set_favicon(old_favicon);
- }
- }
-}
-
-} // namespace
-
-bool DirectoryBackingStore::MigrateVersion68To69() {
- // In Version 68, there were columns on table 'metas':
- // string BOOKMARK_URL
- // string SERVER_BOOKMARK_URL
- // blob BOOKMARK_FAVICON
- // blob SERVER_BOOKMARK_FAVICON
- // In version 69, these columns went away in favor of storing
- // a serialized EntrySpecifics protobuf in the columns:
- // protobuf blob SPECIFICS
- // protobuf blob SERVER_SPECIFICS
- // For bookmarks, EntrySpecifics is extended as per
- // bookmark_specifics.proto. This migration converts bookmarks from the
- // former scheme to the latter scheme.
-
- // First, add the two new columns to the schema.
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN specifics blob"))
- return false;
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN server_specifics blob"))
- return false;
-
- // Next, fold data from the old columns into the new protobuf columns.
- if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
- "bookmark_favicon, is_dir"),
- "specifics",
- &EncodeBookmarkURLAndFavicon)) {
- return false;
- }
- if (!MigrateToSpecifics(("server_is_bookmark_object, "
- "server_bookmark_url, "
- "server_bookmark_favicon, "
- "server_is_dir"),
- "server_specifics",
- &EncodeBookmarkURLAndFavicon)) {
- return false;
- }
-
- // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
- // ModelType: it shouldn't have BookmarkSpecifics.
- if (!db_->Execute(
- "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
- "singleton_tag IN ('google_chrome')"))
- return false;
-
- SetVersion(69);
- needs_column_refresh_ = true; // Trigger deletion of old columns.
- return true;
-}
-
-// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
-// were removed from the share_info table. They were replaced by
-// the 'models' table, which has these values on a per-datatype basis.
-bool DirectoryBackingStore::MigrateVersion70To71() {
- if (!CreateV71ModelsTable())
- return false;
-
- // Move data from the old share_info columns to the new models table.
- {
- sql::Statement fetch(db_->GetUniqueStatement(
- "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
- if (!fetch.Step())
- return false;
-
- int64 last_sync_timestamp = fetch.ColumnInt64(0);
- bool initial_sync_ended = fetch.ColumnBool(1);
-
- // Verify there were no additional rows returned.
- DCHECK(!fetch.Step());
- DCHECK(fetch.Succeeded());
-
- sql::Statement update(db_->GetUniqueStatement(
- "INSERT INTO models (model_id, "
- "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
- string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
- update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
- update.BindInt64(1, last_sync_timestamp);
- update.BindBool(2, initial_sync_ended);
-
- if (!update.Run())
- return false;
- }
-
- // Drop the columns from the old share_info table via a temp table.
- const bool kCreateAsTempShareInfo = true;
-
- if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
- return false;
- if (!db_->Execute(
- "INSERT INTO temp_share_info (id, name, store_birthday, "
- "db_create_version, db_create_time, next_id, cache_guid) "
- "SELECT id, name, store_birthday, db_create_version, "
- "db_create_time, next_id, cache_guid FROM share_info"))
- return false;
- SafeDropTable("share_info");
- if (!db_->Execute(
- "ALTER TABLE temp_share_info RENAME TO share_info"))
- return false;
- SetVersion(71);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion71To72() {
- // Version 72 removed a table 'extended_attributes', whose
- // contents didn't matter.
- SafeDropTable("extended_attributes");
- SetVersion(72);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion72To73() {
- // Version 73 added one column to the table 'share_info': notification_state
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
- return false;
- SetVersion(73);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion73To74() {
- // Version 74 added the following columns to the table 'share_info':
- // autofill_migration_state
- // bookmarks_added_during_autofill_migration
- // autofill_migration_time
- // autofill_entries_added_during_migration
- // autofill_profiles_added_during_migration
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "autofill_migration_state INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "bookmarks_added_during_autofill_migration "
- "INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
- "INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "autofill_entries_added_during_migration "
- "INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "autofill_profiles_added_during_migration "
- "INT default 0"))
- return false;
-
- SetVersion(74);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion74To75() {
- // In version 74, there was a table 'models':
- // blob model_id (entity specifics, primary key)
- // int last_download_timestamp
- // boolean initial_sync_ended
- // In version 75, we deprecated the integer-valued last_download_timestamp,
- // using insted a protobuf-valued progress_marker field:
- // blob progress_marker
- // The progress_marker values are initialized from the value of
- // last_download_timestamp, thereby preserving the download state.
-
- // Move aside the old table and create a new empty one at the current schema.
- if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
- return false;
- if (!CreateV75ModelsTable())
- return false;
-
- sql::Statement query(db_->GetUniqueStatement(
- "SELECT model_id, last_download_timestamp, initial_sync_ended "
- "FROM temp_models"));
-
- sql::Statement update(db_->GetUniqueStatement(
- "INSERT INTO models (model_id, "
- "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));
-
- while (query.Step()) {
- ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
- query.ColumnByteLength(0));
- if (type != UNSPECIFIED) {
- // Set the |timestamp_token_for_migration| on a new
- // DataTypeProgressMarker, using the old value of last_download_timestamp.
- // The server will turn this into a real token on our behalf the next
- // time we check for updates.
- sync_pb::DataTypeProgressMarker progress_marker;
- progress_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(type));
- progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
- std::string progress_blob;
- progress_marker.SerializeToString(&progress_blob);
-
- update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
- update.BindBlob(1, progress_blob.data(), progress_blob.length());
- update.BindBool(2, query.ColumnBool(2));
- if (!update.Run())
- return false;
- update.Reset(true);
- }
- }
- if (!query.Succeeded())
- return false;
-
- // Drop the old table.
- SafeDropTable("temp_models");
-
- SetVersion(75);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion75To76() {
- // This change removed five columns:
- // autofill_migration_state
- // bookmarks_added_during_autofill_migration
- // autofill_migration_time
- // autofill_entries_added_during_migration
- // autofill_profiles_added_during_migration
- // No data migration is necessary, but we should do a column refresh.
- SetVersion(76);
- needs_column_refresh_ = true;
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion76To77() {
- // This change changes the format of stored timestamps to ms since
- // the Unix epoch.
-#if defined(OS_WIN)
-// On Windows, we used to store timestamps in FILETIME format (100s of
-// ns since Jan 1, 1601). Magic numbers taken from
-// http://stackoverflow.com/questions/5398557/
-// java-library-for-dealing-with-win32-filetime
-// .
-#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
-#else
-// On other platforms, we used to store timestamps in time_t format (s
-// since the Unix epoch).
-#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
-#endif
- sql::Statement update_timestamps(db_->GetUniqueStatement(
- "UPDATE metas SET "
- TO_UNIX_TIME_MS(mtime) ", "
- TO_UNIX_TIME_MS(server_mtime) ", "
- TO_UNIX_TIME_MS(ctime) ", "
- TO_UNIX_TIME_MS(server_ctime)));
-#undef TO_UNIX_TIME_MS
- if (!update_timestamps.Run())
- return false;
- SetVersion(77);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion77To78() {
- // Version 78 added one column to table 'metas': base_server_specifics.
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
- return false;
- }
- SetVersion(78);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion78To79() {
- // Some users are stuck with a DB that causes them to reuse existing IDs. We
- // perform this one-time fixup on all users to help the few that are stuck.
- // See crbug.com/142987 for details.
- if (!db_->Execute(
- "UPDATE share_info SET next_id = next_id - 65536")) {
- return false;
- }
- SetVersion(79);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion79To80() {
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
- return false;
- sql::Statement update(db_->GetUniqueStatement(
- "UPDATE share_info SET bag_of_chips = ?"));
- // An empty message is serialized to an empty string.
- update.BindBlob(0, NULL, 0);
- if (!update.Run())
- return false;
- SetVersion(80);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion80To81() {
- if(!db_->Execute(
- "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
- return false;
-
- sql::Statement get_positions(db_->GetUniqueStatement(
- "SELECT metahandle, server_position_in_parent FROM metas"));
-
- sql::Statement put_ordinals(db_->GetUniqueStatement(
- "UPDATE metas SET server_ordinal_in_parent = ?"
- "WHERE metahandle = ?"));
-
- while(get_positions.Step()) {
- int64 metahandle = get_positions.ColumnInt64(0);
- int64 position = get_positions.ColumnInt64(1);
-
- const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
- put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
- put_ordinals.BindInt64(1, metahandle);
-
- if(!put_ordinals.Run())
- return false;
- put_ordinals.Reset(true);
- }
-
- SetVersion(81);
- needs_column_refresh_ = true;
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion81To82() {
- if (!db_->Execute(
- "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
- return false;
- sql::Statement update(db_->GetUniqueStatement(
- "UPDATE models SET transaction_version = 0"));
- if (!update.Run())
- return false;
- SetVersion(82);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion82To83() {
- // Version 83 added transaction_version on sync node.
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
- return false;
- sql::Statement update(db_->GetUniqueStatement(
- "UPDATE metas SET transaction_version = 0"));
- if (!update.Run())
- return false;
- SetVersion(83);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion83To84() {
- // Version 84 added deleted_metas table to store deleted metas until we know
- // for sure that the deletions are persisted in native models.
- string query = "CREATE TABLE deleted_metas ";
- query.append(ComposeCreateTableColumnSpecs());
- if (!db_->Execute(query.c_str()))
- return false;
- SetVersion(84);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion84To85() {
- // Version 85 removes the initial_sync_ended flag.
- if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
- return false;
- if (!CreateModelsTable())
- return false;
- if (!db_->Execute("INSERT INTO models SELECT "
- "model_id, progress_marker, transaction_version "
- "FROM temp_models")) {
- return false;
- }
- SafeDropTable("temp_models");
-
- SetVersion(85);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion85To86() {
- // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
- // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
- // and SERVER_UNIQUE_POSITION.
- if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
- "server_unique_position BLOB")) {
- return false;
- }
- if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
- "unique_position BLOB")) {
- return false;
- }
- if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
- "unique_bookmark_tag VARCHAR")) {
- return false;
- }
-
- // Fetch the cache_guid from the DB, because we don't otherwise have access to
- // it from here.
- sql::Statement get_cache_guid(db_->GetUniqueStatement(
- "SELECT cache_guid FROM share_info"));
- if (!get_cache_guid.Step()) {
- return false;
- }
- std::string cache_guid = get_cache_guid.ColumnString(0);
- DCHECK(!get_cache_guid.Step());
- DCHECK(get_cache_guid.Succeeded());
-
- sql::Statement get(db_->GetUniqueStatement(
- "SELECT "
- " metahandle, "
- " id, "
- " specifics, "
- " is_dir, "
- " unique_server_tag, "
- " server_ordinal_in_parent "
- "FROM metas"));
-
- // Note that we set both the local and server position based on the server
- // position. We wll lose any unsynced local position changes. Unfortunately,
- // there's nothing we can do to avoid that. The NEXT_ID / PREV_ID values
- // can't be translated into a UNIQUE_POSTION in a reliable way.
- sql::Statement put(db_->GetCachedStatement(
- SQL_FROM_HERE,
- "UPDATE metas SET"
- " server_unique_position = ?,"
- " unique_position = ?,"
- " unique_bookmark_tag = ?"
- "WHERE metahandle = ?"));
-
- while (get.Step()) {
- int64 metahandle = get.ColumnInt64(0);
-
- std::string id_string;
- get.ColumnBlobAsString(1, &id_string);
-
- sync_pb::EntitySpecifics specifics;
- specifics.ParseFromArray(
- get.ColumnBlob(2), get.ColumnByteLength(2));
-
- bool is_dir = get.ColumnBool(3);
-
- std::string server_unique_tag = get.ColumnString(4);
-
- std::string ordinal_string;
- get.ColumnBlobAsString(5, &ordinal_string);
- NodeOrdinal ordinal(ordinal_string);
-
-
- std::string unique_bookmark_tag;
-
- // We only maintain positions for bookmarks that are not server-defined
- // top-level folders.
- UniquePosition position;
- if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
- && !(is_dir && !server_unique_tag.empty())) {
- if (id_string.at(0) == 'c') {
- // We found an uncommitted item. This is rare, but fortunate. This
- // means we can set the bookmark tag according to the originator client
- // item ID and originator cache guid, because (unlike the other case) we
- // know that this client is the originator.
- unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
- cache_guid,
- id_string.substr(1));
- } else {
- // If we've already committed the item, then we don't know who the
- // originator was. We do not have access to the originator client item
- // ID and originator cache guid at this point.
- //
- // We will base our hash entirely on the server ID instead. This is
- // incorrect, but at least all clients that undergo this migration step
- // will be incorrect in the same way.
- //
- // To get everyone back into a synced state, we will update the bookmark
- // tag according to the originator_cache_guid and originator_item_id
- // when we see updates for this item. That should ensure that commonly
- // modified items will end up with the proper tag values eventually.
- unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
- std::string(), // cache_guid left intentionally blank.
- id_string.substr(1));
- }
-
- int64 int_position = NodeOrdinalToInt64(ordinal);
- position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
- } else {
- // Leave bookmark_tag and position at their default (invalid) values.
- }
-
- std::string position_blob;
- position.SerializeToString(&position_blob);
- put.BindBlob(0, position_blob.data(), position_blob.length());
- put.BindBlob(1, position_blob.data(), position_blob.length());
- put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
- put.BindInt64(3, metahandle);
-
- if (!put.Run())
- return false;
- put.Reset(true);
- }
-
- SetVersion(86);
- needs_column_refresh_ = true;
- return true;
-}
-
-bool DirectoryBackingStore::CreateTables() {
- DVLOG(1) << "First run, creating tables";
- // Create two little tables share_version and share_info
- if (!db_->Execute(
- "CREATE TABLE share_version ("
- "id VARCHAR(128) primary key, data INT)")) {
- return false;
- }
-
- {
- sql::Statement s(db_->GetUniqueStatement(
- "INSERT INTO share_version VALUES(?, ?)"));
- s.BindString(0, dir_name_);
- s.BindInt(1, kCurrentDBVersion);
-
- if (!s.Run())
- return false;
- }
-
- const bool kCreateAsTempShareInfo = false;
- if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
- return false;
- }
-
- {
- sql::Statement s(db_->GetUniqueStatement(
- "INSERT INTO share_info VALUES"
- "(?, " // id
- "?, " // name
- "?, " // store_birthday
- "?, " // db_create_version
- "?, " // db_create_time
- "-2, " // next_id
- "?, " // cache_guid
- // TODO(rlarocque, 124140): Remove notification_state field.
- "?, " // notification_state
- "?);")); // bag_of_chips
- s.BindString(0, dir_name_); // id
- s.BindString(1, dir_name_); // name
- s.BindString(2, std::string()); // store_birthday
- // TODO(akalin): Remove this unused db_create_version field. (Or
- // actually use it for something.) http://crbug.com/118356
- s.BindString(3, "Unknown"); // db_create_version
- s.BindInt(4, static_cast<int32>(time(0))); // db_create_time
- s.BindString(5, GenerateCacheGUID()); // cache_guid
- // TODO(rlarocque, 124140): Remove this unused notification-state field.
- s.BindBlob(6, NULL, 0); // notification_state
- s.BindBlob(7, NULL, 0); // bag_of_chips
- if (!s.Run())
- return false;
- }
-
- if (!CreateModelsTable())
- return false;
-
- // Create the big metas table.
- if (!CreateMetasTable(false))
- return false;
-
- {
- // Insert the entry for the root into the metas table.
- const int64 now = TimeToProtoTime(base::Time::Now());
- sql::Statement s(db_->GetUniqueStatement(
- "INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime ) "
- "VALUES ( \"r\", 1, 1, ?, ? )"));
- s.BindInt64(0, now);
- s.BindInt64(1, now);
-
- if (!s.Run())
- return false;
- }
-
- return true;
-}
-
-bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
- string query = "CREATE TABLE ";
- query.append(is_temporary ? "temp_metas" : "metas");
- query.append(ComposeCreateTableColumnSpecs());
- if (!db_->Execute(query.c_str()))
- return false;
-
- // Create a deleted_metas table to save copies of deleted metas until the
- // deletions are persisted. For simplicity, don't try to migrate existing
- // data because it's rarely used.
- SafeDropTable("deleted_metas");
- query = "CREATE TABLE deleted_metas ";
- query.append(ComposeCreateTableColumnSpecs());
- return db_->Execute(query.c_str());
-}
-
-bool DirectoryBackingStore::CreateV71ModelsTable() {
- // This is an old schema for the Models table, used from versions 71 to 74.
- return db_->Execute(
- "CREATE TABLE models ("
- "model_id BLOB primary key, "
- "last_download_timestamp INT, "
- // Gets set if the syncer ever gets updates from the
- // server and the server returns 0. Lets us detect the
- // end of the initial sync.
- "initial_sync_ended BOOLEAN default 0)");
-}
-
-bool DirectoryBackingStore::CreateV75ModelsTable() {
- // This is an old schema for the Models table, used from versions 75 to 80.
- return db_->Execute(
- "CREATE TABLE models ("
- "model_id BLOB primary key, "
- "progress_marker BLOB, "
- // Gets set if the syncer ever gets updates from the
- // server and the server returns 0. Lets us detect the
- // end of the initial sync.
- "initial_sync_ended BOOLEAN default 0)");
-}
-
-bool DirectoryBackingStore::CreateModelsTable() {
- // This is the current schema for the Models table, from version 81
- // onward. If you change the schema, you'll probably want to double-check
- // the use of this function in the v84-v85 migration.
- return db_->Execute(
- "CREATE TABLE models ("
- "model_id BLOB primary key, "
- "progress_marker BLOB, "
- // Gets set if the syncer ever gets updates from the
- // server and the server returns 0. Lets us detect the
- // end of the initial sync.
- "transaction_version BIGINT default 0)");
-}
-
-bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
- const char* name = is_temporary ? "temp_share_info" : "share_info";
- string query = "CREATE TABLE ";
- query.append(name);
- // This is the current schema for the ShareInfo table, from version 76
- // onward.
- query.append(" ("
- "id TEXT primary key, "
- "name TEXT, "
- "store_birthday TEXT, "
- "db_create_version TEXT, "
- "db_create_time INT, "
- "next_id INT default -2, "
- "cache_guid TEXT, "
- // TODO(rlarocque, 124140): Remove notification_state field.
- "notification_state BLOB, "
- "bag_of_chips BLOB"
- ")");
- return db_->Execute(query.c_str());
-}
-
-bool DirectoryBackingStore::CreateShareInfoTableVersion71(
- bool is_temporary) {
- const char* name = is_temporary ? "temp_share_info" : "share_info";
- string query = "CREATE TABLE ";
- query.append(name);
- // This is the schema for the ShareInfo table used from versions 71 to 72.
- query.append(" ("
- "id TEXT primary key, "
- "name TEXT, "
- "store_birthday TEXT, "
- "db_create_version TEXT, "
- "db_create_time INT, "
- "next_id INT default -2, "
- "cache_guid TEXT )");
- return db_->Execute(query.c_str());
-}
-
-// This function checks to see if the given list of Metahandles has any nodes
-// whose PARENT_ID values refer to ID values that do not actually exist.
-// Returns true on success.
-bool DirectoryBackingStore::VerifyReferenceIntegrity(
- const Directory::MetahandlesMap* handles_map) {
- TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
- using namespace syncable;
- typedef base::hash_set<std::string> IdsSet;
-
- IdsSet ids_set;
- bool is_ok = true;
-
- for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
- it != handles_map->end(); ++it) {
- EntryKernel* entry = it->second;
- bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
- is_ok = is_ok && !is_duplicate_id;
- }
-
- IdsSet::iterator end = ids_set.end();
- for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
- it != handles_map->end(); ++it) {
- EntryKernel* entry = it->second;
- bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
- if (!parent_exists) {
- return false;
- }
- }
- return is_ok;
-}
-
-void DirectoryBackingStore::PrepareSaveEntryStatement(
- EntryTable table, sql::Statement* save_statement) {
- if (save_statement->is_valid())
- return;
-
- string query;
- query.reserve(kUpdateStatementBufferSize);
- switch (table) {
- case METAS_TABLE:
- query.append("INSERT OR REPLACE INTO metas ");
- break;
- case DELETE_JOURNAL_TABLE:
- query.append("INSERT OR REPLACE INTO deleted_metas ");
- break;
- }
-
- string values;
- values.reserve(kUpdateStatementBufferSize);
- values.append(" VALUES ");
- const char* separator = "( ";
- int i = 0;
- for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
- query.append(separator);
- values.append(separator);
- separator = ", ";
- query.append(ColumnName(i));
- values.append("?");
- }
- query.append(" ) ");
- values.append(" )");
- query.append(values);
- save_statement->Assign(db_->GetUniqueStatement(
- base::StringPrintf(query.c_str(), "metas").c_str()));
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/directory_backing_store.h b/chromium/sync/syncable/directory_backing_store.h
deleted file mode 100644
index 995a014a588..00000000000
--- a/chromium/sync/syncable/directory_backing_store.h
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
-#define SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "sql/connection.h"
-#include "sql/statement.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/dir_open_result.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/metahandle_set.h"
-
-namespace sync_pb {
-class EntitySpecifics;
-}
-
-namespace syncer {
-namespace syncable {
-
-SYNC_EXPORT_PRIVATE extern const int32 kCurrentDBVersion;
-
-struct ColumnSpec;
-
-// Interface that provides persistence for a syncable::Directory object. You can
-// load all the persisted data to prime a syncable::Directory on startup by
-// invoking Load. The only other thing you (or more correctly, a Directory) can
-// do here is save any changes that have occurred since calling Load, which can
-// be done periodically as often as desired.
-//
-// The DirectoryBackingStore will own an sqlite lock on its database for most of
-// its lifetime. You must not have two DirectoryBackingStore objects accessing
-// the database simultaneously. Because the lock exists at the database level,
-// not even two separate browser instances would be able to acquire it
-// simultaneously.
-//
-// This class is abstract so that we can extend it in interesting ways for use
-// in tests. The concrete class used in non-test scenarios is
-// OnDiskDirectoryBackingStore.
-class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
- public:
- explicit DirectoryBackingStore(const std::string& dir_name);
- virtual ~DirectoryBackingStore();
-
- // Loads and drops all currently persisted meta entries into |handles_map|
- // and loads appropriate persisted kernel info into |info_bucket|.
- //
- // This function can perform some cleanup tasks behind the scenes. It will
- // clean up unused entries from the database and migrate to the latest
- // database version. The caller can safely ignore these details.
- //
- // NOTE: On success (return value of OPENED), the buckets are populated with
- // newly allocated items, meaning ownership is bestowed upon the caller.
- virtual DirOpenResult Load(Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) = 0;
-
- // Updates the on-disk store with the input |snapshot| as a database
- // transaction. Does NOT open any syncable transactions as this would cause
- // opening transactions elsewhere to block on synchronous I/O.
- // DO NOT CALL THIS FROM MORE THAN ONE THREAD EVER. Also, whichever thread
- // calls SaveChanges *must* be the thread that owns/destroys |this|.
- virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot);
-
- protected:
- // For test classes.
- DirectoryBackingStore(const std::string& dir_name,
- sql::Connection* connection);
-
- // General Directory initialization and load helpers.
- bool InitializeTables();
- bool CreateTables();
-
- // Create 'share_info' or 'temp_share_info' depending on value of
- // is_temporary. Returns an sqlite
- bool CreateShareInfoTable(bool is_temporary);
-
- bool CreateShareInfoTableVersion71(bool is_temporary);
- // Create 'metas' or 'temp_metas' depending on value of is_temporary. Also
- // create a 'deleted_metas' table using same schema.
- bool CreateMetasTable(bool is_temporary);
- bool CreateModelsTable();
- bool CreateV71ModelsTable();
- bool CreateV75ModelsTable();
-
- // We don't need to load any synced and applied deleted entries, we can
- // in fact just purge them forever on startup.
- bool DropDeletedEntries();
- // Drops a table if it exists, harmless if the table did not already exist.
- bool SafeDropTable(const char* table_name);
-
- // Load helpers for entries and attributes.
- bool LoadEntries(Directory::MetahandlesMap* handles_map);
- bool LoadDeleteJournals(JournalIndex* delete_journals);
- bool LoadInfo(Directory::KernelLoadInfo* info);
-
- // Save/update helpers for entries. Return false if sqlite commit fails.
- static bool SaveEntryToDB(sql::Statement* save_statement,
- const EntryKernel& entry);
- bool SaveNewEntryToDB(const EntryKernel& entry);
- bool UpdateEntryToDB(const EntryKernel& entry);
-
- // Close save_dbhandle_. Broken out for testing.
- void EndSave();
-
- enum EntryTable {
- METAS_TABLE,
- DELETE_JOURNAL_TABLE,
- };
- // Removes each entry whose metahandle is in |handles| from the table
- // specified by |from| table. Does synchronous I/O. Returns false on error.
- bool DeleteEntries(EntryTable from, const MetahandleSet& handles);
-
- // Drop all tables in preparation for reinitialization.
- void DropAllTables();
-
- // Serialization helpers for ModelType. These convert between
- // the ModelType enum and the values we persist in the database to identify
- // a model. We persist a default instance of the specifics protobuf as the
- // ID, rather than the enum value.
- static ModelType ModelIdToModelTypeEnum(const void* data, int length);
- static std::string ModelTypeEnumToModelId(ModelType model_type);
-
- static std::string GenerateCacheGUID();
-
- // Runs an integrity check on the current database. If the
- // integrity check fails, false is returned and error is populated
- // with an error message.
- bool CheckIntegrity(sqlite3* handle, std::string* error) const;
-
- // Checks that the references between sync nodes is consistent.
- static bool VerifyReferenceIntegrity(
- const Directory::MetahandlesMap* handles_map);
-
- // Migration utilities.
- bool RefreshColumns();
- bool SetVersion(int version);
- int GetVersion();
-
- bool MigrateToSpecifics(const char* old_columns,
- const char* specifics_column,
- void(*handler_function) (
- sql::Statement* old_value_query,
- int old_value_column,
- sync_pb::EntitySpecifics* mutable_new_value));
-
- // Individual version migrations.
- bool MigrateVersion67To68();
- bool MigrateVersion68To69();
- bool MigrateVersion69To70();
- bool MigrateVersion70To71();
- bool MigrateVersion71To72();
- bool MigrateVersion72To73();
- bool MigrateVersion73To74();
- bool MigrateVersion74To75();
- bool MigrateVersion75To76();
- bool MigrateVersion76To77();
- bool MigrateVersion77To78();
- bool MigrateVersion78To79();
- bool MigrateVersion79To80();
- bool MigrateVersion80To81();
- bool MigrateVersion81To82();
- bool MigrateVersion82To83();
- bool MigrateVersion83To84();
- bool MigrateVersion84To85();
- bool MigrateVersion85To86();
-
- scoped_ptr<sql::Connection> db_;
- sql::Statement save_meta_statment_;
- sql::Statement save_delete_journal_statment_;
- std::string dir_name_;
-
- // Set to true if migration left some old columns around that need to be
- // discarded.
- bool needs_column_refresh_;
-
- private:
- // Prepares |save_statement| for saving entries in |table|.
- void PrepareSaveEntryStatement(EntryTable table,
- sql::Statement* save_statement);
-
- DISALLOW_COPY_AND_ASSIGN(DirectoryBackingStore);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
diff --git a/chromium/sync/syncable/directory_backing_store_unittest.cc b/chromium/sync/syncable/directory_backing_store_unittest.cc
deleted file mode 100644
index c40d564bdd3..00000000000
--- a/chromium/sync/syncable/directory_backing_store_unittest.cc
+++ /dev/null
@@ -1,3502 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include <string>
-
-#include "base/file_util.h"
-#include "base/files/file_path.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/stl_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "sql/connection.h"
-#include "sql/statement.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/directory_backing_store.h"
-#include "sync/syncable/on_disk_directory_backing_store.h"
-#include "sync/syncable/syncable-inl.h"
-#include "sync/test/test_directory_backing_store.h"
-#include "sync/util/time.h"
-#include "testing/gtest/include/gtest/gtest-param-test.h"
-
-namespace syncer {
-namespace syncable {
-
-SYNC_EXPORT_PRIVATE extern const int32 kCurrentDBVersion;
-
-class MigrationTest : public testing::TestWithParam<int> {
- public:
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- }
-
- protected:
- std::string GetUsername() {
- return "nick@chromium.org";
- }
-
- base::FilePath GetDatabasePath() {
- return temp_dir_.path().Append(Directory::kSyncDatabaseFilename);
- }
-
- static bool LoadAndIgnoreReturnedData(DirectoryBackingStore *dbs) {
- Directory::MetahandlesMap tmp_handles_map;
- JournalIndex delete_journals;
- STLValueDeleter<Directory::MetahandlesMap> deleter(&tmp_handles_map);
- Directory::KernelLoadInfo kernel_load_info;
- return dbs->Load(&tmp_handles_map, &delete_journals, &kernel_load_info) ==
- OPENED;
- }
-
- void SetUpVersion67Database(sql::Connection* connection);
- void SetUpVersion68Database(sql::Connection* connection);
- void SetUpVersion69Database(sql::Connection* connection);
- void SetUpVersion70Database(sql::Connection* connection);
- void SetUpVersion71Database(sql::Connection* connection);
- void SetUpVersion72Database(sql::Connection* connection);
- void SetUpVersion73Database(sql::Connection* connection);
- void SetUpVersion74Database(sql::Connection* connection);
- void SetUpVersion75Database(sql::Connection* connection);
- void SetUpVersion76Database(sql::Connection* connection);
- void SetUpVersion77Database(sql::Connection* connection);
- void SetUpVersion78Database(sql::Connection* connection);
- void SetUpVersion79Database(sql::Connection* connection);
- void SetUpVersion80Database(sql::Connection* connection);
- void SetUpVersion81Database(sql::Connection* connection);
- void SetUpVersion82Database(sql::Connection* connection);
- void SetUpVersion83Database(sql::Connection* connection);
- void SetUpVersion84Database(sql::Connection* connection);
- void SetUpVersion85Database(sql::Connection* connection);
- void SetUpVersion86Database(sql::Connection* connection);
-
- void SetUpCurrentDatabaseAndCheckVersion(sql::Connection* connection) {
- SetUpVersion86Database(connection); // Prepopulates data.
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), connection));
- ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
-
- ASSERT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- private:
- base::ScopedTempDir temp_dir_;
-};
-
-class DirectoryBackingStoreTest : public MigrationTest {};
-
-#if defined(OS_WIN)
-
-// On Windows, we used to store timestamps in FILETIME format.
-#define LEGACY_META_PROTO_TIMES_1 129079956640320000LL
-#define LEGACY_META_PROTO_TIMES_2 128976886618480000LL
-#define LEGACY_META_PROTO_TIMES_4 129002163642690000LL
-#define LEGACY_META_PROTO_TIMES_5 129001555500000000LL
-#define LEGACY_META_PROTO_TIMES_6 129053976170000000LL
-#define LEGACY_META_PROTO_TIMES_7 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_8 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_9 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_10 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_11 129079956948440000LL
-#define LEGACY_META_PROTO_TIMES_12 129079957513650000LL
-#define LEGACY_META_PROTO_TIMES_13 129079957985300000LL
-#define LEGACY_META_PROTO_TIMES_14 129079958383000000LL
-
-#define LEGACY_META_PROTO_TIMES_STR_1 "129079956640320000"
-#define LEGACY_META_PROTO_TIMES_STR_2 "128976886618480000"
-#define LEGACY_META_PROTO_TIMES_STR_4 "129002163642690000"
-#define LEGACY_META_PROTO_TIMES_STR_5 "129001555500000000"
-#define LEGACY_META_PROTO_TIMES_STR_6 "129053976170000000"
-#define LEGACY_META_PROTO_TIMES_STR_7 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_8 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_9 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_10 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_11 "129079956948440000"
-#define LEGACY_META_PROTO_TIMES_STR_12 "129079957513650000"
-#define LEGACY_META_PROTO_TIMES_STR_13 "129079957985300000"
-#define LEGACY_META_PROTO_TIMES_STR_14 "129079958383000000"
-
-// Generated via:
-//
-// ruby -ane '$F[1].sub!("LEGACY_", ""); $F[2] = Integer($F[2].sub!("LL", "")) /
-// 10000 - 11644473600000; print "#{$F[0]} #{$F[1]} #{$F[2]}LL"'
-//
-// Magic numbers taken from
-// http://stackoverflow.com/questions/5398557/
-// java-library-for-dealing-with-win32-filetime .
-
-// Now we store them in Java format (ms since the Unix epoch).
-#define META_PROTO_TIMES_1 1263522064032LL
-#define META_PROTO_TIMES_2 1253215061848LL
-#define META_PROTO_TIMES_4 1255742764269LL
-#define META_PROTO_TIMES_5 1255681950000LL
-#define META_PROTO_TIMES_6 1260924017000LL
-#define META_PROTO_TIMES_7 1253212875848LL
-#define META_PROTO_TIMES_8 1253212875848LL
-#define META_PROTO_TIMES_9 1253212875848LL
-#define META_PROTO_TIMES_10 1253212875848LL
-#define META_PROTO_TIMES_11 1263522094844LL
-#define META_PROTO_TIMES_12 1263522151365LL
-#define META_PROTO_TIMES_13 1263522198530LL
-#define META_PROTO_TIMES_14 1263522238300LL
-
-#define META_PROTO_TIMES_STR_1 "1263522064032"
-#define META_PROTO_TIMES_STR_2 "1253215061848"
-#define META_PROTO_TIMES_STR_4 "1255742764269"
-#define META_PROTO_TIMES_STR_5 "1255681950000"
-#define META_PROTO_TIMES_STR_6 "1260924017000"
-#define META_PROTO_TIMES_STR_7 "1253212875848"
-#define META_PROTO_TIMES_STR_8 "1253212875848"
-#define META_PROTO_TIMES_STR_9 "1253212875848"
-#define META_PROTO_TIMES_STR_10 "1253212875848"
-#define META_PROTO_TIMES_STR_11 "1263522094844"
-#define META_PROTO_TIMES_STR_12 "1263522151365"
-#define META_PROTO_TIMES_STR_13 "1263522198530"
-#define META_PROTO_TIMES_STR_14 "1263522238300"
-
-#else
-
-// On other platforms, we used to store timestamps in time_t format (s
-// since the Unix epoch).
-#define LEGACY_META_PROTO_TIMES_1 1263522064LL
-#define LEGACY_META_PROTO_TIMES_2 1253215061LL
-#define LEGACY_META_PROTO_TIMES_4 1255742764LL
-#define LEGACY_META_PROTO_TIMES_5 1255681950LL
-#define LEGACY_META_PROTO_TIMES_6 1260924017LL
-#define LEGACY_META_PROTO_TIMES_7 1253212875LL
-#define LEGACY_META_PROTO_TIMES_8 1253212875LL
-#define LEGACY_META_PROTO_TIMES_9 1253212875LL
-#define LEGACY_META_PROTO_TIMES_10 1253212875LL
-#define LEGACY_META_PROTO_TIMES_11 1263522094LL
-#define LEGACY_META_PROTO_TIMES_12 1263522151LL
-#define LEGACY_META_PROTO_TIMES_13 1263522198LL
-#define LEGACY_META_PROTO_TIMES_14 1263522238LL
-
-#define LEGACY_META_PROTO_TIMES_STR_1 "1263522064"
-#define LEGACY_META_PROTO_TIMES_STR_2 "1253215061"
-#define LEGACY_META_PROTO_TIMES_STR_4 "1255742764"
-#define LEGACY_META_PROTO_TIMES_STR_5 "1255681950"
-#define LEGACY_META_PROTO_TIMES_STR_6 "1260924017"
-#define LEGACY_META_PROTO_TIMES_STR_7 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_8 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_9 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_10 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_11 "1263522094"
-#define LEGACY_META_PROTO_TIMES_STR_12 "1263522151"
-#define LEGACY_META_PROTO_TIMES_STR_13 "1263522198"
-#define LEGACY_META_PROTO_TIMES_STR_14 "1263522238"
-
-// Now we store them in Java format (ms since the Unix epoch).
-#define META_PROTO_TIMES_1 1263522064000LL
-#define META_PROTO_TIMES_2 1253215061000LL
-#define META_PROTO_TIMES_4 1255742764000LL
-#define META_PROTO_TIMES_5 1255681950000LL
-#define META_PROTO_TIMES_6 1260924017000LL
-#define META_PROTO_TIMES_7 1253212875000LL
-#define META_PROTO_TIMES_8 1253212875000LL
-#define META_PROTO_TIMES_9 1253212875000LL
-#define META_PROTO_TIMES_10 1253212875000LL
-#define META_PROTO_TIMES_11 1263522094000LL
-#define META_PROTO_TIMES_12 1263522151000LL
-#define META_PROTO_TIMES_13 1263522198000LL
-#define META_PROTO_TIMES_14 1263522238000LL
-
-#define META_PROTO_TIMES_STR_1 "1263522064000"
-#define META_PROTO_TIMES_STR_2 "1253215061000"
-#define META_PROTO_TIMES_STR_4 "1255742764000"
-#define META_PROTO_TIMES_STR_5 "1255681950000"
-#define META_PROTO_TIMES_STR_6 "1260924017000"
-#define META_PROTO_TIMES_STR_7 "1253212875000"
-#define META_PROTO_TIMES_STR_8 "1253212875000"
-#define META_PROTO_TIMES_STR_9 "1253212875000"
-#define META_PROTO_TIMES_STR_10 "1253212875000"
-#define META_PROTO_TIMES_STR_11 "1263522094000"
-#define META_PROTO_TIMES_STR_12 "1263522151000"
-#define META_PROTO_TIMES_STR_13 "1263522198000"
-#define META_PROTO_TIMES_STR_14 "1263522238000"
-
-#endif
-
-// Helper macros for the database dumps in the SetUpVersion*Database
-// functions.
-#define LEGACY_META_PROTO_TIMES(x) LEGACY_META_PROTO_TIMES_##x
-#define LEGACY_META_PROTO_TIMES_STR(x) LEGACY_META_PROTO_TIMES_STR_##x
-#define LEGACY_PROTO_TIME_VALS(x) \
- LEGACY_META_PROTO_TIMES_STR(x) "," \
- LEGACY_META_PROTO_TIMES_STR(x) "," \
- LEGACY_META_PROTO_TIMES_STR(x) "," \
- LEGACY_META_PROTO_TIMES_STR(x)
-#define META_PROTO_TIMES(x) META_PROTO_TIMES_##x
-#define META_PROTO_TIMES_STR(x) META_PROTO_TIMES_STR_##x
-#define META_PROTO_TIMES_VALS(x) \
- META_PROTO_TIMES_STR(x) "," \
- META_PROTO_TIMES_STR(x) "," \
- META_PROTO_TIMES_STR(x) "," \
- META_PROTO_TIMES_STR(x)
-
-namespace {
-
-// Helper functions for testing.
-
-enum ShouldIncludeDeletedItems {
- INCLUDE_DELETED_ITEMS,
- DONT_INCLUDE_DELETED_ITEMS
-};
-
-// Returns a map from metahandle -> expected legacy time (in proto
-// format).
-std::map<int64, int64> GetExpectedLegacyMetaProtoTimes(
- enum ShouldIncludeDeletedItems include_deleted) {
- std::map<int64, int64> expected_legacy_meta_proto_times;
- expected_legacy_meta_proto_times[1] = LEGACY_META_PROTO_TIMES(1);
- if (include_deleted == INCLUDE_DELETED_ITEMS) {
- expected_legacy_meta_proto_times[2] = LEGACY_META_PROTO_TIMES(2);
- expected_legacy_meta_proto_times[4] = LEGACY_META_PROTO_TIMES(4);
- expected_legacy_meta_proto_times[5] = LEGACY_META_PROTO_TIMES(5);
- }
- expected_legacy_meta_proto_times[6] = LEGACY_META_PROTO_TIMES(6);
- expected_legacy_meta_proto_times[7] = LEGACY_META_PROTO_TIMES(7);
- expected_legacy_meta_proto_times[8] = LEGACY_META_PROTO_TIMES(8);
- expected_legacy_meta_proto_times[9] = LEGACY_META_PROTO_TIMES(9);
- expected_legacy_meta_proto_times[10] = LEGACY_META_PROTO_TIMES(10);
- expected_legacy_meta_proto_times[11] = LEGACY_META_PROTO_TIMES(11);
- expected_legacy_meta_proto_times[12] = LEGACY_META_PROTO_TIMES(12);
- expected_legacy_meta_proto_times[13] = LEGACY_META_PROTO_TIMES(13);
- expected_legacy_meta_proto_times[14] = LEGACY_META_PROTO_TIMES(14);
- return expected_legacy_meta_proto_times;
-}
-
-// Returns a map from metahandle -> expected time (in proto format).
-std::map<int64, int64> GetExpectedMetaProtoTimes(
- enum ShouldIncludeDeletedItems include_deleted) {
- std::map<int64, int64> expected_meta_proto_times;
- expected_meta_proto_times[1] = META_PROTO_TIMES(1);
- if (include_deleted == INCLUDE_DELETED_ITEMS) {
- expected_meta_proto_times[2] = META_PROTO_TIMES(2);
- expected_meta_proto_times[4] = META_PROTO_TIMES(4);
- expected_meta_proto_times[5] = META_PROTO_TIMES(5);
- }
- expected_meta_proto_times[6] = META_PROTO_TIMES(6);
- expected_meta_proto_times[7] = META_PROTO_TIMES(7);
- expected_meta_proto_times[8] = META_PROTO_TIMES(8);
- expected_meta_proto_times[9] = META_PROTO_TIMES(9);
- expected_meta_proto_times[10] = META_PROTO_TIMES(10);
- expected_meta_proto_times[11] = META_PROTO_TIMES(11);
- expected_meta_proto_times[12] = META_PROTO_TIMES(12);
- expected_meta_proto_times[13] = META_PROTO_TIMES(13);
- expected_meta_proto_times[14] = META_PROTO_TIMES(14);
- return expected_meta_proto_times;
-}
-
-// Returns a map from metahandle -> expected time (as a Time object).
-std::map<int64, base::Time> GetExpectedMetaTimes() {
- std::map<int64, base::Time> expected_meta_times;
- const std::map<int64, int64>& expected_meta_proto_times =
- GetExpectedMetaProtoTimes(INCLUDE_DELETED_ITEMS);
- for (std::map<int64, int64>::const_iterator it =
- expected_meta_proto_times.begin();
- it != expected_meta_proto_times.end(); ++it) {
- expected_meta_times[it->first] = ProtoTimeToTime(it->second);
- }
- return expected_meta_times;
-}
-
-// Extracts a map from metahandle -> time (in proto format) from the
-// given database.
-std::map<int64, int64> GetMetaProtoTimes(sql::Connection *db) {
- sql::Statement s(db->GetCachedStatement(
- SQL_FROM_HERE,
- "SELECT metahandle, mtime, server_mtime, ctime, server_ctime "
- "FROM metas"));
- EXPECT_EQ(5, s.ColumnCount());
- std::map<int64, int64> meta_times;
- while (s.Step()) {
- int64 metahandle = s.ColumnInt64(0);
- int64 mtime = s.ColumnInt64(1);
- int64 server_mtime = s.ColumnInt64(2);
- int64 ctime = s.ColumnInt64(3);
- int64 server_ctime = s.ColumnInt64(4);
- EXPECT_EQ(mtime, server_mtime);
- EXPECT_EQ(mtime, ctime);
- EXPECT_EQ(mtime, server_ctime);
- meta_times[metahandle] = mtime;
- }
- EXPECT_TRUE(s.Succeeded());
- return meta_times;
-}
-
-::testing::AssertionResult AssertTimesMatch(const char* t1_expr,
- const char* t2_expr,
- const base::Time& t1,
- const base::Time& t2) {
- if (t1 == t2)
- return ::testing::AssertionSuccess();
-
- return ::testing::AssertionFailure()
- << t1_expr << " and " << t2_expr
- << " (internal values: " << t1.ToInternalValue()
- << " and " << t2.ToInternalValue()
- << ") (proto time: " << TimeToProtoTime(t1)
- << " and " << TimeToProtoTime(t2)
- << ") do not match";
-}
-
-// Expect that all time fields of the given entry kernel will be the
-// given time.
-void ExpectTime(const EntryKernel& entry_kernel,
- const base::Time& expected_time) {
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(CTIME));
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(SERVER_CTIME));
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(MTIME));
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(SERVER_MTIME));
-}
-
-// Expect that all the entries in |entries| have times matching those in
-// the given map (from metahandle to expect time).
-void ExpectTimes(const Directory::MetahandlesMap& handles_map,
- const std::map<int64, base::Time>& expected_times) {
- for (Directory::MetahandlesMap::const_iterator it = handles_map.begin();
- it != handles_map.end(); ++it) {
- int64 meta_handle = it->first;
- SCOPED_TRACE(meta_handle);
- std::map<int64, base::Time>::const_iterator it2 =
- expected_times.find(meta_handle);
- if (it2 == expected_times.end()) {
- ADD_FAILURE() << "Could not find expected time for " << meta_handle;
- continue;
- }
- ExpectTime(*it->second, it2->second);
- }
-}
-
-} // namespace
-
-void MigrationTest::SetUpVersion67Database(sql::Connection* connection) {
- // This is a version 67 database dump whose contents were backformed from
- // the contents of the version 68 database dump (the v68 migration was
- // actually written first).
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "is_bookmark_object bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,server_is_bookmark_object bit default 0,"
- "name varchar(255), " /* COLLATE PATHNAME, */
- "unsanitized_name varchar(255)," /* COLLATE PATHNAME, */
- "non_unique_name varchar,"
- "server_name varchar(255)," /* COLLATE PATHNAME */
- "server_non_unique_name varchar,"
- "bookmark_url varchar,server_bookmark_url varchar,"
- "singleton_tag varchar,bookmark_favicon blob,"
- "server_bookmark_favicon blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,"
- "NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,"
- "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
- "'Deleted Item',NULL,'Deleted Item','Deleted Item','Deleted Item',"
- "'http://www.google.com/','http://www.google.com/2',NULL,'AASGASGA',"
- "'ASADGADGADG');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
- "'Welcome to Chromium',NULL,'Welcome to Chromium',"
- "'Welcome to Chromium','Welcome to Chromium',"
- "'http://www.google.com/chrome/intl/en/welcome.html',"
- "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,"
- "7,'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
- "'Google',NULL,'Google','Google','Google','http://www.google.com/',"
- "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,"
- "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,"
- "'The Internet',NULL,'The Internet','The Internet',"
- "'The Internet',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ","
- "1048576,0,'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,"
- "'Google Chrome',NULL,'Google Chrome','Google Chrome',"
- "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,"
- "0,'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
- "NULL,'Bookmarks','Bookmarks','Bookmarks',NULL,NULL,"
- "'google_chrome_bookmarks',NULL,NULL);"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ","
- "1048576,1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,"
- "1,'Bookmark Bar',NULL,'Bookmark Bar','Bookmark Bar','Bookmark Bar',"
- "NULL,NULL,'bookmark_bar',NULL,NULL);"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,"
- "2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
- "'Other Bookmarks',NULL,'Other Bookmarks','Other Bookmarks',"
- "'Other Bookmarks',NULL,NULL,'other_bookmarks',"
- "NULL,NULL);"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
- "'Home (The Chromium Projects)',NULL,'Home (The Chromium Projects)',"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
- "'AGATWA','AFAGVASF');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
- "'Extra Bookmarks',NULL,'Extra Bookmarks','Extra Bookmarks',"
- "'Extra Bookmarks',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
- "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'http://www.icann.com/','http://www.icann.com/',NULL,"
- "'PNGAXF0AAFF','DAAFASF');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,"
- "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
- "'The WebKit Open Source Project',NULL,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "'The WebKit Open Source Project','http://webkit.org/',"
- "'http://webkit.org/x',NULL,'PNGX','PNG2Y');"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',68);"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion68Database(sql::Connection* connection) {
- // This sets up an actual version 68 database dump. The IDs were
- // canonicalized to be less huge, and the favicons were overwritten
- // with random junk so that they didn't contain any unprintable
- // characters. A few server URLs were tweaked so that they'd be
- // different from the local URLs. Lastly, the custom collation on
- // the server_non_unique_name column was removed.
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "is_bookmark_object bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,"
- "server_is_bookmark_object bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "bookmark_url varchar,server_bookmark_url varchar,"
- "singleton_tag varchar,bookmark_favicon blob,"
- "server_bookmark_favicon blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,"
- "NULL,NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,"
- "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
- "'Deleted Item','Deleted Item','http://www.google.com/',"
- "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
- "'Welcome to Chromium','Welcome to Chromium',"
- "'http://www.google.com/chrome/intl/en/welcome.html',"
- "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,"
- "7,'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
- "'Google','Google','http://www.google.com/',"
- "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,"
- "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,"
- "'The Internet','The Internet',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ","
- "1048576,0,'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,"
- "'Google Chrome','Google Chrome',NULL,NULL,'google_chrome',NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,"
- "0,'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
- "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL);"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ","
- "1048576,1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,"
- "1,'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,"
- "2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
- "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
- "NULL,NULL);"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
- "'AGATWA','AFAGVASF');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
- "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'http://www.icann.com/','http://www.icann.com/',NULL,"
- "'PNGAXF0AAFF','DAAFASF');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,"
- "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y');"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',68);"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion69Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "is_bookmark_object bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,"
- "server_is_bookmark_object bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "bookmark_url varchar,server_bookmark_url varchar,"
- "singleton_tag varchar,bookmark_favicon blob,"
- "server_bookmark_favicon blob, specifics blob, "
- "server_specifics blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,NULL,NULL,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,"
- "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
- "'Deleted Item','Deleted Item','http://www.google.com/',"
- "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG',"
- "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841415"
- "34741534741',X'C28810260A17687474703A2F2F7777772E676F6F676C652E636F"
- "6D2F32120B4153414447414447414447');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
- "'Welcome to Chromium','Welcome to Chromium',"
- "'http://www.google.com/chrome/intl/en/welcome.html',"
- "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,NULL,"
- "X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A3168"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F6"
- "56E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
- "'Google','Google','http://www.google.com/',"
- "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG',X'C28810220A166"
- "87474703A2F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'"
- "C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464"
- "447415347');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6"
- ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,'The Internet',"
- "'The Internet',NULL,NULL,NULL,NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,'Google Chrome',"
- "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
- "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,1,"
- "'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
- "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
- "NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
- "'AGATWA','AFAGVASF',X'C28810220A18687474703A2F2F6465762E6368726F6D6"
- "9756D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F646576"
- "2E6368726F6D69756D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
- "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'http://www.icann.com/','http://www.icann.com/',NULL,'PNGAXF0AAFF',"
- "'DAAFASF',X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
- "20B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963"
- "616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y',"
- "X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',X'C2"
- "88101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259');"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',69);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion70Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',70);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,"
- "specifics blob,server_specifics blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2) ","
- "-2097152,4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,"
- "1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A"
- "2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X'C2881026"
- "0A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B415341444741"
- "4447414447');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A"
- "31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E74"
- "6C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F"
- "2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F7765"
- "6C636F6D652E68746D6C1200');"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C6"
- "52E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F77777"
- "72E676F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,"
- "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome',"
- "'Google Chrome','google_chrome',NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,"
- "1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,"
- "'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ","
- "2097152,2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F"
- "72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636872"
- "6F6D69756D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F"
- "120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E69"
- "63616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,"
- "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F78120550"
- "4E473259');"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion71Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',71);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,server_position_in_parent "
- "bigint default 0,local_external_id bigint default 0,id varchar(255) "
- "default 'r',parent_id varchar(255) default 'r',server_parent_id "
- "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
- "varchar(255) default 'r',is_unsynced bit default 0,"
- "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
- "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
- "server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,"
- "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
- "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
- "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
- "47');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,"
- "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
- "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
- "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
- "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
- "652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
- "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,"
- "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
- ",'google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
- "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,"
- "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
- "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
- "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,"
- "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
- "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
- "E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
- "473259');"
- "CREATE TABLE models (model_id BLOB primary key, "
- "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
- "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
- "next_id INT default -2, cache_guid TEXT);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion72Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',72);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,server_position_in_parent "
- "bigint default 0,local_external_id bigint default 0,id varchar(255) "
- "default 'r',parent_id varchar(255) default 'r',server_parent_id "
- "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
- "varchar(255) default 'r',is_unsynced bit default 0,"
- "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
- "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
- "server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,"
- "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
- "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
- "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
- "47');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,"
- "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
- "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
- "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
- "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
- "652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
- "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,"
- "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
- ",'google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
- "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,"
- "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
- "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
- "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,"
- "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
- "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
- "E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
- "473259');"
- "CREATE TABLE models (model_id BLOB primary key, "
- "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
- "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
- "next_id INT default -2, cache_guid TEXT);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion73Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',73);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,server_position_in_parent "
- "bigint default 0,local_external_id bigint default 0,id varchar(255) "
- "default 'r',parent_id varchar(255) default 'r',server_parent_id "
- "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
- "varchar(255) default 'r',is_unsynced bit default 0,"
- "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
- "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
- "server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,"
- "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
- "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
- "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
- "47');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,"
- "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
- "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
- "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
- "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
- "652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
- "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,"
- "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
- ",'google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
- "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,"
- "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
- "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
- "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,"
- "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
- "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
- "E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
- "473259');"
- "CREATE TABLE models (model_id BLOB primary key, "
- "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
- "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
- "next_id INT default -2, cache_guid TEXT, "
- "notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x',X'C2881000');"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion74Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',74);"
- "CREATE TABLE models (model_id BLOB primary key, last_download_timestamp"
- " INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthd"
- "ay TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
- "fault -2, cache_guid TEXT , notification_state BLOB, autofill_migra"
- "tion_state INT default 0, bookmarks_added_during_autofill_migration"
- " INT default 0, autofill_migration_time INT default 0, autofill_ent"
- "ries_added_during_migration INT default 0, autofill_profiles_added_"
- "during_migration INT default 0);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org'"
- ",'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542"
- ",'9010788312004066376x-6609234393368420856x',NULL,0,0,0,0,0);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,bas"
- "e_version bigint default -1,server_version bigint default 0,mtime b"
- "igint default 0,server_mtime bigint default 0,ctime bigint default "
- "0,server_ctime bigint default 0,server_position_in_parent bigint de"
- "fault 0,local_external_id bigint default 0,id varchar(255) default "
- "'r',parent_id varchar(255) default 'r',server_parent_id varchar(255"
- ") default 'r',prev_id varchar(255) default 'r',next_id varchar(255)"
- " default 'r',is_unsynced bit default 0,is_unapplied_update bit defa"
- "ult 0,is_del bit default 0,is_dir bit default 0,server_is_dir bit d"
- "efault 0,server_is_del bit default 0,non_unique_name varchar,server"
- "_non_unique_name varchar(255),unique_server_tag varchar,unique_clie"
- "nt_tag varchar,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'"
- "');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,'s_ID_2','s_ID"
- "_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Item','Deleted "
- "Item',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E6"
- "36F6D2F12084141534741534741',X'C28810260A17687474703A2F2F7777772E67"
- "6F6F676C652E636F6D2F32120B4153414447414447414447');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,'s_ID_4','s_ID"
- "_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to Chromium','W"
- "elcome to Chromium',NULL,NULL,X'C28810350A31687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E"
- "68746D6C1200',X'C28810350A31687474703A2F2F7777772E676F6F676C652E636"
- "F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,'s_ID_5','s_ID_"
- "9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Google',NULL,NU"
- "LL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841"
- "47415347415347',X'C28810220A16687474703A2F2F7777772E676F6F676C652E6"
- "36F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,'s_ID_6','s_ID"
- "_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The Internet',NULL"
- ",NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,'s_ID_7','r','r"
- "','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','google_chrom"
- "e',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,'s_ID_8','s_ID_"
- "7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks','google_chr"
- "ome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,'s_ID_9','s_ID_"
- "8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','Bookmark Bar'"
- ",'bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,'s_ID_10','s_I"
- "D_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks','Other Boo"
- "kmarks','other_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,'s_ID_11','s_"
- "ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chromium Projec"
- "ts)','Home (The Chromium Projects)',NULL,NULL,X'C28810220A186874747"
- "03A2F2F6465762E6368726F6D69756D2E6F72672F1206414741545741',X'C28810"
- "290A1D687474703A2F2F6465762E6368726F6D69756D2E6F72672F6F74686572120"
- "84146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,'s_ID_12','s_ID_6','"
- "s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bo"
- "okmarks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,'s_ID_13','s_"
- "ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN | Internet Co"
- "rporation for Assigned Names and Numbers','ICANN | Internet Corpora"
- "tion for Assigned Names and Numbers',NULL,NULL,X'C28810240A15687474"
- "703A2F2F7777772E6963616E6E2E636F6D2F120B504E474158463041414646',X'C"
- "28810200A15687474703A2F2F7777772E6963616E6E2E636F6D2F12074441414641"
- "5346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,'s_ID_14','s_"
- "ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Open Source Pr"
- "oject','The WebKit Open Source Project',NULL,NULL,X'C288101A0A12687"
- "474703A2F2F7765626B69742E6F72672F1204504E4758',X'C288101C0A13687474"
- "703A2F2F7765626B69742E6F72672F781205504E473259');"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion75Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',75);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthd"
- "ay TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
- "fault -2, cache_guid TEXT , notification_state BLOB, autofill_migra"
- "tion_state INT default 0,bookmarks_added_during_autofill_migration "
- "INT default 0, autofill_migration_time INT default 0, autofill_entr"
- "ies_added_during_migration INT default 0, autofill_profiles_added_d"
- "uring_migration INT default 0);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org"
- "','c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-655"
- "42,'9010788312004066376x-6609234393368420856x',NULL,0,0,0,0,0);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, "
- "initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,ba"
- "se_version bigint default -1,server_version bigint default 0,mtime"
- " bigint default 0,server_mtime bigint default 0,ctime bigint defau"
- "lt 0,server_ctime bigint default 0,server_position_in_parent bigin"
- "t default 0,local_external_id bigint default 0,id varchar(255) def"
- "ault 'r',parent_id varchar(255) default 'r',server_parent_id varch"
- "ar(255) default 'r',prev_id varchar(255) default 'r',next_id varch"
- "ar(255) default 'r',is_unsynced bit default 0,is_unapplied_update "
- "bit default 0,is_del bit default 0,is_dir bit default 0,server_is_"
- "dir bit default 0,server_is_del bit default 0,non_unique_name varc"
- "har,server_non_unique_name varchar(255),unique_server_tag varchar,"
- "unique_client_tag varchar,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NUL"
- "L,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,'s_ID_"
- "2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Ite"
- "m','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F7777772"
- "E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A176874"
- "74703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474"
- "14447');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,'s_ID_"
- "4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to "
- "Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A316874747"
- "03A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F65"
- "6E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7"
- "777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F7765"
- "6C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,'s_ID_5"
- "','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Goo"
- "gle',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C65"
- "2E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F777"
- "7772E676F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,'s_ID_"
- "6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The In"
- "ternet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,'s_ID_7"
- "','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','"
- "google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,'s_ID_8"
- "','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks'"
- ",'google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,'s_ID_9"
- "','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','B"
- "ookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,'s_ID_"
- "10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks"
- "','Other Bookmarks','other_bookmarks',NULL,X'C2881000',X'C28810"
- "00');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,'s_ID"
- "_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chr"
- "omium Projects)','Home (The Chromium Projects)',NULL,NULL,X'C28"
- "810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F120641"
- "4741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,'s_ID_12','s"
- "_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmark"
- "s','Extra Bookmarks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,'s_ID"
- "_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN |"
- " Internet Corporation for Assigned Names and Numbers','ICANN | "
- "Internet Corporation for Assigned Names and Numbers',NULL,NULL,"
- "X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E696361"
- "6E6E2E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,'s_ID"
- "_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Op"
- "en Source Project','The WebKit Open Source Project',NULL,NULL,X"
- "'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',"
- "X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473"
- "259');"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion76Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',76);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,mtime big"
- "int default 0,server_mtime bigint default 0,ctime bigint default 0,s"
- "erver_ctime bigint default 0,server_position_in_parent bigint defaul"
- "t 0,local_external_id bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'')"
- ";"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,'s_ID_2','s_ID_9"
- "','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Item','Deleted Ite"
- "m',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6"
- "D2F12084141534741534741',X'C28810260A17687474703A2F2F7777772E676F6F6"
- "76C652E636F6D2F32120B4153414447414447414447');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,'s_ID_4','s_ID_9"
- "','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to Chromium','Welc"
- "ome to Chromium',NULL,NULL,X'C28810350A31687474703A2F2F7777772E676F6"
- "F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E68746"
- "D6C1200',X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6"
- "368726F6D652F696E746C2F656E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,'s_ID_5','s_ID_9'"
- ",'s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Google',NULL,NULL,"
- "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F1208414741"
- "5347415347',X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D"
- "2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,'s_ID_6','s_ID_9"
- "','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The Internet',NULL,NU"
- "LL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,'s_ID_7','r','r',"
- "'r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','google_chrome',"
- "NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,'s_ID_8','s_ID_7'"
- ",'s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks','google_chrome"
- "_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,'s_ID_9','s_ID_8'"
- ",'s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','Bookmark Bar','b"
- "ookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,'s_ID_10','s_ID_"
- "8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks','Other Bookma"
- "rks','other_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,'s_ID_11','s_ID"
- "_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chromium Projects)"
- "','Home (The Chromium Projects)',NULL,NULL,X'C28810220A18687474703A2"
- "F2F6465762E6368726F6D69756D2E6F72672F1206414741545741',X'C28810290A1"
- "D687474703A2F2F6465762E6368726F6D69756D2E6F72672F6F74686572120841464"
- "14756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,'s_ID_12','s_ID_6','s_"
- "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
- "arks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,'s_ID_13','s_ID"
- "_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN | Internet Corpo"
- "ration for Assigned Names and Numbers','ICANN | Internet Corporation"
- " for Assigned Names and Numbers',NULL,NULL,X'C28810240A15687474703A2"
- "F2F7777772E6963616E6E2E636F6D2F120B504E474158463041414646',X'C288102"
- "00A15687474703A2F2F7777772E6963616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,'s_ID_14','s_ID"
- "_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Open Source Proje"
- "ct','The WebKit Open Source Project',NULL,NULL,X'C288101A0A126874747"
- "03A2F2F7765626B69742E6F72672F1204504E4758',X'C288101C0A13687474703A2"
- "F2F7765626B69742E6F72672F781205504E473259');"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
- "9010788312004066376x-6609234393368420856x',NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion77Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',77);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,server_po"
- "sition_in_parent bigint default 0,local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1)
- ",'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447');"
- "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677,1048576,7," META_PROTO_TIMES_VALS(5)
- ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','"
- "Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E"
- "636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E67"
- "6F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ");"
- "INSERT INTO 'metas' VALUES(7,663,663,1048576,0," META_PROTO_TIMES_VALS(7)
- ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Goo"
- "gle Chrome','google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664,1048576,0," META_PROTO_TIMES_VALS(8)
- ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmar"
- "ks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665,1048576,1," META_PROTO_TIMES_VALS(9)
- ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar'"
- ",'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685,0,9," META_PROTO_TIMES_VALS(12)
- ",'s_ID_12','s_ID_6','s_"
- "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
- "arks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259');"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
- "9010788312004066376x-6609234393368420856x',NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion78Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',78);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,server_po"
- "sition_in_parent bigint default 0,local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ");"
- "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL);"
- "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL);"
- "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL);"
- "INSERT INTO 'metas' VALUES(5,677,677,1048576,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL);"
- "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL);"
- "INSERT INTO 'metas' VALUES(7,663,663,1048576,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664,1048576,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(9,665,665,1048576,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL);"
- "INSERT INTO 'metas' VALUES(12,685,685,0,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL);"
- "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
- "9010788312004066376x-6609234393368420856x',NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-// Builds a canned version-79 sync directory database in |connection|:
-// schema (share_version, models, metas, share_info) plus fixture rows,
-// all inside a single transaction.  At this version 'metas' still stores
-// the sibling position as the bigint column server_position_in_parent.
-// META_PROTO_TIMES_VALS(n) is a macro expanding to the four time-column
-// values (mtime, server_mtime, ctime, server_ctime) for metahandle |n|.
-// NOTE(review): the share_info row's next_id here is -131078 (the
-// preceding version's fixture used -65542).
-void MigrationTest::SetUpVersion79Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- // One Execute() call: the adjacent string literals below concatenate
- // into a single multi-statement SQL script.
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',79);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,server_po"
- "sition_in_parent bigint default 0,local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ");"
- "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL);"
- "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL);"
- "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL);"
- "INSERT INTO 'metas' VALUES(5,677,677,1048576,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL);"
- "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL);"
- "INSERT INTO 'metas' VALUES(7,663,663,1048576,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664,1048576,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(9,665,665,1048576,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL);"
- "INSERT INTO 'metas' VALUES(12,685,685,0,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL);"
- "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
- "9010788312004066376x-6609234393368420856x',NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-// Builds a canned version-80 sync directory database in |connection|.
-// Identical to the version-79 fixture except that 'share_info' gains a
-// trailing bag_of_chips BLOB column (the fixture row supplies NULL for
-// it).  META_PROTO_TIMES_VALS(n) is a macro expanding to the four
-// time-column values (mtime, server_mtime, ctime, server_ctime) for
-// metahandle |n|.
-void MigrationTest::SetUpVersion80Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- // One Execute() call: the adjacent string literals below concatenate
- // into a single multi-statement SQL script.
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',80);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,server_po"
- "sition_in_parent bigint default 0,local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ");"
- "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL);"
- "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL);"
- "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL);"
- "INSERT INTO 'metas' VALUES(5,677,677,1048576,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL);"
- "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL);"
- "INSERT INTO 'metas' VALUES(7,663,663,1048576,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664,1048576,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(9,665,665,1048576,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL);"
- "INSERT INTO 'metas' VALUES(12,685,685,0,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL);"
- "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL);"
- "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
- "blob);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
- "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-
-// Helper definitions used by the version 81 (and later) DB fixtures.
-namespace {
-
-// Number of 'metas' fixture rows (metahandles 1, 2, 4..14).
-const int V80_ROW_COUNT = 13;
-// The server_position_in_parent values of the 13 version-80 fixture rows,
-// in metahandle order.  Version 81 replaces that bigint column with a
-// blob ordinal, so these are converted per-row by V81_Ordinal() below.
-const int64 V80_POSITIONS[V80_ROW_COUNT] = {
- 0,
- -2097152,
- -3145728,
- 1048576,
- -4194304,
- 1048576,
- 1048576,
- 1048576,
- 2097152,
- -1048576,
- 0,
- -917504,
- 1048576
-};
-
-// Converts the n-th legacy position into the version-81 ordinal's internal
-// byte-string representation (may contain unprintable bytes, so callers
-// bind it as a blob rather than splicing it into SQL text).
-std::string V81_Ordinal(int n) {
- return Int64ToNodeOrdinal(V80_POSITIONS[n]).ToInternalValue();
-}
-
-} // namespace
-
-// Builds a canned version-81 sync directory database in |connection|.
-// Relative to version 80, 'metas' drops server_position_in_parent and
-// gains a trailing server_ordinal_in_parent blob column.  Unlike the
-// earlier versions, the rows for version 81 are generated
-// programmatically to accurately handle unprintable characters for the
-// server_ordinal_in_parent field: each INSERT carries a '?' placeholder
-// that is bound to the blob ordinal from V81_Ordinal().
-void MigrationTest::SetUpVersion81Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- // Schema and the share_info fixture row; 'metas' rows are inserted
- // separately below so their ordinals can be parameter-bound.
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',81);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
- "blob);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
- "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
-
- // One parameterized INSERT per 'metas' fixture row; the final '?' is
- // the server_ordinal_in_parent blob.
- const char* insert_stmts[V80_ROW_COUNT] = {
- "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?);",
- "INSERT INTO 'metas' VALUES(2,669,669,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL,?);",
- "INSERT INTO 'metas' VALUES(4,681,681,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL,?);",
- "INSERT INTO 'metas' VALUES(5,677,677,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL,?);",
- "INSERT INTO 'metas' VALUES(6,694,694,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL,?);",
- "INSERT INTO 'metas' VALUES(7,663,663,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?);",
- "INSERT INTO 'metas' VALUES(8,664,664,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(9,665,665,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(10,666,666,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(11,683,683,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?);",
- "INSERT INTO 'metas' VALUES(12,685,685,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(13,687,687,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL,?);",
- "INSERT INTO 'metas' VALUES(14,692,692,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL,?);" };
-
- // Bind each row's ordinal as a blob (parameter 0) and run the insert.
- for (int i = 0; i < V80_ROW_COUNT; i++) {
- sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
- std::string ord = V81_Ordinal(i);
- s.BindBlob(0, ord.data(), ord.length());
- ASSERT_TRUE(s.Run());
- s.Reset(true);
- }
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-// Builds a canned version-82 sync directory database in |connection|.
-// Identical to the version-81 fixture except that 'models' gains a
-// transaction_version BIGINT column (default 0); the fixture row stores 1.
-// 'metas' rows are inserted via parameterized statements so the
-// server_ordinal_in_parent blob can contain unprintable bytes.
-void MigrationTest::SetUpVersion82Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- // Schema and the share_info fixture row; 'metas' rows are inserted
- // separately below so their ordinals can be parameter-bound.
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',82);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0, transaction_version BIGINT "
- "default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1, 1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
- "blob);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
- "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
-
- // One parameterized INSERT per 'metas' fixture row; the final '?' is
- // the server_ordinal_in_parent blob.
- const char* insert_stmts[V80_ROW_COUNT] = {
- "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?);",
- "INSERT INTO 'metas' VALUES(2,669,669,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL,?);",
- "INSERT INTO 'metas' VALUES(4,681,681,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL,?);",
- "INSERT INTO 'metas' VALUES(5,677,677,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL,?);",
- "INSERT INTO 'metas' VALUES(6,694,694,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL,?);",
- "INSERT INTO 'metas' VALUES(7,663,663,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?);",
- "INSERT INTO 'metas' VALUES(8,664,664,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(9,665,665,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(10,666,666,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(11,683,683,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?);",
- "INSERT INTO 'metas' VALUES(12,685,685,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL,?);",
- "INSERT INTO 'metas' VALUES(13,687,687,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL,?);",
- "INSERT INTO 'metas' VALUES(14,692,692,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL,?);" };
-
- // Bind each row's ordinal as a blob (parameter 0) and run the insert.
- for (int i = 0; i < V80_ROW_COUNT; i++) {
- sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
- std::string ord = V81_Ordinal(i);
- s.BindBlob(0, ord.data(), ord.length());
- ASSERT_TRUE(s.Run());
- s.Reset(true);
- }
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-// Builds a canned version-83 sync directory database in |connection|.
-// Identical to the version-82 fixture except that 'metas' gains a
-// transaction_version bigint column (default 0); each fixture INSERT
-// supplies an explicit trailing 0 for it after the ordinal placeholder.
-void MigrationTest::SetUpVersion83Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- // Schema and the share_info fixture row; 'metas' rows are inserted
- // separately below so their ordinals can be parameter-bound.
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',83);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0, transaction_version BIGINT "
- "default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1, 1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_version bigint default "
- "0);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
- "blob);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
- "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
-
- // One parameterized INSERT per 'metas' fixture row; '?' is the
- // server_ordinal_in_parent blob, and the trailing 0 is
- // transaction_version.
- const char* insert_stmts[V80_ROW_COUNT] = {
- "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(2,669,669,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(4,681,681,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(5,677,677,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL,?,0);",
- "INSERT INTO 'metas' VALUES(6,694,694,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL,?,0);",
- "INSERT INTO 'metas' VALUES(7,663,663,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?,0);"
- "",
- "INSERT INTO 'metas' VALUES(8,664,664,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(9,665,665,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(10,666,666,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(11,683,683,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(12,685,685,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(13,687,687,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(14,692,692,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL,?,0);" };
-
- // Bind each row's ordinal as a blob (parameter 0) and run the insert.
- for (int i = 0; i < V80_ROW_COUNT; i++) {
- sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
- std::string ord = V81_Ordinal(i);
- s.BindBlob(0, ord.data(), ord.length());
- ASSERT_TRUE(s.Run());
- s.Reset(true);
- }
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion84Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',84);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0, transaction_version BIGINT "
- "default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1, 1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_version bigint default "
- "0);"
- "CREATE TABLE 'deleted_metas'"
- "(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_version bigint default "
- "0);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
- "blob);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
- "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
-
- const char* insert_stmts[V80_ROW_COUNT] = {
- "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(2,669,669,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(4,681,681,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(5,677,677,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL,?,0);",
- "INSERT INTO 'metas' VALUES(6,694,694,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL,?,0);",
- "INSERT INTO 'metas' VALUES(7,663,663,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?,0);"
- "",
- "INSERT INTO 'metas' VALUES(8,664,664,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(9,665,665,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(10,666,666,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(11,683,683,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(12,685,685,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(13,687,687,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(14,692,692,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL,?,0);" };
-
- for (int i = 0; i < V80_ROW_COUNT; i++) {
- sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
- std::string ord = V81_Ordinal(i);
- s.BindBlob(0, ord.data(), ord.length());
- ASSERT_TRUE(s.Run());
- s.Reset(true);
- }
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion85Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',85);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, "
- "transaction_version BIGINT default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605', 1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_version bigint default "
- "0);"
- "CREATE TABLE 'deleted_metas'"
- "(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0, "
- "local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_version bigint default "
- "0);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
- "blob);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
- "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
-
- const char* insert_stmts[V80_ROW_COUNT] = {
- "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
- "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(2,669,669,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(4,681,681,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(5,677,677,7,"
- META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
- "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
- "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
- "ULL,?,0);",
- "INSERT INTO 'metas' VALUES(6,694,694,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL,?,0);",
- "INSERT INTO 'metas' VALUES(7,663,663,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
- "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?,0);"
- "",
- "INSERT INTO 'metas' VALUES(8,664,664,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
- ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
- "00',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(9,665,665,1,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
- ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
- "000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(10,666,666,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(11,683,683,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(12,685,685,9,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
- "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
- "2881000',X'C2881000',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(13,687,687,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346',NULL,?,0);",
- "INSERT INTO 'metas' VALUES(14,692,692,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259',NULL,?,0);" };
-
- for (int i = 0; i < V80_ROW_COUNT; i++) {
- sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
- std::string ord = V81_Ordinal(i);
- s.BindBlob(0, ord.data(), ord.length());
- ASSERT_TRUE(s.Run());
- s.Reset(true);
- }
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion86Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',86);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB,"
- " transaction_version BIGINT default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,b"
- "ase_version bigint default -1,server_version bigint default 0,local_e"
- "xternal_id bigint default 0,transaction_version bigint default 0,mtim"
- "e bigint default 0,server_mtime bigint default 0,ctime bigint default"
- " 0,server_ctime bigint default 0,id varchar(255) default 'r',parent_i"
- "d varchar(255) default 'r',server_parent_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,is_del bi"
- "t default 0,is_dir bit default 0,server_is_dir bit default 0,server_i"
- "s_del bit default 0,non_unique_name varchar,server_non_unique_name va"
- "rchar(255),unique_server_tag varchar,unique_client_tag varchar,specif"
- "ics blob,server_specifics blob,base_server_specifics blob,server_uniq"
- "ue_position blob,unique_position blob,unique_bookmark_tag blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0,0,0,"
- META_PROTO_TIMES_VALS(1)
- ",'r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,"
- "X'',X'',NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(6,694,694,6,0,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9',0,0,0,1,1,0,'T"
- "he Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000',NULL,X'"
- "',X'',X'');"
- "INSERT INTO 'metas' VALUES(7,663,663,0,0,"
- META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r',0,0,0,1,1,0,'Google Chro"
- "me','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(8,664,664,0,0,"
- META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7',0,0,0,1,1,0,'B"
- "ookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2"
- "881000',NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(9,665,665,1,0,"
- META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8',0,0,0,1,1,0,'B"
- "ookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C288100"
- "0',NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(10,666,666,2,0,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,X'C2881000"
- "',X'C2881000',NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(11,683,683,8,0,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,NU"
- "LL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1206"
- "414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756D2E6"
- "F72672F6F7468657212084146414756415346',NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(12,685,685,9,0,"
- META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',X'C2881000'"
- ",NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(13,687,687,10,0,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6',0,0,0,0,0,0"
- ",'ICANN | Internet Corporation for Assigned Names and Numbers','ICANN"
- " | Internet Corporation for Assigned Names and Numbers',NULL,NULL,X'C"
- "28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504E47415846"
- "3041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
- "20744414146415346',NULL,X'',X'',X'');"
- "INSERT INTO 'metas' VALUES(14,692,692,11,0,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6',0,0,0,0,0,0"
- ",'The WebKit Open Source Project','The WebKit Open Source Project',NU"
- "LL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E475"
- "8',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259"
- "',NULL,X'',X'',X'');"
- "CREATE TABLE deleted_metas (metahandle bigint primary key ON CONFLICT"
- " FAIL,base_version bigint default -1,server_version bigint default 0,"
- "local_external_id bigint default 0,transaction_version bigint default"
- " 0,mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defau"
- "lt 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,s"
- "erver_is_del bit default 0,non_unique_name varchar,server_non_unique_"
- "name varchar(255),unique_server_tag varchar,unique_client_tag varchar"
- ",specifics blob,server_specifics blob,base_server_specifics blob,serv"
- "er_unique_position blob,unique_position blob,unique_bookmark_tag blob"
- ");"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birt"
- "hday TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
- "fault -2, cache_guid TEXT, notification_state BLOB, bag_of_chips BLOB"
- ");"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.or"
- "g','c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-13107"
- "8,'9010788312004066376x-6609234393368420856x',NULL,NULL);"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
-
- SetUpVersion67Database(&connection);
-
- // Columns existing before version 67.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "name"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "unsanitized_name"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "server_name"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion67To68());
- ASSERT_EQ(68, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion68To69) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion68Database(&connection);
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion68To69());
- ASSERT_EQ(69, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "specifics"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "server_specifics"));
- sql::Statement s(connection.GetUniqueStatement("SELECT non_unique_name,"
- "is_del, is_dir, id, specifics, server_specifics FROM metas "
- "WHERE metahandle = 2"));
- ASSERT_TRUE(s.Step());
- ASSERT_EQ("Deleted Item", s.ColumnString(0));
- ASSERT_TRUE(s.ColumnBool(1));
- ASSERT_FALSE(s.ColumnBool(2));
- ASSERT_EQ("s_ID_2", s.ColumnString(3));
- sync_pb::EntitySpecifics specifics;
- specifics.ParseFromArray(s.ColumnBlob(4), s.ColumnByteLength(4));
- ASSERT_TRUE(specifics.has_bookmark());
- ASSERT_EQ("http://www.google.com/", specifics.bookmark().url());
- ASSERT_EQ("AASGASGA", specifics.bookmark().favicon());
- specifics.ParseFromArray(s.ColumnBlob(5), s.ColumnByteLength(5));
- ASSERT_TRUE(specifics.has_bookmark());
- ASSERT_EQ("http://www.google.com/2", specifics.bookmark().url());
- ASSERT_EQ("ASADGADGADG", specifics.bookmark().favicon());
- ASSERT_FALSE(s.Step());
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion69To70) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion69Database(&connection);
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "singleton_tag"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_server_tag"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_client_tag"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion69To70());
- ASSERT_EQ(70, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
- }
-
- EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
- EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
- sql::Statement s(connection.GetUniqueStatement("SELECT id"
- " FROM metas WHERE unique_server_tag = 'google_chrome'"));
- ASSERT_TRUE(s.Step());
- EXPECT_EQ("s_ID_7", s.ColumnString(0));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion70To71) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion70Database(&connection);
-
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "last_sync_timestamp"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "initial_sync_ended"));
- ASSERT_FALSE(connection.DoesTableExist("models"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion70To71());
- ASSERT_EQ(71, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_FALSE(connection.DoesColumnExist("share_info", "last_sync_timestamp"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info", "initial_sync_ended"));
- ASSERT_TRUE(connection.DoesTableExist("models"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "initial_sync_ended"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "last_download_timestamp"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "model_id"));
-
- sql::Statement s(connection.GetUniqueStatement("SELECT model_id, "
- "initial_sync_ended, last_download_timestamp FROM models"));
- ASSERT_TRUE(s.Step());
- std::string model_id = s.ColumnString(0);
- EXPECT_EQ("C2881000", base::HexEncode(model_id.data(), model_id.size()))
- << "Model ID is expected to be the empty BookmarkSpecifics proto.";
- EXPECT_TRUE(s.ColumnBool(1));
- EXPECT_EQ(694, s.ColumnInt64(2));
- ASSERT_FALSE(s.Step());
-}
-
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion71To72) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion71Database(&connection);
-
- ASSERT_TRUE(connection.DoesTableExist("extended_attributes"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion71To72());
- ASSERT_EQ(72, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_FALSE(connection.DoesTableExist("extended_attributes"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion72To73) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion72Database(&connection);
-
- ASSERT_FALSE(connection.DoesColumnExist("share_info", "notification_state"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion72To73());
- ASSERT_EQ(73, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "notification_state"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion73To74) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion73Database(&connection);
-
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
-
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion73To74());
- ASSERT_EQ(74, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
-
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion74To75) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion74Database(&connection);
-
- ASSERT_FALSE(connection.DoesColumnExist("models", "progress_marker"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "last_download_timestamp"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion74To75());
- ASSERT_EQ(75, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("models", "progress_marker"));
- ASSERT_FALSE(connection.DoesColumnExist("models", "last_download_timestamp"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion75To76) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion75Database(&connection);
-
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion75To76());
- ASSERT_EQ(76, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
- // Cannot actual refresh columns due to version 76 not containing all
- // necessary columns.
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion76To77) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion76Database(&connection);
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
-
- EXPECT_EQ(GetExpectedLegacyMetaProtoTimes(INCLUDE_DELETED_ITEMS),
- GetMetaProtoTimes(dbs->db_.get()));
- // Since the proto times are expected to be in a legacy format, they may not
- // be compatible with ProtoTimeToTime, so we don't call ExpectTimes().
-
- ASSERT_TRUE(dbs->MigrateVersion76To77());
- ASSERT_EQ(77, dbs->GetVersion());
-
- EXPECT_EQ(GetExpectedMetaProtoTimes(INCLUDE_DELETED_ITEMS),
- GetMetaProtoTimes(dbs->db_.get()));
- // Cannot actually load entries due to version 77 not having all required
- // columns.
- ASSERT_FALSE(dbs->needs_column_refresh_);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion77To78) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion77Database(&connection);
-
- ASSERT_FALSE(connection.DoesColumnExist("metas", "BASE_SERVER_SPECIFICS"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion77To78());
- ASSERT_EQ(78, dbs->GetVersion());
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion78To79) {
- const int kInitialNextId = -65542;
-
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion78Database(&connection);
-
- // Double-check the original next_id is what we think it is.
- sql::Statement s(connection.GetUniqueStatement(
- "SELECT next_id FROM share_info"));
- s.Step();
- ASSERT_EQ(kInitialNextId, s.ColumnInt(0));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion78To79());
- ASSERT_EQ(79, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
-
- // Ensure the next_id has been incremented.
- Directory::MetahandlesMap handles_map;
- JournalIndex delete_journals;;
- STLValueDeleter<Directory::MetahandlesMap> deleter(&handles_map);
- Directory::KernelLoadInfo load_info;
-
- s.Clear();
- ASSERT_TRUE(dbs->Load(&handles_map, &delete_journals, &load_info));
- EXPECT_LE(load_info.kernel_info.next_id, kInitialNextId - 65536);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion79To80) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion79Database(&connection);
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion79To80());
- ASSERT_EQ(80, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
-
- // Ensure the bag_of_chips has been set.
- Directory::MetahandlesMap handles_map;
- JournalIndex delete_journals;;
- STLValueDeleter<Directory::MetahandlesMap> deleter(&handles_map);
- Directory::KernelLoadInfo load_info;
-
- ASSERT_TRUE(dbs->Load(&handles_map, &delete_journals, &load_info));
- // Check that the initial value is the serialization of an empty ChipBag.
- sync_pb::ChipBag chip_bag;
- std::string serialized_chip_bag;
- ASSERT_TRUE(chip_bag.SerializeToString(&serialized_chip_bag));
- EXPECT_EQ(serialized_chip_bag, load_info.kernel_info.bag_of_chips);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion80To81) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion80Database(&connection);
-
- sql::Statement s(connection.GetUniqueStatement(
- "SELECT metahandle, server_position_in_parent "
- "FROM metas WHERE unique_server_tag = 'google_chrome'"));
- ASSERT_TRUE(s.Step());
- ASSERT_EQ(sql::COLUMN_TYPE_INTEGER, s.ColumnType(1));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_TRUE(dbs->MigrateVersion80To81());
- ASSERT_EQ(81, dbs->GetVersion());
-
- // Test that ordinal values are preserved correctly.
- sql::Statement new_s(connection.GetUniqueStatement(
- "SELECT metahandle, server_ordinal_in_parent "
- "FROM metas WHERE unique_server_tag = 'google_chrome'"));
- ASSERT_TRUE(new_s.Step());
- ASSERT_EQ(sql::COLUMN_TYPE_BLOB, new_s.ColumnType(1));
-
- std::string expected_ordinal = Int64ToNodeOrdinal(1048576).ToInternalValue();
- std::string actual_ordinal;
- new_s.ColumnBlobAsString(1, &actual_ordinal);
- ASSERT_EQ(expected_ordinal, actual_ordinal);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion81To82) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion81Database(&connection);
- ASSERT_FALSE(connection.DoesColumnExist("models", "transaction_version"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion81To82());
- ASSERT_EQ(82, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
-
- ASSERT_TRUE(connection.DoesColumnExist("models", "transaction_version"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion82To83) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion82Database(&connection);
- ASSERT_FALSE(connection.DoesColumnExist("metas", "transaction_version"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_TRUE(dbs->MigrateVersion82To83());
- ASSERT_EQ(83, dbs->GetVersion());
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "transaction_version"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion83To84) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion83Database(&connection);
- ASSERT_FALSE(connection.DoesTableExist("deleted_metas"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_TRUE(dbs->MigrateVersion83To84());
- ASSERT_EQ(84, dbs->GetVersion());
-
- ASSERT_TRUE(connection.DoesTableExist("deleted_metas"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion84To85) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion84Database(&connection);
- ASSERT_TRUE(connection.DoesColumnExist("models", "initial_sync_ended"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_TRUE(dbs->MigrateVersion84To85());
- ASSERT_EQ(85, dbs->GetVersion());
- ASSERT_FALSE(connection.DoesColumnExist("models", "initial_sync_ended"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion85To86) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion85Database(&connection);
- EXPECT_TRUE(connection.DoesColumnExist("metas", "next_id"));
- EXPECT_TRUE(connection.DoesColumnExist("metas", "prev_id"));
- EXPECT_TRUE(connection.DoesColumnExist("metas", "server_ordinal_in_parent"));
- EXPECT_FALSE(connection.DoesColumnExist("metas", "unique_position"));
- EXPECT_FALSE(connection.DoesColumnExist("metas", "server_unique_position"));
- EXPECT_FALSE(connection.DoesColumnExist("metas", "unique_bookmark_tag"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_TRUE(dbs->MigrateVersion85To86());
- EXPECT_EQ(86, dbs->GetVersion());
- EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_position"));
- EXPECT_TRUE(connection.DoesColumnExist("metas", "server_unique_position"));
- EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_bookmark_tag"));
- ASSERT_TRUE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->RefreshColumns());
- EXPECT_FALSE(connection.DoesColumnExist("metas", "next_id"));
- EXPECT_FALSE(connection.DoesColumnExist("metas", "prev_id"));
- EXPECT_FALSE(connection.DoesColumnExist("metas", "server_ordinal_in_parent"));
-
- {
- Directory::MetahandlesMap handles_map;
- STLValueDeleter<Directory::MetahandlesMap> deleter(&handles_map);
- dbs->LoadEntries(&handles_map);
-
- // Grab a bookmark and examine it.
- Directory::MetahandlesMap::iterator i = handles_map.find(5);
- ASSERT_FALSE(i == handles_map.end());
- EntryKernel* bm = i->second;
- ASSERT_EQ(bm->ref(ID).value(), "s_ID_5");
-
- EXPECT_TRUE(bm->ref(UNIQUE_POSITION).IsValid());
- EXPECT_TRUE(bm->ref(SERVER_UNIQUE_POSITION).IsValid());
- EXPECT_EQ(UniquePosition::kSuffixLength,
- bm->ref(UNIQUE_BOOKMARK_TAG).length());
-
- // Grab a non-bookmark and examine it.
- Directory::MetahandlesMap::iterator j = handles_map.find(1);
-
- ASSERT_FALSE(j == handles_map.end());
- EntryKernel* root = j->second;
- ASSERT_EQ(root->ref(ID).value(), "r");
-
- EXPECT_FALSE(root->ref(UNIQUE_POSITION).IsValid());
- EXPECT_FALSE(root->ref(SERVER_UNIQUE_POSITION).IsValid());
- EXPECT_TRUE(root->ref(UNIQUE_BOOKMARK_TAG).empty());
-
- // Make sure we didn't mistake the bookmark root node for a real bookmark.
- Directory::MetahandlesMap::iterator k = handles_map.find(8);
- ASSERT_FALSE(k == handles_map.end());
- EntryKernel* bm_root = k->second;
- ASSERT_EQ(bm_root->ref(ID).value(), "s_ID_8");
- ASSERT_EQ(bm_root->ref(UNIQUE_SERVER_TAG), "google_chrome_bookmarks");
-
- EXPECT_FALSE(bm_root->ref(UNIQUE_POSITION).IsValid());
- EXPECT_FALSE(bm_root->ref(SERVER_UNIQUE_POSITION).IsValid());
- EXPECT_TRUE(bm_root->ref(UNIQUE_BOOKMARK_TAG).empty());
-
- // Make sure we didn't assign positions to server-created folders, either.
- Directory::MetahandlesMap::iterator l = handles_map.find(10);
- ASSERT_FALSE(l == handles_map.end());
- EntryKernel* perm_folder = l->second;
- ASSERT_EQ(perm_folder->ref(ID).value(), "s_ID_10");
- ASSERT_EQ(perm_folder->ref(UNIQUE_SERVER_TAG), "other_bookmarks");
-
- EXPECT_FALSE(perm_folder->ref(UNIQUE_POSITION).IsValid());
- EXPECT_FALSE(perm_folder->ref(SERVER_UNIQUE_POSITION).IsValid());
- EXPECT_TRUE(perm_folder->ref(UNIQUE_BOOKMARK_TAG).empty());
-
- // Make sure that the syncable::Directory and the migration code agree on
- // which items should or should not have unique position values. This test
- // may become obsolete if the directory's definition of that function
- // changes, but, until then, this is a useful test.
- for (Directory::MetahandlesMap::iterator it = handles_map.begin();
- it != handles_map.end(); it++) {
- SCOPED_TRACE(it->second->ref(ID));
- if (it->second->ShouldMaintainPosition()) {
- EXPECT_TRUE(it->second->ref(UNIQUE_POSITION).IsValid());
- EXPECT_TRUE(it->second->ref(SERVER_UNIQUE_POSITION).IsValid());
- EXPECT_FALSE(it->second->ref(UNIQUE_BOOKMARK_TAG).empty());
- } else {
- EXPECT_FALSE(it->second->ref(UNIQUE_POSITION).IsValid());
- EXPECT_FALSE(it->second->ref(SERVER_UNIQUE_POSITION).IsValid());
- EXPECT_TRUE(it->second->ref(UNIQUE_BOOKMARK_TAG).empty());
- }
- }
- }
-}
-
-TEST_F(DirectoryBackingStoreTest, DetectInvalidPosition) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion86Database(&connection);
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_EQ(86, dbs->GetVersion());
-
- // Insert row with bad position.
- sql::Statement s(connection.GetUniqueStatement(
- "INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime,"
- " unique_position, server_unique_position) "
- "VALUES('c-invalid', 9999, 1, 0, 0, 'BAD_POS', 'BAD_POS')"));
- ASSERT_TRUE(s.Run());
-
- // Trying to unpack this entry should signal that the DB is corrupted.
- Directory::MetahandlesMap handles_map;
- JournalIndex delete_journals;;
- STLValueDeleter<Directory::MetahandlesMap> deleter(&handles_map);
- Directory::KernelLoadInfo kernel_load_info;
- ASSERT_EQ(FAILED_DATABASE_CORRUPT,
- dbs->Load(&handles_map, &delete_journals, &kernel_load_info));
-}
-
-TEST_P(MigrationTest, ToCurrentVersion) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- switch (GetParam()) {
- case 67:
- SetUpVersion67Database(&connection);
- break;
- case 68:
- SetUpVersion68Database(&connection);
- break;
- case 69:
- SetUpVersion69Database(&connection);
- break;
- case 70:
- SetUpVersion70Database(&connection);
- break;
- case 71:
- SetUpVersion71Database(&connection);
- break;
- case 72:
- SetUpVersion72Database(&connection);
- break;
- case 73:
- SetUpVersion73Database(&connection);
- break;
- case 74:
- SetUpVersion74Database(&connection);
- break;
- case 75:
- SetUpVersion75Database(&connection);
- break;
- case 76:
- SetUpVersion76Database(&connection);
- break;
- case 77:
- SetUpVersion77Database(&connection);
- break;
- case 78:
- SetUpVersion78Database(&connection);
- break;
- case 79:
- SetUpVersion79Database(&connection);
- break;
- case 80:
- SetUpVersion80Database(&connection);
- break;
- case 81:
- SetUpVersion81Database(&connection);
- break;
- case 82:
- SetUpVersion82Database(&connection);
- break;
- case 83:
- SetUpVersion83Database(&connection);
- break;
- case 84:
- SetUpVersion84Database(&connection);
- break;
- case 85:
- SetUpVersion85Database(&connection);
- break;
- case 86:
- SetUpVersion86Database(&connection);
- break;
- default:
- // If you see this error, it may mean that you've increased the
- // database version number but you haven't finished adding unit tests
- // for the database migration code. You need to need to supply a
- // SetUpVersionXXDatabase function with a dump of the test database
- // at the old schema. Here's one way to do that:
- // 1. Start on a clean tree (with none of your pending schema changes).
- // 2. Set a breakpoint in this function and run the unit test.
- // 3. Allow this test to run to completion (step out of the call),
- // without allowing ~MigrationTest to execute.
- // 4. Examine this->temp_dir_ to determine the location of the
- // test database (it is currently of the version you need).
- // 5. Dump this using the sqlite3 command line tool:
- // > .output foo_dump.sql
- // > .dump
- // 6. Replace the timestamp columns with META_PROTO_TIMES(x) (or
- // LEGACY_META_PROTO_TIMES(x) if before Version 77).
- FAIL() << "Need to supply database dump for version " << GetParam();
- }
-
- syncable::Directory::KernelLoadInfo dir_info;
- Directory::MetahandlesMap handles_map;
- JournalIndex delete_journals;;
- STLValueDeleter<Directory::MetahandlesMap> index_deleter(&handles_map);
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_EQ(OPENED, dbs->Load(&handles_map, &delete_journals, &dir_info));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
- }
-
- // Columns deleted in Version 67.
- ASSERT_FALSE(connection.DoesColumnExist("metas", "name"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "unsanitized_name"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "server_name"));
-
- // Columns added in Version 68.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "specifics"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "server_specifics"));
-
- // Columns deleted in Version 68.
- ASSERT_FALSE(connection.DoesColumnExist("metas", "is_bookmark_object"));
- ASSERT_FALSE(connection.DoesColumnExist("metas",
- "server_is_bookmark_object"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_favicon"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_url"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "server_bookmark_url"));
-
- // Renamed a column in Version 70
- ASSERT_FALSE(connection.DoesColumnExist("metas", "singleton_tag"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
-
- // Removed extended attributes in Version 72.
- ASSERT_FALSE(connection.DoesTableExist("extended_attributes"));
-
- // Columns added in Version 73.
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "notification_state"));
-
- // Column replaced in version 75.
- ASSERT_TRUE(connection.DoesColumnExist("models", "progress_marker"));
- ASSERT_FALSE(connection.DoesColumnExist("models", "last_download_timestamp"));
-
- // Columns removed in version 76.
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-
- // Column added in version 78.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
-
- // Column added in version 82.
- ASSERT_TRUE(connection.DoesColumnExist("models", "transaction_version"));
-
- // Column added in version 83.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "transaction_version"));
-
- // Table added in version 84.
- ASSERT_TRUE(connection.DoesTableExist("deleted_metas"));
-
- // Column removed in version 85.
- ASSERT_FALSE(connection.DoesColumnExist("models", "initial_sync_ended"));
-
- // Check download_progress state (v75 migration)
- ASSERT_EQ(694,
- dir_info.kernel_info.download_progress[BOOKMARKS]
- .timestamp_token_for_migration());
- ASSERT_FALSE(
- dir_info.kernel_info.download_progress[BOOKMARKS]
- .has_token());
- ASSERT_EQ(32904,
- dir_info.kernel_info.download_progress[BOOKMARKS]
- .data_type_id());
- ASSERT_FALSE(
- dir_info.kernel_info.download_progress[THEMES]
- .has_timestamp_token_for_migration());
- ASSERT_TRUE(
- dir_info.kernel_info.download_progress[THEMES]
- .has_token());
- ASSERT_TRUE(
- dir_info.kernel_info.download_progress[THEMES]
- .token().empty());
- ASSERT_EQ(41210,
- dir_info.kernel_info.download_progress[THEMES]
- .data_type_id());
-
- // Check metas
- EXPECT_EQ(GetExpectedMetaProtoTimes(DONT_INCLUDE_DELETED_ITEMS),
- GetMetaProtoTimes(&connection));
- ExpectTimes(handles_map, GetExpectedMetaTimes());
-
- Directory::MetahandlesMap::iterator it = handles_map.find(1);
- ASSERT_TRUE(it != handles_map.end());
- ASSERT_EQ(1, it->second->ref(META_HANDLE));
- EXPECT_TRUE(it->second->ref(ID).IsRoot());
-
- it = handles_map.find(6);
- ASSERT_EQ(6, it->second->ref(META_HANDLE));
- EXPECT_TRUE(it->second->ref(IS_DIR));
- EXPECT_TRUE(it->second->ref(SERVER_IS_DIR));
- EXPECT_FALSE(
- it->second->ref(SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- it->second->ref(SERVER_SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- it->second->ref(SPECIFICS).bookmark().has_favicon());
- EXPECT_FALSE(it->second->ref(SERVER_SPECIFICS).bookmark().has_favicon());
-
- it = handles_map.find(7);
- ASSERT_EQ(7, it->second->ref(META_HANDLE));
- EXPECT_EQ("google_chrome", it->second->ref(UNIQUE_SERVER_TAG));
- EXPECT_FALSE(it->second->ref(SPECIFICS).has_bookmark());
- EXPECT_FALSE(it->second->ref(SERVER_SPECIFICS).has_bookmark());
-
- it = handles_map.find(8);
- ASSERT_EQ(8, it->second->ref(META_HANDLE));
- EXPECT_EQ("google_chrome_bookmarks", it->second->ref(UNIQUE_SERVER_TAG));
- EXPECT_TRUE(it->second->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE(it->second->ref(SERVER_SPECIFICS).has_bookmark());
-
- it = handles_map.find(9);
- ASSERT_EQ(9, it->second->ref(META_HANDLE));
- EXPECT_EQ("bookmark_bar", it->second->ref(UNIQUE_SERVER_TAG));
- EXPECT_TRUE(it->second->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE(it->second->ref(SERVER_SPECIFICS).has_bookmark());
-
- it = handles_map.find(10);
- ASSERT_EQ(10, it->second->ref(META_HANDLE));
- EXPECT_FALSE(it->second->ref(IS_DEL));
- EXPECT_TRUE(it->second->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE(it->second->ref(SERVER_SPECIFICS).has_bookmark());
- EXPECT_FALSE(it->second->ref(SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- it->second->ref(SPECIFICS).bookmark().has_favicon());
- EXPECT_FALSE(
- it->second->ref(SERVER_SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(it->second->ref(SERVER_SPECIFICS).bookmark().has_favicon());
- EXPECT_EQ("other_bookmarks", it->second->ref(UNIQUE_SERVER_TAG));
- EXPECT_EQ("Other Bookmarks", it->second->ref(NON_UNIQUE_NAME));
- EXPECT_EQ("Other Bookmarks", it->second->ref(SERVER_NON_UNIQUE_NAME));
-
- it = handles_map.find(11);
- ASSERT_EQ(11, it->second->ref(META_HANDLE));
- EXPECT_FALSE(it->second->ref(IS_DEL));
- EXPECT_FALSE(it->second->ref(IS_DIR));
- EXPECT_TRUE(it->second->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE(it->second->ref(SERVER_SPECIFICS).has_bookmark());
- EXPECT_EQ("http://dev.chromium.org/",
- it->second->ref(SPECIFICS).bookmark().url());
- EXPECT_EQ("AGATWA",
- it->second->ref(SPECIFICS).bookmark().favicon());
- EXPECT_EQ("http://dev.chromium.org/other",
- it->second->ref(SERVER_SPECIFICS).bookmark().url());
- EXPECT_EQ("AFAGVASF",
- it->second->ref(SERVER_SPECIFICS).bookmark().favicon());
- EXPECT_EQ("", it->second->ref(UNIQUE_SERVER_TAG));
- EXPECT_EQ("Home (The Chromium Projects)", it->second->ref(NON_UNIQUE_NAME));
- EXPECT_EQ("Home (The Chromium Projects)",
- it->second->ref(SERVER_NON_UNIQUE_NAME));
-
- it = handles_map.find(12);
- ASSERT_EQ(12, it->second->ref(META_HANDLE));
- EXPECT_FALSE(it->second->ref(IS_DEL));
- EXPECT_TRUE(it->second->ref(IS_DIR));
- EXPECT_EQ("Extra Bookmarks", it->second->ref(NON_UNIQUE_NAME));
- EXPECT_EQ("Extra Bookmarks", it->second->ref(SERVER_NON_UNIQUE_NAME));
- EXPECT_TRUE(it->second->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE(it->second->ref(SERVER_SPECIFICS).has_bookmark());
- EXPECT_FALSE(
- it->second->ref(SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- it->second->ref(SERVER_SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- it->second->ref(SPECIFICS).bookmark().has_favicon());
- EXPECT_FALSE(it->second->ref(SERVER_SPECIFICS).bookmark().has_favicon());
-
- it = handles_map.find(13);
- ASSERT_EQ(13, it->second->ref(META_HANDLE));
-
- it = handles_map.find(14);
- ASSERT_EQ(14, it->second->ref(META_HANDLE));
-
- ASSERT_EQ(static_cast<size_t>(10), handles_map.size());
-}
-
-INSTANTIATE_TEST_CASE_P(DirectoryBackingStore, MigrationTest,
- testing::Range(67, kCurrentDBVersion + 1));
-
-TEST_F(DirectoryBackingStoreTest, ModelTypeIds) {
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- std::string model_id =
- TestDirectoryBackingStore::ModelTypeEnumToModelId(iter.Get());
- EXPECT_EQ(iter.Get(),
- TestDirectoryBackingStore::ModelIdToModelTypeEnum(model_id.data(),
- model_id.size()));
- }
-}
-
-namespace {
-
-class OnDiskDirectoryBackingStoreForTest : public OnDiskDirectoryBackingStore {
- public:
- OnDiskDirectoryBackingStoreForTest(const std::string& dir_name,
- const base::FilePath& backing_filepath);
- virtual ~OnDiskDirectoryBackingStoreForTest();
- bool DidFailFirstOpenAttempt();
-
- protected:
- virtual void ReportFirstTryOpenFailure() OVERRIDE;
-
- private:
- bool first_open_failed_;
-};
-
-OnDiskDirectoryBackingStoreForTest::OnDiskDirectoryBackingStoreForTest(
- const std::string& dir_name,
- const base::FilePath& backing_filepath) :
- OnDiskDirectoryBackingStore(dir_name, backing_filepath),
- first_open_failed_(false) { }
-
-OnDiskDirectoryBackingStoreForTest::~OnDiskDirectoryBackingStoreForTest() { }
-
-void OnDiskDirectoryBackingStoreForTest::ReportFirstTryOpenFailure() {
- // Do nothing, just like we would in release-mode. In debug mode, we DCHECK.
- first_open_failed_ = true;
-}
-
-bool OnDiskDirectoryBackingStoreForTest::DidFailFirstOpenAttempt() {
- return first_open_failed_;
-}
-
-} // namespace
-
-// This is a whitebox test intended to exercise the code path where the on-disk
-// directory load code decides to delete the current directory and start fresh.
-//
-// This is considered "minor" corruption because the database recreation is
-// expected to succeed. The alternative, where recreation does not succeed (ie.
-// due to read-only file system), is not tested here.
-TEST_F(DirectoryBackingStoreTest, MinorCorruption) {
- {
- scoped_ptr<OnDiskDirectoryBackingStore> dbs(
- new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
- EXPECT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
- }
-
- // Corrupt the root node.
- {
- sql::Connection connection;
- ASSERT_TRUE(connection.Open(GetDatabasePath()));
- ASSERT_TRUE(connection.Execute(
- "UPDATE metas SET parent_id='bogus' WHERE id = 'r';"));
- }
-
- {
- scoped_ptr<OnDiskDirectoryBackingStoreForTest> dbs(
- new OnDiskDirectoryBackingStoreForTest(GetUsername(),
- GetDatabasePath()));
-
- EXPECT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
- EXPECT_TRUE(dbs->DidFailFirstOpenAttempt());
- }
-}
-
-TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
-
- SetUpCurrentDatabaseAndCheckVersion(&connection);
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- Directory::MetahandlesMap handles_map;
- JournalIndex delete_journals;
- Directory::KernelLoadInfo kernel_load_info;
- STLValueDeleter<Directory::MetahandlesMap> index_deleter(&handles_map);
-
- dbs->Load(&handles_map, &delete_journals, &kernel_load_info);
- size_t initial_size = handles_map.size();
- ASSERT_LT(0U, initial_size) << "Test requires handles_map to delete.";
- int64 first_to_die = handles_map.begin()->second->ref(META_HANDLE);
- MetahandleSet to_delete;
- to_delete.insert(first_to_die);
- EXPECT_TRUE(dbs->DeleteEntries(TestDirectoryBackingStore::METAS_TABLE,
- to_delete));
-
- STLDeleteValues(&handles_map);
- dbs->LoadEntries(&handles_map);
-
- EXPECT_EQ(initial_size - 1, handles_map.size());
- bool delete_failed = false;
- for (Directory::MetahandlesMap::iterator it = handles_map.begin();
- it != handles_map.end(); ++it) {
- if (it->first == first_to_die) {
- delete_failed = true;
- break;
- }
- }
- EXPECT_FALSE(delete_failed);
-
- to_delete.clear();
- for (Directory::MetahandlesMap::iterator it = handles_map.begin();
- it != handles_map.end(); ++it) {
- to_delete.insert(it->first);
- }
-
- EXPECT_TRUE(dbs->DeleteEntries(TestDirectoryBackingStore::METAS_TABLE,
- to_delete));
-
- STLDeleteValues(&handles_map);
- dbs->LoadEntries(&handles_map);
- EXPECT_EQ(0U, handles_map.size());
-}
-
-TEST_F(DirectoryBackingStoreTest, GenerateCacheGUID) {
- const std::string& guid1 = TestDirectoryBackingStore::GenerateCacheGUID();
- const std::string& guid2 = TestDirectoryBackingStore::GenerateCacheGUID();
- EXPECT_EQ(24U, guid1.size());
- EXPECT_EQ(24U, guid2.size());
- // In theory this test can fail, but it won't before the universe
- // dies of heat death.
- EXPECT_NE(guid1, guid2);
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/directory_change_delegate.h b/chromium/sync/syncable/directory_change_delegate.h
deleted file mode 100644
index fb76450f2b3..00000000000
--- a/chromium/sync/syncable/directory_change_delegate.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
-#define SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/write_transaction_info.h"
-
-namespace syncer {
-namespace syncable {
-
-// This is an interface for listening to directory change events, triggered by
-// the releasing of the syncable transaction. The delegate performs work to
-// 1. Calculate changes, depending on the source of the transaction
-// (HandleCalculateChangesChangeEventFromSyncer/Syncapi).
-// 2. Perform final work while the transaction is held
-// (HandleTransactionEndingChangeEvent).
-// 3. Perform any work that should be done after the transaction is released.
-// (HandleTransactionCompleteChangeEvent).
-//
-// Note that these methods may be called on *any* thread.
-class SYNC_EXPORT_PRIVATE DirectoryChangeDelegate {
- public:
- // Returns the handles of changed entries in |entry_changed|.
- virtual void HandleCalculateChangesChangeEventFromSyncApi(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- BaseTransaction* trans,
- std::vector<int64>* entries_changed) = 0;
- // Returns the handles of changed entries in |entry_changed|.
- virtual void HandleCalculateChangesChangeEventFromSyncer(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- BaseTransaction* trans,
- std::vector<int64>* entries_changed) = 0;
- // Must return the set of all ModelTypes that were modified in the
- // transaction.
- virtual ModelTypeSet HandleTransactionEndingChangeEvent(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- BaseTransaction* trans) = 0;
- virtual void HandleTransactionCompleteChangeEvent(
- ModelTypeSet models_with_changes) = 0;
- protected:
- virtual ~DirectoryChangeDelegate() {}
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
diff --git a/chromium/sync/syncable/entry.cc b/chromium/sync/syncable/entry.cc
deleted file mode 100644
index 852c33e32ed..00000000000
--- a/chromium/sync/syncable/entry.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/entry.h"
-
-#include <iomanip>
-
-#include "base/json/string_escape.h"
-#include "base/strings/string_util.h"
-#include "sync/syncable/blob.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/syncable_base_transaction.h"
-#include "sync/syncable/syncable_columns.h"
-
-using std::string;
-
-namespace syncer {
-namespace syncable {
-
-Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryById(id);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByClientTag, const string& tag)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByClientTag(tag);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByServerTag, const string& tag)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByServerTag(tag);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByHandle(metahandle);
-}
-
-Directory* Entry::dir() const {
- return basetrans_->directory();
-}
-
-base::DictionaryValue* Entry::ToValue(Cryptographer* cryptographer) const {
- base::DictionaryValue* entry_info = new base::DictionaryValue();
- entry_info->SetBoolean("good", good());
- if (good()) {
- entry_info->Set("kernel", kernel_->ToValue(cryptographer));
- entry_info->Set("modelType",
- ModelTypeToValue(GetModelType()));
- entry_info->SetBoolean("existsOnClientBecauseNameIsNonEmpty",
- ExistsOnClientBecauseNameIsNonEmpty());
- entry_info->SetBoolean("isRoot", IsRoot());
- }
- return entry_info;
-}
-
-ModelType Entry::GetServerModelType() const {
- ModelType specifics_type = kernel_->GetServerModelType();
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
-
- // Otherwise, we don't have a server type yet. That should only happen
- // if the item is an uncommitted locally created item.
- // It's possible we'll need to relax these checks in the future; they're
- // just here for now as a safety measure.
- DCHECK(GetIsUnsynced());
- DCHECK_EQ(GetServerVersion(), 0);
- DCHECK(GetServerIsDel());
- // Note: can't enforce !GetId().ServerKnows() here because that could
- // actually happen if we hit AttemptReuniteLostCommitResponses.
- return UNSPECIFIED;
-}
-
-ModelType Entry::GetModelType() const {
- ModelType specifics_type = GetModelTypeFromSpecifics(GetSpecifics());
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
- if (IsRoot())
- return TOP_LEVEL_FOLDER;
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!GetUniqueServerTag().empty() && GetIsDir())
- return TOP_LEVEL_FOLDER;
-
- return UNSPECIFIED;
-}
-
-Id Entry::GetPredecessorId() const {
- return dir()->GetPredecessorId(kernel_);
-}
-
-Id Entry::GetSuccessorId() const {
- return dir()->GetSuccessorId(kernel_);
-}
-
-Id Entry::GetFirstChildId() const {
- return dir()->GetFirstChildId(basetrans_, kernel_);
-}
-
-void Entry::GetChildHandles(std::vector<int64>* result) const {
- dir()->GetChildHandlesById(basetrans_, GetId(), result);
-}
-
-int Entry::GetTotalNodeCount() const {
- return dir()->GetTotalNodeCount(basetrans_, kernel_);
-}
-
-int Entry::GetPositionIndex() const {
- return dir()->GetPositionIndex(basetrans_, kernel_);
-}
-
-bool Entry::ShouldMaintainPosition() const {
- return kernel_->ShouldMaintainPosition();
-}
-
-std::ostream& operator<<(std::ostream& s, const Blob& blob) {
- for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
- s << std::hex << std::setw(2)
- << std::setfill('0') << static_cast<unsigned int>(*i);
- return s << std::dec;
-}
-
-std::ostream& operator<<(std::ostream& os, const Entry& entry) {
- int i;
- EntryKernel* const kernel = entry.kernel_;
- for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<Int64Field>(i)) << ", ";
- }
- for ( ; i < TIME_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << GetTimeDebugString(kernel->ref(static_cast<TimeField>(i))) << ", ";
- }
- for ( ; i < ID_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<IdField>(i)) << ", ";
- }
- os << "Flags: ";
- for ( ; i < BIT_FIELDS_END; ++i) {
- if (kernel->ref(static_cast<BitField>(i)))
- os << g_metas_columns[i].name << ", ";
- }
- for ( ; i < STRING_FIELDS_END; ++i) {
- const std::string& field = kernel->ref(static_cast<StringField>(i));
- os << g_metas_columns[i].name << ": " << field << ", ";
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- std::string escaped_str = base::EscapeBytesAsInvalidJSONString(
- kernel->ref(static_cast<ProtoField>(i)).SerializeAsString(),
- false);
- os << g_metas_columns[i].name << ": " << escaped_str << ", ";
- }
- for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<UniquePositionField>(i)).ToDebugString()
- << ", ";
- }
- os << "TempFlags: ";
- for ( ; i < BIT_TEMPS_END; ++i) {
- if (kernel->ref(static_cast<BitTemp>(i)))
- os << "#" << i - BIT_TEMPS_BEGIN << ", ";
- }
- return os;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/entry.h b/chromium/sync/syncable/entry.h
deleted file mode 100644
index 09ff9c7004f..00000000000
--- a/chromium/sync/syncable/entry.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_ENTRY_H_
-#define SYNC_SYNCABLE_ENTRY_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/entry_kernel.h"
-
-namespace syncer {
-class Cryptographer;
-class ReadNode;
-
-namespace syncable {
-
-class Directory;
-class BaseTransaction;
-
-// A read-only meta entry
-// Instead of:
-// Entry e = transaction.GetById(id);
-// use:
-// Entry e(transaction, GET_BY_ID, id);
-//
-// Why? The former would require a copy constructor, and it would be difficult
-// to enforce that an entry never outlived its transaction if there were a copy
-// constructor.
-enum GetById {
- GET_BY_ID
-};
-
-enum GetByClientTag {
- GET_BY_CLIENT_TAG
-};
-
-enum GetByServerTag {
- GET_BY_SERVER_TAG
-};
-
-enum GetByHandle {
- GET_BY_HANDLE
-};
-
-class SYNC_EXPORT Entry {
- public:
- // After constructing, you must check good() to test whether the Get
- // succeeded.
- Entry(BaseTransaction* trans, GetByHandle, int64 handle);
- Entry(BaseTransaction* trans, GetById, const Id& id);
- Entry(BaseTransaction* trans, GetByServerTag, const std::string& tag);
- Entry(BaseTransaction* trans, GetByClientTag, const std::string& tag);
-
- bool good() const { return 0 != kernel_; }
-
- BaseTransaction* trans() const { return basetrans_; }
-
- // Field accessors.
- int64 GetMetahandle() const {
- DCHECK(kernel_);
- return kernel_->ref(META_HANDLE);
- }
-
- int64 GetBaseVersion() const {
- DCHECK(kernel_);
- return kernel_->ref(BASE_VERSION);
- }
-
- int64 GetServerVersion() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_VERSION);
- }
-
- int64 GetLocalExternalId() const {
- DCHECK(kernel_);
- return kernel_->ref(LOCAL_EXTERNAL_ID);
- }
-
- int64 GetTransactionVersion() const {
- DCHECK(kernel_);
- return kernel_->ref(TRANSACTION_VERSION);
- }
-
- const base::Time& GetMtime() const {
- DCHECK(kernel_);
- return kernel_->ref(MTIME);
- }
-
- const base::Time& GetServerMtime() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_MTIME);
- }
-
- const base::Time& GetCtime() const {
- DCHECK(kernel_);
- return kernel_->ref(CTIME);
- }
-
- const base::Time& GetServerCtime() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_CTIME);
- }
-
- Id GetId() const {
- DCHECK(kernel_);
- return kernel_->ref(ID);
- }
-
- Id GetParentId() const {
- DCHECK(kernel_);
- return kernel_->ref(PARENT_ID);
- }
-
- Id GetServerParentId() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_PARENT_ID);
- }
-
- bool GetIsUnsynced() const {
- DCHECK(kernel_);
- return kernel_->ref(IS_UNSYNCED);
- }
-
- bool GetIsUnappliedUpdate() const {
- DCHECK(kernel_);
- return kernel_->ref(IS_UNAPPLIED_UPDATE);
- }
-
- bool GetIsDel() const {
- DCHECK(kernel_);
- return kernel_->ref(IS_DEL);
- }
-
- bool GetIsDir() const {
- DCHECK(kernel_);
- return kernel_->ref(IS_DIR);
- }
-
- bool GetServerIsDir() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_IS_DIR);
- }
-
- bool GetServerIsDel() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_IS_DEL);
- }
-
- const std::string& GetNonUniqueName() const {
- DCHECK(kernel_);
- return kernel_->ref(NON_UNIQUE_NAME);
- }
-
- const std::string& GetServerNonUniqueName() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_NON_UNIQUE_NAME);
- }
-
- const std::string& GetUniqueServerTag() const {
- DCHECK(kernel_);
- return kernel_->ref(UNIQUE_SERVER_TAG);
- }
-
- const std::string& GetUniqueClientTag() const {
- DCHECK(kernel_);
- return kernel_->ref(UNIQUE_CLIENT_TAG);
- }
-
- const std::string& GetUniqueBookmarkTag() const {
- DCHECK(kernel_);
- return kernel_->ref(UNIQUE_BOOKMARK_TAG);
- }
-
- const sync_pb::EntitySpecifics& GetSpecifics() const {
- DCHECK(kernel_);
- return kernel_->ref(SPECIFICS);
- }
-
- const sync_pb::EntitySpecifics& GetServerSpecifics() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_SPECIFICS);
- }
-
- const sync_pb::EntitySpecifics& GetBaseServerSpecifics() const {
- DCHECK(kernel_);
- return kernel_->ref(BASE_SERVER_SPECIFICS);
- }
-
- const UniquePosition& GetServerUniquePosition() const {
- DCHECK(kernel_);
- return kernel_->ref(SERVER_UNIQUE_POSITION);
- }
-
- const UniquePosition& GetUniquePosition() const {
- DCHECK(kernel_);
- return kernel_->ref(UNIQUE_POSITION);
- }
-
- bool GetSyncing() const {
- DCHECK(kernel_);
- return kernel_->ref(SYNCING);
- }
-
- ModelType GetServerModelType() const;
- ModelType GetModelType() const;
-
- Id GetPredecessorId() const;
- Id GetSuccessorId() const;
- Id GetFirstChildId() const;
- int GetTotalNodeCount() const;
-
- int GetPositionIndex() const;
-
- // Returns a vector of this node's children's handles.
- // Clears |result| if there are no children. If this node is of a type that
- // supports user-defined ordering then the resulting vector will be in the
- // proper order.
- void GetChildHandles(std::vector<int64>* result) const;
-
- inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
- DCHECK(kernel_);
- return !kernel_->ref(NON_UNIQUE_NAME).empty();
- }
-
- inline bool IsRoot() const {
- DCHECK(kernel_);
- return kernel_->ref(ID).IsRoot();
- }
-
- // Returns true if this is an entry that is expected to maintain a certain
- // sort ordering relative to its siblings under the same parent.
- bool ShouldMaintainPosition() const;
-
- Directory* dir() const;
-
- const EntryKernel GetKernelCopy() const {
- return *kernel_;
- }
-
- // Dumps all entry info into a DictionaryValue and returns it.
- // Transfers ownership of the DictionaryValue to the caller.
- base::DictionaryValue* ToValue(Cryptographer* cryptographer) const;
-
- protected: // Don't allow creation on heap, except by sync API wrappers.
- void* operator new(size_t size) { return (::operator new)(size); }
-
- inline explicit Entry(BaseTransaction* trans)
- : basetrans_(trans),
- kernel_(NULL) { }
-
- protected:
- BaseTransaction* const basetrans_;
-
- EntryKernel* kernel_;
-
- private:
- friend class Directory;
- friend class syncer::ReadNode;
- friend std::ostream& operator << (std::ostream& s, const Entry& e);
-
- DISALLOW_COPY_AND_ASSIGN(Entry);
-};
-
-std::ostream& operator<<(std::ostream& os, const Entry& entry);
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_ENTRY_H_
diff --git a/chromium/sync/syncable/entry_kernel.cc b/chromium/sync/syncable/entry_kernel.cc
deleted file mode 100644
index d872695243b..00000000000
--- a/chromium/sync/syncable/entry_kernel.cc
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/entry_kernel.h"
-
-#include "base/strings/string_number_conversions.h"
-#include "sync/protocol/proto_value_conversions.h"
-#include "sync/syncable/syncable_enum_conversions.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-namespace syncable {
-
-EntryKernel::EntryKernel() : dirty_(false) {
- // Everything else should already be default-initialized.
- for (int i = INT64_FIELDS_BEGIN; i < INT64_FIELDS_END; ++i) {
- int64_fields[i] = 0;
- }
-}
-
-EntryKernel::~EntryKernel() {}
-
-ModelType EntryKernel::GetModelType() const {
- ModelType specifics_type = GetModelTypeFromSpecifics(ref(SPECIFICS));
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
- if (ref(ID).IsRoot())
- return TOP_LEVEL_FOLDER;
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR))
- return TOP_LEVEL_FOLDER;
-
- return UNSPECIFIED;
-}
-
-ModelType EntryKernel::GetServerModelType() const {
- ModelType specifics_type = GetModelTypeFromSpecifics(ref(SERVER_SPECIFICS));
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
- if (ref(ID).IsRoot())
- return TOP_LEVEL_FOLDER;
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR))
- return TOP_LEVEL_FOLDER;
-
- return UNSPECIFIED;
-}
-
-bool EntryKernel::ShouldMaintainPosition() const {
- // We maintain positions for all bookmarks, except those that are
- // server-created top-level folders.
- return (GetModelTypeFromSpecifics(ref(SPECIFICS)) == syncer::BOOKMARKS)
- && !(!ref(UNIQUE_SERVER_TAG).empty() && ref(IS_DIR));
-}
-
-namespace {
-
-// Utility function to loop through a set of enum values and add the
-// field keys/values in the kernel to the given dictionary.
-//
-// V should be convertible to Value.
-template <class T, class U, class V>
-void SetFieldValues(const EntryKernel& kernel,
- base::DictionaryValue* dictionary_value,
- const char* (*enum_key_fn)(T),
- V* (*enum_value_fn)(U),
- int field_key_min, int field_key_max) {
- DCHECK_LE(field_key_min, field_key_max);
- for (int i = field_key_min; i <= field_key_max; ++i) {
- T field = static_cast<T>(i);
- const std::string& key = enum_key_fn(field);
- V* value = enum_value_fn(kernel.ref(field));
- dictionary_value->Set(key, value);
- }
-}
-
-void SetEncryptableProtoValues(
- const EntryKernel& kernel,
- Cryptographer* cryptographer,
- base::DictionaryValue* dictionary_value,
- int field_key_min, int field_key_max) {
- DCHECK_LE(field_key_min, field_key_max);
- for (int i = field_key_min; i <= field_key_max; ++i) {
- ProtoField field = static_cast<ProtoField>(i);
- const std::string& key = GetProtoFieldString(field);
-
- base::DictionaryValue* value = NULL;
- sync_pb::EntitySpecifics decrypted;
- const sync_pb::EncryptedData& encrypted = kernel.ref(field).encrypted();
- if (cryptographer &&
- kernel.ref(field).has_encrypted() &&
- cryptographer->CanDecrypt(encrypted) &&
- cryptographer->Decrypt(encrypted, &decrypted)) {
- value = EntitySpecificsToValue(decrypted);
- value->SetBoolean("encrypted", true);
- } else {
- value = EntitySpecificsToValue(kernel.ref(field));
- }
- dictionary_value->Set(key, value);
- }
-}
-
-// Helper functions for SetFieldValues().
-
-base::StringValue* Int64ToValue(int64 i) {
- return new base::StringValue(base::Int64ToString(i));
-}
-
-base::StringValue* TimeToValue(const base::Time& t) {
- return new base::StringValue(GetTimeDebugString(t));
-}
-
-base::StringValue* IdToValue(const Id& id) {
- return id.ToValue();
-}
-
-base::FundamentalValue* BooleanToValue(bool bool_val) {
- return new base::FundamentalValue(bool_val);
-}
-
-base::StringValue* StringToValue(const std::string& str) {
- return new base::StringValue(str);
-}
-
-base::StringValue* UniquePositionToValue(const UniquePosition& pos) {
- return base::Value::CreateStringValue(pos.ToDebugString());
-}
-
-} // namespace
-
-base::DictionaryValue* EntryKernel::ToValue(
- Cryptographer* cryptographer) const {
- base::DictionaryValue* kernel_info = new base::DictionaryValue();
- kernel_info->SetBoolean("isDirty", is_dirty());
- kernel_info->Set("serverModelType", ModelTypeToValue(GetServerModelType()));
-
- // Int64 fields.
- SetFieldValues(*this, kernel_info,
- &GetMetahandleFieldString, &Int64ToValue,
- INT64_FIELDS_BEGIN, META_HANDLE);
- SetFieldValues(*this, kernel_info,
- &GetBaseVersionString, &Int64ToValue,
- META_HANDLE + 1, BASE_VERSION);
- SetFieldValues(*this, kernel_info,
- &GetInt64FieldString, &Int64ToValue,
- BASE_VERSION + 1, INT64_FIELDS_END - 1);
-
- // Time fields.
- SetFieldValues(*this, kernel_info,
- &GetTimeFieldString, &TimeToValue,
- TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
-
- // ID fields.
- SetFieldValues(*this, kernel_info,
- &GetIdFieldString, &IdToValue,
- ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
-
- // Bit fields.
- SetFieldValues(*this, kernel_info,
- &GetIndexedBitFieldString, &BooleanToValue,
- BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
- SetFieldValues(*this, kernel_info,
- &GetIsDelFieldString, &BooleanToValue,
- INDEXED_BIT_FIELDS_END, IS_DEL);
- SetFieldValues(*this, kernel_info,
- &GetBitFieldString, &BooleanToValue,
- IS_DEL + 1, BIT_FIELDS_END - 1);
-
- // String fields.
- {
- // Pick out the function overload we want.
- SetFieldValues(*this, kernel_info,
- &GetStringFieldString, &StringToValue,
- STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
- }
-
- // Proto fields.
- SetEncryptableProtoValues(*this, cryptographer, kernel_info,
- PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
-
- // UniquePosition fields
- SetFieldValues(*this, kernel_info,
- &GetUniquePositionFieldString, &UniquePositionToValue,
- UNIQUE_POSITION_FIELDS_BEGIN, UNIQUE_POSITION_FIELDS_END - 1);
-
- // Bit temps.
- SetFieldValues(*this, kernel_info,
- &GetBitTempString, &BooleanToValue,
- BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
-
- return kernel_info;
-}
-
-base::ListValue* EntryKernelMutationMapToValue(
- const EntryKernelMutationMap& mutations) {
- base::ListValue* list = new base::ListValue();
- for (EntryKernelMutationMap::const_iterator it = mutations.begin();
- it != mutations.end(); ++it) {
- list->Append(EntryKernelMutationToValue(it->second));
- }
- return list;
-}
-
-base::DictionaryValue* EntryKernelMutationToValue(
- const EntryKernelMutation& mutation) {
- base::DictionaryValue* dict = new base::DictionaryValue();
- dict->Set("original", mutation.original.ToValue(NULL));
- dict->Set("mutated", mutation.mutated.ToValue(NULL));
- return dict;
-}
-
-} // namespace syncer
-} // namespace syncable
diff --git a/chromium/sync/syncable/entry_kernel.h b/chromium/sync/syncable/entry_kernel.h
deleted file mode 100644
index 562ebdb431b..00000000000
--- a/chromium/sync/syncable/entry_kernel.h
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_ENTRY_KERNEL_H_
-#define SYNC_SYNCABLE_ENTRY_KERNEL_H_
-
-#include <set>
-
-#include "base/time/time.h"
-#include "base/values.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/internal_api/public/util/immutable.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/util/time.h"
-
-namespace syncer {
-
-class Cryptographer;
-
-namespace syncable {
-
-// Things you need to update if you change any of the fields below:
-// - EntryKernel struct in this file
-// - syncable_columns.h
-// - syncable_enum_conversions{.h,.cc,_unittest.cc}
-// - EntryKernel::EntryKernel(), EntryKernel::ToValue() in entry_kernel.cc
-// - operator<< in Entry.cc
-// - BindFields() and UnpackEntry() in directory_backing_store.cc
-// - TestSimpleFieldsPreservedDuringSaveChanges in syncable_unittest.cc
-
-static const int64 kInvalidMetaHandle = 0;
-
-enum {
- BEGIN_FIELDS = 0,
- INT64_FIELDS_BEGIN = BEGIN_FIELDS
-};
-
-enum MetahandleField {
- // Primary key into the table. Keep this as a handle to the meta entry
- // across transactions.
- META_HANDLE = INT64_FIELDS_BEGIN
-};
-
-enum BaseVersion {
- // After initial upload, the version is controlled by the server, and is
- // increased whenever the data or metadata changes on the server.
- BASE_VERSION = META_HANDLE + 1,
-};
-
-enum Int64Field {
- SERVER_VERSION = BASE_VERSION + 1,
- LOCAL_EXTERNAL_ID, // ID of an item in the external local storage that this
- // entry is associated with. (such as bookmarks.js)
- TRANSACTION_VERSION,
- INT64_FIELDS_END
-};
-
-enum {
- INT64_FIELDS_COUNT = INT64_FIELDS_END - INT64_FIELDS_BEGIN,
- TIME_FIELDS_BEGIN = INT64_FIELDS_END,
-};
-
-enum TimeField {
- MTIME = TIME_FIELDS_BEGIN,
- SERVER_MTIME,
- CTIME,
- SERVER_CTIME,
- TIME_FIELDS_END,
-};
-
-enum {
- TIME_FIELDS_COUNT = TIME_FIELDS_END - TIME_FIELDS_BEGIN,
- ID_FIELDS_BEGIN = TIME_FIELDS_END,
-};
-
-enum IdField {
- // Code in InitializeTables relies on ID being the first IdField value.
- ID = ID_FIELDS_BEGIN,
- PARENT_ID,
- SERVER_PARENT_ID,
- ID_FIELDS_END
-};
-
-enum {
- ID_FIELDS_COUNT = ID_FIELDS_END - ID_FIELDS_BEGIN,
- BIT_FIELDS_BEGIN = ID_FIELDS_END
-};
-
-enum IndexedBitField {
- IS_UNSYNCED = BIT_FIELDS_BEGIN,
- IS_UNAPPLIED_UPDATE,
- INDEXED_BIT_FIELDS_END,
-};
-
-enum IsDelField {
- IS_DEL = INDEXED_BIT_FIELDS_END,
-};
-
-enum BitField {
- IS_DIR = IS_DEL + 1,
- SERVER_IS_DIR,
- SERVER_IS_DEL,
- BIT_FIELDS_END
-};
-
-enum {
- BIT_FIELDS_COUNT = BIT_FIELDS_END - BIT_FIELDS_BEGIN,
- STRING_FIELDS_BEGIN = BIT_FIELDS_END
-};
-
-enum StringField {
- // Name, will be truncated by server. Can be duplicated in a folder.
- NON_UNIQUE_NAME = STRING_FIELDS_BEGIN,
- // The server version of |NON_UNIQUE_NAME|.
- SERVER_NON_UNIQUE_NAME,
-
- // A tag string which identifies this node as a particular top-level
- // permanent object. The tag can be thought of as a unique key that
- // identifies a singleton instance.
- UNIQUE_SERVER_TAG, // Tagged by the server
- UNIQUE_CLIENT_TAG, // Tagged by the client
- UNIQUE_BOOKMARK_TAG, // Client tags for bookmark items
- STRING_FIELDS_END,
-};
-
-enum {
- STRING_FIELDS_COUNT = STRING_FIELDS_END - STRING_FIELDS_BEGIN,
- PROTO_FIELDS_BEGIN = STRING_FIELDS_END
-};
-
-// From looking at the sqlite3 docs, it's not directly stated, but it
-// seems the overhead for storing a NULL blob is very small.
-enum ProtoField {
- SPECIFICS = PROTO_FIELDS_BEGIN,
- SERVER_SPECIFICS,
- BASE_SERVER_SPECIFICS,
- PROTO_FIELDS_END,
-};
-
-enum {
- PROTO_FIELDS_COUNT = PROTO_FIELDS_END - PROTO_FIELDS_BEGIN,
- UNIQUE_POSITION_FIELDS_BEGIN = PROTO_FIELDS_END
-};
-
-enum UniquePositionField {
- SERVER_UNIQUE_POSITION = UNIQUE_POSITION_FIELDS_BEGIN,
- UNIQUE_POSITION,
- UNIQUE_POSITION_FIELDS_END
-};
-
-enum {
- UNIQUE_POSITION_FIELDS_COUNT =
- UNIQUE_POSITION_FIELDS_END - UNIQUE_POSITION_FIELDS_BEGIN,
- FIELD_COUNT = UNIQUE_POSITION_FIELDS_END - BEGIN_FIELDS,
- // Past this point we have temporaries, stored in memory only.
- BEGIN_TEMPS = UNIQUE_POSITION_FIELDS_END,
- BIT_TEMPS_BEGIN = BEGIN_TEMPS,
-};
-
-enum BitTemp {
- // Not to be confused with IS_UNSYNCED, this bit is used to detect local
- // changes to items that happen during the server Commit operation.
- SYNCING = BIT_TEMPS_BEGIN,
- BIT_TEMPS_END,
-};
-
-enum {
- BIT_TEMPS_COUNT = BIT_TEMPS_END - BIT_TEMPS_BEGIN
-};
-
-
-
-struct SYNC_EXPORT_PRIVATE EntryKernel {
- private:
- std::string string_fields[STRING_FIELDS_COUNT];
- sync_pb::EntitySpecifics specifics_fields[PROTO_FIELDS_COUNT];
- int64 int64_fields[INT64_FIELDS_COUNT];
- base::Time time_fields[TIME_FIELDS_COUNT];
- Id id_fields[ID_FIELDS_COUNT];
- UniquePosition unique_position_fields[UNIQUE_POSITION_FIELDS_COUNT];
- std::bitset<BIT_FIELDS_COUNT> bit_fields;
- std::bitset<BIT_TEMPS_COUNT> bit_temps;
-
- public:
- EntryKernel();
- ~EntryKernel();
-
- // Set the dirty bit, and optionally add this entry's metahandle to
- // a provided index on dirty bits in |dirty_index|. Parameter may be null,
- // and will result only in setting the dirty bit of this entry.
- inline void mark_dirty(syncable::MetahandleSet* dirty_index) {
- if (!dirty_ && dirty_index) {
- DCHECK_NE(0, ref(META_HANDLE));
- dirty_index->insert(ref(META_HANDLE));
- }
- dirty_ = true;
- }
-
- // Clear the dirty bit, and optionally remove this entry's metahandle from
- // a provided index on dirty bits in |dirty_index|. Parameter may be null,
- // and will result only in clearing dirty bit of this entry.
- inline void clear_dirty(syncable::MetahandleSet* dirty_index) {
- if (dirty_ && dirty_index) {
- DCHECK_NE(0, ref(META_HANDLE));
- dirty_index->erase(ref(META_HANDLE));
- }
- dirty_ = false;
- }
-
- inline bool is_dirty() const {
- return dirty_;
- }
-
- // Setters.
- inline void put(MetahandleField field, int64 value) {
- int64_fields[field - INT64_FIELDS_BEGIN] = value;
- }
- inline void put(Int64Field field, int64 value) {
- int64_fields[field - INT64_FIELDS_BEGIN] = value;
- }
- inline void put(TimeField field, const base::Time& value) {
- // Round-trip to proto time format and back so that we have
- // consistent time resolutions (ms).
- time_fields[field - TIME_FIELDS_BEGIN] =
- ProtoTimeToTime(TimeToProtoTime(value));
- }
- inline void put(IdField field, const Id& value) {
- id_fields[field - ID_FIELDS_BEGIN] = value;
- }
- inline void put(BaseVersion field, int64 value) {
- int64_fields[field - INT64_FIELDS_BEGIN] = value;
- }
- inline void put(IndexedBitField field, bool value) {
- bit_fields[field - BIT_FIELDS_BEGIN] = value;
- }
- inline void put(IsDelField field, bool value) {
- bit_fields[field - BIT_FIELDS_BEGIN] = value;
- }
- inline void put(BitField field, bool value) {
- bit_fields[field - BIT_FIELDS_BEGIN] = value;
- }
- inline void put(StringField field, const std::string& value) {
- string_fields[field - STRING_FIELDS_BEGIN] = value;
- }
- inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
- specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
- }
- inline void put(UniquePositionField field, const UniquePosition& value) {
- unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN] = value;
- }
- inline void put(BitTemp field, bool value) {
- bit_temps[field - BIT_TEMPS_BEGIN] = value;
- }
-
- // Const ref getters.
- inline int64 ref(MetahandleField field) const {
- return int64_fields[field - INT64_FIELDS_BEGIN];
- }
- inline int64 ref(Int64Field field) const {
- return int64_fields[field - INT64_FIELDS_BEGIN];
- }
- inline const base::Time& ref(TimeField field) const {
- return time_fields[field - TIME_FIELDS_BEGIN];
- }
- inline const Id& ref(IdField field) const {
- return id_fields[field - ID_FIELDS_BEGIN];
- }
- inline int64 ref(BaseVersion field) const {
- return int64_fields[field - INT64_FIELDS_BEGIN];
- }
- inline bool ref(IndexedBitField field) const {
- return bit_fields[field - BIT_FIELDS_BEGIN];
- }
- inline bool ref(IsDelField field) const {
- return bit_fields[field - BIT_FIELDS_BEGIN];
- }
- inline bool ref(BitField field) const {
- return bit_fields[field - BIT_FIELDS_BEGIN];
- }
- inline const std::string& ref(StringField field) const {
- return string_fields[field - STRING_FIELDS_BEGIN];
- }
- inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
- return specifics_fields[field - PROTO_FIELDS_BEGIN];
- }
- inline const UniquePosition& ref(UniquePositionField field) const {
- return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
- }
- inline bool ref(BitTemp field) const {
- return bit_temps[field - BIT_TEMPS_BEGIN];
- }
-
- // Non-const, mutable ref getters for object types only.
- inline std::string& mutable_ref(StringField field) {
- return string_fields[field - STRING_FIELDS_BEGIN];
- }
- inline sync_pb::EntitySpecifics& mutable_ref(ProtoField field) {
- return specifics_fields[field - PROTO_FIELDS_BEGIN];
- }
- inline Id& mutable_ref(IdField field) {
- return id_fields[field - ID_FIELDS_BEGIN];
- }
- inline UniquePosition& mutable_ref(UniquePositionField field) {
- return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
- }
-
- ModelType GetModelType() const;
- ModelType GetServerModelType() const;
- bool ShouldMaintainPosition() const;
-
- // Dumps all kernel info into a DictionaryValue and returns it.
- // Transfers ownership of the DictionaryValue to the caller.
- // Note: |cryptographer| is an optional parameter for use in decrypting
- // encrypted specifics. If it is NULL or the specifics are not decryptsble,
- // they will be serialized as empty proto's.
- base::DictionaryValue* ToValue(Cryptographer* cryptographer) const;
-
- private:
- // Tracks whether this entry needs to be saved to the database.
- bool dirty_;
-};
-
-class EntryKernelLessByMetaHandle {
- public:
- inline bool operator()(const EntryKernel* a,
- const EntryKernel* b) const {
- return a->ref(META_HANDLE) < b->ref(META_HANDLE);
- }
-};
-
-typedef std::set<const EntryKernel*, EntryKernelLessByMetaHandle>
- EntryKernelSet;
-
-struct EntryKernelMutation {
- EntryKernel original, mutated;
-};
-
-typedef std::map<int64, EntryKernelMutation> EntryKernelMutationMap;
-
-typedef Immutable<EntryKernelMutationMap> ImmutableEntryKernelMutationMap;
-
-// Caller owns the return value.
-base::DictionaryValue* EntryKernelMutationToValue(
- const EntryKernelMutation& mutation);
-
-// Caller owns the return value.
-base::ListValue* EntryKernelMutationMapToValue(
- const EntryKernelMutationMap& mutations);
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_ENTRY_KERNEL_H_
diff --git a/chromium/sync/syncable/in_memory_directory_backing_store.cc b/chromium/sync/syncable/in_memory_directory_backing_store.cc
deleted file mode 100644
index 57995f82378..00000000000
--- a/chromium/sync/syncable/in_memory_directory_backing_store.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/in_memory_directory_backing_store.h"
-
-namespace syncer {
-namespace syncable {
-
-InMemoryDirectoryBackingStore::InMemoryDirectoryBackingStore(
- const std::string& dir_name)
- : DirectoryBackingStore(dir_name),
- consistent_cache_guid_requested_(false) {
-}
-
-DirOpenResult InMemoryDirectoryBackingStore::Load(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) {
- if (!db_->is_open()) {
- if (!db_->OpenInMemory())
- return FAILED_OPEN_DATABASE;
- }
-
- if (!InitializeTables())
- return FAILED_OPEN_DATABASE;
-
- if (consistent_cache_guid_requested_) {
- if (!db_->Execute("UPDATE share_info "
- "SET cache_guid = 'IrcjZ2jyzHDV9Io4+zKcXQ=='")) {
- return FAILED_OPEN_DATABASE;
- }
- }
-
- if (!DropDeletedEntries())
- return FAILED_DATABASE_CORRUPT;
- if (!LoadEntries(handles_map))
- return FAILED_DATABASE_CORRUPT;
- if (!LoadDeleteJournals(delete_journals))
- return FAILED_DATABASE_CORRUPT;
- if (!LoadInfo(kernel_load_info))
- return FAILED_DATABASE_CORRUPT;
- if (!VerifyReferenceIntegrity(handles_map))
- return FAILED_DATABASE_CORRUPT;
-
- return OPENED;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/in_memory_directory_backing_store.h b/chromium/sync/syncable/in_memory_directory_backing_store.h
deleted file mode 100644
index ff12996b87a..00000000000
--- a/chromium/sync/syncable/in_memory_directory_backing_store.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
-#define SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
-
-#include "sync/syncable/directory_backing_store.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-namespace syncable {
-
-// This implementation of DirectoryBackingStore is used in tests that do not
-// require us to write to a file. An in-memory sqlite database is much faster
-// than an on-disk database, so this can result in significant speedups in our
-// unit tests.
-//
-// An InMemoryDirectoryBackingStore cannot load data from existing databases.
-// When an InMemoryDirectoryBackingStore is destroyed, all data stored in this
-// database is lost. If these limitations are a problem for you, consider using
-// TestDirectoryBackingStore.
-class SYNC_EXPORT_PRIVATE InMemoryDirectoryBackingStore
- : public DirectoryBackingStore {
- public:
- explicit InMemoryDirectoryBackingStore(const std::string& dir_name);
- virtual DirOpenResult Load(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
-
- void request_consistent_cache_guid() {
- consistent_cache_guid_requested_ = true;
- }
-
- private:
- bool consistent_cache_guid_requested_;
-
- DISALLOW_COPY_AND_ASSIGN(InMemoryDirectoryBackingStore);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
diff --git a/chromium/sync/syncable/invalid_directory_backing_store.cc b/chromium/sync/syncable/invalid_directory_backing_store.cc
deleted file mode 100644
index 63455217978..00000000000
--- a/chromium/sync/syncable/invalid_directory_backing_store.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/invalid_directory_backing_store.h"
-
-namespace syncer {
-namespace syncable {
-
-InvalidDirectoryBackingStore::InvalidDirectoryBackingStore()
- : DirectoryBackingStore("some_fake_user") {
-}
-
-InvalidDirectoryBackingStore::~InvalidDirectoryBackingStore() {
-}
-
-DirOpenResult InvalidDirectoryBackingStore::Load(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) {
- return FAILED_OPEN_DATABASE;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/invalid_directory_backing_store.h b/chromium/sync/syncable/invalid_directory_backing_store.h
deleted file mode 100644
index 48344487fc2..00000000000
--- a/chromium/sync/syncable/invalid_directory_backing_store.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_INVALID_DIRECTORY_BACKING_STORE_H_
-#define SYNC_SYNCABLE_INVALID_DIRECTORY_BACKING_STORE_H_
-
-#include "sync/syncable/directory_backing_store.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-namespace syncable {
-
-// A class used to test scenarios where loading a directory fails.
-class SYNC_EXPORT_PRIVATE InvalidDirectoryBackingStore
- : public DirectoryBackingStore {
- public:
- InvalidDirectoryBackingStore();
- virtual ~InvalidDirectoryBackingStore();
- virtual DirOpenResult Load(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
- private:
- DISALLOW_COPY_AND_ASSIGN(InvalidDirectoryBackingStore);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_INVALID_DIRECTORY_BACKING_STORE_H_
diff --git a/chromium/sync/syncable/metahandle_set.h b/chromium/sync/syncable/metahandle_set.h
deleted file mode 100644
index 5b4e4251798..00000000000
--- a/chromium/sync/syncable/metahandle_set.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_METAHANDLE_SET_
-#define SYNC_SYNCABLE_METAHANDLE_SET_
-
-#include <set>
-
-#include "base/basictypes.h"
-
-namespace syncer {
-namespace syncable {
-
-typedef std::set<int64> MetahandleSet;
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_METAHANDLE_SET_
diff --git a/chromium/sync/syncable/model_neutral_mutable_entry.cc b/chromium/sync/syncable/model_neutral_mutable_entry.cc
deleted file mode 100644
index d778abacef7..00000000000
--- a/chromium/sync/syncable/model_neutral_mutable_entry.cc
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/model_neutral_mutable_entry.h"
-
-#include <string>
-
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/scoped_kernel_lock.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-using std::string;
-
-namespace syncer {
-
-namespace syncable {
-
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans,
- CreateNewUpdateItem,
- const Id& id)
- : Entry(trans), base_write_transaction_(trans) {
- Entry same_id(trans, GET_BY_ID, id);
- kernel_ = NULL;
- if (same_id.good()) {
- return; // already have an item with this ID.
- }
- scoped_ptr<EntryKernel> kernel(new EntryKernel());
-
- kernel->put(ID, id);
- kernel->put(META_HANDLE, trans->directory()->NextMetahandle());
- kernel->mark_dirty(&trans->directory()->kernel_->dirty_metahandles);
- kernel->put(IS_DEL, true);
- // We match the database defaults here
- kernel->put(BASE_VERSION, CHANGES_VERSION);
- if (!trans->directory()->InsertEntry(trans, kernel.get())) {
- return; // Failed inserting.
- }
- trans->TrackChangesTo(kernel.get());
-
- kernel_ = kernel.release();
-}
-
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- BaseWriteTransaction* trans, GetById, const Id& id)
- : Entry(trans, GET_BY_ID, id), base_write_transaction_(trans) {
-}
-
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- BaseWriteTransaction* trans, GetByHandle, int64 metahandle)
- : Entry(trans, GET_BY_HANDLE, metahandle), base_write_transaction_(trans) {
-}
-
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- BaseWriteTransaction* trans, GetByClientTag, const std::string& tag)
- : Entry(trans, GET_BY_CLIENT_TAG, tag), base_write_transaction_(trans) {
-}
-
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(
- BaseWriteTransaction* trans, GetByServerTag, const string& tag)
- : Entry(trans, GET_BY_SERVER_TAG, tag), base_write_transaction_(trans) {
-}
-
-void ModelNeutralMutableEntry::PutBaseVersion(int64 value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(BASE_VERSION) != value) {
- kernel_->put(BASE_VERSION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void ModelNeutralMutableEntry::PutServerVersion(int64 value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(SERVER_VERSION) != value) {
- ScopedKernelLock lock(dir());
- kernel_->put(SERVER_VERSION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void ModelNeutralMutableEntry::PutServerMtime(base::Time value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(SERVER_MTIME) != value) {
- kernel_->put(SERVER_MTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void ModelNeutralMutableEntry::PutServerCtime(base::Time value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(SERVER_CTIME) != value) {
- kernel_->put(SERVER_CTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool ModelNeutralMutableEntry::PutId(const Id& value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(ID) != value) {
- if (!dir()->ReindexId(base_write_transaction(), kernel_, value))
- return false;
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-void ModelNeutralMutableEntry::PutServerParentId(const Id& value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
-
- if (kernel_->ref(SERVER_PARENT_ID) != value) {
- kernel_->put(SERVER_PARENT_ID, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(IS_UNSYNCED) != value) {
- MetahandleSet* index = &dir()->kernel_->unsynced_metahandles;
-
- ScopedKernelLock lock(dir());
- if (value) {
- if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
- FROM_HERE,
- "Could not insert",
- base_write_transaction())) {
- return false;
- }
- } else {
- if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
- FROM_HERE,
- "Entry Not succesfully erased",
- base_write_transaction())) {
- return false;
- }
- }
- kernel_->put(IS_UNSYNCED, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if (kernel_->ref(IS_UNAPPLIED_UPDATE) != value) {
- // Use kernel_->GetServerModelType() instead of
- // GetServerModelType() as we may trigger some DCHECKs in the
- // latter.
- MetahandleSet* index = &dir()->kernel_->unapplied_update_metahandles[
- kernel_->GetServerModelType()];
-
- ScopedKernelLock lock(dir());
- if (value) {
- if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
- FROM_HERE,
- "Could not insert",
- base_write_transaction())) {
- return false;
- }
- } else {
- if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
- FROM_HERE,
- "Entry Not succesfully erased",
- base_write_transaction())) {
- return false;
- }
- }
- kernel_->put(IS_UNAPPLIED_UPDATE, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-void ModelNeutralMutableEntry::PutServerIsDir(bool value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- bool old_value = kernel_->ref(SERVER_IS_DIR);
- if (old_value != value) {
- kernel_->put(SERVER_IS_DIR, value);
- kernel_->mark_dirty(GetDirtyIndexHelper());
- }
-}
-
-void ModelNeutralMutableEntry::PutServerIsDel(bool value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- bool old_value = kernel_->ref(SERVER_IS_DEL);
- if (old_value != value) {
- kernel_->put(SERVER_IS_DEL, value);
- kernel_->mark_dirty(GetDirtyIndexHelper());
- }
-
- // Update delete journal for existence status change on server side here
- // instead of in PutIsDel() because IS_DEL may not be updated due to
- // early returns when processing updates. And because
- // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has
- // to be called on sync thread.
- dir()->delete_journal()->UpdateDeleteJournalForServerDelete(
- base_write_transaction(), old_value, *kernel_);
-}
-
-void ModelNeutralMutableEntry::PutServerNonUniqueName(
- const std::string& value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
-
- if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) {
- kernel_->put(SERVER_NON_UNIQUE_NAME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool ModelNeutralMutableEntry::PutUniqueServerTag(const string& new_tag) {
- if (new_tag == kernel_->ref(UNIQUE_SERVER_TAG)) {
- return true;
- }
-
- base_write_transaction_->TrackChangesTo(kernel_);
- ScopedKernelLock lock(dir());
- // Make sure your new value is not in there already.
- if (dir()->kernel_->server_tags_map.find(new_tag) !=
- dir()->kernel_->server_tags_map.end()) {
- DVLOG(1) << "Detected duplicate server tag";
- return false;
- }
- dir()->kernel_->server_tags_map.erase(
- kernel_->ref(UNIQUE_SERVER_TAG));
- kernel_->put(UNIQUE_SERVER_TAG, new_tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- if (!new_tag.empty()) {
- dir()->kernel_->server_tags_map[new_tag] = kernel_;
- }
-
- return true;
-}
-
-bool ModelNeutralMutableEntry::PutUniqueClientTag(const string& new_tag) {
- if (new_tag == kernel_->ref(UNIQUE_CLIENT_TAG)) {
- return true;
- }
-
- base_write_transaction_->TrackChangesTo(kernel_);
- ScopedKernelLock lock(dir());
- // Make sure your new value is not in there already.
- if (dir()->kernel_->client_tags_map.find(new_tag) !=
- dir()->kernel_->client_tags_map.end()) {
- DVLOG(1) << "Detected duplicate client tag";
- return false;
- }
- dir()->kernel_->client_tags_map.erase(
- kernel_->ref(UNIQUE_CLIENT_TAG));
- kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- if (!new_tag.empty()) {
- dir()->kernel_->client_tags_map[new_tag] = kernel_;
- }
-
- return true;
-}
-
-void ModelNeutralMutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
- // This unique tag will eventually be used as the unique suffix when adjusting
- // this bookmark's position. Let's make sure it's a valid suffix.
- if (!UniquePosition::IsValidSuffix(tag)) {
- NOTREACHED();
- return;
- }
-
- if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() &&
- tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
- // There is only one scenario where our tag is expected to change. That
- // scenario occurs when our current tag is a non-correct tag assigned during
- // the UniquePosition migration.
- std::string migration_generated_tag =
- GenerateSyncableBookmarkHash(std::string(),
- kernel_->ref(ID).GetServerId());
- DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG));
- }
-
- kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
-}
-
-void ModelNeutralMutableEntry::PutServerSpecifics(
- const sync_pb::EntitySpecifics& value) {
- DCHECK(kernel_);
- CHECK(!value.password().has_client_only_encrypted_data());
- base_write_transaction_->TrackChangesTo(kernel_);
- // TODO(ncarter): This is unfortunately heavyweight. Can we do
- // better?
- if (kernel_->ref(SERVER_SPECIFICS).SerializeAsString() !=
- value.SerializeAsString()) {
- if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
- // Remove ourselves from unapplied_update_metahandles with our
- // old server type.
- const ModelType old_server_type = kernel_->GetServerModelType();
- const int64 metahandle = kernel_->ref(META_HANDLE);
- size_t erase_count =
- dir()->kernel_->unapplied_update_metahandles[old_server_type]
- .erase(metahandle);
- DCHECK_EQ(erase_count, 1u);
- }
-
- kernel_->put(SERVER_SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
-
- if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
- // Add ourselves back into unapplied_update_metahandles with our
- // new server type.
- const ModelType new_server_type = kernel_->GetServerModelType();
- const int64 metahandle = kernel_->ref(META_HANDLE);
- dir()->kernel_->unapplied_update_metahandles[new_server_type]
- .insert(metahandle);
- }
- }
-}
-
-void ModelNeutralMutableEntry::PutBaseServerSpecifics(
- const sync_pb::EntitySpecifics& value) {
- DCHECK(kernel_);
- CHECK(!value.password().has_client_only_encrypted_data());
- base_write_transaction_->TrackChangesTo(kernel_);
- // TODO(ncarter): This is unfortunately heavyweight. Can we do
- // better?
- if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()
- != value.SerializeAsString()) {
- kernel_->put(BASE_SERVER_SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void ModelNeutralMutableEntry::PutServerUniquePosition(
- const UniquePosition& value) {
- DCHECK(kernel_);
- base_write_transaction_->TrackChangesTo(kernel_);
- if(!kernel_->ref(SERVER_UNIQUE_POSITION).Equals(value)) {
- // We should never overwrite a valid position with an invalid one.
- DCHECK(value.IsValid());
- ScopedKernelLock lock(dir());
- kernel_->put(SERVER_UNIQUE_POSITION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void ModelNeutralMutableEntry::PutSyncing(bool value) {
- kernel_->put(SYNCING, value);
-}
-
-void ModelNeutralMutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
- base_write_transaction_->TrackChangesTo(kernel_);
- dir()->ReindexParentId(base_write_transaction(), kernel_, parent_id);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
-}
-
-void ModelNeutralMutableEntry::UpdateTransactionVersion(int64 value) {
- ScopedKernelLock lock(dir());
- kernel_->put(TRANSACTION_VERSION, value);
- kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles));
-}
-
-ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans)
- : Entry(trans), base_write_transaction_(trans) {}
-
-MetahandleSet* ModelNeutralMutableEntry::GetDirtyIndexHelper() {
- return &dir()->kernel_->dirty_metahandles;
-}
-
-} // namespace syncable
-
-} // namespace syncer
diff --git a/chromium/sync/syncable/model_neutral_mutable_entry.h b/chromium/sync/syncable/model_neutral_mutable_entry.h
deleted file mode 100644
index e2292e7045d..00000000000
--- a/chromium/sync/syncable/model_neutral_mutable_entry.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_MODEL_NEUTRAL_MUTABLE_ENTRY_H_
-#define SYNC_SYNCABLE_MODEL_NEUTRAL_MUTABLE_ENTRY_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/entry.h"
-
-namespace syncer {
-class WriteNode;
-
-namespace syncable {
-
-class BaseWriteTransaction;
-
-enum CreateNewUpdateItem {
- CREATE_NEW_UPDATE_ITEM
-};
-
-// This Entry includes all the operations one can safely perform on the sync
-// thread. In particular, it does not expose setters to make changes that need
-// to be communicated to the model (and the model's thread). It is not possible
-// to change an entry's SPECIFICS or UNIQUE_POSITION fields with this kind of
-// entry.
-class SYNC_EXPORT_PRIVATE ModelNeutralMutableEntry : public Entry {
- public:
- ModelNeutralMutableEntry(BaseWriteTransaction* trans,
- CreateNewUpdateItem,
- const Id& id);
- ModelNeutralMutableEntry(BaseWriteTransaction* trans, GetByHandle, int64);
- ModelNeutralMutableEntry(BaseWriteTransaction* trans, GetById, const Id&);
- ModelNeutralMutableEntry(
- BaseWriteTransaction* trans,
- GetByClientTag,
- const std::string& tag);
- ModelNeutralMutableEntry(
- BaseWriteTransaction* trans,
- GetByServerTag,
- const std::string& tag);
-
- inline BaseWriteTransaction* base_write_transaction() const {
- return base_write_transaction_;
- }
-
- // Non-model-changing setters. These setters will change properties internal
- // to the node. These fields are important for bookkeeping in the sync
- // internals, but it is not necessary to communicate changes in these fields
- // to the local models.
- //
- // Some of them trigger the re-indexing of the entry. They return true on
- // success and false on failure, which occurs when putting the value would
- // have caused a duplicate in the index. The setters that never fail return
- // void.
- void PutBaseVersion(int64 value);
- void PutServerVersion(int64 value);
- void PutServerMtime(base::Time value);
- void PutServerCtime(base::Time value);
- bool PutId(const Id& value);
- void PutServerParentId(const Id& value);
- bool PutIsUnsynced(bool value);
- bool PutIsUnappliedUpdate(bool value);
- void PutServerIsDir(bool value);
- void PutServerIsDel(bool value);
- void PutServerNonUniqueName(const std::string& value);
- bool PutUniqueServerTag(const std::string& value);
- bool PutUniqueClientTag(const std::string& value);
- void PutUniqueBookmarkTag(const std::string& tag);
- void PutServerSpecifics(const sync_pb::EntitySpecifics& value);
- void PutBaseServerSpecifics(const sync_pb::EntitySpecifics& value);
- void PutServerUniquePosition(const UniquePosition& value);
- void PutSyncing(bool value);
-
- // Do a simple property-only update of the PARENT_ID field. Use with caution.
- //
- // The normal Put(IS_PARENT) call will move the item to the front of the
- // sibling order to maintain the linked list invariants when the parent
- // changes. That's usually what you want to do, but it's inappropriate
- // when the caller is trying to change the parent ID of a the whole set
- // of children (e.g. because the ID changed during a commit). For those
- // cases, there's this function. It will corrupt the sibling ordering
- // if you're not careful.
- void PutParentIdPropertyOnly(const Id& parent_id);
-
- // This is similar to what one would expect from Put(TRANSACTION_VERSION),
- // except that it doesn't bother to invoke 'SaveOriginals'. Calling that
- // function is at best unnecessary, since the transaction will have already
- // used its list of mutations by the time this function is called.
- void UpdateTransactionVersion(int64 version);
-
- protected:
- explicit ModelNeutralMutableEntry(BaseWriteTransaction* trans);
-
- syncable::MetahandleSet* GetDirtyIndexHelper();
-
- private:
- friend class syncer::WriteNode;
- friend class Directory;
-
- // Don't allow creation on heap, except by sync API wrappers.
- void* operator new(size_t size) { return (::operator new)(size); }
-
- // Kind of redundant. We should reduce the number of pointers
- // floating around if at all possible. Could we store this in Directory?
- // Scope: Set on construction, never changed after that.
- BaseWriteTransaction* const base_write_transaction_;
-
- DISALLOW_COPY_AND_ASSIGN(ModelNeutralMutableEntry);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_MODEL_NEUTRAL_MUTABLE_ENTRY_H_
diff --git a/chromium/sync/syncable/model_type.cc b/chromium/sync/syncable/model_type.cc
deleted file mode 100644
index fa331187323..00000000000
--- a/chromium/sync/syncable/model_type.cc
+++ /dev/null
@@ -1,995 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/model_type.h"
-
-#include "base/strings/string_split.h"
-#include "base/values.h"
-#include "sync/protocol/app_notification_specifics.pb.h"
-#include "sync/protocol/app_setting_specifics.pb.h"
-#include "sync/protocol/app_specifics.pb.h"
-#include "sync/protocol/autofill_specifics.pb.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/extension_setting_specifics.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/search_engine_specifics.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/protocol/theme_specifics.pb.h"
-#include "sync/protocol/typed_url_specifics.pb.h"
-#include "sync/syncable/syncable_proto_util.h"
-
-namespace syncer {
-
-void AddDefaultFieldValue(ModelType datatype,
- sync_pb::EntitySpecifics* specifics) {
- if (!ProtocolTypes().Has(datatype)) {
- NOTREACHED() << "Only protocol types have field values.";
- return;
- }
- switch (datatype) {
- case BOOKMARKS:
- specifics->mutable_bookmark();
- break;
- case PASSWORDS:
- specifics->mutable_password();
- break;
- case PREFERENCES:
- specifics->mutable_preference();
- break;
- case AUTOFILL:
- specifics->mutable_autofill();
- break;
- case AUTOFILL_PROFILE:
- specifics->mutable_autofill_profile();
- break;
- case THEMES:
- specifics->mutable_theme();
- break;
- case TYPED_URLS:
- specifics->mutable_typed_url();
- break;
- case EXTENSIONS:
- specifics->mutable_extension();
- break;
- case NIGORI:
- specifics->mutable_nigori();
- break;
- case SEARCH_ENGINES:
- specifics->mutable_search_engine();
- break;
- case SESSIONS:
- specifics->mutable_session();
- break;
- case APPS:
- specifics->mutable_app();
- break;
- case APP_LIST:
- specifics->mutable_app_list();
- break;
- case APP_SETTINGS:
- specifics->mutable_app_setting();
- break;
- case EXTENSION_SETTINGS:
- specifics->mutable_extension_setting();
- break;
- case APP_NOTIFICATIONS:
- specifics->mutable_app_notification();
- break;
- case HISTORY_DELETE_DIRECTIVES:
- specifics->mutable_history_delete_directive();
- break;
- case SYNCED_NOTIFICATIONS:
- specifics->mutable_synced_notification();
- break;
- case DEVICE_INFO:
- specifics->mutable_device_info();
- break;
- case EXPERIMENTS:
- specifics->mutable_experiments();
- break;
- case PRIORITY_PREFERENCES:
- specifics->mutable_priority_preference();
- break;
- case DICTIONARY:
- specifics->mutable_dictionary();
- break;
- case FAVICON_IMAGES:
- specifics->mutable_favicon_image();
- break;
- case FAVICON_TRACKING:
- specifics->mutable_favicon_tracking();
- break;
- case MANAGED_USER_SETTINGS:
- specifics->mutable_managed_user_setting();
- break;
- case MANAGED_USERS:
- specifics->mutable_managed_user();
- break;
- case ARTICLES:
- specifics->mutable_article();
- break;
- default:
- NOTREACHED() << "No known extension for model type.";
- }
-}
-
-ModelType GetModelTypeFromSpecificsFieldNumber(int field_number) {
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- if (GetSpecificsFieldNumberFromModelType(iter.Get()) == field_number)
- return iter.Get();
- }
- return UNSPECIFIED;
-}
-
-int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
- if (!ProtocolTypes().Has(model_type)) {
- NOTREACHED() << "Only protocol types have field values.";
- return 0;
- }
- switch (model_type) {
- case BOOKMARKS:
- return sync_pb::EntitySpecifics::kBookmarkFieldNumber;
- break;
- case PASSWORDS:
- return sync_pb::EntitySpecifics::kPasswordFieldNumber;
- break;
- case PREFERENCES:
- return sync_pb::EntitySpecifics::kPreferenceFieldNumber;
- break;
- case AUTOFILL:
- return sync_pb::EntitySpecifics::kAutofillFieldNumber;
- break;
- case AUTOFILL_PROFILE:
- return sync_pb::EntitySpecifics::kAutofillProfileFieldNumber;
- break;
- case THEMES:
- return sync_pb::EntitySpecifics::kThemeFieldNumber;
- break;
- case TYPED_URLS:
- return sync_pb::EntitySpecifics::kTypedUrlFieldNumber;
- break;
- case EXTENSIONS:
- return sync_pb::EntitySpecifics::kExtensionFieldNumber;
- break;
- case NIGORI:
- return sync_pb::EntitySpecifics::kNigoriFieldNumber;
- break;
- case SEARCH_ENGINES:
- return sync_pb::EntitySpecifics::kSearchEngineFieldNumber;
- break;
- case SESSIONS:
- return sync_pb::EntitySpecifics::kSessionFieldNumber;
- break;
- case APPS:
- return sync_pb::EntitySpecifics::kAppFieldNumber;
- break;
- case APP_LIST:
- return sync_pb::EntitySpecifics::kAppListFieldNumber;
- break;
- case APP_SETTINGS:
- return sync_pb::EntitySpecifics::kAppSettingFieldNumber;
- break;
- case EXTENSION_SETTINGS:
- return sync_pb::EntitySpecifics::kExtensionSettingFieldNumber;
- break;
- case APP_NOTIFICATIONS:
- return sync_pb::EntitySpecifics::kAppNotificationFieldNumber;
- break;
- case HISTORY_DELETE_DIRECTIVES:
- return sync_pb::EntitySpecifics::kHistoryDeleteDirectiveFieldNumber;
- case SYNCED_NOTIFICATIONS:
- return sync_pb::EntitySpecifics::kSyncedNotificationFieldNumber;
- case DEVICE_INFO:
- return sync_pb::EntitySpecifics::kDeviceInfoFieldNumber;
- break;
- case EXPERIMENTS:
- return sync_pb::EntitySpecifics::kExperimentsFieldNumber;
- break;
- case PRIORITY_PREFERENCES:
- return sync_pb::EntitySpecifics::kPriorityPreferenceFieldNumber;
- break;
- case DICTIONARY:
- return sync_pb::EntitySpecifics::kDictionaryFieldNumber;
- break;
- case FAVICON_IMAGES:
- return sync_pb::EntitySpecifics::kFaviconImageFieldNumber;
- case FAVICON_TRACKING:
- return sync_pb::EntitySpecifics::kFaviconTrackingFieldNumber;
- case MANAGED_USER_SETTINGS:
- return sync_pb::EntitySpecifics::kManagedUserSettingFieldNumber;
- case MANAGED_USERS:
- return sync_pb::EntitySpecifics::kManagedUserFieldNumber;
- case ARTICLES:
- return sync_pb::EntitySpecifics::kArticleFieldNumber;
- default:
- NOTREACHED() << "No known extension for model type.";
- return 0;
- }
- NOTREACHED() << "Needed for linux_keep_shadow_stacks because of "
- << "http://gcc.gnu.org/bugzilla/show_bug.cgi?id=20681";
- return 0;
-}
-
-FullModelTypeSet ToFullModelTypeSet(ModelTypeSet in) {
- FullModelTypeSet out;
- for (ModelTypeSet::Iterator i = in.First(); i.Good(); i.Inc()) {
- out.Put(i.Get());
- }
- return out;
-}
-
-// Note: keep this consistent with GetModelType in entry.cc!
-ModelType GetModelType(const sync_pb::SyncEntity& sync_entity) {
- DCHECK(!IsRoot(sync_entity)); // Root shouldn't ever go over the wire.
-
- // Backwards compatibility with old (pre-specifics) protocol.
- if (sync_entity.has_bookmarkdata())
- return BOOKMARKS;
-
- ModelType specifics_type = GetModelTypeFromSpecifics(sync_entity.specifics());
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
-
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!sync_entity.server_defined_unique_tag().empty() &&
- IsFolder(sync_entity)) {
- return TOP_LEVEL_FOLDER;
- }
-
- // This is an item of a datatype we can't understand. Maybe it's
- // from the future? Either we mis-encoded the object, or the
- // server sent us entries it shouldn't have.
- NOTREACHED() << "Unknown datatype in sync proto.";
- return UNSPECIFIED;
-}
-
-ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
- if (specifics.has_bookmark())
- return BOOKMARKS;
-
- if (specifics.has_password())
- return PASSWORDS;
-
- if (specifics.has_preference())
- return PREFERENCES;
-
- if (specifics.has_autofill())
- return AUTOFILL;
-
- if (specifics.has_autofill_profile())
- return AUTOFILL_PROFILE;
-
- if (specifics.has_theme())
- return THEMES;
-
- if (specifics.has_typed_url())
- return TYPED_URLS;
-
- if (specifics.has_extension())
- return EXTENSIONS;
-
- if (specifics.has_nigori())
- return NIGORI;
-
- if (specifics.has_app())
- return APPS;
-
- if (specifics.has_app_list())
- return APP_LIST;
-
- if (specifics.has_search_engine())
- return SEARCH_ENGINES;
-
- if (specifics.has_session())
- return SESSIONS;
-
- if (specifics.has_app_setting())
- return APP_SETTINGS;
-
- if (specifics.has_extension_setting())
- return EXTENSION_SETTINGS;
-
- if (specifics.has_app_notification())
- return APP_NOTIFICATIONS;
-
- if (specifics.has_history_delete_directive())
- return HISTORY_DELETE_DIRECTIVES;
-
- if (specifics.has_synced_notification())
- return SYNCED_NOTIFICATIONS;
-
- if (specifics.has_device_info())
- return DEVICE_INFO;
-
- if (specifics.has_experiments())
- return EXPERIMENTS;
-
- if (specifics.has_priority_preference())
- return PRIORITY_PREFERENCES;
-
- if (specifics.has_dictionary())
- return DICTIONARY;
-
- if (specifics.has_favicon_image())
- return FAVICON_IMAGES;
-
- if (specifics.has_favicon_tracking())
- return FAVICON_TRACKING;
-
- if (specifics.has_managed_user_setting())
- return MANAGED_USER_SETTINGS;
-
- if (specifics.has_managed_user())
- return MANAGED_USERS;
-
- if (specifics.has_article())
- return ARTICLES;
-
- return UNSPECIFIED;
-}
-
-ModelTypeSet ProtocolTypes() {
- ModelTypeSet set = ModelTypeSet::All();
- set.RemoveAll(ProxyTypes());
- return set;
-}
-
-ModelTypeSet UserTypes() {
- ModelTypeSet set;
- // TODO(sync): We should be able to build the actual enumset's internal
- // bitset value here at compile time, rather than performing an iteration
- // every time.
- for (int i = FIRST_USER_MODEL_TYPE; i <= LAST_USER_MODEL_TYPE; ++i) {
- set.Put(ModelTypeFromInt(i));
- }
- return set;
-}
-
-ModelTypeSet UserSelectableTypes() {
- ModelTypeSet set;
- // Although the order doesn't technically matter here, it's clearer to keep
- // these in the same order as their definition in the ModelType enum.
- set.Put(BOOKMARKS);
- set.Put(PREFERENCES);
- set.Put(PASSWORDS);
- set.Put(AUTOFILL);
- set.Put(THEMES);
- set.Put(TYPED_URLS);
- set.Put(EXTENSIONS);
- set.Put(APPS);
- set.Put(PROXY_TABS);
- return set;
-}
-
-bool IsUserSelectableType(ModelType model_type) {
- return UserSelectableTypes().Has(model_type);
-}
-
-ModelTypeSet EncryptableUserTypes() {
- ModelTypeSet encryptable_user_types = UserTypes();
- // We never encrypt history delete directives.
- encryptable_user_types.Remove(HISTORY_DELETE_DIRECTIVES);
- // Synced notifications are not encrypted since the server must see changes.
- encryptable_user_types.Remove(SYNCED_NOTIFICATIONS);
- // Priority preferences are not encrypted because they might be synced before
- // encryption is ready.
- encryptable_user_types.Remove(PRIORITY_PREFERENCES);
- // Managed user settings are not encrypted since they are set server-side.
- encryptable_user_types.Remove(MANAGED_USER_SETTINGS);
- // Managed users are not encrypted since they are managed server-side.
- encryptable_user_types.Remove(MANAGED_USERS);
- // Proxy types have no sync representation and are therefore not encrypted.
- // Note however that proxy types map to one or more protocol types, which
- // may or may not be encrypted themselves.
- encryptable_user_types.RemoveAll(ProxyTypes());
- return encryptable_user_types;
-}
-
-ModelTypeSet PriorityUserTypes() {
- return ModelTypeSet(PRIORITY_PREFERENCES);
-}
-
-ModelTypeSet ControlTypes() {
- ModelTypeSet set;
- // TODO(sync): We should be able to build the actual enumset's internal
- // bitset value here at compile time, rather than performing an iteration
- // every time.
- for (int i = FIRST_CONTROL_MODEL_TYPE; i <= LAST_CONTROL_MODEL_TYPE; ++i) {
- set.Put(ModelTypeFromInt(i));
- }
-
- return set;
-}
-
-ModelTypeSet ProxyTypes() {
- ModelTypeSet set;
- set.Put(PROXY_TABS);
- return set;
-}
-
-bool IsControlType(ModelType model_type) {
- return ControlTypes().Has(model_type);
-}
-
-ModelTypeSet CoreTypes() {
- syncer::ModelTypeSet result;
- result.PutAll(PriorityCoreTypes());
-
- // The following are low priority core types.
- result.Put(SYNCED_NOTIFICATIONS);
-
- return result;
-}
-
-ModelTypeSet PriorityCoreTypes() {
- syncer::ModelTypeSet result;
- result.PutAll(ControlTypes());
-
- // The following are non-control core types.
- result.Put(MANAGED_USERS);
-
- return result;
-}
-
-const char* ModelTypeToString(ModelType model_type) {
- // This is used in serialization routines as well as for displaying debug
- // information. Do not attempt to change these string values unless you know
- // what you're doing.
- switch (model_type) {
- case TOP_LEVEL_FOLDER:
- return "Top Level Folder";
- case UNSPECIFIED:
- return "Unspecified";
- case BOOKMARKS:
- return "Bookmarks";
- case PREFERENCES:
- return "Preferences";
- case PASSWORDS:
- return "Passwords";
- case AUTOFILL:
- return "Autofill";
- case THEMES:
- return "Themes";
- case TYPED_URLS:
- return "Typed URLs";
- case EXTENSIONS:
- return "Extensions";
- case NIGORI:
- return "Encryption keys";
- case SEARCH_ENGINES:
- return "Search Engines";
- case SESSIONS:
- return "Sessions";
- case APPS:
- return "Apps";
- case APP_LIST:
- return "App List";
- case AUTOFILL_PROFILE:
- return "Autofill Profiles";
- case APP_SETTINGS:
- return "App settings";
- case EXTENSION_SETTINGS:
- return "Extension settings";
- case APP_NOTIFICATIONS:
- return "App Notifications";
- case HISTORY_DELETE_DIRECTIVES:
- return "History Delete Directives";
- case SYNCED_NOTIFICATIONS:
- return "Synced Notifications";
- case DEVICE_INFO:
- return "Device Info";
- case EXPERIMENTS:
- return "Experiments";
- case PRIORITY_PREFERENCES:
- return "Priority Preferences";
- case DICTIONARY:
- return "Dictionary";
- case FAVICON_IMAGES:
- return "Favicon Images";
- case FAVICON_TRACKING:
- return "Favicon Tracking";
- case MANAGED_USER_SETTINGS:
- return "Managed User Settings";
- case MANAGED_USERS:
- return "Managed Users";
- case ARTICLES:
- return "Articles";
- case PROXY_TABS:
- return "Tabs";
- default:
- break;
- }
- NOTREACHED() << "No known extension for model type.";
- return "INVALID";
-}
-
-// The normal rules about histograms apply here. Always append to the bottom of
-// the list, and be careful to not reuse integer values that have already been
-// assigned. Don't forget to update histograms.xml when you make changes to
-// this list.
-int ModelTypeToHistogramInt(ModelType model_type) {
- switch (model_type) {
- case UNSPECIFIED:
- return 0;
- case TOP_LEVEL_FOLDER:
- return 1;
- case BOOKMARKS:
- return 2;
- case PREFERENCES:
- return 3;
- case PASSWORDS:
- return 4;
- case AUTOFILL_PROFILE:
- return 5;
- case AUTOFILL:
- return 6;
- case THEMES:
- return 7;
- case TYPED_URLS:
- return 8;
- case EXTENSIONS:
- return 9;
- case SEARCH_ENGINES:
- return 10;
- case SESSIONS:
- return 11;
- case APPS:
- return 12;
- case APP_SETTINGS:
- return 13;
- case EXTENSION_SETTINGS:
- return 14;
- case APP_NOTIFICATIONS:
- return 15;
- case HISTORY_DELETE_DIRECTIVES:
- return 16;
- case NIGORI:
- return 17;
- case DEVICE_INFO:
- return 18;
- case EXPERIMENTS:
- return 19;
- case SYNCED_NOTIFICATIONS:
- return 20;
- case PRIORITY_PREFERENCES:
- return 21;
- case DICTIONARY:
- return 22;
- case FAVICON_IMAGES:
- return 23;
- case FAVICON_TRACKING:
- return 24;
- case PROXY_TABS:
- return 25;
- case MANAGED_USER_SETTINGS:
- return 26;
- case MANAGED_USERS:
- return 27;
- case ARTICLES:
- return 28;
- case APP_LIST:
- return 29;
- // Silence a compiler warning.
- case MODEL_TYPE_COUNT:
- return 0;
- }
- return 0;
-}
-
-base::StringValue* ModelTypeToValue(ModelType model_type) {
- if (model_type >= FIRST_REAL_MODEL_TYPE) {
- return new base::StringValue(ModelTypeToString(model_type));
- } else if (model_type == TOP_LEVEL_FOLDER) {
- return new base::StringValue("Top-level folder");
- } else if (model_type == UNSPECIFIED) {
- return new base::StringValue("Unspecified");
- }
- NOTREACHED();
- return new base::StringValue(std::string());
-}
-
-ModelType ModelTypeFromValue(const base::Value& value) {
- if (value.IsType(base::Value::TYPE_STRING)) {
- std::string result;
- CHECK(value.GetAsString(&result));
- return ModelTypeFromString(result);
- } else if (value.IsType(base::Value::TYPE_INTEGER)) {
- int result;
- CHECK(value.GetAsInteger(&result));
- return ModelTypeFromInt(result);
- } else {
- NOTREACHED() << "Unsupported value type: " << value.GetType();
- return UNSPECIFIED;
- }
-}
-
-ModelType ModelTypeFromString(const std::string& model_type_string) {
- if (model_type_string == "Bookmarks")
- return BOOKMARKS;
- else if (model_type_string == "Preferences")
- return PREFERENCES;
- else if (model_type_string == "Passwords")
- return PASSWORDS;
- else if (model_type_string == "Autofill")
- return AUTOFILL;
- else if (model_type_string == "Autofill Profiles")
- return AUTOFILL_PROFILE;
- else if (model_type_string == "Themes")
- return THEMES;
- else if (model_type_string == "Typed URLs")
- return TYPED_URLS;
- else if (model_type_string == "Extensions")
- return EXTENSIONS;
- else if (model_type_string == "Encryption keys")
- return NIGORI;
- else if (model_type_string == "Search Engines")
- return SEARCH_ENGINES;
- else if (model_type_string == "Sessions")
- return SESSIONS;
- else if (model_type_string == "Apps")
- return APPS;
- else if (model_type_string == "App List")
- return APP_LIST;
- else if (model_type_string == "App settings")
- return APP_SETTINGS;
- else if (model_type_string == "Extension settings")
- return EXTENSION_SETTINGS;
- else if (model_type_string == "App Notifications")
- return APP_NOTIFICATIONS;
- else if (model_type_string == "History Delete Directives")
- return HISTORY_DELETE_DIRECTIVES;
- else if (model_type_string == "Synced Notifications")
- return SYNCED_NOTIFICATIONS;
- else if (model_type_string == "Device Info")
- return DEVICE_INFO;
- else if (model_type_string == "Experiments")
- return EXPERIMENTS;
- else if (model_type_string == "Priority Preferences")
- return PRIORITY_PREFERENCES;
- else if (model_type_string == "Dictionary")
- return DICTIONARY;
- else if (model_type_string == "Favicon Images")
- return FAVICON_IMAGES;
- else if (model_type_string == "Favicon Tracking")
- return FAVICON_TRACKING;
- else if (model_type_string == "Managed User Settings")
- return MANAGED_USER_SETTINGS;
- else if (model_type_string == "Managed Users")
- return MANAGED_USERS;
- else if (model_type_string == "Articles")
- return ARTICLES;
- else if (model_type_string == "Tabs")
- return PROXY_TABS;
- else
- NOTREACHED() << "No known model type corresponding to "
- << model_type_string << ".";
- return UNSPECIFIED;
-}
-
-std::string ModelTypeSetToString(ModelTypeSet model_types) {
- std::string result;
- for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
- if (!result.empty()) {
- result += ", ";
- }
- result += ModelTypeToString(it.Get());
- }
- return result;
-}
-
-base::ListValue* ModelTypeSetToValue(ModelTypeSet model_types) {
- base::ListValue* value = new base::ListValue();
- for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
- value->Append(new base::StringValue(ModelTypeToString(it.Get())));
- }
- return value;
-}
-
-ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value) {
- ModelTypeSet result;
- for (base::ListValue::const_iterator i = value.begin();
- i != value.end(); ++i) {
- result.Put(ModelTypeFromValue(**i));
- }
- return result;
-}
-
-// TODO(zea): remove all hardcoded tags in model associators and have them use
-// this instead.
-// NOTE: Proxy types should return empty strings (so that we don't NOTREACHED
-// in tests when we verify they have no root node).
-std::string ModelTypeToRootTag(ModelType type) {
- switch (type) {
- case BOOKMARKS:
- return "google_chrome_bookmarks";
- case PREFERENCES:
- return "google_chrome_preferences";
- case PASSWORDS:
- return "google_chrome_passwords";
- case AUTOFILL:
- return "google_chrome_autofill";
- case THEMES:
- return "google_chrome_themes";
- case TYPED_URLS:
- return "google_chrome_typed_urls";
- case EXTENSIONS:
- return "google_chrome_extensions";
- case NIGORI:
- return "google_chrome_nigori";
- case SEARCH_ENGINES:
- return "google_chrome_search_engines";
- case SESSIONS:
- return "google_chrome_sessions";
- case APPS:
- return "google_chrome_apps";
- case APP_LIST:
- return "google_chrome_app_list";
- case AUTOFILL_PROFILE:
- return "google_chrome_autofill_profiles";
- case APP_SETTINGS:
- return "google_chrome_app_settings";
- case EXTENSION_SETTINGS:
- return "google_chrome_extension_settings";
- case APP_NOTIFICATIONS:
- return "google_chrome_app_notifications";
- case HISTORY_DELETE_DIRECTIVES:
- return "google_chrome_history_delete_directives";
- case SYNCED_NOTIFICATIONS:
- return "google_chrome_synced_notifications";
- case DEVICE_INFO:
- return "google_chrome_device_info";
- case EXPERIMENTS:
- return "google_chrome_experiments";
- case PRIORITY_PREFERENCES:
- return "google_chrome_priority_preferences";
- case DICTIONARY:
- return "google_chrome_dictionary";
- case FAVICON_IMAGES:
- return "google_chrome_favicon_images";
- case FAVICON_TRACKING:
- return "google_chrome_favicon_tracking";
- case MANAGED_USER_SETTINGS:
- return "google_chrome_managed_user_settings";
- case MANAGED_USERS:
- return "google_chrome_managed_users";
- case ARTICLES:
- return "google_chrome_articles";
- case PROXY_TABS:
- return std::string();
- default:
- break;
- }
- NOTREACHED() << "No known extension for model type.";
- return "INVALID";
-}
-
-// TODO(akalin): Figure out a better way to do these mappings.
-// Note: Do not include proxy types in this list. They should never receive
-// or trigger notifications.
-namespace {
-const char kBookmarkNotificationType[] = "BOOKMARK";
-const char kPreferenceNotificationType[] = "PREFERENCE";
-const char kPasswordNotificationType[] = "PASSWORD";
-const char kAutofillNotificationType[] = "AUTOFILL";
-const char kThemeNotificationType[] = "THEME";
-const char kTypedUrlNotificationType[] = "TYPED_URL";
-const char kExtensionNotificationType[] = "EXTENSION";
-const char kExtensionSettingNotificationType[] = "EXTENSION_SETTING";
-const char kNigoriNotificationType[] = "NIGORI";
-const char kAppSettingNotificationType[] = "APP_SETTING";
-const char kAppNotificationType[] = "APP";
-const char kAppListNotificationType[] = "APP_LIST";
-const char kSearchEngineNotificationType[] = "SEARCH_ENGINE";
-const char kSessionNotificationType[] = "SESSION";
-const char kAutofillProfileNotificationType[] = "AUTOFILL_PROFILE";
-const char kAppNotificationNotificationType[] = "APP_NOTIFICATION";
-const char kHistoryDeleteDirectiveNotificationType[] =
- "HISTORY_DELETE_DIRECTIVE";
-const char kSyncedNotificationType[] = "SYNCED_NOTIFICATION";
-const char kDeviceInfoNotificationType[] = "DEVICE_INFO";
-const char kExperimentsNotificationType[] = "EXPERIMENTS";
-const char kPriorityPreferenceNotificationType[] = "PRIORITY_PREFERENCE";
-const char kDictionaryNotificationType[] = "DICTIONARY";
-const char kFaviconImageNotificationType[] = "FAVICON_IMAGE";
-const char kFaviconTrackingNotificationType[] = "FAVICON_TRACKING";
-const char kManagedUserSettingNotificationType[] = "MANAGED_USER_SETTING";
-const char kManagedUserNotificationType[] = "MANAGED_USER";
-const char kArticleNotificationType[] = "ARTICLE";
-} // namespace
-
-bool RealModelTypeToNotificationType(ModelType model_type,
- std::string* notification_type) {
- switch (model_type) {
- case BOOKMARKS:
- *notification_type = kBookmarkNotificationType;
- return true;
- case PREFERENCES:
- *notification_type = kPreferenceNotificationType;
- return true;
- case PASSWORDS:
- *notification_type = kPasswordNotificationType;
- return true;
- case AUTOFILL:
- *notification_type = kAutofillNotificationType;
- return true;
- case THEMES:
- *notification_type = kThemeNotificationType;
- return true;
- case TYPED_URLS:
- *notification_type = kTypedUrlNotificationType;
- return true;
- case EXTENSIONS:
- *notification_type = kExtensionNotificationType;
- return true;
- case NIGORI:
- *notification_type = kNigoriNotificationType;
- return true;
- case APP_SETTINGS:
- *notification_type = kAppSettingNotificationType;
- return true;
- case APPS:
- *notification_type = kAppNotificationType;
- return true;
- case APP_LIST:
- *notification_type = kAppListNotificationType;
- return true;
- case SEARCH_ENGINES:
- *notification_type = kSearchEngineNotificationType;
- return true;
- case SESSIONS:
- *notification_type = kSessionNotificationType;
- return true;
- case AUTOFILL_PROFILE:
- *notification_type = kAutofillProfileNotificationType;
- return true;
- case EXTENSION_SETTINGS:
- *notification_type = kExtensionSettingNotificationType;
- return true;
- case APP_NOTIFICATIONS:
- *notification_type = kAppNotificationNotificationType;
- return true;
- case HISTORY_DELETE_DIRECTIVES:
- *notification_type = kHistoryDeleteDirectiveNotificationType;
- return true;
- case SYNCED_NOTIFICATIONS:
- *notification_type = kSyncedNotificationType;
- return true;
- case DEVICE_INFO:
- *notification_type = kDeviceInfoNotificationType;
- return true;
- case EXPERIMENTS:
- *notification_type = kExperimentsNotificationType;
- return true;
- case PRIORITY_PREFERENCES:
- *notification_type = kPriorityPreferenceNotificationType;
- return true;
- case DICTIONARY:
- *notification_type = kDictionaryNotificationType;
- return true;
- case FAVICON_IMAGES:
- *notification_type = kFaviconImageNotificationType;
- return true;
- case FAVICON_TRACKING:
- *notification_type = kFaviconTrackingNotificationType;
- return true;
- case MANAGED_USER_SETTINGS:
- *notification_type = kManagedUserSettingNotificationType;
- return true;
- case MANAGED_USERS:
- *notification_type = kManagedUserNotificationType;
- return true;
- case ARTICLES:
- *notification_type = kArticleNotificationType;
- return true;
- default:
- break;
- }
- notification_type->clear();
- return false;
-}
-
-bool NotificationTypeToRealModelType(const std::string& notification_type,
- ModelType* model_type) {
- if (notification_type == kBookmarkNotificationType) {
- *model_type = BOOKMARKS;
- return true;
- } else if (notification_type == kPreferenceNotificationType) {
- *model_type = PREFERENCES;
- return true;
- } else if (notification_type == kPasswordNotificationType) {
- *model_type = PASSWORDS;
- return true;
- } else if (notification_type == kAutofillNotificationType) {
- *model_type = AUTOFILL;
- return true;
- } else if (notification_type == kThemeNotificationType) {
- *model_type = THEMES;
- return true;
- } else if (notification_type == kTypedUrlNotificationType) {
- *model_type = TYPED_URLS;
- return true;
- } else if (notification_type == kExtensionNotificationType) {
- *model_type = EXTENSIONS;
- return true;
- } else if (notification_type == kNigoriNotificationType) {
- *model_type = NIGORI;
- return true;
- } else if (notification_type == kAppNotificationType) {
- *model_type = APPS;
- return true;
- } else if (notification_type == kAppListNotificationType) {
- *model_type = APP_LIST;
- return true;
- } else if (notification_type == kSearchEngineNotificationType) {
- *model_type = SEARCH_ENGINES;
- return true;
- } else if (notification_type == kSessionNotificationType) {
- *model_type = SESSIONS;
- return true;
- } else if (notification_type == kAutofillProfileNotificationType) {
- *model_type = AUTOFILL_PROFILE;
- return true;
- } else if (notification_type == kAppSettingNotificationType) {
- *model_type = APP_SETTINGS;
- return true;
- } else if (notification_type == kExtensionSettingNotificationType) {
- *model_type = EXTENSION_SETTINGS;
- return true;
- } else if (notification_type == kAppNotificationNotificationType) {
- *model_type = APP_NOTIFICATIONS;
- return true;
- } else if (notification_type == kHistoryDeleteDirectiveNotificationType) {
- *model_type = HISTORY_DELETE_DIRECTIVES;
- return true;
- } else if (notification_type == kSyncedNotificationType) {
- *model_type = SYNCED_NOTIFICATIONS;
- return true;
- } else if (notification_type == kDeviceInfoNotificationType) {
- *model_type = DEVICE_INFO;
- return true;
- } else if (notification_type == kExperimentsNotificationType) {
- *model_type = EXPERIMENTS;
- return true;
- } else if (notification_type == kPriorityPreferenceNotificationType) {
- *model_type = PRIORITY_PREFERENCES;
- return true;
- } else if (notification_type == kDictionaryNotificationType) {
- *model_type = DICTIONARY;
- return true;
- } else if (notification_type == kFaviconImageNotificationType) {
- *model_type = FAVICON_IMAGES;
- return true;
- } else if (notification_type == kFaviconTrackingNotificationType) {
- *model_type = FAVICON_TRACKING;
- return true;
- } else if (notification_type == kManagedUserSettingNotificationType) {
- *model_type = MANAGED_USER_SETTINGS;
- return true;
- } else if (notification_type == kManagedUserNotificationType) {
- *model_type = MANAGED_USERS;
- return true;
- } else if (notification_type == kArticleNotificationType) {
- *model_type = ARTICLES;
- return true;
- }
- *model_type = UNSPECIFIED;
- return false;
-}
-
-bool IsRealDataType(ModelType model_type) {
- return model_type >= FIRST_REAL_MODEL_TYPE && model_type < MODEL_TYPE_COUNT;
-}
-
-bool IsActOnceDataType(ModelType model_type) {
- return model_type == HISTORY_DELETE_DIRECTIVES;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/syncable/model_type_unittest.cc b/chromium/sync/syncable/model_type_unittest.cc
deleted file mode 100644
index 737aea3f312..00000000000
--- a/chromium/sync/syncable/model_type_unittest.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/model_type.h"
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class ModelTypeTest : public testing::Test {};
-
-TEST_F(ModelTypeTest, ModelTypeToValue) {
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- ModelType model_type = ModelTypeFromInt(i);
- base::ExpectStringValue(ModelTypeToString(model_type),
- ModelTypeToValue(model_type));
- }
- base::ExpectStringValue("Top-level folder",
- ModelTypeToValue(TOP_LEVEL_FOLDER));
- base::ExpectStringValue("Unspecified",
- ModelTypeToValue(UNSPECIFIED));
-}
-
-TEST_F(ModelTypeTest, ModelTypeFromValue) {
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- ModelType model_type = ModelTypeFromInt(i);
- scoped_ptr<StringValue> value(ModelTypeToValue(model_type));
- EXPECT_EQ(model_type, ModelTypeFromValue(*value));
- }
-}
-
-TEST_F(ModelTypeTest, ModelTypeSetToValue) {
- const ModelTypeSet model_types(BOOKMARKS, APPS);
-
- scoped_ptr<base::ListValue> value(ModelTypeSetToValue(model_types));
- EXPECT_EQ(2u, value->GetSize());
- std::string types[2];
- EXPECT_TRUE(value->GetString(0, &types[0]));
- EXPECT_TRUE(value->GetString(1, &types[1]));
- EXPECT_EQ("Bookmarks", types[0]);
- EXPECT_EQ("Apps", types[1]);
-}
-
-TEST_F(ModelTypeTest, ModelTypeSetFromValue) {
- // Try empty set first.
- ModelTypeSet model_types;
- scoped_ptr<base::ListValue> value(ModelTypeSetToValue(model_types));
- EXPECT_TRUE(model_types.Equals(ModelTypeSetFromValue(*value)));
-
- // Now try with a few random types.
- model_types.Put(BOOKMARKS);
- model_types.Put(APPS);
- value.reset(ModelTypeSetToValue(model_types));
- EXPECT_TRUE(model_types.Equals(ModelTypeSetFromValue(*value)));
-}
-
-TEST_F(ModelTypeTest, IsRealDataType) {
- EXPECT_FALSE(IsRealDataType(UNSPECIFIED));
- EXPECT_FALSE(IsRealDataType(MODEL_TYPE_COUNT));
- EXPECT_FALSE(IsRealDataType(TOP_LEVEL_FOLDER));
- EXPECT_TRUE(IsRealDataType(FIRST_REAL_MODEL_TYPE));
- EXPECT_TRUE(IsRealDataType(BOOKMARKS));
- EXPECT_TRUE(IsRealDataType(APPS));
-}
-
-// Make sure we can convert ModelTypes to and from specifics field
-// numbers.
-TEST_F(ModelTypeTest, ModelTypeToFromSpecificsFieldNumber) {
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- int field_number = GetSpecificsFieldNumberFromModelType(iter.Get());
- EXPECT_EQ(iter.Get(),
- GetModelTypeFromSpecificsFieldNumber(field_number));
- }
-}
-
-TEST_F(ModelTypeTest, ModelTypeOfInvalidSpecificsFieldNumber) {
- EXPECT_EQ(UNSPECIFIED, GetModelTypeFromSpecificsFieldNumber(0));
-}
-
-TEST_F(ModelTypeTest, ModelTypeHistogramMapping) {
- std::set<int> histogram_values;
- ModelTypeSet all_types = ModelTypeSet::All();
- for (ModelTypeSet::Iterator it = all_types.First(); it.Good(); it.Inc()) {
- SCOPED_TRACE(ModelTypeToString(it.Get()));
- int histogram_value = ModelTypeToHistogramInt(it.Get());
-
- EXPECT_TRUE(histogram_values.insert(histogram_value).second)
- << "Expected histogram values to be unique";
-
- // This is not necessary for the mapping to be valid, but most instances of
- // UMA_HISTOGRAM that use this mapping specify MODEL_TYPE_COUNT as the
- // maximum possible value. If you break this assumption, you should update
- // those histograms.
- EXPECT_LT(histogram_value, MODEL_TYPE_COUNT);
- }
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/syncable/mutable_entry.cc b/chromium/sync/syncable/mutable_entry.cc
deleted file mode 100644
index 863e65b8b32..00000000000
--- a/chromium/sync/syncable/mutable_entry.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/mutable_entry.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/scoped_kernel_lock.h"
-#include "sync/syncable/scoped_parent_child_index_updater.h"
-#include "sync/syncable/syncable-inl.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-using std::string;
-
-namespace syncer {
-namespace syncable {
-
-void MutableEntry::Init(WriteTransaction* trans,
- ModelType model_type,
- const Id& parent_id,
- const string& name) {
- scoped_ptr<EntryKernel> kernel(new EntryKernel);
- kernel_ = NULL;
-
- kernel->put(ID, trans->directory_->NextId());
- kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
- kernel->mark_dirty(&trans->directory_->kernel_->dirty_metahandles);
- kernel->put(PARENT_ID, parent_id);
- kernel->put(NON_UNIQUE_NAME, name);
- const base::Time& now = base::Time::Now();
- kernel->put(CTIME, now);
- kernel->put(MTIME, now);
- // We match the database defaults here
- kernel->put(BASE_VERSION, CHANGES_VERSION);
-
- // Normally the SPECIFICS setting code is wrapped in logic to deal with
- // unknown fields and encryption. Since all we want to do here is ensure that
- // GetModelType() returns a correct value from the very beginning, these
- // few lines are sufficient.
- sync_pb::EntitySpecifics specifics;
- AddDefaultFieldValue(model_type, &specifics);
- kernel->put(SPECIFICS, specifics);
-
- // Because this entry is new, it was originally deleted.
- kernel->put(IS_DEL, true);
- trans->TrackChangesTo(kernel.get());
- kernel->put(IS_DEL, false);
-
- // Now swap the pointers.
- kernel_ = kernel.release();
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans,
- Create,
- ModelType model_type,
- const Id& parent_id,
- const string& name)
- : ModelNeutralMutableEntry(trans), write_transaction_(trans) {
- Init(trans, model_type, parent_id, name);
- // We need to have a valid position ready before we can index the item.
- if (model_type == BOOKMARKS) {
- // Base the tag off of our cache-guid and local "c-" style ID.
- std::string unique_tag = syncable::GenerateSyncableBookmarkHash(
- trans->directory()->cache_guid(), GetId().GetServerId());
- kernel_->put(UNIQUE_BOOKMARK_TAG, unique_tag);
- kernel_->put(UNIQUE_POSITION, UniquePosition::InitialPosition(unique_tag));
- } else {
- DCHECK(!ShouldMaintainPosition());
- }
-
- bool result = trans->directory()->InsertEntry(trans, kernel_);
- DCHECK(result);
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
- const Id& id)
- : ModelNeutralMutableEntry(trans, CREATE_NEW_UPDATE_ITEM, id),
- write_transaction_(trans) {}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
- : ModelNeutralMutableEntry(trans, GET_BY_ID, id),
- write_transaction_(trans) {
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
- int64 metahandle)
- : ModelNeutralMutableEntry(trans, GET_BY_HANDLE, metahandle),
- write_transaction_(trans) {
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
- const std::string& tag)
- : ModelNeutralMutableEntry(trans, GET_BY_CLIENT_TAG, tag),
- write_transaction_(trans) {
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
- const string& tag)
- : ModelNeutralMutableEntry(trans, GET_BY_SERVER_TAG, tag),
- write_transaction_(trans) {
-}
-
-void MutableEntry::PutLocalExternalId(int64 value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- if (kernel_->ref(LOCAL_EXTERNAL_ID) != value) {
- ScopedKernelLock lock(dir());
- kernel_->put(LOCAL_EXTERNAL_ID, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutMtime(base::Time value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- if (kernel_->ref(MTIME) != value) {
- kernel_->put(MTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutCtime(base::Time value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- if (kernel_->ref(CTIME) != value) {
- kernel_->put(CTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutParentId(const Id& value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- if (kernel_->ref(PARENT_ID) != value) {
- PutParentIdPropertyOnly(value);
- if (!GetIsDel()) {
- if (!PutPredecessor(Id())) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
- }
- }
- }
-}
-
-void MutableEntry::PutIsDir(bool value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- bool old_value = kernel_->ref(IS_DIR);
- if (old_value != value) {
- kernel_->put(IS_DIR, value);
- kernel_->mark_dirty(GetDirtyIndexHelper());
- }
-}
-
-void MutableEntry::PutIsDel(bool value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- if (value == kernel_->ref(IS_DEL)) {
- return;
- }
- if (value) {
- // If the server never knew about this item and it's deleted then we don't
- // need to keep it around. Unsetting IS_UNSYNCED will:
- // - Ensure that the item is never committed to the server.
- // - Allow any items with the same UNIQUE_CLIENT_TAG created on other
- // clients to override this entry.
- // - Let us delete this entry permanently through
- // DirectoryBackingStore::DropDeletedEntries() when we next restart sync.
- // This will save memory and avoid crbug.com/125381.
- if (!GetId().ServerKnows()) {
- PutIsUnsynced(false);
- }
- }
-
- {
- ScopedKernelLock lock(dir());
- // Some indices don't include deleted items and must be updated
- // upon a value change.
- ScopedParentChildIndexUpdater updater(lock, kernel_,
- &dir()->kernel_->parent_child_index);
-
- kernel_->put(IS_DEL, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutNonUniqueName(const std::string& value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
-
- if (kernel_->ref(NON_UNIQUE_NAME) != value) {
- kernel_->put(NON_UNIQUE_NAME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
- DCHECK(kernel_);
- CHECK(!value.password().has_client_only_encrypted_data());
- write_transaction()->TrackChangesTo(kernel_);
- // TODO(ncarter): This is unfortunately heavyweight. Can we do
- // better?
- if (kernel_->ref(SPECIFICS).SerializeAsString() !=
- value.SerializeAsString()) {
- kernel_->put(SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-void MutableEntry::PutUniquePosition(const UniquePosition& value) {
- DCHECK(kernel_);
- write_transaction()->TrackChangesTo(kernel_);
- if(!kernel_->ref(UNIQUE_POSITION).Equals(value)) {
- // We should never overwrite a valid position with an invalid one.
- DCHECK(value.IsValid());
- ScopedKernelLock lock(dir());
- ScopedParentChildIndexUpdater updater(
- lock, kernel_, &dir()->kernel_->parent_child_index);
- kernel_->put(UNIQUE_POSITION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
- }
-}
-
-bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
- MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
- if (!predecessor.good())
- return false;
- dir()->PutPredecessor(kernel_, predecessor.kernel_);
- return true;
-}
-
-// This function sets only the flags needed to get this entry to sync.
-bool MarkForSyncing(MutableEntry* e) {
- DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
- DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
- if (!(e->PutIsUnsynced(true)))
- return false;
- e->PutSyncing(false);
- return true;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/mutable_entry.h b/chromium/sync/syncable/mutable_entry.h
deleted file mode 100644
index 8c2f2ab5492..00000000000
--- a/chromium/sync/syncable/mutable_entry.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_MUTABLE_ENTRY_H_
-#define SYNC_SYNCABLE_MUTABLE_ENTRY_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-
-namespace syncer {
-class WriteNode;
-
-namespace syncable {
-
-enum Create {
- CREATE
-};
-
-class WriteTransaction;
-
-// A mutable meta entry. Changes get committed to the database when the
-// WriteTransaction is destroyed.
-class SYNC_EXPORT_PRIVATE MutableEntry : public ModelNeutralMutableEntry {
- void Init(WriteTransaction* trans, ModelType model_type,
- const Id& parent_id, const std::string& name);
-
- public:
- MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
- MutableEntry(WriteTransaction* trans, Create, ModelType model_type,
- const Id& parent_id, const std::string& name);
- MutableEntry(WriteTransaction* trans, GetByHandle, int64);
- MutableEntry(WriteTransaction* trans, GetById, const Id&);
- MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
- MutableEntry(WriteTransaction* trans, GetByServerTag, const std::string& tag);
-
- inline WriteTransaction* write_transaction() const {
- return write_transaction_;
- }
-
- // Model-changing setters. These setters make user-visible changes that will
- // need to be communicated either to the local model or the sync server.
- void PutLocalExternalId(int64 value);
- void PutMtime(base::Time value);
- void PutCtime(base::Time value);
- void PutParentId(const Id& value);
- void PutIsDir(bool value);
- void PutIsDel(bool value);
- void PutNonUniqueName(const std::string& value);
- void PutSpecifics(const sync_pb::EntitySpecifics& value);
- void PutUniquePosition(const UniquePosition& value);
-
- // Sets the position of this item, and updates the entry kernels of the
- // adjacent siblings so that list invariants are maintained. Returns false
- // and fails if |predecessor_id| does not identify a sibling. Pass the root
- // ID to put the node in first position.
- bool PutPredecessor(const Id& predecessor_id);
-
- private:
- // Kind of redundant. We should reduce the number of pointers
- // floating around if at all possible. Could we store this in Directory?
- // Scope: Set on construction, never changed after that.
- WriteTransaction* const write_transaction_;
-
- DISALLOW_COPY_AND_ASSIGN(MutableEntry);
-};
-
-// This function sets only the flags needed to get this entry to sync.
-bool MarkForSyncing(syncable::MutableEntry* e);
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_MUTABLE_ENTRY_H_
diff --git a/chromium/sync/syncable/nigori_handler.cc b/chromium/sync/syncable/nigori_handler.cc
deleted file mode 100644
index e193c893dde..00000000000
--- a/chromium/sync/syncable/nigori_handler.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/nigori_handler.h"
-
-namespace syncer {
-namespace syncable {
-
-NigoriHandler::NigoriHandler() {}
-NigoriHandler::~NigoriHandler() {}
-
-} // namespace syncer
-} // namespace syncable
diff --git a/chromium/sync/syncable/nigori_handler.h b/chromium/sync/syncable/nigori_handler.h
deleted file mode 100644
index d39ddf92a50..00000000000
--- a/chromium/sync/syncable/nigori_handler.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_NIGORI_HANDLER_H_
-#define SYNC_SYNCABLE_NIGORI_HANDLER_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace google{
-namespace protobuf{
-template <typename T>
-class RepeatedPtrField;
-}
-}
-
-namespace sync_pb {
-class NigoriSpecifics;
-}
-
-namespace syncer {
-namespace syncable {
-
-class BaseTransaction;
-
-// Sync internal interface for dealing with nigori node and querying
-// the current set of encrypted types. Not thread safe, so a sync transaction
-// must be held by a caller whenever invoking methods.
-class SYNC_EXPORT_PRIVATE NigoriHandler {
- public:
- NigoriHandler();
- virtual ~NigoriHandler();
-
- // Apply a nigori node update, updating the internal encryption state
- // accordingly.
- virtual void ApplyNigoriUpdate(
- const sync_pb::NigoriSpecifics& nigori,
- syncable::BaseTransaction* const trans) = 0;
-
- // Store the current encrypt everything/encrypted types state into |nigori|.
- virtual void UpdateNigoriFromEncryptedTypes(
- sync_pb::NigoriSpecifics* nigori,
- syncable::BaseTransaction* const trans) const = 0;
-
- // Whether a keystore key needs to be requested from the sync server.
- virtual bool NeedKeystoreKey(
- syncable::BaseTransaction* const trans) const = 0;
-
- // Set the keystore keys the server returned for this account.
- // Returns true on success, false otherwise.
- virtual bool SetKeystoreKeys(
- const google::protobuf::RepeatedPtrField<std::string>& keys,
- syncable::BaseTransaction* const trans) = 0;
-
- // Returns the set of currently encrypted types.
- virtual ModelTypeSet GetEncryptedTypes(
- syncable::BaseTransaction* const trans) const = 0;
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_NIGORI_HANDLER_H_
diff --git a/chromium/sync/syncable/nigori_util.cc b/chromium/sync/syncable/nigori_util.cc
deleted file mode 100644
index 107a68f233b..00000000000
--- a/chromium/sync/syncable/nigori_util.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/nigori_util.h"
-
-#include <queue>
-#include <string>
-#include <vector>
-
-#include "base/json/json_writer.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/nigori_handler.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/util/cryptographer.h"
-
-namespace syncer {
-namespace syncable {
-
-bool ProcessUnsyncedChangesForEncryption(
- WriteTransaction* const trans) {
- NigoriHandler* nigori_handler = trans->directory()->GetNigoriHandler();
- ModelTypeSet encrypted_types = nigori_handler->GetEncryptedTypes(trans);
- Cryptographer* cryptographer = trans->directory()->GetCryptographer(trans);
- DCHECK(cryptographer->is_ready());
-
- // Get list of all datatypes with unsynced changes. It's possible that our
- // local changes need to be encrypted if encryption for that datatype was
- // just turned on (and vice versa).
- // Note: we do not attempt to re-encrypt data with a new key here as key
- // changes in this code path are likely due to consistency issues (we have
- // to be updated to a key we already have, e.g. an old key).
- std::vector<int64> handles;
- GetUnsyncedEntries(trans, &handles);
- for (size_t i = 0; i < handles.size(); ++i) {
- MutableEntry entry(trans, GET_BY_HANDLE, handles[i]);
- const sync_pb::EntitySpecifics& specifics = entry.GetSpecifics();
- // Ignore types that don't need encryption or entries that are already
- // encrypted.
- if (!SpecificsNeedsEncryption(encrypted_types, specifics))
- continue;
- if (!UpdateEntryWithEncryption(trans, specifics, &entry))
- return false;
- }
- return true;
-}
-
-bool VerifyUnsyncedChangesAreEncrypted(
- BaseTransaction* const trans,
- ModelTypeSet encrypted_types) {
- std::vector<int64> handles;
- GetUnsyncedEntries(trans, &handles);
- for (size_t i = 0; i < handles.size(); ++i) {
- Entry entry(trans, GET_BY_HANDLE, handles[i]);
- if (!entry.good()) {
- NOTREACHED();
- return false;
- }
- if (EntryNeedsEncryption(encrypted_types, entry))
- return false;
- }
- return true;
-}
-
-bool EntryNeedsEncryption(ModelTypeSet encrypted_types,
- const Entry& entry) {
- if (!entry.GetUniqueServerTag().empty())
- return false; // We don't encrypt unique server nodes.
- ModelType type = entry.GetModelType();
- if (type == PASSWORDS || IsControlType(type))
- return false;
- // Checking NON_UNIQUE_NAME is not necessary for the correctness of encrypting
- // the data, nor for determining if data is encrypted. We simply ensure it has
- // been overwritten to avoid any possible leaks of sensitive data.
- return SpecificsNeedsEncryption(encrypted_types, entry.GetSpecifics()) ||
- (encrypted_types.Has(type) &&
- entry.GetNonUniqueName() != kEncryptedString);
-}
-
-bool SpecificsNeedsEncryption(ModelTypeSet encrypted_types,
- const sync_pb::EntitySpecifics& specifics) {
- const ModelType type = GetModelTypeFromSpecifics(specifics);
- if (type == PASSWORDS || IsControlType(type))
- return false; // These types have their own encryption schemes.
- if (!encrypted_types.Has(type))
- return false; // This type does not require encryption
- return !specifics.has_encrypted();
-}
-
-// Mainly for testing.
-bool VerifyDataTypeEncryptionForTest(
- BaseTransaction* const trans,
- ModelType type,
- bool is_encrypted) {
- Cryptographer* cryptographer = trans->directory()->GetCryptographer(trans);
- if (type == PASSWORDS || IsControlType(type)) {
- NOTREACHED();
- return true;
- }
- std::string type_tag = ModelTypeToRootTag(type);
- Entry type_root(trans, GET_BY_SERVER_TAG, type_tag);
- if (!type_root.good()) {
- NOTREACHED();
- return false;
- }
-
- std::queue<Id> to_visit;
- Id id_string = type_root.GetFirstChildId();
- to_visit.push(id_string);
- while (!to_visit.empty()) {
- id_string = to_visit.front();
- to_visit.pop();
- if (id_string.IsRoot())
- continue;
-
- Entry child(trans, GET_BY_ID, id_string);
- if (!child.good()) {
- NOTREACHED();
- return false;
- }
- if (child.GetIsDir()) {
- Id child_id_string = child.GetFirstChildId();
- // Traverse the children.
- to_visit.push(child_id_string);
- }
- const sync_pb::EntitySpecifics& specifics = child.GetSpecifics();
- DCHECK_EQ(type, child.GetModelType());
- DCHECK_EQ(type, GetModelTypeFromSpecifics(specifics));
- // We don't encrypt the server's permanent items.
- if (child.GetUniqueServerTag().empty()) {
- if (specifics.has_encrypted() != is_encrypted)
- return false;
- if (specifics.has_encrypted()) {
- if (child.GetNonUniqueName() != kEncryptedString)
- return false;
- if (!cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted()))
- return false;
- }
- }
- // Push the successor.
- to_visit.push(child.GetSuccessorId());
- }
- return true;
-}
-
-bool UpdateEntryWithEncryption(
- BaseTransaction* const trans,
- const sync_pb::EntitySpecifics& new_specifics,
- syncable::MutableEntry* entry) {
- NigoriHandler* nigori_handler = trans->directory()->GetNigoriHandler();
- Cryptographer* cryptographer = trans->directory()->GetCryptographer(trans);
- ModelType type = GetModelTypeFromSpecifics(new_specifics);
- DCHECK_GE(type, FIRST_REAL_MODEL_TYPE);
- const sync_pb::EntitySpecifics& old_specifics = entry->GetSpecifics();
- const ModelTypeSet encrypted_types = nigori_handler->GetEncryptedTypes(trans);
- // It's possible the nigori lost the set of encrypted types. If the current
- // specifics are already encrypted, we want to ensure we continue encrypting.
- bool was_encrypted = old_specifics.has_encrypted();
- sync_pb::EntitySpecifics generated_specifics;
- if (new_specifics.has_encrypted()) {
- NOTREACHED() << "New specifics already has an encrypted blob.";
- return false;
- }
- if ((!SpecificsNeedsEncryption(encrypted_types, new_specifics) &&
- !was_encrypted) ||
- !cryptographer->is_initialized()) {
- // No encryption required or we are unable to encrypt.
- generated_specifics.CopyFrom(new_specifics);
- } else {
- // Encrypt new_specifics into generated_specifics.
- if (VLOG_IS_ON(2)) {
- scoped_ptr<base::DictionaryValue> value(entry->ToValue(NULL));
- std::string info;
- base::JSONWriter::WriteWithOptions(value.get(),
- base::JSONWriter::OPTIONS_PRETTY_PRINT,
- &info);
- DVLOG(2) << "Encrypting specifics of type "
- << ModelTypeToString(type)
- << " with content: "
- << info;
- }
- // Only copy over the old specifics if it is of the right type and already
- // encrypted. The first time we encrypt a node we start from scratch, hence
- // removing all the unencrypted data, but from then on we only want to
- // update the node if the data changes or the encryption key changes.
- if (GetModelTypeFromSpecifics(old_specifics) == type &&
- was_encrypted) {
- generated_specifics.CopyFrom(old_specifics);
- } else {
- AddDefaultFieldValue(type, &generated_specifics);
- }
- // Does not change anything if underlying encrypted blob was already up
- // to date and encrypted with the default key.
- if (!cryptographer->Encrypt(new_specifics,
- generated_specifics.mutable_encrypted())) {
- NOTREACHED() << "Could not encrypt data for node of type "
- << ModelTypeToString(type);
- return false;
- }
- }
-
- // It's possible this entry was encrypted but didn't properly overwrite the
- // non_unique_name (see crbug.com/96314).
- bool encrypted_without_overwriting_name = (was_encrypted &&
- entry->GetNonUniqueName() != kEncryptedString);
-
- // If we're encrypted but the name wasn't overwritten properly we still want
- // to rewrite the entry, irrespective of whether the specifics match.
- if (!encrypted_without_overwriting_name &&
- old_specifics.SerializeAsString() ==
- generated_specifics.SerializeAsString()) {
- DVLOG(2) << "Specifics of type " << ModelTypeToString(type)
- << " already match, dropping change.";
- return true;
- }
-
- if (generated_specifics.has_encrypted()) {
- // Overwrite the possibly sensitive non-specifics data.
- entry->PutNonUniqueName(kEncryptedString);
- // For bookmarks we actually put bogus data into the unencrypted specifics,
- // else the server will try to do it for us.
- if (type == BOOKMARKS) {
- sync_pb::BookmarkSpecifics* bookmark_specifics =
- generated_specifics.mutable_bookmark();
- if (!entry->GetIsDir())
- bookmark_specifics->set_url(kEncryptedString);
- bookmark_specifics->set_title(kEncryptedString);
- }
- }
- entry->PutSpecifics(generated_specifics);
- DVLOG(1) << "Overwriting specifics of type "
- << ModelTypeToString(type)
- << " and marking for syncing.";
- syncable::MarkForSyncing(entry);
- return true;
-}
-
-void UpdateNigoriFromEncryptedTypes(ModelTypeSet encrypted_types,
- bool encrypt_everything,
- sync_pb::NigoriSpecifics* nigori) {
- nigori->set_encrypt_everything(encrypt_everything);
- COMPILE_ASSERT(30 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
- nigori->set_encrypt_bookmarks(
- encrypted_types.Has(BOOKMARKS));
- nigori->set_encrypt_preferences(
- encrypted_types.Has(PREFERENCES));
- nigori->set_encrypt_autofill_profile(
- encrypted_types.Has(AUTOFILL_PROFILE));
- nigori->set_encrypt_autofill(encrypted_types.Has(AUTOFILL));
- nigori->set_encrypt_themes(encrypted_types.Has(THEMES));
- nigori->set_encrypt_typed_urls(
- encrypted_types.Has(TYPED_URLS));
- nigori->set_encrypt_extension_settings(
- encrypted_types.Has(EXTENSION_SETTINGS));
- nigori->set_encrypt_extensions(
- encrypted_types.Has(EXTENSIONS));
- nigori->set_encrypt_search_engines(
- encrypted_types.Has(SEARCH_ENGINES));
- nigori->set_encrypt_sessions(encrypted_types.Has(SESSIONS));
- nigori->set_encrypt_app_settings(
- encrypted_types.Has(APP_SETTINGS));
- nigori->set_encrypt_apps(encrypted_types.Has(APPS));
- nigori->set_encrypt_app_notifications(
- encrypted_types.Has(APP_NOTIFICATIONS));
- nigori->set_encrypt_dictionary(encrypted_types.Has(DICTIONARY));
- nigori->set_encrypt_favicon_images(encrypted_types.Has(FAVICON_IMAGES));
- nigori->set_encrypt_favicon_tracking(encrypted_types.Has(FAVICON_TRACKING));
- nigori->set_encrypt_articles(encrypted_types.Has(ARTICLES));
- nigori->set_encrypt_app_list(encrypted_types.Has(APP_LIST));
-}
-
-ModelTypeSet GetEncryptedTypesFromNigori(
- const sync_pb::NigoriSpecifics& nigori) {
- if (nigori.encrypt_everything())
- return ModelTypeSet::All();
-
- ModelTypeSet encrypted_types;
- COMPILE_ASSERT(30 == MODEL_TYPE_COUNT, UpdateEncryptedTypes);
- if (nigori.encrypt_bookmarks())
- encrypted_types.Put(BOOKMARKS);
- if (nigori.encrypt_preferences())
- encrypted_types.Put(PREFERENCES);
- if (nigori.encrypt_autofill_profile())
- encrypted_types.Put(AUTOFILL_PROFILE);
- if (nigori.encrypt_autofill())
- encrypted_types.Put(AUTOFILL);
- if (nigori.encrypt_themes())
- encrypted_types.Put(THEMES);
- if (nigori.encrypt_typed_urls())
- encrypted_types.Put(TYPED_URLS);
- if (nigori.encrypt_extension_settings())
- encrypted_types.Put(EXTENSION_SETTINGS);
- if (nigori.encrypt_extensions())
- encrypted_types.Put(EXTENSIONS);
- if (nigori.encrypt_search_engines())
- encrypted_types.Put(SEARCH_ENGINES);
- if (nigori.encrypt_sessions())
- encrypted_types.Put(SESSIONS);
- if (nigori.encrypt_app_settings())
- encrypted_types.Put(APP_SETTINGS);
- if (nigori.encrypt_apps())
- encrypted_types.Put(APPS);
- if (nigori.encrypt_app_notifications())
- encrypted_types.Put(APP_NOTIFICATIONS);
- if (nigori.encrypt_dictionary())
- encrypted_types.Put(DICTIONARY);
- if (nigori.encrypt_favicon_images())
- encrypted_types.Put(FAVICON_IMAGES);
- if (nigori.encrypt_favicon_tracking())
- encrypted_types.Put(FAVICON_TRACKING);
- if (nigori.encrypt_articles())
- encrypted_types.Put(ARTICLES);
- if (nigori.encrypt_app_list())
- encrypted_types.Put(APP_LIST);
- return encrypted_types;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/nigori_util.h b/chromium/sync/syncable/nigori_util.h
deleted file mode 100644
index 7f57a722b27..00000000000
--- a/chromium/sync/syncable/nigori_util.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Various utility methods for nigori-based multi-type encryption.
-
-#ifndef SYNC_SYNCABLE_NIGORI_UTIL_H_
-#define SYNC_SYNCABLE_NIGORI_UTIL_H_
-
-#include "base/compiler_specific.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-
-namespace sync_pb {
-class EntitySpecifics;
-}
-
-namespace syncer {
-class Cryptographer;
-
-namespace syncable {
-
-const char kEncryptedString[] = "encrypted";
-
-class BaseTransaction;
-class Entry;
-class MutableEntry;
-class WriteTransaction;
-
-// Check if our unsyced changes are encrypted if they need to be based on
-// |encrypted_types|.
-// Returns: true if all unsynced data that should be encrypted is.
-// false if some unsynced changes need to be encrypted.
-// This method is similar to ProcessUnsyncedChangesForEncryption but does not
-// modify the data and does not care if data is unnecessarily encrypted.
-SYNC_EXPORT_PRIVATE bool VerifyUnsyncedChangesAreEncrypted(
- BaseTransaction* const trans,
- ModelTypeSet encrypted_types);
-
-// Processes all unsynced changes and ensures they are appropriately encrypted
-// or unencrypted, based on |encrypted_types|.
-bool ProcessUnsyncedChangesForEncryption(
- WriteTransaction* const trans);
-
-// Returns true if the entry requires encryption but is not encrypted, false
-// otherwise. Note: this does not check that already encrypted entries are
-// encrypted with the proper key.
-bool EntryNeedsEncryption(ModelTypeSet encrypted_types,
- const Entry& entry);
-
-// Same as EntryNeedsEncryption, but looks at specifics.
-SYNC_EXPORT_PRIVATE bool SpecificsNeedsEncryption(
- ModelTypeSet encrypted_types,
- const sync_pb::EntitySpecifics& specifics);
-
-// Verifies all data of type |type| is encrypted appropriately.
-SYNC_EXPORT_PRIVATE bool VerifyDataTypeEncryptionForTest(
- BaseTransaction* const trans,
- ModelType type,
- bool is_encrypted) WARN_UNUSED_RESULT;
-
-// Stores |new_specifics| into |entry|, encrypting if necessary.
-// Returns false if an error encrypting occurred (does not modify |entry|).
-// Note: gracefully handles new_specifics aliasing with entry->GetSpecifics().
-bool UpdateEntryWithEncryption(
- BaseTransaction* const trans,
- const sync_pb::EntitySpecifics& new_specifics,
- MutableEntry* entry);
-
-// Updates |nigori| to match the encryption state specified by |encrypted_types|
-// and |encrypt_everything|.
-SYNC_EXPORT_PRIVATE void UpdateNigoriFromEncryptedTypes(
- ModelTypeSet encrypted_types,
- bool encrypt_everything,
- sync_pb::NigoriSpecifics* nigori);
-
-// Extracts the set of encrypted types from a nigori node.
-ModelTypeSet GetEncryptedTypesFromNigori(
- const sync_pb::NigoriSpecifics& nigori);
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_NIGORI_UTIL_H_
diff --git a/chromium/sync/syncable/nigori_util_unittest.cc b/chromium/sync/syncable/nigori_util_unittest.cc
deleted file mode 100644
index 52e39a0f92b..00000000000
--- a/chromium/sync/syncable/nigori_util_unittest.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/nigori_util.h"
-#include "sync/util/cryptographer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace syncable {
-
-typedef testing::Test NigoriUtilTest;
-
-TEST(NigoriUtilTest, SpecificsNeedsEncryption) {
- ModelTypeSet encrypted_types;
- encrypted_types.Put(BOOKMARKS);
- encrypted_types.Put(PASSWORDS);
-
- sync_pb::EntitySpecifics specifics;
- EXPECT_FALSE(SpecificsNeedsEncryption(ModelTypeSet(), specifics));
- EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, specifics));
-
- AddDefaultFieldValue(PREFERENCES, &specifics);
- EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, specifics));
-
- sync_pb::EntitySpecifics bookmark_specifics;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_specifics);
- EXPECT_TRUE(SpecificsNeedsEncryption(encrypted_types, bookmark_specifics));
-
- bookmark_specifics.mutable_bookmark()->set_title("title");
- bookmark_specifics.mutable_bookmark()->set_url("url");
- EXPECT_TRUE(SpecificsNeedsEncryption(encrypted_types, bookmark_specifics));
- EXPECT_FALSE(SpecificsNeedsEncryption(ModelTypeSet(), bookmark_specifics));
-
- bookmark_specifics.mutable_encrypted();
- EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, bookmark_specifics));
- EXPECT_FALSE(SpecificsNeedsEncryption(ModelTypeSet(), bookmark_specifics));
-
- sync_pb::EntitySpecifics password_specifics;
- AddDefaultFieldValue(PASSWORDS, &password_specifics);
- EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, password_specifics));
-}
-
-// ProcessUnsyncedChangesForEncryption and other methods that rely on the syncer
-// are tested in apply_updates_command_unittest.cc
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/on_disk_directory_backing_store.cc b/chromium/sync/syncable/on_disk_directory_backing_store.cc
deleted file mode 100644
index 1bebf9aff2f..00000000000
--- a/chromium/sync/syncable/on_disk_directory_backing_store.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/on_disk_directory_backing_store.h"
-
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/metrics/histogram.h"
-#include "sync/syncable/syncable-inl.h"
-
-namespace syncer {
-namespace syncable {
-
-namespace {
-
-enum HistogramResultEnum {
- FIRST_TRY_SUCCESS,
- SECOND_TRY_SUCCESS,
- SECOND_TRY_FAILURE,
- RESULT_COUNT
-};
-
-} // namespace
-
-OnDiskDirectoryBackingStore::OnDiskDirectoryBackingStore(
- const std::string& dir_name, const base::FilePath& backing_filepath)
- : DirectoryBackingStore(dir_name),
- allow_failure_for_test_(false),
- backing_filepath_(backing_filepath) {
- db_->set_exclusive_locking();
- db_->set_page_size(4096);
-}
-
-OnDiskDirectoryBackingStore::~OnDiskDirectoryBackingStore() { }
-
-DirOpenResult OnDiskDirectoryBackingStore::TryLoad(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) {
- DCHECK(CalledOnValidThread());
- if (!db_->is_open()) {
- if (!db_->Open(backing_filepath_))
- return FAILED_OPEN_DATABASE;
- }
-
- if (!InitializeTables())
- return FAILED_OPEN_DATABASE;
-
- if (!DropDeletedEntries())
- return FAILED_DATABASE_CORRUPT;
- if (!LoadEntries(handles_map))
- return FAILED_DATABASE_CORRUPT;
- if (!LoadDeleteJournals(delete_journals))
- return FAILED_DATABASE_CORRUPT;
- if (!LoadInfo(kernel_load_info))
- return FAILED_DATABASE_CORRUPT;
- if (!VerifyReferenceIntegrity(handles_map))
- return FAILED_DATABASE_CORRUPT;
-
- return OPENED;
-
-}
-
-DirOpenResult OnDiskDirectoryBackingStore::Load(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) {
- DirOpenResult result = TryLoad(handles_map, delete_journals,
- kernel_load_info);
- if (result == OPENED) {
- UMA_HISTOGRAM_ENUMERATION(
- "Sync.DirectoryOpenResult", FIRST_TRY_SUCCESS, RESULT_COUNT);
- return OPENED;
- }
-
- ReportFirstTryOpenFailure();
-
- // The fallback: delete the current database and return a fresh one. We can
- // fetch the user's data from the cloud.
- STLDeleteValues(handles_map);
- STLDeleteElements(delete_journals);
- db_.reset(new sql::Connection);
- // TODO: Manually propagating the default database settings is
- // brittle. Either have a helper to set these up (or generate a new
- // connection), or add something like Reset() to sql::Connection.
- db_->set_exclusive_locking();
- db_->set_page_size(4096);
- db_->set_histogram_tag("SyncDirectory");
- base::DeleteFile(backing_filepath_, false);
-
- result = TryLoad(handles_map, delete_journals, kernel_load_info);
- if (result == OPENED) {
- UMA_HISTOGRAM_ENUMERATION(
- "Sync.DirectoryOpenResult", SECOND_TRY_SUCCESS, RESULT_COUNT);
- } else {
- UMA_HISTOGRAM_ENUMERATION(
- "Sync.DirectoryOpenResult", SECOND_TRY_FAILURE, RESULT_COUNT);
- }
-
- return result;
-}
-
-void OnDiskDirectoryBackingStore::ReportFirstTryOpenFailure() {
- // In debug builds, the last thing we want is to silently clear the database.
- // It's full of evidence that might help us determine what went wrong. It
- // might be sqlite's fault, but it could also be a bug in sync. We crash
- // immediately so a developer can investigate.
- //
- // Developers: If you're not interested in debugging this right now, just move
- // aside the 'Sync Data' directory in your profile. This is similar to what
- // the code would do if this DCHECK were disabled.
- NOTREACHED() << "Crashing to preserve corrupt sync database";
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/on_disk_directory_backing_store.h b/chromium/sync/syncable/on_disk_directory_backing_store.h
deleted file mode 100644
index 5930bd27ba2..00000000000
--- a/chromium/sync/syncable/on_disk_directory_backing_store.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
-#define SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
-
-#include "base/files/file_path.h"
-#include "sync/base/sync_export.h"
-#include "sync/syncable/directory_backing_store.h"
-
-namespace syncer {
-namespace syncable {
-
-// This is the concrete class that provides a useful implementation of
-// DirectoryBackingStore.
-class SYNC_EXPORT_PRIVATE OnDiskDirectoryBackingStore
- : public DirectoryBackingStore {
- public:
- OnDiskDirectoryBackingStore(const std::string& dir_name,
- const base::FilePath& backing_filepath);
- virtual ~OnDiskDirectoryBackingStore();
- virtual DirOpenResult Load(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
-
- // A helper function that will make one attempt to load the directory.
- // Unlike Load(), it does not attempt to recover from failure.
- DirOpenResult TryLoad(
- Directory::MetahandlesMap* handles_map,
- JournalIndex* delete_journals,
- Directory::KernelLoadInfo* kernel_load_info);
-
- protected:
- // Subclasses may override this to avoid a possible DCHECK.
- virtual void ReportFirstTryOpenFailure();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MinorCorruption);
-
- bool allow_failure_for_test_;
- base::FilePath backing_filepath_;
-
- DISALLOW_COPY_AND_ASSIGN(OnDiskDirectoryBackingStore);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
diff --git a/chromium/sync/syncable/parent_child_index.cc b/chromium/sync/syncable/parent_child_index.cc
deleted file mode 100644
index 71fb92e4111..00000000000
--- a/chromium/sync/syncable/parent_child_index.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/parent_child_index.h"
-
-#include "base/stl_util.h"
-
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_id.h"
-
-namespace syncer {
-namespace syncable {
-
-bool ChildComparator::operator()(
- const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const {
- const UniquePosition& a_pos = a->ref(UNIQUE_POSITION);
- const UniquePosition& b_pos = b->ref(UNIQUE_POSITION);
-
- if (a_pos.IsValid() && b_pos.IsValid()) {
- // Position is important to this type.
- return a_pos.LessThan(b_pos);
- } else if (a_pos.IsValid() && !b_pos.IsValid()) {
- // TODO(rlarocque): Remove this case.
- // An item with valid position as sibling of one with invalid position.
- // We should not support this, but the tests rely on it. For now, just
- // move all invalid position items to the right.
- return true;
- } else if (!a_pos.IsValid() && b_pos.IsValid()) {
- // TODO(rlarocque): Remove this case.
- // Mirror of the above case.
- return false;
- } else {
- // Position doesn't matter.
- DCHECK(!a->ref(UNIQUE_POSITION).IsValid());
- DCHECK(!b->ref(UNIQUE_POSITION).IsValid());
- return a->ref(ID) < b->ref(ID);
- }
-}
-
-ParentChildIndex::ParentChildIndex() {
-}
-
-ParentChildIndex::~ParentChildIndex() {
- STLDeleteContainerPairSecondPointers(
- parent_children_map_.begin(), parent_children_map_.end());
-}
-
-bool ParentChildIndex::ShouldInclude(const EntryKernel* entry) {
- // This index excludes deleted items and the root item. The root
- // item is excluded so that it doesn't show up as a child of itself.
- return !entry->ref(IS_DEL) && !entry->ref(ID).IsRoot();
-}
-
-bool ParentChildIndex::Insert(EntryKernel* entry) {
- DCHECK(ShouldInclude(entry));
-
- const syncable::Id& parent_id = entry->ref(PARENT_ID);
- OrderedChildSet* children = NULL;
- ParentChildrenMap::iterator i = parent_children_map_.find(parent_id);
- if (i != parent_children_map_.end()) {
- children = i->second;
- } else {
- children = new OrderedChildSet();
- parent_children_map_.insert(std::make_pair(parent_id, children));
- }
-
- return children->insert(entry).second;
-}
-
-// Like the other containers used to help support the syncable::Directory, this
-// one does not own any EntryKernels. This function removes references to the
-// given EntryKernel but does not delete it.
-void ParentChildIndex::Remove(EntryKernel* e) {
- ParentChildrenMap::iterator parent =
- parent_children_map_.find(e->ref(PARENT_ID));
- DCHECK(parent != parent_children_map_.end());
-
- OrderedChildSet* children = parent->second;
- OrderedChildSet::iterator j = children->find(e);
- DCHECK(j != children->end());
-
- children->erase(j);
- if (children->empty()) {
- delete children;
- parent_children_map_.erase(parent);
- }
-}
-
-bool ParentChildIndex::Contains(EntryKernel *e) const {
- const syncable::Id& parent_id = e->ref(PARENT_ID);
- ParentChildrenMap::const_iterator parent =
- parent_children_map_.find(parent_id);
- if (parent == parent_children_map_.end()) {
- return false;
- }
- const OrderedChildSet* children = parent->second;
- DCHECK(children && !children->empty());
- return children->count(e) > 0;
-}
-
-const OrderedChildSet* ParentChildIndex::GetChildren(const syncable::Id& id) {
- ParentChildrenMap::iterator parent = parent_children_map_.find(id);
- if (parent == parent_children_map_.end()) {
- return NULL;
- }
-
- // A successful lookup implies at least some children exist.
- DCHECK(!parent->second->empty());
- return parent->second;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/parent_child_index.h b/chromium/sync/syncable/parent_child_index.h
deleted file mode 100644
index fd0f2e89c83..00000000000
--- a/chromium/sync/syncable/parent_child_index.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_PARENT_CHILD_INDEX
-#define SYNC_SYNCABLE_PARENT_CHILD_INDEX
-
-#include <map>
-#include <set>
-
-#include "base/basictypes.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-namespace syncable {
-
-struct EntryKernel;
-class Id;
-class ParentChildIndex;
-
-// A node ordering function.
-struct SYNC_EXPORT_PRIVATE ChildComparator {
- bool operator() (const EntryKernel* a, const EntryKernel* b) const;
-};
-
-// An ordered set of nodes.
-typedef std::set<EntryKernel*, ChildComparator> OrderedChildSet;
-
-// Container that tracks parent-child relationships.
-// Provides fast lookup of all items under a given parent.
-class SYNC_EXPORT_PRIVATE ParentChildIndex {
- public:
- ParentChildIndex();
- ~ParentChildIndex();
-
- // Returns whether or not this entry belongs in the index.
- // True for all non-deleted, non-root entries.
- static bool ShouldInclude(const EntryKernel* e);
-
- // Inserts a given child into the index.
- bool Insert(EntryKernel* e);
-
- // Removes a given child from the index.
- void Remove(EntryKernel* e);
-
- // Returns true if this item is in the index as a child.
- bool Contains(EntryKernel* e) const;
-
- // Returns all children of the entry with the given Id. Returns NULL if the
- // node has no children or the Id does not identify a valid directory node.
- const OrderedChildSet* GetChildren(const Id& id);
-
- private:
- typedef std::map<syncable::Id, OrderedChildSet*> ParentChildrenMap;
-
- // A map of parent IDs to children.
- // Parents with no children are not included in this map.
- ParentChildrenMap parent_children_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ParentChildIndex);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_PARENT_CHILD_INDEX
diff --git a/chromium/sync/syncable/parent_child_index_unittest.cc b/chromium/sync/syncable/parent_child_index_unittest.cc
deleted file mode 100644
index 5ae9d27bd87..00000000000
--- a/chromium/sync/syncable/parent_child_index_unittest.cc
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/parent_child_index.h"
-
-#include <list>
-
-#include "base/stl_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace syncable {
-
-namespace {
-
-static const std::string kCacheGuid = "8HhNIHlEOCGQbIAALr9QEg==";
-
-class ParentChildIndexTest : public testing::Test {
- public:
- virtual void TearDown() {
- // To make memory management easier, we take ownership of all EntryKernels
- // returned by our factory methods and delete them here.
- STLDeleteElements(&owned_entry_kernels_);
- }
-
- // Unfortunately, we can't use the regular Entry factory methods, because the
- // ParentChildIndex deals in EntryKernels.
-
- static syncable::Id GetBookmarkRootId() {
- return syncable::Id::CreateFromServerId("bookmark_folder");
- }
-
- static syncable::Id GetBookmarkId(int n) {
- return syncable::Id::CreateFromServerId("b" + base::IntToString(n));
- }
-
- static syncable::Id GetClientUniqueId(int n) {
- return syncable::Id::CreateFromServerId("c" + base::IntToString(n));
- }
-
- EntryKernel* MakeRoot() {
- // Mimics the root node.
- EntryKernel* root = new EntryKernel();
- root->put(META_HANDLE, 1);
- root->put(BASE_VERSION, -1);
- root->put(SERVER_VERSION, 0);
- root->put(IS_DIR, true);
- root->put(ID, syncable::Id());
- root->put(PARENT_ID, syncable::Id());
- root->put(SERVER_PARENT_ID, syncable::Id());
-
- owned_entry_kernels_.push_back(root);
- return root;
- }
-
- EntryKernel* MakeBookmarkRoot() {
- // Mimics a server-created bookmark folder.
- EntryKernel* folder = new EntryKernel;
- folder->put(META_HANDLE, 1);
- folder->put(BASE_VERSION, 9);
- folder->put(SERVER_VERSION, 9);
- folder->put(IS_DIR, true);
- folder->put(ID, GetBookmarkRootId());
- folder->put(SERVER_PARENT_ID, syncable::Id());
- folder->put(PARENT_ID, syncable::Id());
- folder->put(UNIQUE_SERVER_TAG, "google_chrome_bookmarks");
-
- owned_entry_kernels_.push_back(folder);
- return folder;
- }
-
- EntryKernel* MakeBookmark(int n, int pos, bool is_dir) {
- // Mimics a regular bookmark or folder.
- EntryKernel* bm = new EntryKernel();
- bm->put(META_HANDLE, n);
- bm->put(BASE_VERSION, 10);
- bm->put(SERVER_VERSION, 10);
- bm->put(IS_DIR, is_dir);
- bm->put(ID, GetBookmarkId(n));
- bm->put(PARENT_ID, GetBookmarkRootId());
- bm->put(SERVER_PARENT_ID, GetBookmarkRootId());
-
- bm->put(UNIQUE_BOOKMARK_TAG,
- syncable::GenerateSyncableBookmarkHash(kCacheGuid,
- bm->ref(ID).GetServerId()));
-
- UniquePosition unique_pos =
- UniquePosition::FromInt64(pos, bm->ref(UNIQUE_BOOKMARK_TAG));
- bm->put(UNIQUE_POSITION, unique_pos);
- bm->put(SERVER_UNIQUE_POSITION, unique_pos);
-
- owned_entry_kernels_.push_back(bm);
- return bm;
- }
-
- EntryKernel* MakeUniqueClientItem(int n) {
- EntryKernel* item = new EntryKernel();
- item->put(META_HANDLE, n);
- item->put(BASE_VERSION, 10);
- item->put(SERVER_VERSION, 10);
- item->put(IS_DIR, false);
- item->put(ID, GetClientUniqueId(n));
- item->put(PARENT_ID, syncable::Id());
- item->put(SERVER_PARENT_ID, syncable::Id());
- item->put(UNIQUE_CLIENT_TAG, base::IntToString(n));
-
- owned_entry_kernels_.push_back(item);
- return item;
- }
-
- ParentChildIndex index_;
-
- private:
- std::list<EntryKernel*> owned_entry_kernels_;
-};
-
-TEST_F(ParentChildIndexTest, TestRootNode) {
- EntryKernel* root = MakeRoot();
- EXPECT_FALSE(ParentChildIndex::ShouldInclude(root));
-}
-
-TEST_F(ParentChildIndexTest, TestBookmarkRootFolder) {
- EntryKernel* bm_folder = MakeBookmarkRoot();
- EXPECT_TRUE(ParentChildIndex::ShouldInclude(bm_folder));
-}
-
-// Tests iteration over a set of siblings.
-TEST_F(ParentChildIndexTest, ChildInsertionAndIteration) {
- EntryKernel* bm_folder = MakeBookmarkRoot();
- index_.Insert(bm_folder);
-
- // Make some folder and non-folder entries.
- EntryKernel* b1 = MakeBookmark(1, 1, false);
- EntryKernel* b2 = MakeBookmark(2, 2, false);
- EntryKernel* b3 = MakeBookmark(3, 3, true);
- EntryKernel* b4 = MakeBookmark(4, 4, false);
-
- // Insert them out-of-order to test different cases.
- index_.Insert(b3); // Only child.
- index_.Insert(b4); // Right-most child.
- index_.Insert(b1); // Left-most child.
- index_.Insert(b2); // Between existing items.
-
- // Double-check they've been added.
- EXPECT_TRUE(index_.Contains(b1));
- EXPECT_TRUE(index_.Contains(b2));
- EXPECT_TRUE(index_.Contains(b3));
- EXPECT_TRUE(index_.Contains(b4));
-
- // Check the ordering.
- const OrderedChildSet* children = index_.GetChildren(GetBookmarkRootId());
- ASSERT_TRUE(children);
- ASSERT_EQ(children->size(), 4UL);
- OrderedChildSet::const_iterator it = children->begin();
- EXPECT_EQ(*it, b1);
- it++;
- EXPECT_EQ(*it, b2);
- it++;
- EXPECT_EQ(*it, b3);
- it++;
- EXPECT_EQ(*it, b4);
- it++;
- EXPECT_TRUE(it == children->end());
-}
-
-// Tests iteration when hierarchy is involved.
-TEST_F(ParentChildIndexTest, ChildInsertionAndIterationWithHierarchy) {
- EntryKernel* bm_folder = MakeBookmarkRoot();
- index_.Insert(bm_folder);
-
- // Just below the root, we have folders f1 and f2.
- EntryKernel* f1 = MakeBookmark(1, 1, false);
- EntryKernel* f2 = MakeBookmark(2, 2, false);
- EntryKernel* f3 = MakeBookmark(3, 3, false);
-
- // Under folder f1, we have two bookmarks.
- EntryKernel* f1_b1 = MakeBookmark(101, 1, false);
- f1_b1->put(PARENT_ID, GetBookmarkId(1));
- EntryKernel* f1_b2 = MakeBookmark(102, 2, false);
- f1_b2->put(PARENT_ID, GetBookmarkId(1));
-
- // Under folder f2, there is one bookmark.
- EntryKernel* f2_b1 = MakeBookmark(201, 1, false);
- f2_b1->put(PARENT_ID, GetBookmarkId(2));
-
- // Under folder f3, there is nothing.
-
- // Insert in a strange order, because we can.
- index_.Insert(f1_b2);
- index_.Insert(f2);
- index_.Insert(f2_b1);
- index_.Insert(f1);
- index_.Insert(f1_b1);
- index_.Insert(f3);
-
- OrderedChildSet::const_iterator it;
-
- // Iterate over children of the bookmark root.
- const OrderedChildSet* top_children = index_.GetChildren(GetBookmarkRootId());
- ASSERT_TRUE(top_children);
- ASSERT_EQ(top_children->size(), 3UL);
- it = top_children->begin();
- EXPECT_EQ(*it, f1);
- it++;
- EXPECT_EQ(*it, f2);
- it++;
- EXPECT_EQ(*it, f3);
- it++;
- EXPECT_TRUE(it == top_children->end());
-
- // Iterate over children of the first folder.
- const OrderedChildSet* f1_children = index_.GetChildren(GetBookmarkId(1));
- ASSERT_TRUE(f1_children);
- ASSERT_EQ(f1_children->size(), 2UL);
- it = f1_children->begin();
- EXPECT_EQ(*it, f1_b1);
- it++;
- EXPECT_EQ(*it, f1_b2);
- it++;
- EXPECT_TRUE(it == f1_children->end());
-
- // Iterate over children of the second folder.
- const OrderedChildSet* f2_children = index_.GetChildren(GetBookmarkId(2));
- ASSERT_TRUE(f2_children);
- ASSERT_EQ(f2_children->size(), 1UL);
- it = f2_children->begin();
- EXPECT_EQ(*it, f2_b1);
- it++;
- EXPECT_TRUE(it == f2_children->end());
-
- // Check for children of the third folder.
- const OrderedChildSet* f3_children = index_.GetChildren(GetBookmarkId(3));
- EXPECT_FALSE(f3_children);
-}
-
-// Tests removing items.
-TEST_F(ParentChildIndexTest, RemoveWithHierarchy) {
- EntryKernel* bm_folder = MakeBookmarkRoot();
- index_.Insert(bm_folder);
-
- // Just below the root, we have folders f1 and f2.
- EntryKernel* f1 = MakeBookmark(1, 1, false);
- EntryKernel* f2 = MakeBookmark(2, 2, false);
- EntryKernel* f3 = MakeBookmark(3, 3, false);
-
- // Under folder f1, we have two bookmarks.
- EntryKernel* f1_b1 = MakeBookmark(101, 1, false);
- f1_b1->put(PARENT_ID, GetBookmarkId(1));
- EntryKernel* f1_b2 = MakeBookmark(102, 2, false);
- f1_b2->put(PARENT_ID, GetBookmarkId(1));
-
- // Under folder f2, there is one bookmark.
- EntryKernel* f2_b1 = MakeBookmark(201, 1, false);
- f2_b1->put(PARENT_ID, GetBookmarkId(2));
-
- // Under folder f3, there is nothing.
-
- // Insert in any order.
- index_.Insert(f2_b1);
- index_.Insert(f3);
- index_.Insert(f1_b2);
- index_.Insert(f1);
- index_.Insert(f2);
- index_.Insert(f1_b1);
-
- // Check that all are in the index.
- EXPECT_TRUE(index_.Contains(f1));
- EXPECT_TRUE(index_.Contains(f2));
- EXPECT_TRUE(index_.Contains(f3));
- EXPECT_TRUE(index_.Contains(f1_b1));
- EXPECT_TRUE(index_.Contains(f1_b2));
- EXPECT_TRUE(index_.Contains(f2_b1));
-
- // Remove them all in any order.
- index_.Remove(f3);
- EXPECT_FALSE(index_.Contains(f3));
- index_.Remove(f1_b2);
- EXPECT_FALSE(index_.Contains(f1_b2));
- index_.Remove(f2_b1);
- EXPECT_FALSE(index_.Contains(f2_b1));
- index_.Remove(f1);
- EXPECT_FALSE(index_.Contains(f1));
- index_.Remove(f2);
- EXPECT_FALSE(index_.Contains(f2));
- index_.Remove(f1_b1);
- EXPECT_FALSE(index_.Contains(f1_b1));
-}
-
-// Test that involves two non-ordered items.
-TEST_F(ParentChildIndexTest, UnorderedChildren) {
- // Make two unique client tag items under the root node.
- EntryKernel* u1 = MakeUniqueClientItem(1);
- EntryKernel* u2 = MakeUniqueClientItem(2);
-
- EXPECT_FALSE(u1->ShouldMaintainPosition());
- EXPECT_FALSE(u2->ShouldMaintainPosition());
-
- index_.Insert(u1);
- index_.Insert(u2);
-
- const OrderedChildSet* children = index_.GetChildren(syncable::Id());
- EXPECT_EQ(children->count(u1), 1UL);
- EXPECT_EQ(children->count(u2), 1UL);
- EXPECT_EQ(children->size(), 2UL);
-}
-
-// Test ordered and non-ordered entries under the same parent.
-// TODO(rlarocque): We should not need to support this.
-TEST_F(ParentChildIndexTest, OrderedAndUnorderedChildren) {
- EntryKernel* bm_folder = MakeBookmarkRoot();
- index_.Insert(bm_folder);
-
- EntryKernel* b1 = MakeBookmark(1, 1, false);
- EntryKernel* b2 = MakeBookmark(2, 2, false);
- EntryKernel* u1 = MakeUniqueClientItem(1);
- u1->put(PARENT_ID, GetBookmarkRootId());
-
- index_.Insert(b1);
- index_.Insert(u1);
- index_.Insert(b2);
-
- const OrderedChildSet* children = index_.GetChildren(GetBookmarkRootId());
- ASSERT_TRUE(children);
- EXPECT_EQ(children->size(), 3UL);
-
- // Ensure that the non-positionable item is moved to the far right.
- OrderedChildSet::const_iterator it = children->begin();
- EXPECT_EQ(*it, b1);
- it++;
- EXPECT_EQ(*it, b2);
- it++;
- EXPECT_EQ(*it, u1);
- it++;
- EXPECT_TRUE(it == children->end());
-}
-
-} // namespace
-} // namespace syncable
-} // namespace syncer
-
diff --git a/chromium/sync/syncable/scoped_kernel_lock.cc b/chromium/sync/syncable/scoped_kernel_lock.cc
deleted file mode 100644
index 4a2742035fe..00000000000
--- a/chromium/sync/syncable/scoped_kernel_lock.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/scoped_kernel_lock.h"
-
-#include "sync/syncable/directory.h"
-
-namespace syncer {
-namespace syncable {
-
-ScopedKernelLock::ScopedKernelLock(const Directory* dir)
- : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
-}
-
-ScopedKernelLock::~ScopedKernelLock() {}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/scoped_kernel_lock.h b/chromium/sync/syncable/scoped_kernel_lock.h
deleted file mode 100644
index affc375ab81..00000000000
--- a/chromium/sync/syncable/scoped_kernel_lock.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SCOPED_KERNEL_LOCK_H_
-#define SYNC_SYNCABLE_SCOPED_KERNEL_LOCK_H_
-
-#include "base/basictypes.h"
-#include "base/synchronization/lock.h"
-
-namespace syncer {
-namespace syncable {
-
-class Directory;
-
-class ScopedKernelLock {
- public:
- explicit ScopedKernelLock(const Directory* dir);
- ~ScopedKernelLock();
-
- base::AutoLock scoped_lock_;
- Directory* const dir_;
- DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SCOPED_KERNEL_LOCK_H_
diff --git a/chromium/sync/syncable/scoped_parent_child_index_updater.cc b/chromium/sync/syncable/scoped_parent_child_index_updater.cc
deleted file mode 100644
index 0dc3e953323..00000000000
--- a/chromium/sync/syncable/scoped_parent_child_index_updater.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/scoped_parent_child_index_updater.h"
-
-#include "sync/syncable/parent_child_index.h"
-
-namespace syncer {
-namespace syncable {
-
-ScopedParentChildIndexUpdater::ScopedParentChildIndexUpdater(
- ScopedKernelLock& proof_of_lock,
- EntryKernel* entry,
- ParentChildIndex* index) : entry_(entry), index_(index) {
- if (ParentChildIndex::ShouldInclude(entry_)) {
- index_->Remove(entry_);
- }
-}
-
-ScopedParentChildIndexUpdater::~ScopedParentChildIndexUpdater() {
- if (ParentChildIndex::ShouldInclude(entry_)) {
- index_->Insert(entry_);
- }
-}
-
-} // namespace syncer
-} // namespace syncable
diff --git a/chromium/sync/syncable/scoped_parent_child_index_updater.h b/chromium/sync/syncable/scoped_parent_child_index_updater.h
deleted file mode 100644
index 89385feb980..00000000000
--- a/chromium/sync/syncable/scoped_parent_child_index_updater.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
-#define SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
-
-#include "base/basictypes.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-namespace syncable {
-
-class ParentChildIndex;
-class ScopedKernelLock;
-struct EntryKernel;
-
-// Temporarily removes an item from the ParentChildIndex and re-adds it this
-// object goes out of scope.
-class ScopedParentChildIndexUpdater {
- public:
- ScopedParentChildIndexUpdater(ScopedKernelLock& proof_of_lock,
- EntryKernel* entry,
- ParentChildIndex* index);
- ~ScopedParentChildIndexUpdater();
-
- private:
- EntryKernel* entry_;
- ParentChildIndex* index_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedParentChildIndexUpdater);
-};
-
-} // namespace syncer
-} // namespace syncable
-
-#endif // SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
diff --git a/chromium/sync/syncable/syncable-inl.h b/chromium/sync/syncable/syncable-inl.h
deleted file mode 100644
index b9817cd8528..00000000000
--- a/chromium/sync/syncable/syncable-inl.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_INL_H_
-#define SYNC_SYNCABLE_SYNCABLE_INL_H_
-
-#include "sync/syncable/entry_kernel.h"
-
-namespace syncer {
-namespace syncable {
-
-template <typename FieldType, FieldType field_index>
-class LessField {
- public:
- inline bool operator() (const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const {
- return a->ref(field_index) < b->ref(field_index);
- }
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_INL_H_
diff --git a/chromium/sync/syncable/syncable_base_transaction.cc b/chromium/sync/syncable/syncable_base_transaction.cc
deleted file mode 100644
index a1d3e852084..00000000000
--- a/chromium/sync/syncable/syncable_base_transaction.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_base_transaction.h"
-
-#include "base/debug/trace_event.h"
-#include "sync/syncable/directory.h"
-
-namespace syncer {
-namespace syncable {
-
-// static
-Id BaseTransaction::root_id() {
- return Id();
-}
-
-Directory* BaseTransaction::directory() const {
- return directory_;
-}
-
-void BaseTransaction::Lock() {
- TRACE_EVENT2("sync_lock_contention", "AcquireLock",
- "src_file", from_here_.file_name(),
- "src_func", from_here_.function_name());
-
- directory_->kernel_->transaction_mutex.Acquire();
-}
-
-void BaseTransaction::Unlock() {
- directory_->kernel_->transaction_mutex.Release();
-}
-
-void BaseTransaction::OnUnrecoverableError(
- const tracked_objects::Location& location,
- const std::string& message) {
- unrecoverable_error_set_ = true;
- unrecoverable_error_location_ = location;
- unrecoverable_error_msg_ = message;
-
- // Note: We dont call the Directory's OnUnrecoverableError method right
- // away. Instead we wait to unwind the stack and in the destructor of the
- // transaction we would call the OnUnrecoverableError method.
-
- directory()->ReportUnrecoverableError();
-}
-
-bool BaseTransaction::unrecoverable_error_set() const {
- return unrecoverable_error_set_;
-}
-
-void BaseTransaction::HandleUnrecoverableErrorIfSet() {
- if (unrecoverable_error_set_) {
- directory()->OnUnrecoverableError(this,
- unrecoverable_error_location_,
- unrecoverable_error_msg_);
- }
-}
-
-BaseTransaction::BaseTransaction(const tracked_objects::Location& from_here,
- const char* name,
- WriterTag writer,
- Directory* directory)
- : from_here_(from_here), name_(name), writer_(writer),
- directory_(directory), unrecoverable_error_set_(false) {
- // TODO(lipalani): Don't issue a good transaction if the directory has
- // unrecoverable error set. And the callers have to check trans.good before
- // proceeding.
- TRACE_EVENT_BEGIN2("sync", name_,
- "src_file", from_here_.file_name(),
- "src_func", from_here_.function_name());
-}
-
-BaseTransaction::~BaseTransaction() {
- TRACE_EVENT_END0("sync", name_);
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_base_transaction.h b/chromium/sync/syncable/syncable_base_transaction.h
deleted file mode 100644
index 58c7bfafbf5..00000000000
--- a/chromium/sync/syncable/syncable_base_transaction.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_BASE_TRANSACTION_H_
-#define SYNC_SYNCABLE_SYNCABLE_BASE_TRANSACTION_H_
-
-#include "base/location.h"
-#include "sync/base/sync_export.h"
-#include "sync/syncable/syncable_id.h"
-
-namespace syncer {
-namespace syncable {
-
-class Directory;
-
-// A WriteTransaction has a writer tag describing which body of code is doing
-// the write. This is defined up here since WriteTransactionInfo also contains
-// one.
-enum WriterTag {
- INVALID,
- SYNCER,
- AUTHWATCHER,
- UNITTEST,
- VACUUM_AFTER_SAVE,
- HANDLE_SAVE_FAILURE,
- PURGE_ENTRIES,
- SYNCAPI,
-};
-
-// Make sure to update this if you update WriterTag.
-std::string WriterTagToString(WriterTag writer_tag);
-
-class SYNC_EXPORT BaseTransaction {
- public:
- static Id root_id();
-
- Directory* directory() const;
-
- virtual ~BaseTransaction();
-
- // This should be called when a database corruption is detected and there is
- // no way for us to recover short of wiping the database clean. When this is
- // called we set a bool in the transaction. The caller has to unwind the
- // stack. When the destructor for the transaction is called it acts upon the
- // bool and calls the Directory to handle the unrecoverable error.
- void OnUnrecoverableError(const tracked_objects::Location& location,
- const std::string& message);
-
- bool unrecoverable_error_set() const;
-
- protected:
- BaseTransaction(const tracked_objects::Location& from_here,
- const char* name,
- WriterTag writer,
- Directory* directory);
-
- void Lock();
- void Unlock();
-
- // This should be called before unlocking because it calls the Direcotry's
- // OnUnrecoverableError method which is not protected by locks and could
- // be called from any thread. Holding the transaction lock ensures only one
- // thread could call the method at a time.
- void HandleUnrecoverableErrorIfSet();
-
- const tracked_objects::Location from_here_;
- const char* const name_;
- WriterTag writer_;
- Directory* const directory_;
-
- // Error information.
- bool unrecoverable_error_set_;
- tracked_objects::Location unrecoverable_error_location_;
- std::string unrecoverable_error_msg_;
-
- private:
- friend class Entry;
- DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_BASE_TRANSACTION_H_
diff --git a/chromium/sync/syncable/syncable_base_write_transaction.cc b/chromium/sync/syncable/syncable_base_write_transaction.cc
deleted file mode 100644
index a575c699fb5..00000000000
--- a/chromium/sync/syncable/syncable_base_write_transaction.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_base_write_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-BaseWriteTransaction::BaseWriteTransaction(
- const tracked_objects::Location location,
- const char* name,
- WriterTag writer,
- Directory* directory)
- : BaseTransaction(location, name, writer, directory) {
-}
-
-BaseWriteTransaction::~BaseWriteTransaction() {}
-
-} // namespace syncable
-} // namespace syncer
-
diff --git a/chromium/sync/syncable/syncable_base_write_transaction.h b/chromium/sync/syncable/syncable_base_write_transaction.h
deleted file mode 100644
index 8ea91a1b106..00000000000
--- a/chromium/sync/syncable/syncable_base_write_transaction.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
-#define SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/syncable_base_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-// A base class shared by both ModelNeutralWriteTransaction and
-// WriteTransaction.
-class SYNC_EXPORT BaseWriteTransaction : public BaseTransaction {
- public:
- virtual void TrackChangesTo(const EntryKernel* entry) = 0;
-
- protected:
- BaseWriteTransaction(
- const tracked_objects::Location location,
- const char* name,
- WriterTag writer,
- Directory* directory);
- virtual ~BaseWriteTransaction();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BaseWriteTransaction);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_BASE_WRITE_TRANSACTION_H_
diff --git a/chromium/sync/syncable/syncable_changes_version.h b/chromium/sync/syncable/syncable_changes_version.h
deleted file mode 100644
index 9b57aedaf41..00000000000
--- a/chromium/sync/syncable/syncable_changes_version.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
-#define SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
-
-namespace syncer {
-namespace syncable {
-
-// For the most part, the sync engine treats version numbers as opaque values.
-// However, there are parts of our code base that break this abstraction, and
-// depend on the following two invariants:
-//
-// 1. CHANGES_VERSION is less than 0.
-// 2. The server only issues positive version numbers.
-//
-// Breaking these abstractions makes some operations 10 times
-// faster. If either of these invariants change, then those queries
-// must be revisited.
-
-enum {
- CHANGES_VERSION = -1
-};
-
-#define CHANGES_VERSION_STRING "-1"
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
diff --git a/chromium/sync/syncable/syncable_columns.h b/chromium/sync/syncable/syncable_columns.h
deleted file mode 100644
index 9d45c7404ad..00000000000
--- a/chromium/sync/syncable/syncable_columns.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
-#define SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
-
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_changes_version.h"
-
-namespace syncer {
-namespace syncable {
-
-struct ColumnSpec {
- const char* name;
- const char* spec;
-};
-
-// Must be in exact same order as fields in entry_kernel.h.
-static const ColumnSpec g_metas_columns[] = {
- //////////////////////////////////////
- // int64s
- {"metahandle", "bigint primary key ON CONFLICT FAIL"},
- {"base_version", "bigint default " CHANGES_VERSION_STRING},
- {"server_version", "bigint default 0"},
- // This is the item ID that we store for the embedding application.
- {"local_external_id", "bigint default 0"},
- {"transaction_version", "bigint default 0"},
- // These timestamps are kept in the same format as that of the
- // protocol (ms since Unix epoch).
- {"mtime", "bigint default 0"},
- {"server_mtime", "bigint default 0"},
- {"ctime", "bigint default 0"},
- {"server_ctime", "bigint default 0"},
- //////////////////////////////////////
- // Ids
- {"id", "varchar(255) default \"r\""},
- {"parent_id", "varchar(255) default \"r\""},
- {"server_parent_id", "varchar(255) default \"r\""},
- //////////////////////////////////////
- // bits
- {"is_unsynced", "bit default 0"},
- {"is_unapplied_update", "bit default 0"},
- {"is_del", "bit default 0"},
- {"is_dir", "bit default 0"},
- {"server_is_dir", "bit default 0"},
- {"server_is_del", "bit default 0"},
- //////////////////////////////////////
- // Strings
- {"non_unique_name", "varchar"},
- {"server_non_unique_name", "varchar(255)"},
- {"unique_server_tag", "varchar"},
- {"unique_client_tag", "varchar"},
- {"unique_bookmark_tag", "varchar"},
- //////////////////////////////////////
- // Blobs (serialized protos).
- {"specifics", "blob"},
- {"server_specifics", "blob"},
- {"base_server_specifics", "blob"},
- //////////////////////////////////////
- // Blobs (positions).
- {"server_unique_position", "blob"},
- {"unique_position", "blob"},
-};
-
-// At least enforce that there are equal number of column names and fields.
-COMPILE_ASSERT(arraysize(g_metas_columns) >= FIELD_COUNT, missing_column_name);
-COMPILE_ASSERT(arraysize(g_metas_columns) <= FIELD_COUNT, extra_column_names);
-
-static inline const char* ColumnName(int field) {
- DCHECK(field < BEGIN_TEMPS);
- return g_metas_columns[field].name;
-}
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
diff --git a/chromium/sync/syncable/syncable_delete_journal.cc b/chromium/sync/syncable/syncable_delete_journal.cc
deleted file mode 100644
index 0aa04fa69d2..00000000000
--- a/chromium/sync/syncable/syncable_delete_journal.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_delete_journal.h"
-
-#include "base/stl_util.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace syncer {
-namespace syncable {
-
-DeleteJournal::DeleteJournal(JournalIndex* initial_journal) {
- CHECK(initial_journal);
- delete_journals_.swap(*initial_journal);
-}
-
-DeleteJournal::~DeleteJournal() {
- STLDeleteElements(&delete_journals_);
-}
-
-size_t DeleteJournal::GetDeleteJournalSize(BaseTransaction* trans) const {
- DCHECK(trans);
- return delete_journals_.size();
-}
-
-void DeleteJournal::UpdateDeleteJournalForServerDelete(
- BaseTransaction* trans, bool was_deleted, const EntryKernel& entry) {
- DCHECK(trans);
-
- // Should be sufficient to check server type only but check for local
- // type too because of incomplete test setup.
- if (!(IsDeleteJournalEnabled(entry.GetServerModelType()) ||
- IsDeleteJournalEnabled(
- GetModelTypeFromSpecifics(entry.ref(SPECIFICS))))) {
- return;
- }
-
- JournalIndex::iterator it = delete_journals_.find(&entry);
-
- if (entry.ref(SERVER_IS_DEL)) {
- if (it == delete_journals_.end()) {
- // New delete.
- EntryKernel* t = new EntryKernel(entry);
- delete_journals_.insert(t);
- delete_journals_to_purge_.erase(t->ref(META_HANDLE));
- }
- } else {
- // Undelete. This could happen in two cases:
- // * An entry was deleted then undeleted, i.e. server delete was
- // overwritten because of entry has unsynced data locally.
- // * A data type was broken, i.e. encountered unrecoverable error, in last
- // sync session and all its entries were duplicated in delete journals.
- // On restart, entries are recreated from downloads and recreation calls
- // UpdateDeleteJournals() to remove live entries from delete journals,
- // thus only deleted entries remain in journals.
- if (it != delete_journals_.end()) {
- delete_journals_to_purge_.insert((*it)->ref(META_HANDLE));
- delete *it;
- delete_journals_.erase(it);
- } else if (was_deleted) {
- delete_journals_to_purge_.insert(entry.ref(META_HANDLE));
- }
- }
-}
-
-void DeleteJournal::GetDeleteJournals(BaseTransaction* trans,
- ModelType type,
- EntryKernelSet* deleted_entries) {
- DCHECK(trans);
- for (JournalIndex::const_iterator it = delete_journals_.begin();
- it != delete_journals_.end(); ++it) {
- if ((*it)->GetServerModelType() == type ||
- GetModelTypeFromSpecifics((*it)->ref(SPECIFICS)) == type) {
- deleted_entries->insert(*it);
- }
- }
- passive_delete_journal_types_.Put(type);
-}
-
-void DeleteJournal::PurgeDeleteJournals(BaseTransaction* trans,
- const MetahandleSet& to_purge) {
- DCHECK(trans);
- JournalIndex::iterator it = delete_journals_.begin();
- while (it != delete_journals_.end()) {
- int64 handle = (*it)->ref(META_HANDLE);
- if (to_purge.count(handle)) {
- delete *it;
- delete_journals_.erase(it++);
- } else {
- ++it;
- }
- }
- delete_journals_to_purge_.insert(to_purge.begin(), to_purge.end());
-}
-
-void DeleteJournal::TakeSnapshotAndClear(BaseTransaction* trans,
- EntryKernelSet* journal_entries,
- MetahandleSet* journals_to_purge) {
- DCHECK(trans);
- // Move passive delete journals to snapshot. Will copy back if snapshot fails
- // to save.
- JournalIndex::iterator it = delete_journals_.begin();
- while (it != delete_journals_.end()) {
- if (passive_delete_journal_types_.Has((*it)->GetServerModelType()) ||
- passive_delete_journal_types_.Has(GetModelTypeFromSpecifics(
- (*it)->ref(SPECIFICS)))) {
- journal_entries->insert(*it);
- delete_journals_.erase(it++);
- } else {
- ++it;
- }
- }
- *journals_to_purge = delete_journals_to_purge_;
- delete_journals_to_purge_.clear();
-}
-
-void DeleteJournal::AddJournalBatch(BaseTransaction* trans,
- const EntryKernelSet& entries) {
- DCHECK(trans);
- EntryKernel needle;
- for (EntryKernelSet::const_iterator i = entries.begin();
- i != entries.end(); ++i) {
- needle.put(ID, (*i)->ref(ID));
- if (delete_journals_.find(&needle) == delete_journals_.end()) {
- delete_journals_.insert(new EntryKernel(**i));
- }
- delete_journals_to_purge_.erase((*i)->ref(META_HANDLE));
- }
-}
-
-/* static */
-bool DeleteJournal::IsDeleteJournalEnabled(ModelType type) {
- switch (type) {
- case BOOKMARKS:
- return true;
- default:
- return false;
- }
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_delete_journal.h b/chromium/sync/syncable/syncable_delete_journal.h
deleted file mode 100644
index a4bf9ce319d..00000000000
--- a/chromium/sync/syncable/syncable_delete_journal.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_DELETE_JOURNAL_H_
-#define SYNC_SYNCABLE_SYNCABLE_DELETE_JOURNAL_H_
-
-#include <set>
-
-#include "base/gtest_prod_util.h"
-#include "base/synchronization/lock.h"
-#include "sync/base/sync_export.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/syncable-inl.h"
-
-namespace syncer {
-namespace syncable {
-
-class BaseTransaction;
-struct EntryKernel;
-
-typedef std::set<const EntryKernel*, LessField<IdField, ID> > JournalIndex;
-
-// DeleteJournal manages deleted entries that are not in sync directory until
-// it's safe to drop them after the deletion is confirmed with native models.
-// DeleteJournal is thread-safe and can be accessed on any thread. Has to hold
-// a valid transaction object when calling methods of DeleteJournal, thus each
-// method requires a non-null |trans| parameter.
-class SYNC_EXPORT_PRIVATE DeleteJournal {
- public:
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, ManageDeleteJournals);
-
- // Initialize |delete_journals_| using |intitial_journal|, whose content is
- // destroyed during initialization.
- explicit DeleteJournal(JournalIndex* initial_journal);
- ~DeleteJournal();
-
- // For testing only.
- size_t GetDeleteJournalSize(BaseTransaction* trans) const;
-
- // Add/remove |entry| to/from |delete_journals_| according to its
- // SERVER_IS_DEL field and |was_deleted|. Called on sync thread.
- void UpdateDeleteJournalForServerDelete(BaseTransaction* trans,
- bool was_deleted,
- const EntryKernel& entry);
-
- // Return entries of specified type in |delete_journals_|. This should be
- // called ONCE in model association. |deleted_entries| can be used to
- // detect deleted sync data that's not persisted in native model to
- // prevent back-from-dead problem. |deleted_entries| are only valid during
- // lifetime of |trans|. |type| is added to |passive_delete_journal_types_| to
- // enable periodically saving/clearing of delete journals of |type| because
- // new journals added later are not needed until next model association.
- // Can be called on any thread.
- void GetDeleteJournals(BaseTransaction* trans, ModelType type,
- EntryKernelSet* deleted_entries);
-
- // Purge entries of specified type in |delete_journals_| if their handles are
- // in |to_purge|. This should be called after model association and
- // |to_purge| should contain handles of the entries whose deletions are
- // confirmed in native model. Can be called on any thread.
- void PurgeDeleteJournals(BaseTransaction* trans,
- const MetahandleSet& to_purge);
-
- // Move entries in |delete_journals_| whose types are in
- // |passive_delete_journal_types_| to |journal_entries|. Move handles in
- // |delete_journals_to_purge_| to |journals_to_purge|. Called on sync thread.
- void TakeSnapshotAndClear(BaseTransaction* trans,
- EntryKernelSet* journal_entries,
- MetahandleSet* journals_to_purge);
-
- // Add |entries| to |delete_journals_| regardless of their SERVER_IS_DEL
- // value. This is used to:
- // * restore delete journals from snapshot if snapshot failed to save.
- // * batch add entries of a data type with unrecoverable error to delete
- // journal before purging them.
- // Called on sync thread.
- void AddJournalBatch(BaseTransaction* trans, const EntryKernelSet& entries);
-
- // Return true if delete journals of |type| are maintained.
- static bool IsDeleteJournalEnabled(ModelType type);
-
- private:
- // Contains deleted entries that may not be persisted in native models. And
- // in case of unrecoverable error, all purged entries are moved here for
- // bookkeeping to prevent back-from-dead entries that are deleted elsewhere
- // when sync's down.
- JournalIndex delete_journals_;
-
- // Contains meta handles of deleted entries that have been persisted or
- // undeleted, thus can be removed from database.
- MetahandleSet delete_journals_to_purge_;
-
- // Delete journals of these types can be cleared from memory after being
- // saved to database.
- ModelTypeSet passive_delete_journal_types_;
-
- DISALLOW_COPY_AND_ASSIGN(DeleteJournal);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_DELETE_JOURNAL_H_
diff --git a/chromium/sync/syncable/syncable_enum_conversions.cc b/chromium/sync/syncable/syncable_enum_conversions.cc
deleted file mode 100644
index 8f27912604d..00000000000
--- a/chromium/sync/syncable/syncable_enum_conversions.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with entry_kernel.h.
-
-#include "sync/syncable/syncable_enum_conversions.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-
-namespace syncer {
-namespace syncable {
-
-// We can't tokenize expected_min/expected_max since it can be a
-// general expression.
-#define ASSERT_ENUM_BOUNDS(enum_min, enum_max, expected_min, expected_max) \
- COMPILE_ASSERT(static_cast<int>(enum_min) == \
- static_cast<int>(expected_min), \
- enum_min##_not_expected_min); \
- COMPILE_ASSERT(static_cast<int>(enum_max) == \
- static_cast<int>(expected_max), \
- enum_max##_not_expected_max);
-
-#define ENUM_CASE(enum_value) case enum_value: return #enum_value
-
-const char* GetMetahandleFieldString(MetahandleField metahandle_field) {
- ASSERT_ENUM_BOUNDS(META_HANDLE, META_HANDLE,
- INT64_FIELDS_BEGIN, BASE_VERSION - 1);
- switch (metahandle_field) {
- ENUM_CASE(META_HANDLE);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBaseVersionString(BaseVersion base_version) {
- ASSERT_ENUM_BOUNDS(BASE_VERSION, BASE_VERSION,
- META_HANDLE + 1, SERVER_VERSION - 1);
- switch (base_version) {
- ENUM_CASE(BASE_VERSION);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetInt64FieldString(Int64Field int64_field) {
- ASSERT_ENUM_BOUNDS(SERVER_VERSION, TRANSACTION_VERSION,
- BASE_VERSION + 1, INT64_FIELDS_END - 1);
- switch (int64_field) {
- ENUM_CASE(SERVER_VERSION);
- ENUM_CASE(LOCAL_EXTERNAL_ID);
- ENUM_CASE(TRANSACTION_VERSION);
- case INT64_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetTimeFieldString(TimeField time_field) {
- ASSERT_ENUM_BOUNDS(MTIME, SERVER_CTIME,
- TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
- switch (time_field) {
- ENUM_CASE(MTIME);
- ENUM_CASE(SERVER_MTIME);
- ENUM_CASE(CTIME);
- ENUM_CASE(SERVER_CTIME);
- case TIME_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetIdFieldString(IdField id_field) {
- ASSERT_ENUM_BOUNDS(ID, SERVER_PARENT_ID,
- ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
- switch (id_field) {
- ENUM_CASE(ID);
- ENUM_CASE(PARENT_ID);
- ENUM_CASE(SERVER_PARENT_ID);
- case ID_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetIndexedBitFieldString(IndexedBitField indexed_bit_field) {
- ASSERT_ENUM_BOUNDS(IS_UNSYNCED, IS_UNAPPLIED_UPDATE,
- BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
- switch (indexed_bit_field) {
- ENUM_CASE(IS_UNSYNCED);
- ENUM_CASE(IS_UNAPPLIED_UPDATE);
- case INDEXED_BIT_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetIsDelFieldString(IsDelField is_del_field) {
- ASSERT_ENUM_BOUNDS(IS_DEL, IS_DEL,
- INDEXED_BIT_FIELDS_END, IS_DIR - 1);
- switch (is_del_field) {
- ENUM_CASE(IS_DEL);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBitFieldString(BitField bit_field) {
- ASSERT_ENUM_BOUNDS(IS_DIR, SERVER_IS_DEL,
- IS_DEL + 1, BIT_FIELDS_END - 1);
- switch (bit_field) {
- ENUM_CASE(IS_DIR);
- ENUM_CASE(SERVER_IS_DIR);
- ENUM_CASE(SERVER_IS_DEL);
- case BIT_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetStringFieldString(StringField string_field) {
- ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_BOOKMARK_TAG,
- STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
- switch (string_field) {
- ENUM_CASE(NON_UNIQUE_NAME);
- ENUM_CASE(SERVER_NON_UNIQUE_NAME);
- ENUM_CASE(UNIQUE_SERVER_TAG);
- ENUM_CASE(UNIQUE_CLIENT_TAG);
- ENUM_CASE(UNIQUE_BOOKMARK_TAG);
- case STRING_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetProtoFieldString(ProtoField proto_field) {
- ASSERT_ENUM_BOUNDS(SPECIFICS, BASE_SERVER_SPECIFICS,
- PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
- switch (proto_field) {
- ENUM_CASE(SPECIFICS);
- ENUM_CASE(SERVER_SPECIFICS);
- ENUM_CASE(BASE_SERVER_SPECIFICS);
- case PROTO_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetUniquePositionFieldString(UniquePositionField position_field) {
- ASSERT_ENUM_BOUNDS(SERVER_UNIQUE_POSITION, UNIQUE_POSITION,
- UNIQUE_POSITION_FIELDS_BEGIN,
- UNIQUE_POSITION_FIELDS_END - 1);
- switch(position_field) {
- ENUM_CASE(SERVER_UNIQUE_POSITION);
- ENUM_CASE(UNIQUE_POSITION);
- case UNIQUE_POSITION_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBitTempString(BitTemp bit_temp) {
- ASSERT_ENUM_BOUNDS(SYNCING, SYNCING,
- BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
- switch (bit_temp) {
- ENUM_CASE(SYNCING);
- case BIT_TEMPS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-#undef ENUM_CASE
-#undef ASSERT_ENUM_BOUNDS
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_enum_conversions.h b/chromium/sync/syncable/syncable_enum_conversions.h
deleted file mode 100644
index 12f6428591a..00000000000
--- a/chromium/sync/syncable/syncable_enum_conversions.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
-#define SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
-
-// Keep this file in sync with entry_kernel.h.
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/entry_kernel.h"
-
-// Utility functions to get the string equivalent for some syncable
-// enums.
-
-namespace syncer {
-namespace syncable {
-
-// The returned strings (which don't have to be freed) are in ASCII.
-// The result of passing in an invalid enum value is undefined.
-
-SYNC_EXPORT_PRIVATE const char* GetMetahandleFieldString(
- MetahandleField metahandle_field);
-
-SYNC_EXPORT_PRIVATE const char* GetBaseVersionString(BaseVersion base_version);
-
-SYNC_EXPORT_PRIVATE const char* GetInt64FieldString(Int64Field int64_field);
-
-SYNC_EXPORT_PRIVATE const char* GetTimeFieldString(TimeField time_field);
-
-SYNC_EXPORT_PRIVATE const char* GetIdFieldString(IdField id_field);
-
-SYNC_EXPORT_PRIVATE const char* GetIndexedBitFieldString(
- IndexedBitField indexed_bit_field);
-
-SYNC_EXPORT_PRIVATE const char* GetIsDelFieldString(IsDelField is_del_field);
-
-SYNC_EXPORT_PRIVATE const char* GetBitFieldString(BitField bit_field);
-
-SYNC_EXPORT_PRIVATE const char* GetStringFieldString(StringField string_field);
-
-SYNC_EXPORT_PRIVATE const char* GetProtoFieldString(ProtoField proto_field);
-
-SYNC_EXPORT_PRIVATE const char* GetUniquePositionFieldString(
- UniquePositionField position_field);
-
-SYNC_EXPORT_PRIVATE const char* GetBitTempString(BitTemp bit_temp);
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
diff --git a/chromium/sync/syncable/syncable_enum_conversions_unittest.cc b/chromium/sync/syncable/syncable_enum_conversions_unittest.cc
deleted file mode 100644
index f74d1301ae4..00000000000
--- a/chromium/sync/syncable/syncable_enum_conversions_unittest.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with entry_kernel.h.
-
-#include "sync/syncable/syncable_enum_conversions.h"
-
-#include <string>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace syncable {
-namespace {
-
-class SyncableEnumConversionsTest : public testing::Test {
-};
-
-template <class T>
-void TestEnumStringFunction(const char* (*enum_string_fn)(T),
- int enum_min, int enum_max) {
- EXPECT_LE(enum_min, enum_max);
- for (int i = enum_min; i <= enum_max; ++i) {
- const std::string& str = enum_string_fn(static_cast<T>(i));
- EXPECT_FALSE(str.empty());
- }
-}
-
-TEST_F(SyncableEnumConversionsTest, GetMetahandleFieldString) {
- TestEnumStringFunction(
- GetMetahandleFieldString, INT64_FIELDS_BEGIN, META_HANDLE);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetBaseVersionString) {
- TestEnumStringFunction(
- GetBaseVersionString, META_HANDLE + 1, BASE_VERSION);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetInt64FieldString) {
- TestEnumStringFunction(
- GetInt64FieldString, BASE_VERSION + 1, INT64_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetTimeFieldString) {
- TestEnumStringFunction(
- GetTimeFieldString, TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetIdFieldString) {
- TestEnumStringFunction(
- GetIdFieldString, ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetIndexedBitFieldString) {
- TestEnumStringFunction(
- GetIndexedBitFieldString, BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetIsDelFieldString) {
- TestEnumStringFunction(
- GetIsDelFieldString, INDEXED_BIT_FIELDS_END, IS_DEL);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetBitFieldString) {
- TestEnumStringFunction(
- GetBitFieldString, IS_DEL + 1, BIT_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetStringFieldString) {
- TestEnumStringFunction(
- GetStringFieldString, STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetProtoFieldString) {
- TestEnumStringFunction(
- GetProtoFieldString, PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetUniquePositionFieldString) {
- TestEnumStringFunction(
- GetUniquePositionFieldString,
- UNIQUE_POSITION_FIELDS_BEGIN, UNIQUE_POSITION_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetBitTempString) {
- TestEnumStringFunction(
- GetBitTempString, BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
-}
-
-} // namespace
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_id.cc b/chromium/sync/syncable/syncable_id.cc
deleted file mode 100644
index 614fd3e128a..00000000000
--- a/chromium/sync/syncable/syncable_id.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_id.h"
-
-#include <iosfwd>
-
-#include "base/values.h"
-
-using std::ostream;
-using std::string;
-
-namespace syncer {
-namespace syncable {
-
-ostream& operator<<(ostream& out, const Id& id) {
- out << id.s_;
- return out;
-}
-
-base::StringValue* Id::ToValue() const {
- return new base::StringValue(s_);
-}
-
-string Id::GetServerId() const {
- // Currently root is the string "0". We need to decide on a true value.
- // "" would be convenient here, as the IsRoot call would not be needed.
- if (IsRoot())
- return "0";
- return s_.substr(1);
-}
-
-Id Id::CreateFromServerId(const string& server_id) {
- Id id;
- if (server_id == "0")
- id.s_ = "r";
- else
- id.s_ = string("s") + server_id;
- return id;
-}
-
-Id Id::CreateFromClientString(const string& local_id) {
- Id id;
- if (local_id == "0")
- id.s_ = "r";
- else
- id.s_ = string("c") + local_id;
- return id;
-}
-
-Id Id::GetLexicographicSuccessor() const {
- // The successor of a string is given by appending the least
- // character in the alphabet.
- Id id = *this;
- id.s_.push_back(0);
- return id;
-}
-
-// static
-Id Id::GetLeastIdForLexicographicComparison() {
- Id id;
- id.s_.clear();
- return id;
-}
-
-Id GetNullId() {
- return Id(); // Currently == root.
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_id.h b/chromium/sync/syncable/syncable_id.h
deleted file mode 100644
index bbfeb9325d7..00000000000
--- a/chromium/sync/syncable/syncable_id.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_ID_H_
-#define SYNC_SYNCABLE_SYNCABLE_ID_H_
-
-#include <iosfwd>
-#include <limits>
-#include <sstream>
-#include <string>
-
-#include "base/containers/hash_tables.h"
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-
-class MockConnectionManager;
-
-namespace base {
-class StringValue;
-}
-
-namespace sql {
-class Statement;
-}
-
-namespace syncer {
-namespace syncable {
-struct EntryKernel;
-class Id;
-
-SYNC_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& out, const Id& id);
-
-// For historical reasons, 3 concepts got everloaded into the Id:
-// 1. A unique, opaque identifier for the object.
-// 2. Flag specifing whether server know about this object.
-// 3. Flag for root.
-//
-// We originally wrapped an integer for this information, but now we use a
-// string. It will have one of three forms:
-// 1. c<client only opaque id> for client items that have not been committed.
-// 2. r for the root item.
-// 3. s<server provided opaque id> for items that the server knows about.
-class SYNC_EXPORT Id {
- public:
- // This constructor will be handy even when we move away from int64s, just
- // for unit tests.
- inline Id() : s_("r") { }
- inline Id(const Id& that) {
- Copy(that);
- }
- inline Id& operator = (const Id& that) {
- Copy(that);
- return *this;
- }
- inline void Copy(const Id& that) {
- this->s_ = that.s_;
- }
- inline bool IsRoot() const {
- return "r" == s_;
- }
- inline bool ServerKnows() const {
- return s_[0] == 's' || s_ == "r";
- }
-
- // TODO(sync): We could use null here, but to ease conversion we use "r".
- // fix this, this is madness :)
- inline bool IsNull() const {
- return IsRoot();
- }
- inline void Clear() {
- s_ = "r";
- }
- inline int compare(const Id& that) const {
- return s_.compare(that.s_);
- }
- inline bool operator == (const Id& that) const {
- return s_ == that.s_;
- }
- inline bool operator != (const Id& that) const {
- return s_ != that.s_;
- }
- inline bool operator < (const Id& that) const {
- return s_ < that.s_;
- }
- inline bool operator > (const Id& that) const {
- return s_ > that.s_;
- }
-
- const std::string& value() const {
- return s_;
- }
-
- // Return the next highest ID in the lexicographic ordering. This is
- // useful for computing upper bounds on std::sets that are ordered
- // by operator<.
- Id GetLexicographicSuccessor() const;
-
- // Dumps the ID as a value and returns it. Transfers ownership of
- // the StringValue to the caller.
- base::StringValue* ToValue() const;
-
- // Three functions are used to work with our proto buffers.
- std::string GetServerId() const;
- static Id CreateFromServerId(const std::string& server_id);
- // This should only be used if you get back a reference to a local
- // id from the server. Returns a client only opaque id.
- static Id CreateFromClientString(const std::string& local_id);
-
- // This method returns an ID that will compare less than any valid ID.
- // The returned ID is not a valid ID itself. This is useful for
- // computing lower bounds on std::sets that are ordered by operator<.
- static Id GetLeastIdForLexicographicComparison();
-
- private:
- friend scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement);
- friend void BindFields(const EntryKernel& entry,
- sql::Statement* statement);
- SYNC_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream& out,
- const Id& id);
- friend class MockConnectionManager;
- friend class SyncableIdTest;
-
- std::string s_;
-};
-
-SYNC_EXPORT_PRIVATE Id GetNullId();
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_ID_H_
diff --git a/chromium/sync/syncable/syncable_id_unittest.cc b/chromium/sync/syncable/syncable_id_unittest.cc
deleted file mode 100644
index e0b014387fb..00000000000
--- a/chromium/sync/syncable/syncable_id_unittest.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_id.h"
-
-#include <vector>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace syncable {
-
-using std::vector;
-
-class SyncableIdTest : public testing::Test { };
-
-TEST(SyncableIdTest, TestIDCreation) {
- vector<Id> v;
- v.push_back(TestIdFactory::FromNumber(5));
- v.push_back(TestIdFactory::FromNumber(1));
- v.push_back(TestIdFactory::FromNumber(-5));
- v.push_back(TestIdFactory::MakeLocal("A"));
- v.push_back(TestIdFactory::MakeLocal("B"));
- v.push_back(TestIdFactory::MakeServer("A"));
- v.push_back(TestIdFactory::MakeServer("B"));
- v.push_back(Id::CreateFromServerId("-5"));
- v.push_back(Id::CreateFromClientString("A"));
- v.push_back(Id::CreateFromServerId("A"));
-
- for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
- for (vector<Id>::iterator j = v.begin(); j != i; ++j) {
- ASSERT_NE(*i, *j) << "mis equated two distinct ids";
- }
- ASSERT_EQ(*i, *i) << "self-equality failed";
- Id copy1 = *i;
- Id copy2 = *i;
- ASSERT_EQ(copy1, copy2) << "equality after copy failed";
- }
-}
-
-TEST(SyncableIdTest, GetLeastIdForLexicographicComparison) {
- vector<Id> v;
- v.push_back(Id::CreateFromServerId("z5"));
- v.push_back(Id::CreateFromServerId("z55"));
- v.push_back(Id::CreateFromServerId("z6"));
- v.push_back(Id::CreateFromClientString("zA-"));
- v.push_back(Id::CreateFromClientString("zA--"));
- v.push_back(Id::CreateFromServerId("zA--"));
-
- for (int i = 0; i <= 255; ++i) {
- std::string one_character_id;
- one_character_id.push_back(i);
- v.push_back(Id::CreateFromClientString(one_character_id));
- }
-
- for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
- // The following looks redundant, but we're testing a custom operator<.
- ASSERT_LT(Id::GetLeastIdForLexicographicComparison(), *i);
- ASSERT_NE(*i, i->GetLexicographicSuccessor());
- ASSERT_NE(i->GetLexicographicSuccessor(), *i);
- ASSERT_LT(*i, i->GetLexicographicSuccessor());
- ASSERT_GT(i->GetLexicographicSuccessor(), *i);
- for (vector<Id>::iterator j = v.begin(); j != v.end(); ++j) {
- if (j == i)
- continue;
- if (*j < *i) {
- ASSERT_LT(j->GetLexicographicSuccessor(), *i);
- ASSERT_LT(j->GetLexicographicSuccessor(),
- i->GetLexicographicSuccessor());
- ASSERT_LT(*j, i->GetLexicographicSuccessor());
- } else {
- ASSERT_GT(j->GetLexicographicSuccessor(), *i);
- ASSERT_GT(j->GetLexicographicSuccessor(),
- i->GetLexicographicSuccessor());
- ASSERT_GT(*j, i->GetLexicographicSuccessor());
- }
- }
- }
-}
-
-TEST(SyncableIdTest, ToValue) {
- base::ExpectStringValue("r", Id::CreateFromServerId("0").ToValue());
- base::ExpectStringValue("svalue", Id::CreateFromServerId("value").ToValue());
-
- base::ExpectStringValue("r", Id::CreateFromClientString("0").ToValue());
- base::ExpectStringValue("cvalue",
- Id::CreateFromClientString("value").ToValue());
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_model_neutral_write_transaction.cc b/chromium/sync/syncable/syncable_model_neutral_write_transaction.cc
deleted file mode 100644
index 9aaf7400726..00000000000
--- a/chromium/sync/syncable/syncable_model_neutral_write_transaction.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-
-#include "sync/syncable/directory.h"
-
-namespace syncer {
-namespace syncable {
-
-ModelNeutralWriteTransaction::ModelNeutralWriteTransaction(
- const tracked_objects::Location& location,
- WriterTag writer, Directory* directory)
- : BaseWriteTransaction(location,
- "ModelNeutralWriteTransaction",
- writer,
- directory) {
- Lock();
-}
-
-ModelNeutralWriteTransaction::~ModelNeutralWriteTransaction() {
- directory()->CheckInvariantsOnTransactionClose(this, modified_handles_);
- HandleUnrecoverableErrorIfSet();
- Unlock();
-}
-
-void ModelNeutralWriteTransaction::TrackChangesTo(const EntryKernel* entry) {
- modified_handles_.insert(entry->ref(META_HANDLE));
-}
-
-} // namespace syncer
-} // namespace syncable
diff --git a/chromium/sync/syncable/syncable_model_neutral_write_transaction.h b/chromium/sync/syncable/syncable_model_neutral_write_transaction.h
deleted file mode 100644
index f96725ed69f..00000000000
--- a/chromium/sync/syncable/syncable_model_neutral_write_transaction.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
-#define SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/syncable_base_write_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-// A transaction used to instantiate Entries or ModelNeutralMutableEntries.
-//
-// This allows it to be used when making changes to sync entity properties that
-// do not need to be kept in sync with the associated native model.
-//
-// This class differs internally from WriteTransactions in that it does a less
-// good job of tracking and reporting on changes to the entries modified within
-// its scope. This is because its changes do not need to be reported to the
-// DirectoryChangeDelegate.
-class SYNC_EXPORT_PRIVATE ModelNeutralWriteTransaction
- : public BaseWriteTransaction {
- public:
- ModelNeutralWriteTransaction(
- const tracked_objects::Location& location,
- WriterTag writer,
- Directory* directory);
- virtual ~ModelNeutralWriteTransaction();
-
- virtual void TrackChangesTo(const EntryKernel* entry) OVERRIDE;
-
- private:
- MetahandleSet modified_handles_;
-
- DISALLOW_COPY_AND_ASSIGN(ModelNeutralWriteTransaction);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_MODEL_NEUTRAL_WRITE_TRANSACTION_H_
diff --git a/chromium/sync/syncable/syncable_proto_util.cc b/chromium/sync/syncable/syncable_proto_util.cc
deleted file mode 100644
index 4f35b1930e5..00000000000
--- a/chromium/sync/syncable/syncable_proto_util.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_proto_util.h"
-
-#include "sync/protocol/sync.pb.h"
-
-namespace syncer {
-
-syncable::Id SyncableIdFromProto(const std::string& proto_string) {
- return syncable::Id::CreateFromServerId(proto_string);
-}
-
-std::string SyncableIdToProto(const syncable::Id& syncable_id) {
- return syncable_id.GetServerId();
-}
-
-bool IsFolder(const sync_pb::SyncEntity& entity) {
- // TODO(sync): The checks for has_folder() and has_bookmarkdata() are likely
- // no longer necessary. We should remove them if we can convince ourselves
- // that doing so won't break anything.
- return ((entity.has_folder() && entity.folder()) ||
- (entity.has_bookmarkdata() &&
- entity.bookmarkdata().bookmark_folder()));
-}
-
-bool IsRoot(const sync_pb::SyncEntity& entity) {
- return SyncableIdFromProto(entity.id_string()).IsRoot();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_proto_util.h b/chromium/sync/syncable/syncable_proto_util.h
deleted file mode 100644
index 305fc4cc8f6..00000000000
--- a/chromium/sync/syncable/syncable_proto_util.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNCABLE_PROTOCOL_PROTO_UTIL_H_
-#define SYNCABLE_PROTOCOL_PROTO_UTIL_H_
-
-#include <string>
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/syncable_id.h"
-
-namespace sync_pb {
-class SyncEntity;
-}
-
-namespace syncer {
-
-// Converts from a specially formatted string field to a syncable::Id. Used
-// when interpreting the fields of protocol buffers received from the server.
-syncable::Id SyncableIdFromProto(const std::string& proto_string);
-
-// Converts from a syncable::Id to a formatted std::string. This is useful for
-// populating the fields of a protobuf which will be sent to the server.
-SYNC_EXPORT_PRIVATE std::string SyncableIdToProto(
- const syncable::Id& syncable_id);
-
-// Helper function to determine if this SyncEntity's properties indicate that it
-// is a folder.
-bool IsFolder(const sync_pb::SyncEntity& entity);
-
-// Helper function to determine if this SyncEntity's properties indicate that it
-// is the root node.
-bool IsRoot(const sync_pb::SyncEntity& entity);
-
-} // namespace syncer
-
-#endif // SYNCABLE_PROTOCOL_PROTO_UTIL_H_
diff --git a/chromium/sync/syncable/syncable_read_transaction.cc b/chromium/sync/syncable/syncable_read_transaction.cc
deleted file mode 100644
index fcdb77e21f9..00000000000
--- a/chromium/sync/syncable/syncable_read_transaction.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_read_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-ReadTransaction::ReadTransaction(const tracked_objects::Location& location,
- Directory* directory)
- : BaseTransaction(location, "ReadTransaction", INVALID, directory) {
- Lock();
-}
-
-ReadTransaction::~ReadTransaction() {
- HandleUnrecoverableErrorIfSet();
- Unlock();
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_read_transaction.h b/chromium/sync/syncable/syncable_read_transaction.h
deleted file mode 100644
index 2b9729d4662..00000000000
--- a/chromium/sync/syncable/syncable_read_transaction.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_READ_TRANSACTION_H_
-#define SYNC_SYNCABLE_SYNCABLE_READ_TRANSACTION_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/syncable_base_transaction.h"
-
-namespace syncer {
-class ReadTransaction;
-namespace syncable {
-
-// Locks db in constructor, unlocks in destructor.
-class SYNC_EXPORT ReadTransaction : public BaseTransaction {
- public:
- ReadTransaction(const tracked_objects::Location& from_here,
- Directory* directory);
-
- virtual ~ReadTransaction();
-
- protected: // Don't allow creation on heap, except by sync API wrapper.
- friend class syncer::ReadTransaction;
- void* operator new(size_t size) { return (::operator new)(size); }
-
- DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_READ_TRANSACTION_H_
diff --git a/chromium/sync/syncable/syncable_unittest.cc b/chromium/sync/syncable/syncable_unittest.cc
deleted file mode 100644
index cfcc2db92d2..00000000000
--- a/chromium/sync/syncable/syncable_unittest.cc
+++ /dev/null
@@ -1,2303 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/file_util.h"
-#include "base/files/file_path.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/stl_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/condition_variable.h"
-#include "base/test/values_test_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/values.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/syncable/directory_backing_store.h"
-#include "sync/syncable/directory_change_delegate.h"
-#include "sync/syncable/in_memory_directory_backing_store.h"
-#include "sync/syncable/metahandle_set.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/on_disk_directory_backing_store.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/test/engine/test_id_factory.h"
-#include "sync/test/engine/test_syncable_utils.h"
-#include "sync/test/fake_encryptor.h"
-#include "sync/test/null_directory_change_delegate.h"
-#include "sync/test/null_transaction_observer.h"
-#include "sync/util/test_unrecoverable_error_handler.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace syncable {
-
-using base::ExpectDictBooleanValue;
-using base::ExpectDictStringValue;
-
-class SyncableKernelTest : public testing::Test {};
-
-// TODO(akalin): Add unit tests for EntryKernel::ContainsString().
-
-TEST_F(SyncableKernelTest, ToValue) {
- EntryKernel kernel;
- scoped_ptr<base::DictionaryValue> value(kernel.ToValue(NULL));
- if (value) {
- // Not much to check without repeating the ToValue() code.
- EXPECT_TRUE(value->HasKey("isDirty"));
- // The extra +2 is for "isDirty" and "serverModelType".
- EXPECT_EQ(BIT_TEMPS_END - BEGIN_FIELDS + 2,
- static_cast<int>(value->size()));
- } else {
- ADD_FAILURE();
- }
-}
-
-namespace {
-void PutDataAsBookmarkFavicon(WriteTransaction* wtrans,
- MutableEntry* e,
- const char* bytes,
- size_t bytes_length) {
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_bookmark()->set_url("http://demo/");
- specifics.mutable_bookmark()->set_favicon(bytes, bytes_length);
- e->PutSpecifics(specifics);
-}
-
-void ExpectDataFromBookmarkFaviconEquals(BaseTransaction* trans,
- Entry* e,
- const char* bytes,
- size_t bytes_length) {
- ASSERT_TRUE(e->good());
- ASSERT_TRUE(e->GetSpecifics().has_bookmark());
- ASSERT_EQ("http://demo/", e->GetSpecifics().bookmark().url());
- ASSERT_EQ(std::string(bytes, bytes_length),
- e->GetSpecifics().bookmark().favicon());
-}
-} // namespace
-
-class SyncableGeneralTest : public testing::Test {
- public:
- static const char kIndexTestName[];
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- db_path_ = temp_dir_.path().Append(
- FILE_PATH_LITERAL("SyncableTest.sqlite3"));
- }
-
- virtual void TearDown() {
- }
- protected:
- base::MessageLoop message_loop_;
- base::ScopedTempDir temp_dir_;
- NullDirectoryChangeDelegate delegate_;
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
- base::FilePath db_path_;
-};
-
-const char SyncableGeneralTest::kIndexTestName[] = "IndexTest";
-
-TEST_F(SyncableGeneralTest, General) {
- Directory dir(new InMemoryDirectoryBackingStore("SimpleTest"),
- &handler_,
- NULL,
- NULL,
- NULL);
-
- ASSERT_EQ(OPENED, dir.Open(
- "SimpleTest", &delegate_, NullTransactionObserver()));
-
- int64 root_metahandle;
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, rtrans.root_id());
- ASSERT_TRUE(e.good());
- root_metahandle = e.GetMetahandle();
- }
-
- int64 written_metahandle;
- const Id id = TestIdFactory::FromNumber(99);
- std::string name = "Jeff";
- // Test simple read operations on an empty DB.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_FALSE(e.good()); // Hasn't been written yet.
-
- Directory::Metahandles child_handles;
- dir.GetChildHandlesById(&rtrans, rtrans.root_id(), &child_handles);
- EXPECT_TRUE(child_handles.empty());
-
- dir.GetChildHandlesByHandle(&rtrans, root_metahandle, &child_handles);
- EXPECT_TRUE(child_handles.empty());
- }
-
- // Test creating a new meta entry.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), name);
- ASSERT_TRUE(me.good());
- me.PutId(id);
- me.PutBaseVersion(1);
- written_metahandle = me.GetMetahandle();
- }
-
- // Test GetChildHandles* after something is now in the DB.
- // Also check that GET_BY_ID works.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_TRUE(e.good());
-
- Directory::Metahandles child_handles;
- dir.GetChildHandlesById(&rtrans, rtrans.root_id(), &child_handles);
- EXPECT_EQ(1u, child_handles.size());
-
- for (Directory::Metahandles::iterator i = child_handles.begin();
- i != child_handles.end(); ++i) {
- EXPECT_EQ(*i, written_metahandle);
- }
-
- dir.GetChildHandlesByHandle(&rtrans, root_metahandle, &child_handles);
- EXPECT_EQ(1u, child_handles.size());
-
- for (Directory::Metahandles::iterator i = child_handles.begin();
- i != child_handles.end(); ++i) {
- EXPECT_EQ(*i, written_metahandle);
- }
- }
-
- // Test writing data to an entity. Also check that GET_BY_HANDLE works.
- static const char s[] = "Hello World.";
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
- MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(e.good());
- PutDataAsBookmarkFavicon(&trans, &e, s, sizeof(s));
- }
-
- // Test reading back the contents that we just wrote.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
- MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(e.good());
- ExpectDataFromBookmarkFaviconEquals(&trans, &e, s, sizeof(s));
- }
-
- // Verify it exists in the folder.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- EXPECT_EQ(1, CountEntriesWithName(&rtrans, rtrans.root_id(), name));
- }
-
- // Now delete it.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
- MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
- e.PutIsDel(true);
-
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), name));
- }
-
- dir.SaveChanges();
-}
-
-TEST_F(SyncableGeneralTest, ChildrenOps) {
- Directory dir(new InMemoryDirectoryBackingStore("SimpleTest"),
- &handler_,
- NULL,
- NULL,
- NULL);
- ASSERT_EQ(OPENED, dir.Open(
- "SimpleTest", &delegate_, NullTransactionObserver()));
-
- int64 written_metahandle;
- const Id id = TestIdFactory::FromNumber(99);
- std::string name = "Jeff";
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_FALSE(e.good()); // Hasn't been written yet.
-
- Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
- ASSERT_TRUE(root.good());
- EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- EXPECT_TRUE(root.GetFirstChildId().IsRoot());
- }
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), name);
- ASSERT_TRUE(me.good());
- me.PutId(id);
- me.PutBaseVersion(1);
- written_metahandle = me.GetMetahandle();
- }
-
- // Test children ops after something is now in the DB.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_TRUE(e.good());
-
- Entry child(&rtrans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(child.good());
-
- Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
- ASSERT_TRUE(root.good());
- EXPECT_TRUE(dir.HasChildren(&rtrans, rtrans.root_id()));
- EXPECT_EQ(e.GetId(), root.GetFirstChildId());
- }
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(me.good());
- me.PutIsDel(true);
- }
-
- // Test children ops after the children have been deleted.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_TRUE(e.good());
-
- Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
- ASSERT_TRUE(root.good());
- EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- EXPECT_TRUE(root.GetFirstChildId().IsRoot());
- }
-
- dir.SaveChanges();
-}
-
-TEST_F(SyncableGeneralTest, ClientIndexRebuildsProperly) {
- int64 written_metahandle;
- TestIdFactory factory;
- const Id id = factory.NewServerId();
- std::string name = "cheesepuffs";
- std::string tag = "dietcoke";
-
- // Test creating a new meta entry.
- {
- Directory dir(new OnDiskDirectoryBackingStore(kIndexTestName, db_path_),
- &handler_,
- NULL,
- NULL,
- NULL);
- ASSERT_EQ(OPENED, dir.Open(kIndexTestName, &delegate_,
- NullTransactionObserver()));
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), name);
- ASSERT_TRUE(me.good());
- me.PutId(id);
- me.PutBaseVersion(1);
- me.PutUniqueClientTag(tag);
- written_metahandle = me.GetMetahandle();
- }
- dir.SaveChanges();
- }
-
- // The DB was closed. Now reopen it. This will cause index regeneration.
- {
- Directory dir(new OnDiskDirectoryBackingStore(kIndexTestName, db_path_),
- &handler_,
- NULL,
- NULL,
- NULL);
- ASSERT_EQ(OPENED, dir.Open(kIndexTestName,
- &delegate_, NullTransactionObserver()));
-
- ReadTransaction trans(FROM_HERE, &dir);
- Entry me(&trans, GET_BY_CLIENT_TAG, tag);
- ASSERT_TRUE(me.good());
- EXPECT_EQ(me.GetId(), id);
- EXPECT_EQ(me.GetBaseVersion(), 1);
- EXPECT_EQ(me.GetUniqueClientTag(), tag);
- EXPECT_EQ(me.GetMetahandle(), written_metahandle);
- }
-}
-
-TEST_F(SyncableGeneralTest, ClientIndexRebuildsDeletedProperly) {
- TestIdFactory factory;
- const Id id = factory.NewServerId();
- std::string tag = "dietcoke";
-
- // Test creating a deleted, unsynced, server meta entry.
- {
- Directory dir(new OnDiskDirectoryBackingStore(kIndexTestName, db_path_),
- &handler_,
- NULL,
- NULL,
- NULL);
- ASSERT_EQ(OPENED, dir.Open(kIndexTestName, &delegate_,
- NullTransactionObserver()));
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "deleted");
- ASSERT_TRUE(me.good());
- me.PutId(id);
- me.PutBaseVersion(1);
- me.PutUniqueClientTag(tag);
- me.PutIsDel(true);
- me.PutIsUnsynced(true); // Or it might be purged.
- }
- dir.SaveChanges();
- }
-
- // The DB was closed. Now reopen it. This will cause index regeneration.
- // Should still be present and valid in the client tag index.
- {
- Directory dir(new OnDiskDirectoryBackingStore(kIndexTestName, db_path_),
- &handler_,
- NULL,
- NULL,
- NULL);
- ASSERT_EQ(OPENED, dir.Open(kIndexTestName, &delegate_,
- NullTransactionObserver()));
-
- ReadTransaction trans(FROM_HERE, &dir);
- Entry me(&trans, GET_BY_CLIENT_TAG, tag);
- ASSERT_TRUE(me.good());
- EXPECT_EQ(me.GetId(), id);
- EXPECT_EQ(me.GetUniqueClientTag(), tag);
- EXPECT_TRUE(me.GetIsDel());
- EXPECT_TRUE(me.GetIsUnsynced());
- }
-}
-
-TEST_F(SyncableGeneralTest, ToValue) {
- Directory dir(new InMemoryDirectoryBackingStore("SimpleTest"),
- &handler_,
- NULL,
- NULL,
- NULL);
- ASSERT_EQ(OPENED, dir.Open(
- "SimpleTest", &delegate_, NullTransactionObserver()));
-
- const Id id = TestIdFactory::FromNumber(99);
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- EXPECT_FALSE(e.good()); // Hasn't been written yet.
-
- scoped_ptr<base::DictionaryValue> value(e.ToValue(NULL));
- ExpectDictBooleanValue(false, *value, "good");
- EXPECT_EQ(1u, value->size());
- }
-
- // Test creating a new meta entry.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "new");
- ASSERT_TRUE(me.good());
- me.PutId(id);
- me.PutBaseVersion(1);
-
- scoped_ptr<base::DictionaryValue> value(me.ToValue(NULL));
- ExpectDictBooleanValue(true, *value, "good");
- EXPECT_TRUE(value->HasKey("kernel"));
- ExpectDictStringValue("Bookmarks", *value, "modelType");
- ExpectDictBooleanValue(true, *value, "existsOnClientBecauseNameIsNonEmpty");
- ExpectDictBooleanValue(false, *value, "isRoot");
- }
-
- dir.SaveChanges();
-}
-
-// Test that the bookmark tag generation algorithm remains unchanged.
-TEST_F(SyncableGeneralTest, BookmarkTagTest) {
- InMemoryDirectoryBackingStore* store = new InMemoryDirectoryBackingStore("x");
-
- // The two inputs that form the bookmark tag are the directory's cache_guid
- // and its next_id value. We don't need to take any action to ensure
- // consistent next_id values, but we do need to explicitly request that our
- // InMemoryDirectoryBackingStore always return the same cache_guid.
- store->request_consistent_cache_guid();
-
- Directory dir(store, &handler_, NULL, NULL, NULL);
- ASSERT_EQ(OPENED, dir.Open("x", &delegate_, NullTransactionObserver()));
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry bm(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "bm");
- bm.PutIsUnsynced(true);
-
- // If this assertion fails, that might indicate that the algorithm used to
- // generate bookmark tags has been modified. This could have implications
- // for bookmark ordering. Please make sure you know what you're doing if
- // you intend to make such a change.
- ASSERT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=", bm.GetUniqueBookmarkTag());
- }
-}
-
-// A test fixture for syncable::Directory. Uses an in-memory database to keep
-// the unit tests fast.
-class SyncableDirectoryTest : public testing::Test {
- protected:
- base::MessageLoop message_loop_;
- static const char kName[];
-
- virtual void SetUp() {
- dir_.reset(new Directory(new InMemoryDirectoryBackingStore(kName),
- &handler_,
- NULL,
- NULL,
- NULL));
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->Open(kName, &delegate_,
- NullTransactionObserver()));
- ASSERT_TRUE(dir_->good());
- }
-
- virtual void TearDown() {
- if (dir_)
- dir_->SaveChanges();
- dir_.reset();
- }
-
- void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result) {
- dir_->GetAllMetaHandles(trans, result);
- }
-
- bool IsInDirtyMetahandles(int64 metahandle) {
- return 1 == dir_->kernel_->dirty_metahandles.count(metahandle);
- }
-
- bool IsInMetahandlesToPurge(int64 metahandle) {
- return 1 == dir_->kernel_->metahandles_to_purge.count(metahandle);
- }
-
- void CheckPurgeEntriesWithTypeInSucceeded(ModelTypeSet types_to_purge,
- bool before_reload) {
- SCOPED_TRACE(testing::Message("Before reload: ") << before_reload);
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- MetahandleSet all_set;
- dir_->GetAllMetaHandles(&trans, &all_set);
- EXPECT_EQ(4U, all_set.size());
- if (before_reload)
- EXPECT_EQ(6U, dir_->kernel_->metahandles_to_purge.size());
- for (MetahandleSet::iterator iter = all_set.begin();
- iter != all_set.end(); ++iter) {
- Entry e(&trans, GET_BY_HANDLE, *iter);
- const ModelType local_type = e.GetModelType();
- const ModelType server_type = e.GetServerModelType();
-
- // Note the dance around incrementing |it|, since we sometimes erase().
- if ((IsRealDataType(local_type) &&
- types_to_purge.Has(local_type)) ||
- (IsRealDataType(server_type) &&
- types_to_purge.Has(server_type))) {
- FAIL() << "Illegal type should have been deleted.";
- }
- }
- }
-
- for (ModelTypeSet::Iterator it = types_to_purge.First();
- it.Good(); it.Inc()) {
- EXPECT_FALSE(dir_->InitialSyncEndedForType(it.Get()));
- }
- EXPECT_FALSE(types_to_purge.Has(BOOKMARKS));
- EXPECT_TRUE(dir_->InitialSyncEndedForType(BOOKMARKS));
- }
-
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
- scoped_ptr<Directory> dir_;
- NullDirectoryChangeDelegate delegate_;
-
- // Creates an empty entry and sets the ID field to a default one.
- void CreateEntry(const std::string& entryname) {
- CreateEntry(entryname, TestIdFactory::FromNumber(-99));
- }
-
- // Creates an empty entry and sets the ID field to id.
- void CreateEntry(const std::string& entryname, const int id) {
- CreateEntry(entryname, TestIdFactory::FromNumber(id));
- }
- void CreateEntry(const std::string& entryname, Id id) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry me(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), entryname);
- ASSERT_TRUE(me.good());
- me.PutId(id);
- me.PutIsUnsynced(true);
- }
-
- void ValidateEntry(BaseTransaction* trans,
- int64 id,
- bool check_name,
- const std::string& name,
- int64 base_version,
- int64 server_version,
- bool is_del);
-
- // When a directory is saved then loaded from disk, it will pass through
- // DropDeletedEntries(). This will remove some entries from the directory.
- // This function is intended to simulate that process.
- //
- // WARNING: The directory will be deleted by this operation. You should
- // not have any pointers to the directory (open transactions included)
- // when you call this.
- DirOpenResult SimulateSaveAndReloadDir();
-
- // This function will close and re-open the directory without saving any
- // pending changes. This is intended to simulate the recovery from a crash
- // scenario. The same warnings for SimulateSaveAndReloadDir apply here.
- DirOpenResult SimulateCrashAndReloadDir();
-
- private:
- // A helper function for Simulate{Save,Crash}AndReloadDir.
- DirOpenResult ReloadDirImpl();
-};
-
-TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
- const int metas_to_create = 50;
- MetahandleSet expected_purges;
- MetahandleSet all_handles;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (int i = 0; i < metas_to_create; i++) {
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), "foo");
- e.PutIsUnsynced(true);
- sync_pb::EntitySpecifics specs;
- if (i % 2 == 0) {
- AddDefaultFieldValue(BOOKMARKS, &specs);
- expected_purges.insert(e.GetMetahandle());
- all_handles.insert(e.GetMetahandle());
- } else {
- AddDefaultFieldValue(PREFERENCES, &specs);
- all_handles.insert(e.GetMetahandle());
- }
- e.PutSpecifics(specs);
- e.PutServerSpecifics(specs);
- }
- }
-
- ModelTypeSet to_purge(BOOKMARKS);
- dir_->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet(), ModelTypeSet());
-
- Directory::SaveChangesSnapshot snapshot1;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot1);
- EXPECT_TRUE(expected_purges == snapshot1.metahandles_to_purge);
-
- to_purge.Clear();
- to_purge.Put(PREFERENCES);
- dir_->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet(), ModelTypeSet());
-
- dir_->HandleSaveChangesFailure(snapshot1);
-
- Directory::SaveChangesSnapshot snapshot2;
- dir_->TakeSnapshotForSaveChanges(&snapshot2);
- EXPECT_TRUE(all_handles == snapshot2.metahandles_to_purge);
-}
-
-TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
- const int metahandles_to_create = 100;
- std::vector<int64> expected_dirty_metahandles;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (int i = 0; i < metahandles_to_create; i++) {
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), "foo");
- expected_dirty_metahandles.push_back(e.GetMetahandle());
- e.PutIsUnsynced(true);
- }
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there's an entry for each new metahandle. Make sure all
- // entries are marked dirty.
- ASSERT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- ASSERT_TRUE((*i)->is_dirty());
- }
- dir_->VacuumAfterSaveChanges(snapshot);
- }
- // Put a new value with existing transactions as well as adding new ones.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- std::vector<int64> new_dirty_metahandles;
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- // Change existing entries to directories to dirty them.
- MutableEntry e1(&trans, GET_BY_HANDLE, *i);
- e1.PutIsDir(true);
- e1.PutIsUnsynced(true);
- // Add new entries
- MutableEntry e2(&trans, CREATE, BOOKMARKS, trans.root_id(), "bar");
- e2.PutIsUnsynced(true);
- new_dirty_metahandles.push_back(e2.GetMetahandle());
- }
- expected_dirty_metahandles.insert(expected_dirty_metahandles.end(),
- new_dirty_metahandles.begin(), new_dirty_metahandles.end());
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there's an entry for each new metahandle. Make sure all
- // entries are marked dirty.
- EXPECT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- EXPECT_TRUE((*i)->is_dirty());
- }
- dir_->VacuumAfterSaveChanges(snapshot);
- }
-}
-
-TEST_F(SyncableDirectoryTest, TakeSnapshotGetsOnlyDirtyHandlesTest) {
- const int metahandles_to_create = 100;
-
- // half of 2 * metahandles_to_create
- const unsigned int number_changed = 100u;
- std::vector<int64> expected_dirty_metahandles;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (int i = 0; i < metahandles_to_create; i++) {
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), "foo");
- expected_dirty_metahandles.push_back(e.GetMetahandle());
- e.PutIsUnsynced(true);
- }
- }
- dir_->SaveChanges();
- // Put a new value with existing transactions as well as adding new ones.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- std::vector<int64> new_dirty_metahandles;
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- // Change existing entries to directories to dirty them.
- MutableEntry e1(&trans, GET_BY_HANDLE, *i);
- ASSERT_TRUE(e1.good());
- e1.PutIsDir(true);
- e1.PutIsUnsynced(true);
- // Add new entries
- MutableEntry e2(&trans, CREATE, BOOKMARKS, trans.root_id(), "bar");
- e2.PutIsUnsynced(true);
- new_dirty_metahandles.push_back(e2.GetMetahandle());
- }
- expected_dirty_metahandles.insert(expected_dirty_metahandles.end(),
- new_dirty_metahandles.begin(), new_dirty_metahandles.end());
- }
- dir_->SaveChanges();
- // Don't make any changes whatsoever and ensure nothing comes back.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- MutableEntry e(&trans, GET_BY_HANDLE, *i);
- ASSERT_TRUE(e.good());
- // We aren't doing anything to dirty these entries.
- }
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there are no dirty_metahandles.
- EXPECT_EQ(0u, snapshot.dirty_metas.size());
- dir_->VacuumAfterSaveChanges(snapshot);
- }
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- bool should_change = false;
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- // Maybe change entries by flipping IS_DIR.
- MutableEntry e(&trans, GET_BY_HANDLE, *i);
- ASSERT_TRUE(e.good());
- should_change = !should_change;
- if (should_change) {
- bool not_dir = !e.GetIsDir();
- e.PutIsDir(not_dir);
- e.PutIsUnsynced(true);
- }
- }
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there's an entry for each changed metahandle. Make sure all
- // entries are marked dirty.
- EXPECT_EQ(number_changed, snapshot.dirty_metas.size());
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- EXPECT_TRUE((*i)->is_dirty());
- }
- dir_->VacuumAfterSaveChanges(snapshot);
- }
-}
-
-// Test delete journals management.
-TEST_F(SyncableDirectoryTest, ManageDeleteJournals) {
- sync_pb::EntitySpecifics bookmark_specifics;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_specifics);
- bookmark_specifics.mutable_bookmark()->set_url("url");
-
- Id id1 = TestIdFactory::FromNumber(-1);
- Id id2 = TestIdFactory::FromNumber(-2);
- int64 handle1 = 0;
- int64 handle2 = 0;
- {
- // Create two bookmark entries and save in database.
- CreateEntry("item1", id1);
- CreateEntry("item2", id2);
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry item1(&trans, GET_BY_ID, id1);
- ASSERT_TRUE(item1.good());
- handle1 = item1.GetMetahandle();
- item1.PutSpecifics(bookmark_specifics);
- item1.PutServerSpecifics(bookmark_specifics);
- MutableEntry item2(&trans, GET_BY_ID, id2);
- ASSERT_TRUE(item2.good());
- handle2 = item2.GetMetahandle();
- item2.PutSpecifics(bookmark_specifics);
- item2.PutServerSpecifics(bookmark_specifics);
- }
- ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
- }
-
- { // Test adding and saving delete journals.
- DeleteJournal* delete_journal = dir_->delete_journal();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- EntryKernelSet journal_entries;
- delete_journal->GetDeleteJournals(&trans, BOOKMARKS, &journal_entries);
- ASSERT_EQ(0u, journal_entries.size());
-
- // Set SERVER_IS_DEL of the entries to true and they should be added to
- // delete journals.
- MutableEntry item1(&trans, GET_BY_ID, id1);
- ASSERT_TRUE(item1.good());
- item1.PutServerIsDel(true);
- MutableEntry item2(&trans, GET_BY_ID, id2);
- ASSERT_TRUE(item2.good());
- item2.PutServerIsDel(true);
- EntryKernel tmp;
- tmp.put(ID, id1);
- EXPECT_TRUE(delete_journal->delete_journals_.count(&tmp));
- tmp.put(ID, id2);
- EXPECT_TRUE(delete_journal->delete_journals_.count(&tmp));
- }
-
- // Save delete journals in database and verify memory clearing.
- ASSERT_TRUE(dir_->SaveChanges());
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_EQ(0u, delete_journal->GetDeleteJournalSize(&trans));
- }
- ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
- }
-
- {
- {
- // Test reading delete journals from database.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- DeleteJournal* delete_journal = dir_->delete_journal();
- EntryKernelSet journal_entries;
- delete_journal->GetDeleteJournals(&trans, BOOKMARKS, &journal_entries);
- ASSERT_EQ(2u, journal_entries.size());
- EntryKernel tmp;
- tmp.put(META_HANDLE, handle1);
- EXPECT_TRUE(journal_entries.count(&tmp));
- tmp.put(META_HANDLE, handle2);
- EXPECT_TRUE(journal_entries.count(&tmp));
-
- // Purge item2.
- MetahandleSet to_purge;
- to_purge.insert(handle2);
- delete_journal->PurgeDeleteJournals(&trans, to_purge);
-
- // Verify that item2 is purged from journals in memory and will be
- // purged from database.
- tmp.put(ID, id2);
- EXPECT_FALSE(delete_journal->delete_journals_.count(&tmp));
- EXPECT_EQ(1u, delete_journal->delete_journals_to_purge_.size());
- EXPECT_TRUE(delete_journal->delete_journals_to_purge_.count(handle2));
- }
- ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
- }
-
- {
- {
- // Verify purged entry is gone in database.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- DeleteJournal* delete_journal = dir_->delete_journal();
- EntryKernelSet journal_entries;
- delete_journal->GetDeleteJournals(&trans, BOOKMARKS, &journal_entries);
- ASSERT_EQ(1u, journal_entries.size());
- EntryKernel tmp;
- tmp.put(ID, id1);
- tmp.put(META_HANDLE, handle1);
- EXPECT_TRUE(journal_entries.count(&tmp));
-
- // Undelete item1.
- MutableEntry item1(&trans, GET_BY_ID, id1);
- ASSERT_TRUE(item1.good());
- item1.PutServerIsDel(false);
- EXPECT_TRUE(delete_journal->delete_journals_.empty());
- EXPECT_EQ(1u, delete_journal->delete_journals_to_purge_.size());
- EXPECT_TRUE(delete_journal->delete_journals_to_purge_.count(handle1));
- }
- ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
- }
-
- {
- // Verify undeleted entry is gone from database.
- ReadTransaction trans(FROM_HERE, dir_.get());
- DeleteJournal* delete_journal = dir_->delete_journal();
- ASSERT_EQ(0u, delete_journal->GetDeleteJournalSize(&trans));
- }
-}
-
-const char SyncableDirectoryTest::kName[] = "Foo";
-
-namespace {
-
-TEST_F(SyncableDirectoryTest, TestBasicLookupNonExistantID) {
- ReadTransaction rtrans(FROM_HERE, dir_.get());
- Entry e(&rtrans, GET_BY_ID, TestIdFactory::FromNumber(-99));
- ASSERT_FALSE(e.good());
-}
-
-TEST_F(SyncableDirectoryTest, TestBasicLookupValidID) {
- CreateEntry("rtc");
- ReadTransaction rtrans(FROM_HERE, dir_.get());
- Entry e(&rtrans, GET_BY_ID, TestIdFactory::FromNumber(-99));
- ASSERT_TRUE(e.good());
-}
-
-TEST_F(SyncableDirectoryTest, TestDelete) {
- std::string name = "peanut butter jelly time";
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry e1(&trans, CREATE, BOOKMARKS, trans.root_id(), name);
- ASSERT_TRUE(e1.good());
- e1.PutIsDel(true);
- MutableEntry e2(&trans, CREATE, BOOKMARKS, trans.root_id(), name);
- ASSERT_TRUE(e2.good());
- e2.PutIsDel(true);
- MutableEntry e3(&trans, CREATE, BOOKMARKS, trans.root_id(), name);
- ASSERT_TRUE(e3.good());
- e3.PutIsDel(true);
-
- e1.PutIsDel(false);
- e2.PutIsDel(false);
- e3.PutIsDel(false);
-
- e1.PutIsDel(true);
- e2.PutIsDel(true);
- e3.PutIsDel(true);
-}
-
-TEST_F(SyncableDirectoryTest, TestGetUnsynced) {
- Directory::Metahandles handles;
- int64 handle1, handle2;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e1(&trans, CREATE, BOOKMARKS, trans.root_id(), "abba");
- ASSERT_TRUE(e1.good());
- handle1 = e1.GetMetahandle();
- e1.PutBaseVersion(1);
- e1.PutIsDir(true);
- e1.PutId(TestIdFactory::FromNumber(101));
-
- MutableEntry e2(&trans, CREATE, BOOKMARKS, e1.GetId(), "bread");
- ASSERT_TRUE(e2.good());
- handle2 = e2.GetMetahandle();
- e2.PutBaseVersion(1);
- e2.PutId(TestIdFactory::FromNumber(102));
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e3.good());
- e3.PutIsUnsynced(true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle1 == handles[0]);
-
- MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
- ASSERT_TRUE(e4.good());
- e4.PutIsUnsynced(true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(2 == handles.size());
- if (handle1 == handles[0]) {
- ASSERT_TRUE(handle2 == handles[1]);
- } else {
- ASSERT_TRUE(handle2 == handles[0]);
- ASSERT_TRUE(handle1 == handles[1]);
- }
-
- MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e5.good());
- ASSERT_TRUE(e5.GetIsUnsynced());
- ASSERT_TRUE(e5.PutIsUnsynced(false));
- ASSERT_FALSE(e5.GetIsUnsynced());
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle2 == handles[0]);
- }
-}
-
-TEST_F(SyncableDirectoryTest, TestGetUnappliedUpdates) {
- std::vector<int64> handles;
- int64 handle1, handle2;
- const FullModelTypeSet all_types = FullModelTypeSet::All();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e1(&trans, CREATE, BOOKMARKS, trans.root_id(), "abba");
- ASSERT_TRUE(e1.good());
- handle1 = e1.GetMetahandle();
- e1.PutIsUnappliedUpdate(false);
- e1.PutBaseVersion(1);
- e1.PutId(TestIdFactory::FromNumber(101));
- e1.PutIsDir(true);
-
- MutableEntry e2(&trans, CREATE, BOOKMARKS, e1.GetId(), "bread");
- ASSERT_TRUE(e2.good());
- handle2 = e2.GetMetahandle();
- e2.PutIsUnappliedUpdate(false);
- e2.PutBaseVersion(1);
- e2.PutId(TestIdFactory::FromNumber(102));
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e3.good());
- e3.PutIsUnappliedUpdate(true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle1 == handles[0]);
-
- MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
- ASSERT_TRUE(e4.good());
- e4.PutIsUnappliedUpdate(true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(2 == handles.size());
- if (handle1 == handles[0]) {
- ASSERT_TRUE(handle2 == handles[1]);
- } else {
- ASSERT_TRUE(handle2 == handles[0]);
- ASSERT_TRUE(handle1 == handles[1]);
- }
-
- MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e5.good());
- e5.PutIsUnappliedUpdate(false);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle2 == handles[0]);
- }
-}
-
-
-TEST_F(SyncableDirectoryTest, DeleteBug_531383) {
- // Try to evoke a check failure...
- TestIdFactory id_factory;
- int64 grandchild_handle;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, id_factory.root(), "Bob");
- ASSERT_TRUE(parent.good());
- parent.PutIsDir(true);
- parent.PutId(id_factory.NewServerId());
- parent.PutBaseVersion(1);
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent.GetId(), "Bob");
- ASSERT_TRUE(child.good());
- child.PutIsDir(true);
- child.PutId(id_factory.NewServerId());
- child.PutBaseVersion(1);
- MutableEntry grandchild(&wtrans, CREATE, BOOKMARKS, child.GetId(), "Bob");
- ASSERT_TRUE(grandchild.good());
- grandchild.PutId(id_factory.NewServerId());
- grandchild.PutBaseVersion(1);
- grandchild.PutIsDel(true);
- MutableEntry twin(&wtrans, CREATE, BOOKMARKS, child.GetId(), "Bob");
- ASSERT_TRUE(twin.good());
- twin.PutIsDel(true);
- grandchild.PutIsDel(false);
-
- grandchild_handle = grandchild.GetMetahandle();
- }
- dir_->SaveChanges();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry grandchild(&wtrans, GET_BY_HANDLE, grandchild_handle);
- grandchild.PutIsDel(true); // Used to CHECK fail here.
- }
-}
-
-static inline bool IsLegalNewParent(const Entry& a, const Entry& b) {
- return IsLegalNewParent(a.trans(), a.GetId(), b.GetId());
-}
-
-TEST_F(SyncableDirectoryTest, TestIsLegalNewParent) {
- TestIdFactory id_factory;
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- Entry root(&wtrans, GET_BY_ID, id_factory.root());
- ASSERT_TRUE(root.good());
- MutableEntry parent(&wtrans, CREATE, BOOKMARKS, root.GetId(), "Bob");
- ASSERT_TRUE(parent.good());
- parent.PutIsDir(true);
- parent.PutId(id_factory.NewServerId());
- parent.PutBaseVersion(1);
- MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent.GetId(), "Bob");
- ASSERT_TRUE(child.good());
- child.PutIsDir(true);
- child.PutId(id_factory.NewServerId());
- child.PutBaseVersion(1);
- MutableEntry grandchild(&wtrans, CREATE, BOOKMARKS, child.GetId(), "Bob");
- ASSERT_TRUE(grandchild.good());
- grandchild.PutId(id_factory.NewServerId());
- grandchild.PutBaseVersion(1);
-
- MutableEntry parent2(&wtrans, CREATE, BOOKMARKS, root.GetId(), "Pete");
- ASSERT_TRUE(parent2.good());
- parent2.PutIsDir(true);
- parent2.PutId(id_factory.NewServerId());
- parent2.PutBaseVersion(1);
- MutableEntry child2(&wtrans, CREATE, BOOKMARKS, parent2.GetId(), "Pete");
- ASSERT_TRUE(child2.good());
- child2.PutIsDir(true);
- child2.PutId(id_factory.NewServerId());
- child2.PutBaseVersion(1);
- MutableEntry grandchild2(&wtrans, CREATE, BOOKMARKS, child2.GetId(), "Pete");
- ASSERT_TRUE(grandchild2.good());
- grandchild2.PutId(id_factory.NewServerId());
- grandchild2.PutBaseVersion(1);
- // resulting tree
- // root
- // / |
- // parent parent2
- // | |
- // child child2
- // | |
- // grandchild grandchild2
- ASSERT_TRUE(IsLegalNewParent(child, root));
- ASSERT_TRUE(IsLegalNewParent(child, parent));
- ASSERT_FALSE(IsLegalNewParent(child, child));
- ASSERT_FALSE(IsLegalNewParent(child, grandchild));
- ASSERT_TRUE(IsLegalNewParent(child, parent2));
- ASSERT_TRUE(IsLegalNewParent(child, grandchild2));
- ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
- ASSERT_FALSE(IsLegalNewParent(root, grandchild));
- ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
-}
-
-TEST_F(SyncableDirectoryTest, TestEntryIsInFolder) {
- // Create a subdir and an entry.
- int64 entry_handle;
- syncable::Id folder_id;
- syncable::Id entry_id;
- std::string entry_name = "entry";
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "folder");
- ASSERT_TRUE(folder.good());
- folder.PutIsDir(true);
- EXPECT_TRUE(folder.PutIsUnsynced(true));
- folder_id = folder.GetId();
-
- MutableEntry entry(&trans, CREATE, BOOKMARKS, folder.GetId(), entry_name);
- ASSERT_TRUE(entry.good());
- entry_handle = entry.GetMetahandle();
- entry.PutIsUnsynced(true);
- entry_id = entry.GetId();
- }
-
- // Make sure we can find the entry in the folder.
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), entry_name));
- EXPECT_EQ(1, CountEntriesWithName(&trans, folder_id, entry_name));
-
- Entry entry(&trans, GET_BY_ID, entry_id);
- ASSERT_TRUE(entry.good());
- EXPECT_EQ(entry_handle, entry.GetMetahandle());
- EXPECT_TRUE(entry.GetNonUniqueName()== entry_name);
- EXPECT_TRUE(entry.GetParentId()== folder_id);
- }
-}
-
-TEST_F(SyncableDirectoryTest, TestParentIdIndexUpdate) {
- std::string child_name = "child";
-
- WriteTransaction wt(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry parent_folder(&wt, CREATE, BOOKMARKS, wt.root_id(), "folder1");
- parent_folder.PutIsUnsynced(true);
- parent_folder.PutIsDir(true);
-
- MutableEntry parent_folder2(&wt, CREATE, BOOKMARKS, wt.root_id(), "folder2");
- parent_folder2.PutIsUnsynced(true);
- parent_folder2.PutIsDir(true);
-
- MutableEntry child(&wt, CREATE, BOOKMARKS, parent_folder.GetId(), child_name);
- child.PutIsDir(true);
- child.PutIsUnsynced(true);
-
- ASSERT_TRUE(child.good());
-
- EXPECT_EQ(0, CountEntriesWithName(&wt, wt.root_id(), child_name));
- EXPECT_EQ(parent_folder.GetId(), child.GetParentId());
- EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder.GetId(), child_name));
- EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder2.GetId(), child_name));
- child.PutParentId(parent_folder2.GetId());
- EXPECT_EQ(parent_folder2.GetId(), child.GetParentId());
- EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder.GetId(), child_name));
- EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder2.GetId(), child_name));
-}
-
-TEST_F(SyncableDirectoryTest, TestNoReindexDeletedItems) {
- std::string folder_name = "folder";
- std::string new_name = "new_name";
-
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), folder_name);
- ASSERT_TRUE(folder.good());
- folder.PutIsDir(true);
- folder.PutIsDel(true);
-
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
-
- MutableEntry deleted(&trans, GET_BY_ID, folder.GetId());
- ASSERT_TRUE(deleted.good());
- deleted.PutParentId(trans.root_id());
- deleted.PutNonUniqueName(new_name);
-
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), new_name));
-}
-
-TEST_F(SyncableDirectoryTest, TestCaseChangeRename) {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "CaseChange");
- ASSERT_TRUE(folder.good());
- folder.PutParentId(trans.root_id());
- folder.PutNonUniqueName("CASECHANGE");
- folder.PutIsDel(true);
-}
-
-// Create items of each model type, and check that GetModelType and
-// GetServerModelType return the right value.
-TEST_F(SyncableDirectoryTest, GetModelType) {
- TestIdFactory id_factory;
- ModelTypeSet protocol_types = ProtocolTypes();
- for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
- iter.Inc()) {
- ModelType datatype = iter.Get();
- SCOPED_TRACE(testing::Message("Testing model type ") << datatype);
- switch (datatype) {
- case UNSPECIFIED:
- case TOP_LEVEL_FOLDER:
- continue; // Datatype isn't a function of Specifics.
- default:
- break;
- }
- sync_pb::EntitySpecifics specifics;
- AddDefaultFieldValue(datatype, &specifics);
-
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "Folder");
- ASSERT_TRUE(folder.good());
- folder.PutId(id_factory.NewServerId());
- folder.PutSpecifics(specifics);
- folder.PutBaseVersion(1);
- folder.PutIsDir(true);
- folder.PutIsDel(false);
- ASSERT_EQ(datatype, folder.GetModelType());
-
- MutableEntry item(&trans, CREATE, BOOKMARKS, trans.root_id(), "Item");
- ASSERT_TRUE(item.good());
- item.PutId(id_factory.NewServerId());
- item.PutSpecifics(specifics);
- item.PutBaseVersion(1);
- item.PutIsDir(false);
- item.PutIsDel(false);
- ASSERT_EQ(datatype, item.GetModelType());
-
- // It's critical that deletion records retain their datatype, so that
- // they can be dispatched to the appropriate change processor.
- MutableEntry deleted_item(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "Deleted Item");
- ASSERT_TRUE(item.good());
- deleted_item.PutId(id_factory.NewServerId());
- deleted_item.PutSpecifics(specifics);
- deleted_item.PutBaseVersion(1);
- deleted_item.PutIsDir(false);
- deleted_item.PutIsDel(true);
- ASSERT_EQ(datatype, deleted_item.GetModelType());
-
- MutableEntry server_folder(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(server_folder.good());
- server_folder.PutServerSpecifics(specifics);
- server_folder.PutBaseVersion(1);
- server_folder.PutServerIsDir(true);
- server_folder.PutServerIsDel(false);
- ASSERT_EQ(datatype, server_folder.GetServerModelType());
-
- MutableEntry server_item(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(server_item.good());
- server_item.PutServerSpecifics(specifics);
- server_item.PutBaseVersion(1);
- server_item.PutServerIsDir(false);
- server_item.PutServerIsDel(false);
- ASSERT_EQ(datatype, server_item.GetServerModelType());
-
- sync_pb::SyncEntity folder_entity;
- folder_entity.set_id_string(SyncableIdToProto(id_factory.NewServerId()));
- folder_entity.set_deleted(false);
- folder_entity.set_folder(true);
- folder_entity.mutable_specifics()->CopyFrom(specifics);
- ASSERT_EQ(datatype, GetModelType(folder_entity));
-
- sync_pb::SyncEntity item_entity;
- item_entity.set_id_string(SyncableIdToProto(id_factory.NewServerId()));
- item_entity.set_deleted(false);
- item_entity.set_folder(false);
- item_entity.mutable_specifics()->CopyFrom(specifics);
- ASSERT_EQ(datatype, GetModelType(item_entity));
- }
-}
-
-// A test that roughly mimics the directory interaction that occurs when a
-// bookmark folder and entry are created then synced for the first time. It is
-// a more common variant of the 'DeletedAndUnsyncedChild' scenario tested below.
-TEST_F(SyncableDirectoryTest, ChangeEntryIDAndUpdateChildren_ParentAndChild) {
- TestIdFactory id_factory;
- Id orig_parent_id;
- Id orig_child_id;
-
- {
- // Create two client-side items, a parent and child.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry parent(&trans, CREATE, BOOKMARKS, id_factory.root(), "parent");
- parent.PutIsDir(true);
- parent.PutIsUnsynced(true);
-
- MutableEntry child(&trans, CREATE, BOOKMARKS, parent.GetId(), "child");
- child.PutIsUnsynced(true);
-
- orig_parent_id = parent.GetId();
- orig_child_id = child.GetId();
- }
-
- {
- // Simulate what happens after committing two items. Their IDs will be
- // replaced with server IDs. The child is renamed first, then the parent.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry parent(&trans, GET_BY_ID, orig_parent_id);
- MutableEntry child(&trans, GET_BY_ID, orig_child_id);
-
- ChangeEntryIDAndUpdateChildren(&trans, &child, id_factory.NewServerId());
- child.PutIsUnsynced(false);
- child.PutBaseVersion(1);
- child.PutServerVersion(1);
-
- ChangeEntryIDAndUpdateChildren(&trans, &parent, id_factory.NewServerId());
- parent.PutIsUnsynced(false);
- parent.PutBaseVersion(1);
- parent.PutServerVersion(1);
- }
-
- // Final check for validity.
- EXPECT_EQ(OPENED, SimulateSaveAndReloadDir());
-}
-
-// A test based on the scenario where we create a bookmark folder and entry
-// locally, but with a twist. In this case, the bookmark is deleted before we
-// are able to sync either it or its parent folder. This scenario used to cause
-// directory corruption, see crbug.com/125381.
-TEST_F(SyncableDirectoryTest,
- ChangeEntryIDAndUpdateChildren_DeletedAndUnsyncedChild) {
- TestIdFactory id_factory;
- Id orig_parent_id;
- Id orig_child_id;
-
- {
- // Create two client-side items, a parent and child.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry parent(&trans, CREATE, BOOKMARKS, id_factory.root(), "parent");
- parent.PutIsDir(true);
- parent.PutIsUnsynced(true);
-
- MutableEntry child(&trans, CREATE, BOOKMARKS, parent.GetId(), "child");
- child.PutIsUnsynced(true);
-
- orig_parent_id = parent.GetId();
- orig_child_id = child.GetId();
- }
-
- {
- // Delete the child.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry child(&trans, GET_BY_ID, orig_child_id);
- child.PutIsDel(true);
- }
-
- {
- // Simulate what happens after committing the parent. Its ID will be
- // replaced with server a ID.
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry parent(&trans, GET_BY_ID, orig_parent_id);
-
- ChangeEntryIDAndUpdateChildren(&trans, &parent, id_factory.NewServerId());
- parent.PutIsUnsynced(false);
- parent.PutBaseVersion(1);
- parent.PutServerVersion(1);
- }
-
- // Final check for validity.
- EXPECT_EQ(OPENED, SimulateSaveAndReloadDir());
-}
-
-// Ask the directory to generate a unique ID. Close and re-open the database
-// without saving, then ask for another unique ID. Verify IDs are not reused.
-// This scenario simulates a crash within the first few seconds of operation.
-TEST_F(SyncableDirectoryTest, LocalIdReuseTest) {
- Id pre_crash_id = dir_->NextId();
- SimulateCrashAndReloadDir();
- Id post_crash_id = dir_->NextId();
- EXPECT_NE(pre_crash_id, post_crash_id);
-}
-
-// Ask the directory to generate a unique ID. Save the directory. Close and
-// re-open the database without saving, then ask for another unique ID. Verify
-// IDs are not reused. This scenario simulates a steady-state crash.
-TEST_F(SyncableDirectoryTest, LocalIdReuseTestWithSave) {
- Id pre_crash_id = dir_->NextId();
- dir_->SaveChanges();
- SimulateCrashAndReloadDir();
- Id post_crash_id = dir_->NextId();
- EXPECT_NE(pre_crash_id, post_crash_id);
-}
-
-// Ensure that the unsynced, is_del and server unkown entries that may have been
-// left in the database by old clients will be deleted when we open the old
-// database.
-TEST_F(SyncableDirectoryTest, OldClientLeftUnsyncedDeletedLocalItem) {
- // We must create an entry with the offending properties. This is done with
- // some abuse of the MutableEntry's API; it doesn't expect us to modify an
- // item after it is deleted. If this hack becomes impractical we will need to
- // find a new way to simulate this scenario.
-
- TestIdFactory id_factory;
-
- // Happy-path: These valid entries should not get deleted.
- Id server_knows_id = id_factory.NewServerId();
- Id not_is_del_id = id_factory.NewLocalId();
-
- // The ID of the entry which will be unsynced, is_del and !ServerKnows().
- Id zombie_id = id_factory.NewLocalId();
-
- // We're about to do some bad things. Tell the directory verification
- // routines to look the other way.
- dir_->SetInvariantCheckLevel(OFF);
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- // Create an uncommitted tombstone entry.
- MutableEntry server_knows(&trans, CREATE, BOOKMARKS, id_factory.root(),
- "server_knows");
- server_knows.PutId(server_knows_id);
- server_knows.PutIsUnsynced(true);
- server_knows.PutIsDel(true);
- server_knows.PutBaseVersion(5);
- server_knows.PutServerVersion(4);
-
- // Create a valid update entry.
- MutableEntry not_is_del(
- &trans, CREATE, BOOKMARKS, id_factory.root(), "not_is_del");
- not_is_del.PutId(not_is_del_id);
- not_is_del.PutIsDel(false);
- not_is_del.PutIsUnsynced(true);
-
- // Create a tombstone which should never be sent to the server because the
- // server never knew about the item's existence.
- //
- // New clients should never put entries into this state. We work around
- // this by setting IS_DEL before setting IS_UNSYNCED, something which the
- // client should never do in practice.
- MutableEntry zombie(&trans, CREATE, BOOKMARKS, id_factory.root(), "zombie");
- zombie.PutId(zombie_id);
- zombie.PutIsDel(true);
- zombie.PutIsUnsynced(true);
- }
-
- ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
-
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
-
- // The directory loading routines should have cleaned things up, making it
- // safe to check invariants once again.
- dir_->FullyCheckTreeInvariants(&trans);
-
- Entry server_knows(&trans, GET_BY_ID, server_knows_id);
- EXPECT_TRUE(server_knows.good());
-
- Entry not_is_del(&trans, GET_BY_ID, not_is_del_id);
- EXPECT_TRUE(not_is_del.good());
-
- Entry zombie(&trans, GET_BY_ID, zombie_id);
- EXPECT_FALSE(zombie.good());
- }
-}
-
-TEST_F(SyncableDirectoryTest, PositionWithNullSurvivesSaveAndReload) {
- TestIdFactory id_factory;
- Id null_child_id;
- const char null_cstr[] = "\0null\0test";
- std::string null_str(null_cstr, arraysize(null_cstr) - 1);
- // Pad up to the minimum length with 0x7f characters, then add a string that
- // contains a few NULLs to the end. This is slightly wrong, since the suffix
- // part of a UniquePosition shouldn't contain NULLs, but it's good enough for
- // this test.
- std::string suffix =
- std::string(UniquePosition::kSuffixLength - null_str.length(), '\x7f')
- + null_str;
- UniquePosition null_pos = UniquePosition::FromInt64(10, suffix);
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry parent(&trans, CREATE, BOOKMARKS, id_factory.root(), "parent");
- parent.PutIsDir(true);
- parent.PutIsUnsynced(true);
-
- MutableEntry child(&trans, CREATE, BOOKMARKS, parent.GetId(), "child");
- child.PutIsUnsynced(true);
- child.PutUniquePosition(null_pos);
- child.PutServerUniquePosition(null_pos);
-
- null_child_id = child.GetId();
- }
-
- EXPECT_EQ(OPENED, SimulateSaveAndReloadDir());
-
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
-
- Entry null_ordinal_child(&trans, GET_BY_ID, null_child_id);
- EXPECT_TRUE(
- null_pos.Equals(null_ordinal_child.GetUniquePosition()));
- EXPECT_TRUE(
- null_pos.Equals(null_ordinal_child.GetServerUniquePosition()));
- }
-}
-
-// An OnDirectoryBackingStore that can be set to always fail SaveChanges.
-class TestBackingStore : public OnDiskDirectoryBackingStore {
- public:
- TestBackingStore(const std::string& dir_name,
- const base::FilePath& backing_filepath);
-
- virtual ~TestBackingStore();
-
- virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot)
- OVERRIDE;
-
- void StartFailingSaveChanges() {
- fail_save_changes_ = true;
- }
-
- private:
- bool fail_save_changes_;
-};
-
-TestBackingStore::TestBackingStore(const std::string& dir_name,
- const base::FilePath& backing_filepath)
- : OnDiskDirectoryBackingStore(dir_name, backing_filepath),
- fail_save_changes_(false) {
-}
-
-TestBackingStore::~TestBackingStore() { }
-
-bool TestBackingStore::SaveChanges(
- const Directory::SaveChangesSnapshot& snapshot){
- if (fail_save_changes_) {
- return false;
- } else {
- return OnDiskDirectoryBackingStore::SaveChanges(snapshot);
- }
-}
-
-// A directory whose Save() function can be set to always fail.
-class TestDirectory : public Directory {
- public:
- // A factory function used to work around some initialization order issues.
- static TestDirectory* Create(
- Encryptor *encryptor,
- UnrecoverableErrorHandler *handler,
- const std::string& dir_name,
- const base::FilePath& backing_filepath);
-
- virtual ~TestDirectory();
-
- void StartFailingSaveChanges() {
- backing_store_->StartFailingSaveChanges();
- }
-
- private:
- TestDirectory(Encryptor* encryptor,
- UnrecoverableErrorHandler* handler,
- TestBackingStore* backing_store);
-
- TestBackingStore* backing_store_;
-};
-
-TestDirectory* TestDirectory::Create(
- Encryptor *encryptor,
- UnrecoverableErrorHandler *handler,
- const std::string& dir_name,
- const base::FilePath& backing_filepath) {
- TestBackingStore* backing_store =
- new TestBackingStore(dir_name, backing_filepath);
- return new TestDirectory(encryptor, handler, backing_store);
-}
-
-TestDirectory::TestDirectory(Encryptor* encryptor,
- UnrecoverableErrorHandler* handler,
- TestBackingStore* backing_store)
- : Directory(backing_store, handler, NULL, NULL, NULL),
- backing_store_(backing_store) {
-}
-
-TestDirectory::~TestDirectory() { }
-
-TEST(OnDiskSyncableDirectory, FailInitialWrite) {
- FakeEncryptor encryptor;
- TestUnrecoverableErrorHandler handler;
- base::ScopedTempDir temp_dir;
- ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- base::FilePath file_path = temp_dir.path().Append(
- FILE_PATH_LITERAL("Test.sqlite3"));
- std::string name = "user@x.com";
- NullDirectoryChangeDelegate delegate;
-
- scoped_ptr<TestDirectory> test_dir(
- TestDirectory::Create(&encryptor, &handler, name, file_path));
-
- test_dir->StartFailingSaveChanges();
- ASSERT_EQ(FAILED_INITIAL_WRITE, test_dir->Open(name, &delegate,
- NullTransactionObserver()));
-}
-
-// A variant of SyncableDirectoryTest that uses a real sqlite database.
-class OnDiskSyncableDirectoryTest : public SyncableDirectoryTest {
- protected:
- // SetUp() is called before each test case is run.
- // The sqlite3 DB is deleted before each test is run.
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- file_path_ = temp_dir_.path().Append(
- FILE_PATH_LITERAL("Test.sqlite3"));
- base::DeleteFile(file_path_, true);
- CreateDirectory();
- }
-
- virtual void TearDown() {
- // This also closes file handles.
- dir_->SaveChanges();
- dir_.reset();
- base::DeleteFile(file_path_, true);
- }
-
- // Creates a new directory. Deletes the old directory, if it exists.
- void CreateDirectory() {
- test_directory_ =
- TestDirectory::Create(&encryptor_, &handler_, kName, file_path_);
- dir_.reset(test_directory_);
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->Open(kName, &delegate_,
- NullTransactionObserver()));
- ASSERT_TRUE(dir_->good());
- }
-
- void SaveAndReloadDir() {
- dir_->SaveChanges();
- CreateDirectory();
- }
-
- void StartFailingSaveChanges() {
- test_directory_->StartFailingSaveChanges();
- }
-
- TestDirectory *test_directory_; // mirrors scoped_ptr<Directory> dir_
- base::ScopedTempDir temp_dir_;
- base::FilePath file_path_;
-};
-
-TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
- sync_pb::EntitySpecifics bookmark_specs;
- sync_pb::EntitySpecifics autofill_specs;
- sync_pb::EntitySpecifics preference_specs;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
- AddDefaultFieldValue(PREFERENCES, &preference_specs);
- AddDefaultFieldValue(AUTOFILL, &autofill_specs);
-
- ModelTypeSet types_to_purge(PREFERENCES, AUTOFILL);
-
- TestIdFactory id_factory;
- // Create some items for each type.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- // Make it look like these types have completed initial sync.
- CreateTypeRoot(&trans, dir_.get(), BOOKMARKS);
- CreateTypeRoot(&trans, dir_.get(), PREFERENCES);
- CreateTypeRoot(&trans, dir_.get(), AUTOFILL);
-
- // Add more nodes for this type. Technically, they should be placed under
- // the proper type root nodes but the assertions in this test won't notice
- // if their parent isn't quite right.
- MutableEntry item1(&trans, CREATE, BOOKMARKS, trans.root_id(), "Item");
- ASSERT_TRUE(item1.good());
- item1.PutServerSpecifics(bookmark_specs);
- item1.PutIsUnsynced(true);
-
- MutableEntry item2(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(item2.good());
- item2.PutServerSpecifics(bookmark_specs);
- item2.PutIsUnappliedUpdate(true);
-
- MutableEntry item3(&trans, CREATE, PREFERENCES,
- trans.root_id(), "Item");
- ASSERT_TRUE(item3.good());
- item3.PutSpecifics(preference_specs);
- item3.PutServerSpecifics(preference_specs);
- item3.PutIsUnsynced(true);
-
- MutableEntry item4(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(item4.good());
- item4.PutServerSpecifics(preference_specs);
- item4.PutIsUnappliedUpdate(true);
-
- MutableEntry item5(&trans, CREATE, AUTOFILL,
- trans.root_id(), "Item");
- ASSERT_TRUE(item5.good());
- item5.PutSpecifics(autofill_specs);
- item5.PutServerSpecifics(autofill_specs);
- item5.PutIsUnsynced(true);
-
- MutableEntry item6(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(item6.good());
- item6.PutServerSpecifics(autofill_specs);
- item6.PutIsUnappliedUpdate(true);
- }
-
- dir_->SaveChanges();
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- MetahandleSet all_set;
- GetAllMetaHandles(&trans, &all_set);
- ASSERT_EQ(10U, all_set.size());
- }
-
- dir_->PurgeEntriesWithTypeIn(types_to_purge, ModelTypeSet(), ModelTypeSet());
-
- // We first query the in-memory data, and then reload the directory (without
- // saving) to verify that disk does not still have the data.
- CheckPurgeEntriesWithTypeInSucceeded(types_to_purge, true);
- SaveAndReloadDir();
- CheckPurgeEntriesWithTypeInSucceeded(types_to_purge, false);
-}
-
-TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
- dir_->set_store_birthday("Jan 31st");
- const char* const bag_of_chips_array = "\0bag of chips";
- const std::string bag_of_chips_string =
- std::string(bag_of_chips_array, sizeof(bag_of_chips_array));
- dir_->set_bag_of_chips(bag_of_chips_string);
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_EQ("Jan 31st", dir_->store_birthday());
- EXPECT_EQ(bag_of_chips_string, dir_->bag_of_chips());
- }
- dir_->set_store_birthday("April 10th");
- const char* const bag_of_chips2_array = "\0bag of chips2";
- const std::string bag_of_chips2_string =
- std::string(bag_of_chips2_array, sizeof(bag_of_chips2_array));
- dir_->set_bag_of_chips(bag_of_chips2_string);
- dir_->SaveChanges();
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_EQ("April 10th", dir_->store_birthday());
- EXPECT_EQ(bag_of_chips2_string, dir_->bag_of_chips());
- }
- const char* const bag_of_chips3_array = "\0bag of chips3";
- const std::string bag_of_chips3_string =
- std::string(bag_of_chips3_array, sizeof(bag_of_chips3_array));
- dir_->set_bag_of_chips(bag_of_chips3_string);
- // Restore the directory from disk. Make sure that nothing's changed.
- SaveAndReloadDir();
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_EQ("April 10th", dir_->store_birthday());
- EXPECT_EQ(bag_of_chips3_string, dir_->bag_of_chips());
- }
-}
-
-TEST_F(OnDiskSyncableDirectoryTest,
- TestSimpleFieldsPreservedDuringSaveChanges) {
- Id update_id = TestIdFactory::FromNumber(1);
- Id create_id;
- EntryKernel create_pre_save, update_pre_save;
- EntryKernel create_post_save, update_post_save;
- std::string create_name = "Create";
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry create(
- &trans, CREATE, BOOKMARKS, trans.root_id(), create_name);
- MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, update_id);
- create.PutIsUnsynced(true);
- update.PutIsUnappliedUpdate(true);
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_bookmark()->set_favicon("PNG");
- specifics.mutable_bookmark()->set_url("http://nowhere");
- create.PutSpecifics(specifics);
- update.PutSpecifics(specifics);
- create_pre_save = create.GetKernelCopy();
- update_pre_save = update.GetKernelCopy();
- create_id = create.GetId();
- }
-
- dir_->SaveChanges();
- dir_.reset(new Directory(new OnDiskDirectoryBackingStore(kName, file_path_),
- &handler_,
- NULL,
- NULL,
- NULL));
-
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->Open(kName, &delegate_, NullTransactionObserver()));
- ASSERT_TRUE(dir_->good());
-
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry create(&trans, GET_BY_ID, create_id);
- EXPECT_EQ(1, CountEntriesWithName(&trans, trans.root_id(), create_name));
- Entry update(&trans, GET_BY_ID, update_id);
- create_post_save = create.GetKernelCopy();
- update_post_save = update.GetKernelCopy();
- }
- int i = BEGIN_FIELDS;
- for ( ; i < INT64_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((Int64Field)i) +
- (i == TRANSACTION_VERSION ? 1 : 0),
- create_post_save.ref((Int64Field)i))
- << "int64 field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((Int64Field)i) +
- (i == TRANSACTION_VERSION ? 1 : 0),
- update_post_save.ref((Int64Field)i))
- << "int64 field #" << i << " changed during save/load";
- }
- for ( ; i < TIME_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((TimeField)i),
- create_post_save.ref((TimeField)i))
- << "time field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((TimeField)i),
- update_post_save.ref((TimeField)i))
- << "time field #" << i << " changed during save/load";
- }
- for ( ; i < ID_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((IdField)i),
- create_post_save.ref((IdField)i))
- << "id field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((IdField)i),
- update_pre_save.ref((IdField)i))
- << "id field #" << i << " changed during save/load";
- }
- for ( ; i < BIT_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((BitField)i),
- create_post_save.ref((BitField)i))
- << "Bit field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((BitField)i),
- update_post_save.ref((BitField)i))
- << "Bit field #" << i << " changed during save/load";
- }
- for ( ; i < STRING_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((StringField)i),
- create_post_save.ref((StringField)i))
- << "String field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((StringField)i),
- update_post_save.ref((StringField)i))
- << "String field #" << i << " changed during save/load";
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- EXPECT_EQ(create_pre_save.ref((ProtoField)i).SerializeAsString(),
- create_post_save.ref((ProtoField)i).SerializeAsString())
- << "Blob field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((ProtoField)i).SerializeAsString(),
- update_post_save.ref((ProtoField)i).SerializeAsString())
- << "Blob field #" << i << " changed during save/load";
- }
- for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
- EXPECT_TRUE(create_pre_save.ref((UniquePositionField)i).Equals(
- create_post_save.ref((UniquePositionField)i)))
- << "Position field #" << i << " changed during save/load";
- EXPECT_TRUE(update_pre_save.ref((UniquePositionField)i).Equals(
- update_post_save.ref((UniquePositionField)i)))
- << "Position field #" << i << " changed during save/load";
- }
-}
-
-TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailure) {
- int64 handle1 = 0;
- // Set up an item using a regular, saveable directory.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry e1(&trans, CREATE, BOOKMARKS, trans.root_id(), "aguilera");
- ASSERT_TRUE(e1.good());
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- handle1 = e1.GetMetahandle();
- e1.PutBaseVersion(1);
- e1.PutIsDir(true);
- e1.PutId(TestIdFactory::FromNumber(101));
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
- ASSERT_TRUE(dir_->SaveChanges());
-
- // Make sure the item is no longer dirty after saving,
- // and make a modification.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(aguilera.good());
- EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_EQ(aguilera.GetNonUniqueName(), "aguilera");
- aguilera.PutNonUniqueName("overwritten");
- EXPECT_TRUE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
- ASSERT_TRUE(dir_->SaveChanges());
-
- // Now do some operations when SaveChanges() will fail.
- StartFailingSaveChanges();
- ASSERT_TRUE(dir_->good());
-
- int64 handle2 = 0;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(aguilera.good());
- EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_EQ(aguilera.GetNonUniqueName(), "overwritten");
- EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_FALSE(IsInDirtyMetahandles(handle1));
- aguilera.PutNonUniqueName("christina");
- EXPECT_TRUE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
-
- // New item.
- MutableEntry kids_on_block(
- &trans, CREATE, BOOKMARKS, trans.root_id(), "kids");
- ASSERT_TRUE(kids_on_block.good());
- handle2 = kids_on_block.GetMetahandle();
- kids_on_block.PutBaseVersion(1);
- kids_on_block.PutIsDir(true);
- kids_on_block.PutId(TestIdFactory::FromNumber(102));
- EXPECT_TRUE(kids_on_block.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle2));
- }
-
- // We are using an unsaveable directory, so this can't succeed. However,
- // the HandleSaveChangesFailure code path should have been triggered.
- ASSERT_FALSE(dir_->SaveChanges());
-
- // Make sure things were rolled back and the world is as it was before call.
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry e1(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e1.good());
- EntryKernel aguilera = e1.GetKernelCopy();
- Entry kids(&trans, GET_BY_HANDLE, handle2);
- ASSERT_TRUE(kids.good());
- EXPECT_TRUE(kids.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle2));
- EXPECT_TRUE(aguilera.is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
-}
-
-TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailureWithPurge) {
- int64 handle1 = 0;
- // Set up an item using a regular, saveable directory.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry e1(&trans, CREATE, BOOKMARKS, trans.root_id(), "aguilera");
- ASSERT_TRUE(e1.good());
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- handle1 = e1.GetMetahandle();
- e1.PutBaseVersion(1);
- e1.PutIsDir(true);
- e1.PutId(TestIdFactory::FromNumber(101));
- sync_pb::EntitySpecifics bookmark_specs;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
- e1.PutSpecifics(bookmark_specs);
- e1.PutServerSpecifics(bookmark_specs);
- e1.PutId(TestIdFactory::FromNumber(101));
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
- ASSERT_TRUE(dir_->SaveChanges());
-
- // Now do some operations while SaveChanges() is set to fail.
- StartFailingSaveChanges();
- ASSERT_TRUE(dir_->good());
-
- ModelTypeSet set(BOOKMARKS);
- dir_->PurgeEntriesWithTypeIn(set, ModelTypeSet(), ModelTypeSet());
- EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
- ASSERT_FALSE(dir_->SaveChanges());
- EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
-}
-
-} // namespace
-
-void SyncableDirectoryTest::ValidateEntry(BaseTransaction* trans,
- int64 id,
- bool check_name,
- const std::string& name,
- int64 base_version,
- int64 server_version,
- bool is_del) {
- Entry e(trans, GET_BY_ID, TestIdFactory::FromNumber(id));
- ASSERT_TRUE(e.good());
- if (check_name)
- ASSERT_TRUE(name == e.GetNonUniqueName());
- ASSERT_TRUE(base_version == e.GetBaseVersion());
- ASSERT_TRUE(server_version == e.GetServerVersion());
- ASSERT_TRUE(is_del == e.GetIsDel());
-}
-
-DirOpenResult SyncableDirectoryTest::SimulateSaveAndReloadDir() {
- if (!dir_->SaveChanges())
- return FAILED_IN_UNITTEST;
-
- return ReloadDirImpl();
-}
-
-DirOpenResult SyncableDirectoryTest::SimulateCrashAndReloadDir() {
- return ReloadDirImpl();
-}
-
-DirOpenResult SyncableDirectoryTest::ReloadDirImpl() {
- // Do some tricky things to preserve the backing store.
- DirectoryBackingStore* saved_store = dir_->store_.release();
-
- // Close the current directory.
- dir_->Close();
- dir_.reset();
-
- dir_.reset(new Directory(saved_store,
- &handler_,
- NULL,
- NULL,
- NULL));
- DirOpenResult result = dir_->OpenImpl(kName, &delegate_,
- NullTransactionObserver());
-
- // If something went wrong, we need to clear this member. If we don't,
- // TearDown() will be guaranteed to crash when it calls SaveChanges().
- if (result != OPENED)
- dir_.reset();
-
- return result;
-}
-
-namespace {
-
-class SyncableDirectoryManagement : public testing::Test {
- public:
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- }
-
- virtual void TearDown() {
- }
- protected:
- base::MessageLoop message_loop_;
- base::ScopedTempDir temp_dir_;
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
- NullDirectoryChangeDelegate delegate_;
-};
-
-TEST_F(SyncableDirectoryManagement, TestFileRelease) {
- base::FilePath path = temp_dir_.path().Append(
- Directory::kSyncDatabaseFilename);
-
- syncable::Directory dir(new OnDiskDirectoryBackingStore("ScopeTest", path),
- &handler_,
- NULL,
- NULL,
- NULL);
- DirOpenResult result =
- dir.Open("ScopeTest", &delegate_, NullTransactionObserver());
- ASSERT_EQ(result, OPENED);
- dir.Close();
-
- // Closing the directory should have released the backing database file.
- ASSERT_TRUE(base::DeleteFile(path, true));
-}
-
-class StressTransactionsDelegate : public base::PlatformThread::Delegate {
- public:
- StressTransactionsDelegate(Directory* dir, int thread_number)
- : dir_(dir),
- thread_number_(thread_number) {}
-
- private:
- Directory* const dir_;
- const int thread_number_;
-
- // PlatformThread::Delegate methods:
- virtual void ThreadMain() OVERRIDE {
- int entry_count = 0;
- std::string path_name;
-
- for (int i = 0; i < 20; ++i) {
- const int rand_action = rand() % 10;
- if (rand_action < 4 && !path_name.empty()) {
- ReadTransaction trans(FROM_HERE, dir_);
- CHECK(1 == CountEntriesWithName(&trans, trans.root_id(), path_name));
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
- rand() % 10));
- } else {
- std::string unique_name =
- base::StringPrintf("%d.%d", thread_number_, entry_count++);
- path_name.assign(unique_name.begin(), unique_name.end());
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_);
- MutableEntry e(&trans, CREATE, BOOKMARKS, trans.root_id(), path_name);
- CHECK(e.good());
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
- rand() % 20));
- e.PutIsUnsynced(true);
- if (e.PutId(TestIdFactory::FromNumber(rand())) &&
- e.GetId().ServerKnows() && !e.GetId().IsRoot()) {
- e.PutBaseVersion(1);
- }
- }
- }
- }
-
- DISALLOW_COPY_AND_ASSIGN(StressTransactionsDelegate);
-};
-
-TEST(SyncableDirectory, StressTransactions) {
- base::MessageLoop message_loop;
- base::ScopedTempDir temp_dir;
- ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FakeEncryptor encryptor;
- TestUnrecoverableErrorHandler handler;
- NullDirectoryChangeDelegate delegate;
- std::string dirname = "stress";
- Directory dir(new InMemoryDirectoryBackingStore(dirname),
- &handler,
- NULL,
- NULL,
- NULL);
- dir.Open(dirname, &delegate, NullTransactionObserver());
-
- const int kThreadCount = 7;
- base::PlatformThreadHandle threads[kThreadCount];
- scoped_ptr<StressTransactionsDelegate> thread_delegates[kThreadCount];
-
- for (int i = 0; i < kThreadCount; ++i) {
- thread_delegates[i].reset(new StressTransactionsDelegate(&dir, i));
- ASSERT_TRUE(base::PlatformThread::Create(
- 0, thread_delegates[i].get(), &threads[i]));
- }
-
- for (int i = 0; i < kThreadCount; ++i) {
- base::PlatformThread::Join(threads[i]);
- }
-
- dir.Close();
-}
-
-class SyncableClientTagTest : public SyncableDirectoryTest {
- public:
- static const int kBaseVersion = 1;
- const char* test_name_;
- const char* test_tag_;
-
- SyncableClientTagTest() : test_name_("test_name"), test_tag_("dietcoke") {}
-
- bool CreateWithDefaultTag(Id id, bool deleted) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry me(&wtrans, CREATE, PREFERENCES,
- wtrans.root_id(), test_name_);
- CHECK(me.good());
- me.PutId(id);
- if (id.ServerKnows()) {
- me.PutBaseVersion(kBaseVersion);
- }
- me.PutIsUnsynced(true);
- me.PutIsDel(deleted);
- me.PutIsDir(false);
- return me.PutUniqueClientTag(test_tag_);
- }
-
- // Verify an entry exists with the default tag.
- void VerifyTag(Id id, bool deleted) {
- // Should still be present and valid in the client tag index.
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
- CHECK(me.good());
- EXPECT_EQ(me.GetId(), id);
- EXPECT_EQ(me.GetUniqueClientTag(), test_tag_);
- EXPECT_EQ(me.GetIsDel(), deleted);
-
- // We only sync deleted items that the server knew about.
- if (me.GetId().ServerKnows() || !me.GetIsDel()) {
- EXPECT_EQ(me.GetIsUnsynced(), true);
- }
- }
-
- protected:
- TestIdFactory factory_;
-};
-
-TEST_F(SyncableClientTagTest, TestClientTagClear) {
- Id server_id = factory_.NewServerId();
- EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
- EXPECT_TRUE(me.good());
- me.PutUniqueClientTag(std::string());
- }
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry by_tag(&trans, GET_BY_CLIENT_TAG, test_tag_);
- EXPECT_FALSE(by_tag.good());
-
- Entry by_id(&trans, GET_BY_ID, server_id);
- EXPECT_TRUE(by_id.good());
- EXPECT_TRUE(by_id.GetUniqueClientTag().empty());
- }
-}
-
-TEST_F(SyncableClientTagTest, TestClientTagIndexServerId) {
- Id server_id = factory_.NewServerId();
- EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
- VerifyTag(server_id, false);
-}
-
-TEST_F(SyncableClientTagTest, TestClientTagIndexClientId) {
- Id client_id = factory_.NewLocalId();
- EXPECT_TRUE(CreateWithDefaultTag(client_id, false));
- VerifyTag(client_id, false);
-}
-
-TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexClientId) {
- Id client_id = factory_.NewLocalId();
- EXPECT_TRUE(CreateWithDefaultTag(client_id, true));
- VerifyTag(client_id, true);
-}
-
-TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexServerId) {
- Id server_id = factory_.NewServerId();
- EXPECT_TRUE(CreateWithDefaultTag(server_id, true));
- VerifyTag(server_id, true);
-}
-
-TEST_F(SyncableClientTagTest, TestClientTagIndexDuplicateServer) {
- EXPECT_TRUE(CreateWithDefaultTag(factory_.NewServerId(), true));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), true));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), false));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), false));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), true));
-}
-
-} // namespace
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_util.cc b/chromium/sync/syncable/syncable_util.cc
deleted file mode 100644
index d92aa47ed46..00000000000
--- a/chromium/sync/syncable/syncable_util.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_util.h"
-
-#include "base/base64.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/sha1.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_id.h"
-#include "sync/syncable/syncable_write_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-// Returns the number of unsynced entries.
-int GetUnsyncedEntries(BaseTransaction* trans,
- std::vector<int64> *handles) {
- trans->directory()->GetUnsyncedMetaHandles(trans, handles);
- DVLOG_IF(1, !handles->empty()) << "Have " << handles->size()
- << " unsynced items.";
- return handles->size();
-}
-
-bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
- const Id& new_parent_id) {
- if (entry_id.IsRoot())
- return false;
- // we have to ensure that the entry is not an ancestor of the new parent.
- Id ancestor_id = new_parent_id;
- while (!ancestor_id.IsRoot()) {
- if (entry_id == ancestor_id)
- return false;
- Entry new_parent(trans, GET_BY_ID, ancestor_id);
- if (!SyncAssert(new_parent.good(),
- FROM_HERE,
- "Invalid new parent",
- trans))
- return false;
- ancestor_id = new_parent.GetParentId();
- }
- return true;
-}
-
-void ChangeEntryIDAndUpdateChildren(
- BaseWriteTransaction* trans,
- ModelNeutralMutableEntry* entry,
- const Id& new_id) {
- Id old_id = entry->GetId();
- if (!entry->PutId(new_id)) {
- Entry old_entry(trans, GET_BY_ID, new_id);
- CHECK(old_entry.good());
- LOG(FATAL) << "Attempt to change ID to " << new_id
- << " conflicts with existing entry.\n\n"
- << *entry << "\n\n" << old_entry;
- }
- if (entry->GetIsDir()) {
- // Get all child entries of the old id.
- Directory::Metahandles children;
- trans->directory()->GetChildHandlesById(trans, old_id, &children);
- Directory::Metahandles::iterator i = children.begin();
- while (i != children.end()) {
- ModelNeutralMutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
- CHECK(child_entry.good());
- // Use the unchecked setter here to avoid touching the child's
- // UNIQUE_POSITION field. In this case, UNIQUE_POSITION among the
- // children will be valid after the loop, since we update all the children
- // at once.
- child_entry.PutParentIdPropertyOnly(new_id);
- }
- }
-}
-
-// Function to handle runtime failures on syncable code. Rather than crashing,
-// if the |condition| is false the following will happen:
-// 1. Sets unrecoverable error on transaction.
-// 2. Returns false.
-bool SyncAssert(bool condition,
- const tracked_objects::Location& location,
- const char* msg,
- BaseTransaction* trans) {
- if (!condition) {
- trans->OnUnrecoverableError(location, msg);
- return false;
- }
- return true;
-}
-
-std::string GenerateSyncableHash(
- ModelType model_type, const std::string& client_tag) {
- // Blank PB with just the field in it has termination symbol,
- // handy for delimiter.
- sync_pb::EntitySpecifics serialized_type;
- AddDefaultFieldValue(model_type, &serialized_type);
- std::string hash_input;
- serialized_type.AppendToString(&hash_input);
- hash_input.append(client_tag);
-
- std::string encode_output;
- base::Base64Encode(base::SHA1HashString(hash_input), &encode_output);
- return encode_output;
-}
-
-std::string GenerateSyncableBookmarkHash(
- const std::string& originator_cache_guid,
- const std::string& originator_client_item_id) {
- return syncable::GenerateSyncableHash(
- BOOKMARKS, originator_cache_guid + originator_client_item_id);
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_util.h b/chromium/sync/syncable/syncable_util.h
deleted file mode 100644
index be903fd5954..00000000000
--- a/chromium/sync/syncable/syncable_util.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_UTIL_H_
-#define SYNC_SYNCABLE_SYNCABLE_UTIL_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-namespace tracked_objects {
-class Location;
-}
-
-namespace syncer {
-namespace syncable {
-
-class BaseTransaction;
-class BaseWriteTransaction;
-class ModelNeutralMutableEntry;
-class Id;
-
-SYNC_EXPORT_PRIVATE void ChangeEntryIDAndUpdateChildren(
- BaseWriteTransaction* trans,
- ModelNeutralMutableEntry* entry,
- const Id& new_id);
-
-SYNC_EXPORT_PRIVATE bool IsLegalNewParent(BaseTransaction* trans,
- const Id& id,
- const Id& parentid);
-
-bool SyncAssert(bool condition,
- const tracked_objects::Location& location,
- const char* msg,
- BaseTransaction* trans);
-
-SYNC_EXPORT_PRIVATE int GetUnsyncedEntries(BaseTransaction* trans,
- std::vector<int64> *handles);
-
-// Generates a fixed-length tag for the given string under the given model_type.
-SYNC_EXPORT_PRIVATE std::string GenerateSyncableHash(
- ModelType model_type, const std::string& client_tag);
-
-// A helper for generating the bookmark type's tag. This is required in more
-// than one place, so we define the algorithm here to make sure the
-// implementation is consistent.
-SYNC_EXPORT_PRIVATE std::string GenerateSyncableBookmarkHash(
- const std::string& originator_cache_guid,
- const std::string& originator_client_item_id);
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_UTIL_H_
diff --git a/chromium/sync/syncable/syncable_util_unittest.cc b/chromium/sync/syncable/syncable_util_unittest.cc
deleted file mode 100644
index 8f818f0a134..00000000000
--- a/chromium/sync/syncable/syncable_util_unittest.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/syncable_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace syncable {
-namespace {
-
-// Tests that the hashing algorithm has not changed.
-TEST(SyncableUtilTest, GenerateSyncableHash) {
- EXPECT_EQ("OyaXV5mEzrPS4wbogmtKvRfekAI=",
- GenerateSyncableHash(BOOKMARKS, "tag1"));
- EXPECT_EQ("iNFQtRFQb+IZcn1kKUJEZDDkLs4=",
- GenerateSyncableHash(PREFERENCES, "tag1"));
- EXPECT_EQ("gO1cPZQXaM73sHOvSA+tKCKFs58=",
- GenerateSyncableHash(AUTOFILL, "tag1"));
-
- EXPECT_EQ("A0eYIHXM1/jVwKDDp12Up20IkKY=",
- GenerateSyncableHash(BOOKMARKS, "tag2"));
- EXPECT_EQ("XYxkF7bhS4eItStFgiOIAU23swI=",
- GenerateSyncableHash(PREFERENCES, "tag2"));
- EXPECT_EQ("GFiWzo5NGhjLlN+OyCfhy28DJTQ=",
- GenerateSyncableHash(AUTOFILL, "tag2"));
-}
-
-} // namespace
-} // namespace syncer
-} // namespace syncable
diff --git a/chromium/sync/syncable/syncable_write_transaction.cc b/chromium/sync/syncable/syncable_write_transaction.cc
deleted file mode 100644
index d97ff6728aa..00000000000
--- a/chromium/sync/syncable/syncable_write_transaction.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/syncable_write_transaction.h"
-
-#include "sync/syncable/directory.h"
-#include "sync/syncable/directory_change_delegate.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/transaction_observer.h"
-#include "sync/syncable/write_transaction_info.h"
-
-namespace syncer {
-namespace syncable {
-
-const int64 kInvalidTransactionVersion = -1;
-
-WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
- WriterTag writer, Directory* directory)
- : BaseWriteTransaction(location, "WriteTransaction", writer, directory),
- transaction_version_(NULL) {
- Lock();
-}
-
-WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
- Directory* directory,
- int64* transaction_version)
- : BaseWriteTransaction(location, "WriteTransaction", SYNCAPI, directory),
- transaction_version_(transaction_version) {
- Lock();
- if (transaction_version_)
- *transaction_version_ = kInvalidTransactionVersion;
-}
-
-void WriteTransaction::TrackChangesTo(const EntryKernel* entry) {
- if (!entry) {
- return;
- }
- // Insert only if it's not already there.
- const int64 handle = entry->ref(META_HANDLE);
- EntryKernelMutationMap::iterator it = mutations_.lower_bound(handle);
- if (it == mutations_.end() || it->first != handle) {
- mutations_[handle].original = *entry;
- }
-}
-
-ImmutableEntryKernelMutationMap WriteTransaction::RecordMutations() {
- directory_->kernel_->transaction_mutex.AssertAcquired();
- for (syncable::EntryKernelMutationMap::iterator it = mutations_.begin();
- it != mutations_.end();) {
- EntryKernel* kernel = directory()->GetEntryByHandle(it->first);
- if (!kernel) {
- NOTREACHED();
- continue;
- }
- if (kernel->is_dirty()) {
- it->second.mutated = *kernel;
- ++it;
- } else {
- DCHECK(!it->second.original.is_dirty());
- // Not actually mutated, so erase from |mutations_|.
- mutations_.erase(it++);
- }
- }
- return ImmutableEntryKernelMutationMap(&mutations_);
-}
-
-void WriteTransaction::UnlockAndNotify(
- const ImmutableEntryKernelMutationMap& mutations) {
- // Work while transaction mutex is held.
- ModelTypeSet models_with_changes;
- bool has_mutations = !mutations.Get().empty();
- if (has_mutations) {
- models_with_changes = NotifyTransactionChangingAndEnding(mutations);
- }
- Unlock();
-
- // Work after mutex is relased.
- if (has_mutations) {
- NotifyTransactionComplete(models_with_changes);
- }
-}
-
-ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
- const ImmutableEntryKernelMutationMap& mutations) {
- directory_->kernel_->transaction_mutex.AssertAcquired();
- DCHECK(!mutations.Get().empty());
-
- WriteTransactionInfo write_transaction_info(
- directory_->kernel_->next_write_transaction_id,
- from_here_, writer_, mutations);
- ++directory_->kernel_->next_write_transaction_id;
-
- ImmutableWriteTransactionInfo immutable_write_transaction_info(
- &write_transaction_info);
- DirectoryChangeDelegate* const delegate = directory_->kernel_->delegate;
- std::vector<int64> entry_changed;
- if (writer_ == syncable::SYNCAPI) {
- delegate->HandleCalculateChangesChangeEventFromSyncApi(
- immutable_write_transaction_info, this, &entry_changed);
- } else {
- delegate->HandleCalculateChangesChangeEventFromSyncer(
- immutable_write_transaction_info, this, &entry_changed);
- }
- UpdateTransactionVersion(entry_changed);
-
- ModelTypeSet models_with_changes =
- delegate->HandleTransactionEndingChangeEvent(
- immutable_write_transaction_info, this);
-
- directory_->kernel_->transaction_observer.Call(FROM_HERE,
- &TransactionObserver::OnTransactionWrite,
- immutable_write_transaction_info, models_with_changes);
-
- return models_with_changes;
-}
-
-void WriteTransaction::NotifyTransactionComplete(
- ModelTypeSet models_with_changes) {
- directory_->kernel_->delegate->HandleTransactionCompleteChangeEvent(
- models_with_changes);
-}
-
-void WriteTransaction::UpdateTransactionVersion(
- const std::vector<int64>& entry_changed) {
- syncer::ModelTypeSet type_seen;
- for (uint32 i = 0; i < entry_changed.size(); ++i) {
- MutableEntry entry(this, GET_BY_HANDLE, entry_changed[i]);
- if (entry.good()) {
- ModelType type = GetModelTypeFromSpecifics(entry.GetSpecifics());
- if (type < FIRST_REAL_MODEL_TYPE)
- continue;
- if (!type_seen.Has(type)) {
- directory_->IncrementTransactionVersion(type);
- type_seen.Put(type);
- }
- entry.UpdateTransactionVersion(directory_->GetTransactionVersion(type));
- }
- }
-
- if (!type_seen.Empty() && transaction_version_) {
- DCHECK_EQ(1u, type_seen.Size());
- *transaction_version_ = directory_->GetTransactionVersion(
- type_seen.First().Get());
- }
-}
-
-WriteTransaction::~WriteTransaction() {
- const ImmutableEntryKernelMutationMap& mutations = RecordMutations();
-
- MetahandleSet modified_handles;
- for (EntryKernelMutationMap::const_iterator i = mutations.Get().begin();
- i != mutations.Get().end(); ++i) {
- modified_handles.insert(i->first);
- }
- directory()->CheckInvariantsOnTransactionClose(this, modified_handles);
-
- // |CheckTreeInvariants| could have thrown an unrecoverable error.
- if (unrecoverable_error_set_) {
- HandleUnrecoverableErrorIfSet();
- Unlock();
- return;
- }
-
- UnlockAndNotify(mutations);
-}
-
-#define ENUM_CASE(x) case x: return #x; break
-
-std::string WriterTagToString(WriterTag writer_tag) {
- switch (writer_tag) {
- ENUM_CASE(INVALID);
- ENUM_CASE(SYNCER);
- ENUM_CASE(AUTHWATCHER);
- ENUM_CASE(UNITTEST);
- ENUM_CASE(VACUUM_AFTER_SAVE);
- ENUM_CASE(HANDLE_SAVE_FAILURE);
- ENUM_CASE(PURGE_ENTRIES);
- ENUM_CASE(SYNCAPI);
- };
- NOTREACHED();
- return std::string();
-}
-
-#undef ENUM_CASE
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/syncable_write_transaction.h b/chromium/sync/syncable/syncable_write_transaction.h
deleted file mode 100644
index 4d16aca33ce..00000000000
--- a/chromium/sync/syncable/syncable_write_transaction.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_SYNCABLE_WRITE_TRANSACTION_H_
-#define SYNC_SYNCABLE_SYNCABLE_WRITE_TRANSACTION_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_base_write_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-SYNC_EXPORT extern const int64 kInvalidTransactionVersion;
-
-// Locks db in constructor, unlocks in destructor.
-class SYNC_EXPORT WriteTransaction : public BaseWriteTransaction {
- public:
- WriteTransaction(const tracked_objects::Location& from_here,
- WriterTag writer, Directory* directory);
-
- // Constructor used for getting back transaction version after making sync
- // API changes to one model. If model is changed by the transaction,
- // the new transaction version of the model and modified nodes will be saved
- // in |transaction_version| upon destruction of the transaction. If model is
- // not changed, |transaction_version| will be kInvalidTransactionVersion.
- WriteTransaction(const tracked_objects::Location& from_here,
- Directory* directory, int64* transaction_version);
-
- virtual ~WriteTransaction();
-
- virtual void TrackChangesTo(const EntryKernel* entry) OVERRIDE;
-
- protected:
- // Overridden by tests.
- virtual void NotifyTransactionComplete(ModelTypeSet models_with_changes);
-
- private:
- friend class MutableEntry;
-
- // Clears |mutations_|.
- ImmutableEntryKernelMutationMap RecordMutations();
-
- void UnlockAndNotify(const ImmutableEntryKernelMutationMap& mutations);
-
- ModelTypeSet NotifyTransactionChangingAndEnding(
- const ImmutableEntryKernelMutationMap& mutations);
-
- // Increment versions of the models whose entries are modified and set the
- // version on the changed entries.
- void UpdateTransactionVersion(const std::vector<int64>& entry_changed);
-
- // Only the original fields are filled in until |RecordMutations()|.
- // We use a mutation map instead of a kernel set to avoid copying.
- EntryKernelMutationMap mutations_;
-
- // Stores new transaction version of changed model and nodes if model is
- // indeed changed. kInvalidTransactionVersion otherwise. Not owned.
- int64* transaction_version_;
-
- DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_SYNCABLE_WRITE_TRANSACTION_H_
diff --git a/chromium/sync/syncable/transaction_observer.h b/chromium/sync/syncable/transaction_observer.h
deleted file mode 100644
index 183fd858728..00000000000
--- a/chromium/sync/syncable/transaction_observer.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
-#define SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
-
-#include "sync/base/sync_export.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/syncable/write_transaction_info.h"
-
-namespace syncer {
-namespace syncable {
-
-class SYNC_EXPORT_PRIVATE TransactionObserver {
- public:
- virtual void OnTransactionWrite(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- ModelTypeSet models_with_changes) = 0;
- protected:
- virtual ~TransactionObserver() {}
-};
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
diff --git a/chromium/sync/syncable/write_transaction_info.cc b/chromium/sync/syncable/write_transaction_info.cc
deleted file mode 100644
index 3f69da8c5ae..00000000000
--- a/chromium/sync/syncable/write_transaction_info.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/syncable/write_transaction_info.h"
-
-#include "base/strings/string_number_conversions.h"
-
-namespace syncer {
-namespace syncable {
-
-WriteTransactionInfo::WriteTransactionInfo(
- int64 id,
- tracked_objects::Location location,
- WriterTag writer,
- ImmutableEntryKernelMutationMap mutations)
- : id(id),
- location_string(location.ToString()),
- writer(writer),
- mutations(mutations) {}
-
-WriteTransactionInfo::WriteTransactionInfo()
- : id(-1), writer(INVALID) {}
-
-WriteTransactionInfo::~WriteTransactionInfo() {}
-
-base::DictionaryValue* WriteTransactionInfo::ToValue(
- size_t max_mutations_size) const {
- base::DictionaryValue* dict = new base::DictionaryValue();
- dict->SetString("id", base::Int64ToString(id));
- dict->SetString("location", location_string);
- dict->SetString("writer", WriterTagToString(writer));
- base::Value* mutations_value = NULL;
- const size_t mutations_size = mutations.Get().size();
- if (mutations_size <= max_mutations_size) {
- mutations_value = EntryKernelMutationMapToValue(mutations.Get());
- } else {
- mutations_value =
- new base::StringValue(
- base::Uint64ToString(static_cast<uint64>(mutations_size)) +
- " mutations");
- }
- dict->Set("mutations", mutations_value);
- return dict;
-}
-
-} // namespace syncable
-} // namespace syncer
diff --git a/chromium/sync/syncable/write_transaction_info.h b/chromium/sync/syncable/write_transaction_info.h
deleted file mode 100644
index 823a66c488d..00000000000
--- a/chromium/sync/syncable/write_transaction_info.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_SYNCABLE_WRITE_TRANSACTION_INFO_H_
-#define SYNC_SYNCABLE_WRITE_TRANSACTION_INFO_H_
-
-#include "sync/syncable/entry_kernel.h"
-#include "sync/syncable/syncable_base_transaction.h"
-
-namespace syncer {
-namespace syncable {
-
-// A struct describing the changes made during a transaction.
-struct WriteTransactionInfo {
- WriteTransactionInfo(int64 id,
- tracked_objects::Location location,
- WriterTag writer,
- ImmutableEntryKernelMutationMap mutations);
- WriteTransactionInfo();
- ~WriteTransactionInfo();
-
- // Caller owns the return value.
- base::DictionaryValue* ToValue(size_t max_mutations_size) const;
-
- int64 id;
- // If tracked_objects::Location becomes assignable, we can use that
- // instead.
- std::string location_string;
- WriterTag writer;
- ImmutableEntryKernelMutationMap mutations;
-};
-
-typedef
- Immutable<WriteTransactionInfo>
- ImmutableWriteTransactionInfo;
-
-} // namespace syncable
-} // namespace syncer
-
-#endif // SYNC_SYNCABLE_WRITE_TRANSACTION_INFO_H_
diff --git a/chromium/sync/tools/DEPS b/chromium/sync/tools/DEPS
deleted file mode 100644
index 322ec264008..00000000000
--- a/chromium/sync/tools/DEPS
+++ /dev/null
@@ -1,9 +0,0 @@
-include_rules = [
- "+jingle/notifier/base",
- "+net",
- "+sync/internal_api/public",
- "+sync/js",
- "+sync/notifier",
- # TODO(akalin): Remove this when we use the system encryptor.
- "+sync/test/fake_encryptor.h",
-]
diff --git a/chromium/sync/tools/null_invalidation_state_tracker.cc b/chromium/sync/tools/null_invalidation_state_tracker.cc
deleted file mode 100644
index 68237595f49..00000000000
--- a/chromium/sync/tools/null_invalidation_state_tracker.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/tools/null_invalidation_state_tracker.h"
-
-#include "base/base64.h"
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/task_runner.h"
-#include "sync/notifier/invalidation_util.h"
-
-namespace syncer {
-
-NullInvalidationStateTracker::NullInvalidationStateTracker() {}
-NullInvalidationStateTracker::~NullInvalidationStateTracker() {}
-
-void NullInvalidationStateTracker::SetInvalidatorClientId(
- const std::string& data) {
- LOG(INFO) << "Setting invalidator client ID to: " << data;
-}
-
-std::string NullInvalidationStateTracker::GetInvalidatorClientId() const {
- // The caller of this function is probably looking for an ID it can use to
- // identify this client as the originator of some notifiable change. It does
- // this so the invalidation server can prevent it from being notified of its
- // own changes. This invalidation state tracker doesn't remember its ID, so
- // it can't support this feature.
- NOTREACHED() << "This state tracker does not support reflection-blocking";
- return std::string();
-}
-
-std::string NullInvalidationStateTracker::GetBootstrapData() const {
- return std::string();
-}
-
-void NullInvalidationStateTracker::SetBootstrapData(const std::string& data) {
- std::string base64_data;
- base::Base64Encode(data, &base64_data);
- LOG(INFO) << "Setting bootstrap data to: " << base64_data;
-}
-
-void NullInvalidationStateTracker::Clear() {
- // We have no members to clear.
-}
-
-void NullInvalidationStateTracker::SetSavedInvalidations(
- const UnackedInvalidationsMap& states) {
- // Do nothing.
-}
-
-UnackedInvalidationsMap
-NullInvalidationStateTracker::GetSavedInvalidations() const {
- return UnackedInvalidationsMap();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/tools/null_invalidation_state_tracker.h b/chromium/sync/tools/null_invalidation_state_tracker.h
deleted file mode 100644
index a12844c3d06..00000000000
--- a/chromium/sync/tools/null_invalidation_state_tracker.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_TOOLS_NULL_INVALIDATION_STATE_TRACKER_H_
-#define SYNC_TOOLS_NULL_INVALIDATION_STATE_TRACKER_H_
-
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-
-namespace syncer {
-
-class NullInvalidationStateTracker
- : public base::SupportsWeakPtr<NullInvalidationStateTracker>,
- public InvalidationStateTracker {
- public:
- NullInvalidationStateTracker();
- virtual ~NullInvalidationStateTracker();
-
- virtual void SetInvalidatorClientId(const std::string& data) OVERRIDE;
- virtual std::string GetInvalidatorClientId() const OVERRIDE;
-
- virtual std::string GetBootstrapData() const OVERRIDE;
- virtual void SetBootstrapData(const std::string& data) OVERRIDE;
-
- virtual void SetSavedInvalidations(
- const UnackedInvalidationsMap& states) OVERRIDE;
- virtual UnackedInvalidationsMap GetSavedInvalidations() const OVERRIDE;
-
- virtual void Clear() OVERRIDE;
-};
-
-} // namespace syncer
-
-#endif // SYNC_TOOLS_NULL_INVALIDATION_STATE_TRACKER_H_
diff --git a/chromium/sync/tools/sync_client.cc b/chromium/sync/tools/sync_client.cc
deleted file mode 100644
index e5051040539..00000000000
--- a/chromium/sync/tools/sync_client.cc
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstddef>
-#include <cstdio>
-#include <string>
-
-#include "base/at_exit.h"
-#include "base/command_line.h"
-#include "base/compiler_specific.h"
-#include "base/debug/stack_trace.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/json/json_writer.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/rand_util.h"
-#include "base/task_runner.h"
-#include "base/threading/thread.h"
-#include "jingle/notifier/base/notification_method.h"
-#include "jingle/notifier/base/notifier_options.h"
-#include "net/base/host_port_pair.h"
-#include "net/base/network_change_notifier.h"
-#include "net/dns/host_resolver.h"
-#include "net/http/transport_security_state.h"
-#include "net/url_request/url_request_test_util.h"
-#include "sync/internal_api/public/base/cancelation_signal.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base_node.h"
-#include "sync/internal_api/public/engine/passive_model_worker.h"
-#include "sync/internal_api/public/http_bridge.h"
-#include "sync/internal_api/public/internal_components_factory_impl.h"
-#include "sync/internal_api/public/read_node.h"
-#include "sync/internal_api/public/sync_manager.h"
-#include "sync/internal_api/public/sync_manager_factory.h"
-#include "sync/internal_api/public/util/report_unrecoverable_error_function.h"
-#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
-#include "sync/internal_api/public/util/weak_handle.h"
-#include "sync/js/js_event_details.h"
-#include "sync/js/js_event_handler.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/non_blocking_invalidator.h"
-#include "sync/test/fake_encryptor.h"
-#include "sync/tools/null_invalidation_state_tracker.h"
-
-#if defined(OS_MACOSX)
-#include "base/mac/scoped_nsautorelease_pool.h"
-#endif
-
-// This is a simple utility that initializes a sync client and
-// prints out any events.
-
-// TODO(akalin): Refactor to combine shared code with
-// sync_listen_notifications.
-namespace syncer {
-namespace {
-
-const char kEmailSwitch[] = "email";
-const char kTokenSwitch[] = "token";
-const char kXmppHostPortSwitch[] = "xmpp-host-port";
-const char kXmppTrySslTcpFirstSwitch[] = "xmpp-try-ssltcp-first";
-const char kXmppAllowInsecureConnectionSwitch[] =
- "xmpp-allow-insecure-connection";
-
-// Needed to use a real host resolver.
-class MyTestURLRequestContext : public net::TestURLRequestContext {
- public:
- MyTestURLRequestContext() : TestURLRequestContext(true) {
- context_storage_.set_host_resolver(
- net::HostResolver::CreateDefaultResolver(NULL));
- context_storage_.set_transport_security_state(
- new net::TransportSecurityState());
- Init();
- }
-
- virtual ~MyTestURLRequestContext() {}
-};
-
-class MyTestURLRequestContextGetter : public net::TestURLRequestContextGetter {
- public:
- explicit MyTestURLRequestContextGetter(
- const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy)
- : TestURLRequestContextGetter(io_message_loop_proxy) {}
-
- virtual net::TestURLRequestContext* GetURLRequestContext() OVERRIDE {
- // Construct |context_| lazily so it gets constructed on the right
- // thread (the IO thread).
- if (!context_)
- context_.reset(new MyTestURLRequestContext());
- return context_.get();
- }
-
- private:
- virtual ~MyTestURLRequestContextGetter() {}
-
- scoped_ptr<MyTestURLRequestContext> context_;
-};
-
-// TODO(akalin): Use system encryptor once it's moved to sync/.
-class NullEncryptor : public Encryptor {
- public:
- virtual ~NullEncryptor() {}
-
- virtual bool EncryptString(const std::string& plaintext,
- std::string* ciphertext) OVERRIDE {
- *ciphertext = plaintext;
- return true;
- }
-
- virtual bool DecryptString(const std::string& ciphertext,
- std::string* plaintext) OVERRIDE {
- *plaintext = ciphertext;
- return true;
- }
-};
-
-std::string ValueToString(const Value& value) {
- std::string str;
- base::JSONWriter::Write(&value, &str);
- return str;
-}
-
-class LoggingChangeDelegate : public SyncManager::ChangeDelegate {
- public:
- virtual ~LoggingChangeDelegate() {}
-
- virtual void OnChangesApplied(
- ModelType model_type,
- int64 model_version,
- const BaseTransaction* trans,
- const ImmutableChangeRecordList& changes) OVERRIDE {
- LOG(INFO) << "Changes applied for "
- << ModelTypeToString(model_type);
- size_t i = 1;
- size_t change_count = changes.Get().size();
- for (ChangeRecordList::const_iterator it =
- changes.Get().begin(); it != changes.Get().end(); ++it) {
- scoped_ptr<base::DictionaryValue> change_value(it->ToValue());
- LOG(INFO) << "Change (" << i << "/" << change_count << "): "
- << ValueToString(*change_value);
- if (it->action != ChangeRecord::ACTION_DELETE) {
- ReadNode node(trans);
- CHECK_EQ(node.InitByIdLookup(it->id), BaseNode::INIT_OK);
- scoped_ptr<base::DictionaryValue> details(node.GetDetailsAsValue());
- VLOG(1) << "Details: " << ValueToString(*details);
- }
- ++i;
- }
- }
-
- virtual void OnChangesComplete(ModelType model_type) OVERRIDE {
- LOG(INFO) << "Changes complete for "
- << ModelTypeToString(model_type);
- }
-};
-
-class LoggingUnrecoverableErrorHandler
- : public UnrecoverableErrorHandler {
- public:
- virtual ~LoggingUnrecoverableErrorHandler() {}
-
- virtual void OnUnrecoverableError(const tracked_objects::Location& from_here,
- const std::string& message) OVERRIDE {
- if (LOG_IS_ON(ERROR)) {
- logging::LogMessage(from_here.file_name(), from_here.line_number(),
- logging::LOG_ERROR).stream()
- << message;
- }
- }
-};
-
-class LoggingJsEventHandler
- : public JsEventHandler,
- public base::SupportsWeakPtr<LoggingJsEventHandler> {
- public:
- virtual ~LoggingJsEventHandler() {}
-
- virtual void HandleJsEvent(
- const std::string& name,
- const JsEventDetails& details) OVERRIDE {
- VLOG(1) << name << ": " << details.ToString();
- }
-};
-
-void LogUnrecoverableErrorContext() {
- base::debug::StackTrace().Print();
-}
-
-notifier::NotifierOptions ParseNotifierOptions(
- const CommandLine& command_line,
- const scoped_refptr<net::URLRequestContextGetter>&
- request_context_getter) {
- notifier::NotifierOptions notifier_options;
- notifier_options.request_context_getter = request_context_getter;
- notifier_options.auth_mechanism = "X-OAUTH2";
-
- if (command_line.HasSwitch(kXmppHostPortSwitch)) {
- notifier_options.xmpp_host_port =
- net::HostPortPair::FromString(
- command_line.GetSwitchValueASCII(kXmppHostPortSwitch));
- LOG(INFO) << "Using " << notifier_options.xmpp_host_port.ToString()
- << " for test sync notification server.";
- }
-
- notifier_options.try_ssltcp_first =
- command_line.HasSwitch(kXmppTrySslTcpFirstSwitch);
- LOG_IF(INFO, notifier_options.try_ssltcp_first)
- << "Trying SSL/TCP port before XMPP port for notifications.";
-
- notifier_options.allow_insecure_connection =
- command_line.HasSwitch(kXmppAllowInsecureConnectionSwitch);
- LOG_IF(INFO, notifier_options.allow_insecure_connection)
- << "Allowing insecure XMPP connections.";
-
- return notifier_options;
-}
-
-void StubNetworkTimeUpdateCallback(const base::Time&,
- const base::TimeDelta&,
- const base::TimeDelta&) {
-}
-
-int SyncClientMain(int argc, char* argv[]) {
-#if defined(OS_MACOSX)
- base::mac::ScopedNSAutoreleasePool pool;
-#endif
- base::AtExitManager exit_manager;
- CommandLine::Init(argc, argv);
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- logging::InitLogging(settings);
-
- base::MessageLoop sync_loop;
- base::Thread io_thread("IO thread");
- base::Thread::Options options;
- options.message_loop_type = base::MessageLoop::TYPE_IO;
- io_thread.StartWithOptions(options);
-
- // Parse command line.
- const CommandLine& command_line = *CommandLine::ForCurrentProcess();
- SyncCredentials credentials;
- credentials.email = command_line.GetSwitchValueASCII(kEmailSwitch);
- credentials.sync_token = command_line.GetSwitchValueASCII(kTokenSwitch);
- // TODO(akalin): Write a wrapper script that gets a token for an
- // email and password and passes that in to this utility.
- if (credentials.email.empty() || credentials.sync_token.empty()) {
- std::printf("Usage: %s --%s=foo@bar.com --%s=token\n"
- "[--%s=host:port] [--%s] [--%s]\n"
- "Run chrome and set a breakpoint on\n"
- "syncer::SyncManagerImpl::UpdateCredentials() "
- "after logging into\n"
- "sync to get the token to pass into this utility.\n",
- argv[0],
- kEmailSwitch, kTokenSwitch, kXmppHostPortSwitch,
- kXmppTrySslTcpFirstSwitch,
- kXmppAllowInsecureConnectionSwitch);
- return -1;
- }
-
- // Set up objects that monitor the network.
- scoped_ptr<net::NetworkChangeNotifier> network_change_notifier(
- net::NetworkChangeNotifier::Create());
-
- // Set up sync notifier factory.
- const scoped_refptr<MyTestURLRequestContextGetter> context_getter =
- new MyTestURLRequestContextGetter(io_thread.message_loop_proxy());
- const notifier::NotifierOptions& notifier_options =
- ParseNotifierOptions(command_line, context_getter);
- const char kClientInfo[] = "standalone_sync_client";
- std::string invalidator_id = base::RandBytesAsString(8);
- NullInvalidationStateTracker null_invalidation_state_tracker;
- scoped_ptr<Invalidator> invalidator(new NonBlockingInvalidator(
- notifier_options,
- invalidator_id,
- null_invalidation_state_tracker.GetSavedInvalidations(),
- null_invalidation_state_tracker.GetBootstrapData(),
- WeakHandle<InvalidationStateTracker>(
- null_invalidation_state_tracker.AsWeakPtr()),
- kClientInfo));
-
- // Set up database directory for the syncer.
- base::ScopedTempDir database_dir;
- CHECK(database_dir.CreateUniqueTempDir());
-
- // Developers often add types to ModelTypeSet::All() before the server
- // supports them. We need to be explicit about which types we want here.
- ModelTypeSet model_types;
- model_types.Put(BOOKMARKS);
- model_types.Put(PREFERENCES);
- model_types.Put(PASSWORDS);
- model_types.Put(AUTOFILL);
- model_types.Put(THEMES);
- model_types.Put(TYPED_URLS);
- model_types.Put(EXTENSIONS);
- model_types.Put(NIGORI);
- model_types.Put(SEARCH_ENGINES);
- model_types.Put(SESSIONS);
- model_types.Put(APPS);
- model_types.Put(AUTOFILL_PROFILE);
- model_types.Put(APP_SETTINGS);
- model_types.Put(EXTENSION_SETTINGS);
- model_types.Put(APP_NOTIFICATIONS);
- model_types.Put(HISTORY_DELETE_DIRECTIVES);
- model_types.Put(SYNCED_NOTIFICATIONS);
- model_types.Put(DEVICE_INFO);
- model_types.Put(EXPERIMENTS);
- model_types.Put(PRIORITY_PREFERENCES);
- model_types.Put(DICTIONARY);
- model_types.Put(FAVICON_IMAGES);
- model_types.Put(FAVICON_TRACKING);
-
- ModelSafeRoutingInfo routing_info;
- for (ModelTypeSet::Iterator it = model_types.First();
- it.Good(); it.Inc()) {
- routing_info[it.Get()] = GROUP_PASSIVE;
- }
- scoped_refptr<PassiveModelWorker> passive_model_safe_worker =
- new PassiveModelWorker(&sync_loop, NULL);
- std::vector<ModelSafeWorker*> workers;
- workers.push_back(passive_model_safe_worker.get());
-
- // Set up sync manager.
- SyncManagerFactory sync_manager_factory;
- scoped_ptr<SyncManager> sync_manager =
- sync_manager_factory.CreateSyncManager("sync_client manager");
- LoggingJsEventHandler js_event_handler;
- const char kSyncServerAndPath[] = "clients4.google.com/chrome-sync/dev";
- int kSyncServerPort = 443;
- bool kUseSsl = true;
- // Used only by InitialProcessMetadata(), so it's okay to leave this as NULL.
- const scoped_refptr<base::TaskRunner> blocking_task_runner = NULL;
- const char kUserAgent[] = "sync_client";
- // TODO(akalin): Replace this with just the context getter once
- // HttpPostProviderFactory is removed.
- CancelationSignal factory_cancelation_signal;
- scoped_ptr<HttpPostProviderFactory> post_factory(
- new HttpBridgeFactory(context_getter.get(),
- base::Bind(&StubNetworkTimeUpdateCallback),
- &factory_cancelation_signal));
- post_factory->Init(kUserAgent);
- // Used only when committing bookmarks, so it's okay to leave this
- // as NULL.
- ExtensionsActivity* extensions_activity = NULL;
- LoggingChangeDelegate change_delegate;
- const char kRestoredKeyForBootstrapping[] = "";
- const char kRestoredKeystoreKeyForBootstrapping[] = "";
- NullEncryptor null_encryptor;
- InternalComponentsFactoryImpl::Switches factory_switches = {
- InternalComponentsFactory::ENCRYPTION_KEYSTORE,
- InternalComponentsFactory::BACKOFF_NORMAL
- };
- CancelationSignal scm_cancelation_signal;
-
- sync_manager->Init(database_dir.path(),
- WeakHandle<JsEventHandler>(
- js_event_handler.AsWeakPtr()),
- kSyncServerAndPath,
- kSyncServerPort,
- kUseSsl,
- post_factory.Pass(),
- workers,
- extensions_activity,
- &change_delegate,
- credentials,
- invalidator_id,
- kRestoredKeyForBootstrapping,
- kRestoredKeystoreKeyForBootstrapping,
- new InternalComponentsFactoryImpl(factory_switches),
- &null_encryptor,
- scoped_ptr<UnrecoverableErrorHandler>(
- new LoggingUnrecoverableErrorHandler).Pass(),
- &LogUnrecoverableErrorContext,
- &scm_cancelation_signal);
- // TODO(akalin): Avoid passing in model parameters multiple times by
- // organizing handling of model types.
- invalidator->UpdateCredentials(credentials.email, credentials.sync_token);
- invalidator->RegisterHandler(sync_manager.get());
- invalidator->UpdateRegisteredIds(
- sync_manager.get(), ModelTypeSetToObjectIdSet(model_types));
- sync_manager->StartSyncingNormally(routing_info);
-
- sync_loop.Run();
-
- io_thread.Stop();
- return 0;
-}
-
-} // namespace
-} // namespace syncer
-
-int main(int argc, char* argv[]) {
- return syncer::SyncClientMain(argc, argv);
-}
diff --git a/chromium/sync/tools/sync_listen_notifications.cc b/chromium/sync/tools/sync_listen_notifications.cc
deleted file mode 100644
index 5d212f3b8e2..00000000000
--- a/chromium/sync/tools/sync_listen_notifications.cc
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstddef>
-#include <cstdio>
-#include <string>
-
-#include "base/at_exit.h"
-#include "base/command_line.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/rand_util.h"
-#include "base/threading/thread.h"
-#include "jingle/notifier/base/notification_method.h"
-#include "jingle/notifier/base/notifier_options.h"
-#include "net/base/host_port_pair.h"
-#include "net/base/network_change_notifier.h"
-#include "net/dns/host_resolver.h"
-#include "net/http/transport_security_state.h"
-#include "net/url_request/url_request_test_util.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/notifier/invalidation_handler.h"
-#include "sync/notifier/invalidation_state_tracker.h"
-#include "sync/notifier/invalidation_util.h"
-#include "sync/notifier/invalidator.h"
-#include "sync/notifier/non_blocking_invalidator.h"
-#include "sync/notifier/object_id_invalidation_map.h"
-#include "sync/tools/null_invalidation_state_tracker.h"
-
-#if defined(OS_MACOSX)
-#include "base/mac/scoped_nsautorelease_pool.h"
-#endif
-
-// This is a simple utility that initializes a sync notifier and
-// listens to any received notifications.
-
-namespace syncer {
-namespace {
-
-const char kEmailSwitch[] = "email";
-const char kTokenSwitch[] = "token";
-const char kHostPortSwitch[] = "host-port";
-const char kTrySslTcpFirstSwitch[] = "try-ssltcp-first";
-const char kAllowInsecureConnectionSwitch[] = "allow-insecure-connection";
-
-// Class to print received notifications events.
-class NotificationPrinter : public InvalidationHandler {
- public:
- NotificationPrinter() {}
- virtual ~NotificationPrinter() {}
-
- virtual void OnInvalidatorStateChange(InvalidatorState state) OVERRIDE {
- LOG(INFO) << "Invalidator state changed to "
- << InvalidatorStateToString(state);
- }
-
- virtual void OnIncomingInvalidation(
- const ObjectIdInvalidationMap& invalidation_map) OVERRIDE {
- ObjectIdSet ids = invalidation_map.GetObjectIds();
- for (ObjectIdSet::const_iterator it = ids.begin(); it != ids.end(); ++it) {
- LOG(INFO) << "Remote invalidation: "
- << invalidation_map.ToString();
- }
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(NotificationPrinter);
-};
-
-// Needed to use a real host resolver.
-class MyTestURLRequestContext : public net::TestURLRequestContext {
- public:
- MyTestURLRequestContext() : TestURLRequestContext(true) {
- context_storage_.set_host_resolver(
- net::HostResolver::CreateDefaultResolver(NULL));
- context_storage_.set_transport_security_state(
- new net::TransportSecurityState());
- Init();
- }
-
- virtual ~MyTestURLRequestContext() {}
-};
-
-class MyTestURLRequestContextGetter : public net::TestURLRequestContextGetter {
- public:
- explicit MyTestURLRequestContextGetter(
- const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy)
- : TestURLRequestContextGetter(io_message_loop_proxy) {}
-
- virtual net::TestURLRequestContext* GetURLRequestContext() OVERRIDE {
- // Construct |context_| lazily so it gets constructed on the right
- // thread (the IO thread).
- if (!context_)
- context_.reset(new MyTestURLRequestContext());
- return context_.get();
- }
-
- private:
- virtual ~MyTestURLRequestContextGetter() {}
-
- scoped_ptr<MyTestURLRequestContext> context_;
-};
-
-notifier::NotifierOptions ParseNotifierOptions(
- const CommandLine& command_line,
- const scoped_refptr<net::URLRequestContextGetter>&
- request_context_getter) {
- notifier::NotifierOptions notifier_options;
- notifier_options.request_context_getter = request_context_getter;
-
- if (command_line.HasSwitch(kHostPortSwitch)) {
- notifier_options.xmpp_host_port =
- net::HostPortPair::FromString(
- command_line.GetSwitchValueASCII(kHostPortSwitch));
- LOG(INFO) << "Using " << notifier_options.xmpp_host_port.ToString()
- << " for test sync notification server.";
- }
-
- notifier_options.try_ssltcp_first =
- command_line.HasSwitch(kTrySslTcpFirstSwitch);
- LOG_IF(INFO, notifier_options.try_ssltcp_first)
- << "Trying SSL/TCP port before XMPP port for notifications.";
-
- notifier_options.allow_insecure_connection =
- command_line.HasSwitch(kAllowInsecureConnectionSwitch);
- LOG_IF(INFO, notifier_options.allow_insecure_connection)
- << "Allowing insecure XMPP connections.";
-
- return notifier_options;
-}
-
-int SyncListenNotificationsMain(int argc, char* argv[]) {
- using namespace syncer;
-#if defined(OS_MACOSX)
- base::mac::ScopedNSAutoreleasePool pool;
-#endif
- base::AtExitManager exit_manager;
- CommandLine::Init(argc, argv);
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- logging::InitLogging(settings);
-
- base::MessageLoop ui_loop;
- base::Thread io_thread("IO thread");
- base::Thread::Options options;
- options.message_loop_type = base::MessageLoop::TYPE_IO;
- io_thread.StartWithOptions(options);
-
- // Parse command line.
- const CommandLine& command_line = *CommandLine::ForCurrentProcess();
- std::string email = command_line.GetSwitchValueASCII(kEmailSwitch);
- std::string token = command_line.GetSwitchValueASCII(kTokenSwitch);
- // TODO(akalin): Write a wrapper script that gets a token for an
- // email and password and passes that in to this utility.
- if (email.empty() || token.empty()) {
- std::printf("Usage: %s --%s=foo@bar.com --%s=token\n"
- "[--%s=host:port] [--%s] [--%s]\n"
- "Run chrome and set a breakpoint on\n"
- "syncer::SyncManagerImpl::UpdateCredentials() "
- "after logging into\n"
- "sync to get the token to pass into this utility.\n",
- argv[0],
- kEmailSwitch, kTokenSwitch, kHostPortSwitch,
- kTrySslTcpFirstSwitch, kAllowInsecureConnectionSwitch);
- return -1;
- }
-
- // Set up objects that monitor the network.
- scoped_ptr<net::NetworkChangeNotifier> network_change_notifier(
- net::NetworkChangeNotifier::Create());
-
- const notifier::NotifierOptions& notifier_options =
- ParseNotifierOptions(
- command_line,
- new MyTestURLRequestContextGetter(io_thread.message_loop_proxy()));
- const char kClientInfo[] = "sync_listen_notifications";
- NullInvalidationStateTracker null_invalidation_state_tracker;
- scoped_ptr<Invalidator> invalidator(
- new NonBlockingInvalidator(
- notifier_options,
- base::RandBytesAsString(8),
- null_invalidation_state_tracker.GetSavedInvalidations(),
- null_invalidation_state_tracker.GetBootstrapData(),
- WeakHandle<InvalidationStateTracker>(
- null_invalidation_state_tracker.AsWeakPtr()),
- kClientInfo));
-
- NotificationPrinter notification_printer;
-
- invalidator->UpdateCredentials(email, token);
-
- // Listen for notifications for all known types.
- invalidator->RegisterHandler(&notification_printer);
- invalidator->UpdateRegisteredIds(
- &notification_printer, ModelTypeSetToObjectIdSet(ModelTypeSet::All()));
-
- ui_loop.Run();
-
- invalidator->UnregisterHandler(&notification_printer);
- io_thread.Stop();
- return 0;
-}
-
-} // namespace
-} // namespace syncer
-
-int main(int argc, char* argv[]) {
- return syncer::SyncListenNotificationsMain(argc, argv);
-}
diff --git a/chromium/sync/tools/sync_tools.gyp b/chromium/sync/tools/sync_tools.gyp
new file mode 100644
index 00000000000..20ca8ec410e
--- /dev/null
+++ b/chromium/sync/tools/sync_tools.gyp
@@ -0,0 +1,77 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+
+ 'targets': [
+ {
+ 'target_name': 'sync_tools_helper',
+ 'type': 'static_library',
+ 'defines': [
+ 'SYNC_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'dependencies': [
+ '../../base/base.gyp:base',
+ '../sync.gyp:sync',
+ ],
+ 'export_dependent_settings': [
+ '../../base/base.gyp:base',
+ '../sync.gyp:sync',
+ ],
+ 'sources': [
+ 'null_invalidation_state_tracker.cc',
+ 'null_invalidation_state_tracker.h',
+ ],
+ },
+ # A tool to listen to sync notifications and print them out.
+ {
+ 'target_name': 'sync_listen_notifications',
+ 'type': 'executable',
+ 'defines': [
+ 'SYNC_TEST',
+ ],
+ 'dependencies': [
+ '../../base/base.gyp:base',
+ '../../components/components.gyp:invalidation',
+ '../../jingle/jingle.gyp:notifier',
+ '../../net/net.gyp:net',
+ '../../net/net.gyp:net_test_support',
+ '../sync.gyp:sync',
+ 'sync_tools_helper',
+ ],
+ 'sources': [
+ 'sync_listen_notifications.cc',
+ ],
+ },
+
+ # A standalone command-line sync client.
+ {
+ 'target_name': 'sync_client',
+ 'type': 'executable',
+ 'defines': [
+ 'SYNC_TEST',
+ ],
+ 'dependencies': [
+ '../../base/base.gyp:base',
+ '../../components/components.gyp:invalidation',
+ '../../jingle/jingle.gyp:notifier',
+ '../../net/net.gyp:net',
+ '../../net/net.gyp:net_test_support',
+ '../sync.gyp:sync',
+ '../sync.gyp:test_support_sync_core',
+ 'sync_tools_helper',
+ ],
+ 'sources': [
+ 'sync_client.cc',
+ ],
+ },
+ ]
+}
+
diff --git a/chromium/sync/tools/testserver/DEPS b/chromium/sync/tools/testserver/DEPS
deleted file mode 100644
index f9b201f9df2..00000000000
--- a/chromium/sync/tools/testserver/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
- "+sync/test",
-]
diff --git a/chromium/sync/tools/testserver/OWNERS b/chromium/sync/tools/testserver/OWNERS
deleted file mode 100644
index e6284796e9f..00000000000
--- a/chromium/sync/tools/testserver/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-akalin@chromium.org
-nick@chromium.org
-rsimha@chromium.org
diff --git a/chromium/sync/tools/testserver/chromiumsync.py b/chromium/sync/tools/testserver/chromiumsync.py
deleted file mode 100644
index 496cb6ae7e8..00000000000
--- a/chromium/sync/tools/testserver/chromiumsync.py
+++ /dev/null
@@ -1,1643 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""An implementation of the server side of the Chromium sync protocol.
-
-The details of the protocol are described mostly by comments in the protocol
-buffer definition at chrome/browser/sync/protocol/sync.proto.
-"""
-
-import base64
-import cgi
-import copy
-import google.protobuf.text_format
-import hashlib
-import operator
-import pickle
-import random
-import string
-import sys
-import threading
-import time
-import urlparse
-import uuid
-
-import app_list_specifics_pb2
-import app_notification_specifics_pb2
-import app_setting_specifics_pb2
-import app_specifics_pb2
-import article_specifics_pb2
-import autofill_specifics_pb2
-import bookmark_specifics_pb2
-import client_commands_pb2
-import dictionary_specifics_pb2
-import get_updates_caller_info_pb2
-import extension_setting_specifics_pb2
-import extension_specifics_pb2
-import favicon_image_specifics_pb2
-import favicon_tracking_specifics_pb2
-import history_delete_directive_specifics_pb2
-import managed_user_setting_specifics_pb2
-import managed_user_specifics_pb2
-import nigori_specifics_pb2
-import password_specifics_pb2
-import preference_specifics_pb2
-import priority_preference_specifics_pb2
-import search_engine_specifics_pb2
-import session_specifics_pb2
-import sync_pb2
-import sync_enums_pb2
-import synced_notification_data_pb2
-import synced_notification_render_pb2
-import synced_notification_specifics_pb2
-import theme_specifics_pb2
-import typed_url_specifics_pb2
-
-# An enumeration of the various kinds of data that can be synced.
-# Over the wire, this enumeration is not used: a sync object's type is
-# inferred by which EntitySpecifics field it has. But in the context
-# of a program, it is useful to have an enumeration.
-ALL_TYPES = (
- TOP_LEVEL, # The type of the 'Google Chrome' folder.
- APPS,
- APP_LIST,
- APP_NOTIFICATION,
- APP_SETTINGS,
- ARTICLE,
- AUTOFILL,
- AUTOFILL_PROFILE,
- BOOKMARK,
- DEVICE_INFO,
- DICTIONARY,
- EXPERIMENTS,
- EXTENSIONS,
- HISTORY_DELETE_DIRECTIVE,
- MANAGED_USER_SETTING,
- MANAGED_USER,
- NIGORI,
- PASSWORD,
- PREFERENCE,
- PRIORITY_PREFERENCE,
- SEARCH_ENGINE,
- SESSION,
- SYNCED_NOTIFICATION,
- THEME,
- TYPED_URL,
- EXTENSION_SETTINGS,
- FAVICON_IMAGES,
- FAVICON_TRACKING) = range(28)
-
-# An enumeration on the frequency at which the server should send errors
-# to the client. This would be specified by the url that triggers the error.
-# Note: This enum should be kept in the same order as the enum in sync_test.h.
-SYNC_ERROR_FREQUENCY = (
- ERROR_FREQUENCY_NONE,
- ERROR_FREQUENCY_ALWAYS,
- ERROR_FREQUENCY_TWO_THIRDS) = range(3)
-
-# Well-known server tag of the top level 'Google Chrome' folder.
-TOP_LEVEL_FOLDER_TAG = 'google_chrome'
-
-# Given a sync type from ALL_TYPES, find the FieldDescriptor corresponding
-# to that datatype. Note that TOP_LEVEL has no such token.
-SYNC_TYPE_FIELDS = sync_pb2.EntitySpecifics.DESCRIPTOR.fields_by_name
-SYNC_TYPE_TO_DESCRIPTOR = {
- APP_LIST: SYNC_TYPE_FIELDS['app_list'],
- APP_NOTIFICATION: SYNC_TYPE_FIELDS['app_notification'],
- APP_SETTINGS: SYNC_TYPE_FIELDS['app_setting'],
- APPS: SYNC_TYPE_FIELDS['app'],
- ARTICLE: SYNC_TYPE_FIELDS['article'],
- AUTOFILL: SYNC_TYPE_FIELDS['autofill'],
- AUTOFILL_PROFILE: SYNC_TYPE_FIELDS['autofill_profile'],
- BOOKMARK: SYNC_TYPE_FIELDS['bookmark'],
- DEVICE_INFO: SYNC_TYPE_FIELDS['device_info'],
- DICTIONARY: SYNC_TYPE_FIELDS['dictionary'],
- EXPERIMENTS: SYNC_TYPE_FIELDS['experiments'],
- EXTENSION_SETTINGS: SYNC_TYPE_FIELDS['extension_setting'],
- EXTENSIONS: SYNC_TYPE_FIELDS['extension'],
- FAVICON_IMAGES: SYNC_TYPE_FIELDS['favicon_image'],
- FAVICON_TRACKING: SYNC_TYPE_FIELDS['favicon_tracking'],
- HISTORY_DELETE_DIRECTIVE: SYNC_TYPE_FIELDS['history_delete_directive'],
- MANAGED_USER_SETTING: SYNC_TYPE_FIELDS['managed_user_setting'],
- MANAGED_USER: SYNC_TYPE_FIELDS['managed_user'],
- NIGORI: SYNC_TYPE_FIELDS['nigori'],
- PASSWORD: SYNC_TYPE_FIELDS['password'],
- PREFERENCE: SYNC_TYPE_FIELDS['preference'],
- PRIORITY_PREFERENCE: SYNC_TYPE_FIELDS['priority_preference'],
- SEARCH_ENGINE: SYNC_TYPE_FIELDS['search_engine'],
- SESSION: SYNC_TYPE_FIELDS['session'],
- SYNCED_NOTIFICATION: SYNC_TYPE_FIELDS["synced_notification"],
- THEME: SYNC_TYPE_FIELDS['theme'],
- TYPED_URL: SYNC_TYPE_FIELDS['typed_url'],
- }
-
-# The parent ID used to indicate a top-level node.
-ROOT_ID = '0'
-
-# Unix time epoch +1 day in struct_time format. The tuple corresponds to
-# UTC Thursday Jan 2 1970, 00:00:00, non-dst.
-# We have to add one day after start of epoch, since in timezones with positive
-# UTC offset time.mktime throws an OverflowError,
-# rather then returning negative number.
-FIRST_DAY_UNIX_TIME_EPOCH = (1970, 1, 2, 0, 0, 0, 4, 2, 0)
-ONE_DAY_SECONDS = 60 * 60 * 24
-
-# The number of characters in the server-generated encryption key.
-KEYSTORE_KEY_LENGTH = 16
-
-# The hashed client tags for some experiment nodes.
-KEYSTORE_ENCRYPTION_EXPERIMENT_TAG = "pis8ZRzh98/MKLtVEio2mr42LQA="
-PRE_COMMIT_GU_AVOIDANCE_EXPERIMENT_TAG = "Z1xgeh3QUBa50vdEPd8C/4c7jfE="
-
-class Error(Exception):
- """Error class for this module."""
-
-
-class ProtobufDataTypeFieldNotUnique(Error):
- """An entry should not have more than one data type present."""
-
-
-class DataTypeIdNotRecognized(Error):
- """The requested data type is not recognized."""
-
-
-class MigrationDoneError(Error):
- """A server-side migration occurred; clients must re-sync some datatypes.
-
- Attributes:
- datatypes: a list of the datatypes (python enum) needing migration.
- """
-
- def __init__(self, datatypes):
- self.datatypes = datatypes
-
-
-class StoreBirthdayError(Error):
- """The client sent a birthday that doesn't correspond to this server."""
-
-
-class TransientError(Error):
- """The client would be sent a transient error."""
-
-
-class SyncInducedError(Error):
- """The client would be sent an error."""
-
-
-class InducedErrorFrequencyNotDefined(Error):
- """The error frequency defined is not handled."""
-
-
-class ClientNotConnectedError(Error):
- """The client is not connected to the server."""
-
-
-def GetEntryType(entry):
- """Extract the sync type from a SyncEntry.
-
- Args:
- entry: A SyncEntity protobuf object whose type to determine.
- Returns:
- An enum value from ALL_TYPES if the entry's type can be determined, or None
- if the type cannot be determined.
- Raises:
- ProtobufDataTypeFieldNotUnique: More than one type was indicated by
- the entry.
- """
- if entry.server_defined_unique_tag == TOP_LEVEL_FOLDER_TAG:
- return TOP_LEVEL
- entry_types = GetEntryTypesFromSpecifics(entry.specifics)
- if not entry_types:
- return None
-
- # If there is more than one, either there's a bug, or else the caller
- # should use GetEntryTypes.
- if len(entry_types) > 1:
- raise ProtobufDataTypeFieldNotUnique
- return entry_types[0]
-
-
-def GetEntryTypesFromSpecifics(specifics):
- """Determine the sync types indicated by an EntitySpecifics's field(s).
-
- If the specifics have more than one recognized data type field (as commonly
- happens with the requested_types field of GetUpdatesMessage), all types
- will be returned. Callers must handle the possibility of the returned
- value having more than one item.
-
- Args:
- specifics: A EntitySpecifics protobuf message whose extensions to
- enumerate.
- Returns:
- A list of the sync types (values from ALL_TYPES) associated with each
- recognized extension of the specifics message.
- """
- return [data_type for data_type, field_descriptor
- in SYNC_TYPE_TO_DESCRIPTOR.iteritems()
- if specifics.HasField(field_descriptor.name)]
-
-
-def SyncTypeToProtocolDataTypeId(data_type):
- """Convert from a sync type (python enum) to the protocol's data type id."""
- return SYNC_TYPE_TO_DESCRIPTOR[data_type].number
-
-
-def ProtocolDataTypeIdToSyncType(protocol_data_type_id):
- """Convert from the protocol's data type id to a sync type (python enum)."""
- for data_type, field_descriptor in SYNC_TYPE_TO_DESCRIPTOR.iteritems():
- if field_descriptor.number == protocol_data_type_id:
- return data_type
- raise DataTypeIdNotRecognized
-
-
-def DataTypeStringToSyncTypeLoose(data_type_string):
- """Converts a human-readable string to a sync type (python enum).
-
- Capitalization and pluralization don't matter; this function is appropriate
- for values that might have been typed by a human being; e.g., command-line
- flags or query parameters.
- """
- if data_type_string.isdigit():
- return ProtocolDataTypeIdToSyncType(int(data_type_string))
- name = data_type_string.lower().rstrip('s')
- for data_type, field_descriptor in SYNC_TYPE_TO_DESCRIPTOR.iteritems():
- if field_descriptor.name.lower().rstrip('s') == name:
- return data_type
- raise DataTypeIdNotRecognized
-
-
-def MakeNewKeystoreKey():
- """Returns a new random keystore key."""
- return ''.join(random.choice(string.ascii_uppercase + string.digits)
- for x in xrange(KEYSTORE_KEY_LENGTH))
-
-
-def SyncTypeToString(data_type):
- """Formats a sync type enum (from ALL_TYPES) to a human-readable string."""
- return SYNC_TYPE_TO_DESCRIPTOR[data_type].name
-
-
-def CallerInfoToString(caller_info_source):
- """Formats a GetUpdatesSource enum value to a readable string."""
- return get_updates_caller_info_pb2.GetUpdatesCallerInfo \
- .DESCRIPTOR.enum_types_by_name['GetUpdatesSource'] \
- .values_by_number[caller_info_source].name
-
-
-def ShortDatatypeListSummary(data_types):
- """Formats compactly a list of sync types (python enums) for human eyes.
-
- This function is intended for use by logging. If the list of datatypes
- contains almost all of the values, the return value will be expressed
- in terms of the datatypes that aren't set.
- """
- included = set(data_types) - set([TOP_LEVEL])
- if not included:
- return 'nothing'
- excluded = set(ALL_TYPES) - included - set([TOP_LEVEL])
- if not excluded:
- return 'everything'
- simple_text = '+'.join(sorted([SyncTypeToString(x) for x in included]))
- all_but_text = 'all except %s' % (
- '+'.join(sorted([SyncTypeToString(x) for x in excluded])))
- if len(included) < len(excluded) or len(simple_text) <= len(all_but_text):
- return simple_text
- else:
- return all_but_text
-
-
-def GetDefaultEntitySpecifics(data_type):
- """Get an EntitySpecifics having a sync type's default field value."""
- specifics = sync_pb2.EntitySpecifics()
- if data_type in SYNC_TYPE_TO_DESCRIPTOR:
- descriptor = SYNC_TYPE_TO_DESCRIPTOR[data_type]
- getattr(specifics, descriptor.name).SetInParent()
- return specifics
-
-
-class PermanentItem(object):
- """A specification of one server-created permanent item.
-
- Attributes:
- tag: A known-to-the-client value that uniquely identifies a server-created
- permanent item.
- name: The human-readable display name for this item.
- parent_tag: The tag of the permanent item's parent. If ROOT_ID, indicates
- a top-level item. Otherwise, this must be the tag value of some other
- server-created permanent item.
- sync_type: A value from ALL_TYPES, giving the datatype of this permanent
- item. This controls which types of client GetUpdates requests will
- cause the permanent item to be created and returned.
- create_by_default: Whether the permanent item is created at startup or not.
- This value is set to True in the default case. Non-default permanent items
- are those that are created only when a client explicitly tells the server
- to do so.
- """
-
- def __init__(self, tag, name, parent_tag, sync_type, create_by_default=True):
- self.tag = tag
- self.name = name
- self.parent_tag = parent_tag
- self.sync_type = sync_type
- self.create_by_default = create_by_default
-
-
-class MigrationHistory(object):
- """A record of the migration events associated with an account.
-
- Each migration event invalidates one or more datatypes on all clients
- that had synced the datatype before the event. Such clients will continue
- to receive MigrationDone errors until they throw away their progress and
- re-sync that datatype from the beginning.
- """
- def __init__(self):
- self._migrations = {}
- for datatype in ALL_TYPES:
- self._migrations[datatype] = [1]
- self._next_migration_version = 2
-
- def GetLatestVersion(self, datatype):
- return self._migrations[datatype][-1]
-
- def CheckAllCurrent(self, versions_map):
- """Raises an error if any the provided versions are out of date.
-
- This function intentionally returns migrations in the order that they were
- triggered. Doing it this way allows the client to queue up two migrations
- in a row, so the second one is received while responding to the first.
-
- Arguments:
- version_map: a map whose keys are datatypes and whose values are versions.
-
- Raises:
- MigrationDoneError: if a mismatch is found.
- """
- problems = {}
- for datatype, client_migration in versions_map.iteritems():
- for server_migration in self._migrations[datatype]:
- if client_migration < server_migration:
- problems.setdefault(server_migration, []).append(datatype)
- if problems:
- raise MigrationDoneError(problems[min(problems.keys())])
-
- def Bump(self, datatypes):
- """Add a record of a migration, to cause errors on future requests."""
- for idx, datatype in enumerate(datatypes):
- self._migrations[datatype].append(self._next_migration_version)
- self._next_migration_version += 1
-
-
-class UpdateSieve(object):
- """A filter to remove items the client has already seen."""
- def __init__(self, request, migration_history=None):
- self._original_request = request
- self._state = {}
- self._migration_history = migration_history or MigrationHistory()
- self._migration_versions_to_check = {}
- if request.from_progress_marker:
- for marker in request.from_progress_marker:
- data_type = ProtocolDataTypeIdToSyncType(marker.data_type_id)
- if marker.HasField('timestamp_token_for_migration'):
- timestamp = marker.timestamp_token_for_migration
- if timestamp:
- self._migration_versions_to_check[data_type] = 1
- elif marker.token:
- (timestamp, version) = pickle.loads(marker.token)
- self._migration_versions_to_check[data_type] = version
- elif marker.HasField('token'):
- timestamp = 0
- else:
- raise ValueError('No timestamp information in progress marker.')
- data_type = ProtocolDataTypeIdToSyncType(marker.data_type_id)
- self._state[data_type] = timestamp
- elif request.HasField('from_timestamp'):
- for data_type in GetEntryTypesFromSpecifics(request.requested_types):
- self._state[data_type] = request.from_timestamp
- self._migration_versions_to_check[data_type] = 1
- if self._state:
- self._state[TOP_LEVEL] = min(self._state.itervalues())
-
- def SummarizeRequest(self):
- timestamps = {}
- for data_type, timestamp in self._state.iteritems():
- if data_type == TOP_LEVEL:
- continue
- timestamps.setdefault(timestamp, []).append(data_type)
- return ', '.join('<%s>@%d' % (ShortDatatypeListSummary(types), stamp)
- for stamp, types in sorted(timestamps.iteritems()))
-
- def CheckMigrationState(self):
- self._migration_history.CheckAllCurrent(self._migration_versions_to_check)
-
- def ClientWantsItem(self, item):
- """Return true if the client hasn't already seen an item."""
- return self._state.get(GetEntryType(item), sys.maxint) < item.version
-
- def HasAnyTimestamp(self):
- """Return true if at least one datatype was requested."""
- return bool(self._state)
-
- def GetMinTimestamp(self):
- """Return true the smallest timestamp requested across all datatypes."""
- return min(self._state.itervalues())
-
- def GetFirstTimeTypes(self):
- """Return a list of datatypes requesting updates from timestamp zero."""
- return [datatype for datatype, timestamp in self._state.iteritems()
- if timestamp == 0]
-
- def GetCreateMobileBookmarks(self):
- """Return true if the client has requested to create the 'Mobile Bookmarks'
- folder.
- """
- return (self._original_request.HasField('create_mobile_bookmarks_folder')
- and self._original_request.create_mobile_bookmarks_folder)
-
- def SaveProgress(self, new_timestamp, get_updates_response):
- """Write the new_timestamp or new_progress_marker fields to a response."""
- if self._original_request.from_progress_marker:
- for data_type, old_timestamp in self._state.iteritems():
- if data_type == TOP_LEVEL:
- continue
- new_marker = sync_pb2.DataTypeProgressMarker()
- new_marker.data_type_id = SyncTypeToProtocolDataTypeId(data_type)
- final_stamp = max(old_timestamp, new_timestamp)
- final_migration = self._migration_history.GetLatestVersion(data_type)
- new_marker.token = pickle.dumps((final_stamp, final_migration))
- get_updates_response.new_progress_marker.add().MergeFrom(new_marker)
- elif self._original_request.HasField('from_timestamp'):
- if self._original_request.from_timestamp < new_timestamp:
- get_updates_response.new_timestamp = new_timestamp
-
-
-class SyncDataModel(object):
- """Models the account state of one sync user."""
- _BATCH_SIZE = 100
-
- # Specify all the permanent items that a model might need.
- _PERMANENT_ITEM_SPECS = [
- PermanentItem('google_chrome_apps', name='Apps',
- parent_tag=ROOT_ID, sync_type=APPS),
- PermanentItem('google_chrome_app_list', name='App List',
- parent_tag=ROOT_ID, sync_type=APP_LIST),
- PermanentItem('google_chrome_app_notifications', name='App Notifications',
- parent_tag=ROOT_ID, sync_type=APP_NOTIFICATION),
- PermanentItem('google_chrome_app_settings',
- name='App Settings',
- parent_tag=ROOT_ID, sync_type=APP_SETTINGS),
- PermanentItem('google_chrome_bookmarks', name='Bookmarks',
- parent_tag=ROOT_ID, sync_type=BOOKMARK),
- PermanentItem('bookmark_bar', name='Bookmark Bar',
- parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
- PermanentItem('other_bookmarks', name='Other Bookmarks',
- parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
- PermanentItem('synced_bookmarks', name='Synced Bookmarks',
- parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK,
- create_by_default=False),
- PermanentItem('google_chrome_autofill', name='Autofill',
- parent_tag=ROOT_ID, sync_type=AUTOFILL),
- PermanentItem('google_chrome_autofill_profiles', name='Autofill Profiles',
- parent_tag=ROOT_ID, sync_type=AUTOFILL_PROFILE),
- PermanentItem('google_chrome_device_info', name='Device Info',
- parent_tag=ROOT_ID, sync_type=DEVICE_INFO),
- PermanentItem('google_chrome_experiments', name='Experiments',
- parent_tag=ROOT_ID, sync_type=EXPERIMENTS),
- PermanentItem('google_chrome_extension_settings',
- name='Extension Settings',
- parent_tag=ROOT_ID, sync_type=EXTENSION_SETTINGS),
- PermanentItem('google_chrome_extensions', name='Extensions',
- parent_tag=ROOT_ID, sync_type=EXTENSIONS),
- PermanentItem('google_chrome_history_delete_directives',
- name='History Delete Directives',
- parent_tag=ROOT_ID,
- sync_type=HISTORY_DELETE_DIRECTIVE),
- PermanentItem('google_chrome_favicon_images',
- name='Favicon Images',
- parent_tag=ROOT_ID,
- sync_type=FAVICON_IMAGES),
- PermanentItem('google_chrome_favicon_tracking',
- name='Favicon Tracking',
- parent_tag=ROOT_ID,
- sync_type=FAVICON_TRACKING),
- PermanentItem('google_chrome_managed_user_settings',
- name='Managed User Settings',
- parent_tag=ROOT_ID, sync_type=MANAGED_USER_SETTING),
- PermanentItem('google_chrome_managed_users',
- name='Managed Users',
- parent_tag=ROOT_ID, sync_type=MANAGED_USER),
- PermanentItem('google_chrome_nigori', name='Nigori',
- parent_tag=ROOT_ID, sync_type=NIGORI),
- PermanentItem('google_chrome_passwords', name='Passwords',
- parent_tag=ROOT_ID, sync_type=PASSWORD),
- PermanentItem('google_chrome_preferences', name='Preferences',
- parent_tag=ROOT_ID, sync_type=PREFERENCE),
- PermanentItem('google_chrome_priority_preferences',
- name='Priority Preferences',
- parent_tag=ROOT_ID, sync_type=PRIORITY_PREFERENCE),
- PermanentItem('google_chrome_synced_notifications',
- name='Synced Notifications',
- parent_tag=ROOT_ID, sync_type=SYNCED_NOTIFICATION),
- PermanentItem('google_chrome_search_engines', name='Search Engines',
- parent_tag=ROOT_ID, sync_type=SEARCH_ENGINE),
- PermanentItem('google_chrome_sessions', name='Sessions',
- parent_tag=ROOT_ID, sync_type=SESSION),
- PermanentItem('google_chrome_themes', name='Themes',
- parent_tag=ROOT_ID, sync_type=THEME),
- PermanentItem('google_chrome_typed_urls', name='Typed URLs',
- parent_tag=ROOT_ID, sync_type=TYPED_URL),
- PermanentItem('google_chrome_dictionary', name='Dictionary',
- parent_tag=ROOT_ID, sync_type=DICTIONARY),
- PermanentItem('google_chrome_articles', name='Articles',
- parent_tag=ROOT_ID, sync_type=ARTICLE),
- ]
-
- def __init__(self):
- # Monotonically increasing version number. The next object change will
- # take on this value + 1.
- self._version = 0
-
- # The definitive copy of this client's items: a map from ID string to a
- # SyncEntity protocol buffer.
- self._entries = {}
-
- self.ResetStoreBirthday()
- self.migration_history = MigrationHistory()
- self.induced_error = sync_pb2.ClientToServerResponse.Error()
- self.induced_error_frequency = 0
- self.sync_count_before_errors = 0
- self.acknowledge_managed_users = False
- self._keys = [MakeNewKeystoreKey()]
-
- def _SaveEntry(self, entry):
- """Insert or update an entry in the change log, and give it a new version.
-
- The ID fields of this entry are assumed to be valid server IDs. This
- entry will be updated with a new version number and sync_timestamp.
-
- Args:
- entry: The entry to be added or updated.
- """
- self._version += 1
- # Maintain a global (rather than per-item) sequence number and use it
- # both as the per-entry version as well as the update-progress timestamp.
- # This simulates the behavior of the original server implementation.
- entry.version = self._version
- entry.sync_timestamp = self._version
-
- # Preserve the originator info, which the client is not required to send
- # when updating.
- base_entry = self._entries.get(entry.id_string)
- if base_entry:
- entry.originator_cache_guid = base_entry.originator_cache_guid
- entry.originator_client_item_id = base_entry.originator_client_item_id
-
- self._entries[entry.id_string] = copy.deepcopy(entry)
-
- def _ServerTagToId(self, tag):
- """Determine the server ID from a server-unique tag.
-
- The resulting value is guaranteed not to collide with the other ID
- generation methods.
-
- Args:
- datatype: The sync type (python enum) of the identified object.
- tag: The unique, known-to-the-client tag of a server-generated item.
- Returns:
- The string value of the computed server ID.
- """
- if not tag or tag == ROOT_ID:
- return tag
- spec = [x for x in self._PERMANENT_ITEM_SPECS if x.tag == tag][0]
- return self._MakeCurrentId(spec.sync_type, '<server tag>%s' % tag)
-
- def _ClientTagToId(self, datatype, tag):
- """Determine the server ID from a client-unique tag.
-
- The resulting value is guaranteed not to collide with the other ID
- generation methods.
-
- Args:
- datatype: The sync type (python enum) of the identified object.
- tag: The unique, opaque-to-the-server tag of a client-tagged item.
- Returns:
- The string value of the computed server ID.
- """
- return self._MakeCurrentId(datatype, '<client tag>%s' % tag)
-
- def _ClientIdToId(self, datatype, client_guid, client_item_id):
- """Compute a unique server ID from a client-local ID tag.
-
- The resulting value is guaranteed not to collide with the other ID
- generation methods.
-
- Args:
- datatype: The sync type (python enum) of the identified object.
- client_guid: A globally unique ID that identifies the client which
- created this item.
- client_item_id: An ID that uniquely identifies this item on the client
- which created it.
- Returns:
- The string value of the computed server ID.
- """
- # Using the client ID info is not required here (we could instead generate
- # a random ID), but it's useful for debugging.
- return self._MakeCurrentId(datatype,
- '<server ID originally>%s/%s' % (client_guid, client_item_id))
-
- def _MakeCurrentId(self, datatype, inner_id):
- return '%d^%d^%s' % (datatype,
- self.migration_history.GetLatestVersion(datatype),
- inner_id)
-
- def _ExtractIdInfo(self, id_string):
- if not id_string or id_string == ROOT_ID:
- return None
- datatype_string, separator, remainder = id_string.partition('^')
- migration_version_string, separator, inner_id = remainder.partition('^')
- return (int(datatype_string), int(migration_version_string), inner_id)
-
- def _WritePosition(self, entry, parent_id):
- """Ensure the entry has an absolute, numeric position and parent_id.
-
- Historically, clients would specify positions using the predecessor-based
- references in the insert_after_item_id field; starting July 2011, this
- was changed and Chrome now sends up the absolute position. The server
- must store a position_in_parent value and must not maintain
- insert_after_item_id.
- Starting in Jan 2013, the client will also send up a unique_position field
- which should be saved and returned on subsequent GetUpdates.
-
- Args:
- entry: The entry for which to write a position. Its ID field are
- assumed to be server IDs. This entry will have its parent_id_string,
- position_in_parent and unique_position fields updated; its
- insert_after_item_id field will be cleared.
- parent_id: The ID of the entry intended as the new parent.
- """
-
- entry.parent_id_string = parent_id
- if not entry.HasField('position_in_parent'):
- entry.position_in_parent = 1337 # A debuggable, distinctive default.
- entry.ClearField('insert_after_item_id')
-
- def _ItemExists(self, id_string):
- """Determine whether an item exists in the changelog."""
- return id_string in self._entries
-
- def _CreatePermanentItem(self, spec):
- """Create one permanent item from its spec, if it doesn't exist.
-
- The resulting item is added to the changelog.
-
- Args:
- spec: A PermanentItem object holding the properties of the item to create.
- """
- id_string = self._ServerTagToId(spec.tag)
- if self._ItemExists(id_string):
- return
- print 'Creating permanent item: %s' % spec.name
- entry = sync_pb2.SyncEntity()
- entry.id_string = id_string
- entry.non_unique_name = spec.name
- entry.name = spec.name
- entry.server_defined_unique_tag = spec.tag
- entry.folder = True
- entry.deleted = False
- entry.specifics.CopyFrom(GetDefaultEntitySpecifics(spec.sync_type))
- self._WritePosition(entry, self._ServerTagToId(spec.parent_tag))
- self._SaveEntry(entry)
-
- def _CreateDefaultPermanentItems(self, requested_types):
- """Ensure creation of all default permanent items for a given set of types.
-
- Args:
- requested_types: A list of sync data types from ALL_TYPES.
- All default permanent items of only these types will be created.
- """
- for spec in self._PERMANENT_ITEM_SPECS:
- if spec.sync_type in requested_types and spec.create_by_default:
- self._CreatePermanentItem(spec)
-
- def ResetStoreBirthday(self):
- """Resets the store birthday to a random value."""
- # TODO(nick): uuid.uuid1() is better, but python 2.5 only.
- self.store_birthday = '%0.30f' % random.random()
-
- def StoreBirthday(self):
- """Gets the store birthday."""
- return self.store_birthday
-
- def GetChanges(self, sieve):
- """Get entries which have changed, oldest first.
-
- The returned entries are limited to being _BATCH_SIZE many. The entries
- are returned in strict version order.
-
- Args:
- sieve: An update sieve to use to filter out updates the client
- has already seen.
- Returns:
- A tuple of (version, entries, changes_remaining). Version is a new
- timestamp value, which should be used as the starting point for the
- next query. Entries is the batch of entries meeting the current
- timestamp query. Changes_remaining indicates the number of changes
- left on the server after this batch.
- """
- if not sieve.HasAnyTimestamp():
- return (0, [], 0)
- min_timestamp = sieve.GetMinTimestamp()
- first_time_types = sieve.GetFirstTimeTypes()
- self._CreateDefaultPermanentItems(first_time_types)
- # Mobile bookmark folder is not created by default, create it only when
- # client requested it.
- if (sieve.GetCreateMobileBookmarks() and
- first_time_types.count(BOOKMARK) > 0):
- self.TriggerCreateSyncedBookmarks()
-
- self.TriggerAcknowledgeManagedUsers()
-
- change_log = sorted(self._entries.values(),
- key=operator.attrgetter('version'))
- new_changes = [x for x in change_log if x.version > min_timestamp]
- # Pick batch_size new changes, and then filter them. This matches
- # the RPC behavior of the production sync server.
- batch = new_changes[:self._BATCH_SIZE]
- if not batch:
- # Client is up to date.
- return (min_timestamp, [], 0)
-
- # Restrict batch to requested types. Tombstones are untyped
- # and will always get included.
- filtered = [copy.deepcopy(item) for item in batch
- if item.deleted or sieve.ClientWantsItem(item)]
-
- # The new client timestamp is the timestamp of the last item in the
- # batch, even if that item was filtered out.
- return (batch[-1].version, filtered, len(new_changes) - len(batch))
-
- def GetKeystoreKeys(self):
- """Returns the encryption keys for this account."""
- print "Returning encryption keys: %s" % self._keys
- return self._keys
-
- def _CopyOverImmutableFields(self, entry):
- """Preserve immutable fields by copying pre-commit state.
-
- Args:
- entry: A sync entity from the client.
- """
- if entry.id_string in self._entries:
- if self._entries[entry.id_string].HasField(
- 'server_defined_unique_tag'):
- entry.server_defined_unique_tag = (
- self._entries[entry.id_string].server_defined_unique_tag)
-
- def _CheckVersionForCommit(self, entry):
- """Perform an optimistic concurrency check on the version number.
-
- Clients are only allowed to commit if they report having seen the most
- recent version of an object.
-
- Args:
- entry: A sync entity from the client. It is assumed that ID fields
- have been converted to server IDs.
- Returns:
- A boolean value indicating whether the client's version matches the
- newest server version for the given entry.
- """
- if entry.id_string in self._entries:
- # Allow edits/deletes if the version matches, and any undeletion.
- return (self._entries[entry.id_string].version == entry.version or
- self._entries[entry.id_string].deleted)
- else:
- # Allow unknown ID only if the client thinks it's new too.
- return entry.version == 0
-
- def _CheckParentIdForCommit(self, entry):
- """Check that the parent ID referenced in a SyncEntity actually exists.
-
- Args:
- entry: A sync entity from the client. It is assumed that ID fields
- have been converted to server IDs.
- Returns:
- A boolean value indicating whether the entity's parent ID is an object
- that actually exists (and is not deleted) in the current account state.
- """
- if entry.parent_id_string == ROOT_ID:
- # This is generally allowed.
- return True
- if entry.parent_id_string not in self._entries:
- print 'Warning: Client sent unknown ID. Should never happen.'
- return False
- if entry.parent_id_string == entry.id_string:
- print 'Warning: Client sent circular reference. Should never happen.'
- return False
- if self._entries[entry.parent_id_string].deleted:
- # This can happen in a race condition between two clients.
- return False
- if not self._entries[entry.parent_id_string].folder:
- print 'Warning: Client sent non-folder parent. Should never happen.'
- return False
- return True
-
- def _RewriteIdsAsServerIds(self, entry, cache_guid, commit_session):
- """Convert ID fields in a client sync entry to server IDs.
-
- A commit batch sent by a client may contain new items for which the
- server has not generated IDs yet. And within a commit batch, later
- items are allowed to refer to earlier items. This method will
- generate server IDs for new items, as well as rewrite references
- to items whose server IDs were generated earlier in the batch.
-
- Args:
- entry: The client sync entry to modify.
- cache_guid: The globally unique ID of the client that sent this
- commit request.
- commit_session: A dictionary mapping the original IDs to the new server
- IDs, for any items committed earlier in the batch.
- """
- if entry.version == 0:
- data_type = GetEntryType(entry)
- if entry.HasField('client_defined_unique_tag'):
- # When present, this should determine the item's ID.
- new_id = self._ClientTagToId(data_type, entry.client_defined_unique_tag)
- else:
- new_id = self._ClientIdToId(data_type, cache_guid, entry.id_string)
- entry.originator_cache_guid = cache_guid
- entry.originator_client_item_id = entry.id_string
- commit_session[entry.id_string] = new_id # Remember the remapping.
- entry.id_string = new_id
- if entry.parent_id_string in commit_session:
- entry.parent_id_string = commit_session[entry.parent_id_string]
- if entry.insert_after_item_id in commit_session:
- entry.insert_after_item_id = commit_session[entry.insert_after_item_id]
-
- def ValidateCommitEntries(self, entries):
- """Raise an exception if a commit batch contains any global errors.
-
- Arguments:
- entries: an iterable containing commit-form SyncEntity protocol buffers.
-
- Raises:
- MigrationDoneError: if any of the entries reference a recently-migrated
- datatype.
- """
- server_ids_in_commit = set()
- local_ids_in_commit = set()
- for entry in entries:
- if entry.version:
- server_ids_in_commit.add(entry.id_string)
- else:
- local_ids_in_commit.add(entry.id_string)
- if entry.HasField('parent_id_string'):
- if entry.parent_id_string not in local_ids_in_commit:
- server_ids_in_commit.add(entry.parent_id_string)
-
- versions_present = {}
- for server_id in server_ids_in_commit:
- parsed = self._ExtractIdInfo(server_id)
- if parsed:
- datatype, version, _ = parsed
- versions_present.setdefault(datatype, []).append(version)
-
- self.migration_history.CheckAllCurrent(
- dict((k, min(v)) for k, v in versions_present.iteritems()))
-
- def CommitEntry(self, entry, cache_guid, commit_session):
- """Attempt to commit one entry to the user's account.
-
- Args:
- entry: A SyncEntity protobuf representing desired object changes.
- cache_guid: A string value uniquely identifying the client; this
- is used for ID generation and will determine the originator_cache_guid
- if the entry is new.
- commit_session: A dictionary mapping client IDs to server IDs for any
- objects committed earlier this session. If the entry gets a new ID
- during commit, the change will be recorded here.
- Returns:
- A SyncEntity reflecting the post-commit value of the entry, or None
- if the entry was not committed due to an error.
- """
- entry = copy.deepcopy(entry)
-
- # Generate server IDs for this entry, and write generated server IDs
- # from earlier entries into the message's fields, as appropriate. The
- # ID generation state is stored in 'commit_session'.
- self._RewriteIdsAsServerIds(entry, cache_guid, commit_session)
-
- # Perform the optimistic concurrency check on the entry's version number.
- # Clients are not allowed to commit unless they indicate that they've seen
- # the most recent version of an object.
- if not self._CheckVersionForCommit(entry):
- return None
-
- # Check the validity of the parent ID; it must exist at this point.
- # TODO(nick): Implement cycle detection and resolution.
- if not self._CheckParentIdForCommit(entry):
- return None
-
- self._CopyOverImmutableFields(entry);
-
- # At this point, the commit is definitely going to happen.
-
- # Deletion works by storing a limited record for an entry, called a
- # tombstone. A sync server must track deleted IDs forever, since it does
- # not keep track of client knowledge (there's no deletion ACK event).
- if entry.deleted:
- def MakeTombstone(id_string, datatype):
- """Make a tombstone entry that will replace the entry being deleted.
-
- Args:
- id_string: Index of the SyncEntity to be deleted.
- Returns:
- A new SyncEntity reflecting the fact that the entry is deleted.
- """
- # Only the ID, version and deletion state are preserved on a tombstone.
- tombstone = sync_pb2.SyncEntity()
- tombstone.id_string = id_string
- tombstone.deleted = True
- tombstone.name = ''
- tombstone.specifics.CopyFrom(GetDefaultEntitySpecifics(datatype))
- return tombstone
-
- def IsChild(child_id):
- """Check if a SyncEntity is a child of entry, or any of its children.
-
- Args:
- child_id: Index of the SyncEntity that is a possible child of entry.
- Returns:
- True if it is a child; false otherwise.
- """
- if child_id not in self._entries:
- return False
- if self._entries[child_id].parent_id_string == entry.id_string:
- return True
- return IsChild(self._entries[child_id].parent_id_string)
-
- # Identify any children entry might have.
- child_ids = [child.id_string for child in self._entries.itervalues()
- if IsChild(child.id_string)]
-
- # Mark all children that were identified as deleted.
- for child_id in child_ids:
- datatype = GetEntryType(self._entries[child_id])
- self._SaveEntry(MakeTombstone(child_id, datatype))
-
- # Delete entry itself.
- datatype = GetEntryType(self._entries[entry.id_string])
- entry = MakeTombstone(entry.id_string, datatype)
- else:
- # Comments in sync.proto detail how the representation of positional
- # ordering works.
- #
- # We've almost fully deprecated the 'insert_after_item_id' field.
- # The 'position_in_parent' field is also deprecated, but as of Jan 2013
- # is still in common use. The 'unique_position' field is the latest
- # and greatest in positioning technology.
- #
- # This server supports 'position_in_parent' and 'unique_position'.
- self._WritePosition(entry, entry.parent_id_string)
-
- # Preserve the originator info, which the client is not required to send
- # when updating.
- base_entry = self._entries.get(entry.id_string)
- if base_entry and not entry.HasField('originator_cache_guid'):
- entry.originator_cache_guid = base_entry.originator_cache_guid
- entry.originator_client_item_id = base_entry.originator_client_item_id
-
- # Store the current time since the Unix epoch in milliseconds.
- entry.mtime = (int((time.mktime(time.gmtime()) -
- (time.mktime(FIRST_DAY_UNIX_TIME_EPOCH) - ONE_DAY_SECONDS))*1000))
-
- # Commit the change. This also updates the version number.
- self._SaveEntry(entry)
- return entry
-
- def _RewriteVersionInId(self, id_string):
- """Rewrites an ID so that its migration version becomes current."""
- parsed_id = self._ExtractIdInfo(id_string)
- if not parsed_id:
- return id_string
- datatype, old_migration_version, inner_id = parsed_id
- return self._MakeCurrentId(datatype, inner_id)
-
- def TriggerMigration(self, datatypes):
- """Cause a migration to occur for a set of datatypes on this account.
-
- Clients will see the MIGRATION_DONE error for these datatypes until they
- resync them.
- """
- versions_to_remap = self.migration_history.Bump(datatypes)
- all_entries = self._entries.values()
- self._entries.clear()
- for entry in all_entries:
- new_id = self._RewriteVersionInId(entry.id_string)
- entry.id_string = new_id
- if entry.HasField('parent_id_string'):
- entry.parent_id_string = self._RewriteVersionInId(
- entry.parent_id_string)
- self._entries[entry.id_string] = entry
-
- def TriggerSyncTabFavicons(self):
- """Set the 'sync_tab_favicons' field to this account's nigori node.
-
- If the field is not currently set, will write a new nigori node entry
- with the field set. Else does nothing.
- """
-
- nigori_tag = "google_chrome_nigori"
- nigori_original = self._entries.get(self._ServerTagToId(nigori_tag))
- if (nigori_original.specifics.nigori.sync_tab_favicons):
- return
- nigori_new = copy.deepcopy(nigori_original)
- nigori_new.specifics.nigori.sync_tabs = True
- self._SaveEntry(nigori_new)
-
- def TriggerCreateSyncedBookmarks(self):
- """Create the Synced Bookmarks folder under the Bookmarks permanent item.
-
- Clients will then receive the Synced Bookmarks folder on future
- GetUpdates, and new bookmarks can be added within the Synced Bookmarks
- folder.
- """
-
- synced_bookmarks_spec, = [spec for spec in self._PERMANENT_ITEM_SPECS
- if spec.name == "Synced Bookmarks"]
- self._CreatePermanentItem(synced_bookmarks_spec)
-
- def TriggerEnableKeystoreEncryption(self):
- """Create the keystore_encryption experiment entity and enable it.
-
- A new entity within the EXPERIMENTS datatype is created with the unique
- client tag "keystore_encryption" if it doesn't already exist. The
- keystore_encryption message is then filled with |enabled| set to true.
- """
-
- experiment_id = self._ServerTagToId("google_chrome_experiments")
- keystore_encryption_id = self._ClientTagToId(
- EXPERIMENTS,
- KEYSTORE_ENCRYPTION_EXPERIMENT_TAG)
- keystore_entry = self._entries.get(keystore_encryption_id)
- if keystore_entry is None:
- keystore_entry = sync_pb2.SyncEntity()
- keystore_entry.id_string = keystore_encryption_id
- keystore_entry.name = "Keystore Encryption"
- keystore_entry.client_defined_unique_tag = (
- KEYSTORE_ENCRYPTION_EXPERIMENT_TAG)
- keystore_entry.folder = False
- keystore_entry.deleted = False
- keystore_entry.specifics.CopyFrom(GetDefaultEntitySpecifics(EXPERIMENTS))
- self._WritePosition(keystore_entry, experiment_id)
-
- keystore_entry.specifics.experiments.keystore_encryption.enabled = True
-
- self._SaveEntry(keystore_entry)
-
- def TriggerRotateKeystoreKeys(self):
- """Rotate the current set of keystore encryption keys.
-
- |self._keys| will have a new random encryption key appended to it. We touch
- the nigori node so that each client will receive the new encryption keys
- only once.
- """
-
- # Add a new encryption key.
- self._keys += [MakeNewKeystoreKey(), ]
-
- # Increment the nigori node's timestamp, so clients will get the new keys
- # on their next GetUpdates (any time the nigori node is sent back, we also
- # send back the keystore keys).
- nigori_tag = "google_chrome_nigori"
- self._SaveEntry(self._entries.get(self._ServerTagToId(nigori_tag)))
-
- def TriggerAcknowledgeManagedUsers(self):
- """Set the "acknowledged" flag for any managed user entities that don't have
- it set already.
- """
-
- if not self.acknowledge_managed_users:
- return
-
- managed_users = [copy.deepcopy(entry) for entry in self._entries.values()
- if entry.specifics.HasField('managed_user')
- and not entry.specifics.managed_user.acknowledged]
- for user in managed_users:
- user.specifics.managed_user.acknowledged = True
- self._SaveEntry(user)
-
- def TriggerEnablePreCommitGetUpdateAvoidance(self):
- """Sets the experiment to enable pre-commit GetUpdate avoidance."""
- experiment_id = self._ServerTagToId("google_chrome_experiments")
- pre_commit_gu_avoidance_id = self._ClientTagToId(
- EXPERIMENTS,
- PRE_COMMIT_GU_AVOIDANCE_EXPERIMENT_TAG)
- entry = self._entries.get(pre_commit_gu_avoidance_id)
- if entry is None:
- entry = sync_pb2.SyncEntity()
- entry.id_string = pre_commit_gu_avoidance_id
- entry.name = "Pre-commit GU avoidance"
- entry.client_defined_unique_tag = PRE_COMMIT_GU_AVOIDANCE_EXPERIMENT_TAG
- entry.folder = False
- entry.deleted = False
- entry.specifics.CopyFrom(GetDefaultEntitySpecifics(EXPERIMENTS))
- self._WritePosition(entry, experiment_id)
- entry.specifics.experiments.pre_commit_update_avoidance.enabled = True
- self._SaveEntry(entry)
-
- def SetInducedError(self, error, error_frequency,
- sync_count_before_errors):
- self.induced_error = error
- self.induced_error_frequency = error_frequency
- self.sync_count_before_errors = sync_count_before_errors
-
- def GetInducedError(self):
- return self.induced_error
-
- def AddSyncedNotification(self, serialized_notification):
- """Adds a synced notification to the server data.
-
- The notification will be delivered to the client on the next GetUpdates
- call.
-
- Args:
- serialized_notification: A serialized CoalescedSyncedNotification.
-
- Returns:
- The string representation of the added SyncEntity.
-
- Raises:
- ClientNotConnectedError: if the client has not yet connected to this
- server
- """
- # A unique string used wherever a unique ID for this notification is
- # required.
- unique_notification_id = str(uuid.uuid4())
-
- specifics = self._CreateSyncedNotificationEntitySpecifics(
- unique_notification_id, serialized_notification)
-
- # Create the root SyncEntity representing a single notification.
- entity = sync_pb2.SyncEntity()
- entity.specifics.CopyFrom(specifics)
- entity.parent_id_string = self._ServerTagToId(
- 'google_chrome_synced_notifications')
- entity.name = 'Synced notification added for testing'
- entity.server_defined_unique_tag = unique_notification_id
-
- # Set the version to one more than the greatest version number already seen.
- entries = sorted(self._entries.values(), key=operator.attrgetter('version'))
- if len(entries) < 1:
- raise ClientNotConnectedError
- entity.version = entries[-1].version + 1
-
- entity.client_defined_unique_tag = self._CreateSyncedNotificationClientTag(
- specifics.synced_notification.coalesced_notification.key)
- entity.id_string = self._ClientTagToId(GetEntryType(entity),
- entity.client_defined_unique_tag)
-
- self._entries[entity.id_string] = copy.deepcopy(entity)
-
- return google.protobuf.text_format.MessageToString(entity)
-
- def _CreateSyncedNotificationEntitySpecifics(self, unique_id,
- serialized_notification):
- """Create the EntitySpecifics proto for a synced notification."""
- coalesced = synced_notification_data_pb2.CoalescedSyncedNotification()
- google.protobuf.text_format.Merge(serialized_notification, coalesced)
-
- # Override the provided key so that we have a unique one.
- coalesced.key = unique_id
-
- specifics = sync_pb2.EntitySpecifics()
- notification_specifics = \
- synced_notification_specifics_pb2.SyncedNotificationSpecifics()
- notification_specifics.coalesced_notification.CopyFrom(coalesced)
- specifics.synced_notification.CopyFrom(notification_specifics)
-
- return specifics
-
-
- def _CreateSyncedNotificationClientTag(self, key):
- """Create the client_defined_unique_tag value for a SyncedNotification.
-
- Args:
- key: The entity used to create the client tag.
-
- Returns:
- The string value of the to be used as the client_defined_unique_tag.
- """
- serialized_type = sync_pb2.EntitySpecifics()
- specifics = synced_notification_specifics_pb2.SyncedNotificationSpecifics()
- serialized_type.synced_notification.CopyFrom(specifics)
- hash_input = serialized_type.SerializeToString() + key
- return base64.b64encode(hashlib.sha1(hash_input).digest())
-
-
-class TestServer(object):
- """An object to handle requests for one (and only one) Chrome Sync account.
-
- TestServer consumes the sync command messages that are the outermost
- layers of the protocol, performs the corresponding actions on its
- SyncDataModel, and constructs an appropriate response message.
- """
-
- def __init__(self):
- # The implementation supports exactly one account; its state is here.
- self.account = SyncDataModel()
- self.account_lock = threading.Lock()
- # Clients that have talked to us: a map from the full client ID
- # to its nickname.
- self.clients = {}
- self.client_name_generator = ('+' * times + chr(c)
- for times in xrange(0, sys.maxint) for c in xrange(ord('A'), ord('Z')))
- self.transient_error = False
- self.sync_count = 0
- # Gaia OAuth2 Token fields and their default values.
- self.response_code = 200
- self.request_token = 'rt1'
- self.access_token = 'at1'
- self.expires_in = 3600
- self.token_type = 'Bearer'
- # The ClientCommand to send back on each ServerToClientResponse. If set to
- # None, no ClientCommand should be sent.
- self._client_command = None
-
-
- def GetShortClientName(self, query):
- parsed = cgi.parse_qs(query[query.find('?')+1:])
- client_id = parsed.get('client_id')
- if not client_id:
- return '?'
- client_id = client_id[0]
- if client_id not in self.clients:
- self.clients[client_id] = self.client_name_generator.next()
- return self.clients[client_id]
-
- def CheckStoreBirthday(self, request):
- """Raises StoreBirthdayError if the request's birthday is a mismatch."""
- if not request.HasField('store_birthday'):
- return
- if self.account.StoreBirthday() != request.store_birthday:
- raise StoreBirthdayError
-
- def CheckTransientError(self):
- """Raises TransientError if transient_error variable is set."""
- if self.transient_error:
- raise TransientError
-
- def CheckSendError(self):
- """Raises SyncInducedError if needed."""
- if (self.account.induced_error.error_type !=
- sync_enums_pb2.SyncEnums.UNKNOWN):
- # Always means return the given error for all requests.
- if self.account.induced_error_frequency == ERROR_FREQUENCY_ALWAYS:
- raise SyncInducedError
- # This means the FIRST 2 requests of every 3 requests
- # return an error. Don't switch the order of failures. There are
- # test cases that rely on the first 2 being the failure rather than
- # the last 2.
- elif (self.account.induced_error_frequency ==
- ERROR_FREQUENCY_TWO_THIRDS):
- if (((self.sync_count -
- self.account.sync_count_before_errors) % 3) != 0):
- raise SyncInducedError
- else:
- raise InducedErrorFrequencyNotDefined
-
- def HandleMigrate(self, path):
- query = urlparse.urlparse(path)[4]
- code = 200
- self.account_lock.acquire()
- try:
- datatypes = [DataTypeStringToSyncTypeLoose(x)
- for x in urlparse.parse_qs(query).get('type',[])]
- if datatypes:
- self.account.TriggerMigration(datatypes)
- response = 'Migrated datatypes %s' % (
- ' and '.join(SyncTypeToString(x).upper() for x in datatypes))
- else:
- response = 'Please specify one or more <i>type=name</i> parameters'
- code = 400
- except DataTypeIdNotRecognized, error:
- response = 'Could not interpret datatype name'
- code = 400
- finally:
- self.account_lock.release()
- return (code, '<html><title>Migration: %d</title><H1>%d %s</H1></html>' %
- (code, code, response))
-
- def HandleSetInducedError(self, path):
- query = urlparse.urlparse(path)[4]
- self.account_lock.acquire()
- code = 200
- response = 'Success'
- error = sync_pb2.ClientToServerResponse.Error()
- try:
- error_type = urlparse.parse_qs(query)['error']
- action = urlparse.parse_qs(query)['action']
- error.error_type = int(error_type[0])
- error.action = int(action[0])
- try:
- error.url = (urlparse.parse_qs(query)['url'])[0]
- except KeyError:
- error.url = ''
- try:
- error.error_description =(
- (urlparse.parse_qs(query)['error_description'])[0])
- except KeyError:
- error.error_description = ''
- try:
- error_frequency = int((urlparse.parse_qs(query)['frequency'])[0])
- except KeyError:
- error_frequency = ERROR_FREQUENCY_ALWAYS
- self.account.SetInducedError(error, error_frequency, self.sync_count)
- response = ('Error = %d, action = %d, url = %s, description = %s' %
- (error.error_type, error.action,
- error.url,
- error.error_description))
- except error:
- response = 'Could not parse url'
- code = 400
- finally:
- self.account_lock.release()
- return (code, '<html><title>SetError: %d</title><H1>%d %s</H1></html>' %
- (code, code, response))
-
- def HandleCreateBirthdayError(self):
- self.account.ResetStoreBirthday()
- return (
- 200,
- '<html><title>Birthday error</title><H1>Birthday error</H1></html>')
-
- def HandleSetTransientError(self):
- self.transient_error = True
- return (
- 200,
- '<html><title>Transient error</title><H1>Transient error</H1></html>')
-
- def HandleSetSyncTabFavicons(self):
- """Set 'sync_tab_favicons' field of the nigori node for this account."""
- self.account.TriggerSyncTabFavicons()
- return (
- 200,
- '<html><title>Tab Favicons</title><H1>Tab Favicons</H1></html>')
-
- def HandleCreateSyncedBookmarks(self):
- """Create the Synced Bookmarks folder under Bookmarks."""
- self.account.TriggerCreateSyncedBookmarks()
- return (
- 200,
- '<html><title>Synced Bookmarks</title><H1>Synced Bookmarks</H1></html>')
-
- def HandleEnableKeystoreEncryption(self):
- """Enables the keystore encryption experiment."""
- self.account.TriggerEnableKeystoreEncryption()
- return (
- 200,
- '<html><title>Enable Keystore Encryption</title>'
- '<H1>Enable Keystore Encryption</H1></html>')
-
- def HandleRotateKeystoreKeys(self):
- """Rotate the keystore encryption keys."""
- self.account.TriggerRotateKeystoreKeys()
- return (
- 200,
- '<html><title>Rotate Keystore Keys</title>'
- '<H1>Rotate Keystore Keys</H1></html>')
-
- def HandleEnableManagedUserAcknowledgement(self):
- """Enable acknowledging newly created managed users."""
- self.account.acknowledge_managed_users = True
- return (
- 200,
- '<html><title>Enable Managed User Acknowledgement</title>'
- '<h1>Enable Managed User Acknowledgement</h1></html>')
-
- def HandleEnablePreCommitGetUpdateAvoidance(self):
- """Enables the pre-commit GU avoidance experiment."""
- self.account.TriggerEnablePreCommitGetUpdateAvoidance()
- return (
- 200,
- '<html><title>Enable pre-commit GU avoidance</title>'
- '<H1>Enable pre-commit GU avoidance</H1></html>')
-
- def HandleCommand(self, query, raw_request):
- """Decode and handle a sync command from a raw input of bytes.
-
- This is the main entry point for this class. It is safe to call this
- method from multiple threads.
-
- Args:
- raw_request: An iterable byte sequence to be interpreted as a sync
- protocol command.
- Returns:
- A tuple (response_code, raw_response); the first value is an HTTP
- result code, while the second value is a string of bytes which is the
- serialized reply to the command.
- """
- self.account_lock.acquire()
- self.sync_count += 1
- def print_context(direction):
- print '[Client %s %s %s.py]' % (self.GetShortClientName(query), direction,
- __name__),
-
- try:
- request = sync_pb2.ClientToServerMessage()
- request.MergeFromString(raw_request)
- contents = request.message_contents
-
- response = sync_pb2.ClientToServerResponse()
- response.error_code = sync_enums_pb2.SyncEnums.SUCCESS
-
- if self._client_command:
- response.client_command.CopyFrom(self._client_command)
-
- self.CheckStoreBirthday(request)
- response.store_birthday = self.account.store_birthday
- self.CheckTransientError()
- self.CheckSendError()
-
- print_context('->')
-
- if contents == sync_pb2.ClientToServerMessage.AUTHENTICATE:
- print 'Authenticate'
- # We accept any authentication token, and support only one account.
- # TODO(nick): Mock out the GAIA authentication as well; hook up here.
- response.authenticate.user.email = 'syncjuser@chromium'
- response.authenticate.user.display_name = 'Sync J User'
- elif contents == sync_pb2.ClientToServerMessage.COMMIT:
- print 'Commit %d item(s)' % len(request.commit.entries)
- self.HandleCommit(request.commit, response.commit)
- elif contents == sync_pb2.ClientToServerMessage.GET_UPDATES:
- print 'GetUpdates',
- self.HandleGetUpdates(request.get_updates, response.get_updates)
- print_context('<-')
- print '%d update(s)' % len(response.get_updates.entries)
- else:
- print 'Unrecognizable sync request!'
- return (400, None) # Bad request.
- return (200, response.SerializeToString())
- except MigrationDoneError, error:
- print_context('<-')
- print 'MIGRATION_DONE: <%s>' % (ShortDatatypeListSummary(error.datatypes))
- response = sync_pb2.ClientToServerResponse()
- response.store_birthday = self.account.store_birthday
- response.error_code = sync_enums_pb2.SyncEnums.MIGRATION_DONE
- response.migrated_data_type_id[:] = [
- SyncTypeToProtocolDataTypeId(x) for x in error.datatypes]
- return (200, response.SerializeToString())
- except StoreBirthdayError, error:
- print_context('<-')
- print 'NOT_MY_BIRTHDAY'
- response = sync_pb2.ClientToServerResponse()
- response.store_birthday = self.account.store_birthday
- response.error_code = sync_enums_pb2.SyncEnums.NOT_MY_BIRTHDAY
- return (200, response.SerializeToString())
- except TransientError, error:
- ### This is deprecated now. Would be removed once test cases are removed.
- print_context('<-')
- print 'TRANSIENT_ERROR'
- response.store_birthday = self.account.store_birthday
- response.error_code = sync_enums_pb2.SyncEnums.TRANSIENT_ERROR
- return (200, response.SerializeToString())
- except SyncInducedError, error:
- print_context('<-')
- print 'INDUCED_ERROR'
- response.store_birthday = self.account.store_birthday
- error = self.account.GetInducedError()
- response.error.error_type = error.error_type
- response.error.url = error.url
- response.error.error_description = error.error_description
- response.error.action = error.action
- return (200, response.SerializeToString())
- finally:
- self.account_lock.release()
-
- def HandleCommit(self, commit_message, commit_response):
- """Respond to a Commit request by updating the user's account state.
-
- Commit attempts stop after the first error, returning a CONFLICT result
- for any unattempted entries.
-
- Args:
- commit_message: A sync_pb.CommitMessage protobuf holding the content
- of the client's request.
- commit_response: A sync_pb.CommitResponse protobuf into which a reply
- to the client request will be written.
- """
- commit_response.SetInParent()
- batch_failure = False
- session = {} # Tracks ID renaming during the commit operation.
- guid = commit_message.cache_guid
-
- self.account.ValidateCommitEntries(commit_message.entries)
-
- for entry in commit_message.entries:
- server_entry = None
- if not batch_failure:
- # Try to commit the change to the account.
- server_entry = self.account.CommitEntry(entry, guid, session)
-
- # An entryresponse is returned in both success and failure cases.
- reply = commit_response.entryresponse.add()
- if not server_entry:
- reply.response_type = sync_pb2.CommitResponse.CONFLICT
- reply.error_message = 'Conflict.'
- batch_failure = True # One failure halts the batch.
- else:
- reply.response_type = sync_pb2.CommitResponse.SUCCESS
- # These are the properties that the server is allowed to override
- # during commit; the client wants to know their values at the end
- # of the operation.
- reply.id_string = server_entry.id_string
- if not server_entry.deleted:
- # Note: the production server doesn't actually send the
- # parent_id_string on commit responses, so we don't either.
- reply.position_in_parent = server_entry.position_in_parent
- reply.version = server_entry.version
- reply.name = server_entry.name
- reply.non_unique_name = server_entry.non_unique_name
- else:
- reply.version = entry.version + 1
-
- def HandleGetUpdates(self, update_request, update_response):
- """Respond to a GetUpdates request by querying the user's account.
-
- Args:
- update_request: A sync_pb.GetUpdatesMessage protobuf holding the content
- of the client's request.
- update_response: A sync_pb.GetUpdatesResponse protobuf into which a reply
- to the client request will be written.
- """
- update_response.SetInParent()
- update_sieve = UpdateSieve(update_request, self.account.migration_history)
-
- print CallerInfoToString(update_request.caller_info.source),
- print update_sieve.SummarizeRequest()
-
- update_sieve.CheckMigrationState()
-
- new_timestamp, entries, remaining = self.account.GetChanges(update_sieve)
-
- update_response.changes_remaining = remaining
- sending_nigori_node = False
- for entry in entries:
- if entry.name == 'Nigori':
- sending_nigori_node = True
- reply = update_response.entries.add()
- reply.CopyFrom(entry)
- update_sieve.SaveProgress(new_timestamp, update_response)
-
- if update_request.need_encryption_key or sending_nigori_node:
- update_response.encryption_keys.extend(self.account.GetKeystoreKeys())
-
- def HandleGetOauth2Token(self):
- return (int(self.response_code),
- '{\n'
- ' \"refresh_token\": \"' + self.request_token + '\",\n'
- ' \"access_token\": \"' + self.access_token + '\",\n'
- ' \"expires_in\": ' + str(self.expires_in) + ',\n'
- ' \"token_type\": \"' + self.token_type +'\"\n'
- '}')
-
- def HandleSetOauth2Token(self, response_code, request_token, access_token,
- expires_in, token_type):
- if response_code != 0:
- self.response_code = response_code
- if request_token != '':
- self.request_token = request_token
- if access_token != '':
- self.access_token = access_token
- if expires_in != 0:
- self.expires_in = expires_in
- if token_type != '':
- self.token_type = token_type
-
- return (200,
- '<html><title>Set OAuth2 Token</title>'
- '<H1>This server will now return the OAuth2 Token:</H1>'
- '<p>response_code: ' + str(self.response_code) + '</p>'
- '<p>request_token: ' + self.request_token + '</p>'
- '<p>access_token: ' + self.access_token + '</p>'
- '<p>expires_in: ' + str(self.expires_in) + '</p>'
- '<p>token_type: ' + self.token_type + '</p>'
- '</html>')
-
- def CustomizeClientCommand(self, sessions_commit_delay_seconds):
- """Customizes the value of the ClientCommand of ServerToClientResponse.
-
- Currently, this only allows for changing the sessions_commit_delay_seconds
- field. This is useful for testing in conjunction with
- AddSyncedNotification so that synced notifications are seen immediately
- after triggering them with an HTTP call to the test server.
-
- Args:
- sessions_commit_delay_seconds: The desired sync delay time for sessions.
- """
- if not self._client_command:
- self._client_command = client_commands_pb2.ClientCommand()
-
- self._client_command.sessions_commit_delay_seconds = \
- sessions_commit_delay_seconds
- return self._client_command
diff --git a/chromium/sync/tools/testserver/chromiumsync_test.py b/chromium/sync/tools/testserver/chromiumsync_test.py
deleted file mode 100755
index d83e4146c82..00000000000
--- a/chromium/sync/tools/testserver/chromiumsync_test.py
+++ /dev/null
@@ -1,680 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Tests exercising chromiumsync and SyncDataModel."""
-
-import pickle
-import unittest
-
-import autofill_specifics_pb2
-import bookmark_specifics_pb2
-import chromiumsync
-import managed_user_specifics_pb2
-import sync_pb2
-import theme_specifics_pb2
-
-class SyncDataModelTest(unittest.TestCase):
- def setUp(self):
- self.model = chromiumsync.SyncDataModel()
- # The Synced Bookmarks folder is not created by default
- self._expect_synced_bookmarks_folder = False
-
- def AddToModel(self, proto):
- self.model._entries[proto.id_string] = proto
-
- def GetChangesFromTimestamp(self, requested_types, timestamp):
- message = sync_pb2.GetUpdatesMessage()
- message.from_timestamp = timestamp
- for data_type in requested_types:
- getattr(message.requested_types,
- chromiumsync.SYNC_TYPE_TO_DESCRIPTOR[
- data_type].name).SetInParent()
- return self.model.GetChanges(
- chromiumsync.UpdateSieve(message, self.model.migration_history))
-
- def FindMarkerByNumber(self, markers, datatype):
- """Search a list of progress markers and find the one for a datatype."""
- for marker in markers:
- if marker.data_type_id == datatype.number:
- return marker
- self.fail('Required marker not found: %s' % datatype.name)
-
- def testPermanentItemSpecs(self):
- specs = chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS
-
- declared_specs = set(['0'])
- for spec in specs:
- self.assertTrue(spec.parent_tag in declared_specs, 'parent tags must '
- 'be declared before use')
- declared_specs.add(spec.tag)
-
- unique_datatypes = set([x.sync_type for x in specs])
- self.assertEqual(unique_datatypes,
- set(chromiumsync.ALL_TYPES[1:]),
- 'Every sync datatype should have a permanent folder '
- 'associated with it')
-
- def testSaveEntry(self):
- proto = sync_pb2.SyncEntity()
- proto.id_string = 'abcd'
- proto.version = 0
- self.assertFalse(self.model._ItemExists(proto.id_string))
- self.model._SaveEntry(proto)
- self.assertEqual(1, proto.version)
- self.assertTrue(self.model._ItemExists(proto.id_string))
- self.model._SaveEntry(proto)
- self.assertEqual(2, proto.version)
- proto.version = 0
- self.assertTrue(self.model._ItemExists(proto.id_string))
- self.assertEqual(2, self.model._entries[proto.id_string].version)
-
- def testCreatePermanentItems(self):
- self.model._CreateDefaultPermanentItems(chromiumsync.ALL_TYPES)
- self.assertEqual(len(chromiumsync.ALL_TYPES) + 1,
- len(self.model._entries))
-
- def ExpectedPermanentItemCount(self, sync_type):
- if sync_type == chromiumsync.BOOKMARK:
- if self._expect_synced_bookmarks_folder:
- return 4
- else:
- return 3
- else:
- return 1
-
- def testGetChangesFromTimestampZeroForEachType(self):
- all_types = chromiumsync.ALL_TYPES[1:]
- for sync_type in all_types:
- self.model = chromiumsync.SyncDataModel()
- request_types = [sync_type]
-
- version, changes, remaining = (
- self.GetChangesFromTimestamp(request_types, 0))
-
- expected_count = self.ExpectedPermanentItemCount(sync_type)
- self.assertEqual(expected_count, version)
- self.assertEqual(expected_count, len(changes))
- for change in changes:
- self.assertTrue(change.HasField('server_defined_unique_tag'))
- self.assertEqual(change.version, change.sync_timestamp)
- self.assertTrue(change.version <= version)
-
- # Test idempotence: another GetUpdates from ts=0 shouldn't recreate.
- version, changes, remaining = (
- self.GetChangesFromTimestamp(request_types, 0))
- self.assertEqual(expected_count, version)
- self.assertEqual(expected_count, len(changes))
- self.assertEqual(0, remaining)
-
- # Doing a wider GetUpdates from timestamp zero shouldn't recreate either.
- new_version, changes, remaining = (
- self.GetChangesFromTimestamp(all_types, 0))
- if self._expect_synced_bookmarks_folder:
- self.assertEqual(len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS),
- new_version)
- else:
- self.assertEqual(
- len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS) -1,
- new_version)
- self.assertEqual(new_version, len(changes))
- self.assertEqual(0, remaining)
- version, changes, remaining = (
- self.GetChangesFromTimestamp(request_types, 0))
- self.assertEqual(new_version, version)
- self.assertEqual(expected_count, len(changes))
- self.assertEqual(0, remaining)
-
- def testBatchSize(self):
- for sync_type in chromiumsync.ALL_TYPES[1:]:
- specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type)
- self.model = chromiumsync.SyncDataModel()
- request_types = [sync_type]
-
- for i in range(self.model._BATCH_SIZE*3):
- entry = sync_pb2.SyncEntity()
- entry.id_string = 'batch test %d' % i
- entry.specifics.CopyFrom(specifics)
- self.model._SaveEntry(entry)
- last_bit = self.ExpectedPermanentItemCount(sync_type)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, 0))
- self.assertEqual(self.model._BATCH_SIZE, version)
- self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, version))
- self.assertEqual(self.model._BATCH_SIZE*2, version)
- self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, version))
- self.assertEqual(self.model._BATCH_SIZE*3, version)
- self.assertEqual(last_bit, changes_remaining)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, version))
- self.assertEqual(self.model._BATCH_SIZE*3 + last_bit, version)
- self.assertEqual(0, changes_remaining)
-
- # Now delete a third of the items.
- for i in xrange(self.model._BATCH_SIZE*3 - 1, 0, -3):
- entry = sync_pb2.SyncEntity()
- entry.id_string = 'batch test %d' % i
- entry.deleted = True
- self.model._SaveEntry(entry)
-
- # The batch counts shouldn't change.
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, 0))
- self.assertEqual(self.model._BATCH_SIZE, len(changes))
- self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, version))
- self.assertEqual(self.model._BATCH_SIZE, len(changes))
- self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, version))
- self.assertEqual(self.model._BATCH_SIZE, len(changes))
- self.assertEqual(last_bit, changes_remaining)
- version, changes, changes_remaining = (
- self.GetChangesFromTimestamp(request_types, version))
- self.assertEqual(last_bit, len(changes))
- self.assertEqual(self.model._BATCH_SIZE*4 + last_bit, version)
- self.assertEqual(0, changes_remaining)
-
- def testCommitEachDataType(self):
- for sync_type in chromiumsync.ALL_TYPES[1:]:
- specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type)
- self.model = chromiumsync.SyncDataModel()
- my_cache_guid = '112358132134'
- parent = 'foobar'
- commit_session = {}
-
- # Start with a GetUpdates from timestamp 0, to populate permanent items.
- original_version, original_changes, changes_remaining = (
- self.GetChangesFromTimestamp([sync_type], 0))
-
- def DoCommit(original=None, id_string='', name=None, parent=None,
- position=0):
- proto = sync_pb2.SyncEntity()
- if original is not None:
- proto.version = original.version
- proto.id_string = original.id_string
- proto.parent_id_string = original.parent_id_string
- proto.name = original.name
- else:
- proto.id_string = id_string
- proto.version = 0
- proto.specifics.CopyFrom(specifics)
- if name is not None:
- proto.name = name
- if parent:
- proto.parent_id_string = parent.id_string
- proto.insert_after_item_id = 'please discard'
- proto.position_in_parent = position
- proto.folder = True
- proto.deleted = False
- result = self.model.CommitEntry(proto, my_cache_guid, commit_session)
- self.assertTrue(result)
- return (proto, result)
-
- # Commit a new item.
- proto1, result1 = DoCommit(name='namae', id_string='Foo',
- parent=original_changes[-1], position=100)
- # Commit an item whose parent is another item (referenced via the
- # pre-commit ID).
- proto2, result2 = DoCommit(name='Secondo', id_string='Bar',
- parent=proto1, position=-100)
- # Commit a sibling of the second item.
- proto3, result3 = DoCommit(name='Third!', id_string='Baz',
- parent=proto1, position=-50)
-
- self.assertEqual(3, len(commit_session))
- for p, r in [(proto1, result1), (proto2, result2), (proto3, result3)]:
- self.assertNotEqual(r.id_string, p.id_string)
- self.assertEqual(r.originator_client_item_id, p.id_string)
- self.assertEqual(r.originator_cache_guid, my_cache_guid)
- self.assertTrue(r is not self.model._entries[r.id_string],
- "Commit result didn't make a defensive copy.")
- self.assertTrue(p is not self.model._entries[r.id_string],
- "Commit result didn't make a defensive copy.")
- self.assertEqual(commit_session.get(p.id_string), r.id_string)
- self.assertTrue(r.version > original_version)
- self.assertEqual(result1.parent_id_string, proto1.parent_id_string)
- self.assertEqual(result2.parent_id_string, result1.id_string)
- version, changes, remaining = (
- self.GetChangesFromTimestamp([sync_type], original_version))
- self.assertEqual(3, len(changes))
- self.assertEqual(0, remaining)
- self.assertEqual(original_version + 3, version)
- self.assertEqual([result1, result2, result3], changes)
- for c in changes:
- self.assertTrue(c is not self.model._entries[c.id_string],
- "GetChanges didn't make a defensive copy.")
- self.assertTrue(result2.position_in_parent < result3.position_in_parent)
- self.assertEqual(-100, result2.position_in_parent)
-
- # Now update the items so that the second item is the parent of the
- # first; with the first sandwiched between two new items (4 and 5).
- # Do this in a new commit session, meaning we'll reference items from
- # the first batch by their post-commit, server IDs.
- commit_session = {}
- old_cache_guid = my_cache_guid
- my_cache_guid = 'A different GUID'
- proto2b, result2b = DoCommit(original=result2,
- parent=original_changes[-1])
- proto4, result4 = DoCommit(id_string='ID4', name='Four',
- parent=result2, position=-200)
- proto1b, result1b = DoCommit(original=result1,
- parent=result2, position=-150)
- proto5, result5 = DoCommit(id_string='ID5', name='Five', parent=result2,
- position=150)
-
- self.assertEqual(2, len(commit_session), 'Only new items in second '
- 'batch should be in the session')
- for p, r, original in [(proto2b, result2b, proto2),
- (proto4, result4, proto4),
- (proto1b, result1b, proto1),
- (proto5, result5, proto5)]:
- self.assertEqual(r.originator_client_item_id, original.id_string)
- if original is not p:
- self.assertEqual(r.id_string, p.id_string,
- 'Ids should be stable after first commit')
- self.assertEqual(r.originator_cache_guid, old_cache_guid)
- else:
- self.assertNotEqual(r.id_string, p.id_string)
- self.assertEqual(r.originator_cache_guid, my_cache_guid)
- self.assertEqual(commit_session.get(p.id_string), r.id_string)
- self.assertTrue(r is not self.model._entries[r.id_string],
- "Commit result didn't make a defensive copy.")
- self.assertTrue(p is not self.model._entries[r.id_string],
- "Commit didn't make a defensive copy.")
- self.assertTrue(r.version > p.version)
- version, changes, remaining = (
- self.GetChangesFromTimestamp([sync_type], original_version))
- self.assertEqual(5, len(changes))
- self.assertEqual(0, remaining)
- self.assertEqual(original_version + 7, version)
- self.assertEqual([result3, result2b, result4, result1b, result5], changes)
- for c in changes:
- self.assertTrue(c is not self.model._entries[c.id_string],
- "GetChanges didn't make a defensive copy.")
- self.assertTrue(result4.parent_id_string ==
- result1b.parent_id_string ==
- result5.parent_id_string ==
- result2b.id_string)
- self.assertTrue(result4.position_in_parent <
- result1b.position_in_parent <
- result5.position_in_parent)
-
- def testUpdateSieve(self):
- # from_timestamp, legacy mode
- autofill = chromiumsync.SYNC_TYPE_FIELDS['autofill']
- theme = chromiumsync.SYNC_TYPE_FIELDS['theme']
- msg = sync_pb2.GetUpdatesMessage()
- msg.from_timestamp = 15412
- msg.requested_types.autofill.SetInParent()
- msg.requested_types.theme.SetInParent()
-
- sieve = chromiumsync.UpdateSieve(msg)
- self.assertEqual(sieve._state,
- {chromiumsync.TOP_LEVEL: 15412,
- chromiumsync.AUTOFILL: 15412,
- chromiumsync.THEME: 15412})
-
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(15412, response)
- self.assertEqual(0, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
-
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(15413, response)
- self.assertEqual(0, len(response.new_progress_marker))
- self.assertTrue(response.HasField('new_timestamp'))
- self.assertEqual(15413, response.new_timestamp)
-
- # Existing tokens
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = pickle.dumps((15412, 1))
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = pickle.dumps((15413, 1))
- sieve = chromiumsync.UpdateSieve(msg)
- self.assertEqual(sieve._state,
- {chromiumsync.TOP_LEVEL: 15412,
- chromiumsync.AUTOFILL: 15412,
- chromiumsync.THEME: 15413})
-
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(15413, response)
- self.assertEqual(1, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- marker = response.new_progress_marker[0]
- self.assertEqual(marker.data_type_id, autofill.number)
- self.assertEqual(pickle.loads(marker.token), (15413, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
-
- # Empty tokens indicating from timestamp = 0
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = pickle.dumps((412, 1))
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = ''
- sieve = chromiumsync.UpdateSieve(msg)
- self.assertEqual(sieve._state,
- {chromiumsync.TOP_LEVEL: 0,
- chromiumsync.AUTOFILL: 412,
- chromiumsync.THEME: 0})
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(1, response)
- self.assertEqual(1, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- marker = response.new_progress_marker[0]
- self.assertEqual(marker.data_type_id, theme.number)
- self.assertEqual(pickle.loads(marker.token), (1, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
-
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(412, response)
- self.assertEqual(1, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- marker = response.new_progress_marker[0]
- self.assertEqual(marker.data_type_id, theme.number)
- self.assertEqual(pickle.loads(marker.token), (412, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
-
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(413, response)
- self.assertEqual(2, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
- self.assertEqual(pickle.loads(marker.token), (413, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
- self.assertEqual(pickle.loads(marker.token), (413, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
-
- # Migration token timestamps (client gives timestamp, server returns token)
- # These are for migrating from the old 'timestamp' protocol to the
- # progressmarker protocol, and have nothing to do with the MIGRATION_DONE
- # error code.
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.timestamp_token_for_migration = 15213
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.timestamp_token_for_migration = 15211
- sieve = chromiumsync.UpdateSieve(msg)
- self.assertEqual(sieve._state,
- {chromiumsync.TOP_LEVEL: 15211,
- chromiumsync.AUTOFILL: 15213,
- chromiumsync.THEME: 15211})
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(16000, response) # There were updates
- self.assertEqual(2, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
- self.assertEqual(pickle.loads(marker.token), (16000, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
- self.assertEqual(pickle.loads(marker.token), (16000, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
-
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.timestamp_token_for_migration = 3000
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.timestamp_token_for_migration = 3000
- sieve = chromiumsync.UpdateSieve(msg)
- self.assertEqual(sieve._state,
- {chromiumsync.TOP_LEVEL: 3000,
- chromiumsync.AUTOFILL: 3000,
- chromiumsync.THEME: 3000})
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(3000, response) # Already up to date
- self.assertEqual(2, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
- self.assertEqual(pickle.loads(marker.token), (3000, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
- self.assertEqual(pickle.loads(marker.token), (3000, 1))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
-
- def testCheckRaiseTransientError(self):
- testserver = chromiumsync.TestServer()
- http_code, raw_respon = testserver.HandleSetTransientError()
- self.assertEqual(http_code, 200)
- try:
- testserver.CheckTransientError()
- self.fail('Should have raised transient error exception')
- except chromiumsync.TransientError:
- self.assertTrue(testserver.transient_error)
-
- def testUpdateSieveStoreMigration(self):
- autofill = chromiumsync.SYNC_TYPE_FIELDS['autofill']
- theme = chromiumsync.SYNC_TYPE_FIELDS['theme']
- migrator = chromiumsync.MigrationHistory()
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = pickle.dumps((15412, 1))
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = pickle.dumps((15413, 1))
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- sieve.CheckMigrationState()
-
- migrator.Bump([chromiumsync.BOOKMARK, chromiumsync.PASSWORD]) # v=2
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- sieve.CheckMigrationState()
- self.assertEqual(sieve._state,
- {chromiumsync.TOP_LEVEL: 15412,
- chromiumsync.AUTOFILL: 15412,
- chromiumsync.THEME: 15413})
-
- migrator.Bump([chromiumsync.AUTOFILL, chromiumsync.PASSWORD]) # v=3
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- try:
- sieve.CheckMigrationState()
- self.fail('Should have raised.')
- except chromiumsync.MigrationDoneError, error:
- # We want this to happen.
- self.assertEqual([chromiumsync.AUTOFILL], error.datatypes)
-
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = ''
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = pickle.dumps((15413, 1))
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- sieve.CheckMigrationState()
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(15412, response) # There were updates
- self.assertEqual(1, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
- self.assertEqual(pickle.loads(marker.token), (15412, 3))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = pickle.dumps((15412, 3))
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = pickle.dumps((15413, 1))
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- sieve.CheckMigrationState()
-
- migrator.Bump([chromiumsync.THEME, chromiumsync.AUTOFILL]) # v=4
- migrator.Bump([chromiumsync.AUTOFILL]) # v=5
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- try:
- sieve.CheckMigrationState()
- self.fail("Should have raised.")
- except chromiumsync.MigrationDoneError, error:
- # We want this to happen.
- self.assertEqual(set([chromiumsync.THEME, chromiumsync.AUTOFILL]),
- set(error.datatypes))
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = ''
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = pickle.dumps((15413, 1))
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- try:
- sieve.CheckMigrationState()
- self.fail("Should have raised.")
- except chromiumsync.MigrationDoneError, error:
- # We want this to happen.
- self.assertEqual([chromiumsync.THEME], error.datatypes)
-
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = ''
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = ''
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- sieve.CheckMigrationState()
- response = sync_pb2.GetUpdatesResponse()
- sieve.SaveProgress(15412, response) # There were updates
- self.assertEqual(2, len(response.new_progress_marker))
- self.assertFalse(response.HasField('new_timestamp'))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
- self.assertEqual(pickle.loads(marker.token), (15412, 5))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
- self.assertEqual(pickle.loads(marker.token), (15412, 4))
- self.assertFalse(marker.HasField('timestamp_token_for_migration'))
- msg = sync_pb2.GetUpdatesMessage()
- marker = msg.from_progress_marker.add()
- marker.data_type_id = autofill.number
- marker.token = pickle.dumps((15412, 5))
- marker = msg.from_progress_marker.add()
- marker.data_type_id = theme.number
- marker.token = pickle.dumps((15413, 4))
- sieve = chromiumsync.UpdateSieve(msg, migrator)
- sieve.CheckMigrationState()
-
- def testCreateSyncedBookmarks(self):
- version1, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0))
- id_string = self.model._MakeCurrentId(chromiumsync.BOOKMARK,
- '<server tag>synced_bookmarks')
- self.assertFalse(self.model._ItemExists(id_string))
- self._expect_synced_bookmarks_folder = True
- self.model.TriggerCreateSyncedBookmarks()
- self.assertTrue(self.model._ItemExists(id_string))
-
- # Check that the version changed when the folder was created and the only
- # change was the folder creation.
- version2, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], version1))
- self.assertEqual(len(changes), 1)
- self.assertEqual(changes[0].id_string, id_string)
- self.assertNotEqual(version1, version2)
- self.assertEqual(
- self.ExpectedPermanentItemCount(chromiumsync.BOOKMARK),
- version2)
-
- # Ensure getting from timestamp 0 includes the folder.
- version, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0))
- self.assertEqual(
- self.ExpectedPermanentItemCount(chromiumsync.BOOKMARK),
- len(changes))
- self.assertEqual(version2, version)
-
- def testAcknowledgeManagedUser(self):
- # Create permanent items.
- self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0)
- proto = sync_pb2.SyncEntity()
- proto.id_string = 'abcd'
- proto.version = 0
-
- # Make sure the managed_user field exists.
- proto.specifics.managed_user.acknowledged = False
- self.assertTrue(proto.specifics.HasField('managed_user'))
- self.AddToModel(proto)
- version1, changes1, remaining1 = (
- self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0))
- for change in changes1:
- self.assertTrue(not change.specifics.managed_user.acknowledged)
-
- # Turn on managed user acknowledgement
- self.model.acknowledge_managed_users = True
-
- version2, changes2, remaining2 = (
- self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0))
- for change in changes2:
- self.assertTrue(change.specifics.managed_user.acknowledged)
-
- def testGetKey(self):
- [key1] = self.model.GetKeystoreKeys()
- [key2] = self.model.GetKeystoreKeys()
- self.assertTrue(len(key1))
- self.assertEqual(key1, key2)
-
- # Trigger the rotation. A subsequent GetUpdates should return the nigori
- # node (whose timestamp was bumped by the rotation).
- version1, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.NIGORI], 0))
- self.model.TriggerRotateKeystoreKeys()
- version2, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.NIGORI], version1))
- self.assertNotEqual(version1, version2)
- self.assertEquals(len(changes), 1)
- self.assertEquals(changes[0].name, "Nigori")
-
- # The current keys should contain the old keys, with the new key appended.
- [key1, key3] = self.model.GetKeystoreKeys()
- self.assertEquals(key1, key2)
- self.assertNotEqual(key1, key3)
- self.assertTrue(len(key3) > 0)
-
- def testTriggerEnableKeystoreEncryption(self):
- version1, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.EXPERIMENTS], 0))
- keystore_encryption_id_string = (
- self.model._ClientTagToId(
- chromiumsync.EXPERIMENTS,
- chromiumsync.KEYSTORE_ENCRYPTION_EXPERIMENT_TAG))
-
- self.assertFalse(self.model._ItemExists(keystore_encryption_id_string))
- self.model.TriggerEnableKeystoreEncryption()
- self.assertTrue(self.model._ItemExists(keystore_encryption_id_string))
-
- # The creation of the experiment should be downloaded on the next
- # GetUpdates.
- version2, changes, remaining = (
- self.GetChangesFromTimestamp([chromiumsync.EXPERIMENTS], version1))
- self.assertEqual(len(changes), 1)
- self.assertEqual(changes[0].id_string, keystore_encryption_id_string)
- self.assertNotEqual(version1, version2)
-
- # Verify the experiment was created properly and is enabled.
- self.assertEqual(chromiumsync.KEYSTORE_ENCRYPTION_EXPERIMENT_TAG,
- changes[0].client_defined_unique_tag)
- self.assertTrue(changes[0].HasField("specifics"))
- self.assertTrue(changes[0].specifics.HasField("experiments"))
- self.assertTrue(
- changes[0].specifics.experiments.HasField("keystore_encryption"))
- self.assertTrue(
- changes[0].specifics.experiments.keystore_encryption.enabled)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/sync/tools/testserver/run_sync_testserver.cc b/chromium/sync/tools/testserver/run_sync_testserver.cc
deleted file mode 100644
index 00899634fae..00000000000
--- a/chromium/sync/tools/testserver/run_sync_testserver.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdio.h>
-
-#include "base/at_exit.h"
-#include "base/command_line.h"
-#include "base/files/file_path.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/process/launch.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/test/test_timeouts.h"
-#include "net/test/python_utils.h"
-#include "sync/test/local_sync_test_server.h"
-
-static void PrintUsage() {
- printf("run_sync_testserver [--port=<port>] [--xmpp-port=<xmpp_port>]\n");
-}
-
-// Launches the chromiumsync_test.py or xmppserver_test.py scripts, which test
-// the sync HTTP and XMPP sever functionality respectively.
-static bool RunSyncTest(
- const base::FilePath::StringType& sync_test_script_name) {
- scoped_ptr<syncer::LocalSyncTestServer> test_server(
- new syncer::LocalSyncTestServer());
- if (!test_server->SetPythonPath()) {
- LOG(ERROR) << "Error trying to set python path. Exiting.";
- return false;
- }
-
- base::FilePath sync_test_script_path;
- if (!test_server->GetTestScriptPath(sync_test_script_name,
- &sync_test_script_path)) {
- LOG(ERROR) << "Error trying to get path for test script "
- << sync_test_script_name;
- return false;
- }
-
- CommandLine python_command(CommandLine::NO_PROGRAM);
- if (!GetPythonCommand(&python_command)) {
- LOG(ERROR) << "Could not get python runtime command.";
- return false;
- }
-
- python_command.AppendArgPath(sync_test_script_path);
- if (!base::LaunchProcess(python_command, base::LaunchOptions(), NULL)) {
- LOG(ERROR) << "Failed to launch test script " << sync_test_script_name;
- return false;
- }
- return true;
-}
-
-// Gets a port value from the switch with name |switch_name| and writes it to
-// |port|. Returns true if a port was provided and false otherwise.
-static bool GetPortFromSwitch(const std::string& switch_name, uint16* port) {
- DCHECK(port != NULL) << "|port| is NULL";
- CommandLine* command_line = CommandLine::ForCurrentProcess();
- int port_int = 0;
- if (command_line->HasSwitch(switch_name)) {
- std::string port_str = command_line->GetSwitchValueASCII(switch_name);
- if (!base::StringToInt(port_str, &port_int)) {
- return false;
- }
- }
- *port = static_cast<uint16>(port_int);
- return true;
-}
-
-int main(int argc, const char* argv[]) {
- base::AtExitManager at_exit_manager;
- base::MessageLoopForIO message_loop;
-
- // Process command line
- CommandLine::Init(argc, argv);
- CommandLine* command_line = CommandLine::ForCurrentProcess();
-
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_ALL;
- settings.log_file = FILE_PATH_LITERAL("sync_testserver.log");
- if (!logging::InitLogging(settings)) {
- printf("Error: could not initialize logging. Exiting.\n");
- return -1;
- }
-
- TestTimeouts::Initialize();
-
- if (command_line->HasSwitch("help")) {
- PrintUsage();
- return 0;
- }
-
- if (command_line->HasSwitch("sync-test")) {
- return RunSyncTest(FILE_PATH_LITERAL("chromiumsync_test.py")) ? 0 : -1;
- }
-
- if (command_line->HasSwitch("xmpp-test")) {
- return RunSyncTest(FILE_PATH_LITERAL("xmppserver_test.py")) ? 0 : -1;
- }
-
- uint16 port = 0;
- GetPortFromSwitch("port", &port);
-
- uint16 xmpp_port = 0;
- GetPortFromSwitch("xmpp-port", &xmpp_port);
-
- scoped_ptr<syncer::LocalSyncTestServer> test_server(
- new syncer::LocalSyncTestServer(port, xmpp_port));
- if (!test_server->Start()) {
- printf("Error: failed to start python sync test server. Exiting.\n");
- return -1;
- }
-
- printf("Python sync test server running at %s (type ctrl+c to exit)\n",
- test_server->host_port_pair().ToString().c_str());
-
- message_loop.Run();
- return 0;
-}
diff --git a/chromium/sync/tools/testserver/sync_testserver.py b/chromium/sync/tools/testserver/sync_testserver.py
deleted file mode 100755
index 5954e012ca0..00000000000
--- a/chromium/sync/tools/testserver/sync_testserver.py
+++ /dev/null
@@ -1,614 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This is a python sync server used for testing Chrome Sync.
-
-By default, it listens on an ephemeral port and xmpp_port and sends the port
-numbers back to the originating process over a pipe. The originating process can
-specify an explicit port and xmpp_port if necessary.
-"""
-
-import asyncore
-import BaseHTTPServer
-import errno
-import os
-import select
-import socket
-import sys
-import urlparse
-
-import chromiumsync
-import echo_message
-import testserver_base
-import xmppserver
-
-
-class SyncHTTPServer(testserver_base.ClientRestrictingServerMixIn,
- testserver_base.BrokenPipeHandlerMixIn,
- testserver_base.StoppableHTTPServer):
- """An HTTP server that handles sync commands."""
-
- def __init__(self, server_address, xmpp_port, request_handler_class):
- testserver_base.StoppableHTTPServer.__init__(self,
- server_address,
- request_handler_class)
- self._sync_handler = chromiumsync.TestServer()
- self._xmpp_socket_map = {}
- self._xmpp_server = xmppserver.XmppServer(
- self._xmpp_socket_map, ('localhost', xmpp_port))
- self.xmpp_port = self._xmpp_server.getsockname()[1]
- self.authenticated = True
-
- def GetXmppServer(self):
- return self._xmpp_server
-
- def HandleCommand(self, query, raw_request):
- return self._sync_handler.HandleCommand(query, raw_request)
-
- def HandleRequestNoBlock(self):
- """Handles a single request.
-
- Copied from SocketServer._handle_request_noblock().
- """
-
- try:
- request, client_address = self.get_request()
- except socket.error:
- return
- if self.verify_request(request, client_address):
- try:
- self.process_request(request, client_address)
- except Exception:
- self.handle_error(request, client_address)
- self.close_request(request)
-
- def SetAuthenticated(self, auth_valid):
- self.authenticated = auth_valid
-
- def GetAuthenticated(self):
- return self.authenticated
-
- def serve_forever(self):
- """This is a merge of asyncore.loop() and SocketServer.serve_forever().
- """
-
- def HandleXmppSocket(fd, socket_map, handler):
- """Runs the handler for the xmpp connection for fd.
-
- Adapted from asyncore.read() et al.
- """
-
- xmpp_connection = socket_map.get(fd)
- # This could happen if a previous handler call caused fd to get
- # removed from socket_map.
- if xmpp_connection is None:
- return
- try:
- handler(xmpp_connection)
- except (asyncore.ExitNow, KeyboardInterrupt, SystemExit):
- raise
- except:
- xmpp_connection.handle_error()
-
- while True:
- read_fds = [ self.fileno() ]
- write_fds = []
- exceptional_fds = []
-
- for fd, xmpp_connection in self._xmpp_socket_map.items():
- is_r = xmpp_connection.readable()
- is_w = xmpp_connection.writable()
- if is_r:
- read_fds.append(fd)
- if is_w:
- write_fds.append(fd)
- if is_r or is_w:
- exceptional_fds.append(fd)
-
- try:
- read_fds, write_fds, exceptional_fds = (
- select.select(read_fds, write_fds, exceptional_fds))
- except select.error, err:
- if err.args[0] != errno.EINTR:
- raise
- else:
- continue
-
- for fd in read_fds:
- if fd == self.fileno():
- self.HandleRequestNoBlock()
- continue
- HandleXmppSocket(fd, self._xmpp_socket_map,
- asyncore.dispatcher.handle_read_event)
-
- for fd in write_fds:
- HandleXmppSocket(fd, self._xmpp_socket_map,
- asyncore.dispatcher.handle_write_event)
-
- for fd in exceptional_fds:
- HandleXmppSocket(fd, self._xmpp_socket_map,
- asyncore.dispatcher.handle_expt_event)
-
-
-class SyncPageHandler(testserver_base.BasePageHandler):
- """Handler for the main HTTP sync server."""
-
- def __init__(self, request, client_address, sync_http_server):
- get_handlers = [self.ChromiumSyncTimeHandler,
- self.ChromiumSyncMigrationOpHandler,
- self.ChromiumSyncCredHandler,
- self.ChromiumSyncXmppCredHandler,
- self.ChromiumSyncDisableNotificationsOpHandler,
- self.ChromiumSyncEnableNotificationsOpHandler,
- self.ChromiumSyncSendNotificationOpHandler,
- self.ChromiumSyncBirthdayErrorOpHandler,
- self.ChromiumSyncTransientErrorOpHandler,
- self.ChromiumSyncErrorOpHandler,
- self.ChromiumSyncSyncTabFaviconsOpHandler,
- self.ChromiumSyncCreateSyncedBookmarksOpHandler,
- self.ChromiumSyncEnableKeystoreEncryptionOpHandler,
- self.ChromiumSyncRotateKeystoreKeysOpHandler,
- self.ChromiumSyncEnableManagedUserAcknowledgementHandler,
- self.ChromiumSyncEnablePreCommitGetUpdateAvoidanceHandler,
- self.GaiaOAuth2TokenHandler,
- self.GaiaSetOAuth2TokenResponseHandler,
- self.TriggerSyncedNotificationHandler,
- self.SyncedNotificationsPageHandler,
- self.CustomizeClientCommandHandler]
-
- post_handlers = [self.ChromiumSyncCommandHandler,
- self.ChromiumSyncTimeHandler,
- self.GaiaOAuth2TokenHandler,
- self.GaiaSetOAuth2TokenResponseHandler]
- testserver_base.BasePageHandler.__init__(self, request, client_address,
- sync_http_server, [], get_handlers,
- [], post_handlers, [])
-
-
- def ChromiumSyncTimeHandler(self):
- """Handle Chromium sync .../time requests.
-
- The syncer sometimes checks server reachability by examining /time.
- """
-
- test_name = "/chromiumsync/time"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- # Chrome hates it if we send a response before reading the request.
- if self.headers.getheader('content-length'):
- length = int(self.headers.getheader('content-length'))
- _raw_request = self.rfile.read(length)
-
- self.send_response(200)
- self.send_header('Content-Type', 'text/plain')
- self.end_headers()
- self.wfile.write('0123456789')
- return True
-
- def ChromiumSyncCommandHandler(self):
- """Handle a chromiumsync command arriving via http.
-
- This covers all sync protocol commands: authentication, getupdates, and
- commit.
- """
-
- test_name = "/chromiumsync/command"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- length = int(self.headers.getheader('content-length'))
- raw_request = self.rfile.read(length)
- http_response = 200
- raw_reply = None
- if not self.server.GetAuthenticated():
- http_response = 401
- challenge = 'GoogleLogin realm="http://%s", service="chromiumsync"' % (
- self.server.server_address[0])
- else:
- http_response, raw_reply = self.server.HandleCommand(
- self.path, raw_request)
-
- ### Now send the response to the client. ###
- self.send_response(http_response)
- if http_response == 401:
- self.send_header('www-Authenticate', challenge)
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncMigrationOpHandler(self):
- test_name = "/chromiumsync/migrate"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- http_response, raw_reply = self.server._sync_handler.HandleMigrate(
- self.path)
- self.send_response(http_response)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncCredHandler(self):
- test_name = "/chromiumsync/cred"
- if not self._ShouldHandleRequest(test_name):
- return False
- try:
- query = urlparse.urlparse(self.path)[4]
- cred_valid = urlparse.parse_qs(query)['valid']
- if cred_valid[0] == 'True':
- self.server.SetAuthenticated(True)
- else:
- self.server.SetAuthenticated(False)
- except Exception:
- self.server.SetAuthenticated(False)
-
- http_response = 200
- raw_reply = 'Authenticated: %s ' % self.server.GetAuthenticated()
- self.send_response(http_response)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncXmppCredHandler(self):
- test_name = "/chromiumsync/xmppcred"
- if not self._ShouldHandleRequest(test_name):
- return False
- xmpp_server = self.server.GetXmppServer()
- try:
- query = urlparse.urlparse(self.path)[4]
- cred_valid = urlparse.parse_qs(query)['valid']
- if cred_valid[0] == 'True':
- xmpp_server.SetAuthenticated(True)
- else:
- xmpp_server.SetAuthenticated(False)
- except:
- xmpp_server.SetAuthenticated(False)
-
- http_response = 200
- raw_reply = 'XMPP Authenticated: %s ' % xmpp_server.GetAuthenticated()
- self.send_response(http_response)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncDisableNotificationsOpHandler(self):
- test_name = "/chromiumsync/disablenotifications"
- if not self._ShouldHandleRequest(test_name):
- return False
- self.server.GetXmppServer().DisableNotifications()
- result = 200
- raw_reply = ('<html><title>Notifications disabled</title>'
- '<H1>Notifications disabled</H1></html>')
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncEnableNotificationsOpHandler(self):
- test_name = "/chromiumsync/enablenotifications"
- if not self._ShouldHandleRequest(test_name):
- return False
- self.server.GetXmppServer().EnableNotifications()
- result = 200
- raw_reply = ('<html><title>Notifications enabled</title>'
- '<H1>Notifications enabled</H1></html>')
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncSendNotificationOpHandler(self):
- test_name = "/chromiumsync/sendnotification"
- if not self._ShouldHandleRequest(test_name):
- return False
- query = urlparse.urlparse(self.path)[4]
- query_params = urlparse.parse_qs(query)
- channel = ''
- data = ''
- if 'channel' in query_params:
- channel = query_params['channel'][0]
- if 'data' in query_params:
- data = query_params['data'][0]
- self.server.GetXmppServer().SendNotification(channel, data)
- result = 200
- raw_reply = ('<html><title>Notification sent</title>'
- '<H1>Notification sent with channel "%s" '
- 'and data "%s"</H1></html>'
- % (channel, data))
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncBirthdayErrorOpHandler(self):
- test_name = "/chromiumsync/birthdayerror"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = self.server._sync_handler.HandleCreateBirthdayError()
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncTransientErrorOpHandler(self):
- test_name = "/chromiumsync/transienterror"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = self.server._sync_handler.HandleSetTransientError()
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncErrorOpHandler(self):
- test_name = "/chromiumsync/error"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = self.server._sync_handler.HandleSetInducedError(
- self.path)
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncSyncTabFaviconsOpHandler(self):
- test_name = "/chromiumsync/synctabfavicons"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = self.server._sync_handler.HandleSetSyncTabFavicons()
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncCreateSyncedBookmarksOpHandler(self):
- test_name = "/chromiumsync/createsyncedbookmarks"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = self.server._sync_handler.HandleCreateSyncedBookmarks()
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncEnableKeystoreEncryptionOpHandler(self):
- test_name = "/chromiumsync/enablekeystoreencryption"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = (
- self.server._sync_handler.HandleEnableKeystoreEncryption())
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncRotateKeystoreKeysOpHandler(self):
- test_name = "/chromiumsync/rotatekeystorekeys"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = (
- self.server._sync_handler.HandleRotateKeystoreKeys())
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncEnableManagedUserAcknowledgementHandler(self):
- test_name = "/chromiumsync/enablemanageduseracknowledgement"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = (
- self.server._sync_handler.HandleEnableManagedUserAcknowledgement())
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def ChromiumSyncEnablePreCommitGetUpdateAvoidanceHandler(self):
- test_name = "/chromiumsync/enableprecommitgetupdateavoidance"
- if not self._ShouldHandleRequest(test_name):
- return False
- result, raw_reply = (
- self.server._sync_handler.HandleEnablePreCommitGetUpdateAvoidance())
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def GaiaOAuth2TokenHandler(self):
- test_name = "/o/oauth2/token"
- if not self._ShouldHandleRequest(test_name):
- return False
- if self.headers.getheader('content-length'):
- length = int(self.headers.getheader('content-length'))
- _raw_request = self.rfile.read(length)
- result, raw_reply = (
- self.server._sync_handler.HandleGetOauth2Token())
- self.send_response(result)
- self.send_header('Content-Type', 'application/json')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def GaiaSetOAuth2TokenResponseHandler(self):
- test_name = "/setfakeoauth2token"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- # The index of 'query' is 4.
- # See http://docs.python.org/2/library/urlparse.html
- query = urlparse.urlparse(self.path)[4]
- query_params = urlparse.parse_qs(query)
-
- response_code = 0
- request_token = ''
- access_token = ''
- expires_in = 0
- token_type = ''
-
- if 'response_code' in query_params:
- response_code = query_params['response_code'][0]
- if 'request_token' in query_params:
- request_token = query_params['request_token'][0]
- if 'access_token' in query_params:
- access_token = query_params['access_token'][0]
- if 'expires_in' in query_params:
- expires_in = query_params['expires_in'][0]
- if 'token_type' in query_params:
- token_type = query_params['token_type'][0]
-
- result, raw_reply = (
- self.server._sync_handler.HandleSetOauth2Token(
- response_code, request_token, access_token, expires_in, token_type))
- self.send_response(result)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(raw_reply))
- self.end_headers()
- self.wfile.write(raw_reply)
- return True
-
- def TriggerSyncedNotificationHandler(self):
- test_name = "/triggersyncednotification"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- query = urlparse.urlparse(self.path)[4]
- query_params = urlparse.parse_qs(query)
-
- serialized_notification = ''
-
- if 'serialized_notification' in query_params:
- serialized_notification = query_params['serialized_notification'][0]
-
- try:
- notification_string = self.server._sync_handler.account \
- .AddSyncedNotification(serialized_notification)
- reply = "A synced notification was triggered:\n\n"
- reply += "<code>{}</code>.".format(notification_string)
- response_code = 200
- except chromiumsync.ClientNotConnectedError:
- reply = ('The client is not connected to the server, so the notification'
- ' could not be created.')
- response_code = 400
-
- self.send_response(response_code)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(reply))
- self.end_headers()
- self.wfile.write(reply)
- return True
-
- def CustomizeClientCommandHandler(self):
- test_name = "/customizeclientcommand"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- query = urlparse.urlparse(self.path)[4]
- query_params = urlparse.parse_qs(query)
-
- if 'sessions_commit_delay_seconds' in query_params:
- sessions_commit_delay = query_params['sessions_commit_delay_seconds'][0]
- try:
- command_string = self.server._sync_handler.CustomizeClientCommand(
- int(sessions_commit_delay))
- response_code = 200
- reply = "The ClientCommand was customized:\n\n"
- reply += "<code>{}</code>.".format(command_string)
- except ValueError:
- response_code = 400
- reply = "sessions_commit_delay_seconds was not an int"
- else:
- response_code = 400
- reply = "sessions_commit_delay_seconds is required"
-
- self.send_response(response_code)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(reply))
- self.end_headers()
- self.wfile.write(reply)
- return True
-
- def SyncedNotificationsPageHandler(self):
- test_name = "/syncednotifications"
- if not self._ShouldHandleRequest(test_name):
- return False
-
- html = open('sync/tools/testserver/synced_notifications.html', 'r').read()
-
- self.send_response(200)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Content-Length', len(html))
- self.end_headers()
- self.wfile.write(html)
- return True
-
-
-class SyncServerRunner(testserver_base.TestServerRunner):
- """TestServerRunner for the net test servers."""
-
- def __init__(self):
- super(SyncServerRunner, self).__init__()
-
- def create_server(self, server_data):
- port = self.options.port
- host = self.options.host
- xmpp_port = self.options.xmpp_port
- server = SyncHTTPServer((host, port), xmpp_port, SyncPageHandler)
- print ('Sync HTTP server started at %s:%d/chromiumsync...' %
- (host, server.server_port))
- print ('Fake OAuth2 Token server started at %s:%d/o/oauth2/token...' %
- (host, server.server_port))
- print ('Sync XMPP server started at %s:%d...' %
- (host, server.xmpp_port))
- server_data['port'] = server.server_port
- server_data['xmpp_port'] = server.xmpp_port
- return server
-
- def run_server(self):
- testserver_base.TestServerRunner.run_server(self)
-
- def add_options(self):
- testserver_base.TestServerRunner.add_options(self)
- self.option_parser.add_option('--xmpp-port', default='0', type='int',
- help='Port used by the XMPP server. If '
- 'unspecified, the XMPP server will listen on '
- 'an ephemeral port.')
- # Override the default logfile name used in testserver.py.
- self.option_parser.set_defaults(log_file='sync_testserver.log')
-
-if __name__ == '__main__':
- sys.exit(SyncServerRunner().main())
diff --git a/chromium/sync/tools/testserver/synced_notifications.html b/chromium/sync/tools/testserver/synced_notifications.html
deleted file mode 100644
index c06f80b2036..00000000000
--- a/chromium/sync/tools/testserver/synced_notifications.html
+++ /dev/null
@@ -1,51 +0,0 @@
-<html>
- <head>
- <title>Synced notifications</title>
-
- <script type="text/javascript">
- // Creates link (appended to the bottom of the page body) to trigger a
- // synced notifications. The link's title will be |title| and
- // |serialized_notification| is the ASCII-serialized version of the
- // CoalescedSyncedNotification to be triggered.
- function appendNotificationLink(title, serialized_notification) {
- var link = document.createElement('a');
- link.innerHTML = title;
- link.setAttribute('target', '_blank');
- link.setAttribute('href', 'triggersyncednotification?' +
- 'serialized_notification=' +
- encodeURIComponent(serialized_notification));
- document.body.appendChild(link);
- }
- </script>
- </head>
-
- <body>
- <h1>Synced notifications</h1>
-
- <h2>Step 0: Sign in to the browser and set up Sync</h2>
-
- <h2>Step 1: Click this link (only required once per server lifetime)</h2>
-
- <a href="/customizeclientcommand?sessions_commit_delay_seconds=0">
- Make notifications triggering instant</a>
-
- <h2>Step 2: Ctrl-Click the links below to trigger synced notifications</h2>
-
- <script type="text/javascript">
- appendNotificationLink('Simple notification',
- 'key: \"foo\"\n' +
- 'priority: 2\n' +
- 'read_state: 1\n' +
- 'render_info {\n' +
- ' collapsed_info {\n' +
- ' creation_timestamp_usec: 42\n' +
- ' simple_collapsed_layout {\n' +
- ' annotation: \"Space Needle, 12:00 pm\"\n' +
- ' description: \"Space Needle, 12:00 pm\"\n' +
- ' heading: \"New appointment\"\n' +
- ' }\n' +
- ' }\n' +
- '}');
- </script>
- </body>
-</html>
diff --git a/chromium/sync/tools/testserver/xmppserver.py b/chromium/sync/tools/testserver/xmppserver.py
deleted file mode 100644
index 3f7c7d05a76..00000000000
--- a/chromium/sync/tools/testserver/xmppserver.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A bare-bones and non-compliant XMPP server.
-
-Just enough of the protocol is implemented to get it to work with
-Chrome's sync notification system.
-"""
-
-import asynchat
-import asyncore
-import base64
-import re
-import socket
-from xml.dom import minidom
-
-# pychecker complains about the use of fileno(), which is implemented
-# by asyncore by forwarding to an internal object via __getattr__.
-__pychecker__ = 'no-classattr'
-
-
-class Error(Exception):
- """Error class for this module."""
- pass
-
-
-class UnexpectedXml(Error):
- """Raised when an unexpected XML element has been encountered."""
-
- def __init__(self, xml_element):
- xml_text = xml_element.toxml()
- Error.__init__(self, 'Unexpected XML element', xml_text)
-
-
-def ParseXml(xml_string):
- """Parses the given string as XML and returns a minidom element
- object.
- """
- dom = minidom.parseString(xml_string)
-
- # minidom handles xmlns specially, but there's a bug where it sets
- # the attribute value to None, which causes toxml() or toprettyxml()
- # to break.
- def FixMinidomXmlnsBug(xml_element):
- if xml_element.getAttribute('xmlns') is None:
- xml_element.setAttribute('xmlns', '')
-
- def ApplyToAllDescendantElements(xml_element, fn):
- fn(xml_element)
- for node in xml_element.childNodes:
- if node.nodeType == node.ELEMENT_NODE:
- ApplyToAllDescendantElements(node, fn)
-
- root = dom.documentElement
- ApplyToAllDescendantElements(root, FixMinidomXmlnsBug)
- return root
-
-
-def CloneXml(xml):
- """Returns a deep copy of the given XML element.
-
- Args:
- xml: The XML element, which should be something returned from
- ParseXml() (i.e., a root element).
- """
- return xml.ownerDocument.cloneNode(True).documentElement
-
-
-class StanzaParser(object):
- """A hacky incremental XML parser.
-
- StanzaParser consumes data incrementally via FeedString() and feeds
- its delegate complete parsed stanzas (i.e., XML documents) via
- FeedStanza(). Any stanzas passed to FeedStanza() are unlinked after
- the callback is done.
-
- Use like so:
-
- class MyClass(object):
- ...
- def __init__(self, ...):
- ...
- self._parser = StanzaParser(self)
- ...
-
- def SomeFunction(self, ...):
- ...
- self._parser.FeedString(some_data)
- ...
-
- def FeedStanza(self, stanza):
- ...
- print stanza.toprettyxml()
- ...
- """
-
- # NOTE(akalin): The following regexps are naive, but necessary since
- # none of the existing Python 2.4/2.5 XML libraries support
- # incremental parsing. This works well enough for our purposes.
- #
- # The regexps below assume that any present XML element starts at
- # the beginning of the string, but there may be trailing whitespace.
-
- # Matches an opening stream tag (e.g., '<stream:stream foo="bar">')
- # (assumes that the stream XML namespace is defined in the tag).
- _stream_re = re.compile(r'^(<stream:stream [^>]*>)\s*')
-
- # Matches an empty element tag (e.g., '<foo bar="baz"/>').
- _empty_element_re = re.compile(r'^(<[^>]*/>)\s*')
-
- # Matches a non-empty element (e.g., '<foo bar="baz">quux</foo>').
- # Does *not* handle nested elements.
- _non_empty_element_re = re.compile(r'^(<([^ >]*)[^>]*>.*?</\2>)\s*')
-
- # The closing tag for a stream tag. We have to insert this
- # ourselves since all XML stanzas are children of the stream tag,
- # which is never closed until the connection is closed.
- _stream_suffix = '</stream:stream>'
-
- def __init__(self, delegate):
- self._buffer = ''
- self._delegate = delegate
-
- def FeedString(self, data):
- """Consumes the given string data, possibly feeding one or more
- stanzas to the delegate.
- """
- self._buffer += data
- while (self._ProcessBuffer(self._stream_re, self._stream_suffix) or
- self._ProcessBuffer(self._empty_element_re) or
- self._ProcessBuffer(self._non_empty_element_re)):
- pass
-
- def _ProcessBuffer(self, regexp, xml_suffix=''):
- """If the buffer matches the given regexp, removes the match from
- the buffer, appends the given suffix, parses it, and feeds it to
- the delegate.
-
- Returns:
- Whether or not the buffer matched the given regexp.
- """
- results = regexp.match(self._buffer)
- if not results:
- return False
- xml_text = self._buffer[:results.end()] + xml_suffix
- self._buffer = self._buffer[results.end():]
- stanza = ParseXml(xml_text)
- self._delegate.FeedStanza(stanza)
- # Needed because stanza may have cycles.
- stanza.unlink()
- return True
-
-
-class Jid(object):
- """Simple struct for an XMPP jid (essentially an e-mail address with
- an optional resource string).
- """
-
- def __init__(self, username, domain, resource=''):
- self.username = username
- self.domain = domain
- self.resource = resource
-
- def __str__(self):
- jid_str = "%s@%s" % (self.username, self.domain)
- if self.resource:
- jid_str += '/' + self.resource
- return jid_str
-
- def GetBareJid(self):
- return Jid(self.username, self.domain)
-
-
-class IdGenerator(object):
- """Simple class to generate unique IDs for XMPP messages."""
-
- def __init__(self, prefix):
- self._prefix = prefix
- self._id = 0
-
- def GetNextId(self):
- next_id = "%s.%s" % (self._prefix, self._id)
- self._id += 1
- return next_id
-
-
-class HandshakeTask(object):
- """Class to handle the initial handshake with a connected XMPP
- client.
- """
-
- # The handshake states in order.
- (_INITIAL_STREAM_NEEDED,
- _AUTH_NEEDED,
- _AUTH_STREAM_NEEDED,
- _BIND_NEEDED,
- _SESSION_NEEDED,
- _FINISHED) = range(6)
-
- # Used when in the _INITIAL_STREAM_NEEDED and _AUTH_STREAM_NEEDED
- # states. Not an XML object as it's only the opening tag.
- #
- # The from and id attributes are filled in later.
- _STREAM_DATA = (
- '<stream:stream from="%s" id="%s" '
- 'version="1.0" xmlns:stream="http://etherx.jabber.org/streams" '
- 'xmlns="jabber:client">')
-
- # Used when in the _INITIAL_STREAM_NEEDED state.
- _AUTH_STANZA = ParseXml(
- '<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
- ' <mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">'
- ' <mechanism>PLAIN</mechanism>'
- ' <mechanism>X-GOOGLE-TOKEN</mechanism>'
- ' <mechanism>X-OAUTH2</mechanism>'
- ' </mechanisms>'
- '</stream:features>')
-
- # Used when in the _AUTH_NEEDED state.
- _AUTH_SUCCESS_STANZA = ParseXml(
- '<success xmlns="urn:ietf:params:xml:ns:xmpp-sasl"/>')
-
- # Used when in the _AUTH_NEEDED state.
- _AUTH_FAILURE_STANZA = ParseXml(
- '<failure xmlns="urn:ietf:params:xml:ns:xmpp-sasl"/>')
-
- # Used when in the _AUTH_STREAM_NEEDED state.
- _BIND_STANZA = ParseXml(
- '<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
- ' <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind"/>'
- ' <session xmlns="urn:ietf:params:xml:ns:xmpp-session"/>'
- '</stream:features>')
-
- # Used when in the _BIND_NEEDED state.
- #
- # The id and jid attributes are filled in later.
- _BIND_RESULT_STANZA = ParseXml(
- '<iq id="" type="result">'
- ' <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">'
- ' <jid/>'
- ' </bind>'
- '</iq>')
-
- # Used when in the _SESSION_NEEDED state.
- #
- # The id attribute is filled in later.
- _IQ_RESPONSE_STANZA = ParseXml('<iq id="" type="result"/>')
-
- def __init__(self, connection, resource_prefix, authenticated):
- self._connection = connection
- self._id_generator = IdGenerator(resource_prefix)
- self._username = ''
- self._domain = ''
- self._jid = None
- self._authenticated = authenticated
- self._resource_prefix = resource_prefix
- self._state = self._INITIAL_STREAM_NEEDED
-
- def FeedStanza(self, stanza):
- """Inspects the given stanza and changes the handshake state if needed.
-
- Called when a stanza is received from the client. Inspects the
- stanza to make sure it has the expected attributes given the
- current state, advances the state if needed, and sends a reply to
- the client if needed.
- """
- def ExpectStanza(stanza, name):
- if stanza.tagName != name:
- raise UnexpectedXml(stanza)
-
- def ExpectIq(stanza, type, name):
- ExpectStanza(stanza, 'iq')
- if (stanza.getAttribute('type') != type or
- stanza.firstChild.tagName != name):
- raise UnexpectedXml(stanza)
-
- def GetStanzaId(stanza):
- return stanza.getAttribute('id')
-
- def HandleStream(stanza):
- ExpectStanza(stanza, 'stream:stream')
- domain = stanza.getAttribute('to')
- if domain:
- self._domain = domain
- SendStreamData()
-
- def SendStreamData():
- next_id = self._id_generator.GetNextId()
- stream_data = self._STREAM_DATA % (self._domain, next_id)
- self._connection.SendData(stream_data)
-
- def GetUserDomain(stanza):
- encoded_username_password = stanza.firstChild.data
- username_password = base64.b64decode(encoded_username_password)
- (_, username_domain, _) = username_password.split('\0')
- # The domain may be omitted.
- #
- # If we were using python 2.5, we'd be able to do:
- #
- # username, _, domain = username_domain.partition('@')
- # if not domain:
- # domain = self._domain
- at_pos = username_domain.find('@')
- if at_pos != -1:
- username = username_domain[:at_pos]
- domain = username_domain[at_pos+1:]
- else:
- username = username_domain
- domain = self._domain
- return (username, domain)
-
- def Finish():
- self._state = self._FINISHED
- self._connection.HandshakeDone(self._jid)
-
- if self._state == self._INITIAL_STREAM_NEEDED:
- HandleStream(stanza)
- self._connection.SendStanza(self._AUTH_STANZA, False)
- self._state = self._AUTH_NEEDED
-
- elif self._state == self._AUTH_NEEDED:
- ExpectStanza(stanza, 'auth')
- (self._username, self._domain) = GetUserDomain(stanza)
- if self._authenticated:
- self._connection.SendStanza(self._AUTH_SUCCESS_STANZA, False)
- self._state = self._AUTH_STREAM_NEEDED
- else:
- self._connection.SendStanza(self._AUTH_FAILURE_STANZA, False)
- Finish()
-
- elif self._state == self._AUTH_STREAM_NEEDED:
- HandleStream(stanza)
- self._connection.SendStanza(self._BIND_STANZA, False)
- self._state = self._BIND_NEEDED
-
- elif self._state == self._BIND_NEEDED:
- ExpectIq(stanza, 'set', 'bind')
- stanza_id = GetStanzaId(stanza)
- resource_element = stanza.getElementsByTagName('resource')[0]
- resource = resource_element.firstChild.data
- full_resource = '%s.%s' % (self._resource_prefix, resource)
- response = CloneXml(self._BIND_RESULT_STANZA)
- response.setAttribute('id', stanza_id)
- self._jid = Jid(self._username, self._domain, full_resource)
- jid_text = response.parentNode.createTextNode(str(self._jid))
- response.getElementsByTagName('jid')[0].appendChild(jid_text)
- self._connection.SendStanza(response)
- self._state = self._SESSION_NEEDED
-
- elif self._state == self._SESSION_NEEDED:
- ExpectIq(stanza, 'set', 'session')
- stanza_id = GetStanzaId(stanza)
- xml = CloneXml(self._IQ_RESPONSE_STANZA)
- xml.setAttribute('id', stanza_id)
- self._connection.SendStanza(xml)
- Finish()
-
-
-def AddrString(addr):
- return '%s:%d' % addr
-
-
-class XmppConnection(asynchat.async_chat):
- """A single XMPP client connection.
-
- This class handles the connection to a single XMPP client (via a
- socket). It does the XMPP handshake and also implements the (old)
- Google notification protocol.
- """
-
- # Used for acknowledgements to the client.
- #
- # The from and id attributes are filled in later.
- _IQ_RESPONSE_STANZA = ParseXml('<iq from="" id="" type="result"/>')
-
- def __init__(self, sock, socket_map, delegate, addr, authenticated):
- """Starts up the xmpp connection.
-
- Args:
- sock: The socket to the client.
- socket_map: A map from sockets to their owning objects.
- delegate: The delegate, which is notified when the XMPP
- handshake is successful, when the connection is closed, and
- when a notification has to be broadcast.
- addr: The host/port of the client.
- """
- # We do this because in versions of python < 2.6,
- # async_chat.__init__ doesn't take a map argument nor pass it to
- # dispatcher.__init__. We rely on the fact that
- # async_chat.__init__ calls dispatcher.__init__ as the last thing
- # it does, and that calling dispatcher.__init__ with socket=None
- # and map=None is essentially a no-op.
- asynchat.async_chat.__init__(self)
- asyncore.dispatcher.__init__(self, sock, socket_map)
-
- self.set_terminator(None)
-
- self._delegate = delegate
- self._parser = StanzaParser(self)
- self._jid = None
-
- self._addr = addr
- addr_str = AddrString(self._addr)
- self._handshake_task = HandshakeTask(self, addr_str, authenticated)
- print 'Starting connection to %s' % self
-
- def __str__(self):
- if self._jid:
- return str(self._jid)
- else:
- return AddrString(self._addr)
-
- # async_chat implementation.
-
- def collect_incoming_data(self, data):
- self._parser.FeedString(data)
-
- # This is only here to make pychecker happy.
- def found_terminator(self):
- asynchat.async_chat.found_terminator(self)
-
- def close(self):
- print "Closing connection to %s" % self
- self._delegate.OnXmppConnectionClosed(self)
- asynchat.async_chat.close(self)
-
- # Called by self._parser.FeedString().
- def FeedStanza(self, stanza):
- if self._handshake_task:
- self._handshake_task.FeedStanza(stanza)
- elif stanza.tagName == 'iq' and stanza.getAttribute('type') == 'result':
- # Ignore all client acks.
- pass
- elif (stanza.firstChild and
- stanza.firstChild.namespaceURI == 'google:push'):
- self._HandlePushCommand(stanza)
- else:
- raise UnexpectedXml(stanza)
-
- # Called by self._handshake_task.
- def HandshakeDone(self, jid):
- if jid:
- self._jid = jid
- self._handshake_task = None
- self._delegate.OnXmppHandshakeDone(self)
- print "Handshake done for %s" % self
- else:
- print "Handshake failed for %s" % self
- self.close()
-
- def _HandlePushCommand(self, stanza):
- if stanza.tagName == 'iq' and stanza.firstChild.tagName == 'subscribe':
- # Subscription request.
- self._SendIqResponseStanza(stanza)
- elif stanza.tagName == 'message' and stanza.firstChild.tagName == 'push':
- # Send notification request.
- self._delegate.ForwardNotification(self, stanza)
- else:
- raise UnexpectedXml(command_xml)
-
- def _SendIqResponseStanza(self, iq):
- stanza = CloneXml(self._IQ_RESPONSE_STANZA)
- stanza.setAttribute('from', str(self._jid.GetBareJid()))
- stanza.setAttribute('id', iq.getAttribute('id'))
- self.SendStanza(stanza)
-
- def SendStanza(self, stanza, unlink=True):
- """Sends a stanza to the client.
-
- Args:
- stanza: The stanza to send.
- unlink: Whether to unlink stanza after sending it. (Pass in
- False if stanza is a constant.)
- """
- self.SendData(stanza.toxml())
- if unlink:
- stanza.unlink()
-
- def SendData(self, data):
- """Sends raw data to the client.
- """
- # We explicitly encode to ascii as that is what the client expects
- # (some minidom library functions return unicode strings).
- self.push(data.encode('ascii'))
-
- def ForwardNotification(self, notification_stanza):
- """Forwards a notification to the client."""
- notification_stanza.setAttribute('from', str(self._jid.GetBareJid()))
- notification_stanza.setAttribute('to', str(self._jid))
- self.SendStanza(notification_stanza, False)
-
-
-class XmppServer(asyncore.dispatcher):
- """The main XMPP server class.
-
- The XMPP server starts accepting connections on the given address
- and spawns off XmppConnection objects for each one.
-
- Use like so:
-
- socket_map = {}
- xmpp_server = xmppserver.XmppServer(socket_map, ('127.0.0.1', 5222))
- asyncore.loop(30.0, False, socket_map)
- """
-
- # Used when sending a notification.
- _NOTIFICATION_STANZA = ParseXml(
- '<message>'
- ' <push xmlns="google:push">'
- ' <data/>'
- ' </push>'
- '</message>')
-
- def __init__(self, socket_map, addr):
- asyncore.dispatcher.__init__(self, None, socket_map)
- self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
- self.set_reuse_addr()
- self.bind(addr)
- self.listen(5)
- self._socket_map = socket_map
- self._connections = set()
- self._handshake_done_connections = set()
- self._notifications_enabled = True
- self._authenticated = True
-
- def handle_accept(self):
- (sock, addr) = self.accept()
- xmpp_connection = XmppConnection(
- sock, self._socket_map, self, addr, self._authenticated)
- self._connections.add(xmpp_connection)
- # Return the new XmppConnection for testing.
- return xmpp_connection
-
- def close(self):
- # A copy is necessary since calling close on each connection
- # removes it from self._connections.
- for connection in self._connections.copy():
- connection.close()
- asyncore.dispatcher.close(self)
-
- def EnableNotifications(self):
- self._notifications_enabled = True
-
- def DisableNotifications(self):
- self._notifications_enabled = False
-
- def MakeNotification(self, channel, data):
- """Makes a notification from the given channel and encoded data.
-
- Args:
- channel: The channel on which to send the notification.
- data: The notification payload.
- """
- notification_stanza = CloneXml(self._NOTIFICATION_STANZA)
- push_element = notification_stanza.getElementsByTagName('push')[0]
- push_element.setAttribute('channel', channel)
- data_element = push_element.getElementsByTagName('data')[0]
- encoded_data = base64.b64encode(data)
- data_text = notification_stanza.parentNode.createTextNode(encoded_data)
- data_element.appendChild(data_text)
- return notification_stanza
-
- def SendNotification(self, channel, data):
- """Sends a notification to all connections.
-
- Args:
- channel: The channel on which to send the notification.
- data: The notification payload.
- """
- notification_stanza = self.MakeNotification(channel, data)
- self.ForwardNotification(None, notification_stanza)
- notification_stanza.unlink()
-
- def SetAuthenticated(self, auth_valid):
- self._authenticated = auth_valid
-
- # We check authentication only when establishing new connections. We close
- # all existing connections here to make sure previously connected clients
- # pick up on the change. It's a hack, but it works well enough for our
- # purposes.
- if not self._authenticated:
- for connection in self._handshake_done_connections:
- connection.close()
-
- def GetAuthenticated(self):
- return self._authenticated
-
- # XmppConnection delegate methods.
- def OnXmppHandshakeDone(self, xmpp_connection):
- self._handshake_done_connections.add(xmpp_connection)
-
- def OnXmppConnectionClosed(self, xmpp_connection):
- self._connections.discard(xmpp_connection)
- self._handshake_done_connections.discard(xmpp_connection)
-
- def ForwardNotification(self, unused_xmpp_connection, notification_stanza):
- if self._notifications_enabled:
- for connection in self._handshake_done_connections:
- print 'Sending notification to %s' % connection
- connection.ForwardNotification(notification_stanza)
- else:
- print 'Notifications disabled; dropping notification'
diff --git a/chromium/sync/tools/testserver/xmppserver_test.py b/chromium/sync/tools/testserver/xmppserver_test.py
deleted file mode 100755
index 1a539d1af99..00000000000
--- a/chromium/sync/tools/testserver/xmppserver_test.py
+++ /dev/null
@@ -1,421 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Tests exercising the various classes in xmppserver.py."""
-
-import unittest
-
-import base64
-import xmppserver
-
-class XmlUtilsTest(unittest.TestCase):
-
- def testParseXml(self):
- xml_text = """<foo xmlns=""><bar xmlns=""><baz/></bar></foo>"""
- xml = xmppserver.ParseXml(xml_text)
- self.assertEqual(xml.toxml(), xml_text)
-
- def testCloneXml(self):
- xml = xmppserver.ParseXml('<foo/>')
- xml_clone = xmppserver.CloneXml(xml)
- xml_clone.setAttribute('bar', 'baz')
- self.assertEqual(xml, xml)
- self.assertEqual(xml_clone, xml_clone)
- self.assertNotEqual(xml, xml_clone)
-
- def testCloneXmlUnlink(self):
- xml_text = '<foo/>'
- xml = xmppserver.ParseXml(xml_text)
- xml_clone = xmppserver.CloneXml(xml)
- xml.unlink()
- self.assertEqual(xml.parentNode, None)
- self.assertNotEqual(xml_clone.parentNode, None)
- self.assertEqual(xml_clone.toxml(), xml_text)
-
-class StanzaParserTest(unittest.TestCase):
-
- def setUp(self):
- self.stanzas = []
-
- def FeedStanza(self, stanza):
- # We can't append stanza directly because it is unlinked after
- # this callback.
- self.stanzas.append(stanza.toxml())
-
- def testBasic(self):
- parser = xmppserver.StanzaParser(self)
- parser.FeedString('<foo')
- self.assertEqual(len(self.stanzas), 0)
- parser.FeedString('/><bar></bar>')
- self.assertEqual(self.stanzas[0], '<foo/>')
- self.assertEqual(self.stanzas[1], '<bar/>')
-
- def testStream(self):
- parser = xmppserver.StanzaParser(self)
- parser.FeedString('<stream')
- self.assertEqual(len(self.stanzas), 0)
- parser.FeedString(':stream foo="bar" xmlns:stream="baz">')
- self.assertEqual(self.stanzas[0],
- '<stream:stream foo="bar" xmlns:stream="baz"/>')
-
- def testNested(self):
- parser = xmppserver.StanzaParser(self)
- parser.FeedString('<foo')
- self.assertEqual(len(self.stanzas), 0)
- parser.FeedString(' bar="baz"')
- parser.FeedString('><baz/><blah>meh</blah></foo>')
- self.assertEqual(self.stanzas[0],
- '<foo bar="baz"><baz/><blah>meh</blah></foo>')
-
-
-class JidTest(unittest.TestCase):
-
- def testBasic(self):
- jid = xmppserver.Jid('foo', 'bar.com')
- self.assertEqual(str(jid), 'foo@bar.com')
-
- def testResource(self):
- jid = xmppserver.Jid('foo', 'bar.com', 'resource')
- self.assertEqual(str(jid), 'foo@bar.com/resource')
-
- def testGetBareJid(self):
- jid = xmppserver.Jid('foo', 'bar.com', 'resource')
- self.assertEqual(str(jid.GetBareJid()), 'foo@bar.com')
-
-
-class IdGeneratorTest(unittest.TestCase):
-
- def testBasic(self):
- id_generator = xmppserver.IdGenerator('foo')
- for i in xrange(0, 100):
- self.assertEqual('foo.%d' % i, id_generator.GetNextId())
-
-
-class HandshakeTaskTest(unittest.TestCase):
-
- def setUp(self):
- self.Reset()
-
- def Reset(self):
- self.data_received = 0
- self.handshake_done = False
- self.jid = None
-
- def SendData(self, _):
- self.data_received += 1
-
- def SendStanza(self, _, unused=True):
- self.data_received += 1
-
- def HandshakeDone(self, jid):
- self.handshake_done = True
- self.jid = jid
-
- def DoHandshake(self, resource_prefix, resource, username,
- initial_stream_domain, auth_domain, auth_stream_domain):
- self.Reset()
- handshake_task = (
- xmppserver.HandshakeTask(self, resource_prefix, True))
- stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>')
- stream_xml.setAttribute('to', initial_stream_domain)
- self.assertEqual(self.data_received, 0)
- handshake_task.FeedStanza(stream_xml)
- self.assertEqual(self.data_received, 2)
-
- if auth_domain:
- username_domain = '%s@%s' % (username, auth_domain)
- else:
- username_domain = username
- auth_string = base64.b64encode('\0%s\0bar' % username_domain)
- auth_xml = xmppserver.ParseXml('<auth>%s</auth>'% auth_string)
- handshake_task.FeedStanza(auth_xml)
- self.assertEqual(self.data_received, 3)
-
- stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>')
- stream_xml.setAttribute('to', auth_stream_domain)
- handshake_task.FeedStanza(stream_xml)
- self.assertEqual(self.data_received, 5)
-
- bind_xml = xmppserver.ParseXml(
- '<iq type="set"><bind><resource>%s</resource></bind></iq>' % resource)
- handshake_task.FeedStanza(bind_xml)
- self.assertEqual(self.data_received, 6)
-
- self.assertFalse(self.handshake_done)
-
- session_xml = xmppserver.ParseXml(
- '<iq type="set"><session></session></iq>')
- handshake_task.FeedStanza(session_xml)
- self.assertEqual(self.data_received, 7)
-
- self.assertTrue(self.handshake_done)
-
- self.assertEqual(self.jid.username, username)
- self.assertEqual(self.jid.domain,
- auth_stream_domain or auth_domain or
- initial_stream_domain)
- self.assertEqual(self.jid.resource,
- '%s.%s' % (resource_prefix, resource))
-
- handshake_task.FeedStanza('<ignored/>')
- self.assertEqual(self.data_received, 7)
-
- def DoHandshakeUnauthenticated(self, resource_prefix, resource, username,
- initial_stream_domain):
- self.Reset()
- handshake_task = (
- xmppserver.HandshakeTask(self, resource_prefix, False))
- stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>')
- stream_xml.setAttribute('to', initial_stream_domain)
- self.assertEqual(self.data_received, 0)
- handshake_task.FeedStanza(stream_xml)
- self.assertEqual(self.data_received, 2)
-
- self.assertFalse(self.handshake_done)
-
- auth_string = base64.b64encode('\0%s\0bar' % username)
- auth_xml = xmppserver.ParseXml('<auth>%s</auth>'% auth_string)
- handshake_task.FeedStanza(auth_xml)
- self.assertEqual(self.data_received, 3)
-
- self.assertTrue(self.handshake_done)
-
- self.assertEqual(self.jid, None)
-
- handshake_task.FeedStanza('<ignored/>')
- self.assertEqual(self.data_received, 3)
-
- def testBasic(self):
- self.DoHandshake('resource_prefix', 'resource',
- 'foo', 'bar.com', 'baz.com', 'quux.com')
-
- def testDomainBehavior(self):
- self.DoHandshake('resource_prefix', 'resource',
- 'foo', 'bar.com', 'baz.com', 'quux.com')
- self.DoHandshake('resource_prefix', 'resource',
- 'foo', 'bar.com', 'baz.com', '')
- self.DoHandshake('resource_prefix', 'resource',
- 'foo', 'bar.com', '', '')
- self.DoHandshake('resource_prefix', 'resource',
- 'foo', '', '', '')
-
- def testBasicUnauthenticated(self):
- self.DoHandshakeUnauthenticated('resource_prefix', 'resource',
- 'foo', 'bar.com')
-
-
-class FakeSocket(object):
- """A fake socket object used for testing.
- """
-
- def __init__(self):
- self._sent_data = []
-
- def GetSentData(self):
- return self._sent_data
-
- # socket-like methods.
- def fileno(self):
- return 0
-
- def setblocking(self, int):
- pass
-
- def getpeername(self):
- return ('', 0)
-
- def send(self, data):
- self._sent_data.append(data)
- pass
-
- def close(self):
- pass
-
-
-class XmppConnectionTest(unittest.TestCase):
-
- def setUp(self):
- self.connections = set()
- self.fake_socket = FakeSocket()
-
- # XmppConnection delegate methods.
- def OnXmppHandshakeDone(self, xmpp_connection):
- self.connections.add(xmpp_connection)
-
- def OnXmppConnectionClosed(self, xmpp_connection):
- self.connections.discard(xmpp_connection)
-
- def ForwardNotification(self, unused_xmpp_connection, notification_stanza):
- for connection in self.connections:
- connection.ForwardNotification(notification_stanza)
-
- def testBasic(self):
- socket_map = {}
- xmpp_connection = xmppserver.XmppConnection(
- self.fake_socket, socket_map, self, ('', 0), True)
- self.assertEqual(len(socket_map), 1)
- self.assertEqual(len(self.connections), 0)
- xmpp_connection.HandshakeDone(xmppserver.Jid('foo', 'bar'))
- self.assertEqual(len(socket_map), 1)
- self.assertEqual(len(self.connections), 1)
-
- sent_data = self.fake_socket.GetSentData()
-
- # Test subscription request.
- self.assertEqual(len(sent_data), 0)
- xmpp_connection.collect_incoming_data(
- '<iq><subscribe xmlns="google:push"></subscribe></iq>')
- self.assertEqual(len(sent_data), 1)
-
- # Test acks.
- xmpp_connection.collect_incoming_data('<iq type="result"/>')
- self.assertEqual(len(sent_data), 1)
-
- # Test notification.
- xmpp_connection.collect_incoming_data(
- '<message><push xmlns="google:push"/></message>')
- self.assertEqual(len(sent_data), 2)
-
- # Test unexpected stanza.
- def SendUnexpectedStanza():
- xmpp_connection.collect_incoming_data('<foo/>')
- self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza)
-
- # Test unexpected notifier command.
- def SendUnexpectedNotifierCommand():
- xmpp_connection.collect_incoming_data(
- '<iq><foo xmlns="google:notifier"/></iq>')
- self.assertRaises(xmppserver.UnexpectedXml,
- SendUnexpectedNotifierCommand)
-
- # Test close.
- xmpp_connection.close()
- self.assertEqual(len(socket_map), 0)
- self.assertEqual(len(self.connections), 0)
-
- def testBasicUnauthenticated(self):
- socket_map = {}
- xmpp_connection = xmppserver.XmppConnection(
- self.fake_socket, socket_map, self, ('', 0), False)
- self.assertEqual(len(socket_map), 1)
- self.assertEqual(len(self.connections), 0)
- xmpp_connection.HandshakeDone(None)
- self.assertEqual(len(socket_map), 0)
- self.assertEqual(len(self.connections), 0)
-
- # Test unexpected stanza.
- def SendUnexpectedStanza():
- xmpp_connection.collect_incoming_data('<foo/>')
- self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza)
-
- # Test redundant close.
- xmpp_connection.close()
- self.assertEqual(len(socket_map), 0)
- self.assertEqual(len(self.connections), 0)
-
-
-class FakeXmppServer(xmppserver.XmppServer):
- """A fake XMPP server object used for testing.
- """
-
- def __init__(self):
- self._socket_map = {}
- self._fake_sockets = set()
- self._next_jid_suffix = 1
- xmppserver.XmppServer.__init__(self, self._socket_map, ('', 0))
-
- def GetSocketMap(self):
- return self._socket_map
-
- def GetFakeSockets(self):
- return self._fake_sockets
-
- def AddHandshakeCompletedConnection(self):
- """Creates a new XMPP connection and completes its handshake.
- """
- xmpp_connection = self.handle_accept()
- jid = xmppserver.Jid('user%s' % self._next_jid_suffix, 'domain.com')
- self._next_jid_suffix += 1
- xmpp_connection.HandshakeDone(jid)
-
- # XmppServer overrides.
- def accept(self):
- fake_socket = FakeSocket()
- self._fake_sockets.add(fake_socket)
- return (fake_socket, ('', 0))
-
- def close(self):
- self._fake_sockets.clear()
- xmppserver.XmppServer.close(self)
-
-
-class XmppServerTest(unittest.TestCase):
-
- def setUp(self):
- self.xmpp_server = FakeXmppServer()
-
- def AssertSentDataLength(self, expected_length):
- for fake_socket in self.xmpp_server.GetFakeSockets():
- self.assertEqual(len(fake_socket.GetSentData()), expected_length)
-
- def testBasic(self):
- socket_map = self.xmpp_server.GetSocketMap()
- self.assertEqual(len(socket_map), 1)
- self.xmpp_server.AddHandshakeCompletedConnection()
- self.assertEqual(len(socket_map), 2)
- self.xmpp_server.close()
- self.assertEqual(len(socket_map), 0)
-
- def testMakeNotification(self):
- notification = self.xmpp_server.MakeNotification('channel', 'data')
- expected_xml = (
- '<message>'
- ' <push channel="channel" xmlns="google:push">'
- ' <data>%s</data>'
- ' </push>'
- '</message>' % base64.b64encode('data'))
- self.assertEqual(notification.toxml(), expected_xml)
-
- def testSendNotification(self):
- # Add a few connections.
- for _ in xrange(0, 7):
- self.xmpp_server.AddHandshakeCompletedConnection()
-
- self.assertEqual(len(self.xmpp_server.GetFakeSockets()), 7)
-
- self.AssertSentDataLength(0)
- self.xmpp_server.SendNotification('channel', 'data')
- self.AssertSentDataLength(1)
-
- def testEnableDisableNotifications(self):
- # Add a few connections.
- for _ in xrange(0, 5):
- self.xmpp_server.AddHandshakeCompletedConnection()
-
- self.assertEqual(len(self.xmpp_server.GetFakeSockets()), 5)
-
- self.AssertSentDataLength(0)
- self.xmpp_server.SendNotification('channel', 'data')
- self.AssertSentDataLength(1)
-
- self.xmpp_server.EnableNotifications()
- self.xmpp_server.SendNotification('channel', 'data')
- self.AssertSentDataLength(2)
-
- self.xmpp_server.DisableNotifications()
- self.xmpp_server.SendNotification('channel', 'data')
- self.AssertSentDataLength(2)
-
- self.xmpp_server.DisableNotifications()
- self.xmpp_server.SendNotification('channel', 'data')
- self.AssertSentDataLength(2)
-
- self.xmpp_server.EnableNotifications()
- self.xmpp_server.SendNotification('channel', 'data')
- self.AssertSentDataLength(3)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/sync/util/DEPS b/chromium/sync/util/DEPS
deleted file mode 100644
index d311654f50c..00000000000
--- a/chromium/sync/util/DEPS
+++ /dev/null
@@ -1,13 +0,0 @@
-include_rules = [
- "+chromeos",
- "+crypto",
- "+sync/base",
- "+sync/internal_api/public/base",
- "+sync/internal_api/public/util",
- "+sync/protocol",
- "+sync/test/fake_encryptor.h",
-
- # TODO(zea): remove this once we don't need the cryptographer to get the set
- # of encrypted types.
- "+sync/syncable/nigori_handler.h"
-]
diff --git a/chromium/sync/util/cryptographer.cc b/chromium/sync/util/cryptographer.cc
deleted file mode 100644
index 29f378125a7..00000000000
--- a/chromium/sync/util/cryptographer.cc
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/cryptographer.h"
-
-#include <algorithm>
-
-#include "base/base64.h"
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/util/encryptor.h"
-
-namespace syncer {
-
-const char kNigoriTag[] = "google_chrome_nigori";
-
-// We name a particular Nigori instance (ie. a triplet consisting of a hostname,
-// a username, and a password) by calling Permute on this string. Since the
-// output of Permute is always the same for a given triplet, clients will always
-// assign the same name to a particular triplet.
-const char kNigoriKeyName[] = "nigori-key";
-
-Cryptographer::Cryptographer(Encryptor* encryptor)
- : encryptor_(encryptor) {
- DCHECK(encryptor);
-}
-
-Cryptographer::~Cryptographer() {}
-
-
-void Cryptographer::Bootstrap(const std::string& restored_bootstrap_token) {
- if (is_initialized()) {
- NOTREACHED();
- return;
- }
-
- std::string serialized_nigori_key =
- UnpackBootstrapToken(restored_bootstrap_token);
- if (serialized_nigori_key.empty())
- return;
- ImportNigoriKey(serialized_nigori_key);
-}
-
-bool Cryptographer::CanDecrypt(const sync_pb::EncryptedData& data) const {
- return nigoris_.end() != nigoris_.find(data.key_name());
-}
-
-bool Cryptographer::CanDecryptUsingDefaultKey(
- const sync_pb::EncryptedData& data) const {
- return !default_nigori_name_.empty() &&
- data.key_name() == default_nigori_name_;
-}
-
-bool Cryptographer::Encrypt(
- const ::google::protobuf::MessageLite& message,
- sync_pb::EncryptedData* encrypted) const {
- DCHECK(encrypted);
- if (default_nigori_name_.empty()) {
- LOG(ERROR) << "Cryptographer not ready, failed to encrypt.";
- return false;
- }
-
- std::string serialized;
- if (!message.SerializeToString(&serialized)) {
- LOG(ERROR) << "Message is invalid/missing a required field.";
- return false;
- }
-
- return EncryptString(serialized, encrypted);
-}
-
-bool Cryptographer::EncryptString(
- const std::string& serialized,
- sync_pb::EncryptedData* encrypted) const {
- if (CanDecryptUsingDefaultKey(*encrypted)) {
- const std::string& original_serialized = DecryptToString(*encrypted);
- if (original_serialized == serialized) {
- DVLOG(2) << "Re-encryption unnecessary, encrypted data already matches.";
- return true;
- }
- }
-
- NigoriMap::const_iterator default_nigori =
- nigoris_.find(default_nigori_name_);
- if (default_nigori == nigoris_.end()) {
- LOG(ERROR) << "Corrupt default key.";
- return false;
- }
-
- encrypted->set_key_name(default_nigori_name_);
- if (!default_nigori->second->Encrypt(serialized,
- encrypted->mutable_blob())) {
- LOG(ERROR) << "Failed to encrypt data.";
- return false;
- }
- return true;
-}
-
-bool Cryptographer::Decrypt(const sync_pb::EncryptedData& encrypted,
- ::google::protobuf::MessageLite* message) const {
- DCHECK(message);
- std::string plaintext = DecryptToString(encrypted);
- return message->ParseFromString(plaintext);
-}
-
-std::string Cryptographer::DecryptToString(
- const sync_pb::EncryptedData& encrypted) const {
- NigoriMap::const_iterator it = nigoris_.find(encrypted.key_name());
- if (nigoris_.end() == it) {
- NOTREACHED() << "Cannot decrypt message";
- return std::string(); // Caller should have called CanDecrypt(encrypt).
- }
-
- std::string plaintext;
- if (!it->second->Decrypt(encrypted.blob(), &plaintext)) {
- return std::string();
- }
-
- return plaintext;
-}
-
-bool Cryptographer::GetKeys(sync_pb::EncryptedData* encrypted) const {
- DCHECK(encrypted);
- DCHECK(!nigoris_.empty());
-
- // Create a bag of all the Nigori parameters we know about.
- sync_pb::NigoriKeyBag bag;
- for (NigoriMap::const_iterator it = nigoris_.begin(); it != nigoris_.end();
- ++it) {
- const Nigori& nigori = *it->second;
- sync_pb::NigoriKey* key = bag.add_key();
- key->set_name(it->first);
- nigori.ExportKeys(key->mutable_user_key(),
- key->mutable_encryption_key(),
- key->mutable_mac_key());
- }
-
- // Encrypt the bag with the default Nigori.
- return Encrypt(bag, encrypted);
-}
-
-bool Cryptographer::AddKey(const KeyParams& params) {
- // Create the new Nigori and make it the default encryptor.
- scoped_ptr<Nigori> nigori(new Nigori);
- if (!nigori->InitByDerivation(params.hostname,
- params.username,
- params.password)) {
- NOTREACHED(); // Invalid username or password.
- return false;
- }
- return AddKeyImpl(nigori.Pass(), true);
-}
-
-bool Cryptographer::AddNonDefaultKey(const KeyParams& params) {
- DCHECK(is_initialized());
- // Create the new Nigori and add it to the keybag.
- scoped_ptr<Nigori> nigori(new Nigori);
- if (!nigori->InitByDerivation(params.hostname,
- params.username,
- params.password)) {
- NOTREACHED(); // Invalid username or password.
- return false;
- }
- return AddKeyImpl(nigori.Pass(), false);
-}
-
-bool Cryptographer::AddKeyFromBootstrapToken(
- const std::string restored_bootstrap_token) {
- // Create the new Nigori and make it the default encryptor.
- std::string serialized_nigori_key = UnpackBootstrapToken(
- restored_bootstrap_token);
- return ImportNigoriKey(serialized_nigori_key);
-}
-
-bool Cryptographer::AddKeyImpl(scoped_ptr<Nigori> initialized_nigori,
- bool set_as_default) {
- std::string name;
- if (!initialized_nigori->Permute(Nigori::Password, kNigoriKeyName, &name)) {
- NOTREACHED();
- return false;
- }
-
- nigoris_[name] = make_linked_ptr(initialized_nigori.release());
-
- // Check if the key we just added can decrypt the pending keys and add them
- // too if so.
- if (pending_keys_.get() && CanDecrypt(*pending_keys_)) {
- sync_pb::NigoriKeyBag pending_bag;
- Decrypt(*pending_keys_, &pending_bag);
- InstallKeyBag(pending_bag);
- SetDefaultKey(pending_keys_->key_name());
- pending_keys_.reset();
- }
-
- // The just-added key takes priority over the pending keys as default.
- if (set_as_default) SetDefaultKey(name);
- return true;
-}
-
-void Cryptographer::InstallKeys(const sync_pb::EncryptedData& encrypted) {
- DCHECK(CanDecrypt(encrypted));
-
- sync_pb::NigoriKeyBag bag;
- if (!Decrypt(encrypted, &bag))
- return;
- InstallKeyBag(bag);
-}
-
-void Cryptographer::SetDefaultKey(const std::string& key_name) {
- DCHECK(nigoris_.end() != nigoris_.find(key_name));
- default_nigori_name_ = key_name;
-}
-
-void Cryptographer::SetPendingKeys(const sync_pb::EncryptedData& encrypted) {
- DCHECK(!CanDecrypt(encrypted));
- DCHECK(!encrypted.blob().empty());
- pending_keys_.reset(new sync_pb::EncryptedData(encrypted));
-}
-
-const sync_pb::EncryptedData& Cryptographer::GetPendingKeys() const {
- DCHECK(has_pending_keys());
- return *(pending_keys_.get());
-}
-
-bool Cryptographer::DecryptPendingKeys(const KeyParams& params) {
- Nigori nigori;
- if (!nigori.InitByDerivation(params.hostname,
- params.username,
- params.password)) {
- NOTREACHED();
- return false;
- }
-
- std::string plaintext;
- if (!nigori.Decrypt(pending_keys_->blob(), &plaintext))
- return false;
-
- sync_pb::NigoriKeyBag bag;
- if (!bag.ParseFromString(plaintext)) {
- NOTREACHED();
- return false;
- }
- InstallKeyBag(bag);
- const std::string& new_default_key_name = pending_keys_->key_name();
- SetDefaultKey(new_default_key_name);
- pending_keys_.reset();
- return true;
-}
-
-bool Cryptographer::GetBootstrapToken(std::string* token) const {
- DCHECK(token);
- std::string unencrypted_token = GetDefaultNigoriKey();
- if (unencrypted_token.empty())
- return false;
-
- std::string encrypted_token;
- if (!encryptor_->EncryptString(unencrypted_token, &encrypted_token)) {
- NOTREACHED();
- return false;
- }
-
- base::Base64Encode(encrypted_token, token);
-
- return true;
-}
-
-std::string Cryptographer::UnpackBootstrapToken(
- const std::string& token) const {
- if (token.empty())
- return std::string();
-
- std::string encrypted_data;
- if (!base::Base64Decode(token, &encrypted_data)) {
- DLOG(WARNING) << "Could not decode token.";
- return std::string();
- }
-
- std::string unencrypted_token;
- if (!encryptor_->DecryptString(encrypted_data, &unencrypted_token)) {
- DLOG(WARNING) << "Decryption of bootstrap token failed.";
- return std::string();
- }
- return unencrypted_token;
-}
-
-void Cryptographer::InstallKeyBag(const sync_pb::NigoriKeyBag& bag) {
- int key_size = bag.key_size();
- for (int i = 0; i < key_size; ++i) {
- const sync_pb::NigoriKey key = bag.key(i);
- // Only use this key if we don't already know about it.
- if (nigoris_.end() == nigoris_.find(key.name())) {
- scoped_ptr<Nigori> new_nigori(new Nigori);
- if (!new_nigori->InitByImport(key.user_key(),
- key.encryption_key(),
- key.mac_key())) {
- NOTREACHED();
- continue;
- }
- nigoris_[key.name()] = make_linked_ptr(new_nigori.release());
- }
- }
-}
-
-bool Cryptographer::KeybagIsStale(
- const sync_pb::EncryptedData& encrypted_bag) const {
- if (!is_ready())
- return false;
- if (encrypted_bag.blob().empty())
- return true;
- if (!CanDecrypt(encrypted_bag))
- return false;
- if (!CanDecryptUsingDefaultKey(encrypted_bag))
- return true;
- sync_pb::NigoriKeyBag bag;
- if (!Decrypt(encrypted_bag, &bag)) {
- LOG(ERROR) << "Failed to decrypt keybag for stale check. "
- << "Assuming keybag is corrupted.";
- return true;
- }
- if (static_cast<size_t>(bag.key_size()) < nigoris_.size())
- return true;
- return false;
-}
-
-std::string Cryptographer::GetDefaultNigoriKey() const {
- if (!is_initialized())
- return std::string();
- NigoriMap::const_iterator iter = nigoris_.find(default_nigori_name_);
- if (iter == nigoris_.end())
- return std::string();
- sync_pb::NigoriKey key;
- if (!iter->second->ExportKeys(key.mutable_user_key(),
- key.mutable_encryption_key(),
- key.mutable_mac_key()))
- return std::string();
- return key.SerializeAsString();
-}
-
-bool Cryptographer::ImportNigoriKey(const std::string serialized_nigori_key) {
- if (serialized_nigori_key.empty())
- return false;
-
- sync_pb::NigoriKey key;
- if (!key.ParseFromString(serialized_nigori_key))
- return false;
-
- scoped_ptr<Nigori> nigori(new Nigori);
- if (!nigori->InitByImport(key.user_key(), key.encryption_key(),
- key.mac_key())) {
- NOTREACHED();
- return false;
- }
-
- if (!AddKeyImpl(nigori.Pass(), true))
- return false;
- return true;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/cryptographer.h b/chromium/sync/util/cryptographer.h
deleted file mode 100644
index 2dfdedccf6c..00000000000
--- a/chromium/sync/util/cryptographer.h
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_CRYPTOGRAPHER_H_
-#define SYNC_UTIL_CRYPTOGRAPHER_H_
-
-#include <map>
-#include <string>
-
-#include "base/gtest_prod_util.h"
-#include "base/memory/linked_ptr.h"
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-#include "sync/protocol/encryption.pb.h"
-#include "sync/util/nigori.h"
-
-namespace sync_pb {
-class NigoriKeyBag;
-class NigoriSpecifics;
-}
-
-namespace syncer {
-
-class Encryptor;
-
-SYNC_EXPORT_PRIVATE extern const char kNigoriTag[];
-
-// The parameters used to initialize a Nigori instance.
-struct KeyParams {
- std::string hostname;
- std::string username;
- std::string password;
-};
-
-// This class manages the Nigori objects used to encrypt and decrypt sensitive
-// sync data (eg. passwords). Each Nigori object knows how to handle data
-// protected with a particular passphrase.
-//
-// Whenever an update to the Nigori sync node is received from the server,
-// SetPendingKeys should be called with the encrypted contents of that node.
-// Most likely, an updated Nigori node means that a new passphrase has been set
-// and that future node updates won't be decryptable. To remedy this, the user
-// should be prompted for the new passphrase and DecryptPendingKeys be called.
-//
-// Whenever a update to an encrypted node is received from the server,
-// CanDecrypt should be used to verify whether the Cryptographer can decrypt
-// that node. If it cannot, then the application of that update should be
-// delayed until after it can be decrypted.
-class SYNC_EXPORT Cryptographer {
- public:
- // Does not take ownership of |encryptor|.
- explicit Cryptographer(Encryptor* encryptor);
- ~Cryptographer();
-
- // |restored_bootstrap_token| can be provided via this method to bootstrap
- // Cryptographer instance into the ready state (is_ready will be true).
- // It must be a string that was previously built by the
- // GetSerializedBootstrapToken function. It is possible that the token is no
- // longer valid (due to server key change), in which case the normal
- // decryption code paths will fail and the user will need to provide a new
- // passphrase.
- // It is an error to call this if is_ready() == true, though it is fair to
- // never call Bootstrap at all.
- void Bootstrap(const std::string& restored_bootstrap_token);
-
- // Returns whether we can decrypt |encrypted| using the keys we currently know
- // about.
- bool CanDecrypt(const sync_pb::EncryptedData& encrypted) const;
-
- // Returns whether |encrypted| can be decrypted using the default encryption
- // key.
- bool CanDecryptUsingDefaultKey(const sync_pb::EncryptedData& encrypted) const;
-
- // Encrypts |message| into |encrypted|. Does not overwrite |encrypted| if
- // |message| already matches the decrypted data within |encrypted| and
- // |encrypted| was encrypted with the current default key. This avoids
- // unnecessarily modifying |encrypted| if the change had no practical effect.
- // Returns true unless encryption fails or |message| isn't valid (e.g. a
- // required field isn't set).
- bool Encrypt(const ::google::protobuf::MessageLite& message,
- sync_pb::EncryptedData* encrypted) const;
-
- // Encrypted |serialized| into |encrypted|. Does not overwrite |encrypted| if
- // |message| already matches the decrypted data within |encrypted| and
- // |encrypted| was encrypted with the current default key. This avoids
- // unnecessarily modifying |encrypted| if the change had no practical effect.
- // Returns true unless encryption fails or |message| isn't valid (e.g. a
- // required field isn't set).
- bool EncryptString(const std::string& serialized,
- sync_pb::EncryptedData* encrypted) const;
-
- // Decrypts |encrypted| into |message|. Returns true unless decryption fails,
- // or |message| fails to parse the decrypted data.
- bool Decrypt(const sync_pb::EncryptedData& encrypted,
- ::google::protobuf::MessageLite* message) const;
-
- // Decrypts |encrypted| and returns plaintext decrypted data. If decryption
- // fails, returns empty string.
- std::string DecryptToString(const sync_pb::EncryptedData& encrypted) const;
-
- // Encrypts the set of currently known keys into |encrypted|. Returns true if
- // successful.
- bool GetKeys(sync_pb::EncryptedData* encrypted) const;
-
- // Creates a new Nigori instance using |params|. If successful, |params| will
- // become the default encryption key and be used for all future calls to
- // Encrypt.
- // Will decrypt the pending keys and install them if possible (pending key
- // will not overwrite default).
- bool AddKey(const KeyParams& params);
-
- // Same as AddKey(..), but builds the new Nigori from a previously persisted
- // bootstrap token. This can be useful when consuming a bootstrap token
- // with a cryptographer that has already been initialized.
- // Updates the default key.
- // Will decrypt the pending keys and install them if possible (pending key
- // will not overwrite default).
- bool AddKeyFromBootstrapToken(const std::string restored_bootstrap_token);
-
- // Creates a new Nigori instance using |params|. If successful, |params|
- // will be added to the nigori keybag, but will not be the default encryption
- // key (default_nigori_ will remain the same).
- // Prereq: is_initialized() must be true.
- // Will decrypt the pending keys and install them if possible (pending key
- // will become the new default).
- bool AddNonDefaultKey(const KeyParams& params);
-
- // Decrypts |encrypted| and uses its contents to initialize Nigori instances.
- // Returns true unless decryption of |encrypted| fails. The caller is
- // responsible for checking that CanDecrypt(encrypted) == true.
- // Does not modify the default key.
- void InstallKeys(const sync_pb::EncryptedData& encrypted);
-
- // Makes a local copy of |encrypted| to later be decrypted by
- // DecryptPendingKeys. This should only be used if CanDecrypt(encrypted) ==
- // false.
- void SetPendingKeys(const sync_pb::EncryptedData& encrypted);
-
- // Makes |pending_keys_| available to callers that may want to cache its
- // value for later use on the UI thread. It is illegal to call this if the
- // cryptographer has no pending keys. Like other calls that access the
- // cryptographer, this method must be called from within a transaction.
- const sync_pb::EncryptedData& GetPendingKeys() const;
-
- // Attempts to decrypt the set of keys that was copied in the previous call to
- // SetPendingKeys using |params|. Returns true if the pending keys were
- // successfully decrypted and installed. If successful, the default key
- // is updated.
- bool DecryptPendingKeys(const KeyParams& params);
-
- // Sets the default key to the nigori with name |key_name|. |key_name| must
- // correspond to a nigori that has already been installed into the keybag.
- void SetDefaultKey(const std::string& key_name);
-
- bool is_initialized() const {
- return !nigoris_.empty() && !default_nigori_name_.empty();
- }
-
- // Returns whether this Cryptographer is ready to encrypt and decrypt data.
- bool is_ready() const {
- return is_initialized() && !has_pending_keys();
- }
-
- // Returns whether there is a pending set of keys that needs to be decrypted.
- bool has_pending_keys() const { return NULL != pending_keys_.get(); }
-
- // Obtain a token that can be provided on construction to a future
- // Cryptographer instance to bootstrap itself. Returns false if such a token
- // can't be created (i.e. if this Cryptograhper doesn't have valid keys).
- bool GetBootstrapToken(std::string* token) const;
-
- Encryptor* encryptor() const { return encryptor_; }
-
- // Returns true if |keybag| is decryptable and either is a subset of nigoris_
- // and/or has a different default key.
- bool KeybagIsStale(const sync_pb::EncryptedData& keybag) const;
-
- // Returns a serialized sync_pb::NigoriKey version of current default
- // encryption key.
- std::string GetDefaultNigoriKey() const;
-
- // Generates a new Nigori from |serialized_nigori_key|, and if successful
- // installs the new nigori as the default key.
- bool ImportNigoriKey(const std::string serialized_nigori_key);
-
- private:
- typedef std::map<std::string, linked_ptr<const Nigori> > NigoriMap;
-
- // Helper method to instantiate Nigori instances for each set of key
- // parameters in |bag|.
- // Does not update the default nigori.
- void InstallKeyBag(const sync_pb::NigoriKeyBag& bag);
-
- // Helper method to add a nigori to the keybag, optionally making it the
- // default as well.
- bool AddKeyImpl(scoped_ptr<Nigori> nigori, bool set_as_default);
-
- // Helper to unencrypt a bootstrap token into a serialized sync_pb::NigoriKey.
- std::string UnpackBootstrapToken(const std::string& token) const;
-
- Encryptor* const encryptor_;
-
- // The Nigoris we know about, mapped by key name.
- NigoriMap nigoris_;
- // The key name associated with the default nigori. If non-empty, must
- // correspond to a nigori within |nigoris_|.
- std::string default_nigori_name_;
-
- scoped_ptr<sync_pb::EncryptedData> pending_keys_;
-
- DISALLOW_COPY_AND_ASSIGN(Cryptographer);
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_CRYPTOGRAPHER_H_
diff --git a/chromium/sync/util/cryptographer_unittest.cc b/chromium/sync/util/cryptographer_unittest.cc
deleted file mode 100644
index c045064674c..00000000000
--- a/chromium/sync/util/cryptographer_unittest.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/cryptographer.h"
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_util.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/test/fake_encryptor.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-namespace {
-
-using ::testing::_;
-
-} // namespace
-
-class CryptographerTest : public ::testing::Test {
- protected:
- CryptographerTest() : cryptographer_(&encryptor_) {}
-
- FakeEncryptor encryptor_;
- Cryptographer cryptographer_;
-};
-
-TEST_F(CryptographerTest, EmptyCantDecrypt) {
- EXPECT_FALSE(cryptographer_.is_ready());
-
- sync_pb::EncryptedData encrypted;
- encrypted.set_key_name("foo");
- encrypted.set_blob("bar");
-
- EXPECT_FALSE(cryptographer_.CanDecrypt(encrypted));
-}
-
-TEST_F(CryptographerTest, EmptyCantEncrypt) {
- EXPECT_FALSE(cryptographer_.is_ready());
-
- sync_pb::EncryptedData encrypted;
- sync_pb::PasswordSpecificsData original;
- EXPECT_FALSE(cryptographer_.Encrypt(original, &encrypted));
-}
-
-TEST_F(CryptographerTest, MissingCantDecrypt) {
- KeyParams params = {"localhost", "dummy", "dummy"};
- cryptographer_.AddKey(params);
- EXPECT_TRUE(cryptographer_.is_ready());
-
- sync_pb::EncryptedData encrypted;
- encrypted.set_key_name("foo");
- encrypted.set_blob("bar");
-
- EXPECT_FALSE(cryptographer_.CanDecrypt(encrypted));
-}
-
-TEST_F(CryptographerTest, CanEncryptAndDecrypt) {
- KeyParams params = {"localhost", "dummy", "dummy"};
- EXPECT_TRUE(cryptographer_.AddKey(params));
- EXPECT_TRUE(cryptographer_.is_ready());
-
- sync_pb::PasswordSpecificsData original;
- original.set_origin("http://example.com");
- original.set_username_value("azure");
- original.set_password_value("hunter2");
-
- sync_pb::EncryptedData encrypted;
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted));
-
- sync_pb::PasswordSpecificsData decrypted;
- EXPECT_TRUE(cryptographer_.Decrypt(encrypted, &decrypted));
-
- EXPECT_EQ(original.SerializeAsString(), decrypted.SerializeAsString());
-}
-
-TEST_F(CryptographerTest, EncryptOnlyIfDifferent) {
- KeyParams params = {"localhost", "dummy", "dummy"};
- EXPECT_TRUE(cryptographer_.AddKey(params));
- EXPECT_TRUE(cryptographer_.is_ready());
-
- sync_pb::PasswordSpecificsData original;
- original.set_origin("http://example.com");
- original.set_username_value("azure");
- original.set_password_value("hunter2");
-
- sync_pb::EncryptedData encrypted;
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted));
-
- sync_pb::EncryptedData encrypted2, encrypted3;
- encrypted2.CopyFrom(encrypted);
- encrypted3.CopyFrom(encrypted);
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted2));
-
- // Now encrypt with a new default key. Should overwrite the old data.
- KeyParams params_new = {"localhost", "dummy", "dummy2"};
- cryptographer_.AddKey(params_new);
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted3));
-
- sync_pb::PasswordSpecificsData decrypted;
- EXPECT_TRUE(cryptographer_.Decrypt(encrypted2, &decrypted));
- // encrypted2 should match encrypted, encrypted3 should not (due to salting).
- EXPECT_EQ(encrypted.SerializeAsString(), encrypted2.SerializeAsString());
- EXPECT_NE(encrypted.SerializeAsString(), encrypted3.SerializeAsString());
- EXPECT_EQ(original.SerializeAsString(), decrypted.SerializeAsString());
-}
-
-TEST_F(CryptographerTest, AddKeySetsDefault) {
- KeyParams params1 = {"localhost", "dummy", "dummy1"};
- EXPECT_TRUE(cryptographer_.AddKey(params1));
- EXPECT_TRUE(cryptographer_.is_ready());
-
- sync_pb::PasswordSpecificsData original;
- original.set_origin("http://example.com");
- original.set_username_value("azure");
- original.set_password_value("hunter2");
-
- sync_pb::EncryptedData encrypted1;
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted1));
- sync_pb::EncryptedData encrypted2;
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted2));
-
- KeyParams params2 = {"localhost", "dummy", "dummy2"};
- EXPECT_TRUE(cryptographer_.AddKey(params2));
- EXPECT_TRUE(cryptographer_.is_ready());
-
- sync_pb::EncryptedData encrypted3;
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted3));
- sync_pb::EncryptedData encrypted4;
- EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted4));
-
- EXPECT_EQ(encrypted1.key_name(), encrypted2.key_name());
- EXPECT_NE(encrypted1.key_name(), encrypted3.key_name());
- EXPECT_EQ(encrypted3.key_name(), encrypted4.key_name());
-}
-
-// Crashes, Bug 55178.
-#if defined(OS_WIN)
-#define MAYBE_EncryptExportDecrypt DISABLED_EncryptExportDecrypt
-#else
-#define MAYBE_EncryptExportDecrypt EncryptExportDecrypt
-#endif
-TEST_F(CryptographerTest, MAYBE_EncryptExportDecrypt) {
- sync_pb::EncryptedData nigori;
- sync_pb::EncryptedData encrypted;
-
- sync_pb::PasswordSpecificsData original;
- original.set_origin("http://example.com");
- original.set_username_value("azure");
- original.set_password_value("hunter2");
-
- {
- Cryptographer cryptographer(&encryptor_);
-
- KeyParams params = {"localhost", "dummy", "dummy"};
- cryptographer.AddKey(params);
- EXPECT_TRUE(cryptographer.is_ready());
-
- EXPECT_TRUE(cryptographer.Encrypt(original, &encrypted));
- EXPECT_TRUE(cryptographer.GetKeys(&nigori));
- }
-
- {
- Cryptographer cryptographer(&encryptor_);
- EXPECT_FALSE(cryptographer.CanDecrypt(nigori));
-
- cryptographer.SetPendingKeys(nigori);
- EXPECT_FALSE(cryptographer.is_ready());
- EXPECT_TRUE(cryptographer.has_pending_keys());
-
- KeyParams params = {"localhost", "dummy", "dummy"};
- EXPECT_TRUE(cryptographer.DecryptPendingKeys(params));
- EXPECT_TRUE(cryptographer.is_ready());
- EXPECT_FALSE(cryptographer.has_pending_keys());
-
- sync_pb::PasswordSpecificsData decrypted;
- EXPECT_TRUE(cryptographer.Decrypt(encrypted, &decrypted));
- EXPECT_EQ(original.SerializeAsString(), decrypted.SerializeAsString());
- }
-}
-
-TEST_F(CryptographerTest, Bootstrap) {
- KeyParams params = {"localhost", "dummy", "dummy"};
- cryptographer_.AddKey(params);
-
- std::string token;
- EXPECT_TRUE(cryptographer_.GetBootstrapToken(&token));
- EXPECT_TRUE(IsStringUTF8(token));
-
- Cryptographer other_cryptographer(&encryptor_);
- other_cryptographer.Bootstrap(token);
- EXPECT_TRUE(other_cryptographer.is_ready());
-
- const char secret[] = "secret";
- sync_pb::EncryptedData encrypted;
- EXPECT_TRUE(other_cryptographer.EncryptString(secret, &encrypted));
- EXPECT_TRUE(cryptographer_.CanDecryptUsingDefaultKey(encrypted));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/data_type_histogram.h b/chromium/sync/util/data_type_histogram.h
deleted file mode 100644
index e3a8d6ff795..00000000000
--- a/chromium/sync/util/data_type_histogram.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_DATA_TYPE_HISTOGRAM_H_
-#define SYNC_UTIL_DATA_TYPE_HISTOGRAM_H_
-
-#include "base/metrics/histogram.h"
-#include "base/time/time.h"
-#include "sync/internal_api/public/base/model_type.h"
-
-// For now, this just implements UMA_HISTOGRAM_LONG_TIMES. This can be adjusted
-// if we feel the min, max, or bucket count amount are not appropriate.
-#define SYNC_FREQ_HISTOGRAM(name, time) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, time, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromHours(1), 50)
-
-// Helper macro for datatype specific histograms. For each datatype, invokes
-// a pre-defined PER_DATA_TYPE_MACRO(type_str), where |type_str| is the string
-// version of the datatype.
-//
-// Example usage (ignoring newlines necessary for multiline macro):
-// std::vector<syncer::ModelType> types = GetEntryTypes();
-// for (int i = 0; i < types.size(); ++i) {
-// #define PER_DATA_TYPE_MACRO(type_str)
-// UMA_HISTOGRAM_ENUMERATION("Sync." type_str "StartFailures",
-// error, max_error);
-// SYNC_DATA_TYPE_HISTOGRAM(types[i]);
-// #undef PER_DATA_TYPE_MACRO
-// }
-//
-// TODO(zea): Once visual studio supports proper variadic argument replacement
-// in macros, pass in the histogram method directly as a parameter.
-// See http://connect.microsoft.com/VisualStudio/feedback/details/380090/
-// variadic-macro-replacement#details
-#define SYNC_DATA_TYPE_HISTOGRAM(datatype) \
- do { \
- switch (datatype) { \
- case ::syncer::BOOKMARKS: \
- PER_DATA_TYPE_MACRO("Bookmarks"); \
- break; \
- case ::syncer::PREFERENCES: \
- PER_DATA_TYPE_MACRO("Preferences"); \
- break; \
- case ::syncer::PASSWORDS: \
- PER_DATA_TYPE_MACRO("Passwords"); \
- break; \
- case ::syncer::AUTOFILL: \
- PER_DATA_TYPE_MACRO("Autofill"); \
- break; \
- case ::syncer::AUTOFILL_PROFILE: \
- PER_DATA_TYPE_MACRO("AutofillProfiles"); \
- break; \
- case ::syncer::THEMES: \
- PER_DATA_TYPE_MACRO("Themes"); \
- break; \
- case ::syncer::TYPED_URLS: \
- PER_DATA_TYPE_MACRO("TypedUrls"); \
- break; \
- case ::syncer::EXTENSIONS: \
- PER_DATA_TYPE_MACRO("Extensions"); \
- break; \
- case ::syncer::NIGORI: \
- PER_DATA_TYPE_MACRO("Nigori"); \
- break; \
- case ::syncer::SEARCH_ENGINES: \
- PER_DATA_TYPE_MACRO("SearchEngines"); \
- break; \
- case ::syncer::SESSIONS: \
- PER_DATA_TYPE_MACRO("Sessions"); \
- break; \
- case ::syncer::APPS: \
- PER_DATA_TYPE_MACRO("Apps"); \
- break; \
- case ::syncer::APP_LIST: \
- PER_DATA_TYPE_MACRO("AppList"); \
- break; \
- case ::syncer::APP_SETTINGS: \
- PER_DATA_TYPE_MACRO("AppSettings"); \
- break; \
- case ::syncer::EXTENSION_SETTINGS: \
- PER_DATA_TYPE_MACRO("ExtensionSettings"); \
- break; \
- case ::syncer::APP_NOTIFICATIONS: \
- PER_DATA_TYPE_MACRO("AppNotifications"); \
- break; \
- case ::syncer::HISTORY_DELETE_DIRECTIVES: \
- PER_DATA_TYPE_MACRO("HistoryDeleteDirectives"); \
- break; \
- case ::syncer::SYNCED_NOTIFICATIONS: \
- PER_DATA_TYPE_MACRO("SyncedNotifications"); \
- break; \
- case ::syncer::DEVICE_INFO: \
- PER_DATA_TYPE_MACRO("DeviceInfo"); \
- break; \
- case ::syncer::EXPERIMENTS: \
- PER_DATA_TYPE_MACRO("Experiments"); \
- break; \
- case ::syncer::PRIORITY_PREFERENCES :\
- PER_DATA_TYPE_MACRO("PriorityPreferences"); \
- break; \
- case ::syncer::DICTIONARY: \
- PER_DATA_TYPE_MACRO("Dictionary"); \
- break; \
- case ::syncer::FAVICON_IMAGES: \
- PER_DATA_TYPE_MACRO("FaviconImages"); \
- break; \
- case ::syncer::FAVICON_TRACKING: \
- PER_DATA_TYPE_MACRO("FaviconTracking"); \
- break; \
- case ::syncer::MANAGED_USER_SETTINGS: \
- PER_DATA_TYPE_MACRO("ManagedUserSetting"); \
- break; \
- case ::syncer::MANAGED_USERS: \
- PER_DATA_TYPE_MACRO("ManagedUser"); \
- break; \
- case ::syncer::ARTICLES: \
- PER_DATA_TYPE_MACRO("Article"); \
- break; \
- case ::syncer::PROXY_TABS: \
- PER_DATA_TYPE_MACRO("Tabs"); \
- break; \
- default: \
- NOTREACHED() << "Unknown datatype " \
- << ::syncer::ModelTypeToString(datatype); \
- } \
- } while (0)
-
-#endif // SYNC_UTIL_DATA_TYPE_HISTOGRAM_H_
diff --git a/chromium/sync/util/data_type_histogram_unittest.cc b/chromium/sync/util/data_type_histogram_unittest.cc
deleted file mode 100644
index 67b5e642d64..00000000000
--- a/chromium/sync/util/data_type_histogram_unittest.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/data_type_histogram.h"
-
-#include "base/time/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-class DataTypeHistogramTest : public testing::Test {
-};
-
-// Create a histogram of type HISTOGRAM_COUNTS for each model type. Nothing
-// should break.
-TEST(DataTypeHistogramTest, BasicCount) {
- for (int i = FIRST_REAL_MODEL_TYPE; i <= LAST_REAL_MODEL_TYPE; ++i) {
- ModelType type = ModelTypeFromInt(i);
-#define PER_DATA_TYPE_MACRO(type_str) \
- HISTOGRAM_COUNTS("Prefix" type_str "Suffix", 1);
- SYNC_DATA_TYPE_HISTOGRAM(type);
-#undef PER_DATA_TYPE_MACRO
- }
-}
-
-// Create a histogram of type SYNC_FREQ_HISTOGRAM for each model type. Nothing
-// should break.
-TEST(DataTypeHistogramTest, BasicFreq) {
- for (int i = FIRST_REAL_MODEL_TYPE; i <= LAST_REAL_MODEL_TYPE; ++i) {
- ModelType type = ModelTypeFromInt(i);
-#define PER_DATA_TYPE_MACRO(type_str) \
- SYNC_FREQ_HISTOGRAM("Prefix" type_str "Suffix", \
- base::TimeDelta::FromSeconds(1));
- SYNC_DATA_TYPE_HISTOGRAM(type);
-#undef PER_DATA_TYPE_MACRO
- }
-}
-
-// Create a histogram of type UMA_HISTOGRAM_ENUMERATION for each model type.
-// Nothing should break.
-TEST(DataTypeHistogramTest, BasicEnum) {
- enum HistTypes {
- TYPE_1,
- TYPE_2,
- TYPE_COUNT,
- };
- for (int i = FIRST_REAL_MODEL_TYPE; i <= LAST_REAL_MODEL_TYPE; ++i) {
- ModelType type = ModelTypeFromInt(i);
-#define PER_DATA_TYPE_MACRO(type_str) \
- UMA_HISTOGRAM_ENUMERATION("Prefix" type_str "Suffix", \
- (i % 2 ? TYPE_1 : TYPE_2), TYPE_COUNT);
- SYNC_DATA_TYPE_HISTOGRAM(type);
-#undef PER_DATA_TYPE_MACRO
- }
-}
-
-} // namespace
-} // namespace syncer
diff --git a/chromium/sync/util/encryptor.h b/chromium/sync/util/encryptor.h
deleted file mode 100644
index d188a0139f9..00000000000
--- a/chromium/sync/util/encryptor.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_ENCRYPTOR_H_
-#define SYNC_UTIL_ENCRYPTOR_H_
-
-#include <string>
-
-namespace syncer {
-
-class Encryptor {
- public:
- // All methods below should be thread-safe.
- virtual bool EncryptString(const std::string& plaintext,
- std::string* ciphertext) = 0;
-
- virtual bool DecryptString(const std::string& ciphertext,
- std::string* plaintext) = 0;
-
- protected:
- virtual ~Encryptor() {}
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_ENCRYPTOR_H_
diff --git a/chromium/sync/util/extensions_activity.cc b/chromium/sync/util/extensions_activity.cc
deleted file mode 100644
index dcccb263f0b..00000000000
--- a/chromium/sync/util/extensions_activity.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/extensions_activity.h"
-
-namespace syncer {
-
-ExtensionsActivity::Record::Record()
- : bookmark_write_count(0U) {}
-
-ExtensionsActivity::Record::~Record() {}
-
-ExtensionsActivity::ExtensionsActivity() {}
-
-ExtensionsActivity::~ExtensionsActivity() {}
-
-void ExtensionsActivity::GetAndClearRecords(Records* buffer) {
- base::AutoLock lock(records_lock_);
- buffer->clear();
- buffer->swap(records_);
-}
-
-void ExtensionsActivity::PutRecords(const Records& records) {
- base::AutoLock lock(records_lock_);
- for (Records::const_iterator i = records.begin(); i != records.end(); ++i) {
- records_[i->first].extension_id = i->second.extension_id;
- records_[i->first].bookmark_write_count += i->second.bookmark_write_count;
- }
-}
-
-void ExtensionsActivity::UpdateRecord(const std::string& extension_id) {
- base::AutoLock lock(records_lock_);
- Record& record = records_[extension_id];
- record.extension_id = extension_id;
- record.bookmark_write_count++;
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/extensions_activity.h b/chromium/sync/util/extensions_activity.h
deleted file mode 100644
index 8178760b315..00000000000
--- a/chromium/sync/util/extensions_activity.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_EXTENSIONS_ACTIVITY_H_
-#define SYNC_UTIL_EXTENSIONS_ACTIVITY_H_
-
-#include <map>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// A storage to record usage of extensions APIs to send to sync
-// servers, with the ability to purge data once sync servers have
-// acknowledged it (successful commit response).
-class SYNC_EXPORT ExtensionsActivity
- : public base::RefCountedThreadSafe<ExtensionsActivity> {
- public:
- // A data record of activity performed by extension |extension_id|.
- struct SYNC_EXPORT Record {
- Record();
- ~Record();
-
- // The human-readable ID identifying the extension responsible
- // for the activity reported in this Record.
- std::string extension_id;
-
- // How many times the extension successfully invoked a write
- // operation through the bookmarks API since the last CommitMessage.
- uint32 bookmark_write_count;
- };
-
- typedef std::map<std::string, Record> Records;
-
- ExtensionsActivity();
-
- // Fill |buffer| with all current records and then clear the
- // internal records. Called on sync thread to append records to sync commit
- // message.
- void GetAndClearRecords(Records* buffer);
-
- // Merge |records| with the current set of records. Called on sync thread to
- // put back records if sync commit failed.
- void PutRecords(const Records& records);
-
- // Increment write count of the specified extension.
- void UpdateRecord(const std::string& extension_id);
-
- private:
- friend class base::RefCountedThreadSafe<ExtensionsActivity>;
- ~ExtensionsActivity();
-
- Records records_;
- mutable base::Lock records_lock_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_EXTENSIONS_ACTIVITY_H_
diff --git a/chromium/sync/util/get_session_name.cc b/chromium/sync/util/get_session_name.cc
deleted file mode 100644
index 3a09c51379d..00000000000
--- a/chromium/sync/util/get_session_name.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/get_session_name.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/callback.h"
-#include "base/location.h"
-#include "base/sys_info.h"
-#include "base/task_runner.h"
-
-#if defined(OS_LINUX)
-#include "sync/util/get_session_name_linux.h"
-#elif defined(OS_IOS)
-#include "sync/util/get_session_name_ios.h"
-#elif defined(OS_MACOSX)
-#include "sync/util/get_session_name_mac.h"
-#elif defined(OS_WIN)
-#include "sync/util/get_session_name_win.h"
-#elif defined(OS_ANDROID)
-#include "base/android/build_info.h"
-#endif
-
-namespace syncer {
-
-namespace {
-
-std::string GetSessionNameSynchronously() {
- std::string session_name;
-#if defined(OS_CHROMEOS)
- std::string board = base::SysInfo::GetLsbReleaseBoard();
- // Currently, only "stumpy" type of board is considered Chromebox, and
- // anything else is Chromebook. On these devices, session_name should look
- // like "stumpy-signed-mp-v2keys" etc. The information can be checked on
- // "CHROMEOS_RELEASE_BOARD" line in chrome://system.
- session_name = board.substr(0, 6) == "stumpy" ? "Chromebox" : "Chromebook";
-#elif defined(OS_LINUX)
- session_name = internal::GetHostname();
-#elif defined(OS_IOS)
- session_name = internal::GetComputerName();
-#elif defined(OS_MACOSX)
- session_name = internal::GetHardwareModelName();
-#elif defined(OS_WIN)
- session_name = internal::GetComputerName();
-#elif defined(OS_ANDROID)
- base::android::BuildInfo* android_build_info =
- base::android::BuildInfo::GetInstance();
- session_name = android_build_info->model();
-#endif
-
- if (session_name == "Unknown" || session_name.empty())
- session_name = base::SysInfo::OperatingSystemName();
-
- return session_name;
-}
-
-void FillSessionName(std::string* session_name) {
- *session_name = GetSessionNameSynchronously();
-}
-
-void OnSessionNameFilled(
- const base::Callback<void(const std::string&)>& done_callback,
- std::string* session_name) {
- done_callback.Run(*session_name);
-}
-
-} // namespace
-
-void GetSessionName(
- const scoped_refptr<base::TaskRunner>& task_runner,
- const base::Callback<void(const std::string&)>& done_callback) {
- std::string* session_name = new std::string();
- task_runner->PostTaskAndReply(
- FROM_HERE,
- base::Bind(&FillSessionName,
- base::Unretained(session_name)),
- base::Bind(&OnSessionNameFilled,
- done_callback,
- base::Owned(session_name)));
-}
-
-std::string GetSessionNameSynchronouslyForTesting() {
- return GetSessionNameSynchronously();
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/get_session_name.h b/chromium/sync/util/get_session_name.h
deleted file mode 100644
index 724ae3ca7b8..00000000000
--- a/chromium/sync/util/get_session_name.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_GET_SESSION_NAME_H_
-#define SYNC_UTIL_GET_SESSION_NAME_H_
-
-#include <string>
-
-#include "base/callback_forward.h"
-#include "base/memory/ref_counted.h"
-#include "sync/base/sync_export.h"
-
-namespace base {
-class TaskRunner;
-} // namespace base
-
-namespace syncer {
-
-SYNC_EXPORT void GetSessionName(
- const scoped_refptr<base::TaskRunner>& task_runner,
- const base::Callback<void(const std::string&)>& done_callback);
-
-SYNC_EXPORT_PRIVATE std::string GetSessionNameSynchronouslyForTesting();
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_GET_SESSION_NAME_H_
diff --git a/chromium/sync/util/get_session_name_ios.h b/chromium/sync/util/get_session_name_ios.h
deleted file mode 100644
index 15e7e98ee73..00000000000
--- a/chromium/sync/util/get_session_name_ios.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_GET_SESSION_NAME_IOS_H_
-#define SYNC_UTIL_GET_SESSION_NAME_IOS_H_
-
-#include <string>
-
-namespace syncer {
-namespace internal {
-
-std::string GetComputerName();
-
-} // namespace internal
-} // namespace syncer
-
-#endif // SYNC_UTIL_GET_SESSION_NAME_IOS_H_
diff --git a/chromium/sync/util/get_session_name_ios.mm b/chromium/sync/util/get_session_name_ios.mm
deleted file mode 100644
index ff9e619a708..00000000000
--- a/chromium/sync/util/get_session_name_ios.mm
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/get_session_name_ios.h"
-
-#import <UIKit/UIKit.h>
-
-#include "base/strings/sys_string_conversions.h"
-
-namespace syncer {
-namespace internal {
-
-std::string GetComputerName() {
- return base::SysNSStringToUTF8([[UIDevice currentDevice] name]);
-}
-
-} // namespace internal
-} // namespace syncer
diff --git a/chromium/sync/util/get_session_name_linux.cc b/chromium/sync/util/get_session_name_linux.cc
deleted file mode 100644
index 172252d80e3..00000000000
--- a/chromium/sync/util/get_session_name_linux.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/get_session_name_linux.h"
-
-#include <limits.h> // for HOST_NAME_MAX
-#include <unistd.h> // for gethostname()
-
-#include "base/linux_util.h"
-
-namespace syncer {
-namespace internal {
-
-std::string GetHostname() {
- char hostname[HOST_NAME_MAX];
- if (gethostname(hostname, HOST_NAME_MAX) == 0) // Success.
- return hostname;
- return base::GetLinuxDistro();
-}
-
-} // namespace internal
-} // namespace syncer
-
diff --git a/chromium/sync/util/get_session_name_linux.h b/chromium/sync/util/get_session_name_linux.h
deleted file mode 100644
index b6bd273374e..00000000000
--- a/chromium/sync/util/get_session_name_linux.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_GET_SESSION_NAME_LINUX_H_
-#define SYNC_UTIL_GET_SESSION_NAME_LINUX_H_
-
-#include <string>
-
-namespace syncer {
-namespace internal {
-
-std::string GetHostname();
-
-} // namespace internal
-} // namespace syncer
-
-#endif // SYNC_UTIL_GET_SESSION_NAME_LINUX_H_
-
diff --git a/chromium/sync/util/get_session_name_mac.h b/chromium/sync/util/get_session_name_mac.h
deleted file mode 100644
index e0a81ed1940..00000000000
--- a/chromium/sync/util/get_session_name_mac.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_GET_SESSION_NAME_MAC_H_
-#define SYNC_UTIL_GET_SESSION_NAME_MAC_H_
-
-#include <string>
-
-namespace syncer {
-namespace internal {
-
-// Returns the Hardware model name, without trailing numbers, if
-// possible. See http://www.cocoadev.com/index.pl?MacintoshModels for
-// an example list of models. If an error occurs trying to read the
-// model, this simply returns "Unknown".
-std::string GetHardwareModelName();
-
-} // namespace internal
-} // namespace syncer
-
-#endif // SYNC_UTIL_GET_SESSION_NAME_MAC_H_
diff --git a/chromium/sync/util/get_session_name_mac.mm b/chromium/sync/util/get_session_name_mac.mm
deleted file mode 100644
index aa61e2f6d1a..00000000000
--- a/chromium/sync/util/get_session_name_mac.mm
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/get_session_name_mac.h"
-
-#import <SystemConfiguration/SCDynamicStoreCopySpecific.h>
-#include <sys/sysctl.h>
-
-#include "base/mac/scoped_cftyperef.h"
-#include "base/strings/string_util.h"
-#include "base/strings/sys_string_conversions.h"
-
-namespace syncer {
-namespace internal {
-
-std::string GetHardwareModelName() {
- // Do not use NSHost currentHost, as it's very slow. http://crbug.com/138570
- SCDynamicStoreContext context = { 0, NULL, NULL, NULL };
- base::ScopedCFTypeRef<SCDynamicStoreRef> store(SCDynamicStoreCreate(
- kCFAllocatorDefault, CFSTR("chrome_sync"), NULL, &context));
- base::ScopedCFTypeRef<CFStringRef> machine_name(
- SCDynamicStoreCopyLocalHostName(store.get()));
- if (machine_name.get())
- return base::SysCFStringRefToUTF8(machine_name.get());
-
- // Fall back to get computer name.
- base::ScopedCFTypeRef<CFStringRef> computer_name(
- SCDynamicStoreCopyComputerName(store.get(), NULL));
- if (computer_name.get())
- return base::SysCFStringRefToUTF8(computer_name.get());
-
- // If all else fails, return to using a slightly nicer version of the
- // hardware model.
- char modelBuffer[256];
- size_t length = sizeof(modelBuffer);
- if (!sysctlbyname("hw.model", modelBuffer, &length, NULL, 0)) {
- for (size_t i = 0; i < length; i++) {
- if (IsAsciiDigit(modelBuffer[i]))
- return std::string(modelBuffer, 0, i);
- }
- return std::string(modelBuffer, 0, length);
- }
- return "Unknown";
-}
-
-} // namespace internal
-} // namespace syncer
diff --git a/chromium/sync/util/get_session_name_unittest.cc b/chromium/sync/util/get_session_name_unittest.cc
deleted file mode 100644
index 724cd8b7f69..00000000000
--- a/chromium/sync/util/get_session_name_unittest.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-#include "base/sys_info.h"
-#include "sync/util/get_session_name.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_CHROMEOS)
-#include "base/command_line.h"
-#include "chromeos/chromeos_switches.h"
-#endif // OS_CHROMEOS
-
-namespace syncer {
-
-namespace {
-
-class GetSessionNameTest : public ::testing::Test {
- public:
- void SetSessionNameAndQuit(const std::string& session_name) {
- session_name_ = session_name;
- message_loop_.Quit();
- }
-
- protected:
- base::MessageLoop message_loop_;
- std::string session_name_;
-};
-
-// Call GetSessionNameSynchronouslyForTesting and make sure its return
-// value looks sane.
-TEST_F(GetSessionNameTest, GetSessionNameSynchronously) {
- const std::string& session_name = GetSessionNameSynchronouslyForTesting();
- EXPECT_FALSE(session_name.empty());
-}
-
-#if defined(OS_CHROMEOS)
-
-// Call GetSessionNameSynchronouslyForTesting on ChromeOS where the board type
-// is "lumpy-signed-mp-v2keys" and make sure the return value is "Chromebook".
-TEST_F(GetSessionNameTest, GetSessionNameSynchronouslyChromebook) {
- const char* kLsbRelease = "CHROMEOS_RELEASE_BOARD=lumpy-signed-mp-v2keys\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- const std::string& session_name = GetSessionNameSynchronouslyForTesting();
- EXPECT_EQ("Chromebook", session_name);
-}
-
-// Call GetSessionNameSynchronouslyForTesting on ChromeOS where the board type
-// is "stumpy-signed-mp-v2keys" and make sure the return value is "Chromebox".
-TEST_F(GetSessionNameTest, GetSessionNameSynchronouslyChromebox) {
- const char* kLsbRelease = "CHROMEOS_RELEASE_BOARD=stumpy-signed-mp-v2keys\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- const std::string& session_name = GetSessionNameSynchronouslyForTesting();
- EXPECT_EQ("Chromebox", session_name);
-}
-
-#endif // OS_CHROMEOS
-
-// Calls GetSessionName and runs the message loop until it comes back
-// with a session name. Makes sure the returned session name is equal
-// to the return value of GetSessionNameSynchronouslyForTesting().
-TEST_F(GetSessionNameTest, GetSessionName) {
- GetSessionName(message_loop_.message_loop_proxy(),
- base::Bind(&GetSessionNameTest::SetSessionNameAndQuit,
- base::Unretained(this)));
- message_loop_.Run();
- EXPECT_EQ(session_name_, GetSessionNameSynchronouslyForTesting());
-}
-
-} // namespace
-
-} // namespace syncer
diff --git a/chromium/sync/util/get_session_name_win.cc b/chromium/sync/util/get_session_name_win.cc
deleted file mode 100644
index 101bcd4fea7..00000000000
--- a/chromium/sync/util/get_session_name_win.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/get_session_name_win.h"
-
-#include <windows.h>
-
-namespace syncer {
-namespace internal {
-
-std::string GetComputerName() {
- char computer_name[MAX_COMPUTERNAME_LENGTH + 1];
- DWORD size = sizeof(computer_name);
- if (GetComputerNameA(computer_name, &size))
- return computer_name;
- return std::string();
-}
-
-} // namespace internal
-} // namespace syncer
diff --git a/chromium/sync/util/get_session_name_win.h b/chromium/sync/util/get_session_name_win.h
deleted file mode 100644
index 8ec903f6094..00000000000
--- a/chromium/sync/util/get_session_name_win.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_GET_SESSION_NAME_WIN_H_
-#define SYNC_UTIL_GET_SESSION_NAME_WIN_H_
-
-#include <string>
-
-namespace syncer {
-namespace internal {
-
-std::string GetComputerName();
-
-} // namespace internal
-} // namespace syncer
-
-#endif // SYNC_UTIL_GET_SESSION_NAME_WIN_H_
diff --git a/chromium/sync/util/logging.cc b/chromium/sync/util/logging.cc
deleted file mode 100644
index 272c72cf55e..00000000000
--- a/chromium/sync/util/logging.cc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/logging.h"
-
-#include "base/location.h"
-
-namespace syncer {
-
-bool VlogIsOnForLocation(const tracked_objects::Location& from_here,
- int verbose_level) {
- return (verbose_level <=
- logging::GetVlogLevelHelper(
- from_here.file_name(), ::strlen(from_here.file_name())));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/logging.h b/chromium/sync/util/logging.h
deleted file mode 100644
index f23d9c16f76..00000000000
--- a/chromium/sync/util/logging.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_LOGGING_H_
-#define SYNC_UTIL_LOGGING_H_
-
-#include "base/logging.h"
-
-// TODO(akalin): This probably belongs in base/ somewhere.
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace syncer {
-
-bool VlogIsOnForLocation(const tracked_objects::Location& from_here,
- int verbose_level);
-
-} // namespace syncer
-
-#define VLOG_LOC_STREAM(from_here, verbose_level) \
- logging::LogMessage(from_here.file_name(), from_here.line_number(), \
- -verbose_level).stream()
-
-#define DVLOG_LOC(from_here, verbose_level) \
- LAZY_STREAM( \
- VLOG_LOC_STREAM(from_here, verbose_level), \
- ::logging::DEBUG_MODE && \
- (VLOG_IS_ON(verbose_level) || \
- ::syncer::VlogIsOnForLocation(from_here, verbose_level))) \
-
-#endif // SYNC_UTIL_LOGGING_H_
diff --git a/chromium/sync/util/nigori.cc b/chromium/sync/util/nigori.cc
deleted file mode 100644
index e74d81a9185..00000000000
--- a/chromium/sync/util/nigori.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/nigori.h"
-
-#include <sstream>
-#include <vector>
-
-#include "base/base64.h"
-#include "base/logging.h"
-#include "base/strings/string_util.h"
-#include "base/sys_byteorder.h"
-#include "crypto/encryptor.h"
-#include "crypto/hmac.h"
-#include "crypto/random.h"
-#include "crypto/symmetric_key.h"
-
-using base::Base64Encode;
-using base::Base64Decode;
-using crypto::Encryptor;
-using crypto::HMAC;
-using crypto::SymmetricKey;
-
-namespace syncer {
-
-// NigoriStream simplifies the concatenation operation of the Nigori protocol.
-class NigoriStream {
- public:
- // Append the big-endian representation of the length of |value| with 32 bits,
- // followed by |value| itself to the stream.
- NigoriStream& operator<<(const std::string& value) {
- uint32 size = base::HostToNet32(value.size());
- stream_.write((char *) &size, sizeof(uint32));
- stream_ << value;
- return *this;
- }
-
- // Append the big-endian representation of the length of |type| with 32 bits,
- // followed by the big-endian representation of the value of |type|, with 32
- // bits, to the stream.
- NigoriStream& operator<<(const Nigori::Type type) {
- uint32 size = base::HostToNet32(sizeof(uint32));
- stream_.write((char *) &size, sizeof(uint32));
- uint32 value = base::HostToNet32(type);
- stream_.write((char *) &value, sizeof(uint32));
- return *this;
- }
-
- std::string str() {
- return stream_.str();
- }
-
- private:
- std::ostringstream stream_;
-};
-
-// static
-const char Nigori::kSaltSalt[] = "saltsalt";
-
-Nigori::Nigori() {
-}
-
-Nigori::~Nigori() {
-}
-
-bool Nigori::InitByDerivation(const std::string& hostname,
- const std::string& username,
- const std::string& password) {
- NigoriStream salt_password;
- salt_password << username << hostname;
-
- // Suser = PBKDF2(Username || Servername, "saltsalt", Nsalt, 8)
- scoped_ptr<SymmetricKey> user_salt(SymmetricKey::DeriveKeyFromPassword(
- SymmetricKey::HMAC_SHA1, salt_password.str(),
- kSaltSalt,
- kSaltIterations,
- kSaltKeySizeInBits));
- DCHECK(user_salt.get());
-
- std::string raw_user_salt;
- if (!user_salt->GetRawKey(&raw_user_salt))
- return false;
-
- // Kuser = PBKDF2(P, Suser, Nuser, 16)
- user_key_.reset(SymmetricKey::DeriveKeyFromPassword(SymmetricKey::AES,
- password, raw_user_salt, kUserIterations, kDerivedKeySizeInBits));
- DCHECK(user_key_.get());
-
- // Kenc = PBKDF2(P, Suser, Nenc, 16)
- encryption_key_.reset(SymmetricKey::DeriveKeyFromPassword(SymmetricKey::AES,
- password, raw_user_salt, kEncryptionIterations, kDerivedKeySizeInBits));
- DCHECK(encryption_key_.get());
-
- // Kmac = PBKDF2(P, Suser, Nmac, 16)
- mac_key_.reset(SymmetricKey::DeriveKeyFromPassword(
- SymmetricKey::HMAC_SHA1, password, raw_user_salt, kSigningIterations,
- kDerivedKeySizeInBits));
- DCHECK(mac_key_.get());
-
- return user_key_.get() && encryption_key_.get() && mac_key_.get();
-}
-
-bool Nigori::InitByImport(const std::string& user_key,
- const std::string& encryption_key,
- const std::string& mac_key) {
- user_key_.reset(SymmetricKey::Import(SymmetricKey::AES, user_key));
- DCHECK(user_key_.get());
-
- encryption_key_.reset(SymmetricKey::Import(SymmetricKey::AES,
- encryption_key));
- DCHECK(encryption_key_.get());
-
- mac_key_.reset(SymmetricKey::Import(SymmetricKey::HMAC_SHA1, mac_key));
- DCHECK(mac_key_.get());
-
- return user_key_.get() && encryption_key_.get() && mac_key_.get();
-}
-
-// Permute[Kenc,Kmac](type || name)
-bool Nigori::Permute(Type type, const std::string& name,
- std::string* permuted) const {
- DCHECK_LT(0U, name.size());
-
- NigoriStream plaintext;
- plaintext << type << name;
-
- Encryptor encryptor;
- if (!encryptor.Init(encryption_key_.get(), Encryptor::CBC,
- std::string(kIvSize, 0)))
- return false;
-
- std::string ciphertext;
- if (!encryptor.Encrypt(plaintext.str(), &ciphertext))
- return false;
-
- std::string raw_mac_key;
- if (!mac_key_->GetRawKey(&raw_mac_key))
- return false;
-
- HMAC hmac(HMAC::SHA256);
- if (!hmac.Init(raw_mac_key))
- return false;
-
- std::vector<unsigned char> hash(kHashSize);
- if (!hmac.Sign(ciphertext, &hash[0], hash.size()))
- return false;
-
- std::string output;
- output.assign(ciphertext);
- output.append(hash.begin(), hash.end());
-
- Base64Encode(output, permuted);
- return true;
-}
-
-// Enc[Kenc,Kmac](value)
-bool Nigori::Encrypt(const std::string& value, std::string* encrypted) const {
- if (0U >= value.size())
- return false;
-
- std::string iv;
- crypto::RandBytes(WriteInto(&iv, kIvSize + 1), kIvSize);
-
- Encryptor encryptor;
- if (!encryptor.Init(encryption_key_.get(), Encryptor::CBC, iv))
- return false;
-
- std::string ciphertext;
- if (!encryptor.Encrypt(value, &ciphertext))
- return false;
-
- std::string raw_mac_key;
- if (!mac_key_->GetRawKey(&raw_mac_key))
- return false;
-
- HMAC hmac(HMAC::SHA256);
- if (!hmac.Init(raw_mac_key))
- return false;
-
- std::vector<unsigned char> hash(kHashSize);
- if (!hmac.Sign(ciphertext, &hash[0], hash.size()))
- return false;
-
- std::string output;
- output.assign(iv);
- output.append(ciphertext);
- output.append(hash.begin(), hash.end());
-
- Base64Encode(output, encrypted);
- return true;
-}
-
-bool Nigori::Decrypt(const std::string& encrypted, std::string* value) const {
- std::string input;
- if (!Base64Decode(encrypted, &input))
- return false;
-
- if (input.size() < kIvSize * 2 + kHashSize)
- return false;
-
- // The input is:
- // * iv (16 bytes)
- // * ciphertext (multiple of 16 bytes)
- // * hash (32 bytes)
- std::string iv(input.substr(0, kIvSize));
- std::string ciphertext(input.substr(kIvSize,
- input.size() - (kIvSize + kHashSize)));
- std::string hash(input.substr(input.size() - kHashSize, kHashSize));
-
- std::string raw_mac_key;
- if (!mac_key_->GetRawKey(&raw_mac_key))
- return false;
-
- HMAC hmac(HMAC::SHA256);
- if (!hmac.Init(raw_mac_key))
- return false;
-
- std::vector<unsigned char> expected(kHashSize);
- if (!hmac.Sign(ciphertext, &expected[0], expected.size()))
- return false;
-
- if (hash.compare(0, hash.size(),
- reinterpret_cast<char *>(&expected[0]),
- expected.size()))
- return false;
-
- Encryptor encryptor;
- if (!encryptor.Init(encryption_key_.get(), Encryptor::CBC, iv))
- return false;
-
- if (!encryptor.Decrypt(ciphertext, value))
- return false;
-
- return true;
-}
-
-bool Nigori::ExportKeys(std::string* user_key,
- std::string* encryption_key,
- std::string* mac_key) const {
- DCHECK(user_key);
- DCHECK(encryption_key);
- DCHECK(mac_key);
-
- return user_key_->GetRawKey(user_key) &&
- encryption_key_->GetRawKey(encryption_key) &&
- mac_key_->GetRawKey(mac_key);
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/nigori.h b/chromium/sync/util/nigori.h
deleted file mode 100644
index cd67bf9ec47..00000000000
--- a/chromium/sync/util/nigori.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_UTIL_NIGORI_H_
-#define SYNC_UTIL_NIGORI_H_
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "sync/base/sync_export.h"
-
-namespace crypto {
-class SymmetricKey;
-} // namespace crypto
-
-namespace syncer {
-
-// A (partial) implementation of Nigori, a protocol to securely store secrets in
-// the cloud. This implementation does not support server authentication or
-// assisted key derivation.
-//
-// To store secrets securely, use the |Permute| method to derive a lookup name
-// for your secret (basically a map key), and |Encrypt| and |Decrypt| to store
-// and retrieve the secret.
-//
-// TODO: Link to doc.
-class SYNC_EXPORT Nigori {
- public:
- enum Type {
- Password = 1,
- };
-
- Nigori();
- virtual ~Nigori();
-
- // Initialize the client with the given |hostname|, |username| and |password|.
- bool InitByDerivation(const std::string& hostname,
- const std::string& username,
- const std::string& password);
-
- // Initialize the client by importing the given keys instead of deriving new
- // ones.
- bool InitByImport(const std::string& user_key,
- const std::string& encryption_key,
- const std::string& mac_key);
-
- // Derives a secure lookup name from |type| and |name|. If |hostname|,
- // |username| and |password| are kept constant, a given |type| and |name| pair
- // always yields the same |permuted| value. Note that |permuted| will be
- // Base64 encoded.
- bool Permute(Type type, const std::string& name, std::string* permuted) const;
-
- // Encrypts |value|. Note that on success, |encrypted| will be Base64
- // encoded.
- bool Encrypt(const std::string& value, std::string* encrypted) const;
-
- // Decrypts |value| into |decrypted|. It is assumed that |value| is Base64
- // encoded.
- bool Decrypt(const std::string& value, std::string* decrypted) const;
-
- // Exports the raw derived keys.
- bool ExportKeys(std::string* user_key,
- std::string* encryption_key,
- std::string* mac_key) const;
-
- static const char kSaltSalt[]; // The salt used to derive the user salt.
- static const size_t kSaltKeySizeInBits = 128;
- static const size_t kDerivedKeySizeInBits = 128;
- static const size_t kIvSize = 16;
- static const size_t kHashSize = 32;
-
- static const size_t kSaltIterations = 1001;
- static const size_t kUserIterations = 1002;
- static const size_t kEncryptionIterations = 1003;
- static const size_t kSigningIterations = 1004;
-
- private:
- scoped_ptr<crypto::SymmetricKey> user_key_;
- scoped_ptr<crypto::SymmetricKey> encryption_key_;
- scoped_ptr<crypto::SymmetricKey> mac_key_;
-};
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_NIGORI_H_
diff --git a/chromium/sync/util/nigori_unittest.cc b/chromium/sync/util/nigori_unittest.cc
deleted file mode 100644
index 88c2461231a..00000000000
--- a/chromium/sync/util/nigori_unittest.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/nigori.h"
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-namespace {
-
-TEST(SyncNigoriTest, Permute) {
- Nigori nigori;
- EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
-
- std::string permuted;
- EXPECT_TRUE(nigori.Permute(Nigori::Password, "test name",
- &permuted));
-
- std::string expected =
- "prewwdJj2PrGDczvmsHJEE5ndcCyVze8sY9kD5hjY/Tm"
- "c5kOjXFK7zB3Ss4LlHjEDirMu+vh85JwHOnGrMVe+g==";
- EXPECT_EQ(expected, permuted);
-}
-
-TEST(SyncNigoriTest, PermuteIsConstant) {
- Nigori nigori1;
- EXPECT_TRUE(nigori1.InitByDerivation("example.com", "username", "password"));
-
- std::string permuted1;
- EXPECT_TRUE(nigori1.Permute(Nigori::Password,
- "name",
- &permuted1));
-
- Nigori nigori2;
- EXPECT_TRUE(nigori2.InitByDerivation("example.com", "username", "password"));
-
- std::string permuted2;
- EXPECT_TRUE(nigori2.Permute(Nigori::Password,
- "name",
- &permuted2));
-
- EXPECT_LT(0U, permuted1.size());
- EXPECT_EQ(permuted1, permuted2);
-}
-
-TEST(SyncNigoriTest, EncryptDifferentIv) {
- Nigori nigori;
- EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
-
- std::string plaintext("value");
-
- std::string encrypted1;
- EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted1));
-
- std::string encrypted2;
- EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted2));
-
- EXPECT_NE(encrypted1, encrypted2);
-}
-
-TEST(SyncNigoriTest, Decrypt) {
- Nigori nigori;
- EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
-
- std::string encrypted =
- "e7+JyS6ibj6F5qqvpseukNRTZ+oBpu5iuv2VYjOfrH1dNiFLNf7Ov0"
- "kx/zicKFn0lJcbG1UmkNWqIuR4x+quDNVuLaZGbrJPhrJuj7cokCM=";
-
- std::string plaintext;
- EXPECT_TRUE(nigori.Decrypt(encrypted, &plaintext));
-
- std::string expected("test, test, 1, 2, 3");
- EXPECT_EQ(expected, plaintext);
-}
-
-TEST(SyncNigoriTest, EncryptDecrypt) {
- Nigori nigori;
- EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
-
- std::string plaintext("value");
-
- std::string encrypted;
- EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted));
-
- std::string decrypted;
- EXPECT_TRUE(nigori.Decrypt(encrypted, &decrypted));
-
- EXPECT_EQ(plaintext, decrypted);
-}
-
-TEST(SyncNigoriTest, CorruptedIv) {
- Nigori nigori;
- EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
-
- std::string plaintext("test");
-
- std::string encrypted;
- EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted));
-
- // Corrupt the IV by changing one of its byte.
- encrypted[0] = (encrypted[0] == 'a' ? 'b' : 'a');
-
- std::string decrypted;
- EXPECT_TRUE(nigori.Decrypt(encrypted, &decrypted));
-
- EXPECT_NE(plaintext, decrypted);
-}
-
-TEST(SyncNigoriTest, CorruptedCiphertext) {
- Nigori nigori;
- EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
-
- std::string plaintext("test");
-
- std::string encrypted;
- EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted));
-
- // Corrput the ciphertext by changing one of its bytes.
- encrypted[Nigori::kIvSize + 10] =
- (encrypted[Nigori::kIvSize + 10] == 'a' ? 'b' : 'a');
-
- std::string decrypted;
- EXPECT_FALSE(nigori.Decrypt(encrypted, &decrypted));
-
- EXPECT_NE(plaintext, decrypted);
-}
-
-// Crashes, Bug 55180.
-#if defined(OS_WIN)
-#define MAYBE_ExportImport DISABLED_ExportImport
-#else
-#define MAYBE_ExportImport ExportImport
-#endif
-TEST(SyncNigoriTest, MAYBE_ExportImport) {
- Nigori nigori1;
- EXPECT_TRUE(nigori1.InitByDerivation("example.com", "username", "password"));
-
- std::string user_key;
- std::string encryption_key;
- std::string mac_key;
- EXPECT_TRUE(nigori1.ExportKeys(&user_key, &encryption_key, &mac_key));
-
- Nigori nigori2;
- EXPECT_TRUE(nigori2.InitByImport(user_key, encryption_key, mac_key));
-
- std::string original("test");
- std::string plaintext;
- std::string ciphertext;
-
- EXPECT_TRUE(nigori1.Encrypt(original, &ciphertext));
- EXPECT_TRUE(nigori2.Decrypt(ciphertext, &plaintext));
- EXPECT_EQ(original, plaintext);
-
- EXPECT_TRUE(nigori2.Encrypt(original, &ciphertext));
- EXPECT_TRUE(nigori1.Decrypt(ciphertext, &plaintext));
- EXPECT_EQ(original, plaintext);
-
- std::string permuted1, permuted2;
- EXPECT_TRUE(nigori1.Permute(Nigori::Password, original, &permuted1));
- EXPECT_TRUE(nigori2.Permute(Nigori::Password, original, &permuted2));
- EXPECT_EQ(permuted1, permuted2);
-}
-
-} // anonymous namespace
-} // namespace syncer
diff --git a/chromium/sync/util/protobuf_unittest.cc b/chromium/sync/util/protobuf_unittest.cc
deleted file mode 100644
index 4f654d5d77d..00000000000
--- a/chromium/sync/util/protobuf_unittest.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-#include <vector>
-
-#include "sync/protocol/test.pb.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-TEST(SyncProtobufTest, TestUnknownFields) {
- // This tests ensures that we retain unknown fields in protocol buffers by
- // serialising UnknownFieldsTestB, which is a superset of UnknownFieldsTestA,
- // and checking we get back to the same message after parsing/serialising via
- // UnknownFieldsTestA.
- sync_pb::UnknownFieldsTestA a;
- sync_pb::UnknownFieldsTestB b;
- sync_pb::UnknownFieldsTestB b2;
-
- b.set_foo(true);
- b.set_bar(true);
- std::string serialized;
- ASSERT_TRUE(b.SerializeToString(&serialized));
- ASSERT_TRUE(a.ParseFromString(serialized));
- ASSERT_TRUE(a.foo());
- std::string serialized2;
- ASSERT_TRUE(a.SerializeToString(&serialized2));
- ASSERT_TRUE(b2.ParseFromString(serialized2));
- ASSERT_TRUE(b2.foo());
- ASSERT_TRUE(b2.bar());
-}
-
-} // namespace
diff --git a/chromium/sync/util/test_unrecoverable_error_handler.cc b/chromium/sync/util/test_unrecoverable_error_handler.cc
deleted file mode 100644
index 4bdbf3dc732..00000000000
--- a/chromium/sync/util/test_unrecoverable_error_handler.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/test_unrecoverable_error_handler.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncer {
-
-TestUnrecoverableErrorHandler::TestUnrecoverableErrorHandler() {}
-
-TestUnrecoverableErrorHandler::~TestUnrecoverableErrorHandler() {}
-
-void TestUnrecoverableErrorHandler::OnUnrecoverableError(
- const tracked_objects::Location& from_here,
- const std::string& message) {
- ADD_FAILURE_AT(from_here.file_name(), from_here.line_number())
- << from_here.function_name() << ": " << message;
-}
-
-} // namespace syncer
-
diff --git a/chromium/sync/util/test_unrecoverable_error_handler.h b/chromium/sync/util/test_unrecoverable_error_handler.h
deleted file mode 100644
index d36724ec543..00000000000
--- a/chromium/sync/util/test_unrecoverable_error_handler.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_INTERNAL_API_INCLUDES_TEST_UNRECOVERABLE_ERROR_HANDLER_H_
-#define SYNC_INTERNAL_API_INCLUDES_TEST_UNRECOVERABLE_ERROR_HANDLER_H_
-
-#include "base/compiler_specific.h"
-#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
-
-namespace syncer {
-
-// Implementation of UnrecoverableErrorHandler that simply adds a
-// gtest failure.
-class TestUnrecoverableErrorHandler : public UnrecoverableErrorHandler {
- public:
- TestUnrecoverableErrorHandler();
- virtual ~TestUnrecoverableErrorHandler();
-
- virtual void OnUnrecoverableError(const tracked_objects::Location& from_here,
- const std::string& message) OVERRIDE;
-};
-
-} // namespace syncer
-
-#endif // SYNC_INTERNAL_API_INCLUDES_TEST_UNRECOVERABLE_ERROR_HANDLER_H_
-
diff --git a/chromium/sync/util/time.cc b/chromium/sync/util/time.cc
deleted file mode 100644
index 00ff1b2e4a6..00000000000
--- a/chromium/sync/util/time.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/util/time.h"
-
-#include "base/i18n/time_formatting.h"
-#include "base/strings/utf_string_conversions.h"
-
-namespace syncer {
-
-int64 TimeToProtoTime(const base::Time& t) {
- return (t - base::Time::UnixEpoch()).InMilliseconds();
-}
-
-base::Time ProtoTimeToTime(int64 proto_t) {
- return base::Time::UnixEpoch() + base::TimeDelta::FromMilliseconds(proto_t);
-}
-
-std::string GetTimeDebugString(const base::Time& t) {
- return UTF16ToUTF8(base::TimeFormatFriendlyDateAndTime(t));
-}
-
-} // namespace syncer
diff --git a/chromium/sync/util/time.h b/chromium/sync/util/time.h
deleted file mode 100644
index f28a6f0b714..00000000000
--- a/chromium/sync/util/time.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Time-related sync functions.
-
-#ifndef SYNC_UTIL_TIME_H_
-#define SYNC_UTIL_TIME_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/time/time.h"
-#include "sync/base/sync_export.h"
-
-namespace syncer {
-
-// Converts a time object to the format used in sync protobufs (ms
-// since the Unix epoch).
-SYNC_EXPORT int64 TimeToProtoTime(const base::Time& t);
-
-// Converts a time field from sync protobufs to a time object.
-SYNC_EXPORT_PRIVATE base::Time ProtoTimeToTime(int64 proto_t);
-
-SYNC_EXPORT std::string GetTimeDebugString(const base::Time& t);
-
-} // namespace syncer
-
-#endif // SYNC_UTIL_TIME_H_