summaryrefslogtreecommitdiffstats
path: root/chromium/content/renderer/media
diff options
context:
space:
mode:
authorJocelyn Turcotte <jocelyn.turcotte@digia.com>2014-08-08 14:30:41 +0200
committerJocelyn Turcotte <jocelyn.turcotte@digia.com>2014-08-12 13:49:54 +0200
commitab0a50979b9eb4dfa3320eff7e187e41efedf7a9 (patch)
tree498dfb8a97ff3361a9f7486863a52bb4e26bb898 /chromium/content/renderer/media
parent4ce69f7403811819800e7c5ae1318b2647e778d1 (diff)
Update Chromium to beta version 37.0.2062.68
Change-Id: I188e3b5aff1bec75566014291b654eb19f5bc8ca Reviewed-by: Andras Becsi <andras.becsi@digia.com>
Diffstat (limited to 'chromium/content/renderer/media')
-rw-r--r--chromium/content/renderer/media/OWNERS7
-rw-r--r--chromium/content/renderer/media/aec_dump_message_filter.cc191
-rw-r--r--chromium/content/renderer/media/aec_dump_message_filter.h108
-rw-r--r--chromium/content/renderer/media/android/audio_decoder_android.cc37
-rw-r--r--chromium/content/renderer/media/android/audio_decoder_android.h1
-rw-r--r--chromium/content/renderer/media/android/media_info_loader.cc3
-rw-r--r--chromium/content/renderer/media/android/media_info_loader.h7
-rw-r--r--chromium/content/renderer/media/android/media_info_loader_unittest.cc15
-rw-r--r--chromium/content/renderer/media/android/media_source_delegate.cc288
-rw-r--r--chromium/content/renderer/media/android/media_source_delegate.h72
-rw-r--r--chromium/content/renderer/media/android/proxy_media_keys.cc98
-rw-r--r--chromium/content/renderer/media/android/proxy_media_keys.h69
-rw-r--r--chromium/content/renderer/media/android/renderer_demuxer_android.cc9
-rw-r--r--chromium/content/renderer/media/android/renderer_demuxer_android.h11
-rw-r--r--chromium/content/renderer/media/android/renderer_media_player_manager.cc197
-rw-r--r--chromium/content/renderer/media/android/renderer_media_player_manager.h93
-rw-r--r--chromium/content/renderer/media/android/stream_texture_factory.h (renamed from chromium/content/renderer/media/android/stream_texture_factory_android.h)48
-rw-r--r--chromium/content/renderer/media/android/stream_texture_factory_android_impl.h48
-rw-r--r--chromium/content/renderer/media/android/stream_texture_factory_impl.cc (renamed from chromium/content/renderer/media/android/stream_texture_factory_android_impl.cc)77
-rw-r--r--chromium/content/renderer/media/android/stream_texture_factory_impl.h58
-rw-r--r--chromium/content/renderer/media/android/stream_texture_factory_synchronous_impl.cc (renamed from chromium/content/renderer/media/android/stream_texture_factory_android_synchronous_impl.cc)78
-rw-r--r--chromium/content/renderer/media/android/stream_texture_factory_synchronous_impl.h (renamed from chromium/content/renderer/media/android/stream_texture_factory_android_synchronous_impl.h)46
-rw-r--r--chromium/content/renderer/media/android/webmediaplayer_android.cc921
-rw-r--r--chromium/content/renderer/media/android/webmediaplayer_android.h204
-rw-r--r--chromium/content/renderer/media/audio_decoder.cc4
-rw-r--r--chromium/content/renderer/media/audio_decoder.h2
-rw-r--r--chromium/content/renderer/media/audio_device_factory.cc5
-rw-r--r--chromium/content/renderer/media/audio_device_factory.h9
-rw-r--r--chromium/content/renderer/media/audio_input_message_filter.cc15
-rw-r--r--chromium/content/renderer/media/audio_input_message_filter.h13
-rw-r--r--chromium/content/renderer/media/audio_message_filter.cc31
-rw-r--r--chromium/content/renderer/media/audio_message_filter.h26
-rw-r--r--chromium/content/renderer/media/audio_message_filter_unittest.cc11
-rw-r--r--chromium/content/renderer/media/audio_renderer_mixer_manager.cc10
-rw-r--r--chromium/content/renderer/media/audio_renderer_mixer_manager.h9
-rw-r--r--chromium/content/renderer/media/audio_renderer_mixer_manager_unittest.cc21
-rw-r--r--chromium/content/renderer/media/buffered_data_source.cc141
-rw-r--r--chromium/content/renderer/media/buffered_data_source.h55
-rw-r--r--chromium/content/renderer/media/buffered_data_source_host_impl.cc56
-rw-r--r--chromium/content/renderer/media/buffered_data_source_host_impl.h51
-rw-r--r--chromium/content/renderer/media/buffered_data_source_host_impl_unittest.cc75
-rw-r--r--chromium/content/renderer/media/buffered_data_source_unittest.cc203
-rw-r--r--chromium/content/renderer/media/buffered_resource_loader.cc6
-rw-r--r--chromium/content/renderer/media/buffered_resource_loader.h6
-rw-r--r--chromium/content/renderer/media/buffered_resource_loader_unittest.cc13
-rw-r--r--chromium/content/renderer/media/cdm_session_adapter.cc156
-rw-r--r--chromium/content/renderer/media/cdm_session_adapter.h136
-rw-r--r--chromium/content/renderer/media/crypto/content_decryption_module_factory.cc153
-rw-r--r--chromium/content/renderer/media/crypto/content_decryption_module_factory.h37
-rw-r--r--chromium/content/renderer/media/crypto/key_systems.cc438
-rw-r--r--chromium/content/renderer/media/crypto/key_systems.h30
-rw-r--r--chromium/content/renderer/media/crypto/key_systems_unittest.cc230
-rw-r--r--chromium/content/renderer/media/crypto/pepper_cdm_wrapper.h44
-rw-r--r--chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.cc79
-rw-r--r--chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.h69
-rw-r--r--chromium/content/renderer/media/crypto/ppapi_decryptor.cc352
-rw-r--r--chromium/content/renderer/media/crypto/ppapi_decryptor.h84
-rw-r--r--chromium/content/renderer/media/crypto/proxy_decryptor.cc310
-rw-r--r--chromium/content/renderer/media/crypto/proxy_decryptor.h121
-rw-r--r--chromium/content/renderer/media/crypto/proxy_media_keys.cc273
-rw-r--r--chromium/content/renderer/media/crypto/proxy_media_keys.h127
-rw-r--r--chromium/content/renderer/media/crypto/renderer_cdm_manager.cc147
-rw-r--r--chromium/content/renderer/media/crypto/renderer_cdm_manager.h90
-rw-r--r--chromium/content/renderer/media/media_stream.cc81
-rw-r--r--chromium/content/renderer/media/media_stream.h96
-rw-r--r--chromium/content/renderer/media/media_stream_audio_level_calculator.cc65
-rw-r--r--chromium/content/renderer/media/media_stream_audio_level_calculator.h41
-rw-r--r--chromium/content/renderer/media/media_stream_audio_processor.cc427
-rw-r--r--chromium/content/renderer/media/media_stream_audio_processor.h132
-rw-r--r--chromium/content/renderer/media/media_stream_audio_processor_options.cc310
-rw-r--r--chromium/content/renderer/media/media_stream_audio_processor_options.h91
-rw-r--r--chromium/content/renderer/media/media_stream_audio_processor_unittest.cc394
-rw-r--r--chromium/content/renderer/media/media_stream_audio_source.cc56
-rw-r--r--chromium/content/renderer/media/media_stream_audio_source.h66
-rw-r--r--chromium/content/renderer/media/media_stream_center.cc165
-rw-r--r--chromium/content/renderer/media/media_stream_center.h12
-rw-r--r--chromium/content/renderer/media/media_stream_client.h39
-rw-r--r--chromium/content/renderer/media/media_stream_constraints_util.cc132
-rw-r--r--chromium/content/renderer/media/media_stream_constraints_util.h87
-rw-r--r--chromium/content/renderer/media/media_stream_constraints_util_unittest.cc103
-rw-r--r--chromium/content/renderer/media/media_stream_dependency_factory.cc1000
-rw-r--r--chromium/content/renderer/media/media_stream_dependency_factory.h261
-rw-r--r--chromium/content/renderer/media/media_stream_dependency_factory_unittest.cc217
-rw-r--r--chromium/content/renderer/media/media_stream_dispatcher.cc27
-rw-r--r--chromium/content/renderer/media/media_stream_dispatcher.h13
-rw-r--r--chromium/content/renderer/media/media_stream_dispatcher_eventhandler.h4
-rw-r--r--chromium/content/renderer/media/media_stream_dispatcher_unittest.cc14
-rw-r--r--chromium/content/renderer/media/media_stream_extra_data.h47
-rw-r--r--chromium/content/renderer/media/media_stream_impl.cc946
-rw-r--r--chromium/content/renderer/media/media_stream_impl.h185
-rw-r--r--chromium/content/renderer/media/media_stream_impl_unittest.cc323
-rw-r--r--chromium/content/renderer/media/media_stream_renderer_factory.cc197
-rw-r--r--chromium/content/renderer/media/media_stream_renderer_factory.h42
-rw-r--r--chromium/content/renderer/media/media_stream_source.cc26
-rw-r--r--chromium/content/renderer/media/media_stream_source.h79
-rw-r--r--chromium/content/renderer/media/media_stream_source_extra_data.h92
-rw-r--r--chromium/content/renderer/media/media_stream_source_observer.cc63
-rw-r--r--chromium/content/renderer/media/media_stream_source_observer.h42
-rw-r--r--chromium/content/renderer/media/media_stream_track.cc47
-rw-r--r--chromium/content/renderer/media/media_stream_track.h (renamed from chromium/content/renderer/media/media_stream_track_extra_data.h)35
-rw-r--r--chromium/content/renderer/media/media_stream_track_extra_data.cc20
-rw-r--r--chromium/content/renderer/media/media_stream_video_capture_source_unittest.cc200
-rw-r--r--chromium/content/renderer/media/media_stream_video_capturer_source.cc247
-rw-r--r--chromium/content/renderer/media/media_stream_video_capturer_source.h123
-rw-r--r--chromium/content/renderer/media/media_stream_video_source.cc578
-rw-r--r--chromium/content/renderer/media/media_stream_video_source.h180
-rw-r--r--chromium/content/renderer/media/media_stream_video_source_unittest.cc694
-rw-r--r--chromium/content/renderer/media/media_stream_video_track.cc172
-rw-r--r--chromium/content/renderer/media/media_stream_video_track.h89
-rw-r--r--chromium/content/renderer/media/media_stream_video_track_unittest.cc232
-rw-r--r--chromium/content/renderer/media/midi_dispatcher.cc34
-rw-r--r--chromium/content/renderer/media/midi_dispatcher.h33
-rw-r--r--chromium/content/renderer/media/midi_message_filter.cc129
-rw-r--r--chromium/content/renderer/media/midi_message_filter.h38
-rw-r--r--chromium/content/renderer/media/mock_media_constraint_factory.cc101
-rw-r--r--chromium/content/renderer/media/mock_media_constraint_factory.h38
-rw-r--r--chromium/content/renderer/media/mock_media_stream_dispatcher.cc90
-rw-r--r--chromium/content/renderer/media/mock_media_stream_dispatcher.h30
-rw-r--r--chromium/content/renderer/media/mock_media_stream_registry.cc42
-rw-r--r--chromium/content/renderer/media/mock_media_stream_registry.h12
-rw-r--r--chromium/content/renderer/media/mock_media_stream_video_sink.cc49
-rw-r--r--chromium/content/renderer/media/mock_media_stream_video_sink.h55
-rw-r--r--chromium/content/renderer/media/mock_media_stream_video_source.cc91
-rw-r--r--chromium/content/renderer/media/mock_media_stream_video_source.h73
-rw-r--r--chromium/content/renderer/media/mock_peer_connection_impl.cc29
-rw-r--r--chromium/content/renderer/media/mock_peer_connection_impl.h16
-rw-r--r--chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.cc4
-rw-r--r--chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.h1
-rw-r--r--chromium/content/renderer/media/peer_connection_handler_base.cc46
-rw-r--r--chromium/content/renderer/media/peer_connection_handler_base.h61
-rw-r--r--chromium/content/renderer/media/peer_connection_tracker.cc36
-rw-r--r--chromium/content/renderer/media/peer_connection_tracker.h5
-rw-r--r--chromium/content/renderer/media/pepper_platform_video_decoder.cc127
-rw-r--r--chromium/content/renderer/media/pepper_platform_video_decoder.h66
-rw-r--r--chromium/content/renderer/media/remote_media_stream_impl.cc163
-rw-r--r--chromium/content/renderer/media/remote_media_stream_impl.h6
-rw-r--r--chromium/content/renderer/media/render_media_log.cc43
-rw-r--r--chromium/content/renderer/media/render_media_log.h18
-rw-r--r--chromium/content/renderer/media/render_media_log_unittest.cc104
-rw-r--r--chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.cc263
-rw-r--r--chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.h86
-rw-r--r--chromium/content/renderer/media/renderer_webaudiodevice_impl.cc34
-rw-r--r--chromium/content/renderer/media/renderer_webaudiodevice_impl.h4
-rw-r--r--chromium/content/renderer/media/renderer_webmidiaccessor_impl.cc4
-rw-r--r--chromium/content/renderer/media/renderer_webmidiaccessor_impl.h4
-rw-r--r--chromium/content/renderer/media/rtc_data_channel_handler.cc71
-rw-r--r--chromium/content/renderer/media/rtc_data_channel_handler.h2
-rw-r--r--chromium/content/renderer/media/rtc_dtmf_sender_handler.cc6
-rw-r--r--chromium/content/renderer/media/rtc_media_constraints.cc29
-rw-r--r--chromium/content/renderer/media/rtc_media_constraints.h8
-rw-r--r--chromium/content/renderer/media/rtc_peer_connection_handler.cc229
-rw-r--r--chromium/content/renderer/media/rtc_peer_connection_handler.h45
-rw-r--r--chromium/content/renderer/media/rtc_peer_connection_handler_unittest.cc158
-rw-r--r--chromium/content/renderer/media/rtc_video_capture_delegate.cc124
-rw-r--r--chromium/content/renderer/media/rtc_video_capture_delegate.h89
-rw-r--r--chromium/content/renderer/media/rtc_video_capturer.cc159
-rw-r--r--chromium/content/renderer/media/rtc_video_capturer.h59
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder.cc270
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder.h47
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_bridge_tv.cc113
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_bridge_tv.h55
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_factory.cc12
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_factory.h11
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_factory_tv.cc246
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_factory_tv.h80
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_factory_tv_unittest.cc348
-rw-r--r--chromium/content/renderer/media/rtc_video_decoder_unittest.cc52
-rw-r--r--chromium/content/renderer/media/rtc_video_encoder.cc128
-rw-r--r--chromium/content/renderer/media/rtc_video_encoder.h18
-rw-r--r--chromium/content/renderer/media/rtc_video_encoder_factory.cc9
-rw-r--r--chromium/content/renderer/media/rtc_video_encoder_factory.h12
-rw-r--r--chromium/content/renderer/media/rtc_video_renderer.cc52
-rw-r--r--chromium/content/renderer/media/rtc_video_renderer.h13
-rw-r--r--chromium/content/renderer/media/texttrack_impl.cc2
-rw-r--r--chromium/content/renderer/media/video_capture_impl.cc423
-rw-r--r--chromium/content/renderer/media/video_capture_impl.h189
-rw-r--r--chromium/content/renderer/media/video_capture_impl_manager.cc210
-rw-r--r--chromium/content/renderer/media/video_capture_impl_manager.h125
-rw-r--r--chromium/content/renderer/media/video_capture_impl_manager_unittest.cc177
-rw-r--r--chromium/content/renderer/media/video_capture_impl_unittest.cc338
-rw-r--r--chromium/content/renderer/media/video_capture_message_filter.cc88
-rw-r--r--chromium/content/renderer/media/video_capture_message_filter.h57
-rw-r--r--chromium/content/renderer/media/video_capture_message_filter_unittest.cc93
-rw-r--r--chromium/content/renderer/media/video_destination_handler.cc210
-rw-r--r--chromium/content/renderer/media/video_destination_handler_unittest.cc127
-rw-r--r--chromium/content/renderer/media/video_frame_compositor.cc77
-rw-r--r--chromium/content/renderer/media/video_frame_compositor.h71
-rw-r--r--chromium/content/renderer/media/video_frame_compositor_unittest.cc162
-rw-r--r--chromium/content/renderer/media/video_frame_deliverer.cc84
-rw-r--r--chromium/content/renderer/media/video_frame_deliverer.h82
-rw-r--r--chromium/content/renderer/media/video_source_handler.cc168
-rw-r--r--chromium/content/renderer/media/video_source_handler.h55
-rw-r--r--chromium/content/renderer/media/video_source_handler_unittest.cc73
-rw-r--r--chromium/content/renderer/media/video_track_adapter.cc340
-rw-r--r--chromium/content/renderer/media/video_track_adapter.h90
-rw-r--r--chromium/content/renderer/media/webaudio_capturer_source.cc16
-rw-r--r--chromium/content/renderer/media/webaudio_capturer_source.h3
-rw-r--r--chromium/content/renderer/media/webaudiosourceprovider_impl.cc18
-rw-r--r--chromium/content/renderer/media/webaudiosourceprovider_impl.h5
-rw-r--r--chromium/content/renderer/media/webaudiosourceprovider_impl_unittest.cc16
-rw-r--r--chromium/content/renderer/media/webcontentdecryptionmodule_impl.cc230
-rw-r--r--chromium/content/renderer/media/webcontentdecryptionmodule_impl.h49
-rw-r--r--chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.cc127
-rw-r--r--chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.h56
-rw-r--r--chromium/content/renderer/media/webmediaplayer_impl.cc858
-rw-r--r--chromium/content/renderer/media/webmediaplayer_impl.h182
-rw-r--r--chromium/content/renderer/media/webmediaplayer_ms.cc102
-rw-r--r--chromium/content/renderer/media/webmediaplayer_ms.h67
-rw-r--r--chromium/content/renderer/media/webmediaplayer_params.cc16
-rw-r--r--chromium/content/renderer/media/webmediaplayer_params.h30
-rw-r--r--chromium/content/renderer/media/webmediaplayer_util.cc43
-rw-r--r--chromium/content/renderer/media/webmediaplayer_util.h4
-rw-r--r--chromium/content/renderer/media/webmediasource_impl.cc3
-rw-r--r--chromium/content/renderer/media/webmediasource_impl.h10
-rw-r--r--chromium/content/renderer/media/webrtc/DEPS5
-rw-r--r--chromium/content/renderer/media/webrtc/OWNERS3
-rw-r--r--chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc193
-rw-r--r--chromium/content/renderer/media/webrtc/media_stream_remote_video_source.h65
-rw-r--r--chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc141
-rw-r--r--chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc332
-rw-r--r--chromium/content/renderer/media/webrtc/media_stream_track_metrics.h101
-rw-r--r--chromium/content/renderer/media/webrtc/media_stream_track_metrics_unittest.cc543
-rw-r--r--chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc (renamed from chromium/content/renderer/media/mock_media_stream_dependency_factory.cc)359
-rw-r--r--chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h (renamed from chromium/content/renderer/media/mock_media_stream_dependency_factory.h)118
-rw-r--r--chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc657
-rw-r--r--chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h218
-rw-r--r--chromium/content/renderer/media/webrtc/peer_connection_dependency_factory_unittest.cc29
-rw-r--r--chromium/content/renderer/media/webrtc/video_destination_handler.cc235
-rw-r--r--chromium/content/renderer/media/webrtc/video_destination_handler.h (renamed from chromium/content/renderer/media/video_destination_handler.h)56
-rw-r--r--chromium/content/renderer/media/webrtc/video_destination_handler_unittest.cc109
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc40
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h45
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc158
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h108
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc99
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc111
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h67
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc155
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc193
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.h71
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter_unittest.cc71
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.cc119
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.h60
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.cc185
-rw-r--r--chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.h59
-rw-r--r--chromium/content/renderer/media/webrtc_audio_capturer.cc384
-rw-r--r--chromium/content/renderer/media/webrtc_audio_capturer.h150
-rw-r--r--chromium/content/renderer/media/webrtc_audio_capturer_unittest.cc229
-rw-r--r--chromium/content/renderer/media/webrtc_audio_device_impl.cc226
-rw-r--r--chromium/content/renderer/media/webrtc_audio_device_impl.h126
-rw-r--r--chromium/content/renderer/media/webrtc_audio_device_not_impl.cc8
-rw-r--r--chromium/content/renderer/media/webrtc_audio_device_not_impl.h2
-rw-r--r--chromium/content/renderer/media/webrtc_audio_device_unittest.cc979
-rw-r--r--chromium/content/renderer/media/webrtc_audio_renderer.cc271
-rw-r--r--chromium/content/renderer/media/webrtc_audio_renderer.h133
-rw-r--r--chromium/content/renderer/media/webrtc_audio_renderer_unittest.cc154
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_renderer.cc36
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_renderer.h4
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_source_provider.cc25
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_source_provider.h21
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc46
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_track.cc275
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_track.h104
-rw-r--r--chromium/content/renderer/media/webrtc_local_audio_track_unittest.cc427
-rw-r--r--chromium/content/renderer/media/webrtc_logging.cc4
-rw-r--r--chromium/content/renderer/media/webrtc_uma_histograms.cc60
-rw-r--r--chromium/content/renderer/media/webrtc_uma_histograms.h65
-rw-r--r--chromium/content/renderer/media/webrtc_uma_histograms_unittest.cc79
-rw-r--r--chromium/content/renderer/media/websourcebuffer_impl.cc74
-rw-r--r--chromium/content/renderer/media/websourcebuffer_impl.h25
270 files changed, 20888 insertions, 11996 deletions
diff --git a/chromium/content/renderer/media/OWNERS b/chromium/content/renderer/media/OWNERS
index d132d0e6061..b1afdbbadb8 100644
--- a/chromium/content/renderer/media/OWNERS
+++ b/chromium/content/renderer/media/OWNERS
@@ -1,7 +1,7 @@
acolwell@chromium.org
dalecurtis@chromium.org
ddorwin@chromium.org
-fischman@chromium.org
+perkj@chromium.org
scherkus@chromium.org
shadi@chromium.org
tommi@chromium.org
@@ -9,3 +9,8 @@ vrk@chromium.org
wjia@chromium.org
xhwang@chromium.org
xians@chromium.org
+
+per-file cast_*=hclam@chromium.org
+per-file cast_*=hubbe@chromium.org
+per-file cast_*=mikhal@chromium.org
+per-file cast_*=pwestin@google.com
diff --git a/chromium/content/renderer/media/aec_dump_message_filter.cc b/chromium/content/renderer/media/aec_dump_message_filter.cc
new file mode 100644
index 00000000000..ed6aa4a8ee5
--- /dev/null
+++ b/chromium/content/renderer/media/aec_dump_message_filter.cc
@@ -0,0 +1,191 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/aec_dump_message_filter.h"
+
+#include "base/message_loop/message_loop_proxy.h"
+#include "content/common/media/aec_dump_messages.h"
+#include "content/renderer/media/webrtc_logging.h"
+#include "ipc/ipc_logging.h"
+#include "ipc/ipc_sender.h"
+
+namespace {
+const int kInvalidDelegateId = -1;
+}
+
+namespace content {
+
+AecDumpMessageFilter* AecDumpMessageFilter::g_filter = NULL;
+
+AecDumpMessageFilter::AecDumpMessageFilter(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop,
+ const scoped_refptr<base::MessageLoopProxy>& main_message_loop)
+ : sender_(NULL),
+ delegate_id_counter_(0),
+ io_message_loop_(io_message_loop),
+ main_message_loop_(main_message_loop) {
+ DCHECK(!g_filter);
+ g_filter = this;
+}
+
+AecDumpMessageFilter::~AecDumpMessageFilter() {
+ DCHECK_EQ(g_filter, this);
+ g_filter = NULL;
+}
+
+// static
+scoped_refptr<AecDumpMessageFilter> AecDumpMessageFilter::Get() {
+ return g_filter;
+}
+
+void AecDumpMessageFilter::AddDelegate(
+ AecDumpMessageFilter::AecDumpDelegate* delegate) {
+ DCHECK(main_message_loop_->BelongsToCurrentThread());
+ DCHECK(delegate);
+ DCHECK_EQ(kInvalidDelegateId, GetIdForDelegate(delegate));
+
+ int id = delegate_id_counter_++;
+ delegates_[id] = delegate;
+
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AecDumpMessageFilter::RegisterAecDumpConsumer,
+ this,
+ id));
+}
+
+void AecDumpMessageFilter::RemoveDelegate(
+ AecDumpMessageFilter::AecDumpDelegate* delegate) {
+ DCHECK(main_message_loop_->BelongsToCurrentThread());
+ DCHECK(delegate);
+
+ int id = GetIdForDelegate(delegate);
+ DCHECK_NE(kInvalidDelegateId, id);
+ delegates_.erase(id);
+
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AecDumpMessageFilter::UnregisterAecDumpConsumer,
+ this,
+ id));
+}
+
+void AecDumpMessageFilter::Send(IPC::Message* message) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ if (sender_)
+ sender_->Send(message);
+ else
+ delete message;
+}
+
+void AecDumpMessageFilter::RegisterAecDumpConsumer(int id) {
+ Send(new AecDumpMsg_RegisterAecDumpConsumer(id));
+}
+
+void AecDumpMessageFilter::UnregisterAecDumpConsumer(int id) {
+ Send(new AecDumpMsg_UnregisterAecDumpConsumer(id));
+}
+
+bool AecDumpMessageFilter::OnMessageReceived(const IPC::Message& message) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(AecDumpMessageFilter, message)
+ IPC_MESSAGE_HANDLER(AecDumpMsg_EnableAecDump, OnEnableAecDump)
+ IPC_MESSAGE_HANDLER(AecDumpMsg_DisableAecDump, OnDisableAecDump)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+void AecDumpMessageFilter::OnFilterAdded(IPC::Sender* sender) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ sender_ = sender;
+}
+
+void AecDumpMessageFilter::OnFilterRemoved() {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+
+ // Once removed, a filter will not be used again. At this time the
+ // observer must be notified so it releases its reference.
+ OnChannelClosing();
+}
+
+void AecDumpMessageFilter::OnChannelClosing() {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ sender_ = NULL;
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AecDumpMessageFilter::DoChannelClosingOnDelegates,
+ this));
+}
+
+void AecDumpMessageFilter::OnEnableAecDump(
+ int id,
+ IPC::PlatformFileForTransit file_handle) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AecDumpMessageFilter::DoEnableAecDump,
+ this,
+ id,
+ file_handle));
+}
+
+void AecDumpMessageFilter::OnDisableAecDump() {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AecDumpMessageFilter::DoDisableAecDump,
+ this));
+}
+
+void AecDumpMessageFilter::DoEnableAecDump(
+ int id,
+ IPC::PlatformFileForTransit file_handle) {
+ DCHECK(main_message_loop_->BelongsToCurrentThread());
+ DelegateMap::iterator it = delegates_.find(id);
+ if (it != delegates_.end()) {
+ it->second->OnAecDumpFile(file_handle);
+ } else {
+ // Delegate has been removed, we must close the file.
+ base::File file = IPC::PlatformFileForTransitToFile(file_handle);
+ DCHECK(file.IsValid());
+ file.Close();
+ }
+}
+
+void AecDumpMessageFilter::DoDisableAecDump() {
+ DCHECK(main_message_loop_->BelongsToCurrentThread());
+ for (DelegateMap::iterator it = delegates_.begin();
+ it != delegates_.end(); ++it) {
+ it->second->OnDisableAecDump();
+ }
+}
+
+void AecDumpMessageFilter::DoChannelClosingOnDelegates() {
+ DCHECK(main_message_loop_->BelongsToCurrentThread());
+ for (DelegateMap::iterator it = delegates_.begin();
+ it != delegates_.end(); ++it) {
+ it->second->OnIpcClosing();
+ }
+ delegates_.clear();
+}
+
+int AecDumpMessageFilter::GetIdForDelegate(
+ AecDumpMessageFilter::AecDumpDelegate* delegate) {
+ DCHECK(main_message_loop_->BelongsToCurrentThread());
+ for (DelegateMap::iterator it = delegates_.begin();
+ it != delegates_.end(); ++it) {
+ if (it->second == delegate)
+ return it->first;
+ }
+ return kInvalidDelegateId;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/aec_dump_message_filter.h b/chromium/content/renderer/media/aec_dump_message_filter.h
new file mode 100644
index 00000000000..7268132d2ef
--- /dev/null
+++ b/chromium/content/renderer/media/aec_dump_message_filter.h
@@ -0,0 +1,108 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_AEC_DUMP_MESSAGE_FILTER_H_
+#define CONTENT_RENDERER_MEDIA_AEC_DUMP_MESSAGE_FILTER_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "content/common/content_export.h"
+#include "content/renderer/render_thread_impl.h"
+#include "ipc/ipc_platform_file.h"
+#include "ipc/message_filter.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace content {
+
+// MessageFilter that handles AEC dump messages and forwards them to an
+// observer.
+class CONTENT_EXPORT AecDumpMessageFilter : public IPC::MessageFilter {
+ public:
+ class AecDumpDelegate {
+ public:
+ virtual void OnAecDumpFile(
+ const IPC::PlatformFileForTransit& file_handle) = 0;
+ virtual void OnDisableAecDump() = 0;
+ virtual void OnIpcClosing() = 0;
+ };
+
+ AecDumpMessageFilter(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop,
+ const scoped_refptr<base::MessageLoopProxy>& main_message_loop);
+
+ // Getter for the one AecDumpMessageFilter object.
+ static scoped_refptr<AecDumpMessageFilter> Get();
+
+ // Adds a delegate that receives the enable and disable notifications.
+ void AddDelegate(AecDumpMessageFilter::AecDumpDelegate* delegate);
+
+ // Removes a delegate.
+ void RemoveDelegate(AecDumpMessageFilter::AecDumpDelegate* delegate);
+
+ // IO message loop associated with this message filter.
+ scoped_refptr<base::MessageLoopProxy> io_message_loop() const {
+ return io_message_loop_;
+ }
+
+ protected:
+ virtual ~AecDumpMessageFilter();
+
+ private:
+ // Sends an IPC message using |sender_|.
+ void Send(IPC::Message* message);
+
+ // Registers a consumer of AEC dump in the browser process. This consumer will
+ // get a file handle when the AEC dump is enabled and a notification when it
+ // is disabled.
+ void RegisterAecDumpConsumer(int id);
+
+ // Unregisters a consumer of AEC dump in the browser process.
+ void UnregisterAecDumpConsumer(int id);
+
+ // IPC::MessageFilter override. Called on |io_message_loop|.
+ virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
+ virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE;
+ virtual void OnFilterRemoved() OVERRIDE;
+ virtual void OnChannelClosing() OVERRIDE;
+
+ // Accessed on |io_message_loop|.
+ void OnEnableAecDump(int id, IPC::PlatformFileForTransit file_handle);
+ void OnDisableAecDump();
+
+ // Accessed on |main_message_loop_|.
+ void DoEnableAecDump(int id, IPC::PlatformFileForTransit file_handle);
+ void DoDisableAecDump();
+ void DoChannelClosingOnDelegates();
+ int GetIdForDelegate(AecDumpMessageFilter::AecDumpDelegate* delegate);
+
+ // Accessed on |io_message_loop_|.
+ IPC::Sender* sender_;
+
+ // The delgates for this filter. Must only be accessed on
+ // |main_message_loop_|.
+ typedef std::map<int, AecDumpMessageFilter::AecDumpDelegate*> DelegateMap;
+ DelegateMap delegates_;
+
+ // Counter for generating unique IDs to delegates. Accessed on
+ // |main_message_loop_|.
+ int delegate_id_counter_;
+
+ // Message loop on which IPC calls are driven.
+ const scoped_refptr<base::MessageLoopProxy> io_message_loop_;
+
+ // Main message loop.
+ const scoped_refptr<base::MessageLoopProxy> main_message_loop_;
+
+ // The singleton instance for this filter.
+ static AecDumpMessageFilter* g_filter;
+
+ DISALLOW_COPY_AND_ASSIGN(AecDumpMessageFilter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_AEC_DUMP_MESSAGE_FILTER_H_
diff --git a/chromium/content/renderer/media/android/audio_decoder_android.cc b/chromium/content/renderer/media/android/audio_decoder_android.cc
index b589d67412c..72e25ab2bcc 100644
--- a/chromium/content/renderer/media/android/audio_decoder_android.cc
+++ b/chromium/content/renderer/media/android/audio_decoder_android.cc
@@ -165,6 +165,9 @@ class WAVEDecoder {
// The number of bytes in the data portion of the chunk.
size_t chunk_size_;
+ // The total number of bytes in the encoded data.
+ size_t data_size_;
+
// The current position within the WAVE file.
const uint8_t* buffer_;
@@ -183,7 +186,8 @@ class WAVEDecoder {
};
WAVEDecoder::WAVEDecoder(const uint8_t* encoded_data, size_t data_size)
- : buffer_(encoded_data),
+ : data_size_(data_size),
+ buffer_(encoded_data),
buffer_end_(encoded_data + 1),
bytes_per_sample_(0),
number_of_channels_(0),
@@ -247,6 +251,10 @@ bool WAVEDecoder::ReadChunkHeader() {
if (chunk_size_ % 2)
++chunk_size_;
+ // Check for completely bogus chunk size.
+ if (chunk_size_ > data_size_)
+ return false;
+
return true;
}
@@ -377,8 +385,8 @@ bool WAVEDecoder::DecodeWAVEFile(blink::WebAudioBus* destination_bus) {
return CopyDataChunkToBus(destination_bus);
} else {
// Ignore these chunks that we don't know about.
- VLOG(0) << "Ignoring WAVE chunk `" << chunk_id_ << "' size "
- << chunk_size_;
+ DVLOG(0) << "Ignoring WAVE chunk `" << chunk_id_ << "' size "
+ << chunk_size_;
}
// Advance to next chunk.
@@ -405,21 +413,30 @@ static void CopyPcmDataToBus(int input_fd,
int16_t pipe_data[PIPE_BUF / sizeof(int16_t)];
size_t decoded_frames = 0;
+ size_t current_sample_in_frame = 0;
ssize_t nread;
while ((nread = HANDLE_EINTR(read(input_fd, pipe_data, sizeof(pipe_data)))) >
0) {
size_t samples_in_pipe = nread / sizeof(int16_t);
- for (size_t m = 0; m < samples_in_pipe; m += number_of_channels) {
+
+ // The pipe may not contain a whole number of frames. This is
+ // especially true if the number of channels is greater than
+ // 2. Thus, keep track of which sample in a frame is being
+ // processed, so we handle the boundary at the end of the pipe
+ // correctly.
+ for (size_t m = 0; m < samples_in_pipe; ++m) {
if (decoded_frames >= number_of_frames)
break;
- for (size_t k = 0; k < number_of_channels; ++k) {
- int16_t sample = pipe_data[m + k];
- destination_bus->channelData(k)[decoded_frames] =
- ConvertSampleToFloat(sample);
+ destination_bus->channelData(current_sample_in_frame)[decoded_frames] =
+ ConvertSampleToFloat(pipe_data[m]);
+ ++current_sample_in_frame;
+
+ if (current_sample_in_frame >= number_of_channels) {
+ current_sample_in_frame = 0;
+ ++decoded_frames;
}
- ++decoded_frames;
}
}
@@ -494,7 +511,7 @@ static bool TryWAVEFileDecoder(blink::WebAudioBus* destination_bus,
// of a pipe. The MediaCodec class will decode the data from the
// shared memory and write the PCM samples back to us over a pipe.
bool DecodeAudioFileData(blink::WebAudioBus* destination_bus, const char* data,
- size_t data_size, double sample_rate,
+ size_t data_size,
scoped_refptr<ThreadSafeSender> sender) {
// Try to decode the data as a WAVE file first. If it can't be
// decoded, use MediaCodec. See crbug.com/259048.
diff --git a/chromium/content/renderer/media/android/audio_decoder_android.h b/chromium/content/renderer/media/android/audio_decoder_android.h
index d2cac1575df..983e8b42850 100644
--- a/chromium/content/renderer/media/android/audio_decoder_android.h
+++ b/chromium/content/renderer/media/android/audio_decoder_android.h
@@ -16,7 +16,6 @@ namespace content {
bool DecodeAudioFileData(blink::WebAudioBus* destination_bus,
const char* data,
size_t data_size,
- double sample_rate,
scoped_refptr<ThreadSafeSender> sender);
} // namespace content
diff --git a/chromium/content/renderer/media/android/media_info_loader.cc b/chromium/content/renderer/media/android/media_info_loader.cc
index 875265c41c5..8a49e04087f 100644
--- a/chromium/content/renderer/media/android/media_info_loader.cc
+++ b/chromium/content/renderer/media/android/media_info_loader.cc
@@ -147,7 +147,8 @@ void MediaInfoLoader::didReceiveCachedMetadata(
void MediaInfoLoader::didFinishLoading(
WebURLLoader* loader,
- double finishTime) {
+ double finishTime,
+ int64_t total_encoded_data_length) {
DCHECK(active_loader_.get());
DidBecomeReady(kOk);
}
diff --git a/chromium/content/renderer/media/android/media_info_loader.h b/chromium/content/renderer/media/android/media_info_loader.h
index 8e67c80c0c9..d87b67c30ed 100644
--- a/chromium/content/renderer/media/android/media_info_loader.h
+++ b/chromium/content/renderer/media/android/media_info_loader.h
@@ -66,6 +66,10 @@ class CONTENT_EXPORT MediaInfoLoader : private blink::WebURLLoaderClient {
// Only valid to call after the loader becomes ready.
bool DidPassCORSAccessCheck() const;
+ void set_single_origin(bool single_origin) {
+ single_origin_ = single_origin;
+ }
+
private:
friend class MediaInfoLoaderTest;
@@ -95,7 +99,8 @@ class CONTENT_EXPORT MediaInfoLoader : private blink::WebURLLoaderClient {
const char* data, int dataLength);
virtual void didFinishLoading(
blink::WebURLLoader* loader,
- double finishTime);
+ double finishTime,
+ int64_t total_encoded_data_length);
virtual void didFail(
blink::WebURLLoader* loader,
const blink::WebURLError&);
diff --git a/chromium/content/renderer/media/android/media_info_loader_unittest.cc b/chromium/content/renderer/media/android/media_info_loader_unittest.cc
index 65bfba8e77c..ffde7927e1d 100644
--- a/chromium/content/renderer/media/android/media_info_loader_unittest.cc
+++ b/chromium/content/renderer/media/android/media_info_loader_unittest.cc
@@ -11,12 +11,14 @@
#include "third_party/WebKit/public/platform/WebURLError.h"
#include "third_party/WebKit/public/platform/WebURLRequest.h"
#include "third_party/WebKit/public/platform/WebURLResponse.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebView.h"
using ::testing::_;
using ::testing::InSequence;
using ::testing::NiceMock;
+using blink::WebLocalFrame;
using blink::WebString;
using blink::WebURLError;
using blink::WebURLResponse;
@@ -28,6 +30,7 @@ static const char* kHttpUrl = "http://test";
static const char kHttpRedirectToSameDomainUrl1[] = "http://test/ing";
static const char kHttpRedirectToSameDomainUrl2[] = "http://test/ing2";
static const char kHttpRedirectToDifferentDomainUrl1[] = "http://test2";
+static const char kHttpDataUrl[] = "data:audio/wav;base64,UklGRhwMAABXQVZFZm10";
static const int kHttpOK = 200;
static const int kHttpNotFound = 404;
@@ -35,12 +38,13 @@ static const int kHttpNotFound = 404;
class MediaInfoLoaderTest : public testing::Test {
public:
MediaInfoLoaderTest()
- : view_(WebView::create(NULL)) {
- view_->initializeMainFrame(&client_);
+ : view_(WebView::create(NULL)), frame_(WebLocalFrame::create(&client_)) {
+ view_->setMainFrame(frame_);
}
virtual ~MediaInfoLoaderTest() {
view_->close();
+ frame_->close();
}
void Initialize(
@@ -108,6 +112,7 @@ class MediaInfoLoaderTest : public testing::Test {
MockWebFrameClient client_;
WebView* view_;
+ WebLocalFrame* frame_;
base::MessageLoop message_loop_;
@@ -127,6 +132,12 @@ TEST_F(MediaInfoLoaderTest, LoadFailure) {
FailLoad();
}
+TEST_F(MediaInfoLoaderTest, DataUri) {
+ Initialize(kHttpDataUrl, blink::WebMediaPlayer::CORSModeUnspecified);
+ Start();
+ SendResponse(0, MediaInfoLoader::kOk);
+}
+
TEST_F(MediaInfoLoaderTest, HasSingleOriginNoRedirect) {
// Make sure no redirect case works as expected.
Initialize(kHttpUrl, blink::WebMediaPlayer::CORSModeUnspecified);
diff --git a/chromium/content/renderer/media/android/media_source_delegate.cc b/chromium/content/renderer/media/android/media_source_delegate.cc
index 57fc0157ca4..eeb4c7e662a 100644
--- a/chromium/content/renderer/media/android/media_source_delegate.cc
+++ b/chromium/content/renderer/media/android/media_source_delegate.cc
@@ -14,7 +14,7 @@
#include "content/renderer/media/webmediaplayer_util.h"
#include "content/renderer/media/webmediasource_impl.h"
#include "media/base/android/demuxer_stream_player_params.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream.h"
#include "media/base/media_log.h"
#include "media/filters/chunk_demuxer.h"
@@ -31,8 +31,8 @@ using blink::WebString;
namespace {
// The size of the access unit to transfer in an IPC in case of MediaSource.
-// 16: approximately 250ms of content in 60 fps movies.
-const size_t kAccessUnitSizeForMediaSource = 16;
+// 4: approximately 64ms of content in 60 fps movies.
+const size_t kAccessUnitSizeForMediaSource = 4;
const uint8 kVorbisPadding[] = { 0xff, 0xff, 0xff, 0xff };
@@ -50,26 +50,23 @@ MediaSourceDelegate::MediaSourceDelegate(
int demuxer_client_id,
const scoped_refptr<base::MessageLoopProxy>& media_loop,
media::MediaLog* media_log)
- : main_loop_(base::MessageLoopProxy::current()),
- main_weak_factory_(this),
- main_weak_this_(main_weak_factory_.GetWeakPtr()),
- media_loop_(media_loop),
- media_weak_factory_(this),
- demuxer_client_(demuxer_client),
+ : demuxer_client_(demuxer_client),
demuxer_client_id_(demuxer_client_id),
media_log_(media_log),
- demuxer_(NULL),
is_demuxer_ready_(false),
audio_stream_(NULL),
video_stream_(NULL),
seeking_(false),
+ is_video_encrypted_(false),
doing_browser_seek_(false),
browser_seek_time_(media::kNoTimestamp()),
expecting_regular_seek_(false),
-#if defined(GOOGLE_TV)
- key_added_(false),
-#endif
- access_unit_size_(0) {
+ access_unit_size_(0),
+ main_loop_(base::MessageLoopProxy::current()),
+ media_loop_(media_loop),
+ main_weak_factory_(this),
+ media_weak_factory_(this),
+ main_weak_this_(main_weak_factory_.GetWeakPtr()) {
DCHECK(main_loop_->BelongsToCurrentThread());
}
@@ -77,7 +74,6 @@ MediaSourceDelegate::~MediaSourceDelegate() {
DCHECK(main_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
DCHECK(!chunk_demuxer_);
- DCHECK(!demuxer_);
DCHECK(!demuxer_client_);
DCHECK(!audio_decrypting_demuxer_stream_);
DCHECK(!video_decrypting_demuxer_stream_);
@@ -89,7 +85,7 @@ void MediaSourceDelegate::Destroy() {
DCHECK(main_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
- if (!demuxer_) {
+ if (!chunk_demuxer_) {
DCHECK(!demuxer_client_);
delete this;
return;
@@ -102,8 +98,7 @@ void MediaSourceDelegate::Destroy() {
main_weak_factory_.InvalidateWeakPtrs();
DCHECK(!main_weak_factory_.HasWeakPtrs());
- if (chunk_demuxer_)
- chunk_demuxer_->Shutdown();
+ chunk_demuxer_->Shutdown();
// |this| will be transferred to the callback StopDemuxer() and
// OnDemuxerStopDone(). They own |this| and OnDemuxerStopDone() will delete
@@ -113,9 +108,23 @@ void MediaSourceDelegate::Destroy() {
base::Unretained(this)));
}
+bool MediaSourceDelegate::IsVideoEncrypted() {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+ base::AutoLock auto_lock(is_video_encrypted_lock_);
+ return is_video_encrypted_;
+}
+
+base::Time MediaSourceDelegate::GetTimelineOffset() const {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+ if (!chunk_demuxer_)
+ return base::Time();
+
+ return chunk_demuxer_->GetTimelineOffset();
+}
+
void MediaSourceDelegate::StopDemuxer() {
DCHECK(media_loop_->BelongsToCurrentThread());
- DCHECK(demuxer_);
+ DCHECK(chunk_demuxer_);
demuxer_client_->RemoveDelegate(demuxer_client_id_);
demuxer_client_ = NULL;
@@ -132,9 +141,8 @@ void MediaSourceDelegate::StopDemuxer() {
// The callback OnDemuxerStopDone() owns |this| and will delete it when
// called. Hence using base::Unretained(this) is safe here.
- demuxer_->Stop(media::BindToLoop(main_loop_,
- base::Bind(&MediaSourceDelegate::OnDemuxerStopDone,
- base::Unretained(this))));
+ chunk_demuxer_->Stop(base::Bind(&MediaSourceDelegate::OnDemuxerStopDone,
+ base::Unretained(this)));
}
void MediaSourceDelegate::InitializeMediaSource(
@@ -153,12 +161,12 @@ void MediaSourceDelegate::InitializeMediaSource(
access_unit_size_ = kAccessUnitSizeForMediaSource;
chunk_demuxer_.reset(new media::ChunkDemuxer(
- media::BindToCurrentLoop(base::Bind(
- &MediaSourceDelegate::OnDemuxerOpened, main_weak_this_)),
- media::BindToCurrentLoop(base::Bind(
- &MediaSourceDelegate::OnNeedKey, main_weak_this_)),
- base::Bind(&LogMediaSourceError, media_log_)));
- demuxer_ = chunk_demuxer_.get();
+ media::BindToCurrentLoop(
+ base::Bind(&MediaSourceDelegate::OnDemuxerOpened, main_weak_this_)),
+ media::BindToCurrentLoop(
+ base::Bind(&MediaSourceDelegate::OnNeedKey, main_weak_this_)),
+ base::Bind(&LogMediaSourceError, media_log_),
+ false));
// |this| will be retained until StopDemuxer() is posted, so Unretained() is
// safe here.
@@ -170,35 +178,14 @@ void MediaSourceDelegate::InitializeMediaSource(
void MediaSourceDelegate::InitializeDemuxer() {
DCHECK(media_loop_->BelongsToCurrentThread());
demuxer_client_->AddDelegate(demuxer_client_id_, this);
- demuxer_->Initialize(this, base::Bind(&MediaSourceDelegate::OnDemuxerInitDone,
+ chunk_demuxer_->Initialize(this,
+ base::Bind(&MediaSourceDelegate::OnDemuxerInitDone,
media_weak_factory_.GetWeakPtr()),
- false);
-}
-
-#if defined(GOOGLE_TV)
-void MediaSourceDelegate::InitializeMediaStream(
- media::Demuxer* demuxer,
- const UpdateNetworkStateCB& update_network_state_cb) {
- DCHECK(main_loop_->BelongsToCurrentThread());
- DCHECK(demuxer);
- demuxer_ = demuxer;
- update_network_state_cb_ = media::BindToCurrentLoop(update_network_state_cb);
- // When playing Media Stream, don't wait to accumulate multiple packets per
- // IPC communication.
- access_unit_size_ = 1;
-
- // |this| will be retained until StopDemuxer() is posted, so Unretained() is
- // safe here.
- media_loop_->PostTask(FROM_HERE,
- base::Bind(&MediaSourceDelegate::InitializeDemuxer,
- base::Unretained(this)));
+ false);
}
-#endif
-const blink::WebTimeRanges& MediaSourceDelegate::Buffered() {
- buffered_web_time_ranges_ =
- ConvertToWebTimeRanges(buffered_time_ranges_);
- return buffered_web_time_ranges_;
+blink::WebTimeRanges MediaSourceDelegate::Buffered() const {
+ return ConvertToWebTimeRanges(buffered_time_ranges_);
}
size_t MediaSourceDelegate::DecodedFrameCount() const {
@@ -314,19 +301,11 @@ void MediaSourceDelegate::Seek(
void MediaSourceDelegate::SeekInternal(const base::TimeDelta& seek_time) {
DCHECK(media_loop_->BelongsToCurrentThread());
DCHECK(IsSeeking());
- demuxer_->Seek(seek_time, base::Bind(
+ chunk_demuxer_->Seek(seek_time, base::Bind(
&MediaSourceDelegate::OnDemuxerSeekDone,
media_weak_factory_.GetWeakPtr()));
}
-void MediaSourceDelegate::SetTotalBytes(int64 total_bytes) {
- NOTIMPLEMENTED();
-}
-
-void MediaSourceDelegate::AddBufferedByteRange(int64 start, int64 end) {
- NOTIMPLEMENTED();
-}
-
void MediaSourceDelegate::AddBufferedTimeRange(base::TimeDelta start,
base::TimeDelta end) {
buffered_time_ranges_.Add(start, end);
@@ -389,7 +368,7 @@ void MediaSourceDelegate::OnBufferReady(
<< ((!buffer || buffer->end_of_stream()) ?
-1 : buffer->timestamp().InMilliseconds())
<< ") : " << demuxer_client_id_;
- DCHECK(demuxer_);
+ DCHECK(chunk_demuxer_);
// No new OnReadFromDemuxer() will be called during seeking. So this callback
// must be from previous OnReadFromDemuxer() call and should be ignored.
@@ -417,14 +396,11 @@ void MediaSourceDelegate::OnBufferReady(
break;
case DemuxerStream::kConfigChanged:
- // In case of kConfigChanged, need to read decoder_config once
- // for the next reads.
- // TODO(kjyoun): Investigate if we need to use this new config. See
- // http://crbug.com/255783
- if (is_audio) {
- audio_stream_->audio_decoder_config();
- } else {
- gfx::Size size = video_stream_->video_decoder_config().coded_size();
+ CHECK((is_audio && audio_stream_) || (!is_audio && video_stream_));
+ data->demuxer_configs.resize(1);
+ CHECK(GetDemuxerConfigFromStream(&data->demuxer_configs[0], is_audio));
+ if (!is_audio) {
+ gfx::Size size = data->demuxer_configs[0].video_size;
DVLOG(1) << "Video config is changed: " << size.width() << "x"
<< size.height();
}
@@ -450,15 +426,8 @@ void MediaSourceDelegate::OnBufferReady(
}
data->access_units[index].timestamp = buffer->timestamp();
- { // No local variable in switch-case scope.
- int data_offset = buffer->decrypt_config() ?
- buffer->decrypt_config()->data_offset() : 0;
- DCHECK_LT(data_offset, buffer->data_size());
- data->access_units[index].data = std::vector<uint8>(
- buffer->data() + data_offset,
- buffer->data() + buffer->data_size() - data_offset);
- }
-#if !defined(GOOGLE_TV)
+ data->access_units[index].data.assign(
+ buffer->data(), buffer->data() + buffer->data_size());
// Vorbis needs 4 extra bytes padding on Android. Check
// NuMediaExtractor.cpp in Android source code.
if (is_audio && media::kCodecVorbis ==
@@ -467,7 +436,6 @@ void MediaSourceDelegate::OnBufferReady(
data->access_units[index].data.end(), kVorbisPadding,
kVorbisPadding + 4);
}
-#endif
if (buffer->decrypt_config()) {
data->access_units[index].key_id = std::vector<char>(
buffer->decrypt_config()->key_id().begin(),
@@ -515,15 +483,15 @@ void MediaSourceDelegate::RemoveTextStream(
void MediaSourceDelegate::OnDemuxerInitDone(media::PipelineStatus status) {
DCHECK(media_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << "(" << status << ") : " << demuxer_client_id_;
- DCHECK(demuxer_);
+ DCHECK(chunk_demuxer_);
if (status != media::PIPELINE_OK) {
OnDemuxerError(status);
return;
}
- audio_stream_ = demuxer_->GetStream(DemuxerStream::AUDIO);
- video_stream_ = demuxer_->GetStream(DemuxerStream::VIDEO);
+ audio_stream_ = chunk_demuxer_->GetStream(DemuxerStream::AUDIO);
+ video_stream_ = chunk_demuxer_->GetStream(DemuxerStream::VIDEO);
if (audio_stream_ && audio_stream_->audio_decoder_config().is_encrypted() &&
!set_decryptor_ready_cb_.is_null()) {
@@ -541,8 +509,7 @@ void MediaSourceDelegate::OnDemuxerInitDone(media::PipelineStatus status) {
// Notify demuxer ready when both streams are not encrypted.
is_demuxer_ready_ = true;
- if (CanNotifyDemuxerReady())
- NotifyDemuxerReady();
+ NotifyDemuxerReady();
}
void MediaSourceDelegate::InitAudioDecryptingDemuxerStream() {
@@ -575,7 +542,7 @@ void MediaSourceDelegate::OnAudioDecryptingDemuxerStreamInitDone(
media::PipelineStatus status) {
DCHECK(media_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << "(" << status << ") : " << demuxer_client_id_;
- DCHECK(demuxer_);
+ DCHECK(chunk_demuxer_);
if (status != media::PIPELINE_OK)
audio_decrypting_demuxer_stream_.reset();
@@ -590,15 +557,14 @@ void MediaSourceDelegate::OnAudioDecryptingDemuxerStreamInitDone(
// Try to notify demuxer ready when audio DDS initialization finished and
// video is not encrypted.
is_demuxer_ready_ = true;
- if (CanNotifyDemuxerReady())
- NotifyDemuxerReady();
+ NotifyDemuxerReady();
}
void MediaSourceDelegate::OnVideoDecryptingDemuxerStreamInitDone(
media::PipelineStatus status) {
DCHECK(media_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << "(" << status << ") : " << demuxer_client_id_;
- DCHECK(demuxer_);
+ DCHECK(chunk_demuxer_);
if (status != media::PIPELINE_OK)
video_decrypting_demuxer_stream_.reset();
@@ -607,8 +573,7 @@ void MediaSourceDelegate::OnVideoDecryptingDemuxerStreamInitDone(
// Try to notify demuxer ready when video DDS initialization finished.
is_demuxer_ready_ = true;
- if (CanNotifyDemuxerReady())
- NotifyDemuxerReady();
+ NotifyDemuxerReady();
}
void MediaSourceDelegate::OnDemuxerSeekDone(media::PipelineStatus status) {
@@ -662,104 +627,47 @@ void MediaSourceDelegate::FinishResettingDecryptingDemuxerStreams() {
}
void MediaSourceDelegate::OnDemuxerStopDone() {
- DCHECK(main_loop_->BelongsToCurrentThread());
- DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
- chunk_demuxer_.reset();
- demuxer_ = NULL;
- delete this;
-}
-
-void MediaSourceDelegate::OnMediaConfigRequest() {
DCHECK(media_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
- if (CanNotifyDemuxerReady())
- NotifyDemuxerReady();
-}
-
-#if defined(GOOGLE_TV)
-// TODO(kjyoun): Enhance logic to detect when to call NotifyDemuxerReady()
-// For now, we call it when the first key is added. See http://crbug.com/255781
-void MediaSourceDelegate::NotifyKeyAdded(const std::string& key_system) {
- if (!media_loop_->BelongsToCurrentThread()) {
- media_loop_->PostTask(FROM_HERE,
- base::Bind(&MediaSourceDelegate::NotifyKeyAdded,
- base::Unretained(this), key_system));
- return;
- }
- DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
- if (key_added_)
- return;
- key_added_ = true;
- key_system_ = key_system;
- if (!CanNotifyDemuxerReady())
- return;
- if (HasEncryptedStream())
- NotifyDemuxerReady();
+ main_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&MediaSourceDelegate::DeleteSelf, base::Unretained(this)));
}
-#endif // defined(GOOGLE_TV)
-bool MediaSourceDelegate::CanNotifyDemuxerReady() {
- DCHECK(media_loop_->BelongsToCurrentThread());
- // This can happen when a key is added before the demuxer is initialized.
- // See NotifyKeyAdded().
- // TODO(kjyoun): Remove NotifyDemxuerReady() call from NotifyKeyAdded() so
- // that we can remove all is_demuxer_ready_/key_added_/key_system_ madness.
- // See http://crbug.com/255781
- if (!is_demuxer_ready_)
- return false;
-#if defined(GOOGLE_TV)
- if (HasEncryptedStream() && !key_added_)
- return false;
-#endif // defined(GOOGLE_TV)
- return true;
+void MediaSourceDelegate::DeleteSelf() {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
+ chunk_demuxer_.reset();
+ delete this;
}
void MediaSourceDelegate::NotifyDemuxerReady() {
DCHECK(media_loop_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << " : " << demuxer_client_id_;
- DCHECK(CanNotifyDemuxerReady());
+ DCHECK(is_demuxer_ready_);
scoped_ptr<DemuxerConfigs> configs(new DemuxerConfigs());
- if (audio_stream_) {
- media::AudioDecoderConfig config = audio_stream_->audio_decoder_config();
- configs->audio_codec = config.codec();
- configs->audio_channels =
- media::ChannelLayoutToChannelCount(config.channel_layout());
- configs->audio_sampling_rate = config.samples_per_second();
- configs->is_audio_encrypted = config.is_encrypted();
- configs->audio_extra_data = std::vector<uint8>(
- config.extra_data(), config.extra_data() + config.extra_data_size());
- }
- if (video_stream_) {
- media::VideoDecoderConfig config = video_stream_->video_decoder_config();
- configs->video_codec = config.codec();
- configs->video_size = config.natural_size();
- configs->is_video_encrypted = config.is_encrypted();
- configs->video_extra_data = std::vector<uint8>(
- config.extra_data(), config.extra_data() + config.extra_data_size());
- }
- configs->duration_ms = GetDurationMs();
-
-#if defined(GOOGLE_TV)
- configs->key_system = HasEncryptedStream() ? key_system_ : "";
-#endif
+ GetDemuxerConfigFromStream(configs.get(), true);
+ GetDemuxerConfigFromStream(configs.get(), false);
+ configs->duration = GetDuration();
if (demuxer_client_)
demuxer_client_->DemuxerReady(demuxer_client_id_, *configs);
+
+ base::AutoLock auto_lock(is_video_encrypted_lock_);
+ is_video_encrypted_ = configs->is_video_encrypted;
}
-int MediaSourceDelegate::GetDurationMs() {
+base::TimeDelta MediaSourceDelegate::GetDuration() const {
DCHECK(media_loop_->BelongsToCurrentThread());
if (!chunk_demuxer_)
- return -1;
+ return media::kNoTimestamp();
- double duration_ms = chunk_demuxer_->GetDuration() * 1000;
- if (duration_ms > std::numeric_limits<int32>::max()) {
- LOG(WARNING) << "Duration from ChunkDemuxer is too large; probably "
- "something has gone wrong.";
- return std::numeric_limits<int32>::max();
- }
- return duration_ms;
+ double duration = chunk_demuxer_->GetDuration();
+ if (duration == std::numeric_limits<double>::infinity())
+ return media::kInfiniteDuration();
+
+ return ConvertSecondsToTimestamp(duration);
}
void MediaSourceDelegate::OnDemuxerOpened() {
@@ -780,14 +688,6 @@ void MediaSourceDelegate::OnNeedKey(const std::string& type,
need_key_cb_.Run(type, init_data);
}
-bool MediaSourceDelegate::HasEncryptedStream() {
- DCHECK(media_loop_->BelongsToCurrentThread());
- return (audio_stream_ &&
- audio_stream_->audio_decoder_config().is_encrypted()) ||
- (video_stream_ &&
- video_stream_->video_decoder_config().is_encrypted());
-}
-
bool MediaSourceDelegate::IsSeeking() const {
base::AutoLock auto_lock(seeking_lock_);
return seeking_;
@@ -834,4 +734,32 @@ base::TimeDelta MediaSourceDelegate::FindBufferedBrowserSeekTime_Locked(
return seek_time;
}
+bool MediaSourceDelegate::GetDemuxerConfigFromStream(
+ media::DemuxerConfigs* configs, bool is_audio) {
+ DCHECK(media_loop_->BelongsToCurrentThread());
+ if (!is_demuxer_ready_)
+ return false;
+ if (is_audio && audio_stream_) {
+ media::AudioDecoderConfig config = audio_stream_->audio_decoder_config();
+ configs->audio_codec = config.codec();
+ configs->audio_channels =
+ media::ChannelLayoutToChannelCount(config.channel_layout());
+ configs->audio_sampling_rate = config.samples_per_second();
+ configs->is_audio_encrypted = config.is_encrypted();
+ configs->audio_extra_data = std::vector<uint8>(
+ config.extra_data(), config.extra_data() + config.extra_data_size());
+ return true;
+ }
+ if (!is_audio && video_stream_) {
+ media::VideoDecoderConfig config = video_stream_->video_decoder_config();
+ configs->video_codec = config.codec();
+ configs->video_size = config.natural_size();
+ configs->is_video_encrypted = config.is_encrypted();
+ configs->video_extra_data = std::vector<uint8>(
+ config.extra_data(), config.extra_data() + config.extra_data_size());
+ return true;
+ }
+ return false;
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/android/media_source_delegate.h b/chromium/content/renderer/media/android/media_source_delegate.h
index 317ef6544df..28b18325938 100644
--- a/chromium/content/renderer/media/android/media_source_delegate.h
+++ b/chromium/content/renderer/media/android/media_source_delegate.h
@@ -67,13 +67,7 @@ class MediaSourceDelegate : public media::DemuxerHost {
const UpdateNetworkStateCB& update_network_state_cb,
const DurationChangeCB& duration_change_cb);
-#if defined(GOOGLE_TV)
- void InitializeMediaStream(
- media::Demuxer* demuxer,
- const UpdateNetworkStateCB& update_network_state_cb);
-#endif
-
- const blink::WebTimeRanges& Buffered();
+ blink::WebTimeRanges Buffered() const;
size_t DecodedFrameCount() const;
size_t DroppedFrameCount() const;
size_t AudioDecodedByteCount() const;
@@ -100,29 +94,23 @@ class MediaSourceDelegate : public media::DemuxerHost {
// cached data since last keyframe. See http://crbug.com/304234.
void Seek(const base::TimeDelta& seek_time, bool is_browser_seek);
- void NotifyKeyAdded(const std::string& key_system);
-
// Called when DemuxerStreamPlayer needs to read data from ChunkDemuxer.
void OnReadFromDemuxer(media::DemuxerStream::Type type);
- // Called when the player needs the new config data from ChunkDemuxer.
- void OnMediaConfigRequest();
-
// Called by the Destroyer to destroy an instance of this object.
void Destroy();
- private:
- typedef base::Callback<void(scoped_ptr<media::DemuxerData> data)>
- ReadFromDemuxerAckCB;
- typedef base::Callback<void(scoped_ptr<media::DemuxerConfigs> configs)>
- DemuxerReadyCB;
+ // Called on the main thread to check whether the video stream is encrypted.
+ bool IsVideoEncrypted();
+
+ // Gets the ChunkDemuxer timeline offset.
+ base::Time GetTimelineOffset() const;
+ private:
// This is private to enforce use of the Destroyer.
virtual ~MediaSourceDelegate();
// Methods inherited from DemuxerHost.
- virtual void SetTotalBytes(int64 total_bytes) OVERRIDE;
- virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE;
virtual void AddBufferedTimeRange(base::TimeDelta start,
base::TimeDelta end) OVERRIDE;
virtual void SetDuration(base::TimeDelta duration) OVERRIDE;
@@ -154,12 +142,15 @@ class MediaSourceDelegate : public media::DemuxerHost {
void ResetVideoDecryptingDemuxerStream();
void FinishResettingDecryptingDemuxerStreams();
+ // Callback for ChunkDemuxer::Stop() and helper for deleting |this| on the
+ // main thread.
void OnDemuxerStopDone();
+ void DeleteSelf();
+
void OnDemuxerOpened();
void OnNeedKey(const std::string& type,
const std::vector<uint8>& init_data);
void NotifyDemuxerReady();
- bool CanNotifyDemuxerReady();
void StopDemuxer();
void InitializeDemuxer();
@@ -176,9 +167,7 @@ class MediaSourceDelegate : public media::DemuxerHost {
const scoped_refptr<media::DecoderBuffer>& buffer);
// Helper function for calculating duration.
- int GetDurationMs();
-
- bool HasEncryptedStream();
+ base::TimeDelta GetDuration() const;
bool IsSeeking() const;
@@ -190,14 +179,10 @@ class MediaSourceDelegate : public media::DemuxerHost {
base::TimeDelta FindBufferedBrowserSeekTime_Locked(
const base::TimeDelta& seek_time) const;
- // Message loop for main renderer thread and corresponding weak pointer.
- const scoped_refptr<base::MessageLoopProxy> main_loop_;
- base::WeakPtrFactory<MediaSourceDelegate> main_weak_factory_;
- base::WeakPtr<MediaSourceDelegate> main_weak_this_;
-
- // Message loop for media thread and corresponding weak pointer.
- const scoped_refptr<base::MessageLoopProxy> media_loop_;
- base::WeakPtrFactory<MediaSourceDelegate> media_weak_factory_;
+ // Get the demuxer configs for a particular stream identified by |is_audio|.
+  // Returns true on success, or false otherwise.
+ bool GetDemuxerConfigFromStream(media::DemuxerConfigs* configs,
+ bool is_audio);
RendererDemuxerAndroid* demuxer_client_;
int demuxer_client_id_;
@@ -207,7 +192,6 @@ class MediaSourceDelegate : public media::DemuxerHost {
DurationChangeCB duration_change_cb_;
scoped_ptr<media::ChunkDemuxer> chunk_demuxer_;
- media::Demuxer* demuxer_;
bool is_demuxer_ready_;
media::SetDecryptorReadyCB set_decryptor_ready_cb_;
@@ -220,16 +204,10 @@ class MediaSourceDelegate : public media::DemuxerHost {
media::PipelineStatistics statistics_;
media::Ranges<base::TimeDelta> buffered_time_ranges_;
- // Keep a list of buffered time ranges.
- blink::WebTimeRanges buffered_web_time_ranges_;
MediaSourceOpenedCB media_source_opened_cb_;
media::Demuxer::NeedKeyCB need_key_cb_;
- // The currently selected key system. Empty string means that no key system
- // has been selected.
- blink::WebString current_key_system_;
-
// Temporary for EME v0.1. In the future the init data type should be passed
// through GenerateKeyRequest() directly from WebKit.
std::string init_data_type_;
@@ -238,6 +216,10 @@ class MediaSourceDelegate : public media::DemuxerHost {
mutable base::Lock seeking_lock_;
bool seeking_;
+ // Lock used to serialize access for |is_video_encrypted_|.
+ mutable base::Lock is_video_encrypted_lock_;
+ bool is_video_encrypted_;
+
// Track if we are currently performing a browser seek, and track whether or
// not a regular seek is expected soon. If a regular seek is expected soon,
// then any in-progress browser seek will be canceled pending the
@@ -247,13 +229,17 @@ class MediaSourceDelegate : public media::DemuxerHost {
base::TimeDelta browser_seek_time_;
bool expecting_regular_seek_;
-#if defined(GOOGLE_TV)
- bool key_added_;
- std::string key_system_;
-#endif // defined(GOOGLE_TV)
-
size_t access_unit_size_;
+ // Message loop for main renderer and media threads.
+ const scoped_refptr<base::MessageLoopProxy> main_loop_;
+ const scoped_refptr<base::MessageLoopProxy> media_loop_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MediaSourceDelegate> main_weak_factory_;
+ base::WeakPtrFactory<MediaSourceDelegate> media_weak_factory_;
+ base::WeakPtr<MediaSourceDelegate> main_weak_this_;
+
DISALLOW_COPY_AND_ASSIGN(MediaSourceDelegate);
};
diff --git a/chromium/content/renderer/media/android/proxy_media_keys.cc b/chromium/content/renderer/media/android/proxy_media_keys.cc
deleted file mode 100644
index ef93a2552fb..00000000000
--- a/chromium/content/renderer/media/android/proxy_media_keys.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/android/proxy_media_keys.h"
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "content/renderer/media/android/renderer_media_player_manager.h"
-#include "content/renderer/media/crypto/key_systems.h"
-
-namespace content {
-
-ProxyMediaKeys::ProxyMediaKeys(
- RendererMediaPlayerManager* manager,
- int media_keys_id,
- const media::SessionCreatedCB& session_created_cb,
- const media::SessionMessageCB& session_message_cb,
- const media::SessionReadyCB& session_ready_cb,
- const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb)
- : manager_(manager),
- media_keys_id_(media_keys_id),
- session_created_cb_(session_created_cb),
- session_message_cb_(session_message_cb),
- session_ready_cb_(session_ready_cb),
- session_closed_cb_(session_closed_cb),
- session_error_cb_(session_error_cb) {
- DCHECK(manager_);
-}
-
-ProxyMediaKeys::~ProxyMediaKeys() {
-}
-
-void ProxyMediaKeys::InitializeCDM(const std::string& key_system,
- const GURL& frame_url) {
-#if defined(ENABLE_PEPPER_CDMS)
- NOTIMPLEMENTED();
-#elif defined(OS_ANDROID)
- std::vector<uint8> uuid = GetUUID(key_system);
- DCHECK(!uuid.empty());
- manager_->InitializeCDM(media_keys_id_, this, uuid, frame_url);
-#endif
-}
-
-bool ProxyMediaKeys::CreateSession(uint32 session_id,
- const std::string& type,
- const uint8* init_data,
- int init_data_length) {
- manager_->CreateSession(
- media_keys_id_,
- session_id,
- type,
- std::vector<uint8>(init_data, init_data + init_data_length));
- return true;
-}
-
-void ProxyMediaKeys::UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) {
- manager_->UpdateSession(
- media_keys_id_,
- session_id,
- std::vector<uint8>(response, response + response_length));
-}
-
-void ProxyMediaKeys::ReleaseSession(uint32 session_id) {
- manager_->ReleaseSession(media_keys_id_, session_id);
-}
-
-void ProxyMediaKeys::OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) {
- session_created_cb_.Run(session_id, web_session_id);
-}
-
-void ProxyMediaKeys::OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) {
- session_message_cb_.Run(session_id, message, destination_url);
-}
-
-void ProxyMediaKeys::OnSessionReady(uint32 session_id) {
- session_ready_cb_.Run(session_id);
-}
-
-void ProxyMediaKeys::OnSessionClosed(uint32 session_id) {
- session_closed_cb_.Run(session_id);
-}
-
-void ProxyMediaKeys::OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
- session_error_cb_.Run(session_id, error_code, system_code);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/android/proxy_media_keys.h b/chromium/content/renderer/media/android/proxy_media_keys.h
deleted file mode 100644
index ca5c932d883..00000000000
--- a/chromium/content/renderer/media/android/proxy_media_keys.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_ANDROID_PROXY_MEDIA_KEYS_H_
-#define CONTENT_RENDERER_MEDIA_ANDROID_PROXY_MEDIA_KEYS_H_
-
-#include "base/basictypes.h"
-#include "media/base/media_keys.h"
-
-class GURL;
-
-namespace content {
-
-class RendererMediaPlayerManager;
-
-// A MediaKeys proxy that wraps the EME part of RendererMediaPlayerManager.
-// TODO(xhwang): Instead of accessing RendererMediaPlayerManager directly, let
-// RendererMediaPlayerManager return a MediaKeys object that can be used by
-// ProxyDecryptor directly. Then we can remove this class!
-class ProxyMediaKeys : public media::MediaKeys {
- public:
- ProxyMediaKeys(RendererMediaPlayerManager* proxy,
- int media_keys_id,
- const media::SessionCreatedCB& session_created_cb,
- const media::SessionMessageCB& session_message_cb,
- const media::SessionReadyCB& session_ready_cb,
- const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb);
- virtual ~ProxyMediaKeys();
-
- void InitializeCDM(const std::string& key_system, const GURL& frame_url);
-
- // MediaKeys implementation.
- virtual bool CreateSession(uint32 session_id,
- const std::string& type,
- const uint8* init_data,
- int init_data_length) OVERRIDE;
- virtual void UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) OVERRIDE;
- virtual void ReleaseSession(uint32 session_id) OVERRIDE;
-
- // Callbacks.
- void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
- void OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url);
- void OnSessionReady(uint32 session_id);
- void OnSessionClosed(uint32 session_id);
- void OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code);
-
- private:
- RendererMediaPlayerManager* manager_;
- int media_keys_id_;
- media::SessionCreatedCB session_created_cb_;
- media::SessionMessageCB session_message_cb_;
- media::SessionReadyCB session_ready_cb_;
- media::SessionClosedCB session_closed_cb_;
- media::SessionErrorCB session_error_cb_;
-
- DISALLOW_COPY_AND_ASSIGN(ProxyMediaKeys);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_ANDROID_PROXY_MEDIA_KEYS_H_
diff --git a/chromium/content/renderer/media/android/renderer_demuxer_android.cc b/chromium/content/renderer/media/android/renderer_demuxer_android.cc
index ab139b5403e..586d7fa44d2 100644
--- a/chromium/content/renderer/media/android/renderer_demuxer_android.cc
+++ b/chromium/content/renderer/media/android/renderer_demuxer_android.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
#include "content/child/thread_safe_sender.h"
#include "content/common/media/media_player_messages_android.h"
#include "content/renderer/media/android/media_source_delegate.h"
@@ -42,7 +43,6 @@ bool RendererDemuxerAndroid::OnMessageReceived(const IPC::Message& message) {
switch (message.type()) {
case MediaPlayerMsg_DemuxerSeekRequest::ID:
case MediaPlayerMsg_ReadFromDemuxer::ID:
- case MediaPlayerMsg_MediaConfigRequest::ID:
media_message_loop_->PostTask(FROM_HERE, base::Bind(
&RendererDemuxerAndroid::DispatchMessage, this, message));
return true;
@@ -81,7 +81,6 @@ void RendererDemuxerAndroid::DispatchMessage(const IPC::Message& message) {
IPC_BEGIN_MESSAGE_MAP(RendererDemuxerAndroid, message)
IPC_MESSAGE_HANDLER(MediaPlayerMsg_DemuxerSeekRequest, OnDemuxerSeekRequest)
IPC_MESSAGE_HANDLER(MediaPlayerMsg_ReadFromDemuxer, OnReadFromDemuxer)
- IPC_MESSAGE_HANDLER(MediaPlayerMsg_MediaConfigRequest, OnMediaConfigRequest)
IPC_END_MESSAGE_MAP()
}
@@ -102,10 +101,4 @@ void RendererDemuxerAndroid::OnDemuxerSeekRequest(
delegate->Seek(time_to_seek, is_browser_seek);
}
-void RendererDemuxerAndroid::OnMediaConfigRequest(int demuxer_client_id) {
- MediaSourceDelegate* delegate = delegates_.Lookup(demuxer_client_id);
- if (delegate)
- delegate->OnMediaConfigRequest();
-}
-
} // namespace content
diff --git a/chromium/content/renderer/media/android/renderer_demuxer_android.h b/chromium/content/renderer/media/android/renderer_demuxer_android.h
index c6538a1b2cb..2a92918bb7a 100644
--- a/chromium/content/renderer/media/android/renderer_demuxer_android.h
+++ b/chromium/content/renderer/media/android/renderer_demuxer_android.h
@@ -7,9 +7,13 @@
#include "base/atomic_sequence_num.h"
#include "base/id_map.h"
-#include "ipc/ipc_channel_proxy.h"
+#include "ipc/message_filter.h"
#include "media/base/android/demuxer_stream_player_params.h"
+namespace base {
+class MessageLoopProxy;
+}
+
namespace content {
class MediaSourceDelegate;
@@ -19,7 +23,7 @@ class ThreadSafeSender;
// media::DemuxerAndroid.
//
// Refer to BrowserDemuxerAndroid for the browser process half.
-class RendererDemuxerAndroid : public IPC::ChannelProxy::MessageFilter {
+class RendererDemuxerAndroid : public IPC::MessageFilter {
public:
RendererDemuxerAndroid();
@@ -39,7 +43,7 @@ class RendererDemuxerAndroid : public IPC::ChannelProxy::MessageFilter {
// Must be called on media thread.
void RemoveDelegate(int demuxer_client_id);
- // IPC::ChannelProxy::MessageFilter overrides.
+ // IPC::MessageFilter overrides.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
// media::DemuxerAndroidClient "implementation".
@@ -66,7 +70,6 @@ class RendererDemuxerAndroid : public IPC::ChannelProxy::MessageFilter {
void OnDemuxerSeekRequest(int demuxer_client_id,
const base::TimeDelta& time_to_seek,
bool is_browser_seek);
- void OnMediaConfigRequest(int demuxer_client_id);
base::AtomicSequenceNumber next_demuxer_client_id_;
diff --git a/chromium/content/renderer/media/android/renderer_media_player_manager.cc b/chromium/content/renderer/media/android/renderer_media_player_manager.cc
index 64e6fc5b112..f3e0bc01cfc 100644
--- a/chromium/content/renderer/media/android/renderer_media_player_manager.cc
+++ b/chromium/content/renderer/media/android/renderer_media_player_manager.cc
@@ -4,38 +4,28 @@
#include "content/renderer/media/android/renderer_media_player_manager.h"
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
#include "content/common/media/media_player_messages_android.h"
-#include "content/renderer/media/android/proxy_media_keys.h"
+#include "content/public/common/renderer_preferences.h"
#include "content/renderer/media/android/renderer_media_player_manager.h"
#include "content/renderer/media/android/webmediaplayer_android.h"
+#include "content/renderer/media/crypto/renderer_cdm_manager.h"
+#include "content/renderer/render_view_impl.h"
#include "ui/gfx/rect_f.h"
-// Maximum sizes for various EME message parameters. These are checks to
-// prevent unnecessarily large messages from being passed around, and the sizes
-// are somewhat arbitrary as the EME specification doesn't specify any limits.
-static const size_t kEmeWebSessionIdMaximum = 512;
-static const size_t kEmeMessageMaximum = 10240; // 10 KB
-static const size_t kEmeDestinationUrlMaximum = 2048; // 2 KB
-
namespace content {
-RendererMediaPlayerManager::RendererMediaPlayerManager(RenderView* render_view)
- : RenderViewObserver(render_view),
+RendererMediaPlayerManager::RendererMediaPlayerManager(
+ RenderFrame* render_frame)
+ : RenderFrameObserver(render_frame),
next_media_player_id_(0),
fullscreen_frame_(NULL),
- pending_fullscreen_frame_(NULL) {}
+ pending_fullscreen_frame_(NULL) {
+}
RendererMediaPlayerManager::~RendererMediaPlayerManager() {
- std::map<int, WebMediaPlayerAndroid*>::iterator player_it;
- for (player_it = media_players_.begin();
- player_it != media_players_.end(); ++player_it) {
- WebMediaPlayerAndroid* player = player_it->second;
- player->Detach();
- }
-
- Send(new MediaPlayerHostMsg_DestroyAllMediaPlayers(routing_id()));
+ DCHECK(media_players_.empty())
+ << "RendererMediaPlayerManager is owned by RenderFrameImpl and is "
+ "destroyed only after all media players are destroyed.";
}
bool RendererMediaPlayerManager::OnMessageReceived(const IPC::Message& msg) {
@@ -65,11 +55,7 @@ bool RendererMediaPlayerManager::OnMessageReceived(const IPC::Message& msg) {
IPC_MESSAGE_HANDLER(MediaPlayerMsg_DidExitFullscreen, OnDidExitFullscreen)
IPC_MESSAGE_HANDLER(MediaPlayerMsg_DidMediaPlayerPlay, OnPlayerPlay)
IPC_MESSAGE_HANDLER(MediaPlayerMsg_DidMediaPlayerPause, OnPlayerPause)
- IPC_MESSAGE_HANDLER(MediaKeysMsg_SessionCreated, OnSessionCreated)
- IPC_MESSAGE_HANDLER(MediaKeysMsg_SessionMessage, OnSessionMessage)
- IPC_MESSAGE_HANDLER(MediaKeysMsg_SessionReady, OnSessionReady)
- IPC_MESSAGE_HANDLER(MediaKeysMsg_SessionClosed, OnSessionClosed)
- IPC_MESSAGE_HANDLER(MediaKeysMsg_SessionError, OnSessionError)
+ IPC_MESSAGE_HANDLER(MediaPlayerMsg_PauseVideo, OnPauseVideo)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
@@ -80,10 +66,18 @@ void RendererMediaPlayerManager::Initialize(
int player_id,
const GURL& url,
const GURL& first_party_for_cookies,
- int demuxer_client_id) {
- Send(new MediaPlayerHostMsg_Initialize(
- routing_id(), type, player_id, url, first_party_for_cookies,
- demuxer_client_id));
+ int demuxer_client_id,
+ const GURL& frame_url) {
+
+ MediaPlayerHostMsg_Initialize_Params media_player_params;
+ media_player_params.type = type;
+ media_player_params.player_id = player_id;
+ media_player_params.demuxer_client_id = demuxer_client_id;
+ media_player_params.url = url;
+ media_player_params.first_party_for_cookies = first_party_for_cookies;
+ media_player_params.frame_url = frame_url;
+
+ Send(new MediaPlayerHostMsg_Initialize(routing_id(), media_player_params));
}
void RendererMediaPlayerManager::Start(int player_id) {
@@ -107,6 +101,10 @@ void RendererMediaPlayerManager::SetVolume(int player_id, double volume) {
Send(new MediaPlayerHostMsg_SetVolume(routing_id(), player_id, volume));
}
+void RendererMediaPlayerManager::SetPoster(int player_id, const GURL& poster) {
+ Send(new MediaPlayerHostMsg_SetPoster(routing_id(), player_id, poster));
+}
+
void RendererMediaPlayerManager::ReleaseResources(int player_id) {
Send(new MediaPlayerHostMsg_Release(routing_id(), player_id));
}
@@ -182,10 +180,11 @@ void RendererMediaPlayerManager::OnMediaPlayerReleased(int player_id) {
player->OnPlayerReleased();
}
-void RendererMediaPlayerManager::OnConnectedToRemoteDevice(int player_id) {
+void RendererMediaPlayerManager::OnConnectedToRemoteDevice(int player_id,
+ const std::string& remote_playback_message) {
WebMediaPlayerAndroid* player = GetMediaPlayer(player_id);
if (player)
- player->OnConnectedToRemoteDevice();
+ player->OnConnectedToRemoteDevice(remote_playback_message);
}
void RendererMediaPlayerManager::OnDisconnectedFromRemoteDevice(int player_id) {
@@ -224,6 +223,10 @@ void RendererMediaPlayerManager::OnRequestFullscreen(int player_id) {
player->OnRequestFullscreen();
}
+void RendererMediaPlayerManager::OnPauseVideo() {
+ ReleaseVideoResources();
+}
+
void RendererMediaPlayerManager::EnterFullscreen(int player_id,
blink::WebFrame* frame) {
pending_fullscreen_frame_ = frame;
@@ -232,99 +235,16 @@ void RendererMediaPlayerManager::EnterFullscreen(int player_id,
void RendererMediaPlayerManager::ExitFullscreen(int player_id) {
pending_fullscreen_frame_ = NULL;
+ fullscreen_frame_ = NULL;
Send(new MediaPlayerHostMsg_ExitFullscreen(routing_id(), player_id));
}
-void RendererMediaPlayerManager::InitializeCDM(int media_keys_id,
- ProxyMediaKeys* media_keys,
- const std::vector<uint8>& uuid,
- const GURL& frame_url) {
- RegisterMediaKeys(media_keys_id, media_keys);
- Send(new MediaKeysHostMsg_InitializeCDM(
- routing_id(), media_keys_id, uuid, frame_url));
-}
-
-void RendererMediaPlayerManager::CreateSession(
- int media_keys_id,
- uint32 session_id,
- const std::string& type,
- const std::vector<uint8>& init_data) {
- Send(new MediaKeysHostMsg_CreateSession(
- routing_id(), media_keys_id, session_id, type, init_data));
-}
-
-void RendererMediaPlayerManager::UpdateSession(
- int media_keys_id,
- uint32 session_id,
- const std::vector<uint8>& response) {
- Send(new MediaKeysHostMsg_UpdateSession(
- routing_id(), media_keys_id, session_id, response));
-}
-
-void RendererMediaPlayerManager::ReleaseSession(int media_keys_id,
- uint32 session_id) {
- Send(new MediaKeysHostMsg_ReleaseSession(
- routing_id(), media_keys_id, session_id));
-}
-
-void RendererMediaPlayerManager::OnSessionCreated(
- int media_keys_id,
- uint32 session_id,
- const std::string& web_session_id) {
- if (web_session_id.length() > kEmeWebSessionIdMaximum) {
- OnSessionError(
- media_keys_id, session_id, media::MediaKeys::kUnknownError, 0);
- return;
- }
-
- ProxyMediaKeys* media_keys = GetMediaKeys(media_keys_id);
- if (media_keys)
- media_keys->OnSessionCreated(session_id, web_session_id);
-}
-
-void RendererMediaPlayerManager::OnSessionMessage(
- int media_keys_id,
- uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) {
- if (message.size() > kEmeMessageMaximum) {
- OnSessionError(
- media_keys_id, session_id, media::MediaKeys::kUnknownError, 0);
- return;
- }
- if (destination_url.length() > kEmeDestinationUrlMaximum) {
- OnSessionError(
- media_keys_id, session_id, media::MediaKeys::kUnknownError, 0);
+void RendererMediaPlayerManager::SetCdm(int player_id, int cdm_id) {
+ if (cdm_id == RendererCdmManager::kInvalidCdmId) {
+ NOTREACHED();
return;
}
-
- ProxyMediaKeys* media_keys = GetMediaKeys(media_keys_id);
- if (media_keys)
- media_keys->OnSessionMessage(session_id, message, destination_url);
-}
-
-void RendererMediaPlayerManager::OnSessionReady(int media_keys_id,
- uint32 session_id) {
- ProxyMediaKeys* media_keys = GetMediaKeys(media_keys_id);
- if (media_keys)
- media_keys->OnSessionReady(session_id);
-}
-
-void RendererMediaPlayerManager::OnSessionClosed(int media_keys_id,
- uint32 session_id) {
- ProxyMediaKeys* media_keys = GetMediaKeys(media_keys_id);
- if (media_keys)
- media_keys->OnSessionClosed(session_id);
-}
-
-void RendererMediaPlayerManager::OnSessionError(
- int media_keys_id,
- uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
- ProxyMediaKeys* media_keys = GetMediaKeys(media_keys_id);
- if (media_keys)
- media_keys->OnSessionError(session_id, error_code, system_code);
+ Send(new MediaPlayerHostMsg_SetCdm(routing_id(), player_id, cdm_id));
}
int RendererMediaPlayerManager::RegisterMediaPlayer(
@@ -335,26 +255,12 @@ int RendererMediaPlayerManager::RegisterMediaPlayer(
void RendererMediaPlayerManager::UnregisterMediaPlayer(int player_id) {
media_players_.erase(player_id);
- media_keys_.erase(player_id);
-}
-
-void RendererMediaPlayerManager::RegisterMediaKeys(int media_keys_id,
- ProxyMediaKeys* media_keys) {
- // WebMediaPlayerAndroid must have already been registered for
- // |media_keys_id|. For now |media_keys_id| is the same as player_id
- // used in other methods.
- DCHECK(media_players_.find(media_keys_id) != media_players_.end());
-
- // Only allowed to register once.
- DCHECK(media_keys_.find(media_keys_id) == media_keys_.end());
-
- media_keys_[media_keys_id] = media_keys;
}
void RendererMediaPlayerManager::ReleaseVideoResources() {
std::map<int, WebMediaPlayerAndroid*>::iterator player_it;
- for (player_it = media_players_.begin();
- player_it != media_players_.end(); ++player_it) {
+ for (player_it = media_players_.begin(); player_it != media_players_.end();
+ ++player_it) {
WebMediaPlayerAndroid* player = player_it->second;
// Do not release if an audio track is still playing
@@ -372,12 +278,6 @@ WebMediaPlayerAndroid* RendererMediaPlayerManager::GetMediaPlayer(
return NULL;
}
-ProxyMediaKeys* RendererMediaPlayerManager::GetMediaKeys(int media_keys_id) {
- std::map<int, ProxyMediaKeys*>::iterator iter =
- media_keys_.find(media_keys_id);
- return (iter != media_keys_.end()) ? iter->second : NULL;
-}
-
bool RendererMediaPlayerManager::CanEnterFullscreen(blink::WebFrame* frame) {
return (!fullscreen_frame_ && !pending_fullscreen_frame_)
|| ShouldEnterFullscreen(frame);
@@ -429,13 +329,18 @@ void RendererMediaPlayerManager::RetrieveGeometryChanges(
WebMediaPlayerAndroid* player = player_it->second;
if (player && player->hasVideo()) {
- gfx::RectF rect;
- if (player->RetrieveGeometryChange(&rect)) {
- (*changes)[player_it->first] = rect;
- }
+ if (player->UpdateBoundaryRectangle())
+ (*changes)[player_it->first] = player->GetBoundaryRectangle();
}
}
}
+
+bool
+RendererMediaPlayerManager::ShouldUseVideoOverlayForEmbeddedEncryptedVideo() {
+ const RendererPreferences& prefs = static_cast<RenderFrameImpl*>(
+ render_frame())->render_view()->renderer_preferences();
+ return prefs.use_video_overlay_for_embedded_encrypted_video;
+}
#endif // defined(VIDEO_HOLE)
} // namespace content
diff --git a/chromium/content/renderer/media/android/renderer_media_player_manager.h b/chromium/content/renderer/media/android/renderer_media_player_manager.h
index 188d5425132..4bcb2888378 100644
--- a/chromium/content/renderer/media/android/renderer_media_player_manager.h
+++ b/chromium/content/renderer/media/android/renderer_media_player_manager.h
@@ -7,20 +7,14 @@
#include <map>
#include <string>
-#include <vector>
#include "base/basictypes.h"
#include "base/time/time.h"
#include "content/common/media/media_player_messages_enums_android.h"
-#include "content/public/renderer/render_view_observer.h"
+#include "content/public/renderer/render_frame_observer.h"
#include "media/base/android/media_player_android.h"
-#include "media/base/media_keys.h"
#include "url/gurl.h"
-#if defined(GOOGLE_TV)
-#include "ui/gfx/rect_f.h"
-#endif
-
namespace blink {
class WebFrame;
}
@@ -29,20 +23,20 @@ namespace gfx {
class RectF;
}
-namespace content {
+struct MediaPlayerHostMsg_Initialize_Params;
-class ProxyMediaKeys;
+namespace content {
class WebMediaPlayerAndroid;
// Class for managing all the WebMediaPlayerAndroid objects in the same
-// RenderView.
-class RendererMediaPlayerManager : public RenderViewObserver {
+// RenderFrame.
+class RendererMediaPlayerManager : public RenderFrameObserver {
public:
- // Constructs a RendererMediaPlayerManager object for the |render_view|.
- RendererMediaPlayerManager(RenderView* render_view);
+ // Constructs a RendererMediaPlayerManager object for the |render_frame|.
+ explicit RendererMediaPlayerManager(RenderFrame* render_frame);
virtual ~RendererMediaPlayerManager();
- // RenderViewObserver overrides.
+ // RenderFrameObserver overrides.
virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE;
// Initializes a MediaPlayerAndroid object in browser process.
@@ -50,7 +44,8 @@ class RendererMediaPlayerManager : public RenderViewObserver {
int player_id,
const GURL& url,
const GURL& first_party_for_cookies,
- int demuxer_client_id);
+ int demuxer_client_id,
+ const GURL& frame_url);
// Starts the player.
void Start(int player_id);
@@ -68,6 +63,9 @@ class RendererMediaPlayerManager : public RenderViewObserver {
// Sets the player volume.
void SetVolume(int player_id, double volume);
+ // Sets the poster image.
+ void SetPoster(int player_id, const GURL& poster);
+
// Releases resources for the player.
void ReleaseResources(int player_id);
@@ -80,42 +78,27 @@ class RendererMediaPlayerManager : public RenderViewObserver {
// Requests the player to exit fullscreen.
void ExitFullscreen(int player_id);
+ // Requests the player with |player_id| to use the CDM with |cdm_id|.
+ // Does nothing if |cdm_id| is kInvalidCdmId.
+ // TODO(xhwang): Update this when we implement setCdm(0).
+ void SetCdm(int player_id, int cdm_id);
+
#if defined(VIDEO_HOLE)
// Requests an external surface for out-of-band compositing.
void RequestExternalSurface(int player_id, const gfx::RectF& geometry);
- // RenderViewObserver overrides.
+ // RenderFrameObserver overrides.
virtual void DidCommitCompositorFrame() OVERRIDE;
-#endif // defined(VIDEO_HOLE)
- // Encrypted media related methods.
- void InitializeCDM(int media_keys_id,
- ProxyMediaKeys* media_keys,
- const std::vector<uint8>& uuid,
- const GURL& frame_url);
- void CreateSession(int media_keys_id,
- uint32 session_id,
- const std::string& type,
- const std::vector<uint8>& init_data);
- void UpdateSession(int media_keys_id,
- uint32 session_id,
- const std::vector<uint8>& response);
- void ReleaseSession(int media_keys_id, uint32 session_id);
+ // Returns true if a media player should use video-overlay for the embedded
+ // encrypted video.
+ bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo();
+#endif // defined(VIDEO_HOLE)
// Registers and unregisters a WebMediaPlayerAndroid object.
int RegisterMediaPlayer(WebMediaPlayerAndroid* player);
void UnregisterMediaPlayer(int player_id);
- // Registers a ProxyMediaKeys object. There must be a WebMediaPlayerAndroid
- // object already registered for this id, and it is unregistered when the
- // player is unregistered. For now |media_keys_id| is the same as player_id
- // used in other methods.
- void RegisterMediaKeys(int media_keys_id, ProxyMediaKeys* media_keys);
-
- // Releases the media resources managed by this object when a video
- // is playing.
- void ReleaseVideoResources();
-
// Checks whether a player can enter fullscreen.
bool CanEnterFullscreen(blink::WebFrame* frame);
@@ -132,9 +115,6 @@ class RendererMediaPlayerManager : public RenderViewObserver {
// Gets the pointer to WebMediaPlayerAndroid given the |player_id|.
WebMediaPlayerAndroid* GetMediaPlayer(int player_id);
- // Gets the pointer to ProxyMediaKeys given the |media_keys_id|.
- ProxyMediaKeys* GetMediaKeys(int media_keys_id);
-
#if defined(VIDEO_HOLE)
// Gets the list of media players with video geometry changes.
void RetrieveGeometryChanges(std::map<int, gfx::RectF>* changes);
@@ -155,35 +135,26 @@ class RendererMediaPlayerManager : public RenderViewObserver {
void OnVideoSizeChanged(int player_id, int width, int height);
void OnTimeUpdate(int player_id, base::TimeDelta current_time);
void OnMediaPlayerReleased(int player_id);
- void OnConnectedToRemoteDevice(int player_id);
+ void OnConnectedToRemoteDevice(int player_id,
+ const std::string& remote_playback_message);
void OnDisconnectedFromRemoteDevice(int player_id);
void OnDidExitFullscreen(int player_id);
void OnDidEnterFullscreen(int player_id);
void OnPlayerPlay(int player_id);
void OnPlayerPause(int player_id);
void OnRequestFullscreen(int player_id);
- void OnSessionCreated(int media_keys_id,
- uint32 session_id,
- const std::string& web_session_id);
- void OnSessionMessage(int media_keys_id,
- uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url);
- void OnSessionReady(int media_keys_id, uint32 session_id);
- void OnSessionClosed(int media_keys_id, uint32 session_id);
- void OnSessionError(int media_keys_id,
- uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code);
+ void OnPauseVideo();
+
+ // Release all video player resources.
+ // If something is in progress the resource will not be freed. It will
+ // only be freed once the tab is destroyed or if the user navigates away
+ // via WebMediaPlayerAndroid::Destroy.
+ void ReleaseVideoResources();
// Info for all available WebMediaPlayerAndroid on a page; kept so that
// we can enumerate them to send updates about tab focus and visibility.
std::map<int, WebMediaPlayerAndroid*> media_players_;
- // Info for all available ProxyMediaKeys. There must be at most one
- // ProxyMediaKeys for each available WebMediaPlayerAndroid.
- std::map<int, ProxyMediaKeys*> media_keys_;
-
int next_media_player_id_;
// WebFrame of the fullscreen video.
diff --git a/chromium/content/renderer/media/android/stream_texture_factory_android.h b/chromium/content/renderer/media/android/stream_texture_factory.h
index 849cc06eefa..70d31e9391a 100644
--- a/chromium/content/renderer/media/android/stream_texture_factory_android.h
+++ b/chromium/content/renderer/media/android/stream_texture_factory.h
@@ -1,18 +1,21 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_H_
-#define CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_H_
+#ifndef CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_H_
+#define CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_H_
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "cc/layers/video_frame_provider.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "ui/gfx/size.h"
-namespace blink {
-class WebGraphicsContext3D;
-}
+namespace gpu {
+namespace gles2 {
+class GLES2Interface;
+} // namespace gles2
+} // namespace gpu
namespace content {
@@ -26,8 +29,6 @@ class StreamTextureProxy {
// a connected client will receive callbacks on.
virtual void BindToCurrentThread(int32 stream_id) = 0;
- virtual bool IsBoundToThread() = 0;
-
// Setting the target for callback when a frame is available. This function
// could be called on both the main thread and the compositor thread.
virtual void SetClient(cc::VideoFrameProvider::Client* client) = 0;
@@ -44,10 +45,8 @@ typedef scoped_ptr<StreamTextureProxy, StreamTextureProxy::Deleter>
ScopedStreamTextureProxy;
// Factory class for managing stream textures.
-class StreamTextureFactory {
+class StreamTextureFactory : public base::RefCounted<StreamTextureFactory> {
public:
- virtual ~StreamTextureFactory() {}
-
// Create the StreamTextureProxy object.
virtual StreamTextureProxy* CreateProxy() = 0;
@@ -57,27 +56,24 @@ class StreamTextureFactory {
// the player_id.
virtual void EstablishPeer(int32 stream_id, int player_id) = 0;
- // Create the streamTexture and return the stream Id and create a client-side
- // texture id to refer to the streamTexture. The texture id is produced into
- // a mailbox so it can be used to ship in a VideoFrame, with a sync point for
- // when the mailbox can be accessed.
- virtual unsigned CreateStreamTexture(
- unsigned texture_target,
- unsigned* texture_id,
- gpu::Mailbox* texture_mailbox,
- unsigned* texture_mailbox_sync_point) = 0;
-
- // Destroy the streamTexture for the given texture id, as well as the
- // client side texture.
- virtual void DestroyStreamTexture(unsigned texture_id) = 0;
+ // Creates a StreamTexture and returns its id. Sets |*texture_id| to the
+ // client-side id of the StreamTexture. The texture is produced into
+ // a mailbox so it can be shipped in a VideoFrame.
+ virtual unsigned CreateStreamTexture(unsigned texture_target,
+ unsigned* texture_id,
+ gpu::Mailbox* texture_mailbox) = 0;
// Set the streamTexture size for the given stream Id.
virtual void SetStreamTextureSize(int32 texture_id,
const gfx::Size& size) = 0;
- virtual blink::WebGraphicsContext3D* Context3d() = 0;
+ virtual gpu::gles2::GLES2Interface* ContextGL() = 0;
+
+ protected:
+ friend class base::RefCounted<StreamTextureFactory>;
+ virtual ~StreamTextureFactory() {}
};
} // namespace content
-#endif // CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_H_
+#endif // CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_H_
diff --git a/chromium/content/renderer/media/android/stream_texture_factory_android_impl.h b/chromium/content/renderer/media/android/stream_texture_factory_android_impl.h
deleted file mode 100644
index 0cd3d5866c9..00000000000
--- a/chromium/content/renderer/media/android/stream_texture_factory_android_impl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_IMPL_H_
-#define CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_IMPL_H_
-
-#include "content/renderer/media/android/stream_texture_factory_android.h"
-
-namespace blink {
-class WebGraphicsContext3D;
-}
-
-namespace content {
-
-class GpuChannelHost;
-
-class StreamTextureFactoryImpl : public StreamTextureFactory {
- public:
- StreamTextureFactoryImpl(blink::WebGraphicsContext3D* context,
- GpuChannelHost* channel,
- int view_id);
- virtual ~StreamTextureFactoryImpl();
-
- // StreamTextureFactory implementation.
- virtual StreamTextureProxy* CreateProxy() OVERRIDE;
- virtual void EstablishPeer(int32 stream_id, int player_id) OVERRIDE;
- virtual unsigned CreateStreamTexture(
- unsigned texture_target,
- unsigned* texture_id,
- gpu::Mailbox* texture_mailbox,
- unsigned* texture_mailbox_sync_point) OVERRIDE;
- virtual void DestroyStreamTexture(unsigned texture_id) OVERRIDE;
- virtual void SetStreamTextureSize(int32 texture_id,
- const gfx::Size& size) OVERRIDE;
- virtual blink::WebGraphicsContext3D* Context3d() OVERRIDE;
-
- private:
- blink::WebGraphicsContext3D* context_;
- scoped_refptr<GpuChannelHost> channel_;
- int view_id_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StreamTextureFactoryImpl);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_IMPL_H_
diff --git a/chromium/content/renderer/media/android/stream_texture_factory_android_impl.cc b/chromium/content/renderer/media/android/stream_texture_factory_impl.cc
index 3b421ca04ef..58d165b5437 100644
--- a/chromium/content/renderer/media/android/stream_texture_factory_android_impl.cc
+++ b/chromium/content/renderer/media/android/stream_texture_factory_impl.cc
@@ -1,13 +1,14 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/renderer/media/android/stream_texture_factory_android_impl.h"
+#include "content/renderer/media/android/stream_texture_factory_impl.h"
+#include "cc/output/context_provider.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/renderer/gpu/stream_texture_host_android.h"
-#include "third_party/WebKit/public/platform/WebGraphicsContext3D.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/size.h"
namespace content {
@@ -22,7 +23,6 @@ class StreamTextureProxyImpl : public StreamTextureProxy,
// StreamTextureProxy implementation:
virtual void BindToCurrentThread(int32 stream_id) OVERRIDE;
- virtual bool IsBoundToThread() OVERRIDE { return loop_.get() != NULL; }
virtual void SetClient(cc::VideoFrameProvider::Client* client) OVERRIDE;
virtual void Release() OVERRIDE;
@@ -41,9 +41,7 @@ class StreamTextureProxyImpl : public StreamTextureProxy,
};
StreamTextureProxyImpl::StreamTextureProxyImpl(StreamTextureHost* host)
- : host_(host), client_(NULL) {
- host->SetListener(this);
-}
+ : host_(host), client_(NULL) {}
StreamTextureProxyImpl::~StreamTextureProxyImpl() {}
@@ -62,7 +60,7 @@ void StreamTextureProxyImpl::SetClient(cc::VideoFrameProvider::Client* client) {
void StreamTextureProxyImpl::BindToCurrentThread(int stream_id) {
loop_ = base::MessageLoopProxy::current();
- host_->Initialize(stream_id);
+ host_->BindToCurrentThread(stream_id, this);
}
void StreamTextureProxyImpl::OnFrameAvailable() {
@@ -79,12 +77,21 @@ void StreamTextureProxyImpl::OnMatrixChanged(const float matrix[16]) {
} // namespace
+// static
+scoped_refptr<StreamTextureFactoryImpl> StreamTextureFactoryImpl::Create(
+ const scoped_refptr<cc::ContextProvider>& context_provider,
+ GpuChannelHost* channel,
+ int frame_id) {
+ return new StreamTextureFactoryImpl(context_provider, channel, frame_id);
+}
+
StreamTextureFactoryImpl::StreamTextureFactoryImpl(
- blink::WebGraphicsContext3D* context,
+ const scoped_refptr<cc::ContextProvider>& context_provider,
GpuChannelHost* channel,
- int view_id)
- : context_(context), channel_(channel), view_id_(view_id) {
- DCHECK(context_);
+ int frame_id)
+ : context_provider_(context_provider),
+ channel_(channel),
+ frame_id_(frame_id) {
DCHECK(channel);
}
@@ -99,46 +106,32 @@ StreamTextureProxy* StreamTextureFactoryImpl::CreateProxy() {
void StreamTextureFactoryImpl::EstablishPeer(int32 stream_id, int player_id) {
DCHECK(channel_.get());
channel_->Send(
- new GpuChannelMsg_EstablishStreamTexture(stream_id, view_id_, player_id));
+ new GpuStreamTextureMsg_EstablishPeer(stream_id, frame_id_, player_id));
}
unsigned StreamTextureFactoryImpl::CreateStreamTexture(
unsigned texture_target,
unsigned* texture_id,
- gpu::Mailbox* texture_mailbox,
- unsigned* texture_mailbox_sync_point) {
- unsigned stream_id = 0;
- if (context_->makeContextCurrent()) {
- *texture_id = context_->createTexture();
- stream_id = context_->createStreamTextureCHROMIUM(*texture_id);
-
- context_->genMailboxCHROMIUM(texture_mailbox->name);
- context_->bindTexture(texture_target, *texture_id);
- context_->produceTextureCHROMIUM(texture_target, texture_mailbox->name);
-
- context_->flush();
- *texture_mailbox_sync_point = context_->insertSyncPoint();
- }
- return stream_id;
-}
+ gpu::Mailbox* texture_mailbox) {
+ GLuint stream_id = 0;
+ gpu::gles2::GLES2Interface* gl = context_provider_->ContextGL();
+ gl->GenTextures(1, texture_id);
-void StreamTextureFactoryImpl::DestroyStreamTexture(unsigned texture_id) {
- if (context_->makeContextCurrent()) {
- // TODO(sievers): Make the destroyStreamTexture implicit when the last
- // texture referencing it is lost.
- context_->destroyStreamTextureCHROMIUM(texture_id);
- context_->deleteTexture(texture_id);
- context_->flush();
- }
+ stream_id = gl->CreateStreamTextureCHROMIUM(*texture_id);
+
+ gl->GenMailboxCHROMIUM(texture_mailbox->name);
+ gl->BindTexture(texture_target, *texture_id);
+ gl->ProduceTextureCHROMIUM(texture_target, texture_mailbox->name);
+ return stream_id;
}
-void StreamTextureFactoryImpl::SetStreamTextureSize(
- int32 stream_id, const gfx::Size& size) {
- channel_->Send(new GpuChannelMsg_SetStreamTextureSize(stream_id, size));
+void StreamTextureFactoryImpl::SetStreamTextureSize(int32 stream_id,
+ const gfx::Size& size) {
+ channel_->Send(new GpuStreamTextureMsg_SetSize(stream_id, size));
}
-blink::WebGraphicsContext3D* StreamTextureFactoryImpl::Context3d() {
- return context_;
+gpu::gles2::GLES2Interface* StreamTextureFactoryImpl::ContextGL() {
+ return context_provider_->ContextGL();
}
} // namespace content
diff --git a/chromium/content/renderer/media/android/stream_texture_factory_impl.h b/chromium/content/renderer/media/android/stream_texture_factory_impl.h
new file mode 100644
index 00000000000..7721c4812ee
--- /dev/null
+++ b/chromium/content/renderer/media/android/stream_texture_factory_impl.h
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_IMPL_H_
+#define CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_IMPL_H_
+
+#include "content/renderer/media/android/stream_texture_factory.h"
+
+namespace cc {
+class ContextProvider;
+}
+
+namespace gpu {
+namespace gles2 {
+class GLES2Interface;
+} // namespace gles2
+} // namespace gpu
+
+namespace content {
+
+class GpuChannelHost;
+
+class StreamTextureFactoryImpl : public StreamTextureFactory {
+ public:
+ static scoped_refptr<StreamTextureFactoryImpl> Create(
+ const scoped_refptr<cc::ContextProvider>& context_provider,
+ GpuChannelHost* channel,
+ int frame_id);
+
+ // StreamTextureFactory implementation.
+ virtual StreamTextureProxy* CreateProxy() OVERRIDE;
+ virtual void EstablishPeer(int32 stream_id, int player_id) OVERRIDE;
+ virtual unsigned CreateStreamTexture(unsigned texture_target,
+ unsigned* texture_id,
+ gpu::Mailbox* texture_mailbox) OVERRIDE;
+ virtual void SetStreamTextureSize(int32 texture_id,
+ const gfx::Size& size) OVERRIDE;
+ virtual gpu::gles2::GLES2Interface* ContextGL() OVERRIDE;
+
+ private:
+ friend class base::RefCounted<StreamTextureFactoryImpl>;
+ StreamTextureFactoryImpl(
+ const scoped_refptr<cc::ContextProvider>& context_provider,
+ GpuChannelHost* channel,
+ int frame_id);
+ virtual ~StreamTextureFactoryImpl();
+
+ scoped_refptr<cc::ContextProvider> context_provider_;
+ scoped_refptr<GpuChannelHost> channel_;
+ int frame_id_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StreamTextureFactoryImpl);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_IMPL_H_
diff --git a/chromium/content/renderer/media/android/stream_texture_factory_android_synchronous_impl.cc b/chromium/content/renderer/media/android/stream_texture_factory_synchronous_impl.cc
index c284328a4b2..75536cbacc9 100644
--- a/chromium/content/renderer/media/android/stream_texture_factory_android_synchronous_impl.cc
+++ b/chromium/content/renderer/media/android/stream_texture_factory_synchronous_impl.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/renderer/media/android/stream_texture_factory_android_synchronous_impl.h"
+#include "content/renderer/media/android/stream_texture_factory_synchronous_impl.h"
#include <algorithm>
@@ -15,9 +15,12 @@
#include "base/synchronization/lock.h"
#include "cc/output/context_provider.h"
#include "content/common/android/surface_texture_peer.h"
-#include "third_party/WebKit/public/platform/WebGraphicsContext3D.h"
+#include "content/renderer/render_thread_impl.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gl/android/surface_texture.h"
+using gpu::gles2::GLES2Interface;
+
namespace content {
namespace {
@@ -32,11 +35,11 @@ class StreamTextureProxyImpl
// StreamTextureProxy implementation:
virtual void BindToCurrentThread(int32 stream_id) OVERRIDE;
- virtual bool IsBoundToThread() OVERRIDE { return loop_.get() != NULL; }
virtual void SetClient(cc::VideoFrameProvider::Client* client) OVERRIDE;
virtual void Release() OVERRIDE;
private:
+ void BindOnCompositorThread(int stream_id);
void OnFrameAvailable();
scoped_refptr<base::MessageLoopProxy> loop_;
@@ -57,6 +60,8 @@ class StreamTextureProxyImpl
StreamTextureProxyImpl::StreamTextureProxyImpl(
StreamTextureFactorySynchronousImpl::ContextProvider* provider)
: context_provider_(provider), has_updated_(false) {
+ DCHECK(RenderThreadImpl::current());
+ loop_ = RenderThreadImpl::current()->compositor_message_loop_proxy();
std::fill(current_matrix_, current_matrix_ + 16, 0);
}
@@ -64,7 +69,7 @@ StreamTextureProxyImpl::~StreamTextureProxyImpl() {}
void StreamTextureProxyImpl::Release() {
SetClient(NULL);
- if (loop_.get() && !loop_->BelongsToCurrentThread())
+ if (!loop_->BelongsToCurrentThread())
loop_->DeleteSoon(FROM_HERE, this);
else
delete this;
@@ -76,8 +81,19 @@ void StreamTextureProxyImpl::SetClient(cc::VideoFrameProvider::Client* client) {
}
void StreamTextureProxyImpl::BindToCurrentThread(int stream_id) {
- loop_ = base::MessageLoopProxy::current();
+ if (loop_->BelongsToCurrentThread()) {
+ BindOnCompositorThread(stream_id);
+ return;
+ }
+ // Weakptr is only used on compositor thread loop, so this is safe.
+ loop_->PostTask(FROM_HERE,
+ base::Bind(&StreamTextureProxyImpl::BindOnCompositorThread,
+ AsWeakPtr(),
+ stream_id));
+}
+
+void StreamTextureProxyImpl::BindOnCompositorThread(int stream_id) {
surface_texture_ = context_provider_->GetSurfaceTexture(stream_id);
if (!surface_texture_) {
LOG(ERROR) << "Failed to get SurfaceTexture for stream.";
@@ -116,12 +132,20 @@ void StreamTextureProxyImpl::OnFrameAvailable() {
} // namespace
+// static
+scoped_refptr<StreamTextureFactorySynchronousImpl>
+StreamTextureFactorySynchronousImpl::Create(
+ const CreateContextProviderCallback& try_create_callback,
+ int frame_id) {
+ return new StreamTextureFactorySynchronousImpl(try_create_callback, frame_id);
+}
+
StreamTextureFactorySynchronousImpl::StreamTextureFactorySynchronousImpl(
const CreateContextProviderCallback& try_create_callback,
- int view_id)
+ int frame_id)
: create_context_provider_callback_(try_create_callback),
context_provider_(create_context_provider_callback_.Run()),
- view_id_(view_id) {}
+ frame_id_(frame_id) {}
StreamTextureFactorySynchronousImpl::~StreamTextureFactorySynchronousImpl() {}
@@ -143,7 +167,7 @@ void StreamTextureFactorySynchronousImpl::EstablishPeer(int32 stream_id,
SurfaceTexturePeer::GetInstance()->EstablishSurfaceTexturePeer(
base::Process::Current().handle(),
surface_texture,
- view_id_,
+ frame_id_,
player_id);
}
}
@@ -151,44 +175,26 @@ void StreamTextureFactorySynchronousImpl::EstablishPeer(int32 stream_id,
unsigned StreamTextureFactorySynchronousImpl::CreateStreamTexture(
unsigned texture_target,
unsigned* texture_id,
- gpu::Mailbox* texture_mailbox,
- unsigned* texture_mailbox_sync_point) {
+ gpu::Mailbox* texture_mailbox) {
DCHECK(context_provider_);
- blink::WebGraphicsContext3D* context = context_provider_->Context3d();
unsigned stream_id = 0;
- if (context->makeContextCurrent()) {
- *texture_id = context->createTexture();
- stream_id = context->createStreamTextureCHROMIUM(*texture_id);
-
- context->genMailboxCHROMIUM(texture_mailbox->name);
- context->bindTexture(texture_target, *texture_id);
- context->produceTextureCHROMIUM(texture_target, texture_mailbox->name);
+ GLES2Interface* gl = context_provider_->ContextGL();
+ gl->GenTextures(1, texture_id);
+ stream_id = gl->CreateStreamTextureCHROMIUM(*texture_id);
- context->flush();
- *texture_mailbox_sync_point = context->insertSyncPoint();
- }
+ gl->GenMailboxCHROMIUM(texture_mailbox->name);
+ gl->BindTexture(texture_target, *texture_id);
+ gl->ProduceTextureCHROMIUM(texture_target, texture_mailbox->name);
return stream_id;
}
-void StreamTextureFactorySynchronousImpl::DestroyStreamTexture(
- unsigned texture_id) {
- DCHECK(context_provider_);
- blink::WebGraphicsContext3D* context = context_provider_->Context3d();
- if (context->makeContextCurrent()) {
- context->destroyStreamTextureCHROMIUM(texture_id);
- context->deleteTexture(texture_id);
- context->flush();
- }
-}
-
void StreamTextureFactorySynchronousImpl::SetStreamTextureSize(
int32 stream_id,
const gfx::Size& size) {}
-blink::WebGraphicsContext3D*
-StreamTextureFactorySynchronousImpl::Context3d() {
+gpu::gles2::GLES2Interface* StreamTextureFactorySynchronousImpl::ContextGL() {
DCHECK(context_provider_);
- return context_provider_->Context3d();
+ return context_provider_->ContextGL();
}
} // namespace content
diff --git a/chromium/content/renderer/media/android/stream_texture_factory_android_synchronous_impl.h b/chromium/content/renderer/media/android/stream_texture_factory_synchronous_impl.h
index 51c77e5666e..3466c564c9b 100644
--- a/chromium/content/renderer/media/android/stream_texture_factory_android_synchronous_impl.h
+++ b/chromium/content/renderer/media/android/stream_texture_factory_synchronous_impl.h
@@ -1,21 +1,23 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_SYNCHRONOUS_IMPL_H_
-#define CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_SYNCHRONOUS_IMPL_H_
+#ifndef CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_SYNCHRONOUS_IMPL_H_
+#define CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_SYNCHRONOUS_IMPL_H_
#include "base/callback.h"
#include "base/memory/ref_counted.h"
-#include "content/renderer/media/android/stream_texture_factory_android.h"
+#include "content/renderer/media/android/stream_texture_factory.h"
namespace gfx {
class SurfaceTexture;
}
-namespace blink {
-class WebGraphicsContext3D;
-}
+namespace gpu {
+namespace gles2 {
+class GLES2Interface;
+} // namespace gles2
+} // namespace gpu
namespace content {
@@ -27,7 +29,7 @@ class StreamTextureFactorySynchronousImpl : public StreamTextureFactory {
virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
uint32 stream_id) = 0;
- virtual blink::WebGraphicsContext3D* Context3d() = 0;
+ virtual gpu::gles2::GLES2Interface* ContextGL() = 0;
protected:
friend class base::RefCountedThreadSafe<ContextProvider>;
@@ -37,31 +39,33 @@ class StreamTextureFactorySynchronousImpl : public StreamTextureFactory {
typedef base::Callback<scoped_refptr<ContextProvider>(void)>
CreateContextProviderCallback;
- StreamTextureFactorySynchronousImpl(
+ static scoped_refptr<StreamTextureFactorySynchronousImpl> Create(
const CreateContextProviderCallback& try_create_callback,
- int view_id);
- virtual ~StreamTextureFactorySynchronousImpl();
+ int frame_id);
virtual StreamTextureProxy* CreateProxy() OVERRIDE;
virtual void EstablishPeer(int32 stream_id, int player_id) OVERRIDE;
- virtual unsigned CreateStreamTexture(
- unsigned texture_target,
- unsigned* texture_id,
- gpu::Mailbox* texture_mailbox,
- unsigned* texture_mailbox_sync_point) OVERRIDE;
- virtual void DestroyStreamTexture(unsigned texture_id) OVERRIDE;
+ virtual unsigned CreateStreamTexture(unsigned texture_target,
+ unsigned* texture_id,
+ gpu::Mailbox* texture_mailbox) OVERRIDE;
virtual void SetStreamTextureSize(int32 stream_id,
const gfx::Size& size) OVERRIDE;
- virtual blink::WebGraphicsContext3D* Context3d() OVERRIDE;
+ virtual gpu::gles2::GLES2Interface* ContextGL() OVERRIDE;
private:
+ friend class base::RefCounted<StreamTextureFactorySynchronousImpl>;
+ StreamTextureFactorySynchronousImpl(
+ const CreateContextProviderCallback& try_create_callback,
+ int frame_id);
+ virtual ~StreamTextureFactorySynchronousImpl();
+
CreateContextProviderCallback create_context_provider_callback_;
scoped_refptr<ContextProvider> context_provider_;
- int view_id_;
+ int frame_id_;
- DISALLOW_COPY_AND_ASSIGN(StreamTextureFactorySynchronousImpl);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StreamTextureFactorySynchronousImpl);
};
} // namespace content
-#endif // CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_ANDROID_SYNCHRONOUS_IMPL_H_
+#endif // CONTENT_RENDERER_MEDIA_ANDROID_STREAM_TEXTURE_FACTORY_SYNCHRONOUS_IMPL_H_
diff --git a/chromium/content/renderer/media/android/webmediaplayer_android.cc b/chromium/content/renderer/media/android/webmediaplayer_android.cc
index a39ae56fead..98335469166 100644
--- a/chromium/content/renderer/media/android/webmediaplayer_android.cc
+++ b/chromium/content/renderer/media/android/webmediaplayer_android.cc
@@ -7,43 +7,51 @@
#include <limits>
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
#include "cc/layers/video_layer.h"
#include "content/public/common/content_client.h"
-#include "content/renderer/media/android/proxy_media_keys.h"
+#include "content/public/common/content_switches.h"
+#include "content/public/renderer/render_frame.h"
+#include "content/renderer/compositor_bindings/web_layer_impl.h"
#include "content/renderer/media/android/renderer_demuxer_android.h"
#include "content/renderer/media/android/renderer_media_player_manager.h"
#include "content/renderer/media/crypto/key_systems.h"
+#include "content/renderer/media/crypto/renderer_cdm_manager.h"
+#include "content/renderer/media/webcontentdecryptionmodule_impl.h"
#include "content/renderer/media/webmediaplayer_delegate.h"
#include "content/renderer/media/webmediaplayer_util.h"
+#include "content/renderer/render_frame_impl.h"
#include "content/renderer/render_thread_impl.h"
#include "gpu/GLES2/gl2extchromium.h"
-#include "grit/content_resources.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/android/media_player_android.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
+// TODO(xhwang): Remove when we remove prefixed EME implementation.
+#include "media/base/media_keys.h"
#include "media/base/media_switches.h"
#include "media/base/video_frame.h"
#include "net/base/mime_util.h"
#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/WebKit/public/platform/WebURL.h"
#include "third_party/WebKit/public/web/WebDocument.h"
#include "third_party/WebKit/public/web/WebFrame.h"
#include "third_party/WebKit/public/web/WebRuntimeFeatures.h"
+#include "third_party/WebKit/public/web/WebSecurityOrigin.h"
#include "third_party/WebKit/public/web/WebView.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkPaint.h"
+#include "third_party/skia/include/core/SkTypeface.h"
#include "ui/gfx/image/image.h"
-#include "webkit/renderer/compositor_bindings/web_layer_impl.h"
-
-#if defined(GOOGLE_TV)
-#include "content/renderer/media/media_stream_audio_renderer.h"
-#include "content/renderer/media/media_stream_client.h"
-#endif
static const uint32 kGLTextureExternalOES = 0x8D65;
@@ -52,52 +60,52 @@ using blink::WebSize;
using blink::WebString;
using blink::WebTimeRanges;
using blink::WebURL;
+using gpu::gles2::GLES2Interface;
using media::MediaPlayerAndroid;
using media::VideoFrame;
namespace {
// Prefix for histograms related to Encrypted Media Extensions.
const char* kMediaEme = "Media.EME.";
+
+// File-static function is to allow it to run even after WMPA is deleted.
+void OnReleaseTexture(
+ const scoped_refptr<content::StreamTextureFactory>& factories,
+ uint32 texture_id,
+ const std::vector<uint32>& release_sync_points) {
+ GLES2Interface* gl = factories->ContextGL();
+ for (size_t i = 0; i < release_sync_points.size(); i++)
+ gl->WaitSyncPointCHROMIUM(release_sync_points[i]);
+ gl->DeleteTextures(1, &texture_id);
+}
} // namespace
namespace content {
-// static
-void WebMediaPlayerAndroid::OnReleaseRemotePlaybackTexture(
- const scoped_refptr<base::MessageLoopProxy>& main_loop,
- const base::WeakPtr<WebMediaPlayerAndroid>& player,
- uint32 sync_point) {
- main_loop->PostTask(
- FROM_HERE,
- base::Bind(&WebMediaPlayerAndroid::DoReleaseRemotePlaybackTexture,
- player,
- sync_point));
-}
-
WebMediaPlayerAndroid::WebMediaPlayerAndroid(
blink::WebFrame* frame,
blink::WebMediaPlayerClient* client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
- RendererMediaPlayerManager* manager,
- StreamTextureFactory* factory,
+ RendererMediaPlayerManager* player_manager,
+ RendererCdmManager* cdm_manager,
+ scoped_refptr<StreamTextureFactory> factory,
const scoped_refptr<base::MessageLoopProxy>& media_loop,
media::MediaLog* media_log)
- : frame_(frame),
+ : RenderFrameObserver(RenderFrame::FromWebFrame(frame)),
+ frame_(frame),
client_(client),
delegate_(delegate),
- buffered_(1u),
- main_loop_(base::MessageLoopProxy::current()),
+ buffered_(static_cast<size_t>(1)),
media_loop_(media_loop),
ignore_metadata_duration_change_(false),
pending_seek_(false),
seeking_(false),
did_loading_progress_(false),
- manager_(manager),
+ player_manager_(player_manager),
+ cdm_manager_(cdm_manager),
network_state_(WebMediaPlayer::NetworkStateEmpty),
ready_state_(WebMediaPlayer::ReadyStateHaveNothing),
- remote_playback_texture_id_(0),
texture_id_(0),
- texture_mailbox_sync_point_(0),
stream_id_(0),
is_playing_(false),
playing_started_(false),
@@ -109,112 +117,64 @@ WebMediaPlayerAndroid::WebMediaPlayerAndroid(
stream_texture_factory_(factory),
needs_external_surface_(false),
video_frame_provider_client_(NULL),
-#if defined(GOOGLE_TV)
- external_surface_threshold_(-1),
- demuxer_(NULL),
- media_stream_client_(NULL),
-#endif // defined(GOOGLE_TV)
pending_playback_(false),
player_type_(MEDIA_PLAYER_TYPE_URL),
current_time_(0),
is_remote_(false),
media_log_(media_log),
+ web_cdm_(NULL),
weak_factory_(this) {
- DCHECK(manager_);
+ DCHECK(player_manager_);
+ DCHECK(cdm_manager_);
DCHECK(main_thread_checker_.CalledOnValidThread());
- // We want to be notified of |main_loop_| destruction.
- base::MessageLoop::current()->AddDestructionObserver(this);
-
- player_id_ = manager_->RegisterMediaPlayer(this);
-
-#if defined(GOOGLE_TV)
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kUseExternalVideoSurfaceThresholdInPixels)) {
- if (!base::StringToInt(
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kUseExternalVideoSurfaceThresholdInPixels),
- &external_surface_threshold_)) {
- external_surface_threshold_ = -1;
- }
- }
-#endif // defined(GOOGLE_TV)
+ player_id_ = player_manager_->RegisterMediaPlayer(this);
#if defined(VIDEO_HOLE)
- // Defer stream texture creation until we are sure it's necessary.
- needs_establish_peer_ = false;
- current_frame_ = VideoFrame::CreateBlackFrame(gfx::Size(1, 1));
+ force_use_overlay_embedded_video_ = CommandLine::ForCurrentProcess()->
+ HasSwitch(switches::kForceUseOverlayEmbeddedVideo);
+ if (force_use_overlay_embedded_video_ ||
+ player_manager_->ShouldUseVideoOverlayForEmbeddedEncryptedVideo()) {
+ // Defer stream texture creation until we are sure it's necessary.
+ needs_establish_peer_ = false;
+ current_frame_ = VideoFrame::CreateBlackFrame(gfx::Size(1, 1));
+ }
#endif // defined(VIDEO_HOLE)
TryCreateStreamTextureProxyIfNeeded();
-
- if (blink::WebRuntimeFeatures::isPrefixedEncryptedMediaEnabled()) {
- // TODO(xhwang): Report an error when there is encrypted stream but EME is
- // not enabled. Currently the player just doesn't start and waits for ever.
- decryptor_.reset(new ProxyDecryptor(
-#if defined(ENABLE_PEPPER_CDMS)
- client,
- frame,
-#else
- manager_,
- player_id_, // TODO(xhwang): Use media_keys_id when MediaKeys are
- // separated from WebMediaPlayer.
-#endif // defined(ENABLE_PEPPER_CDMS)
- // |decryptor_| is owned, so Unretained() is safe here.
- base::Bind(&WebMediaPlayerAndroid::OnKeyAdded, base::Unretained(this)),
- base::Bind(&WebMediaPlayerAndroid::OnKeyError, base::Unretained(this)),
- base::Bind(&WebMediaPlayerAndroid::OnKeyMessage,
- base::Unretained(this))));
- }
}
WebMediaPlayerAndroid::~WebMediaPlayerAndroid() {
SetVideoFrameProviderClient(NULL);
client_->setWebLayer(NULL);
- if (manager_) {
- manager_->DestroyPlayer(player_id_);
- manager_->UnregisterMediaPlayer(player_id_);
+ if (player_manager_) {
+ player_manager_->DestroyPlayer(player_id_);
+ player_manager_->UnregisterMediaPlayer(player_id_);
}
- if (stream_id_)
- stream_texture_factory_->DestroyStreamTexture(texture_id_);
-
- if (remote_playback_texture_id_) {
- blink::WebGraphicsContext3D* context =
- stream_texture_factory_->Context3d();
- if (context->makeContextCurrent())
- context->deleteTexture(remote_playback_texture_id_);
+ if (stream_id_) {
+ GLES2Interface* gl = stream_texture_factory_->ContextGL();
+ gl->DeleteTextures(1, &texture_id_);
+ texture_id_ = 0;
+ texture_mailbox_ = gpu::Mailbox();
+ stream_id_ = 0;
}
- if (base::MessageLoop::current())
- base::MessageLoop::current()->RemoveDestructionObserver(this);
+ {
+ base::AutoLock auto_lock(current_frame_lock_);
+ current_frame_ = NULL;
+ }
if (player_type_ == MEDIA_PLAYER_TYPE_MEDIA_SOURCE && delegate_)
delegate_->PlayerGone(this);
-
-#if defined(GOOGLE_TV)
- if (audio_renderer_) {
- if (audio_renderer_->IsLocalRenderer()) {
- audio_renderer_->Stop();
- } else if (!paused()) {
- // The |audio_renderer_| can be shared by multiple remote streams, and
- // it will be stopped when WebRtcAudioDeviceImpl goes away. So we simply
- // pause the |audio_renderer_| here to avoid re-creating the
- // |audio_renderer_|.
- audio_renderer_->Pause();
- }
- }
- if (demuxer_ && !destroy_demuxer_cb_.is_null()) {
- media_source_delegate_.reset();
- destroy_demuxer_cb_.Run();
- }
-#endif
}
void WebMediaPlayerAndroid::load(LoadType load_type,
const blink::WebURL& url,
CORSMode cors_mode) {
+ ReportMediaSchemeUma(GURL(url));
+
switch (load_type) {
case LoadTypeURL:
player_type_ = MEDIA_PLAYER_TYPE_URL;
@@ -225,25 +185,14 @@ void WebMediaPlayerAndroid::load(LoadType load_type,
break;
case LoadTypeMediaStream:
-#if defined(GOOGLE_TV)
- player_type_ = MEDIA_PLAYER_TYPE_MEDIA_STREAM;
- break;
-#else
CHECK(false) << "WebMediaPlayerAndroid doesn't support MediaStream on "
"this platform";
return;
-#endif
}
has_media_metadata_ = false;
has_media_info_ = false;
- media::SetDecryptorReadyCB set_decryptor_ready_cb;
- if (decryptor_) { // |decryptor_| can be NULL is EME if not enabled.
- set_decryptor_ready_cb = base::Bind(&ProxyDecryptor::SetDecryptorReadyCB,
- base::Unretained(decryptor_.get()));
- }
-
int demuxer_client_id = 0;
if (player_type_ != MEDIA_PLAYER_TYPE_URL) {
has_media_info_ = true;
@@ -255,55 +204,52 @@ void WebMediaPlayerAndroid::load(LoadType load_type,
media_source_delegate_.reset(new MediaSourceDelegate(
demuxer, demuxer_client_id, media_loop_, media_log_));
- // |media_source_delegate_| is owned, so Unretained() is safe here.
if (player_type_ == MEDIA_PLAYER_TYPE_MEDIA_SOURCE) {
+ media::SetDecryptorReadyCB set_decryptor_ready_cb =
+ media::BindToCurrentLoop(
+ base::Bind(&WebMediaPlayerAndroid::SetDecryptorReadyCB,
+ weak_factory_.GetWeakPtr()));
+
media_source_delegate_->InitializeMediaSource(
base::Bind(&WebMediaPlayerAndroid::OnMediaSourceOpened,
weak_factory_.GetWeakPtr()),
- base::Bind(&WebMediaPlayerAndroid::OnNeedKey, base::Unretained(this)),
+ base::Bind(&WebMediaPlayerAndroid::OnNeedKey,
+ weak_factory_.GetWeakPtr()),
set_decryptor_ready_cb,
base::Bind(&WebMediaPlayerAndroid::UpdateNetworkState,
weak_factory_.GetWeakPtr()),
base::Bind(&WebMediaPlayerAndroid::OnDurationChanged,
weak_factory_.GetWeakPtr()));
}
-#if defined(GOOGLE_TV)
- // TODO(xhwang): Pass set_decryptor_ready_cb in InitializeMediaStream() to
- // enable ClearKey support for Google TV.
- if (player_type_ == MEDIA_PLAYER_TYPE_MEDIA_STREAM) {
- media_source_delegate_->InitializeMediaStream(
- demuxer_,
- base::Bind(&WebMediaPlayerAndroid::UpdateNetworkState,
- weak_factory_.GetWeakPtr()));
- audio_renderer_ = media_stream_client_->GetAudioRenderer(url);
- if (audio_renderer_)
- audio_renderer_->Start();
- }
-#endif
} else {
info_loader_.reset(
new MediaInfoLoader(
url,
cors_mode,
base::Bind(&WebMediaPlayerAndroid::DidLoadMediaInfo,
- base::Unretained(this))));
+ weak_factory_.GetWeakPtr())));
+ // TODO(qinmin): The url might be redirected when android media player
+ // requests the stream. As a result, we cannot guarantee there is only
+ // a single origin. Remove the following line when b/12573548 is fixed.
+ // Check http://crbug.com/334204.
+ info_loader_->set_single_origin(false);
info_loader_->Start(frame_);
}
url_ = url;
GURL first_party_url = frame_->document().firstPartyForCookies();
- manager_->Initialize(
- player_type_, player_id_, url, first_party_url, demuxer_client_id);
+ player_manager_->Initialize(
+ player_type_, player_id_, url, first_party_url, demuxer_client_id,
+ frame_->document().url());
- if (manager_->ShouldEnterFullscreen(frame_))
- manager_->EnterFullscreen(player_id_, frame_);
+ if (player_manager_->ShouldEnterFullscreen(frame_))
+ player_manager_->EnterFullscreen(player_id_, frame_);
UpdateNetworkState(WebMediaPlayer::NetworkStateLoading);
UpdateReadyState(WebMediaPlayer::ReadyStateHaveNothing);
}
-void WebMediaPlayerAndroid::DidLoadMediaInfo(
- MediaInfoLoader::Status status) {
+void WebMediaPlayerAndroid::DidLoadMediaInfo(MediaInfoLoader::Status status) {
DCHECK(!media_source_delegate_);
if (status == MediaInfoLoader::kFailed) {
info_loader_.reset();
@@ -327,42 +273,33 @@ void WebMediaPlayerAndroid::DidLoadMediaInfo(
void WebMediaPlayerAndroid::play() {
#if defined(VIDEO_HOLE)
if (hasVideo() && needs_external_surface_ &&
- !manager_->IsInFullscreen(frame_)) {
+ !player_manager_->IsInFullscreen(frame_)) {
DCHECK(!needs_establish_peer_);
- manager_->RequestExternalSurface(player_id_, last_computed_rect_);
+ player_manager_->RequestExternalSurface(player_id_, last_computed_rect_);
}
#endif // defined(VIDEO_HOLE)
-#if defined(GOOGLE_TV)
- if (audio_renderer_ && paused())
- audio_renderer_->Play();
-#endif // defined(GOOGLE_TV)
TryCreateStreamTextureProxyIfNeeded();
- if (hasVideo() && needs_establish_peer_)
+ // There is no need to establish the surface texture peer for fullscreen
+ // video.
+ if (hasVideo() && needs_establish_peer_ &&
+ !player_manager_->IsInFullscreen(frame_)) {
EstablishSurfaceTexturePeer();
+ }
if (paused())
- manager_->Start(player_id_);
+ player_manager_->Start(player_id_);
UpdatePlayingState(true);
UpdateNetworkState(WebMediaPlayer::NetworkStateLoading);
playing_started_ = true;
}
void WebMediaPlayerAndroid::pause() {
- pause(true);
-}
-
-void WebMediaPlayerAndroid::pause(bool is_media_related_action) {
-#if defined(GOOGLE_TV)
- if (audio_renderer_ && !paused())
- audio_renderer_->Pause();
-#endif
- manager_->Pause(player_id_, is_media_related_action);
- UpdatePlayingState(false);
+ Pause(true);
}
void WebMediaPlayerAndroid::seek(double seconds) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
DVLOG(1) << __FUNCTION__ << "(" << seconds << ")";
base::TimeDelta new_seek_time = ConvertSecondsToTimestamp(seconds);
@@ -402,11 +339,7 @@ void WebMediaPlayerAndroid::seek(double seconds) {
media_source_delegate_->StartWaitingForSeek(seek_time_);
// Kick off the asynchronous seek!
- manager_->Seek(player_id_, seek_time_);
-}
-
-bool WebMediaPlayerAndroid::supportsFullscreen() const {
- return true;
+ player_manager_->Seek(player_id_, seek_time_);
}
bool WebMediaPlayerAndroid::supportsSave() const {
@@ -418,7 +351,7 @@ void WebMediaPlayerAndroid::setRate(double rate) {
}
void WebMediaPlayerAndroid::setVolume(double volume) {
- manager_->SetVolume(player_id_, volume);
+ player_manager_->SetVolume(player_id_, volume);
}
bool WebMediaPlayerAndroid::hasVideo() const {
@@ -443,8 +376,18 @@ bool WebMediaPlayerAndroid::hasVideo() const {
}
bool WebMediaPlayerAndroid::hasAudio() const {
- // TODO(hclam): Query status of audio and return the actual value.
- return true;
+ if (!url_.has_path())
+ return false;
+ std::string mime;
+ if (!net::GetMimeTypeFromFile(base::FilePath(url_.path()), &mime))
+ return true;
+
+ if (mime.find("audio/") != std::string::npos ||
+ mime.find("video/") != std::string::npos ||
+ mime.find("application/ogg") != std::string::npos) {
+ return true;
+ }
+ return false;
}
bool WebMediaPlayerAndroid::paused() const {
@@ -460,12 +403,23 @@ double WebMediaPlayerAndroid::duration() const {
if (ready_state_ == WebMediaPlayer::ReadyStateHaveNothing)
return std::numeric_limits<double>::quiet_NaN();
- // TODO(wolenetz): Correctly handle durations that MediaSourcePlayer
- // considers unseekable, including kInfiniteDuration().
- // See http://crbug.com/248396
+ if (duration_ == media::kInfiniteDuration())
+ return std::numeric_limits<double>::infinity();
+
return duration_.InSecondsF();
}
+double WebMediaPlayerAndroid::timelineOffset() const {
+ base::Time timeline_offset;
+ if (media_source_delegate_)
+ timeline_offset = media_source_delegate_->GetTimelineOffset();
+
+ if (timeline_offset.is_null())
+ return std::numeric_limits<double>::quiet_NaN();
+
+ return timeline_offset.ToJsTime();
+}
+
double WebMediaPlayerAndroid::currentTime() const {
// If the player is processing a seek, return the seek time.
// Blink may still query us if updatePlaybackState() occurs while seeking.
@@ -489,7 +443,7 @@ WebMediaPlayer::ReadyState WebMediaPlayerAndroid::readyState() const {
return ready_state_;
}
-const WebTimeRanges& WebMediaPlayerAndroid::buffered() {
+WebTimeRanges WebMediaPlayerAndroid::buffered() const {
if (media_source_delegate_)
return media_source_delegate_->Buffered();
return buffered_;
@@ -501,11 +455,10 @@ double WebMediaPlayerAndroid::maxTimeSeekable() const {
if (ready_state_ < WebMediaPlayer::ReadyStateHaveMetadata)
return 0.0;
- // TODO(hclam): If this stream is not seekable this should return 0.
return duration();
}
-bool WebMediaPlayerAndroid::didLoadingProgress() const {
+bool WebMediaPlayerAndroid::didLoadingProgress() {
bool ret = did_loading_progress_;
did_loading_progress_ = false;
return ret;
@@ -525,21 +478,43 @@ bool WebMediaPlayerAndroid::copyVideoTextureToPlatformTexture(
unsigned int type,
bool premultiply_alpha,
bool flip_y) {
- if (is_remote_ || !texture_id_)
+ // Don't allow clients to copy an encrypted video frame.
+ if (needs_external_surface_)
return false;
+ scoped_refptr<VideoFrame> video_frame;
+ {
+ base::AutoLock auto_lock(current_frame_lock_);
+ video_frame = current_frame_;
+ }
+
+ if (!video_frame ||
+ video_frame->format() != media::VideoFrame::NATIVE_TEXTURE)
+ return false;
+ const gpu::MailboxHolder* mailbox_holder = video_frame->mailbox_holder();
+ DCHECK((!is_remote_ &&
+ mailbox_holder->texture_target == GL_TEXTURE_EXTERNAL_OES) ||
+ (is_remote_ && mailbox_holder->texture_target == GL_TEXTURE_2D));
+
// For hidden video element (with style "display:none"), ensure the texture
// size is set.
- if (cached_stream_texture_size_.width != natural_size_.width ||
- cached_stream_texture_size_.height != natural_size_.height) {
+ if (!is_remote_ &&
+ (cached_stream_texture_size_.width != natural_size_.width ||
+ cached_stream_texture_size_.height != natural_size_.height)) {
stream_texture_factory_->SetStreamTextureSize(
stream_id_, gfx::Size(natural_size_.width, natural_size_.height));
cached_stream_texture_size_ = natural_size_;
}
+ uint32 source_texture = web_graphics_context->createTexture();
+ web_graphics_context->waitSyncPoint(mailbox_holder->sync_point);
+
// Ensure the target of texture is set before copyTextureCHROMIUM, otherwise
// an invalid texture target may be used for copy texture.
- web_graphics_context->bindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id_);
+ web_graphics_context->bindTexture(mailbox_holder->texture_target,
+ source_texture);
+ web_graphics_context->consumeTextureCHROMIUM(mailbox_holder->texture_target,
+ mailbox_holder->mailbox.name);
// The video is stored in an unmultiplied format, so premultiply if
// necessary.
@@ -551,14 +526,20 @@ bool WebMediaPlayerAndroid::copyVideoTextureToPlatformTexture(
// flip_y==true means to reverse the video orientation while
// flip_y==false means to keep the intrinsic orientation.
web_graphics_context->pixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
- web_graphics_context->copyTextureCHROMIUM(GL_TEXTURE_2D, texture_id_,
+ web_graphics_context->copyTextureCHROMIUM(GL_TEXTURE_2D, source_texture,
texture, level, internal_format,
type);
web_graphics_context->pixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, false);
web_graphics_context->pixelStorei(GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM,
false);
- web_graphics_context->bindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+ if (mailbox_holder->texture_target == GL_TEXTURE_EXTERNAL_OES)
+ web_graphics_context->bindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+ else
+ web_graphics_context->bindTexture(GL_TEXTURE_2D, texture);
+ web_graphics_context->deleteTexture(source_texture);
+ web_graphics_context->flush();
+ video_frame->AppendReleaseSyncPoint(web_graphics_context->insertSyncPoint());
return true;
}
@@ -618,9 +599,6 @@ void WebMediaPlayerAndroid::OnMediaMetadataChanged(
// Update duration, if necessary, prior to ready state updates that may
// cause duration() query.
- // TODO(wolenetz): Correctly handle durations that MediaSourcePlayer
- // considers unseekable, including kInfiniteDuration().
- // See http://crbug.com/248396
if (!ignore_metadata_duration_change_ && duration_ != duration) {
duration_ = duration;
@@ -647,12 +625,6 @@ void WebMediaPlayerAndroid::OnMediaMetadataChanged(
if (success)
OnVideoSizeChanged(width, height);
- if (hasVideo() && !video_weblayer_ && client_->needsWebLayerForVideo()) {
- video_weblayer_.reset(
- new webkit::WebLayerImpl(cc::VideoLayer::Create(this)));
- client_->setWebLayer(video_weblayer_.get());
- }
-
if (need_to_signal_duration_changed)
client_->durationChanged();
}
@@ -681,13 +653,13 @@ void WebMediaPlayerAndroid::OnBufferingUpdate(int percentage) {
}
void WebMediaPlayerAndroid::OnSeekRequest(const base::TimeDelta& time_to_seek) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
client_->requestSeek(time_to_seek.InSecondsF());
}
void WebMediaPlayerAndroid::OnSeekComplete(
const base::TimeDelta& current_time) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
seeking_ = false;
if (pending_seek_) {
pending_seek_ = false;
@@ -730,27 +702,20 @@ void WebMediaPlayerAndroid::OnVideoSizeChanged(int width, int height) {
return;
#if defined(VIDEO_HOLE)
- bool has_surface_size_restriction = false;
-#if defined(GOOGLE_TV)
- has_surface_size_restriction = external_surface_threshold_ >= 0 &&
- external_surface_threshold_ <= width * height;
-#endif // defined(GOOGLE_TV)
- // Use H/W surface for MSE as the content might be protected.
+ // Use H/W surface for encrypted video.
// TODO(qinmin): Change this so that only EME needs the H/W surface
- if (media_source_delegate_ || has_surface_size_restriction) {
+ if (force_use_overlay_embedded_video_ ||
+ (media_source_delegate_ && media_source_delegate_->IsVideoEncrypted() &&
+ player_manager_->ShouldUseVideoOverlayForEmbeddedEncryptedVideo())) {
needs_external_surface_ = true;
- if (!paused() && !manager_->IsInFullscreen(frame_))
- manager_->RequestExternalSurface(player_id_, last_computed_rect_);
- } else if (stream_texture_factory_ && !stream_id_) {
+ if (!paused() && !player_manager_->IsInFullscreen(frame_))
+ player_manager_->RequestExternalSurface(player_id_, last_computed_rect_);
+ } else if (stream_texture_proxy_ && !stream_id_) {
// Do deferred stream texture creation finally.
DoCreateStreamTexture();
- if (paused()) {
- SetNeedsEstablishPeer(true);
- } else {
- EstablishSurfaceTexturePeer();
- }
+ SetNeedsEstablishPeer(true);
}
-#else
+#endif // defined(VIDEO_HOLE)
// When play() gets called, |natural_size_| may still be empty and
// EstablishSurfaceTexturePeer() will not get called. As a result, the video
// may play without a surface texture. When we finally get the valid video
@@ -758,22 +723,33 @@ void WebMediaPlayerAndroid::OnVideoSizeChanged(int width, int height) {
// previously called.
if (!paused() && needs_establish_peer_)
EstablishSurfaceTexturePeer();
-#endif // defined(VIDEO_HOLE)
natural_size_.width = width;
natural_size_.height = height;
ReallocateVideoFrame();
+
+ // Lazily allocate compositing layer.
+ if (!video_weblayer_) {
+ video_weblayer_.reset(new WebLayerImpl(cc::VideoLayer::Create(this)));
+ client_->setWebLayer(video_weblayer_.get());
+ }
+
+ // TODO(qinmin): This is a hack. We need the media element to stop showing the
+ // poster image by forcing it to call setDisplayMode(video). Should move the
+ // logic into HTMLMediaElement.cpp.
+ client_->timeChanged();
}
void WebMediaPlayerAndroid::OnTimeUpdate(const base::TimeDelta& current_time) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
current_time_ = current_time.InSecondsF();
}
-void WebMediaPlayerAndroid::OnConnectedToRemoteDevice() {
+void WebMediaPlayerAndroid::OnConnectedToRemoteDevice(
+ const std::string& remote_playback_message) {
DCHECK(main_thread_checker_.CalledOnValidThread());
DCHECK(!media_source_delegate_);
- DrawRemotePlaybackIcon();
+ DrawRemotePlaybackText(remote_playback_message);
is_remote_ = true;
SetNeedsEstablishPeer(false);
}
@@ -789,10 +765,10 @@ void WebMediaPlayerAndroid::OnDisconnectedFromRemoteDevice() {
}
void WebMediaPlayerAndroid::OnDidEnterFullscreen() {
- if (!manager_->IsInFullscreen(frame_)) {
+ if (!player_manager_->IsInFullscreen(frame_)) {
frame_->view()->willEnterFullScreen();
frame_->view()->didEnterFullScreen();
- manager_->DidEnterFullscreen(frame_);
+ player_manager_->DidEnterFullscreen(frame_);
}
}
@@ -807,12 +783,12 @@ void WebMediaPlayerAndroid::OnDidExitFullscreen() {
#if defined(VIDEO_HOLE)
if (!paused() && needs_external_surface_)
- manager_->RequestExternalSurface(player_id_, last_computed_rect_);
+ player_manager_->RequestExternalSurface(player_id_, last_computed_rect_);
#endif // defined(VIDEO_HOLE)
frame_->view()->willExitFullScreen();
frame_->view()->didExitFullScreen();
- manager_->DidExitFullscreen();
+ player_manager_->DidExitFullscreen();
client_->repaint();
}
@@ -831,15 +807,12 @@ void WebMediaPlayerAndroid::OnRequestFullscreen() {
}
void WebMediaPlayerAndroid::OnDurationChanged(const base::TimeDelta& duration) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
// Only MSE |player_type_| registers this callback.
DCHECK_EQ(player_type_, MEDIA_PLAYER_TYPE_MEDIA_SOURCE);
// Cache the new duration value and trust it over any subsequent duration
// values received in OnMediaMetadataChanged().
- // TODO(wolenetz): Correctly handle durations that MediaSourcePlayer
- // considers unseekable, including kInfiniteDuration().
- // See http://crbug.com/248396
duration_ = duration;
ignore_metadata_duration_change_ = true;
@@ -850,7 +823,7 @@ void WebMediaPlayerAndroid::OnDurationChanged(const base::TimeDelta& duration) {
void WebMediaPlayerAndroid::UpdateNetworkState(
WebMediaPlayer::NetworkState state) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
if (ready_state_ == WebMediaPlayer::ReadyStateHaveNothing &&
(state == WebMediaPlayer::NetworkStateNetworkError ||
state == WebMediaPlayer::NetworkStateDecodeError)) {
@@ -874,6 +847,9 @@ void WebMediaPlayerAndroid::OnPlayerReleased() {
if (!needs_external_surface_)
needs_establish_peer_ = true;
+ if (is_playing_)
+ OnMediaPlayerPause();
+
#if defined(VIDEO_HOLE)
last_computed_rect_ = gfx::RectF();
#endif // defined(VIDEO_HOLE)
@@ -886,7 +862,7 @@ void WebMediaPlayerAndroid::ReleaseMediaResources() {
case WebMediaPlayer::NetworkStateIdle:
case WebMediaPlayer::NetworkStateLoading:
case WebMediaPlayer::NetworkStateLoaded:
- pause(false);
+ Pause(false);
client_->playbackStateChanged();
break;
// If a WebMediaPlayer instance has entered into one of these states,
@@ -898,38 +874,27 @@ void WebMediaPlayerAndroid::ReleaseMediaResources() {
case WebMediaPlayer::NetworkStateDecodeError:
break;
}
- manager_->ReleaseResources(player_id_);
+ player_manager_->ReleaseResources(player_id_);
OnPlayerReleased();
}
-void WebMediaPlayerAndroid::WillDestroyCurrentMessageLoop() {
- if (manager_)
- manager_->UnregisterMediaPlayer(player_id_);
- Detach();
+void WebMediaPlayerAndroid::OnDestruct() {
+ NOTREACHED() << "WebMediaPlayer should be destroyed before any "
+ "RenderFrameObserver::OnDestruct() gets called when "
+ "the RenderFrame goes away.";
}
-void WebMediaPlayerAndroid::Detach() {
- if (stream_id_) {
- stream_texture_factory_->DestroyStreamTexture(texture_id_);
- stream_id_ = 0;
- }
-
- media_source_delegate_.reset();
- {
- base::AutoLock auto_lock(current_frame_lock_);
- current_frame_ = NULL;
- }
- is_remote_ = false;
- manager_ = NULL;
+void WebMediaPlayerAndroid::Pause(bool is_media_related_action) {
+ player_manager_->Pause(player_id_, is_media_related_action);
+ UpdatePlayingState(false);
}
-void WebMediaPlayerAndroid::DrawRemotePlaybackIcon() {
+void WebMediaPlayerAndroid::DrawRemotePlaybackText(
+ const std::string& remote_playback_message) {
+
DCHECK(main_thread_checker_.CalledOnValidThread());
if (!video_weblayer_)
return;
- blink::WebGraphicsContext3D* context = stream_texture_factory_->Context3d();
- if (!context->makeContextCurrent())
- return;
// TODO(johnme): Should redraw this frame if the layer bounds change; but
// there seems no easy way to listen for the layer resizing (as opposed to
@@ -947,77 +912,105 @@ void WebMediaPlayerAndroid::DrawRemotePlaybackIcon() {
SkBitmap::kARGB_8888_Config, canvas_size.width(), canvas_size.height());
bitmap.allocPixels();
+ // Create the canvas and draw the "Casting to <Chromecast>" text on it.
SkCanvas canvas(bitmap);
canvas.drawColor(SK_ColorBLACK);
+
+ const SkScalar kTextSize(40);
+ const SkScalar kMinPadding(40);
+
SkPaint paint;
paint.setAntiAlias(true);
paint.setFilterLevel(SkPaint::kHigh_FilterLevel);
- const SkBitmap* icon_bitmap =
- content::GetContentClient()
- ->GetNativeImageNamed(IDR_MEDIAPLAYER_REMOTE_PLAYBACK_ICON)
- .ToSkBitmap();
- // In order to get a reasonable margin around the icon:
- // - the icon should be under half the frame width
- // - the icon should be at most 3/5 of the frame height
- // Additionally, on very large screens, the icon size should be capped. A max
- // width of 320 was arbitrarily chosen; since this is half the resource's
- // pixel width, it should look crisp even on 2x deviceScaleFactor displays.
- int icon_width = 320;
- icon_width = std::min(icon_width, canvas_size.width() / 2);
- icon_width = std::min(icon_width,
- canvas_size.height() * icon_bitmap->width() /
- icon_bitmap->height() * 3 / 5);
- int icon_height = icon_width * icon_bitmap->height() / icon_bitmap->width();
- // Center the icon within the frame
- SkRect icon_rect = SkRect::MakeXYWH((canvas_size.width() - icon_width) / 2,
- (canvas_size.height() - icon_height) / 2,
- icon_width,
- icon_height);
- canvas.drawBitmapRectToRect(
- *icon_bitmap, NULL /* src */, icon_rect /* dest */, &paint);
-
- if (!remote_playback_texture_id_)
- remote_playback_texture_id_ = context->createTexture();
- unsigned texture_target = GL_TEXTURE_2D;
- context->bindTexture(texture_target, remote_playback_texture_id_);
- context->texParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- context->texParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- context->texParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- context->texParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ paint.setColor(SK_ColorWHITE);
+ paint.setTypeface(SkTypeface::CreateFromName("sans", SkTypeface::kBold));
+ paint.setTextSize(kTextSize);
+
+ // Calculate the vertical margin from the top
+ SkPaint::FontMetrics font_metrics;
+ paint.getFontMetrics(&font_metrics);
+ SkScalar sk_vertical_margin = kMinPadding - font_metrics.fAscent;
+
+ // Measure the width of the entire text to display
+ size_t display_text_width = paint.measureText(
+ remote_playback_message.c_str(), remote_playback_message.size());
+ std::string display_text(remote_playback_message);
+
+ if (display_text_width + (kMinPadding * 2) > canvas_size.width()) {
+ // The text is too long to fit in one line, truncate it and append ellipsis
+ // to the end.
+
+ // First, figure out how much of the canvas the '...' will take up.
+ const std::string kTruncationEllipsis("\xE2\x80\xA6");
+ SkScalar sk_ellipse_width = paint.measureText(
+ kTruncationEllipsis.c_str(), kTruncationEllipsis.size());
+
+ // Then calculate how much of the text can be drawn with the '...' appended
+ // to the end of the string.
+ SkScalar sk_max_original_text_width(
+ canvas_size.width() - (kMinPadding * 2) - sk_ellipse_width);
+ size_t sk_max_original_text_length = paint.breakText(
+ remote_playback_message.c_str(),
+ remote_playback_message.size(),
+ sk_max_original_text_width);
+
+ // Remove the part of the string that doesn't fit and append '...'.
+ display_text.erase(sk_max_original_text_length,
+ remote_playback_message.size() - sk_max_original_text_length);
+ display_text.append(kTruncationEllipsis);
+ display_text_width = paint.measureText(
+ display_text.c_str(), display_text.size());
+ }
+
+ // Center the text horizontally.
+ SkScalar sk_horizontal_margin =
+ (canvas_size.width() - display_text_width) / 2.0;
+ canvas.drawText(display_text.c_str(),
+ display_text.size(),
+ sk_horizontal_margin,
+ sk_vertical_margin,
+ paint);
+
+ GLES2Interface* gl = stream_texture_factory_->ContextGL();
+ GLuint remote_playback_texture_id = 0;
+ gl->GenTextures(1, &remote_playback_texture_id);
+ GLuint texture_target = GL_TEXTURE_2D;
+ gl->BindTexture(texture_target, remote_playback_texture_id);
+ gl->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ gl->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ gl->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ gl->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
{
SkAutoLockPixels lock(bitmap);
- context->texImage2D(texture_target,
- 0 /* level */,
- GL_RGBA /* internalformat */,
- bitmap.width(),
- bitmap.height(),
- 0 /* border */,
- GL_RGBA /* format */,
- GL_UNSIGNED_BYTE /* type */,
- bitmap.getPixels());
+ gl->TexImage2D(texture_target,
+ 0 /* level */,
+ GL_RGBA /* internalformat */,
+ bitmap.width(),
+ bitmap.height(),
+ 0 /* border */,
+ GL_RGBA /* format */,
+ GL_UNSIGNED_BYTE /* type */,
+ bitmap.getPixels());
}
gpu::Mailbox texture_mailbox;
- context->genMailboxCHROMIUM(texture_mailbox.name);
- context->produceTextureCHROMIUM(texture_target, texture_mailbox.name);
- context->flush();
- unsigned texture_mailbox_sync_point = context->insertSyncPoint();
+ gl->GenMailboxCHROMIUM(texture_mailbox.name);
+ gl->ProduceTextureCHROMIUM(texture_target, texture_mailbox.name);
+ gl->Flush();
+ GLuint texture_mailbox_sync_point = gl->InsertSyncPointCHROMIUM();
scoped_refptr<VideoFrame> new_frame = VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new VideoFrame::MailboxHolder(
- texture_mailbox,
- texture_mailbox_sync_point,
- base::Bind(&WebMediaPlayerAndroid::OnReleaseRemotePlaybackTexture,
- main_loop_,
- weak_factory_.GetWeakPtr()))),
- texture_target,
+ make_scoped_ptr(new gpu::MailboxHolder(
+ texture_mailbox, texture_target, texture_mailbox_sync_point)),
+ media::BindToCurrentLoop(base::Bind(&OnReleaseTexture,
+ stream_texture_factory_,
+ remote_playback_texture_id)),
canvas_size /* coded_size */,
gfx::Rect(canvas_size) /* visible_rect */,
canvas_size /* natural_size */,
base::TimeDelta() /* timestamp */,
- VideoFrame::ReadPixelsCB(),
- base::Closure() /* no_longer_needed_cb */);
+ VideoFrame::ReadPixelsCB());
SetCurrentFrameInternal(new_frame);
}
@@ -1036,18 +1029,25 @@ void WebMediaPlayerAndroid::ReallocateVideoFrame() {
NOTIMPLEMENTED() << "Hole punching not supported without VIDEO_HOLE flag";
#endif // defined(VIDEO_HOLE)
} else if (!is_remote_ && texture_id_) {
+ GLES2Interface* gl = stream_texture_factory_->ContextGL();
+ GLuint texture_id_ref = 0;
+ gl->GenTextures(1, &texture_id_ref);
+ GLuint texture_target = kGLTextureExternalOES;
+ gl->BindTexture(texture_target, texture_id_ref);
+ gl->ConsumeTextureCHROMIUM(texture_target, texture_mailbox_.name);
+ gl->Flush();
+ GLuint texture_mailbox_sync_point = gl->InsertSyncPointCHROMIUM();
+
scoped_refptr<VideoFrame> new_frame = VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new VideoFrame::MailboxHolder(
- texture_mailbox_,
- texture_mailbox_sync_point_,
- VideoFrame::MailboxHolder::TextureNoLongerNeededCallback())),
- kGLTextureExternalOES,
+ make_scoped_ptr(new gpu::MailboxHolder(
+ texture_mailbox_, texture_target, texture_mailbox_sync_point)),
+ media::BindToCurrentLoop(base::Bind(
+ &OnReleaseTexture, stream_texture_factory_, texture_id_ref)),
natural_size_,
gfx::Rect(natural_size_),
natural_size_,
base::TimeDelta(),
- VideoFrame::ReadPixelsCB(),
- base::Closure());
+ VideoFrame::ReadPixelsCB());
SetCurrentFrameInternal(new_frame);
}
}
@@ -1118,18 +1118,6 @@ void WebMediaPlayerAndroid::EstablishSurfaceTexturePeer() {
if (!stream_texture_proxy_)
return;
- if (media_source_delegate_ && stream_texture_factory_) {
- // MediaCodec will release the old surface when it goes away, we need to
- // recreate a new one each time this is called.
- stream_texture_factory_->DestroyStreamTexture(texture_id_);
- stream_id_ = 0;
- texture_id_ = 0;
- texture_mailbox_ = gpu::Mailbox();
- texture_mailbox_sync_point_ = 0;
- DoCreateStreamTexture();
- ReallocateVideoFrame();
- stream_texture_proxy_initialized_ = false;
- }
if (stream_texture_factory_.get() && stream_id_)
stream_texture_factory_->EstablishPeer(stream_id_, player_id_);
needs_establish_peer_ = false;
@@ -1138,18 +1126,18 @@ void WebMediaPlayerAndroid::EstablishSurfaceTexturePeer() {
void WebMediaPlayerAndroid::DoCreateStreamTexture() {
DCHECK(!stream_id_);
DCHECK(!texture_id_);
- DCHECK(!texture_mailbox_sync_point_);
stream_id_ = stream_texture_factory_->CreateStreamTexture(
- kGLTextureExternalOES,
- &texture_id_,
- &texture_mailbox_,
- &texture_mailbox_sync_point_);
+ kGLTextureExternalOES, &texture_id_, &texture_mailbox_);
}
void WebMediaPlayerAndroid::SetNeedsEstablishPeer(bool needs_establish_peer) {
needs_establish_peer_ = needs_establish_peer;
}
+void WebMediaPlayerAndroid::setPoster(const blink::WebURL& poster) {
+ player_manager_->SetPoster(player_id_, poster);
+}
+
void WebMediaPlayerAndroid::UpdatePlayingState(bool is_playing) {
is_playing_ = is_playing;
if (!delegate_)
@@ -1161,39 +1149,50 @@ void WebMediaPlayerAndroid::UpdatePlayingState(bool is_playing) {
}
#if defined(VIDEO_HOLE)
-bool WebMediaPlayerAndroid::RetrieveGeometryChange(gfx::RectF* rect) {
+bool WebMediaPlayerAndroid::UpdateBoundaryRectangle() {
if (!video_weblayer_)
return false;
// Compute the geometry of video frame layer.
cc::Layer* layer = video_weblayer_->layer();
- rect->set_size(layer->bounds());
+ gfx::RectF rect(layer->bounds());
while (layer) {
- rect->Offset(layer->position().OffsetFromOrigin());
+ rect.Offset(layer->position().OffsetFromOrigin());
layer = layer->parent();
}
// Return false when the geometry hasn't been changed from the last time.
- if (last_computed_rect_ == *rect)
+ if (last_computed_rect_ == rect)
return false;
// Store the changed geometry information when it is actually changed.
- last_computed_rect_ = *rect;
+ last_computed_rect_ = rect;
return true;
}
+
+const gfx::RectF WebMediaPlayerAndroid::GetBoundaryRectangle() {
+ return last_computed_rect_;
+}
#endif
// The following EME related code is copied from WebMediaPlayerImpl.
// TODO(xhwang): Remove duplicate code between WebMediaPlayerAndroid and
// WebMediaPlayerImpl.
-// TODO(kjyoun): Update Google TV EME implementation to use IPC.
+
+// Convert a WebString to ASCII, falling back on an empty string in the case
+// of a non-ASCII string.
+static std::string ToASCIIOrEmpty(const blink::WebString& string) {
+ return base::IsStringASCII(string) ? base::UTF16ToASCII(string)
+ : std::string();
+}
// Helper functions to report media EME related stats to UMA. They follow the
// convention of more commonly used macros UMA_HISTOGRAM_ENUMERATION and
// UMA_HISTOGRAM_COUNTS. The reason that we cannot use those macros directly is
// that UMA_* macros require the names to be constant throughout the process'
// lifetime.
-static void EmeUMAHistogramEnumeration(const blink::WebString& key_system,
+
+static void EmeUMAHistogramEnumeration(const std::string& key_system,
const std::string& method,
int sample,
int boundary_value) {
@@ -1203,7 +1202,7 @@ static void EmeUMAHistogramEnumeration(const blink::WebString& key_system,
base::Histogram::kUmaTargetedHistogramFlag)->Add(sample);
}
-static void EmeUMAHistogramCounts(const blink::WebString& key_system,
+static void EmeUMAHistogramCounts(const std::string& key_system,
const std::string& method,
int sample) {
// Use the same parameters as UMA_HISTOGRAM_COUNTS.
@@ -1237,59 +1236,103 @@ static MediaKeyException MediaKeyExceptionForUMA(
// Helper for converting |key_system| name and exception |e| to a pair of enum
// values from above, for reporting to UMA.
-static void ReportMediaKeyExceptionToUMA(
- const std::string& method,
- const WebString& key_system,
- WebMediaPlayer::MediaKeyException e) {
+static void ReportMediaKeyExceptionToUMA(const std::string& method,
+ const std::string& key_system,
+ WebMediaPlayer::MediaKeyException e) {
MediaKeyException result_id = MediaKeyExceptionForUMA(e);
DCHECK_NE(result_id, kUnknownResultId) << e;
EmeUMAHistogramEnumeration(
key_system, method, result_id, kMaxMediaKeyException);
}
+bool WebMediaPlayerAndroid::IsKeySystemSupported(
+ const std::string& key_system) {
+ // On Android, EME only works with MSE.
+ return player_type_ == MEDIA_PLAYER_TYPE_MEDIA_SOURCE &&
+ IsConcreteSupportedKeySystem(key_system);
+}
+
WebMediaPlayer::MediaKeyException WebMediaPlayerAndroid::generateKeyRequest(
const WebString& key_system,
const unsigned char* init_data,
unsigned init_data_length) {
+ DVLOG(1) << "generateKeyRequest: " << base::string16(key_system) << ": "
+ << std::string(reinterpret_cast<const char*>(init_data),
+ static_cast<size_t>(init_data_length));
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+
WebMediaPlayer::MediaKeyException e =
- GenerateKeyRequestInternal(key_system, init_data, init_data_length);
- ReportMediaKeyExceptionToUMA("generateKeyRequest", key_system, e);
+ GenerateKeyRequestInternal(ascii_key_system, init_data, init_data_length);
+ ReportMediaKeyExceptionToUMA("generateKeyRequest", ascii_key_system, e);
return e;
}
-bool WebMediaPlayerAndroid::IsKeySystemSupported(const WebString& key_system) {
- // On Android, EME only works with MSE.
- return player_type_ == MEDIA_PLAYER_TYPE_MEDIA_SOURCE &&
- IsConcreteSupportedKeySystem(key_system);
+// Guess the type of |init_data|. This is only used to handle some corner cases
+// so we keep it as simple as possible without breaking major use cases.
+static std::string GuessInitDataType(const unsigned char* init_data,
+ unsigned init_data_length) {
+ // Most WebM files use KeyId of 16 bytes. MP4 init data are always >16 bytes.
+ if (init_data_length == 16)
+ return "video/webm";
+
+ return "video/mp4";
}
+// TODO(xhwang): Report an error when there is encrypted stream but EME is
+// not enabled. Currently the player just doesn't start and waits for
+// ever.
WebMediaPlayer::MediaKeyException
WebMediaPlayerAndroid::GenerateKeyRequestInternal(
- const WebString& key_system,
+ const std::string& key_system,
const unsigned char* init_data,
unsigned init_data_length) {
- DVLOG(1) << "generateKeyRequest: " << key_system.utf8().data() << ": "
- << std::string(reinterpret_cast<const char*>(init_data),
- static_cast<size_t>(init_data_length));
-
if (!IsKeySystemSupported(key_system))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
// We do not support run-time switching between key systems for now.
- if (current_key_system_.isEmpty()) {
- if (!decryptor_->InitializeCDM(key_system.utf8(), frame_->document().url()))
+ if (current_key_system_.empty()) {
+ if (!proxy_decryptor_) {
+ proxy_decryptor_.reset(new ProxyDecryptor(
+ cdm_manager_,
+ base::Bind(&WebMediaPlayerAndroid::OnKeyAdded,
+ weak_factory_.GetWeakPtr()),
+ base::Bind(&WebMediaPlayerAndroid::OnKeyError,
+ weak_factory_.GetWeakPtr()),
+ base::Bind(&WebMediaPlayerAndroid::OnKeyMessage,
+ weak_factory_.GetWeakPtr())));
+ }
+
+ GURL security_origin(frame_->document().securityOrigin().toString());
+ if (!proxy_decryptor_->InitializeCDM(key_system, security_origin))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
+
+ if (!decryptor_ready_cb_.is_null()) {
+ base::ResetAndReturn(&decryptor_ready_cb_)
+ .Run(proxy_decryptor_->GetDecryptor());
+ }
+
+ // Only browser CDMs have CDM ID. Render side CDMs (e.g. ClearKey CDM) do
+ // not have a CDM ID and there is no need to call player_manager_->SetCdm().
+ if (proxy_decryptor_->GetCdmId() != RendererCdmManager::kInvalidCdmId)
+ player_manager_->SetCdm(player_id_, proxy_decryptor_->GetCdmId());
+
current_key_system_ = key_system;
} else if (key_system != current_key_system_) {
return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
}
+ std::string init_data_type = init_data_type_;
+ if (init_data_type.empty())
+ init_data_type = GuessInitDataType(init_data, init_data_length);
+
// TODO(xhwang): We assume all streams are from the same container (thus have
// the same "type") for now. In the future, the "type" should be passed down
// from the application.
- if (!decryptor_->GenerateKeyRequest(init_data_type_,
- init_data, init_data_length)) {
- current_key_system_.reset();
+ if (!proxy_decryptor_->GenerateKeyRequest(
+ init_data_type, init_data, init_data_length)) {
+ current_key_system_.clear();
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
}
@@ -1303,98 +1346,136 @@ WebMediaPlayer::MediaKeyException WebMediaPlayerAndroid::addKey(
const unsigned char* init_data,
unsigned init_data_length,
const WebString& session_id) {
- WebMediaPlayer::MediaKeyException e = AddKeyInternal(
- key_system, key, key_length, init_data, init_data_length, session_id);
- ReportMediaKeyExceptionToUMA("addKey", key_system, e);
+ DVLOG(1) << "addKey: " << base::string16(key_system) << ": "
+ << std::string(reinterpret_cast<const char*>(key),
+ static_cast<size_t>(key_length)) << ", "
+ << std::string(reinterpret_cast<const char*>(init_data),
+ static_cast<size_t>(init_data_length)) << " ["
+ << base::string16(session_id) << "]";
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+ std::string ascii_session_id = ToASCIIOrEmpty(session_id);
+
+ WebMediaPlayer::MediaKeyException e = AddKeyInternal(ascii_key_system,
+ key,
+ key_length,
+ init_data,
+ init_data_length,
+ ascii_session_id);
+ ReportMediaKeyExceptionToUMA("addKey", ascii_key_system, e);
return e;
}
WebMediaPlayer::MediaKeyException WebMediaPlayerAndroid::AddKeyInternal(
- const WebString& key_system,
+ const std::string& key_system,
const unsigned char* key,
unsigned key_length,
const unsigned char* init_data,
unsigned init_data_length,
- const WebString& session_id) {
+ const std::string& session_id) {
DCHECK(key);
DCHECK_GT(key_length, 0u);
- DVLOG(1) << "addKey: " << key_system.utf8().data() << ": "
- << std::string(reinterpret_cast<const char*>(key),
- static_cast<size_t>(key_length)) << ", "
- << std::string(reinterpret_cast<const char*>(init_data),
- static_cast<size_t>(init_data_length))
- << " [" << session_id.utf8().data() << "]";
if (!IsKeySystemSupported(key_system))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
- if (current_key_system_.isEmpty() || key_system != current_key_system_)
+ if (current_key_system_.empty() || key_system != current_key_system_)
return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
- decryptor_->AddKey(key, key_length, init_data, init_data_length,
- session_id.utf8());
+ proxy_decryptor_->AddKey(
+ key, key_length, init_data, init_data_length, session_id);
return WebMediaPlayer::MediaKeyExceptionNoError;
}
WebMediaPlayer::MediaKeyException WebMediaPlayerAndroid::cancelKeyRequest(
const WebString& key_system,
const WebString& session_id) {
+ DVLOG(1) << "cancelKeyRequest: " << base::string16(key_system) << ": "
+ << " [" << base::string16(session_id) << "]";
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+ std::string ascii_session_id = ToASCIIOrEmpty(session_id);
+
WebMediaPlayer::MediaKeyException e =
- CancelKeyRequestInternal(key_system, session_id);
- ReportMediaKeyExceptionToUMA("cancelKeyRequest", key_system, e);
+ CancelKeyRequestInternal(ascii_key_system, ascii_session_id);
+ ReportMediaKeyExceptionToUMA("cancelKeyRequest", ascii_key_system, e);
return e;
}
WebMediaPlayer::MediaKeyException
-WebMediaPlayerAndroid::CancelKeyRequestInternal(
- const WebString& key_system,
- const WebString& session_id) {
+WebMediaPlayerAndroid::CancelKeyRequestInternal(const std::string& key_system,
+ const std::string& session_id) {
if (!IsKeySystemSupported(key_system))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
- if (current_key_system_.isEmpty() || key_system != current_key_system_)
+ if (current_key_system_.empty() || key_system != current_key_system_)
return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
- decryptor_->CancelKeyRequest(session_id.utf8());
+ proxy_decryptor_->CancelKeyRequest(session_id);
return WebMediaPlayer::MediaKeyExceptionNoError;
}
+void WebMediaPlayerAndroid::setContentDecryptionModule(
+ blink::WebContentDecryptionModule* cdm) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+
+ // TODO(xhwang): Support setMediaKeys(0) if necessary: http://crbug.com/330324
+ if (!cdm)
+ return;
+
+ web_cdm_ = ToWebContentDecryptionModuleImpl(cdm);
+ if (!web_cdm_)
+ return;
+
+ if (!decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&decryptor_ready_cb_).Run(web_cdm_->GetDecryptor());
+
+ if (web_cdm_->GetCdmId() != RendererCdmManager::kInvalidCdmId)
+ player_manager_->SetCdm(player_id_, web_cdm_->GetCdmId());
+}
+
void WebMediaPlayerAndroid::OnKeyAdded(const std::string& session_id) {
EmeUMAHistogramCounts(current_key_system_, "KeyAdded", 1);
-#if defined(GOOGLE_TV)
- if (media_source_delegate_)
- media_source_delegate_->NotifyKeyAdded(current_key_system_.utf8());
-#endif // defined(GOOGLE_TV)
-
- client_->keyAdded(current_key_system_, WebString::fromUTF8(session_id));
+ client_->keyAdded(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id));
}
void WebMediaPlayerAndroid::OnKeyError(const std::string& session_id,
media::MediaKeys::KeyError error_code,
- int system_code) {
+ uint32 system_code) {
EmeUMAHistogramEnumeration(current_key_system_, "KeyError",
error_code, media::MediaKeys::kMaxKeyError);
+ unsigned short short_system_code = 0;
+ if (system_code > std::numeric_limits<unsigned short>::max()) {
+ LOG(WARNING) << "system_code exceeds unsigned short limit.";
+ short_system_code = std::numeric_limits<unsigned short>::max();
+ } else {
+ short_system_code = static_cast<unsigned short>(system_code);
+ }
+
client_->keyError(
- current_key_system_,
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
WebString::fromUTF8(session_id),
static_cast<blink::WebMediaPlayerClient::MediaKeyErrorCode>(error_code),
- system_code);
+ short_system_code);
}
void WebMediaPlayerAndroid::OnKeyMessage(const std::string& session_id,
const std::vector<uint8>& message,
- const std::string& destination_url) {
- const GURL destination_url_gurl(destination_url);
- DLOG_IF(WARNING, !destination_url.empty() && !destination_url_gurl.is_valid())
- << "Invalid URL in destination_url: " << destination_url;
+ const GURL& destination_url) {
+ DCHECK(destination_url.is_empty() || destination_url.is_valid());
- client_->keyMessage(current_key_system_,
- WebString::fromUTF8(session_id),
- message.empty() ? NULL : &message[0],
- message.size(),
- destination_url_gurl);
+ client_->keyMessage(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id),
+ message.empty() ? NULL : &message[0],
+ message.size(),
+ destination_url);
}
void WebMediaPlayerAndroid::OnMediaSourceOpened(
@@ -1404,10 +1485,11 @@ void WebMediaPlayerAndroid::OnMediaSourceOpened(
void WebMediaPlayerAndroid::OnNeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+
// Do not fire NeedKey event if encrypted media is not enabled.
- if (!blink::WebRuntimeFeatures::isEncryptedMediaEnabled() &&
- !blink::WebRuntimeFeatures::isPrefixedEncryptedMediaEnabled()) {
+ if (!blink::WebRuntimeFeatures::isPrefixedEncryptedMediaEnabled() &&
+ !blink::WebRuntimeFeatures::isEncryptedMediaEnabled()) {
return;
}
@@ -1418,52 +1500,57 @@ void WebMediaPlayerAndroid::OnNeedKey(const std::string& type,
init_data_type_ = type;
const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
- // TODO(xhwang): Drop |keySystem| and |sessionId| in keyNeeded() call.
- client_->keyNeeded(WebString(),
- WebString(),
- init_data_ptr,
- init_data.size());
-}
-
-#if defined(GOOGLE_TV)
-bool WebMediaPlayerAndroid::InjectMediaStream(
- MediaStreamClient* media_stream_client,
- media::Demuxer* demuxer,
- const base::Closure& destroy_demuxer_cb) {
- DCHECK(!demuxer);
- media_stream_client_ = media_stream_client;
- demuxer_ = demuxer;
- destroy_demuxer_cb_ = destroy_demuxer_cb;
- return true;
+ client_->keyNeeded(
+ WebString::fromUTF8(type), init_data_ptr, init_data.size());
}
-#endif
-void WebMediaPlayerAndroid::DoReleaseRemotePlaybackTexture(uint32 sync_point) {
+void WebMediaPlayerAndroid::SetDecryptorReadyCB(
+ const media::DecryptorReadyCB& decryptor_ready_cb) {
DCHECK(main_thread_checker_.CalledOnValidThread());
- DCHECK(remote_playback_texture_id_);
- blink::WebGraphicsContext3D* context =
- stream_texture_factory_->Context3d();
+ // Cancels the previous decryptor request.
+ if (decryptor_ready_cb.is_null()) {
+ if (!decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&decryptor_ready_cb_).Run(NULL);
+ return;
+ }
+
+ // TODO(xhwang): Support multiple decryptor notification request (e.g. from
+ // video and audio). The current implementation is okay for the current
+ // media pipeline since we initialize audio and video decoders in sequence.
+ // But WebMediaPlayerImpl should not depend on media pipeline's implementation
+ // detail.
+ DCHECK(decryptor_ready_cb_.is_null());
+
+ // Mixed use of prefixed and unprefixed EME APIs is disallowed by Blink.
+ DCHECK(!proxy_decryptor_ || !web_cdm_);
+
+ if (proxy_decryptor_) {
+ decryptor_ready_cb.Run(proxy_decryptor_->GetDecryptor());
+ return;
+ }
+
+ if (web_cdm_) {
+ decryptor_ready_cb.Run(web_cdm_->GetDecryptor());
+ return;
+ }
- if (sync_point)
- context->waitSyncPoint(sync_point);
- context->deleteTexture(remote_playback_texture_id_);
- remote_playback_texture_id_ = 0;
+ decryptor_ready_cb_ = decryptor_ready_cb;
}
void WebMediaPlayerAndroid::enterFullscreen() {
- if (manager_->CanEnterFullscreen(frame_)) {
- manager_->EnterFullscreen(player_id_, frame_);
+ if (player_manager_->CanEnterFullscreen(frame_)) {
+ player_manager_->EnterFullscreen(player_id_, frame_);
SetNeedsEstablishPeer(false);
}
}
void WebMediaPlayerAndroid::exitFullscreen() {
- manager_->ExitFullscreen(player_id_);
+ player_manager_->ExitFullscreen(player_id_);
}
bool WebMediaPlayerAndroid::canEnterFullscreen() const {
- return manager_->CanEnterFullscreen(frame_);
+ return player_manager_->CanEnterFullscreen(frame_);
}
} // namespace content
diff --git a/chromium/content/renderer/media/android/webmediaplayer_android.h b/chromium/content/renderer/media/android/webmediaplayer_android.h
index 40582ca1c8a..67d09e06708 100644
--- a/chromium/content/renderer/media/android/webmediaplayer_android.h
+++ b/chromium/content/renderer/media/android/webmediaplayer_android.h
@@ -13,13 +13,13 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "cc/layers/video_frame_provider.h"
#include "content/common/media/media_player_messages_enums_android.h"
+#include "content/public/renderer/render_frame_observer.h"
#include "content/renderer/media/android/media_info_loader.h"
#include "content/renderer/media/android/media_source_delegate.h"
-#include "content/renderer/media/android/stream_texture_factory_android.h"
+#include "content/renderer/media/android/stream_texture_factory.h"
#include "content/renderer/media/crypto/proxy_decryptor.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "media/base/android/media_player_android.h"
@@ -31,53 +31,53 @@
#include "third_party/WebKit/public/platform/WebURL.h"
#include "ui/gfx/rect_f.h"
-namespace media {
-class Demuxer;
-class MediaLog;
+namespace base {
+class MessageLoopProxy;
}
namespace blink {
+class WebContentDecryptionModule;
class WebFrame;
+class WebURL;
}
-namespace webkit {
-class WebLayerImpl;
+namespace gpu {
+struct MailboxHolder;
+}
+
+namespace media {
+class MediaLog;
}
namespace content {
-class WebMediaPlayerDelegate;
+class RendererCdmManager;
class RendererMediaPlayerManager;
-
-#if defined(GOOGLE_TV)
-class MediaStreamAudioRenderer;
-class MediaStreamClient;
-#endif
+class WebContentDecryptionModuleImpl;
+class WebLayerImpl;
+class WebMediaPlayerDelegate;
// This class implements blink::WebMediaPlayer by keeping the android
// media player in the browser process. It listens to all the status changes
// sent from the browser process and sends playback controls to the media
// player.
-class WebMediaPlayerAndroid
- : public blink::WebMediaPlayer,
- public cc::VideoFrameProvider,
- public base::MessageLoop::DestructionObserver,
- public base::SupportsWeakPtr<WebMediaPlayerAndroid> {
+class WebMediaPlayerAndroid : public blink::WebMediaPlayer,
+ public cc::VideoFrameProvider,
+ public RenderFrameObserver {
public:
- // Construct a WebMediaPlayerAndroid object. This class communicates
- // with the MediaPlayerAndroid object in the browser process through
- // |proxy|.
+ // Construct a WebMediaPlayerAndroid object. This class communicates with the
+ // MediaPlayerAndroid object in the browser process through |proxy|.
// TODO(qinmin): |frame| argument is used to determine whether the current
// player can enter fullscreen. This logic should probably be moved into
// blink, so that enterFullscreen() will not be called if another video is
// already in fullscreen.
- WebMediaPlayerAndroid(
- blink::WebFrame* frame,
- blink::WebMediaPlayerClient* client,
- base::WeakPtr<WebMediaPlayerDelegate> delegate,
- RendererMediaPlayerManager* manager,
- StreamTextureFactory* factory,
- const scoped_refptr<base::MessageLoopProxy>& media_loop,
- media::MediaLog* media_log);
+ WebMediaPlayerAndroid(blink::WebFrame* frame,
+ blink::WebMediaPlayerClient* client,
+ base::WeakPtr<WebMediaPlayerDelegate> delegate,
+ RendererMediaPlayerManager* player_manager,
+ RendererCdmManager* cdm_manager,
+ scoped_refptr<StreamTextureFactory> factory,
+ const scoped_refptr<base::MessageLoopProxy>& media_loop,
+ media::MediaLog* media_log);
virtual ~WebMediaPlayerAndroid();
// blink::WebMediaPlayer implementation.
@@ -88,20 +88,21 @@ class WebMediaPlayerAndroid
// Resource loading.
virtual void load(LoadType load_type,
const blink::WebURL& url,
- CORSMode cors_mode) OVERRIDE;
+ CORSMode cors_mode);
// Playback controls.
virtual void play();
virtual void pause();
- virtual void pause(bool is_media_related_action);
virtual void seek(double seconds);
- virtual bool supportsFullscreen() const;
virtual bool supportsSave() const;
virtual void setRate(double rate);
virtual void setVolume(double volume);
- virtual const blink::WebTimeRanges& buffered();
+ virtual blink::WebTimeRanges buffered() const;
virtual double maxTimeSeekable() const;
+ // Poster image, as defined in the <video> element.
+ virtual void setPoster(const blink::WebURL& poster) OVERRIDE;
+
// Methods for painting.
virtual void paint(blink::WebCanvas* canvas,
const blink::WebRect& rect,
@@ -127,9 +128,10 @@ class WebMediaPlayerAndroid
virtual bool paused() const;
virtual bool seeking() const;
virtual double duration() const;
+ virtual double timelineOffset() const;
virtual double currentTime() const;
- virtual bool didLoadingProgress() const;
+ virtual bool didLoadingProgress();
// Internal states of loading and network.
virtual blink::WebMediaPlayer::NetworkState networkState() const;
@@ -169,7 +171,7 @@ class WebMediaPlayerAndroid
void OnTimeUpdate(const base::TimeDelta& current_time);
// Functions called when media player status changes.
- void OnConnectedToRemoteDevice();
+ void OnConnectedToRemoteDevice(const std::string& remote_playback_message);
void OnDisconnectedFromRemoteDevice();
void OnDidEnterFullscreen();
void OnDidExitFullscreen();
@@ -185,58 +187,50 @@ class WebMediaPlayerAndroid
// However, the actual GlTexture is not released to keep the video screenshot.
virtual void ReleaseMediaResources();
- // Method inherited from DestructionObserver.
- virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
-
- // Detach the player from its manager.
- void Detach();
+ // RenderFrameObserver implementation.
+ virtual void OnDestruct() OVERRIDE;
#if defined(VIDEO_HOLE)
- // Retrieve geometry of the media player (i.e. location and size of the video
- // frame) if changed. Returns true only if the geometry has been changed since
- // the last call.
- bool RetrieveGeometryChange(gfx::RectF* rect);
+ // Calculate the boundary rectangle of the media player (i.e. location and
+ // size of the video frame).
+ // Returns true if the geometry has been changed since the last call.
+ bool UpdateBoundaryRectangle();
+
+ const gfx::RectF GetBoundaryRectangle();
#endif // defined(VIDEO_HOLE)
virtual MediaKeyException generateKeyRequest(
const blink::WebString& key_system,
const unsigned char* init_data,
- unsigned init_data_length) OVERRIDE;
+ unsigned init_data_length);
virtual MediaKeyException addKey(
const blink::WebString& key_system,
const unsigned char* key,
unsigned key_length,
const unsigned char* init_data,
unsigned init_data_length,
- const blink::WebString& session_id) OVERRIDE;
+ const blink::WebString& session_id);
virtual MediaKeyException cancelKeyRequest(
const blink::WebString& key_system,
- const blink::WebString& session_id) OVERRIDE;
+ const blink::WebString& session_id);
+ virtual void setContentDecryptionModule(
+ blink::WebContentDecryptionModule* cdm);
void OnKeyAdded(const std::string& session_id);
void OnKeyError(const std::string& session_id,
media::MediaKeys::KeyError error_code,
- int system_code);
+ uint32 system_code);
void OnKeyMessage(const std::string& session_id,
const std::vector<uint8>& message,
- const std::string& destination_url);
+ const GURL& destination_url);
void OnMediaSourceOpened(blink::WebMediaSource* web_media_source);
void OnNeedKey(const std::string& type,
const std::vector<uint8>& init_data);
-#if defined(GOOGLE_TV)
- bool InjectMediaStream(MediaStreamClient* media_stream_client,
- media::Demuxer* demuxer,
- const base::Closure& destroy_demuxer_cb);
-#endif
-
- // Can be called on any thread.
- static void OnReleaseRemotePlaybackTexture(
- const scoped_refptr<base::MessageLoopProxy>& main_loop,
- const base::WeakPtr<WebMediaPlayerAndroid>& player,
- uint32 sync_point);
+ // TODO(xhwang): Implement WebMediaPlayer::setContentDecryptionModule().
+ // See: http://crbug.com/224786
protected:
// Helper method to update the playing state.
@@ -248,7 +242,6 @@ class WebMediaPlayerAndroid
void TryCreateStreamTextureProxyIfNeeded();
void DoCreateStreamTexture();
-
// Helper method to reestablish the surface texture peer for android
// media player.
void EstablishSurfaceTexturePeer();
@@ -257,29 +250,32 @@ class WebMediaPlayerAndroid
void SetNeedsEstablishPeer(bool needs_establish_peer);
private:
- void DrawRemotePlaybackIcon();
+ void Pause(bool is_media_related_action);
+ void DrawRemotePlaybackText(const std::string& remote_playback_message);
void ReallocateVideoFrame();
void SetCurrentFrameInternal(scoped_refptr<media::VideoFrame>& frame);
void DidLoadMediaInfo(MediaInfoLoader::Status status);
- void DoReleaseRemotePlaybackTexture(uint32 sync_point);
-
- bool IsKeySystemSupported(const blink::WebString& key_system);
+ bool IsKeySystemSupported(const std::string& key_system);
// Actually do the work for generateKeyRequest/addKey so they can easily
// report results to UMA.
- MediaKeyException GenerateKeyRequestInternal(
- const blink::WebString& key_system,
- const unsigned char* init_data,
- unsigned init_data_length);
- MediaKeyException AddKeyInternal(const blink::WebString& key_system,
+ MediaKeyException GenerateKeyRequestInternal(const std::string& key_system,
+ const unsigned char* init_data,
+ unsigned init_data_length);
+ MediaKeyException AddKeyInternal(const std::string& key_system,
const unsigned char* key,
unsigned key_length,
const unsigned char* init_data,
unsigned init_data_length,
- const blink::WebString& session_id);
- MediaKeyException CancelKeyRequestInternal(
- const blink::WebString& key_system,
- const blink::WebString& session_id);
+ const std::string& session_id);
+ MediaKeyException CancelKeyRequestInternal(const std::string& key_system,
+ const std::string& session_id);
+
+ // Requests that this object notifies when a decryptor is ready through the
+ // |decryptor_ready_cb| provided.
+ // If |decryptor_ready_cb| is null, the existing callback will be fired with
+ // NULL immediately and reset.
+ void SetDecryptorReadyCB(const media::DecryptorReadyCB& decryptor_ready_cb);
blink::WebFrame* const frame_;
@@ -307,9 +303,6 @@ class WebMediaPlayerAndroid
base::ThreadChecker main_thread_checker_;
- // Message loop for main renderer thread.
- const scoped_refptr<base::MessageLoopProxy> main_loop_;
-
// Message loop for media thread.
const scoped_refptr<base::MessageLoopProxy> media_loop_;
@@ -334,29 +327,30 @@ class WebMediaPlayerAndroid
base::TimeDelta seek_time_;
// Whether loading has progressed since the last call to didLoadingProgress.
- mutable bool did_loading_progress_;
+ bool did_loading_progress_;
+
+ // Manages this object and delegates player calls to the browser process.
+ // Owned by RenderFrameImpl.
+ RendererMediaPlayerManager* player_manager_;
- // Manager for managing this object and for delegating method calls on
- // Render Thread.
- RendererMediaPlayerManager* manager_;
+ // Delegates EME calls to the browser process. Owned by RenderFrameImpl.
+ // TODO(xhwang): Remove |cdm_manager_| when prefixed EME is deprecated. See
+ // http://crbug.com/249976
+ RendererCdmManager* cdm_manager_;
- // Player ID assigned by the |manager_|.
+ // Player ID assigned by the |player_manager_|.
int player_id_;
// Current player states.
blink::WebMediaPlayer::NetworkState network_state_;
blink::WebMediaPlayer::ReadyState ready_state_;
- // GL texture ID used to show the remote playback icon.
- unsigned int remote_playback_texture_id_;
-
// GL texture ID allocated to the video.
unsigned int texture_id_;
// GL texture mailbox for texture_id_ to provide in the VideoFrame, and sync
// point for when the mailbox was produced.
gpu::Mailbox texture_mailbox_;
- unsigned int texture_mailbox_sync_point_;
// Stream texture ID allocated to the video.
unsigned int stream_id_;
@@ -381,38 +375,31 @@ class WebMediaPlayerAndroid
bool has_media_info_;
// Object for allocating stream textures.
- scoped_ptr<StreamTextureFactory> stream_texture_factory_;
+ scoped_refptr<StreamTextureFactory> stream_texture_factory_;
// Object for calling back the compositor thread to repaint the video when a
// frame available. It should be initialized on the compositor thread.
ScopedStreamTextureProxy stream_texture_proxy_;
// Whether media player needs external surface.
+ // Only used for the VIDEO_HOLE logic.
bool needs_external_surface_;
// A pointer back to the compositor to inform it about state changes. This is
// not NULL while the compositor is actively using this webmediaplayer.
cc::VideoFrameProvider::Client* video_frame_provider_client_;
- scoped_ptr<webkit::WebLayerImpl> video_weblayer_;
+ scoped_ptr<WebLayerImpl> video_weblayer_;
#if defined(VIDEO_HOLE)
// A rectangle represents the geometry of video frame, when computed last
// time.
gfx::RectF last_computed_rect_;
-#endif // defined(VIDEO_HOLE)
-#if defined(GOOGLE_TV)
- // Pixel threshold for external surface usage. Negative value means that the
- // threshold is not defined, so that external surface is never used.
- int external_surface_threshold_;
-
- // Media Stream related fields.
- media::Demuxer* demuxer_;
- base::Closure destroy_demuxer_cb_;
- scoped_refptr<MediaStreamAudioRenderer> audio_renderer_;
- MediaStreamClient* media_stream_client_;
-#endif
+ // Whether to use the video overlay for all embedded video.
+ // True only for testing.
+ bool force_use_overlay_embedded_video_;
+#endif // defined(VIDEO_HOLE)
scoped_ptr<MediaSourceDelegate,
MediaSourceDelegate::Destroyer> media_source_delegate_;
@@ -437,15 +424,26 @@ class WebMediaPlayerAndroid
// The currently selected key system. Empty string means that no key system
// has been selected.
- blink::WebString current_key_system_;
+ std::string current_key_system_;
// Temporary for EME v0.1. In the future the init data type should be passed
// through GenerateKeyRequest() directly from WebKit.
std::string init_data_type_;
- // The decryptor that manages decryption keys and decrypts encrypted frames.
- scoped_ptr<ProxyDecryptor> decryptor_;
+ // Manages decryption keys and decrypts encrypted frames.
+ scoped_ptr<ProxyDecryptor> proxy_decryptor_;
+
+ // Non-owned pointer to the CDM. Updated via calls to
+ // setContentDecryptionModule().
+ WebContentDecryptionModuleImpl* web_cdm_;
+
+ // This is only Used by Clear Key key system implementation, where a renderer
+ // side CDM will be used. This is similar to WebMediaPlayerImpl. For other key
+ // systems, a browser side CDM will be used and we set CDM by calling
+ // player_manager_->SetCdm() directly.
+ media::DecryptorReadyCB decryptor_ready_cb_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<WebMediaPlayerAndroid> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(WebMediaPlayerAndroid);
diff --git a/chromium/content/renderer/media/audio_decoder.cc b/chromium/content/renderer/media/audio_decoder.cc
index dd9f1fafa0d..c413bb08020 100644
--- a/chromium/content/renderer/media/audio_decoder.cc
+++ b/chromium/content/renderer/media/audio_decoder.cc
@@ -26,7 +26,7 @@ namespace content {
// Decode in-memory audio file data.
bool DecodeAudioFileData(
blink::WebAudioBus* destination_bus,
- const char* data, size_t data_size, double sample_rate) {
+ const char* data, size_t data_size) {
DCHECK(destination_bus);
if (!destination_bus)
return false;
@@ -41,7 +41,7 @@ bool DecodeAudioFileData(
size_t number_of_channels = reader.channels();
double file_sample_rate = reader.sample_rate();
- size_t number_of_frames = static_cast<size_t>(reader.number_of_frames());
+ size_t number_of_frames = static_cast<size_t>(reader.GetNumberOfFrames());
// Apply sanity checks to make sure crazy values aren't coming out of
// FFmpeg.
diff --git a/chromium/content/renderer/media/audio_decoder.h b/chromium/content/renderer/media/audio_decoder.h
index 9c14ed9f5d5..454a80a1caf 100644
--- a/chromium/content/renderer/media/audio_decoder.h
+++ b/chromium/content/renderer/media/audio_decoder.h
@@ -13,7 +13,7 @@ namespace content {
// Decode in-memory audio file data.
bool DecodeAudioFileData(blink::WebAudioBus* destination_bus, const char* data,
- size_t data_size, double sample_rate);
+ size_t data_size);
} // namespace content
diff --git a/chromium/content/renderer/media/audio_device_factory.cc b/chromium/content/renderer/media/audio_device_factory.cc
index 927a85f319a..bc903be7146 100644
--- a/chromium/content/renderer/media/audio_device_factory.cc
+++ b/chromium/content/renderer/media/audio_device_factory.cc
@@ -17,7 +17,7 @@ AudioDeviceFactory* AudioDeviceFactory::factory_ = NULL;
// static
scoped_refptr<media::AudioOutputDevice> AudioDeviceFactory::NewOutputDevice(
- int render_view_id) {
+ int render_view_id, int render_frame_id) {
if (factory_) {
media::AudioOutputDevice* const device =
factory_->CreateOutputDevice(render_view_id);
@@ -27,7 +27,8 @@ scoped_refptr<media::AudioOutputDevice> AudioDeviceFactory::NewOutputDevice(
AudioMessageFilter* const filter = AudioMessageFilter::Get();
return new media::AudioOutputDevice(
- filter->CreateAudioOutputIPC(render_view_id), filter->io_message_loop());
+ filter->CreateAudioOutputIPC(render_view_id, render_frame_id),
+ filter->io_message_loop());
}
// static
diff --git a/chromium/content/renderer/media/audio_device_factory.h b/chromium/content/renderer/media/audio_device_factory.h
index 46bf5f96099..3865753d8d0 100644
--- a/chromium/content/renderer/media/audio_device_factory.h
+++ b/chromium/content/renderer/media/audio_device_factory.h
@@ -7,6 +7,7 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
+#include "content/common/content_export.h"
namespace media {
class AudioInputDevice;
@@ -18,13 +19,13 @@ namespace content {
// A factory for creating AudioOutputDevices and AudioInputDevices. There is a
// global factory function that can be installed for the purposes of testing to
// provide specialized implementations.
-class AudioDeviceFactory {
+class CONTENT_EXPORT AudioDeviceFactory {
public:
// Creates an AudioOutputDevice using the currently registered factory.
- // |render_view_id| refers to the render view containing the entity producing
- // the audio.
+ // |render_view_id| and |render_frame_id| refer to the render view and render
+ // frame containing the entity producing the audio.
static scoped_refptr<media::AudioOutputDevice> NewOutputDevice(
- int render_view_id);
+ int render_view_id, int render_frame_id);
// Creates an AudioInputDevice using the currently registered factory.
// |render_view_id| refers to the render view containing the entity consuming
diff --git a/chromium/content/renderer/media/audio_input_message_filter.cc b/chromium/content/renderer/media/audio_input_message_filter.cc
index a2696f085e6..412196cb3c1 100644
--- a/chromium/content/renderer/media/audio_input_message_filter.cc
+++ b/chromium/content/renderer/media/audio_input_message_filter.cc
@@ -10,6 +10,7 @@
#include "content/common/media/audio_messages.h"
#include "content/renderer/media/webrtc_logging.h"
#include "ipc/ipc_logging.h"
+#include "ipc/ipc_sender.h"
namespace content {
@@ -44,7 +45,7 @@ AudioInputMessageFilter* AudioInputMessageFilter::g_filter = NULL;
AudioInputMessageFilter::AudioInputMessageFilter(
const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
- : channel_(NULL),
+ : sender_(NULL),
io_message_loop_(io_message_loop) {
DCHECK(!g_filter);
g_filter = this;
@@ -62,10 +63,10 @@ AudioInputMessageFilter* AudioInputMessageFilter::Get() {
void AudioInputMessageFilter::Send(IPC::Message* message) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- if (!channel_) {
+ if (!sender_) {
delete message;
} else {
- channel_->Send(message);
+ sender_->Send(message);
}
}
@@ -83,11 +84,11 @@ bool AudioInputMessageFilter::OnMessageReceived(const IPC::Message& message) {
return handled;
}
-void AudioInputMessageFilter::OnFilterAdded(IPC::Channel* channel) {
+void AudioInputMessageFilter::OnFilterAdded(IPC::Sender* sender) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- // Captures the channel for IPC.
- channel_ = channel;
+ // Captures the sender for IPC.
+ sender_ = sender;
}
void AudioInputMessageFilter::OnFilterRemoved() {
@@ -100,7 +101,7 @@ void AudioInputMessageFilter::OnFilterRemoved() {
void AudioInputMessageFilter::OnChannelClosing() {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- channel_ = NULL;
+ sender_ = NULL;
DLOG_IF(WARNING, !delegates_.IsEmpty())
<< "Not all audio devices have been closed.";
diff --git a/chromium/content/renderer/media/audio_input_message_filter.h b/chromium/content/renderer/media/audio_input_message_filter.h
index 023d0902340..1107463eb96 100644
--- a/chromium/content/renderer/media/audio_input_message_filter.h
+++ b/chromium/content/renderer/media/audio_input_message_filter.h
@@ -10,7 +10,7 @@
#include "base/memory/shared_memory.h"
#include "base/sync_socket.h"
#include "content/common/content_export.h"
-#include "ipc/ipc_channel_proxy.h"
+#include "ipc/message_filter.h"
#include "media/audio/audio_input_ipc.h"
namespace base {
@@ -23,8 +23,7 @@ namespace content {
// audio capturers. Created on render thread, AudioMessageFilter is operated on
// IO thread (secondary thread of render process), it intercepts audio messages
// and process them on IO thread since these messages are time critical.
-class CONTENT_EXPORT AudioInputMessageFilter
- : public IPC::ChannelProxy::MessageFilter {
+class CONTENT_EXPORT AudioInputMessageFilter : public IPC::MessageFilter {
public:
explicit AudioInputMessageFilter(
const scoped_refptr<base::MessageLoopProxy>& io_message_loop);
@@ -53,9 +52,9 @@ class CONTENT_EXPORT AudioInputMessageFilter
// Sends an IPC message using |channel_|.
void Send(IPC::Message* message);
- // IPC::ChannelProxy::MessageFilter override. Called on |io_message_loop_|.
+ // IPC::MessageFilter override. Called on |io_message_loop_|.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
- virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE;
+ virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE;
virtual void OnFilterRemoved() OVERRIDE;
virtual void OnChannelClosing() OVERRIDE;
@@ -81,8 +80,8 @@ class CONTENT_EXPORT AudioInputMessageFilter
// A map of stream ids to delegates.
IDMap<media::AudioInputIPCDelegate> delegates_;
- // IPC channel for Send(), must only be accesed on |io_message_loop_|.
- IPC::Channel* channel_;
+ // IPC sender for Send(), must only be accesed on |io_message_loop_|.
+ IPC::Sender* sender_;
// Message loop on which IPC calls are driven.
const scoped_refptr<base::MessageLoopProxy> io_message_loop_;
diff --git a/chromium/content/renderer/media/audio_message_filter.cc b/chromium/content/renderer/media/audio_message_filter.cc
index 27d0821342f..fde42fb18e2 100644
--- a/chromium/content/renderer/media/audio_message_filter.cc
+++ b/chromium/content/renderer/media/audio_message_filter.cc
@@ -22,7 +22,8 @@ class AudioMessageFilter::AudioOutputIPCImpl
: public NON_EXPORTED_BASE(media::AudioOutputIPC) {
public:
AudioOutputIPCImpl(const scoped_refptr<AudioMessageFilter>& filter,
- int render_view_id);
+ int render_view_id,
+ int render_frame_id);
virtual ~AudioOutputIPCImpl();
// media::AudioOutputIPC implementation.
@@ -37,6 +38,7 @@ class AudioMessageFilter::AudioOutputIPCImpl
private:
const scoped_refptr<AudioMessageFilter> filter_;
const int render_view_id_;
+ const int render_frame_id_;
int stream_id_;
};
@@ -44,7 +46,7 @@ AudioMessageFilter* AudioMessageFilter::g_filter = NULL;
AudioMessageFilter::AudioMessageFilter(
const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
- : channel_(NULL),
+ : sender_(NULL),
audio_hardware_config_(NULL),
io_message_loop_(io_message_loop) {
DCHECK(!g_filter);
@@ -62,18 +64,21 @@ AudioMessageFilter* AudioMessageFilter::Get() {
}
AudioMessageFilter::AudioOutputIPCImpl::AudioOutputIPCImpl(
- const scoped_refptr<AudioMessageFilter>& filter, int render_view_id)
+ const scoped_refptr<AudioMessageFilter>& filter,
+ int render_view_id,
+ int render_frame_id)
: filter_(filter),
render_view_id_(render_view_id),
+ render_frame_id_(render_frame_id),
stream_id_(kStreamIDNotSet) {}
AudioMessageFilter::AudioOutputIPCImpl::~AudioOutputIPCImpl() {}
scoped_ptr<media::AudioOutputIPC> AudioMessageFilter::CreateAudioOutputIPC(
- int render_view_id) {
+ int render_view_id, int render_frame_id) {
DCHECK_GT(render_view_id, 0);
return scoped_ptr<media::AudioOutputIPC>(
- new AudioOutputIPCImpl(this, render_view_id));
+ new AudioOutputIPCImpl(this, render_view_id, render_frame_id));
}
void AudioMessageFilter::AudioOutputIPCImpl::CreateStream(
@@ -85,7 +90,7 @@ void AudioMessageFilter::AudioOutputIPCImpl::CreateStream(
DCHECK_EQ(stream_id_, kStreamIDNotSet);
stream_id_ = filter_->delegates_.Add(delegate);
filter_->Send(new AudioHostMsg_CreateStream(
- stream_id_, render_view_id_, session_id, params));
+ stream_id_, render_view_id_, render_frame_id_, session_id, params));
}
void AudioMessageFilter::AudioOutputIPCImpl::PlayStream() {
@@ -113,10 +118,10 @@ void AudioMessageFilter::AudioOutputIPCImpl::SetVolume(double volume) {
void AudioMessageFilter::Send(IPC::Message* message) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- if (!channel_) {
+ if (!sender_) {
delete message;
} else {
- channel_->Send(message);
+ sender_->Send(message);
}
}
@@ -132,9 +137,9 @@ bool AudioMessageFilter::OnMessageReceived(const IPC::Message& message) {
return handled;
}
-void AudioMessageFilter::OnFilterAdded(IPC::Channel* channel) {
+void AudioMessageFilter::OnFilterAdded(IPC::Sender* sender) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- channel_ = channel;
+ sender_ = sender;
}
void AudioMessageFilter::OnFilterRemoved() {
@@ -147,7 +152,7 @@ void AudioMessageFilter::OnFilterRemoved() {
void AudioMessageFilter::OnChannelClosing() {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- channel_ = NULL;
+ sender_ = NULL;
DLOG_IF(WARNING, !delegates_.IsEmpty())
<< "Not all audio devices have been closed.";
@@ -217,9 +222,7 @@ void AudioMessageFilter::OnOutputDeviceChanged(int stream_id,
// Ignore the message if an audio hardware config hasn't been created; this
// can occur if the renderer is using the high latency audio path.
- // TODO(dalecurtis): After http://crbug.com/173435 is fixed, convert to CHECK.
- if (!audio_hardware_config_)
- return;
+ CHECK(audio_hardware_config_);
// TODO(crogers): fix OnOutputDeviceChanged() to pass AudioParameters.
media::ChannelLayout channel_layout =
diff --git a/chromium/content/renderer/media/audio_message_filter.h b/chromium/content/renderer/media/audio_message_filter.h
index 9a1597a3444..ed4d5b261de 100644
--- a/chromium/content/renderer/media/audio_message_filter.h
+++ b/chromium/content/renderer/media/audio_message_filter.h
@@ -12,7 +12,7 @@
#include "base/sync_socket.h"
#include "base/synchronization/lock.h"
#include "content/common/content_export.h"
-#include "ipc/ipc_channel_proxy.h"
+#include "ipc/message_filter.h"
#include "media/audio/audio_output_ipc.h"
#include "media/base/audio_hardware_config.h"
@@ -26,8 +26,7 @@ namespace content {
// renderers. Created on render thread, AudioMessageFilter is operated on
// IO thread (secondary thread of render process) it intercepts audio messages
// and process them on IO thread since these messages are time critical.
-class CONTENT_EXPORT AudioMessageFilter
- : public IPC::ChannelProxy::MessageFilter {
+class CONTENT_EXPORT AudioMessageFilter : public IPC::MessageFilter {
public:
explicit AudioMessageFilter(
const scoped_refptr<base::MessageLoopProxy>& io_message_loop);
@@ -35,15 +34,18 @@ class CONTENT_EXPORT AudioMessageFilter
// Getter for the one AudioMessageFilter object.
static AudioMessageFilter* Get();
- // Create an AudioOutputIPC to be owned by one delegate. |render_view_id| is
- // the render view containing the entity producing the audio.
+ // Create an AudioOutputIPC to be owned by one delegate. |render_view_id| and
+ // |render_frame_id| are the render view and render frame containing the
+ // entity producing the audio.
+ // TODO(jam): remove render_view_id
//
// The returned object is not thread-safe, and must be used on
// |io_message_loop|.
- scoped_ptr<media::AudioOutputIPC> CreateAudioOutputIPC(int render_view_id);
+ scoped_ptr<media::AudioOutputIPC> CreateAudioOutputIPC(int render_view_id,
+ int render_frame_id);
// When set, AudioMessageFilter will update the AudioHardwareConfig with new
- // configuration values as recieved by OnOutputDeviceChanged(). The provided
+ // configuration values as received by OnOutputDeviceChanged(). The provided
// |config| must outlive AudioMessageFilter.
void SetAudioHardwareConfig(media::AudioHardwareConfig* config);
@@ -63,12 +65,12 @@ class CONTENT_EXPORT AudioMessageFilter
// stream_id and the source render_view_id.
class AudioOutputIPCImpl;
- // Sends an IPC message using |channel_|.
+ // Sends an IPC message using |sender_|.
void Send(IPC::Message* message);
- // IPC::ChannelProxy::MessageFilter override. Called on |io_message_loop|.
+ // IPC::MessageFilter override. Called on |io_message_loop|.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
- virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE;
+ virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE;
virtual void OnFilterRemoved() OVERRIDE;
virtual void OnChannelClosing() OVERRIDE;
@@ -90,8 +92,8 @@ class CONTENT_EXPORT AudioMessageFilter
void OnOutputDeviceChanged(int stream_id, int new_buffer_size,
int new_sample_rate);
- // IPC channel for Send(); must only be accesed on |io_message_loop_|.
- IPC::Channel* channel_;
+ // IPC sender for Send(); must only be accesed on |io_message_loop_|.
+ IPC::Sender* sender_;
// A map of stream ids to delegates; must only be accessed on
// |io_message_loop_|.
diff --git a/chromium/content/renderer/media/audio_message_filter_unittest.cc b/chromium/content/renderer/media/audio_message_filter_unittest.cc
index 46460dc3917..cdd82888506 100644
--- a/chromium/content/renderer/media/audio_message_filter_unittest.cc
+++ b/chromium/content/renderer/media/audio_message_filter_unittest.cc
@@ -12,6 +12,7 @@ namespace content {
namespace {
const int kRenderViewId = 1;
+const int kRenderFrameId = 2;
class MockAudioDelegate : public media::AudioOutputIPCDelegate {
public:
@@ -71,14 +72,14 @@ class MockAudioDelegate : public media::AudioOutputIPCDelegate {
} // namespace
TEST(AudioMessageFilterTest, Basic) {
- base::MessageLoop message_loop(base::MessageLoop::TYPE_IO);
+ base::MessageLoopForIO message_loop;
scoped_refptr<AudioMessageFilter> filter(new AudioMessageFilter(
message_loop.message_loop_proxy()));
MockAudioDelegate delegate;
const scoped_ptr<media::AudioOutputIPC> ipc =
- filter->CreateAudioOutputIPC(kRenderViewId);
+ filter->CreateAudioOutputIPC(kRenderViewId, kRenderFrameId);
static const int kSessionId = 0;
ipc->CreateStream(&delegate, media::AudioParameters(), kSessionId);
static const int kStreamId = 1;
@@ -118,7 +119,7 @@ TEST(AudioMessageFilterTest, Basic) {
}
TEST(AudioMessageFilterTest, Delegates) {
- base::MessageLoop message_loop(base::MessageLoop::TYPE_IO);
+ base::MessageLoopForIO message_loop;
scoped_refptr<AudioMessageFilter> filter(new AudioMessageFilter(
message_loop.message_loop_proxy()));
@@ -126,9 +127,9 @@ TEST(AudioMessageFilterTest, Delegates) {
MockAudioDelegate delegate1;
MockAudioDelegate delegate2;
const scoped_ptr<media::AudioOutputIPC> ipc1 =
- filter->CreateAudioOutputIPC(kRenderViewId);
+ filter->CreateAudioOutputIPC(kRenderViewId, kRenderFrameId);
const scoped_ptr<media::AudioOutputIPC> ipc2 =
- filter->CreateAudioOutputIPC(kRenderViewId);
+ filter->CreateAudioOutputIPC(kRenderViewId, kRenderFrameId);
static const int kSessionId = 0;
ipc1->CreateStream(&delegate1, media::AudioParameters(), kSessionId);
ipc2->CreateStream(&delegate2, media::AudioParameters(), kSessionId);
diff --git a/chromium/content/renderer/media/audio_renderer_mixer_manager.cc b/chromium/content/renderer/media/audio_renderer_mixer_manager.cc
index 0044d25110f..935fe766f58 100644
--- a/chromium/content/renderer/media/audio_renderer_mixer_manager.cc
+++ b/chromium/content/renderer/media/audio_renderer_mixer_manager.cc
@@ -25,11 +25,12 @@ AudioRendererMixerManager::~AudioRendererMixerManager() {
}
media::AudioRendererMixerInput* AudioRendererMixerManager::CreateInput(
- int source_render_view_id) {
+ int source_render_view_id, int source_render_frame_id) {
return new media::AudioRendererMixerInput(
base::Bind(
&AudioRendererMixerManager::GetMixer, base::Unretained(this),
- source_render_view_id),
+ source_render_view_id,
+ source_render_frame_id),
base::Bind(
&AudioRendererMixerManager::RemoveMixer, base::Unretained(this),
source_render_view_id));
@@ -42,6 +43,7 @@ void AudioRendererMixerManager::SetAudioRendererSinkForTesting(
media::AudioRendererMixer* AudioRendererMixerManager::GetMixer(
int source_render_view_id,
+ int source_render_frame_id,
const media::AudioParameters& params) {
const MixerKey key(source_render_view_id, params);
base::AutoLock auto_lock(mixers_lock_);
@@ -65,7 +67,7 @@ media::AudioRendererMixer* AudioRendererMixerManager::GetMixer(
// know that works well for WebAudio and WebRTC.
media::AudioParameters output_params(
media::AudioParameters::AUDIO_PCM_LOW_LATENCY, params.channel_layout(),
- sample_rate, 16, hardware_config_->GetOutputBufferSize());
+ sample_rate, 16, hardware_config_->GetHighLatencyBufferSize());
// If we've created invalid output parameters, simply pass on the input params
// and let the browser side handle automatic fallback.
@@ -79,7 +81,7 @@ media::AudioRendererMixer* AudioRendererMixerManager::GetMixer(
} else {
mixer = new media::AudioRendererMixer(
params, output_params, AudioDeviceFactory::NewOutputDevice(
- source_render_view_id));
+ source_render_view_id, source_render_frame_id));
}
AudioRendererMixerReference mixer_reference = { mixer, 1 };
diff --git a/chromium/content/renderer/media/audio_renderer_mixer_manager.h b/chromium/content/renderer/media/audio_renderer_mixer_manager.h
index 10901c1cf22..0a76b5dc394 100644
--- a/chromium/content/renderer/media/audio_renderer_mixer_manager.h
+++ b/chromium/content/renderer/media/audio_renderer_mixer_manager.h
@@ -46,13 +46,16 @@ class CONTENT_EXPORT AudioRendererMixerManager {
// Creates an AudioRendererMixerInput with the proper callbacks necessary to
// retrieve an AudioRendererMixer instance from AudioRendererMixerManager.
// |source_render_view_id| refers to the RenderView containing the entity
- // rendering the audio. Caller must ensure AudioRendererMixerManager outlives
- // the returned input.
- media::AudioRendererMixerInput* CreateInput(int source_render_view_id);
+ // rendering the audio. |source_render_frame_id| refers to the RenderFrame
+ // containing the entity rendering the audio. Caller must ensure
+ // AudioRendererMixerManager outlives the returned input.
+ media::AudioRendererMixerInput* CreateInput(int source_render_view_id,
+ int source_render_frame_id);
// Returns a mixer instance based on AudioParameters; an existing one if one
// with the provided AudioParameters exists or a new one if not.
media::AudioRendererMixer* GetMixer(int source_render_view_id,
+ int source_render_frame_id,
const media::AudioParameters& params);
// Remove a mixer instance given a mixer if the only other reference is held
diff --git a/chromium/content/renderer/media/audio_renderer_mixer_manager_unittest.cc b/chromium/content/renderer/media/audio_renderer_mixer_manager_unittest.cc
index 40b0c8ca21d..cb7293e6c97 100644
--- a/chromium/content/renderer/media/audio_renderer_mixer_manager_unittest.cc
+++ b/chromium/content/renderer/media/audio_renderer_mixer_manager_unittest.cc
@@ -6,6 +6,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "content/renderer/media/audio_renderer_mixer_manager.h"
+#include "ipc/ipc_message.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_hardware_config.h"
#include "media/base/audio_renderer_mixer.h"
@@ -23,7 +24,9 @@ static const int kBufferSize = 8192;
static const media::ChannelLayout kChannelLayout = media::CHANNEL_LAYOUT_STEREO;
static const int kRenderViewId = 123;
+static const int kRenderFrameId = 124;
static const int kAnotherRenderViewId = 456;
+static const int kAnotherRenderFrameId = 678;
using media::AudioParameters;
@@ -49,7 +52,7 @@ class AudioRendererMixerManagerTest : public testing::Test {
media::AudioRendererMixer* GetMixer(int source_render_view_id,
const media::AudioParameters& params) {
- return manager_->GetMixer(source_render_view_id, params);
+ return manager_->GetMixer(source_render_view_id, MSG_ROUTING_NONE, params);
}
void RemoveMixer(int source_render_view_id,
@@ -130,25 +133,29 @@ TEST_F(AudioRendererMixerManagerTest, CreateInput) {
// Create two mixer inputs and ensure this doesn't instantiate any mixers yet.
EXPECT_EQ(mixer_count(), 0);
+ media::FakeAudioRenderCallback callback(0);
scoped_refptr<media::AudioRendererMixerInput> input(
- manager_->CreateInput(kRenderViewId));
+ manager_->CreateInput(kRenderViewId, kRenderFrameId));
+ input->Initialize(params, &callback);
EXPECT_EQ(mixer_count(), 0);
+ media::FakeAudioRenderCallback another_callback(1);
scoped_refptr<media::AudioRendererMixerInput> another_input(
- manager_->CreateInput(kAnotherRenderViewId));
+ manager_->CreateInput(kAnotherRenderViewId, kAnotherRenderFrameId));
+ another_input->Initialize(params, &another_callback);
EXPECT_EQ(mixer_count(), 0);
// Implicitly test that AudioRendererMixerInput was provided with the expected
// callbacks needed to acquire an AudioRendererMixer and remove it.
- media::FakeAudioRenderCallback callback(0);
- input->Initialize(params, &callback);
+ input->Start();
EXPECT_EQ(mixer_count(), 1);
- media::FakeAudioRenderCallback another_callback(1);
- another_input->Initialize(params, &another_callback);
+ another_input->Start();
EXPECT_EQ(mixer_count(), 2);
// Destroying the inputs should destroy the mixers.
+ input->Stop();
input = NULL;
EXPECT_EQ(mixer_count(), 1);
+ another_input->Stop();
another_input = NULL;
EXPECT_EQ(mixer_count(), 0);
}
diff --git a/chromium/content/renderer/media/buffered_data_source.cc b/chromium/content/renderer/media/buffered_data_source.cc
index 1992b706ad4..f4a3094684a 100644
--- a/chromium/content/renderer/media/buffered_data_source.cc
+++ b/chromium/content/renderer/media/buffered_data_source.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "content/public/common/url_constants.h"
#include "media/base/media_log.h"
#include "net/base/net_errors.h"
@@ -78,13 +79,16 @@ void BufferedDataSource::ReadOperation::Run(
}
BufferedDataSource::BufferedDataSource(
+ const GURL& url,
+ BufferedResourceLoader::CORSMode cors_mode,
const scoped_refptr<base::MessageLoopProxy>& render_loop,
WebFrame* frame,
media::MediaLog* media_log,
+ BufferedDataSourceHost* host,
const DownloadingCB& downloading_cb)
- : cors_mode_(BufferedResourceLoader::kUnspecified),
+ : url_(url),
+ cors_mode_(cors_mode),
total_bytes_(kPositionNotSpecified),
- assume_fully_buffered_(false),
streaming_(false),
frame_(frame),
intermediate_read_buffer_(new uint8[kInitialReadBufferSize]),
@@ -96,10 +100,11 @@ BufferedDataSource::BufferedDataSource(
bitrate_(0),
playback_rate_(0.0),
media_log_(media_log),
+ host_(host),
downloading_cb_(downloading_cb),
weak_factory_(this) {
+ DCHECK(host_);
DCHECK(!downloading_cb_.is_null());
- weak_this_ = weak_factory_.GetWeakPtr();
}
BufferedDataSource::~BufferedDataSource() {}
@@ -125,28 +130,14 @@ BufferedResourceLoader* BufferedDataSource::CreateResourceLoader(
media_log_.get());
}
-void BufferedDataSource::set_host(media::DataSourceHost* host) {
- DataSource::set_host(host);
-
- if (loader_) {
- base::AutoLock auto_lock(lock_);
- UpdateHostState_Locked();
- }
-}
-
-void BufferedDataSource::Initialize(
- const GURL& url,
- BufferedResourceLoader::CORSMode cors_mode,
- const InitializeCB& init_cb) {
+void BufferedDataSource::Initialize(const InitializeCB& init_cb) {
DCHECK(render_loop_->BelongsToCurrentThread());
DCHECK(!init_cb.is_null());
DCHECK(!loader_.get());
- url_ = url;
- cors_mode_ = cors_mode;
init_cb_ = init_cb;
- if (url_.SchemeIs(kHttpScheme) || url_.SchemeIs(kHttpsScheme)) {
+ if (url_.SchemeIsHTTPOrHTTPS()) {
// Do an unbounded range request starting at the beginning. If the server
// responds with 200 instead of 206 we'll fall back into a streaming mode.
loader_.reset(CreateResourceLoader(0, kPositionNotSpecified));
@@ -156,13 +147,13 @@ void BufferedDataSource::Initialize(
// we won't be served HTTP headers.
loader_.reset(CreateResourceLoader(kPositionNotSpecified,
kPositionNotSpecified));
- assume_fully_buffered_ = true;
}
+ base::WeakPtr<BufferedDataSource> weak_this = weak_factory_.GetWeakPtr();
loader_->Start(
- base::Bind(&BufferedDataSource::StartCallback, weak_this_),
- base::Bind(&BufferedDataSource::LoadingStateChangedCallback, weak_this_),
- base::Bind(&BufferedDataSource::ProgressCallback, weak_this_),
+ base::Bind(&BufferedDataSource::StartCallback, weak_this),
+ base::Bind(&BufferedDataSource::LoadingStateChangedCallback, weak_this),
+ base::Bind(&BufferedDataSource::ProgressCallback, weak_this),
frame_);
}
@@ -223,13 +214,16 @@ void BufferedDataSource::Stop(const base::Closure& closure) {
}
closure.Run();
- render_loop_->PostTask(FROM_HERE,
- base::Bind(&BufferedDataSource::StopLoader, weak_this_));
+ render_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&BufferedDataSource::StopLoader, weak_factory_.GetWeakPtr()));
}
void BufferedDataSource::SetBitrate(int bitrate) {
- render_loop_->PostTask(FROM_HERE, base::Bind(
- &BufferedDataSource::SetBitrateTask, weak_this_, bitrate));
+ render_loop_->PostTask(FROM_HERE,
+ base::Bind(&BufferedDataSource::SetBitrateTask,
+ weak_factory_.GetWeakPtr(),
+ bitrate));
}
void BufferedDataSource::Read(
@@ -250,8 +244,9 @@ void BufferedDataSource::Read(
read_op_.reset(new ReadOperation(position, size, data, read_cb));
}
- render_loop_->PostTask(FROM_HERE, base::Bind(
- &BufferedDataSource::ReadTask, weak_this_));
+ render_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&BufferedDataSource::ReadTask, weak_factory_.GetWeakPtr()));
}
bool BufferedDataSource::GetSize(int64* size_out) {
@@ -326,9 +321,11 @@ void BufferedDataSource::ReadInternal() {
}
// Perform the actual read with BufferedResourceLoader.
- loader_->Read(
- position, size, intermediate_read_buffer_.get(),
- base::Bind(&BufferedDataSource::ReadCallback, weak_this_));
+ loader_->Read(position,
+ size,
+ intermediate_read_buffer_.get(),
+ base::Bind(&BufferedDataSource::ReadCallback,
+ weak_factory_.GetWeakPtr()));
}
@@ -352,12 +349,13 @@ void BufferedDataSource::StartCallback(
// All responses must be successful. Resources that are assumed to be fully
// buffered must have a known content length.
bool success = status == BufferedResourceLoader::kOk &&
- (!assume_fully_buffered_ ||
- loader_->instance_size() != kPositionNotSpecified);
+ (!assume_fully_buffered() ||
+ loader_->instance_size() != kPositionNotSpecified);
if (success) {
total_bytes_ = loader_->instance_size();
- streaming_ = !assume_fully_buffered_ &&
+ streaming_ =
+ !assume_fully_buffered() &&
(total_bytes_ == kPositionNotSpecified || !loader_->range_supported());
media_log_->SetDoubleProperty("total_bytes",
@@ -374,7 +372,12 @@ void BufferedDataSource::StartCallback(
return;
if (success) {
- UpdateHostState_Locked();
+ if (total_bytes_ != kPositionNotSpecified) {
+ host_->SetTotalBytes(total_bytes_);
+ if (assume_fully_buffered())
+ host_->AddBufferedByteRange(0, total_bytes_);
+ }
+
media_log_->SetBooleanProperty("single_origin", loader_->HasSingleOrigin());
media_log_->SetBooleanProperty("passed_cors_access_check",
loader_->DidPassCORSAccessCheck());
@@ -431,11 +434,13 @@ void BufferedDataSource::ReadCallback(
// end of the resource.
loader_.reset(CreateResourceLoader(
read_op_->position(), kPositionNotSpecified));
+
+ base::WeakPtr<BufferedDataSource> weak_this = weak_factory_.GetWeakPtr();
loader_->Start(
- base::Bind(&BufferedDataSource::PartialReadStartCallback, weak_this_),
+ base::Bind(&BufferedDataSource::PartialReadStartCallback, weak_this),
base::Bind(&BufferedDataSource::LoadingStateChangedCallback,
- weak_this_),
- base::Bind(&BufferedDataSource::ProgressCallback, weak_this_),
+ weak_this),
+ base::Bind(&BufferedDataSource::ProgressCallback, weak_this),
frame_);
return;
}
@@ -452,10 +457,10 @@ void BufferedDataSource::ReadCallback(
// fail like they would if we had known the file size at the beginning.
total_bytes_ = loader_->instance_size();
- if (host() && total_bytes_ != kPositionNotSpecified) {
- host()->SetTotalBytes(total_bytes_);
- host()->AddBufferedByteRange(loader_->first_byte_position(),
- total_bytes_);
+ if (total_bytes_ != kPositionNotSpecified) {
+ host_->SetTotalBytes(total_bytes_);
+ host_->AddBufferedByteRange(loader_->first_byte_position(),
+ total_bytes_);
}
}
ReadOperation::Run(read_op_.Pass(), bytes_read);
@@ -465,7 +470,7 @@ void BufferedDataSource::LoadingStateChangedCallback(
BufferedResourceLoader::LoadingState state) {
DCHECK(render_loop_->BelongsToCurrentThread());
- if (assume_fully_buffered_)
+ if (assume_fully_buffered())
return;
bool is_downloading_data;
@@ -493,7 +498,7 @@ void BufferedDataSource::LoadingStateChangedCallback(
void BufferedDataSource::ProgressCallback(int64 position) {
DCHECK(render_loop_->BelongsToCurrentThread());
- if (assume_fully_buffered_)
+ if (assume_fully_buffered())
return;
// TODO(scherkus): we shouldn't have to lock to signal host(), see
@@ -502,42 +507,13 @@ void BufferedDataSource::ProgressCallback(int64 position) {
if (stop_signal_received_)
return;
- ReportOrQueueBufferedBytes(loader_->first_byte_position(), position);
-}
-
-void BufferedDataSource::ReportOrQueueBufferedBytes(int64 start, int64 end) {
- if (host())
- host()->AddBufferedByteRange(start, end);
- else
- queued_buffered_byte_ranges_.Add(start, end);
-}
-
-void BufferedDataSource::UpdateHostState_Locked() {
- lock_.AssertAcquired();
-
- if (!host())
- return;
-
- for (size_t i = 0; i < queued_buffered_byte_ranges_.size(); ++i) {
- host()->AddBufferedByteRange(queued_buffered_byte_ranges_.start(i),
- queued_buffered_byte_ranges_.end(i));
- }
- queued_buffered_byte_ranges_.clear();
-
- if (total_bytes_ == kPositionNotSpecified)
- return;
-
- host()->SetTotalBytes(total_bytes_);
-
- if (assume_fully_buffered_)
- host()->AddBufferedByteRange(0, total_bytes_);
+ host_->AddBufferedByteRange(loader_->first_byte_position(), position);
}
void BufferedDataSource::UpdateDeferStrategy(bool paused) {
- // 200 responses end up not being reused to satisfy future range requests,
- // and we don't want to get too far ahead of the read-head (and thus require
- // a restart), so keep to the thresholds.
- if (!loader_->range_supported()) {
+ // No need to aggressively buffer when we are assuming the resource is fully
+ // buffered.
+ if (assume_fully_buffered()) {
loader_->UpdateDeferStrategy(BufferedResourceLoader::kCapacityDefer);
return;
}
@@ -546,14 +522,15 @@ void BufferedDataSource::UpdateDeferStrategy(bool paused) {
// and we're paused, then try to load as much as possible (the loader will
// fall back to kCapacityDefer if it knows the current response won't be
// useful from the cache in the future).
- if (media_has_played_ && paused) {
+ if (media_has_played_ && paused && loader_->range_supported()) {
loader_->UpdateDeferStrategy(BufferedResourceLoader::kNeverDefer);
return;
}
- // If media is currently playing or the page indicated preload=auto,
- // use threshold strategy to enable/disable deferring when the buffer
- // is full/depleted.
+ // If media is currently playing or the page indicated preload=auto or the
+ // the server does not support the byte range request or we do not want to go
+ // too far ahead of the read head, use threshold strategy to enable/disable
+ // deferring when the buffer is full/depleted.
loader_->UpdateDeferStrategy(BufferedResourceLoader::kCapacityDefer);
}
diff --git a/chromium/content/renderer/media/buffered_data_source.h b/chromium/content/renderer/media/buffered_data_source.h
index 30991dd2796..de44e3c9db2 100644
--- a/chromium/content/renderer/media/buffered_data_source.h
+++ b/chromium/content/renderer/media/buffered_data_source.h
@@ -27,6 +27,20 @@ class MediaLog;
namespace content {
+class CONTENT_EXPORT BufferedDataSourceHost {
+ public:
+ // Notify the host of the total size of the media file.
+ virtual void SetTotalBytes(int64 total_bytes) = 0;
+
+ // Notify the host that byte range [start,end] has been buffered.
+ // TODO(fischman): remove this method when demuxing is push-based instead of
+ // pull-based. http://crbug.com/131444
+ virtual void AddBufferedByteRange(int64 start, int64 end) = 0;
+
+ protected:
+ virtual ~BufferedDataSourceHost() {};
+};
+
// A data source capable of loading URLs and buffering the data using an
// in-memory sliding window.
//
@@ -36,23 +50,23 @@ class CONTENT_EXPORT BufferedDataSource : public media::DataSource {
public:
typedef base::Callback<void(bool)> DownloadingCB;
- // |downloading_cb| will be called whenever the downloading/paused state of
- // the source changes.
- BufferedDataSource(const scoped_refptr<base::MessageLoopProxy>& render_loop,
+ // |url| and |cors_mode| are passed to the object. Buffered byte range changes
+ // will be reported to |host|. |downloading_cb| will be called whenever the
+ // downloading/paused state of the source changes.
+ BufferedDataSource(const GURL& url,
+ BufferedResourceLoader::CORSMode cors_mode,
+ const scoped_refptr<base::MessageLoopProxy>& render_loop,
blink::WebFrame* frame,
media::MediaLog* media_log,
+ BufferedDataSourceHost* host,
const DownloadingCB& downloading_cb);
virtual ~BufferedDataSource();
- // Initialize this object using |url| and |cors_mode|, executing |init_cb|
- // with the result of initialization when it has completed.
+ // Executes |init_cb| with the result of initialization when it has completed.
//
// Method called on the render thread.
typedef base::Callback<void(bool)> InitializeCB;
- void Initialize(
- const GURL& url,
- BufferedResourceLoader::CORSMode cors_mode,
- const InitializeCB& init_cb);
+ void Initialize(const InitializeCB& init_cb);
// Adjusts the buffering algorithm based on the given preload value.
void SetPreload(Preload preload);
@@ -79,9 +93,11 @@ class CONTENT_EXPORT BufferedDataSource : public media::DataSource {
void MediaIsPlaying();
void MediaIsPaused();
+ // Returns true if the resource is local.
+ bool assume_fully_buffered() { return !url_.SchemeIsHTTPOrHTTPS(); }
+
// media::DataSource implementation.
// Called from demuxer thread.
- virtual void set_host(media::DataSourceHost* host) OVERRIDE;
virtual void Stop(const base::Closure& closure) OVERRIDE;
virtual void Read(int64 position, int size, uint8* data,
@@ -129,18 +145,10 @@ class CONTENT_EXPORT BufferedDataSource : public media::DataSource {
void LoadingStateChangedCallback(BufferedResourceLoader::LoadingState state);
void ProgressCallback(int64 position);
- // Report a buffered byte range [start,end] or queue it for later
- // reporting if set_host() hasn't been called yet.
- void ReportOrQueueBufferedBytes(int64 start, int64 end);
-
- void UpdateHostState_Locked();
-
// Update |loader_|'s deferring strategy in response to a play/pause, or
// change in playback rate.
void UpdateDeferStrategy(bool paused);
- base::WeakPtr<BufferedDataSource> weak_this_;
-
// URL of the resource requested.
GURL url_;
// crossorigin attribute on the corresponding HTML media element, if any.
@@ -151,10 +159,6 @@ class CONTENT_EXPORT BufferedDataSource : public media::DataSource {
// determined by reaching EOF.
int64 total_bytes_;
- // Some resources are assumed to be fully buffered (i.e., file://) so we don't
- // need to report what |loader_| has buffered.
- bool assume_fully_buffered_;
-
// This value will be true if this data source can only support streaming.
// i.e. range request is not supported.
bool streaming_;
@@ -209,13 +213,14 @@ class CONTENT_EXPORT BufferedDataSource : public media::DataSource {
// Current playback rate.
float playback_rate_;
- // Buffered byte ranges awaiting set_host() being called to report to host().
- media::Ranges<int64> queued_buffered_byte_ranges_;
-
scoped_refptr<media::MediaLog> media_log_;
+ // Host object to report buffered byte range changes to.
+ BufferedDataSourceHost* host_;
+
DownloadingCB downloading_cb_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<BufferedDataSource> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(BufferedDataSource);
diff --git a/chromium/content/renderer/media/buffered_data_source_host_impl.cc b/chromium/content/renderer/media/buffered_data_source_host_impl.cc
new file mode 100644
index 00000000000..45c97e63587
--- /dev/null
+++ b/chromium/content/renderer/media/buffered_data_source_host_impl.cc
@@ -0,0 +1,56 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/buffered_data_source_host_impl.h"
+
+namespace content {
+
+BufferedDataSourceHostImpl::BufferedDataSourceHostImpl()
+ : total_bytes_(0),
+ did_loading_progress_(false) { }
+
+BufferedDataSourceHostImpl::~BufferedDataSourceHostImpl() { }
+
+void BufferedDataSourceHostImpl::SetTotalBytes(int64 total_bytes) {
+ total_bytes_ = total_bytes;
+}
+
+void BufferedDataSourceHostImpl::AddBufferedByteRange(int64 start, int64 end) {
+ buffered_byte_ranges_.Add(start, end);
+ did_loading_progress_ = true;
+}
+
+static base::TimeDelta TimeForByteOffset(
+ int64 byte_offset, int64 total_bytes, base::TimeDelta duration) {
+ double position = static_cast<double>(byte_offset) / total_bytes;
+ // Snap to the beginning/end where the approximation can look especially bad.
+ if (position < 0.01)
+ return base::TimeDelta();
+ if (position > 0.99)
+ return duration;
+ return base::TimeDelta::FromMilliseconds(
+ static_cast<int64>(position * duration.InMilliseconds()));
+}
+
+void BufferedDataSourceHostImpl::AddBufferedTimeRanges(
+ media::Ranges<base::TimeDelta>* buffered_time_ranges,
+ base::TimeDelta media_duration) const {
+ if (total_bytes_ && buffered_byte_ranges_.size()) {
+ for (size_t i = 0; i < buffered_byte_ranges_.size(); ++i) {
+ int64 start = buffered_byte_ranges_.start(i);
+ int64 end = buffered_byte_ranges_.end(i);
+ buffered_time_ranges->Add(
+ TimeForByteOffset(start, total_bytes_, media_duration),
+ TimeForByteOffset(end, total_bytes_, media_duration));
+ }
+ }
+}
+
+bool BufferedDataSourceHostImpl::DidLoadingProgress() {
+ bool ret = did_loading_progress_;
+ did_loading_progress_ = false;
+ return ret;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/buffered_data_source_host_impl.h b/chromium/content/renderer/media/buffered_data_source_host_impl.h
new file mode 100644
index 00000000000..d04d4671cc5
--- /dev/null
+++ b/chromium/content/renderer/media/buffered_data_source_host_impl.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_BUFFERED_DATA_SOURCE_HOST_IMPL_H_
+#define CONTENT_RENDERER_MEDIA_BUFFERED_DATA_SOURCE_HOST_IMPL_H_
+
+#include "base/time/time.h"
+#include "content/common/content_export.h"
+#include "content/renderer/media/buffered_data_source.h"
+#include "media/base/ranges.h"
+
+namespace content {
+
+// Provides an implementation of BufferedDataSourceHost that translates the
+// buffered byte ranges into estimated time ranges.
+class CONTENT_EXPORT BufferedDataSourceHostImpl
+ : public BufferedDataSourceHost {
+ public:
+ BufferedDataSourceHostImpl();
+ virtual ~BufferedDataSourceHostImpl();
+
+ // BufferedDataSourceHost implementation.
+ virtual void SetTotalBytes(int64 total_bytes) OVERRIDE;
+ virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE;
+
+ // Translate the byte ranges to time ranges and append them to the list.
+ // TODO(sandersd): This is a confusing name, find something better.
+ void AddBufferedTimeRanges(
+ media::Ranges<base::TimeDelta>* buffered_time_ranges,
+ base::TimeDelta media_duration) const;
+
+ bool DidLoadingProgress();
+
+ private:
+ // Total size of the data source.
+ int64 total_bytes_;
+
+ // List of buffered byte ranges for estimating buffered time.
+ media::Ranges<int64> buffered_byte_ranges_;
+
+ // True when AddBufferedByteRange() has been called more recently than
+ // DidLoadingProgress().
+ bool did_loading_progress_;
+
+ DISALLOW_COPY_AND_ASSIGN(BufferedDataSourceHostImpl);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_BUFFERED_DATA_SOURCE_HOST_IMPL_H_
diff --git a/chromium/content/renderer/media/buffered_data_source_host_impl_unittest.cc b/chromium/content/renderer/media/buffered_data_source_host_impl_unittest.cc
new file mode 100644
index 00000000000..6b5a5ecf14a
--- /dev/null
+++ b/chromium/content/renderer/media/buffered_data_source_host_impl_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/buffered_data_source_host_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+class BufferedDataSourceHostImplTest : public testing::Test {
+ public:
+ BufferedDataSourceHostImplTest() {}
+
+ void Add() {
+ host_.AddBufferedTimeRanges(&ranges_, base::TimeDelta::FromSeconds(10));
+ }
+
+ protected:
+ BufferedDataSourceHostImpl host_;
+ media::Ranges<base::TimeDelta> ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(BufferedDataSourceHostImplTest);
+};
+
+TEST_F(BufferedDataSourceHostImplTest, Empty) {
+ EXPECT_FALSE(host_.DidLoadingProgress());
+ Add();
+ EXPECT_EQ(0u, ranges_.size());
+}
+
+TEST_F(BufferedDataSourceHostImplTest, AddBufferedTimeRanges) {
+ host_.AddBufferedByteRange(10, 20);
+ host_.SetTotalBytes(100);
+ Add();
+ EXPECT_EQ(1u, ranges_.size());
+ EXPECT_EQ(base::TimeDelta::FromSeconds(1), ranges_.start(0));
+ EXPECT_EQ(base::TimeDelta::FromSeconds(2), ranges_.end(0));
+}
+
+TEST_F(BufferedDataSourceHostImplTest, AddBufferedTimeRanges_Merges) {
+ ranges_.Add(base::TimeDelta::FromSeconds(0), base::TimeDelta::FromSeconds(1));
+ host_.AddBufferedByteRange(10, 20);
+ host_.SetTotalBytes(100);
+ Add();
+ EXPECT_EQ(1u, ranges_.size());
+ EXPECT_EQ(base::TimeDelta::FromSeconds(0), ranges_.start(0));
+ EXPECT_EQ(base::TimeDelta::FromSeconds(2), ranges_.end(0));
+}
+
+TEST_F(BufferedDataSourceHostImplTest, AddBufferedTimeRanges_Snaps) {
+ host_.AddBufferedByteRange(5, 995);
+ host_.SetTotalBytes(1000);
+ Add();
+ EXPECT_EQ(1u, ranges_.size());
+ EXPECT_EQ(base::TimeDelta::FromSeconds(0), ranges_.start(0));
+ EXPECT_EQ(base::TimeDelta::FromSeconds(10), ranges_.end(0));
+}
+
+TEST_F(BufferedDataSourceHostImplTest, SetTotalBytes) {
+ host_.AddBufferedByteRange(10, 20);
+ Add();
+ EXPECT_EQ(0u, ranges_.size());
+
+ host_.SetTotalBytes(100);
+ Add();
+ EXPECT_EQ(1u, ranges_.size());
+}
+
+TEST_F(BufferedDataSourceHostImplTest, DidLoadingProgress) {
+ host_.AddBufferedByteRange(10, 20);
+ EXPECT_TRUE(host_.DidLoadingProgress());
+ EXPECT_FALSE(host_.DidLoadingProgress());
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/buffered_data_source_unittest.cc b/chromium/content/renderer/media/buffered_data_source_unittest.cc
index b7808a88d53..403a93b8308 100644
--- a/chromium/content/renderer/media/buffered_data_source_unittest.cc
+++ b/chromium/content/renderer/media/buffered_data_source_unittest.cc
@@ -4,15 +4,16 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "content/public/common/url_constants.h"
#include "content/renderer/media/buffered_data_source.h"
#include "content/renderer/media/test_response_generator.h"
#include "content/test/mock_webframeclient.h"
#include "content/test/mock_weburlloader.h"
#include "media/base/media_log.h"
-#include "media/base/mock_data_source_host.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "third_party/WebKit/public/platform/WebURLResponse.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebView.h"
using ::testing::_;
@@ -22,7 +23,7 @@ using ::testing::InSequence;
using ::testing::NiceMock;
using ::testing::StrictMock;
-using blink::WebFrame;
+using blink::WebLocalFrame;
using blink::WebString;
using blink::WebURLLoader;
using blink::WebURLResponse;
@@ -30,19 +31,37 @@ using blink::WebView;
namespace content {
+class MockBufferedDataSourceHost : public BufferedDataSourceHost {
+ public:
+ MockBufferedDataSourceHost() {}
+ virtual ~MockBufferedDataSourceHost() {}
+
+ MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
+ MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockBufferedDataSourceHost);
+};
+
// Overrides CreateResourceLoader() to permit injecting a MockWebURLLoader.
// Also keeps track of whether said MockWebURLLoader is actively loading.
class MockBufferedDataSource : public BufferedDataSource {
public:
MockBufferedDataSource(
+ const GURL& url,
const scoped_refptr<base::MessageLoopProxy>& message_loop,
- WebFrame* frame)
- : BufferedDataSource(message_loop, frame, new media::MediaLog(),
+ WebLocalFrame* frame,
+ BufferedDataSourceHost* host)
+ : BufferedDataSource(url,
+ BufferedResourceLoader::kUnspecified,
+ message_loop,
+ frame,
+ new media::MediaLog(),
+ host,
base::Bind(&MockBufferedDataSource::set_downloading,
base::Unretained(this))),
downloading_(false),
- loading_(false) {
- }
+ loading_(false) {}
virtual ~MockBufferedDataSource() {}
MOCK_METHOD2(CreateResourceLoader, BufferedResourceLoader*(int64, int64));
@@ -91,35 +110,47 @@ static const char kFileUrl[] = "file:///tmp/bar.webm";
class BufferedDataSourceTest : public testing::Test {
public:
BufferedDataSourceTest()
- : view_(WebView::create(NULL)) {
- view_->initializeMainFrame(&client_);
-
- data_source_.reset(new MockBufferedDataSource(
- message_loop_.message_loop_proxy(), view_->mainFrame()));
- data_source_->set_host(&host_);
+ : view_(WebView::create(NULL)),
+ frame_(WebLocalFrame::create(&client_)),
+ preload_(AUTO) {
+ view_->setMainFrame(frame_);
}
virtual ~BufferedDataSourceTest() {
view_->close();
+ frame_->close();
}
MOCK_METHOD1(OnInitialize, void(bool));
void Initialize(const char* url, bool expected) {
GURL gurl(url);
- response_generator_.reset(new TestResponseGenerator(gurl, kFileSize));
+ data_source_.reset(
+ new MockBufferedDataSource(gurl,
+ message_loop_.message_loop_proxy(),
+ view_->mainFrame()->toWebLocalFrame(),
+ &host_));
+ data_source_->SetPreload(preload_);
+ response_generator_.reset(new TestResponseGenerator(gurl, kFileSize));
ExpectCreateResourceLoader();
EXPECT_CALL(*this, OnInitialize(expected));
- data_source_->Initialize(
- gurl, BufferedResourceLoader::kUnspecified, base::Bind(
- &BufferedDataSourceTest::OnInitialize, base::Unretained(this)));
+ data_source_->Initialize(base::Bind(&BufferedDataSourceTest::OnInitialize,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
- bool is_http = gurl.SchemeIs(kHttpScheme) || gurl.SchemeIs(kHttpsScheme);
+ bool is_http = gurl.SchemeIsHTTPOrHTTPS();
EXPECT_EQ(data_source_->downloading(), is_http);
}
+ // Helper to initialize tests with a valid 200 response.
+ void InitializeWith200Response() {
+ Initialize(kHttpUrl, true);
+
+ EXPECT_CALL(host_, SetTotalBytes(response_generator_->content_length()));
+ Respond(response_generator_->Generate200());
+ }
+
// Helper to initialize tests with a valid 206 response.
void InitializeWith206Response() {
Initialize(kHttpUrl, true);
@@ -173,7 +204,7 @@ class BufferedDataSourceTest : public testing::Test {
void FinishLoading() {
data_source_->set_loading(false);
- loader()->didFinishLoading(url_loader(), 0);
+ loader()->didFinishLoading(url_loader(), 0, -1);
message_loop_.RunUntilIdle();
}
@@ -195,6 +226,7 @@ class BufferedDataSourceTest : public testing::Test {
}
Preload preload() { return data_source_->preload_; }
+ void set_preload(Preload preload) { preload_ = preload; }
BufferedResourceLoader::DeferStrategy defer_strategy() {
return loader()->defer_strategy_;
}
@@ -202,28 +234,32 @@ class BufferedDataSourceTest : public testing::Test {
int data_source_playback_rate() { return data_source_->playback_rate_; }
int loader_bitrate() { return loader()->bitrate_; }
int loader_playback_rate() { return loader()->playback_rate_; }
+ bool is_local_source() { return data_source_->assume_fully_buffered(); }
+ void set_might_be_reused_from_cache_in_future(bool value) {
+ loader()->might_be_reused_from_cache_in_future_ = value;
+ }
scoped_ptr<MockBufferedDataSource> data_source_;
scoped_ptr<TestResponseGenerator> response_generator_;
MockWebFrameClient client_;
WebView* view_;
+ WebLocalFrame* frame_;
- StrictMock<media::MockDataSourceHost> host_;
+ StrictMock<MockBufferedDataSourceHost> host_;
base::MessageLoop message_loop_;
private:
// Used for calling BufferedDataSource::Read().
uint8 buffer_[kDataSize];
+ Preload preload_;
+
DISALLOW_COPY_AND_ASSIGN(BufferedDataSourceTest);
};
TEST_F(BufferedDataSourceTest, Range_Supported) {
- Initialize(kHttpUrl, true);
-
- EXPECT_CALL(host_, SetTotalBytes(response_generator_->content_length()));
- Respond(response_generator_->Generate206(0));
+ InitializeWith206Response();
EXPECT_TRUE(data_source_->loading());
EXPECT_FALSE(data_source_->IsStreaming());
@@ -250,9 +286,7 @@ TEST_F(BufferedDataSourceTest, Range_NotFound) {
}
TEST_F(BufferedDataSourceTest, Range_NotSupported) {
- Initialize(kHttpUrl, true);
- EXPECT_CALL(host_, SetTotalBytes(response_generator_->content_length()));
- Respond(response_generator_->Generate200());
+ InitializeWith200Response();
EXPECT_TRUE(data_source_->loading());
EXPECT_TRUE(data_source_->IsStreaming());
@@ -651,4 +685,121 @@ TEST_F(BufferedDataSourceTest, File_FinishLoading) {
Stop();
}
+TEST_F(BufferedDataSourceTest, LocalResource_DeferStrategy) {
+ InitializeWithFileResponse();
+
+ EXPECT_EQ(AUTO, preload());
+ EXPECT_TRUE(is_local_source());
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ Stop();
+}
+
+TEST_F(BufferedDataSourceTest, LocalResource_PreloadMetadata_DeferStrategy) {
+ set_preload(METADATA);
+ InitializeWithFileResponse();
+
+ EXPECT_EQ(METADATA, preload());
+ EXPECT_TRUE(is_local_source());
+ EXPECT_EQ(BufferedResourceLoader::kReadThenDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ Stop();
+}
+
+TEST_F(BufferedDataSourceTest, ExternalResource_Response200_DeferStrategy) {
+ InitializeWith200Response();
+
+ EXPECT_EQ(AUTO, preload());
+ EXPECT_FALSE(is_local_source());
+ EXPECT_FALSE(loader()->range_supported());
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ Stop();
+}
+
+TEST_F(BufferedDataSourceTest,
+ ExternalResource_Response200_PreloadMetadata_DeferStrategy) {
+ set_preload(METADATA);
+ InitializeWith200Response();
+
+ EXPECT_EQ(METADATA, preload());
+ EXPECT_FALSE(is_local_source());
+ EXPECT_FALSE(loader()->range_supported());
+ EXPECT_EQ(BufferedResourceLoader::kReadThenDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ Stop();
+}
+
+TEST_F(BufferedDataSourceTest, ExternalResource_Response206_DeferStrategy) {
+ InitializeWith206Response();
+
+ EXPECT_EQ(AUTO, preload());
+ EXPECT_FALSE(is_local_source());
+ EXPECT_TRUE(loader()->range_supported());
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+ set_might_be_reused_from_cache_in_future(true);
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kNeverDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+ set_might_be_reused_from_cache_in_future(false);
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ Stop();
+}
+
+TEST_F(BufferedDataSourceTest,
+ ExternalResource_Response206_PreloadMetadata_DeferStrategy) {
+ set_preload(METADATA);
+ InitializeWith206Response();
+
+ EXPECT_EQ(METADATA, preload());
+ EXPECT_FALSE(is_local_source());
+ EXPECT_TRUE(loader()->range_supported());
+ EXPECT_EQ(BufferedResourceLoader::kReadThenDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+ set_might_be_reused_from_cache_in_future(true);
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kNeverDefer, defer_strategy());
+
+ data_source_->MediaIsPlaying();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+ set_might_be_reused_from_cache_in_future(false);
+ data_source_->MediaIsPaused();
+ EXPECT_EQ(BufferedResourceLoader::kCapacityDefer, defer_strategy());
+
+ Stop();
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/buffered_resource_loader.cc b/chromium/content/renderer/media/buffered_resource_loader.cc
index b050febc25d..24b1213a08c 100644
--- a/chromium/content/renderer/media/buffered_resource_loader.cc
+++ b/chromium/content/renderer/media/buffered_resource_loader.cc
@@ -9,6 +9,7 @@
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
+#include "content/public/common/url_constants.h"
#include "content/renderer/media/cache_util.h"
#include "media/base/media_log.h"
#include "net/http/http_byte_range.h"
@@ -388,7 +389,7 @@ void BufferedResourceLoader::didReceiveResponse(
// received a response from HTTP/HTTPS protocol or the request was
// successful (in particular range request). So we only verify the partial
// response for HTTP and HTTPS protocol.
- if (url_.SchemeIs(kHttpScheme) || url_.SchemeIs(kHttpsScheme)) {
+ if (url_.SchemeIsHTTPOrHTTPS()) {
bool partial_response = (response.httpStatusCode() == kHttpPartialContent);
bool ok_response = (response.httpStatusCode() == kHttpOK);
@@ -483,7 +484,8 @@ void BufferedResourceLoader::didReceiveCachedMetadata(
void BufferedResourceLoader::didFinishLoading(
WebURLLoader* loader,
- double finishTime) {
+ double finishTime,
+ int64_t total_encoded_data_length) {
DVLOG(1) << "didFinishLoading";
DCHECK(active_loader_.get());
diff --git a/chromium/content/renderer/media/buffered_resource_loader.h b/chromium/content/renderer/media/buffered_resource_loader.h
index 3331979d2c9..3bdf1388c7f 100644
--- a/chromium/content/renderer/media/buffered_resource_loader.h
+++ b/chromium/content/renderer/media/buffered_resource_loader.h
@@ -28,9 +28,6 @@ namespace content {
const int64 kPositionNotSpecified = -1;
-const char kHttpScheme[] = "http";
-const char kHttpsScheme[] = "https";
-
// BufferedResourceLoader is single threaded and must be accessed on the
// render thread. It wraps a WebURLLoader and does in-memory buffering,
// pausing resource loading when the in-memory buffer is full and resuming
@@ -164,7 +161,8 @@ class CONTENT_EXPORT BufferedResourceLoader
const char* data, int dataLength);
virtual void didFinishLoading(
blink::WebURLLoader* loader,
- double finishTime);
+ double finishTime,
+ int64_t total_encoded_data_length);
virtual void didFail(
blink::WebURLLoader* loader,
const blink::WebURLError&);
diff --git a/chromium/content/renderer/media/buffered_resource_loader_unittest.cc b/chromium/content/renderer/media/buffered_resource_loader_unittest.cc
index 656f1e846e7..7bd23de9e25 100644
--- a/chromium/content/renderer/media/buffered_resource_loader_unittest.cc
+++ b/chromium/content/renderer/media/buffered_resource_loader_unittest.cc
@@ -21,7 +21,7 @@
#include "third_party/WebKit/public/platform/WebURLError.h"
#include "third_party/WebKit/public/platform/WebURLRequest.h"
#include "third_party/WebKit/public/platform/WebURLResponse.h"
-#include "third_party/WebKit/public/web/WebFrame.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebView.h"
using ::testing::_;
@@ -30,6 +30,7 @@ using ::testing::Return;
using ::testing::Truly;
using ::testing::NiceMock;
+using blink::WebLocalFrame;
using blink::WebString;
using blink::WebURLError;
using blink::WebURLResponse;
@@ -63,8 +64,8 @@ static bool CorrectAcceptEncoding(const blink::WebURLRequest &request) {
class BufferedResourceLoaderTest : public testing::Test {
public:
BufferedResourceLoaderTest()
- : view_(WebView::create(NULL)) {
- view_->initializeMainFrame(&client_);
+ : view_(WebView::create(NULL)), frame_(WebLocalFrame::create(&client_)) {
+ view_->setMainFrame(frame_);
for (int i = 0; i < kDataSize; ++i) {
data_[i] = i;
@@ -73,6 +74,7 @@ class BufferedResourceLoaderTest : public testing::Test {
virtual ~BufferedResourceLoaderTest() {
view_->close();
+ frame_->close();
}
void Initialize(const char* url, int first_position, int last_position) {
@@ -295,6 +297,7 @@ class BufferedResourceLoaderTest : public testing::Test {
MockWebFrameClient client_;
WebView* view_;
+ WebLocalFrame* frame_;
base::MessageLoop message_loop_;
@@ -422,7 +425,7 @@ TEST_F(BufferedResourceLoaderTest, BufferAndRead) {
// Response has completed.
EXPECT_CALL(*this, LoadingCallback(BufferedResourceLoader::kLoadingFinished));
- loader_->didFinishLoading(url_loader_, 0);
+ loader_->didFinishLoading(url_loader_, 0, -1);
// Try to read 10 from position 25 will just return with 5 bytes.
EXPECT_CALL(*this, ReadCallback(BufferedResourceLoader::kOk, 5));
@@ -515,7 +518,7 @@ TEST_F(BufferedResourceLoaderTest, ReadOutsideBuffer) {
EXPECT_CALL(*this, LoadingCallback(BufferedResourceLoader::kLoadingFinished));
EXPECT_CALL(*this, ReadCallback(BufferedResourceLoader::kOk, 5));
- loader_->didFinishLoading(url_loader_, 0);
+ loader_->didFinishLoading(url_loader_, 0, -1);
}
TEST_F(BufferedResourceLoaderTest, RequestFailedWhenRead) {
diff --git a/chromium/content/renderer/media/cdm_session_adapter.cc b/chromium/content/renderer/media/cdm_session_adapter.cc
new file mode 100644
index 00000000000..5550c4470b7
--- /dev/null
+++ b/chromium/content/renderer/media/cdm_session_adapter.cc
@@ -0,0 +1,156 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/cdm_session_adapter.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+#include "base/stl_util.h"
+#include "content/renderer/media/crypto/content_decryption_module_factory.h"
+#include "content/renderer/media/webcontentdecryptionmodulesession_impl.h"
+#include "media/base/cdm_promise.h"
+#include "media/base/media_keys.h"
+#include "url/gurl.h"
+
+namespace content {
+
+CdmSessionAdapter::CdmSessionAdapter() :
+#if defined(ENABLE_BROWSER_CDMS)
+ cdm_id_(0),
+#endif
+ weak_ptr_factory_(this) {}
+
+CdmSessionAdapter::~CdmSessionAdapter() {}
+
+bool CdmSessionAdapter::Initialize(
+#if defined(ENABLE_PEPPER_CDMS)
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
+#endif // defined(ENABLE_PEPPER_CDMS)
+ const std::string& key_system,
+ const GURL& security_origin) {
+ base::WeakPtr<CdmSessionAdapter> weak_this = weak_ptr_factory_.GetWeakPtr();
+ media_keys_ = ContentDecryptionModuleFactory::Create(
+ key_system,
+ security_origin,
+#if defined(ENABLE_PEPPER_CDMS)
+ create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ manager,
+ &cdm_id_,
+#endif // defined(ENABLE_PEPPER_CDMS)
+ base::Bind(&CdmSessionAdapter::OnSessionMessage, weak_this),
+ base::Bind(&CdmSessionAdapter::OnSessionReady, weak_this),
+ base::Bind(&CdmSessionAdapter::OnSessionClosed, weak_this),
+ base::Bind(&CdmSessionAdapter::OnSessionError, weak_this));
+
+ // Success if |media_keys_| created.
+ return media_keys_;
+}
+
+WebContentDecryptionModuleSessionImpl* CdmSessionAdapter::CreateSession(
+ blink::WebContentDecryptionModuleSession::Client* client) {
+ return new WebContentDecryptionModuleSessionImpl(client, this);
+}
+
+void CdmSessionAdapter::RegisterSession(
+ const std::string& web_session_id,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl> session) {
+ DCHECK(!ContainsKey(sessions_, web_session_id));
+ sessions_[web_session_id] = session;
+}
+
+void CdmSessionAdapter::RemoveSession(const std::string& web_session_id) {
+ DCHECK(ContainsKey(sessions_, web_session_id));
+ sessions_.erase(web_session_id);
+}
+
+void CdmSessionAdapter::InitializeNewSession(
+ const std::string& init_data_type,
+ const uint8* init_data,
+ int init_data_length,
+ media::MediaKeys::SessionType session_type,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
+ media_keys_->CreateSession(init_data_type,
+ init_data,
+ init_data_length,
+ session_type,
+ promise.Pass());
+}
+
+void CdmSessionAdapter::UpdateSession(
+ const std::string& web_session_id,
+ const uint8* response,
+ int response_length,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ media_keys_->UpdateSession(
+ web_session_id, response, response_length, promise.Pass());
+}
+
+void CdmSessionAdapter::ReleaseSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ media_keys_->ReleaseSession(web_session_id, promise.Pass());
+}
+
+media::Decryptor* CdmSessionAdapter::GetDecryptor() {
+ return media_keys_->GetDecryptor();
+}
+
+#if defined(ENABLE_BROWSER_CDMS)
+int CdmSessionAdapter::GetCdmId() const {
+ return cdm_id_;
+}
+#endif // defined(ENABLE_BROWSER_CDMS)
+
+void CdmSessionAdapter::OnSessionMessage(const std::string& web_session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(web_session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << web_session_id;
+ if (session)
+ session->OnSessionMessage(message, destination_url);
+}
+
+void CdmSessionAdapter::OnSessionReady(const std::string& web_session_id) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(web_session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << web_session_id;
+ if (session)
+ session->OnSessionReady();
+}
+
+void CdmSessionAdapter::OnSessionClosed(const std::string& web_session_id) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(web_session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << web_session_id;
+ if (session)
+ session->OnSessionClosed();
+}
+
+void CdmSessionAdapter::OnSessionError(
+ const std::string& web_session_id,
+ media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ WebContentDecryptionModuleSessionImpl* session = GetSession(web_session_id);
+ DLOG_IF(WARNING, !session) << __FUNCTION__ << " for unknown session "
+ << web_session_id;
+ if (session)
+ session->OnSessionError(exception_code, system_code, error_message);
+}
+
+WebContentDecryptionModuleSessionImpl* CdmSessionAdapter::GetSession(
+ const std::string& web_session_id) {
+ // Since session objects may get garbage collected, it is possible that there
+ // are events coming back from the CDM and the session has been unregistered.
+ // We can not tell if the CDM is firing events at sessions that never existed.
+ SessionMap::iterator session = sessions_.find(web_session_id);
+ return (session != sessions_.end()) ? session->second.get() : NULL;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/cdm_session_adapter.h b/chromium/content/renderer/media/cdm_session_adapter.h
new file mode 100644
index 00000000000..de27125db15
--- /dev/null
+++ b/chromium/content/renderer/media/cdm_session_adapter.h
@@ -0,0 +1,136 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_CDM_SESSION_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_CDM_SESSION_ADAPTER_H_
+
+#include <map>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_keys.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModuleSession.h"
+
+#if defined(ENABLE_PEPPER_CDMS)
+#include "content/renderer/media/crypto/pepper_cdm_wrapper.h"
+#endif
+
+class GURL;
+
+namespace content {
+
+#if defined(ENABLE_BROWSER_CDMS)
+class RendererCdmManager;
+#endif
+
+class WebContentDecryptionModuleSessionImpl;
+
+// Owns the CDM instance and makes calls from session objects to the CDM.
+// Forwards the web session ID-based callbacks of the MediaKeys interface to the
+// appropriate session object. Callers should hold references to this class
+// as long as they need the CDM instance.
+class CdmSessionAdapter : public base::RefCounted<CdmSessionAdapter> {
+ public:
+ CdmSessionAdapter();
+
+ // Returns true on success.
+ bool Initialize(
+#if defined(ENABLE_PEPPER_CDMS)
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
+#endif
+ const std::string& key_system,
+ const GURL& security_origin);
+
+ // Creates a new session and adds it to the internal map. The caller owns the
+ // created session. RemoveSession() must be called when destroying it, if
+ // RegisterSession() was called.
+ WebContentDecryptionModuleSessionImpl* CreateSession(
+ blink::WebContentDecryptionModuleSession::Client* client);
+
+ // Adds a session to the internal map. Called once the session is successfully
+ // initialized.
+ void RegisterSession(
+ const std::string& web_session_id,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl> session);
+
+ // Removes a session from the internal map.
+ void RemoveSession(const std::string& web_session_id);
+
+ // Initializes a session with the |init_data_type|, |init_data| and
+ // |session_type| provided. Takes ownership of |promise|.
+ void InitializeNewSession(const std::string& init_data_type,
+ const uint8* init_data,
+ int init_data_length,
+ media::MediaKeys::SessionType session_type,
+ scoped_ptr<media::NewSessionCdmPromise> promise);
+
+ // Updates the session specified by |web_session_id| with |response|.
+ // Takes ownership of |promise|.
+ void UpdateSession(const std::string& web_session_id,
+ const uint8* response,
+ int response_length,
+ scoped_ptr<media::SimpleCdmPromise> promise);
+
+ // Releases the session specified by |web_session_id|.
+ // Takes ownership of |promise|.
+ void ReleaseSession(const std::string& web_session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise);
+
+ // Returns the Decryptor associated with this CDM. May be NULL if no
+ // Decryptor is associated with the MediaKeys object.
+ // TODO(jrummell): Figure out lifetimes, as WMPI may still use the decryptor
+ // after WebContentDecryptionModule is freed. http://crbug.com/330324
+ media::Decryptor* GetDecryptor();
+
+#if defined(ENABLE_BROWSER_CDMS)
+ // Returns the CDM ID associated with the |media_keys_|. May be kInvalidCdmId
+ // if no CDM ID is associated.
+ int GetCdmId() const;
+#endif
+
+ private:
+ friend class base::RefCounted<CdmSessionAdapter>;
+ typedef base::hash_map<std::string,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl> >
+ SessionMap;
+
+ ~CdmSessionAdapter();
+
+ // Callbacks for firing session events.
+ void OnSessionMessage(const std::string& web_session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url);
+ void OnSessionReady(const std::string& web_session_id);
+ void OnSessionClosed(const std::string& web_session_id);
+ void OnSessionError(const std::string& web_session_id,
+ media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
+
+ // Helper function of the callbacks.
+ WebContentDecryptionModuleSessionImpl* GetSession(
+ const std::string& web_session_id);
+
+ scoped_ptr<media::MediaKeys> media_keys_;
+
+ SessionMap sessions_;
+
+#if defined(ENABLE_BROWSER_CDMS)
+ int cdm_id_;
+#endif
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<CdmSessionAdapter> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmSessionAdapter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_CDM_SESSION_ADAPTER_H_
diff --git a/chromium/content/renderer/media/crypto/content_decryption_module_factory.cc b/chromium/content/renderer/media/crypto/content_decryption_module_factory.cc
index feafca7c4bc..e56962f6811 100644
--- a/chromium/content/renderer/media/crypto/content_decryption_module_factory.cc
+++ b/chromium/content/renderer/media/crypto/content_decryption_module_factory.cc
@@ -7,140 +7,63 @@
#include "base/logging.h"
#include "content/renderer/media/crypto/key_systems.h"
#include "media/cdm/aes_decryptor.h"
+#include "url/gurl.h"
#if defined(ENABLE_PEPPER_CDMS)
#include "content/renderer/media/crypto/ppapi_decryptor.h"
-#include "content/renderer/pepper/pepper_plugin_instance_impl.h"
-#include "content/renderer/pepper/pepper_webplugin_impl.h"
-#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
-#include "third_party/WebKit/public/platform/WebString.h"
-#include "third_party/WebKit/public/web/WebFrame.h"
-#elif defined(OS_ANDROID)
-#include "content/renderer/media/android/proxy_media_keys.h"
-#include "content/renderer/media/android/renderer_media_player_manager.h"
+#elif defined(ENABLE_BROWSER_CDMS)
+#include "content/renderer/media/crypto/proxy_media_keys.h"
+#include "content/renderer/media/crypto/renderer_cdm_manager.h"
#endif // defined(ENABLE_PEPPER_CDMS)
namespace content {
-#if defined(ENABLE_PEPPER_CDMS)
-// Returns the PepperPluginInstanceImpl associated with the Helper Plugin.
-// If a non-NULL pointer is returned, the caller must call
-// closeHelperPluginSoon() when the Helper Plugin is no longer needed.
-static scoped_refptr<PepperPluginInstanceImpl> CreateHelperPlugin(
- const std::string& plugin_type,
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame) {
- DCHECK(web_media_player_client);
- DCHECK(web_frame);
-
- blink::WebPlugin* web_plugin = web_media_player_client->createHelperPlugin(
- blink::WebString::fromUTF8(plugin_type), web_frame);
- if (!web_plugin)
- return NULL;
-
- DCHECK(!web_plugin->isPlaceholder()); // Prevented by Blink.
- // Only Pepper plugins are supported, so it must be a ppapi object.
- PepperWebPluginImpl* ppapi_plugin =
- static_cast<PepperWebPluginImpl*>(web_plugin);
- return ppapi_plugin->instance();
-}
-
-static scoped_ptr<media::MediaKeys> CreatePpapiDecryptor(
- const std::string& key_system,
- const media::SessionCreatedCB& session_created_cb,
- const media::SessionMessageCB& session_message_cb,
- const media::SessionReadyCB& session_ready_cb,
- const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb,
- const base::Closure& destroy_plugin_cb,
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame) {
- DCHECK(web_media_player_client);
- DCHECK(web_frame);
-
- std::string plugin_type = GetPepperType(key_system);
- DCHECK(!plugin_type.empty());
- const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance =
- CreateHelperPlugin(plugin_type, web_media_player_client, web_frame);
- if (!plugin_instance.get()) {
- DLOG(ERROR) << "Plugin instance creation failed.";
- return scoped_ptr<media::MediaKeys>();
- }
-
- scoped_ptr<PpapiDecryptor> decryptor =
- PpapiDecryptor::Create(key_system,
- plugin_instance,
- session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb,
- destroy_plugin_cb);
-
- if (!decryptor)
- destroy_plugin_cb.Run();
- // Else the new object will call destroy_plugin_cb to destroy Helper Plugin.
-
- return scoped_ptr<media::MediaKeys>(decryptor.Pass());
-}
-
-void ContentDecryptionModuleFactory::DestroyHelperPlugin(
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame) {
- web_media_player_client->closeHelperPluginSoon(web_frame);
-}
-#endif // defined(ENABLE_PEPPER_CDMS)
-
scoped_ptr<media::MediaKeys> ContentDecryptionModuleFactory::Create(
const std::string& key_system,
+ const GURL& security_origin,
#if defined(ENABLE_PEPPER_CDMS)
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame,
- const base::Closure& destroy_plugin_cb,
-#elif defined(OS_ANDROID)
- RendererMediaPlayerManager* manager,
- int media_keys_id,
- const GURL& frame_url,
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
+ int* cdm_id,
#endif // defined(ENABLE_PEPPER_CDMS)
- const media::SessionCreatedCB& session_created_cb,
const media::SessionMessageCB& session_message_cb,
const media::SessionReadyCB& session_ready_cb,
const media::SessionClosedCB& session_closed_cb,
const media::SessionErrorCB& session_error_cb) {
+ // TODO(jrummell): Pass |security_origin| to all constructors.
+ // TODO(jrummell): Enable the following line once blink code updated to
+ // check the security origin before calling.
+ // DCHECK(security_origin.is_valid());
+
+#if defined(ENABLE_BROWSER_CDMS)
+ *cdm_id = RendererCdmManager::kInvalidCdmId;
+#endif
+
if (CanUseAesDecryptor(key_system)) {
return scoped_ptr<media::MediaKeys>(
- new media::AesDecryptor(session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb));
+ new media::AesDecryptor(session_message_cb, session_closed_cb));
}
-
#if defined(ENABLE_PEPPER_CDMS)
- // TODO(ddorwin): Remove when the WD API implementation supports loading
- // Pepper-based CDMs: http://crbug.com/250049
- if (!web_media_player_client)
- return scoped_ptr<media::MediaKeys>();
-
- return CreatePpapiDecryptor(key_system,
- session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb,
- destroy_plugin_cb,
- web_media_player_client,
- web_frame);
-#elif defined(OS_ANDROID)
- scoped_ptr<ProxyMediaKeys> proxy_media_keys(
- new ProxyMediaKeys(manager,
- media_keys_id,
- session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb));
- proxy_media_keys->InitializeCDM(key_system, frame_url);
+ return scoped_ptr<media::MediaKeys>(
+ PpapiDecryptor::Create(key_system,
+ security_origin,
+ create_pepper_cdm_cb,
+ session_message_cb,
+ session_ready_cb,
+ session_closed_cb,
+ session_error_cb));
+#elif defined(ENABLE_BROWSER_CDMS)
+ scoped_ptr<ProxyMediaKeys> proxy_media_keys =
+ ProxyMediaKeys::Create(key_system,
+ security_origin,
+ manager,
+ session_message_cb,
+ session_ready_cb,
+ session_closed_cb,
+ session_error_cb);
+ if (proxy_media_keys)
+ *cdm_id = proxy_media_keys->GetCdmId();
return proxy_media_keys.PassAs<media::MediaKeys>();
#else
return scoped_ptr<media::MediaKeys>();
diff --git a/chromium/content/renderer/media/crypto/content_decryption_module_factory.h b/chromium/content/renderer/media/crypto/content_decryption_module_factory.h
index 87623ca0785..c18bf9d9b21 100644
--- a/chromium/content/renderer/media/crypto/content_decryption_module_factory.h
+++ b/chromium/content/renderer/media/crypto/content_decryption_module_factory.h
@@ -10,44 +10,35 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/media_keys.h"
-class GURL;
-
#if defined(ENABLE_PEPPER_CDMS)
-namespace blink {
-class WebFrame;
-class WebMediaPlayerClient;
-}
-#endif // defined(ENABLE_PEPPER_CDMS)
+#include "content/renderer/media/crypto/pepper_cdm_wrapper.h"
+#endif
+
+class GURL;
namespace content {
-class RendererMediaPlayerManager;
+#if defined(ENABLE_BROWSER_CDMS)
+class RendererCdmManager;
+#endif
class ContentDecryptionModuleFactory {
public:
+ // |create_pepper_cdm_cb| will be called synchronously if necessary. The other
+ // callbacks can be called asynchronously.
static scoped_ptr<media::MediaKeys> Create(
const std::string& key_system,
+ const GURL& security_origin,
#if defined(ENABLE_PEPPER_CDMS)
- // TODO(ddorwin): We need different pointers for the WD API.
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame,
- const base::Closure& destroy_plugin_cb,
-#elif defined(OS_ANDROID)
- RendererMediaPlayerManager* manager,
- int media_keys_id,
- const GURL& frame_url,
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
+ int* cdm_id, // Output parameter indicating the CDM ID of the MediaKeys.
#endif // defined(ENABLE_PEPPER_CDMS)
- const media::SessionCreatedCB& session_created_cb,
const media::SessionMessageCB& session_message_cb,
const media::SessionReadyCB& session_ready_cb,
const media::SessionClosedCB& session_closed_cb,
const media::SessionErrorCB& session_error_cb);
-
-#if defined(ENABLE_PEPPER_CDMS)
- static void DestroyHelperPlugin(
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame);
-#endif // defined(ENABLE_PEPPER_CDMS)
};
} // namespace content
diff --git a/chromium/content/renderer/media/crypto/key_systems.cc b/chromium/content/renderer/media/crypto/key_systems.cc
index 41025337a03..dace75167b1 100644
--- a/chromium/content/renderer/media/crypto/key_systems.cc
+++ b/chromium/content/renderer/media/crypto/key_systems.cc
@@ -4,68 +4,80 @@
#include "content/renderer/media/crypto/key_systems.h"
-#include <map>
#include <string>
+#include "base/containers/hash_tables.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
#include "content/public/common/content_client.h"
+#include "content/public/common/eme_codec.h"
#include "content/public/renderer/content_renderer_client.h"
#include "content/public/renderer/key_system_info.h"
#include "content/renderer/media/crypto/key_systems_support_uma.h"
-#include "net/base/mime_util.h"
-#include "third_party/WebKit/public/platform/WebCString.h"
-#include "third_party/WebKit/public/platform/WebString.h"
+
+#if defined(OS_ANDROID)
+#include "media/base/android/media_codec_bridge.h"
+#endif
#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
namespace content {
-const char kClearKeyKeySystem[] = "webkit-org.w3.clearkey";
+const char kClearKeyKeySystem[] = "org.w3.clearkey";
+const char kPrefixedClearKeyKeySystem[] = "webkit-org.w3.clearkey";
+const char kUnsupportedClearKeyKeySystem[] = "unsupported-org.w3.clearkey";
-const char kAudioWebM[] = "audio/webm";
-const char kVideoWebM[] = "video/webm";
-const char kVorbis[] = "vorbis";
-const char kVorbisVP8[] = "vorbis,vp8,vp8.0";
+struct CodecMask {
+ const char* type;
+ EmeCodec mask;
+};
+// Mapping between container types and the masks of associated codecs.
+// Only audio codec can belong to a "audio/*" container. Both audio and video
+// codecs can belong to a "video/*" container.
+CodecMask kContainerCodecMasks[] = {
+ {"audio/webm", EME_CODEC_WEBM_AUDIO_ALL},
+ {"video/webm", EME_CODEC_WEBM_ALL},
#if defined(USE_PROPRIETARY_CODECS)
-const char kAudioMp4[] = "audio/mp4";
-const char kVideoMp4[] = "video/mp4";
-const char kMp4a[] = "mp4a";
-const char kMp4aAvc1Avc3[] = "mp4a,avc1,avc3";
+ {"audio/mp4", EME_CODEC_MP4_AUDIO_ALL},
+ {"video/mp4", EME_CODEC_MP4_ALL}
#endif // defined(USE_PROPRIETARY_CODECS)
+};
-#if !defined(GOOGLE_TV)
-inline std::string KeySystemNameForUMAInternal(
- const blink::WebString& key_system) {
- if (key_system == kClearKeyKeySystem)
- return "ClearKey";
-#if defined(WIDEVINE_CDM_AVAILABLE)
- if (key_system == kWidevineKeySystem)
- return "Widevine";
-#endif // WIDEVINE_CDM_AVAILABLE
- return "Unknown";
-}
-#else
-// Declares the function, which is defined in another file.
-std::string KeySystemNameForUMAInternal(const blink::WebString& key_system);
-#endif // !defined(GOOGLE_TV)
-
-// Convert a WebString to ASCII, falling back on an empty string in the case
-// of a non-ASCII string.
-static std::string ToASCIIOrEmpty(const blink::WebString& string) {
- return IsStringASCII(string) ? UTF16ToASCII(string) : std::string();
-}
+// Mapping between codec types and their masks.
+CodecMask kCodecMasks[] = {
+ {"vorbis", EME_CODEC_WEBM_VORBIS},
+ {"vp8", EME_CODEC_WEBM_VP8},
+ {"vp8.0", EME_CODEC_WEBM_VP8},
+ {"vp9", EME_CODEC_WEBM_VP9},
+ {"vp9.0", EME_CODEC_WEBM_VP9},
+#if defined(USE_PROPRIETARY_CODECS)
+ {"mp4a", EME_CODEC_MP4_AAC},
+ {"avc1", EME_CODEC_MP4_AVC1},
+ {"avc3", EME_CODEC_MP4_AVC1}
+#endif // defined(USE_PROPRIETARY_CODECS)
+};
static void AddClearKey(std::vector<KeySystemInfo>* concrete_key_systems) {
KeySystemInfo info(kClearKeyKeySystem);
- info.supported_types.push_back(std::make_pair(kAudioWebM, kVorbis));
- info.supported_types.push_back(std::make_pair(kVideoWebM, kVorbisVP8));
+ // On Android, Vorbis, VP8, AAC and AVC1 are supported in MediaCodec:
+ // http://developer.android.com/guide/appendix/media-formats.html
+ // VP9 support is device dependent.
+
+ info.supported_codecs = EME_CODEC_WEBM_ALL;
+
+#if defined(OS_ANDROID)
+ // Temporarily disable VP9 support for Android.
+ // TODO(xhwang): Use mime_util.h to query VP9 support on Android.
+ info.supported_codecs &= ~EME_CODEC_WEBM_VP9;
+#endif // defined(OS_ANDROID)
+
#if defined(USE_PROPRIETARY_CODECS)
- info.supported_types.push_back(std::make_pair(kAudioMp4, kMp4a));
- info.supported_types.push_back(std::make_pair(kVideoMp4, kMp4aAvc1Avc3));
+ info.supported_codecs |= EME_CODEC_MP4_ALL;
#endif // defined(USE_PROPRIETARY_CODECS)
info.use_aes_decryptor = true;
@@ -77,6 +89,8 @@ class KeySystems {
public:
static KeySystems& GetInstance();
+ void UpdateIfNeeded();
+
bool IsConcreteSupportedKeySystem(const std::string& key_system);
bool IsSupportedKeySystemWithMediaMimeType(
@@ -88,11 +102,14 @@ class KeySystems {
#if defined(ENABLE_PEPPER_CDMS)
std::string GetPepperType(const std::string& concrete_key_system);
-#elif defined(OS_ANDROID)
- std::vector<uint8> GetUUID(const std::string& concrete_key_system);
#endif
+ void AddContainerMask(const std::string& container, uint32 mask);
+ void AddCodecMask(const std::string& codec, uint32 mask);
+
private:
+ void UpdateSupportedKeySystems();
+
void AddConcreteSupportedKeySystems(
const std::vector<KeySystemInfo>& concrete_key_systems);
@@ -101,44 +118,42 @@ class KeySystems {
bool use_aes_decryptor,
#if defined(ENABLE_PEPPER_CDMS)
const std::string& pepper_type,
-#elif defined(OS_ANDROID)
- const std::vector<uint8>& uuid,
#endif
- const std::vector<KeySystemInfo::ContainerCodecsPair>& supported_types,
+ SupportedCodecs supported_codecs,
const std::string& parent_key_system);
-
friend struct base::DefaultLazyInstanceTraits<KeySystems>;
- typedef base::hash_set<std::string> CodecSet;
- typedef std::map<std::string, CodecSet> MimeTypeMap;
-
struct KeySystemProperties {
KeySystemProperties() : use_aes_decryptor(false) {}
bool use_aes_decryptor;
#if defined(ENABLE_PEPPER_CDMS)
std::string pepper_type;
-#elif defined(OS_ANDROID)
- std::vector<uint8> uuid;
#endif
- MimeTypeMap types;
+ SupportedCodecs supported_codecs;
};
- typedef std::map<std::string, KeySystemProperties> KeySystemPropertiesMap;
-
- typedef std::map<std::string, std::string> ParentKeySystemMap;
+ typedef base::hash_map<std::string, KeySystemProperties>
+ KeySystemPropertiesMap;
+ typedef base::hash_map<std::string, std::string> ParentKeySystemMap;
+ typedef base::hash_map<std::string, EmeCodec> CodecMaskMap;
KeySystems();
~KeySystems() {}
- void AddSupportedType(const std::string& mime_type,
- const std::string& codecs_list,
- KeySystemProperties* properties);
+ // Returns whether a |container| type is supported by checking
+ // |key_system_supported_codecs|.
+ // TODO(xhwang): Update this to actually check initDataType support.
+ bool IsSupportedContainer(const std::string& container,
+ SupportedCodecs key_system_supported_codecs) const;
- bool IsSupportedKeySystemWithContainerAndCodec(const std::string& mime_type,
- const std::string& codec,
- const std::string& key_system);
+ // Returns true if all |codecs| are supported in |container| by checking
+ // |key_system_supported_codecs|.
+ bool IsSupportedContainerAndCodecs(
+ const std::string& container,
+ const std::vector<std::string>& codecs,
+ SupportedCodecs key_system_supported_codecs) const;
// Map from key system string to capabilities.
KeySystemPropertiesMap concrete_key_system_map_;
@@ -149,40 +164,105 @@ class KeySystems {
KeySystemsSupportUMA key_systems_support_uma_;
+ CodecMaskMap container_codec_masks_;
+ CodecMaskMap codec_masks_;
+
+ bool needs_update_;
+ base::Time last_update_time_;
+
+ // Makes sure all methods are called from the same thread.
+ base::ThreadChecker thread_checker_;
+
DISALLOW_COPY_AND_ASSIGN(KeySystems);
};
static base::LazyInstance<KeySystems> g_key_systems = LAZY_INSTANCE_INITIALIZER;
KeySystems& KeySystems::GetInstance() {
- return g_key_systems.Get();
+ KeySystems& key_systems = g_key_systems.Get();
+ key_systems.UpdateIfNeeded();
+ return key_systems;
}
// Because we use a LazyInstance, the key systems info must be populated when
// the instance is lazily initiated.
-KeySystems::KeySystems() {
+KeySystems::KeySystems() : needs_update_(true) {
+ // Build container and codec masks for quick look up.
+ for (size_t i = 0; i < arraysize(kContainerCodecMasks); ++i) {
+ const CodecMask& container_codec_mask = kContainerCodecMasks[i];
+ DCHECK(container_codec_masks_.find(container_codec_mask.type) ==
+ container_codec_masks_.end());
+ container_codec_masks_[container_codec_mask.type] =
+ container_codec_mask.mask;
+ }
+ for (size_t i = 0; i < arraysize(kCodecMasks); ++i) {
+ const CodecMask& codec_mask = kCodecMasks[i];
+ DCHECK(codec_masks_.find(codec_mask.type) == codec_masks_.end());
+ codec_masks_[codec_mask.type] = codec_mask.mask;
+ }
+
+ UpdateSupportedKeySystems();
+
+#if defined(WIDEVINE_CDM_AVAILABLE)
+ key_systems_support_uma_.AddKeySystemToReport(kWidevineKeySystem);
+#endif // defined(WIDEVINE_CDM_AVAILABLE)
+}
+
+void KeySystems::UpdateIfNeeded() {
+#if defined(WIDEVINE_CDM_AVAILABLE) && defined(WIDEVINE_CDM_IS_COMPONENT)
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!needs_update_)
+ return;
+
+ // The update could involve a sync IPC to the browser process. Use a minimum
+ // update interval to avoid unnecessary frequent IPC to the browser.
+ static const int kMinUpdateIntervalInSeconds = 5;
+ base::Time now = base::Time::Now();
+ if (now - last_update_time_ <
+ base::TimeDelta::FromSeconds(kMinUpdateIntervalInSeconds)) {
+ return;
+ }
+
+ UpdateSupportedKeySystems();
+#endif
+}
+
+void KeySystems::UpdateSupportedKeySystems() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(needs_update_);
+ concrete_key_system_map_.clear();
+ parent_key_system_map_.clear();
+
+ // Build KeySystemInfo.
std::vector<KeySystemInfo> key_systems_info;
GetContentClient()->renderer()->AddKeySystems(&key_systems_info);
// Clear Key is always supported.
AddClearKey(&key_systems_info);
+
AddConcreteSupportedKeySystems(key_systems_info);
-#if defined(WIDEVINE_CDM_AVAILABLE)
- key_systems_support_uma_.AddKeySystemToReport(kWidevineKeySystem);
-#endif // defined(WIDEVINE_CDM_AVAILABLE)
+
+#if defined(WIDEVINE_CDM_AVAILABLE) && defined(WIDEVINE_CDM_IS_COMPONENT)
+ if (IsConcreteSupportedKeySystem(kWidevineKeySystem))
+ needs_update_ = false;
+#endif
+
+ last_update_time_ = base::Time::Now();
}
void KeySystems::AddConcreteSupportedKeySystems(
const std::vector<KeySystemInfo>& concrete_key_systems) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(concrete_key_system_map_.empty());
+ DCHECK(parent_key_system_map_.empty());
+
for (size_t i = 0; i < concrete_key_systems.size(); ++i) {
const KeySystemInfo& key_system_info = concrete_key_systems[i];
AddConcreteSupportedKeySystem(key_system_info.key_system,
key_system_info.use_aes_decryptor,
#if defined(ENABLE_PEPPER_CDMS)
key_system_info.pepper_type,
-#elif defined(OS_ANDROID)
- key_system_info.uuid,
#endif
- key_system_info.supported_types,
+ key_system_info.supported_codecs,
key_system_info.parent_key_system);
}
}
@@ -192,11 +272,10 @@ void KeySystems::AddConcreteSupportedKeySystem(
bool use_aes_decryptor,
#if defined(ENABLE_PEPPER_CDMS)
const std::string& pepper_type,
-#elif defined(OS_ANDROID)
- const std::vector<uint8>& uuid,
#endif
- const std::vector<KeySystemInfo::ContainerCodecsPair>& supported_types,
+ SupportedCodecs supported_codecs,
const std::string& parent_key_system) {
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!IsConcreteSupportedKeySystem(concrete_key_system))
<< "Key system '" << concrete_key_system << "' already registered";
DCHECK(parent_key_system_map_.find(concrete_key_system) ==
@@ -208,18 +287,9 @@ void KeySystems::AddConcreteSupportedKeySystem(
#if defined(ENABLE_PEPPER_CDMS)
DCHECK_EQ(use_aes_decryptor, pepper_type.empty());
properties.pepper_type = pepper_type;
-#elif defined(OS_ANDROID)
- DCHECK_EQ(use_aes_decryptor, uuid.empty());
- DCHECK(use_aes_decryptor || uuid.size() == 16);
- properties.uuid = uuid;
#endif
- for (size_t i = 0; i < supported_types.size(); ++i) {
- const KeySystemInfo::ContainerCodecsPair& pair = supported_types[i];
- const std::string& mime_type = pair.first;
- const std::string& codecs_list = pair.second;
- AddSupportedType(mime_type, codecs_list, &properties);
- }
+ properties.supported_codecs = supported_codecs;
concrete_key_system_map_[concrete_key_system] = properties;
@@ -233,59 +303,68 @@ void KeySystems::AddConcreteSupportedKeySystem(
}
}
-void KeySystems::AddSupportedType(const std::string& mime_type,
- const std::string& codecs_list,
- KeySystemProperties* properties) {
- std::vector<std::string> mime_type_codecs;
- net::ParseCodecString(codecs_list, &mime_type_codecs, false);
-
- CodecSet codecs(mime_type_codecs.begin(), mime_type_codecs.end());
-
- MimeTypeMap& mime_types_map = properties->types;
- // mime_types_map must not be repeated for a given key system.
- DCHECK(mime_types_map.find(mime_type) == mime_types_map.end());
- mime_types_map[mime_type] = codecs;
-}
-
bool KeySystems::IsConcreteSupportedKeySystem(const std::string& key_system) {
+ DCHECK(thread_checker_.CalledOnValidThread());
return concrete_key_system_map_.find(key_system) !=
concrete_key_system_map_.end();
}
-bool KeySystems::IsSupportedKeySystemWithContainerAndCodec(
- const std::string& mime_type,
- const std::string& codec,
- const std::string& key_system) {
- bool has_type = !mime_type.empty();
- DCHECK(has_type || codec.empty());
+bool KeySystems::IsSupportedContainer(
+ const std::string& container,
+ SupportedCodecs key_system_supported_codecs) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!container.empty());
+
+ // When checking container support for EME, "audio/foo" should be treated the
+ // same as "video/foo". Convert the |container| to achieve this.
+ // TODO(xhwang): Replace this with real checks against supported initDataTypes
+ // combined with supported demuxers.
+ std::string canonical_container = container;
+ if (container.find("audio/") == 0)
+ canonical_container.replace(0, 6, "video/");
+
+ CodecMaskMap::const_iterator container_iter =
+ container_codec_masks_.find(canonical_container);
+ // Unrecognized container.
+ if (container_iter == container_codec_masks_.end())
+ return false;
- key_systems_support_uma_.ReportKeySystemQuery(key_system, has_type);
+ EmeCodec container_codec_mask = container_iter->second;
+ // A container is supported iif at least one codec in that container is
+ // supported.
+ return (container_codec_mask & key_system_supported_codecs) != 0;
+}
- KeySystemPropertiesMap::const_iterator key_system_iter =
- concrete_key_system_map_.find(key_system);
- if (key_system_iter == concrete_key_system_map_.end())
- return false;
+bool KeySystems::IsSupportedContainerAndCodecs(
+ const std::string& container,
+ const std::vector<std::string>& codecs,
+ SupportedCodecs key_system_supported_codecs) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!container.empty());
+ DCHECK(!codecs.empty());
+ DCHECK(IsSupportedContainer(container, key_system_supported_codecs));
- key_systems_support_uma_.ReportKeySystemSupport(key_system, false);
+ CodecMaskMap::const_iterator container_iter =
+ container_codec_masks_.find(container);
+ EmeCodec container_codec_mask = container_iter->second;
- if (mime_type.empty())
- return true;
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ const std::string& codec = codecs[i];
+ if (codec.empty())
+ continue;
+ CodecMaskMap::const_iterator codec_iter = codec_masks_.find(codec);
+ if (codec_iter == codec_masks_.end()) // Unrecognized codec.
+ return false;
- const MimeTypeMap& mime_types_map = key_system_iter->second.types;
- MimeTypeMap::const_iterator mime_iter = mime_types_map.find(mime_type);
- if (mime_iter == mime_types_map.end())
- return false;
+ EmeCodec codec_mask = codec_iter->second;
+ if (!(codec_mask & key_system_supported_codecs)) // Unsupported codec.
+ return false;
- if (codec.empty()) {
- key_systems_support_uma_.ReportKeySystemSupport(key_system, true);
- return true;
+ // Unsupported codec/container combination, e.g. "video/webm" and "avc1".
+ if (!(codec_mask & container_codec_mask))
+ return false;
}
- const CodecSet& codecs = mime_iter->second;
- if (codecs.find(codec) == codecs.end())
- return false;
-
- key_systems_support_uma_.ReportKeySystemSupport(key_system, true);
return true;
}
@@ -293,6 +372,8 @@ bool KeySystems::IsSupportedKeySystemWithMediaMimeType(
const std::string& mime_type,
const std::vector<std::string>& codecs,
const std::string& key_system) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
// If |key_system| is a parent key_system, use its concrete child.
// Otherwise, use |key_system|.
std::string concrete_key_system;
@@ -303,22 +384,42 @@ bool KeySystems::IsSupportedKeySystemWithMediaMimeType(
else
concrete_key_system = key_system;
- if (codecs.empty()) {
- return IsSupportedKeySystemWithContainerAndCodec(
- mime_type, std::string(), concrete_key_system);
+ bool has_type = !mime_type.empty();
+
+ key_systems_support_uma_.ReportKeySystemQuery(key_system, has_type);
+
+ // Check key system support.
+ KeySystemPropertiesMap::const_iterator key_system_iter =
+ concrete_key_system_map_.find(concrete_key_system);
+ if (key_system_iter == concrete_key_system_map_.end())
+ return false;
+
+ key_systems_support_uma_.ReportKeySystemSupport(key_system, false);
+
+ if (!has_type) {
+ DCHECK(codecs.empty());
+ return true;
}
- for (size_t i = 0; i < codecs.size(); ++i) {
- if (!IsSupportedKeySystemWithContainerAndCodec(
- mime_type, codecs[i], concrete_key_system)) {
- return false;
- }
+ SupportedCodecs key_system_supported_codecs =
+ key_system_iter->second.supported_codecs;
+
+ if (!IsSupportedContainer(mime_type, key_system_supported_codecs))
+ return false;
+
+ if (!codecs.empty() &&
+ !IsSupportedContainerAndCodecs(
+ mime_type, codecs, key_system_supported_codecs)) {
+ return false;
}
+ key_systems_support_uma_.ReportKeySystemSupport(key_system, true);
return true;
}
bool KeySystems::UseAesDecryptor(const std::string& concrete_key_system) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
KeySystemPropertiesMap::iterator key_system_iter =
concrete_key_system_map_.find(concrete_key_system);
if (key_system_iter == concrete_key_system_map_.end()) {
@@ -331,6 +432,8 @@ bool KeySystems::UseAesDecryptor(const std::string& concrete_key_system) {
#if defined(ENABLE_PEPPER_CDMS)
std::string KeySystems::GetPepperType(const std::string& concrete_key_system) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
KeySystemPropertiesMap::iterator key_system_iter =
concrete_key_system_map_.find(concrete_key_system);
if (key_system_iter == concrete_key_system_map_.end()) {
@@ -342,24 +445,46 @@ std::string KeySystems::GetPepperType(const std::string& concrete_key_system) {
DLOG_IF(FATAL, type.empty()) << concrete_key_system << " is not Pepper-based";
return type;
}
-#elif defined(OS_ANDROID)
-std::vector<uint8> KeySystems::GetUUID(const std::string& concrete_key_system) {
- KeySystemPropertiesMap::iterator key_system_iter =
- concrete_key_system_map_.find(concrete_key_system);
- if (key_system_iter == concrete_key_system_map_.end()) {
- DLOG(FATAL) << concrete_key_system << " is not a known concrete system";
- return std::vector<uint8>();
- }
+#endif
+
+void KeySystems::AddContainerMask(const std::string& container, uint32 mask) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(container_codec_masks_.find(container) ==
+ container_codec_masks_.end());
- return key_system_iter->second.uuid;
+ container_codec_masks_[container] = static_cast<EmeCodec>(mask);
+}
+
+void KeySystems::AddCodecMask(const std::string& codec, uint32 mask) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(codec_masks_.find(codec) == codec_masks_.end());
+
+ codec_masks_[codec] = static_cast<EmeCodec>(mask);
}
-#endif
//------------------------------------------------------------------------------
-bool IsConcreteSupportedKeySystem(const blink::WebString& key_system) {
- return KeySystems::GetInstance().IsConcreteSupportedKeySystem(
- ToASCIIOrEmpty(key_system));
+std::string GetUnprefixedKeySystemName(const std::string& key_system) {
+ if (key_system == kClearKeyKeySystem)
+ return kUnsupportedClearKeyKeySystem;
+
+ if (key_system == kPrefixedClearKeyKeySystem)
+ return kClearKeyKeySystem;
+
+ return key_system;
+}
+
+std::string GetPrefixedKeySystemName(const std::string& key_system) {
+ DCHECK_NE(key_system, kPrefixedClearKeyKeySystem);
+
+ if (key_system == kClearKeyKeySystem)
+ return kPrefixedClearKeyKeySystem;
+
+ return key_system;
+}
+
+bool IsConcreteSupportedKeySystem(const std::string& key_system) {
+ return KeySystems::GetInstance().IsConcreteSupportedKeySystem(key_system);
}
bool IsSupportedKeySystemWithMediaMimeType(
@@ -370,12 +495,14 @@ bool IsSupportedKeySystemWithMediaMimeType(
mime_type, codecs, key_system);
}
-std::string KeySystemNameForUMA(const blink::WebString& key_system) {
- return KeySystemNameForUMAInternal(key_system);
-}
-
std::string KeySystemNameForUMA(const std::string& key_system) {
- return KeySystemNameForUMAInternal(blink::WebString::fromUTF8(key_system));
+ if (key_system == kClearKeyKeySystem)
+ return "ClearKey";
+#if defined(WIDEVINE_CDM_AVAILABLE)
+ if (key_system == kWidevineKeySystem)
+ return "Widevine";
+#endif // WIDEVINE_CDM_AVAILABLE
+ return "Unknown";
}
bool CanUseAesDecryptor(const std::string& concrete_key_system) {
@@ -386,10 +513,21 @@ bool CanUseAesDecryptor(const std::string& concrete_key_system) {
std::string GetPepperType(const std::string& concrete_key_system) {
return KeySystems::GetInstance().GetPepperType(concrete_key_system);
}
-#elif defined(OS_ANDROID)
-std::vector<uint8> GetUUID(const std::string& concrete_key_system) {
- return KeySystems::GetInstance().GetUUID(concrete_key_system);
-}
#endif
+// These two functions are for testing purpose only. The declaration in the
+// header file is guarded by "#if defined(UNIT_TEST)" so that they can be used
+// by tests but not non-test code. However, this .cc file is compiled as part of
+// "content" where "UNIT_TEST" is not defined. So we need to specify
+// "CONTENT_EXPORT" here again so that they are visible to tests.
+
+CONTENT_EXPORT void AddContainerMask(const std::string& container,
+ uint32 mask) {
+ KeySystems::GetInstance().AddContainerMask(container, mask);
+}
+
+CONTENT_EXPORT void AddCodecMask(const std::string& codec, uint32 mask) {
+ KeySystems::GetInstance().AddCodecMask(codec, mask);
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/crypto/key_systems.h b/chromium/content/renderer/media/crypto/key_systems.h
index 2f4cb101b09..99825d1997a 100644
--- a/chromium/content/renderer/media/crypto/key_systems.h
+++ b/chromium/content/renderer/media/crypto/key_systems.h
@@ -11,20 +11,26 @@
#include "base/memory/scoped_ptr.h"
#include "content/common/content_export.h"
-namespace blink {
-class WebString;
-}
-
namespace content {
+// Prefixed EME API only supports prefixed (webkit-) key system name for
+// certain key systems. But internally only unprefixed key systems are
+// supported. The following two functions help convert between prefixed and
+// unprefixed key system names.
+
+// Gets the unprefixed key system name for |key_system|.
+std::string GetUnprefixedKeySystemName(const std::string& key_system);
+
+// Gets the prefixed key system name for |key_system|.
+std::string GetPrefixedKeySystemName(const std::string& key_system);
+
// Returns whether |key_system| is a real supported key system that can be
// instantiated.
// Abstract parent |key_system| strings will return false.
// Call IsSupportedKeySystemWithMediaMimeType() to determine whether a
// |key_system| supports a specific type of media or to check parent key
// systems.
-CONTENT_EXPORT bool IsConcreteSupportedKeySystem(
- const blink::WebString& key_system);
+CONTENT_EXPORT bool IsConcreteSupportedKeySystem(const std::string& key_system);
// Returns whether |key_sytem| supports the specified media type and codec(s).
CONTENT_EXPORT bool IsSupportedKeySystemWithMediaMimeType(
@@ -33,8 +39,6 @@ CONTENT_EXPORT bool IsSupportedKeySystemWithMediaMimeType(
const std::string& key_system);
// Returns a name for |key_system| suitable to UMA logging.
-CONTENT_EXPORT std::string KeySystemNameForUMA(
- const blink::WebString& key_system);
CONTENT_EXPORT std::string KeySystemNameForUMA(const std::string& key_system);
// Returns whether AesDecryptor can be used for the given |concrete_key_system|.
@@ -45,12 +49,14 @@ CONTENT_EXPORT bool CanUseAesDecryptor(const std::string& concrete_key_system);
// Returns empty string if |concrete_key_system| is unknown or not Pepper-based.
CONTENT_EXPORT std::string GetPepperType(
const std::string& concrete_key_system);
-#elif defined(OS_ANDROID)
-// Convert |concrete_key_system| to 16-byte Android UUID.
-CONTENT_EXPORT std::vector<uint8> GetUUID(
- const std::string& concrete_key_system);
#endif
+#if defined(UNIT_TEST)
+// Helper functions to add container/codec types for testing purposes.
+CONTENT_EXPORT void AddContainerMask(const std::string& container, uint32 mask);
+CONTENT_EXPORT void AddCodecMask(const std::string& codec, uint32 mask);
+#endif // defined(UNIT_TEST)
+
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_CRYPTO_KEY_SYSTEMS_H_
diff --git a/chromium/content/renderer/media/crypto/key_systems_unittest.cc b/chromium/content/renderer/media/crypto/key_systems_unittest.cc
index 89fa51ab19a..e52d361322a 100644
--- a/chromium/content/renderer/media/crypto/key_systems_unittest.cc
+++ b/chromium/content/renderer/media/crypto/key_systems_unittest.cc
@@ -33,39 +33,61 @@
#endif // defined(NDEBUG)
#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+namespace content {
+
using blink::WebString;
// These are the (fake) key systems that are registered for these tests.
// kUsesAes uses the AesDecryptor like Clear Key.
// kExternal uses an external CDM, such as Pepper-based or Android platform CDM.
-static const char kUsesAes[] = "org.example.clear";
-static const char kUsesAesParent[] = "org.example"; // Not registered.
-static const char kExternal[] = "com.example.test";
-static const char kExternalParent[] = "com.example";
-
-static const char kPrefixedClearKey[] = "webkit-org.w3.clearkey";
-static const char kUnprefixedClearKey[] = "org.w3.clearkey";
-static const char kExternalClearKey[] = "org.chromium.externalclearkey";
-
-static const char kAudioWebM[] = "audio/webm";
-static const char kVideoWebM[] = "video/webm";
-static const char kWebMAudioCodecs[] = "vorbis";
-static const char kWebMVideoCodecs[] = "vorbis,vp8,vp8.0";
-
-static const char kAudioFoo[] = "audio/foo";
-static const char kVideoFoo[] = "video/foo";
-static const char kFooAudioCodecs[] = "fooaudio";
-static const char kFooVideoCodecs[] = "fooaudio,foovideo";
-
-namespace content {
-
-// Helper functions that handle the WebString conversion to simplify tests.
-static std::string KeySystemNameForUMAUTF8(const std::string& key_system) {
- return KeySystemNameForUMA(WebString::fromUTF8(key_system));
-}
+const char kUsesAes[] = "org.example.clear";
+const char kUsesAesParent[] = "org.example"; // Not registered.
+const char kExternal[] = "com.example.test";
+const char kExternalParent[] = "com.example";
+
+const char kClearKey[] = "org.w3.clearkey";
+const char kPrefixedClearKey[] = "webkit-org.w3.clearkey";
+const char kExternalClearKey[] = "org.chromium.externalclearkey";
+
+const char kAudioWebM[] = "audio/webm";
+const char kVideoWebM[] = "video/webm";
+const char kAudioFoo[] = "audio/foo";
+const char kVideoFoo[] = "video/foo";
+
+// Pick some arbitrary bit fields as long as they are not in conflict with the
+// real ones.
+enum TestCodec {
+ TEST_CODEC_FOO_AUDIO = 1 << 10, // An audio codec for foo container.
+ TEST_CODEC_FOO_AUDIO_ALL = TEST_CODEC_FOO_AUDIO,
+ TEST_CODEC_FOO_VIDEO = 1 << 11, // A video codec for foo container.
+ TEST_CODEC_FOO_VIDEO_ALL = TEST_CODEC_FOO_VIDEO,
+ TEST_CODEC_FOO_ALL = TEST_CODEC_FOO_AUDIO_ALL | TEST_CODEC_FOO_VIDEO_ALL
+};
-static bool IsConcreteSupportedKeySystemUTF8(const std::string& key_system) {
- return IsConcreteSupportedKeySystem(WebString::fromUTF8(key_system));
+COMPILE_ASSERT((TEST_CODEC_FOO_ALL & EME_CODEC_ALL) == EME_CODEC_NONE,
+ test_codec_masks_should_only_use_invalid_codec_masks);
+
+// Adds test container and codec masks.
+// This function must be called after SetContentClient() is called.
+// More details: AddXxxMask() will create KeySystems if it hasn't been created.
+// During KeySystems's construction GetContentClient() will be used to add key
+// systems. In test code, the content client is set by SetContentClient().
+// Therefore, SetContentClient() must be called before this function to avoid
+// access violation.
+static void AddContainerAndCodecMasksForTest() {
+ // Since KeySystems is a singleton. Make sure we only add test container and
+ // codec masks once per process.
+ static bool is_test_masks_added = false;
+
+ if (is_test_masks_added)
+ return;
+
+ AddContainerMask("audio/foo", TEST_CODEC_FOO_AUDIO_ALL);
+ AddContainerMask("video/foo", TEST_CODEC_FOO_ALL);
+ AddCodecMask("fooaudio", TEST_CODEC_FOO_AUDIO);
+ AddCodecMask("foovideo", TEST_CODEC_FOO_VIDEO);
+
+ is_test_masks_added = true;
}
class TestContentRendererClient : public ContentRendererClient {
@@ -75,40 +97,19 @@ class TestContentRendererClient : public ContentRendererClient {
void TestContentRendererClient::AddKeySystems(
std::vector<content::KeySystemInfo>* key_systems) {
-#if defined(OS_ANDROID)
- static const uint8 kExternalUuid[16] = {
- 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
- 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };
-#endif
-
KeySystemInfo aes(kUsesAes);
-
- aes.supported_types.push_back(std::make_pair(kAudioWebM, kWebMAudioCodecs));
- aes.supported_types.push_back(std::make_pair(kVideoWebM, kWebMVideoCodecs));
-
- aes.supported_types.push_back(std::make_pair(kAudioFoo, kFooAudioCodecs));
- aes.supported_types.push_back(std::make_pair(kVideoFoo, kFooVideoCodecs));
-
+ aes.supported_codecs = EME_CODEC_WEBM_ALL;
+ aes.supported_codecs |= TEST_CODEC_FOO_ALL;
aes.use_aes_decryptor = true;
-
key_systems->push_back(aes);
KeySystemInfo ext(kExternal);
-
- ext.supported_types.push_back(std::make_pair(kAudioWebM, kWebMAudioCodecs));
- ext.supported_types.push_back(std::make_pair(kVideoWebM, kWebMVideoCodecs));
-
- ext.supported_types.push_back(std::make_pair(kAudioFoo, kFooAudioCodecs));
- ext.supported_types.push_back(std::make_pair(kVideoFoo, kFooVideoCodecs));
-
+ ext.supported_codecs = EME_CODEC_WEBM_ALL;
+ ext.supported_codecs |= TEST_CODEC_FOO_ALL;
ext.parent_key_system = kExternalParent;
-
#if defined(ENABLE_PEPPER_CDMS)
ext.pepper_type = "application/x-ppapi-external-cdm";
-#elif defined(OS_ANDROID)
- ext.uuid.assign(kExternalUuid, kExternalUuid + arraysize(kExternalUuid));
#endif // defined(ENABLE_PEPPER_CDMS)
-
key_systems->push_back(ext);
}
@@ -119,11 +120,18 @@ class KeySystemsTest : public testing::Test {
vp80_codec_.push_back("vp8.0");
+ vp9_codec_.push_back("vp9");
+
+ vp90_codec_.push_back("vp9.0");
+
vorbis_codec_.push_back("vorbis");
vp8_and_vorbis_codecs_.push_back("vp8");
vp8_and_vorbis_codecs_.push_back("vorbis");
+ vp9_and_vorbis_codecs_.push_back("vp9");
+ vp9_and_vorbis_codecs_.push_back("vorbis");
+
foovideo_codec_.push_back("foovideo");
foovideo_extended_codec_.push_back("foovideo.4D400C");
@@ -151,6 +159,10 @@ class KeySystemsTest : public testing::Test {
SetRendererClientForTesting(&content_renderer_client_);
}
+ virtual void SetUp() OVERRIDE {
+ AddContainerAndCodecMasksForTest();
+ }
+
virtual ~KeySystemsTest() {
// Clear the use of content_client_, which was set in SetUp().
SetContentClient(NULL);
@@ -162,10 +174,17 @@ class KeySystemsTest : public testing::Test {
const CodecVector& vp8_codec() const { return vp8_codec_; }
const CodecVector& vp80_codec() const { return vp80_codec_; }
+ const CodecVector& vp9_codec() const { return vp9_codec_; }
+ const CodecVector& vp90_codec() const { return vp90_codec_; }
+
const CodecVector& vorbis_codec() const { return vorbis_codec_; }
+
const CodecVector& vp8_and_vorbis_codecs() const {
return vp8_and_vorbis_codecs_;
}
+ const CodecVector& vp9_and_vorbis_codecs() const {
+ return vp9_and_vorbis_codecs_;
+ }
const CodecVector& foovideo_codec() const { return foovideo_codec_; }
const CodecVector& foovideo_extended_codec() const {
@@ -183,11 +202,13 @@ class KeySystemsTest : public testing::Test {
private:
const CodecVector no_codecs_;
-
CodecVector vp8_codec_;
CodecVector vp80_codec_;
+ CodecVector vp9_codec_;
+ CodecVector vp90_codec_;
CodecVector vorbis_codec_;
CodecVector vp8_and_vorbis_codecs_;
+ CodecVector vp9_and_vorbis_codecs_;
CodecVector foovideo_codec_;
CodecVector foovideo_extended_codec_;
@@ -203,33 +224,39 @@ class KeySystemsTest : public testing::Test {
TestContentRendererClient content_renderer_client_;
};
-// TODO(ddorwin): Consider moving GetUUID() into these tests or moving
-// GetPepperType() calls out to their own test.
+// TODO(ddorwin): Consider moving GetPepperType() calls out to their own test.
+
+TEST_F(KeySystemsTest, EmptyKeySystem) {
+ EXPECT_FALSE(IsConcreteSupportedKeySystem(std::string()));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, no_codecs(), std::string()));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(std::string()));
+}
// Clear Key is the only key system registered in content.
TEST_F(KeySystemsTest, ClearKey) {
- EXPECT_TRUE(IsConcreteSupportedKeySystemUTF8(kPrefixedClearKey));
+ EXPECT_TRUE(IsConcreteSupportedKeySystem(kClearKey));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
- kVideoWebM, no_codecs(), kPrefixedClearKey));
+ kVideoWebM, no_codecs(), kClearKey));
- EXPECT_EQ("ClearKey", KeySystemNameForUMAUTF8(kPrefixedClearKey));
+ EXPECT_EQ("ClearKey", KeySystemNameForUMA(kClearKey));
- // Not yet out from behind the vendor prefix.
- EXPECT_FALSE(IsConcreteSupportedKeySystem(kUnprefixedClearKey));
+ // Prefixed Clear Key is not supported internally.
+ EXPECT_FALSE(IsConcreteSupportedKeySystem(kPrefixedClearKey));
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
- kVideoWebM, no_codecs(), kUnprefixedClearKey));
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kUnprefixedClearKey));
+ kVideoWebM, no_codecs(), kPrefixedClearKey));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kPrefixedClearKey));
}
// The key system is not registered and therefore is unrecognized.
TEST_F(KeySystemsTest, Basic_UnrecognizedKeySystem) {
static const char* const kUnrecognized = "org.example.unrecognized";
- EXPECT_FALSE(IsConcreteSupportedKeySystemUTF8(kUnrecognized));
+ EXPECT_FALSE(IsConcreteSupportedKeySystem(kUnrecognized));
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, no_codecs(), kUnrecognized));
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kUnrecognized));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kUnrecognized));
bool can_use = false;
EXPECT_DEBUG_DEATH_PORTABLE(
@@ -246,12 +273,12 @@ TEST_F(KeySystemsTest, Basic_UnrecognizedKeySystem) {
}
TEST_F(KeySystemsTest, Basic_UsesAesDecryptor) {
- EXPECT_TRUE(IsConcreteSupportedKeySystemUTF8(kUsesAes));
+ EXPECT_TRUE(IsConcreteSupportedKeySystem(kUsesAes));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, no_codecs(), kUsesAes));
// No UMA value for this test key system.
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kUsesAes));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kUsesAes));
EXPECT_TRUE(CanUseAesDecryptor(kUsesAes));
#if defined(ENABLE_PEPPER_CDMS)
@@ -272,6 +299,12 @@ TEST_F(KeySystemsTest,
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, vp8_and_vorbis_codecs(), kUsesAes));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_codec(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp90_codec(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_and_vorbis_codecs(), kUsesAes));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, vorbis_codec(), kUsesAes));
// Non-Webm codecs.
@@ -293,6 +326,10 @@ TEST_F(KeySystemsTest,
kAudioWebM, vp8_codec(), kUsesAes));
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
kAudioWebM, vp8_and_vorbis_codecs(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp9_codec(), kUsesAes));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp9_and_vorbis_codecs(), kUsesAes));
// Non-Webm codec.
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
@@ -301,12 +338,12 @@ TEST_F(KeySystemsTest,
// No parent is registered for UsesAes.
TEST_F(KeySystemsTest, Parent_NoParentRegistered) {
- EXPECT_FALSE(IsConcreteSupportedKeySystemUTF8(kUsesAesParent));
+ EXPECT_FALSE(IsConcreteSupportedKeySystem(kUsesAesParent));
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, no_codecs(), kUsesAesParent));
// The parent is not supported for most things.
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kUsesAesParent));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kUsesAesParent));
bool result = false;
EXPECT_DEBUG_DEATH_PORTABLE(result = CanUseAesDecryptor(kUsesAesParent),
"org.example is not a known concrete system");
@@ -321,7 +358,7 @@ TEST_F(KeySystemsTest, Parent_NoParentRegistered) {
TEST_F(KeySystemsTest, IsSupportedKeySystem_InvalidVariants) {
// Case sensitive.
- EXPECT_FALSE(IsConcreteSupportedKeySystemUTF8("org.example.ClEaR"));
+ EXPECT_FALSE(IsConcreteSupportedKeySystem("org.example.ClEaR"));
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, no_codecs(), "org.example.ClEaR"));
@@ -422,7 +459,7 @@ TEST_F(KeySystemsTest,
//
TEST_F(KeySystemsTest, Basic_ExternalDecryptor) {
- EXPECT_TRUE(IsConcreteSupportedKeySystemUTF8(kExternal));
+ EXPECT_TRUE(IsConcreteSupportedKeySystem(kExternal));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, no_codecs(), kExternal));
@@ -435,12 +472,12 @@ TEST_F(KeySystemsTest, Basic_ExternalDecryptor) {
TEST_F(KeySystemsTest, Parent_ParentRegistered) {
// The parent system is not a concrete system but is supported.
- EXPECT_FALSE(IsConcreteSupportedKeySystemUTF8(kExternalParent));
+ EXPECT_FALSE(IsConcreteSupportedKeySystem(kExternalParent));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, no_codecs(), kExternalParent));
// The parent is not supported for most things.
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kExternalParent));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kExternalParent));
bool result = false;
EXPECT_DEBUG_DEATH_PORTABLE(result = CanUseAesDecryptor(kExternalParent),
"com.example is not a known concrete system");
@@ -466,6 +503,12 @@ TEST_F(
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, vp8_and_vorbis_codecs(), kExternal));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_codec(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp90_codec(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_and_vorbis_codecs(), kExternal));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, vorbis_codec(), kExternal));
// Valid video types - parent key system.
@@ -478,6 +521,12 @@ TEST_F(
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, vp8_and_vorbis_codecs(), kExternalParent));
EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_codec(), kExternalParent));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp90_codec(), kExternalParent));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
+ kVideoWebM, vp9_and_vorbis_codecs(), kExternalParent));
+ EXPECT_TRUE(IsSupportedKeySystemWithMediaMimeType(
kVideoWebM, vorbis_codec(), kExternalParent));
// Non-Webm codecs.
@@ -505,6 +554,10 @@ TEST_F(
kAudioWebM, vp8_codec(), kExternal));
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
kAudioWebM, vp8_and_vorbis_codecs(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp9_codec(), kExternal));
+ EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
+ kAudioWebM, vp9_and_vorbis_codecs(), kExternal));
// Non-Webm codec.
EXPECT_FALSE(IsSupportedKeySystemWithMediaMimeType(
@@ -574,43 +627,20 @@ TEST_F(
kAudioFoo, vorbis_codec(), kExternal));
}
-#if defined(OS_ANDROID)
-TEST_F(KeySystemsTest, GetUUID_RegisteredExternalDecryptor) {
- std::vector<uint8> uuid = GetUUID(kExternal);
- EXPECT_EQ(16u, uuid.size());
- EXPECT_EQ(0xef, uuid[15]);
-}
-
-TEST_F(KeySystemsTest, GetUUID_RegisteredAesDecryptor) {
- EXPECT_TRUE(GetUUID(kUsesAes).empty());
-}
-
-TEST_F(KeySystemsTest, GetUUID_Unrecognized) {
- std::vector<uint8> uuid;
- EXPECT_DEBUG_DEATH_PORTABLE(uuid = GetUUID(kExternalParent),
- "com.example is not a known concrete system");
- EXPECT_TRUE(uuid.empty());
-
- EXPECT_DEBUG_DEATH_PORTABLE(uuid = GetUUID(""), " is not a concrete system");
- EXPECT_TRUE(uuid.empty());
-}
-#endif // defined(OS_ANDROID)
-
TEST_F(KeySystemsTest, KeySystemNameForUMA) {
- EXPECT_EQ("ClearKey", KeySystemNameForUMAUTF8(kPrefixedClearKey));
- // Unprefixed is not yet supported.
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kUnprefixedClearKey));
+ EXPECT_EQ("ClearKey", KeySystemNameForUMA(kClearKey));
+ // Prefixed is not supported internally.
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kPrefixedClearKey));
// External Clear Key never has a UMA name.
- EXPECT_EQ("Unknown", KeySystemNameForUMAUTF8(kExternalClearKey));
+ EXPECT_EQ("Unknown", KeySystemNameForUMA(kExternalClearKey));
#if defined(WIDEVINE_CDM_AVAILABLE)
const char* const kTestWidevineUmaName = "Widevine";
#else
const char* const kTestWidevineUmaName = "Unknown";
#endif
- EXPECT_EQ(kTestWidevineUmaName,
- KeySystemNameForUMAUTF8("com.widevine.alpha"));
+ EXPECT_EQ(kTestWidevineUmaName, KeySystemNameForUMA("com.widevine.alpha"));
}
} // namespace content
diff --git a/chromium/content/renderer/media/crypto/pepper_cdm_wrapper.h b/chromium/content/renderer/media/crypto/pepper_cdm_wrapper.h
new file mode 100644
index 00000000000..13ec9e989df
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/pepper_cdm_wrapper.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_CRYPTO_PEPPER_CDM_WRAPPER_H_
+#define CONTENT_RENDERER_MEDIA_CRYPTO_PEPPER_CDM_WRAPPER_H_
+
+#if !defined(ENABLE_PEPPER_CDMS)
+#error This file should only be included when ENABLE_PEPPER_CDMS is defined
+#endif
+
+#include <string>
+
+#include "base/callback.h"
+
+class GURL;
+
+namespace content {
+class ContentDecryptorDelegate;
+
+// PepperCdmWrapper provides access to the Pepper CDM instance.
+class PepperCdmWrapper {
+ public:
+ virtual ~PepperCdmWrapper() {}
+
+ // Returns the ContentDecryptorDelegate* associated with this plugin.
+ virtual ContentDecryptorDelegate* GetCdmDelegate() = 0;
+
+ protected:
+ PepperCdmWrapper() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PepperCdmWrapper);
+};
+
+// Callback used to create a PepperCdmWrapper. This may return null if the
+// Pepper CDM can not be created.
+typedef base::Callback<scoped_ptr<PepperCdmWrapper>(
+ const std::string& pluginType,
+ const GURL& security_origin)> CreatePepperCdmCB;
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_CRYPTO_PEPPER_CDM_WRAPPER_H_
diff --git a/chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.cc b/chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.cc
new file mode 100644
index 00000000000..c443e031fc6
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.cc
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(ENABLE_PEPPER_CDMS)
+#include "content/renderer/media/crypto/pepper_cdm_wrapper_impl.h"
+
+#include "content/renderer/pepper/pepper_plugin_instance_impl.h"
+#include "content/renderer/pepper/pepper_webplugin_impl.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/WebKit/public/web/WebDocument.h"
+#include "third_party/WebKit/public/web/WebElement.h"
+#include "third_party/WebKit/public/web/WebFrame.h"
+#include "third_party/WebKit/public/web/WebHelperPlugin.h"
+#include "third_party/WebKit/public/web/WebPlugin.h"
+#include "third_party/WebKit/public/web/WebPluginContainer.h"
+#include "third_party/WebKit/public/web/WebView.h"
+
+namespace content {
+
+void WebHelperPluginDeleter::operator()(blink::WebHelperPlugin* plugin) const {
+ plugin->destroy();
+}
+
+scoped_ptr<PepperCdmWrapper> PepperCdmWrapperImpl::Create(
+ blink::WebLocalFrame* frame,
+ const std::string& pluginType,
+ const GURL& security_origin) {
+ DCHECK(frame);
+ ScopedHelperPlugin helper_plugin(blink::WebHelperPlugin::create(
+ blink::WebString::fromUTF8(pluginType), frame));
+ if (!helper_plugin)
+ return scoped_ptr<PepperCdmWrapper>();
+
+ blink::WebPlugin* plugin = helper_plugin->getPlugin();
+ DCHECK(!plugin->isPlaceholder()); // Prevented by Blink.
+
+ // Only Pepper plugins are supported, so it must ultimately be a ppapi object.
+ PepperWebPluginImpl* ppapi_plugin = static_cast<PepperWebPluginImpl*>(plugin);
+ scoped_refptr<PepperPluginInstanceImpl> plugin_instance =
+ ppapi_plugin->instance();
+ if (!plugin_instance)
+ return scoped_ptr<PepperCdmWrapper>();
+
+ GURL url(plugin_instance->container()->element().document().url());
+ CHECK_EQ(security_origin.GetOrigin(), url.GetOrigin())
+ << "Pepper instance has a different origin than the EME call.";
+
+ if (!plugin_instance->GetContentDecryptorDelegate())
+ return scoped_ptr<PepperCdmWrapper>();
+
+ return scoped_ptr<PepperCdmWrapper>(
+ new PepperCdmWrapperImpl(helper_plugin.Pass(), plugin_instance));
+}
+
+PepperCdmWrapperImpl::PepperCdmWrapperImpl(
+ ScopedHelperPlugin helper_plugin,
+ const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance)
+ : helper_plugin_(helper_plugin.Pass()),
+ plugin_instance_(plugin_instance) {
+ DCHECK(helper_plugin_);
+ DCHECK(plugin_instance_);
+ // Plugin must be a CDM.
+ DCHECK(plugin_instance_->GetContentDecryptorDelegate());
+}
+
+PepperCdmWrapperImpl::~PepperCdmWrapperImpl() {
+ // Destroy the nested objects in reverse order.
+ plugin_instance_ = NULL;
+ helper_plugin_.reset();
+}
+
+ContentDecryptorDelegate* PepperCdmWrapperImpl::GetCdmDelegate() {
+ return plugin_instance_->GetContentDecryptorDelegate();
+}
+
+} // namespace content
+
+#endif // defined(ENABLE_PEPPER_CDMS)
diff --git a/chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.h b/chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.h
new file mode 100644
index 00000000000..7f54f0c74dc
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/pepper_cdm_wrapper_impl.h
@@ -0,0 +1,69 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_CRYPTO_PEPPER_CDM_WRAPPER_IMPL_H_
+#define CONTENT_RENDERER_MEDIA_CRYPTO_PEPPER_CDM_WRAPPER_IMPL_H_
+
+#if !defined(ENABLE_PEPPER_CDMS)
+#error This file should only be included when ENABLE_PEPPER_CDMS is defined
+#endif
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "content/renderer/media/crypto/pepper_cdm_wrapper.h"
+
+namespace blink {
+class WebHelperPlugin;
+class WebLocalFrame;
+}
+
+namespace content {
+
+class ContentDecryptorDelegate;
+class PepperPluginInstanceImpl;
+
+// Deleter for blink::WebHelperPlugin.
+struct WebHelperPluginDeleter {
+ void operator()(blink::WebHelperPlugin* plugin) const;
+};
+
+// Implements a wrapper on blink::WebHelperPlugin so that the plugin gets
+// destroyed properly. It owns all the objects derived from WebHelperPlugin
+// (WebPlugin, PepperPluginInstanceImpl, ContentDecryptionDelegate), and will
+// free them as necessary when this wrapper is destroyed. In particular, it
+// takes a reference to PepperPluginInstanceImpl so it won't go away until
+// this object is destroyed.
+//
+// Implemented so that lower layers in Chromium don't need to be aware of
+// blink:: objects.
+class PepperCdmWrapperImpl : public PepperCdmWrapper {
+ public:
+ static scoped_ptr<PepperCdmWrapper> Create(blink::WebLocalFrame* frame,
+ const std::string& pluginType,
+ const GURL& security_origin);
+
+ virtual ~PepperCdmWrapperImpl();
+
+ // Returns the ContentDecryptorDelegate* associated with this plugin.
+ virtual ContentDecryptorDelegate* GetCdmDelegate() OVERRIDE;
+
+ private:
+ typedef scoped_ptr<blink::WebHelperPlugin, WebHelperPluginDeleter>
+ ScopedHelperPlugin;
+
+ // Takes ownership of |helper_plugin| and |plugin_instance|.
+ PepperCdmWrapperImpl(
+ ScopedHelperPlugin helper_plugin,
+ const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance);
+
+ ScopedHelperPlugin helper_plugin_;
+ scoped_refptr<PepperPluginInstanceImpl> plugin_instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(PepperCdmWrapperImpl);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_CRYPTO_PEPPER_CDM_WRAPPER_IMPL_H_
diff --git a/chromium/content/renderer/media/crypto/ppapi_decryptor.cc b/chromium/content/renderer/media/crypto/ppapi_decryptor.cc
index aff94a240e5..dd07aaacc69 100644
--- a/chromium/content/renderer/media/crypto/ppapi_decryptor.cc
+++ b/chromium/content/renderer/media/crypto/ppapi_decryptor.cc
@@ -7,14 +7,17 @@
#include <string>
#include "base/bind.h"
+#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "content/renderer/media/crypto/key_systems.h"
#include "content/renderer/pepper/content_decryptor_delegate.h"
#include "content/renderer/pepper/pepper_plugin_instance_impl.h"
#include "media/base/audio_decoder_config.h"
+#include "media/base/cdm_promise.h"
#include "media/base/data_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_decoder_config.h"
@@ -22,140 +25,179 @@
namespace content {
+// This class is needed so that resolving an Update() promise triggers playback
+// of the stream. It intercepts the resolve() call to invoke an additional
+// callback.
+class SessionUpdatedPromise : public media::SimpleCdmPromise {
+ public:
+ SessionUpdatedPromise(scoped_ptr<media::SimpleCdmPromise> caller_promise,
+ base::Closure additional_resolve_cb)
+ : caller_promise_(caller_promise.Pass()),
+ additional_resolve_cb_(additional_resolve_cb) {}
+
+ virtual void resolve() OVERRIDE {
+ DCHECK(is_pending_);
+ is_pending_ = false;
+ additional_resolve_cb_.Run();
+ caller_promise_->resolve();
+ }
+
+ virtual void reject(media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) OVERRIDE {
+ DCHECK(is_pending_);
+ is_pending_ = false;
+ caller_promise_->reject(exception_code, system_code, error_message);
+ }
+
+ protected:
+ scoped_ptr<media::SimpleCdmPromise> caller_promise_;
+ base::Closure additional_resolve_cb_;
+};
+
scoped_ptr<PpapiDecryptor> PpapiDecryptor::Create(
const std::string& key_system,
- const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance,
- const media::SessionCreatedCB& session_created_cb,
+ const GURL& security_origin,
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
const media::SessionMessageCB& session_message_cb,
const media::SessionReadyCB& session_ready_cb,
const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb,
- const base::Closure& destroy_plugin_cb) {
- ContentDecryptorDelegate* plugin_cdm_delegate =
- plugin_instance->GetContentDecryptorDelegate();
- if (!plugin_cdm_delegate) {
- DVLOG(1) << "PpapiDecryptor: plugin cdm delegate creation failed.";
+ const media::SessionErrorCB& session_error_cb) {
+ std::string plugin_type = GetPepperType(key_system);
+ DCHECK(!plugin_type.empty());
+ scoped_ptr<PepperCdmWrapper> pepper_cdm_wrapper =
+ create_pepper_cdm_cb.Run(plugin_type, security_origin);
+ if (!pepper_cdm_wrapper) {
+ DLOG(ERROR) << "Plugin instance creation failed.";
return scoped_ptr<PpapiDecryptor>();
}
- plugin_cdm_delegate->Initialize(key_system);
-
- return scoped_ptr<PpapiDecryptor>(new PpapiDecryptor(plugin_instance,
- plugin_cdm_delegate,
- session_created_cb,
- session_message_cb,
- session_ready_cb,
- session_closed_cb,
- session_error_cb,
- destroy_plugin_cb));
+ return scoped_ptr<PpapiDecryptor>(
+ new PpapiDecryptor(key_system,
+ pepper_cdm_wrapper.Pass(),
+ session_message_cb,
+ session_ready_cb,
+ session_closed_cb,
+ session_error_cb));
}
PpapiDecryptor::PpapiDecryptor(
- const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance,
- ContentDecryptorDelegate* plugin_cdm_delegate,
- const media::SessionCreatedCB& session_created_cb,
+ const std::string& key_system,
+ scoped_ptr<PepperCdmWrapper> pepper_cdm_wrapper,
const media::SessionMessageCB& session_message_cb,
const media::SessionReadyCB& session_ready_cb,
const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb,
- const base::Closure& destroy_plugin_cb)
- : plugin_instance_(plugin_instance),
- plugin_cdm_delegate_(plugin_cdm_delegate),
- session_created_cb_(session_created_cb),
+ const media::SessionErrorCB& session_error_cb)
+ : pepper_cdm_wrapper_(pepper_cdm_wrapper.Pass()),
session_message_cb_(session_message_cb),
session_ready_cb_(session_ready_cb),
session_closed_cb_(session_closed_cb),
session_error_cb_(session_error_cb),
- destroy_plugin_cb_(destroy_plugin_cb),
render_loop_proxy_(base::MessageLoopProxy::current()),
weak_ptr_factory_(this) {
- DCHECK(plugin_instance_.get());
- DCHECK(!session_created_cb_.is_null());
+ DCHECK(pepper_cdm_wrapper_.get());
DCHECK(!session_message_cb_.is_null());
DCHECK(!session_ready_cb_.is_null());
DCHECK(!session_closed_cb_.is_null());
DCHECK(!session_error_cb_.is_null());
- DCHECK(!destroy_plugin_cb_.is_null());
-
- weak_this_ = weak_ptr_factory_.GetWeakPtr();
- plugin_cdm_delegate_->SetSessionEventCallbacks(
- base::Bind(&PpapiDecryptor::OnSessionCreated, weak_this_),
- base::Bind(&PpapiDecryptor::OnSessionMessage, weak_this_),
- base::Bind(&PpapiDecryptor::OnSessionReady, weak_this_),
- base::Bind(&PpapiDecryptor::OnSessionClosed, weak_this_),
- base::Bind(&PpapiDecryptor::OnSessionError, weak_this_));
+ base::WeakPtr<PpapiDecryptor> weak_this = weak_ptr_factory_.GetWeakPtr();
+ CdmDelegate()->Initialize(
+ key_system,
+ base::Bind(&PpapiDecryptor::OnSessionMessage, weak_this),
+ base::Bind(&PpapiDecryptor::OnSessionReady, weak_this),
+ base::Bind(&PpapiDecryptor::OnSessionClosed, weak_this),
+ base::Bind(&PpapiDecryptor::OnSessionError, weak_this),
+ base::Bind(&PpapiDecryptor::OnFatalPluginError, weak_this));
}
PpapiDecryptor::~PpapiDecryptor() {
- plugin_cdm_delegate_ = NULL;
- plugin_instance_ = NULL;
- destroy_plugin_cb_.Run();
+ pepper_cdm_wrapper_.reset();
}
-bool PpapiDecryptor::CreateSession(uint32 session_id,
- const std::string& type,
- const uint8* init_data,
- int init_data_length) {
+void PpapiDecryptor::CreateSession(
+ const std::string& init_data_type,
+ const uint8* init_data,
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
DVLOG(2) << __FUNCTION__;
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- DCHECK(plugin_cdm_delegate_);
- if (!plugin_cdm_delegate_->CreateSession(
- session_id, type, init_data, init_data_length)) {
- ReportFailureToCallPlugin(session_id);
- return false;
+ if (!CdmDelegate()) {
+ promise->reject(INVALID_STATE_ERROR, 0, "CdmDelegate() does not exist.");
+ return;
}
- return true;
+ CdmDelegate()->CreateSession(init_data_type,
+ init_data,
+ init_data_length,
+ session_type,
+ promise.Pass());
}
-void PpapiDecryptor::UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) {
+void PpapiDecryptor::LoadSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
DVLOG(2) << __FUNCTION__;
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- if (!plugin_cdm_delegate_->UpdateSession(
- session_id, response, response_length))
- ReportFailureToCallPlugin(session_id);
+ if (!CdmDelegate()) {
+ promise->reject(INVALID_STATE_ERROR, 0, "CdmDelegate() does not exist.");
+ return;
+ }
+
+ CdmDelegate()->LoadSession(web_session_id, promise.Pass());
+}
- if (!new_audio_key_cb_.is_null())
- new_audio_key_cb_.Run();
+void PpapiDecryptor::UpdateSession(
+ const std::string& web_session_id,
+ const uint8* response,
+ int response_length,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- if (!new_video_key_cb_.is_null())
- new_video_key_cb_.Run();
+ if (!CdmDelegate()) {
+ promise->reject(INVALID_STATE_ERROR, 0, "CdmDelegate() does not exist.");
+ return;
+ }
+
+ scoped_ptr<SessionUpdatedPromise> session_updated_promise(
+ new SessionUpdatedPromise(promise.Pass(),
+ base::Bind(&PpapiDecryptor::ResumePlayback,
+ weak_ptr_factory_.GetWeakPtr())));
+ CdmDelegate()->UpdateSession(
+ web_session_id,
+ response,
+ response_length,
+ session_updated_promise.PassAs<media::SimpleCdmPromise>());
}
-void PpapiDecryptor::ReleaseSession(uint32 session_id) {
- DVLOG(2) << __FUNCTION__;
+void PpapiDecryptor::ReleaseSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- if (!plugin_cdm_delegate_->ReleaseSession(session_id))
- ReportFailureToCallPlugin(session_id);
+ if (!CdmDelegate()) {
+ promise->reject(INVALID_STATE_ERROR, 0, "CdmDelegate() does not exist.");
+ return;
+ }
+
+ CdmDelegate()->ReleaseSession(web_session_id, promise.Pass());
}
media::Decryptor* PpapiDecryptor::GetDecryptor() {
-#if defined(GOOGLE_TV)
- // Google TV only uses PpapiDecrytor as a MediaKeys and does not need the
- // Decryptor interface of the PpapiDecryptor.
- // Details: If we don't do this GTV will be broken. The reason is that during
- // initialization, MediaSourceDelegate tries to use DecryptingDemuxerStream
- // to decrypt the stream in the renderer process (for ClearKey support).
- // However, for GTV, PpapiDecryptor cannot do decryption at all. By returning
- // NULL, DDS init will fail and we fallback to what GTV used to do.
- return NULL;
-#else
return this;
-#endif // defined(GOOGLE_TV)
}
void PpapiDecryptor::RegisterNewKeyCB(StreamType stream_type,
const NewKeyCB& new_key_cb) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::RegisterNewKeyCB, weak_this_, stream_type,
- new_key_cb));
+ render_loop_proxy_->PostTask(FROM_HERE,
+ base::Bind(&PpapiDecryptor::RegisterNewKeyCB,
+ weak_ptr_factory_.GetWeakPtr(),
+ stream_type,
+ new_key_cb));
return;
}
@@ -177,34 +219,46 @@ void PpapiDecryptor::Decrypt(
const scoped_refptr<media::DecoderBuffer>& encrypted,
const DecryptCB& decrypt_cb) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::Decrypt, weak_this_,
- stream_type, encrypted, decrypt_cb));
+ render_loop_proxy_->PostTask(FROM_HERE,
+ base::Bind(&PpapiDecryptor::Decrypt,
+ weak_ptr_factory_.GetWeakPtr(),
+ stream_type,
+ encrypted,
+ decrypt_cb));
return;
}
DVLOG(3) << __FUNCTION__ << " - stream_type: " << stream_type;
- if (!plugin_cdm_delegate_->Decrypt(stream_type, encrypted, decrypt_cb))
+ if (!CdmDelegate() ||
+ !CdmDelegate()->Decrypt(stream_type, encrypted, decrypt_cb)) {
decrypt_cb.Run(kError, NULL);
+ }
}
void PpapiDecryptor::CancelDecrypt(StreamType stream_type) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::CancelDecrypt, weak_this_, stream_type));
+ render_loop_proxy_->PostTask(FROM_HERE,
+ base::Bind(&PpapiDecryptor::CancelDecrypt,
+ weak_ptr_factory_.GetWeakPtr(),
+ stream_type));
return;
}
DVLOG(1) << __FUNCTION__ << " - stream_type: " << stream_type;
- plugin_cdm_delegate_->CancelDecrypt(stream_type);
+ if (CdmDelegate())
+ CdmDelegate()->CancelDecrypt(stream_type);
}
void PpapiDecryptor::InitializeAudioDecoder(
const media::AudioDecoderConfig& config,
const DecoderInitCB& init_cb) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::InitializeAudioDecoder, weak_this_, config, init_cb));
+ render_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&PpapiDecryptor::InitializeAudioDecoder,
+ weak_ptr_factory_.GetWeakPtr(),
+ config,
+ init_cb));
return;
}
@@ -213,8 +267,11 @@ void PpapiDecryptor::InitializeAudioDecoder(
DCHECK(config.IsValidConfig());
audio_decoder_init_cb_ = init_cb;
- if (!plugin_cdm_delegate_->InitializeAudioDecoder(config, base::Bind(
- &PpapiDecryptor::OnDecoderInitialized, weak_this_, kAudio))) {
+ if (!CdmDelegate() || !CdmDelegate()->InitializeAudioDecoder(
+ config,
+ base::Bind(&PpapiDecryptor::OnDecoderInitialized,
+ weak_ptr_factory_.GetWeakPtr(),
+ kAudio))) {
base::ResetAndReturn(&audio_decoder_init_cb_).Run(false);
return;
}
@@ -224,8 +281,12 @@ void PpapiDecryptor::InitializeVideoDecoder(
const media::VideoDecoderConfig& config,
const DecoderInitCB& init_cb) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::InitializeVideoDecoder, weak_this_, config, init_cb));
+ render_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&PpapiDecryptor::InitializeVideoDecoder,
+ weak_ptr_factory_.GetWeakPtr(),
+ config,
+ init_cb));
return;
}
@@ -234,8 +295,11 @@ void PpapiDecryptor::InitializeVideoDecoder(
DCHECK(config.IsValidConfig());
video_decoder_init_cb_ = init_cb;
- if (!plugin_cdm_delegate_->InitializeVideoDecoder(config, base::Bind(
- &PpapiDecryptor::OnDecoderInitialized, weak_this_, kVideo))) {
+ if (!CdmDelegate() || !CdmDelegate()->InitializeVideoDecoder(
+ config,
+ base::Bind(&PpapiDecryptor::OnDecoderInitialized,
+ weak_ptr_factory_.GetWeakPtr(),
+ kVideo))) {
base::ResetAndReturn(&video_decoder_init_cb_).Run(false);
return;
}
@@ -245,58 +309,69 @@ void PpapiDecryptor::DecryptAndDecodeAudio(
const scoped_refptr<media::DecoderBuffer>& encrypted,
const AudioDecodeCB& audio_decode_cb) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::DecryptAndDecodeAudio, weak_this_,
- encrypted, audio_decode_cb));
+ render_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&PpapiDecryptor::DecryptAndDecodeAudio,
+ weak_ptr_factory_.GetWeakPtr(),
+ encrypted,
+ audio_decode_cb));
return;
}
DVLOG(3) << __FUNCTION__;
- if (!plugin_cdm_delegate_->DecryptAndDecodeAudio(encrypted, audio_decode_cb))
+ if (!CdmDelegate() ||
+ !CdmDelegate()->DecryptAndDecodeAudio(encrypted, audio_decode_cb)) {
audio_decode_cb.Run(kError, AudioBuffers());
+ }
}
void PpapiDecryptor::DecryptAndDecodeVideo(
const scoped_refptr<media::DecoderBuffer>& encrypted,
const VideoDecodeCB& video_decode_cb) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::DecryptAndDecodeVideo, weak_this_,
- encrypted, video_decode_cb));
+ render_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&PpapiDecryptor::DecryptAndDecodeVideo,
+ weak_ptr_factory_.GetWeakPtr(),
+ encrypted,
+ video_decode_cb));
return;
}
DVLOG(3) << __FUNCTION__;
- if (!plugin_cdm_delegate_->DecryptAndDecodeVideo(encrypted, video_decode_cb))
+ if (!CdmDelegate() ||
+ !CdmDelegate()->DecryptAndDecodeVideo(encrypted, video_decode_cb)) {
video_decode_cb.Run(kError, NULL);
+ }
}
void PpapiDecryptor::ResetDecoder(StreamType stream_type) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::ResetDecoder, weak_this_, stream_type));
+ render_loop_proxy_->PostTask(FROM_HERE,
+ base::Bind(&PpapiDecryptor::ResetDecoder,
+ weak_ptr_factory_.GetWeakPtr(),
+ stream_type));
return;
}
DVLOG(2) << __FUNCTION__ << " - stream_type: " << stream_type;
- plugin_cdm_delegate_->ResetDecoder(stream_type);
+ if (CdmDelegate())
+ CdmDelegate()->ResetDecoder(stream_type);
}
void PpapiDecryptor::DeinitializeDecoder(StreamType stream_type) {
if (!render_loop_proxy_->BelongsToCurrentThread()) {
- render_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &PpapiDecryptor::DeinitializeDecoder, weak_this_, stream_type));
+ render_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&PpapiDecryptor::DeinitializeDecoder,
+ weak_ptr_factory_.GetWeakPtr(),
+ stream_type));
return;
}
DVLOG(2) << __FUNCTION__ << " - stream_type: " << stream_type;
- plugin_cdm_delegate_->DeinitializeDecoder(stream_type);
-}
-
-void PpapiDecryptor::ReportFailureToCallPlugin(uint32 session_id) {
- DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- DVLOG(1) << "Failed to call plugin.";
- session_error_cb_.Run(session_id, kUnknownError, 0);
+ if (CdmDelegate())
+ CdmDelegate()->DeinitializeDecoder(stream_type);
}
void PpapiDecryptor::OnDecoderInitialized(StreamType stream_type,
@@ -316,34 +391,53 @@ void PpapiDecryptor::OnDecoderInitialized(StreamType stream_type,
}
}
-void PpapiDecryptor::OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) {
+void PpapiDecryptor::OnSessionMessage(const std::string& web_session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url) {
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- session_created_cb_.Run(session_id, web_session_id);
+ session_message_cb_.Run(web_session_id, message, destination_url);
}
-void PpapiDecryptor::OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) {
+void PpapiDecryptor::OnSessionReady(const std::string& web_session_id) {
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- session_message_cb_.Run(session_id, message, destination_url);
+
+ ResumePlayback();
+ session_ready_cb_.Run(web_session_id);
+}
+
+void PpapiDecryptor::OnSessionClosed(const std::string& web_session_id) {
+ DCHECK(render_loop_proxy_->BelongsToCurrentThread());
+ session_closed_cb_.Run(web_session_id);
}
-void PpapiDecryptor::OnSessionReady(uint32 session_id) {
+void PpapiDecryptor::OnSessionError(const std::string& web_session_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_description) {
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- session_ready_cb_.Run(session_id);
+ session_error_cb_.Run(
+ web_session_id, exception_code, system_code, error_description);
+}
+
+void PpapiDecryptor::ResumePlayback() {
+ // Based on the spec, we need to resume playback when update() completes
+ // successfully, or when a session is successfully loaded (triggered by
+ // OnSessionReady()). So we choose to call the NewKeyCBs here.
+ if (!new_audio_key_cb_.is_null())
+ new_audio_key_cb_.Run();
+
+ if (!new_video_key_cb_.is_null())
+ new_video_key_cb_.Run();
}
-void PpapiDecryptor::OnSessionClosed(uint32 session_id) {
+void PpapiDecryptor::OnFatalPluginError() {
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- session_closed_cb_.Run(session_id);
+ pepper_cdm_wrapper_.reset();
}
-void PpapiDecryptor::OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
+ContentDecryptorDelegate* PpapiDecryptor::CdmDelegate() {
DCHECK(render_loop_proxy_->BelongsToCurrentThread());
- session_error_cb_.Run(session_id, error_code, system_code);
+ return (pepper_cdm_wrapper_) ? pepper_cdm_wrapper_->GetCdmDelegate() : NULL;
}
} // namespace content
diff --git a/chromium/content/renderer/media/crypto/ppapi_decryptor.h b/chromium/content/renderer/media/crypto/ppapi_decryptor.h
index 95c5f74a4c3..c09e4b046fd 100644
--- a/chromium/content/renderer/media/crypto/ppapi_decryptor.h
+++ b/chromium/content/renderer/media/crypto/ppapi_decryptor.h
@@ -11,10 +11,13 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "content/renderer/media/crypto/pepper_cdm_wrapper.h"
#include "media/base/decryptor.h"
#include "media/base/media_keys.h"
#include "media/base/video_decoder_config.h"
+class GURL;
+
namespace base {
class MessageLoopProxy;
}
@@ -29,27 +32,34 @@ class PepperPluginInstanceImpl;
class PpapiDecryptor : public media::MediaKeys, public media::Decryptor {
public:
static scoped_ptr<PpapiDecryptor> Create(
- // TODO(ddorwin): Remove after updating the delegate.
const std::string& key_system,
- const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance,
- const media::SessionCreatedCB& session_created_cb,
+ const GURL& security_origin,
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
const media::SessionMessageCB& session_message_cb,
const media::SessionReadyCB& session_ready_cb,
const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb,
- const base::Closure& destroy_plugin_cb);
+ const media::SessionErrorCB& session_error_cb);
virtual ~PpapiDecryptor();
// media::MediaKeys implementation.
- virtual bool CreateSession(uint32 session_id,
- const std::string& type,
- const uint8* init_data,
- int init_data_length) OVERRIDE;
- virtual void UpdateSession(uint32 session_id,
- const uint8* response,
- int response_length) OVERRIDE;
- virtual void ReleaseSession(uint32 session_id) OVERRIDE;
+ virtual void CreateSession(
+ const std::string& init_data_type,
+ const uint8* init_data,
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<media::NewSessionCdmPromise> promise) OVERRIDE;
+ virtual void LoadSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::NewSessionCdmPromise> promise) OVERRIDE;
+ virtual void UpdateSession(
+ const std::string& web_session_id,
+ const uint8* response,
+ int response_length,
+ scoped_ptr<media::SimpleCdmPromise> promise) OVERRIDE;
+ virtual void ReleaseSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) OVERRIDE;
virtual Decryptor* GetDecryptor() OVERRIDE;
// media::Decryptor implementation.
@@ -73,49 +83,46 @@ class PpapiDecryptor : public media::MediaKeys, public media::Decryptor {
virtual void DeinitializeDecoder(StreamType stream_type) OVERRIDE;
private:
- PpapiDecryptor(const scoped_refptr<PepperPluginInstanceImpl>& plugin_instance,
- ContentDecryptorDelegate* plugin_cdm_delegate,
- const media::SessionCreatedCB& session_created_cb,
+ PpapiDecryptor(const std::string& key_system,
+ scoped_ptr<PepperCdmWrapper> pepper_cdm_wrapper,
const media::SessionMessageCB& session_message_cb,
const media::SessionReadyCB& session_ready_cb,
const media::SessionClosedCB& session_closed_cb,
- const media::SessionErrorCB& session_error_cb,
- const base::Closure& destroy_plugin_cb);
-
- void ReportFailureToCallPlugin(uint32 session_id);
+ const media::SessionErrorCB& session_error_cb);
void OnDecoderInitialized(StreamType stream_type, bool success);
// Callbacks for |plugin_cdm_delegate_| to fire session events.
- void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
- void OnSessionMessage(uint32 session_id,
+ void OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& destination_url);
- void OnSessionReady(uint32 session_id);
- void OnSessionClosed(uint32 session_id);
- void OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code);
+ const GURL& destination_url);
+ void OnSessionReady(const std::string& web_session_id);
+ void OnSessionClosed(const std::string& web_session_id);
+ void OnSessionError(const std::string& web_session_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_description);
- base::WeakPtr<PpapiDecryptor> weak_this_;
+ // On a successful Update() or SessionReady event, trigger playback to resume.
+ void ResumePlayback();
- // Hold a reference of the plugin instance to make sure the plugin outlives
- // the |plugin_cdm_delegate_|. This is needed because |plugin_cdm_delegate_|
- // is owned by the |plugin_instance_|.
- scoped_refptr<PepperPluginInstanceImpl> plugin_instance_;
+ // Callback to notify that a fatal error happened in |plugin_cdm_delegate_|.
+ // The error is terminal and |plugin_cdm_delegate_| should not be used after
+ // this call.
+ void OnFatalPluginError();
- ContentDecryptorDelegate* plugin_cdm_delegate_;
+ ContentDecryptorDelegate* CdmDelegate();
+
+ // Hold a reference of the Pepper CDM wrapper to make sure the plugin lives
+ // as long as needed.
+ scoped_ptr<PepperCdmWrapper> pepper_cdm_wrapper_;
// Callbacks for firing session events.
- media::SessionCreatedCB session_created_cb_;
media::SessionMessageCB session_message_cb_;
media::SessionReadyCB session_ready_cb_;
media::SessionClosedCB session_closed_cb_;
media::SessionErrorCB session_error_cb_;
- // Called to destroy the helper plugin when this class no longer needs it.
- base::Closure destroy_plugin_cb_;
-
scoped_refptr<base::MessageLoopProxy> render_loop_proxy_;
DecoderInitCB audio_decoder_init_cb_;
@@ -123,6 +130,7 @@ class PpapiDecryptor : public media::MediaKeys, public media::Decryptor {
NewKeyCB new_audio_key_cb_;
NewKeyCB new_video_key_cb_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<PpapiDecryptor> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(PpapiDecryptor);
diff --git a/chromium/content/renderer/media/crypto/proxy_decryptor.cc b/chromium/content/renderer/media/crypto/proxy_decryptor.cc
index 0016d19a7e8..28cd421e85f 100644
--- a/chromium/content/renderer/media/crypto/proxy_decryptor.cc
+++ b/chromium/content/renderer/media/crypto/proxy_decryptor.cc
@@ -4,57 +4,56 @@
#include "content/renderer/media/crypto/proxy_decryptor.h"
+#include <cstring>
+
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "content/renderer/media/crypto/content_decryption_module_factory.h"
-#if defined(OS_ANDROID)
-#include "content/renderer/media/android/renderer_media_player_manager.h"
-#endif // defined(OS_ANDROID)
+#include "media/base/cdm_promise.h"
#include "media/cdm/json_web_key.h"
#include "media/cdm/key_system_names.h"
-namespace content {
+#if defined(ENABLE_PEPPER_CDMS)
+#include "content/renderer/media/crypto/pepper_cdm_wrapper.h"
+#endif // defined(ENABLE_PEPPER_CDMS)
-// Since these reference IDs may conflict with the ones generated in
-// WebContentDecryptionModuleSessionImpl for the short time both paths are
-// active, start with 100000 and generate the IDs from there.
-// TODO(jrummell): Only allow one path http://crbug.com/306680.
-uint32 ProxyDecryptor::next_session_id_ = 100000;
+#if defined(ENABLE_BROWSER_CDMS)
+#include "content/renderer/media/crypto/renderer_cdm_manager.h"
+#endif // defined(ENABLE_BROWSER_CDMS)
-const uint32 kInvalidSessionId = 0;
+namespace content {
-#if defined(ENABLE_PEPPER_CDMS)
-void ProxyDecryptor::DestroyHelperPlugin() {
- ContentDecryptionModuleFactory::DestroyHelperPlugin(
- web_media_player_client_, web_frame_);
-}
-#endif // defined(ENABLE_PEPPER_CDMS)
+// Special system code to signal a closed persistent session in a SessionError()
+// call. This is needed because there is no SessionClosed() call in the prefixed
+// EME API.
+const int kSessionClosedSystemCode = 29127;
ProxyDecryptor::ProxyDecryptor(
#if defined(ENABLE_PEPPER_CDMS)
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame,
-#elif defined(OS_ANDROID)
- RendererMediaPlayerManager* manager,
- int media_keys_id,
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
#endif // defined(ENABLE_PEPPER_CDMS)
const KeyAddedCB& key_added_cb,
const KeyErrorCB& key_error_cb,
const KeyMessageCB& key_message_cb)
- : weak_ptr_factory_(this),
+ :
#if defined(ENABLE_PEPPER_CDMS)
- web_media_player_client_(web_media_player_client),
- web_frame_(web_frame),
-#elif defined(OS_ANDROID)
+ create_pepper_cdm_cb_(create_pepper_cdm_cb),
+#elif defined(ENABLE_BROWSER_CDMS)
manager_(manager),
- media_keys_id_(media_keys_id),
+ cdm_id_(RendererCdmManager::kInvalidCdmId),
#endif // defined(ENABLE_PEPPER_CDMS)
key_added_cb_(key_added_cb),
key_error_cb_(key_error_cb),
key_message_cb_(key_message_cb),
- is_clear_key_(false) {
+ is_clear_key_(false),
+ weak_ptr_factory_(this) {
+#if defined(ENABLE_PEPPER_CDMS)
+ DCHECK(!create_pepper_cdm_cb_.is_null());
+#endif // defined(ENABLE_PEPPER_CDMS)
DCHECK(!key_added_cb_.is_null());
DCHECK(!key_error_cb_.is_null());
DCHECK(!key_message_cb_.is_null());
@@ -62,67 +61,75 @@ ProxyDecryptor::ProxyDecryptor(
ProxyDecryptor::~ProxyDecryptor() {
// Destroy the decryptor explicitly before destroying the plugin.
- {
- base::AutoLock auto_lock(lock_);
- media_keys_.reset();
- }
+ media_keys_.reset();
}
-// TODO(xhwang): Support multiple decryptor notification request (e.g. from
-// video and audio decoders). The current implementation is okay for the current
-// media pipeline since we initialize audio and video decoders in sequence.
-// But ProxyDecryptor should not depend on media pipeline's implementation
-// detail.
-void ProxyDecryptor::SetDecryptorReadyCB(
- const media::DecryptorReadyCB& decryptor_ready_cb) {
- base::AutoLock auto_lock(lock_);
-
- // Cancels the previous decryptor request.
- if (decryptor_ready_cb.is_null()) {
- if (!decryptor_ready_cb_.is_null())
- base::ResetAndReturn(&decryptor_ready_cb_).Run(NULL);
- return;
- }
+media::Decryptor* ProxyDecryptor::GetDecryptor() {
+ return media_keys_ ? media_keys_->GetDecryptor() : NULL;
+}
- // Normal decryptor request.
- DCHECK(decryptor_ready_cb_.is_null());
- if (media_keys_) {
- decryptor_ready_cb.Run(media_keys_->GetDecryptor());
- return;
- }
- decryptor_ready_cb_ = decryptor_ready_cb;
+#if defined(ENABLE_BROWSER_CDMS)
+int ProxyDecryptor::GetCdmId() {
+ return cdm_id_;
}
+#endif
bool ProxyDecryptor::InitializeCDM(const std::string& key_system,
- const GURL& frame_url) {
+ const GURL& security_origin) {
DVLOG(1) << "InitializeCDM: key_system = " << key_system;
- base::AutoLock auto_lock(lock_);
-
DCHECK(!media_keys_);
- media_keys_ = CreateMediaKeys(key_system, frame_url);
+ media_keys_ = CreateMediaKeys(key_system, security_origin);
if (!media_keys_)
return false;
- if (!decryptor_ready_cb_.is_null())
- base::ResetAndReturn(&decryptor_ready_cb_).Run(media_keys_->GetDecryptor());
-
is_clear_key_ =
media::IsClearKey(key_system) || media::IsExternalClearKey(key_system);
return true;
}
-bool ProxyDecryptor::GenerateKeyRequest(const std::string& type,
+// Returns true if |data| is prefixed with |header| and has data after the
+// |header|.
+bool HasHeader(const uint8* data, int data_length, const std::string& header) {
+ return static_cast<size_t>(data_length) > header.size() &&
+ std::equal(data, data + header.size(), header.begin());
+}
+
+bool ProxyDecryptor::GenerateKeyRequest(const std::string& content_type,
const uint8* init_data,
int init_data_length) {
- // Use a unique reference id for this request.
- uint32 session_id = next_session_id_++;
- if (!media_keys_->CreateSession(
- session_id, type, init_data, init_data_length)) {
- media_keys_.reset();
- return false;
+ DVLOG(1) << "GenerateKeyRequest()";
+ const char kPrefixedApiPersistentSessionHeader[] = "PERSISTENT|";
+ const char kPrefixedApiLoadSessionHeader[] = "LOAD_SESSION|";
+
+ bool loadSession =
+ HasHeader(init_data, init_data_length, kPrefixedApiLoadSessionHeader);
+ bool persistent = HasHeader(
+ init_data, init_data_length, kPrefixedApiPersistentSessionHeader);
+
+ scoped_ptr<media::NewSessionCdmPromise> promise(
+ new media::NewSessionCdmPromise(
+ base::Bind(&ProxyDecryptor::SetSessionId,
+ weak_ptr_factory_.GetWeakPtr(),
+ persistent || loadSession),
+ base::Bind(&ProxyDecryptor::OnSessionError,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::string()))); // No session id until created.
+
+ if (loadSession) {
+ media_keys_->LoadSession(
+ std::string(reinterpret_cast<const char*>(
+ init_data + strlen(kPrefixedApiLoadSessionHeader)),
+ init_data_length - strlen(kPrefixedApiLoadSessionHeader)),
+ promise.Pass());
+ return true;
}
+ media::MediaKeys::SessionType session_type =
+ persistent ? media::MediaKeys::PERSISTENT_SESSION
+ : media::MediaKeys::TEMPORARY_SESSION;
+ media_keys_->CreateSession(
+ content_type, init_data, init_data_length, session_type, promise.Pass());
return true;
}
@@ -133,18 +140,31 @@ void ProxyDecryptor::AddKey(const uint8* key,
const std::string& web_session_id) {
DVLOG(1) << "AddKey()";
- // WebMediaPlayerImpl ensures GenerateKeyRequest() has been called.
- uint32 session_id = LookupSessionId(web_session_id);
- if (session_id == kInvalidSessionId) {
- // Session hasn't been referenced before, so it is an error.
- // Note that the specification says "If sessionId is not null and is
- // unrecognized, throw an INVALID_ACCESS_ERR." However, for backwards
- // compatibility the error is not thrown, but rather reported as a
- // KeyError.
- key_error_cb_.Run(std::string(), media::MediaKeys::kUnknownError, 0);
- return;
+ // In the prefixed API, the session parameter provided to addKey() is
+ // optional, so use the single existing session if it exists.
+ // TODO(jrummell): remove when the prefixed API is removed.
+ std::string session_id(web_session_id);
+ if (session_id.empty()) {
+ if (active_sessions_.size() == 1) {
+ base::hash_map<std::string, bool>::iterator it = active_sessions_.begin();
+ session_id = it->first;
+ } else {
+ OnSessionError(std::string(),
+ media::MediaKeys::NOT_SUPPORTED_ERROR,
+ 0,
+ "SessionId not specified.");
+ return;
+ }
}
+ scoped_ptr<media::SimpleCdmPromise> promise(
+ new media::SimpleCdmPromise(base::Bind(&ProxyDecryptor::OnSessionReady,
+ weak_ptr_factory_.GetWeakPtr(),
+ web_session_id),
+ base::Bind(&ProxyDecryptor::OnSessionError,
+ weak_ptr_factory_.GetWeakPtr(),
+ web_session_id)));
+
// EME WD spec only supports a single array passed to the CDM. For
// Clear Key using v0.1b, both arrays are used (|init_data| is key_id).
// Since the EME WD spec supports the key as a JSON Web Key,
@@ -161,46 +181,41 @@ void ProxyDecryptor::AddKey(const uint8* key,
std::string jwk =
media::GenerateJWKSet(key, key_length, init_data, init_data_length);
DCHECK(!jwk.empty());
- media_keys_->UpdateSession(
- session_id, reinterpret_cast<const uint8*>(jwk.data()), jwk.size());
+ media_keys_->UpdateSession(session_id,
+ reinterpret_cast<const uint8*>(jwk.data()),
+ jwk.size(),
+ promise.Pass());
return;
}
- media_keys_->UpdateSession(session_id, key, key_length);
+ media_keys_->UpdateSession(session_id, key, key_length, promise.Pass());
}
-void ProxyDecryptor::CancelKeyRequest(const std::string& session_id) {
+void ProxyDecryptor::CancelKeyRequest(const std::string& web_session_id) {
DVLOG(1) << "CancelKeyRequest()";
- // WebMediaPlayerImpl ensures GenerateKeyRequest() has been called.
- uint32 session_reference_id = LookupSessionId(session_id);
- if (session_reference_id == kInvalidSessionId) {
- // Session hasn't been created, so it is an error.
- key_error_cb_.Run(
- std::string(), media::MediaKeys::kUnknownError, 0);
- }
- else {
- media_keys_->ReleaseSession(session_reference_id);
- }
+ scoped_ptr<media::SimpleCdmPromise> promise(
+ new media::SimpleCdmPromise(base::Bind(&ProxyDecryptor::OnSessionClosed,
+ weak_ptr_factory_.GetWeakPtr(),
+ web_session_id),
+ base::Bind(&ProxyDecryptor::OnSessionError,
+ weak_ptr_factory_.GetWeakPtr(),
+ web_session_id)));
+ media_keys_->ReleaseSession(web_session_id, promise.Pass());
}
scoped_ptr<media::MediaKeys> ProxyDecryptor::CreateMediaKeys(
const std::string& key_system,
- const GURL& frame_url) {
+ const GURL& security_origin) {
return ContentDecryptionModuleFactory::Create(
key_system,
+ security_origin,
#if defined(ENABLE_PEPPER_CDMS)
- web_media_player_client_,
- web_frame_,
- base::Bind(&ProxyDecryptor::DestroyHelperPlugin,
- weak_ptr_factory_.GetWeakPtr()),
-#elif defined(OS_ANDROID)
+ create_pepper_cdm_cb_,
+#elif defined(ENABLE_BROWSER_CDMS)
manager_,
- media_keys_id_,
- frame_url,
+ &cdm_id_,
#endif // defined(ENABLE_PEPPER_CDMS)
- base::Bind(&ProxyDecryptor::OnSessionCreated,
- weak_ptr_factory_.GetWeakPtr()),
base::Bind(&ProxyDecryptor::OnSessionMessage,
weak_ptr_factory_.GetWeakPtr()),
base::Bind(&ProxyDecryptor::OnSessionReady,
@@ -211,60 +226,69 @@ scoped_ptr<media::MediaKeys> ProxyDecryptor::CreateMediaKeys(
weak_ptr_factory_.GetWeakPtr()));
}
-void ProxyDecryptor::OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) {
- // Due to heartbeat messages, OnSessionCreated() can get called multiple
- // times.
- SessionIdMap::iterator it = sessions_.find(session_id);
- DCHECK(it == sessions_.end() || it->second == web_session_id);
- if (it == sessions_.end())
- sessions_[session_id] = web_session_id;
-}
-
-void ProxyDecryptor::OnSessionMessage(uint32 session_id,
+void ProxyDecryptor::OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& destination_url) {
+ const GURL& destination_url) {
// Assumes that OnSessionCreated() has been called before this.
- key_message_cb_.Run(LookupWebSessionId(session_id), message, destination_url);
+ key_message_cb_.Run(web_session_id, message, destination_url);
}
-void ProxyDecryptor::OnSessionReady(uint32 session_id) {
- // Assumes that OnSessionCreated() has been called before this.
- key_added_cb_.Run(LookupWebSessionId(session_id));
+void ProxyDecryptor::OnSessionReady(const std::string& web_session_id) {
+ key_added_cb_.Run(web_session_id);
}
-void ProxyDecryptor::OnSessionClosed(uint32 session_id) {
- // No closed event in EME v0.1b.
-}
+void ProxyDecryptor::OnSessionClosed(const std::string& web_session_id) {
+ base::hash_map<std::string, bool>::iterator it =
+ active_sessions_.find(web_session_id);
+
+ // Latest EME spec separates closing a session ("allows an application to
+ // indicate that it no longer needs the session") and actually closing the
+ // session (done by the CDM at any point "such as in response to a close()
+ // call, when the session is no longer needed, or when system resources are
+ // lost.") Thus the CDM may cause 2 close() events -- one to resolve the
+ // close() promise, and a second to actually close the session. Prefixed EME
+ // only expects 1 close event, so drop the second (and subsequent) events.
+ // However, this means we can't tell if the CDM is generating spurious close()
+ // events.
+ if (it == active_sessions_.end())
+ return;
-void ProxyDecryptor::OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
- // Assumes that OnSessionCreated() has been called before this.
- key_error_cb_.Run(LookupWebSessionId(session_id), error_code, system_code);
+ if (it->second) {
+ OnSessionError(web_session_id,
+ media::MediaKeys::NOT_SUPPORTED_ERROR,
+ kSessionClosedSystemCode,
+ "Do not close persistent sessions.");
+ }
+ active_sessions_.erase(it);
}
-uint32 ProxyDecryptor::LookupSessionId(const std::string& session_id) {
- for (SessionIdMap::iterator it = sessions_.begin();
- it != sessions_.end();
- ++it) {
- if (it->second == session_id)
- return it->first;
+void ProxyDecryptor::OnSessionError(const std::string& web_session_id,
+ media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ // Convert |error_name| back to MediaKeys::KeyError if possible. Prefixed
+ // EME has different error message, so all the specific error events will
+ // get lost.
+ media::MediaKeys::KeyError error_code;
+ switch (exception_code) {
+ case media::MediaKeys::CLIENT_ERROR:
+ error_code = media::MediaKeys::kClientError;
+ break;
+ case media::MediaKeys::OUTPUT_ERROR:
+ error_code = media::MediaKeys::kOutputError;
+ break;
+ default:
+ // This will include all other CDM4 errors and any error generated
+ // by CDM5 or later.
+ error_code = media::MediaKeys::kUnknownError;
+ break;
}
-
- // If |session_id| is null, then use the single reference id.
- if (session_id.empty() && sessions_.size() == 1)
- return sessions_.begin()->first;
-
- return kInvalidSessionId;
+ key_error_cb_.Run(web_session_id, error_code, system_code);
}
-const std::string& ProxyDecryptor::LookupWebSessionId(uint32 session_id) {
- DCHECK_NE(session_id, kInvalidSessionId);
-
- // Session may not exist if error happens during GenerateKeyRequest().
- SessionIdMap::iterator it = sessions_.find(session_id);
- return (it != sessions_.end()) ? it->second : base::EmptyString();
+void ProxyDecryptor::SetSessionId(bool persistent,
+ const std::string& web_session_id) {
+ active_sessions_.insert(std::make_pair(web_session_id, persistent));
}
} // namespace content
diff --git a/chromium/content/renderer/media/crypto/proxy_decryptor.h b/chromium/content/renderer/media/crypto/proxy_decryptor.h
index 026b56a2802..e4fd7eace5e 100644
--- a/chromium/content/renderer/media/crypto/proxy_decryptor.h
+++ b/chromium/content/renderer/media/crypto/proxy_decryptor.h
@@ -5,42 +5,32 @@
#ifndef CONTENT_RENDERER_MEDIA_CRYPTO_PROXY_DECRYPTOR_H_
#define CONTENT_RENDERER_MEDIA_CRYPTO_PROXY_DECRYPTOR_H_
-#include <map>
#include <string>
#include <vector>
#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/synchronization/lock.h"
#include "media/base/decryptor.h"
#include "media/base/media_keys.h"
-class GURL;
-
-namespace blink {
#if defined(ENABLE_PEPPER_CDMS)
-class WebFrame;
-class WebMediaPlayerClient;
-#endif // defined(ENABLE_PEPPER_CDMS)
-}
+#include "content/renderer/media/crypto/pepper_cdm_wrapper.h"
+#endif
+
+class GURL;
namespace content {
-#if defined(OS_ANDROID)
-class RendererMediaPlayerManager;
-#endif // defined(OS_ANDROID)
+#if defined(ENABLE_BROWSER_CDMS)
+class RendererCdmManager;
+#endif // defined(ENABLE_BROWSER_CDMS)
// ProxyDecryptor is for EME v0.1b only. It should not be used for the WD API.
// A decryptor proxy that creates a real decryptor object on demand and
// forwards decryptor calls to it.
//
-// Now that the Pepper API calls use session ID to match responses with
-// requests, this class maintains a mapping between session ID and web session
-// ID. Callers of this class expect web session IDs in the responses.
-// Session IDs are internal unique references to the session. Web session IDs
-// are the CDM generated ID for the session, and are what are visible to users.
-//
// TODO(xhwang): Currently we don't support run-time switching among decryptor
// objects. Fix this when needed.
// TODO(xhwang): The ProxyDecryptor is not a Decryptor. Find a better name!
@@ -51,32 +41,35 @@ class ProxyDecryptor {
typedef base::Callback<void(const std::string& session_id)> KeyAddedCB;
typedef base::Callback<void(const std::string& session_id,
media::MediaKeys::KeyError error_code,
- int system_code)> KeyErrorCB;
+ uint32 system_code)> KeyErrorCB;
typedef base::Callback<void(const std::string& session_id,
const std::vector<uint8>& message,
- const std::string& default_url)> KeyMessageCB;
+ const GURL& destination_url)> KeyMessageCB;
ProxyDecryptor(
#if defined(ENABLE_PEPPER_CDMS)
- blink::WebMediaPlayerClient* web_media_player_client,
- blink::WebFrame* web_frame,
-#elif defined(OS_ANDROID)
- RendererMediaPlayerManager* manager,
- int media_keys_id,
+ const CreatePepperCdmCB& create_pepper_cdm_cb,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
#endif // defined(ENABLE_PEPPER_CDMS)
const KeyAddedCB& key_added_cb,
const KeyErrorCB& key_error_cb,
const KeyMessageCB& key_message_cb);
virtual ~ProxyDecryptor();
- // Only call this once.
- bool InitializeCDM(const std::string& key_system, const GURL& frame_url);
+ // Returns the Decryptor associated with this object. May be NULL if no
+ // Decryptor is associated.
+ media::Decryptor* GetDecryptor();
- // Requests the ProxyDecryptor to notify the decryptor when it's ready through
- // the |decryptor_ready_cb| provided.
- // If |decryptor_ready_cb| is null, the existing callback will be fired with
- // NULL immediately and reset.
- void SetDecryptorReadyCB(const media::DecryptorReadyCB& decryptor_ready_cb);
+#if defined(ENABLE_BROWSER_CDMS)
+ // Returns the CDM ID associated with this object. May be kInvalidCdmId if no
+ // CDM ID is associated, such as when Clear Key is used.
+ int GetCdmId();
+#endif
+
+ // Only call this once.
+ bool InitializeCDM(const std::string& key_system,
+ const GURL& security_origin);
// May only be called after InitializeCDM() succeeds.
bool GenerateKeyRequest(const std::string& type,
@@ -88,48 +81,33 @@ class ProxyDecryptor {
void CancelKeyRequest(const std::string& session_id);
private:
- // Session_id <-> web_session_id map.
- typedef std::map<uint32, std::string> SessionIdMap;
-
// Helper function to create MediaKeys to handle the given |key_system|.
scoped_ptr<media::MediaKeys> CreateMediaKeys(const std::string& key_system,
- const GURL& frame_url);
+ const GURL& security_origin);
// Callbacks for firing session events.
- void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
- void OnSessionMessage(uint32 session_id,
+ void OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& default_url);
- void OnSessionReady(uint32 session_id);
- void OnSessionClosed(uint32 session_id);
- void OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code);
-
- // Helper function to determine session_id for the provided |web_session_id|.
- uint32 LookupSessionId(const std::string& web_session_id);
+ const GURL& default_url);
+ void OnSessionReady(const std::string& web_session_id);
+ void OnSessionClosed(const std::string& web_session_id);
+ void OnSessionError(const std::string& web_session_id,
+ media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
- // Helper function to determine web_session_id for the provided |session_id|.
- // The returned web_session_id is only valid on the main thread, and should be
- // stored by copy.
- const std::string& LookupWebSessionId(uint32 session_id);
-
- base::WeakPtrFactory<ProxyDecryptor> weak_ptr_factory_;
+ // Called when a session is actually created or loaded.
+ void SetSessionId(bool persistent, const std::string& web_session_id);
#if defined(ENABLE_PEPPER_CDMS)
- // Callback for cleaning up a Pepper-based CDM.
- void DestroyHelperPlugin();
-
- // Needed to create the PpapiDecryptor.
- blink::WebMediaPlayerClient* web_media_player_client_;
- blink::WebFrame* web_frame_;
-#elif defined(OS_ANDROID)
- RendererMediaPlayerManager* manager_;
- int media_keys_id_;
+ // Callback to create the Pepper plugin.
+ CreatePepperCdmCB create_pepper_cdm_cb_;
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager_;
+ int cdm_id_;
#endif // defined(ENABLE_PEPPER_CDMS)
// The real MediaKeys that manages key operations for the ProxyDecryptor.
- // This pointer is protected by the |lock_|.
scoped_ptr<media::MediaKeys> media_keys_;
// Callbacks for firing key events.
@@ -137,21 +115,14 @@ class ProxyDecryptor {
KeyErrorCB key_error_cb_;
KeyMessageCB key_message_cb_;
- // Protects the |decryptor_|. Note that |decryptor_| itself should be thread
- // safe as per the Decryptor interface.
- base::Lock lock_;
-
- media::DecryptorReadyCB decryptor_ready_cb_;
-
- // Session IDs are used to uniquely track sessions so that CDM callbacks
- // can get mapped to the correct session ID. Session ID should be unique
- // per renderer process for debugging purposes.
- static uint32 next_session_id_;
-
- SessionIdMap sessions_;
+ // Keep track of both persistent and non-persistent sessions.
+ base::hash_map<std::string, bool> active_sessions_;
bool is_clear_key_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<ProxyDecryptor> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(ProxyDecryptor);
};
diff --git a/chromium/content/renderer/media/crypto/proxy_media_keys.cc b/chromium/content/renderer/media/crypto/proxy_media_keys.cc
new file mode 100644
index 00000000000..68a68b3bc37
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/proxy_media_keys.cc
@@ -0,0 +1,273 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/crypto/proxy_media_keys.h"
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "content/renderer/media/crypto/key_systems.h"
+#include "content/renderer/media/crypto/renderer_cdm_manager.h"
+#include "media/base/cdm_promise.h"
+
+namespace content {
+
+scoped_ptr<ProxyMediaKeys> ProxyMediaKeys::Create(
+ const std::string& key_system,
+ const GURL& security_origin,
+ RendererCdmManager* manager,
+ const media::SessionMessageCB& session_message_cb,
+ const media::SessionReadyCB& session_ready_cb,
+ const media::SessionClosedCB& session_closed_cb,
+ const media::SessionErrorCB& session_error_cb) {
+ DCHECK(manager);
+ scoped_ptr<ProxyMediaKeys> proxy_media_keys(
+ new ProxyMediaKeys(manager,
+ session_message_cb,
+ session_ready_cb,
+ session_closed_cb,
+ session_error_cb));
+ proxy_media_keys->InitializeCdm(key_system, security_origin);
+ return proxy_media_keys.Pass();
+}
+
+ProxyMediaKeys::~ProxyMediaKeys() {
+ manager_->DestroyCdm(cdm_id_);
+ manager_->UnregisterMediaKeys(cdm_id_);
+
+ // Reject any outstanding promises.
+ for (PromiseMap::iterator it = session_id_to_promise_map_.begin();
+ it != session_id_to_promise_map_.end();
+ ++it) {
+ it->second->reject(
+ media::MediaKeys::NOT_SUPPORTED_ERROR, 0, "The operation was aborted.");
+ }
+ session_id_to_promise_map_.clear();
+}
+
+void ProxyMediaKeys::CreateSession(
+ const std::string& init_data_type,
+ const uint8* init_data,
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
+ // TODO(xhwang): Move these checks up to blink and DCHECK here.
+ // See http://crbug.com/342510
+ CdmHostMsg_CreateSession_ContentType create_session_content_type;
+ if (init_data_type == "audio/mp4" || init_data_type == "video/mp4") {
+ create_session_content_type = CREATE_SESSION_TYPE_MP4;
+ } else if (init_data_type == "audio/webm" || init_data_type == "video/webm") {
+ create_session_content_type = CREATE_SESSION_TYPE_WEBM;
+ } else {
+ DLOG(ERROR) << "Unsupported EME CreateSession content type of "
+ << init_data_type;
+ promise->reject(
+ NOT_SUPPORTED_ERROR,
+ 0,
+ "Unsupported EME CreateSession init data type of " + init_data_type);
+ return;
+ }
+
+ uint32 session_id = CreateSessionId();
+ SavePromise(session_id, promise.PassAs<media::CdmPromise>());
+ manager_->CreateSession(
+ cdm_id_,
+ session_id,
+ create_session_content_type,
+ std::vector<uint8>(init_data, init_data + init_data_length));
+}
+
+void ProxyMediaKeys::LoadSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::NewSessionCdmPromise> promise) {
+ // TODO(xhwang): Check key system and platform support for LoadSession in
+ // blink and add NOTREACHED() here.
+ DLOG(ERROR) << "ProxyMediaKeys doesn't support session loading.";
+ promise->reject(NOT_SUPPORTED_ERROR, 0, "LoadSession() is not supported.");
+}
+
+void ProxyMediaKeys::UpdateSession(
+ const std::string& web_session_id,
+ const uint8* response,
+ int response_length,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ uint32 session_id = LookupSessionId(web_session_id);
+ if (!session_id) {
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Session does not exist.");
+ return;
+ }
+
+ SavePromise(session_id, promise.PassAs<media::CdmPromise>());
+ manager_->UpdateSession(
+ cdm_id_,
+ session_id,
+ std::vector<uint8>(response, response + response_length));
+}
+
+void ProxyMediaKeys::ReleaseSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) {
+ uint32 session_id = LookupSessionId(web_session_id);
+ if (!session_id) {
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Session does not exist.");
+ return;
+ }
+
+ SavePromise(session_id, promise.PassAs<media::CdmPromise>());
+ manager_->ReleaseSession(cdm_id_, session_id);
+}
+
+void ProxyMediaKeys::OnSessionCreated(uint32 session_id,
+ const std::string& web_session_id) {
+ AssignWebSessionId(session_id, web_session_id);
+ scoped_ptr<media::CdmPromise> promise = TakePromise(session_id);
+ if (promise) {
+ media::NewSessionCdmPromise* session_promise(
+ static_cast<media::NewSessionCdmPromise*>(promise.get()));
+ session_promise->resolve(web_session_id);
+ }
+}
+
+void ProxyMediaKeys::OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url) {
+ session_message_cb_.Run(
+ LookupWebSessionId(session_id), message, destination_url);
+}
+
+void ProxyMediaKeys::OnSessionReady(uint32 session_id) {
+ scoped_ptr<media::CdmPromise> promise = TakePromise(session_id);
+ if (promise) {
+ media::SimpleCdmPromise* simple_promise(
+ static_cast<media::SimpleCdmPromise*>(promise.get()));
+ simple_promise->resolve();
+ } else {
+ // Still needed for keyadded.
+ const std::string web_session_id = LookupWebSessionId(session_id);
+ session_ready_cb_.Run(web_session_id);
+ }
+}
+
+void ProxyMediaKeys::OnSessionClosed(uint32 session_id) {
+ const std::string web_session_id = LookupWebSessionId(session_id);
+ DropWebSessionId(web_session_id);
+ scoped_ptr<media::CdmPromise> promise = TakePromise(session_id);
+ if (promise) {
+ media::SimpleCdmPromise* simple_promise(
+ static_cast<media::SimpleCdmPromise*>(promise.get()));
+ simple_promise->resolve();
+ } else {
+ // It is possible for the CDM to close a session independent of a
+ // Release() request.
+ session_closed_cb_.Run(web_session_id);
+ }
+}
+
+void ProxyMediaKeys::OnSessionError(uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ uint32 system_code) {
+ const std::string web_session_id = LookupWebSessionId(session_id);
+ media::MediaKeys::Exception exception_code;
+ switch (error_code) {
+ case media::MediaKeys::kClientError:
+ exception_code = media::MediaKeys::CLIENT_ERROR;
+ break;
+ case media::MediaKeys::kOutputError:
+ exception_code = media::MediaKeys::OUTPUT_ERROR;
+ break;
+ case media::MediaKeys::kUnknownError:
+ default:
+ exception_code = media::MediaKeys::UNKNOWN_ERROR;
+ break;
+ }
+
+ scoped_ptr<media::CdmPromise> promise = TakePromise(session_id);
+ if (promise) {
+ promise->reject(exception_code, system_code, std::string());
+ return;
+ }
+
+ // Errors generally happen in response to a request, but it is possible
+ // for something bad to happen in the CDM and it needs to tell the client.
+ session_error_cb_.Run(
+ web_session_id, exception_code, system_code, std::string());
+}
+
+int ProxyMediaKeys::GetCdmId() const {
+ return cdm_id_;
+}
+
+ProxyMediaKeys::ProxyMediaKeys(
+ RendererCdmManager* manager,
+ const media::SessionMessageCB& session_message_cb,
+ const media::SessionReadyCB& session_ready_cb,
+ const media::SessionClosedCB& session_closed_cb,
+ const media::SessionErrorCB& session_error_cb)
+ : manager_(manager),
+ session_message_cb_(session_message_cb),
+ session_ready_cb_(session_ready_cb),
+ session_closed_cb_(session_closed_cb),
+ session_error_cb_(session_error_cb),
+ next_session_id_(1) {
+ cdm_id_ = manager->RegisterMediaKeys(this);
+}
+
+void ProxyMediaKeys::InitializeCdm(const std::string& key_system,
+ const GURL& security_origin) {
+ manager_->InitializeCdm(cdm_id_, this, key_system, security_origin);
+}
+
+uint32_t ProxyMediaKeys::CreateSessionId() {
+ return next_session_id_++;
+}
+
+void ProxyMediaKeys::AssignWebSessionId(uint32_t session_id,
+ const std::string& web_session_id) {
+ DCHECK(!ContainsKey(web_session_to_session_id_map_, web_session_id));
+ DCHECK(session_id);
+ web_session_to_session_id_map_.insert(
+ std::make_pair(web_session_id, session_id));
+}
+
+uint32_t ProxyMediaKeys::LookupSessionId(
+ const std::string& web_session_id) const {
+ SessionIdMap::const_iterator it =
+ web_session_to_session_id_map_.find(web_session_id);
+ return (it != web_session_to_session_id_map_.end()) ? it->second : 0;
+}
+
+std::string ProxyMediaKeys::LookupWebSessionId(uint32_t session_id) const {
+ for (SessionIdMap::const_iterator it = web_session_to_session_id_map_.begin();
+ it != web_session_to_session_id_map_.end();
+ ++it) {
+ if (it->second == session_id)
+ return it->first;
+ }
+ // Possible to get an error creating a session, so no |web_session_id|
+ // available.
+ return std::string();
+}
+
+void ProxyMediaKeys::DropWebSessionId(const std::string& web_session_id) {
+ web_session_to_session_id_map_.erase(web_session_id);
+}
+
+void ProxyMediaKeys::SavePromise(uint32_t session_id,
+ scoped_ptr<media::CdmPromise> promise) {
+ // Should only be one promise outstanding for any |session_id|.
+ DCHECK(!ContainsKey(session_id_to_promise_map_, session_id));
+ session_id_to_promise_map_.add(session_id, promise.Pass());
+}
+
+scoped_ptr<media::CdmPromise> ProxyMediaKeys::TakePromise(uint32_t session_id) {
+ PromiseMap::iterator it = session_id_to_promise_map_.find(session_id);
+ // May not be a promise associated with this session for asynchronous events.
+ if (it == session_id_to_promise_map_.end())
+ return scoped_ptr<media::CdmPromise>();
+ return session_id_to_promise_map_.take_and_erase(it);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/crypto/proxy_media_keys.h b/chromium/content/renderer/media/crypto/proxy_media_keys.h
new file mode 100644
index 00000000000..4ebd4c120f0
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/proxy_media_keys.h
@@ -0,0 +1,127 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_CRYPTO_PROXY_MEDIA_KEYS_H_
+#define CONTENT_RENDERER_MEDIA_CRYPTO_PROXY_MEDIA_KEYS_H_
+
+#include <map>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "media/base/cdm_promise.h"
+#include "media/base/media_keys.h"
+
+class GURL;
+
+namespace content {
+
+class RendererCdmManager;
+
+// A MediaKeys proxy that wraps the EME part of RendererCdmManager.
+class ProxyMediaKeys : public media::MediaKeys {
+ public:
+ static scoped_ptr<ProxyMediaKeys> Create(
+ const std::string& key_system,
+ const GURL& security_origin,
+ RendererCdmManager* manager,
+ const media::SessionMessageCB& session_message_cb,
+ const media::SessionReadyCB& session_ready_cb,
+ const media::SessionClosedCB& session_closed_cb,
+ const media::SessionErrorCB& session_error_cb);
+
+ virtual ~ProxyMediaKeys();
+
+ // MediaKeys implementation.
+ virtual void CreateSession(
+ const std::string& init_data_type,
+ const uint8* init_data,
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<media::NewSessionCdmPromise> promise) OVERRIDE;
+ virtual void LoadSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::NewSessionCdmPromise> promise) OVERRIDE;
+ virtual void UpdateSession(
+ const std::string& web_session_id,
+ const uint8* response,
+ int response_length,
+ scoped_ptr<media::SimpleCdmPromise> promise) OVERRIDE;
+ virtual void ReleaseSession(
+ const std::string& web_session_id,
+ scoped_ptr<media::SimpleCdmPromise> promise) OVERRIDE;
+
+ // Callbacks.
+ void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
+ void OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url);
+ void OnSessionReady(uint32 session_id);
+ void OnSessionClosed(uint32 session_id);
+ void OnSessionError(uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ uint32 system_code);
+
+ int GetCdmId() const;
+
+ private:
+ // The Android-specific code that handles sessions uses integer session ids
+ // (basically a reference id), but media::MediaKeys bases everything on
+ // web_session_id (a string representing the actual session id as generated
+ // by the CDM). SessionIdMap is used to map between the web_session_id and
+ // the session_id used by the Android-specific code.
+ typedef base::hash_map<std::string, uint32_t> SessionIdMap;
+
+ // The following types keep track of Promises. The index is the
+ // Android-specific session_id, so that returning results can be matched to
+ // the corresponding promise.
+ typedef base::ScopedPtrHashMap<uint32_t, media::CdmPromise> PromiseMap;
+
+ ProxyMediaKeys(RendererCdmManager* manager,
+ const media::SessionMessageCB& session_message_cb,
+ const media::SessionReadyCB& session_ready_cb,
+ const media::SessionClosedCB& session_closed_cb,
+ const media::SessionErrorCB& session_error_cb);
+
+ void InitializeCdm(const std::string& key_system,
+ const GURL& security_origin);
+
+ // These functions keep track of Android-specific session_ids <->
+ // web_session_ids mappings.
+ // TODO(jrummell): Remove this once the Android-specific code changes to
+ // support string web session ids.
+ uint32_t CreateSessionId();
+ void AssignWebSessionId(uint32_t session_id,
+ const std::string& web_session_id);
+ uint32_t LookupSessionId(const std::string& web_session_id) const;
+ std::string LookupWebSessionId(uint32_t session_id) const;
+ void DropWebSessionId(const std::string& web_session_id);
+
+ // Helper function to keep track of promises. Adding takes ownership of the
+ // promise, transferred back to caller on take.
+ void SavePromise(uint32_t session_id, scoped_ptr<media::CdmPromise> promise);
+ scoped_ptr<media::CdmPromise> TakePromise(uint32_t session_id);
+
+ RendererCdmManager* manager_;
+ int cdm_id_;
+
+ media::SessionMessageCB session_message_cb_;
+ media::SessionReadyCB session_ready_cb_;
+ media::SessionClosedCB session_closed_cb_;
+ media::SessionErrorCB session_error_cb_;
+
+ // Android-specific. See comment above CreateSessionId().
+ uint32_t next_session_id_;
+ SessionIdMap web_session_to_session_id_map_;
+
+ // Keep track of outstanding promises. This map owns the promise object.
+ PromiseMap session_id_to_promise_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProxyMediaKeys);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_CRYPTO_PROXY_MEDIA_KEYS_H_
diff --git a/chromium/content/renderer/media/crypto/renderer_cdm_manager.cc b/chromium/content/renderer/media/crypto/renderer_cdm_manager.cc
new file mode 100644
index 00000000000..ba2e8783756
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/renderer_cdm_manager.cc
@@ -0,0 +1,147 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/crypto/renderer_cdm_manager.h"
+
+#include "base/stl_util.h"
+#include "content/common/media/cdm_messages.h"
+#include "content/renderer/media/crypto/proxy_media_keys.h"
+
+namespace content {
+
+// Maximum sizes for various EME API parameters. These are checks to prevent
+// unnecessarily large messages from being passed around, and the sizes
+// are somewhat arbitrary as the EME spec doesn't specify any limits.
+const size_t kMaxWebSessionIdLength = 512;
+const size_t kMaxSessionMessageLength = 10240; // 10 KB
+
+RendererCdmManager::RendererCdmManager(RenderFrame* render_frame)
+ : RenderFrameObserver(render_frame),
+ next_cdm_id_(kInvalidCdmId + 1) {
+}
+
+RendererCdmManager::~RendererCdmManager() {
+ DCHECK(proxy_media_keys_map_.empty())
+ << "RendererCdmManager is owned by RenderFrameImpl and is destroyed only "
+ "after all ProxyMediaKeys are destroyed and unregistered.";
+}
+
+bool RendererCdmManager::OnMessageReceived(const IPC::Message& msg) {
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(RendererCdmManager, msg)
+ IPC_MESSAGE_HANDLER(CdmMsg_SessionCreated, OnSessionCreated)
+ IPC_MESSAGE_HANDLER(CdmMsg_SessionMessage, OnSessionMessage)
+ IPC_MESSAGE_HANDLER(CdmMsg_SessionReady, OnSessionReady)
+ IPC_MESSAGE_HANDLER(CdmMsg_SessionClosed, OnSessionClosed)
+ IPC_MESSAGE_HANDLER(CdmMsg_SessionError, OnSessionError)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+void RendererCdmManager::InitializeCdm(int cdm_id,
+ ProxyMediaKeys* media_keys,
+ const std::string& key_system,
+ const GURL& security_origin) {
+ DCHECK(GetMediaKeys(cdm_id)) << "|cdm_id| not registered.";
+ Send(new CdmHostMsg_InitializeCdm(
+ routing_id(), cdm_id, key_system, security_origin));
+}
+
+void RendererCdmManager::CreateSession(
+ int cdm_id,
+ uint32 session_id,
+ CdmHostMsg_CreateSession_ContentType content_type,
+ const std::vector<uint8>& init_data) {
+ DCHECK(GetMediaKeys(cdm_id)) << "|cdm_id| not registered.";
+ Send(new CdmHostMsg_CreateSession(
+ routing_id(), cdm_id, session_id, content_type, init_data));
+}
+
+void RendererCdmManager::UpdateSession(int cdm_id,
+ uint32 session_id,
+ const std::vector<uint8>& response) {
+ DCHECK(GetMediaKeys(cdm_id)) << "|cdm_id| not registered.";
+ Send(
+ new CdmHostMsg_UpdateSession(routing_id(), cdm_id, session_id, response));
+}
+
+void RendererCdmManager::ReleaseSession(int cdm_id, uint32 session_id) {
+ DCHECK(GetMediaKeys(cdm_id)) << "|cdm_id| not registered.";
+ Send(new CdmHostMsg_ReleaseSession(routing_id(), cdm_id, session_id));
+}
+
+void RendererCdmManager::DestroyCdm(int cdm_id) {
+ DCHECK(GetMediaKeys(cdm_id)) << "|cdm_id| not registered.";
+ Send(new CdmHostMsg_DestroyCdm(routing_id(), cdm_id));
+}
+
+void RendererCdmManager::OnSessionCreated(int cdm_id,
+ uint32 session_id,
+ const std::string& web_session_id) {
+ if (web_session_id.length() > kMaxWebSessionIdLength) {
+ OnSessionError(cdm_id, session_id, media::MediaKeys::kUnknownError, 0);
+ return;
+ }
+
+ ProxyMediaKeys* media_keys = GetMediaKeys(cdm_id);
+ if (media_keys)
+ media_keys->OnSessionCreated(session_id, web_session_id);
+}
+
+void RendererCdmManager::OnSessionMessage(int cdm_id,
+ uint32 session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url) {
+ if (message.size() > kMaxSessionMessageLength) {
+ OnSessionError(cdm_id, session_id, media::MediaKeys::kUnknownError, 0);
+ return;
+ }
+
+ ProxyMediaKeys* media_keys = GetMediaKeys(cdm_id);
+ if (media_keys)
+ media_keys->OnSessionMessage(session_id, message, destination_url);
+}
+
+void RendererCdmManager::OnSessionReady(int cdm_id, uint32 session_id) {
+ ProxyMediaKeys* media_keys = GetMediaKeys(cdm_id);
+ if (media_keys)
+ media_keys->OnSessionReady(session_id);
+}
+
+void RendererCdmManager::OnSessionClosed(int cdm_id, uint32 session_id) {
+ ProxyMediaKeys* media_keys = GetMediaKeys(cdm_id);
+ if (media_keys)
+ media_keys->OnSessionClosed(session_id);
+}
+
+void RendererCdmManager::OnSessionError(int cdm_id,
+ uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ uint32 system_code) {
+ ProxyMediaKeys* media_keys = GetMediaKeys(cdm_id);
+ if (media_keys)
+ media_keys->OnSessionError(session_id, error_code, system_code);
+}
+
+int RendererCdmManager::RegisterMediaKeys(ProxyMediaKeys* media_keys) {
+ int cdm_id = next_cdm_id_++;
+ DCHECK_NE(cdm_id, kInvalidCdmId);
+ DCHECK(!ContainsKey(proxy_media_keys_map_, cdm_id));
+ proxy_media_keys_map_[cdm_id] = media_keys;
+ return cdm_id;
+}
+
+void RendererCdmManager::UnregisterMediaKeys(int cdm_id) {
+ DCHECK(ContainsKey(proxy_media_keys_map_, cdm_id));
+ proxy_media_keys_map_.erase(cdm_id);
+}
+
+ProxyMediaKeys* RendererCdmManager::GetMediaKeys(int cdm_id) {
+ std::map<int, ProxyMediaKeys*>::iterator iter =
+ proxy_media_keys_map_.find(cdm_id);
+ return (iter != proxy_media_keys_map_.end()) ? iter->second : NULL;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/crypto/renderer_cdm_manager.h b/chromium/content/renderer/media/crypto/renderer_cdm_manager.h
new file mode 100644
index 00000000000..a5ff82fc213
--- /dev/null
+++ b/chromium/content/renderer/media/crypto/renderer_cdm_manager.h
@@ -0,0 +1,90 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_CRYPTO_RENDERER_CDM_MANAGER_H_
+#define CONTENT_RENDERER_MEDIA_CRYPTO_RENDERER_CDM_MANAGER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "content/common/media/cdm_messages_enums.h"
+#include "content/public/renderer/render_frame_observer.h"
+#include "media/base/media_keys.h"
+#include "url/gurl.h"
+
+namespace blink {
+class WebFrame;
+}
+
+namespace content {
+
+class ProxyMediaKeys;
+
+// Class for managing all the CDM objects in the same RenderFrame.
+class RendererCdmManager : public RenderFrameObserver {
+ public:
+ static const int kInvalidCdmId = 0;
+
+ // Constructs a RendererCdmManager object for the |render_frame|.
+ explicit RendererCdmManager(RenderFrame* render_frame);
+ virtual ~RendererCdmManager();
+
+ // RenderFrameObserver overrides.
+ virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE;
+
+ // Encrypted media related methods.
+ void InitializeCdm(int cdm_id,
+ ProxyMediaKeys* media_keys,
+ const std::string& key_system,
+ const GURL& security_origin);
+ void CreateSession(int cdm_id,
+ uint32 session_id,
+ CdmHostMsg_CreateSession_ContentType content_type,
+ const std::vector<uint8>& init_data);
+ void UpdateSession(int cdm_id,
+ uint32 session_id,
+ const std::vector<uint8>& response);
+ void ReleaseSession(int cdm_id, uint32 session_id);
+ void DestroyCdm(int cdm_id);
+
+ // Registers a ProxyMediaKeys object. Returns allocated CDM ID.
+ int RegisterMediaKeys(ProxyMediaKeys* media_keys);
+
+ // Unregisters a ProxyMediaKeys object identified by |cdm_id|.
+ void UnregisterMediaKeys(int cdm_id);
+
+ private:
+ // Gets the pointer to ProxyMediaKeys given the |cdm_id|.
+ ProxyMediaKeys* GetMediaKeys(int cdm_id);
+
+ // Message handlers.
+ void OnSessionCreated(int cdm_id,
+ uint32 session_id,
+ const std::string& web_session_id);
+ void OnSessionMessage(int cdm_id,
+ uint32 session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url);
+ void OnSessionReady(int cdm_id, uint32 session_id);
+ void OnSessionClosed(int cdm_id, uint32 session_id);
+ void OnSessionError(int cdm_id,
+ uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ uint32 system_code);
+
+ // CDM ID should be unique per renderer frame.
+ // TODO(xhwang): Use uint32 to prevent undefined overflow behavior.
+ int next_cdm_id_;
+
+ // CDM ID to ProxyMediaKeys mapping.
+ std::map<int, ProxyMediaKeys*> proxy_media_keys_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(RendererCdmManager);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_CRYPTO_RENDERER_CDM_MANAGER_H_
diff --git a/chromium/content/renderer/media/media_stream.cc b/chromium/content/renderer/media/media_stream.cc
new file mode 100644
index 00000000000..b555a75f2d3
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream.cc
@@ -0,0 +1,81 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream.h"
+
+#include "base/logging.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+// static
+MediaStream* MediaStream::GetMediaStream(
+ const blink::WebMediaStream& stream) {
+ return static_cast<MediaStream*>(stream.extraData());
+}
+
+// static
+webrtc::MediaStreamInterface* MediaStream::GetAdapter(
+ const blink::WebMediaStream& stream) {
+ MediaStream* native_stream = GetMediaStream(stream);
+ DCHECK(native_stream);
+ return native_stream->GetWebRtcAdapter(stream);
+}
+
+MediaStream::MediaStream(const blink::WebMediaStream& stream)
+ : is_local_(true),
+ webrtc_media_stream_(NULL) {
+}
+
+MediaStream::MediaStream(webrtc::MediaStreamInterface* webrtc_stream)
+ : is_local_(false),
+ webrtc_media_stream_(webrtc_stream) {
+}
+
+MediaStream::~MediaStream() {
+ DCHECK(observers_.empty());
+}
+
+webrtc::MediaStreamInterface* MediaStream::GetWebRtcAdapter(
+ const blink::WebMediaStream& stream) {
+ DCHECK(webrtc_media_stream_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return webrtc_media_stream_.get();
+}
+
+void MediaStream::AddObserver(MediaStreamObserver* observer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(std::find(observers_.begin(), observers_.end(), observer) ==
+ observers_.end());
+ observers_.push_back(observer);
+}
+
+void MediaStream::RemoveObserver(MediaStreamObserver* observer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ std::vector<MediaStreamObserver*>::iterator it =
+ std::find(observers_.begin(), observers_.end(), observer);
+ DCHECK(it != observers_.end());
+ observers_.erase(it);
+}
+
+bool MediaStream::AddTrack(const blink::WebMediaStreamTrack& track) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (std::vector<MediaStreamObserver*>::iterator it = observers_.begin();
+ it != observers_.end(); ++it) {
+ (*it)->TrackAdded(track);
+ }
+ return true;
+}
+
+bool MediaStream::RemoveTrack(const blink::WebMediaStreamTrack& track) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (std::vector<MediaStreamObserver*>::iterator it = observers_.begin();
+ it != observers_.end(); ++it) {
+ (*it)->TrackRemoved(track);
+ }
+ return true;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream.h b/chromium/content/renderer/media/media_stream.h
new file mode 100644
index 00000000000..6a43cfc4cdd
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream.h
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_H_
+
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/threading/thread_checker.h"
+#include "content/common/content_export.h"
+#include "third_party/WebKit/public/platform/WebMediaStream.h"
+
+namespace webrtc {
+class MediaStreamInterface;
+}
+
+namespace content {
+
+// MediaStreamObserver can be used to get notifications of when a track is
+// added or removed from a MediaStream.
+class MediaStreamObserver {
+ public:
+  // TrackAdded is called when |track| is added to the observed MediaStream.
+ virtual void TrackAdded(const blink::WebMediaStreamTrack& track) = 0;
+  // TrackRemoved is called when |track| is removed from the observed MediaStream.
+ virtual void TrackRemoved(const blink::WebMediaStreamTrack& track) = 0;
+
+ protected:
+ virtual ~MediaStreamObserver() {}
+};
+
+// MediaStream is the Chrome representation of blink::WebMediaStream.
+// It is owned by blink::WebMediaStream as blink::WebMediaStream::ExtraData.
+// Its lifetime is the same as the blink::WebMediaStream instance it belongs to.
+class CONTENT_EXPORT MediaStream
+ : NON_EXPORTED_BASE(public blink::WebMediaStream::ExtraData) {
+ public:
+ // Constructor for local MediaStreams.
+ MediaStream(const blink::WebMediaStream& stream);
+
+ // Constructor for remote MediaStreams.
+  // TODO(xians): Remove once the audio renderer no longer distinguishes
+  // between local and remotely generated streams.
+ explicit MediaStream(webrtc::MediaStreamInterface* webrtc_stream);
+
+ virtual ~MediaStream();
+
+ // Returns an instance of MediaStream. This method will never return NULL.
+ static MediaStream* GetMediaStream(
+ const blink::WebMediaStream& stream);
+
+ // Returns a libjingle representation of a remote MediaStream.
+  // TODO(xians): Remove once the audio renderer no longer distinguishes
+  // between local and remotely generated streams.
+ static webrtc::MediaStreamInterface* GetAdapter(
+ const blink::WebMediaStream& stream);
+
+  // Adds an observer to this MediaStream. It's the caller's responsibility to
+  // remove the observer before the destruction of the MediaStream.
+ void AddObserver(MediaStreamObserver* observer);
+ void RemoveObserver(MediaStreamObserver* observer);
+
+ // TODO(xians): Remove |is_local| once AudioTracks can be rendered the same
+ // way regardless if they are local or remote.
+ bool is_local() const { return is_local_; }
+
+  // Called by MediaStreamCenter when a track has been added to the stream.
+ bool AddTrack(const blink::WebMediaStreamTrack& track);
+
+ // Called by MediaStreamCenter when a track has been removed from |stream|.
+ bool RemoveTrack(const blink::WebMediaStreamTrack& track);
+
+ protected:
+ virtual webrtc::MediaStreamInterface* GetWebRtcAdapter(
+ const blink::WebMediaStream& stream);
+
+ private:
+ base::ThreadChecker thread_checker_;
+ const bool is_local_;
+ const std::string label_;
+ std::vector<MediaStreamObserver*> observers_;
+
+  // TODO(xians): Remove once the audio renderer no longer distinguishes
+  // between local and remotely generated streams.
+ scoped_refptr<webrtc::MediaStreamInterface> webrtc_media_stream_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStream);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_H_
diff --git a/chromium/content/renderer/media/media_stream_audio_level_calculator.cc b/chromium/content/renderer/media/media_stream_audio_level_calculator.cc
new file mode 100644
index 00000000000..a136e32ae10
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_audio_level_calculator.cc
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_audio_level_calculator.h"
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace content {
+
+namespace {
+
+// Calculates the maximum absolute amplitude of the audio data.
+// Note, the return value can be bigger than std::numeric_limits<int16>::max().
+int MaxAmplitude(const int16* audio_data, int length) {
+ int max = 0, absolute = 0;
+ for (int i = 0; i < length; ++i) {
+ absolute = std::abs(audio_data[i]);
+ if (absolute > max)
+ max = absolute;
+ }
+  // The range of int16 is [-32768, 32767]; verify that |max| does not
+  // exceed 32768.
+ DCHECK(max <= std::abs(std::numeric_limits<int16>::min()));
+
+ return max;
+}
+
+} // namespace
+
+MediaStreamAudioLevelCalculator::MediaStreamAudioLevelCalculator()
+ : counter_(0),
+ max_amplitude_(0),
+ level_(0) {
+}
+
+MediaStreamAudioLevelCalculator::~MediaStreamAudioLevelCalculator() {
+}
+
+int MediaStreamAudioLevelCalculator::Calculate(const int16* audio_data,
+ int number_of_channels,
+ int number_of_frames) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // |level_| is updated every 10 callbacks. For the case where callback comes
+ // every 10ms, |level_| will be updated approximately every 100ms.
+ static const int kUpdateFrequency = 10;
+
+ int max = MaxAmplitude(audio_data, number_of_channels * number_of_frames);
+ max_amplitude_ = std::max(max_amplitude_, max);
+
+ if (counter_++ == kUpdateFrequency) {
+ level_ = max_amplitude_;
+
+ // Decay the absolute maximum amplitude by 1/4.
+ max_amplitude_ >>= 2;
+
+ // Reset the counter.
+ counter_ = 0;
+ }
+
+ return level_;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_level_calculator.h b/chromium/content/renderer/media/media_stream_audio_level_calculator.h
new file mode 100644
index 00000000000..41c9c34f7be
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_audio_level_calculator.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_LEVEL_CALCULATOR_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_LEVEL_CALCULATOR_H_
+
+#include "base/threading/thread_checker.h"
+
+namespace content {
+
+// This class is used by the WebRtcLocalAudioTrack to calculate the level of
+// the audio signal, which will eventually be used by the volume
+// animation UI.
+// The algorithm used by this class is the same as how it is done in
+// third_party/webrtc/voice_engine/level_indicator.cc.
+class MediaStreamAudioLevelCalculator {
+ public:
+ MediaStreamAudioLevelCalculator();
+ ~MediaStreamAudioLevelCalculator();
+
+ // Calculates the signal level of the audio data.
+ // Returns the absolute value of the amplitude of the signal.
+ int Calculate(const int16* audio_data, int number_of_channels,
+ int number_of_frames);
+
+ private:
+ // Used to DCHECK that the constructor and Calculate() are always called on
+ // the same audio thread. Note that the destructor will be called on a
+ // different thread, which can be either the main render thread or a new
+ // audio thread where WebRtcLocalAudioTrack::OnSetFormat() is called.
+ base::ThreadChecker thread_checker_;
+
+ int counter_;
+ int max_amplitude_;
+ int level_;
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_LEVEL_CALCULATOR_H_
diff --git a/chromium/content/renderer/media/media_stream_audio_processor.cc b/chromium/content/renderer/media/media_stream_audio_processor.cc
index 98439c78aef..2e7a40db18c 100644
--- a/chromium/content/renderer/media/media_stream_audio_processor.cc
+++ b/chromium/content/renderer/media/media_stream_audio_processor.cc
@@ -6,29 +6,49 @@
#include "base/command_line.h"
#include "base/debug/trace_event.h"
+#include "base/metrics/histogram.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_fifo.h"
#include "media/base/channel_layout.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
+#include "third_party/webrtc/modules/audio_processing/typing_detection.h"
namespace content {
namespace {
using webrtc::AudioProcessing;
-using webrtc::MediaConstraintsInterface;
-#if defined(ANDROID)
+#if defined(OS_ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
-const int kAudioProcessingNumberOfChannel = 1;
+const int kAudioProcessingNumberOfChannels = 1;
+const AudioProcessing::ChannelLayout kAudioProcessingChannelLayout =
+ AudioProcessing::kMono;
const int kMaxNumberOfBuffersInFifo = 2;
+// Used by UMA histograms and entries shouldn't be re-ordered or removed.
+enum AudioTrackProcessingStates {
+ AUDIO_PROCESSING_ENABLED = 0,
+ AUDIO_PROCESSING_DISABLED,
+ AUDIO_PROCESSING_IN_WEBRTC,
+ AUDIO_PROCESSING_MAX
+};
+
+void RecordProcessingState(AudioTrackProcessingStates state) {
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioTrackProcessingStates",
+ state, AUDIO_PROCESSING_MAX);
+}
+
} // namespace
class MediaStreamAudioProcessor::MediaStreamAudioConverter
@@ -39,25 +59,34 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
: source_params_(source_params),
sink_params_(sink_params),
audio_converter_(source_params, sink_params_, false) {
+ // An instance of MediaStreamAudioConverter may be created in the main
+ // render thread and used in the audio thread, for example, the
+ // |MediaStreamAudioProcessor::capture_converter_|.
+ thread_checker_.DetachFromThread();
audio_converter_.AddInput(this);
+
// Create and initialize audio fifo and audio bus wrapper.
// The size of the FIFO should be at least twice of the source buffer size
- // or twice of the sink buffer size.
+ // or twice of the sink buffer size. Also, FIFO needs to have enough space
+ // to store pre-processed data before passing the data to
+ // webrtc::AudioProcessing, which requires 10ms as packet size.
+ int max_frame_size = std::max(source_params_.frames_per_buffer(),
+ sink_params_.frames_per_buffer());
int buffer_size = std::max(
- kMaxNumberOfBuffersInFifo * source_params_.frames_per_buffer(),
- kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
+ kMaxNumberOfBuffersInFifo * max_frame_size,
+ kMaxNumberOfBuffersInFifo * source_params_.sample_rate() / 100);
fifo_.reset(new media::AudioFifo(source_params_.channels(), buffer_size));
+
// TODO(xians): Use CreateWrapper to save one memcpy.
audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
sink_params_.frames_per_buffer());
}
virtual ~MediaStreamAudioConverter() {
- DCHECK(thread_checker_.CalledOnValidThread());
audio_converter_.RemoveInput(this);
}
- void Push(media::AudioBus* audio_source) {
+ void Push(const media::AudioBus* audio_source) {
// Called on the audio thread, which is the capture audio thread for
// |MediaStreamAudioProcessor::capture_converter_|, and render audio thread
// for |MediaStreamAudioProcessor::render_converter_|.
@@ -66,17 +95,30 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
fifo_->Push(audio_source);
}
- bool Convert(webrtc::AudioFrame* out) {
+ bool Convert(webrtc::AudioFrame* out, bool audio_mirroring) {
// Called on the audio thread, which is the capture audio thread for
// |MediaStreamAudioProcessor::capture_converter_|, and render audio thread
// for |MediaStreamAudioProcessor::render_converter_|.
- // Return false if there is no 10ms data in the FIFO.
DCHECK(thread_checker_.CalledOnValidThread());
- if (fifo_->frames() < (source_params_.sample_rate() / 100))
+ // Return false if there is not enough data in the FIFO, this happens when
+ // fifo_->frames() / source_params_.sample_rate() is less than
+ // sink_params.frames_per_buffer() / sink_params.sample_rate().
+ if (fifo_->frames() * sink_params_.sample_rate() <
+ sink_params_.frames_per_buffer() * source_params_.sample_rate()) {
return false;
+ }
- // Convert 10ms data to the output format, this will trigger ProvideInput().
+ // Convert data to the output format, this will trigger ProvideInput().
audio_converter_.Convert(audio_wrapper_.get());
+ DCHECK_EQ(audio_wrapper_->frames(), sink_params_.frames_per_buffer());
+
+ // Swap channels before interleaving the data if |audio_mirroring| is
+ // set to true.
+ if (audio_mirroring &&
+ sink_params_.channel_layout() == media::CHANNEL_LAYOUT_STEREO) {
+ // Swap the first and second channels.
+ audio_wrapper_->SwapChannels(0, 1);
+ }
// TODO(xians): Figure out a better way to handle the interleaved and
// deinterleaved format switching.
@@ -127,155 +169,292 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
scoped_ptr<media::AudioFifo> fifo_;
};
+bool MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() {
+ return !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableAudioTrackProcessing);
+}
+
MediaStreamAudioProcessor::MediaStreamAudioProcessor(
- const webrtc::MediaConstraintsInterface* constraints)
- : render_delay_ms_(0) {
+ const blink::WebMediaConstraints& constraints,
+ int effects,
+ WebRtcPlayoutDataSource* playout_data_source)
+ : render_delay_ms_(0),
+ playout_data_source_(playout_data_source),
+ audio_mirroring_(false),
+ typing_detected_(false),
+ stopped_(false) {
capture_thread_checker_.DetachFromThread();
render_thread_checker_.DetachFromThread();
- InitializeAudioProcessingModule(constraints);
+ InitializeAudioProcessingModule(constraints, effects);
+ if (IsAudioTrackProcessingEnabled()) {
+ aec_dump_message_filter_ = AecDumpMessageFilter::Get();
+    // In unit tests that do not create a message filter,
+    // |aec_dump_message_filter_| will be NULL; that is safe to ignore. Other
+    // unit tests and browser tests ensure the filter exists when it should.
+ if (aec_dump_message_filter_)
+ aec_dump_message_filter_->AddDelegate(this);
+ }
}
MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
DCHECK(main_thread_checker_.CalledOnValidThread());
- StopAudioProcessing();
-}
-
-void MediaStreamAudioProcessor::PushCaptureData(media::AudioBus* audio_source) {
- DCHECK(capture_thread_checker_.CalledOnValidThread());
- capture_converter_->Push(audio_source);
+ Stop();
}
-void MediaStreamAudioProcessor::PushRenderData(
- const int16* render_audio, int sample_rate, int number_of_channels,
- int number_of_frames, base::TimeDelta render_delay) {
- DCHECK(render_thread_checker_.CalledOnValidThread());
-
- // Return immediately if the echo cancellation is off.
- if (!audio_processing_ ||
- !audio_processing_->echo_cancellation()->is_enabled()) {
- return;
- }
+void MediaStreamAudioProcessor::OnCaptureFormatChanged(
+ const media::AudioParameters& source_params) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ // There is no need to hold a lock here since the caller guarantees that
+ // there is no more PushCaptureData() and ProcessAndConsumeData() callbacks
+ // on the capture thread.
+ InitializeCaptureConverter(source_params);
- TRACE_EVENT0("audio",
- "MediaStreamAudioProcessor::FeedRenderDataToAudioProcessing");
- int64 new_render_delay_ms = render_delay.InMilliseconds();
- DCHECK_LT(new_render_delay_ms,
- std::numeric_limits<base::subtle::Atomic32>::max());
- base::subtle::Release_Store(&render_delay_ms_, new_render_delay_ms);
+ // Reset the |capture_thread_checker_| since the capture data will come from
+ // a new capture thread.
+ capture_thread_checker_.DetachFromThread();
+}
- InitializeRenderConverterIfNeeded(sample_rate, number_of_channels,
- number_of_frames);
+void MediaStreamAudioProcessor::PushCaptureData(
+ const media::AudioBus* audio_source) {
+ DCHECK(capture_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(audio_source->channels(),
+ capture_converter_->source_parameters().channels());
+ DCHECK_EQ(audio_source->frames(),
+ capture_converter_->source_parameters().frames_per_buffer());
- // TODO(xians): Avoid this extra interleave/deinterleave.
- render_data_bus_->FromInterleaved(render_audio,
- render_data_bus_->frames(),
- sizeof(render_audio[0]));
- render_converter_->Push(render_data_bus_.get());
- while (render_converter_->Convert(&render_frame_))
- audio_processing_->AnalyzeReverseStream(&render_frame_);
+ capture_converter_->Push(audio_source);
}
bool MediaStreamAudioProcessor::ProcessAndConsumeData(
base::TimeDelta capture_delay, int volume, bool key_pressed,
- int16** out) {
+ int* new_volume, int16** out) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
- TRACE_EVENT0("audio",
- "MediaStreamAudioProcessor::ProcessAndConsumeData");
+ TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData");
- if (!capture_converter_->Convert(&capture_frame_))
+ if (!capture_converter_->Convert(&capture_frame_, audio_mirroring_))
return false;
- ProcessData(&capture_frame_, capture_delay, volume, key_pressed);
+ *new_volume = ProcessData(&capture_frame_, capture_delay, volume,
+ key_pressed);
*out = capture_frame_.data_;
return true;
}
-void MediaStreamAudioProcessor::SetCaptureFormat(
- const media::AudioParameters& source_params) {
- DCHECK(capture_thread_checker_.CalledOnValidThread());
- DCHECK(source_params.IsValid());
+void MediaStreamAudioProcessor::Stop() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (stopped_)
+ return;
- // Create and initialize audio converter for the source data.
- // When the webrtc AudioProcessing is enabled, the sink format of the
- // converter will be the same as the post-processed data format, which is
- // 32k mono for desktops and 16k mono for Android. When the AudioProcessing
- // is disabled, the sink format will be the same as the source format.
- const int sink_sample_rate = audio_processing_ ?
- kAudioProcessingSampleRate : source_params.sample_rate();
- const media::ChannelLayout sink_channel_layout = audio_processing_ ?
- media::CHANNEL_LAYOUT_MONO : source_params.channel_layout();
+ stopped_ = true;
- // WebRtc is using 10ms data as its native packet size.
- media::AudioParameters sink_params(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
- sink_sample_rate, 16, sink_sample_rate / 100);
- capture_converter_.reset(
- new MediaStreamAudioConverter(source_params, sink_params));
+ if (aec_dump_message_filter_) {
+ aec_dump_message_filter_->RemoveDelegate(this);
+ aec_dump_message_filter_ = NULL;
+ }
+
+ if (!audio_processing_.get())
+ return;
+
+ StopEchoCancellationDump(audio_processing_.get());
+
+ if (playout_data_source_) {
+ playout_data_source_->RemovePlayoutSink(this);
+ playout_data_source_ = NULL;
+ }
+}
+
+const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const {
+ return capture_converter_->source_parameters();
}
const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const {
return capture_converter_->sink_parameters();
}
+void MediaStreamAudioProcessor::OnAecDumpFile(
+ const IPC::PlatformFileForTransit& file_handle) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+
+ base::File file = IPC::PlatformFileForTransitToFile(file_handle);
+ DCHECK(file.IsValid());
+
+ if (audio_processing_)
+ StartEchoCancellationDump(audio_processing_.get(), file.Pass());
+ else
+ file.Close();
+}
+
+void MediaStreamAudioProcessor::OnDisableAecDump() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (audio_processing_)
+ StopEchoCancellationDump(audio_processing_.get());
+}
+
+void MediaStreamAudioProcessor::OnIpcClosing() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ aec_dump_message_filter_ = NULL;
+}
+
+void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds) {
+ DCHECK(render_thread_checker_.CalledOnValidThread());
+ DCHECK(audio_processing_->echo_control_mobile()->is_enabled() ^
+ audio_processing_->echo_cancellation()->is_enabled());
+
+ TRACE_EVENT0("audio", "MediaStreamAudioProcessor::OnPlayoutData");
+ DCHECK_LT(audio_delay_milliseconds,
+ std::numeric_limits<base::subtle::Atomic32>::max());
+ base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds);
+
+ InitializeRenderConverterIfNeeded(sample_rate, audio_bus->channels(),
+ audio_bus->frames());
+
+ render_converter_->Push(audio_bus);
+ while (render_converter_->Convert(&render_frame_, false))
+ audio_processing_->AnalyzeReverseStream(&render_frame_);
+}
+
+void MediaStreamAudioProcessor::OnPlayoutDataSourceChanged() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ // There is no need to hold a lock here since the caller guarantees that
+ // there is no more OnPlayoutData() callback on the render thread.
+ render_thread_checker_.DetachFromThread();
+ render_converter_.reset();
+}
+
+void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
+ stats->typing_noise_detected =
+ (base::subtle::Acquire_Load(&typing_detected_) != false);
+ GetAecStats(audio_processing_.get(), stats);
+}
+
void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
- const webrtc::MediaConstraintsInterface* constraints) {
+ const blink::WebMediaConstraints& constraints, int effects) {
DCHECK(!audio_processing_);
- DCHECK(constraints);
- if (!CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableAudioTrackProcessing)) {
+
+ MediaAudioConstraints audio_constraints(constraints, effects);
+
+ // Audio mirroring can be enabled even though audio processing is otherwise
+ // disabled.
+ audio_mirroring_ = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogAudioMirroring);
+
+ if (!IsAudioTrackProcessingEnabled()) {
+ RecordProcessingState(AUDIO_PROCESSING_IN_WEBRTC);
return;
}
- const bool enable_aec = GetPropertyFromConstraints(
- constraints, MediaConstraintsInterface::kEchoCancellation);
- const bool enable_ns = GetPropertyFromConstraints(
- constraints, MediaConstraintsInterface::kNoiseSuppression);
- const bool enable_high_pass_filter = GetPropertyFromConstraints(
- constraints, MediaConstraintsInterface::kHighpassFilter);
-#if defined(IOS) || defined(ANDROID)
- const bool enable_experimental_aec = false;
- const bool enable_typing_detection = false;
+#if defined(OS_IOS)
+ // On iOS, VPIO provides built-in AGC and AEC.
+ const bool echo_cancellation = false;
+ const bool goog_agc = false;
#else
- const bool enable_experimental_aec = GetPropertyFromConstraints(
- constraints, MediaConstraintsInterface::kExperimentalEchoCancellation);
- const bool enable_typing_detection = GetPropertyFromConstraints(
- constraints, MediaConstraintsInterface::kTypingNoiseDetection);
+ const bool echo_cancellation =
+ audio_constraints.GetEchoCancellationProperty();
+ const bool goog_agc = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogAutoGainControl);
#endif
- // Return immediately if no audio processing component is enabled.
- if (!enable_aec && !enable_experimental_aec && !enable_ns &&
- !enable_high_pass_filter && !enable_typing_detection) {
+#if defined(OS_IOS) || defined(OS_ANDROID)
+ const bool goog_experimental_aec = false;
+ const bool goog_typing_detection = false;
+#else
+ const bool goog_experimental_aec = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogExperimentalEchoCancellation);
+ const bool goog_typing_detection = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogTypingNoiseDetection);
+#endif
+
+ const bool goog_ns = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogNoiseSuppression);
+ const bool goog_experimental_ns = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogExperimentalNoiseSuppression);
+ const bool goog_high_pass_filter = audio_constraints.GetProperty(
+ MediaAudioConstraints::kGoogHighpassFilter);
+
+ // Return immediately if no goog constraint is enabled.
+ if (!echo_cancellation && !goog_experimental_aec && !goog_ns &&
+ !goog_high_pass_filter && !goog_typing_detection &&
+ !goog_agc && !goog_experimental_ns) {
+ RecordProcessingState(AUDIO_PROCESSING_DISABLED);
return;
}
// Create and configure the webrtc::AudioProcessing.
- audio_processing_.reset(webrtc::AudioProcessing::Create(0));
+ audio_processing_.reset(webrtc::AudioProcessing::Create());
+ CHECK_EQ(0, audio_processing_->Initialize(kAudioProcessingSampleRate,
+ kAudioProcessingSampleRate,
+ kAudioProcessingSampleRate,
+ kAudioProcessingChannelLayout,
+ kAudioProcessingChannelLayout,
+ kAudioProcessingChannelLayout));
// Enable the audio processing components.
- if (enable_aec) {
+ if (echo_cancellation) {
EnableEchoCancellation(audio_processing_.get());
- if (enable_experimental_aec)
+
+ if (goog_experimental_aec)
EnableExperimentalEchoCancellation(audio_processing_.get());
+
+ if (playout_data_source_)
+ playout_data_source_->AddPlayoutSink(this);
}
- if (enable_ns)
+ if (goog_ns)
EnableNoiseSuppression(audio_processing_.get());
- if (enable_high_pass_filter)
+ if (goog_experimental_ns)
+ EnableExperimentalNoiseSuppression(audio_processing_.get());
+
+ if (goog_high_pass_filter)
EnableHighPassFilter(audio_processing_.get());
- if (enable_typing_detection)
- EnableTypingDetection(audio_processing_.get());
+ if (goog_typing_detection) {
+ // TODO(xians): Remove this |typing_detector_| after the typing suppression
+ // is enabled by default.
+ typing_detector_.reset(new webrtc::TypingDetection());
+ EnableTypingDetection(audio_processing_.get(), typing_detector_.get());
+ }
+
+ if (goog_agc)
+ EnableAutomaticGainControl(audio_processing_.get());
+
+ RecordProcessingState(AUDIO_PROCESSING_ENABLED);
+}
+void MediaStreamAudioProcessor::InitializeCaptureConverter(
+ const media::AudioParameters& source_params) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(source_params.IsValid());
- // Configure the audio format the audio processing is running on. This
- // has to be done after all the needed components are enabled.
- CHECK_EQ(audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate),
- 0);
- CHECK_EQ(audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
- kAudioProcessingNumberOfChannel),
- 0);
+ // Create and initialize audio converter for the source data.
+ // When the webrtc AudioProcessing is enabled, the sink format of the
+ // converter will be the same as the post-processed data format, which is
+ // 32k mono for desktops and 16k mono for Android. When the AudioProcessing
+ // is disabled, the sink format will be the same as the source format.
+ const int sink_sample_rate = audio_processing_ ?
+ kAudioProcessingSampleRate : source_params.sample_rate();
+ const media::ChannelLayout sink_channel_layout = audio_processing_ ?
+ media::GuessChannelLayout(kAudioProcessingNumberOfChannels) :
+ source_params.channel_layout();
+
+ // WebRtc AudioProcessing requires 10ms as its packet size. We use this
+ // native size when processing is enabled. While processing is disabled, and
+ // the source is running with a buffer size smaller than 10ms buffer, we use
+ // same buffer size as the incoming format to avoid extra FIFO for WebAudio.
+ int sink_buffer_size = sink_sample_rate / 100;
+ if (!audio_processing_ &&
+ source_params.frames_per_buffer() < sink_buffer_size) {
+ sink_buffer_size = source_params.frames_per_buffer();
+ }
+
+ media::AudioParameters sink_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
+ sink_sample_rate, 16, sink_buffer_size);
+ capture_converter_.reset(
+ new MediaStreamAudioConverter(source_params, sink_params));
}
void MediaStreamAudioProcessor::InitializeRenderConverterIfNeeded(
@@ -306,16 +485,16 @@ void MediaStreamAudioProcessor::InitializeRenderConverterIfNeeded(
frames_per_buffer);
}
-void MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
- base::TimeDelta capture_delay,
- int volume,
- bool key_pressed) {
+int MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
+ base::TimeDelta capture_delay,
+ int volume,
+ bool key_pressed) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
if (!audio_processing_)
- return;
+ return 0;
- TRACE_EVENT0("audio", "MediaStreamAudioProcessor::Process10MsData");
- DCHECK_EQ(audio_processing_->sample_rate_hz(),
+ TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData");
+ DCHECK_EQ(audio_processing_->input_sample_rate_hz(),
capture_converter_->sink_parameters().sample_rate());
DCHECK_EQ(audio_processing_->num_input_channels(),
capture_converter_->sink_parameters().channels());
@@ -328,27 +507,35 @@ void MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
DCHECK_LT(capture_delay_ms,
std::numeric_limits<base::subtle::Atomic32>::max());
int total_delay_ms = capture_delay_ms + render_delay_ms;
- if (total_delay_ms > 1000) {
+ if (total_delay_ms > 300) {
LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms
<< "ms; render delay: " << render_delay_ms << "ms";
}
audio_processing_->set_stream_delay_ms(total_delay_ms);
+
+ DCHECK_LE(volume, WebRtcAudioDeviceImpl::kMaxVolumeLevel);
webrtc::GainControl* agc = audio_processing_->gain_control();
int err = agc->set_stream_analog_level(volume);
DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;
+
+ audio_processing_->set_stream_key_pressed(key_pressed);
+
err = audio_processing_->ProcessStream(audio_frame);
DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;
- // TODO(xians): Add support for AGC, typing detection, audio level
- // calculation, stereo swapping.
-}
-
-void MediaStreamAudioProcessor::StopAudioProcessing() {
- if (!audio_processing_.get())
- return;
+ if (typing_detector_ &&
+ audio_frame->vad_activity_ != webrtc::AudioFrame::kVadUnknown) {
+ bool vad_active =
+ (audio_frame->vad_activity_ == webrtc::AudioFrame::kVadActive);
+ bool typing_detected = typing_detector_->Process(key_pressed, vad_active);
+ base::subtle::Release_Store(&typing_detected_, typing_detected);
+ }
- audio_processing_.reset();
+ // Return 0 if the volume has not been changed, otherwise return the new
+ // volume.
+ return (agc->stream_analog_level() == volume) ?
+ 0 : agc->stream_analog_level();
}
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_processor.h b/chromium/content/renderer/media/media_stream_audio_processor.h
index 9c6db685db4..035a87edcd0 100644
--- a/chromium/content/renderer/media/media_stream_audio_processor.h
+++ b/chromium/content/renderer/media/media_stream_audio_processor.h
@@ -6,15 +6,22 @@
#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_
#include "base/atomicops.h"
+#include "base/files/file.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "content/common/content_export.h"
+#include "content/renderer/media/aec_dump_message_filter.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/base/audio_converter.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
#include "third_party/webrtc/modules/audio_processing/include/audio_processing.h"
#include "third_party/webrtc/modules/interface/module_common_types.h"
+namespace blink {
+class WebMediaConstraints;
+}
+
namespace media {
class AudioBus;
class AudioFifo;
@@ -23,40 +30,54 @@ class AudioParameters;
namespace webrtc {
class AudioFrame;
+class TypingDetection;
}
namespace content {
+class RTCMediaConstraints;
+
+using webrtc::AudioProcessorInterface;
+
// This class owns an object of webrtc::AudioProcessing which contains signal
// processing components like AGC, AEC and NS. It enables the components based
// on the getUserMedia constraints, processes the data and outputs it in a unit
// of 10 ms data chunk.
-class CONTENT_EXPORT MediaStreamAudioProcessor {
+class CONTENT_EXPORT MediaStreamAudioProcessor :
+ NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink),
+ NON_EXPORTED_BASE(public AudioProcessorInterface),
+ NON_EXPORTED_BASE(public AecDumpMessageFilter::AecDumpDelegate) {
public:
- explicit MediaStreamAudioProcessor(
- const webrtc::MediaConstraintsInterface* constraints);
- ~MediaStreamAudioProcessor();
+ // Returns false if |kDisableAudioTrackProcessing| is set to true, otherwise
+ // returns true.
+ static bool IsAudioTrackProcessingEnabled();
+
+ // |playout_data_source| is used to register this class as a sink to the
+ // WebRtc playout data for processing AEC. If clients do not enable AEC,
+ // |playout_data_source| won't be used.
+ MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints,
+ int effects,
+ WebRtcPlayoutDataSource* playout_data_source);
+
+ // Called when format of the capture data has changed.
+ // Called on the main render thread. The caller is responsible for stopping
+ // the capture thread before calling this method.
+ // After this method, the capture thread will be changed to a new capture
+ // thread.
+ void OnCaptureFormatChanged(const media::AudioParameters& source_params);
// Pushes capture data in |audio_source| to the internal FIFO.
// Called on the capture audio thread.
- void PushCaptureData(media::AudioBus* audio_source);
-
- // Push the render audio to webrtc::AudioProcessing for analysis. This is
- // needed iff echo processing is enabled.
- // |render_audio| is the pointer to the render audio data, its format
- // is specified by |sample_rate|, |number_of_channels| and |number_of_frames|.
- // Called on the render audio thread.
- void PushRenderData(const int16* render_audio,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- base::TimeDelta render_delay);
+ void PushCaptureData(const media::AudioBus* audio_source);
// Processes a block of 10 ms data from the internal FIFO and outputs it via
// |out|. |out| is the address of the pointer that will be pointed to
// the post-processed data if the method is returning a true. The lifetime
  // of the data represented by |out| is guaranteed to outlive the method call.
// That also says *|out| won't change until this method is called again.
+ // |new_volume| receives the new microphone volume from the AGC.
  // The new microphone volume range is [0, 255], and the value will be 0 if
+ // the microphone volume should not be adjusted.
// Returns true if the internal FIFO has at least 10 ms data for processing,
// otherwise false.
// |capture_delay|, |volume| and |key_pressed| will be passed to
@@ -65,25 +86,54 @@ class CONTENT_EXPORT MediaStreamAudioProcessor {
bool ProcessAndConsumeData(base::TimeDelta capture_delay,
int volume,
bool key_pressed,
+ int* new_volume,
int16** out);
- // Called when the format of the capture data has changed.
- // This has to be called before PushCaptureData() and ProcessAndConsumeData().
- // Called on the main render thread.
- void SetCaptureFormat(const media::AudioParameters& source_params);
+ // Stops the audio processor, no more AEC dump or render data after calling
+ // this method.
+ void Stop();
+
+ // The audio format of the input to the processor.
+ const media::AudioParameters& InputFormat() const;
// The audio format of the output from the processor.
const media::AudioParameters& OutputFormat() const;
// Accessor to check if the audio processing is enabled or not.
- bool has_audio_processing() const { return audio_processing_.get() != NULL; }
+ bool has_audio_processing() const { return audio_processing_ != NULL; }
+
+ // AecDumpMessageFilter::AecDumpDelegate implementation.
+ // Called on the main render thread.
+ virtual void OnAecDumpFile(
+ const IPC::PlatformFileForTransit& file_handle) OVERRIDE;
+ virtual void OnDisableAecDump() OVERRIDE;
+ virtual void OnIpcClosing() OVERRIDE;
+
+ protected:
+ friend class base::RefCountedThreadSafe<MediaStreamAudioProcessor>;
+ virtual ~MediaStreamAudioProcessor();
private:
+ friend class MediaStreamAudioProcessorTest;
+
class MediaStreamAudioConverter;
+ // WebRtcPlayoutDataSource::Sink implementation.
+ virtual void OnPlayoutData(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds) OVERRIDE;
+ virtual void OnPlayoutDataSourceChanged() OVERRIDE;
+
+ // webrtc::AudioProcessorInterface implementation.
+ // This method is called on the libjingle thread.
+ virtual void GetStats(AudioProcessorStats* stats) OVERRIDE;
+
// Helper to initialize the WebRtc AudioProcessing.
void InitializeAudioProcessingModule(
- const webrtc::MediaConstraintsInterface* constraints);
+ const blink::WebMediaConstraints& constraints, int effects);
+
+ // Helper to initialize the capture converter.
+ void InitializeCaptureConverter(const media::AudioParameters& source_params);
// Helper to initialize the render converter.
void InitializeRenderConverterIfNeeded(int sample_rate,
@@ -91,13 +141,12 @@ class CONTENT_EXPORT MediaStreamAudioProcessor {
int frames_per_buffer);
// Called by ProcessAndConsumeData().
- void ProcessData(webrtc::AudioFrame* audio_frame,
- base::TimeDelta capture_delay,
- int volume,
- bool key_pressed);
-
- // Called when the processor is going away.
- void StopAudioProcessing();
  // Returns the new microphone volume in the range of (0, 255].
+ // When the volume does not need to be updated, it returns 0.
+ int ProcessData(webrtc::AudioFrame* audio_frame,
+ base::TimeDelta capture_delay,
+ int volume,
+ bool key_pressed);
// Cached value for the render delay latency. This member is accessed by
// both the capture audio thread and the render audio thread.
@@ -123,7 +172,11 @@ class CONTENT_EXPORT MediaStreamAudioProcessor {
// Data bus to help converting interleaved data to an AudioBus.
scoped_ptr<media::AudioBus> render_data_bus_;
- // Used to DCHECK that some methods are called on the main render thread.
+ // Raw pointer to the WebRtcPlayoutDataSource, which is valid for the
+ // lifetime of RenderThread.
+ WebRtcPlayoutDataSource* playout_data_source_;
+
+ // Used to DCHECK that the destructor is called on the main render thread.
base::ThreadChecker main_thread_checker_;
// Used to DCHECK that some methods are called on the capture audio thread.
@@ -131,6 +184,23 @@ class CONTENT_EXPORT MediaStreamAudioProcessor {
// Used to DCHECK that PushRenderData() is called on the render audio thread.
base::ThreadChecker render_thread_checker_;
+
+ // Flag to enable the stereo channels mirroring.
+ bool audio_mirroring_;
+
+ // Used by the typing detection.
+ scoped_ptr<webrtc::TypingDetection> typing_detector_;
+
+ // This flag is used to show the result of typing detection.
+ // It can be accessed by the capture audio thread and by the libjingle thread
+ // which calls GetStats().
+ base::subtle::Atomic32 typing_detected_;
+
+ // Communication with browser for AEC dump.
+ scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_;
+
+ // Flag to avoid executing Stop() more than once.
+ bool stopped_;
};
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_processor_options.cc b/chromium/content/renderer/media/media_stream_audio_processor_options.cc
index add7f957c86..5ebb2a2a91e 100644
--- a/chromium/content/renderer/media/media_stream_audio_processor_options.cc
+++ b/chromium/content/renderer/media/media_stream_audio_processor_options.cc
@@ -4,41 +4,226 @@
#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/metrics/field_trial.h"
#include "base/path_service.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/utf_string_conversions.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
+#include "content/common/media/media_stream_options.h"
+#include "content/renderer/media/media_stream_constraints_util.h"
+#include "content/renderer/media/media_stream_source.h"
+#include "content/renderer/media/rtc_media_constraints.h"
+#include "media/audio/audio_parameters.h"
#include "third_party/webrtc/modules/audio_processing/include/audio_processing.h"
+#include "third_party/webrtc/modules/audio_processing/typing_detection.h"
namespace content {
-bool GetPropertyFromConstraints(const MediaConstraintsInterface* constraints,
- const std::string& key) {
+const char MediaAudioConstraints::kEchoCancellation[] = "echoCancellation";
+const char MediaAudioConstraints::kGoogEchoCancellation[] =
+ "googEchoCancellation";
+const char MediaAudioConstraints::kGoogExperimentalEchoCancellation[] =
+ "googEchoCancellation2";
+const char MediaAudioConstraints::kGoogAutoGainControl[] =
+ "googAutoGainControl";
+const char MediaAudioConstraints::kGoogExperimentalAutoGainControl[] =
+ "googAutoGainControl2";
+const char MediaAudioConstraints::kGoogNoiseSuppression[] =
+ "googNoiseSuppression";
+const char MediaAudioConstraints::kGoogExperimentalNoiseSuppression[] =
+ "googNoiseSuppression2";
+const char MediaAudioConstraints::kGoogHighpassFilter[] = "googHighpassFilter";
+const char MediaAudioConstraints::kGoogTypingNoiseDetection[] =
+ "googTypingNoiseDetection";
+const char MediaAudioConstraints::kGoogAudioMirroring[] = "googAudioMirroring";
+
+namespace {
+
+// Constant constraint keys which enable default audio constraints on
+// mediastreams with audio.
+struct {
+ const char* key;
+ bool value;
+} const kDefaultAudioConstraints[] = {
+ { MediaAudioConstraints::kEchoCancellation, true },
+ { MediaAudioConstraints::kGoogEchoCancellation, true },
+#if defined(OS_CHROMEOS) || defined(OS_MACOSX)
+ // Enable the extended filter mode AEC on platforms with known echo issues.
+ { MediaAudioConstraints::kGoogExperimentalEchoCancellation, true },
+#else
+ { MediaAudioConstraints::kGoogExperimentalEchoCancellation, false },
+#endif
+ { MediaAudioConstraints::kGoogAutoGainControl, true },
+ { MediaAudioConstraints::kGoogExperimentalAutoGainControl, true },
+ { MediaAudioConstraints::kGoogNoiseSuppression, true },
+ { MediaAudioConstraints::kGoogHighpassFilter, true },
+ { MediaAudioConstraints::kGoogTypingNoiseDetection, true },
+ { MediaAudioConstraints::kGoogExperimentalNoiseSuppression, false },
+#if defined(OS_WIN)
+ { kMediaStreamAudioDucking, true },
+#else
+ { kMediaStreamAudioDucking, false },
+#endif
+};
+
+bool IsAudioProcessingConstraint(const std::string& key) {
+ // |kMediaStreamAudioDucking| does not require audio processing.
+ return key != kMediaStreamAudioDucking;
+}
+
+} // namespace
+
+// TODO(xians): Remove this method after the APM in WebRtc is deprecated.
+void MediaAudioConstraints::ApplyFixedAudioConstraints(
+ RTCMediaConstraints* constraints) {
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
+ bool already_set_value;
+ if (!webrtc::FindConstraint(constraints, kDefaultAudioConstraints[i].key,
+ &already_set_value, NULL)) {
+ const std::string value = kDefaultAudioConstraints[i].value ?
+ webrtc::MediaConstraintsInterface::kValueTrue :
+ webrtc::MediaConstraintsInterface::kValueFalse;
+ constraints->AddOptional(kDefaultAudioConstraints[i].key, value, false);
+ } else {
+ DVLOG(1) << "Constraint " << kDefaultAudioConstraints[i].key
+ << " already set to " << already_set_value;
+ }
+ }
+}
+
+MediaAudioConstraints::MediaAudioConstraints(
+ const blink::WebMediaConstraints& constraints, int effects)
+ : constraints_(constraints),
+ effects_(effects),
+ default_audio_processing_constraint_value_(true) {
+ // The default audio processing constraints are turned off when
+ // - gUM has a specific kMediaStreamSource, which is used by tab capture
+ // and screen capture.
+ // - |kEchoCancellation| is explicitly set to false.
+ std::string value_str;
+ bool value_bool = false;
+ if ((GetConstraintValueAsString(constraints, kMediaStreamSource,
+ &value_str)) ||
+ (GetConstraintValueAsBoolean(constraints_, kEchoCancellation,
+ &value_bool) && !value_bool)) {
+ default_audio_processing_constraint_value_ = false;
+ }
+}
+
+MediaAudioConstraints::~MediaAudioConstraints() {}
+
+// TODO(xians): Remove this method after the APM in WebRtc is deprecated.
+bool MediaAudioConstraints::NeedsAudioProcessing() {
+ if (GetEchoCancellationProperty())
+ return true;
+
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
  // |kEchoCancellation| and |kGoogEchoCancellation| have been covered by
+ // GetEchoCancellationProperty().
+ if (kDefaultAudioConstraints[i].key != kEchoCancellation &&
+ kDefaultAudioConstraints[i].key != kGoogEchoCancellation &&
+ IsAudioProcessingConstraint(kDefaultAudioConstraints[i].key) &&
+ GetProperty(kDefaultAudioConstraints[i].key)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool MediaAudioConstraints::GetProperty(const std::string& key) {
+ // Return the value if the constraint is specified in |constraints|,
+ // otherwise return the default value.
bool value = false;
- return webrtc::FindConstraint(constraints, key, &value, NULL) && value;
+ if (!GetConstraintValueAsBoolean(constraints_, key, &value))
+ value = GetDefaultValueForConstraint(constraints_, key);
+
+ return value;
+}
+
+bool MediaAudioConstraints::GetEchoCancellationProperty() {
+ // If platform echo canceller is enabled, disable the software AEC.
+ if (effects_ & media::AudioParameters::ECHO_CANCELLER)
+ return false;
+
+ // If |kEchoCancellation| is specified in the constraints, it will
+ // override the value of |kGoogEchoCancellation|.
+ bool value = false;
+ if (GetConstraintValueAsBoolean(constraints_, kEchoCancellation, &value))
+ return value;
+
+ return GetProperty(kGoogEchoCancellation);
+}
+
+bool MediaAudioConstraints::IsValid() {
+ blink::WebVector<blink::WebMediaConstraint> mandatory;
+ constraints_.getMandatoryConstraints(mandatory);
+ for (size_t i = 0; i < mandatory.size(); ++i) {
+ const std::string key = mandatory[i].m_name.utf8();
+ if (key == kMediaStreamSource || key == kMediaStreamSourceId ||
+ key == MediaStreamSource::kSourceId) {
+ // Ignore Chrome specific Tab capture and |kSourceId| constraints.
+ continue;
+ }
+
+ bool valid = false;
+ for (size_t j = 0; j < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++j) {
+ if (key == kDefaultAudioConstraints[j].key) {
+ bool value = false;
+ valid = GetMandatoryConstraintValueAsBoolean(constraints_, key, &value);
+ break;
+ }
+ }
+
+ if (!valid) {
+ DLOG(ERROR) << "Invalid MediaStream constraint. Name: " << key;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool MediaAudioConstraints::GetDefaultValueForConstraint(
+ const blink::WebMediaConstraints& constraints, const std::string& key) {
+ // |kMediaStreamAudioDucking| is not restricted by
+ // |default_audio_processing_constraint_value_| since it does not require
+ // audio processing.
+ if (!default_audio_processing_constraint_value_ &&
+ IsAudioProcessingConstraint(key))
+ return false;
+
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
+ if (kDefaultAudioConstraints[i].key == key)
+ return kDefaultAudioConstraints[i].value;
+ }
+
+ return false;
}
void EnableEchoCancellation(AudioProcessing* audio_processing) {
-#if defined(OS_IOS)
- // On iOS, VPIO provides built-in EC and AGC.
- return;
-#elif defined(OS_ANDROID)
- // Mobile devices are using AECM.
- int err = audio_processing->echo_control_mobile()->Enable(true);
- err |= audio_processing->echo_control_mobile()->set_routing_mode(
- webrtc::EchoControlMobile::kSpeakerphone);
- CHECK_EQ(err, 0);
-#else
- int err = audio_processing->echo_cancellation()->Enable(true);
- err |= audio_processing->echo_cancellation()->set_suppression_level(
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ const std::string group_name =
+ base::FieldTrialList::FindFullName("ReplaceAECMWithAEC");
+ if (group_name.empty() || group_name != "Enabled") {
+ // Mobile devices are using AECM.
+ int err = audio_processing->echo_control_mobile()->set_routing_mode(
+ webrtc::EchoControlMobile::kSpeakerphone);
+ err |= audio_processing->echo_control_mobile()->Enable(true);
+ CHECK_EQ(err, 0);
+ return;
+ }
+#endif
+ int err = audio_processing->echo_cancellation()->set_suppression_level(
webrtc::EchoCancellation::kHighSuppression);
// Enable the metrics for AEC.
err |= audio_processing->echo_cancellation()->enable_metrics(true);
err |= audio_processing->echo_cancellation()->enable_delay_logging(true);
+ err |= audio_processing->echo_cancellation()->Enable(true);
CHECK_EQ(err, 0);
-#endif
}
void EnableNoiseSuppression(AudioProcessing* audio_processing) {
@@ -48,16 +233,23 @@ void EnableNoiseSuppression(AudioProcessing* audio_processing) {
CHECK_EQ(err, 0);
}
+void EnableExperimentalNoiseSuppression(AudioProcessing* audio_processing) {
+ CHECK_EQ(audio_processing->EnableExperimentalNs(true), 0);
+}
+
void EnableHighPassFilter(AudioProcessing* audio_processing) {
CHECK_EQ(audio_processing->high_pass_filter()->Enable(true), 0);
}
-// TODO(xians): stereo swapping
-void EnableTypingDetection(AudioProcessing* audio_processing) {
+void EnableTypingDetection(AudioProcessing* audio_processing,
+ webrtc::TypingDetection* typing_detector) {
int err = audio_processing->voice_detection()->Enable(true);
err |= audio_processing->voice_detection()->set_likelihood(
webrtc::VoiceDetection::kVeryLowLikelihood);
CHECK_EQ(err, 0);
+
+ // Configure the update period to 1s (100 * 10ms) in the typing detector.
+ typing_detector->SetParameters(0, 0, 0, 0, 0, 100);
}
void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing) {
@@ -66,31 +258,73 @@ void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing) {
audio_processing->SetExtraOptions(config);
}
-void StartAecDump(AudioProcessing* audio_processing) {
- // TODO(grunell): Figure out a more suitable directory for the audio dump
- // data.
- base::FilePath path;
-#if defined(CHROMEOS)
- PathService::Get(base::DIR_TEMP, &path);
-#elif defined(ANDROID)
- path = base::FilePath(FILE_PATH_LITERAL("sdcard"));
-#else
- PathService::Get(base::DIR_EXE, &path);
-#endif
- base::FilePath file = path.Append(FILE_PATH_LITERAL("audio.aecdump"));
+void StartEchoCancellationDump(AudioProcessing* audio_processing,
+ base::File aec_dump_file) {
+ DCHECK(aec_dump_file.IsValid());
-#if defined(OS_WIN)
- const std::string file_name = WideToUTF8(file.value());
-#else
- const std::string file_name = file.value();
-#endif
- if (audio_processing->StartDebugRecording(file_name.c_str()))
+ FILE* stream = base::FileToFILE(aec_dump_file.Pass(), "w");
+ if (!stream) {
+ LOG(ERROR) << "Failed to open AEC dump file";
+ return;
+ }
+
+ if (audio_processing->StartDebugRecording(stream))
DLOG(ERROR) << "Fail to start AEC debug recording";
}
-void StopAecDump(AudioProcessing* audio_processing) {
+void StopEchoCancellationDump(AudioProcessing* audio_processing) {
if (audio_processing->StopDebugRecording())
DLOG(ERROR) << "Fail to stop AEC debug recording";
}
+void EnableAutomaticGainControl(AudioProcessing* audio_processing) {
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ const webrtc::GainControl::Mode mode = webrtc::GainControl::kFixedDigital;
+#else
+ const webrtc::GainControl::Mode mode = webrtc::GainControl::kAdaptiveAnalog;
+#endif
+ int err = audio_processing->gain_control()->set_mode(mode);
+ err |= audio_processing->gain_control()->Enable(true);
+ CHECK_EQ(err, 0);
+}
+
+void GetAecStats(AudioProcessing* audio_processing,
+ webrtc::AudioProcessorInterface::AudioProcessorStats* stats) {
+ // These values can take on valid negative values, so use the lowest possible
+ // level as default rather than -1.
+ stats->echo_return_loss = -100;
+ stats->echo_return_loss_enhancement = -100;
+
+ // These values can also be negative, but in practice -1 is only used to
+ // signal insufficient data, since the resolution is limited to multiples
+ // of 4ms.
+ stats->echo_delay_median_ms = -1;
+ stats->echo_delay_std_ms = -1;
+
+ // TODO(ajm): Re-enable this metric once we have a reliable implementation.
+ stats->aec_quality_min = -1.0f;
+
+ if (!audio_processing->echo_cancellation()->are_metrics_enabled() ||
+ !audio_processing->echo_cancellation()->is_delay_logging_enabled() ||
+ !audio_processing->echo_cancellation()->is_enabled()) {
+ return;
+ }
+
+ // TODO(ajm): we may want to use VoECallReport::GetEchoMetricsSummary
+ // here, but it appears to be unsuitable currently. Revisit after this is
+ // investigated: http://b/issue?id=5666755
+ webrtc::EchoCancellation::Metrics echo_metrics;
+ if (!audio_processing->echo_cancellation()->GetMetrics(&echo_metrics)) {
+ stats->echo_return_loss = echo_metrics.echo_return_loss.instant;
+ stats->echo_return_loss_enhancement =
+ echo_metrics.echo_return_loss_enhancement.instant;
+ }
+
+ int median = 0, std = 0;
+ if (!audio_processing->echo_cancellation()->GetDelayMetrics(&median, &std)) {
+ stats->echo_delay_median_ms = median;
+ stats->echo_delay_std_ms = std;
+ }
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_processor_options.h b/chromium/content/renderer/media/media_stream_audio_processor_options.h
index dcdec4e61f4..468355547f5 100644
--- a/chromium/content/renderer/media/media_stream_audio_processor_options.h
+++ b/chromium/content/renderer/media/media_stream_audio_processor_options.h
@@ -7,25 +7,86 @@
#include <string>
+#include "base/files/file.h"
+#include "content/common/content_export.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
namespace webrtc {
class AudioFrame;
class AudioProcessing;
class MediaConstraintsInterface;
+class TypingDetection;
}
namespace content {
+class RTCMediaConstraints;
+
using webrtc::AudioProcessing;
using webrtc::MediaConstraintsInterface;
-// Gets the property named by |key| from the |constraints|.
-// Returns true if the key is found and has a valid boolean value; Otherwise
-// false.
-bool GetPropertyFromConstraints(
- const MediaConstraintsInterface* constraints,
- const std::string& key);
+// A helper class to parse audio constraints from a blink::WebMediaConstraints
+// object.
+class CONTENT_EXPORT MediaAudioConstraints {
+ public:
+ // Constraint keys used by audio processing.
+ static const char kEchoCancellation[];
+ static const char kGoogEchoCancellation[];
+ static const char kGoogExperimentalEchoCancellation[];
+ static const char kGoogAutoGainControl[];
+ static const char kGoogExperimentalAutoGainControl[];
+ static const char kGoogNoiseSuppression[];
+ static const char kGoogExperimentalNoiseSuppression[];
+ static const char kGoogHighpassFilter[];
+ static const char kGoogTypingNoiseDetection[];
+ static const char kGoogAudioMirroring[];
+
+ // Merge |constraints| with |kDefaultAudioConstraints|. For any key which
+ // exists in both, the value from |constraints| is maintained, including its
+ // mandatory/optional status. New values from |kDefaultAudioConstraints| will
+ // be added with optional status.
+ static void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints);
+
+ // |effects| is the bitmasks telling whether certain platform
+ // hardware audio effects are enabled, like hardware echo cancellation. If
+ // some hardware effect is enabled, the corresponding software audio
+ // processing will be disabled.
+ MediaAudioConstraints(const blink::WebMediaConstraints& constraints,
+ int effects);
+ virtual ~MediaAudioConstraints();
+
+ // Checks if any audio constraints are set that requires audio processing to
+ // be applied.
+ bool NeedsAudioProcessing();
+
+ // Gets the property of the constraint named by |key| in |constraints_|.
+ // Returns the constraint's value if the key is found; Otherwise returns the
+ // default value of the constraint.
+ // Note, for constraint of |kEchoCancellation| or |kGoogEchoCancellation|,
+ // clients should use GetEchoCancellationProperty().
+ bool GetProperty(const std::string& key);
+
+ // Gets the property of echo cancellation defined in |constraints_|. The
+ // returned value depends on a combination of |effects_|, |kEchoCancellation|
+ // and |kGoogEchoCancellation| in |constraints_|.
+ bool GetEchoCancellationProperty();
+
+ // Returns true if all the mandatory constraints in |constraints_| are valid;
+ // Otherwise return false.
+ bool IsValid();
+
+ private:
+ // Gets the default value of constraint named by |key| in |constraints|.
+ bool GetDefaultValueForConstraint(
+ const blink::WebMediaConstraints& constraints, const std::string& key);
+
+ const blink::WebMediaConstraints constraints_;
+ const int effects_;
+ bool default_audio_processing_constraint_value_;
+};
// Enables the echo cancellation in |audio_processing|.
void EnableEchoCancellation(AudioProcessing* audio_processing);
@@ -33,20 +94,32 @@ void EnableEchoCancellation(AudioProcessing* audio_processing);
// Enables the noise suppression in |audio_processing|.
void EnableNoiseSuppression(AudioProcessing* audio_processing);
+// Enables the experimental noise suppression in |audio_processing|.
+void EnableExperimentalNoiseSuppression(AudioProcessing* audio_processing);
+
// Enables the high pass filter in |audio_processing|.
void EnableHighPassFilter(AudioProcessing* audio_processing);
// Enables the typing detection in |audio_processing|.
-void EnableTypingDetection(AudioProcessing* audio_processing);
+void EnableTypingDetection(AudioProcessing* audio_processing,
+ webrtc::TypingDetection* typing_detector);
// Enables the experimental echo cancellation in |audio_processing|.
void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing);
// Starts the echo cancellation dump in |audio_processing|.
-void StartAecDump(AudioProcessing* audio_processing);
+void StartEchoCancellationDump(AudioProcessing* audio_processing,
+ base::File aec_dump_file);
// Stops the echo cancellation dump in |audio_processing|.
-void StopAecDump(AudioProcessing* audio_processing);
+// This method has no impact if echo cancellation dump has not been started on
+// |audio_processing|.
+void StopEchoCancellationDump(AudioProcessing* audio_processing);
+
+void EnableAutomaticGainControl(AudioProcessing* audio_processing);
+
+void GetAecStats(AudioProcessing* audio_processing,
+ webrtc::AudioProcessorInterface::AudioProcessorStats* stats);
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_processor_unittest.cc b/chromium/content/renderer/media/media_stream_audio_processor_unittest.cc
index 1a6409c2c1a..d9cddf47005 100644
--- a/chromium/content/renderer/media/media_stream_audio_processor_unittest.cc
+++ b/chromium/content/renderer/media/media_stream_audio_processor_unittest.cc
@@ -6,15 +6,19 @@
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
#include "base/path_service.h"
#include "base/time/time.h"
#include "content/public/common/content_switches.h"
+#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/media_stream_audio_processor.h"
-#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
using ::testing::_;
@@ -50,36 +54,6 @@ void ReadDataFromSpeechFile(char* data, int length) {
DCHECK(data_file_size64 > length);
}
-void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints) {
- // Constant constraint keys which enables default audio constraints on
- // mediastreams with audio.
- struct {
- const char* key;
- const char* value;
- } static const kDefaultAudioConstraints[] = {
- { webrtc::MediaConstraintsInterface::kEchoCancellation,
- webrtc::MediaConstraintsInterface::kValueTrue },
- #if defined(OS_CHROMEOS) || defined(OS_MACOSX)
- // Enable the extended filter mode AEC on platforms with known echo issues.
- { webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
- webrtc::MediaConstraintsInterface::kValueTrue },
- #endif
- { webrtc::MediaConstraintsInterface::kAutoGainControl,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kNoiseSuppression,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kHighpassFilter,
- webrtc::MediaConstraintsInterface::kValueTrue },
- };
-
- for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
- constraints->AddMandatory(kDefaultAudioConstraints[i].key,
- kDefaultAudioConstraints[i].value, false);
- }
-}
-
} // namespace
class MediaStreamAudioProcessorTest : public ::testing::Test {
@@ -87,8 +61,6 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
MediaStreamAudioProcessorTest()
: params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 48000, 16, 512) {
- CommandLine::ForCurrentProcess()->AppendSwitch(
- switches::kEnableAudioTrackProcessing);
}
protected:
@@ -98,28 +70,39 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
int expected_output_channels,
int expected_output_buffer_size) {
// Read the audio data from a file.
+ const media::AudioParameters& params = audio_processor->InputFormat();
const int packet_size =
- params_.frames_per_buffer() * 2 * params_.channels();
+ params.frames_per_buffer() * 2 * params.channels();
const size_t length = packet_size * kNumberOfPacketsForTest;
scoped_ptr<char[]> capture_data(new char[length]);
ReadDataFromSpeechFile(capture_data.get(), length);
const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
- params_.channels(), params_.frames_per_buffer());
+ params.channels(), params.frames_per_buffer());
for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
audio_processor->PushCaptureData(data_bus.get());
// |audio_processor| does nothing when the audio processing is off in
// the processor.
- audio_processor->PushRenderData(
- data_ptr,
- params_.sample_rate(), params_.channels(),
- params_.frames_per_buffer(), base::TimeDelta::FromMilliseconds(10));
+ webrtc::AudioProcessing* ap = audio_processor->audio_processing_.get();
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ const bool is_aec_enabled = ap && ap->echo_control_mobile()->is_enabled();
+ // AEC should be turned off for mobiles.
+ DCHECK(!ap || !ap->echo_cancellation()->is_enabled());
+#else
+ const bool is_aec_enabled = ap && ap->echo_cancellation()->is_enabled();
+#endif
+ if (is_aec_enabled) {
+ audio_processor->OnPlayoutData(data_bus.get(), params.sample_rate(),
+ 10);
+ }
int16* output = NULL;
+ int new_volume = 0;
while(audio_processor->ProcessAndConsumeData(
- base::TimeDelta::FromMilliseconds(10), 255, false, &output)) {
+ base::TimeDelta::FromMilliseconds(10), 255, false, &new_volume,
+ &output)) {
EXPECT_TRUE(output != NULL);
EXPECT_EQ(audio_processor->OutputFormat().sample_rate(),
expected_output_sample_rate);
@@ -129,38 +112,339 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
expected_output_buffer_size);
}
- data_ptr += params_.frames_per_buffer() * params_.channels();
+ data_ptr += params.frames_per_buffer() * params.channels();
}
}
+ void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) {
+ webrtc::AudioProcessing* audio_processing =
+ audio_processor->audio_processing_.get();
+#if defined(OS_ANDROID)
+ EXPECT_TRUE(audio_processing->echo_control_mobile()->is_enabled());
+ EXPECT_TRUE(audio_processing->echo_control_mobile()->routing_mode() ==
+ webrtc::EchoControlMobile::kSpeakerphone);
+ EXPECT_FALSE(audio_processing->echo_cancellation()->is_enabled());
+#elif !defined(OS_IOS)
+ EXPECT_TRUE(audio_processing->echo_cancellation()->is_enabled());
+ EXPECT_TRUE(audio_processing->echo_cancellation()->suppression_level() ==
+ webrtc::EchoCancellation::kHighSuppression);
+ EXPECT_TRUE(audio_processing->echo_cancellation()->are_metrics_enabled());
+ EXPECT_TRUE(
+ audio_processing->echo_cancellation()->is_delay_logging_enabled());
+#endif
+
+ EXPECT_TRUE(audio_processing->noise_suppression()->is_enabled());
+ EXPECT_TRUE(audio_processing->noise_suppression()->level() ==
+ webrtc::NoiseSuppression::kHigh);
+ EXPECT_TRUE(audio_processing->high_pass_filter()->is_enabled());
+ EXPECT_TRUE(audio_processing->gain_control()->is_enabled());
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ EXPECT_TRUE(audio_processing->gain_control()->mode() ==
+ webrtc::GainControl::kFixedDigital);
+ EXPECT_FALSE(audio_processing->voice_detection()->is_enabled());
+#else
+ EXPECT_TRUE(audio_processing->gain_control()->mode() ==
+ webrtc::GainControl::kAdaptiveAnalog);
+ EXPECT_TRUE(audio_processing->voice_detection()->is_enabled());
+ EXPECT_TRUE(audio_processing->voice_detection()->likelihood() ==
+ webrtc::VoiceDetection::kVeryLowLikelihood);
+#endif
+ }
+
media::AudioParameters params_;
};
TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) {
- // Setup the audio processor with empty constraint.
- RTCMediaConstraints constraints;
- MediaStreamAudioProcessor audio_processor(&constraints);
- audio_processor.SetCaptureFormat(params_);
- EXPECT_FALSE(audio_processor.has_audio_processing());
+ // Setup the audio processor with disabled flag on.
+ CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kDisableAudioTrackProcessing);
+ MockMediaConstraintFactory constraint_factory;
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_FALSE(audio_processor->has_audio_processing());
+ audio_processor->OnCaptureFormatChanged(params_);
- ProcessDataAndVerifyFormat(&audio_processor,
+ ProcessDataAndVerifyFormat(audio_processor,
params_.sample_rate(),
params_.channels(),
params_.sample_rate() / 100);
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
}
TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
- // Setup the audio processor with default constraint.
- RTCMediaConstraints constraints;
- ApplyFixedAudioConstraints(&constraints);
- MediaStreamAudioProcessor audio_processor(&constraints);
- audio_processor.SetCaptureFormat(params_);
- EXPECT_TRUE(audio_processor.has_audio_processing());
-
- ProcessDataAndVerifyFormat(&audio_processor,
+ MockMediaConstraintFactory constraint_factory;
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_TRUE(audio_processor->has_audio_processing());
+ audio_processor->OnCaptureFormatChanged(params_);
+ VerifyDefaultComponents(audio_processor);
+
+ ProcessDataAndVerifyFormat(audio_processor,
kAudioProcessingSampleRate,
kAudioProcessingNumberOfChannel,
kAudioProcessingSampleRate / 100);
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
+}
+
+TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) {
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source.
+ MockMediaConstraintFactory tab_constraint_factory;
+ const std::string tab_string = kMediaStreamSourceTab;
+ tab_constraint_factory.AddMandatory(kMediaStreamSource,
+ tab_string);
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ tab_constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_FALSE(audio_processor->has_audio_processing());
+ audio_processor->OnCaptureFormatChanged(params_);
+
+ ProcessDataAndVerifyFormat(audio_processor,
+ params_.sample_rate(),
+ params_.channels(),
+ params_.sample_rate() / 100);
+
+ // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem
+ // source.
+ MockMediaConstraintFactory system_constraint_factory;
+ const std::string system_string = kMediaStreamSourceSystem;
+ system_constraint_factory.AddMandatory(kMediaStreamSource,
+ system_string);
+ audio_processor = new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ system_constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get());
+ EXPECT_FALSE(audio_processor->has_audio_processing());
+
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
+}
+
+TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) {
+ // Turn off the default constraints and pass it to MediaStreamAudioProcessor.
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.DisableDefaultAudioConstraints();
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_FALSE(audio_processor->has_audio_processing());
+ audio_processor->OnCaptureFormatChanged(params_);
+
+ ProcessDataAndVerifyFormat(audio_processor,
+ params_.sample_rate(),
+ params_.channels(),
+ params_.sample_rate() / 100);
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
+}
+
+TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
+ static const char* kDefaultAudioConstraints[] = {
+ MediaAudioConstraints::kEchoCancellation,
+ MediaAudioConstraints::kGoogAudioMirroring,
+ MediaAudioConstraints::kGoogAutoGainControl,
+ MediaAudioConstraints::kGoogEchoCancellation,
+ MediaAudioConstraints::kGoogExperimentalEchoCancellation,
+ MediaAudioConstraints::kGoogExperimentalAutoGainControl,
+ MediaAudioConstraints::kGoogExperimentalNoiseSuppression,
+ MediaAudioConstraints::kGoogHighpassFilter,
+ MediaAudioConstraints::kGoogNoiseSuppression,
+ MediaAudioConstraints::kGoogTypingNoiseDetection
+ };
+
+ // Verify mandatory constraints.
+ for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.AddMandatory(kDefaultAudioConstraints[i], false);
+ blink::WebMediaConstraints constraints =
+ constraint_factory.CreateWebMediaConstraints();
+ MediaAudioConstraints audio_constraints(constraints, 0);
+ EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
+ }
+
+ // Verify optional constraints.
+ for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.AddOptional(kDefaultAudioConstraints[i], false);
+ blink::WebMediaConstraints constraints =
+ constraint_factory.CreateWebMediaConstraints();
+ MediaAudioConstraints audio_constraints(constraints, 0);
+ EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
+ }
+
+ {
+ // Verify echo cancellation is off when platform aec effect is on.
+ MockMediaConstraintFactory constraint_factory;
+ MediaAudioConstraints audio_constraints(
+ constraint_factory.CreateWebMediaConstraints(),
+ media::AudioParameters::ECHO_CANCELLER);
+ EXPECT_FALSE(audio_constraints.GetEchoCancellationProperty());
+ }
+
+ {
+ // Verify |kEchoCancellation| overwrite |kGoogEchoCancellation|.
+ MockMediaConstraintFactory constraint_factory_1;
+ constraint_factory_1.AddOptional(MediaAudioConstraints::kEchoCancellation,
+ true);
+ constraint_factory_1.AddOptional(
+ MediaAudioConstraints::kGoogEchoCancellation, false);
+ blink::WebMediaConstraints constraints_1 =
+ constraint_factory_1.CreateWebMediaConstraints();
+ MediaAudioConstraints audio_constraints_1(constraints_1, 0);
+ EXPECT_TRUE(audio_constraints_1.GetEchoCancellationProperty());
+
+ MockMediaConstraintFactory constraint_factory_2;
+ constraint_factory_2.AddOptional(MediaAudioConstraints::kEchoCancellation,
+ false);
+ constraint_factory_2.AddOptional(
+ MediaAudioConstraints::kGoogEchoCancellation, true);
+ blink::WebMediaConstraints constraints_2 =
+ constraint_factory_2.CreateWebMediaConstraints();
+ MediaAudioConstraints audio_constraints_2(constraints_2, 0);
+ EXPECT_FALSE(audio_constraints_2.GetEchoCancellationProperty());
+ }
+
+ {
+ // When |kEchoCancellation| is explicitly set to false, the default values
+ // for all the constraints except |kMediaStreamAudioDucking| are false.
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.AddOptional(MediaAudioConstraints::kEchoCancellation,
+ false);
+ blink::WebMediaConstraints constraints =
+ constraint_factory.CreateWebMediaConstraints();
+ MediaAudioConstraints audio_constraints(constraints, 0);
+ for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
+ EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
+ }
+ EXPECT_FALSE(audio_constraints.NeedsAudioProcessing());
+#if defined(OS_WIN)
+ EXPECT_TRUE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
+#else
+ EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
+#endif
+ }
+}
+
+TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) {
+ MockMediaConstraintFactory constraint_factory;
+ const std::string dummy_constraint = "dummy";
+ constraint_factory.AddMandatory(dummy_constraint, true);
+ MediaAudioConstraints audio_constraints(
+ constraint_factory.CreateWebMediaConstraints(), 0);
+ EXPECT_FALSE(audio_constraints.IsValid());
+}
+
+TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) {
+ MockMediaConstraintFactory constraint_factory;
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_TRUE(audio_processor->has_audio_processing());
+
+ static const int kSupportedSampleRates[] =
+ { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 };
+ for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) {
+ int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ?
+ kSupportedSampleRates[i] / 100 : 128;
+ media::AudioParameters params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO, kSupportedSampleRates[i], 16,
+ buffer_size);
+ audio_processor->OnCaptureFormatChanged(params);
+ VerifyDefaultComponents(audio_processor);
+
+ ProcessDataAndVerifyFormat(audio_processor,
+ kAudioProcessingSampleRate,
+ kAudioProcessingNumberOfChannel,
+ kAudioProcessingSampleRate / 100);
+ }
+
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device|
+ // outlives |audio_processor|.
+ audio_processor = NULL;
+}
+
+TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
+ // Set up the correct constraints to turn off the audio processing and turn
+ // on the stereo channels mirroring.
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
+ false);
+ constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring,
+ true);
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_FALSE(audio_processor->has_audio_processing());
+ const media::AudioParameters source_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480);
+ audio_processor->OnCaptureFormatChanged(source_params);
+ EXPECT_EQ(audio_processor->OutputFormat().channels(), 2);
+
+ // Construct left and right channels, and assign different values to the
+ // first data of the left channel and right channel.
+ const int size = media::AudioBus::CalculateMemorySize(source_params);
+ scoped_ptr<float, base::AlignedFreeDeleter> left_channel(
+ static_cast<float*>(base::AlignedAlloc(size, 32)));
+ scoped_ptr<float, base::AlignedFreeDeleter> right_channel(
+ static_cast<float*>(base::AlignedAlloc(size, 32)));
+ scoped_ptr<media::AudioBus> wrapper = media::AudioBus::CreateWrapper(
+ source_params.channels());
+ wrapper->set_frames(source_params.frames_per_buffer());
+ wrapper->SetChannelData(0, left_channel.get());
+ wrapper->SetChannelData(1, right_channel.get());
+ wrapper->Zero();
+ float* left_channel_ptr = left_channel.get();
+ left_channel_ptr[0] = 1.0f;
+
+ // A audio bus used for verifying the output data values.
+ scoped_ptr<media::AudioBus> output_bus = media::AudioBus::Create(
+ audio_processor->OutputFormat());
+
+ // Run the test consecutively to make sure the stereo channels are not
+ // flipped back and forth.
+ static const int kNumberOfPacketsForTest = 100;
+ for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
+ audio_processor->PushCaptureData(wrapper.get());
+
+ int16* output = NULL;
+ int new_volume = 0;
+ EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
+ base::TimeDelta::FromMilliseconds(0), 0, false, &new_volume, &output));
+ output_bus->FromInterleaved(output, output_bus->frames(), 2);
+ EXPECT_TRUE(output != NULL);
+ EXPECT_EQ(output_bus->channel(0)[0], 0);
+ EXPECT_NE(output_bus->channel(1)[0], 0);
+ }
+
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
}
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_source.cc b/chromium/content/renderer/media/media_stream_audio_source.cc
new file mode 100644
index 00000000000..069f4e3825d
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_audio_source.cc
@@ -0,0 +1,56 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_audio_source.h"
+
+namespace content {
+
+MediaStreamAudioSource::MediaStreamAudioSource(
+ int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const SourceStoppedCallback& stop_callback,
+ PeerConnectionDependencyFactory* factory)
+ : render_view_id_(render_view_id),
+ factory_(factory) {
+ SetDeviceInfo(device_info);
+ SetStopCallback(stop_callback);
+}
+
+MediaStreamAudioSource::MediaStreamAudioSource()
+ : render_view_id_(-1),
+ factory_(NULL) {
+}
+
+MediaStreamAudioSource::~MediaStreamAudioSource() {}
+
+void MediaStreamAudioSource::DoStopSource() {
+ if (audio_capturer_.get())
+ audio_capturer_->Stop();
+}
+
+void MediaStreamAudioSource::AddTrack(
+ const blink::WebMediaStreamTrack& track,
+ const blink::WebMediaConstraints& constraints,
+ const ConstraintsCallback& callback) {
+ // TODO(xians): Properly implement for audio sources.
+ if (!local_audio_source_) {
+ if (!factory_->InitializeMediaStreamAudioSource(render_view_id_,
+ constraints,
+ this)) {
+ // The source failed to start.
+ // MediaStreamImpl rely on the |stop_callback| to be triggered when the
+ // last track is removed from the source. But in this case, the source is
+ // is not even started. So we need to fail both adding the track and
+ // trigger |stop_callback|.
+ callback.Run(this, false);
+ StopSource();
+ return;
+ }
+ }
+
+ factory_->CreateLocalAudioTrack(track);
+ callback.Run(this, true);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_audio_source.h b/chromium/content/renderer/media/media_stream_audio_source.h
new file mode 100644
index 00000000000..29f1d4cfbaa
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_audio_source.h
@@ -0,0 +1,66 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_SOURCE_H_
+
+#include "base/compiler_specific.h"
+#include "content/common/content_export.h"
+#include "content/renderer/media/media_stream_source.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+class CONTENT_EXPORT MediaStreamAudioSource
+ : NON_EXPORTED_BASE(public MediaStreamSource) {
+ public:
+ MediaStreamAudioSource(int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const SourceStoppedCallback& stop_callback,
+ PeerConnectionDependencyFactory* factory);
+ MediaStreamAudioSource();
+ virtual ~MediaStreamAudioSource();
+
+ void AddTrack(const blink::WebMediaStreamTrack& track,
+ const blink::WebMediaConstraints& constraints,
+ const ConstraintsCallback& callback);
+
+ void SetLocalAudioSource(webrtc::AudioSourceInterface* source) {
+ local_audio_source_ = source;
+ }
+
+ void SetAudioCapturer(WebRtcAudioCapturer* capturer) {
+ DCHECK(!audio_capturer_);
+ audio_capturer_ = capturer;
+ }
+
+ const scoped_refptr<WebRtcAudioCapturer>& GetAudioCapturer() {
+ return audio_capturer_;
+ }
+
+ webrtc::AudioSourceInterface* local_audio_source() {
+ return local_audio_source_.get();
+ }
+
+ protected:
+ virtual void DoStopSource() OVERRIDE;
+
+ private:
+ int render_view_id_; // Render view ID that created this source.
+ // This member holds an instance of webrtc::LocalAudioSource. This is used
+ // as a container for audio options.
+ scoped_refptr<webrtc::AudioSourceInterface> local_audio_source_;
+
+ scoped_refptr<WebRtcAudioCapturer> audio_capturer_;
+
+ PeerConnectionDependencyFactory* factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamAudioSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_SOURCE_H_
diff --git a/chromium/content/renderer/media/media_stream_center.cc b/chromium/content/renderer/media/media_stream_center.cc
index daf1a499507..9d663497f3b 100644
--- a/chromium/content/renderer/media/media_stream_center.cc
+++ b/chromium/content/renderer/media/media_stream_center.cc
@@ -8,17 +8,17 @@
#include "base/command_line.h"
#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/utf_string_conversions.h"
#include "content/common/media/media_stream_messages.h"
#include "content/public/common/content_switches.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
#include "content/public/renderer/render_thread.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "content/renderer/media/media_stream_source_extra_data.h"
-#include "content/renderer/media/media_stream_track_extra_data.h"
-#include "content/renderer/render_view_impl.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_source.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
#include "third_party/WebKit/public/platform/WebMediaStreamCenterClient.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
@@ -27,15 +27,65 @@
#include "third_party/WebKit/public/platform/WebSourceInfo.h"
#include "third_party/WebKit/public/platform/WebVector.h"
#include "third_party/WebKit/public/web/WebFrame.h"
-#include "third_party/libjingle/source/talk/app/webrtc/jsep.h"
using blink::WebFrame;
using blink::WebView;
namespace content {
+namespace {
+
+void CreateNativeAudioMediaStreamTrack(
+ const blink::WebMediaStreamTrack& track,
+ PeerConnectionDependencyFactory* factory) {
+ DCHECK(!track.extraData());
+ blink::WebMediaStreamSource source = track.source();
+ DCHECK_EQ(source.type(), blink::WebMediaStreamSource::TypeAudio);
+ factory->CreateLocalAudioTrack(track);
+}
+
+void CreateNativeVideoMediaStreamTrack(
+ const blink::WebMediaStreamTrack& track) {
+ DCHECK(track.extraData() == NULL);
+ blink::WebMediaStreamSource source = track.source();
+ DCHECK_EQ(source.type(), blink::WebMediaStreamSource::TypeVideo);
+ MediaStreamVideoSource* native_source =
+ MediaStreamVideoSource::GetVideoSource(source);
+ DCHECK(native_source);
+ blink::WebMediaStreamTrack writable_track(track);
+ // TODO(perkj): The constraints to use here should be passed from blink when
+ // a new track is created. For cloning, it should be the constraints of the
+ // cloned track and not the originating source.
+ // Also - source.constraints() returns an uninitialized constraint if the
+ // source is coming from a remote video track. See http://crbug/287805.
+ blink::WebMediaConstraints constraints = source.constraints();
+ if (constraints.isNull())
+ constraints.initialize();
+ writable_track.setExtraData(
+ new MediaStreamVideoTrack(native_source, constraints,
+ MediaStreamVideoSource::ConstraintsCallback(),
+ track.isEnabled()));
+}
+
+void CreateNativeMediaStreamTrack(const blink::WebMediaStreamTrack& track,
+ PeerConnectionDependencyFactory* factory) {
+ DCHECK(!track.isNull() && !track.extraData());
+ DCHECK(!track.source().isNull());
+
+ switch (track.source().type()) {
+ case blink::WebMediaStreamSource::TypeAudio:
+ CreateNativeAudioMediaStreamTrack(track, factory);
+ break;
+ case blink::WebMediaStreamSource::TypeVideo:
+ CreateNativeVideoMediaStreamTrack(track);
+ break;
+ }
+}
+
+} // namespace
+
MediaStreamCenter::MediaStreamCenter(blink::WebMediaStreamCenterClient* client,
- MediaStreamDependencyFactory* factory)
+ PeerConnectionDependencyFactory* factory)
: rtc_factory_(factory), next_request_id_(0) {}
MediaStreamCenter::~MediaStreamCenter() {}
@@ -55,94 +105,105 @@ bool MediaStreamCenter::getMediaStreamTrackSources(
void MediaStreamCenter::didCreateMediaStreamTrack(
const blink::WebMediaStreamTrack& track) {
- if (!rtc_factory_)
- return;
- rtc_factory_->CreateNativeMediaStreamTrack(track);
+ DVLOG(1) << "MediaStreamCenter::didCreateMediaStreamTrack";
+ CreateNativeMediaStreamTrack(track, rtc_factory_);
}
void MediaStreamCenter::didEnableMediaStreamTrack(
const blink::WebMediaStreamTrack& track) {
- webrtc::MediaStreamTrackInterface* native_track =
- MediaStreamDependencyFactory::GetNativeMediaStreamTrack(track);
+ MediaStreamTrack* native_track =
+ MediaStreamTrack::GetTrack(track);
if (native_track)
- native_track->set_enabled(true);
+ native_track->SetEnabled(true);
}
void MediaStreamCenter::didDisableMediaStreamTrack(
const blink::WebMediaStreamTrack& track) {
- webrtc::MediaStreamTrackInterface* native_track =
- MediaStreamDependencyFactory::GetNativeMediaStreamTrack(track);
+ MediaStreamTrack* native_track =
+ MediaStreamTrack::GetTrack(track);
if (native_track)
- native_track->set_enabled(false);
+ native_track->SetEnabled(false);
}
bool MediaStreamCenter::didStopMediaStreamTrack(
const blink::WebMediaStreamTrack& track) {
DVLOG(1) << "MediaStreamCenter::didStopMediaStreamTrack";
- blink::WebMediaStreamSource source = track.source();
- MediaStreamSourceExtraData* extra_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
- if (!extra_data) {
- DVLOG(1) << "didStopMediaStreamTrack called on a remote track.";
- return false;
+ MediaStreamTrack* native_track = MediaStreamTrack::GetTrack(track);
+ native_track->Stop();
+ return true;
+}
+
+blink::WebAudioSourceProvider*
+MediaStreamCenter::createWebAudioSourceFromMediaStreamTrack(
+ const blink::WebMediaStreamTrack& track) {
+ DVLOG(1) << "MediaStreamCenter::createWebAudioSourceFromMediaStreamTrack";
+ MediaStreamTrack* media_stream_track =
+ static_cast<MediaStreamTrack*>(track.extraData());
+ // Only local audio track is supported now.
+ // TODO(xians): Support remote audio track.
+ if (!media_stream_track || !media_stream_track->is_local_track()) {
+ NOTIMPLEMENTED();
+ return NULL;
}
- extra_data->OnLocalSourceStop();
- return true;
+ blink::WebMediaStreamSource source = track.source();
+ DCHECK_EQ(source.type(), blink::WebMediaStreamSource::TypeAudio);
+ WebRtcLocalAudioSourceProvider* source_provider =
+ new WebRtcLocalAudioSourceProvider(track);
+ return source_provider;
}
void MediaStreamCenter::didStopLocalMediaStream(
const blink::WebMediaStream& stream) {
DVLOG(1) << "MediaStreamCenter::didStopLocalMediaStream";
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(stream.extraData());
- if (!extra_data) {
+ MediaStream* native_stream = MediaStream::GetMediaStream(stream);
+ if (!native_stream) {
NOTREACHED();
return;
}
// TODO(perkj): MediaStream::Stop is being deprecated. But for the moment we
- // need to support the old behavior and the new. Since we only create one
- // source object per actual device- we need to fake stopping a
- // MediaStreamTrack by disabling it if the same device is used as source by
- // multiple tracks. Note that disabling a track here, don't affect the
- // enabled property in JS.
+ // need to support both MediaStream::Stop and MediaStreamTrack::Stop.
blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
stream.audioTracks(audio_tracks);
for (size_t i = 0; i < audio_tracks.size(); ++i)
- didDisableMediaStreamTrack(audio_tracks[i]);
+ didStopMediaStreamTrack(audio_tracks[i]);
blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
stream.videoTracks(video_tracks);
for (size_t i = 0; i < video_tracks.size(); ++i)
- didDisableMediaStreamTrack(video_tracks[i]);
-
- extra_data->OnLocalStreamStop();
+ didStopMediaStreamTrack(video_tracks[i]);
}
-void MediaStreamCenter::didCreateMediaStream(
- blink::WebMediaStream& stream) {
- if (!rtc_factory_)
- return;
- rtc_factory_->CreateNativeLocalMediaStream(&stream);
+void MediaStreamCenter::didCreateMediaStream(blink::WebMediaStream& stream) {
+ DVLOG(1) << "MediaStreamCenter::didCreateMediaStream";
+ blink::WebMediaStream writable_stream(stream);
+ MediaStream* native_stream(
+ new MediaStream(stream));
+ writable_stream.setExtraData(native_stream);
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ stream.videoTracks(video_tracks);
+ for (size_t i = 0; i < video_tracks.size(); ++i) {
+ if (!MediaStreamTrack::GetTrack(video_tracks[i]))
+ CreateNativeMediaStreamTrack(video_tracks[i], rtc_factory_);
+ }
}
bool MediaStreamCenter::didAddMediaStreamTrack(
const blink::WebMediaStream& stream,
const blink::WebMediaStreamTrack& track) {
- if (!rtc_factory_)
- return false;
-
- return rtc_factory_->AddNativeMediaStreamTrack(stream, track);
+ DVLOG(1) << "MediaStreamCenter::didAddMediaStreamTrack";
+ MediaStream* native_stream = MediaStream::GetMediaStream(stream);
+ return native_stream->AddTrack(track);
}
bool MediaStreamCenter::didRemoveMediaStreamTrack(
const blink::WebMediaStream& stream,
const blink::WebMediaStreamTrack& track) {
- if (!rtc_factory_)
- return false;
-
- return rtc_factory_->RemoveNativeMediaStreamTrack(stream, track);
+ DVLOG(1) << "MediaStreamCenter::didRemoveMediaStreamTrack";
+ MediaStream* native_stream = MediaStream::GetMediaStream(stream);
+ return native_stream->RemoveTrack(track);
}
bool MediaStreamCenter::OnControlMessageReceived(const IPC::Message& message) {
diff --git a/chromium/content/renderer/media/media_stream_center.h b/chromium/content/renderer/media/media_stream_center.h
index fbf1be73b55..d2dfa98cd5d 100644
--- a/chromium/content/renderer/media/media_stream_center.h
+++ b/chromium/content/renderer/media/media_stream_center.h
@@ -18,18 +18,19 @@
#include "third_party/WebKit/public/platform/WebMediaStreamTrackSourcesRequest.h"
namespace blink {
+class WebAudioSourceProvider;
class WebMediaStreamCenterClient;
}
namespace content {
-class MediaStreamDependencyFactory;
+class PeerConnectionDependencyFactory;
class CONTENT_EXPORT MediaStreamCenter
: NON_EXPORTED_BASE(public blink::WebMediaStreamCenter),
public RenderProcessObserver {
public:
MediaStreamCenter(blink::WebMediaStreamCenterClient* client,
- MediaStreamDependencyFactory* factory);
+ PeerConnectionDependencyFactory* factory);
virtual ~MediaStreamCenter();
private:
@@ -51,6 +52,11 @@ class CONTENT_EXPORT MediaStreamCenter
virtual bool didStopMediaStreamTrack(
const blink::WebMediaStreamTrack& track) OVERRIDE;
+ virtual blink::WebAudioSourceProvider*
+ createWebAudioSourceFromMediaStreamTrack(
+ const blink::WebMediaStreamTrack& track) OVERRIDE;
+
+
virtual void didCreateMediaStream(
blink::WebMediaStream& stream) OVERRIDE;
@@ -70,7 +76,7 @@ class CONTENT_EXPORT MediaStreamCenter
// |rtc_factory_| is a weak pointer and is owned by the RenderThreadImpl.
// It is valid as long as RenderThreadImpl exist.
- MediaStreamDependencyFactory* rtc_factory_;
+ PeerConnectionDependencyFactory* rtc_factory_;
// A strictly increasing id that's used to label incoming GetSources()
// requests.
diff --git a/chromium/content/renderer/media/media_stream_client.h b/chromium/content/renderer/media/media_stream_client.h
deleted file mode 100644
index 71b90424dde..00000000000
--- a/chromium/content/renderer/media/media_stream_client.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_CLIENT_H_
-#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_CLIENT_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "content/renderer/media/video_frame_provider.h"
-
-class GURL;
-
-namespace content {
-
-class MediaStreamAudioRenderer;
-
-// Define an interface for media stream client to get some information about
-// the media stream.
-class MediaStreamClient {
- public:
- // Check if the |url| is derived from a media stream object.
- virtual bool IsMediaStream(const GURL& url) = 0;
-
- virtual scoped_refptr<VideoFrameProvider> GetVideoFrameProvider(
- const GURL& url,
- const base::Closure& error_cb,
- const VideoFrameProvider::RepaintCB& repaint_cb) = 0;
-
- virtual scoped_refptr<MediaStreamAudioRenderer> GetAudioRenderer(
- const GURL& url) = 0;
-
- protected:
- virtual ~MediaStreamClient() {}
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_CLIENT_H_
diff --git a/chromium/content/renderer/media/media_stream_constraints_util.cc b/chromium/content/renderer/media/media_stream_constraints_util.cc
new file mode 100644
index 00000000000..ddc223dacc6
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_constraints_util.cc
@@ -0,0 +1,132 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_constraints_util.h"
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+
+namespace content {
+
+namespace {
+
+// Convert a string ("true", "false") to a boolean.
+bool ConvertStringToBoolean(const std::string& string, bool* value) {
+ static const char kValueTrue[] = "true";
+ static const char kValueFalse[] = "false";
+
+ *value = (string == kValueTrue);
+ return *value || (string == kValueFalse);
+}
+
+} // namespace
+
+bool GetConstraintValueAsBoolean(const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ bool* value) {
+ return GetMandatoryConstraintValueAsBoolean(constraints, name, value) ||
+ GetOptionalConstraintValueAsBoolean(constraints, name, value);
+}
+
+bool GetConstraintValueAsInteger(const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ int* value) {
+ return GetMandatoryConstraintValueAsInteger(constraints, name, value) ||
+ GetOptionalConstraintValueAsInteger(constraints, name, value);
+}
+
+bool GetConstraintValueAsString(const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ std::string* value) {
+ blink::WebString value_str;
+ base::string16 name_16 = base::UTF8ToUTF16(name);
+ if (!constraints.getMandatoryConstraintValue(name_16, value_str) &&
+ !constraints.getOptionalConstraintValue(name_16, value_str)) {
+ return false;
+ }
+
+ *value = value_str.utf8();
+ return true;
+}
+
+bool GetMandatoryConstraintValueAsBoolean(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ bool* value) {
+ blink::WebString value_str;
+ if (!constraints.getMandatoryConstraintValue(base::UTF8ToUTF16(name),
+ value_str)) {
+ return false;
+ }
+
+ return ConvertStringToBoolean(value_str.utf8(), value);
+}
+
+bool GetMandatoryConstraintValueAsInteger(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ int* value) {
+ blink::WebString value_str;
+ if (!constraints.getMandatoryConstraintValue(base::UTF8ToUTF16(name),
+ value_str)) {
+ return false;
+ }
+
+ return base::StringToInt(value_str.utf8(), value);
+}
+
+bool GetMandatoryConstraintValueAsDouble(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ double* value) {
+ blink::WebString value_str;
+ if (!constraints.getMandatoryConstraintValue(base::UTF8ToUTF16(name),
+ value_str)) {
+ return false;
+ }
+ return base::StringToDouble(value_str.utf8(), value);
+}
+
+bool GetOptionalConstraintValueAsBoolean(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ bool* value) {
+ blink::WebString value_str;
+ if (!constraints.getOptionalConstraintValue(base::UTF8ToUTF16(name),
+ value_str)) {
+ return false;
+ }
+
+ return ConvertStringToBoolean(value_str.utf8(), value);
+}
+
+bool GetOptionalConstraintValueAsInteger(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ int* value) {
+ blink::WebString value_str;
+ if (!constraints.getOptionalConstraintValue(base::UTF8ToUTF16(name),
+ value_str)) {
+ return false;
+ }
+
+ return base::StringToInt(value_str.utf8(), value);
+}
+
+bool GetOptionalConstraintValueAsDouble(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ double* value) {
+ blink::WebString value_str;
+ if (!constraints.getOptionalConstraintValue(base::UTF8ToUTF16(name),
+ value_str)) {
+ return false;
+ }
+
+ return base::StringToDouble(value_str.utf8(), value);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_constraints_util.h b/chromium/content/renderer/media/media_stream_constraints_util.h
new file mode 100644
index 00000000000..2068588b99f
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_constraints_util.h
@@ -0,0 +1,87 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_CONSTRAINTS_UTIL_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_CONSTRAINTS_UTIL_H_
+
+#include <string>
+
+#include "content/common/content_export.h"
+
+namespace blink {
+class WebMediaConstraints;
+class WebString;
+}
+
+namespace content {
+
+// Method to get boolean value of constraint with |name| from constraints.
+// Returns true if the constraint is specified in either mandatory or optional
+// constraints.
+bool CONTENT_EXPORT GetConstraintValueAsBoolean(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ bool* value);
+
+// Method to get int value of constraint with |name| from constraints.
+// Returns true if the constraint is specified in either mandatory or Optional
+// constraints.
+bool CONTENT_EXPORT GetConstraintValueAsInteger(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ int* value);
+
+// Method to get std::string value of constraint with |name| from constraints.
+// Returns true if the constraint is specified in either mandatory or Optional
+// constraints.
+bool CONTENT_EXPORT GetConstraintValueAsString(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ std::string* value);
+
+// Method to get boolean value of constraint with |name| from the
+// mandatory constraints.
+bool CONTENT_EXPORT GetMandatoryConstraintValueAsBoolean(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ bool* value);
+
+// Method to get int value of constraint with |name| from the
+// mandatory constraints.
+bool CONTENT_EXPORT GetMandatoryConstraintValueAsInteger(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ int* value);
+
+// Method to get double value of constraint with |name| from the
+// mandatory constraints.
+bool CONTENT_EXPORT GetMandatoryConstraintValueAsDouble(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ double* value);
+
+// Method to get bool value of constraint with |name| from the
+// optional constraints.
+bool CONTENT_EXPORT GetOptionalConstraintValueAsBoolean(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ bool* value);
+
+// Method to get int value of constraint with |name| from the
+// optional constraints.
+bool CONTENT_EXPORT GetOptionalConstraintValueAsInteger(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ int* value);
+
+// Method to get double value of constraint with |name| from the
+// optional constraints.
+bool CONTENT_EXPORT GetOptionalConstraintValueAsDouble(
+ const blink::WebMediaConstraints& constraints,
+ const std::string& name,
+ double* value);
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_CONSTRAINTS_UTIL_H_
diff --git a/chromium/content/renderer/media/media_stream_constraints_util_unittest.cc b/chromium/content/renderer/media/media_stream_constraints_util_unittest.cc
new file mode 100644
index 00000000000..37e7df66ddf
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_constraints_util_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "content/renderer/media/media_stream_constraints_util.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+class MediaStreamConstraintsUtilTest : public testing::Test {
+};
+
+TEST_F(MediaStreamConstraintsUtilTest, BooleanConstraints) {
+ static const std::string kValueTrue = "true";
+ static const std::string kValueFalse = "false";
+
+ MockMediaConstraintFactory constraint_factory;
+ // Mandatory constraints.
+ constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
+ kValueTrue);
+ constraint_factory.AddMandatory(MediaAudioConstraints::kGoogEchoCancellation,
+ kValueFalse);
+ blink::WebMediaConstraints constraints =
+ constraint_factory.CreateWebMediaConstraints();
+ bool value_true = false;
+ bool value_false = false;
+ EXPECT_TRUE(GetMandatoryConstraintValueAsBoolean(
+ constraints, MediaAudioConstraints::kEchoCancellation, &value_true));
+ EXPECT_TRUE(GetMandatoryConstraintValueAsBoolean(
+ constraints, MediaAudioConstraints::kGoogEchoCancellation, &value_false));
+ EXPECT_TRUE(value_true);
+ EXPECT_FALSE(value_false);
+
+ // Optional constraints.
+ constraint_factory.AddOptional(MediaAudioConstraints::kEchoCancellation,
+ kValueFalse);
+ constraint_factory.AddOptional(MediaAudioConstraints::kGoogEchoCancellation,
+ kValueTrue);
+ constraints = constraint_factory.CreateWebMediaConstraints();
+ EXPECT_TRUE(GetOptionalConstraintValueAsBoolean(
+ constraints, MediaAudioConstraints::kEchoCancellation, &value_false));
+ EXPECT_TRUE(GetOptionalConstraintValueAsBoolean(
+ constraints, MediaAudioConstraints::kGoogEchoCancellation,
+ &value_true));
+ EXPECT_TRUE(value_true);
+ EXPECT_FALSE(value_false);
+}
+
+TEST_F(MediaStreamConstraintsUtilTest, IntConstraints) {
+ MockMediaConstraintFactory constraint_factory;
+ int width = 600;
+ int height = 480;
+ constraint_factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, width);
+ constraint_factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, height);
+ blink::WebMediaConstraints constraints =
+ constraint_factory.CreateWebMediaConstraints();
+ int value_width = 0;
+ int value_height = 0;
+ EXPECT_TRUE(GetMandatoryConstraintValueAsInteger(
+ constraints, MediaStreamVideoSource::kMaxWidth, &value_width));
+ EXPECT_TRUE(GetMandatoryConstraintValueAsInteger(
+ constraints, MediaStreamVideoSource::kMaxHeight, &value_height));
+ EXPECT_EQ(width, value_width);
+ EXPECT_EQ(height, value_height);
+
+ width = 720;
+ height = 600;
+ constraint_factory.AddOptional(MediaStreamVideoSource::kMaxWidth, width);
+ constraint_factory.AddOptional(MediaStreamVideoSource::kMaxHeight, height);
+ constraints = constraint_factory.CreateWebMediaConstraints();
+ EXPECT_TRUE(GetOptionalConstraintValueAsInteger(
+ constraints, MediaStreamVideoSource::kMaxWidth, &value_width));
+ EXPECT_TRUE(GetOptionalConstraintValueAsInteger(
+ constraints, MediaStreamVideoSource::kMaxHeight, &value_height));
+ EXPECT_EQ(width, value_width);
+ EXPECT_EQ(height, value_height);
+}
+
+TEST_F(MediaStreamConstraintsUtilTest, WrongBooleanConstraints) {
+ static const std::string kWrongValueTrue = "True";
+ static const std::string kWrongValueFalse = "False";
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
+ kWrongValueTrue);
+ constraint_factory.AddMandatory(MediaAudioConstraints::kGoogEchoCancellation,
+ kWrongValueFalse);
+ blink::WebMediaConstraints constraints =
+ constraint_factory.CreateWebMediaConstraints();
+ bool value_false = false;
+ EXPECT_FALSE(GetMandatoryConstraintValueAsBoolean(
+ constraints, MediaAudioConstraints::kEchoCancellation, &value_false));
+ EXPECT_FALSE(value_false);
+ EXPECT_FALSE(GetMandatoryConstraintValueAsBoolean(
+ constraints, MediaAudioConstraints::kGoogEchoCancellation, &value_false));
+ EXPECT_FALSE(value_false);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_dependency_factory.cc b/chromium/content/renderer/media/media_stream_dependency_factory.cc
deleted file mode 100644
index 8da7b14cb71..00000000000
--- a/chromium/content/renderer/media/media_stream_dependency_factory.cc
+++ /dev/null
@@ -1,1000 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/media_stream_dependency_factory.h"
-
-#include <vector>
-
-#include "base/command_line.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/synchronization/waitable_event.h"
-#include "content/public/common/content_switches.h"
-#include "content/renderer/media/media_stream_source_extra_data.h"
-#include "content/renderer/media/media_stream_track_extra_data.h"
-#include "content/renderer/media/media_stream_video_track.h"
-#include "content/renderer/media/peer_connection_identity_service.h"
-#include "content/renderer/media/rtc_media_constraints.h"
-#include "content/renderer/media/rtc_peer_connection_handler.h"
-#include "content/renderer/media/rtc_video_capturer.h"
-#include "content/renderer/media/rtc_video_decoder_factory.h"
-#include "content/renderer/media/rtc_video_encoder_factory.h"
-#include "content/renderer/media/video_capture_impl_manager.h"
-#include "content/renderer/media/webaudio_capturer_source.h"
-#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
-#include "content/renderer/media/webrtc_uma_histograms.h"
-#include "content/renderer/p2p/ipc_network_manager.h"
-#include "content/renderer/p2p/ipc_socket_factory.h"
-#include "content/renderer/p2p/port_allocator.h"
-#include "content/renderer/render_thread_impl.h"
-#include "jingle/glue/thread_wrapper.h"
-#include "media/filters/gpu_video_accelerator_factories.h"
-#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
-#include "third_party/WebKit/public/platform/WebMediaStream.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/WebKit/public/platform/WebURL.h"
-#include "third_party/WebKit/public/web/WebDocument.h"
-#include "third_party/WebKit/public/web/WebFrame.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
-
-#if defined(USE_OPENSSL)
-#include "third_party/libjingle/source/talk/base/ssladapter.h"
-#else
-#include "net/socket/nss_ssl_util.h"
-#endif
-
-#if defined(GOOGLE_TV)
-#include "content/renderer/media/rtc_video_decoder_factory_tv.h"
-#endif
-
-#if defined(OS_ANDROID)
-#include "media/base/android/media_codec_bridge.h"
-#endif
-
-namespace content {
-
-// Constant constraint keys which enables default audio constraints on
-// mediastreams with audio.
-struct {
- const char* key;
- const char* value;
-} const kDefaultAudioConstraints[] = {
- { webrtc::MediaConstraintsInterface::kEchoCancellation,
- webrtc::MediaConstraintsInterface::kValueTrue },
-#if defined(OS_CHROMEOS) || defined(OS_MACOSX)
- // Enable the extended filter mode AEC on platforms with known echo issues.
- { webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
- webrtc::MediaConstraintsInterface::kValueTrue },
-#endif
- { webrtc::MediaConstraintsInterface::kAutoGainControl,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kNoiseSuppression,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kHighpassFilter,
- webrtc::MediaConstraintsInterface::kValueTrue },
-};
-
-// Map of corresponding media constraints and platform effects.
-struct {
- const char* constraint;
- const media::AudioParameters::PlatformEffectsMask effect;
-} const kConstraintEffectMap[] = {
- { webrtc::MediaConstraintsInterface::kEchoCancellation,
- media::AudioParameters::ECHO_CANCELLER},
-};
-
-// Merge |constraints| with |kDefaultAudioConstraints|. For any key which exists
-// in both, the value from |constraints| is maintained, including its
-// mandatory/optional status. New values from |kDefaultAudioConstraints| will
-// be added with mandatory status.
-void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints) {
- for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
- bool already_set_value;
- if (!webrtc::FindConstraint(constraints, kDefaultAudioConstraints[i].key,
- &already_set_value, NULL)) {
- constraints->AddMandatory(kDefaultAudioConstraints[i].key,
- kDefaultAudioConstraints[i].value, false);
- } else {
- DVLOG(1) << "Constraint " << kDefaultAudioConstraints[i].key
- << " already set to " << already_set_value;
- }
- }
-}
-
-class P2PPortAllocatorFactory : public webrtc::PortAllocatorFactoryInterface {
- public:
- P2PPortAllocatorFactory(
- P2PSocketDispatcher* socket_dispatcher,
- talk_base::NetworkManager* network_manager,
- talk_base::PacketSocketFactory* socket_factory,
- blink::WebFrame* web_frame)
- : socket_dispatcher_(socket_dispatcher),
- network_manager_(network_manager),
- socket_factory_(socket_factory),
- web_frame_(web_frame) {
- }
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun_servers,
- const std::vector<TurnConfiguration>& turn_configurations) OVERRIDE {
- CHECK(web_frame_);
- P2PPortAllocator::Config config;
- if (stun_servers.size() > 0) {
- config.stun_server = stun_servers[0].server.hostname();
- config.stun_server_port = stun_servers[0].server.port();
- }
- config.legacy_relay = false;
- for (size_t i = 0; i < turn_configurations.size(); ++i) {
- P2PPortAllocator::Config::RelayServerConfig relay_config;
- relay_config.server_address = turn_configurations[i].server.hostname();
- relay_config.port = turn_configurations[i].server.port();
- relay_config.username = turn_configurations[i].username;
- relay_config.password = turn_configurations[i].password;
- relay_config.transport_type = turn_configurations[i].transport_type;
- relay_config.secure = turn_configurations[i].secure;
- config.relays.push_back(relay_config);
- }
-
- // Use first turn server as the stun server.
- if (turn_configurations.size() > 0) {
- config.stun_server = config.relays[0].server_address;
- config.stun_server_port = config.relays[0].port;
- }
-
- return new P2PPortAllocator(
- web_frame_, socket_dispatcher_.get(), network_manager_,
- socket_factory_, config);
- }
-
- protected:
- virtual ~P2PPortAllocatorFactory() {}
-
- private:
- scoped_refptr<P2PSocketDispatcher> socket_dispatcher_;
- // |network_manager_| and |socket_factory_| are a weak references, owned by
- // MediaStreamDependencyFactory.
- talk_base::NetworkManager* network_manager_;
- talk_base::PacketSocketFactory* socket_factory_;
- // Raw ptr to the WebFrame that created the P2PPortAllocatorFactory.
- blink::WebFrame* web_frame_;
-};
-
-// SourceStateObserver is a help class used for observing the startup state
-// transition of webrtc media sources such as a camera or microphone.
-// An instance of the object deletes itself after use.
-// Usage:
-// 1. Create an instance of the object with the blink::WebMediaStream
-// the observed sources belongs to a callback.
-// 2. Add the sources to the observer using AddSource.
-// 3. Call StartObserving()
-// 4. The callback will be triggered when all sources have transitioned from
-// webrtc::MediaSourceInterface::kInitializing.
-class SourceStateObserver : public webrtc::ObserverInterface,
- public base::NonThreadSafe {
- public:
- SourceStateObserver(
- blink::WebMediaStream* web_stream,
- const MediaStreamDependencyFactory::MediaSourcesCreatedCallback& callback)
- : web_stream_(web_stream),
- ready_callback_(callback),
- live_(true) {
- }
-
- void AddSource(webrtc::MediaSourceInterface* source) {
- DCHECK(CalledOnValidThread());
- switch (source->state()) {
- case webrtc::MediaSourceInterface::kInitializing:
- sources_.push_back(source);
- source->RegisterObserver(this);
- break;
- case webrtc::MediaSourceInterface::kLive:
- // The source is already live so we don't need to wait for it.
- break;
- case webrtc::MediaSourceInterface::kEnded:
- // The source have already failed.
- live_ = false;
- break;
- default:
- NOTREACHED();
- }
- }
-
- void StartObservering() {
- DCHECK(CalledOnValidThread());
- CheckIfSourcesAreLive();
- }
-
- virtual void OnChanged() OVERRIDE {
- DCHECK(CalledOnValidThread());
- CheckIfSourcesAreLive();
- }
-
- private:
- void CheckIfSourcesAreLive() {
- ObservedSources::iterator it = sources_.begin();
- while (it != sources_.end()) {
- if ((*it)->state() != webrtc::MediaSourceInterface::kInitializing) {
- live_ &= (*it)->state() == webrtc::MediaSourceInterface::kLive;
- (*it)->UnregisterObserver(this);
- it = sources_.erase(it);
- } else {
- ++it;
- }
- }
- if (sources_.empty()) {
- ready_callback_.Run(web_stream_, live_);
- delete this;
- }
- }
-
- blink::WebMediaStream* web_stream_;
- MediaStreamDependencyFactory::MediaSourcesCreatedCallback ready_callback_;
- bool live_;
- typedef std::vector<scoped_refptr<webrtc::MediaSourceInterface> >
- ObservedSources;
- ObservedSources sources_;
-};
-
-MediaStreamDependencyFactory::MediaStreamDependencyFactory(
- VideoCaptureImplManager* vc_manager,
- P2PSocketDispatcher* p2p_socket_dispatcher)
- : network_manager_(NULL),
-#if defined(GOOGLE_TV)
- decoder_factory_tv_(NULL),
-#endif
- vc_manager_(vc_manager),
- p2p_socket_dispatcher_(p2p_socket_dispatcher),
- signaling_thread_(NULL),
- worker_thread_(NULL),
- chrome_worker_thread_("Chrome_libJingle_WorkerThread") {
-}
-
-MediaStreamDependencyFactory::~MediaStreamDependencyFactory() {
- CleanupPeerConnectionFactory();
-}
-
-blink::WebRTCPeerConnectionHandler*
-MediaStreamDependencyFactory::CreateRTCPeerConnectionHandler(
- blink::WebRTCPeerConnectionHandlerClient* client) {
- // Save histogram data so we can see how much PeerConnetion is used.
- // The histogram counts the number of calls to the JS API
- // webKitRTCPeerConnection.
- UpdateWebRTCMethodCount(WEBKIT_RTC_PEER_CONNECTION);
-
- if (!EnsurePeerConnectionFactory())
- return NULL;
-
- return new RTCPeerConnectionHandler(client, this);
-}
-
-void MediaStreamDependencyFactory::CreateNativeMediaSources(
- int render_view_id,
- const blink::WebMediaConstraints& audio_constraints,
- const blink::WebMediaConstraints& video_constraints,
- blink::WebMediaStream* web_stream,
- const MediaSourcesCreatedCallback& sources_created) {
- DVLOG(1) << "MediaStreamDependencyFactory::CreateNativeMediaSources()";
- if (!EnsurePeerConnectionFactory()) {
- sources_created.Run(web_stream, false);
- return;
- }
-
- // |source_observer| clean up itself when it has completed
- // source_observer->StartObservering.
- SourceStateObserver* source_observer =
- new SourceStateObserver(web_stream, sources_created);
-
- // Create local video sources.
- RTCMediaConstraints native_video_constraints(video_constraints);
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- web_stream->videoTracks(video_tracks);
- for (size_t i = 0; i < video_tracks.size(); ++i) {
- const blink::WebMediaStreamSource& source = video_tracks[i].source();
- MediaStreamSourceExtraData* source_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
-
- // Check if the source has already been created. This happens when the same
- // source is used in multiple MediaStreams as a result of calling
- // getUserMedia.
- if (source_data->video_source())
- continue;
-
- const bool is_screencast =
- source_data->device_info().device.type == MEDIA_TAB_VIDEO_CAPTURE ||
- source_data->device_info().device.type == MEDIA_DESKTOP_VIDEO_CAPTURE;
- source_data->SetVideoSource(
- CreateLocalVideoSource(source_data->device_info().session_id,
- is_screencast,
- &native_video_constraints).get());
- source_observer->AddSource(source_data->video_source());
- }
-
- // Do additional source initialization if the audio source is a valid
- // microphone or tab audio.
- RTCMediaConstraints native_audio_constraints(audio_constraints);
- ApplyFixedAudioConstraints(&native_audio_constraints);
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
- web_stream->audioTracks(audio_tracks);
- for (size_t i = 0; i < audio_tracks.size(); ++i) {
- const blink::WebMediaStreamSource& source = audio_tracks[i].source();
- MediaStreamSourceExtraData* source_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
-
- // Check if the source has already been created. This happens when the same
- // source is used in multiple MediaStreams as a result of calling
- // getUserMedia.
- if (source_data->local_audio_source())
- continue;
-
- // TODO(xians): Create a new capturer for difference microphones when we
- // support multiple microphones. See issue crbug/262117 .
- StreamDeviceInfo device_info = source_data->device_info();
- RTCMediaConstraints constraints = native_audio_constraints;
-
- // If any platform effects are available, check them against the
- // constraints. Disable effects to match false constraints, but if a
- // constraint is true, set the constraint to false to later disable the
- // software effect.
- int effects = device_info.device.input.effects;
- if (effects != media::AudioParameters::NO_EFFECTS) {
- for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kConstraintEffectMap); ++i) {
- bool value;
- if (!webrtc::FindConstraint(&constraints,
- kConstraintEffectMap[i].constraint, &value, NULL) || !value) {
- // If the constraint is false, or does not exist, disable the platform
- // effect.
- effects &= ~kConstraintEffectMap[i].effect;
- DVLOG(1) << "Disabling constraint: "
- << kConstraintEffectMap[i].constraint;
- } else if (effects & kConstraintEffectMap[i].effect) {
- // If the constraint is true, leave the platform effect enabled, and
- // set the constraint to false to later disable the software effect.
- constraints.AddMandatory(kConstraintEffectMap[i].constraint,
- webrtc::MediaConstraintsInterface::kValueFalse, true);
- DVLOG(1) << "Disabling platform effect: "
- << kConstraintEffectMap[i].constraint;
- }
- }
- device_info.device.input.effects = effects;
- }
-
- scoped_refptr<WebRtcAudioCapturer> capturer(
- MaybeCreateAudioCapturer(render_view_id, device_info));
- if (!capturer.get()) {
- DLOG(WARNING) << "Failed to create the capturer for device "
- << device_info.device.id;
- sources_created.Run(web_stream, false);
- // TODO(xians): Don't we need to check if source_observer is observing
- // something? If not, then it looks like we have a leak here.
- // OTOH, if it _is_ observing something, then the callback might
- // be called multiple times which is likely also a bug.
- return;
- }
- source_data->SetAudioCapturer(capturer);
-
- // Creates a LocalAudioSource object which holds audio options.
- // TODO(xians): The option should apply to the track instead of the source.
- source_data->SetLocalAudioSource(
- CreateLocalAudioSource(&constraints).get());
- source_observer->AddSource(source_data->local_audio_source());
- }
-
- source_observer->StartObservering();
-}
-
-void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
- blink::WebMediaStream* web_stream) {
- DVLOG(1) << "MediaStreamDependencyFactory::CreateNativeLocalMediaStream()";
- if (!EnsurePeerConnectionFactory()) {
- DVLOG(1) << "EnsurePeerConnectionFactory() failed!";
- return;
- }
-
- std::string label = UTF16ToUTF8(web_stream->id());
- scoped_refptr<webrtc::MediaStreamInterface> native_stream =
- CreateLocalMediaStream(label);
- MediaStreamExtraData* extra_data =
- new MediaStreamExtraData(native_stream.get(), true);
- web_stream->setExtraData(extra_data);
-
- // Add audio tracks.
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
- web_stream->audioTracks(audio_tracks);
- for (size_t i = 0; i < audio_tracks.size(); ++i) {
- AddNativeMediaStreamTrack(*web_stream, audio_tracks[i]);
- }
-
- // Add video tracks.
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- web_stream->videoTracks(video_tracks);
- for (size_t i = 0; i < video_tracks.size(); ++i) {
- AddNativeMediaStreamTrack(*web_stream, video_tracks[i]);
- }
-}
-
-void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
- blink::WebMediaStream* web_stream,
- const MediaStreamExtraData::StreamStopCallback& stream_stop) {
- CreateNativeLocalMediaStream(web_stream);
-
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(web_stream->extraData());
- extra_data->SetLocalStreamStopCallback(stream_stop);
-}
-
-scoped_refptr<webrtc::AudioTrackInterface>
-MediaStreamDependencyFactory::CreateNativeAudioMediaStreamTrack(
- const blink::WebMediaStreamTrack& track) {
- blink::WebMediaStreamSource source = track.source();
- DCHECK_EQ(source.type(), blink::WebMediaStreamSource::TypeAudio);
- MediaStreamSourceExtraData* source_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
-
- // In the future the constraints will belong to the track itself, but
- // right now they're on the source, so we fetch them from there.
- RTCMediaConstraints track_constraints(source.constraints());
-
- // Apply default audio constraints that enable echo cancellation,
- // automatic gain control, noise suppression and high-pass filter.
- ApplyFixedAudioConstraints(&track_constraints);
-
- scoped_refptr<WebAudioCapturerSource> webaudio_source;
- if (!source_data) {
- if (source.requiresAudioConsumer()) {
- // We're adding a WebAudio MediaStream.
- // Create a specific capturer for each WebAudio consumer.
- webaudio_source = CreateWebAudioSource(&source, &track_constraints);
- source_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
- } else {
- // TODO(perkj): Implement support for sources from
- // remote MediaStreams.
- NOTIMPLEMENTED();
- return NULL;
- }
- }
-
- std::string track_id = UTF16ToUTF8(track.id());
- scoped_refptr<WebRtcAudioCapturer> capturer;
- if (GetWebRtcAudioDevice())
- capturer = GetWebRtcAudioDevice()->GetDefaultCapturer();
-
- scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- CreateLocalAudioTrack(track_id,
- capturer,
- webaudio_source.get(),
- source_data->local_audio_source(),
- &track_constraints));
- AddNativeTrackToBlinkTrack(audio_track.get(), track, true);
-
- audio_track->set_enabled(track.isEnabled());
-
- // Pass the pointer of the source provider to the blink audio track.
- blink::WebMediaStreamTrack writable_track = track;
- writable_track.setSourceProvider(static_cast<WebRtcLocalAudioTrack*>(
- audio_track.get())->audio_source_provider());
-
- return audio_track;
-}
-
-scoped_refptr<webrtc::VideoTrackInterface>
-MediaStreamDependencyFactory::CreateNativeVideoMediaStreamTrack(
- const blink::WebMediaStreamTrack& track) {
- blink::WebMediaStreamSource source = track.source();
- DCHECK_EQ(source.type(), blink::WebMediaStreamSource::TypeVideo);
- MediaStreamSourceExtraData* source_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
-
- if (!source_data) {
- // TODO(perkj): Implement support for sources from
- // remote MediaStreams.
- NOTIMPLEMENTED();
- return NULL;
- }
-
- std::string track_id = UTF16ToUTF8(track.id());
- scoped_refptr<webrtc::VideoTrackInterface> video_track(
- CreateLocalVideoTrack(track_id, source_data->video_source()));
- AddNativeTrackToBlinkTrack(video_track.get(), track, true);
-
- video_track->set_enabled(track.isEnabled());
-
- return video_track;
-}
-
-void MediaStreamDependencyFactory::CreateNativeMediaStreamTrack(
- const blink::WebMediaStreamTrack& track) {
- DCHECK(!track.isNull() && !track.extraData());
- DCHECK(!track.source().isNull());
-
- switch (track.source().type()) {
- case blink::WebMediaStreamSource::TypeAudio:
- CreateNativeAudioMediaStreamTrack(track);
- break;
- case blink::WebMediaStreamSource::TypeVideo:
- CreateNativeVideoMediaStreamTrack(track);
- break;
- }
-}
-
-bool MediaStreamDependencyFactory::AddNativeMediaStreamTrack(
- const blink::WebMediaStream& stream,
- const blink::WebMediaStreamTrack& track) {
- webrtc::MediaStreamInterface* native_stream = GetNativeMediaStream(stream);
- DCHECK(native_stream);
-
- switch (track.source().type()) {
- case blink::WebMediaStreamSource::TypeAudio: {
- scoped_refptr<webrtc::AudioTrackInterface> native_audio_track;
- if (!track.extraData()) {
- native_audio_track = CreateNativeAudioMediaStreamTrack(track);
- } else {
- native_audio_track = static_cast<webrtc::AudioTrackInterface*>(
- GetNativeMediaStreamTrack(track));
- }
-
- return native_audio_track.get() &&
- native_stream->AddTrack(native_audio_track);
- }
- case blink::WebMediaStreamSource::TypeVideo: {
- scoped_refptr<webrtc::VideoTrackInterface> native_video_track;
- if (!track.extraData()) {
- native_video_track = CreateNativeVideoMediaStreamTrack(track);
- } else {
- native_video_track = static_cast<webrtc::VideoTrackInterface*>(
- GetNativeMediaStreamTrack(track));
- }
-
- return native_video_track.get() &&
- native_stream->AddTrack(native_video_track);
- }
- }
- return false;
-}
-
-bool MediaStreamDependencyFactory::AddNativeVideoMediaTrack(
- const std::string& track_id,
- blink::WebMediaStream* stream,
- cricket::VideoCapturer* capturer) {
- if (!stream) {
- LOG(ERROR) << "AddNativeVideoMediaTrack called with null WebMediaStream.";
- return false;
- }
-
- // Create native track from the source.
- scoped_refptr<webrtc::VideoTrackInterface> native_track =
- CreateLocalVideoTrack(track_id, capturer);
-
- // Add the native track to native stream
- webrtc::MediaStreamInterface* native_stream =
- GetNativeMediaStream(*stream);
- DCHECK(native_stream);
- native_stream->AddTrack(native_track.get());
-
- // Create a new webkit video track.
- blink::WebMediaStreamTrack webkit_track;
- blink::WebMediaStreamSource webkit_source;
- blink::WebString webkit_track_id(UTF8ToUTF16(track_id));
- blink::WebMediaStreamSource::Type type =
- blink::WebMediaStreamSource::TypeVideo;
- webkit_source.initialize(webkit_track_id, type, webkit_track_id);
-
- webkit_track.initialize(webkit_track_id, webkit_source);
- AddNativeTrackToBlinkTrack(native_track.get(), webkit_track, true);
-
- // Add the track to WebMediaStream.
- stream->addTrack(webkit_track);
- return true;
-}
-
-bool MediaStreamDependencyFactory::RemoveNativeMediaStreamTrack(
- const blink::WebMediaStream& stream,
- const blink::WebMediaStreamTrack& track) {
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(stream.extraData());
- webrtc::MediaStreamInterface* native_stream = extra_data->stream().get();
- DCHECK(native_stream);
- std::string track_id = UTF16ToUTF8(track.id());
- switch (track.source().type()) {
- case blink::WebMediaStreamSource::TypeAudio:
- return native_stream->RemoveTrack(
- native_stream->FindAudioTrack(track_id));
- case blink::WebMediaStreamSource::TypeVideo:
- return native_stream->RemoveTrack(
- native_stream->FindVideoTrack(track_id));
- }
- return false;
-}
-
-bool MediaStreamDependencyFactory::CreatePeerConnectionFactory() {
- DCHECK(!pc_factory_.get());
- DCHECK(!audio_device_.get());
- DVLOG(1) << "MediaStreamDependencyFactory::CreatePeerConnectionFactory()";
-
- scoped_ptr<cricket::WebRtcVideoDecoderFactory> decoder_factory;
- scoped_ptr<cricket::WebRtcVideoEncoderFactory> encoder_factory;
-
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories =
- RenderThreadImpl::current()->GetGpuFactories();
-#if !defined(GOOGLE_TV)
- if (!cmd_line->HasSwitch(switches::kDisableWebRtcHWDecoding)) {
- if (gpu_factories)
- decoder_factory.reset(new RTCVideoDecoderFactory(gpu_factories));
- }
-#else
- // PeerConnectionFactory will hold the ownership of this
- // VideoDecoderFactory.
- decoder_factory.reset(decoder_factory_tv_ = new RTCVideoDecoderFactoryTv());
-#endif
-
- if (!cmd_line->HasSwitch(switches::kDisableWebRtcHWEncoding)) {
- if (gpu_factories)
- encoder_factory.reset(new RTCVideoEncoderFactory(gpu_factories));
- }
-
-#if defined(OS_ANDROID)
- if (!media::MediaCodecBridge::IsAvailable() ||
- !media::MediaCodecBridge::SupportsSetParameters()) {
- encoder_factory.reset();
- }
-#endif
-
- scoped_refptr<WebRtcAudioDeviceImpl> audio_device(
- new WebRtcAudioDeviceImpl());
-
- scoped_refptr<webrtc::PeerConnectionFactoryInterface> factory(
- webrtc::CreatePeerConnectionFactory(worker_thread_,
- signaling_thread_,
- audio_device.get(),
- encoder_factory.release(),
- decoder_factory.release()));
- if (!factory.get()) {
- return false;
- }
-
- audio_device_ = audio_device;
- pc_factory_ = factory;
- webrtc::PeerConnectionFactoryInterface::Options factory_options;
- factory_options.enable_aec_dump =
- cmd_line->HasSwitch(switches::kEnableWebRtcAecRecordings);
- factory_options.disable_sctp_data_channels =
- cmd_line->HasSwitch(switches::kDisableSCTPDataChannels);
- factory_options.disable_encryption =
- cmd_line->HasSwitch(switches::kDisableWebRtcEncryption);
- pc_factory_->SetOptions(factory_options);
- return true;
-}
-
-bool MediaStreamDependencyFactory::PeerConnectionFactoryCreated() {
- return pc_factory_.get() != NULL;
-}
-
-scoped_refptr<webrtc::PeerConnectionInterface>
-MediaStreamDependencyFactory::CreatePeerConnection(
- const webrtc::PeerConnectionInterface::IceServers& ice_servers,
- const webrtc::MediaConstraintsInterface* constraints,
- blink::WebFrame* web_frame,
- webrtc::PeerConnectionObserver* observer) {
- CHECK(web_frame);
- CHECK(observer);
-
- scoped_refptr<P2PPortAllocatorFactory> pa_factory =
- new talk_base::RefCountedObject<P2PPortAllocatorFactory>(
- p2p_socket_dispatcher_.get(),
- network_manager_,
- socket_factory_.get(),
- web_frame);
-
- PeerConnectionIdentityService* identity_service =
- new PeerConnectionIdentityService(
- GURL(web_frame->document().url().spec()).GetOrigin());
-
- return pc_factory_->CreatePeerConnection(ice_servers,
- constraints,
- pa_factory.get(),
- identity_service,
- observer).get();
-}
-
-scoped_refptr<webrtc::MediaStreamInterface>
-MediaStreamDependencyFactory::CreateLocalMediaStream(
- const std::string& label) {
- return pc_factory_->CreateLocalMediaStream(label).get();
-}
-
-scoped_refptr<webrtc::AudioSourceInterface>
-MediaStreamDependencyFactory::CreateLocalAudioSource(
- const webrtc::MediaConstraintsInterface* constraints) {
- scoped_refptr<webrtc::AudioSourceInterface> source =
- pc_factory_->CreateAudioSource(constraints).get();
- return source;
-}
-
-scoped_refptr<webrtc::VideoSourceInterface>
-MediaStreamDependencyFactory::CreateLocalVideoSource(
- int video_session_id,
- bool is_screencast,
- const webrtc::MediaConstraintsInterface* constraints) {
- RtcVideoCapturer* capturer = new RtcVideoCapturer(
- video_session_id, vc_manager_.get(), is_screencast);
-
- // The video source takes ownership of |capturer|.
- scoped_refptr<webrtc::VideoSourceInterface> source =
- pc_factory_->CreateVideoSource(capturer, constraints).get();
- return source;
-}
-
-scoped_refptr<WebAudioCapturerSource>
-MediaStreamDependencyFactory::CreateWebAudioSource(
- blink::WebMediaStreamSource* source,
- RTCMediaConstraints* constraints) {
- DVLOG(1) << "MediaStreamDependencyFactory::CreateWebAudioSource()";
- DCHECK(GetWebRtcAudioDevice());
-
- scoped_refptr<WebAudioCapturerSource>
- webaudio_capturer_source(new WebAudioCapturerSource());
- MediaStreamSourceExtraData* source_data = new MediaStreamSourceExtraData();
-
- // Create a LocalAudioSource object which holds audio options.
- // SetLocalAudioSource() affects core audio parts in third_party/Libjingle.
- source_data->SetLocalAudioSource(CreateLocalAudioSource(constraints).get());
- source->setExtraData(source_data);
-
- // Replace the default source with WebAudio as source instead.
- source->addAudioConsumer(webaudio_capturer_source.get());
-
- return webaudio_capturer_source;
-}
-
-scoped_refptr<webrtc::VideoTrackInterface>
-MediaStreamDependencyFactory::CreateLocalVideoTrack(
- const std::string& id,
- webrtc::VideoSourceInterface* source) {
- return pc_factory_->CreateVideoTrack(id, source).get();
-}
-
-scoped_refptr<webrtc::VideoTrackInterface>
-MediaStreamDependencyFactory::CreateLocalVideoTrack(
- const std::string& id, cricket::VideoCapturer* capturer) {
- if (!capturer) {
- LOG(ERROR) << "CreateLocalVideoTrack called with null VideoCapturer.";
- return NULL;
- }
-
- // Create video source from the |capturer|.
- scoped_refptr<webrtc::VideoSourceInterface> source =
- pc_factory_->CreateVideoSource(capturer, NULL).get();
-
- // Create native track from the source.
- return pc_factory_->CreateVideoTrack(id, source.get()).get();
-}
-
-scoped_refptr<webrtc::AudioTrackInterface>
-MediaStreamDependencyFactory::CreateLocalAudioTrack(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* source,
- const webrtc::MediaConstraintsInterface* constraints) {
- // TODO(xians): Merge |source| to the capturer(). We can't do this today
- // because only one capturer() is supported while one |source| is created
- // for each audio track.
- scoped_refptr<WebRtcLocalAudioTrack> audio_track(
- WebRtcLocalAudioTrack::Create(id, capturer, webaudio_source,
- source, constraints));
-
- // Add the WebRtcAudioDevice as the sink to the local audio track.
- audio_track->AddSink(GetWebRtcAudioDevice());
- // Start the audio track. This will hook the |audio_track| to the capturer
- // as the sink of the audio, and only start the source of the capturer if
- // it is the first audio track connecting to the capturer.
- audio_track->Start();
- return audio_track;
-}
-
-webrtc::SessionDescriptionInterface*
-MediaStreamDependencyFactory::CreateSessionDescription(
- const std::string& type,
- const std::string& sdp,
- webrtc::SdpParseError* error) {
- return webrtc::CreateSessionDescription(type, sdp, error);
-}
-
-webrtc::IceCandidateInterface* MediaStreamDependencyFactory::CreateIceCandidate(
- const std::string& sdp_mid,
- int sdp_mline_index,
- const std::string& sdp) {
- return webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, sdp);
-}
-
-WebRtcAudioDeviceImpl*
-MediaStreamDependencyFactory::GetWebRtcAudioDevice() {
- return audio_device_.get();
-}
-
-void MediaStreamDependencyFactory::InitializeWorkerThread(
- talk_base::Thread** thread,
- base::WaitableEvent* event) {
- jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
- jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
- *thread = jingle_glue::JingleThreadWrapper::current();
- event->Signal();
-}
-
-void MediaStreamDependencyFactory::CreateIpcNetworkManagerOnWorkerThread(
- base::WaitableEvent* event) {
- DCHECK_EQ(base::MessageLoop::current(), chrome_worker_thread_.message_loop());
- network_manager_ = new IpcNetworkManager(p2p_socket_dispatcher_.get());
- event->Signal();
-}
-
-void MediaStreamDependencyFactory::DeleteIpcNetworkManager() {
- DCHECK_EQ(base::MessageLoop::current(), chrome_worker_thread_.message_loop());
- delete network_manager_;
- network_manager_ = NULL;
-}
-
-bool MediaStreamDependencyFactory::EnsurePeerConnectionFactory() {
- DCHECK(CalledOnValidThread());
- if (PeerConnectionFactoryCreated())
- return true;
-
- if (!signaling_thread_) {
- jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
- jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
- signaling_thread_ = jingle_glue::JingleThreadWrapper::current();
- CHECK(signaling_thread_);
- }
-
- if (!worker_thread_) {
- if (!chrome_worker_thread_.IsRunning()) {
- if (!chrome_worker_thread_.Start()) {
- LOG(ERROR) << "Could not start worker thread";
- signaling_thread_ = NULL;
- return false;
- }
- }
- base::WaitableEvent event(true, false);
- chrome_worker_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &MediaStreamDependencyFactory::InitializeWorkerThread,
- base::Unretained(this),
- &worker_thread_,
- &event));
- event.Wait();
- DCHECK(worker_thread_);
- }
-
- if (!network_manager_) {
- base::WaitableEvent event(true, false);
- chrome_worker_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &MediaStreamDependencyFactory::CreateIpcNetworkManagerOnWorkerThread,
- base::Unretained(this),
- &event));
- event.Wait();
- }
-
- if (!socket_factory_) {
- socket_factory_.reset(
- new IpcPacketSocketFactory(p2p_socket_dispatcher_.get()));
- }
-
- // Init SSL, which will be needed by PeerConnection.
-#if defined(USE_OPENSSL)
- if (!talk_base::InitializeSSL()) {
- LOG(ERROR) << "Failed on InitializeSSL.";
- return false;
- }
-#else
- // TODO(ronghuawu): Replace this call with InitializeSSL.
- net::EnsureNSSSSLInit();
-#endif
-
- if (!CreatePeerConnectionFactory()) {
- LOG(ERROR) << "Could not create PeerConnection factory";
- return false;
- }
- return true;
-}
-
-void MediaStreamDependencyFactory::CleanupPeerConnectionFactory() {
- pc_factory_ = NULL;
- if (network_manager_) {
- // The network manager needs to free its resources on the thread they were
- // created, which is the worked thread.
- if (chrome_worker_thread_.IsRunning()) {
- chrome_worker_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &MediaStreamDependencyFactory::DeleteIpcNetworkManager,
- base::Unretained(this)));
- // Stopping the thread will wait until all tasks have been
- // processed before returning. We wait for the above task to finish before
- // letting the the function continue to avoid any potential race issues.
- chrome_worker_thread_.Stop();
- } else {
- NOTREACHED() << "Worker thread not running.";
- }
- }
-}
-
-scoped_refptr<WebRtcAudioCapturer>
-MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
- int render_view_id,
- const StreamDeviceInfo& device_info) {
- // TODO(xians): Handle the cases when gUM is called without a proper render
- // view, for example, by an extension.
- DCHECK_GE(render_view_id, 0);
-
- scoped_refptr<WebRtcAudioCapturer> capturer =
- GetWebRtcAudioDevice()->GetDefaultCapturer();
-
- // If the default capturer does not exist or |render_view_id| == -1, create
- // a new capturer.
- bool is_new_capturer = false;
- if (!capturer.get()) {
- capturer = WebRtcAudioCapturer::CreateCapturer();
- is_new_capturer = true;
- }
-
- if (!capturer->Initialize(
- render_view_id,
- static_cast<media::ChannelLayout>(
- device_info.device.input.channel_layout),
- device_info.device.input.sample_rate,
- device_info.device.input.frames_per_buffer,
- device_info.session_id,
- device_info.device.id,
- device_info.device.matched_output.sample_rate,
- device_info.device.matched_output.frames_per_buffer,
- device_info.device.input.effects)) {
- return NULL;
- }
-
- // Add the capturer to the WebRtcAudioDeviceImpl if it is a new capturer.
- if (is_new_capturer)
- GetWebRtcAudioDevice()->AddAudioCapturer(capturer);
-
- return capturer;
-}
-
-void MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
- webrtc::MediaStreamTrackInterface* native_track,
- const blink::WebMediaStreamTrack& webkit_track,
- bool is_local_track) {
- DCHECK(!webkit_track.isNull() && !webkit_track.extraData());
- blink::WebMediaStreamTrack track = webkit_track;
-
- if (track.source().type() == blink::WebMediaStreamSource::TypeVideo) {
- track.setExtraData(new MediaStreamVideoTrack(
- static_cast<webrtc::VideoTrackInterface*>(native_track),
- is_local_track));
- } else {
- track.setExtraData(new MediaStreamTrackExtraData(native_track,
- is_local_track));
- }
-}
-
-webrtc::MediaStreamInterface*
-MediaStreamDependencyFactory::GetNativeMediaStream(
- const blink::WebMediaStream& stream) {
- if (stream.isNull())
- return NULL;
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(stream.extraData());
- return extra_data ? extra_data->stream().get() : NULL;
-}
-
-webrtc::MediaStreamTrackInterface*
-MediaStreamDependencyFactory::GetNativeMediaStreamTrack(
- const blink::WebMediaStreamTrack& track) {
- if (track.isNull())
- return NULL;
- MediaStreamTrackExtraData* extra_data =
- static_cast<MediaStreamTrackExtraData*>(track.extraData());
- return extra_data ? extra_data->track().get() : NULL;
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_dependency_factory.h b/chromium/content/renderer/media/media_stream_dependency_factory.h
deleted file mode 100644
index d0759708b8f..00000000000
--- a/chromium/content/renderer/media/media_stream_dependency_factory.h
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_DEPENDENCY_FACTORY_H_
-#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_DEPENDENCY_FACTORY_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/thread.h"
-#include "content/common/content_export.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "content/renderer/p2p/socket_dispatcher.h"
-#include "third_party/libjingle/source/talk/app/webrtc/peerconnectioninterface.h"
-#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
-
-namespace base {
-class WaitableEvent;
-}
-
-namespace talk_base {
-class NetworkManager;
-class PacketSocketFactory;
-class Thread;
-}
-
-namespace webrtc {
-class PeerConnection;
-}
-
-namespace blink {
-class WebFrame;
-class WebMediaConstraints;
-class WebMediaStream;
-class WebRTCPeerConnectionHandler;
-class WebRTCPeerConnectionHandlerClient;
-}
-
-namespace content {
-
-class IpcNetworkManager;
-class IpcPacketSocketFactory;
-class RTCMediaConstraints;
-class VideoCaptureImplManager;
-class WebAudioCapturerSource;
-class WebRtcAudioCapturer;
-class WebRtcAudioDeviceImpl;
-class WebRtcLoggingHandlerImpl;
-class WebRtcLoggingMessageFilter;
-struct StreamDeviceInfo;
-
-#if defined(GOOGLE_TV)
-class RTCVideoDecoderFactoryTv;
-#endif
-
-// Object factory for RTC MediaStreams and RTC PeerConnections.
-class CONTENT_EXPORT MediaStreamDependencyFactory
- : NON_EXPORTED_BASE(public base::NonThreadSafe) {
- public:
- // MediaSourcesCreatedCallback is used in CreateNativeMediaSources.
- typedef base::Callback<void(blink::WebMediaStream* web_stream,
- bool live)> MediaSourcesCreatedCallback;
- MediaStreamDependencyFactory(
- VideoCaptureImplManager* vc_manager,
- P2PSocketDispatcher* p2p_socket_dispatcher);
- virtual ~MediaStreamDependencyFactory();
-
- // Create a RTCPeerConnectionHandler object that implements the
- // WebKit WebRTCPeerConnectionHandler interface.
- blink::WebRTCPeerConnectionHandler* CreateRTCPeerConnectionHandler(
- blink::WebRTCPeerConnectionHandlerClient* client);
-
- // CreateNativeMediaSources creates libjingle representations of
- // the underlying sources to the tracks in |web_stream|.
- // |sources_created| is invoked when the sources have either been created and
- // transitioned to a live state or failed.
- // The libjingle sources is stored in the extra data field of
- // WebMediaStreamSource.
- // |audio_constraints| and |video_constraints| set parameters for the sources.
- void CreateNativeMediaSources(
- int render_view_id,
- const blink::WebMediaConstraints& audio_constraints,
- const blink::WebMediaConstraints& video_constraints,
- blink::WebMediaStream* web_stream,
- const MediaSourcesCreatedCallback& sources_created);
-
- // Creates a libjingle representation of a MediaStream and stores
- // it in the extra data field of |web_stream|.
- void CreateNativeLocalMediaStream(
- blink::WebMediaStream* web_stream);
-
- // Creates a libjingle representation of a MediaStream and stores
- // it in the extra data field of |web_stream|.
- // |stream_stopped| is a callback that is run when a MediaStream have been
- // stopped.
- void CreateNativeLocalMediaStream(
- blink::WebMediaStream* web_stream,
- const MediaStreamExtraData::StreamStopCallback& stream_stop);
-
- // Creates a libjingle representation of a MediaStreamTrack and stores
- // it in the extra data field of |track|.
- void CreateNativeMediaStreamTrack(const blink::WebMediaStreamTrack& track);
-
- // Adds a libjingle representation of a MediaStreamTrack to |stream| based
- // on the source of |track|.
- bool AddNativeMediaStreamTrack(const blink::WebMediaStream& stream,
- const blink::WebMediaStreamTrack& track);
-
- // Creates and adds libjingle representation of a MediaStreamTrack to |stream|
- // based on the desired |track_id| and |capturer|.
- bool AddNativeVideoMediaTrack(const std::string& track_id,
- blink::WebMediaStream* stream,
- cricket::VideoCapturer* capturer);
-
- bool RemoveNativeMediaStreamTrack(const blink::WebMediaStream& stream,
- const blink::WebMediaStreamTrack& track);
-
- // Asks the libjingle PeerConnection factory to create a libjingle
- // PeerConnection object.
- // The PeerConnection object is owned by PeerConnectionHandler.
- virtual scoped_refptr<webrtc::PeerConnectionInterface>
- CreatePeerConnection(
- const webrtc::PeerConnectionInterface::IceServers& ice_servers,
- const webrtc::MediaConstraintsInterface* constraints,
- blink::WebFrame* web_frame,
- webrtc::PeerConnectionObserver* observer);
-
- // Creates a libjingle representation of a Session description. Used by a
- // RTCPeerConnectionHandler instance.
- virtual webrtc::SessionDescriptionInterface* CreateSessionDescription(
- const std::string& type,
- const std::string& sdp,
- webrtc::SdpParseError* error);
-
- // Creates a libjingle representation of an ice candidate.
- virtual webrtc::IceCandidateInterface* CreateIceCandidate(
- const std::string& sdp_mid,
- int sdp_mline_index,
- const std::string& sdp);
-
- WebRtcAudioDeviceImpl* GetWebRtcAudioDevice();
-
-#if defined(GOOGLE_TV)
- RTCVideoDecoderFactoryTv* decoder_factory_tv() { return decoder_factory_tv_; }
-#endif
-
- static void AddNativeTrackToBlinkTrack(
- webrtc::MediaStreamTrackInterface* native_track,
- const blink::WebMediaStreamTrack& webkit_track,
- bool is_local_track);
-
- static webrtc::MediaStreamInterface* GetNativeMediaStream(
- const blink::WebMediaStream& stream);
-
- static webrtc::MediaStreamTrackInterface* GetNativeMediaStreamTrack(
- const blink::WebMediaStreamTrack& track);
-
- protected:
- // Asks the PeerConnection factory to create a Local MediaStream object.
- virtual scoped_refptr<webrtc::MediaStreamInterface>
- CreateLocalMediaStream(const std::string& label);
-
- // Asks the PeerConnection factory to create a Local Audio Source.
- virtual scoped_refptr<webrtc::AudioSourceInterface>
- CreateLocalAudioSource(
- const webrtc::MediaConstraintsInterface* constraints);
-
- // Asks the PeerConnection factory to create a Local Video Source.
- virtual scoped_refptr<webrtc::VideoSourceInterface>
- CreateLocalVideoSource(
- int video_session_id,
- bool is_screen_cast,
- const webrtc::MediaConstraintsInterface* constraints);
-
- // Creates a media::AudioCapturerSource with an implementation that is
- // specific for a WebAudio source. The created WebAudioCapturerSource
- // instance will function as audio source instead of the default
- // WebRtcAudioCapturer.
- // The |constraints| will be modified to include the default, mandatory
- // WebAudio constraints.
- virtual scoped_refptr<WebAudioCapturerSource> CreateWebAudioSource(
- blink::WebMediaStreamSource* source, RTCMediaConstraints* constraints);
-
- // Asks the PeerConnection factory to create a Local AudioTrack object.
- virtual scoped_refptr<webrtc::AudioTrackInterface>
- CreateLocalAudioTrack(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* source,
- const webrtc::MediaConstraintsInterface* constraints);
-
- // Asks the PeerConnection factory to create a Local VideoTrack object.
- virtual scoped_refptr<webrtc::VideoTrackInterface>
- CreateLocalVideoTrack(const std::string& id,
- webrtc::VideoSourceInterface* source);
-
- // Asks the PeerConnection factory to create a Local VideoTrack object with
- // the video source using |capturer|.
- virtual scoped_refptr<webrtc::VideoTrackInterface>
- CreateLocalVideoTrack(const std::string& id,
- cricket::VideoCapturer* capturer);
-
- virtual bool EnsurePeerConnectionFactory();
- virtual bool PeerConnectionFactoryCreated();
-
- // Returns a new capturer or existing capturer based on the |render_view_id|
- // and |device_info|. When the |render_view_id| and |device_info| are valid,
- // it reuses existing capture if any; otherwise it creates a new capturer.
- virtual scoped_refptr<WebRtcAudioCapturer> MaybeCreateAudioCapturer(
- int render_view_id, const StreamDeviceInfo& device_info);
-
- private:
- // Creates and deletes |pc_factory_|, which in turn is used for
- // creating PeerConnection objects.
- bool CreatePeerConnectionFactory();
-
- void InitializeWorkerThread(talk_base::Thread** thread,
- base::WaitableEvent* event);
-
- void CreateIpcNetworkManagerOnWorkerThread(base::WaitableEvent* event);
- void DeleteIpcNetworkManager();
- void CleanupPeerConnectionFactory();
-
- scoped_refptr<webrtc::AudioTrackInterface>
- CreateNativeAudioMediaStreamTrack(const blink::WebMediaStreamTrack& track);
-
- scoped_refptr<webrtc::VideoTrackInterface>
- CreateNativeVideoMediaStreamTrack(const blink::WebMediaStreamTrack& track);
-
- // We own network_manager_, must be deleted on the worker thread.
- // The network manager uses |p2p_socket_dispatcher_|.
- IpcNetworkManager* network_manager_;
- scoped_ptr<IpcPacketSocketFactory> socket_factory_;
-
- scoped_refptr<webrtc::PeerConnectionFactoryInterface> pc_factory_;
-
-#if defined(GOOGLE_TV)
- // |pc_factory_| will hold the ownership of this object, and |pc_factory_|
- // outlives this object. Thus weak pointer is sufficient.
- RTCVideoDecoderFactoryTv* decoder_factory_tv_;
-#endif
-
- scoped_refptr<VideoCaptureImplManager> vc_manager_;
- scoped_refptr<P2PSocketDispatcher> p2p_socket_dispatcher_;
- scoped_refptr<WebRtcAudioDeviceImpl> audio_device_;
-
- // PeerConnection threads. signaling_thread_ is created from the
- // "current" chrome thread.
- talk_base::Thread* signaling_thread_;
- talk_base::Thread* worker_thread_;
- base::Thread chrome_worker_thread_;
-
- DISALLOW_COPY_AND_ASSIGN(MediaStreamDependencyFactory);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_DEPENDENCY_FACTORY_H_
diff --git a/chromium/content/renderer/media/media_stream_dependency_factory_unittest.cc b/chromium/content/renderer/media/media_stream_dependency_factory_unittest.cc
deleted file mode 100644
index 352fa6ea0ee..00000000000
--- a/chromium/content/renderer/media/media_stream_dependency_factory_unittest.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-#include "content/common/media/media_stream_options.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "content/renderer/media/media_stream_source_extra_data.h"
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
-#include "content/renderer/media/mock_web_rtc_peer_connection_handler_client.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
-#include "third_party/WebKit/public/platform/WebMediaStream.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/WebKit/public/platform/WebRTCPeerConnectionHandler.h"
-#include "third_party/WebKit/public/platform/WebVector.h"
-#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
-
-namespace content {
-
-class MediaSourceCreatedObserver {
- public:
- MediaSourceCreatedObserver()
- : result_(false),
- description_(NULL) {
- }
-
- void OnCreateNativeSourcesComplete(
- blink::WebMediaStream* description,
- bool request_succeeded) {
- result_ = request_succeeded;
- description_ = description;
- }
-
- blink::WebMediaStream* description() const {
- return description_;
- }
- bool result() const { return result_; }
-
- private:
- bool result_;
- blink::WebMediaStream* description_;
-};
-
-class MediaStreamDependencyFactoryTest : public ::testing::Test {
- public:
- virtual void SetUp() {
- dependency_factory_.reset(new MockMediaStreamDependencyFactory());
- }
-
- blink::WebMediaStream CreateWebKitMediaStream(bool audio, bool video) {
- blink::WebVector<blink::WebMediaStreamSource> audio_sources(
- audio ? static_cast<size_t>(1) : 0);
- blink::WebVector<blink::WebMediaStreamSource> video_sources(
- video ? static_cast<size_t>(1) : 0);
- MediaStreamSourceExtraData::SourceStopCallback dummy_callback;
-
- if (audio) {
- StreamDeviceInfo info;
- info.device.type = content::MEDIA_DEVICE_AUDIO_CAPTURE;
- info.device.name = "audio";
- info.session_id = 99;
- audio_sources[0].initialize("audio",
- blink::WebMediaStreamSource::TypeAudio,
- "audio");
- audio_sources[0].setExtraData(
- new MediaStreamSourceExtraData(info, dummy_callback));
- audio_sources_.assign(audio_sources);
- }
- if (video) {
- StreamDeviceInfo info;
- info.device.type = content::MEDIA_DEVICE_VIDEO_CAPTURE;
- info.device.name = "video";
- info.session_id = 98;
- video_sources[0].initialize("video",
- blink::WebMediaStreamSource::TypeVideo,
- "video");
- video_sources[0].setExtraData(
- new MediaStreamSourceExtraData(info, dummy_callback));
- video_sources_.assign(video_sources);
- }
- blink::WebMediaStream stream_desc;
- blink::WebVector<blink::WebMediaStreamTrack> audio_track_vector(
- audio_sources.size());
- for (size_t i = 0; i < audio_track_vector.size(); ++i) {
- audio_track_vector[i].initialize(audio_sources[i].id(),
- audio_sources[i]);
- }
-
- blink::WebVector<blink::WebMediaStreamTrack> video_track_vector(
- video_sources.size());
- for (size_t i = 0; i < video_track_vector.size(); ++i) {
- video_track_vector[i].initialize(video_sources[i].id(),
- video_sources[i]);
- }
-
- stream_desc.initialize("media stream", audio_track_vector,
- video_track_vector);
- return stream_desc;
- }
-
- void CreateNativeSources(blink::WebMediaStream* descriptor) {
- static const int kRenderViewId = 1;
-
- MediaSourceCreatedObserver observer;
- blink::WebMediaConstraints audio_constraints;
- dependency_factory_->CreateNativeMediaSources(
- kRenderViewId,
- blink::WebMediaConstraints(),
- blink::WebMediaConstraints(),
- descriptor,
- base::Bind(
- &MediaSourceCreatedObserver::OnCreateNativeSourcesComplete,
- base::Unretained(&observer)));
-
- EXPECT_FALSE(observer.result());
- // Change the state of the created source to live. This should trigger
- // MediaSourceCreatedObserver::OnCreateNativeSourcesComplete
- if (dependency_factory_->last_video_source()) {
- dependency_factory_->last_audio_source()->SetLive();
- dependency_factory_->last_video_source()->SetLive();
- }
- EXPECT_TRUE(observer.result());
- EXPECT_TRUE(observer.description() == descriptor);
- }
-
- void VerifyMediaStream(const blink::WebMediaStream& stream_desc,
- size_t num_audio_tracks,
- size_t num_video_tracks) {
- content::MediaStreamExtraData* extra_data =
- static_cast<content::MediaStreamExtraData*>(stream_desc.extraData());
- ASSERT_TRUE(extra_data && extra_data->stream().get());
- EXPECT_TRUE(extra_data->is_local());
- EXPECT_EQ(num_audio_tracks, extra_data->stream()->GetAudioTracks().size());
- EXPECT_EQ(num_video_tracks, extra_data->stream()->GetVideoTracks().size());
- }
-
- protected:
- scoped_ptr<MockMediaStreamDependencyFactory> dependency_factory_;
- blink::WebVector<blink::WebMediaStreamSource> audio_sources_;
- blink::WebVector<blink::WebMediaStreamSource> video_sources_;
-};
-
-TEST_F(MediaStreamDependencyFactoryTest, CreateRTCPeerConnectionHandler) {
- MockWebRTCPeerConnectionHandlerClient client_jsep;
- scoped_ptr<blink::WebRTCPeerConnectionHandler> pc_handler(
- dependency_factory_->CreateRTCPeerConnectionHandler(&client_jsep));
- EXPECT_TRUE(pc_handler.get() != NULL);
-}
-
-TEST_F(MediaStreamDependencyFactoryTest, CreateNativeMediaStream) {
- blink::WebMediaStream stream_desc = CreateWebKitMediaStream(true, true);
- CreateNativeSources(&stream_desc);
-
- dependency_factory_->CreateNativeLocalMediaStream(&stream_desc);
- VerifyMediaStream(stream_desc, 1, 1);
-}
-
-// Test that we don't crash if a MediaStream is created in WebKit with unknown
-// sources. This can for example happen if a MediaStream is created with
-// remote tracks.
-TEST_F(MediaStreamDependencyFactoryTest, CreateNativeMediaStreamWithoutSource) {
- // Create a WebKit MediaStream description.
- blink::WebMediaStreamSource audio_source;
- audio_source.initialize("audio source",
- blink::WebMediaStreamSource::TypeAudio,
- "something");
- blink::WebMediaStreamSource video_source;
- video_source.initialize("video source",
- blink::WebMediaStreamSource::TypeVideo,
- "something");
-
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks(
- static_cast<size_t>(1));
- audio_tracks[0].initialize(audio_source.id(), audio_source);
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks(
- static_cast<size_t>(1));
- video_tracks[0].initialize(video_source.id(), video_source);
-
- blink::WebMediaStream stream_desc;
- stream_desc.initialize("new stream", audio_tracks, video_tracks);
-
- EXPECT_TRUE(dependency_factory_->EnsurePeerConnectionFactory());
- dependency_factory_->CreateNativeLocalMediaStream(&stream_desc);
- VerifyMediaStream(stream_desc, 0, 0);
-}
-
-TEST_F(MediaStreamDependencyFactoryTest, AddAndRemoveNativeTrack) {
- blink::WebMediaStream stream_desc = CreateWebKitMediaStream(true, true);
- CreateNativeSources(&stream_desc);
-
- dependency_factory_->CreateNativeLocalMediaStream(&stream_desc);
- VerifyMediaStream(stream_desc, 1, 1);
-
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
- stream_desc.audioTracks(audio_tracks);
- EXPECT_TRUE(dependency_factory_->RemoveNativeMediaStreamTrack(
- stream_desc, audio_tracks[0]));
- VerifyMediaStream(stream_desc, 0, 1);
-
- EXPECT_TRUE(dependency_factory_->AddNativeMediaStreamTrack(
- stream_desc, audio_tracks[0]));
- VerifyMediaStream(stream_desc, 1, 1);
-
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- stream_desc.videoTracks(video_tracks);
- EXPECT_TRUE(dependency_factory_->RemoveNativeMediaStreamTrack(
- stream_desc, video_tracks[0]));
- VerifyMediaStream(stream_desc, 1, 0);
-
- EXPECT_TRUE(dependency_factory_->AddNativeMediaStreamTrack(
- stream_desc, video_tracks[0]));
- VerifyMediaStream(stream_desc, 1, 1);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_dispatcher.cc b/chromium/content/renderer/media/media_stream_dispatcher.cc
index 6890ffc5d6f..2f93d53de9a 100644
--- a/chromium/content/renderer/media/media_stream_dispatcher.cc
+++ b/chromium/content/renderer/media/media_stream_dispatcher.cc
@@ -10,6 +10,7 @@
#include "content/renderer/media/media_stream_dispatcher_eventhandler.h"
#include "content/renderer/render_thread_impl.h"
#include "content/renderer/render_view_impl.h"
+#include "third_party/WebKit/public/web/WebUserGestureIndicator.h"
#include "url/gurl.h"
namespace content {
@@ -78,10 +79,9 @@ void MediaStreamDispatcher::GenerateStream(
DVLOG(1) << "MediaStreamDispatcher::GenerateStream(" << request_id << ")";
requests_.push_back(Request(event_handler, request_id, next_ipc_id_));
- Send(new MediaStreamHostMsg_GenerateStream(routing_id(),
- next_ipc_id_++,
- components,
- security_origin));
+ Send(new MediaStreamHostMsg_GenerateStream(
+ routing_id(), next_ipc_id_++, components, security_origin,
+ blink::WebUserGestureIndicator::isProcessingUserGesture()));
}
void MediaStreamDispatcher::CancelGenerateStream(
@@ -134,10 +134,12 @@ void MediaStreamDispatcher::EnumerateDevices(
int request_id,
const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler,
MediaStreamType type,
- const GURL& security_origin) {
+ const GURL& security_origin,
+ bool hide_labels_if_no_access) {
DCHECK(main_loop_->BelongsToCurrentThread());
DCHECK(type == MEDIA_DEVICE_AUDIO_CAPTURE ||
- type == MEDIA_DEVICE_VIDEO_CAPTURE);
+ type == MEDIA_DEVICE_VIDEO_CAPTURE ||
+ type == MEDIA_DEVICE_AUDIO_OUTPUT);
DVLOG(1) << "MediaStreamDispatcher::EnumerateDevices("
<< request_id << ")";
@@ -150,7 +152,8 @@ void MediaStreamDispatcher::EnumerateDevices(
Send(new MediaStreamHostMsg_EnumerateDevices(routing_id(),
next_ipc_id_++,
type,
- security_origin));
+ security_origin,
+ hide_labels_if_no_access));
}
void MediaStreamDispatcher::StopEnumerateDevices(
@@ -264,14 +267,16 @@ void MediaStreamDispatcher::OnStreamGenerated(
}
}
-void MediaStreamDispatcher::OnStreamGenerationFailed(int request_id) {
+void MediaStreamDispatcher::OnStreamGenerationFailed(
+ int request_id,
+ content::MediaStreamRequestResult result) {
DCHECK(main_loop_->BelongsToCurrentThread());
for (RequestList::iterator it = requests_.begin();
it != requests_.end(); ++it) {
Request& request = *it;
if (request.ipc_request == request_id) {
if (request.handler.get()) {
- request.handler->OnStreamGenerationFailed(request.request_id);
+ request.handler->OnStreamGenerationFailed(request.request_id, result);
DVLOG(1) << "MediaStreamDispatcher::OnStreamGenerationFailed("
<< request.request_id << ")\n";
}
@@ -296,7 +301,7 @@ void MediaStreamDispatcher::OnDeviceStopped(
return;
}
Stream* stream = &it->second;
- if (IsAudioMediaType(device_info.device.type))
+ if (IsAudioInputMediaType(device_info.device.type))
RemoveStreamDeviceFromArray(device_info, &stream->audio_array);
else
RemoveStreamDeviceFromArray(device_info, &stream->video_array);
@@ -334,7 +339,7 @@ void MediaStreamDispatcher::OnDeviceOpened(
if (request.ipc_request == request_id) {
Stream new_stream;
new_stream.handler = request.handler;
- if (IsAudioMediaType(device_info.device.type)) {
+ if (IsAudioInputMediaType(device_info.device.type)) {
new_stream.audio_array.push_back(device_info);
} else if (IsVideoMediaType(device_info.device.type)) {
new_stream.video_array.push_back(device_info);
diff --git a/chromium/content/renderer/media/media_stream_dispatcher.h b/chromium/content/renderer/media/media_stream_dispatcher.h
index 89d7c0daafa..b7c666080fa 100644
--- a/chromium/content/renderer/media/media_stream_dispatcher.h
+++ b/chromium/content/renderer/media/media_stream_dispatcher.h
@@ -56,11 +56,15 @@ class CONTENT_EXPORT MediaStreamDispatcher
virtual void StopStreamDevice(const StreamDeviceInfo& device_info);
// Request to enumerate devices.
- void EnumerateDevices(
+ // If |hide_labels_if_no_access| is true, labels will be empty in the
+ // response if permission has not been granted for the device type. This
+ // should normally be true.
+ virtual void EnumerateDevices(
int request_id,
const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler,
MediaStreamType type,
- const GURL& security_origin);
+ const GURL& security_origin,
+ bool hide_labels_if_no_access);
// Request to stop enumerating devices.
void StopEnumerateDevices(
@@ -115,13 +119,14 @@ class CONTENT_EXPORT MediaStreamDispatcher
const std::string& label,
const StreamDeviceInfoArray& audio_array,
const StreamDeviceInfoArray& video_array);
- void OnStreamGenerationFailed(int request_id);
+ void OnStreamGenerationFailed(
+ int request_id,
+ content::MediaStreamRequestResult result);
void OnDeviceStopped(const std::string& label,
const StreamDeviceInfo& device_info);
void OnDevicesEnumerated(
int request_id,
const StreamDeviceInfoArray& device_array);
- void OnDevicesEnumerationFailed(int request_id);
void OnDeviceOpened(
int request_id,
const std::string& label,
diff --git a/chromium/content/renderer/media/media_stream_dispatcher_eventhandler.h b/chromium/content/renderer/media/media_stream_dispatcher_eventhandler.h
index e83f2952560..f5595ac247c 100644
--- a/chromium/content/renderer/media/media_stream_dispatcher_eventhandler.h
+++ b/chromium/content/renderer/media/media_stream_dispatcher_eventhandler.h
@@ -23,7 +23,9 @@ class CONTENT_EXPORT MediaStreamDispatcherEventHandler {
// Creation of a new media stream failed. The user might have denied access
// to the requested devices or no device is available.
- virtual void OnStreamGenerationFailed(int request_id) = 0;
+ virtual void OnStreamGenerationFailed(
+ int request_id,
+ content::MediaStreamRequestResult result) = 0;
// A device has been stopped in the browser processes.
virtual void OnDeviceStopped(
diff --git a/chromium/content/renderer/media/media_stream_dispatcher_unittest.cc b/chromium/content/renderer/media/media_stream_dispatcher_unittest.cc
index 635d22fa04c..7637e4f968b 100644
--- a/chromium/content/renderer/media/media_stream_dispatcher_unittest.cc
+++ b/chromium/content/renderer/media/media_stream_dispatcher_unittest.cc
@@ -53,7 +53,9 @@ class MockMediaStreamDispatcherEventHandler
}
}
- virtual void OnStreamGenerationFailed(int request_id) OVERRIDE {
+ virtual void OnStreamGenerationFailed(
+ int request_id,
+ content::MediaStreamRequestResult result) OVERRIDE {
request_id_ = request_id;
}
@@ -63,7 +65,7 @@ class MockMediaStreamDispatcherEventHandler
if (IsVideoMediaType(device_info.device.type)) {
EXPECT_TRUE(StreamDeviceInfo::IsEqual(video_device_, device_info));
}
- if (IsAudioMediaType(device_info.device.type)) {
+ if (IsAudioInputMediaType(device_info.device.type)) {
EXPECT_TRUE(StreamDeviceInfo::IsEqual(audio_device_, device_info));
}
}
@@ -224,13 +226,15 @@ TEST_F(MediaStreamDispatcherTest, BasicVideoDevice) {
dispatcher->EnumerateDevices(
kRequestId1, handler1.get()->AsWeakPtr(),
kVideoType,
- security_origin);
+ security_origin,
+ false);
int ipc_request_id2 = dispatcher->next_ipc_id_;
EXPECT_NE(ipc_request_id1, ipc_request_id2);
dispatcher->EnumerateDevices(
kRequestId2, handler2.get()->AsWeakPtr(),
kVideoType,
- security_origin);
+ security_origin,
+ false);
EXPECT_EQ(dispatcher->requests_.size(), size_t(2));
StreamDeviceInfoArray video_device_array(1);
@@ -313,7 +317,7 @@ TEST_F(MediaStreamDispatcherTest, TestFailure) {
dispatcher->GenerateStream(kRequestId1, handler.get()->AsWeakPtr(),
components, security_origin);
dispatcher->OnMessageReceived(MediaStreamMsg_StreamGenerationFailed(
- kRouteId, ipc_request_id1));
+ kRouteId, ipc_request_id1, MEDIA_DEVICE_PERMISSION_DENIED));
// Verify that the request have been completed.
EXPECT_EQ(handler->request_id_, kRequestId1);
diff --git a/chromium/content/renderer/media/media_stream_extra_data.h b/chromium/content/renderer/media/media_stream_extra_data.h
deleted file mode 100644
index 63bfca4a139..00000000000
--- a/chromium/content/renderer/media/media_stream_extra_data.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_EXTRA_DATA_H_
-#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_EXTRA_DATA_H_
-
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "content/common/content_export.h"
-#include "third_party/WebKit/public/platform/WebMediaStream.h"
-
-namespace webrtc {
-class MediaStreamInterface;
-} // namespace webrtc
-
-namespace content {
-
-class CONTENT_EXPORT MediaStreamExtraData
- : NON_EXPORTED_BASE(public blink::WebMediaStream::ExtraData) {
- public:
- typedef base::Callback<void(const std::string& label)> StreamStopCallback;
-
- MediaStreamExtraData(webrtc::MediaStreamInterface* stream, bool is_local);
- virtual ~MediaStreamExtraData();
-
- bool is_local() const { return is_local_; }
-
- void SetLocalStreamStopCallback(
- const StreamStopCallback& stop_callback);
- void OnLocalStreamStop();
-
- const scoped_refptr<webrtc::MediaStreamInterface>& stream() const {
- return stream_;
- }
- private:
- StreamStopCallback stream_stop_callback_;
- scoped_refptr<webrtc::MediaStreamInterface> stream_;
- bool is_local_;
-
- DISALLOW_COPY_AND_ASSIGN(MediaStreamExtraData);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_EXTRA_DATA_H_
diff --git a/chromium/content/renderer/media/media_stream_impl.cc b/chromium/content/renderer/media/media_stream_impl.cc
index bc720205d8e..79f86a83217 100644
--- a/chromium/content/renderer/media/media_stream_impl.cc
+++ b/chromium/content/renderer/media/media_stream_impl.cc
@@ -6,28 +6,28 @@
#include <utility>
+#include "base/hash.h"
#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_audio_renderer.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_audio_source.h"
#include "content/renderer/media/media_stream_dispatcher.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "content/renderer/media/media_stream_source_extra_data.h"
-#include "content/renderer/media/rtc_video_renderer.h"
+#include "content/renderer/media/media_stream_video_capturer_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/peer_connection_tracker.h"
+#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_audio_renderer.h"
-#include "content/renderer/media/webrtc_local_audio_renderer.h"
#include "content/renderer/media/webrtc_logging.h"
#include "content/renderer/media/webrtc_uma_histograms.h"
#include "content/renderer/render_thread_impl.h"
-#include "media/base/audio_hardware_config.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/WebKit/public/platform/WebMediaDeviceInfo.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/WebKit/public/web/WebDocument.h"
-#include "third_party/WebKit/public/web/WebFrame.h"
-#include "third_party/WebKit/public/web/WebMediaStreamRegistry.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
namespace content {
namespace {
@@ -54,42 +54,37 @@ void CopyStreamConstraints(const blink::WebMediaConstraints& constraints,
static int g_next_request_id = 0;
-webrtc::MediaStreamInterface* GetNativeMediaStream(
- const blink::WebMediaStream& web_stream) {
- content::MediaStreamExtraData* extra_data =
- static_cast<content::MediaStreamExtraData*>(web_stream.extraData());
- if (!extra_data)
- return NULL;
- return extra_data->stream().get();
-}
-
-void GetDefaultOutputDeviceParams(
- int* output_sample_rate, int* output_buffer_size) {
- // Fetch the default audio output hardware config.
- media::AudioHardwareConfig* hardware_config =
- RenderThreadImpl::current()->GetAudioHardwareConfig();
- *output_sample_rate = hardware_config->GetOutputSampleRate();
- *output_buffer_size = hardware_config->GetOutputBufferSize();
-}
-
-void RemoveSource(const blink::WebMediaStreamSource& source,
- std::vector<blink::WebMediaStreamSource>* sources) {
- for (std::vector<blink::WebMediaStreamSource>::iterator it =
- sources->begin();
- it != sources->end(); ++it) {
- if (source.id() == it->id()) {
- sources->erase(it);
- return;
- }
- }
-}
-
} // namespace
+struct MediaStreamImpl::MediaDevicesRequestInfo {
+ MediaDevicesRequestInfo(const blink::WebMediaDevicesRequest& request,
+ int audio_input_request_id,
+ int video_input_request_id,
+ int audio_output_request_id)
+ : request(request),
+ audio_input_request_id(audio_input_request_id),
+ video_input_request_id(video_input_request_id),
+ audio_output_request_id(audio_output_request_id),
+ has_audio_input_returned(false),
+ has_video_input_returned(false),
+ has_audio_output_returned(false) {}
+
+ blink::WebMediaDevicesRequest request;
+ int audio_input_request_id;
+ int video_input_request_id;
+ int audio_output_request_id;
+ bool has_audio_input_returned;
+ bool has_video_input_returned;
+ bool has_audio_output_returned;
+ StreamDeviceInfoArray audio_input_devices;
+ StreamDeviceInfoArray video_input_devices;
+ StreamDeviceInfoArray audio_output_devices;
+};
+
MediaStreamImpl::MediaStreamImpl(
RenderView* render_view,
MediaStreamDispatcher* media_stream_dispatcher,
- MediaStreamDependencyFactory* dependency_factory)
+ PeerConnectionDependencyFactory* dependency_factory)
: RenderViewObserver(render_view),
dependency_factory_(dependency_factory),
media_stream_dispatcher_(media_stream_dispatcher) {
@@ -105,9 +100,15 @@ void MediaStreamImpl::requestUserMedia(
// webGetUserMedia.
UpdateWebRTCMethodCount(WEBKIT_GET_USER_MEDIA);
DCHECK(CalledOnValidThread());
+
+ if (RenderThreadImpl::current()) {
+ RenderThreadImpl::current()->peer_connection_tracker()->TrackGetUserMedia(
+ user_media_request);
+ }
+
int request_id = g_next_request_id++;
StreamOptions options;
- blink::WebFrame* frame = NULL;
+ blink::WebLocalFrame* frame = NULL;
GURL security_origin;
bool enable_automatic_output_device_selection = false;
@@ -197,89 +198,73 @@ void MediaStreamImpl::cancelUserMediaRequest(
}
}
-blink::WebMediaStream MediaStreamImpl::GetMediaStream(
- const GURL& url) {
- return blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(url);
-}
-
-bool MediaStreamImpl::IsMediaStream(const GURL& url) {
- blink::WebMediaStream web_stream(
- blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(url));
+void MediaStreamImpl::requestMediaDevices(
+ const blink::WebMediaDevicesRequest& media_devices_request) {
+ UpdateWebRTCMethodCount(WEBKIT_GET_MEDIA_DEVICES);
+ DCHECK(CalledOnValidThread());
- if (web_stream.isNull() || !web_stream.extraData())
- return false; // This is not a valid stream.
+ int audio_input_request_id = g_next_request_id++;
+ int video_input_request_id = g_next_request_id++;
+ int audio_output_request_id = g_next_request_id++;
- webrtc::MediaStreamInterface* stream = GetNativeMediaStream(web_stream);
- return (stream &&
- (!stream->GetVideoTracks().empty() || !stream->GetAudioTracks().empty()));
-}
+ // |media_devices_request| can't be mocked, so in tests it will be empty (the
+ // underlying pointer is null). In order to use this function in a test we
+ // need to check if it isNull.
+ GURL security_origin;
+ if (!media_devices_request.isNull())
+ security_origin = GURL(media_devices_request.securityOrigin().toString());
-scoped_refptr<VideoFrameProvider>
-MediaStreamImpl::GetVideoFrameProvider(
- const GURL& url,
- const base::Closure& error_cb,
- const VideoFrameProvider::RepaintCB& repaint_cb) {
- DCHECK(CalledOnValidThread());
- blink::WebMediaStream web_stream(GetMediaStream(url));
+ DVLOG(1) << "MediaStreamImpl::requestMediaDevices(" << audio_input_request_id
+ << ", " << video_input_request_id << ", " << audio_output_request_id
+ << ", " << security_origin.spec() << ")";
- if (web_stream.isNull() || !web_stream.extraData())
- return NULL; // This is not a valid stream.
+ media_devices_requests_.push_back(new MediaDevicesRequestInfo(
+ media_devices_request,
+ audio_input_request_id,
+ video_input_request_id,
+ audio_output_request_id));
- DVLOG(1) << "MediaStreamImpl::GetVideoFrameProvider stream:"
- << UTF16ToUTF8(web_stream.id());
+ media_stream_dispatcher_->EnumerateDevices(
+ audio_input_request_id,
+ AsWeakPtr(),
+ MEDIA_DEVICE_AUDIO_CAPTURE,
+ security_origin,
+ true);
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- web_stream.videoTracks(video_tracks);
- if (video_tracks.isEmpty())
- return NULL;
+ media_stream_dispatcher_->EnumerateDevices(
+ video_input_request_id,
+ AsWeakPtr(),
+ MEDIA_DEVICE_VIDEO_CAPTURE,
+ security_origin,
+ true);
- return new RTCVideoRenderer(video_tracks[0], error_cb, repaint_cb);
+ media_stream_dispatcher_->EnumerateDevices(
+ audio_output_request_id,
+ AsWeakPtr(),
+ MEDIA_DEVICE_AUDIO_OUTPUT,
+ security_origin,
+ true);
}
-scoped_refptr<MediaStreamAudioRenderer>
-MediaStreamImpl::GetAudioRenderer(const GURL& url) {
+void MediaStreamImpl::cancelMediaDevicesRequest(
+ const blink::WebMediaDevicesRequest& media_devices_request) {
DCHECK(CalledOnValidThread());
- blink::WebMediaStream web_stream(GetMediaStream(url));
-
- if (web_stream.isNull() || !web_stream.extraData())
- return NULL; // This is not a valid stream.
-
- DVLOG(1) << "MediaStreamImpl::GetAudioRenderer stream:"
- << UTF16ToUTF8(web_stream.id());
-
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(web_stream.extraData());
-
- if (extra_data->is_local()) {
- // Create the local audio renderer if the stream contains audio tracks.
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
- web_stream.audioTracks(audio_tracks);
- if (audio_tracks.isEmpty())
- return NULL;
-
- // TODO(xians): Add support for the case that the media stream contains
- // multiple audio tracks.
- return CreateLocalAudioRenderer(audio_tracks[0]);
- }
-
- webrtc::MediaStreamInterface* stream = extra_data->stream().get();
- if (!stream || stream->GetAudioTracks().empty())
- return NULL;
-
- // This is a remote media stream.
- WebRtcAudioDeviceImpl* audio_device =
- dependency_factory_->GetWebRtcAudioDevice();
-
- // Share the existing renderer if any, otherwise create a new one.
- scoped_refptr<WebRtcAudioRenderer> renderer(audio_device->renderer());
- if (!renderer.get()) {
- renderer = CreateRemoteAudioRenderer(extra_data->stream().get());
-
- if (renderer.get() && !audio_device->SetAudioRenderer(renderer.get()))
- renderer = NULL;
- }
+ MediaDevicesRequestInfo* request =
+ FindMediaDevicesRequestInfo(media_devices_request);
+ if (!request)
+ return;
- return renderer.get() ? renderer->CreateSharedAudioRendererProxy() : NULL;
+ // Cancel device enumeration.
+ media_stream_dispatcher_->StopEnumerateDevices(
+ request->audio_input_request_id,
+ AsWeakPtr());
+ media_stream_dispatcher_->StopEnumerateDevices(
+ request->video_input_request_id,
+ AsWeakPtr());
+ media_stream_dispatcher_->StopEnumerateDevices(
+ request->audio_output_request_id,
+ AsWeakPtr());
+ DeleteMediaDevicesRequestInfo(request);
}
// Callback from MediaStreamDispatcher.
@@ -314,78 +299,49 @@ void MediaStreamImpl::OnStreamGenerated(
}
request_info->generated = true;
- blink::WebVector<blink::WebMediaStreamSource> audio_source_vector(
- audio_array.size());
-
- // Log the device names for this request.
- for (StreamDeviceInfoArray::const_iterator it = audio_array.begin();
- it != audio_array.end(); ++it) {
- WebRtcLogMessage(base::StringPrintf(
- "Generated media stream for request id %d contains audio device name"
- " \"%s\"",
- request_id,
- it->device.name.c_str()));
- }
-
- StreamDeviceInfoArray overridden_audio_array = audio_array;
- if (!request_info->enable_automatic_output_device_selection) {
- // If the GetUserMedia request did not explicitly set the constraint
- // kMediaStreamRenderToAssociatedSink, the output device parameters must
- // be removed.
- for (StreamDeviceInfoArray::iterator it = overridden_audio_array.begin();
- it != overridden_audio_array.end(); ++it) {
- it->device.matched_output_device_id = "";
- it->device.matched_output = MediaStreamDevice::AudioDeviceParameters();
- }
- }
- CreateWebKitSourceVector(label, overridden_audio_array,
- blink::WebMediaStreamSource::TypeAudio,
- request_info->frame,
- audio_source_vector);
-
- blink::WebVector<blink::WebMediaStreamSource> video_source_vector(
- video_array.size());
- CreateWebKitSourceVector(label, video_array,
- blink::WebMediaStreamSource::TypeVideo,
- request_info->frame,
- video_source_vector);
+ // WebUserMediaRequest don't have an implementation in unit tests.
+ // Therefore we need to check for isNull here and initialize the
+ // constraints.
blink::WebUserMediaRequest* request = &(request_info->request);
- blink::WebString webkit_id = UTF8ToUTF16(label);
- blink::WebMediaStream* web_stream = &(request_info->web_stream);
+ blink::WebMediaConstraints audio_constraints;
+ blink::WebMediaConstraints video_constraints;
+ if (request->isNull()) {
+ audio_constraints.initialize();
+ video_constraints.initialize();
+ } else {
+ audio_constraints = request->audioConstraints();
+ video_constraints = request->videoConstraints();
+ }
blink::WebVector<blink::WebMediaStreamTrack> audio_track_vector(
audio_array.size());
- for (size_t i = 0; i < audio_track_vector.size(); ++i) {
- audio_track_vector[i].initialize(audio_source_vector[i]);
- request_info->sources.push_back(audio_source_vector[i]);
- }
+ CreateAudioTracks(audio_array, audio_constraints, &audio_track_vector,
+ request_info);
blink::WebVector<blink::WebMediaStreamTrack> video_track_vector(
video_array.size());
- for (size_t i = 0; i < video_track_vector.size(); ++i) {
- video_track_vector[i].initialize(video_source_vector[i]);
- request_info->sources.push_back(video_source_vector[i]);
- }
+ CreateVideoTracks(video_array, video_constraints, &video_track_vector,
+ request_info);
+
+ blink::WebString webkit_id = base::UTF8ToUTF16(label);
+ blink::WebMediaStream* web_stream = &(request_info->web_stream);
web_stream->initialize(webkit_id, audio_track_vector,
video_track_vector);
+ web_stream->setExtraData(
+ new MediaStream(
+ *web_stream));
- // WebUserMediaRequest don't have an implementation in unit tests.
- // Therefore we need to check for isNull here.
- blink::WebMediaConstraints audio_constraints = request->isNull() ?
- blink::WebMediaConstraints() : request->audioConstraints();
- blink::WebMediaConstraints video_constraints = request->isNull() ?
- blink::WebMediaConstraints() : request->videoConstraints();
-
- dependency_factory_->CreateNativeMediaSources(
- RenderViewObserver::routing_id(),
- audio_constraints, video_constraints, web_stream,
- base::Bind(&MediaStreamImpl::OnCreateNativeSourcesComplete, AsWeakPtr()));
+ // Wait for the tracks to be started successfully or to fail.
+ request_info->CallbackOnTracksStarted(
+ base::Bind(&MediaStreamImpl::OnCreateNativeTracksCompleted, AsWeakPtr()));
}
// Callback from MediaStreamDispatcher.
// The requested stream failed to be generated.
-void MediaStreamImpl::OnStreamGenerationFailed(int request_id) {
+void MediaStreamImpl::OnStreamGenerationFailed(
+ int request_id,
+ content::MediaStreamRequestResult result) {
DCHECK(CalledOnValidThread());
DVLOG(1) << "MediaStreamImpl::OnStreamGenerationFailed("
<< request_id << ")";
@@ -396,9 +352,8 @@ void MediaStreamImpl::OnStreamGenerationFailed(int request_id) {
DVLOG(1) << "Request ID not found";
return;
}
- CompleteGetUserMediaRequest(request_info->web_stream,
- &request_info->request,
- false);
+
+ GetUserMediaRequestFailed(&request_info->request, result);
DeleteUserMediaRequestInfo(request_info);
}
@@ -430,89 +385,219 @@ void MediaStreamImpl::OnDeviceStopped(
break;
}
}
+}
- // Remove the reference to this source from all |user_media_requests_|.
- // TODO(perkj): The below is not necessary once we don't need to support
- // MediaStream::Stop().
- UserMediaRequests::iterator it = user_media_requests_.begin();
- while (it != user_media_requests_.end()) {
- RemoveSource(source, &(*it)->sources);
- if ((*it)->sources.empty()) {
- it = user_media_requests_.erase(it);
- } else {
- ++it;
- }
+void MediaStreamImpl::InitializeSourceObject(
+ const StreamDeviceInfo& device,
+ blink::WebMediaStreamSource::Type type,
+ const blink::WebMediaConstraints& constraints,
+ blink::WebFrame* frame,
+ blink::WebMediaStreamSource* webkit_source) {
+ const blink::WebMediaStreamSource* existing_source =
+ FindLocalSource(device);
+ if (existing_source) {
+ *webkit_source = *existing_source;
+ DVLOG(1) << "Source already exist. Reusing source with id "
+ << webkit_source->id().utf8();
+ return;
+ }
+
+ webkit_source->initialize(
+ base::UTF8ToUTF16(device.device.id),
+ type,
+ base::UTF8ToUTF16(device.device.name));
+
+ DVLOG(1) << "Initialize source object :"
+ << "id = " << webkit_source->id().utf8()
+ << ", name = " << webkit_source->name().utf8();
+
+ if (type == blink::WebMediaStreamSource::TypeVideo) {
+ webkit_source->setExtraData(
+ CreateVideoSource(
+ device,
+ base::Bind(&MediaStreamImpl::OnLocalSourceStopped, AsWeakPtr())));
+ } else {
+ DCHECK_EQ(blink::WebMediaStreamSource::TypeAudio, type);
+ MediaStreamAudioSource* audio_source(
+ new MediaStreamAudioSource(
+ RenderViewObserver::routing_id(),
+ device,
+ base::Bind(&MediaStreamImpl::OnLocalSourceStopped, AsWeakPtr()),
+ dependency_factory_));
+ webkit_source->setExtraData(audio_source);
}
+ local_sources_.push_back(LocalStreamSource(frame, *webkit_source));
}
-void MediaStreamImpl::CreateWebKitSourceVector(
- const std::string& label,
+MediaStreamVideoSource* MediaStreamImpl::CreateVideoSource(
+ const StreamDeviceInfo& device,
+ const MediaStreamSource::SourceStoppedCallback& stop_callback) {
+ return new content::MediaStreamVideoCapturerSource(
+ device,
+ stop_callback,
+ new VideoCapturerDelegate(device));
+}
+
+void MediaStreamImpl::CreateVideoTracks(
const StreamDeviceInfoArray& devices,
- blink::WebMediaStreamSource::Type type,
- blink::WebFrame* frame,
- blink::WebVector<blink::WebMediaStreamSource>& webkit_sources) {
- CHECK_EQ(devices.size(), webkit_sources.size());
+ const blink::WebMediaConstraints& constraints,
+ blink::WebVector<blink::WebMediaStreamTrack>* webkit_tracks,
+ UserMediaRequestInfo* request) {
+ DCHECK_EQ(devices.size(), webkit_tracks->size());
+
for (size_t i = 0; i < devices.size(); ++i) {
- const blink::WebMediaStreamSource* existing_source =
- FindLocalSource(devices[i]);
- if (existing_source) {
- webkit_sources[i] = *existing_source;
- DVLOG(1) << "Source already exist. Reusing source with id "
- << webkit_sources[i]. id().utf8();
- continue;
- }
- webkit_sources[i].initialize(
- UTF8ToUTF16(devices[i].device.id),
- type,
- UTF8ToUTF16(devices[i].device.name));
- MediaStreamSourceExtraData* source_extra_data(
- new content::MediaStreamSourceExtraData(
- devices[i],
- base::Bind(&MediaStreamImpl::OnLocalSourceStop, AsWeakPtr())));
- // |source_extra_data| is owned by webkit_sources[i].
- webkit_sources[i].setExtraData(source_extra_data);
- local_sources_.push_back(LocalStreamSource(frame, webkit_sources[i]));
- }
-}
-
-// Callback from MediaStreamDependencyFactory when the sources in |web_stream|
-// have been generated.
-void MediaStreamImpl::OnCreateNativeSourcesComplete(
- blink::WebMediaStream* web_stream,
- bool request_succeeded) {
- UserMediaRequestInfo* request_info = FindUserMediaRequestInfo(web_stream);
- if (!request_info) {
- // This can happen if the request is canceled or the frame reloads while
- // MediaStreamDependencyFactory is creating the sources.
- DVLOG(1) << "Request ID not found";
- return;
+ blink::WebMediaStreamSource webkit_source;
+ InitializeSourceObject(devices[i],
+ blink::WebMediaStreamSource::TypeVideo,
+ constraints,
+ request->frame,
+ &webkit_source);
+ (*webkit_tracks)[i] =
+ request->CreateAndStartVideoTrack(webkit_source, constraints);
}
+}
+
+void MediaStreamImpl::CreateAudioTracks(
+ const StreamDeviceInfoArray& devices,
+ const blink::WebMediaConstraints& constraints,
+ blink::WebVector<blink::WebMediaStreamTrack>* webkit_tracks,
+ UserMediaRequestInfo* request) {
+ DCHECK_EQ(devices.size(), webkit_tracks->size());
- // Create a native representation of the stream.
- if (request_succeeded) {
- dependency_factory_->CreateNativeLocalMediaStream(
- web_stream,
- base::Bind(&MediaStreamImpl::OnLocalMediaStreamStop, AsWeakPtr()));
+ // Log the device names for this request.
+ for (StreamDeviceInfoArray::const_iterator it = devices.begin();
+ it != devices.end(); ++it) {
+ WebRtcLogMessage(base::StringPrintf(
+ "Generated media stream for request id %d contains audio device name"
+ " \"%s\"",
+ request->request_id,
+ it->device.name.c_str()));
}
- DVLOG(1) << "MediaStreamImpl::OnCreateNativeSourcesComplete("
- << "{request_id = " << request_info->request_id << "} "
- << "{request_succeeded = " << request_succeeded << "})";
- CompleteGetUserMediaRequest(request_info->web_stream, &request_info->request,
- request_succeeded);
- if (!request_succeeded) {
- // TODO(perkj): Once we don't support MediaStream::Stop the |request_info|
- // can be deleted even if the request succeeds.
- DeleteUserMediaRequestInfo(request_info);
- StopUnreferencedSources(true);
+
+ StreamDeviceInfoArray overridden_audio_array = devices;
+ if (!request->enable_automatic_output_device_selection) {
+ // If the GetUserMedia request did not explicitly set the constraint
+ // kMediaStreamRenderToAssociatedSink, the output device parameters must
+ // be removed.
+ for (StreamDeviceInfoArray::iterator it = overridden_audio_array.begin();
+ it != overridden_audio_array.end(); ++it) {
+ it->device.matched_output_device_id = "";
+ it->device.matched_output = MediaStreamDevice::AudioDeviceParameters();
+ }
+ }
+
+ for (size_t i = 0; i < overridden_audio_array.size(); ++i) {
+ blink::WebMediaStreamSource webkit_source;
+ InitializeSourceObject(overridden_audio_array[i],
+ blink::WebMediaStreamSource::TypeAudio,
+ constraints,
+ request->frame,
+ &webkit_source);
+ (*webkit_tracks)[i].initialize(webkit_source);
+ request->StartAudioTrack((*webkit_tracks)[i], constraints);
}
}
+void MediaStreamImpl::OnCreateNativeTracksCompleted(
+ UserMediaRequestInfo* request,
+ content::MediaStreamRequestResult result) {
+ DVLOG(1) << "MediaStreamImpl::OnCreateNativeTracksComplete("
+ << "{request_id = " << request->request_id << "} "
+ << "{result = " << result << "})";
+ if (result == content::MEDIA_DEVICE_OK)
+ GetUserMediaRequestSucceeded(request->web_stream, &request->request);
+ else
+ GetUserMediaRequestFailed(&request->request, result);
+
+ DeleteUserMediaRequestInfo(request);
+}
+
void MediaStreamImpl::OnDevicesEnumerated(
int request_id,
const StreamDeviceInfoArray& device_array) {
- DVLOG(1) << "MediaStreamImpl::OnDevicesEnumerated("
- << request_id << ")";
- NOTIMPLEMENTED();
+ DVLOG(1) << "MediaStreamImpl::OnDevicesEnumerated(" << request_id << ")";
+
+ MediaDevicesRequestInfo* request = FindMediaDevicesRequestInfo(request_id);
+ DCHECK(request);
+
+ if (request_id == request->audio_input_request_id) {
+ request->has_audio_input_returned = true;
+ DCHECK(request->audio_input_devices.empty());
+ request->audio_input_devices = device_array;
+ } else if (request_id == request->video_input_request_id) {
+ request->has_video_input_returned = true;
+ DCHECK(request->video_input_devices.empty());
+ request->video_input_devices = device_array;
+ } else {
+ DCHECK_EQ(request->audio_output_request_id, request_id);
+ request->has_audio_output_returned = true;
+ DCHECK(request->audio_output_devices.empty());
+ request->audio_output_devices = device_array;
+ }
+
+ if (!request->has_audio_input_returned ||
+ !request->has_video_input_returned ||
+ !request->has_audio_output_returned) {
+ // Wait for the rest of the devices to complete.
+ return;
+ }
+
+ // All devices are ready for copying. We use a hashed audio output device id
+ // as the group id for input and output audio devices. If an input device
+ // doesn't have an associated output device, we use the input device's own id.
+ // We don't support group id for video devices, that's left empty.
+ blink::WebVector<blink::WebMediaDeviceInfo>
+ devices(request->audio_input_devices.size() +
+ request->video_input_devices.size() +
+ request->audio_output_devices.size());
+ for (size_t i = 0; i < request->audio_input_devices.size(); ++i) {
+ const MediaStreamDevice& device = request->audio_input_devices[i].device;
+ DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_CAPTURE);
+ std::string group_id = base::UintToString(base::Hash(
+ !device.matched_output_device_id.empty() ?
+ device.matched_output_device_id :
+ device.id));
+ devices[i].initialize(
+ blink::WebString::fromUTF8(device.id),
+ blink::WebMediaDeviceInfo::MediaDeviceKindAudioInput,
+ blink::WebString::fromUTF8(device.name),
+ blink::WebString::fromUTF8(group_id));
+ }
+ size_t offset = request->audio_input_devices.size();
+ for (size_t i = 0; i < request->video_input_devices.size(); ++i) {
+ const MediaStreamDevice& device = request->video_input_devices[i].device;
+ DCHECK_EQ(device.type, MEDIA_DEVICE_VIDEO_CAPTURE);
+ devices[offset + i].initialize(
+ blink::WebString::fromUTF8(device.id),
+ blink::WebMediaDeviceInfo::MediaDeviceKindVideoInput,
+ blink::WebString::fromUTF8(device.name),
+ blink::WebString());
+ }
+ offset += request->video_input_devices.size();
+ for (size_t i = 0; i < request->audio_output_devices.size(); ++i) {
+ const MediaStreamDevice& device = request->audio_output_devices[i].device;
+ DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_OUTPUT);
+ devices[offset + i].initialize(
+ blink::WebString::fromUTF8(device.id),
+ blink::WebMediaDeviceInfo::MediaDeviceKindAudioOutput,
+ blink::WebString::fromUTF8(device.name),
+ blink::WebString::fromUTF8(base::UintToString(base::Hash(device.id))));
+ }
+
+ EnumerateDevicesSucceded(&request->request, devices);
+
+ // Cancel device enumeration.
+ media_stream_dispatcher_->StopEnumerateDevices(
+ request->audio_input_request_id,
+ AsWeakPtr());
+ media_stream_dispatcher_->StopEnumerateDevices(
+ request->video_input_request_id,
+ AsWeakPtr());
+ media_stream_dispatcher_->StopEnumerateDevices(
+ request->audio_output_request_id,
+ AsWeakPtr());
+
+ DeleteMediaDevicesRequestInfo(request);
}
void MediaStreamImpl::OnDeviceOpened(
@@ -530,25 +615,66 @@ void MediaStreamImpl::OnDeviceOpenFailed(int request_id) {
NOTIMPLEMENTED();
}
-void MediaStreamImpl::CompleteGetUserMediaRequest(
+void MediaStreamImpl::GetUserMediaRequestSucceeded(
const blink::WebMediaStream& stream,
+ blink::WebUserMediaRequest* request_info) {
+ DVLOG(1) << "MediaStreamImpl::GetUserMediaRequestSucceeded";
+ request_info->requestSucceeded(stream);
+}
+
+void MediaStreamImpl::GetUserMediaRequestFailed(
blink::WebUserMediaRequest* request_info,
- bool request_succeeded) {
- if (request_succeeded) {
- request_info->requestSucceeded(stream);
- } else {
- request_info->requestFailed();
+ content::MediaStreamRequestResult result) {
+ switch (result) {
+ case MEDIA_DEVICE_OK:
+ NOTREACHED();
+ break;
+ case MEDIA_DEVICE_PERMISSION_DENIED:
+ request_info->requestDenied();
+ break;
+ case MEDIA_DEVICE_PERMISSION_DISMISSED:
+ request_info->requestFailedUASpecific("PermissionDismissedError");
+ break;
+ case MEDIA_DEVICE_INVALID_STATE:
+ request_info->requestFailedUASpecific("InvalidStateError");
+ break;
+ case MEDIA_DEVICE_NO_HARDWARE:
+ request_info->requestFailedUASpecific("DevicesNotFoundError");
+ break;
+ case MEDIA_DEVICE_INVALID_SECURITY_ORIGIN:
+ request_info->requestFailedUASpecific("InvalidSecurityOriginError");
+ break;
+ case MEDIA_DEVICE_TAB_CAPTURE_FAILURE:
+ request_info->requestFailedUASpecific("TabCaptureError");
+ break;
+ case MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE:
+ request_info->requestFailedUASpecific("ScreenCaptureError");
+ break;
+ case MEDIA_DEVICE_CAPTURE_FAILURE:
+ request_info->requestFailedUASpecific("DeviceCaptureError");
+ break;
+ case MEDIA_DEVICE_TRACK_START_FAILURE:
+ request_info->requestFailedUASpecific("TrackStartError");
+ break;
+ default:
+ request_info->requestFailed();
+ break;
}
}
+void MediaStreamImpl::EnumerateDevicesSucceded(
+ blink::WebMediaDevicesRequest* request,
+ blink::WebVector<blink::WebMediaDeviceInfo>& devices) {
+ request->requestSucceeded(devices);
+}
+
const blink::WebMediaStreamSource* MediaStreamImpl::FindLocalSource(
const StreamDeviceInfo& device) const {
for (LocalStreamSources::const_iterator it = local_sources_.begin();
it != local_sources_.end(); ++it) {
- MediaStreamSourceExtraData* extra_data =
- static_cast<MediaStreamSourceExtraData*>(
- it->source.extraData());
- const StreamDeviceInfo& active_device = extra_data->device_info();
+ MediaStreamSource* source =
+ static_cast<MediaStreamSource*>(it->source.extraData());
+ const StreamDeviceInfo& active_device = source->device_info();
if (active_device.device.id == device.device.id &&
active_device.device.type == device.device.type &&
active_device.session_id == device.session_id) {
@@ -558,23 +684,6 @@ const blink::WebMediaStreamSource* MediaStreamImpl::FindLocalSource(
return NULL;
}
-bool MediaStreamImpl::FindSourceInRequests(
- const blink::WebMediaStreamSource& source) const {
- for (UserMediaRequests::const_iterator req_it = user_media_requests_.begin();
- req_it != user_media_requests_.end(); ++req_it) {
- const std::vector<blink::WebMediaStreamSource>& sources =
- (*req_it)->sources;
- for (std::vector<blink::WebMediaStreamSource>::const_iterator source_it =
- sources.begin();
- source_it != sources.end(); ++source_it) {
- if (source_it->id() == source.id()) {
- return true;
- }
- }
- }
- return false;
-}
-
MediaStreamImpl::UserMediaRequestInfo*
MediaStreamImpl::FindUserMediaRequestInfo(int request_id) {
UserMediaRequests::iterator it = user_media_requests_.begin();
@@ -596,33 +705,49 @@ MediaStreamImpl::FindUserMediaRequestInfo(
return NULL;
}
-MediaStreamImpl::UserMediaRequestInfo*
-MediaStreamImpl::FindUserMediaRequestInfo(const std::string& label) {
+void MediaStreamImpl::DeleteUserMediaRequestInfo(
+ UserMediaRequestInfo* request) {
UserMediaRequests::iterator it = user_media_requests_.begin();
for (; it != user_media_requests_.end(); ++it) {
- if ((*it)->generated && (*it)->web_stream.id() == UTF8ToUTF16(label))
+ if ((*it) == request) {
+ user_media_requests_.erase(it);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+MediaStreamImpl::MediaDevicesRequestInfo*
+MediaStreamImpl::FindMediaDevicesRequestInfo(
+ int request_id) {
+ MediaDevicesRequests::iterator it = media_devices_requests_.begin();
+ for (; it != media_devices_requests_.end(); ++it) {
+ if ((*it)->audio_input_request_id == request_id ||
+ (*it)->video_input_request_id == request_id ||
+ (*it)->audio_output_request_id == request_id) {
return (*it);
+ }
}
return NULL;
}
-MediaStreamImpl::UserMediaRequestInfo*
-MediaStreamImpl::FindUserMediaRequestInfo(
- blink::WebMediaStream* web_stream) {
- UserMediaRequests::iterator it = user_media_requests_.begin();
- for (; it != user_media_requests_.end(); ++it) {
- if (&((*it)->web_stream) == web_stream)
- return (*it);
+MediaStreamImpl::MediaDevicesRequestInfo*
+MediaStreamImpl::FindMediaDevicesRequestInfo(
+ const blink::WebMediaDevicesRequest& request) {
+ MediaDevicesRequests::iterator it = media_devices_requests_.begin();
+ for (; it != media_devices_requests_.end(); ++it) {
+ if ((*it)->request == request)
+ return (*it);
}
return NULL;
}
-void MediaStreamImpl::DeleteUserMediaRequestInfo(
- UserMediaRequestInfo* request) {
- UserMediaRequests::iterator it = user_media_requests_.begin();
- for (; it != user_media_requests_.end(); ++it) {
+void MediaStreamImpl::DeleteMediaDevicesRequestInfo(
+ MediaDevicesRequestInfo* request) {
+ MediaDevicesRequests::iterator it = media_devices_requests_.begin();
+ for (; it != media_devices_requests_.end(); ++it) {
if ((*it) == request) {
- user_media_requests_.erase(it);
+ media_devices_requests_.erase(it);
return;
}
}
@@ -669,22 +794,10 @@ void MediaStreamImpl::FrameWillClose(blink::WebFrame* frame) {
}
}
-void MediaStreamImpl::OnLocalMediaStreamStop(
- const std::string& label) {
- DVLOG(1) << "MediaStreamImpl::OnLocalMediaStreamStop(" << label << ")";
-
- UserMediaRequestInfo* user_media_request = FindUserMediaRequestInfo(label);
- if (user_media_request) {
- DeleteUserMediaRequestInfo(user_media_request);
- }
- StopUnreferencedSources(true);
-}
-
-void MediaStreamImpl::OnLocalSourceStop(
+void MediaStreamImpl::OnLocalSourceStopped(
const blink::WebMediaStreamSource& source) {
DCHECK(CalledOnValidThread());
-
- StopLocalSource(source, true);
+ DVLOG(1) << "MediaStreamImpl::OnLocalSourceStopped";
bool device_found = false;
for (LocalStreamSources::iterator device_it = local_sources_.begin();
@@ -697,152 +810,24 @@ void MediaStreamImpl::OnLocalSourceStop(
}
CHECK(device_found);
- // Remove the reference to this source from all |user_media_requests_|.
- // TODO(perkj): The below is not necessary once we don't need to support
- // MediaStream::Stop().
- UserMediaRequests::iterator it = user_media_requests_.begin();
- while (it != user_media_requests_.end()) {
- RemoveSource(source, &(*it)->sources);
- if ((*it)->sources.empty()) {
- it = user_media_requests_.erase(it);
- } else {
- ++it;
- }
- }
+ MediaStreamSource* source_impl =
+ static_cast<MediaStreamSource*> (source.extraData());
+ media_stream_dispatcher_->StopStreamDevice(source_impl->device_info());
}
void MediaStreamImpl::StopLocalSource(
const blink::WebMediaStreamSource& source,
bool notify_dispatcher) {
- MediaStreamSourceExtraData* extra_data =
- static_cast<MediaStreamSourceExtraData*> (source.extraData());
- CHECK(extra_data);
+ MediaStreamSource* source_impl =
+ static_cast<MediaStreamSource*> (source.extraData());
DVLOG(1) << "MediaStreamImpl::StopLocalSource("
- << "{device_id = " << extra_data->device_info().device.id << "})";
-
- if (source.type() == blink::WebMediaStreamSource::TypeAudio) {
- if (extra_data->GetAudioCapturer()) {
- extra_data->GetAudioCapturer()->Stop();
- }
- }
+ << "{device_id = " << source_impl->device_info().device.id << "})";
if (notify_dispatcher)
- media_stream_dispatcher_->StopStreamDevice(extra_data->device_info());
-
- blink::WebMediaStreamSource writable_source(source);
- writable_source.setReadyState(
- blink::WebMediaStreamSource::ReadyStateEnded);
- writable_source.setExtraData(NULL);
-}
-
-void MediaStreamImpl::StopUnreferencedSources(bool notify_dispatcher) {
- LocalStreamSources::iterator source_it = local_sources_.begin();
- while (source_it != local_sources_.end()) {
- if (!FindSourceInRequests(source_it->source)) {
- StopLocalSource(source_it->source, notify_dispatcher);
- source_it = local_sources_.erase(source_it);
- } else {
- ++source_it;
- }
- }
-}
-
-scoped_refptr<WebRtcAudioRenderer> MediaStreamImpl::CreateRemoteAudioRenderer(
- webrtc::MediaStreamInterface* stream) {
- if (stream->GetAudioTracks().empty())
- return NULL;
-
- DVLOG(1) << "MediaStreamImpl::CreateRemoteAudioRenderer label:"
- << stream->label();
-
- // TODO(tommi): Change the default value of session_id to be
- // StreamDeviceInfo::kNoId. Also update AudioOutputDevice etc.
- int session_id = 0, sample_rate = 0, buffer_size = 0;
- if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
- &sample_rate,
- &buffer_size)) {
- GetDefaultOutputDeviceParams(&sample_rate, &buffer_size);
- }
-
- return new WebRtcAudioRenderer(RenderViewObserver::routing_id(),
- session_id, sample_rate, buffer_size);
-}
-
-scoped_refptr<WebRtcLocalAudioRenderer>
-MediaStreamImpl::CreateLocalAudioRenderer(
- const blink::WebMediaStreamTrack& audio_track) {
- DVLOG(1) << "MediaStreamImpl::CreateLocalAudioRenderer";
+ media_stream_dispatcher_->StopStreamDevice(source_impl->device_info());
- int session_id = 0, sample_rate = 0, buffer_size = 0;
- if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
- &sample_rate,
- &buffer_size)) {
- GetDefaultOutputDeviceParams(&sample_rate, &buffer_size);
- }
-
- // Create a new WebRtcLocalAudioRenderer instance and connect it to the
- // existing WebRtcAudioCapturer so that the renderer can use it as source.
- return new WebRtcLocalAudioRenderer(
- audio_track,
- RenderViewObserver::routing_id(),
- session_id,
- buffer_size);
-}
-
-bool MediaStreamImpl::GetAuthorizedDeviceInfoForAudioRenderer(
- int* session_id,
- int* output_sample_rate,
- int* output_frames_per_buffer) {
- DCHECK(CalledOnValidThread());
-
- WebRtcAudioDeviceImpl* audio_device =
- dependency_factory_->GetWebRtcAudioDevice();
- if (!audio_device)
- return false;
-
- if (!audio_device->GetDefaultCapturer())
- return false;
-
- return audio_device->GetDefaultCapturer()->GetPairedOutputParameters(
- session_id,
- output_sample_rate,
- output_frames_per_buffer);
-}
-
-MediaStreamSourceExtraData::MediaStreamSourceExtraData(
- const StreamDeviceInfo& device_info,
- const SourceStopCallback& stop_callback)
- : device_info_(device_info),
- stop_callback_(stop_callback) {
-}
-
-MediaStreamSourceExtraData::MediaStreamSourceExtraData() {
-}
-
-MediaStreamSourceExtraData::~MediaStreamSourceExtraData() {}
-
-void MediaStreamSourceExtraData::OnLocalSourceStop() {
- if (!stop_callback_.is_null())
- stop_callback_.Run(owner());
-}
-
-MediaStreamExtraData::MediaStreamExtraData(
- webrtc::MediaStreamInterface* stream, bool is_local)
- : stream_(stream),
- is_local_(is_local) {
-}
-
-MediaStreamExtraData::~MediaStreamExtraData() {
-}
-
-void MediaStreamExtraData::SetLocalStreamStopCallback(
- const StreamStopCallback& stop_callback) {
- stream_stop_callback_ = stop_callback;
-}
-
-void MediaStreamExtraData::OnLocalStreamStop() {
- if (!stream_stop_callback_.is_null())
- stream_stop_callback_.Run(stream_->label());
+ source_impl->ResetSourceStoppedCallback();
+ source_impl->StopSource();
}
MediaStreamImpl::UserMediaRequestInfo::UserMediaRequestInfo(
@@ -855,10 +840,99 @@ MediaStreamImpl::UserMediaRequestInfo::UserMediaRequestInfo(
enable_automatic_output_device_selection(
enable_automatic_output_device_selection),
frame(frame),
- request(request) {
+ request(request),
+ request_failed_(false) {
}
MediaStreamImpl::UserMediaRequestInfo::~UserMediaRequestInfo() {
+ DVLOG(1) << "~UserMediaRequestInfo";
+}
+
+void MediaStreamImpl::UserMediaRequestInfo::StartAudioTrack(
+ const blink::WebMediaStreamTrack& track,
+ const blink::WebMediaConstraints& constraints) {
+ DCHECK(track.source().type() == blink::WebMediaStreamSource::TypeAudio);
+ MediaStreamAudioSource* native_source =
+ static_cast <MediaStreamAudioSource*>(track.source().extraData());
+ DCHECK(native_source);
+
+ sources_.push_back(track.source());
+ sources_waiting_for_callback_.push_back(native_source);
+ native_source->AddTrack(
+ track, constraints, base::Bind(
+ &MediaStreamImpl::UserMediaRequestInfo::OnTrackStarted,
+ AsWeakPtr()));
+}
+
+blink::WebMediaStreamTrack
+MediaStreamImpl::UserMediaRequestInfo::CreateAndStartVideoTrack(
+ const blink::WebMediaStreamSource& source,
+ const blink::WebMediaConstraints& constraints) {
+ DCHECK(source.type() == blink::WebMediaStreamSource::TypeVideo);
+ MediaStreamVideoSource* native_source =
+ MediaStreamVideoSource::GetVideoSource(source);
+ DCHECK(native_source);
+ sources_.push_back(source);
+ sources_waiting_for_callback_.push_back(native_source);
+ return MediaStreamVideoTrack::CreateVideoTrack(
+ native_source, constraints, base::Bind(
+ &MediaStreamImpl::UserMediaRequestInfo::OnTrackStarted,
+ AsWeakPtr()),
+ true);
+}
+
+void MediaStreamImpl::UserMediaRequestInfo::CallbackOnTracksStarted(
+ const ResourcesReady& callback) {
+ DCHECK(ready_callback_.is_null());
+ ready_callback_ = callback;
+ CheckAllTracksStarted();
+}
+
+void MediaStreamImpl::UserMediaRequestInfo::OnTrackStarted(
+ MediaStreamSource* source, bool success) {
+ DVLOG(1) << "OnTrackStarted result " << success;
+ std::vector<MediaStreamSource*>::iterator it =
+ std::find(sources_waiting_for_callback_.begin(),
+ sources_waiting_for_callback_.end(),
+ source);
+ DCHECK(it != sources_waiting_for_callback_.end());
+ sources_waiting_for_callback_.erase(it);
+ // All tracks must be started successfully. Otherwise the request is a
+ // failure.
+ if (!success)
+ request_failed_ = true;
+ CheckAllTracksStarted();
+}
+
+void MediaStreamImpl::UserMediaRequestInfo::CheckAllTracksStarted() {
+ if (!ready_callback_.is_null() && sources_waiting_for_callback_.empty()) {
+ ready_callback_.Run(
+ this,
+ request_failed_ ? MEDIA_DEVICE_TRACK_START_FAILURE : MEDIA_DEVICE_OK);
+ }
+}
+
+bool MediaStreamImpl::UserMediaRequestInfo::IsSourceUsed(
+ const blink::WebMediaStreamSource& source) const {
+ for (std::vector<blink::WebMediaStreamSource>::const_iterator source_it =
+ sources_.begin();
+ source_it != sources_.end(); ++source_it) {
+ if (source_it->id() == source.id())
+ return true;
+ }
+ return false;
+}
+
+void MediaStreamImpl::UserMediaRequestInfo::RemoveSource(
+ const blink::WebMediaStreamSource& source) {
+ for (std::vector<blink::WebMediaStreamSource>::iterator it =
+ sources_.begin();
+ it != sources_.end(); ++it) {
+ if (source.id() == it->id()) {
+ sources_.erase(it);
+ return;
+ }
+ }
}
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_impl.h b/chromium/content/renderer/media/media_stream_impl.h
index 8f72d2ba287..ad960201f36 100644
--- a/chromium/content/renderer/media/media_stream_impl.h
+++ b/chromium/content/renderer/media/media_stream_impl.h
@@ -17,33 +17,31 @@
#include "base/threading/non_thread_safe.h"
#include "content/common/content_export.h"
#include "content/public/renderer/render_view_observer.h"
-#include "content/renderer/media/media_stream_client.h"
#include "content/renderer/media/media_stream_dispatcher_eventhandler.h"
+#include "content/renderer/media/media_stream_source.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
#include "third_party/WebKit/public/platform/WebVector.h"
+#include "third_party/WebKit/public/web/WebMediaDevicesRequest.h"
#include "third_party/WebKit/public/web/WebUserMediaClient.h"
#include "third_party/WebKit/public/web/WebUserMediaRequest.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
namespace content {
-class MediaStreamAudioRenderer;
-class MediaStreamDependencyFactory;
+class PeerConnectionDependencyFactory;
class MediaStreamDispatcher;
-class MediaStreamSourceExtraData;
-class WebRtcAudioRenderer;
-class WebRtcLocalAudioRenderer;
+class MediaStreamVideoSource;
+class VideoCapturerDelegate;
-// MediaStreamImpl is a delegate for the Media Stream API messages used by
-// WebKit. It ties together WebKit, native PeerConnection in libjingle and
-// MediaStreamManager (via MediaStreamDispatcher and MediaStreamDispatcherHost)
+// MediaStreamImpl is a delegate for the Media Stream GetUserMedia API.
+// It ties together WebKit and MediaStreamManager
+// (via MediaStreamDispatcher and MediaStreamDispatcherHost)
// in the browser process. It must be created, called and destroyed on the
// render thread.
// MediaStreamImpl have weak pointers to a MediaStreamDispatcher.
class CONTENT_EXPORT MediaStreamImpl
: public RenderViewObserver,
NON_EXPORTED_BASE(public blink::WebUserMediaClient),
- NON_EXPORTED_BASE(public MediaStreamClient),
public MediaStreamDispatcherEventHandler,
public base::SupportsWeakPtr<MediaStreamImpl>,
NON_EXPORTED_BASE(public base::NonThreadSafe) {
@@ -51,23 +49,18 @@ class CONTENT_EXPORT MediaStreamImpl
MediaStreamImpl(
RenderView* render_view,
MediaStreamDispatcher* media_stream_dispatcher,
- MediaStreamDependencyFactory* dependency_factory);
+ PeerConnectionDependencyFactory* dependency_factory);
virtual ~MediaStreamImpl();
// blink::WebUserMediaClient implementation
virtual void requestUserMedia(
- const blink::WebUserMediaRequest& user_media_request) OVERRIDE;
+ const blink::WebUserMediaRequest& user_media_request);
virtual void cancelUserMediaRequest(
- const blink::WebUserMediaRequest& user_media_request) OVERRIDE;
-
- // MediaStreamClient implementation.
- virtual bool IsMediaStream(const GURL& url) OVERRIDE;
- virtual scoped_refptr<VideoFrameProvider> GetVideoFrameProvider(
- const GURL& url,
- const base::Closure& error_cb,
- const VideoFrameProvider::RepaintCB& repaint_cb) OVERRIDE;
- virtual scoped_refptr<MediaStreamAudioRenderer>
- GetAudioRenderer(const GURL& url) OVERRIDE;
+ const blink::WebUserMediaRequest& user_media_request);
+ virtual void requestMediaDevices(
+ const blink::WebMediaDevicesRequest& media_devices_request) OVERRIDE;
+ virtual void cancelMediaDevicesRequest(
+ const blink::WebMediaDevicesRequest& media_devices_request) OVERRIDE;
// MediaStreamDispatcherEventHandler implementation.
virtual void OnStreamGenerated(
@@ -75,7 +68,9 @@ class CONTENT_EXPORT MediaStreamImpl
const std::string& label,
const StreamDeviceInfoArray& audio_array,
const StreamDeviceInfoArray& video_array) OVERRIDE;
- virtual void OnStreamGenerationFailed(int request_id) OVERRIDE;
+ virtual void OnStreamGenerationFailed(
+ int request_id,
+ content::MediaStreamRequestResult result) OVERRIDE;
virtual void OnDeviceStopped(const std::string& label,
const StreamDeviceInfo& device_info) OVERRIDE;
virtual void OnDevicesEnumerated(
@@ -92,35 +87,37 @@ class CONTENT_EXPORT MediaStreamImpl
virtual void FrameWillClose(blink::WebFrame* frame) OVERRIDE;
protected:
- void OnLocalSourceStop(const blink::WebMediaStreamSource& source);
-
- void OnLocalMediaStreamStop(const std::string& label);
-
- // Callback function triggered when all native (libjingle) versions of the
- // underlying media sources have been created and started.
- // |web_stream| is a raw pointer to the web_stream in
- // UserMediaRequests::web_stream for which the underlying sources have been
- // created.
- void OnCreateNativeSourcesComplete(
- blink::WebMediaStream* web_stream,
- bool request_succeeded);
+ // Called when |source| has been stopped from JavaScript.
+ void OnLocalSourceStopped(const blink::WebMediaStreamSource& source);
- // This function is virtual for test purposes. A test can override this to
+ // These methods are virtual for test purposes. A test can override them to
// test requesting local media streams. The function notifies WebKit that the
- // |request| have completed and generated the MediaStream |stream|.
- virtual void CompleteGetUserMediaRequest(
- const blink::WebMediaStream& stream,
+ // |request| have completed.
+ virtual void GetUserMediaRequestSucceeded(
+ const blink::WebMediaStream& stream,
+ blink::WebUserMediaRequest* request_info);
+ virtual void GetUserMediaRequestFailed(
blink::WebUserMediaRequest* request_info,
- bool request_succeeded);
-
- // Returns the WebKit representation of a MediaStream given an URL.
+ content::MediaStreamRequestResult result);
+ virtual void EnumerateDevicesSucceded(
+ blink::WebMediaDevicesRequest* request,
+ blink::WebVector<blink::WebMediaDeviceInfo>& devices);
+ // Creates a MediaStreamVideoSource object.
// This is virtual for test purposes.
- virtual blink::WebMediaStream GetMediaStream(const GURL& url);
+ virtual MediaStreamVideoSource* CreateVideoSource(
+ const StreamDeviceInfo& device,
+ const MediaStreamSource::SourceStoppedCallback& stop_callback);
private:
- // Structure for storing information about a WebKit request to create a
+ // Class for storing information about a WebKit request to create a
// MediaStream.
- struct UserMediaRequestInfo {
+ class UserMediaRequestInfo
+ : public base::SupportsWeakPtr<UserMediaRequestInfo> {
+ public:
+ typedef base::Callback<void(UserMediaRequestInfo* request_info,
+ content::MediaStreamRequestResult result)>
+ ResourcesReady;
+
UserMediaRequestInfo(int request_id,
blink::WebFrame* frame,
const blink::WebUserMediaRequest& request,
@@ -134,7 +131,32 @@ class CONTENT_EXPORT MediaStreamImpl
blink::WebFrame* frame; // WebFrame that requested the MediaStream.
blink::WebMediaStream web_stream;
blink::WebUserMediaRequest request;
- std::vector<blink::WebMediaStreamSource> sources;
+
+ void StartAudioTrack(const blink::WebMediaStreamTrack& track,
+ const blink::WebMediaConstraints& constraints);
+
+ blink::WebMediaStreamTrack CreateAndStartVideoTrack(
+ const blink::WebMediaStreamSource& source,
+ const blink::WebMediaConstraints& constraints);
+
+ // Triggers |callback| when all sources used in this request have either
+ // successfully started, or a source has failed to start.
+ void CallbackOnTracksStarted(const ResourcesReady& callback);
+
+ bool IsSourceUsed(const blink::WebMediaStreamSource& source) const;
+ void RemoveSource(const blink::WebMediaStreamSource& source);
+
+ bool AreAllSourcesRemoved() const { return sources_.empty(); }
+
+ private:
+ void OnTrackStarted(MediaStreamSource* source, bool success);
+ void CheckAllTracksStarted();
+
+ ResourcesReady ready_callback_;
+ bool request_failed_;
+ // Sources used in this request.
+ std::vector<blink::WebMediaStreamSource> sources_;
+ std::vector<MediaStreamSource*> sources_waiting_for_callback_;
};
typedef ScopedVector<UserMediaRequestInfo> UserMediaRequests;
@@ -150,63 +172,70 @@ class CONTENT_EXPORT MediaStreamImpl
};
typedef std::vector<LocalStreamSource> LocalStreamSources;
+ struct MediaDevicesRequestInfo;
+ typedef ScopedVector<MediaDevicesRequestInfo> MediaDevicesRequests;
+
// Creates a WebKit representation of stream sources based on
// |devices| from the MediaStreamDispatcher.
- void CreateWebKitSourceVector(
- const std::string& label,
- const StreamDeviceInfoArray& devices,
+ void InitializeSourceObject(
+ const StreamDeviceInfo& device,
blink::WebMediaStreamSource::Type type,
+ const blink::WebMediaConstraints& constraints,
blink::WebFrame* frame,
- blink::WebVector<blink::WebMediaStreamSource>& webkit_sources);
+ blink::WebMediaStreamSource* webkit_source);
+
+ void CreateVideoTracks(
+ const StreamDeviceInfoArray& devices,
+ const blink::WebMediaConstraints& constraints,
+ blink::WebVector<blink::WebMediaStreamTrack>* webkit_tracks,
+ UserMediaRequestInfo* request);
+
+ void CreateAudioTracks(
+ const StreamDeviceInfoArray& devices,
+ const blink::WebMediaConstraints& constraints,
+ blink::WebVector<blink::WebMediaStreamTrack>* webkit_tracks,
+ UserMediaRequestInfo* request);
+
+ // Callback function triggered when all native versions of the
+ // underlying media sources and tracks have been created and started.
+ void OnCreateNativeTracksCompleted(
+ UserMediaRequestInfo* request,
+ content::MediaStreamRequestResult result);
UserMediaRequestInfo* FindUserMediaRequestInfo(int request_id);
UserMediaRequestInfo* FindUserMediaRequestInfo(
- blink::WebMediaStream* web_stream);
- UserMediaRequestInfo* FindUserMediaRequestInfo(
const blink::WebUserMediaRequest& request);
- UserMediaRequestInfo* FindUserMediaRequestInfo(const std::string& label);
void DeleteUserMediaRequestInfo(UserMediaRequestInfo* request);
+ MediaDevicesRequestInfo* FindMediaDevicesRequestInfo(int request_id);
+ MediaDevicesRequestInfo* FindMediaDevicesRequestInfo(
+ const blink::WebMediaDevicesRequest& request);
+ void DeleteMediaDevicesRequestInfo(MediaDevicesRequestInfo* request);
+
// Returns the source that use a device with |device.session_id|
// and |device.device.id|. NULL if such source doesn't exist.
const blink::WebMediaStreamSource* FindLocalSource(
const StreamDeviceInfo& device) const;
- // Returns true if |source| exists in |user_media_requests_|
- bool FindSourceInRequests(const blink::WebMediaStreamSource& source) const;
-
void StopLocalSource(const blink::WebMediaStreamSource& source,
bool notify_dispatcher);
- // Stops all local sources that don't exist in exist in
- // |user_media_requests_|.
- void StopUnreferencedSources(bool notify_dispatcher);
-
- scoped_refptr<WebRtcAudioRenderer> CreateRemoteAudioRenderer(
- webrtc::MediaStreamInterface* stream);
- scoped_refptr<WebRtcLocalAudioRenderer> CreateLocalAudioRenderer(
- const blink::WebMediaStreamTrack& audio_track);
-
- // Returns a valid session id if a single capture device is currently open
- // (and then the matching session_id), otherwise -1.
- // This is used to pass on a session id to a webrtc audio renderer (either
- // local or remote), so that audio will be rendered to a matching output
- // device, should one exist.
- // Note that if there are more than one open capture devices the function
- // will not be able to pick an appropriate device and return false.
- bool GetAuthorizedDeviceInfoForAudioRenderer(
- int* session_id, int* output_sample_rate, int* output_buffer_size);
-
- // Weak ref to a MediaStreamDependencyFactory, owned by the RenderThread.
+
+ // Weak ref to a PeerConnectionDependencyFactory, owned by the RenderThread.
// It's valid for the lifetime of RenderThread.
- MediaStreamDependencyFactory* dependency_factory_;
+ // TODO(xians): Remove this dependency once audio do not need it for local
+ // audio.
+ PeerConnectionDependencyFactory* dependency_factory_;
// media_stream_dispatcher_ is a weak reference, owned by RenderView. It's
// valid for the lifetime of RenderView.
MediaStreamDispatcher* media_stream_dispatcher_;
+ LocalStreamSources local_sources_;
+
UserMediaRequests user_media_requests_;
- LocalStreamSources local_sources_;
+ // Requests to enumerate media devices.
+ MediaDevicesRequests media_devices_requests_;
DISALLOW_COPY_AND_ASSIGN(MediaStreamImpl);
};
diff --git a/chromium/content/renderer/media/media_stream_impl_unittest.cc b/chromium/content/renderer/media/media_stream_impl_unittest.cc
index 644f2cd483d..cddf1192150 100644
--- a/chromium/content/renderer/media/media_stream_impl_unittest.cc
+++ b/chromium/content/renderer/media/media_stream_impl_unittest.cc
@@ -3,13 +3,17 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_extra_data.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream.h"
#include "content/renderer/media/media_stream_impl.h"
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
+#include "content/renderer/media/media_stream_track.h"
#include "content/renderer/media/mock_media_stream_dispatcher.h"
-#include "content/renderer/media/video_capture_impl_manager.h"
+#include "content/renderer/media/mock_media_stream_video_source.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaDeviceInfo.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
@@ -18,6 +22,18 @@
namespace content {
+class MockMediaStreamVideoCapturerSource : public MockMediaStreamVideoSource {
+ public:
+ MockMediaStreamVideoCapturerSource(
+ const StreamDeviceInfo& device,
+ const SourceStoppedCallback& stop_callback,
+ PeerConnectionDependencyFactory* factory)
+ : MockMediaStreamVideoSource(false) {
+ SetDeviceInfo(device);
+ SetStopCallback(stop_callback);
+ }
+};
+
class MediaStreamImplUnderTest : public MediaStreamImpl {
public:
enum RequestState {
@@ -28,9 +44,12 @@ class MediaStreamImplUnderTest : public MediaStreamImpl {
};
MediaStreamImplUnderTest(MediaStreamDispatcher* media_stream_dispatcher,
- MediaStreamDependencyFactory* dependency_factory)
+ PeerConnectionDependencyFactory* dependency_factory)
: MediaStreamImpl(NULL, media_stream_dispatcher, dependency_factory),
- state_(REQUEST_NOT_STARTED) {
+ state_(REQUEST_NOT_STARTED),
+ result_(NUM_MEDIA_REQUEST_RESULTS),
+ factory_(dependency_factory),
+ video_source_(NULL) {
}
void RequestUserMedia() {
@@ -39,102 +58,150 @@ class MediaStreamImplUnderTest : public MediaStreamImpl {
requestUserMedia(user_media_request);
}
- virtual void CompleteGetUserMediaRequest(
+ void RequestMediaDevices() {
+ blink::WebMediaDevicesRequest media_devices_request;
+ state_ = REQUEST_NOT_COMPLETE;
+ requestMediaDevices(media_devices_request);
+ }
+
+ virtual void GetUserMediaRequestSucceeded(
const blink::WebMediaStream& stream,
- blink::WebUserMediaRequest* request_info,
- bool request_succeeded) OVERRIDE {
+ blink::WebUserMediaRequest* request_info) OVERRIDE {
last_generated_stream_ = stream;
- state_ = request_succeeded ? REQUEST_SUCCEEDED : REQUEST_FAILED;
+ state_ = REQUEST_SUCCEEDED;
}
- virtual blink::WebMediaStream GetMediaStream(
- const GURL& url) OVERRIDE {
- return last_generated_stream_;
+ virtual void GetUserMediaRequestFailed(
+ blink::WebUserMediaRequest* request_info,
+ content::MediaStreamRequestResult result) OVERRIDE {
+ last_generated_stream_.reset();
+ state_ = REQUEST_FAILED;
+ result_ = result;
}
- using MediaStreamImpl::OnLocalMediaStreamStop;
- using MediaStreamImpl::OnLocalSourceStop;
+ virtual void EnumerateDevicesSucceded(
+ blink::WebMediaDevicesRequest* request,
+ blink::WebVector<blink::WebMediaDeviceInfo>& devices) OVERRIDE {
+ state_ = REQUEST_SUCCEEDED;
+ last_devices_ = devices;
+ }
+
+ virtual MediaStreamVideoSource* CreateVideoSource(
+ const StreamDeviceInfo& device,
+ const MediaStreamSource::SourceStoppedCallback& stop_callback) OVERRIDE {
+ video_source_ = new MockMediaStreamVideoCapturerSource(device,
+ stop_callback,
+ factory_);
+ return video_source_;
+ }
const blink::WebMediaStream& last_generated_stream() {
return last_generated_stream_;
}
+ const blink::WebVector<blink::WebMediaDeviceInfo>& last_devices() {
+ return last_devices_;
+ }
+
+ void ClearLastGeneratedStream() {
+ last_generated_stream_.reset();
+ }
+
+ MockMediaStreamVideoCapturerSource* last_created_video_source() const {
+ return video_source_;
+ }
+
RequestState request_state() const { return state_; }
+ content::MediaStreamRequestResult error_reason() const { return result_; }
private:
blink::WebMediaStream last_generated_stream_;
RequestState state_;
+ content::MediaStreamRequestResult result_;
+ blink::WebVector<blink::WebMediaDeviceInfo> last_devices_;
+ PeerConnectionDependencyFactory* factory_;
+ MockMediaStreamVideoCapturerSource* video_source_;
};
class MediaStreamImplTest : public ::testing::Test {
public:
virtual void SetUp() {
// Create our test object.
+ child_process_.reset(new ChildProcess());
ms_dispatcher_.reset(new MockMediaStreamDispatcher());
- dependency_factory_.reset(new MockMediaStreamDependencyFactory());
+ dependency_factory_.reset(new MockPeerConnectionDependencyFactory());
ms_impl_.reset(new MediaStreamImplUnderTest(ms_dispatcher_.get(),
dependency_factory_.get()));
}
blink::WebMediaStream RequestLocalMediaStream() {
ms_impl_->RequestUserMedia();
- FakeMediaStreamDispatcherComplete();
- ChangeVideoSourceStateToLive();
- ChangeAudioSourceStateToLive();
+ FakeMediaStreamDispatcherRequestUserMediaComplete();
+ StartMockedVideoSource();
EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_SUCCEEDED,
ms_impl_->request_state());
blink::WebMediaStream desc = ms_impl_->last_generated_stream();
- content::MediaStreamExtraData* extra_data =
- static_cast<content::MediaStreamExtraData*>(desc.extraData());
- if (!extra_data || !extra_data->stream().get()) {
+ content::MediaStream* native_stream =
+ content::MediaStream::GetMediaStream(desc);
+ if (!native_stream) {
ADD_FAILURE();
return desc;
}
- EXPECT_EQ(1u, extra_data->stream()->GetAudioTracks().size());
- EXPECT_EQ(1u, extra_data->stream()->GetVideoTracks().size());
- EXPECT_NE(extra_data->stream()->GetAudioTracks()[0]->id(),
- extra_data->stream()->GetVideoTracks()[0]->id());
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ desc.audioTracks(audio_tracks);
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ desc.videoTracks(video_tracks);
+
+ EXPECT_EQ(1u, audio_tracks.size());
+ EXPECT_EQ(1u, video_tracks.size());
+ EXPECT_NE(audio_tracks[0].id(), video_tracks[0].id());
return desc;
}
- void FakeMediaStreamDispatcherComplete() {
- ms_impl_->OnStreamGenerated(ms_dispatcher_->request_id(),
+ void FakeMediaStreamDispatcherRequestUserMediaComplete() {
+ // Audio request ID is used as the shared request ID.
+ ms_impl_->OnStreamGenerated(ms_dispatcher_->audio_input_request_id(),
ms_dispatcher_->stream_label(),
- ms_dispatcher_->audio_array(),
+ ms_dispatcher_->audio_input_array(),
ms_dispatcher_->video_array());
}
- void ChangeVideoSourceStateToLive() {
- if (dependency_factory_->last_video_source() != NULL) {
- dependency_factory_->last_video_source()->SetLive();
- }
+ void FakeMediaStreamDispatcherRequestMediaDevicesComplete() {
+ ms_impl_->OnDevicesEnumerated(ms_dispatcher_->audio_input_request_id(),
+ ms_dispatcher_->audio_input_array());
+ ms_impl_->OnDevicesEnumerated(ms_dispatcher_->audio_output_request_id(),
+ ms_dispatcher_->audio_output_array());
+ ms_impl_->OnDevicesEnumerated(ms_dispatcher_->video_request_id(),
+ ms_dispatcher_->video_array());
}
- void ChangeAudioSourceStateToLive() {
- if (dependency_factory_->last_audio_source() != NULL) {
- dependency_factory_->last_audio_source()->SetLive();
- }
+ void StartMockedVideoSource() {
+ MockMediaStreamVideoCapturerSource* video_source =
+ ms_impl_->last_created_video_source();
+ if (video_source->SourceHasAttemptedToStart())
+ video_source->StartMockedSource();
}
- void ChangeVideoSourceStateToEnded() {
- if (dependency_factory_->last_video_source() != NULL) {
- dependency_factory_->last_video_source()->SetEnded();
- }
+ void FailToStartMockedVideoSource() {
+ MockMediaStreamVideoCapturerSource* video_source =
+ ms_impl_->last_created_video_source();
+ if (video_source->SourceHasAttemptedToStart())
+ video_source->FailToStartMockedSource();
}
- void ChangeAudioSourceStateToEnded() {
- if (dependency_factory_->last_audio_source() != NULL) {
- dependency_factory_->last_audio_source()->SetEnded();
- }
+ void FailToCreateNextAudioCapturer() {
+ dependency_factory_->FailToCreateNextAudioCapturer();
}
protected:
+ base::MessageLoop message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
scoped_ptr<MockMediaStreamDispatcher> ms_dispatcher_;
scoped_ptr<MediaStreamImplUnderTest> ms_impl_;
- scoped_ptr<MockMediaStreamDependencyFactory> dependency_factory_;
+ scoped_ptr<MockPeerConnectionDependencyFactory> dependency_factory_;
};
TEST_F(MediaStreamImplTest, GenerateMediaStream) {
@@ -199,51 +266,65 @@ TEST_F(MediaStreamImplTest, GenerateTwoMediaStreamsWithDifferentSources) {
desc2_audio_tracks[0].source().extraData());
}
-TEST_F(MediaStreamImplTest, StopLocalMediaStream) {
+TEST_F(MediaStreamImplTest, StopLocalTracks) {
// Generate a stream with both audio and video.
blink::WebMediaStream mixed_desc = RequestLocalMediaStream();
- // Stop generated local streams.
- ms_impl_->OnLocalMediaStreamStop(mixed_desc.id().utf8());
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ mixed_desc.audioTracks(audio_tracks);
+ MediaStreamTrack* audio_track = MediaStreamTrack::GetTrack(audio_tracks[0]);
+ audio_track->Stop();
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ mixed_desc.videoTracks(video_tracks);
+ MediaStreamTrack* video_track = MediaStreamTrack::GetTrack(video_tracks[0]);
+ video_track->Stop();
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
}
-// This test that a source is not stopped even if the MediaStream is stopped if
-// there are two MediaStreams using the same device. The source is stopped
-// if there are no more MediaStreams using the device.
-TEST_F(MediaStreamImplTest, StopLocalMediaStreamWhenTwoStreamUseSameDevices) {
+// This test that a source is not stopped even if the tracks in a
+// MediaStream is stopped if there are two MediaStreams with tracks using the
+// same device. The source is stopped
+// if there are no more MediaStream tracks using the device.
+TEST_F(MediaStreamImplTest, StopLocalTracksWhenTwoStreamUseSameDevices) {
// Generate a stream with both audio and video.
blink::WebMediaStream desc1 = RequestLocalMediaStream();
blink::WebMediaStream desc2 = RequestLocalMediaStream();
- ms_impl_->OnLocalMediaStreamStop(desc2.id().utf8());
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks1;
+ desc1.audioTracks(audio_tracks1);
+ MediaStreamTrack* audio_track1 = MediaStreamTrack::GetTrack(audio_tracks1[0]);
+ audio_track1->Stop();
EXPECT_EQ(0, ms_dispatcher_->stop_audio_device_counter());
- EXPECT_EQ(0, ms_dispatcher_->stop_video_device_counter());
- ms_impl_->OnLocalMediaStreamStop(desc1.id().utf8());
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks2;
+ desc2.audioTracks(audio_tracks2);
+ MediaStreamTrack* audio_track2 = MediaStreamTrack::GetTrack(audio_tracks2[0]);
+ audio_track2->Stop();
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks1;
+ desc1.videoTracks(video_tracks1);
+ MediaStreamTrack* video_track1 = MediaStreamTrack::GetTrack(video_tracks1[0]);
+ video_track1->Stop();
+ EXPECT_EQ(0, ms_dispatcher_->stop_video_device_counter());
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks2;
+ desc2.videoTracks(video_tracks2);
+ MediaStreamTrack* video_track2 = MediaStreamTrack::GetTrack(video_tracks2[0]);
+ video_track2->Stop();
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
}
-// Test that the source is stopped even if there are two MediaStreams using
-// the same source.
-TEST_F(MediaStreamImplTest, StopSource) {
+TEST_F(MediaStreamImplTest, StopSourceWhenMediaStreamGoesOutOfScope) {
// Generate a stream with both audio and video.
- blink::WebMediaStream desc1 = RequestLocalMediaStream();
- blink::WebMediaStream desc2 = RequestLocalMediaStream();
-
- // Stop the video source.
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- desc1.videoTracks(video_tracks);
- ms_impl_->OnLocalSourceStop(video_tracks[0].source());
- EXPECT_EQ(0, ms_dispatcher_->stop_audio_device_counter());
- EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
+ RequestLocalMediaStream();
+ // Makes sure the test itself don't hold a reference to the created
+ // MediaStream.
+ ms_impl_->ClearLastGeneratedStream();
- // Stop the audio source.
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
- desc1.audioTracks(audio_tracks);
- ms_impl_->OnLocalSourceStop(audio_tracks[0].source());
+ // Expect the sources to be stopped when the MediaStream goes out of scope.
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
}
@@ -253,6 +334,7 @@ TEST_F(MediaStreamImplTest, StopSource) {
TEST_F(MediaStreamImplTest, FrameWillClose) {
// Test a stream with both audio and video.
blink::WebMediaStream mixed_desc = RequestLocalMediaStream();
+ blink::WebMediaStream desc2 = RequestLocalMediaStream();
// Test that the MediaStreams are deleted if the owning WebFrame is deleted.
// In the unit test the owning frame is NULL.
@@ -261,30 +343,44 @@ TEST_F(MediaStreamImplTest, FrameWillClose) {
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
}
-// This test what happens if a source to a MediaSteam fails to start.
-TEST_F(MediaStreamImplTest, MediaSourceFailToStart) {
+// This test what happens if a video source to a MediaSteam fails to start.
+TEST_F(MediaStreamImplTest, MediaVideoSourceFailToStart) {
+ ms_impl_->RequestUserMedia();
+ FakeMediaStreamDispatcherRequestUserMediaComplete();
+ FailToStartMockedVideoSource();
+ EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_FAILED,
+ ms_impl_->request_state());
+ EXPECT_EQ(MEDIA_DEVICE_TRACK_START_FAILURE,
+ ms_impl_->error_reason());
+ EXPECT_EQ(1, ms_dispatcher_->request_stream_counter());
+ EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
+ EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
+}
+
+// This test what happens if an audio source fail to initialize.
+TEST_F(MediaStreamImplTest, MediaAudioSourceFailToInitialize) {
+ FailToCreateNextAudioCapturer();
ms_impl_->RequestUserMedia();
- FakeMediaStreamDispatcherComplete();
- ChangeVideoSourceStateToEnded();
- ChangeAudioSourceStateToEnded();
+ FakeMediaStreamDispatcherRequestUserMediaComplete();
+ StartMockedVideoSource();
EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_FAILED,
ms_impl_->request_state());
+ EXPECT_EQ(MEDIA_DEVICE_TRACK_START_FAILURE,
+ ms_impl_->error_reason());
EXPECT_EQ(1, ms_dispatcher_->request_stream_counter());
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
}
-// This test what happens if MediaStreamImpl is deleted while the sources of a
-// MediaStream is being started.
+// This test what happens if MediaStreamImpl is deleted before a source has
+// started.
TEST_F(MediaStreamImplTest, MediaStreamImplShutDown) {
ms_impl_->RequestUserMedia();
- FakeMediaStreamDispatcherComplete();
+ FakeMediaStreamDispatcherRequestUserMediaComplete();
EXPECT_EQ(1, ms_dispatcher_->request_stream_counter());
EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_NOT_COMPLETE,
ms_impl_->request_state());
ms_impl_.reset();
- ChangeAudioSourceStateToLive();
- ChangeVideoSourceStateToLive();
}
// This test what happens if the WebFrame is closed while the MediaStream is
@@ -295,38 +391,85 @@ TEST_F(MediaStreamImplTest, ReloadFrameWhileGeneratingStream) {
EXPECT_EQ(1, ms_dispatcher_->request_stream_counter());
EXPECT_EQ(0, ms_dispatcher_->stop_audio_device_counter());
EXPECT_EQ(0, ms_dispatcher_->stop_video_device_counter());
- ChangeAudioSourceStateToLive();
- ChangeVideoSourceStateToLive();
EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_NOT_COMPLETE,
ms_impl_->request_state());
}
// This test what happens if the WebFrame is closed while the sources are being
-// started by MediaStreamDependencyFactory.
+// started.
TEST_F(MediaStreamImplTest, ReloadFrameWhileGeneratingSources) {
ms_impl_->RequestUserMedia();
- FakeMediaStreamDispatcherComplete();
+ FakeMediaStreamDispatcherRequestUserMediaComplete();
EXPECT_EQ(1, ms_dispatcher_->request_stream_counter());
ms_impl_->FrameWillClose(NULL);
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
- ChangeAudioSourceStateToLive();
- ChangeVideoSourceStateToLive();
EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_NOT_COMPLETE,
ms_impl_->request_state());
}
-// This test what happens if stop is called on a stream after the frame has
+// This test what happens if stop is called on a track after the frame has
// been reloaded.
-TEST_F(MediaStreamImplTest, StopStreamAfterReload) {
+TEST_F(MediaStreamImplTest, StopTrackAfterReload) {
blink::WebMediaStream mixed_desc = RequestLocalMediaStream();
EXPECT_EQ(1, ms_dispatcher_->request_stream_counter());
ms_impl_->FrameWillClose(NULL);
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
- ms_impl_->OnLocalMediaStreamStop(mixed_desc.id().utf8());
+
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ mixed_desc.audioTracks(audio_tracks);
+ MediaStreamTrack* audio_track = MediaStreamTrack::GetTrack(audio_tracks[0]);
+ audio_track->Stop();
EXPECT_EQ(1, ms_dispatcher_->stop_audio_device_counter());
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ mixed_desc.videoTracks(video_tracks);
+ MediaStreamTrack* video_track = MediaStreamTrack::GetTrack(video_tracks[0]);
+ video_track->Stop();
EXPECT_EQ(1, ms_dispatcher_->stop_video_device_counter());
}
+TEST_F(MediaStreamImplTest, EnumerateMediaDevices) {
+ ms_impl_->RequestMediaDevices();
+ FakeMediaStreamDispatcherRequestMediaDevicesComplete();
+
+ EXPECT_EQ(MediaStreamImplUnderTest::REQUEST_SUCCEEDED,
+ ms_impl_->request_state());
+
+ // Audio input device with matched output ID.
+ EXPECT_FALSE(ms_impl_->last_devices()[0].deviceId().isEmpty());
+ EXPECT_EQ(blink::WebMediaDeviceInfo::MediaDeviceKindAudioInput,
+ ms_impl_->last_devices()[0].kind());
+ EXPECT_FALSE(ms_impl_->last_devices()[0].label().isEmpty());
+ EXPECT_FALSE(ms_impl_->last_devices()[0].groupId().isEmpty());
+
+ // Audio input device without matched output ID.
+ EXPECT_FALSE(ms_impl_->last_devices()[1].deviceId().isEmpty());
+ EXPECT_EQ(blink::WebMediaDeviceInfo::MediaDeviceKindAudioInput,
+ ms_impl_->last_devices()[1].kind());
+ EXPECT_FALSE(ms_impl_->last_devices()[1].label().isEmpty());
+ EXPECT_FALSE(ms_impl_->last_devices()[1].groupId().isEmpty());
+
+ // Video input device.
+ EXPECT_FALSE(ms_impl_->last_devices()[2].deviceId().isEmpty());
+ EXPECT_EQ(blink::WebMediaDeviceInfo::MediaDeviceKindVideoInput,
+ ms_impl_->last_devices()[2].kind());
+ EXPECT_FALSE(ms_impl_->last_devices()[2].label().isEmpty());
+ EXPECT_TRUE(ms_impl_->last_devices()[2].groupId().isEmpty());
+
+ // Audio output device.
+ EXPECT_FALSE(ms_impl_->last_devices()[3].deviceId().isEmpty());
+ EXPECT_EQ(blink::WebMediaDeviceInfo::MediaDeviceKindAudioOutput,
+ ms_impl_->last_devices()[3].kind());
+ EXPECT_FALSE(ms_impl_->last_devices()[3].label().isEmpty());
+ EXPECT_FALSE(ms_impl_->last_devices()[3].groupId().isEmpty());
+
+ // Verfify group IDs.
+ EXPECT_TRUE(ms_impl_->last_devices()[0].groupId().equals(
+ ms_impl_->last_devices()[3].groupId()));
+ EXPECT_FALSE(ms_impl_->last_devices()[1].groupId().equals(
+ ms_impl_->last_devices()[3].groupId()));
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_renderer_factory.cc b/chromium/content/renderer/media/media_stream_renderer_factory.cc
new file mode 100644
index 00000000000..628364465aa
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_renderer_factory.cc
@@ -0,0 +1,197 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_renderer_factory.h"
+
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/rtc_video_renderer.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc_audio_renderer.h"
+#include "content/renderer/media/webrtc_local_audio_renderer.h"
+#include "content/renderer/render_thread_impl.h"
+#include "media/base/audio_hardware_config.h"
+#include "third_party/WebKit/public/platform/WebMediaStream.h"
+#include "third_party/WebKit/public/platform/WebURL.h"
+#include "third_party/WebKit/public/web/WebMediaStreamRegistry.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+namespace {
+
+PeerConnectionDependencyFactory* GetPeerConnectionDependencyFactory() {
+ return RenderThreadImpl::current()->GetPeerConnectionDependencyFactory();
+}
+
+void GetDefaultOutputDeviceParams(
+ int* output_sample_rate, int* output_buffer_size) {
+ // Fetch the default audio output hardware config.
+ media::AudioHardwareConfig* hardware_config =
+ RenderThreadImpl::current()->GetAudioHardwareConfig();
+ *output_sample_rate = hardware_config->GetOutputSampleRate();
+ *output_buffer_size = hardware_config->GetOutputBufferSize();
+}
+
+
+// Returns a valid session id if a single capture device is currently open
+// (and then the matching session_id), otherwise -1.
+// This is used to pass on a session id to a webrtc audio renderer (either
+// local or remote), so that audio will be rendered to a matching output
+// device, should one exist.
+// Note that if there are more than one open capture devices the function
+// will not be able to pick an appropriate device and return false.
+bool GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id,
+ int* output_sample_rate,
+ int* output_frames_per_buffer) {
+ WebRtcAudioDeviceImpl* audio_device =
+ GetPeerConnectionDependencyFactory()->GetWebRtcAudioDevice();
+ if (!audio_device)
+ return false;
+
+ return audio_device->GetAuthorizedDeviceInfoForAudioRenderer(
+ session_id, output_sample_rate, output_frames_per_buffer);
+}
+
+scoped_refptr<WebRtcAudioRenderer> CreateRemoteAudioRenderer(
+ webrtc::MediaStreamInterface* stream,
+ int routing_id,
+ int render_frame_id) {
+ if (stream->GetAudioTracks().empty())
+ return NULL;
+
+ DVLOG(1) << "MediaStreamRendererFactory::CreateRemoteAudioRenderer label:"
+ << stream->label();
+
+ // TODO(tommi): Change the default value of session_id to be
+ // StreamDeviceInfo::kNoId. Also update AudioOutputDevice etc.
+ int session_id = 0, sample_rate = 0, buffer_size = 0;
+ if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
+ &sample_rate,
+ &buffer_size)) {
+ GetDefaultOutputDeviceParams(&sample_rate, &buffer_size);
+ }
+
+ return new WebRtcAudioRenderer(
+ stream, routing_id, render_frame_id, session_id,
+ sample_rate, buffer_size);
+}
+
+
+scoped_refptr<WebRtcLocalAudioRenderer> CreateLocalAudioRenderer(
+ const blink::WebMediaStreamTrack& audio_track,
+ int routing_id,
+ int render_frame_id) {
+ DVLOG(1) << "MediaStreamRendererFactory::CreateLocalAudioRenderer";
+
+ int session_id = 0, sample_rate = 0, buffer_size = 0;
+ if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
+ &sample_rate,
+ &buffer_size)) {
+ GetDefaultOutputDeviceParams(&sample_rate, &buffer_size);
+ }
+
+ // Create a new WebRtcLocalAudioRenderer instance and connect it to the
+ // existing WebRtcAudioCapturer so that the renderer can use it as source.
+ return new WebRtcLocalAudioRenderer(
+ audio_track,
+ routing_id,
+ render_frame_id,
+ session_id,
+ buffer_size);
+}
+
+} // namespace
+
+
+MediaStreamRendererFactory::MediaStreamRendererFactory() {
+}
+
+MediaStreamRendererFactory::~MediaStreamRendererFactory() {
+}
+
+scoped_refptr<VideoFrameProvider>
+MediaStreamRendererFactory::GetVideoFrameProvider(
+ const GURL& url,
+ const base::Closure& error_cb,
+ const VideoFrameProvider::RepaintCB& repaint_cb) {
+ blink::WebMediaStream web_stream =
+ blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(url);
+ DCHECK(!web_stream.isNull());
+
+ DVLOG(1) << "MediaStreamRendererFactory::GetVideoFrameProvider stream:"
+ << base::UTF16ToUTF8(web_stream.id());
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ web_stream.videoTracks(video_tracks);
+ if (video_tracks.isEmpty() ||
+ !MediaStreamVideoTrack::GetTrack(video_tracks[0])) {
+ return NULL;
+ }
+
+ return new RTCVideoRenderer(video_tracks[0], error_cb, repaint_cb);
+}
+
+scoped_refptr<MediaStreamAudioRenderer>
+MediaStreamRendererFactory::GetAudioRenderer(
+ const GURL& url, int render_view_id, int render_frame_id) {
+ blink::WebMediaStream web_stream =
+ blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(url);
+
+ if (web_stream.isNull() || !web_stream.extraData())
+ return NULL; // This is not a valid stream.
+
+ DVLOG(1) << "MediaStreamRendererFactory::GetAudioRenderer stream:"
+ << base::UTF16ToUTF8(web_stream.id());
+
+ MediaStream* native_stream = MediaStream::GetMediaStream(web_stream);
+
+ // TODO(tommi): MediaStreams do not have a 'local or not' concept.
+ // Tracks _might_, but even so, we need to fix the data flow so that
+ // it works the same way for all track implementations, local, remote or what
+ // have you.
+ // In this function, we should simply create a renderer object that receives
+ // and mixes audio from all the tracks that belong to the media stream.
+ // We need to remove the |is_local| property from MediaStreamExtraData since
+ // this concept is peerconnection specific (is a previously recorded stream
+ // local or remote?).
+ if (native_stream->is_local()) {
+ // Create the local audio renderer if the stream contains audio tracks.
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ web_stream.audioTracks(audio_tracks);
+ if (audio_tracks.isEmpty())
+ return NULL;
+
+ // TODO(xians): Add support for the case where the media stream contains
+ // multiple audio tracks.
+ return CreateLocalAudioRenderer(audio_tracks[0], render_view_id,
+ render_frame_id);
+ }
+
+ webrtc::MediaStreamInterface* stream =
+ MediaStream::GetAdapter(web_stream);
+ if (stream->GetAudioTracks().empty())
+ return NULL;
+
+ // This is a remote WebRTC media stream.
+ WebRtcAudioDeviceImpl* audio_device =
+ GetPeerConnectionDependencyFactory()->GetWebRtcAudioDevice();
+
+ // Share the existing renderer if any, otherwise create a new one.
+ scoped_refptr<WebRtcAudioRenderer> renderer(audio_device->renderer());
+ if (!renderer.get()) {
+ renderer = CreateRemoteAudioRenderer(stream, render_view_id,
+ render_frame_id);
+
+ if (renderer.get() && !audio_device->SetAudioRenderer(renderer.get()))
+ renderer = NULL;
+ }
+
+ return renderer.get() ?
+ renderer->CreateSharedAudioRendererProxy(stream) : NULL;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_renderer_factory.h b/chromium/content/renderer/media/media_stream_renderer_factory.h
new file mode 100644
index 00000000000..eb07e1cae4f
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_renderer_factory.h
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_RENDERER_FACTORY_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_RENDERER_FACTORY_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "content/common/content_export.h"
+#include "content/renderer/media/media_stream_audio_renderer.h"
+#include "content/renderer/media/video_frame_provider.h"
+#include "url/gurl.h"
+
+namespace content {
+
+// MediaStreamRendererFactory is used by WebMediaPlayerMS to create audio and
+// video feeds from a MediaStream provided an URL.
+// The factory methods are virtual in order for blink layouttests to be able to
+// override them.
+class CONTENT_EXPORT MediaStreamRendererFactory {
+ public:
+ MediaStreamRendererFactory();
+ virtual ~MediaStreamRendererFactory();
+
+ virtual scoped_refptr<VideoFrameProvider> GetVideoFrameProvider(
+ const GURL& url,
+ const base::Closure& error_cb,
+ const VideoFrameProvider::RepaintCB& repaint_cb);
+
+ virtual scoped_refptr<MediaStreamAudioRenderer> GetAudioRenderer(
+ const GURL& url,
+ int render_view_id,
+ int render_frame_id);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamRendererFactory);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_RENDERER_FACTORY_H_
diff --git a/chromium/content/renderer/media/media_stream_source.cc b/chromium/content/renderer/media/media_stream_source.cc
new file mode 100644
index 00000000000..8db26024403
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_source.cc
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_source.h"
+
+#include "base/callback_helpers.h"
+
+namespace content {
+
+const char MediaStreamSource::kSourceId[] = "sourceId";
+
+MediaStreamSource::MediaStreamSource() {
+}
+
+MediaStreamSource::~MediaStreamSource() {}
+
+void MediaStreamSource::StopSource() {
+ DoStopSource();
+ if (!stop_callback_.is_null())
+ base::ResetAndReturn(&stop_callback_).Run(owner());
+
+ owner().setReadyState(blink::WebMediaStreamSource::ReadyStateEnded);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_source.h b/chromium/content/renderer/media/media_stream_source.h
new file mode 100644
index 00000000000..18821c38cf5
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_source.h
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "content/common/content_export.h"
+#include "content/common/media/media_stream_options.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+
+namespace blink {
+class WebMediaStreamTrack;
+} // namespace blink
+
+namespace content {
+
+class CONTENT_EXPORT MediaStreamSource
+ : NON_EXPORTED_BASE(public blink::WebMediaStreamSource::ExtraData) {
+ public:
+ typedef base::Callback<void(const blink::WebMediaStreamSource& source)>
+ SourceStoppedCallback;
+
+ typedef base::Callback<void(MediaStreamSource* source,
+ bool success)> ConstraintsCallback;
+
+ // Source constraints key for
+ // http://dev.w3.org/2011/webrtc/editor/getusermedia.html.
+ static const char kSourceId[];
+
+ MediaStreamSource();
+ virtual ~MediaStreamSource();
+
+ // Returns device information about a source that has been created by a
+ // JavaScript call to GetUserMedia, e.g., a camera or microphone.
+ const StreamDeviceInfo& device_info() const {
+ return device_info_;
+ }
+
+ // Stops the source (by calling DoStopSource()). This sets the
+ // WebMediaStreamSource::readyState to ended, triggers the |stop_callback_|
+ // if set. All pointers to this object are invalid after calling this.
+ void StopSource();
+
+ void ResetSourceStoppedCallback() {
+ DCHECK(!stop_callback_.is_null());
+ stop_callback_.Reset();
+ }
+
+ protected:
+ // Called when StopSource is called. It allows derived classes to implement
+ // its own Stop method.
+ virtual void DoStopSource() = 0;
+
+ // Sets device information about a source that has been created by a
+ // JavaScript call to GetUserMedia. F.E a camera or microphone.
+ void SetDeviceInfo(const StreamDeviceInfo& device_info) {
+ device_info_ = device_info;
+ }
+
+ // Sets a callback that will be triggered when StopSource is called.
+ void SetStopCallback(const SourceStoppedCallback& stop_callback) {
+ DCHECK(stop_callback_.is_null());
+ stop_callback_ = stop_callback;
+ }
+
+ private:
+ StreamDeviceInfo device_info_;
+ SourceStoppedCallback stop_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_H_
diff --git a/chromium/content/renderer/media/media_stream_source_extra_data.h b/chromium/content/renderer/media/media_stream_source_extra_data.h
deleted file mode 100644
index 4b88c147f8b..00000000000
--- a/chromium/content/renderer/media/media_stream_source_extra_data.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_EXTRA_DATA_H_
-#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_EXTRA_DATA_H_
-
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "content/common/content_export.h"
-#include "content/common/media/media_stream_options.h"
-#include "content/renderer/media/media_stream_source_observer.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-
-namespace content {
-
-class CONTENT_EXPORT MediaStreamSourceExtraData
- : NON_EXPORTED_BASE(public blink::WebMediaStreamSource::ExtraData) {
- public:
- typedef base::Callback<void(const blink::WebMediaStreamSource& source)>
- SourceStopCallback;
-
- MediaStreamSourceExtraData(const StreamDeviceInfo& device_info,
- const SourceStopCallback& stop_callback);
- MediaStreamSourceExtraData();
- virtual ~MediaStreamSourceExtraData();
-
- // Return device information about the camera or microphone.
- const StreamDeviceInfo& device_info() const {
- return device_info_;
- }
-
- void SetVideoSource(webrtc::VideoSourceInterface* source) {
- video_source_ = source;
- source_observer_.reset(new MediaStreamSourceObserver(source, this));
- }
-
- void SetLocalAudioSource(webrtc::AudioSourceInterface* source) {
- local_audio_source_ = source;
- // TODO(perkj): Implement a local source observer for audio.
- // See |source_observer_|.
- }
-
- void SetAudioCapturer(WebRtcAudioCapturer* capturer) {
- DCHECK(!audio_capturer_);
- audio_capturer_ = capturer;
- }
-
- WebRtcAudioCapturer* GetAudioCapturer() const {
- // TODO(perkj): |audio_capturer_| can currently be reconfigured to use
- // another microphone even after it has been created since only one
- // capturer is supported. See issue crbug/262117.
- // It would make more sense if a WebRtcAudioCapturer represent one and only
- // one audio source.
- if (audio_capturer_ &&
- device_info_.session_id == audio_capturer_->session_id()) {
- return audio_capturer_;
- }
- return NULL;
- }
-
- webrtc::VideoSourceInterface* video_source() { return video_source_.get(); }
- webrtc::AudioSourceInterface* local_audio_source() {
- return local_audio_source_.get();
- }
-
- void OnLocalSourceStop();
-
- private:
- StreamDeviceInfo device_info_;
-
- scoped_refptr<webrtc::VideoSourceInterface> video_source_;
-
- // This member holds an instance of webrtc::LocalAudioSource. This is used
- // as a container for audio options.
- // TODO(hclam): This should be merged with |audio_source_| such that it
- // carries audio options.
- scoped_refptr<webrtc::AudioSourceInterface> local_audio_source_;
- scoped_ptr<MediaStreamSourceObserver> source_observer_;
-
- scoped_refptr<WebRtcAudioCapturer> audio_capturer_;
-
- SourceStopCallback stop_callback_;
-
- DISALLOW_COPY_AND_ASSIGN(MediaStreamSourceExtraData);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_EXTRA_DATA_H_
diff --git a/chromium/content/renderer/media/media_stream_source_observer.cc b/chromium/content/renderer/media/media_stream_source_observer.cc
deleted file mode 100644
index 319327fa72d..00000000000
--- a/chromium/content/renderer/media/media_stream_source_observer.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/media_stream_source_observer.h"
-
-#include "base/logging.h"
-#include "content/renderer/media/media_stream_source_extra_data.h"
-
-namespace content {
-
-MediaStreamSourceObserver::MediaStreamSourceObserver(
- webrtc::MediaSourceInterface* webrtc_source,
- MediaStreamSourceExtraData* extra_data)
- : state_(webrtc_source->state()),
- webrtc_source_(webrtc_source),
- extra_data_(extra_data) {
- webrtc_source_->RegisterObserver(this);
-}
-
-MediaStreamSourceObserver::~MediaStreamSourceObserver() {
- DCHECK(CalledOnValidThread());
- if (webrtc_source_.get())
- webrtc_source_->UnregisterObserver(this);
-}
-
-void MediaStreamSourceObserver::OnChanged() {
- DCHECK(CalledOnValidThread());
- // There should be no more notification after kEnded.
- DCHECK(webrtc_source_.get() != NULL);
-
- webrtc::MediaSourceInterface::SourceState state = webrtc_source_->state();
- if (state == state_)
- return;
- state_ = state;
- blink::WebMediaStreamSource webkit_source(extra_data_->owner());
-
- switch (state) {
- case webrtc::MediaSourceInterface::kInitializing:
- // Ignore the kInitializing state since there is no match in
- // WebMediaStreamSource::ReadyState.
- break;
- case webrtc::MediaSourceInterface::kLive:
- webkit_source.setReadyState(
- blink::WebMediaStreamSource::ReadyStateLive);
- break;
- case webrtc::MediaSourceInterface::kMuted:
- webkit_source.setReadyState(
- blink::WebMediaStreamSource::ReadyStateMuted);
- break;
- case webrtc::MediaSourceInterface::kEnded:
- webkit_source.setReadyState(
- blink::WebMediaStreamSource::ReadyStateEnded);
- webrtc_source_->UnregisterObserver(this);
- webrtc_source_ = NULL;
- break;
- default:
- NOTREACHED();
- break;
- }
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_source_observer.h b/chromium/content/renderer/media/media_stream_source_observer.h
deleted file mode 100644
index f7e0b60c421..00000000000
--- a/chromium/content/renderer/media/media_stream_source_observer.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_OBSERVER_H_
-#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_OBSERVER_H_
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/non_thread_safe.h"
-#include "content/common/content_export.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-
-namespace content {
-
-class MediaStreamSourceExtraData;
-
-// MediaStreamSourceObserver listens to events on MediaSourceInterface and
-// notify WebKit. It will be owned by MediaStreamSourceExtraData.
-class CONTENT_EXPORT MediaStreamSourceObserver
- : NON_EXPORTED_BASE(public webrtc::ObserverInterface),
- NON_EXPORTED_BASE(public base::NonThreadSafe) {
- public:
- MediaStreamSourceObserver(webrtc::MediaSourceInterface* webrtc_source,
- MediaStreamSourceExtraData* extra_data);
- virtual ~MediaStreamSourceObserver();
-
- private:
- // webrtc::ObserverInterface implementation.
- virtual void OnChanged() OVERRIDE;
-
- webrtc::MediaSourceInterface::SourceState state_;
- scoped_refptr<webrtc::MediaSourceInterface> webrtc_source_;
- MediaStreamSourceExtraData* extra_data_;
-
- DISALLOW_COPY_AND_ASSIGN(MediaStreamSourceObserver);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_SOURCE_OBSERVER_H_
diff --git a/chromium/content/renderer/media/media_stream_track.cc b/chromium/content/renderer/media/media_stream_track.cc
new file mode 100644
index 00000000000..529bc384e89
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_track.cc
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_track.h"
+
+#include "base/logging.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+MediaStreamTrack* MediaStreamTrack::GetTrack(
+ const blink::WebMediaStreamTrack& track) {
+ if (track.isNull())
+ return NULL;
+ return static_cast<MediaStreamTrack*>(track.extraData());
+}
+
+MediaStreamTrack::MediaStreamTrack(
+ webrtc::MediaStreamTrackInterface* track, bool is_local_track)
+ : track_(track),
+ is_local_track_(is_local_track) {
+}
+
+MediaStreamTrack::~MediaStreamTrack() {
+}
+
+void MediaStreamTrack::SetEnabled(bool enabled) {
+ if (track_)
+ track_->set_enabled(enabled);
+}
+
+void MediaStreamTrack::Stop() {
+ // Stop means that a track should be stopped permanently. But
+ // since there is no proper way of doing that on a remote track, we can
+ // at least disable the track. Blink will not call down to the content layer
+ // after a track has been stopped.
+ if (track_)
+ track_->set_enabled(false);
+}
+
+webrtc::AudioTrackInterface* MediaStreamTrack::GetAudioAdapter() {
+ return static_cast<webrtc::AudioTrackInterface*>(track_.get());
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_track_extra_data.h b/chromium/content/renderer/media/media_stream_track.h
index dbc25b46dec..456005d4d05 100644
--- a/chromium/content/renderer/media/media_stream_track_extra_data.h
+++ b/chromium/content/renderer/media/media_stream_track.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,28 +12,43 @@
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
namespace webrtc {
+class AudioTrackInterface;
class MediaStreamTrackInterface;
} // namespace webrtc
namespace content {
-class CONTENT_EXPORT MediaStreamTrackExtraData
+// MediaStreamTrack is a Chrome representation of blink::WebMediaStreamTrack.
+// It is owned by blink::WebMediaStreamTrack as
+// blink::WebMediaStreamTrack::ExtraData.
+class CONTENT_EXPORT MediaStreamTrack
: NON_EXPORTED_BASE(public blink::WebMediaStreamTrack::ExtraData) {
public:
- MediaStreamTrackExtraData(webrtc::MediaStreamTrackInterface* track,
- bool is_local_track);
- virtual ~MediaStreamTrackExtraData();
+ MediaStreamTrack(webrtc::MediaStreamTrackInterface* track,
+ bool is_local_track);
+ virtual ~MediaStreamTrack();
+
+ static MediaStreamTrack* GetTrack(
+ const blink::WebMediaStreamTrack& track);
+
+ // If a subclass overrides this method it has to call the base class.
+ virtual void SetEnabled(bool enabled);
+
+ // TODO(xians): Make this pure virtual when Stop[Track] has been
+ // implemented for remote audio tracks.
+ virtual void Stop();
+
+ virtual webrtc::AudioTrackInterface* GetAudioAdapter();
- const scoped_refptr<webrtc::MediaStreamTrackInterface>& track() const {
- return track_;
- }
bool is_local_track () const { return is_local_track_; }
- private:
+ protected:
scoped_refptr<webrtc::MediaStreamTrackInterface> track_;
+
+ private:
const bool is_local_track_;
- DISALLOW_COPY_AND_ASSIGN(MediaStreamTrackExtraData);
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamTrack);
};
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_track_extra_data.cc b/chromium/content/renderer/media/media_stream_track_extra_data.cc
deleted file mode 100644
index 0fbb29496b3..00000000000
--- a/chromium/content/renderer/media/media_stream_track_extra_data.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/media_stream_track_extra_data.h"
-
-#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
-
-namespace content {
-
-MediaStreamTrackExtraData::MediaStreamTrackExtraData(
- webrtc::MediaStreamTrackInterface* track, bool is_local_track)
- : track_(track),
- is_local_track_(is_local_track) {
-}
-
-MediaStreamTrackExtraData::~MediaStreamTrackExtraData() {
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_video_capture_source_unittest.cc b/chromium/content/renderer/media/media_stream_video_capture_source_unittest.cc
new file mode 100644
index 00000000000..fca4562a93d
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_capture_source_unittest.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/child/child_process.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "content/renderer/media/media_stream_video_capturer_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "media/base/bind_to_current_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+class MockVideoCapturerDelegate : public VideoCapturerDelegate {
+ public:
+ explicit MockVideoCapturerDelegate(const StreamDeviceInfo& device_info)
+ : VideoCapturerDelegate(device_info) {}
+
+ MOCK_METHOD3(StartCapture,
+ void(const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& new_frame_callback,
+ const RunningCallback& running_callback));
+ MOCK_METHOD0(StopCapture, void());
+
+ private:
+ virtual ~MockVideoCapturerDelegate() {}
+};
+
+class MediaStreamVideoCapturerSourceTest : public testing::Test {
+ public:
+ MediaStreamVideoCapturerSourceTest()
+ : child_process_(new ChildProcess()),
+ source_(NULL) {
+ }
+
+ void InitWithDeviceInfo(const StreamDeviceInfo& device_info) {
+ delegate_ = new MockVideoCapturerDelegate(device_info);
+ source_ = new MediaStreamVideoCapturerSource(
+ device_info,
+ MediaStreamSource::SourceStoppedCallback(),
+ delegate_);
+
+ webkit_source_.initialize(base::UTF8ToUTF16("dummy_source_id"),
+ blink::WebMediaStreamSource::TypeVideo,
+ base::UTF8ToUTF16("dummy_source_name"));
+ webkit_source_.setExtraData(source_);
+ }
+
+ blink::WebMediaStreamTrack StartSource() {
+ MockMediaConstraintFactory factory;
+ bool enabled = true;
+ // CreateVideoTrack will trigger OnConstraintsApplied.
+ return MediaStreamVideoTrack::CreateVideoTrack(
+ source_, factory.CreateWebMediaConstraints(),
+ base::Bind(
+ &MediaStreamVideoCapturerSourceTest::OnConstraintsApplied,
+ base::Unretained(this)),
+ enabled);
+ }
+
+ MockVideoCapturerDelegate& mock_delegate() {
+ return *static_cast<MockVideoCapturerDelegate*>(delegate_.get());
+ }
+
+ protected:
+ void OnConstraintsApplied(MediaStreamSource* source, bool success) {
+ }
+
+ base::MessageLoopForUI message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
+ blink::WebMediaStreamSource webkit_source_;
+ MediaStreamVideoCapturerSource* source_; // owned by webkit_source.
+ scoped_refptr<VideoCapturerDelegate> delegate_;
+};
+
+TEST_F(MediaStreamVideoCapturerSourceTest, TabCaptureAllowResolutionChange) {
+ StreamDeviceInfo device_info;
+ device_info.device.type = MEDIA_TAB_VIDEO_CAPTURE;
+ InitWithDeviceInfo(device_info);
+
+ EXPECT_CALL(mock_delegate(), StartCapture(
+ testing::Field(&media::VideoCaptureParams::allow_resolution_change, true),
+ testing::_,
+ testing::_)).Times(1);
+ blink::WebMediaStreamTrack track = StartSource();
+ // When the track goes out of scope, the source will be stopped.
+ EXPECT_CALL(mock_delegate(), StopCapture());
+}
+
+TEST_F(MediaStreamVideoCapturerSourceTest,
+ DesktopCaptureAllowResolutionChange) {
+ StreamDeviceInfo device_info;
+ device_info.device.type = MEDIA_DESKTOP_VIDEO_CAPTURE;
+ InitWithDeviceInfo(device_info);
+
+ EXPECT_CALL(mock_delegate(), StartCapture(
+ testing::Field(&media::VideoCaptureParams::allow_resolution_change, true),
+ testing::_,
+ testing::_)).Times(1);
+ blink::WebMediaStreamTrack track = StartSource();
+ // When the track goes out of scope, the source will be stopped.
+ EXPECT_CALL(mock_delegate(), StopCapture());
+}
+
+TEST_F(MediaStreamVideoCapturerSourceTest, Ended) {
+ StreamDeviceInfo device_info;
+ device_info.device.type = MEDIA_DESKTOP_VIDEO_CAPTURE;
+ delegate_ = new VideoCapturerDelegate(device_info);
+ source_ = new MediaStreamVideoCapturerSource(
+ device_info,
+ MediaStreamSource::SourceStoppedCallback(),
+ delegate_);
+ webkit_source_.initialize(base::UTF8ToUTF16("dummy_source_id"),
+ blink::WebMediaStreamSource::TypeVideo,
+ base::UTF8ToUTF16("dummy_source_name"));
+ webkit_source_.setExtraData(source_);
+ blink::WebMediaStreamTrack track = StartSource();
+ message_loop_.RunUntilIdle();
+
+ delegate_->OnStateUpdateOnRenderThread(VIDEO_CAPTURE_STATE_STARTED);
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive,
+ webkit_source_.readyState());
+
+ delegate_->OnStateUpdateOnRenderThread(VIDEO_CAPTURE_STATE_ERROR);
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded,
+ webkit_source_.readyState());
+}
+
+class FakeMediaStreamVideoSink : public MediaStreamVideoSink {
+ public:
+ FakeMediaStreamVideoSink(base::TimeTicks* capture_time,
+ base::Closure got_frame_cb)
+ : capture_time_(capture_time),
+ got_frame_cb_(got_frame_cb) {
+ }
+
+ void OnVideoFrame(const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& capture_time) {
+ *capture_time_ = capture_time;
+ base::ResetAndReturn(&got_frame_cb_).Run();
+ }
+
+ private:
+ base::TimeTicks* capture_time_;
+ base::Closure got_frame_cb_;
+};
+
+TEST_F(MediaStreamVideoCapturerSourceTest, CaptureTime) {
+ StreamDeviceInfo device_info;
+ device_info.device.type = MEDIA_DESKTOP_VIDEO_CAPTURE;
+ InitWithDeviceInfo(device_info);
+
+ VideoCaptureDeliverFrameCB deliver_frame_cb;
+ VideoCapturerDelegate::RunningCallback running_cb;
+
+ EXPECT_CALL(mock_delegate(), StartCapture(
+ testing::_,
+ testing::_,
+ testing::_))
+ .Times(1)
+ .WillOnce(testing::DoAll(testing::SaveArg<1>(&deliver_frame_cb),
+ testing::SaveArg<2>(&running_cb)));
+ EXPECT_CALL(mock_delegate(), StopCapture());
+ blink::WebMediaStreamTrack track = StartSource();
+ running_cb.Run(true);
+
+ base::RunLoop run_loop;
+ base::TimeTicks reference_capture_time =
+ base::TimeTicks::FromInternalValue(60013);
+ base::TimeTicks capture_time;
+ FakeMediaStreamVideoSink fake_sink(
+ &capture_time,
+ media::BindToCurrentLoop(run_loop.QuitClosure()));
+ FakeMediaStreamVideoSink::AddToVideoTrack(
+ &fake_sink,
+ base::Bind(&FakeMediaStreamVideoSink::OnVideoFrame,
+ base::Unretained(&fake_sink)),
+ track);
+ child_process_->io_message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(deliver_frame_cb,
+ media::VideoFrame::CreateBlackFrame(gfx::Size(2, 2)),
+ media::VideoCaptureFormat(),
+ reference_capture_time));
+ run_loop.Run();
+ FakeMediaStreamVideoSink::RemoveFromVideoTrack(&fake_sink, track);
+ EXPECT_EQ(reference_capture_time, capture_time);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_video_capturer_source.cc b/chromium/content/renderer/media/media_stream_video_capturer_source.cc
new file mode 100644
index 00000000000..f4555afb4f0
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_capturer_source.cc
@@ -0,0 +1,247 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_video_capturer_source.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/location.h"
+#include "content/renderer/media/video_capture_impl_manager.h"
+#include "content/renderer/render_thread_impl.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/video_frame.h"
+
+namespace {
+
+struct SourceVideoResolution {
+ int width;
+ int height;
+};
+
+// Resolutions used if the source doesn't support capability enumeration.
+const SourceVideoResolution kVideoResolutions[] = {{1920, 1080},
+ {1280, 720},
+ {960, 720},
+ {640, 480},
+ {640, 360},
+ {320, 240},
+ {320, 180}};
+// Frame rates for sources with no support for capability enumeration.
+const int kVideoFrameRates[] = {30, 60};
+
+} // namespace
+
+namespace content {
+
+VideoCapturerDelegate::VideoCapturerDelegate(
+ const StreamDeviceInfo& device_info)
+ : session_id_(device_info.session_id),
+ is_screen_cast_(device_info.device.type == MEDIA_TAB_VIDEO_CAPTURE ||
+ device_info.device.type == MEDIA_DESKTOP_VIDEO_CAPTURE),
+ got_first_frame_(false) {
+ DVLOG(3) << "VideoCapturerDelegate::ctor";
+
+ // NULL in unit test.
+ if (RenderThreadImpl::current()) {
+ VideoCaptureImplManager* manager =
+ RenderThreadImpl::current()->video_capture_impl_manager();
+ if (manager)
+ release_device_cb_ = manager->UseDevice(session_id_);
+ }
+}
+
+VideoCapturerDelegate::~VideoCapturerDelegate() {
+ DVLOG(3) << "VideoCapturerDelegate::dtor";
+ if (!release_device_cb_.is_null())
+ release_device_cb_.Run();
+}
+
+void VideoCapturerDelegate::GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DVLOG(3) << "GetCurrentSupportedFormats("
+ << " { max_requested_height = " << max_requested_height << "})"
+ << " { max_requested_width = " << max_requested_width << "})";
+
+ if (is_screen_cast_) {
+ media::VideoCaptureFormats formats;
+ const int width = max_requested_width ?
+ max_requested_width : MediaStreamVideoSource::kDefaultWidth;
+ const int height = max_requested_height ?
+ max_requested_height : MediaStreamVideoSource::kDefaultHeight;
+ formats.push_back(
+ media::VideoCaptureFormat(
+ gfx::Size(width, height),
+ MediaStreamVideoSource::kDefaultFrameRate,
+ media::PIXEL_FORMAT_I420));
+ callback.Run(formats);
+ return;
+ }
+
+ // NULL in unit test.
+ if (!RenderThreadImpl::current())
+ return;
+ VideoCaptureImplManager* manager =
+ RenderThreadImpl::current()->video_capture_impl_manager();
+ if (!manager)
+ return;
+ DCHECK(source_formats_callback_.is_null());
+ source_formats_callback_ = callback;
+ manager->GetDeviceFormatsInUse(
+ session_id_,
+ media::BindToCurrentLoop(
+ base::Bind(
+ &VideoCapturerDelegate::OnDeviceFormatsInUseReceived, this)));
+}
+
+void VideoCapturerDelegate::StartCapture(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& new_frame_callback,
+ const RunningCallback& running_callback) {
+ DCHECK(params.requested_format.IsValid());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ running_callback_ = running_callback;
+ got_first_frame_ = false;
+
+ // NULL in unit test.
+ if (!RenderThreadImpl::current())
+ return;
+ VideoCaptureImplManager* manager =
+ RenderThreadImpl::current()->video_capture_impl_manager();
+ if (!manager)
+ return;
+ stop_capture_cb_ =
+ manager->StartCapture(
+ session_id_,
+ params,
+ media::BindToCurrentLoop(base::Bind(
+ &VideoCapturerDelegate::OnStateUpdateOnRenderThread, this)),
+ new_frame_callback);
+}
+
+void VideoCapturerDelegate::StopCapture() {
+ // Immediately make sure we don't provide more frames.
+ DVLOG(3) << "VideoCapturerDelegate::StopCapture()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!stop_capture_cb_.is_null()) {
+ base::ResetAndReturn(&stop_capture_cb_).Run();
+ }
+ running_callback_.Reset();
+ source_formats_callback_.Reset();
+}
+
+void VideoCapturerDelegate::OnStateUpdateOnRenderThread(
+ VideoCaptureState state) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(3) << "OnStateUpdateOnRenderThread state = " << state;
+ if (state == VIDEO_CAPTURE_STATE_STARTED && !running_callback_.is_null()) {
+ running_callback_.Run(true);
+ return;
+ }
+ if (state > VIDEO_CAPTURE_STATE_STARTED && !running_callback_.is_null()) {
+ base::ResetAndReturn(&running_callback_).Run(false);
+ }
+}
+
+void VideoCapturerDelegate::OnDeviceFormatsInUseReceived(
+ const media::VideoCaptureFormats& formats_in_use) {
+ DVLOG(3) << "OnDeviceFormatsInUseReceived: " << formats_in_use.size();
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // StopCapture() might have destroyed |source_formats_callback_| before
+ // arriving here.
+ if (source_formats_callback_.is_null())
+ return;
+ // If there are no formats in use, try to retrieve the whole list of
+ // supported form.
+ if (!formats_in_use.empty()) {
+ source_formats_callback_.Run(formats_in_use);
+ source_formats_callback_.Reset();
+ return;
+ }
+
+ // NULL in unit test.
+ if (!RenderThreadImpl::current())
+ return;
+ VideoCaptureImplManager* manager =
+ RenderThreadImpl::current()->video_capture_impl_manager();
+ if (!manager)
+ return;
+ manager->GetDeviceSupportedFormats(
+ session_id_,
+ media::BindToCurrentLoop(
+ base::Bind(
+ &VideoCapturerDelegate::OnDeviceSupportedFormatsEnumerated,
+ this)));
+}
+
+void VideoCapturerDelegate::OnDeviceSupportedFormatsEnumerated(
+ const media::VideoCaptureFormats& formats) {
+ DVLOG(3) << "OnDeviceSupportedFormatsEnumerated: " << formats.size()
+ << " received";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // StopCapture() might have destroyed |source_formats_callback_| before
+ // arriving here.
+ if (source_formats_callback_.is_null())
+ return;
+ if (formats.size()) {
+ source_formats_callback_.Run(formats);
+ } else {
+ // The capture device doesn't seem to support capability enumeration,
+ // compose a fallback list of capabilities.
+ media::VideoCaptureFormats default_formats;
+ for (size_t i = 0; i < arraysize(kVideoResolutions); ++i) {
+ for (size_t j = 0; j < arraysize(kVideoFrameRates); ++j) {
+ default_formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(kVideoResolutions[i].width, kVideoResolutions[i].height),
+ kVideoFrameRates[j], media::PIXEL_FORMAT_I420));
+ }
+ }
+ source_formats_callback_.Run(default_formats);
+ }
+ source_formats_callback_.Reset();
+}
+
+MediaStreamVideoCapturerSource::MediaStreamVideoCapturerSource(
+ const StreamDeviceInfo& device_info,
+ const SourceStoppedCallback& stop_callback,
+ const scoped_refptr<VideoCapturerDelegate>& delegate)
+ : delegate_(delegate) {
+ SetDeviceInfo(device_info);
+ SetStopCallback(stop_callback);
+}
+
+MediaStreamVideoCapturerSource::~MediaStreamVideoCapturerSource() {
+}
+
+void MediaStreamVideoCapturerSource::GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) {
+ delegate_->GetCurrentSupportedFormats(
+ max_requested_width,
+ max_requested_height,
+ callback);
+}
+
+void MediaStreamVideoCapturerSource::StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) {
+ media::VideoCaptureParams new_params(params);
+ if (device_info().device.type == MEDIA_TAB_VIDEO_CAPTURE ||
+ device_info().device.type == MEDIA_DESKTOP_VIDEO_CAPTURE) {
+ new_params.allow_resolution_change = true;
+ }
+ delegate_->StartCapture(
+ new_params,
+ frame_callback,
+ base::Bind(&MediaStreamVideoCapturerSource::OnStartDone,
+ base::Unretained(this)));
+}
+
+void MediaStreamVideoCapturerSource::StopSourceImpl() {
+ delegate_->StopCapture();
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_video_capturer_source.h b/chromium/content/renderer/media/media_stream_video_capturer_source.h
new file mode 100644
index 00000000000..dcffa02d371
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_capturer_source.h
@@ -0,0 +1,123 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_CAPTURER_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_CAPTURER_SOURCE_H_
+
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/threading/thread_checker.h"
+#include "content/common/media/video_capture.h"
+#include "content/renderer/media/media_stream_video_source.h"
+
+namespace content {
+
+// VideoCapturerDelegate is a delegate used by MediaStreamVideoCapturerSource
+// for local video capturer. It uses VideoCaptureImplManager to start / stop
+// and receive I420 frames from Chrome's video capture implementation.
+//
+// This is a render thread only object.
+class CONTENT_EXPORT VideoCapturerDelegate
+ : public base::RefCountedThreadSafe<VideoCapturerDelegate> {
+ public:
+ typedef base::Callback<void(bool running)> RunningCallback;
+
+ explicit VideoCapturerDelegate(
+ const StreamDeviceInfo& device_info);
+
+ // Collects the formats that can currently be used.
+ // |max_requested_height| and |max_requested_width| is used by Tab and Screen
+ // capture to decide what resolution to generate.
+ // |callback| is triggered when the formats have been collected.
+ virtual void GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback);
+
+ // Starts capturing frames using the resolution in |params|.
+ // |new_frame_callback| is triggered when a new video frame is available.
+ // If capturing is started successfully then |running_callback| will be
+ // called with a parameter of true.
+ // If capturing fails to start or stopped due to an external event then
+ // |running_callback| will be called with a parameter of false.
+ virtual void StartCapture(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& new_frame_callback,
+ const RunningCallback& running_callback);
+
+ // Stops capturing frames and clears all callbacks including the
+ // SupportedFormatsCallback callback.
+ virtual void StopCapture();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(MediaStreamVideoCapturerSourceTest, Ended);
+ friend class base::RefCountedThreadSafe<VideoCapturerDelegate>;
+ friend class MockVideoCapturerDelegate;
+
+ virtual ~VideoCapturerDelegate();
+
+ void OnStateUpdateOnRenderThread(VideoCaptureState state);
+ void OnDeviceFormatsInUseReceived(const media::VideoCaptureFormats& formats);
+ void OnDeviceSupportedFormatsEnumerated(
+ const media::VideoCaptureFormats& formats);
+
+ // The id identifies which video capture device is used for this video
+ // capture session.
+ media::VideoCaptureSessionId session_id_;
+ base::Closure release_device_cb_;
+ base::Closure stop_capture_cb_;
+
+ bool is_screen_cast_;
+ bool got_first_frame_;
+
+ // |running_callback| is provided to this class in StartCapture and must be
+ // valid until StopCapture is called.
+ RunningCallback running_callback_;
+
+ VideoCaptureDeviceFormatsCB source_formats_callback_;
+
+ // Bound to the render thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCapturerDelegate);
+};
+
+// Owned by WebMediaStreamSource in Blink as a representation of a video
+// stream coming from a camera.
+// This is a render thread only object. All methods must be called on the
+// render thread.
+class CONTENT_EXPORT MediaStreamVideoCapturerSource
+ : public MediaStreamVideoSource {
+ public:
+ MediaStreamVideoCapturerSource(
+ const StreamDeviceInfo& device_info,
+ const SourceStoppedCallback& stop_callback,
+ const scoped_refptr<VideoCapturerDelegate>& delegate);
+
+ virtual ~MediaStreamVideoCapturerSource();
+
+ protected:
+ // Implements MediaStreamVideoSource.
+ virtual void GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) OVERRIDE;
+
+ virtual void StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) OVERRIDE;
+
+ virtual void StopSourceImpl() OVERRIDE;
+
+ private:
+ // The delegate that provides video frames.
+ scoped_refptr<VideoCapturerDelegate> delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamVideoCapturerSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_CAPTURER_SOURCE_H_
diff --git a/chromium/content/renderer/media/media_stream_video_source.cc b/chromium/content/renderer/media/media_stream_video_source.cc
new file mode 100644
index 00000000000..65ece681c14
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_source.cc
@@ -0,0 +1,578 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_video_source.h"
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream_constraints_util.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/video_track_adapter.h"
+
+namespace content {
+
+// Constraint keys. Specified by draft-alvestrand-constraints-resolution-00b
+const char MediaStreamVideoSource::kMinAspectRatio[] = "minAspectRatio";
+const char MediaStreamVideoSource::kMaxAspectRatio[] = "maxAspectRatio";
+const char MediaStreamVideoSource::kMaxWidth[] = "maxWidth";
+const char MediaStreamVideoSource::kMinWidth[] = "minWidth";
+const char MediaStreamVideoSource::kMaxHeight[] = "maxHeight";
+const char MediaStreamVideoSource::kMinHeight[] = "minHeight";
+const char MediaStreamVideoSource::kMaxFrameRate[] = "maxFrameRate";
+const char MediaStreamVideoSource::kMinFrameRate[] = "minFrameRate";
+
+const char* kSupportedConstraints[] = {
+ MediaStreamVideoSource::kMaxAspectRatio,
+ MediaStreamVideoSource::kMinAspectRatio,
+ MediaStreamVideoSource::kMaxWidth,
+ MediaStreamVideoSource::kMinWidth,
+ MediaStreamVideoSource::kMaxHeight,
+ MediaStreamVideoSource::kMinHeight,
+ MediaStreamVideoSource::kMaxFrameRate,
+ MediaStreamVideoSource::kMinFrameRate,
+};
+
+const int MediaStreamVideoSource::kDefaultWidth = 640;
+const int MediaStreamVideoSource::kDefaultHeight = 480;
+const int MediaStreamVideoSource::kDefaultFrameRate = 30;
+
+namespace {
+
+// Google-specific key prefix. Constraints with this prefix are ignored if they
+// are unknown.
+const char kGooglePrefix[] = "goog";
+
+// Returns true if |constraint| has mandatory constraints.
+bool HasMandatoryConstraints(const blink::WebMediaConstraints& constraints) {
+ blink::WebVector<blink::WebMediaConstraint> mandatory_constraints;
+ constraints.getMandatoryConstraints(mandatory_constraints);
+ return !mandatory_constraints.isEmpty();
+}
+
+// Retrieve the desired max width and height from |constraints|. If not set,
+// the |desired_width| and |desired_height| are set to
+// std::numeric_limits<int>::max();
+// If either max width or height is set as a mandatory constraint, the optional
+// constraints are not checked.
+void GetDesiredMaxWidthAndHeight(const blink::WebMediaConstraints& constraints,
+ int* desired_width, int* desired_height) {
+ *desired_width = std::numeric_limits<int>::max();
+ *desired_height = std::numeric_limits<int>::max();
+
+ bool mandatory = GetMandatoryConstraintValueAsInteger(
+ constraints,
+ MediaStreamVideoSource::kMaxWidth,
+ desired_width);
+ mandatory |= GetMandatoryConstraintValueAsInteger(
+ constraints,
+ MediaStreamVideoSource::kMaxHeight,
+ desired_height);
+ if (mandatory)
+ return;
+
+ GetOptionalConstraintValueAsInteger(constraints,
+ MediaStreamVideoSource::kMaxWidth,
+ desired_width);
+ GetOptionalConstraintValueAsInteger(constraints,
+ MediaStreamVideoSource::kMaxHeight,
+ desired_height);
+}
+
+// Retrieve the desired max and min aspect ratio from |constraints|. If not set,
+// the |min_aspect_ratio| is set to 0 and |max_aspect_ratio| is set to
+// std::numeric_limits<double>::max();
+// If either min or max aspect ratio is set as a mandatory constraint, the
+// optional constraints are not checked.
+void GetDesiredMinAndMaxAspectRatio(
+ const blink::WebMediaConstraints& constraints,
+ double* min_aspect_ratio,
+ double* max_aspect_ratio) {
+ *min_aspect_ratio = 0;
+ *max_aspect_ratio = std::numeric_limits<double>::max();
+
+ bool mandatory = GetMandatoryConstraintValueAsDouble(
+ constraints,
+ MediaStreamVideoSource::kMinAspectRatio,
+ min_aspect_ratio);
+ mandatory |= GetMandatoryConstraintValueAsDouble(
+ constraints,
+ MediaStreamVideoSource::kMaxAspectRatio,
+ max_aspect_ratio);
+ if (mandatory)
+ return;
+
+ GetOptionalConstraintValueAsDouble(
+ constraints,
+ MediaStreamVideoSource::kMinAspectRatio,
+ min_aspect_ratio);
+ GetOptionalConstraintValueAsDouble(
+ constraints,
+ MediaStreamVideoSource::kMaxAspectRatio,
+ max_aspect_ratio);
+}
+
+// Returns true if |constraint| is fulfilled. |format| can be changed by a
+// constraint, e.g. the frame rate can be changed by setting maxFrameRate.
+bool UpdateFormatForConstraint(
+ const blink::WebMediaConstraint& constraint,
+ bool mandatory,
+ media::VideoCaptureFormat* format) {
+ DCHECK(format != NULL);
+
+ if (!format->IsValid())
+ return false;
+
+ std::string constraint_name = constraint.m_name.utf8();
+ std::string constraint_value = constraint.m_value.utf8();
+
+ if (constraint_name.find(kGooglePrefix) == 0) {
+ // These are actually options, not constraints, so they can be satisfied
+ // regardless of the format.
+ return true;
+ }
+
+ if (constraint_name == MediaStreamSource::kSourceId) {
+ // This is a constraint that doesn't affect the format.
+ return true;
+ }
+
+ // Ignore Chrome specific Tab capture constraints.
+ if (constraint_name == kMediaStreamSource ||
+ constraint_name == kMediaStreamSourceId)
+ return true;
+
+ if (constraint_name == MediaStreamVideoSource::kMinAspectRatio ||
+ constraint_name == MediaStreamVideoSource::kMaxAspectRatio) {
+ // These constraints are handled by cropping if the camera outputs the wrong
+ // aspect ratio.
+ double value;
+ return base::StringToDouble(constraint_value, &value);
+ }
+
+ double value = 0.0;
+ if (!base::StringToDouble(constraint_value, &value)) {
+ DLOG(WARNING) << "Can't parse MediaStream constraint. Name:"
+ << constraint_name << " Value:" << constraint_value;
+ return false;
+ }
+
+ if (constraint_name == MediaStreamVideoSource::kMinWidth) {
+ return (value <= format->frame_size.width());
+ } else if (constraint_name == MediaStreamVideoSource::kMaxWidth) {
+ return value > 0.0;
+ } else if (constraint_name == MediaStreamVideoSource::kMinHeight) {
+ return (value <= format->frame_size.height());
+ } else if (constraint_name == MediaStreamVideoSource::kMaxHeight) {
+ return value > 0.0;
+ } else if (constraint_name == MediaStreamVideoSource::kMinFrameRate) {
+ return (value <= format->frame_rate);
+ } else if (constraint_name == MediaStreamVideoSource::kMaxFrameRate) {
+ if (value == 0.0) {
+ // The frame rate is set by constraint.
+ // Don't allow 0 as frame rate if it is a mandatory constraint.
+ // Set the frame rate to 1 if it is not mandatory.
+ if (mandatory) {
+ return false;
+ } else {
+ value = 1.0;
+ }
+ }
+ format->frame_rate =
+ (format->frame_rate > value) ? value : format->frame_rate;
+ return true;
+ } else {
+ LOG(WARNING) << "Found unknown MediaStream constraint. Name:"
+ << constraint_name << " Value:" << constraint_value;
+ return false;
+ }
+}
+
+// Removes media::VideoCaptureFormats from |formats| that don't meet
+// |constraint|.
+void FilterFormatsByConstraint(
+ const blink::WebMediaConstraint& constraint,
+ bool mandatory,
+ media::VideoCaptureFormats* formats) {
+ DVLOG(3) << "FilterFormatsByConstraint("
+ << "{ constraint.m_name = " << constraint.m_name.utf8()
+ << " constraint.m_value = " << constraint.m_value.utf8()
+ << " mandatory = " << mandatory << "})";
+ media::VideoCaptureFormats::iterator format_it = formats->begin();
+ while (format_it != formats->end()) {
+ // Modify the format_it to fulfill the constraint if possible.
+ // Delete it otherwise.
+ if (!UpdateFormatForConstraint(constraint, mandatory, &(*format_it))) {
+ format_it = formats->erase(format_it);
+ } else {
+ ++format_it;
+ }
+ }
+}
+
+// Returns the media::VideoCaptureFormats that matches |constraints|.
+media::VideoCaptureFormats FilterFormats(
+ const blink::WebMediaConstraints& constraints,
+ const media::VideoCaptureFormats& supported_formats) {
+ if (constraints.isNull()) {
+ return supported_formats;
+ }
+
+ double max_aspect_ratio;
+ double min_aspect_ratio;
+ GetDesiredMinAndMaxAspectRatio(constraints,
+ &min_aspect_ratio,
+ &max_aspect_ratio);
+
+ if (min_aspect_ratio > max_aspect_ratio || max_aspect_ratio < 0.05f) {
+ DLOG(WARNING) << "Wrong requested aspect ratio.";
+ return media::VideoCaptureFormats();
+ }
+
+ int min_width = 0;
+ GetMandatoryConstraintValueAsInteger(constraints,
+ MediaStreamVideoSource::kMinWidth,
+ &min_width);
+ int min_height = 0;
+ GetMandatoryConstraintValueAsInteger(constraints,
+ MediaStreamVideoSource::kMinHeight,
+ &min_height);
+ int max_width;
+ int max_height;
+ GetDesiredMaxWidthAndHeight(constraints, &max_width, &max_height);
+
+ if (min_width > max_width || min_height > max_height)
+ return media::VideoCaptureFormats();
+
+ blink::WebVector<blink::WebMediaConstraint> mandatory;
+ blink::WebVector<blink::WebMediaConstraint> optional;
+ constraints.getMandatoryConstraints(mandatory);
+ constraints.getOptionalConstraints(optional);
+ media::VideoCaptureFormats candidates = supported_formats;
+ for (size_t i = 0; i < mandatory.size(); ++i)
+ FilterFormatsByConstraint(mandatory[i], true, &candidates);
+
+ if (candidates.empty())
+ return candidates;
+
+ // Ok - all mandatory checked and we still have candidates.
+ // Let's try filtering using the optional constraints. The optional
+ // constraints must be filtered in the order they occur in |optional|.
+ // But if a constraint produce zero candidates, the constraint is ignored and
+ // the next constraint is tested.
+ // http://dev.w3.org/2011/webrtc/editor/getusermedia.html#idl-def-Constraints
+ for (size_t i = 0; i < optional.size(); ++i) {
+ media::VideoCaptureFormats current_candidates = candidates;
+ FilterFormatsByConstraint(optional[i], false, &current_candidates);
+ if (!current_candidates.empty()) {
+ candidates = current_candidates;
+ }
+ }
+
+ // We have done as good as we can to filter the supported resolutions.
+ return candidates;
+}
+
+const media::VideoCaptureFormat& GetBestFormatBasedOnArea(
+ const media::VideoCaptureFormats& formats,
+ int area) {
+ media::VideoCaptureFormats::const_iterator it = formats.begin();
+ media::VideoCaptureFormats::const_iterator best_it = formats.begin();
+ int best_diff = std::numeric_limits<int>::max();
+ for (; it != formats.end(); ++it) {
+ int diff = abs(area - it->frame_size.width() * it->frame_size.height());
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_it = it;
+ }
+ }
+ return *best_it;
+}
+
+// Find the format that best matches the default video size.
+// This algorithm is chosen since a resolution must be picked even if no
+// constraints are provided. We don't just select the maximum supported
+// resolution since higher resolutions cost more in terms of complexity and
+// many cameras have lower frame rate and have more noise in the image at
+// their maximum supported resolution.
+void GetBestCaptureFormat(
+ const media::VideoCaptureFormats& formats,
+ const blink::WebMediaConstraints& constraints,
+ media::VideoCaptureFormat* capture_format) {
+ DCHECK(!formats.empty());
+
+ int max_width;
+ int max_height;
+ GetDesiredMaxWidthAndHeight(constraints, &max_width, &max_height);
+
+ *capture_format = GetBestFormatBasedOnArea(
+ formats,
+ std::min(max_width, MediaStreamVideoSource::kDefaultWidth) *
+ std::min(max_height, MediaStreamVideoSource::kDefaultHeight));
+}
+
+} // anonymous namespace
+
+// static
+MediaStreamVideoSource* MediaStreamVideoSource::GetVideoSource(
+ const blink::WebMediaStreamSource& source) {
+ return static_cast<MediaStreamVideoSource*>(source.extraData());
+}
+
+// static
+bool MediaStreamVideoSource::IsConstraintSupported(const std::string& name) {
+ for (size_t i = 0; i < arraysize(kSupportedConstraints); ++i) {
+ if (kSupportedConstraints[i] == name)
+ return true;
+ }
+ return false;
+}
+
+MediaStreamVideoSource::MediaStreamVideoSource()
+ : state_(NEW),
+ track_adapter_(new VideoTrackAdapter(
+ ChildProcess::current()->io_message_loop_proxy())),
+ weak_factory_(this) {
+}
+
+MediaStreamVideoSource::~MediaStreamVideoSource() {
+ DVLOG(3) << "~MediaStreamVideoSource()";
+}
+
+void MediaStreamVideoSource::AddTrack(
+ MediaStreamVideoTrack* track,
+ const VideoCaptureDeliverFrameCB& frame_callback,
+ const blink::WebMediaConstraints& constraints,
+ const ConstraintsCallback& callback) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!constraints.isNull());
+ DCHECK(std::find(tracks_.begin(), tracks_.end(),
+ track) == tracks_.end());
+ tracks_.push_back(track);
+
+ requested_constraints_.push_back(
+ RequestedConstraints(track, frame_callback, constraints, callback));
+
+ switch (state_) {
+ case NEW: {
+ // Tab capture and Screen capture needs the maximum requested height
+ // and width to decide on the resolution.
+ int max_requested_width = 0;
+ GetMandatoryConstraintValueAsInteger(constraints, kMaxWidth,
+ &max_requested_width);
+
+ int max_requested_height = 0;
+ GetMandatoryConstraintValueAsInteger(constraints, kMaxHeight,
+ &max_requested_height);
+
+ state_ = RETRIEVING_CAPABILITIES;
+ GetCurrentSupportedFormats(
+ max_requested_width,
+ max_requested_height,
+ base::Bind(&MediaStreamVideoSource::OnSupportedFormats,
+ weak_factory_.GetWeakPtr()));
+
+ break;
+ }
+ case STARTING:
+ case RETRIEVING_CAPABILITIES: {
+ // The |callback| will be triggered once the source has started or
+ // the capabilities have been retrieved.
+ break;
+ }
+ case ENDED:
+ case STARTED: {
+ // Currently, reconfiguring the source is not supported.
+ FinalizeAddTrack();
+ }
+ }
+}
+
+void MediaStreamVideoSource::RemoveTrack(MediaStreamVideoTrack* video_track) {
+ DCHECK(CalledOnValidThread());
+ std::vector<MediaStreamVideoTrack*>::iterator it =
+ std::find(tracks_.begin(), tracks_.end(), video_track);
+ DCHECK(it != tracks_.end());
+ tracks_.erase(it);
+
+ // Check if |video_track| is waiting for applying new constraints and remove
+ // the request in that case.
+ for (std::vector<RequestedConstraints>::iterator it =
+ requested_constraints_.begin();
+ it != requested_constraints_.end(); ++it) {
+ if (it->track == video_track) {
+ requested_constraints_.erase(it);
+ break;
+ }
+ }
+ // Call |frame_adapter_->RemoveTrack| here even if adding the track has
+ // failed and |frame_adapter_->AddCallback| has not been called.
+ track_adapter_->RemoveTrack(video_track);
+
+ if (tracks_.empty())
+ StopSource();
+}
+
+const scoped_refptr<base::MessageLoopProxy>&
+MediaStreamVideoSource::io_message_loop() const {
+ DCHECK(CalledOnValidThread());
+ return track_adapter_->io_message_loop();
+}
+
+void MediaStreamVideoSource::DoStopSource() {
+ DCHECK(CalledOnValidThread());
+ DVLOG(3) << "DoStopSource()";
+ if (state_ == ENDED)
+ return;
+ StopSourceImpl();
+ state_ = ENDED;
+ SetReadyState(blink::WebMediaStreamSource::ReadyStateEnded);
+}
+
+void MediaStreamVideoSource::OnSupportedFormats(
+ const media::VideoCaptureFormats& formats) {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(RETRIEVING_CAPABILITIES, state_);
+
+ supported_formats_ = formats;
+ if (!FindBestFormatWithConstraints(supported_formats_,
+ &current_format_)) {
+ SetReadyState(blink::WebMediaStreamSource::ReadyStateEnded);
+ // This object can be deleted after calling FinalizeAddTrack. See comment
+ // in the header file.
+ FinalizeAddTrack();
+ return;
+ }
+
+ state_ = STARTING;
+ DVLOG(3) << "Starting the capturer with"
+ << " width = " << current_format_.frame_size.width()
+ << " height = " << current_format_.frame_size.height()
+ << " frame rate = " << current_format_.frame_rate;
+
+ media::VideoCaptureParams params;
+ params.requested_format = current_format_;
+ StartSourceImpl(
+ params,
+ base::Bind(&VideoTrackAdapter::DeliverFrameOnIO, track_adapter_));
+}
+
+bool MediaStreamVideoSource::FindBestFormatWithConstraints(
+ const media::VideoCaptureFormats& formats,
+ media::VideoCaptureFormat* best_format) {
+ // Find the first constraints that we can fulfill.
+ for (std::vector<RequestedConstraints>::iterator request_it =
+ requested_constraints_.begin();
+ request_it != requested_constraints_.end(); ++request_it) {
+ const blink::WebMediaConstraints& requested_constraints =
+ request_it->constraints;
+
+ // If the source doesn't support capability enumeration it is still ok if
+ // no mandatory constraints have been specified. That just means that
+ // we will start with whatever format is native to the source.
+ if (formats.empty() && !HasMandatoryConstraints(requested_constraints)) {
+ *best_format = media::VideoCaptureFormat();
+ return true;
+ }
+ media::VideoCaptureFormats filtered_formats =
+ FilterFormats(requested_constraints, formats);
+ if (filtered_formats.size() > 0) {
+ // A request with constraints that can be fulfilled.
+ GetBestCaptureFormat(filtered_formats,
+ requested_constraints,
+ best_format);
+ return true;
+ }
+ }
+ return false;
+}
+
+void MediaStreamVideoSource::OnStartDone(bool success) {
+ DCHECK(CalledOnValidThread());
+ DVLOG(3) << "OnStartDone({success =" << success << "})";
+ if (success) {
+ DCHECK_EQ(STARTING, state_);
+ state_ = STARTED;
+ SetReadyState(blink::WebMediaStreamSource::ReadyStateLive);
+ } else {
+ state_ = ENDED;
+ SetReadyState(blink::WebMediaStreamSource::ReadyStateEnded);
+ StopSourceImpl();
+ }
+
+ // This object can be deleted after calling FinalizeAddTrack. See comment in
+ // the header file.
+ FinalizeAddTrack();
+}
+
+void MediaStreamVideoSource::FinalizeAddTrack() {
+ media::VideoCaptureFormats formats;
+ formats.push_back(current_format_);
+
+ std::vector<RequestedConstraints> callbacks;
+ callbacks.swap(requested_constraints_);
+ for (std::vector<RequestedConstraints>::iterator it = callbacks.begin();
+ it != callbacks.end(); ++it) {
+ // The track has been added successfully if the source has started and
+ // there are either no mandatory constraints and the source doesn't expose
+ // its format capabilities, or the constraints and the format match.
+ // For example, a remote source doesn't expose its format capabilities.
+ bool success =
+ state_ == STARTED &&
+ ((!current_format_.IsValid() && !HasMandatoryConstraints(
+ it->constraints)) ||
+ !FilterFormats(it->constraints, formats).empty());
+
+ if (success) {
+ int max_width;
+ int max_height;
+ GetDesiredMaxWidthAndHeight(it->constraints, &max_width, &max_height);
+ double max_aspect_ratio;
+ double min_aspect_ratio;
+ GetDesiredMinAndMaxAspectRatio(it->constraints,
+ &min_aspect_ratio,
+ &max_aspect_ratio);
+ track_adapter_->AddTrack(it->track,it->frame_callback,
+ max_width, max_height,
+ min_aspect_ratio, max_aspect_ratio);
+ }
+
+ DVLOG(3) << "FinalizeAddTrack() success " << success;
+
+ if (!it->callback.is_null())
+ it->callback.Run(this, success);
+ }
+}
+
+void MediaStreamVideoSource::SetReadyState(
+ blink::WebMediaStreamSource::ReadyState state) {
+ if (!owner().isNull()) {
+ owner().setReadyState(state);
+ }
+ for (std::vector<MediaStreamVideoTrack*>::iterator it = tracks_.begin();
+ it != tracks_.end(); ++it) {
+ (*it)->OnReadyStateChanged(state);
+ }
+}
+
+MediaStreamVideoSource::RequestedConstraints::RequestedConstraints(
+ MediaStreamVideoTrack* track,
+ const VideoCaptureDeliverFrameCB& frame_callback,
+ const blink::WebMediaConstraints& constraints,
+ const ConstraintsCallback& callback)
+ : track(track),
+ frame_callback(frame_callback),
+ constraints(constraints),
+ callback(callback) {
+}
+
+MediaStreamVideoSource::RequestedConstraints::~RequestedConstraints() {
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_video_source.h b/chromium/content/renderer/media/media_stream_video_source.h
new file mode 100644
index 00000000000..45d2b03082d
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_source.h
@@ -0,0 +1,180 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_SOURCE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/non_thread_safe.h"
+#include "content/common/content_export.h"
+#include "content/common/media/video_capture.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "content/renderer/media/media_stream_source.h"
+#include "media/base/video_frame.h"
+#include "media/video/capture/video_capture_types.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+
+namespace content {
+
+class MediaStreamVideoTrack;
+class VideoTrackAdapter;
+
+// MediaStreamVideoSource is an interface used for sending video frames to a
+// MediaStreamVideoTrack.
+// http://dev.w3.org/2011/webrtc/editor/getusermedia.html
+// The purpose of this base class is to be able to implement different
+// MediaStreaVideoSources such as local video capture, video sources received
+// on a PeerConnection or a source created in NaCl.
+// All methods calls will be done from the main render thread.
+//
+// When the first track is added to the source by calling AddTrack
+// the MediaStreamVideoSource implementation calls GetCurrentSupportedFormats.
+// the source implementation must call OnSupportedFormats.
+// MediaStreamVideoSource then match the constraints provided in AddTrack with
+// the formats and call StartSourceImpl. The source implementation must call
+// OnStartDone when the underlying source has been started or failed to start.
+class CONTENT_EXPORT MediaStreamVideoSource
+ : public MediaStreamSource,
+ NON_EXPORTED_BASE(public base::NonThreadSafe) {
+ public:
+ MediaStreamVideoSource();
+ virtual ~MediaStreamVideoSource();
+
+ // Returns the MediaStreamVideoSource object owned by |source|.
+ static MediaStreamVideoSource* GetVideoSource(
+ const blink::WebMediaStreamSource& source);
+
+ // Puts |track| in the registered tracks list.
+ void AddTrack(MediaStreamVideoTrack* track,
+ const VideoCaptureDeliverFrameCB& frame_callback,
+ const blink::WebMediaConstraints& constraints,
+ const ConstraintsCallback& callback);
+ void RemoveTrack(MediaStreamVideoTrack* track);
+
+ // Return true if |name| is a constraint supported by MediaStreamVideoSource.
+ static bool IsConstraintSupported(const std::string& name);
+
+ // Returns the MessageLoopProxy where video frames will be delivered on.
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop() const;
+
+ // Constraint keys used by a video source.
+ // Specified by draft-alvestrand-constraints-resolution-00b
+ static const char kMinAspectRatio[]; // minAspectRatio
+ static const char kMaxAspectRatio[]; // maxAspectRatio
+ static const char kMaxWidth[]; // maxWidth
+ static const char kMinWidth[]; // minWidthOnCaptureFormats
+ static const char kMaxHeight[]; // maxHeight
+ static const char kMinHeight[]; // minHeight
+ static const char kMaxFrameRate[]; // maxFrameRate
+ static const char kMinFrameRate[]; // minFrameRate
+
+ // Default resolution. If no constraints are specified and the delegate
+ // support it, this is the resolution that will be used.
+ static const int kDefaultWidth;
+ static const int kDefaultHeight;
+ static const int kDefaultFrameRate;
+
+ protected:
+ virtual void DoStopSource() OVERRIDE;
+
+ // Sets ready state and notifies the ready state to all registered tracks.
+ virtual void SetReadyState(blink::WebMediaStreamSource::ReadyState state);
+
+ // An implementation must fetch the formats that can currently be used by
+ // the source and call OnSupportedFormats when done.
+ // |max_requested_height| and |max_requested_width| is the max height and
+ // width set as a mandatory constraint if set when calling
+ // MediaStreamVideoSource::AddTrack. If max height and max width is not set
+ // |max_requested_height| and |max_requested_width| are 0.
+ virtual void GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) = 0;
+
+ // An implementation must start capture frames using the resolution in
+ // |params|. When the source has started or the source failed to start
+ // OnStartDone must be called. An implementation must call
+ // invoke |frame_callback| on the IO thread with the captured frames.
+ // TODO(perkj): pass a VideoCaptureFormats instead of VideoCaptureParams for
+ // subclasses to customize.
+ virtual void StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) = 0;
+ void OnStartDone(bool success);
+
+ // An implementation must immediately stop capture video frames and must not
+ // call OnSupportedFormats after this method has been called. After this
+ // method has been called, MediaStreamVideoSource may be deleted.
+ virtual void StopSourceImpl() = 0;
+
+ enum State {
+ NEW,
+ RETRIEVING_CAPABILITIES,
+ STARTING,
+ STARTED,
+ ENDED
+ };
+ State state() const { return state_; }
+
+ private:
+ void OnSupportedFormats(const media::VideoCaptureFormats& formats);
+
+ // Finds the first constraints in |requested_constraints_| that can be
+ // fulfilled. |best_format| is set to the video resolution that can be
+ // fulfilled.
+ bool FindBestFormatWithConstraints(
+ const media::VideoCaptureFormats& formats,
+ media::VideoCaptureFormat* best_format);
+
+ // Trigger all cached callbacks from AddTrack. AddTrack is successful
+ // if the capture delegate has started and the constraints provided in
+ // AddTrack match the format that was used to start the device.
+ // Note that it must be ok to delete the MediaStreamVideoSource object
+ // in the context of the callback. If gUM fail, the implementation will
+ // simply drop the references to the blink source and track which will lead
+ // to that this object is deleted.
+ void FinalizeAddTrack();
+
+ State state_;
+
+ media::VideoCaptureFormat current_format_;
+
+ struct RequestedConstraints {
+ RequestedConstraints(MediaStreamVideoTrack* track,
+ const VideoCaptureDeliverFrameCB& frame_callback,
+ const blink::WebMediaConstraints& constraints,
+ const ConstraintsCallback& callback);
+ ~RequestedConstraints();
+
+ MediaStreamVideoTrack* track;
+ VideoCaptureDeliverFrameCB frame_callback;
+ blink::WebMediaConstraints constraints;
+ ConstraintsCallback callback;
+ };
+ std::vector<RequestedConstraints> requested_constraints_;
+
+ media::VideoCaptureFormats supported_formats_;
+
+ // |track_adapter_| delivers video frames to the tracks on the IO-thread.
+ scoped_refptr<VideoTrackAdapter> track_adapter_;
+
+ // Tracks that currently are connected to this source.
+ std::vector<MediaStreamVideoTrack*> tracks_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MediaStreamVideoSource> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamVideoSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_SOURCE_H_
diff --git a/chromium/content/renderer/media/media_stream_video_source_unittest.cc b/chromium/content/renderer/media/media_stream_video_source_unittest.cc
new file mode 100644
index 00000000000..d47fae5bdd6
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_source_unittest.cc
@@ -0,0 +1,694 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "content/renderer/media/mock_media_stream_video_sink.h"
+#include "content/renderer/media/mock_media_stream_video_source.h"
+#include "media/base/video_frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
+class MediaStreamVideoSourceTest
+ : public ::testing::Test {
+ public:
+ MediaStreamVideoSourceTest()
+ : child_process_(new ChildProcess()),
+ number_of_successful_constraints_applied_(0),
+ number_of_failed_constraints_applied_(0),
+ mock_source_(new MockMediaStreamVideoSource(true)) {
+ media::VideoCaptureFormats formats;
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(1280, 720), 30, media::PIXEL_FORMAT_I420));
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(640, 480), 30, media::PIXEL_FORMAT_I420));
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(352, 288), 30, media::PIXEL_FORMAT_I420));
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(320, 240), 30, media::PIXEL_FORMAT_I420));
+ mock_source_->SetSupportedFormats(formats);
+ webkit_source_.initialize(base::UTF8ToUTF16("dummy_source_id"),
+ blink::WebMediaStreamSource::TypeVideo,
+ base::UTF8ToUTF16("dummy_source_name"));
+ webkit_source_.setExtraData(mock_source_);
+ }
+
+ protected:
+ // Create a track that's associated with |webkit_source_|.
+ blink::WebMediaStreamTrack CreateTrack(
+ const std::string& id,
+ const blink::WebMediaConstraints& constraints) {
+ bool enabled = true;
+ return MediaStreamVideoTrack::CreateVideoTrack(
+ mock_source_, constraints,
+ base::Bind(
+ &MediaStreamVideoSourceTest::OnConstraintsApplied,
+ base::Unretained(this)),
+ enabled);
+ }
+
+ blink::WebMediaStreamTrack CreateTrackAndStartSource(
+ const blink::WebMediaConstraints& constraints,
+ int expected_width,
+ int expected_height,
+ int expected_frame_rate) {
+ blink::WebMediaStreamTrack track = CreateTrack("123", constraints);
+
+ mock_source_->CompleteGetSupportedFormats();
+ const media::VideoCaptureParams& format = mock_source()->start_params();
+ EXPECT_EQ(expected_width, format.requested_format.frame_size.width());
+ EXPECT_EQ(expected_height, format.requested_format.frame_size.height());
+ EXPECT_EQ(expected_frame_rate, format.requested_format.frame_rate);
+
+ EXPECT_EQ(0, NumberOfSuccessConstraintsCallbacks());
+ mock_source_->StartMockedSource();
+ // Once the source has started successfully we expect that the
+ // ConstraintsCallback in MediaStreamSource::AddTrack completes.
+ EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
+ return track;
+ }
+
+ int NumberOfSuccessConstraintsCallbacks() const {
+ return number_of_successful_constraints_applied_;
+ }
+
+ int NumberOfFailedConstraintsCallbacks() const {
+ return number_of_failed_constraints_applied_;
+ }
+
+ MockMediaStreamVideoSource* mock_source() { return mock_source_; }
+
+ // Test that the source crops/scales to the requested width and
+ // height even though the camera delivers a larger frame.
+ void TestSourceCropFrame(int capture_width,
+ int capture_height,
+ const blink::WebMediaConstraints& constraints,
+ int expected_width,
+ int expected_height) {
+ // Expect the source to start capture with the supported resolution.
+ blink::WebMediaStreamTrack track =
+ CreateTrackAndStartSource(constraints, capture_width, capture_height,
+ 30);
+
+ MockMediaStreamVideoSink sink;
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink, sink.GetDeliverFrameCB(), track);
+ DeliverVideoFrameAndWaitForRenderer(capture_width, capture_height, &sink);
+ EXPECT_EQ(1, sink.number_of_frames());
+
+ // Expect the delivered frame to be cropped.
+ EXPECT_EQ(expected_height, sink.frame_size().height());
+ EXPECT_EQ(expected_width, sink.frame_size().width());
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+ }
+
+ void DeliverVideoFrameAndWaitForRenderer(int width, int height,
+ MockMediaStreamVideoSink* sink) {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ EXPECT_CALL(*sink, OnVideoFrame()).WillOnce(
+ RunClosure(quit_closure));
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::CreateBlackFrame(gfx::Size(width, height));
+ mock_source()->DeliverVideoFrame(frame);
+ run_loop.Run();
+ }
+
+ void DeliverVideoFrameAndWaitForTwoRenderers(
+ int width,
+ int height,
+ MockMediaStreamVideoSink* sink1,
+ MockMediaStreamVideoSink* sink2) {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ EXPECT_CALL(*sink1, OnVideoFrame());
+ EXPECT_CALL(*sink2, OnVideoFrame()).WillOnce(
+ RunClosure(quit_closure));
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::CreateBlackFrame(gfx::Size(width, height));
+ mock_source()->DeliverVideoFrame(frame);
+ run_loop.Run();
+ }
+
+ void TestTwoTracksWithDifferentConstraints(
+ const blink::WebMediaConstraints& constraints1,
+ const blink::WebMediaConstraints& constraints2,
+ int capture_width,
+ int capture_height,
+ int expected_width1,
+ int expected_height1,
+ int expected_width2,
+ int expected_height2) {
+ blink::WebMediaStreamTrack track1 =
+ CreateTrackAndStartSource(constraints1, capture_width, capture_height,
+ MediaStreamVideoSource::kDefaultFrameRate);
+
+ blink::WebMediaStreamTrack track2 =
+ CreateTrack("dummy", constraints2);
+
+ MockMediaStreamVideoSink sink1;
+ MediaStreamVideoSink::AddToVideoTrack(&sink1, sink1.GetDeliverFrameCB(),
+ track1);
+ EXPECT_EQ(0, sink1.number_of_frames());
+
+ MockMediaStreamVideoSink sink2;
+ MediaStreamVideoSink::AddToVideoTrack(&sink2, sink2.GetDeliverFrameCB(),
+ track2);
+ EXPECT_EQ(0, sink2.number_of_frames());
+
+ DeliverVideoFrameAndWaitForTwoRenderers(capture_width,
+ capture_height,
+ &sink1,
+ &sink2);
+
+ EXPECT_EQ(1, sink1.number_of_frames());
+ EXPECT_EQ(expected_width1, sink1.frame_size().width());
+ EXPECT_EQ(expected_height1, sink1.frame_size().height());
+
+ EXPECT_EQ(1, sink2.number_of_frames());
+ EXPECT_EQ(expected_width2, sink2.frame_size().width());
+ EXPECT_EQ(expected_height2, sink2.frame_size().height());
+
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink1, track1);
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink2, track2);
+ }
+
+ void ReleaseTrackAndSourceOnAddTrackCallback(
+ const blink::WebMediaStreamTrack& track_to_release) {
+ track_to_release_ = track_to_release;
+ }
+
+ private:
+ void OnConstraintsApplied(MediaStreamSource* source, bool success) {
+ ASSERT_EQ(source, webkit_source_.extraData());
+
+ if (success)
+ ++number_of_successful_constraints_applied_;
+ else
+ ++number_of_failed_constraints_applied_;
+
+ if (!track_to_release_.isNull()) {
+ mock_source_ = NULL;
+ webkit_source_.reset();
+ track_to_release_.reset();
+ }
+ }
+ base::MessageLoopForUI message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
+ blink::WebMediaStreamTrack track_to_release_;
+ int number_of_successful_constraints_applied_;
+ int number_of_failed_constraints_applied_;
+ blink::WebMediaStreamSource webkit_source_;
+ // |mock_source_| is owned by |webkit_source_|.
+ MockMediaStreamVideoSource* mock_source_;
+};
+
+TEST_F(MediaStreamVideoSourceTest, AddTrackAndStartSource) {
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ blink::WebMediaStreamTrack track = CreateTrack("123", constraints);
+ mock_source()->CompleteGetSupportedFormats();
+ mock_source()->StartMockedSource();
+ EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, AddTwoTracksBeforeSourceStarts) {
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ blink::WebMediaStreamTrack track1 = CreateTrack("123", constraints);
+ mock_source()->CompleteGetSupportedFormats();
+ blink::WebMediaStreamTrack track2 = CreateTrack("123", constraints);
+ EXPECT_EQ(0, NumberOfSuccessConstraintsCallbacks());
+ mock_source()->StartMockedSource();
+ EXPECT_EQ(2, NumberOfSuccessConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, AddTrackAfterSourceStarts) {
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ blink::WebMediaStreamTrack track1 = CreateTrack("123", constraints);
+ mock_source()->CompleteGetSupportedFormats();
+ mock_source()->StartMockedSource();
+ EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
+ blink::WebMediaStreamTrack track2 = CreateTrack("123", constraints);
+ EXPECT_EQ(2, NumberOfSuccessConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, AddTrackAndFailToStartSource) {
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ blink::WebMediaStreamTrack track = CreateTrack("123", constraints);
+ mock_source()->CompleteGetSupportedFormats();
+ mock_source()->FailToStartMockedSource();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, AddTwoTracksBeforeGetSupportedFormats) {
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ blink::WebMediaStreamTrack track1 = CreateTrack("123", constraints);
+ blink::WebMediaStreamTrack track2 = CreateTrack("123", constraints);
+ mock_source()->CompleteGetSupportedFormats();
+ mock_source()->StartMockedSource();
+ EXPECT_EQ(2, NumberOfSuccessConstraintsCallbacks());
+}
+
+// Test that the capture output is CIF if we set max constraints to CIF.
+// and the capture device support CIF.
+TEST_F(MediaStreamVideoSourceTest, MandatoryConstraintCif5Fps) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, 352);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, 288);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxFrameRate, 5);
+
+ CreateTrackAndStartSource(factory.CreateWebMediaConstraints(), 352, 288, 5);
+}
+
+// Test that the capture output is 720P if the camera support it and the
+// optional constraint is set to 720P.
+TEST_F(MediaStreamVideoSourceTest, MandatoryMinVgaOptional720P) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 640);
+ factory.AddMandatory(MediaStreamVideoSource::kMinHeight, 480);
+ factory.AddOptional(MediaStreamVideoSource::kMinWidth, 1280);
+ factory.AddOptional(MediaStreamVideoSource::kMinAspectRatio,
+ 1280.0 / 720);
+
+ CreateTrackAndStartSource(factory.CreateWebMediaConstraints(), 1280, 720, 30);
+}
+
+// Test that the capture output have aspect ratio 4:3 if a mandatory constraint
+// require it even if an optional constraint request a higher resolution
+// that don't have this aspect ratio.
+TEST_F(MediaStreamVideoSourceTest, MandatoryAspectRatio4To3) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 640);
+ factory.AddMandatory(MediaStreamVideoSource::kMinHeight, 480);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxAspectRatio,
+ 640.0 / 480);
+ factory.AddOptional(MediaStreamVideoSource::kMinWidth, 1280);
+
+ TestSourceCropFrame(1280, 720,
+ factory.CreateWebMediaConstraints(), 960, 720);
+}
+
+// Test that AddTrack succeeds if the mandatory min aspect ratio it set to 2.
+TEST_F(MediaStreamVideoSourceTest, MandatoryAspectRatio2) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinAspectRatio, 2);
+
+ TestSourceCropFrame(MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight,
+ factory.CreateWebMediaConstraints(), 640, 320);
+}
+
+TEST_F(MediaStreamVideoSourceTest, MinAspectRatioLargerThanMaxAspectRatio) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinAspectRatio, 2);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxAspectRatio, 1);
+ blink::WebMediaStreamTrack track = CreateTrack(
+ "123", factory.CreateWebMediaConstraints());
+ mock_source()->CompleteGetSupportedFormats();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, MaxAspectRatioZero) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional(MediaStreamVideoSource::kMaxAspectRatio, 0);
+ blink::WebMediaStreamTrack track = CreateTrack(
+ "123", factory.CreateWebMediaConstraints());
+ mock_source()->CompleteGetSupportedFormats();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, MinWidthLargerThanMaxWidth) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 640);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, 320);
+ blink::WebMediaStreamTrack track = CreateTrack(
+ "123", factory.CreateWebMediaConstraints());
+ mock_source()->CompleteGetSupportedFormats();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+TEST_F(MediaStreamVideoSourceTest, MinHeightLargerThanMaxHeight) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinHeight, 480);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, 360);
+ blink::WebMediaStreamTrack track = CreateTrack(
+ "123", factory.CreateWebMediaConstraints());
+ mock_source()->CompleteGetSupportedFormats();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+// Test that its safe to release the last reference of a blink track and the
+// source during the callback if adding a track succeeds.
+TEST_F(MediaStreamVideoSourceTest, ReleaseTrackAndSourceOnSuccessCallBack) {
+ MockMediaConstraintFactory factory;
+ {
+ blink::WebMediaStreamTrack track =
+ CreateTrack("123", factory.CreateWebMediaConstraints());
+ ReleaseTrackAndSourceOnAddTrackCallback(track);
+ }
+ mock_source()->CompleteGetSupportedFormats();
+ mock_source()->StartMockedSource();
+ EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
+}
+
+// Test that its safe to release the last reference of a blink track and the
+// source during the callback if adding a track fails.
+TEST_F(MediaStreamVideoSourceTest, ReleaseTrackAndSourceOnFailureCallBack) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 99999);
+ {
+ blink::WebMediaStreamTrack track =
+ CreateTrack("123", factory.CreateWebMediaConstraints());
+ ReleaseTrackAndSourceOnAddTrackCallback(track);
+ }
+ mock_source()->CompleteGetSupportedFormats();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+// Test that the source ignores an optional aspect ratio that is higher than
+// supported.
+TEST_F(MediaStreamVideoSourceTest, OptionalAspectRatioTooHigh) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional(MediaStreamVideoSource::kMinAspectRatio, 2);
+ blink::WebMediaStreamTrack track = CreateTrack(
+ "123", factory.CreateWebMediaConstraints());
+ mock_source()->CompleteGetSupportedFormats();
+
+ const media::VideoCaptureParams& params = mock_source()->start_params();
+ double aspect_ratio =
+ static_cast<double>(params.requested_format.frame_size.width()) /
+ params.requested_format.frame_size.height();
+ EXPECT_LT(aspect_ratio, 2);
+}
+
+// Test that the source starts video with the default resolution if the
+// that is the only supported.
+TEST_F(MediaStreamVideoSourceTest, DefaultCapability) {
+ media::VideoCaptureFormats formats;
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight),
+ MediaStreamVideoSource::kDefaultFrameRate,
+ media::PIXEL_FORMAT_I420));
+ mock_source()->SetSupportedFormats(formats);
+
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ CreateTrackAndStartSource(constraints,
+ MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight,
+ 30);
+}
+
+TEST_F(MediaStreamVideoSourceTest, InvalidMandatoryConstraint) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory("weird key", 640);
+ blink::WebMediaStreamTrack track = CreateTrack(
+ "123", factory.CreateWebMediaConstraints());
+ mock_source()->CompleteGetSupportedFormats();
+ EXPECT_EQ(1, NumberOfFailedConstraintsCallbacks());
+}
+
+// Test that the source ignores an unknown optional constraint.
+TEST_F(MediaStreamVideoSourceTest, InvalidOptionalConstraint) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional("weird key", 640);
+
+ CreateTrackAndStartSource(factory.CreateWebMediaConstraints(),
+ MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight,
+ 30);
+}
+
+// Tests that the source starts video with the max width and height set by
+// constraints for screencast.
+TEST_F(MediaStreamVideoSourceTest, ScreencastResolutionWithConstraint) {
+ media::VideoCaptureFormats formats;
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(480, 270), 30, media::PIXEL_FORMAT_I420));
+ mock_source()->SetSupportedFormats(formats);
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, 480);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, 270);
+
+ blink::WebMediaStreamTrack track = CreateTrackAndStartSource(
+ factory.CreateWebMediaConstraints(), 480, 270, 30);
+ EXPECT_EQ(480, mock_source()->max_requested_height());
+ EXPECT_EQ(270, mock_source()->max_requested_width());
+}
+
+// Test that optional constraints are applied in order.
+TEST_F(MediaStreamVideoSourceTest, OptionalConstraints) {
+ MockMediaConstraintFactory factory;
+ // Min width of 2056 pixels can not be fulfilled.
+ factory.AddOptional(MediaStreamVideoSource::kMinWidth, 2056);
+ factory.AddOptional(MediaStreamVideoSource::kMinWidth, 641);
+ // Since min width is set to 641 pixels, max width 640 can not be fulfilled.
+ factory.AddOptional(MediaStreamVideoSource::kMaxWidth, 640);
+ CreateTrackAndStartSource(factory.CreateWebMediaConstraints(), 1280, 720, 30);
+}
+
+// Test that the source crops to the requested max width and
+// height even though the camera delivers a larger frame.
+TEST_F(MediaStreamVideoSourceTest, DeliverCroppedVideoFrameOptional640360) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional(MediaStreamVideoSource::kMaxWidth, 640);
+ factory.AddOptional(MediaStreamVideoSource::kMaxHeight, 360);
+ TestSourceCropFrame(640, 480, factory.CreateWebMediaConstraints(), 640, 360);
+}
+
+TEST_F(MediaStreamVideoSourceTest, DeliverCroppedVideoFrameMandatory640360) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, 640);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, 360);
+ TestSourceCropFrame(640, 480, factory.CreateWebMediaConstraints(), 640, 360);
+}
+
+TEST_F(MediaStreamVideoSourceTest, DeliverCroppedVideoFrameMandatory732489) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, 732);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, 489);
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 732);
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 489);
+ TestSourceCropFrame(1280, 720, factory.CreateWebMediaConstraints(), 732, 489);
+}
+
+// Test that the source crops to the requested max width and
+// height even though the requested frame has odd size.
+TEST_F(MediaStreamVideoSourceTest, DeliverCroppedVideoFrame637359) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional(MediaStreamVideoSource::kMaxWidth, 637);
+ factory.AddOptional(MediaStreamVideoSource::kMaxHeight, 359);
+ TestSourceCropFrame(640, 480, factory.CreateWebMediaConstraints(), 637, 359);
+}
+
+TEST_F(MediaStreamVideoSourceTest, DeliverCroppedVideoFrame320320) {
+ MockMediaConstraintFactory factory;
+ factory.AddMandatory(MediaStreamVideoSource::kMaxWidth, 320);
+ factory.AddMandatory(MediaStreamVideoSource::kMaxHeight, 320);
+ factory.AddMandatory(MediaStreamVideoSource::kMinHeight, 320);
+ factory.AddMandatory(MediaStreamVideoSource::kMinWidth, 320);
+ TestSourceCropFrame(640, 480, factory.CreateWebMediaConstraints(), 320, 320);
+}
+
+TEST_F(MediaStreamVideoSourceTest, DeliverSmallerSizeWhenTooLargeMax) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional(MediaStreamVideoSource::kMaxWidth, 1920);
+ factory.AddOptional(MediaStreamVideoSource::kMaxHeight, 1080);
+ factory.AddOptional(MediaStreamVideoSource::kMinWidth, 1280);
+ factory.AddOptional(MediaStreamVideoSource::kMinHeight, 720);
+ TestSourceCropFrame(1280, 720, factory.CreateWebMediaConstraints(),
+ 1280, 720);
+}
+
+TEST_F(MediaStreamVideoSourceTest, TwoTracksWithVGAAndWVGA) {
+ MockMediaConstraintFactory factory1;
+ factory1.AddOptional(MediaStreamVideoSource::kMaxWidth, 640);
+ factory1.AddOptional(MediaStreamVideoSource::kMaxHeight, 480);
+
+ MockMediaConstraintFactory factory2;
+ factory2.AddOptional(MediaStreamVideoSource::kMaxHeight, 360);
+
+ TestTwoTracksWithDifferentConstraints(factory1.CreateWebMediaConstraints(),
+ factory2.CreateWebMediaConstraints(),
+ 640, 480,
+ 640, 480,
+ 640, 360);
+}
+
+TEST_F(MediaStreamVideoSourceTest, TwoTracksWith720AndWVGA) {
+ MockMediaConstraintFactory factory1;
+ factory1.AddOptional(MediaStreamVideoSource::kMinWidth, 1280);
+ factory1.AddOptional(MediaStreamVideoSource::kMinHeight, 720);
+
+
+ MockMediaConstraintFactory factory2;
+ factory2.AddMandatory(MediaStreamVideoSource::kMaxWidth, 640);
+ factory2.AddMandatory(MediaStreamVideoSource::kMaxHeight, 360);
+
+ TestTwoTracksWithDifferentConstraints(factory1.CreateWebMediaConstraints(),
+ factory2.CreateWebMediaConstraints(),
+ 1280, 720,
+ 1280, 720,
+ 640, 360);
+}
+
+TEST_F(MediaStreamVideoSourceTest, TwoTracksWith720AndW700H700) {
+ MockMediaConstraintFactory factory1;
+ factory1.AddOptional(MediaStreamVideoSource::kMinWidth, 1280);
+ factory1.AddOptional(MediaStreamVideoSource::kMinHeight, 720);
+
+ MockMediaConstraintFactory factory2;
+ factory2.AddMandatory(MediaStreamVideoSource::kMaxWidth, 700);
+ factory2.AddMandatory(MediaStreamVideoSource::kMaxHeight, 700);
+
+ TestTwoTracksWithDifferentConstraints(factory1.CreateWebMediaConstraints(),
+ factory2.CreateWebMediaConstraints(),
+ 1280, 720,
+ 1280, 720,
+ 700, 700);
+}
+
+TEST_F(MediaStreamVideoSourceTest, TwoTracksWith720AndMaxAspectRatio4To3) {
+ MockMediaConstraintFactory factory1;
+ factory1.AddOptional(MediaStreamVideoSource::kMinWidth, 1280);
+ factory1.AddOptional(MediaStreamVideoSource::kMinHeight, 720);
+
+ MockMediaConstraintFactory factory2;
+ factory2.AddMandatory(MediaStreamVideoSource::kMaxAspectRatio, 640.0 / 480);
+
+ TestTwoTracksWithDifferentConstraints(factory1.CreateWebMediaConstraints(),
+ factory2.CreateWebMediaConstraints(),
+ 1280, 720,
+ 1280, 720,
+ 960, 720);
+}
+
+TEST_F(MediaStreamVideoSourceTest, TwoTracksWithVgaAndMinAspectRatio) {
+ MockMediaConstraintFactory factory1;
+ factory1.AddOptional(MediaStreamVideoSource::kMaxWidth, 640);
+ factory1.AddOptional(MediaStreamVideoSource::kMaxHeight, 480);
+
+ MockMediaConstraintFactory factory2;
+ factory2.AddMandatory(MediaStreamVideoSource::kMinAspectRatio, 640.0 / 360);
+
+ TestTwoTracksWithDifferentConstraints(factory1.CreateWebMediaConstraints(),
+ factory2.CreateWebMediaConstraints(),
+ 640, 480,
+ 640, 480,
+ 640, 360);
+}
+
+// Test that a source can change the frame resolution on the fly and that
+// tracks sinks get the new frame size unless constraints force the frame to be
+// cropped.
+TEST_F(MediaStreamVideoSourceTest, SourceChangeFrameSize) {
+ MockMediaConstraintFactory factory;
+ factory.AddOptional(MediaStreamVideoSource::kMaxWidth, 800);
+ factory.AddOptional(MediaStreamVideoSource::kMaxHeight, 700);
+
+ // Expect the source to start capture with the supported resolution.
+ blink::WebMediaStreamTrack track =
+ CreateTrackAndStartSource(factory.CreateWebMediaConstraints(),
+ 640, 480, 30);
+
+ MockMediaStreamVideoSink sink;
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink, sink.GetDeliverFrameCB(), track);
+ EXPECT_EQ(0, sink.number_of_frames());
+ DeliverVideoFrameAndWaitForRenderer(320, 240, &sink);
+ EXPECT_EQ(1, sink.number_of_frames());
+ // Expect the delivered frame to be passed unchanged since its smaller than
+ // max requested.
+ EXPECT_EQ(320, sink.frame_size().width());
+ EXPECT_EQ(240, sink.frame_size().height());
+
+ DeliverVideoFrameAndWaitForRenderer(640, 480, &sink);
+ EXPECT_EQ(2, sink.number_of_frames());
+ // Expect the delivered frame to be passed unchanged since its smaller than
+ // max requested.
+ EXPECT_EQ(640, sink.frame_size().width());
+ EXPECT_EQ(480, sink.frame_size().height());
+
+ DeliverVideoFrameAndWaitForRenderer(1280, 720, &sink);
+
+ EXPECT_EQ(3, sink.number_of_frames());
+ // Expect a frame to be cropped since its larger than max requested.
+ EXPECT_EQ(800, sink.frame_size().width());
+ EXPECT_EQ(700, sink.frame_size().height());
+
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+}
+
+TEST_F(MediaStreamVideoSourceTest, IsConstraintSupported) {
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMaxFrameRate));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMinFrameRate));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMaxWidth));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMinWidth));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMaxHeight));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMinHeight));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMaxAspectRatio));
+ EXPECT_TRUE(MediaStreamVideoSource::IsConstraintSupported(
+ MediaStreamVideoSource::kMinAspectRatio));
+
+ EXPECT_FALSE(MediaStreamVideoSource::IsConstraintSupported(
+ "something unsupported"));
+}
+
+// Test that the constraint negotiation can handle 0.0 fps as frame rate.
+TEST_F(MediaStreamVideoSourceTest, Use0FpsSupportedFormat) {
+ media::VideoCaptureFormats formats;
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(640, 480), 0.0f, media::PIXEL_FORMAT_I420));
+ formats.push_back(media::VideoCaptureFormat(
+ gfx::Size(320, 240), 0.0f, media::PIXEL_FORMAT_I420));
+ mock_source()->SetSupportedFormats(formats);
+
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ blink::WebMediaStreamTrack track = CreateTrack("123", constraints);
+ mock_source()->CompleteGetSupportedFormats();
+ mock_source()->StartMockedSource();
+ EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
+
+ MockMediaStreamVideoSink sink;
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink, sink.GetDeliverFrameCB(), track);
+ EXPECT_EQ(0, sink.number_of_frames());
+ DeliverVideoFrameAndWaitForRenderer(320, 240, &sink);
+ EXPECT_EQ(1, sink.number_of_frames());
+ // Expect the delivered frame to be passed unchanged since its smaller than
+ // max requested.
+ EXPECT_EQ(320, sink.frame_size().width());
+ EXPECT_EQ(240, sink.frame_size().height());
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_video_track.cc b/chromium/content/renderer/media/media_stream_video_track.cc
index 44962445161..0ad7159255d 100644
--- a/chromium/content/renderer/media/media_stream_video_track.cc
+++ b/chromium/content/renderer/media/media_stream_video_track.cc
@@ -4,44 +4,172 @@
#include "content/renderer/media/media_stream_video_track.h"
-#include "content/renderer/media/webrtc/webrtc_video_sink_adapter.h"
+#include "base/bind.h"
+#include "content/renderer/media/video_frame_deliverer.h"
+#include "media/base/bind_to_current_loop.h"
namespace content {
-// Wrapper which allows to use std::find_if() when adding and removing
-// sinks to/from |sinks_|.
-struct SinkWrapper {
- explicit SinkWrapper(MediaStreamVideoSink* sink) : sink_(sink) {}
- bool operator()(
- const WebRtcVideoSinkAdapter* owner) {
- return owner->sink() == sink_;
+// Helper class used for delivering video frames to MediaStreamSinks on the
+// IO-thread.
+// Frames are delivered to an instance of this class from a
+// MediaStreamVideoSource on the IO-thread to the method DeliverFrameOnIO.
+// Frames are only delivered to the sinks if the track is enabled.
+class MediaStreamVideoTrack::FrameDeliverer : public VideoFrameDeliverer {
+ public:
+ FrameDeliverer(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy,
+ bool enabled)
+ : VideoFrameDeliverer(io_message_loop_proxy),
+ enabled_(enabled) {
}
- MediaStreamVideoSink* sink_;
+
+ // Add |sink| to receive state changes on the main render thread.
+ // Video frames will be delivered to |callback| on the IO thread.
+ void AddSink(MediaStreamVideoSink* sink,
+ const VideoCaptureDeliverFrameCB& callback) {
+ DCHECK(thread_checker().CalledOnValidThread());
+ DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end());
+ sinks_.push_back(sink);
+ AddCallback(sink, callback);
+ }
+
+ void RemoveSink(MediaStreamVideoSink* sink) {
+ DCHECK(thread_checker().CalledOnValidThread());
+ std::vector<MediaStreamVideoSink*>::iterator it =
+ std::find(sinks_.begin(), sinks_.end(), sink);
+ DCHECK(it != sinks_.end());
+ sinks_.erase(it);
+ RemoveCallback(sink);
+ }
+
+ void SetEnabled(bool enabled) {
+ DCHECK(thread_checker().CalledOnValidThread());
+ io_message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MediaStreamVideoTrack::FrameDeliverer::SetEnabledOnIO,
+ this, enabled));
+ }
+
+ virtual void DeliverFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) OVERRIDE {
+ DCHECK(io_message_loop()->BelongsToCurrentThread());
+ if (!enabled_)
+ return;
+ VideoFrameDeliverer::DeliverFrameOnIO(frame, format,
+ estimated_capture_time);
+ }
+
+ const std::vector<MediaStreamVideoSink*>& sinks() const { return sinks_; }
+
+ protected:
+ virtual ~FrameDeliverer() {
+ DCHECK(sinks_.empty());
+ }
+
+ void SetEnabledOnIO(bool enabled) {
+ DCHECK(io_message_loop()->BelongsToCurrentThread());
+ enabled_ = enabled;
+ }
+
+ private:
+ // The below members are used on the main render thread.
+ std::vector<MediaStreamVideoSink*> sinks_;
+
+ // The below parameters are used on the IO-thread.
+ bool enabled_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameDeliverer);
};
-MediaStreamVideoTrack::MediaStreamVideoTrack(webrtc::VideoTrackInterface* track,
- bool is_local_track)
- : MediaStreamTrackExtraData(track, is_local_track),
- video_track_(track) {
+// static
+blink::WebMediaStreamTrack MediaStreamVideoTrack::CreateVideoTrack(
+ MediaStreamVideoSource* source,
+ const blink::WebMediaConstraints& constraints,
+ const MediaStreamVideoSource::ConstraintsCallback& callback,
+ bool enabled) {
+ blink::WebMediaStreamTrack track;
+ track.initialize(source->owner());
+ track.setExtraData(new MediaStreamVideoTrack(source,
+ constraints,
+ callback,
+ enabled));
+ return track;
+}
+
+// static
+MediaStreamVideoTrack* MediaStreamVideoTrack::GetVideoTrack(
+ const blink::WebMediaStreamTrack& track) {
+ return static_cast<MediaStreamVideoTrack*>(track.extraData());
+}
+
+MediaStreamVideoTrack::MediaStreamVideoTrack(
+ MediaStreamVideoSource* source,
+ const blink::WebMediaConstraints& constraints,
+ const MediaStreamVideoSource::ConstraintsCallback& callback,
+ bool enabled)
+ : MediaStreamTrack(NULL, true),
+ frame_deliverer_(
+ new MediaStreamVideoTrack::FrameDeliverer(source->io_message_loop(),
+ enabled)),
+ constraints_(constraints),
+ source_(source) {
+ DCHECK(!constraints.isNull());
+ source->AddTrack(this,
+ base::Bind(
+ &MediaStreamVideoTrack::FrameDeliverer::DeliverFrameOnIO,
+ frame_deliverer_),
+ constraints, callback);
}
MediaStreamVideoTrack::~MediaStreamVideoTrack() {
- DCHECK(sinks_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ Stop();
+ DVLOG(3) << "~MediaStreamVideoTrack()";
}
-void MediaStreamVideoTrack::AddSink(MediaStreamVideoSink* sink) {
+void MediaStreamVideoTrack::AddSink(
+ MediaStreamVideoSink* sink, const VideoCaptureDeliverFrameCB& callback) {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(std::find_if(sinks_.begin(), sinks_.end(),
- SinkWrapper(sink)) == sinks_.end());
- sinks_.push_back(new WebRtcVideoSinkAdapter(video_track_, sink));
+ frame_deliverer_->AddSink(sink, callback);
}
void MediaStreamVideoTrack::RemoveSink(MediaStreamVideoSink* sink) {
DCHECK(thread_checker_.CalledOnValidThread());
- ScopedVector<WebRtcVideoSinkAdapter>::iterator it =
- std::find_if(sinks_.begin(), sinks_.end(), SinkWrapper(sink));
- DCHECK(it != sinks_.end());
- sinks_.erase(it);
+ frame_deliverer_->RemoveSink(sink);
+}
+
+void MediaStreamVideoTrack::SetEnabled(bool enabled) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ MediaStreamTrack::SetEnabled(enabled);
+
+ frame_deliverer_->SetEnabled(enabled);
+ const std::vector<MediaStreamVideoSink*>& sinks = frame_deliverer_->sinks();
+ for (std::vector<MediaStreamVideoSink*>::const_iterator it = sinks.begin();
+ it != sinks.end(); ++it) {
+ (*it)->OnEnabledChanged(enabled);
+ }
+}
+
+void MediaStreamVideoTrack::Stop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (source_) {
+ source_->RemoveTrack(this);
+ source_ = NULL;
+ }
+ OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateEnded);
+}
+
+void MediaStreamVideoTrack::OnReadyStateChanged(
+ blink::WebMediaStreamSource::ReadyState state) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const std::vector<MediaStreamVideoSink*>& sinks = frame_deliverer_->sinks();
+ for (std::vector<MediaStreamVideoSink*>::const_iterator it = sinks.begin();
+ it != sinks.end(); ++it) {
+ (*it)->OnReadyStateChanged(state);
+ }
}
} // namespace content
diff --git a/chromium/content/renderer/media/media_stream_video_track.h b/chromium/content/renderer/media/media_stream_video_track.h
index 410a8f9a40b..040686c5828 100644
--- a/chromium/content/renderer/media/media_stream_video_track.h
+++ b/chromium/content/renderer/media/media_stream_video_track.h
@@ -5,41 +5,92 @@
#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_TRACK_H_
#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_VIDEO_TRACK_H_
+#include <vector>
+
#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
#include "base/memory/scoped_vector.h"
#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
#include "content/public/renderer/media_stream_video_sink.h"
-#include "content/renderer/media/media_stream_track_extra_data.h"
-
-namespace webrtc {
-class VideoTrackInterface;
-}
+#include "content/renderer/media/media_stream_track.h"
+#include "content/renderer/media/media_stream_video_source.h"
namespace content {
-class WebRtcVideoSinkAdapter;
-
// MediaStreamVideoTrack is a video specific representation of a
// blink::WebMediaStreamTrack in content. It is owned by the blink object
// and can be retrieved from a blink object using
-// WebMediaStreamTrack::extraData()
-class CONTENT_EXPORT MediaStreamVideoTrack : public MediaStreamTrackExtraData {
+// WebMediaStreamTrack::extraData() or MediaStreamVideoTrack::GetVideoTrack.
+class CONTENT_EXPORT MediaStreamVideoTrack : public MediaStreamTrack {
public:
- MediaStreamVideoTrack(webrtc::VideoTrackInterface* track,
- bool is_local_track);
+ // Helper method to create a blink::WebMediaStreamTrack and a
+ // MediaStreamVideoTrack instance. The MediaStreamVideoTrack object is owned
+ // by the blink object in its WebMediaStreamTrack::ExtraData member.
+ // |callback| is triggered if the track is added to the source
+ // successfully and will receive video frames that match |constraints|
+ // or if the source fails to provide video frames.
+ // If |enabled| is true, sinks added to the track will
+ // receive video frames when the source delivers frames to the track.
+ static blink::WebMediaStreamTrack CreateVideoTrack(
+ MediaStreamVideoSource* source,
+ const blink::WebMediaConstraints& constraints,
+ const MediaStreamVideoSource::ConstraintsCallback& callback,
+ bool enabled);
+
+ static MediaStreamVideoTrack* GetVideoTrack(
+ const blink::WebMediaStreamTrack& track);
+
+ // Constructor for local video tracks.
+ MediaStreamVideoTrack(
+ MediaStreamVideoSource* source,
+ const blink::WebMediaConstraints& constraints,
+ const MediaStreamVideoSource::ConstraintsCallback& callback,
+ bool enabled);
virtual ~MediaStreamVideoTrack();
- void AddSink(MediaStreamVideoSink* sink);
- void RemoveSink(MediaStreamVideoSink* sink);
- private:
+ virtual void SetEnabled(bool enabled) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+
+ void OnReadyStateChanged(blink::WebMediaStreamSource::ReadyState state);
+
+ const blink::WebMediaConstraints& constraints() const {
+ return constraints_;
+ }
+
+ protected:
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
- // The webrtc video track.
- // TODO(perkj): Make this class independent of webrtc as part of project
- // Piranha Plant.
- webrtc::VideoTrackInterface* video_track_;
- ScopedVector<WebRtcVideoSinkAdapter> sinks_;
+
+ private:
+ // MediaStreamVideoSink is a friend to allow it to call AddSink() and
+ // RemoveSink().
+ friend class MediaStreamVideoSink;
+ FRIEND_TEST_ALL_PREFIXES(MediaStreamRemoteVideoSourceTest, StartTrack);
+ FRIEND_TEST_ALL_PREFIXES(MediaStreamRemoteVideoSourceTest, RemoteTrackStop);
+ FRIEND_TEST_ALL_PREFIXES(VideoDestinationHandlerTest, PutFrame);
+
+ // Add |sink| to receive state changes on the main render thread and video
+ // frames in the |callback| method on the IO-thread.
+ // |callback| will be reset on the render thread.
+ // These two methods are private such that no subclass can intercept and
+ // store the callback. This is important to ensure that we can release
+ // the callback on render thread without reference to it on the IO-thread.
+ void AddSink(MediaStreamVideoSink* sink,
+ const VideoCaptureDeliverFrameCB& callback);
+ void RemoveSink(MediaStreamVideoSink* sink);
+
+ // |FrameDeliverer| is an internal helper object used for delivering video
+ // frames on the IO-thread using callbacks to all registered sinks.
+ class FrameDeliverer;
+ scoped_refptr<FrameDeliverer> frame_deliverer_;
+
+ blink::WebMediaConstraints constraints_;
+
+ // Weak ref to the source this track is connected to. |source_| is owned
+ // by the blink::WebMediaStreamSource and is guaranteed to outlive the
+ // track.
+ MediaStreamVideoSource* source_;
DISALLOW_COPY_AND_ASSIGN(MediaStreamVideoTrack);
};
diff --git a/chromium/content/renderer/media/media_stream_video_track_unittest.cc b/chromium/content/renderer/media/media_stream_video_track_unittest.cc
new file mode 100644
index 00000000000..9b281a67d15
--- /dev/null
+++ b/chromium/content/renderer/media/media_stream_video_track_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_checker_impl.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_stream_video_sink.h"
+#include "content/renderer/media/mock_media_stream_video_source.h"
+#include "media/base/video_frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
+class MediaStreamVideoTrackTest : public ::testing::Test {
+ public:
+ MediaStreamVideoTrackTest()
+ : child_process_(new ChildProcess()),
+ mock_source_(new MockMediaStreamVideoSource(false)),
+ source_started_(false) {
+ blink_source_.initialize(base::UTF8ToUTF16("dummy_source_id"),
+ blink::WebMediaStreamSource::TypeVideo,
+ base::UTF8ToUTF16("dummy_source_name"));
+ blink_source_.setExtraData(mock_source_);
+ }
+
+ virtual ~MediaStreamVideoTrackTest() {
+ }
+
+ void DeliverVideoFrameAndWaitForRenderer(MockMediaStreamVideoSink* sink) {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ EXPECT_CALL(*sink, OnVideoFrame()).WillOnce(
+ RunClosure(quit_closure));
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::CreateBlackFrame(
+ gfx::Size(MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight));
+ mock_source()->DeliverVideoFrame(frame);
+ run_loop.Run();
+ }
+
+ protected:
+ base::MessageLoop* io_message_loop() const {
+ return child_process_->io_message_loop();
+ }
+
+ // Create a track that's associated with |mock_source_|.
+ blink::WebMediaStreamTrack CreateTrack() {
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ bool enabled = true;
+ blink::WebMediaStreamTrack track =
+ MediaStreamVideoTrack::CreateVideoTrack(
+ mock_source_, constraints,
+ MediaStreamSource::ConstraintsCallback(), enabled);
+ if (!source_started_) {
+ mock_source_->StartMockedSource();
+ source_started_ = true;
+ }
+ return track;
+ }
+
+ MockMediaStreamVideoSource* mock_source() { return mock_source_; }
+ const blink::WebMediaStreamSource& blink_source() const {
+ return blink_source_;
+ }
+
+ private:
+ base::MessageLoopForUI message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
+ blink::WebMediaStreamSource blink_source_;
+ // |mock_source_| is owned by |blink_source_|.
+ MockMediaStreamVideoSource* mock_source_;
+ bool source_started_;
+};
+
+TEST_F(MediaStreamVideoTrackTest, AddAndRemoveSink) {
+ MockMediaStreamVideoSink sink;
+ blink::WebMediaStreamTrack track = CreateTrack();
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink, sink.GetDeliverFrameCB(), track);
+
+ DeliverVideoFrameAndWaitForRenderer(&sink);
+ EXPECT_EQ(1, sink.number_of_frames());
+
+ DeliverVideoFrameAndWaitForRenderer(&sink);
+
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::CreateBlackFrame(
+ gfx::Size(MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight));
+ mock_source()->DeliverVideoFrame(frame);
+ // Wait for the IO thread to complete delivering frames.
+ io_message_loop()->RunUntilIdle();
+ EXPECT_EQ(2, sink.number_of_frames());
+}
+
+class CheckThreadHelper {
+ public:
+ CheckThreadHelper(base::Closure callback, bool* correct)
+ : callback_(callback),
+ correct_(correct) {
+ }
+
+ ~CheckThreadHelper() {
+ *correct_ = thread_checker_.CalledOnValidThread();
+ callback_.Run();
+ }
+
+ private:
+ base::Closure callback_;
+ bool* correct_;
+ base::ThreadCheckerImpl thread_checker_;
+};
+
+void CheckThreadVideoFrameReceiver(
+ CheckThreadHelper* helper,
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ // Do nothing.
+}
+
+// Checks that the callback given to the track is reset on the right thread.
+TEST_F(MediaStreamVideoTrackTest, ResetCallbackOnThread) {
+ MockMediaStreamVideoSink sink;
+ blink::WebMediaStreamTrack track = CreateTrack();
+
+ base::RunLoop run_loop;
+ bool correct = false;
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink,
+ base::Bind(
+ &CheckThreadVideoFrameReceiver,
+ base::Owned(new CheckThreadHelper(run_loop.QuitClosure(), &correct))),
+ track);
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+ run_loop.Run();
+ EXPECT_TRUE(correct) << "Not called on correct thread.";
+}
+
+TEST_F(MediaStreamVideoTrackTest, SetEnabled) {
+ MockMediaStreamVideoSink sink;
+ blink::WebMediaStreamTrack track = CreateTrack();
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink, sink.GetDeliverFrameCB(), track);
+
+ MediaStreamVideoTrack* video_track =
+ MediaStreamVideoTrack::GetVideoTrack(track);
+
+ DeliverVideoFrameAndWaitForRenderer(&sink);
+ EXPECT_EQ(1, sink.number_of_frames());
+
+ video_track->SetEnabled(false);
+ EXPECT_FALSE(sink.enabled());
+
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::CreateBlackFrame(
+ gfx::Size(MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight));
+ mock_source()->DeliverVideoFrame(frame);
+ // Wait for the IO thread to complete delivering frames.
+ io_message_loop()->RunUntilIdle();
+ EXPECT_EQ(1, sink.number_of_frames());
+
+ video_track->SetEnabled(true);
+ EXPECT_TRUE(sink.enabled());
+ mock_source()->DeliverVideoFrame(frame);
+ DeliverVideoFrameAndWaitForRenderer(&sink);
+ EXPECT_EQ(2, sink.number_of_frames());
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+}
+
+TEST_F(MediaStreamVideoTrackTest, SourceStopped) {
+ MockMediaStreamVideoSink sink;
+ blink::WebMediaStreamTrack track = CreateTrack();
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink, sink.GetDeliverFrameCB(), track);
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive, sink.state());
+
+ mock_source()->StopSource();
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded, sink.state());
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink, track);
+}
+
+TEST_F(MediaStreamVideoTrackTest, StopLastTrack) {
+ MockMediaStreamVideoSink sink1;
+ blink::WebMediaStreamTrack track1 = CreateTrack();
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink1, sink1.GetDeliverFrameCB(), track1);
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive, sink1.state());
+
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive,
+ blink_source().readyState());
+
+ MockMediaStreamVideoSink sink2;
+ blink::WebMediaStreamTrack track2 = CreateTrack();
+ MediaStreamVideoSink::AddToVideoTrack(
+ &sink2, sink2.GetDeliverFrameCB(), track2);
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive, sink2.state());
+
+ MediaStreamVideoTrack* native_track1 =
+ MediaStreamVideoTrack::GetVideoTrack(track1);
+ native_track1->Stop();
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded, sink1.state());
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive,
+ blink_source().readyState());
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink1, track1);
+
+ MediaStreamVideoTrack* native_track2 =
+ MediaStreamVideoTrack::GetVideoTrack(track2);
+ native_track2->Stop();
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded, sink2.state());
+ EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded,
+ blink_source().readyState());
+ MediaStreamVideoSink::RemoveFromVideoTrack(&sink2, track2);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/midi_dispatcher.cc b/chromium/content/renderer/media/midi_dispatcher.cc
index 1f04e3e22f8..cbbd876332b 100644
--- a/chromium/content/renderer/media/midi_dispatcher.cc
+++ b/chromium/content/renderer/media/midi_dispatcher.cc
@@ -7,49 +7,48 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "content/common/media/midi_messages.h"
-#include "content/renderer/render_view_impl.h"
+#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/web/WebMIDIPermissionRequest.h"
#include "third_party/WebKit/public/web/WebSecurityOrigin.h"
+#include "third_party/WebKit/public/web/WebUserGestureIndicator.h"
using blink::WebMIDIPermissionRequest;
using blink::WebSecurityOrigin;
namespace content {
-MIDIDispatcher::MIDIDispatcher(RenderViewImpl* render_view)
- : RenderViewObserver(render_view) {
+MidiDispatcher::MidiDispatcher(RenderFrame* render_frame)
+ : RenderFrameObserver(render_frame) {
}
-MIDIDispatcher::~MIDIDispatcher() {}
+MidiDispatcher::~MidiDispatcher() {}
-bool MIDIDispatcher::OnMessageReceived(const IPC::Message& message) {
+bool MidiDispatcher::OnMessageReceived(const IPC::Message& message) {
bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(MIDIDispatcher, message)
- IPC_MESSAGE_HANDLER(MIDIMsg_SysExPermissionApproved,
+ IPC_BEGIN_MESSAGE_MAP(MidiDispatcher, message)
+ IPC_MESSAGE_HANDLER(MidiMsg_SysExPermissionApproved,
OnSysExPermissionApproved)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
}
-void MIDIDispatcher::requestSysExPermission(
+void MidiDispatcher::requestSysexPermission(
const WebMIDIPermissionRequest& request) {
int bridge_id = requests_.Add(new WebMIDIPermissionRequest(request));
WebSecurityOrigin security_origin = request.securityOrigin();
- std::string origin = security_origin.toString().utf8();
- GURL url(origin);
- Send(new MIDIHostMsg_RequestSysExPermission(routing_id(), bridge_id, url));
+ GURL url(security_origin.toString());
+ Send(new MidiHostMsg_RequestSysExPermission(routing_id(), bridge_id, url,
+ blink::WebUserGestureIndicator::isProcessingUserGesture()));
}
-void MIDIDispatcher::cancelSysExPermissionRequest(
+void MidiDispatcher::cancelSysexPermissionRequest(
const WebMIDIPermissionRequest& request) {
- for (IDMap<WebMIDIPermissionRequest>::iterator it(&requests_);
- !it.IsAtEnd();
- it.Advance()) {
+ for (Requests::iterator it(&requests_); !it.IsAtEnd(); it.Advance()) {
WebMIDIPermissionRequest* value = it.GetCurrentValue();
if (value->equals(request)) {
base::string16 origin = request.securityOrigin().toString();
- Send(new MIDIHostMsg_CancelSysExPermissionRequest(
+ Send(new MidiHostMsg_CancelSysExPermissionRequest(
routing_id(), it.GetCurrentKey(), GURL(origin)));
requests_.Remove(it.GetCurrentKey());
break;
@@ -57,7 +56,8 @@ void MIDIDispatcher::cancelSysExPermissionRequest(
}
}
-void MIDIDispatcher::OnSysExPermissionApproved(int bridge_id, bool is_allowed) {
+void MidiDispatcher::OnSysExPermissionApproved(int bridge_id,
+ bool is_allowed) {
// |request| can be NULL when the request is canceled.
WebMIDIPermissionRequest* request = requests_.Lookup(bridge_id);
if (!request)
diff --git a/chromium/content/renderer/media/midi_dispatcher.h b/chromium/content/renderer/media/midi_dispatcher.h
index 70f8125bdd2..0d40f46d26e 100644
--- a/chromium/content/renderer/media/midi_dispatcher.h
+++ b/chromium/content/renderer/media/midi_dispatcher.h
@@ -6,7 +6,7 @@
#define CONTENT_RENDERER_MEDIA_MIDI_DISPATCHER_H_
#include "base/id_map.h"
-#include "content/public/renderer/render_view_observer.h"
+#include "content/public/renderer/render_frame_observer.h"
#include "third_party/WebKit/public/web/WebMIDIClient.h"
namespace blink {
@@ -15,38 +15,37 @@ class WebMIDIPermissionRequest;
namespace content {
-class RenderViewImpl;
-
-// MIDIDispatcher implements WebMIDIClient to handle permissions for using
+// MidiDispatcher implements WebMIDIClient to handle permissions for using
// system exclusive messages.
-// It works as RenderViewObserver to handle IPC messages between
-// MIDIDispatcherHost owned by RenderViewHost since permissions are managed in
+// It works as RenderFrameObserver to handle IPC messages between
+// MidiDispatcherHost owned by WebContents since permissions are managed in
// the browser process.
-class MIDIDispatcher : public RenderViewObserver,
+class MidiDispatcher : public RenderFrameObserver,
public blink::WebMIDIClient {
public:
- explicit MIDIDispatcher(RenderViewImpl* render_view);
- virtual ~MIDIDispatcher();
+ explicit MidiDispatcher(RenderFrame* render_frame);
+ virtual ~MidiDispatcher();
private:
- // RenderView::Observer implementation.
+ // RenderFrameObserver implementation.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
// blink::WebMIDIClient implementation.
- virtual void requestSysExPermission(
- const blink::WebMIDIPermissionRequest& request) OVERRIDE;
- virtual void cancelSysExPermissionRequest(
- const blink::WebMIDIPermissionRequest& request) OVERRIDE;
+ virtual void requestSysexPermission(
+ const blink::WebMIDIPermissionRequest& request);
+ virtual void cancelSysexPermissionRequest(
+ const blink::WebMIDIPermissionRequest& request);
// Permission for using system exclusive messages has been set.
void OnSysExPermissionApproved(int client_id, bool is_allowed);
// Each WebMIDIPermissionRequest object is valid until
- // cancelSysExPermissionRequest() is called with the object, or used to call
+ // cancelSysexPermissionRequest() is called with the object, or used to call
// WebMIDIPermissionRequest::setIsAllowed().
- IDMap<blink::WebMIDIPermissionRequest> requests_;
+ typedef IDMap<blink::WebMIDIPermissionRequest, IDMapOwnPointer> Requests;
+ Requests requests_;
- DISALLOW_COPY_AND_ASSIGN(MIDIDispatcher);
+ DISALLOW_COPY_AND_ASSIGN(MidiDispatcher);
};
} // namespace content
diff --git a/chromium/content/renderer/media/midi_message_filter.cc b/chromium/content/renderer/media/midi_message_filter.cc
index d114618ec60..e6a7f8067be 100644
--- a/chromium/content/renderer/media/midi_message_filter.cc
+++ b/chromium/content/renderer/media/midi_message_filter.cc
@@ -12,7 +12,7 @@
#include "content/renderer/render_thread_impl.h"
#include "ipc/ipc_logging.h"
-using media::MIDIPortInfoList;
+using media::MidiPortInfoList;
using base::AutoLock;
// The maximum number of bytes which we're allowed to send to the browser
@@ -22,44 +22,44 @@ static const size_t kMaxUnacknowledgedBytesSent = 10 * 1024 * 1024; // 10 MB.
namespace content {
-MIDIMessageFilter::MIDIMessageFilter(
+MidiMessageFilter::MidiMessageFilter(
const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
- : channel_(NULL),
+ : sender_(NULL),
io_message_loop_(io_message_loop),
main_message_loop_(base::MessageLoopProxy::current()),
next_available_id_(0),
unacknowledged_bytes_sent_(0) {
}
-MIDIMessageFilter::~MIDIMessageFilter() {}
+MidiMessageFilter::~MidiMessageFilter() {}
-void MIDIMessageFilter::Send(IPC::Message* message) {
+void MidiMessageFilter::Send(IPC::Message* message) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- if (!channel_) {
+ if (!sender_) {
delete message;
} else {
- channel_->Send(message);
+ sender_->Send(message);
}
}
-bool MIDIMessageFilter::OnMessageReceived(const IPC::Message& message) {
+bool MidiMessageFilter::OnMessageReceived(const IPC::Message& message) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(MIDIMessageFilter, message)
- IPC_MESSAGE_HANDLER(MIDIMsg_SessionStarted, OnSessionStarted)
- IPC_MESSAGE_HANDLER(MIDIMsg_DataReceived, OnDataReceived)
- IPC_MESSAGE_HANDLER(MIDIMsg_AcknowledgeSentData, OnAcknowledgeSentData)
+ IPC_BEGIN_MESSAGE_MAP(MidiMessageFilter, message)
+ IPC_MESSAGE_HANDLER(MidiMsg_SessionStarted, OnSessionStarted)
+ IPC_MESSAGE_HANDLER(MidiMsg_DataReceived, OnDataReceived)
+ IPC_MESSAGE_HANDLER(MidiMsg_AcknowledgeSentData, OnAcknowledgeSentData)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
}
-void MIDIMessageFilter::OnFilterAdded(IPC::Channel* channel) {
+void MidiMessageFilter::OnFilterAdded(IPC::Sender* sender) {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- channel_ = channel;
+ sender_ = sender;
}
-void MIDIMessageFilter::OnFilterRemoved() {
+void MidiMessageFilter::OnFilterRemoved() {
DCHECK(io_message_loop_->BelongsToCurrentThread());
// Once removed, a filter will not be used again. At this time all
@@ -67,12 +67,12 @@ void MIDIMessageFilter::OnFilterRemoved() {
OnChannelClosing();
}
-void MIDIMessageFilter::OnChannelClosing() {
+void MidiMessageFilter::OnChannelClosing() {
DCHECK(io_message_loop_->BelongsToCurrentThread());
- channel_ = NULL;
+ sender_ = NULL;
}
-void MIDIMessageFilter::StartSession(blink::WebMIDIAccessorClient* client) {
+void MidiMessageFilter::StartSession(blink::WebMIDIAccessorClient* client) {
// Generate and keep track of a "client id" which is sent to the browser
// to ask permission to talk to MIDI hardware.
// This id is handed back when we receive the answer in OnAccessApproved().
@@ -81,16 +81,16 @@ void MIDIMessageFilter::StartSession(blink::WebMIDIAccessorClient* client) {
clients_[client] = client_id;
io_message_loop_->PostTask(FROM_HERE,
- base::Bind(&MIDIMessageFilter::StartSessionOnIOThread, this,
+ base::Bind(&MidiMessageFilter::StartSessionOnIOThread, this,
client_id));
}
}
-void MIDIMessageFilter::StartSessionOnIOThread(int client_id) {
- Send(new MIDIHostMsg_StartSession(client_id));
+void MidiMessageFilter::StartSessionOnIOThread(int client_id) {
+ Send(new MidiHostMsg_StartSession(client_id));
}
-void MIDIMessageFilter::RemoveClient(blink::WebMIDIAccessorClient* client) {
+void MidiMessageFilter::RemoveClient(blink::WebMIDIAccessorClient* client) {
ClientsMap::iterator i = clients_.find(client);
if (i != clients_.end())
clients_.erase(i);
@@ -98,50 +98,69 @@ void MIDIMessageFilter::RemoveClient(blink::WebMIDIAccessorClient* client) {
// Received from browser.
-void MIDIMessageFilter::OnSessionStarted(
+void MidiMessageFilter::OnSessionStarted(
int client_id,
- bool success,
- MIDIPortInfoList inputs,
- MIDIPortInfoList outputs) {
+ media::MidiResult result,
+ MidiPortInfoList inputs,
+ MidiPortInfoList outputs) {
// Handle on the main JS thread.
main_message_loop_->PostTask(
FROM_HERE,
- base::Bind(&MIDIMessageFilter::HandleSessionStarted, this,
- client_id, success, inputs, outputs));
+ base::Bind(&MidiMessageFilter::HandleSessionStarted, this,
+ client_id, result, inputs, outputs));
}
-void MIDIMessageFilter::HandleSessionStarted(
+void MidiMessageFilter::HandleSessionStarted(
int client_id,
- bool success,
- MIDIPortInfoList inputs,
- MIDIPortInfoList outputs) {
+ media::MidiResult result,
+ MidiPortInfoList inputs,
+ MidiPortInfoList outputs) {
blink::WebMIDIAccessorClient* client = GetClientFromId(client_id);
if (!client)
return;
- if (success) {
+ if (result == media::MIDI_OK) {
// Add the client's input and output ports.
for (size_t i = 0; i < inputs.size(); ++i) {
client->didAddInputPort(
- UTF8ToUTF16(inputs[i].id),
- UTF8ToUTF16(inputs[i].manufacturer),
- UTF8ToUTF16(inputs[i].name),
- UTF8ToUTF16(inputs[i].version));
+ base::UTF8ToUTF16(inputs[i].id),
+ base::UTF8ToUTF16(inputs[i].manufacturer),
+ base::UTF8ToUTF16(inputs[i].name),
+ base::UTF8ToUTF16(inputs[i].version));
}
for (size_t i = 0; i < outputs.size(); ++i) {
client->didAddOutputPort(
- UTF8ToUTF16(outputs[i].id),
- UTF8ToUTF16(outputs[i].manufacturer),
- UTF8ToUTF16(outputs[i].name),
- UTF8ToUTF16(outputs[i].version));
+ base::UTF8ToUTF16(outputs[i].id),
+ base::UTF8ToUTF16(outputs[i].manufacturer),
+ base::UTF8ToUTF16(outputs[i].name),
+ base::UTF8ToUTF16(outputs[i].version));
}
}
- client->didStartSession(success);
+ std::string error;
+ std::string message;
+ switch (result) {
+ case media::MIDI_OK:
+ break;
+ case media::MIDI_NOT_SUPPORTED:
+ error = "NotSupportedError";
+ break;
+ case media::MIDI_INITIALIZATION_ERROR:
+ error = "InvalidStateError";
+ message = "Platform dependent initialization failed.";
+ break;
+ default:
+ NOTREACHED();
+ error = "InvalidStateError";
+ message = "Unknown internal error occurred.";
+ break;
+ }
+ client->didStartSession(result == media::MIDI_OK, base::UTF8ToUTF16(error),
+ base::UTF8ToUTF16(message));
}
blink::WebMIDIAccessorClient*
-MIDIMessageFilter::GetClientFromId(int client_id) {
+MidiMessageFilter::GetClientFromId(int client_id) {
// Iterating like this seems inefficient, but in practice there generally
// will be very few clients (usually one). Additionally, this lookup
// usually happens one time during page load. So the performance hit is
@@ -153,57 +172,57 @@ MIDIMessageFilter::GetClientFromId(int client_id) {
return NULL;
}
-void MIDIMessageFilter::OnDataReceived(uint32 port,
+void MidiMessageFilter::OnDataReceived(uint32 port,
const std::vector<uint8>& data,
double timestamp) {
- TRACE_EVENT0("midi", "MIDIMessageFilter::OnDataReceived");
+ TRACE_EVENT0("midi", "MidiMessageFilter::OnDataReceived");
main_message_loop_->PostTask(
FROM_HERE,
- base::Bind(&MIDIMessageFilter::HandleDataReceived, this,
+ base::Bind(&MidiMessageFilter::HandleDataReceived, this,
port, data, timestamp));
}
-void MIDIMessageFilter::OnAcknowledgeSentData(size_t bytes_sent) {
+void MidiMessageFilter::OnAcknowledgeSentData(size_t bytes_sent) {
DCHECK_GE(unacknowledged_bytes_sent_, bytes_sent);
if (unacknowledged_bytes_sent_ >= bytes_sent)
unacknowledged_bytes_sent_ -= bytes_sent;
}
-void MIDIMessageFilter::HandleDataReceived(uint32 port,
+void MidiMessageFilter::HandleDataReceived(uint32 port,
const std::vector<uint8>& data,
double timestamp) {
DCHECK(!data.empty());
- TRACE_EVENT0("midi", "MIDIMessageFilter::HandleDataReceived");
+ TRACE_EVENT0("midi", "MidiMessageFilter::HandleDataReceived");
for (ClientsMap::iterator i = clients_.begin(); i != clients_.end(); ++i)
(*i).first->didReceiveMIDIData(port, &data[0], data.size(), timestamp);
}
-void MIDIMessageFilter::SendMIDIData(uint32 port,
+void MidiMessageFilter::SendMidiData(uint32 port,
const uint8* data,
size_t length,
double timestamp) {
if (length > kMaxUnacknowledgedBytesSent) {
- // TODO(crogers): buffer up the data to send at a later time.
+ // TODO(toyoshim): buffer up the data to send at a later time.
// For now we're just dropping these bytes on the floor.
return;
}
std::vector<uint8> v(data, data + length);
io_message_loop_->PostTask(FROM_HERE,
- base::Bind(&MIDIMessageFilter::SendMIDIDataOnIOThread, this,
+ base::Bind(&MidiMessageFilter::SendMidiDataOnIOThread, this,
port, v, timestamp));
}
-void MIDIMessageFilter::SendMIDIDataOnIOThread(uint32 port,
+void MidiMessageFilter::SendMidiDataOnIOThread(uint32 port,
const std::vector<uint8>& data,
double timestamp) {
size_t n = data.size();
if (n > kMaxUnacknowledgedBytesSent ||
unacknowledged_bytes_sent_ > kMaxUnacknowledgedBytesSent ||
n + unacknowledged_bytes_sent_ > kMaxUnacknowledgedBytesSent) {
- // TODO(crogers): buffer up the data to send at a later time.
+ // TODO(toyoshim): buffer up the data to send at a later time.
// For now we're just dropping these bytes on the floor.
return;
}
@@ -211,7 +230,7 @@ void MIDIMessageFilter::SendMIDIDataOnIOThread(uint32 port,
unacknowledged_bytes_sent_ += n;
// Send to the browser.
- Send(new MIDIHostMsg_SendData(port, data, timestamp));
+ Send(new MidiHostMsg_SendData(port, data, timestamp));
}
} // namespace content
diff --git a/chromium/content/renderer/media/midi_message_filter.h b/chromium/content/renderer/media/midi_message_filter.h
index c2e66ae4d0c..e8de64d7fbd 100644
--- a/chromium/content/renderer/media/midi_message_filter.h
+++ b/chromium/content/renderer/media/midi_message_filter.h
@@ -10,8 +10,9 @@
#include "base/memory/scoped_ptr.h"
#include "content/common/content_export.h"
-#include "ipc/ipc_channel_proxy.h"
+#include "ipc/message_filter.h"
#include "media/midi/midi_port_info.h"
+#include "media/midi/midi_result.h"
#include "third_party/WebKit/public/platform/WebMIDIAccessorClient.h"
namespace base {
@@ -21,10 +22,9 @@ class MessageLoopProxy;
namespace content {
// MessageFilter that handles MIDI messages.
-class CONTENT_EXPORT MIDIMessageFilter
- : public IPC::ChannelProxy::MessageFilter {
+class CONTENT_EXPORT MidiMessageFilter : public IPC::MessageFilter {
public:
- explicit MIDIMessageFilter(
+ explicit MidiMessageFilter(
const scoped_refptr<base::MessageLoopProxy>& io_message_loop);
// Each client registers for MIDI access here.
@@ -36,7 +36,7 @@ class CONTENT_EXPORT MIDIMessageFilter
// A client will only be able to call this method if it has a suitable
// output port (from addOutputPort()).
- void SendMIDIData(uint32 port,
+ void SendMidiData(uint32 port,
const uint8* data,
size_t length,
double timestamp);
@@ -47,24 +47,24 @@ class CONTENT_EXPORT MIDIMessageFilter
}
protected:
- virtual ~MIDIMessageFilter();
+ virtual ~MidiMessageFilter();
private:
- // Sends an IPC message using |channel_|.
+ // Sends an IPC message using |sender_|.
void Send(IPC::Message* message);
- // IPC::ChannelProxy::MessageFilter override. Called on |io_message_loop|.
+ // IPC::MessageFilter override. Called on |io_message_loop|.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
- virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE;
+ virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE;
virtual void OnFilterRemoved() OVERRIDE;
virtual void OnChannelClosing() OVERRIDE;
// Called when the browser process has approved (or denied) access to
// MIDI hardware.
void OnSessionStarted(int client_id,
- bool success,
- media::MIDIPortInfoList inputs,
- media::MIDIPortInfoList outputs);
+ media::MidiResult result,
+ media::MidiPortInfoList inputs,
+ media::MidiPortInfoList outputs);
// Called when the browser process has sent MIDI data containing one or
// more messages.
@@ -78,9 +78,9 @@ class CONTENT_EXPORT MIDIMessageFilter
void OnAcknowledgeSentData(size_t bytes_sent);
void HandleSessionStarted(int client_id,
- bool success,
- media::MIDIPortInfoList inputs,
- media::MIDIPortInfoList outputs);
+ media::MidiResult result,
+ media::MidiPortInfoList inputs,
+ media::MidiPortInfoList outputs);
void HandleDataReceived(uint32 port,
const std::vector<uint8>& data,
@@ -88,14 +88,14 @@ class CONTENT_EXPORT MIDIMessageFilter
void StartSessionOnIOThread(int client_id);
- void SendMIDIDataOnIOThread(uint32 port,
+ void SendMidiDataOnIOThread(uint32 port,
const std::vector<uint8>& data,
double timestamp);
blink::WebMIDIAccessorClient* GetClientFromId(int client_id);
- // IPC channel for Send(); must only be accessed on |io_message_loop_|.
- IPC::Channel* channel_;
+ // IPC sender for Send(); must only be accessed on |io_message_loop_|.
+ IPC::Sender* sender_;
// Message loop on which IPC calls are driven.
const scoped_refptr<base::MessageLoopProxy> io_message_loop_;
@@ -115,7 +115,7 @@ class CONTENT_EXPORT MIDIMessageFilter
size_t unacknowledged_bytes_sent_;
- DISALLOW_COPY_AND_ASSIGN(MIDIMessageFilter);
+ DISALLOW_COPY_AND_ASSIGN(MidiMessageFilter);
};
} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_constraint_factory.cc b/chromium/content/renderer/media/mock_media_constraint_factory.cc
new file mode 100644
index 00000000000..5708d50bf2f
--- /dev/null
+++ b/chromium/content/renderer/media/mock_media_constraint_factory.cc
@@ -0,0 +1,101 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
+
+namespace content {
+
+namespace {
+
+static const char kValueTrue[] = "true";
+static const char kValueFalse[] = "false";
+
+} // namespace
+
+MockMediaConstraintFactory::MockMediaConstraintFactory() {
+}
+
+MockMediaConstraintFactory::~MockMediaConstraintFactory() {
+}
+
+blink::WebMediaConstraints
+MockMediaConstraintFactory::CreateWebMediaConstraints() {
+ blink::WebVector<blink::WebMediaConstraint> mandatory(mandatory_);
+ blink::WebVector<blink::WebMediaConstraint> optional(optional_);
+ blink::WebMediaConstraints constraints;
+ constraints.initialize(optional, mandatory);
+ return constraints;
+}
+
+void MockMediaConstraintFactory::AddMandatory(const std::string& key,
+ int value) {
+ mandatory_.push_back(blink::WebMediaConstraint(base::UTF8ToUTF16(key),
+ base::IntToString16(value)));
+}
+
+void MockMediaConstraintFactory::AddMandatory(const std::string& key,
+ double value) {
+ mandatory_.push_back(blink::WebMediaConstraint(
+ base::UTF8ToUTF16(key),
+ base::UTF8ToUTF16(base::DoubleToString(value))));
+}
+
+void MockMediaConstraintFactory::AddMandatory(const std::string& key,
+ const std::string& value) {
+ mandatory_.push_back(blink::WebMediaConstraint(
+ base::UTF8ToUTF16(key), base::UTF8ToUTF16(value)));
+}
+
+void MockMediaConstraintFactory::AddMandatory(const std::string& key,
+ bool value) {
+ const std::string string_value = value ? kValueTrue : kValueFalse;
+ AddMandatory(key, string_value);
+}
+
+void MockMediaConstraintFactory::AddOptional(const std::string& key,
+ int value) {
+ optional_.push_back(blink::WebMediaConstraint(base::UTF8ToUTF16(key),
+ base::IntToString16(value)));
+}
+
+void MockMediaConstraintFactory::AddOptional(const std::string& key,
+ double value) {
+ optional_.push_back(blink::WebMediaConstraint(
+ base::UTF8ToUTF16(key),
+ base::UTF8ToUTF16(base::DoubleToString(value))));
+}
+
+void MockMediaConstraintFactory::AddOptional(const std::string& key,
+ const std::string& value) {
+ optional_.push_back(blink::WebMediaConstraint(
+ base::UTF8ToUTF16(key), base::UTF8ToUTF16(value)));
+}
+
+void MockMediaConstraintFactory::AddOptional(const std::string& key,
+ bool value) {
+ const std::string string_value = value ? kValueTrue : kValueFalse;
+ AddOptional(key, string_value);
+}
+
+void MockMediaConstraintFactory::DisableDefaultAudioConstraints() {
+ static const char* kDefaultAudioConstraints[] = {
+ webrtc::MediaConstraintsInterface::kEchoCancellation,
+ webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
+ webrtc::MediaConstraintsInterface::kAutoGainControl,
+ webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
+ webrtc::MediaConstraintsInterface::kNoiseSuppression,
+ webrtc::MediaConstraintsInterface::kHighpassFilter,
+ webrtc::MediaConstraintsInterface::kTypingNoiseDetection,
+ webrtc::MediaConstraintsInterface::kExperimentalNoiseSuppression
+ };
+ MockMediaConstraintFactory factory;
+ for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
+ AddMandatory(kDefaultAudioConstraints[i], false);
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_constraint_factory.h b/chromium/content/renderer/media/mock_media_constraint_factory.h
new file mode 100644
index 00000000000..e98cb30429b
--- /dev/null
+++ b/chromium/content/renderer/media/mock_media_constraint_factory.h
@@ -0,0 +1,38 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_CONSTRAINT_H_
+#define CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_CONSTRAINT_H_
+
+#include <string>
+#include <vector>
+
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+
+namespace content {
+
+class MockMediaConstraintFactory {
+ public:
+ MockMediaConstraintFactory();
+ ~MockMediaConstraintFactory();
+
+ blink::WebMediaConstraints CreateWebMediaConstraints();
+ void AddMandatory(const std::string& key, int value);
+ void AddMandatory(const std::string& key, double value);
+ void AddMandatory(const std::string& key, const std::string& value);
+ void AddMandatory(const std::string& key, bool value);
+ void AddOptional(const std::string& key, int value);
+ void AddOptional(const std::string& key, double value);
+ void AddOptional(const std::string& key, const std::string& value);
+ void AddOptional(const std::string& key, bool value);
+ void DisableDefaultAudioConstraints();
+
+ private:
+ std::vector<blink::WebMediaConstraint> mandatory_;
+ std::vector<blink::WebMediaConstraint> optional_;
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_CONSTRAINT_H_
diff --git a/chromium/content/renderer/media/mock_media_stream_dispatcher.cc b/chromium/content/renderer/media/mock_media_stream_dispatcher.cc
index ed5c91c91ee..32ac1de61d3 100644
--- a/chromium/content/renderer/media/mock_media_stream_dispatcher.cc
+++ b/chromium/content/renderer/media/mock_media_stream_dispatcher.cc
@@ -8,11 +8,17 @@
#include "content/public/common/media_stream_request.h"
#include "testing/gtest/include/gtest/gtest.h"
+// Used as the ID for output devices, and for matching the output device ID
+// of input devices.
+const char kAudioOutputDeviceIdPrefix[] = "audio_output_device_id";
+
namespace content {
MockMediaStreamDispatcher::MockMediaStreamDispatcher()
: MediaStreamDispatcher(NULL),
- request_id_(-1),
+ audio_input_request_id_(-1),
+ audio_output_request_id_(-1),
+ video_request_id_(-1),
request_stream_counter_(0),
stop_audio_device_counter_(0),
stop_video_device_counter_(0),
@@ -26,40 +32,54 @@ void MockMediaStreamDispatcher::GenerateStream(
const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler,
const StreamOptions& components,
const GURL& url) {
- request_id_ = request_id;
+ // Audio and video share the same request so we use |audio_input_request_id_|
+ // only.
+ audio_input_request_id_ = request_id;
stream_label_ = "local_stream" + base::IntToString(request_id);
- audio_array_.clear();
+ audio_input_array_.clear();
video_array_.clear();
if (components.audio_requested) {
- StreamDeviceInfo audio;
- audio.device.id = "audio_device_id" + base::IntToString(session_id_);
- audio.device.name = "microphone";
- audio.device.type = MEDIA_DEVICE_AUDIO_CAPTURE;
- audio.session_id = session_id_;
- audio_array_.push_back(audio);
+ AddAudioInputDeviceToArray(false);
}
if (components.video_requested) {
- StreamDeviceInfo video;
- video.device.id = "video_device_id" + base::IntToString(session_id_);
- video.device.name = "usb video camera";
- video.device.type = MEDIA_DEVICE_VIDEO_CAPTURE;
- video.session_id = session_id_;
- video_array_.push_back(video);
+ AddVideoDeviceToArray();
}
++request_stream_counter_;
}
void MockMediaStreamDispatcher::CancelGenerateStream(
- int request_id,
- const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler) {
- EXPECT_EQ(request_id, request_id_);
+ int request_id,
+ const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler) {
+ EXPECT_EQ(request_id, audio_input_request_id_);
+}
+
+void MockMediaStreamDispatcher::EnumerateDevices(
+ int request_id,
+ const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler,
+ MediaStreamType type,
+ const GURL& security_origin,
+ bool hide_labels_if_no_access) {
+ if (type == MEDIA_DEVICE_AUDIO_CAPTURE) {
+ audio_input_request_id_ = request_id;
+ audio_input_array_.clear();
+ AddAudioInputDeviceToArray(true);
+ AddAudioInputDeviceToArray(false);
+ } else if (type == MEDIA_DEVICE_AUDIO_OUTPUT) {
+ audio_output_request_id_ = request_id;
+ audio_output_array_.clear();
+ AddAudioOutputDeviceToArray();
+ } else if (type == MEDIA_DEVICE_VIDEO_CAPTURE) {
+ video_request_id_ = request_id;
+ video_array_.clear();
+ AddVideoDeviceToArray();
+ }
}
void MockMediaStreamDispatcher::StopStreamDevice(
const StreamDeviceInfo& device_info) {
- if (IsAudioMediaType(device_info.device.type)) {
+ if (IsAudioInputMediaType(device_info.device.type)) {
++stop_audio_device_counter_;
return;
}
@@ -84,4 +104,36 @@ int MockMediaStreamDispatcher::audio_session_id(const std::string& label,
return -1;
}
+void MockMediaStreamDispatcher::AddAudioInputDeviceToArray(
+ bool matched_output) {
+ StreamDeviceInfo audio;
+ audio.device.id = "audio_input_device_id" + base::IntToString(session_id_);
+ audio.device.name = "microphone";
+ audio.device.type = MEDIA_DEVICE_AUDIO_CAPTURE;
+ if (matched_output) {
+ audio.device.matched_output_device_id =
+ kAudioOutputDeviceIdPrefix + base::IntToString(session_id_);
+ }
+ audio.session_id = session_id_;
+ audio_input_array_.push_back(audio);
+}
+
+void MockMediaStreamDispatcher::AddAudioOutputDeviceToArray() {
+ StreamDeviceInfo audio;
+ audio.device.id = kAudioOutputDeviceIdPrefix + base::IntToString(session_id_);
+ audio.device.name = "speaker";
+ audio.device.type = MEDIA_DEVICE_AUDIO_OUTPUT;
+ audio.session_id = session_id_;
+ audio_output_array_.push_back(audio);
+}
+
+void MockMediaStreamDispatcher::AddVideoDeviceToArray() {
+ StreamDeviceInfo video;
+ video.device.id = "video_device_id" + base::IntToString(session_id_);
+ video.device.name = "usb video camera";
+ video.device.type = MEDIA_DEVICE_VIDEO_CAPTURE;
+ video.session_id = session_id_;
+ video_array_.push_back(video);
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_stream_dispatcher.h b/chromium/content/renderer/media/mock_media_stream_dispatcher.h
index d16f32d6f3f..eed1f572051 100644
--- a/chromium/content/renderer/media/mock_media_stream_dispatcher.h
+++ b/chromium/content/renderer/media/mock_media_stream_dispatcher.h
@@ -27,12 +27,20 @@ class MockMediaStreamDispatcher : public MediaStreamDispatcher {
int request_id,
const base::WeakPtr<MediaStreamDispatcherEventHandler>&
event_handler) OVERRIDE;
+ virtual void EnumerateDevices(
+ int request_id,
+ const base::WeakPtr<MediaStreamDispatcherEventHandler>& event_handler,
+ MediaStreamType type,
+ const GURL& security_origin,
+ bool hide_labels_if_no_access) OVERRIDE;
virtual void StopStreamDevice(const StreamDeviceInfo& device_info) OVERRIDE;
virtual bool IsStream(const std::string& label) OVERRIDE;
virtual int video_session_id(const std::string& label, int index) OVERRIDE;
virtual int audio_session_id(const std::string& label, int index) OVERRIDE;
- int request_id() const { return request_id_; }
+ int audio_input_request_id() const { return audio_input_request_id_; }
+ int audio_output_request_id() const { return audio_output_request_id_; }
+ int video_request_id() const { return video_request_id_; }
int request_stream_counter() const { return request_stream_counter_; }
void IncrementSessionId() { ++session_id_; }
@@ -40,11 +48,22 @@ class MockMediaStreamDispatcher : public MediaStreamDispatcher {
int stop_video_device_counter() const { return stop_video_device_counter_; }
const std::string& stream_label() const { return stream_label_;}
- StreamDeviceInfoArray audio_array() const { return audio_array_; }
- StreamDeviceInfoArray video_array() const { return video_array_; }
+ const StreamDeviceInfoArray& audio_input_array() const {
+ return audio_input_array_;
+ }
+ const StreamDeviceInfoArray& audio_output_array() const {
+ return audio_output_array_;
+ }
+ const StreamDeviceInfoArray& video_array() const { return video_array_; }
private:
- int request_id_;
+ void AddAudioInputDeviceToArray(bool matched_output);
+ void AddAudioOutputDeviceToArray();
+ void AddVideoDeviceToArray();
+
+ int audio_input_request_id_;
+ int audio_output_request_id_; // Only used for EnumerateDevices.
+ int video_request_id_; // Only used for EnumerateDevices.
base::WeakPtr<MediaStreamDispatcherEventHandler> event_handler_;
int request_stream_counter_;
int stop_audio_device_counter_;
@@ -52,7 +71,8 @@ class MockMediaStreamDispatcher : public MediaStreamDispatcher {
std::string stream_label_;
int session_id_;
- StreamDeviceInfoArray audio_array_;
+ StreamDeviceInfoArray audio_input_array_;
+ StreamDeviceInfoArray audio_output_array_;
StreamDeviceInfoArray video_array_;
DISALLOW_COPY_AND_ASSIGN(MockMediaStreamDispatcher);
diff --git a/chromium/content/renderer/media/mock_media_stream_registry.cc b/chromium/content/renderer/media/mock_media_stream_registry.cc
index cc29c058e66..53bf948479a 100644
--- a/chromium/content/renderer/media/mock_media_stream_registry.cc
+++ b/chromium/content/renderer/media/mock_media_stream_registry.cc
@@ -7,36 +7,50 @@
#include <string>
#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_stream_video_source.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/platform/WebVector.h"
-#include "third_party/libjingle/source/talk/media/base/videocapturer.h"
namespace content {
-static const std::string kTestStreamLabel = "stream_label";
+static const char kTestStreamLabel[] = "stream_label";
-MockMediaStreamRegistry::MockMediaStreamRegistry(
- MockMediaStreamDependencyFactory* factory)
- : factory_(factory) {
+MockMediaStreamRegistry::MockMediaStreamRegistry() {
}
void MockMediaStreamRegistry::Init(const std::string& stream_url) {
stream_url_ = stream_url;
- scoped_refptr<webrtc::MediaStreamInterface> stream(
- factory_->CreateLocalMediaStream(kTestStreamLabel));
blink::WebVector<blink::WebMediaStreamTrack> webkit_audio_tracks;
blink::WebVector<blink::WebMediaStreamTrack> webkit_video_tracks;
- blink::WebString webkit_stream_label(UTF8ToUTF16(stream->label()));
- test_stream_.initialize(webkit_stream_label,
- webkit_audio_tracks, webkit_video_tracks);
- test_stream_.setExtraData(new MediaStreamExtraData(stream.get(), false));
+ blink::WebString label(kTestStreamLabel);
+ test_stream_.initialize(label, webkit_audio_tracks, webkit_video_tracks);
+ test_stream_.setExtraData(new MediaStream(test_stream_));
}
-bool MockMediaStreamRegistry::AddVideoTrack(const std::string& track_id) {
- cricket::VideoCapturer* capturer = NULL;
- return factory_->AddNativeVideoMediaTrack(track_id, &test_stream_, capturer);
+void MockMediaStreamRegistry::AddVideoTrack(const std::string& track_id) {
+ blink::WebMediaStreamSource blink_source;
+ blink_source.initialize("mock video source id",
+ blink::WebMediaStreamSource::TypeVideo,
+ "mock video source name");
+ MockMediaStreamVideoSource* native_source =
+ new MockMediaStreamVideoSource(false);
+ blink_source.setExtraData(native_source);
+ blink::WebMediaStreamTrack blink_track;
+ blink_track.initialize(base::UTF8ToUTF16(track_id), blink_source);
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+
+ MediaStreamVideoTrack* native_track =
+ new MediaStreamVideoTrack(native_source,
+ constraints,
+ MediaStreamVideoSource::ConstraintsCallback(),
+ true);
+ blink_track.setExtraData(native_track);
+ test_stream_.addTrack(blink_track);
}
blink::WebMediaStream MockMediaStreamRegistry::GetMediaStream(
diff --git a/chromium/content/renderer/media/mock_media_stream_registry.h b/chromium/content/renderer/media/mock_media_stream_registry.h
index c0dba849fb4..ac0ffa1db62 100644
--- a/chromium/content/renderer/media/mock_media_stream_registry.h
+++ b/chromium/content/renderer/media/mock_media_stream_registry.h
@@ -7,23 +7,23 @@
#include <string>
+#include "base/compiler_specific.h"
#include "content/renderer/media/media_stream_registry_interface.h"
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
namespace content {
class MockMediaStreamRegistry : public MediaStreamRegistryInterface {
public:
- explicit MockMediaStreamRegistry(MockMediaStreamDependencyFactory* factory);
+ MockMediaStreamRegistry();
void Init(const std::string& stream_label);
- bool AddVideoTrack(const std::string& track_id);
- virtual blink::WebMediaStream GetMediaStream(const std::string& url)
- OVERRIDE;
+ void AddVideoTrack(const std::string& track_id);
+ virtual blink::WebMediaStream GetMediaStream(
+ const std::string& url) OVERRIDE;
+
const blink::WebMediaStream test_stream() const;
private:
- MockMediaStreamDependencyFactory* factory_;
blink::WebMediaStream test_stream_;
std::string stream_url_;
};
diff --git a/chromium/content/renderer/media/mock_media_stream_video_sink.cc b/chromium/content/renderer/media/mock_media_stream_video_sink.cc
new file mode 100644
index 00000000000..347ca62e728
--- /dev/null
+++ b/chromium/content/renderer/media/mock_media_stream_video_sink.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/mock_media_stream_video_sink.h"
+
+#include "media/base/bind_to_current_loop.h"
+
+namespace content {
+
+MockMediaStreamVideoSink::MockMediaStreamVideoSink()
+ : number_of_frames_(0),
+ enabled_(true),
+ format_(media::VideoFrame::UNKNOWN),
+ state_(blink::WebMediaStreamSource::ReadyStateLive),
+ weak_factory_(this) {
+}
+
+MockMediaStreamVideoSink::~MockMediaStreamVideoSink() {
+}
+
+VideoCaptureDeliverFrameCB
+MockMediaStreamVideoSink::GetDeliverFrameCB() {
+ return media::BindToCurrentLoop(
+ base::Bind(
+ &MockMediaStreamVideoSink::DeliverVideoFrame,
+ weak_factory_.GetWeakPtr()));
+}
+
+void MockMediaStreamVideoSink::DeliverVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ ++number_of_frames_;
+ format_ = frame->format();
+ frame_size_ = frame->natural_size();
+ OnVideoFrame();
+}
+
+void MockMediaStreamVideoSink::OnReadyStateChanged(
+ blink::WebMediaStreamSource::ReadyState state) {
+ state_ = state;
+}
+
+void MockMediaStreamVideoSink::OnEnabledChanged(bool enabled) {
+ enabled_ = enabled;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_stream_video_sink.h b/chromium/content/renderer/media/mock_media_stream_video_sink.h
new file mode 100644
index 00000000000..682a617f167
--- /dev/null
+++ b/chromium/content/renderer/media/mock_media_stream_video_sink.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_VIDEO_SINK_H_
+#define CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_VIDEO_SINK_H_
+
+#include "content/public/renderer/media_stream_video_sink.h"
+
+#include "base/memory/weak_ptr.h"
+#include "content/common/media/video_capture.h"
+#include "media/base/video_frame.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace content {
+
+class MockMediaStreamVideoSink : public MediaStreamVideoSink {
+ public:
+ MockMediaStreamVideoSink();
+ virtual ~MockMediaStreamVideoSink();
+
+ virtual void OnReadyStateChanged(
+ blink::WebMediaStreamSource::ReadyState state) OVERRIDE;
+ virtual void OnEnabledChanged(bool enabled) OVERRIDE;
+
+ // Triggered when OnVideoFrame(const scoped_refptr<media::VideoFrame>& frame)
+ // is called.
+ MOCK_METHOD0(OnVideoFrame, void());
+
+ VideoCaptureDeliverFrameCB GetDeliverFrameCB();
+
+ int number_of_frames() const { return number_of_frames_; }
+ media::VideoFrame::Format format() const { return format_; }
+ gfx::Size frame_size() const { return frame_size_; }
+
+ bool enabled() const { return enabled_; }
+ blink::WebMediaStreamSource::ReadyState state() const { return state_; }
+
+ private:
+ void DeliverVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
+ int number_of_frames_;
+ bool enabled_;
+ media::VideoFrame::Format format_;
+ blink::WebMediaStreamSource::ReadyState state_;
+ gfx::Size frame_size_;
+ base::WeakPtrFactory<MockMediaStreamVideoSink> weak_factory_;
+};
+
+} // namespace content
+
+#endif
diff --git a/chromium/content/renderer/media/mock_media_stream_video_source.cc b/chromium/content/renderer/media/mock_media_stream_video_source.cc
new file mode 100644
index 00000000000..b1e58f3957b
--- /dev/null
+++ b/chromium/content/renderer/media/mock_media_stream_video_source.cc
@@ -0,0 +1,91 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/mock_media_stream_video_source.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/location.h"
+
+namespace content {
+
+MockMediaStreamVideoSource::MockMediaStreamVideoSource(
+ bool manual_get_supported_formats)
+ : manual_get_supported_formats_(manual_get_supported_formats),
+ max_requested_height_(0),
+ max_requested_width_(0),
+ attempted_to_start_(false) {
+ supported_formats_.push_back(
+ media::VideoCaptureFormat(
+ gfx::Size(MediaStreamVideoSource::kDefaultWidth,
+ MediaStreamVideoSource::kDefaultHeight),
+ MediaStreamVideoSource::kDefaultFrameRate,
+ media::PIXEL_FORMAT_I420));
+}
+
+MockMediaStreamVideoSource::~MockMediaStreamVideoSource() {}
+
+void MockMediaStreamVideoSource::StartMockedSource() {
+ DCHECK(attempted_to_start_);
+ attempted_to_start_ = false;
+ OnStartDone(true);
+}
+
+void MockMediaStreamVideoSource::FailToStartMockedSource() {
+ DCHECK(attempted_to_start_);
+ attempted_to_start_ = false;
+ OnStartDone(false);
+}
+
+void MockMediaStreamVideoSource::CompleteGetSupportedFormats() {
+ DCHECK(!formats_callback_.is_null());
+ base::ResetAndReturn(&formats_callback_).Run(supported_formats_);
+}
+
+void MockMediaStreamVideoSource::GetCurrentSupportedFormats(
+ int max_requested_height,
+ int max_requested_width,
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(formats_callback_.is_null());
+ max_requested_height_ = max_requested_height;
+ max_requested_width_ = max_requested_width;
+
+ if (manual_get_supported_formats_) {
+ formats_callback_ = callback;
+ return;
+ }
+ callback.Run(supported_formats_);
+}
+
+void MockMediaStreamVideoSource::StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) {
+ DCHECK(frame_callback_.is_null());
+ params_ = params;
+ attempted_to_start_ = true;
+ frame_callback_ = frame_callback;
+}
+
+void MockMediaStreamVideoSource::StopSourceImpl() {
+}
+
+void MockMediaStreamVideoSource::DeliverVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame) {
+ DCHECK(!frame_callback_.is_null());
+ io_message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MockMediaStreamVideoSource::DeliverVideoFrameOnIO,
+ base::Unretained(this), frame, params_.requested_format,
+ base::TimeTicks(), frame_callback_));
+}
+
+void MockMediaStreamVideoSource::DeliverVideoFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ media::VideoCaptureFormat format,
+ const base::TimeTicks& estimated_capture_time,
+ const VideoCaptureDeliverFrameCB& frame_callback) {
+ frame_callback.Run(frame, format, estimated_capture_time);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_stream_video_source.h b/chromium/content/renderer/media/mock_media_stream_video_source.h
new file mode 100644
index 00000000000..ad7463b6d4c
--- /dev/null
+++ b/chromium/content/renderer/media/mock_media_stream_video_source.h
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_VIDEO_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_VIDEO_SOURCE_H_
+
+#include "content/renderer/media/media_stream_video_source.h"
+
+namespace content {
+
+class MockMediaStreamVideoSource : public MediaStreamVideoSource {
+ public:
+ explicit MockMediaStreamVideoSource(bool manual_get_supported_formats);
+ virtual ~MockMediaStreamVideoSource();
+
+ // Simulate that the underlying source starts successfully.
+ void StartMockedSource();
+
+ // Simulate that the underlying source fails to start.
+ void FailToStartMockedSource();
+
+ // Returns true if StartSource has been called and StartMockedSource
+ // or FailToStartMockedSource has not been called.
+ bool SourceHasAttemptedToStart() { return attempted_to_start_; }
+
+ void SetSupportedFormats(const media::VideoCaptureFormats& formats) {
+ supported_formats_ = formats;
+ }
+
+ // Delivers |frame| to all registered tracks on the IO thread. It's up to the
+ // caller to make sure MockMediaStreamVideoSource is not destroyed before the
+ // frame has been delivered.
+ void DeliverVideoFrame(const scoped_refptr<media::VideoFrame>& frame);
+
+ void CompleteGetSupportedFormats();
+
+ const media::VideoCaptureParams& start_params() const { return params_; }
+ int max_requested_height() const { return max_requested_height_; }
+ int max_requested_width() const { return max_requested_width_; }
+
+ protected:
+ void DeliverVideoFrameOnIO(const scoped_refptr<media::VideoFrame>& frame,
+ media::VideoCaptureFormat format,
+ const base::TimeTicks& estimated_capture_time,
+ const VideoCaptureDeliverFrameCB& frame_callback);
+
+ // Implements MediaStreamVideoSource.
+ virtual void GetCurrentSupportedFormats(
+ int max_requested_height,
+ int max_requested_width,
+ const VideoCaptureDeviceFormatsCB& callback) OVERRIDE;
+ virtual void StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) OVERRIDE;
+ virtual void StopSourceImpl() OVERRIDE;
+
+ private:
+ media::VideoCaptureParams params_;
+ media::VideoCaptureFormats supported_formats_;
+ bool manual_get_supported_formats_;
+ int max_requested_height_;
+ int max_requested_width_;
+ bool attempted_to_start_;
+ VideoCaptureDeviceFormatsCB formats_callback_;
+ VideoCaptureDeliverFrameCB frame_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockMediaStreamVideoSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_VIDEO_SOURCE_H_
diff --git a/chromium/content/renderer/media/mock_peer_connection_impl.cc b/chromium/content/renderer/media/mock_peer_connection_impl.cc
index b944bd13b54..41fa2d1a5d5 100644
--- a/chromium/content/renderer/media/mock_peer_connection_impl.cc
+++ b/chromium/content/renderer/media/mock_peer_connection_impl.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
#include "content/renderer/media/mock_peer_connection_impl.h"
#include <vector>
#include "base/logging.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
using testing::_;
using webrtc::AudioTrackInterface;
@@ -205,7 +205,7 @@ const char MockPeerConnectionImpl::kDummyOffer[] = "dummy offer";
const char MockPeerConnectionImpl::kDummyAnswer[] = "dummy answer";
MockPeerConnectionImpl::MockPeerConnectionImpl(
- MockMediaStreamDependencyFactory* factory)
+ MockPeerConnectionDependencyFactory* factory)
: dependency_factory_(factory),
local_streams_(new talk_base::RefCountedObject<MockStreamCollection>),
remote_streams_(new talk_base::RefCountedObject<MockStreamCollection>),
@@ -263,12 +263,14 @@ MockPeerConnectionImpl::CreateDataChannel(const std::string& label,
bool MockPeerConnectionImpl::GetStats(
webrtc::StatsObserver* observer,
- webrtc::MediaStreamTrackInterface* track) {
+ webrtc::MediaStreamTrackInterface* track,
+ StatsOutputLevel level) {
if (!getstats_result_)
return false;
- std::vector<webrtc::StatsReport> reports;
- webrtc::StatsReport report;
+ DCHECK_EQ(kStatsOutputLevelStandard, level);
+ std::vector<webrtc::StatsReport> reports(track ? 1 : 2);
+ webrtc::StatsReport& report = reports[0];
report.id = "1234";
report.type = "ssrc";
report.timestamp = 42;
@@ -276,17 +278,17 @@ bool MockPeerConnectionImpl::GetStats(
value.name = "trackname";
value.value = "trackvalue";
report.values.push_back(value);
- reports.push_back(report);
// If selector is given, we pass back one report.
// If selector is not given, we pass back two.
if (!track) {
- report.id = "nontrack";
- report.type = "generic";
- report.timestamp = 44;
+ webrtc::StatsReport& report2 = reports[1];
+ report2.id = "nontrack";
+ report2.type = "generic";
+ report2.timestamp = 44;
+ report2.values.push_back(value);
value.name = "somename";
value.value = "somevalue";
- report.values.push_back(value);
- reports.push_back(report);
+ report2.values.push_back(value);
}
// Note that the callback is synchronous, not asynchronous; it will
// happen before the request call completes.
@@ -353,4 +355,9 @@ bool MockPeerConnectionImpl::AddIceCandidate(
return candidate->ToString(&ice_sdp_);
}
+void MockPeerConnectionImpl::RegisterUMAObserver(
+ webrtc::UMAObserver* observer) {
+ NOTIMPLEMENTED();
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/mock_peer_connection_impl.h b/chromium/content/renderer/media/mock_peer_connection_impl.h
index 5adf930d269..d563746aeed 100644
--- a/chromium/content/renderer/media/mock_peer_connection_impl.h
+++ b/chromium/content/renderer/media/mock_peer_connection_impl.h
@@ -9,18 +9,19 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/libjingle/source/talk/app/webrtc/peerconnectioninterface.h"
namespace content {
-class MockMediaStreamDependencyFactory;
+class MockPeerConnectionDependencyFactory;
class MockStreamCollection;
class MockPeerConnectionImpl : public webrtc::PeerConnectionInterface {
public:
- explicit MockPeerConnectionImpl(MockMediaStreamDependencyFactory* factory);
+ explicit MockPeerConnectionImpl(MockPeerConnectionDependencyFactory* factory);
// PeerConnectionInterface implementation.
virtual talk_base::scoped_refptr<webrtc::StreamCollectionInterface>
@@ -39,7 +40,13 @@ class MockPeerConnectionImpl : public webrtc::PeerConnectionInterface {
const webrtc::DataChannelInit* config) OVERRIDE;
virtual bool GetStats(webrtc::StatsObserver* observer,
- webrtc::MediaStreamTrackInterface* track) OVERRIDE;
+ webrtc::MediaStreamTrackInterface* track) {
+ return false;
+ }
+ virtual bool GetStats(webrtc::StatsObserver* observer,
+ webrtc::MediaStreamTrackInterface* track,
+ StatsOutputLevel level) OVERRIDE;
+
// Set Call this function to make sure next call to GetStats fail.
void SetGetStatsResult(bool result) { getstats_result_ = result; }
@@ -92,6 +99,7 @@ class MockPeerConnectionImpl : public webrtc::PeerConnectionInterface {
const webrtc::MediaConstraintsInterface* constraints) OVERRIDE;
virtual bool AddIceCandidate(
const webrtc::IceCandidateInterface* candidate) OVERRIDE;
+ virtual void RegisterUMAObserver(webrtc::UMAObserver* observer) OVERRIDE;
void AddRemoteStream(webrtc::MediaStreamInterface* stream);
@@ -113,7 +121,7 @@ class MockPeerConnectionImpl : public webrtc::PeerConnectionInterface {
private:
// Used for creating MockSessionDescription.
- MockMediaStreamDependencyFactory* dependency_factory_;
+ MockPeerConnectionDependencyFactory* dependency_factory_;
std::string stream_label_;
talk_base::scoped_refptr<MockStreamCollection> local_streams_;
diff --git a/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.cc b/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.cc
index a50525e1c39..a4b8e550133 100644
--- a/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.cc
+++ b/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.cc
@@ -32,9 +32,9 @@ MockWebRTCPeerConnectionHandlerClient::
void MockWebRTCPeerConnectionHandlerClient::didGenerateICECandidateWorker(
const blink::WebRTCICECandidate& candidate) {
if (!candidate.isNull()) {
- candidate_sdp_ = UTF16ToUTF8(candidate.candidate());
+ candidate_sdp_ = base::UTF16ToUTF8(candidate.candidate());
candidate_mline_index_ = candidate.sdpMLineIndex();
- candidate_mid_ = UTF16ToUTF8(candidate.sdpMid());
+ candidate_mid_ = base::UTF16ToUTF8(candidate.sdpMid());
} else {
candidate_sdp_ = "";
candidate_mline_index_ = -1;
diff --git a/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.h b/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.h
index 720a7cb65ea..31b5a02d431 100644
--- a/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.h
+++ b/chromium/content/renderer/media/mock_web_rtc_peer_connection_handler_client.h
@@ -35,6 +35,7 @@ class MockWebRTCPeerConnectionHandlerClient
void(const blink::WebMediaStream& stream_descriptor));
MOCK_METHOD1(didAddRemoteDataChannel,
void(blink::WebRTCDataChannelHandler*));
+ MOCK_METHOD0(releasePeerConnectionHandler, void());
void didGenerateICECandidateWorker(
const blink::WebRTCICECandidate& candidate);
diff --git a/chromium/content/renderer/media/peer_connection_handler_base.cc b/chromium/content/renderer/media/peer_connection_handler_base.cc
deleted file mode 100644
index 05e71a6226a..00000000000
--- a/chromium/content/renderer/media/peer_connection_handler_base.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/peer_connection_handler_base.h"
-
-#include "base/logging.h"
-#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "third_party/WebKit/public/platform/WebMediaStream.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/WebKit/public/platform/WebString.h"
-
-namespace content {
-
-PeerConnectionHandlerBase::PeerConnectionHandlerBase(
- MediaStreamDependencyFactory* dependency_factory)
- : dependency_factory_(dependency_factory),
- message_loop_proxy_(base::MessageLoopProxy::current()) {
-}
-
-PeerConnectionHandlerBase::~PeerConnectionHandlerBase() {
-}
-
-bool PeerConnectionHandlerBase::AddStream(
- const blink::WebMediaStream& stream,
- const webrtc::MediaConstraintsInterface* constraints) {
- webrtc::MediaStreamInterface* native_stream =
- MediaStreamDependencyFactory::GetNativeMediaStream(stream);
- if (!native_stream)
- return false;
- return native_peer_connection_->AddStream(native_stream, constraints);
-}
-
-void PeerConnectionHandlerBase::RemoveStream(
- const blink::WebMediaStream& stream) {
- webrtc::MediaStreamInterface* native_stream =
- MediaStreamDependencyFactory::GetNativeMediaStream(stream);
- if (native_stream)
- native_peer_connection_->RemoveStream(native_stream);
- DCHECK(native_stream);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/peer_connection_handler_base.h b/chromium/content/renderer/media/peer_connection_handler_base.h
deleted file mode 100644
index db6c0407e31..00000000000
--- a/chromium/content/renderer/media/peer_connection_handler_base.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_PEER_CONNECTION_HANDLER_BASE_H_
-#define CONTENT_RENDERER_MEDIA_PEER_CONNECTION_HANDLER_BASE_H_
-
-#include <map>
-#include <string>
-
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "content/common/content_export.h"
-#include "third_party/WebKit/public/platform/WebMediaStream.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediastream.h"
-#include "third_party/libjingle/source/talk/app/webrtc/peerconnectioninterface.h"
-
-namespace content {
-class MediaStreamDependencyFactory;
-class RemoteMediaStreamImpl;
-
-// PeerConnectionHandlerBase is the base class of a delegate for the
-// PeerConnection API messages going between WebKit and native
-// PeerConnection in libjingle.
-class CONTENT_EXPORT PeerConnectionHandlerBase
- : NON_EXPORTED_BASE(public webrtc::PeerConnectionObserver) {
- public:
- PeerConnectionHandlerBase(
- MediaStreamDependencyFactory* dependency_factory);
-
- protected:
- virtual ~PeerConnectionHandlerBase();
-
- void AddStream(const blink::WebMediaStream& stream);
- bool AddStream(const blink::WebMediaStream& stream,
- const webrtc::MediaConstraintsInterface* constraints);
- void RemoveStream(const blink::WebMediaStream& stream);
-
- // dependency_factory_ is a raw pointer, and is valid for the lifetime of
- // MediaStreamImpl.
- MediaStreamDependencyFactory* dependency_factory_;
-
- // native_peer_connection_ is the native PeerConnection object,
- // it handles the ICE processing and media engine.
- scoped_refptr<webrtc::PeerConnectionInterface> native_peer_connection_;
-
- typedef std::map<webrtc::MediaStreamInterface*,
- content::RemoteMediaStreamImpl*> RemoteStreamMap;
- RemoteStreamMap remote_streams_;
-
- // The message loop we are created on and on which to make calls to WebKit.
- // This should be the render thread message loop.
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
-
- DISALLOW_COPY_AND_ASSIGN(PeerConnectionHandlerBase);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_PEER_CONNECTION_HANDLER_BASE_H_
diff --git a/chromium/content/renderer/media/peer_connection_tracker.cc b/chromium/content/renderer/media/peer_connection_tracker.cc
index b594ff8f0b3..a9873971be8 100644
--- a/chromium/content/renderer/media/peer_connection_tracker.cc
+++ b/chromium/content/renderer/media/peer_connection_tracker.cc
@@ -8,6 +8,7 @@
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/rtc_peer_connection_handler.h"
#include "content/renderer/render_thread_impl.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
@@ -15,6 +16,7 @@
#include "third_party/WebKit/public/platform/WebRTCPeerConnectionHandlerClient.h"
#include "third_party/WebKit/public/web/WebDocument.h"
#include "third_party/WebKit/public/web/WebFrame.h"
+#include "third_party/WebKit/public/web/WebUserMediaRequest.h"
using std::string;
using webrtc::MediaConstraintsInterface;
@@ -64,13 +66,13 @@ static string SerializeMediaConstraints(
static string SerializeMediaStreamComponent(
const blink::WebMediaStreamTrack component) {
- string id = UTF16ToUTF8(component.source().id());
+ string id = base::UTF16ToUTF8(component.source().id());
return id;
}
static string SerializeMediaDescriptor(
const blink::WebMediaStream& stream) {
- string label = UTF16ToUTF8(stream.id());
+ string label = base::UTF16ToUTF8(stream.id());
string result = "label: " + label;
blink::WebVector<blink::WebMediaStreamTrack> tracks;
stream.audioTracks(tracks);
@@ -160,7 +162,7 @@ static base::DictionaryValue* GetDictValueStats(
if (report.values.empty())
return NULL;
- DictionaryValue* dict = new base::DictionaryValue();
+ base::DictionaryValue* dict = new base::DictionaryValue();
dict->SetDouble("timestamp", report.timestamp);
base::ListValue* values = new base::ListValue();
@@ -243,7 +245,10 @@ void PeerConnectionTracker::OnGetAllStats() {
talk_base::scoped_refptr<InternalStatsObserver> observer(
new talk_base::RefCountedObject<InternalStatsObserver>(it->second));
- it->first->GetStats(observer, NULL);
+ it->first->GetStats(
+ observer,
+ NULL,
+ webrtc::PeerConnectionInterface::kStatsOutputLevelDebug);
}
}
@@ -306,8 +311,8 @@ void PeerConnectionTracker::TrackSetSessionDescription(
RTCPeerConnectionHandler* pc_handler,
const blink::WebRTCSessionDescription& desc,
Source source) {
- string sdp = UTF16ToUTF8(desc.sdp());
- string type = UTF16ToUTF8(desc.type());
+ string sdp = base::UTF16ToUTF8(desc.sdp());
+ string type = base::UTF16ToUTF8(desc.type());
string value = "type: " + type + ", sdp: " + sdp;
SendPeerConnectionUpdate(
@@ -332,8 +337,8 @@ void PeerConnectionTracker::TrackAddIceCandidate(
RTCPeerConnectionHandler* pc_handler,
const blink::WebRTCICECandidate& candidate,
Source source) {
- string value = "mid: " + UTF16ToUTF8(candidate.sdpMid()) + ", " +
- "candidate: " + UTF16ToUTF8(candidate.candidate());
+ string value = "mid: " + base::UTF16ToUTF8(candidate.sdpMid()) + ", " +
+ "candidate: " + base::UTF16ToUTF8(candidate.candidate());
SendPeerConnectionUpdate(
pc_handler,
source == SOURCE_LOCAL ? "onIceCandidate" : "addIceCandidate", value);
@@ -431,7 +436,20 @@ void PeerConnectionTracker::TrackCreateDTMFSender(
RTCPeerConnectionHandler* pc_handler,
const blink::WebMediaStreamTrack& track) {
SendPeerConnectionUpdate(pc_handler, "createDTMFSender",
- UTF16ToUTF8(track.id()));
+ base::UTF16ToUTF8(track.id()));
+}
+
+void PeerConnectionTracker::TrackGetUserMedia(
+ const blink::WebUserMediaRequest& user_media_request) {
+ RTCMediaConstraints audio_constraints(user_media_request.audioConstraints());
+ RTCMediaConstraints video_constraints(user_media_request.videoConstraints());
+
+ RenderThreadImpl::current()->Send(new PeerConnectionTrackerHost_GetUserMedia(
+ user_media_request.securityOrigin().toString().utf8(),
+ user_media_request.audio(),
+ user_media_request.video(),
+ SerializeMediaConstraints(audio_constraints),
+ SerializeMediaConstraints(video_constraints)));
}
int PeerConnectionTracker::GetNextLocalID() {
diff --git a/chromium/content/renderer/media/peer_connection_tracker.h b/chromium/content/renderer/media/peer_connection_tracker.h
index 3bcda3996f7..406b7ac4b53 100644
--- a/chromium/content/renderer/media/peer_connection_tracker.h
+++ b/chromium/content/renderer/media/peer_connection_tracker.h
@@ -19,6 +19,7 @@ class WebFrame;
class WebRTCICECandidate;
class WebString;
class WebRTCSessionDescription;
+class WebUserMediaRequest;
} // namespace blink
namespace webrtc {
@@ -148,6 +149,10 @@ class CONTENT_EXPORT PeerConnectionTracker : public RenderProcessObserver {
RTCPeerConnectionHandler* pc_handler,
const blink::WebMediaStreamTrack& track);
+ // Sends an update when getUserMedia is called.
+ virtual void TrackGetUserMedia(
+ const blink::WebUserMediaRequest& user_media_request);
+
private:
// Assign a local ID to a peer connection so that the browser process can
// uniquely identify a peer connection in the renderer process.
diff --git a/chromium/content/renderer/media/pepper_platform_video_decoder.cc b/chromium/content/renderer/media/pepper_platform_video_decoder.cc
deleted file mode 100644
index 89fdbb4a0fa..00000000000
--- a/chromium/content/renderer/media/pepper_platform_video_decoder.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/pepper_platform_video_decoder.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "content/child/child_process.h"
-#include "content/common/gpu/client/gpu_channel_host.h"
-#include "content/renderer/render_thread_impl.h"
-
-using media::BitstreamBuffer;
-
-namespace content {
-
-PlatformVideoDecoder::PlatformVideoDecoder(
- VideoDecodeAccelerator::Client* client,
- int32 command_buffer_route_id)
- : client_(client),
- command_buffer_route_id_(command_buffer_route_id) {
- DCHECK(client);
-}
-
-PlatformVideoDecoder::~PlatformVideoDecoder() {}
-
-bool PlatformVideoDecoder::Initialize(media::VideoCodecProfile profile) {
- // TODO(vrk): Support multiple decoders.
- if (decoder_)
- return true;
-
- RenderThreadImpl* render_thread = RenderThreadImpl::current();
-
- // This is not synchronous, but subsequent IPC messages will be buffered, so
- // it is okay to immediately send IPC messages through the returned channel.
- GpuChannelHost* channel =
- render_thread->EstablishGpuChannelSync(
- CAUSE_FOR_GPU_LAUNCH_VIDEODECODEACCELERATOR_INITIALIZE);
-
- if (!channel)
- return false;
-
- // Send IPC message to initialize decoder in GPU process.
- decoder_ =
- channel->CreateVideoDecoder(command_buffer_route_id_, profile, this);
- return decoder_.get() != NULL;
-}
-
-void PlatformVideoDecoder::Decode(const BitstreamBuffer& bitstream_buffer) {
- DCHECK(decoder_.get());
- decoder_->Decode(bitstream_buffer);
-}
-
-void PlatformVideoDecoder::AssignPictureBuffers(
- const std::vector<media::PictureBuffer>& buffers) {
- DCHECK(decoder_.get());
- decoder_->AssignPictureBuffers(buffers);
-}
-
-void PlatformVideoDecoder::ReusePictureBuffer(int32 picture_buffer_id) {
- DCHECK(decoder_.get());
- decoder_->ReusePictureBuffer(picture_buffer_id);
-}
-
-void PlatformVideoDecoder::Flush() {
- DCHECK(decoder_.get());
- decoder_->Flush();
-}
-
-void PlatformVideoDecoder::Reset() {
- DCHECK(decoder_.get());
- decoder_->Reset();
-}
-
-void PlatformVideoDecoder::Destroy() {
- if (decoder_)
- decoder_.release()->Destroy();
- client_ = NULL;
- delete this;
-}
-
-void PlatformVideoDecoder::NotifyError(
- VideoDecodeAccelerator::Error error) {
- DCHECK(RenderThreadImpl::current());
- client_->NotifyError(error);
-}
-
-void PlatformVideoDecoder::ProvidePictureBuffers(
- uint32 requested_num_of_buffers,
- const gfx::Size& dimensions,
- uint32 texture_target) {
- DCHECK(RenderThreadImpl::current());
- client_->ProvidePictureBuffers(requested_num_of_buffers, dimensions,
- texture_target);
-}
-
-void PlatformVideoDecoder::DismissPictureBuffer(int32 picture_buffer_id) {
- DCHECK(RenderThreadImpl::current());
- client_->DismissPictureBuffer(picture_buffer_id);
-}
-
-void PlatformVideoDecoder::PictureReady(const media::Picture& picture) {
- DCHECK(RenderThreadImpl::current());
- client_->PictureReady(picture);
-}
-
-void PlatformVideoDecoder::NotifyInitializeDone() {
- NOTREACHED() << "GpuVideoDecodeAcceleratorHost::Initialize is synchronous!";
-}
-
-void PlatformVideoDecoder::NotifyEndOfBitstreamBuffer(
- int32 bitstream_buffer_id) {
- DCHECK(RenderThreadImpl::current());
- client_->NotifyEndOfBitstreamBuffer(bitstream_buffer_id);
-}
-
-void PlatformVideoDecoder::NotifyFlushDone() {
- DCHECK(RenderThreadImpl::current());
- client_->NotifyFlushDone();
-}
-
-void PlatformVideoDecoder::NotifyResetDone() {
- DCHECK(RenderThreadImpl::current());
- client_->NotifyResetDone();
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/pepper_platform_video_decoder.h b/chromium/content/renderer/media/pepper_platform_video_decoder.h
deleted file mode 100644
index 83b6487140d..00000000000
--- a/chromium/content/renderer/media/pepper_platform_video_decoder.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_PEPPER_PLATFORM_VIDEO_DECODER_H_
-#define CONTENT_RENDERER_MEDIA_PEPPER_PLATFORM_VIDEO_DECODER_H_
-
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "media/video/video_decode_accelerator.h"
-
-namespace content {
-
-class PlatformVideoDecoder : public media::VideoDecodeAccelerator,
- public media::VideoDecodeAccelerator::Client {
- public:
- PlatformVideoDecoder(media::VideoDecodeAccelerator::Client* client,
- int32 command_buffer_route_id);
- virtual ~PlatformVideoDecoder();
-
- // PlatformVideoDecoder (a.k.a. VideoDecodeAccelerator) implementation.
- virtual bool Initialize(media::VideoCodecProfile profile) OVERRIDE;
- virtual void Decode(
- const media::BitstreamBuffer& bitstream_buffer) OVERRIDE;
- virtual void AssignPictureBuffers(
- const std::vector<media::PictureBuffer>& buffers) OVERRIDE;
- virtual void ReusePictureBuffer(int32 picture_buffer_id) OVERRIDE;
- virtual void Flush() OVERRIDE;
- virtual void Reset() OVERRIDE;
- virtual void Destroy() OVERRIDE;
-
- // VideoDecodeAccelerator::Client implementation.
- virtual void ProvidePictureBuffers(uint32 requested_num_of_buffers,
- const gfx::Size& dimensions,
- uint32 texture_target) OVERRIDE;
- virtual void PictureReady(const media::Picture& picture) OVERRIDE;
- virtual void DismissPictureBuffer(int32 picture_buffer_id) OVERRIDE;
- virtual void NotifyInitializeDone() OVERRIDE;
- virtual void NotifyError(
- media::VideoDecodeAccelerator::Error error) OVERRIDE;
- virtual void NotifyEndOfBitstreamBuffer(int32 bitstream_buffer_id) OVERRIDE;
- virtual void NotifyFlushDone() OVERRIDE;
- virtual void NotifyResetDone() OVERRIDE;
-
- private:
- // Client lifetime must exceed lifetime of this class.
- // TODO(vrk/fischman): We should take another look at the overall
- // arcitecture of PPAPI Video Decode to make sure lifetime/ownership makes
- // sense, including lifetime of this client.
- media::VideoDecodeAccelerator::Client* client_;
-
- // Route ID for the command buffer associated with video decoder's context.
- int32 command_buffer_route_id_;
-
- // Holds a GpuVideoDecodeAcceleratorHost.
- scoped_ptr<media::VideoDecodeAccelerator> decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(PlatformVideoDecoder);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_PEPPER_PLATFORM_VIDEO_DECODER_H_
diff --git a/chromium/content/renderer/media/remote_media_stream_impl.cc b/chromium/content/renderer/media/remote_media_stream_impl.cc
index af430e242f7..d2421310ef8 100644
--- a/chromium/content/renderer/media/remote_media_stream_impl.cc
+++ b/chromium/content/renderer/media/remote_media_stream_impl.cc
@@ -8,88 +8,125 @@
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
-#include "content/renderer/media/media_stream_extra_data.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/webrtc/media_stream_remote_video_source.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
#include "third_party/WebKit/public/platform/WebString.h"
namespace content {
-// RemoteMediaStreamTrackObserver is responsible for listening on change
-// notification on a remote webrtc MediaStreamTrack and notify WebKit.
-class RemoteMediaStreamTrackObserver
- : NON_EXPORTED_BASE(public webrtc::ObserverInterface),
- NON_EXPORTED_BASE(public base::NonThreadSafe) {
+namespace {
+
+void InitializeWebkitTrack(webrtc::MediaStreamTrackInterface* track,
+ blink::WebMediaStreamTrack* webkit_track,
+ blink::WebMediaStreamSource::Type type) {
+ blink::WebMediaStreamSource webkit_source;
+ blink::WebString webkit_track_id(base::UTF8ToUTF16(track->id()));
+
+ webkit_source.initialize(webkit_track_id, type, webkit_track_id);
+ webkit_track->initialize(webkit_track_id, webkit_source);
+
+ if (type == blink::WebMediaStreamSource::TypeVideo) {
+ MediaStreamRemoteVideoSource* video_source =
+ new MediaStreamRemoteVideoSource(
+ static_cast<webrtc::VideoTrackInterface*>(track));
+ webkit_source.setExtraData(video_source);
+ // Initial constraints must be provided to a MediaStreamVideoTrack. But
+ // no constraints are available initially on a remote video track.
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ webkit_track->setExtraData(
+ new MediaStreamVideoTrack(video_source, constraints,
+ MediaStreamVideoSource::ConstraintsCallback(),
+ track->enabled()));
+ } else {
+ DCHECK(type == blink::WebMediaStreamSource::TypeAudio);
+ content::PeerConnectionDependencyFactory::AddNativeAudioTrackToBlinkTrack(
+ track, *webkit_track, false);
+ }
+}
+
+} // namespace
+
+// Base class used for mapping between webrtc and blink MediaStream tracks.
+// An instance of a RemoteMediaStreamTrackAdapter is stored in
+// RemoteMediaStreamImpl per remote audio and video track.
+class RemoteMediaStreamTrackAdapter {
public:
- RemoteMediaStreamTrackObserver(
- webrtc::MediaStreamTrackInterface* webrtc_track,
- const blink::WebMediaStreamTrack& webkit_track);
- virtual ~RemoteMediaStreamTrackObserver();
+ RemoteMediaStreamTrackAdapter(webrtc::MediaStreamTrackInterface* webrtc_track,
+ const blink::WebMediaStreamTrack& webkit_track)
+ : webrtc_track_(webrtc_track),
+ webkit_track_(webkit_track) {
+ }
+
+ virtual ~RemoteMediaStreamTrackAdapter() {
+ }
- webrtc::MediaStreamTrackInterface* observered_track() {
+ webrtc::MediaStreamTrackInterface* observed_track() {
return webrtc_track_.get();
}
+
const blink::WebMediaStreamTrack& webkit_track() { return webkit_track_; }
private:
- // webrtc::ObserverInterface implementation.
- virtual void OnChanged() OVERRIDE;
-
- webrtc::MediaStreamTrackInterface::TrackState state_;
scoped_refptr<webrtc::MediaStreamTrackInterface> webrtc_track_;
blink::WebMediaStreamTrack webkit_track_;
- DISALLOW_COPY_AND_ASSIGN(RemoteMediaStreamTrackObserver);
+ DISALLOW_COPY_AND_ASSIGN(RemoteMediaStreamTrackAdapter);
};
-} // namespace content
-
-namespace {
-
-void InitializeWebkitTrack(webrtc::MediaStreamTrackInterface* track,
- blink::WebMediaStreamTrack* webkit_track,
- blink::WebMediaStreamSource::Type type) {
- blink::WebMediaStreamSource webkit_source;
- blink::WebString webkit_track_id(UTF8ToUTF16(track->id()));
-
- webkit_source.initialize(webkit_track_id, type, webkit_track_id);
- webkit_track->initialize(webkit_track_id, webkit_source);
- content::MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
- track, *webkit_track, false);
-}
-
-content::RemoteMediaStreamTrackObserver* FindTrackObserver(
- webrtc::MediaStreamTrackInterface* track,
- const ScopedVector<content::RemoteMediaStreamTrackObserver>& observers) {
- ScopedVector<content::RemoteMediaStreamTrackObserver>::const_iterator it =
+static content::RemoteMediaStreamTrackAdapter* FindTrackObserver(
+ webrtc::MediaStreamTrackInterface* track,
+ const ScopedVector<content::RemoteMediaStreamTrackAdapter>& observers) {
+ ScopedVector<content::RemoteMediaStreamTrackAdapter>::const_iterator it =
observers.begin();
for (; it != observers.end(); ++it) {
- if ((*it)->observered_track() == track)
+ if ((*it)->observed_track() == track)
return *it;
}
return NULL;
}
-} // namespace anonymous
+// RemoteAudioMediaStreamTrackAdapter is responsible for listening on state
+// change notifications on a remote webrtc audio MediaStreamTracks and notify
+// WebKit.
+class RemoteAudioMediaStreamTrackAdapter
+ : public RemoteMediaStreamTrackAdapter,
+ public webrtc::ObserverInterface,
+ public base::NonThreadSafe {
+ public:
+ RemoteAudioMediaStreamTrackAdapter(
+ webrtc::MediaStreamTrackInterface* webrtc_track,
+ const blink::WebMediaStreamTrack& webkit_track);
+ virtual ~RemoteAudioMediaStreamTrackAdapter();
-namespace content {
+ private:
+ // webrtc::ObserverInterface implementation.
+ virtual void OnChanged() OVERRIDE;
+
+ webrtc::MediaStreamTrackInterface::TrackState state_;
+
+ DISALLOW_COPY_AND_ASSIGN(RemoteAudioMediaStreamTrackAdapter);
+};
-RemoteMediaStreamTrackObserver::RemoteMediaStreamTrackObserver(
+RemoteAudioMediaStreamTrackAdapter::RemoteAudioMediaStreamTrackAdapter(
webrtc::MediaStreamTrackInterface* webrtc_track,
const blink::WebMediaStreamTrack& webkit_track)
- : state_(webrtc_track->state()),
- webrtc_track_(webrtc_track),
- webkit_track_(webkit_track) {
- webrtc_track->RegisterObserver(this);
+ : RemoteMediaStreamTrackAdapter(webrtc_track, webkit_track),
+ state_(observed_track()->state()) {
+ observed_track()->RegisterObserver(this);
}
-RemoteMediaStreamTrackObserver::~RemoteMediaStreamTrackObserver() {
- webrtc_track_->UnregisterObserver(this);
+RemoteAudioMediaStreamTrackAdapter::~RemoteAudioMediaStreamTrackAdapter() {
+ observed_track()->UnregisterObserver(this);
}
-void RemoteMediaStreamTrackObserver::OnChanged() {
+void RemoteAudioMediaStreamTrackAdapter::OnChanged() {
DCHECK(CalledOnValidThread());
- webrtc::MediaStreamTrackInterface::TrackState state = webrtc_track_->state();
+ webrtc::MediaStreamTrackInterface::TrackState state =
+ observed_track()->state();
if (state == state_)
return;
@@ -100,11 +137,11 @@ void RemoteMediaStreamTrackObserver::OnChanged() {
// WebMediaStreamSource::ReadyState.
break;
case webrtc::MediaStreamTrackInterface::kLive:
- webkit_track_.source().setReadyState(
+ webkit_track().source().setReadyState(
blink::WebMediaStreamSource::ReadyStateLive);
break;
case webrtc::MediaStreamTrackInterface::kEnded:
- webkit_track_.source().setReadyState(
+ webkit_track().source().setReadyState(
blink::WebMediaStreamSource::ReadyStateEnded);
break;
default:
@@ -131,8 +168,8 @@ RemoteMediaStreamImpl::RemoteMediaStreamImpl(
InitializeWebkitTrack(audio_track, &webkit_audio_tracks[i],
blink::WebMediaStreamSource::TypeAudio);
audio_track_observers_.push_back(
- new RemoteMediaStreamTrackObserver(audio_track,
- webkit_audio_tracks[i]));
+ new RemoteAudioMediaStreamTrackAdapter(audio_track,
+ webkit_audio_tracks[i]));
}
// Initialize WebKit video tracks.
@@ -146,13 +183,13 @@ RemoteMediaStreamImpl::RemoteMediaStreamImpl(
InitializeWebkitTrack(video_track, &webkit_video_tracks[i],
blink::WebMediaStreamSource::TypeVideo);
video_track_observers_.push_back(
- new RemoteMediaStreamTrackObserver(video_track,
- webkit_video_tracks[i]));
+ new RemoteMediaStreamTrackAdapter(video_track,
+ webkit_video_tracks[i]));
}
- webkit_stream_.initialize(UTF8ToUTF16(webrtc_stream->label()),
+ webkit_stream_.initialize(base::UTF8ToUTF16(webrtc_stream->label()),
webkit_audio_tracks, webkit_video_tracks);
- webkit_stream_.setExtraData(new MediaStreamExtraData(webrtc_stream, false));
+ webkit_stream_.setExtraData(new MediaStream(webrtc_stream));
}
RemoteMediaStreamImpl::~RemoteMediaStreamImpl() {
@@ -161,10 +198,10 @@ RemoteMediaStreamImpl::~RemoteMediaStreamImpl() {
void RemoteMediaStreamImpl::OnChanged() {
// Find removed audio tracks.
- ScopedVector<RemoteMediaStreamTrackObserver>::iterator audio_it =
+ ScopedVector<RemoteMediaStreamTrackAdapter>::iterator audio_it =
audio_track_observers_.begin();
while (audio_it != audio_track_observers_.end()) {
- std::string track_id = (*audio_it)->observered_track()->id();
+ std::string track_id = (*audio_it)->observed_track()->id();
if (webrtc_stream_->FindAudioTrack(track_id) == NULL) {
webkit_stream_.removeTrack((*audio_it)->webkit_track());
audio_it = audio_track_observers_.erase(audio_it);
@@ -174,10 +211,10 @@ void RemoteMediaStreamImpl::OnChanged() {
}
// Find removed video tracks.
- ScopedVector<RemoteMediaStreamTrackObserver>::iterator video_it =
+ ScopedVector<RemoteMediaStreamTrackAdapter>::iterator video_it =
video_track_observers_.begin();
while (video_it != video_track_observers_.end()) {
- std::string track_id = (*video_it)->observered_track()->id();
+ std::string track_id = (*video_it)->observed_track()->id();
if (webrtc_stream_->FindVideoTrack(track_id) == NULL) {
webkit_stream_.removeTrack((*video_it)->webkit_track());
video_it = video_track_observers_.erase(video_it);
@@ -196,7 +233,7 @@ void RemoteMediaStreamImpl::OnChanged() {
InitializeWebkitTrack(*it, &new_track,
blink::WebMediaStreamSource::TypeAudio);
audio_track_observers_.push_back(
- new RemoteMediaStreamTrackObserver(*it, new_track));
+ new RemoteAudioMediaStreamTrackAdapter(*it, new_track));
webkit_stream_.addTrack(new_track);
}
}
@@ -211,7 +248,7 @@ void RemoteMediaStreamImpl::OnChanged() {
InitializeWebkitTrack(*it, &new_track,
blink::WebMediaStreamSource::TypeVideo);
video_track_observers_.push_back(
- new RemoteMediaStreamTrackObserver(*it, new_track));
+ new RemoteMediaStreamTrackAdapter(*it, new_track));
webkit_stream_.addTrack(new_track);
}
}
diff --git a/chromium/content/renderer/media/remote_media_stream_impl.h b/chromium/content/renderer/media/remote_media_stream_impl.h
index 3263ed458e9..6b1554aff06 100644
--- a/chromium/content/renderer/media/remote_media_stream_impl.h
+++ b/chromium/content/renderer/media/remote_media_stream_impl.h
@@ -18,7 +18,7 @@
namespace content {
-class RemoteMediaStreamTrackObserver;
+class RemoteMediaStreamTrackAdapter;
// RemoteMediaStreamImpl serves as a container and glue between remote webrtc
// MediaStreams and WebKit MediaStreams. For each remote MediaStream received
@@ -39,8 +39,8 @@ class CONTENT_EXPORT RemoteMediaStreamImpl
virtual void OnChanged() OVERRIDE;
scoped_refptr<webrtc::MediaStreamInterface> webrtc_stream_;
- ScopedVector<RemoteMediaStreamTrackObserver> audio_track_observers_;
- ScopedVector<RemoteMediaStreamTrackObserver> video_track_observers_;
+ ScopedVector<RemoteMediaStreamTrackAdapter> video_track_observers_;
+ ScopedVector<RemoteMediaStreamTrackAdapter> audio_track_observers_;
blink::WebMediaStream webkit_stream_;
DISALLOW_COPY_AND_ASSIGN(RemoteMediaStreamImpl);
diff --git a/chromium/content/renderer/media/render_media_log.cc b/chromium/content/renderer/media/render_media_log.cc
index 5515ecc394e..d20f622ec74 100644
--- a/chromium/content/renderer/media/render_media_log.cc
+++ b/chromium/content/renderer/media/render_media_log.cc
@@ -8,38 +8,59 @@
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "content/common/view_messages.h"
-#include "content/renderer/render_thread_impl.h"
-
-using base::Time;
-using base::TimeDelta;
+#include "content/public/renderer/render_thread.h"
namespace content {
RenderMediaLog::RenderMediaLog()
: render_loop_(base::MessageLoopProxy::current()),
- last_ipc_send_time_(Time::Now()) {
- DCHECK(RenderThreadImpl::current()) <<
+ tick_clock_(new base::DefaultTickClock()),
+ last_ipc_send_time_(tick_clock_->NowTicks()) {
+ DCHECK(RenderThread::Get()) <<
"RenderMediaLog must be constructed on the render thread";
}
void RenderMediaLog::AddEvent(scoped_ptr<media::MediaLogEvent> event) {
- if (!RenderThreadImpl::current()) {
+ if (!RenderThread::Get()) {
render_loop_->PostTask(FROM_HERE, base::Bind(
&RenderMediaLog::AddEvent, this, base::Passed(&event)));
return;
}
- queued_media_events_.push_back(*event);
+
+ // Keep track of the latest buffered extents properties to avoid sending
+ // thousands of events over IPC. See http://crbug.com/352585 for details.
+ //
+ // TODO(scherkus): We should overhaul MediaLog entirely to have clearer
+ // separation of properties vs. events.
+ if (event->type == media::MediaLogEvent::BUFFERED_EXTENTS_CHANGED)
+ last_buffered_extents_changed_event_.swap(event);
+ else
+ queued_media_events_.push_back(*event);
+
// Limit the send rate of high frequency events.
- Time curr_time = Time::Now();
- if ((curr_time - last_ipc_send_time_) < TimeDelta::FromSeconds(1))
+ base::TimeTicks curr_time = tick_clock_->NowTicks();
+ if ((curr_time - last_ipc_send_time_) < base::TimeDelta::FromSeconds(1))
return;
last_ipc_send_time_ = curr_time;
+
+ if (last_buffered_extents_changed_event_) {
+ queued_media_events_.push_back(*last_buffered_extents_changed_event_);
+ last_buffered_extents_changed_event_.reset();
+ }
+
DVLOG(1) << "media log events array size " << queued_media_events_.size();
- RenderThreadImpl::current()->Send(
+
+ RenderThread::Get()->Send(
new ViewHostMsg_MediaLogEvents(queued_media_events_));
queued_media_events_.clear();
}
RenderMediaLog::~RenderMediaLog() {}
+void RenderMediaLog::SetTickClockForTesting(
+ scoped_ptr<base::TickClock> tick_clock) {
+ tick_clock_.swap(tick_clock);
+ last_ipc_send_time_ = tick_clock_->NowTicks();
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/render_media_log.h b/chromium/content/renderer/media/render_media_log.h
index 29b49134722..9aac107c4c6 100644
--- a/chromium/content/renderer/media/render_media_log.h
+++ b/chromium/content/renderer/media/render_media_log.h
@@ -6,7 +6,9 @@
#define CONTENT_RENDERER_MEDIA_RENDER_MEDIA_LOG_H_
#include <vector>
+
#include "base/time/time.h"
+#include "content/common/content_export.h"
#include "media/base/media_log.h"
namespace base {
@@ -15,22 +17,32 @@ class MessageLoopProxy;
namespace content {
-// RenderMediaLog is an implementation of MediaLog that passes all events to the
+// RenderMediaLog is an implementation of MediaLog that forwards events to the
// browser process, throttling as necessary.
-class RenderMediaLog : public media::MediaLog {
+//
+// To minimize the number of events sent over the wire, only the latest event
+// added is sent for high frequency events (e.g., BUFFERED_EXTENTS_CHANGED).
+class CONTENT_EXPORT RenderMediaLog : public media::MediaLog {
public:
RenderMediaLog();
// MediaLog implementation.
virtual void AddEvent(scoped_ptr<media::MediaLogEvent> event) OVERRIDE;
+ // Will reset |last_ipc_send_time_| with the value of NowTicks().
+ void SetTickClockForTesting(scoped_ptr<base::TickClock> tick_clock);
+
private:
virtual ~RenderMediaLog();
scoped_refptr<base::MessageLoopProxy> render_loop_;
- base::Time last_ipc_send_time_;
+ scoped_ptr<base::TickClock> tick_clock_;
+ base::TimeTicks last_ipc_send_time_;
std::vector<media::MediaLogEvent> queued_media_events_;
+ // Limits the number buffered extents changed events we send over IPC to one.
+ scoped_ptr<media::MediaLogEvent> last_buffered_extents_changed_event_;
+
DISALLOW_COPY_AND_ASSIGN(RenderMediaLog);
};
diff --git a/chromium/content/renderer/media/render_media_log_unittest.cc b/chromium/content/renderer/media/render_media_log_unittest.cc
new file mode 100644
index 00000000000..83708aa49e2
--- /dev/null
+++ b/chromium/content/renderer/media/render_media_log_unittest.cc
@@ -0,0 +1,104 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+#include "content/common/view_messages.h"
+#include "content/public/test/mock_render_thread.h"
+#include "content/renderer/media/render_media_log.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+class RenderMediaLogTest : public testing::Test {
+ public:
+ RenderMediaLogTest()
+ : log_(new RenderMediaLog()),
+ tick_clock_(new base::SimpleTestTickClock()) {
+ log_->SetTickClockForTesting(scoped_ptr<base::TickClock>(tick_clock_));
+ }
+
+ virtual ~RenderMediaLogTest() {}
+
+ void AddEvent(media::MediaLogEvent::Type type) {
+ log_->AddEvent(log_->CreateEvent(type));
+ }
+
+ void Advance(base::TimeDelta delta) { tick_clock_->Advance(delta); }
+
+ int message_count() { return render_thread_.sink().message_count(); }
+
+ std::vector<media::MediaLogEvent> GetMediaLogEvents() {
+ const IPC::Message* msg = render_thread_.sink().GetFirstMessageMatching(
+ ViewHostMsg_MediaLogEvents::ID);
+ if (!msg) {
+ ADD_FAILURE() << "Did not find ViewHostMsg_MediaLogEvents IPC message";
+ return std::vector<media::MediaLogEvent>();
+ }
+
+ Tuple1<std::vector<media::MediaLogEvent> > events;
+ ViewHostMsg_MediaLogEvents::Read(msg, &events);
+ return events.a;
+ }
+
+ private:
+ MockRenderThread render_thread_;
+ scoped_refptr<RenderMediaLog> log_;
+ base::SimpleTestTickClock* tick_clock_; // Owned by |log_|.
+
+ DISALLOW_COPY_AND_ASSIGN(RenderMediaLogTest);
+};
+
+TEST_F(RenderMediaLogTest, ThrottleSendingEvents) {
+ AddEvent(media::MediaLogEvent::LOAD);
+ EXPECT_EQ(0, message_count());
+
+ // Still shouldn't send anything.
+ Advance(base::TimeDelta::FromMilliseconds(500));
+ AddEvent(media::MediaLogEvent::SEEK);
+ EXPECT_EQ(0, message_count());
+
+ // Now we should expect an IPC.
+ Advance(base::TimeDelta::FromMilliseconds(500));
+ AddEvent(media::MediaLogEvent::PLAY);
+ EXPECT_EQ(1, message_count());
+
+ // Verify contents.
+ std::vector<media::MediaLogEvent> events = GetMediaLogEvents();
+ ASSERT_EQ(3u, events.size());
+ EXPECT_EQ(media::MediaLogEvent::LOAD, events[0].type);
+ EXPECT_EQ(media::MediaLogEvent::SEEK, events[1].type);
+ EXPECT_EQ(media::MediaLogEvent::PLAY, events[2].type);
+
+ // Adding another event shouldn't send anything.
+ AddEvent(media::MediaLogEvent::PIPELINE_ERROR);
+ EXPECT_EQ(1, message_count());
+}
+
+TEST_F(RenderMediaLogTest, BufferedExtents) {
+ AddEvent(media::MediaLogEvent::LOAD);
+ AddEvent(media::MediaLogEvent::SEEK);
+
+ // This event is handled separately and should always appear last regardless
+ // of how many times we see it.
+ AddEvent(media::MediaLogEvent::BUFFERED_EXTENTS_CHANGED);
+ AddEvent(media::MediaLogEvent::BUFFERED_EXTENTS_CHANGED);
+ AddEvent(media::MediaLogEvent::BUFFERED_EXTENTS_CHANGED);
+
+ // Trigger IPC message.
+ EXPECT_EQ(0, message_count());
+ Advance(base::TimeDelta::FromMilliseconds(1000));
+ AddEvent(media::MediaLogEvent::PLAY);
+ EXPECT_EQ(1, message_count());
+
+ // Verify contents. There should only be a single buffered extents changed
+ // event.
+ std::vector<media::MediaLogEvent> events = GetMediaLogEvents();
+ ASSERT_EQ(4u, events.size());
+ EXPECT_EQ(media::MediaLogEvent::LOAD, events[0].type);
+ EXPECT_EQ(media::MediaLogEvent::SEEK, events[1].type);
+ EXPECT_EQ(media::MediaLogEvent::PLAY, events[2].type);
+ EXPECT_EQ(media::MediaLogEvent::BUFFERED_EXTENTS_CHANGED, events[3].type);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.cc b/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
index 7d4db85513f..94e0d78217f 100644
--- a/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
+++ b/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
@@ -14,133 +14,99 @@
#include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h"
#include "content/renderer/render_thread_impl.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "media/video/video_decode_accelerator.h"
+#include "media/video/video_encode_accelerator.h"
+#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkPixelRef.h"
namespace content {
-RendererGpuVideoAcceleratorFactories::~RendererGpuVideoAcceleratorFactories() {}
+// static
+scoped_refptr<RendererGpuVideoAcceleratorFactories>
+RendererGpuVideoAcceleratorFactories::Create(
+ GpuChannelHost* gpu_channel_host,
+ const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
+ const scoped_refptr<ContextProviderCommandBuffer>& context_provider) {
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> factories =
+ new RendererGpuVideoAcceleratorFactories(
+ gpu_channel_host, message_loop_proxy, context_provider);
+ // Post task from outside constructor, since AddRef()/Release() is unsafe from
+ // within.
+ message_loop_proxy->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::BindContext,
+ factories));
+ return factories;
+}
+
RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories(
GpuChannelHost* gpu_channel_host,
+ const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
const scoped_refptr<ContextProviderCommandBuffer>& context_provider)
- : message_loop_(
- RenderThreadImpl::current()->GetMediaThreadMessageLoopProxy()),
+ : task_runner_(message_loop_proxy),
gpu_channel_host_(gpu_channel_host),
context_provider_(context_provider),
- thread_safe_sender_(ChildThread::current()->thread_safe_sender()),
- aborted_waiter_(true, false),
- message_loop_async_waiter_(false, false) {
- // |context_provider_| is only required to support HW-accelerated decode.
- if (!context_provider_)
- return;
+ thread_safe_sender_(ChildThread::current()->thread_safe_sender()) {}
- if (message_loop_->BelongsToCurrentThread()) {
- AsyncBindContext();
- message_loop_async_waiter_.Reset();
- return;
- }
- // Wait for the context to be acquired.
- message_loop_->PostTask(
- FROM_HERE,
- base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncBindContext,
- // Unretained to avoid ref/deref'ing |*this|, which is not yet
- // stored in a scoped_refptr. Safe because the Wait() below
- // keeps us alive until this task completes.
- base::Unretained(this)));
- message_loop_async_waiter_.Wait();
-}
+RendererGpuVideoAcceleratorFactories::~RendererGpuVideoAcceleratorFactories() {}
-RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories()
- : aborted_waiter_(true, false),
- message_loop_async_waiter_(false, false) {}
+void RendererGpuVideoAcceleratorFactories::BindContext() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (!context_provider_->BindToCurrentThread())
+ context_provider_ = NULL;
+}
WebGraphicsContext3DCommandBufferImpl*
RendererGpuVideoAcceleratorFactories::GetContext3d() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!context_provider_)
return NULL;
- WebGraphicsContext3DCommandBufferImpl* context =
- context_provider_->Context3d();
- if (context->isContextLost()) {
+ if (context_provider_->IsContextLost()) {
context_provider_->VerifyContexts();
context_provider_ = NULL;
return NULL;
}
- return context;
-}
-
-void RendererGpuVideoAcceleratorFactories::AsyncBindContext() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- if (!context_provider_->BindToCurrentThread())
- context_provider_ = NULL;
- message_loop_async_waiter_.Signal();
+ return context_provider_->WebContext3D();
}
scoped_ptr<media::VideoDecodeAccelerator>
-RendererGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator(
- media::VideoCodecProfile profile,
- media::VideoDecodeAccelerator::Client* client) {
- if (message_loop_->BelongsToCurrentThread()) {
- AsyncCreateVideoDecodeAccelerator(profile, client);
- message_loop_async_waiter_.Reset();
- return vda_.Pass();
- }
- // The VDA is returned in the vda_ member variable by the
- // AsyncCreateVideoDecodeAccelerator() function.
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&RendererGpuVideoAcceleratorFactories::
- AsyncCreateVideoDecodeAccelerator,
- this,
- profile,
- client));
-
- base::WaitableEvent* objects[] = {&aborted_waiter_,
- &message_loop_async_waiter_};
- if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
- // If we are aborting and the VDA is created by the
- // AsyncCreateVideoDecodeAccelerator() function later we need to ensure
- // that it is destroyed on the same thread.
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&RendererGpuVideoAcceleratorFactories::
- AsyncDestroyVideoDecodeAccelerator,
- this));
- return scoped_ptr<media::VideoDecodeAccelerator>();
- }
- return vda_.Pass();
-}
+RendererGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
-scoped_ptr<media::VideoEncodeAccelerator>
-RendererGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator(
- media::VideoEncodeAccelerator::Client* client) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ WebGraphicsContext3DCommandBufferImpl* context = GetContext3d();
+ if (context && context->GetCommandBufferProxy()) {
+ return gpu_channel_host_->CreateVideoDecoder(
+ context->GetCommandBufferProxy()->GetRouteID());
+ }
- return gpu_channel_host_->CreateVideoEncoder(client);
+ return scoped_ptr<media::VideoDecodeAccelerator>();
}
-void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoDecodeAccelerator(
- media::VideoCodecProfile profile,
- media::VideoDecodeAccelerator::Client* client) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+scoped_ptr<media::VideoEncodeAccelerator>
+RendererGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
WebGraphicsContext3DCommandBufferImpl* context = GetContext3d();
if (context && context->GetCommandBufferProxy()) {
- vda_ = gpu_channel_host_->CreateVideoDecoder(
- context->GetCommandBufferProxy()->GetRouteID(), profile, client);
+ return gpu_channel_host_->CreateVideoEncoder(
+ context->GetCommandBufferProxy()->GetRouteID());
}
- message_loop_async_waiter_.Signal();
+
+ return scoped_ptr<media::VideoEncodeAccelerator>();
}
-uint32 RendererGpuVideoAcceleratorFactories::CreateTextures(
+bool RendererGpuVideoAcceleratorFactories::CreateTextures(
int32 count,
const gfx::Size& size,
std::vector<uint32>* texture_ids,
std::vector<gpu::Mailbox>* texture_mailboxes,
uint32 texture_target) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(texture_target);
WebGraphicsContext3DCommandBufferImpl* context = GetContext3d();
if (!context)
- return 0;
+ return false;
gpu::gles2::GLES2Implementation* gles2 = context->GetImplementation();
texture_ids->resize(count);
@@ -170,17 +136,17 @@ uint32 RendererGpuVideoAcceleratorFactories::CreateTextures(
texture_mailboxes->at(i).name);
}
- // We need a glFlush here to guarantee the decoder (in the GPU process) can
- // use the texture ids we return here. Since textures are expected to be
- // reused, this should not be unacceptably expensive.
- gles2->Flush();
+ // We need ShallowFlushCHROMIUM() here to order the command buffer commands
+ // with respect to IPC to the GPU process, to guarantee that the decoder in
+ // the GPU process can use these textures as soon as it receives IPC
+ // notification of them.
+ gles2->ShallowFlushCHROMIUM();
DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
-
- return gles2->InsertSyncPointCHROMIUM();
+ return true;
}
void RendererGpuVideoAcceleratorFactories::DeleteTexture(uint32 texture_id) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
WebGraphicsContext3DCommandBufferImpl* context = GetContext3d();
if (!context)
@@ -192,7 +158,7 @@ void RendererGpuVideoAcceleratorFactories::DeleteTexture(uint32 texture_id) {
}
void RendererGpuVideoAcceleratorFactories::WaitSyncPoint(uint32 sync_point) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
WebGraphicsContext3DCommandBufferImpl* context = GetContext3d();
if (!context)
@@ -206,42 +172,15 @@ void RendererGpuVideoAcceleratorFactories::WaitSyncPoint(uint32 sync_point) {
gles2->ShallowFlushCHROMIUM();
}
-void RendererGpuVideoAcceleratorFactories::ReadPixels(uint32 texture_id,
- const gfx::Size& size,
- const SkBitmap& pixels) {
- // SkBitmaps use the SkPixelRef object to refcount the underlying pixels.
- // Multiple SkBitmaps can share a SkPixelRef instance. We use this to
- // ensure that the underlying pixels in the SkBitmap passed in remain valid
- // until the AsyncReadPixels() call completes.
- read_pixels_bitmap_.setPixelRef(pixels.pixelRef());
-
- if (!message_loop_->BelongsToCurrentThread()) {
- message_loop_->PostTask(
- FROM_HERE,
- base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncReadPixels,
- this,
- texture_id,
- size));
- base::WaitableEvent* objects[] = {&aborted_waiter_,
- &message_loop_async_waiter_};
- if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
- return;
- } else {
- AsyncReadPixels(texture_id, size);
- message_loop_async_waiter_.Reset();
- }
- read_pixels_bitmap_.setPixelRef(NULL);
-}
-
-void RendererGpuVideoAcceleratorFactories::AsyncReadPixels(
+void RendererGpuVideoAcceleratorFactories::ReadPixels(
uint32 texture_id,
- const gfx::Size& size) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ const gfx::Rect& visible_rect,
+ const SkBitmap& pixels) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
WebGraphicsContext3DCommandBufferImpl* context = GetContext3d();
- if (!context) {
- message_loop_async_waiter_.Signal();
+ if (!context)
return;
- }
gpu::gles2::GLES2Implementation* gles2 = context->GetImplementation();
@@ -261,61 +200,63 @@ void RendererGpuVideoAcceleratorFactories::AsyncReadPixels(
gles2->FramebufferTexture2D(
GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tmp_texture, 0);
gles2->PixelStorei(GL_PACK_ALIGNMENT, 4);
+
#if SK_B32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_R32_SHIFT == 16 && \
SK_A32_SHIFT == 24
GLenum skia_format = GL_BGRA_EXT;
+ GLenum read_format = GL_BGRA_EXT;
+ GLint supported_format = 0;
+ GLint supported_type = 0;
+ gles2->GetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &supported_format);
+ gles2->GetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &supported_type);
+ if (supported_format != GL_BGRA_EXT || supported_type != GL_UNSIGNED_BYTE) {
+ read_format = GL_RGBA;
+ }
#elif SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
SK_A32_SHIFT == 24
GLenum skia_format = GL_RGBA;
+ GLenum read_format = GL_RGBA;
#else
#error Unexpected Skia ARGB_8888 layout!
#endif
- gles2->ReadPixels(0,
- 0,
- size.width(),
- size.height(),
- skia_format,
+ gles2->ReadPixels(visible_rect.x(),
+ visible_rect.y(),
+ visible_rect.width(),
+ visible_rect.height(),
+ read_format,
GL_UNSIGNED_BYTE,
- read_pixels_bitmap_.pixelRef()->pixels());
+ pixels.pixelRef()->pixels());
gles2->DeleteFramebuffers(1, &fb);
gles2->DeleteTextures(1, &tmp_texture);
+
+ if (skia_format != read_format) {
+ DCHECK(read_format == GL_RGBA);
+ int pixel_count = visible_rect.width() * visible_rect.height();
+ uint32_t* pixels_ptr = static_cast<uint32_t*>(pixels.pixelRef()->pixels());
+ for (int i = 0; i < pixel_count; ++i) {
+ uint32_t r = pixels_ptr[i] & 0xFF;
+ uint32_t g = (pixels_ptr[i] >> 8) & 0xFF;
+ uint32_t b = (pixels_ptr[i] >> 16) & 0xFF;
+ uint32_t a = (pixels_ptr[i] >> 24) & 0xFF;
+ pixels_ptr[i] = (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) |
+ (b << SK_B32_SHIFT) |
+ (a << SK_A32_SHIFT);
+ }
+ }
+
DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
- message_loop_async_waiter_.Signal();
}
base::SharedMemory* RendererGpuVideoAcceleratorFactories::CreateSharedMemory(
size_t size) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return ChildThread::AllocateSharedMemory(size, thread_safe_sender_.get());
}
-scoped_refptr<base::MessageLoopProxy>
-RendererGpuVideoAcceleratorFactories::GetMessageLoop() {
- return message_loop_;
-}
-
-void RendererGpuVideoAcceleratorFactories::Abort() { aborted_waiter_.Signal(); }
-
-bool RendererGpuVideoAcceleratorFactories::IsAborted() {
- return aborted_waiter_.IsSignaled();
-}
-
-scoped_refptr<RendererGpuVideoAcceleratorFactories>
-RendererGpuVideoAcceleratorFactories::Clone() {
- scoped_refptr<RendererGpuVideoAcceleratorFactories> factories =
- new RendererGpuVideoAcceleratorFactories();
- factories->message_loop_ = message_loop_;
- factories->gpu_channel_host_ = gpu_channel_host_;
- factories->context_provider_ = context_provider_;
- factories->thread_safe_sender_ = thread_safe_sender_;
- return factories;
-}
-
-void
-RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoDecodeAccelerator() {
- // OK to release because Destroy() will delete the VDA instance.
- if (vda_)
- vda_.release()->Destroy();
+scoped_refptr<base::SingleThreadTaskRunner>
+RendererGpuVideoAcceleratorFactories::GetTaskRunner() {
+ return task_runner_;
}
} // namespace content
diff --git a/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.h b/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.h
index eb9d650b5d0..ffbc0abe664 100644
--- a/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.h
+++ b/chromium/content/renderer/media/renderer_gpu_video_accelerator_factories.h
@@ -14,11 +14,9 @@
#include "content/child/thread_safe_sender.h"
#include "content/common/content_export.h"
#include "media/filters/gpu_video_accelerator_factories.h"
-#include "third_party/skia/include/core/SkBitmap.h"
#include "ui/gfx/size.h"
namespace base {
-class MessageLoopProxy;
class WaitableEvent;
}
@@ -32,96 +30,62 @@ class WebGraphicsContext3DCommandBufferImpl;
// RenderViewImpl and only has its own header to allow extraction of its
// implementation from render_view_impl.cc which is already far too large.
//
-// The RendererGpuVideoAcceleratorFactories can be constructed on any thread.
-// Most public methods of the class must be called from the media thread. The
-// exceptions (which can be called from any thread, as they are internally
-// trampolined) are:
-// * CreateVideoDecodeAccelerator()
-// * ReadPixels()
+// The RendererGpuVideoAcceleratorFactories can be constructed on any thread,
+// but subsequent calls to all public methods of the class must be called from
+// the |message_loop_proxy_|, as provided during construction.
class CONTENT_EXPORT RendererGpuVideoAcceleratorFactories
: public media::GpuVideoAcceleratorFactories {
public:
// Takes a ref on |gpu_channel_host| and tests |context| for loss before each
// use. Safe to call from any thread.
- RendererGpuVideoAcceleratorFactories(
+ static scoped_refptr<RendererGpuVideoAcceleratorFactories> Create(
GpuChannelHost* gpu_channel_host,
+ const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
const scoped_refptr<ContextProviderCommandBuffer>& context_provider);
// media::GpuVideoAcceleratorFactories implementation.
- // CreateVideoDecodeAccelerator() is safe to call from any thread.
virtual scoped_ptr<media::VideoDecodeAccelerator>
- CreateVideoDecodeAccelerator(
- media::VideoCodecProfile profile,
- media::VideoDecodeAccelerator::Client* client) OVERRIDE;
+ CreateVideoDecodeAccelerator() OVERRIDE;
virtual scoped_ptr<media::VideoEncodeAccelerator>
- CreateVideoEncodeAccelerator(
- media::VideoEncodeAccelerator::Client* client) OVERRIDE;
- // Creates textures and produces them into mailboxes. Returns a sync point to
- // wait on before using the mailboxes, or 0 on failure.
- virtual uint32 CreateTextures(int32 count,
- const gfx::Size& size,
- std::vector<uint32>* texture_ids,
- std::vector<gpu::Mailbox>* texture_mailboxes,
- uint32 texture_target) OVERRIDE;
+ CreateVideoEncodeAccelerator() OVERRIDE;
+ // Creates textures and produces them into mailboxes. Returns true on success
+ // or false on failure.
+ virtual bool CreateTextures(int32 count,
+ const gfx::Size& size,
+ std::vector<uint32>* texture_ids,
+ std::vector<gpu::Mailbox>* texture_mailboxes,
+ uint32 texture_target) OVERRIDE;
virtual void DeleteTexture(uint32 texture_id) OVERRIDE;
virtual void WaitSyncPoint(uint32 sync_point) OVERRIDE;
- // ReadPixels() is safe to call from any thread.
virtual void ReadPixels(uint32 texture_id,
- const gfx::Size& size,
+ const gfx::Rect& visible_rect,
const SkBitmap& pixels) OVERRIDE;
virtual base::SharedMemory* CreateSharedMemory(size_t size) OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
- virtual void Abort() OVERRIDE;
- virtual bool IsAborted() OVERRIDE;
- scoped_refptr<RendererGpuVideoAcceleratorFactories> Clone();
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE;
- protected:
+ private:
friend class base::RefCountedThreadSafe<RendererGpuVideoAcceleratorFactories>;
+ RendererGpuVideoAcceleratorFactories(
+ GpuChannelHost* gpu_channel_host,
+ const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
+ const scoped_refptr<ContextProviderCommandBuffer>& context_provider);
virtual ~RendererGpuVideoAcceleratorFactories();
- private:
- RendererGpuVideoAcceleratorFactories();
+ // Helper to bind |context_provider| to the |task_runner_| thread after
+ // construction.
+ void BindContext();
// Helper to get a pointer to the WebGraphicsContext3DCommandBufferImpl,
// if it has not been lost yet.
WebGraphicsContext3DCommandBufferImpl* GetContext3d();
- // Helper for the constructor to acquire the ContentGLContext on
- // |message_loop_|.
- void AsyncBindContext();
-
- // Async versions of the public methods, run on |message_loop_|.
- // They use output parameters instead of return values and each takes
- // a WaitableEvent* param to signal completion (except for DeleteTexture,
- // which is fire-and-forget).
- // AsyncCreateVideoDecodeAccelerator returns its output in the |vda_| member.
- void AsyncCreateVideoDecodeAccelerator(
- media::VideoCodecProfile profile,
- media::VideoDecodeAccelerator::Client* client);
- void AsyncReadPixels(uint32 texture_id, const gfx::Size& size);
- void AsyncDestroyVideoDecodeAccelerator();
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_refptr<GpuChannelHost> gpu_channel_host_;
scoped_refptr<ContextProviderCommandBuffer> context_provider_;
// For sending requests to allocate shared memory in the Browser process.
scoped_refptr<ThreadSafeSender> thread_safe_sender_;
- // This event is signaled if we have been asked to Abort().
- base::WaitableEvent aborted_waiter_;
-
- // This event is signaled by asynchronous tasks posted to |message_loop_| to
- // indicate their completion.
- // e.g. AsyncCreateVideoDecodeAccelerator()/AsyncCreateTextures() etc.
- base::WaitableEvent message_loop_async_waiter_;
-
- // The vda returned by the CreateVideoDecodeAccelerator function.
- scoped_ptr<media::VideoDecodeAccelerator> vda_;
-
- // Bitmap returned by ReadPixels().
- SkBitmap read_pixels_bitmap_;
-
DISALLOW_COPY_AND_ASSIGN(RendererGpuVideoAcceleratorFactories);
};
diff --git a/chromium/content/renderer/media/renderer_webaudiodevice_impl.cc b/chromium/content/renderer/media/renderer_webaudiodevice_impl.cc
index ec38769573f..536bb1e3889 100644
--- a/chromium/content/renderer/media/renderer_webaudiodevice_impl.cc
+++ b/chromium/content/renderer/media/renderer_webaudiodevice_impl.cc
@@ -7,14 +7,15 @@
#include "base/command_line.h"
#include "base/logging.h"
#include "content/renderer/media/audio_device_factory.h"
+#include "content/renderer/render_frame_impl.h"
#include "content/renderer/render_view_impl.h"
#include "media/audio/audio_output_device.h"
#include "media/base/media_switches.h"
-#include "third_party/WebKit/public/web/WebFrame.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebView.h"
using blink::WebAudioDevice;
-using blink::WebFrame;
+using blink::WebLocalFrame;
using blink::WebVector;
using blink::WebView;
@@ -47,13 +48,16 @@ void RendererWebAudioDeviceImpl::start() {
// starting the audio device. The reason for all this is because the creator
// of the WebAudio objects might not be the actual source of the audio (e.g.,
// an extension creates a object that is passed and used within a page).
- WebFrame* const web_frame = WebFrame::frameForCurrentContext();
+ WebLocalFrame* const web_frame = WebLocalFrame::frameForCurrentContext();
WebView* const web_view = web_frame ? web_frame->view() : NULL;
+ RenderFrame* const render_frame =
+ web_frame ? RenderFrame::FromWebFrame(web_frame) : NULL;
RenderViewImpl* const render_view =
web_view ? RenderViewImpl::FromWebView(web_view) : NULL;
output_device_ = AudioDeviceFactory::NewOutputDevice(
- render_view ? render_view->routing_id() : MSG_ROUTING_NONE);
- output_device_->InitializeUnifiedStream(params_, this, session_id_);
+ render_view ? render_view->routing_id() : MSG_ROUTING_NONE,
+ render_frame ? render_frame->GetRoutingID(): MSG_ROUTING_NONE);
+ output_device_->InitializeWithSessionId(params_, this, session_id_);
output_device_->Start();
// Note: Default behavior is to auto-play on start.
}
@@ -73,32 +77,22 @@ double RendererWebAudioDeviceImpl::sampleRate() {
int RendererWebAudioDeviceImpl::Render(media::AudioBus* dest,
int audio_delay_milliseconds) {
- RenderIO(NULL, dest, audio_delay_milliseconds);
- return dest->frames();
-}
-
-void RendererWebAudioDeviceImpl::RenderIO(media::AudioBus* source,
- media::AudioBus* dest,
- int audio_delay_milliseconds) {
- // Make the client callback for an I/O cycle.
if (client_callback_) {
- // Wrap the input pointers using WebVector.
- size_t source_channels =
- source ? static_cast<size_t>(source->channels()) : 0;
- WebVector<float*> web_audio_source_data(source_channels);
- for (size_t i = 0; i < source_channels; ++i)
- web_audio_source_data[i] = source->channel(i);
-
// Wrap the output pointers using WebVector.
WebVector<float*> web_audio_dest_data(
static_cast<size_t>(dest->channels()));
for (int i = 0; i < dest->channels(); ++i)
web_audio_dest_data[i] = dest->channel(i);
+ // TODO(xians): Remove the following |web_audio_source_data| after
+ // changing the blink interface.
+ WebVector<float*> web_audio_source_data(static_cast<size_t>(0));
client_callback_->render(web_audio_source_data,
web_audio_dest_data,
dest->frames());
}
+
+ return dest->frames();
}
void RendererWebAudioDeviceImpl::OnRenderError() {
diff --git a/chromium/content/renderer/media/renderer_webaudiodevice_impl.h b/chromium/content/renderer/media/renderer_webaudiodevice_impl.h
index 962ec0851c3..0f06a89aa94 100644
--- a/chromium/content/renderer/media/renderer_webaudiodevice_impl.h
+++ b/chromium/content/renderer/media/renderer_webaudiodevice_impl.h
@@ -36,10 +36,6 @@ class RendererWebAudioDeviceImpl
virtual int Render(media::AudioBus* dest,
int audio_delay_milliseconds) OVERRIDE;
- virtual void RenderIO(media::AudioBus* source,
- media::AudioBus* dest,
- int audio_delay_milliseconds) OVERRIDE;
-
virtual void OnRenderError() OVERRIDE;
private:
diff --git a/chromium/content/renderer/media/renderer_webmidiaccessor_impl.cc b/chromium/content/renderer/media/renderer_webmidiaccessor_impl.cc
index b12174636e8..75b6b972005 100644
--- a/chromium/content/renderer/media/renderer_webmidiaccessor_impl.cc
+++ b/chromium/content/renderer/media/renderer_webmidiaccessor_impl.cc
@@ -29,14 +29,14 @@ void RendererWebMIDIAccessorImpl::sendMIDIData(
const unsigned char* data,
size_t length,
double timestamp) {
- midi_message_filter()->SendMIDIData(
+ midi_message_filter()->SendMidiData(
port_index,
data,
length,
timestamp);
}
-MIDIMessageFilter* RendererWebMIDIAccessorImpl::midi_message_filter() {
+MidiMessageFilter* RendererWebMIDIAccessorImpl::midi_message_filter() {
return RenderThreadImpl::current()->midi_message_filter();
}
diff --git a/chromium/content/renderer/media/renderer_webmidiaccessor_impl.h b/chromium/content/renderer/media/renderer_webmidiaccessor_impl.h
index 4ebaffd408b..791c294e758 100644
--- a/chromium/content/renderer/media/renderer_webmidiaccessor_impl.h
+++ b/chromium/content/renderer/media/renderer_webmidiaccessor_impl.h
@@ -12,7 +12,7 @@
namespace content {
-class MIDIMessageFilter;
+class MidiMessageFilter;
class RendererWebMIDIAccessorImpl
: public blink::WebMIDIAccessor {
@@ -31,7 +31,7 @@ class RendererWebMIDIAccessorImpl
private:
blink::WebMIDIAccessorClient* client_;
- MIDIMessageFilter* midi_message_filter();
+ MidiMessageFilter* midi_message_filter();
DISALLOW_COPY_AND_ASSIGN(RendererWebMIDIAccessorImpl);
};
diff --git a/chromium/content/renderer/media/rtc_data_channel_handler.cc b/chromium/content/renderer/media/rtc_data_channel_handler.cc
index 065048a5556..1fe4de11756 100644
--- a/chromium/content/renderer/media/rtc_data_channel_handler.cc
+++ b/chromium/content/renderer/media/rtc_data_channel_handler.cc
@@ -4,19 +4,55 @@
#include "content/renderer/media/rtc_data_channel_handler.h"
+#include <limits>
#include <string>
#include "base/logging.h"
+#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
namespace content {
+namespace {
+
+enum DataChannelCounters {
+ CHANNEL_CREATED,
+ CHANNEL_OPENED,
+ CHANNEL_RELIABLE,
+ CHANNEL_ORDERED,
+ CHANNEL_NEGOTIATED,
+ CHANNEL_BOUNDARY
+};
+
+void IncrementCounter(DataChannelCounters counter) {
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.DataChannelCounters",
+ counter,
+ CHANNEL_BOUNDARY);
+}
+
+} // namespace
+
RtcDataChannelHandler::RtcDataChannelHandler(
webrtc::DataChannelInterface* channel)
: channel_(channel),
webkit_client_(NULL) {
DVLOG(1) << "::ctor";
channel_->RegisterObserver(this);
+
+ IncrementCounter(CHANNEL_CREATED);
+ if (isReliable())
+ IncrementCounter(CHANNEL_RELIABLE);
+ if (ordered())
+ IncrementCounter(CHANNEL_ORDERED);
+ if (negotiated())
+ IncrementCounter(CHANNEL_NEGOTIATED);
+
+ UMA_HISTOGRAM_CUSTOM_COUNTS("WebRTC.DataChannelMaxRetransmits",
+ maxRetransmits(), 0,
+ std::numeric_limits<unsigned short>::max(), 50);
+ UMA_HISTOGRAM_CUSTOM_COUNTS("WebRTC.DataChannelMaxRetransmitTime",
+ maxRetransmitTime(), 0,
+ std::numeric_limits<unsigned short>::max(), 50);
}
RtcDataChannelHandler::~RtcDataChannelHandler() {
@@ -30,7 +66,7 @@ void RtcDataChannelHandler::setClient(
}
blink::WebString RtcDataChannelHandler::label() {
- return UTF8ToUTF16(channel_->label());
+ return base::UTF8ToUTF16(channel_->label());
}
bool RtcDataChannelHandler::isReliable() {
@@ -50,7 +86,7 @@ unsigned short RtcDataChannelHandler::maxRetransmits() const {
}
blink::WebString RtcDataChannelHandler::protocol() const {
- return UTF8ToUTF16(channel_->protocol());
+ return base::UTF8ToUTF16(channel_->protocol());
}
bool RtcDataChannelHandler::negotiated() const {
@@ -66,15 +102,17 @@ unsigned long RtcDataChannelHandler::bufferedAmount() {
}
bool RtcDataChannelHandler::sendStringData(const blink::WebString& data) {
- std::string utf8_buffer = UTF16ToUTF8(data);
+ std::string utf8_buffer = base::UTF16ToUTF8(data);
talk_base::Buffer buffer(utf8_buffer.c_str(), utf8_buffer.length());
webrtc::DataBuffer data_buffer(buffer, false);
+ RecordMessageSent(data_buffer.size());
return channel_->Send(data_buffer);
}
bool RtcDataChannelHandler::sendRawData(const char* data, size_t length) {
talk_base::Buffer buffer(data, length);
webrtc::DataBuffer data_buffer(buffer, true);
+ RecordMessageSent(data_buffer.size());
return channel_->Send(data_buffer);
}
@@ -94,6 +132,7 @@ void RtcDataChannelHandler::OnStateChange() {
blink::WebRTCDataChannelHandlerClient::ReadyStateConnecting);
break;
case webrtc::DataChannelInterface::kOpen:
+ IncrementCounter(CHANNEL_OPENED);
webkit_client_->didChangeReadyState(
blink::WebRTCDataChannelHandlerClient::ReadyStateOpen);
break;
@@ -121,7 +160,7 @@ void RtcDataChannelHandler::OnMessage(const webrtc::DataBuffer& buffer) {
webkit_client_->didReceiveRawData(buffer.data.data(), buffer.data.length());
} else {
base::string16 utf16;
- if (!UTF8ToUTF16(buffer.data.data(), buffer.data.length(), &utf16)) {
+ if (!base::UTF8ToUTF16(buffer.data.data(), buffer.data.length(), &utf16)) {
LOG(ERROR) << "Failed convert received data to UTF16";
return;
}
@@ -129,4 +168,28 @@ void RtcDataChannelHandler::OnMessage(const webrtc::DataBuffer& buffer) {
}
}
+void RtcDataChannelHandler::RecordMessageSent(size_t num_bytes) {
+ // Currently, messages are capped at some fairly low limit (16 Kb?)
+ // but we may allow unlimited-size messages at some point, so making
+ // the histogram maximum quite large (100 Mb) to have some
+ // granularity at the higher end in that eventuality. The histogram
+ // buckets are exponentially growing in size, so we'll still have
+ // good granularity at the low end.
+
+ // This makes the last bucket in the histogram count messages from
+ // 100 Mb to infinity.
+ const int kMaxBucketSize = 100 * 1024 * 1024;
+ const int kNumBuckets = 50;
+
+ if (isReliable()) {
+ UMA_HISTOGRAM_CUSTOM_COUNTS("WebRTC.ReliableDataChannelMessageSize",
+ num_bytes,
+ 1, kMaxBucketSize, kNumBuckets);
+ } else {
+ UMA_HISTOGRAM_CUSTOM_COUNTS("WebRTC.UnreliableDataChannelMessageSize",
+ num_bytes,
+ 1, kMaxBucketSize, kNumBuckets);
+ }
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/rtc_data_channel_handler.h b/chromium/content/renderer/media/rtc_data_channel_handler.h
index a5521344f08..1be7e87dd01 100644
--- a/chromium/content/renderer/media/rtc_data_channel_handler.h
+++ b/chromium/content/renderer/media/rtc_data_channel_handler.h
@@ -49,6 +49,8 @@ class CONTENT_EXPORT RtcDataChannelHandler
virtual void OnMessage(const webrtc::DataBuffer& buffer) OVERRIDE;
private:
+ void RecordMessageSent(size_t num_bytes);
+
scoped_refptr<webrtc::DataChannelInterface> channel_;
blink::WebRTCDataChannelHandlerClient* webkit_client_;
};
diff --git a/chromium/content/renderer/media/rtc_dtmf_sender_handler.cc b/chromium/content/renderer/media/rtc_dtmf_sender_handler.cc
index 3df592c8a6b..23c5cc18f88 100644
--- a/chromium/content/renderer/media/rtc_dtmf_sender_handler.cc
+++ b/chromium/content/renderer/media/rtc_dtmf_sender_handler.cc
@@ -31,7 +31,7 @@ void RtcDtmfSenderHandler::setClient(
}
blink::WebString RtcDtmfSenderHandler::currentToneBuffer() {
- return UTF8ToUTF16(dtmf_sender_->tones());
+ return base::UTF8ToUTF16(dtmf_sender_->tones());
}
bool RtcDtmfSenderHandler::canInsertDTMF() {
@@ -41,7 +41,7 @@ bool RtcDtmfSenderHandler::canInsertDTMF() {
bool RtcDtmfSenderHandler::insertDTMF(const blink::WebString& tones,
long duration,
long interToneGap) {
- std::string utf8_tones = UTF16ToUTF8(tones);
+ std::string utf8_tones = base::UTF16ToUTF8(tones);
return dtmf_sender_->InsertDtmf(utf8_tones, static_cast<int>(duration),
static_cast<int>(interToneGap));
}
@@ -51,7 +51,7 @@ void RtcDtmfSenderHandler::OnToneChange(const std::string& tone) {
LOG(ERROR) << "WebRTCDTMFSenderHandlerClient not set.";
return;
}
- webkit_client_->didPlayTone(UTF8ToUTF16(tone));
+ webkit_client_->didPlayTone(base::UTF8ToUTF16(tone));
}
} // namespace content
diff --git a/chromium/content/renderer/media/rtc_media_constraints.cc b/chromium/content/renderer/media/rtc_media_constraints.cc
index 4bdfd9079b3..7f8b79dc136 100644
--- a/chromium/content/renderer/media/rtc_media_constraints.cc
+++ b/chromium/content/renderer/media/rtc_media_constraints.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "content/common/media/media_stream_options.h"
+#include "content/renderer/media/media_stream_video_source.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/WebKit/public/platform/WebCString.h"
#include "third_party/WebKit/public/platform/WebString.h"
@@ -33,13 +34,9 @@ void GetNativeMediaConstraints(
if (new_constraint.key == kMediaStreamSourceInfoId)
continue;
- // Ignore internal constraints set by JS.
- if (StartsWithASCII(
- new_constraint.key,
- webrtc::MediaConstraintsInterface::kInternalConstraintPrefix,
- true)) {
+ // Ignore constraints that are handled by Chrome in MediaStreamVideoSource.
+ if (MediaStreamVideoSource::IsConstraintSupported(new_constraint.key))
continue;
- }
DVLOG(3) << "MediaStreamConstraints:" << new_constraint.key
<< " : " << new_constraint.value;
@@ -75,16 +72,24 @@ RTCMediaConstraints::GetOptional() const {
return optional_;
}
-void RTCMediaConstraints::AddOptional(const std::string& key,
- const std::string& value) {
- optional_.push_back(Constraint(key, value));
+bool RTCMediaConstraints::AddOptional(const std::string& key,
+ const std::string& value,
+ bool override_if_exists) {
+ return AddConstraint(&optional_, key, value, override_if_exists);
}
bool RTCMediaConstraints::AddMandatory(const std::string& key,
const std::string& value,
bool override_if_exists) {
- for (Constraints::iterator iter = mandatory_.begin();
- iter != mandatory_.end();
+ return AddConstraint(&mandatory_, key, value, override_if_exists);
+}
+
+bool RTCMediaConstraints::AddConstraint(Constraints* constraints,
+ const std::string& key,
+ const std::string& value,
+ bool override_if_exists) {
+ for (Constraints::iterator iter = constraints->begin();
+ iter != constraints->end();
++iter) {
if (iter->key == key) {
if (override_if_exists)
@@ -93,7 +98,7 @@ bool RTCMediaConstraints::AddMandatory(const std::string& key,
}
}
// The key wasn't found, add it.
- mandatory_.push_back(Constraint(key, value));
+ constraints->push_back(Constraint(key, value));
return true;
}
diff --git a/chromium/content/renderer/media/rtc_media_constraints.h b/chromium/content/renderer/media/rtc_media_constraints.h
index fd55e346b06..b1247363dc6 100644
--- a/chromium/content/renderer/media/rtc_media_constraints.h
+++ b/chromium/content/renderer/media/rtc_media_constraints.h
@@ -27,14 +27,20 @@ class CONTENT_EXPORT RTCMediaConstraints
virtual ~RTCMediaConstraints();
virtual const Constraints& GetMandatory() const OVERRIDE;
virtual const Constraints& GetOptional() const OVERRIDE;
- void AddOptional(const std::string& key, const std::string& value);
// Adds a mandatory constraint, optionally overriding an existing one.
// If the constraint is already set and |override_if_exists| is false,
// the function will return false, otherwise true.
bool AddMandatory(const std::string& key, const std::string& value,
bool override_if_exists);
+ // As above, but against the optional constraints.
+ bool AddOptional(const std::string& key, const std::string& value,
+ bool override_if_exists);
protected:
+ bool AddConstraint(Constraints* constraints,
+ const std::string& key,
+ const std::string& value,
+ bool override_if_exists);
Constraints mandatory_;
Constraints optional_;
};
diff --git a/chromium/content/renderer/media/rtc_peer_connection_handler.cc b/chromium/content/renderer/media/rtc_peer_connection_handler.cc
index 1536ec30188..b8c39be0ce6 100644
--- a/chromium/content/renderer/media/rtc_peer_connection_handler.cc
+++ b/chromium/content/renderer/media/rtc_peer_connection_handler.cc
@@ -9,34 +9,35 @@
#include <vector>
#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/utf_string_conversions.h"
#include "content/public/common/content_switches.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
+#include "content/renderer/media/media_stream_track.h"
#include "content/renderer/media/peer_connection_tracker.h"
#include "content/renderer/media/remote_media_stream_impl.h"
#include "content/renderer/media/rtc_data_channel_handler.h"
#include "content/renderer/media/rtc_dtmf_sender_handler.h"
#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/webrtc_media_stream_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
+#include "content/renderer/media/webrtc_uma_histograms.h"
#include "content/renderer/render_thread_impl.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
-// TODO(hta): Move the following include to WebRTCStatsRequest.h file.
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/WebKit/public/platform/WebRTCConfiguration.h"
#include "third_party/WebKit/public/platform/WebRTCDataChannelInit.h"
#include "third_party/WebKit/public/platform/WebRTCICECandidate.h"
-#include "third_party/WebKit/public/platform/WebRTCPeerConnectionHandlerClient.h"
#include "third_party/WebKit/public/platform/WebRTCSessionDescription.h"
#include "third_party/WebKit/public/platform/WebRTCSessionDescriptionRequest.h"
-#include "third_party/WebKit/public/platform/WebRTCStatsRequest.h"
#include "third_party/WebKit/public/platform/WebRTCVoidRequest.h"
#include "third_party/WebKit/public/platform/WebURL.h"
-#include "third_party/WebKit/public/web/WebFrame.h"
namespace content {
@@ -121,7 +122,8 @@ CreateWebKitSessionDescription(
return description;
}
- description.initialize(UTF8ToUTF16(native_desc->type()), UTF8ToUTF16(sdp));
+ description.initialize(base::UTF8ToUTF16(native_desc->type()),
+ base::UTF8ToUTF16(sdp));
return description;
}
@@ -136,8 +138,8 @@ static void GetNativeIceServers(
webrtc::PeerConnectionInterface::IceServer server;
const blink::WebRTCICEServer& webkit_server =
server_configuration.server(i);
- server.username = UTF16ToUTF8(webkit_server.username());
- server.password = UTF16ToUTF8(webkit_server.credential());
+ server.username = base::UTF16ToUTF8(webkit_server.username());
+ server.password = base::UTF16ToUTF8(webkit_server.credential());
server.uri = webkit_server.uri().spec();
servers->push_back(server);
}
@@ -185,10 +187,11 @@ class CreateSessionDescriptionRequest
virtual void OnSuccess(webrtc::SessionDescriptionInterface* desc) OVERRIDE {
tracker_.TrackOnSuccess(desc);
webkit_request_.requestSucceeded(CreateWebKitSessionDescription(desc));
+ delete desc;
}
virtual void OnFailure(const std::string& error) OVERRIDE {
tracker_.TrackOnFailure(error);
- webkit_request_.requestFailed(UTF8ToUTF16(error));
+ webkit_request_.requestFailed(base::UTF8ToUTF16(error));
}
protected:
@@ -216,7 +219,7 @@ class SetSessionDescriptionRequest
}
virtual void OnFailure(const std::string& error) OVERRIDE {
tracker_.TrackOnFailure(error);
- webkit_request_.requestFailed(UTF8ToUTF16(error));
+ webkit_request_.requestFailed(base::UTF8ToUTF16(error));
}
protected:
@@ -232,16 +235,26 @@ class SetSessionDescriptionRequest
class StatsResponse : public webrtc::StatsObserver {
public:
explicit StatsResponse(const scoped_refptr<LocalRTCStatsRequest>& request)
- : request_(request.get()), response_(request_->createResponse().get()) {}
+ : request_(request.get()), response_(request_->createResponse().get()) {
+ // Measure the overall time it takes to satisfy a getStats request.
+ TRACE_EVENT_ASYNC_BEGIN0("webrtc", "getStats_Native", this);
+ }
virtual void OnComplete(
const std::vector<webrtc::StatsReport>& reports) OVERRIDE {
+ TRACE_EVENT0("webrtc", "StatsResponse::OnComplete")
for (std::vector<webrtc::StatsReport>::const_iterator it = reports.begin();
it != reports.end(); ++it) {
if (it->values.size() > 0) {
AddReport(*it);
}
}
+
+ // Record the getSync operation as done before calling into Blink so that
+ // we don't skew the perf measurements of the native code with whatever the
+ // callback might be doing.
+ TRACE_EVENT_ASYNC_END0("webrtc", "getStats_Native", this);
+
request_->requestSucceeded(response_);
}
@@ -314,19 +327,78 @@ void LocalRTCStatsResponse::addStatistic(size_t report,
impl_.addStatistic(report, name, value);
}
+namespace {
+
+class PeerConnectionUMAObserver : public webrtc::UMAObserver {
+ public:
+ PeerConnectionUMAObserver() {}
+ virtual ~PeerConnectionUMAObserver() {}
+
+ virtual void IncrementCounter(
+ webrtc::PeerConnectionUMAMetricsCounter counter) OVERRIDE {
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.IPMetrics",
+ counter,
+ webrtc::kBoundary);
+ }
+
+ virtual void AddHistogramSample(
+ webrtc::PeerConnectionUMAMetricsName type, int value) OVERRIDE {
+ switch (type) {
+ case webrtc::kTimeToConnect:
+ UMA_HISTOGRAM_MEDIUM_TIMES(
+ "WebRTC.PeerConnection.TimeToConnect",
+ base::TimeDelta::FromMilliseconds(value));
+ break;
+ case webrtc::kNetworkInterfaces_IPv4:
+ UMA_HISTOGRAM_COUNTS_100("WebRTC.PeerConnection.IPv4Interfaces",
+ value);
+ break;
+ case webrtc::kNetworkInterfaces_IPv6:
+ UMA_HISTOGRAM_COUNTS_100("WebRTC.PeerConnection.IPv6Interfaces",
+ value);
+ break;
+ default:
+ NOTREACHED();
+ }
+ }
+};
+
+base::LazyInstance<std::set<RTCPeerConnectionHandler*> >::Leaky
+ g_peer_connection_handlers = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
RTCPeerConnectionHandler::RTCPeerConnectionHandler(
blink::WebRTCPeerConnectionHandlerClient* client,
- MediaStreamDependencyFactory* dependency_factory)
- : PeerConnectionHandlerBase(dependency_factory),
- client_(client),
+ PeerConnectionDependencyFactory* dependency_factory)
+ : client_(client),
+ dependency_factory_(dependency_factory),
frame_(NULL),
- peer_connection_tracker_(NULL) {
+ peer_connection_tracker_(NULL),
+ num_data_channels_created_(0) {
+ g_peer_connection_handlers.Get().insert(this);
}
RTCPeerConnectionHandler::~RTCPeerConnectionHandler() {
+ g_peer_connection_handlers.Get().erase(this);
if (peer_connection_tracker_)
peer_connection_tracker_->UnregisterPeerConnection(this);
STLDeleteValues(&remote_streams_);
+
+ UMA_HISTOGRAM_COUNTS_10000(
+ "WebRTC.NumDataChannelsPerPeerConnection", num_data_channels_created_);
+}
+
+// static
+void RTCPeerConnectionHandler::DestructAllHandlers() {
+ std::set<RTCPeerConnectionHandler*> handlers(
+ g_peer_connection_handlers.Get().begin(),
+ g_peer_connection_handlers.Get().end());
+ for (std::set<RTCPeerConnectionHandler*>::iterator handler = handlers.begin();
+ handler != handlers.end();
+ ++handler) {
+ (*handler)->client_->releasePeerConnectionHandler();
+ }
}
void RTCPeerConnectionHandler::associateWithFrame(blink::WebFrame* frame) {
@@ -358,6 +430,8 @@ bool RTCPeerConnectionHandler::initialize(
peer_connection_tracker_->RegisterPeerConnection(
this, servers, constraints, frame_);
+ uma_observer_ = new talk_base::RefCountedObject<PeerConnectionUMAObserver>();
+ native_peer_connection_->RegisterUMAObserver(uma_observer_.get());
return true;
}
@@ -507,9 +581,9 @@ bool RTCPeerConnectionHandler::addICECandidate(
const blink::WebRTCICECandidate& candidate) {
scoped_ptr<webrtc::IceCandidateInterface> native_candidate(
dependency_factory_->CreateIceCandidate(
- UTF16ToUTF8(candidate.sdpMid()),
+ base::UTF16ToUTF8(candidate.sdpMid()),
candidate.sdpMLineIndex(),
- UTF16ToUTF8(candidate.candidate())));
+ base::UTF16ToUTF8(candidate.candidate())));
if (!native_candidate) {
LOG(ERROR) << "Could not create native ICE candidate.";
return false;
@@ -532,7 +606,7 @@ void RTCPeerConnectionHandler::OnaddICECandidateResult(
// We don't have the actual error code from the libjingle, so for now
// using a generic error string.
return webkit_request.requestFailed(
- UTF8ToUTF16("Error processing ICE candidate"));
+ base::UTF8ToUTF16("Error processing ICE candidate"));
}
return webkit_request.requestSucceeded();
@@ -541,31 +615,57 @@ void RTCPeerConnectionHandler::OnaddICECandidateResult(
bool RTCPeerConnectionHandler::addStream(
const blink::WebMediaStream& stream,
const blink::WebMediaConstraints& options) {
- RTCMediaConstraints constraints(options);
+
+ for (ScopedVector<WebRtcMediaStreamAdapter>::iterator adapter_it =
+ local_streams_.begin(); adapter_it != local_streams_.end();
+ ++adapter_it) {
+ if ((*adapter_it)->IsEqual(stream)) {
+ DVLOG(1) << "RTCPeerConnectionHandler::addStream called with the same "
+ << "stream twice. id=" << stream.id().utf8();
+ return false;
+ }
+ }
if (peer_connection_tracker_)
peer_connection_tracker_->TrackAddStream(
this, stream, PeerConnectionTracker::SOURCE_LOCAL);
- // A media stream is connected to a peer connection, enable the
- // peer connection mode for the capturer.
- WebRtcAudioDeviceImpl* audio_device =
- dependency_factory_->GetWebRtcAudioDevice();
- if (audio_device) {
- WebRtcAudioCapturer* capturer = audio_device->GetDefaultCapturer();
- if (capturer)
- capturer->EnablePeerConnectionMode();
- }
+ PerSessionWebRTCAPIMetrics::GetInstance()->IncrementStreamCounter();
+
+ WebRtcMediaStreamAdapter* adapter =
+ new WebRtcMediaStreamAdapter(stream, dependency_factory_);
+ local_streams_.push_back(adapter);
- return AddStream(stream, &constraints);
+ webrtc::MediaStreamInterface* webrtc_stream = adapter->webrtc_media_stream();
+ track_metrics_.AddStream(MediaStreamTrackMetrics::SENT_STREAM,
+ webrtc_stream);
+
+ RTCMediaConstraints constraints(options);
+ return native_peer_connection_->AddStream(webrtc_stream, &constraints);
}
void RTCPeerConnectionHandler::removeStream(
const blink::WebMediaStream& stream) {
- RemoveStream(stream);
+ // Find the webrtc stream.
+ scoped_refptr<webrtc::MediaStreamInterface> webrtc_stream;
+ for (ScopedVector<WebRtcMediaStreamAdapter>::iterator adapter_it =
+ local_streams_.begin(); adapter_it != local_streams_.end();
+ ++adapter_it) {
+ if ((*adapter_it)->IsEqual(stream)) {
+ webrtc_stream = (*adapter_it)->webrtc_media_stream();
+ local_streams_.erase(adapter_it);
+ break;
+ }
+ }
+ DCHECK(webrtc_stream);
+ native_peer_connection_->RemoveStream(webrtc_stream);
+
if (peer_connection_tracker_)
peer_connection_tracker_->TrackRemoveStream(
this, stream, PeerConnectionTracker::SOURCE_LOCAL);
+ PerSessionWebRTCAPIMetrics::GetInstance()->DecrementStreamCounter();
+ track_metrics_.RemoveStream(MediaStreamTrackMetrics::SENT_STREAM,
+ webrtc_stream);
}
void RTCPeerConnectionHandler::getStats(
@@ -580,8 +680,25 @@ void RTCPeerConnectionHandler::getStats(LocalRTCStatsRequest* request) {
new talk_base::RefCountedObject<StatsResponse>(request));
webrtc::MediaStreamTrackInterface* track = NULL;
if (request->hasSelector()) {
- track = MediaStreamDependencyFactory::GetNativeMediaStreamTrack(
- request->component());
+ blink::WebMediaStreamSource::Type type =
+ request->component().source().type();
+ std::string track_id = request->component().id().utf8();
+ if (type == blink::WebMediaStreamSource::TypeAudio) {
+ track =
+ native_peer_connection_->local_streams()->FindAudioTrack(track_id);
+ if (!track) {
+ track =
+ native_peer_connection_->remote_streams()->FindAudioTrack(track_id);
+ }
+ } else {
+ DCHECK_EQ(blink::WebMediaStreamSource::TypeVideo, type);
+ track =
+ native_peer_connection_->local_streams()->FindVideoTrack(track_id);
+ if (!track) {
+ track =
+ native_peer_connection_->remote_streams()->FindVideoTrack(track_id);
+ }
+ }
if (!track) {
DVLOG(1) << "GetStats: Track not found.";
// TODO(hta): Consider how to get an error back.
@@ -590,13 +707,17 @@ void RTCPeerConnectionHandler::getStats(LocalRTCStatsRequest* request) {
return;
}
}
- GetStats(observer, track);
+ GetStats(observer,
+ track,
+ webrtc::PeerConnectionInterface::kStatsOutputLevelStandard);
}
void RTCPeerConnectionHandler::GetStats(
webrtc::StatsObserver* observer,
- webrtc::MediaStreamTrackInterface* track) {
- if (!native_peer_connection_->GetStats(observer, track)) {
+ webrtc::MediaStreamTrackInterface* track,
+ webrtc::PeerConnectionInterface::StatsOutputLevel level) {
+ TRACE_EVENT0("webrtc", "RTCPeerConnectionHandler::GetStats");
+ if (!native_peer_connection_->GetStats(observer, track, level)) {
DVLOG(1) << "GetStats failed.";
// TODO(hta): Consider how to get an error back.
std::vector<webrtc::StatsReport> no_reports;
@@ -607,7 +728,7 @@ void RTCPeerConnectionHandler::GetStats(
blink::WebRTCDataChannelHandler* RTCPeerConnectionHandler::createDataChannel(
const blink::WebString& label, const blink::WebRTCDataChannelInit& init) {
- DVLOG(1) << "createDataChannel label " << UTF16ToUTF8(label);
+ DVLOG(1) << "createDataChannel label " << base::UTF16ToUTF8(label);
webrtc::DataChannelInit config;
// TODO(jiayl): remove the deprecated reliable field once Libjingle is updated
@@ -618,10 +739,11 @@ blink::WebRTCDataChannelHandler* RTCPeerConnectionHandler::createDataChannel(
config.negotiated = init.negotiated;
config.maxRetransmits = init.maxRetransmits;
config.maxRetransmitTime = init.maxRetransmitTime;
- config.protocol = UTF16ToUTF8(init.protocol);
+ config.protocol = base::UTF16ToUTF8(init.protocol);
talk_base::scoped_refptr<webrtc::DataChannelInterface> webrtc_channel(
- native_peer_connection_->CreateDataChannel(UTF16ToUTF8(label), &config));
+ native_peer_connection_->CreateDataChannel(base::UTF16ToUTF8(label),
+ &config));
if (!webrtc_channel) {
DLOG(ERROR) << "Could not create native data channel.";
return NULL;
@@ -630,6 +752,8 @@ blink::WebRTCDataChannelHandler* RTCPeerConnectionHandler::createDataChannel(
peer_connection_tracker_->TrackCreateDataChannel(
this, webrtc_channel.get(), PeerConnectionTracker::SOURCE_LOCAL);
+ ++num_data_channels_created_;
+
return new RtcDataChannelHandler(webrtc_channel);
}
@@ -637,15 +761,14 @@ blink::WebRTCDTMFSenderHandler* RTCPeerConnectionHandler::createDTMFSender(
const blink::WebMediaStreamTrack& track) {
DVLOG(1) << "createDTMFSender.";
- if (track.source().type() != blink::WebMediaStreamSource::TypeAudio) {
+ MediaStreamTrack* native_track = MediaStreamTrack::GetTrack(track);
+ if (!native_track ||
+ track.source().type() != blink::WebMediaStreamSource::TypeAudio) {
DLOG(ERROR) << "Could not create DTMF sender from a non-audio track.";
return NULL;
}
- webrtc::AudioTrackInterface* audio_track =
- static_cast<webrtc::AudioTrackInterface*>(
- MediaStreamDependencyFactory::GetNativeMediaStreamTrack(track));
-
+ webrtc::AudioTrackInterface* audio_track = native_track->GetAudioAdapter();
talk_base::scoped_refptr<webrtc::DtmfSenderInterface> sender(
native_peer_connection_->CreateDtmfSender(audio_track));
if (!sender) {
@@ -683,6 +806,7 @@ void RTCPeerConnectionHandler::OnSignalingChange(
// Called any time the IceConnectionState changes
void RTCPeerConnectionHandler::OnIceConnectionChange(
webrtc::PeerConnectionInterface::IceConnectionState new_state) {
+ track_metrics_.IceConnectionChange(new_state);
blink::WebRTCPeerConnectionHandlerClient::ICEConnectionState state =
GetWebKitIceConnectionState(new_state);
if (peer_connection_tracker_)
@@ -723,6 +847,11 @@ void RTCPeerConnectionHandler::OnAddStream(
this, remote_stream->webkit_stream(),
PeerConnectionTracker::SOURCE_REMOTE);
+ PerSessionWebRTCAPIMetrics::GetInstance()->IncrementStreamCounter();
+
+ track_metrics_.AddStream(MediaStreamTrackMetrics::RECEIVED_STREAM,
+ stream_interface);
+
client_->didAddRemoteStream(remote_stream->webkit_stream());
}
@@ -735,6 +864,10 @@ void RTCPeerConnectionHandler::OnRemoveStream(
return;
}
+ track_metrics_.RemoveStream(MediaStreamTrackMetrics::RECEIVED_STREAM,
+ stream_interface);
+ PerSessionWebRTCAPIMetrics::GetInstance()->DecrementStreamCounter();
+
scoped_ptr<RemoteMediaStreamImpl> remote_stream(it->second);
const blink::WebMediaStream& webkit_stream = remote_stream->webkit_stream();
DCHECK(!webkit_stream.isNull());
@@ -756,8 +889,8 @@ void RTCPeerConnectionHandler::OnIceCandidate(
return;
}
blink::WebRTCICECandidate web_candidate;
- web_candidate.initialize(UTF8ToUTF16(sdp),
- UTF8ToUTF16(candidate->sdp_mid()),
+ web_candidate.initialize(base::UTF8ToUTF16(sdp),
+ base::UTF8ToUTF16(candidate->sdp_mid()),
candidate->sdp_mline_index());
if (peer_connection_tracker_)
peer_connection_tracker_->TrackAddIceCandidate(
@@ -791,8 +924,8 @@ webrtc::SessionDescriptionInterface*
RTCPeerConnectionHandler::CreateNativeSessionDescription(
const blink::WebRTCSessionDescription& description,
webrtc::SdpParseError* error) {
- std::string sdp = UTF16ToUTF8(description.sdp());
- std::string type = UTF16ToUTF8(description.type());
+ std::string sdp = base::UTF16ToUTF8(description.sdp());
+ std::string type = base::UTF16ToUTF8(description.type());
webrtc::SessionDescriptionInterface* native_desc =
dependency_factory_->CreateSessionDescription(type, sdp, error);
diff --git a/chromium/content/renderer/media/rtc_peer_connection_handler.h b/chromium/content/renderer/media/rtc_peer_connection_handler.h
index f3de4b9e8f7..558ecbf631d 100644
--- a/chromium/content/renderer/media/rtc_peer_connection_handler.h
+++ b/chromium/content/renderer/media/rtc_peer_connection_handler.h
@@ -5,10 +5,14 @@
#ifndef CONTENT_RENDERER_MEDIA_RTC_PEER_CONNECTION_HANDLER_H_
#define CONTENT_RENDERER_MEDIA_RTC_PEER_CONNECTION_HANDLER_H_
+#include <map>
+#include <string>
+
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
#include "content/common/content_export.h"
-#include "content/renderer/media/peer_connection_handler_base.h"
+#include "content/renderer/media/webrtc/media_stream_track_metrics.h"
#include "third_party/WebKit/public/platform/WebRTCPeerConnectionHandler.h"
#include "third_party/WebKit/public/platform/WebRTCStatsRequest.h"
#include "third_party/WebKit/public/platform/WebRTCStatsResponse.h"
@@ -20,7 +24,10 @@ class WebRTCDataChannelHandler;
namespace content {
+class PeerConnectionDependencyFactory;
class PeerConnectionTracker;
+class RemoteMediaStreamImpl;
+class WebRtcMediaStreamAdapter;
// Mockable wrapper for blink::WebRTCStatsResponse
class CONTENT_EXPORT LocalRTCStatsResponse
@@ -73,14 +80,17 @@ class CONTENT_EXPORT LocalRTCStatsRequest
// Callbacks to the webrtc::PeerConnectionObserver implementation also occur on
// the main render thread.
class CONTENT_EXPORT RTCPeerConnectionHandler
- : public PeerConnectionHandlerBase,
- NON_EXPORTED_BASE(public blink::WebRTCPeerConnectionHandler) {
+ : NON_EXPORTED_BASE(public blink::WebRTCPeerConnectionHandler),
+ NON_EXPORTED_BASE(public webrtc::PeerConnectionObserver) {
public:
RTCPeerConnectionHandler(
blink::WebRTCPeerConnectionHandlerClient* client,
- MediaStreamDependencyFactory* dependency_factory);
+ PeerConnectionDependencyFactory* dependency_factory);
virtual ~RTCPeerConnectionHandler();
+ // Destroy all existing RTCPeerConnectionHandler objects.
+ static void DestructAllHandlers();
+
void associateWithFrame(blink::WebFrame* frame);
// Initialize method only used for unit test.
@@ -162,10 +172,16 @@ class CONTENT_EXPORT RTCPeerConnectionHandler
// Calls GetStats on |native_peer_connection_|.
void GetStats(webrtc::StatsObserver* observer,
- webrtc::MediaStreamTrackInterface* track);
+ webrtc::MediaStreamTrackInterface* track,
+ webrtc::PeerConnectionInterface::StatsOutputLevel level);
PeerConnectionTracker* peer_connection_tracker();
+ protected:
+ webrtc::PeerConnectionInterface* native_peer_connection() {
+ return native_peer_connection_.get();
+ }
+
private:
webrtc::SessionDescriptionInterface* CreateNativeSessionDescription(
const blink::WebRTCSessionDescription& description,
@@ -174,10 +190,29 @@ class CONTENT_EXPORT RTCPeerConnectionHandler
// |client_| is a weak pointer, and is valid until stop() has returned.
blink::WebRTCPeerConnectionHandlerClient* client_;
+ // |dependency_factory_| is a raw pointer, and is valid for the lifetime of
+ // RenderThreadImpl.
+ PeerConnectionDependencyFactory* dependency_factory_;
+
blink::WebFrame* frame_;
+ ScopedVector<WebRtcMediaStreamAdapter> local_streams_;
+
PeerConnectionTracker* peer_connection_tracker_;
+ MediaStreamTrackMetrics track_metrics_;
+
+ // Counter for a UMA stat reported at destruction time.
+ int num_data_channels_created_;
+
+ // |native_peer_connection_| is the libjingle native PeerConnection object.
+ scoped_refptr<webrtc::PeerConnectionInterface> native_peer_connection_;
+
+ typedef std::map<webrtc::MediaStreamInterface*,
+ content::RemoteMediaStreamImpl*> RemoteStreamMap;
+ RemoteStreamMap remote_streams_;
+ scoped_refptr<webrtc::UMAObserver> uma_observer_;
+
DISALLOW_COPY_AND_ASSIGN(RTCPeerConnectionHandler);
};
diff --git a/chromium/content/renderer/media/rtc_peer_connection_handler_unittest.cc b/chromium/content/renderer/media/rtc_peer_connection_handler_unittest.cc
index 18901ea7a7e..7ed95ffb2af 100644
--- a/chromium/content/renderer/media/rtc_peer_connection_handler_unittest.cc
+++ b/chromium/content/renderer/media/rtc_peer_connection_handler_unittest.cc
@@ -6,15 +6,22 @@
#include <vector>
#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
#include "base/strings/utf_string_conversions.h"
#include "base/values.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/media_stream_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_stream_video_source.h"
#include "content/renderer/media/mock_peer_connection_impl.h"
#include "content/renderer/media/mock_web_rtc_peer_connection_handler_client.h"
#include "content/renderer/media/peer_connection_tracker.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/rtc_peer_connection_handler.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -180,24 +187,25 @@ class RTCPeerConnectionHandlerUnderTest : public RTCPeerConnectionHandler {
public:
RTCPeerConnectionHandlerUnderTest(
WebRTCPeerConnectionHandlerClient* client,
- MediaStreamDependencyFactory* dependency_factory)
+ PeerConnectionDependencyFactory* dependency_factory)
: RTCPeerConnectionHandler(client, dependency_factory) {
}
MockPeerConnectionImpl* native_peer_connection() {
- return static_cast<MockPeerConnectionImpl*>(native_peer_connection_.get());
+ return static_cast<MockPeerConnectionImpl*>(
+ RTCPeerConnectionHandler::native_peer_connection());
}
};
class RTCPeerConnectionHandlerTest : public ::testing::Test {
public:
RTCPeerConnectionHandlerTest() : mock_peer_connection_(NULL) {
+ child_process_.reset(new ChildProcess());
}
virtual void SetUp() {
mock_client_.reset(new NiceMock<MockWebRTCPeerConnectionHandlerClient>());
- mock_dependency_factory_.reset(new MockMediaStreamDependencyFactory());
- mock_dependency_factory_->EnsurePeerConnectionFactory();
+ mock_dependency_factory_.reset(new MockPeerConnectionDependencyFactory());
pc_handler_.reset(
new RTCPeerConnectionHandlerUnderTest(mock_client_.get(),
mock_dependency_factory_.get()));
@@ -221,49 +229,36 @@ class RTCPeerConnectionHandlerTest : public ::testing::Test {
audio_source.initialize(blink::WebString::fromUTF8(audio_track_label),
blink::WebMediaStreamSource::TypeAudio,
blink::WebString::fromUTF8("audio_track"));
+ audio_source.setExtraData(new MediaStreamAudioSource());
blink::WebMediaStreamSource video_source;
video_source.initialize(blink::WebString::fromUTF8(video_track_label),
blink::WebMediaStreamSource::TypeVideo,
blink::WebString::fromUTF8("video_track"));
+ MockMediaStreamVideoSource* native_video_source =
+ new MockMediaStreamVideoSource(false);
+ video_source.setExtraData(native_video_source);
blink::WebVector<blink::WebMediaStreamTrack> audio_tracks(
static_cast<size_t>(1));
audio_tracks[0].initialize(audio_source.id(), audio_source);
+ audio_tracks[0].setExtraData(
+ new MediaStreamTrack(
+ WebRtcLocalAudioTrackAdapter::Create(audio_track_label,
+ NULL),
+ true));
blink::WebVector<blink::WebMediaStreamTrack> video_tracks(
static_cast<size_t>(1));
- video_tracks[0].initialize(video_source.id(), video_source);
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ video_tracks[0] = MediaStreamVideoTrack::CreateVideoTrack(
+ native_video_source, constraints,
+ MediaStreamVideoSource::ConstraintsCallback(), true);
blink::WebMediaStream local_stream;
- local_stream.initialize(UTF8ToUTF16(stream_label), audio_tracks,
+ local_stream.initialize(base::UTF8ToUTF16(stream_label), audio_tracks,
video_tracks);
-
- scoped_refptr<webrtc::MediaStreamInterface> native_stream(
- mock_dependency_factory_->CreateLocalMediaStream(stream_label));
-
- local_stream.audioTracks(audio_tracks);
- const std::string audio_track_id = UTF16ToUTF8(audio_tracks[0].id());
- scoped_refptr<WebRtcAudioCapturer> capturer;
- RTCMediaConstraints audio_constraints(audio_source.constraints());
- scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- mock_dependency_factory_->CreateLocalAudioTrack(
- audio_track_id, capturer, NULL, NULL,
- &audio_constraints));
- MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
- audio_track.get(), audio_tracks[0], true);
- native_stream->AddTrack(audio_track.get());
-
- local_stream.videoTracks(video_tracks);
- const std::string video_track_id = UTF16ToUTF8(video_tracks[0].id());
- webrtc::VideoSourceInterface* source = NULL;
- scoped_refptr<webrtc::VideoTrackInterface> video_track(
- mock_dependency_factory_->CreateLocalVideoTrack(
- video_track_id, source));
- MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
- video_track.get(), video_tracks[0], true);
- native_stream->AddTrack(video_track.get());
-
local_stream.setExtraData(
- new MediaStreamExtraData(native_stream.get(), true));
+ new MediaStream(local_stream));
return local_stream;
}
@@ -285,19 +280,17 @@ class RTCPeerConnectionHandlerTest : public ::testing::Test {
if (!audio_track_label.empty()) {
scoped_refptr<WebRtcAudioCapturer> capturer;
scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- mock_dependency_factory_->CreateLocalAudioTrack(audio_track_label,
- capturer,
- NULL,
- NULL,
- NULL));
+ WebRtcLocalAudioTrackAdapter::Create(audio_track_label, NULL));
stream->AddTrack(audio_track.get());
}
mock_peer_connection_->AddRemoteStream(stream.get());
return stream;
}
+ base::MessageLoop message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
scoped_ptr<MockWebRTCPeerConnectionHandlerClient> mock_client_;
- scoped_ptr<MockMediaStreamDependencyFactory> mock_dependency_factory_;
+ scoped_ptr<MockPeerConnectionDependencyFactory> mock_dependency_factory_;
scoped_ptr<NiceMock<MockPeerConnectionTracker> > mock_tracker_;
scoped_ptr<RTCPeerConnectionHandlerUnderTest> pc_handler_;
@@ -429,10 +422,42 @@ TEST_F(RTCPeerConnectionHandlerTest, addAndRemoveStream) {
EXPECT_EQ(1u,
mock_peer_connection_->local_streams()->at(0)->GetVideoTracks().size());
+ EXPECT_FALSE(pc_handler_->addStream(local_stream, constraints));
+
pc_handler_->removeStream(local_stream);
EXPECT_EQ(0u, mock_peer_connection_->local_streams()->count());
}
+TEST_F(RTCPeerConnectionHandlerTest, addStreamWithStoppedAudioAndVideoTrack) {
+ std::string stream_label = "local_stream";
+ blink::WebMediaStream local_stream(
+ CreateLocalMediaStream(stream_label));
+ blink::WebMediaConstraints constraints;
+
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ local_stream.audioTracks(audio_tracks);
+ MediaStreamAudioSource* native_audio_source =
+ static_cast<MediaStreamAudioSource*>(
+ audio_tracks[0].source().extraData());
+ native_audio_source->StopSource();
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ local_stream.videoTracks(video_tracks);
+ MediaStreamVideoSource* native_video_source =
+ static_cast<MediaStreamVideoSource*>(
+ video_tracks[0].source().extraData());
+ native_video_source->StopSource();
+
+ EXPECT_TRUE(pc_handler_->addStream(local_stream, constraints));
+ EXPECT_EQ(stream_label, mock_peer_connection_->stream_label());
+ EXPECT_EQ(
+ 1u,
+ mock_peer_connection_->local_streams()->at(0)->GetAudioTracks().size());
+ EXPECT_EQ(
+ 1u,
+ mock_peer_connection_->local_streams()->at(0)->GetVideoTracks().size());
+}
+
TEST_F(RTCPeerConnectionHandlerTest, GetStatsNoSelector) {
scoped_refptr<MockRTCStatsRequest> request(
new talk_base::RefCountedObject<MockRTCStatsRequest>());
@@ -653,20 +678,20 @@ TEST_F(RTCPeerConnectionHandlerTest, OnAddAndOnRemoveStream) {
EXPECT_CALL(*mock_tracker_.get(), TrackAddStream(
pc_handler_.get(),
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label)),
+ base::UTF8ToUTF16(remote_stream_label)),
PeerConnectionTracker::SOURCE_REMOTE));
EXPECT_CALL(*mock_client_.get(), didAddRemoteStream(
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label))));
+ base::UTF8ToUTF16(remote_stream_label))));
EXPECT_CALL(*mock_tracker_.get(), TrackRemoveStream(
pc_handler_.get(),
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label)),
+ base::UTF8ToUTF16(remote_stream_label)),
PeerConnectionTracker::SOURCE_REMOTE));
EXPECT_CALL(*mock_client_.get(), didRemoveRemoteStream(
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label))));
+ base::UTF8ToUTF16(remote_stream_label))));
pc_handler_->OnAddStream(remote_stream.get());
pc_handler_->OnRemoveStream(remote_stream.get());
@@ -681,7 +706,7 @@ TEST_F(RTCPeerConnectionHandlerTest, RemoteTrackState) {
testing::InSequence sequence;
EXPECT_CALL(*mock_client_.get(), didAddRemoteStream(
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label))));
+ base::UTF8ToUTF16(remote_stream_label))));
pc_handler_->OnAddStream(remote_stream.get());
const blink::WebMediaStream& webkit_stream = mock_client_->remote_stream();
@@ -713,21 +738,28 @@ TEST_F(RTCPeerConnectionHandlerTest, RemoveAndAddAudioTrackFromRemoteStream) {
EXPECT_CALL(*mock_client_.get(), didAddRemoteStream(
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label))));
+ base::UTF8ToUTF16(remote_stream_label))));
pc_handler_->OnAddStream(remote_stream.get());
const blink::WebMediaStream& webkit_stream = mock_client_->remote_stream();
- blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
- webkit_stream.audioTracks(audio_tracks);
- EXPECT_EQ(1u, audio_tracks.size());
+ {
+ // Test in a small scope so that |audio_tracks| don't hold on to destroyed
+ // source later.
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ webkit_stream.audioTracks(audio_tracks);
+ EXPECT_EQ(1u, audio_tracks.size());
+ }
// Remove the Webrtc audio track from the Webrtc MediaStream.
scoped_refptr<webrtc::AudioTrackInterface> webrtc_track =
remote_stream->GetAudioTracks()[0].get();
remote_stream->RemoveTrack(webrtc_track.get());
- blink::WebVector<blink::WebMediaStreamTrack> modified_audio_tracks1;
- webkit_stream.audioTracks(modified_audio_tracks1);
- EXPECT_EQ(0u, modified_audio_tracks1.size());
+
+ {
+ blink::WebVector<blink::WebMediaStreamTrack> modified_audio_tracks1;
+ webkit_stream.audioTracks(modified_audio_tracks1);
+ EXPECT_EQ(0u, modified_audio_tracks1.size());
+ }
// Add the WebRtc audio track again.
remote_stream->AddTrack(webrtc_track.get());
@@ -743,21 +775,27 @@ TEST_F(RTCPeerConnectionHandlerTest, RemoveAndAddVideoTrackFromRemoteStream) {
EXPECT_CALL(*mock_client_.get(), didAddRemoteStream(
testing::Property(&blink::WebMediaStream::id,
- UTF8ToUTF16(remote_stream_label))));
+ base::UTF8ToUTF16(remote_stream_label))));
pc_handler_->OnAddStream(remote_stream.get());
const blink::WebMediaStream& webkit_stream = mock_client_->remote_stream();
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- webkit_stream.videoTracks(video_tracks);
- EXPECT_EQ(1u, video_tracks.size());
+ {
+ // Test in a small scope so that |video_tracks| don't hold on to destroyed
+ // source later.
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ webkit_stream.videoTracks(video_tracks);
+ EXPECT_EQ(1u, video_tracks.size());
+ }
// Remove the Webrtc video track from the Webrtc MediaStream.
scoped_refptr<webrtc::VideoTrackInterface> webrtc_track =
remote_stream->GetVideoTracks()[0].get();
remote_stream->RemoveTrack(webrtc_track.get());
- blink::WebVector<blink::WebMediaStreamTrack> modified_video_tracks1;
- webkit_stream.videoTracks(modified_video_tracks1);
- EXPECT_EQ(0u, modified_video_tracks1.size());
+ {
+ blink::WebVector<blink::WebMediaStreamTrack> modified_video_tracks1;
+ webkit_stream.videoTracks(modified_video_tracks1);
+ EXPECT_EQ(0u, modified_video_tracks1.size());
+ }
// Add the WebRtc video track again.
remote_stream->AddTrack(webrtc_track.get());
diff --git a/chromium/content/renderer/media/rtc_video_capture_delegate.cc b/chromium/content/renderer/media/rtc_video_capture_delegate.cc
deleted file mode 100644
index 4cc2b59193a..00000000000
--- a/chromium/content/renderer/media/rtc_video_capture_delegate.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/rtc_video_capture_delegate.h"
-
-#include "base/bind.h"
-#include "media/base/video_frame.h"
-
-namespace content {
-
-RtcVideoCaptureDelegate::RtcVideoCaptureDelegate(
- const media::VideoCaptureSessionId id,
- VideoCaptureImplManager* vc_manager)
- : session_id_(id),
- vc_manager_(vc_manager),
- capture_engine_(NULL),
- got_first_frame_(false),
- error_occured_(false) {
- DVLOG(3) << " RtcVideoCaptureDelegate::ctor";
- capture_engine_ = vc_manager_->AddDevice(session_id_, this);
-}
-
-RtcVideoCaptureDelegate::~RtcVideoCaptureDelegate() {
- DVLOG(3) << " RtcVideoCaptureDelegate::dtor";
- vc_manager_->RemoveDevice(session_id_, this);
-}
-
-void RtcVideoCaptureDelegate::StartCapture(
- const media::VideoCaptureParams& params,
- const FrameCapturedCallback& captured_callback,
- const StateChangeCallback& state_callback) {
- DVLOG(3) << " RtcVideoCaptureDelegate::StartCapture ";
- message_loop_proxy_ = base::MessageLoopProxy::current();
- captured_callback_ = captured_callback;
- state_callback_ = state_callback;
- got_first_frame_ = false;
- error_occured_ = false;
-
- // Increase the reference count to ensure we are not deleted until
- // The we are unregistered in RtcVideoCaptureDelegate::OnRemoved.
- AddRef();
- capture_engine_->StartCapture(this, params);
-}
-
-void RtcVideoCaptureDelegate::StopCapture() {
- // Immediately make sure we don't provide more frames.
- captured_callback_.Reset();
- state_callback_.Reset();
- capture_engine_->StopCapture(this);
-}
-
-void RtcVideoCaptureDelegate::OnStarted(media::VideoCapture* capture) {
- DVLOG(3) << " RtcVideoCaptureDelegate::OnStarted";
-}
-
-void RtcVideoCaptureDelegate::OnStopped(media::VideoCapture* capture) {
-}
-
-void RtcVideoCaptureDelegate::OnPaused(media::VideoCapture* capture) {
-}
-
-void RtcVideoCaptureDelegate::OnError(media::VideoCapture* capture,
- int error_code) {
- DVLOG(3) << " RtcVideoCaptureDelegate::OnError";
- message_loop_proxy_->PostTask(
- FROM_HERE,
- base::Bind(&RtcVideoCaptureDelegate::OnErrorOnCaptureThread,
- this, capture));
-}
-
-void RtcVideoCaptureDelegate::OnRemoved(media::VideoCapture* capture) {
- DVLOG(3) << " RtcVideoCaptureDelegate::OnRemoved";
- message_loop_proxy_->PostTask(
- FROM_HERE,
- base::Bind(&RtcVideoCaptureDelegate::OnRemovedOnCaptureThread,
- this, capture));
-
- // Balance the AddRef in StartCapture.
- // This means we are no longer registered as an event handler and can safely
- // be deleted.
- Release();
-}
-
-void RtcVideoCaptureDelegate::OnFrameReady(
- media::VideoCapture* capture,
- const scoped_refptr<media::VideoFrame>& frame) {
- message_loop_proxy_->PostTask(
- FROM_HERE,
- base::Bind(&RtcVideoCaptureDelegate::OnFrameReadyOnCaptureThread,
- this,
- capture,
- frame));
-}
-
-void RtcVideoCaptureDelegate::OnFrameReadyOnCaptureThread(
- media::VideoCapture* capture,
- const scoped_refptr<media::VideoFrame>& frame) {
- if (!captured_callback_.is_null()) {
- if (!got_first_frame_) {
- got_first_frame_ = true;
- if (!state_callback_.is_null())
- state_callback_.Run(CAPTURE_RUNNING);
- }
-
- captured_callback_.Run(frame);
- }
-}
-
-void RtcVideoCaptureDelegate::OnErrorOnCaptureThread(
- media::VideoCapture* capture) {
- error_occured_ = true;
- if (!state_callback_.is_null())
- state_callback_.Run(CAPTURE_FAILED);
-}
-
-
-void RtcVideoCaptureDelegate::OnRemovedOnCaptureThread(
- media::VideoCapture* capture) {
- if (!error_occured_ && !state_callback_.is_null())
- state_callback_.Run(CAPTURE_STOPPED);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_capture_delegate.h b/chromium/content/renderer/media/rtc_video_capture_delegate.h
deleted file mode 100644
index f081c3731b0..00000000000
--- a/chromium/content/renderer/media/rtc_video_capture_delegate.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_RTC_VIDEO_CAPTURE_DELEGATE_H_
-#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_CAPTURE_DELEGATE_H_
-
-#include "base/callback.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "content/common/media/video_capture.h"
-#include "content/renderer/media/video_capture_impl_manager.h"
-#include "media/video/capture/video_capture.h"
-
-namespace content {
-
-// Implements a simple reference counted video capturer that guarantees that
-// methods in RtcVideoCaptureDelegateEventHandler is only called from when
-// StartCapture have been called until after StopCapture have been called.
-// It uses VideoCaptureImplManager to start / stop and receive I420 frames
-// from Chrome's video capture implementation.
-class RtcVideoCaptureDelegate
- : public base::RefCountedThreadSafe<RtcVideoCaptureDelegate>,
- public media::VideoCapture::EventHandler {
- public:
- enum CaptureState {
- CAPTURE_STOPPED, // The capturer has been stopped or hasn't started yet.
- CAPTURE_RUNNING, // The capturer has been started successfully and is now
- // capturing.
- CAPTURE_FAILED, // The capturer failed to start.
- };
-
- typedef base::Callback<void(const scoped_refptr<media::VideoFrame>&)>
- FrameCapturedCallback;
- typedef base::Callback<void(CaptureState)> StateChangeCallback;
-
- RtcVideoCaptureDelegate(const media::VideoCaptureSessionId id,
- VideoCaptureImplManager* vc_manager);
-
- void StartCapture(const media::VideoCaptureParams& params,
- const FrameCapturedCallback& captured_callback,
- const StateChangeCallback& state_callback);
- void StopCapture();
-
- // media::VideoCapture::EventHandler implementation.
- // These functions are called from a thread owned by |vc_manager_|.
- virtual void OnStarted(media::VideoCapture* capture) OVERRIDE;
- virtual void OnStopped(media::VideoCapture* capture) OVERRIDE;
- virtual void OnPaused(media::VideoCapture* capture) OVERRIDE;
- virtual void OnError(media::VideoCapture* capture, int error_code) OVERRIDE;
- virtual void OnRemoved(media::VideoCapture* capture) OVERRIDE;
- virtual void OnFrameReady(
- media::VideoCapture* capture,
- const scoped_refptr<media::VideoFrame>& frame) OVERRIDE;
-
- private:
- friend class base::RefCountedThreadSafe<RtcVideoCaptureDelegate>;
-
- virtual ~RtcVideoCaptureDelegate();
-
- void OnFrameReadyOnCaptureThread(
- media::VideoCapture* capture,
- const scoped_refptr<media::VideoFrame>& frame);
- void OnErrorOnCaptureThread(media::VideoCapture* capture);
- void OnRemovedOnCaptureThread(media::VideoCapture* capture);
-
- // The id identifies which video capture device is used for this video
- // capture session.
- media::VideoCaptureSessionId session_id_;
- // The video capture manager handles open/close of video capture devices.
- scoped_refptr<VideoCaptureImplManager> vc_manager_;
- media::VideoCapture* capture_engine_;
-
- // Accessed on the thread where StartCapture is called.
- bool got_first_frame_;
- bool error_occured_;
-
- // |captured_callback_| is provided to this class in StartCapture and must be
- // valid until StopCapture is called.
- FrameCapturedCallback captured_callback_;
- // |state_callback_| is provided to this class in StartCapture and must be
- // valid until StopCapture is called.
- StateChangeCallback state_callback_;
- // Message loop of the caller of StartCapture.
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_RTC_VIDEO_CAPTURE_DELEGATE_H_
diff --git a/chromium/content/renderer/media/rtc_video_capturer.cc b/chromium/content/renderer/media/rtc_video_capturer.cc
deleted file mode 100644
index 0a7a82cf588..00000000000
--- a/chromium/content/renderer/media/rtc_video_capturer.cc
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/rtc_video_capturer.h"
-
-#include "base/bind.h"
-#include "base/debug/trace_event.h"
-#include "media/base/video_frame.h"
-
-namespace content {
-
-RtcVideoCapturer::RtcVideoCapturer(const media::VideoCaptureSessionId id,
- VideoCaptureImplManager* vc_manager,
- bool is_screencast)
- : is_screencast_(is_screencast),
- delegate_(new RtcVideoCaptureDelegate(id, vc_manager)),
- state_(VIDEO_CAPTURE_STATE_STOPPED) {}
-
-RtcVideoCapturer::~RtcVideoCapturer() {
- DCHECK(VIDEO_CAPTURE_STATE_STOPPED);
- DVLOG(3) << " RtcVideoCapturer::dtor";
-}
-
-cricket::CaptureState RtcVideoCapturer::Start(
- const cricket::VideoFormat& capture_format) {
- DVLOG(3) << " RtcVideoCapturer::Start ";
- if (state_ == VIDEO_CAPTURE_STATE_STARTED) {
- DVLOG(1) << "Got a StartCapture when already started!!! ";
- return cricket::CS_FAILED;
- }
-
- media::VideoCaptureParams request;
- request.requested_format = media::VideoCaptureFormat(
- gfx::Size(capture_format.width, capture_format.height),
- capture_format.framerate(),
- media::PIXEL_FORMAT_I420);
-
- SetCaptureFormat(&capture_format);
-
- state_ = VIDEO_CAPTURE_STATE_STARTED;
- first_frame_timestamp_ = media::kNoTimestamp();
- delegate_->StartCapture(
- request,
- base::Bind(&RtcVideoCapturer::OnFrameCaptured, base::Unretained(this)),
- base::Bind(&RtcVideoCapturer::OnStateChange, base::Unretained(this)));
- // Update the desired aspect ratio so that later the video frame can be
- // cropped to meet the requirement if the camera returns a different
- // resolution than the |request|.
- UpdateAspectRatio(capture_format.width, capture_format.height);
- return cricket::CS_STARTING;
-}
-
-void RtcVideoCapturer::Stop() {
- DVLOG(3) << " RtcVideoCapturer::Stop ";
- if (state_ == VIDEO_CAPTURE_STATE_STOPPED) {
- DVLOG(1) << "Got a StopCapture while not started.";
- return;
- }
-
- SetCaptureFormat(NULL);
- state_ = VIDEO_CAPTURE_STATE_STOPPED;
- delegate_->StopCapture();
- SignalStateChange(this, cricket::CS_STOPPED);
-}
-
-bool RtcVideoCapturer::IsRunning() {
- return state_ == VIDEO_CAPTURE_STATE_STARTED;
-}
-
-bool RtcVideoCapturer::GetPreferredFourccs(std::vector<uint32>* fourccs) {
- if (!fourccs)
- return false;
- fourccs->push_back(cricket::FOURCC_I420);
- return true;
-}
-
-bool RtcVideoCapturer::IsScreencast() const {
- return is_screencast_;
-}
-
-bool RtcVideoCapturer::GetBestCaptureFormat(const cricket::VideoFormat& desired,
- cricket::VideoFormat* best_format) {
- if (!best_format) {
- return false;
- }
-
- // Chrome does not support capability enumeration.
- // Use the desired format as the best format.
- best_format->width = desired.width;
- best_format->height = desired.height;
- best_format->fourcc = cricket::FOURCC_I420;
- best_format->interval = desired.interval;
- return true;
-}
-
-void RtcVideoCapturer::OnFrameCaptured(
- const scoped_refptr<media::VideoFrame>& frame) {
- if (first_frame_timestamp_ == media::kNoTimestamp())
- first_frame_timestamp_ = frame->GetTimestamp();
-
- // Currently, |fourcc| is always I420.
- cricket::CapturedFrame captured_frame;
- captured_frame.width = frame->coded_size().width();
- captured_frame.height = frame->coded_size().height();
- captured_frame.fourcc = cricket::FOURCC_I420;
- // cricket::CapturedFrame time is in nanoseconds.
- captured_frame.elapsed_time =
- (frame->GetTimestamp() - first_frame_timestamp_).InMicroseconds() *
- base::Time::kNanosecondsPerMicrosecond;
- captured_frame.time_stamp = frame->GetTimestamp().InMicroseconds() *
- base::Time::kNanosecondsPerMicrosecond;
- // TODO(sheu): we assume contiguous layout of image planes.
- captured_frame.data = frame->data(0);
- captured_frame.data_size =
- media::VideoFrame::AllocationSize(frame->format(), frame->coded_size());
- captured_frame.pixel_height = 1;
- captured_frame.pixel_width = 1;
-
- TRACE_EVENT_INSTANT2(
- "rtc_video_capturer",
- "OnFrameCaptured",
- TRACE_EVENT_SCOPE_THREAD,
- "elapsed time",
- captured_frame.elapsed_time,
- "timestamp_ms",
- captured_frame.time_stamp / talk_base::kNumNanosecsPerMillisec);
-
- // This signals to libJingle that a new VideoFrame is available.
- // libJingle have no assumptions on what thread this signal come from.
- SignalFrameCaptured(this, &captured_frame);
-}
-
-void RtcVideoCapturer::OnStateChange(
- RtcVideoCaptureDelegate::CaptureState state) {
- cricket::CaptureState converted_state = cricket::CS_FAILED;
- DVLOG(3) << " RtcVideoCapturer::OnStateChange " << state;
- switch (state) {
- case RtcVideoCaptureDelegate::CAPTURE_STOPPED:
- converted_state = cricket::CS_STOPPED;
- break;
- case RtcVideoCaptureDelegate::CAPTURE_RUNNING:
- converted_state = cricket::CS_RUNNING;
- break;
- case RtcVideoCaptureDelegate::CAPTURE_FAILED:
- // TODO(perkj): Update the comments in the the definition of
- // cricket::CS_FAILED. According to the comments, cricket::CS_FAILED
- // means that the capturer failed to start. But here and in libjingle it
- // is also used if an error occur during capturing.
- converted_state = cricket::CS_FAILED;
- break;
- default:
- NOTREACHED();
- break;
- }
- SignalStateChange(this, converted_state);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_capturer.h b/chromium/content/renderer/media/rtc_video_capturer.h
deleted file mode 100644
index 006efdf647f..00000000000
--- a/chromium/content/renderer/media/rtc_video_capturer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_RTC_VIDEO_CAPTURER_H_
-#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_CAPTURER_H_
-
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "content/renderer/media/rtc_video_capture_delegate.h"
-#include "third_party/libjingle/source/talk/media/base/videocapturer.h"
-
-namespace content {
-class VideoCaptureImplManager;
-
-// RtcVideoCapturer implements a simple cricket::VideoCapturer that is used for
-// VideoCapturing in libJingle and especially in PeerConnections.
-// The class is created and destroyed on the main render thread.
-// PeerConnection access cricket::VideoCapturer from a libJingle worker thread.
-// The video frames are delivered in OnFrameCaptured on a thread owned by
-// Chrome's video capture implementation.
-class RtcVideoCapturer
- : public cricket::VideoCapturer {
- public:
- RtcVideoCapturer(const media::VideoCaptureSessionId id,
- VideoCaptureImplManager* vc_manager,
- bool is_screencast);
- virtual ~RtcVideoCapturer();
-
- // cricket::VideoCapturer implementation.
- // These methods are accessed from a libJingle worker thread.
- virtual cricket::CaptureState Start(
- const cricket::VideoFormat& capture_format) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual bool IsRunning() OVERRIDE;
- virtual bool GetPreferredFourccs(std::vector<uint32>* fourccs) OVERRIDE;
- virtual bool GetBestCaptureFormat(const cricket::VideoFormat& desired,
- cricket::VideoFormat* best_format) OVERRIDE;
- virtual bool IsScreencast() const OVERRIDE;
-
- private:
- // Frame captured callback method.
- virtual void OnFrameCaptured(const scoped_refptr<media::VideoFrame>& frame);
-
- // State change callback, must be called on same thread as Start is called.
- void OnStateChange(RtcVideoCaptureDelegate::CaptureState state);
-
- const bool is_screencast_;
- scoped_refptr<RtcVideoCaptureDelegate> delegate_;
- VideoCaptureState state_;
- base::TimeDelta first_frame_timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(RtcVideoCapturer);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_RTC_VIDEO_CAPTURER_H_
diff --git a/chromium/content/renderer/media/rtc_video_decoder.cc b/chromium/content/renderer/media/rtc_video_decoder.cc
index 1c8d6008bca..3987767b353 100644
--- a/chromium/content/renderer/media/rtc_video_decoder.cc
+++ b/chromium/content/renderer/media/rtc_video_decoder.cc
@@ -9,13 +9,16 @@
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
-#include "base/safe_numerics.h"
+#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
+#include "base/synchronization/waitable_event.h"
#include "base/task_runner_util.h"
#include "content/child/child_thread.h"
#include "content/renderer/media/native_handle_impl.h"
-#include "media/base/bind_to_loop.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/filters/gpu_video_accelerator_factories.h"
+#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/webrtc/common_video/interface/texture_video_frame.h"
#include "third_party/webrtc/system_wrappers/interface/ref_count.h"
@@ -74,7 +77,6 @@ RTCVideoDecoder::BufferData::~BufferData() {}
RTCVideoDecoder::RTCVideoDecoder(
const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories)
: factories_(factories),
- vda_loop_proxy_(factories->GetMessageLoop()),
decoder_texture_target_(0),
next_picture_buffer_id_(0),
state_(UNINITIALIZED),
@@ -83,31 +85,13 @@ RTCVideoDecoder::RTCVideoDecoder(
next_bitstream_buffer_id_(0),
reset_bitstream_buffer_id_(ID_INVALID),
weak_factory_(this) {
- DCHECK(!vda_loop_proxy_->BelongsToCurrentThread());
-
- weak_this_ = weak_factory_.GetWeakPtr();
-
- base::WaitableEvent message_loop_async_waiter(false, false);
- // Waiting here is safe. The media thread is stopped in the child thread and
- // the child thread is blocked when VideoDecoderFactory::CreateVideoDecoder
- // runs.
- vda_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&RTCVideoDecoder::Initialize,
- base::Unretained(this),
- &message_loop_async_waiter));
- message_loop_async_waiter.Wait();
+ DCHECK(!factories_->GetTaskRunner()->BelongsToCurrentThread());
}
RTCVideoDecoder::~RTCVideoDecoder() {
DVLOG(2) << "~RTCVideoDecoder";
- // Destroy VDA and remove |this| from the observer if this is vda thread.
- if (vda_loop_proxy_->BelongsToCurrentThread()) {
- base::MessageLoop::current()->RemoveDestructionObserver(this);
- DestroyVDA();
- } else {
- // VDA should have been destroyed in WillDestroyCurrentMessageLoop.
- DCHECK(!vda_);
- }
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ DestroyVDA();
// Delete all shared memories.
STLDeleteElements(&available_shm_segments_);
@@ -125,6 +109,7 @@ RTCVideoDecoder::~RTCVideoDecoder() {
}
}
+// static
scoped_ptr<RTCVideoDecoder> RTCVideoDecoder::Create(
webrtc::VideoCodecType type,
const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories) {
@@ -140,14 +125,20 @@ scoped_ptr<RTCVideoDecoder> RTCVideoDecoder::Create(
return decoder.Pass();
}
+ base::WaitableEvent waiter(true, false);
decoder.reset(new RTCVideoDecoder(factories));
- decoder->vda_ =
- factories->CreateVideoDecodeAccelerator(profile, decoder.get()).Pass();
+ decoder->factories_->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoDecoder::CreateVDA,
+ base::Unretained(decoder.get()),
+ profile,
+ &waiter));
+ waiter.Wait();
// vda can be NULL if VP8 is not supported.
if (decoder->vda_ != NULL) {
decoder->state_ = INITIALIZED;
} else {
- factories->GetMessageLoop()->DeleteSoon(FROM_HERE, decoder.release());
+ factories->GetTaskRunner()->DeleteSoon(FROM_HERE, decoder.release());
}
return decoder.Pass();
}
@@ -168,11 +159,12 @@ int32_t RTCVideoDecoder::InitDecode(const webrtc::VideoCodec* codecSettings,
}
// Create some shared memory if the queue is empty.
if (available_shm_segments_.size() == 0) {
- vda_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&RTCVideoDecoder::CreateSHM,
- weak_this_,
- kMaxInFlightDecodes,
- kSharedMemorySegmentBytes));
+ factories_->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoDecoder::CreateSHM,
+ weak_factory_.GetWeakPtr(),
+ kMaxInFlightDecodes,
+ kSharedMemorySegmentBytes));
}
return RecordInitDecodeUMA(WEBRTC_VIDEO_CODEC_OK);
}
@@ -258,8 +250,10 @@ int32_t RTCVideoDecoder::Decode(
}
SaveToDecodeBuffers_Locked(inputImage, shm_buffer.Pass(), buffer_data);
- vda_loop_proxy_->PostTask(
- FROM_HERE, base::Bind(&RTCVideoDecoder::RequestBufferDecode, weak_this_));
+ factories_->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoDecoder::RequestBufferDecode,
+ weak_factory_.GetWeakPtr()));
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -292,21 +286,18 @@ int32_t RTCVideoDecoder::Reset() {
// If VDA is already resetting, no need to request the reset again.
if (state_ != RESETTING) {
state_ = RESETTING;
- vda_loop_proxy_->PostTask(
- FROM_HERE, base::Bind(&RTCVideoDecoder::ResetInternal, weak_this_));
+ factories_->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoDecoder::ResetInternal,
+ weak_factory_.GetWeakPtr()));
}
return WEBRTC_VIDEO_CODEC_OK;
}
-void RTCVideoDecoder::NotifyInitializeDone() {
- DVLOG(2) << "NotifyInitializeDone";
- NOTREACHED();
-}
-
void RTCVideoDecoder::ProvidePictureBuffers(uint32 count,
const gfx::Size& size,
uint32 texture_target) {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DVLOG(3) << "ProvidePictureBuffers. texture_target=" << texture_target;
if (!vda_)
@@ -315,8 +306,6 @@ void RTCVideoDecoder::ProvidePictureBuffers(uint32 count,
std::vector<uint32> texture_ids;
std::vector<gpu::Mailbox> texture_mailboxes;
decoder_texture_target_ = texture_target;
- // Discards the sync point returned here since PictureReady will imply that
- // the produce has already happened, and the texture is ready for use.
if (!factories_->CreateTextures(count,
size,
&texture_ids,
@@ -341,7 +330,7 @@ void RTCVideoDecoder::ProvidePictureBuffers(uint32 count,
void RTCVideoDecoder::DismissPictureBuffer(int32 id) {
DVLOG(3) << "DismissPictureBuffer. id=" << id;
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
std::map<int32, media::PictureBuffer>::iterator it =
assigned_picture_buffers_.find(id);
@@ -353,23 +342,18 @@ void RTCVideoDecoder::DismissPictureBuffer(int32 id) {
media::PictureBuffer buffer_to_dismiss = it->second;
assigned_picture_buffers_.erase(it);
- std::set<int32>::iterator at_display_it =
- picture_buffers_at_display_.find(id);
-
- if (at_display_it == picture_buffers_at_display_.end()) {
+ if (!picture_buffers_at_display_.count(id)) {
// We can delete the texture immediately as it's not being displayed.
factories_->DeleteTexture(buffer_to_dismiss.texture_id());
- } else {
- // Texture in display. Postpone deletion until after it's returned to us.
- bool inserted = dismissed_picture_buffers_
- .insert(std::make_pair(id, buffer_to_dismiss)).second;
- DCHECK(inserted);
+ return;
}
+ // Not destroying a texture in display in |picture_buffers_at_display_|.
+ // Postpone deletion until after it's returned to us.
}
void RTCVideoDecoder::PictureReady(const media::Picture& picture) {
DVLOG(3) << "PictureReady";
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
std::map<int32, media::PictureBuffer>::iterator it =
assigned_picture_buffers_.find(picture.picture_buffer_id());
@@ -388,7 +372,9 @@ void RTCVideoDecoder::PictureReady(const media::Picture& picture) {
scoped_refptr<media::VideoFrame> frame =
CreateVideoFrame(picture, pb, timestamp, width, height, size);
bool inserted =
- picture_buffers_at_display_.insert(picture.picture_buffer_id()).second;
+ picture_buffers_at_display_.insert(std::make_pair(
+ picture.picture_buffer_id(),
+ pb.texture_id())).second;
DCHECK(inserted);
// Create a WebRTC video frame.
@@ -407,6 +393,33 @@ void RTCVideoDecoder::PictureReady(const media::Picture& picture) {
}
}
+static void ReadPixelsSyncInner(
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ uint32 texture_id,
+ const gfx::Rect& visible_rect,
+ const SkBitmap& pixels,
+ base::WaitableEvent* event) {
+ factories->ReadPixels(texture_id, visible_rect, pixels);
+ event->Signal();
+}
+
+static void ReadPixelsSync(
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ uint32 texture_id,
+ const gfx::Rect& visible_rect,
+ const SkBitmap& pixels) {
+ base::WaitableEvent event(true, false);
+ if (!factories->GetTaskRunner()->PostTask(FROM_HERE,
+ base::Bind(&ReadPixelsSyncInner,
+ factories,
+ texture_id,
+ visible_rect,
+ pixels,
+ &event)))
+ return;
+ event.Wait();
+}
+
scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame(
const media::Picture& picture,
const media::PictureBuffer& pb,
@@ -415,34 +428,28 @@ scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame(
uint32_t height,
size_t size) {
gfx::Rect visible_rect(width, height);
- gfx::Size natural_size(width, height);
DCHECK(decoder_texture_target_);
// Convert timestamp from 90KHz to ms.
base::TimeDelta timestamp_ms = base::TimeDelta::FromInternalValue(
- base::checked_numeric_cast<uint64_t>(timestamp) * 1000 / 90);
+ base::checked_cast<uint64_t>(timestamp) * 1000 / 90);
return media::VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new media::VideoFrame::MailboxHolder(
- pb.texture_mailbox(),
- 0, // sync_point
- media::BindToCurrentLoop(
- base::Bind(&RTCVideoDecoder::ReusePictureBuffer,
- weak_this_,
- picture.picture_buffer_id())))),
- decoder_texture_target_,
+ make_scoped_ptr(new gpu::MailboxHolder(
+ pb.texture_mailbox(), decoder_texture_target_, 0)),
+ media::BindToCurrentLoop(base::Bind(&RTCVideoDecoder::ReleaseMailbox,
+ weak_factory_.GetWeakPtr(),
+ factories_,
+ picture.picture_buffer_id(),
+ pb.texture_id())),
pb.size(),
visible_rect,
- natural_size,
+ visible_rect.size(),
timestamp_ms,
- base::Bind(&media::GpuVideoAcceleratorFactories::ReadPixels,
- factories_,
- pb.texture_id(),
- natural_size),
- base::Closure());
+ base::Bind(&ReadPixelsSync, factories_, pb.texture_id(), visible_rect));
}
void RTCVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
DVLOG(3) << "NotifyEndOfBitstreamBuffer. id=" << id;
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
std::map<int32, SHMBuffer*>::iterator it =
bitstream_buffers_in_decoder_.find(id);
@@ -467,7 +474,7 @@ void RTCVideoDecoder::NotifyFlushDone() {
}
void RTCVideoDecoder::NotifyResetDone() {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DVLOG(3) << "NotifyResetDone";
if (!vda_)
@@ -483,7 +490,7 @@ void RTCVideoDecoder::NotifyResetDone() {
}
void RTCVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (!vda_)
return;
@@ -497,23 +504,8 @@ void RTCVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
state_ = DECODE_ERROR;
}
-void RTCVideoDecoder::WillDestroyCurrentMessageLoop() {
- DVLOG(2) << "WillDestroyCurrentMessageLoop";
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
- factories_->Abort();
- weak_factory_.InvalidateWeakPtrs();
- DestroyVDA();
-}
-
-void RTCVideoDecoder::Initialize(base::WaitableEvent* waiter) {
- DVLOG(2) << "Initialize";
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
- base::MessageLoop::current()->AddDestructionObserver(this);
- waiter->Signal();
-}
-
void RTCVideoDecoder::RequestBufferDecode() {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (!vda_)
return;
@@ -635,64 +627,88 @@ void RTCVideoDecoder::MovePendingBuffersToDecodeBuffers() {
}
void RTCVideoDecoder::ResetInternal() {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DVLOG(2) << "ResetInternal";
if (vda_)
vda_->Reset();
}
-void RTCVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id,
- uint32 sync_point) {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
- DVLOG(3) << "ReusePictureBuffer. id=" << picture_buffer_id;
+// static
+void RTCVideoDecoder::ReleaseMailbox(
+ base::WeakPtr<RTCVideoDecoder> decoder,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ int64 picture_buffer_id,
+ uint32 texture_id,
+ const std::vector<uint32>& release_sync_points) {
+ DCHECK(factories->GetTaskRunner()->BelongsToCurrentThread());
- if (!vda_)
- return;
+ for (size_t i = 0; i < release_sync_points.size(); i++)
+ factories->WaitSyncPoint(release_sync_points[i]);
- CHECK(!picture_buffers_at_display_.empty());
+ if (decoder) {
+ decoder->ReusePictureBuffer(picture_buffer_id);
+ return;
+ }
+ // It's the last chance to delete the texture after display,
+ // because RTCVideoDecoder was destructed.
+ factories->DeleteTexture(texture_id);
+}
- size_t num_erased = picture_buffers_at_display_.erase(picture_buffer_id);
- DCHECK(num_erased);
+void RTCVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id) {
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ DVLOG(3) << "ReusePictureBuffer. id=" << picture_buffer_id;
- std::map<int32, media::PictureBuffer>::iterator it =
- assigned_picture_buffers_.find(picture_buffer_id);
+ DCHECK(!picture_buffers_at_display_.empty());
+ PictureBufferTextureMap::iterator display_iterator =
+ picture_buffers_at_display_.find(picture_buffer_id);
+ uint32 texture_id = display_iterator->second;
+ DCHECK(display_iterator != picture_buffers_at_display_.end());
+ picture_buffers_at_display_.erase(display_iterator);
- if (it == assigned_picture_buffers_.end()) {
+ if (!assigned_picture_buffers_.count(picture_buffer_id)) {
// This picture was dismissed while in display, so we postponed deletion.
- it = dismissed_picture_buffers_.find(picture_buffer_id);
- DCHECK(it != dismissed_picture_buffers_.end());
- factories_->DeleteTexture(it->second.texture_id());
- dismissed_picture_buffers_.erase(it);
+ factories_->DeleteTexture(texture_id);
return;
}
- factories_->WaitSyncPoint(sync_point);
+ // DestroyVDA() might already have been called.
+ if (vda_)
+ vda_->ReusePictureBuffer(picture_buffer_id);
+}
- vda_->ReusePictureBuffer(picture_buffer_id);
+void RTCVideoDecoder::CreateVDA(media::VideoCodecProfile profile,
+ base::WaitableEvent* waiter) {
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ vda_ = factories_->CreateVideoDecodeAccelerator();
+ if (vda_ && !vda_->Initialize(profile, this))
+ vda_.release()->Destroy();
+ waiter->Signal();
}
void RTCVideoDecoder::DestroyTextures() {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
- std::map<int32, media::PictureBuffer>::iterator it;
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- for (it = assigned_picture_buffers_.begin();
- it != assigned_picture_buffers_.end();
+ // Not destroying PictureBuffers in |picture_buffers_at_display_| yet, since
+ // their textures may still be in use by the user of this RTCVideoDecoder.
+ for (PictureBufferTextureMap::iterator it =
+ picture_buffers_at_display_.begin();
+ it != picture_buffers_at_display_.end();
++it) {
- factories_->DeleteTexture(it->second.texture_id());
+ assigned_picture_buffers_.erase(it->first);
}
- assigned_picture_buffers_.clear();
- for (it = dismissed_picture_buffers_.begin();
- it != dismissed_picture_buffers_.end();
+ for (std::map<int32, media::PictureBuffer>::iterator it =
+ assigned_picture_buffers_.begin();
+ it != assigned_picture_buffers_.end();
++it) {
factories_->DeleteTexture(it->second.texture_id());
}
- dismissed_picture_buffers_.clear();
+ assigned_picture_buffers_.clear();
}
void RTCVideoDecoder::DestroyVDA() {
DVLOG(2) << "DestroyVDA";
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (vda_)
vda_.release()->Destroy();
DestroyTextures();
@@ -713,9 +729,12 @@ scoped_ptr<RTCVideoDecoder::SHMBuffer> RTCVideoDecoder::GetSHM_Locked(
// queue is almost empty.
if (num_shm_buffers_ < kMaxNumSharedMemorySegments &&
(ret == NULL || available_shm_segments_.size() <= 1)) {
- vda_loop_proxy_->PostTask(
+ factories_->GetTaskRunner()->PostTask(
FROM_HERE,
- base::Bind(&RTCVideoDecoder::CreateSHM, weak_this_, 1, min_size));
+ base::Bind(&RTCVideoDecoder::CreateSHM,
+ weak_factory_.GetWeakPtr(),
+ 1,
+ min_size));
}
return scoped_ptr<SHMBuffer>(ret);
}
@@ -725,7 +744,7 @@ void RTCVideoDecoder::PutSHM_Locked(scoped_ptr<SHMBuffer> shm_buffer) {
}
void RTCVideoDecoder::CreateSHM(int number, size_t min_size) {
- DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DVLOG(2) << "CreateSHM. size=" << min_size;
int number_to_allocate;
{
@@ -785,4 +804,9 @@ int32_t RTCVideoDecoder::RecordInitDecodeUMA(int32_t status) {
return status;
}
+void RTCVideoDecoder::DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent()
+ const {
+ DCHECK(factories_->GetTaskRunner()->BelongsToCurrentThread());
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_decoder.h b/chromium/content/renderer/media/rtc_video_decoder.h
index b0f967ab546..d1a04e5cd9b 100644
--- a/chromium/content/renderer/media/rtc_video_decoder.h
+++ b/chromium/content/renderer/media/rtc_video_decoder.h
@@ -6,6 +6,7 @@
#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_H_
#include <deque>
+#include <list>
#include <map>
#include <set>
#include <utility>
@@ -13,9 +14,7 @@
#include "base/basictypes.h"
#include "base/gtest_prod_util.h"
#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
#include "media/base/bitstream_buffer.h"
@@ -25,6 +24,7 @@
#include "third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
namespace base {
+class WaitableEvent;
class MessageLoopProxy;
};
@@ -43,8 +43,7 @@ namespace content {
// frames are delivered to WebRTC on |vda_message_loop_|.
class CONTENT_EXPORT RTCVideoDecoder
: NON_EXPORTED_BASE(public webrtc::VideoDecoder),
- public media::VideoDecodeAccelerator::Client,
- public base::MessageLoop::DestructionObserver {
+ public media::VideoDecodeAccelerator::Client {
public:
virtual ~RTCVideoDecoder();
@@ -76,7 +75,6 @@ class CONTENT_EXPORT RTCVideoDecoder
virtual int32_t Reset() OVERRIDE;
// VideoDecodeAccelerator::Client implementation.
- virtual void NotifyInitializeDone() OVERRIDE;
virtual void ProvidePictureBuffers(uint32 count,
const gfx::Size& size,
uint32 texture_target) OVERRIDE;
@@ -87,10 +85,6 @@ class CONTENT_EXPORT RTCVideoDecoder
virtual void NotifyResetDone() OVERRIDE;
virtual void NotifyError(media::VideoDecodeAccelerator::Error error) OVERRIDE;
- // base::DestructionObserver implementation. Called when |vda_message_loop_|
- // is stopped.
- virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
-
private:
class SHMBuffer;
// Metadata of a bitstream buffer.
@@ -112,12 +106,9 @@ class CONTENT_EXPORT RTCVideoDecoder
FRIEND_TEST_ALL_PREFIXES(RTCVideoDecoderTest, IsBufferAfterReset);
FRIEND_TEST_ALL_PREFIXES(RTCVideoDecoderTest, IsFirstBufferAfterReset);
- // The meessage loop of |factories| will be saved to |vda_loop_proxy_|.
RTCVideoDecoder(
const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories);
- void Initialize(base::WaitableEvent* waiter);
-
// Requests a buffer to be decoded by VDA.
void RequestBufferDecode();
@@ -155,8 +146,18 @@ class CONTENT_EXPORT RTCVideoDecoder
// Resets VDA.
void ResetInternal();
+ // Static method is to allow it to run even after RVD is deleted.
+ static void ReleaseMailbox(
+ base::WeakPtr<RTCVideoDecoder> decoder,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ int64 picture_buffer_id,
+ uint32 texture_id,
+ const std::vector<uint32>& release_sync_points);
// Tells VDA that a picture buffer can be recycled.
- void ReusePictureBuffer(int64 picture_buffer_id, uint32 sync_point);
+ void ReusePictureBuffer(int64 picture_buffer_id);
+
+ // Create |vda_| on |vda_loop_proxy_|.
+ void CreateVDA(media::VideoCodecProfile profile, base::WaitableEvent* waiter);
void DestroyTextures();
void DestroyVDA();
@@ -184,6 +185,9 @@ class CONTENT_EXPORT RTCVideoDecoder
// Records the result of InitDecode to UMA and returns |status|.
int32_t RecordInitDecodeUMA(int32_t status);
+ // Assert the contract that this class is operated on the right thread.
+ void DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent() const;
+
enum State {
UNINITIALIZED, // The decoder has not initialized.
INITIALIZED, // The decoder has initialized.
@@ -201,14 +205,8 @@ class CONTENT_EXPORT RTCVideoDecoder
// The size of the incoming video frames.
gfx::Size frame_size_;
- // Weak pointer to this, which can be dereferenced only on |vda_loop_proxy_|.
- base::WeakPtr<RTCVideoDecoder> weak_this_;
-
scoped_refptr<media::GpuVideoAcceleratorFactories> factories_;
- // The message loop to run callbacks on. This is from |factories_|.
- scoped_refptr<base::MessageLoopProxy> vda_loop_proxy_;
-
// The texture target used for decoded pictures.
uint32 decoder_texture_target_;
@@ -222,13 +220,12 @@ class CONTENT_EXPORT RTCVideoDecoder
// A map from picture buffer IDs to texture-backed picture buffers.
std::map<int32, media::PictureBuffer> assigned_picture_buffers_;
- // Picture buffers that are dismissed but not deleted yet.
- std::map<int32, media::PictureBuffer> dismissed_picture_buffers_;
-
// PictureBuffers given to us by VDA via PictureReady, which we sent forward
// as VideoFrames to be rendered via read_cb_, and which will be returned
// to us via ReusePictureBuffer.
- std::set<int32> picture_buffers_at_display_;
+ typedef std::map<int32 /* picture_buffer_id */, uint32 /* texture_id */>
+ PictureBufferTextureMap;
+ PictureBufferTextureMap picture_buffers_at_display_;
// The id that will be given to the next picture buffer.
int32 next_picture_buffer_id_;
@@ -268,8 +265,8 @@ class CONTENT_EXPORT RTCVideoDecoder
// Release has been called. Guarded by |lock_|.
int32 reset_bitstream_buffer_id_;
- // Factory used to populate |weak_this_|. Must be destroyed, or invalidated,
- // on |vda_loop_proxy_|.
+ // Must be destroyed, or invalidated, on |vda_loop_proxy_|
+ // NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<RTCVideoDecoder> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(RTCVideoDecoder);
diff --git a/chromium/content/renderer/media/rtc_video_decoder_bridge_tv.cc b/chromium/content/renderer/media/rtc_video_decoder_bridge_tv.cc
deleted file mode 100644
index 9b0007e9d4c..00000000000
--- a/chromium/content/renderer/media/rtc_video_decoder_bridge_tv.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/rtc_video_decoder_bridge_tv.h"
-
-#include <queue>
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/singleton.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "base/time/time.h"
-#include "content/renderer/media/rtc_video_decoder_factory_tv.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/decoder_buffer.h"
-#include "third_party/libjingle/source/talk/base/ratetracker.h"
-
-namespace content {
-
-RTCVideoDecoderBridgeTv::RTCVideoDecoderBridgeTv(
- RTCVideoDecoderFactoryTv* factory)
- : factory_(factory),
- is_initialized_(false),
- first_frame_(true) {}
-
-RTCVideoDecoderBridgeTv::~RTCVideoDecoderBridgeTv() {}
-
-int32_t RTCVideoDecoderBridgeTv::InitDecode(
- const webrtc::VideoCodec* codec_settings,
- int32_t number_of_cores) {
- // We don't support non-VP8 codec, feedback mode, nor double-initialization
- if (codec_settings->codecType != webrtc::kVideoCodecVP8 ||
- codec_settings->codecSpecific.VP8.feedbackModeOn || is_initialized_)
- return WEBRTC_VIDEO_CODEC_ERROR;
- size_ = gfx::Size(codec_settings->width, codec_settings->height);
-
- is_initialized_ = true;
- first_frame_ = true;
- factory_->InitializeStream(size_);
-
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t RTCVideoDecoderBridgeTv::Decode(
- const webrtc::EncodedImage& input_image,
- bool missing_frames,
- const webrtc::RTPFragmentationHeader* fragmentation,
- const webrtc::CodecSpecificInfo* codec_specific_info,
- int64_t render_time_ms) {
- // Unlike the SW decoder in libvpx, hw decoder can not handle broken frames.
- // Here, we return an error in order to request a key frame.
- if (missing_frames || !input_image._completeFrame)
- return WEBRTC_VIDEO_CODEC_ERROR;
-
- if (!is_initialized_)
- return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
-
- if (first_frame_) {
- // If the first frame is not a key frame, return an error to request a key
- // frame.
- if (input_image._frameType != webrtc::kKeyFrame)
- return WEBRTC_VIDEO_CODEC_ERROR;
-
- // Google TV expects timestamp from 0, so we store the initial timestamp as
- // an offset and subtract the value from every timestamps to meet the
- // expectation.
- timestamp_offset_millis_ = render_time_ms;
- }
- first_frame_ = false;
- gfx::Size new_size;
- if (input_image._frameType == webrtc::kKeyFrame &&
- input_image._encodedWidth != 0 && input_image._encodedHeight != 0) {
- // Only a key frame has a meaningful size.
- new_size.SetSize(input_image._encodedWidth, input_image._encodedHeight);
- if (size_ == new_size)
- new_size = gfx::Size();
- else
- size_ = new_size;
- }
- // |input_image_| may be destroyed after this call, so we make a copy of the
- // buffer so that we can queue the buffer asynchronously.
- scoped_refptr<media::DecoderBuffer> buffer =
- media::DecoderBuffer::CopyFrom(input_image._buffer, input_image._length);
- if (render_time_ms != -1) {
- buffer->set_timestamp(base::TimeDelta::FromMilliseconds(
- render_time_ms - timestamp_offset_millis_));
- }
-
- factory_->QueueBuffer(buffer, new_size);
-
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t RTCVideoDecoderBridgeTv::RegisterDecodeCompleteCallback(
- webrtc::DecodedImageCallback* callback) {
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t RTCVideoDecoderBridgeTv::Release() {
- is_initialized_ = false;
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t RTCVideoDecoderBridgeTv::Reset() {
- first_frame_ = true;
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_decoder_bridge_tv.h b/chromium/content/renderer/media/rtc_video_decoder_bridge_tv.h
deleted file mode 100644
index 2ab6fde0234..00000000000
--- a/chromium/content/renderer/media/rtc_video_decoder_bridge_tv.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_BRIDGE_TV_H_
-#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_BRIDGE_TV_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "content/common/content_export.h"
-#include "third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "ui/gfx/size.h"
-
-namespace content {
-
-class MediaStreamDependencyFactory;
-class RTCVideoDecoderFactoryTv;
-
-// An object shared between WebMediaPlayerAndroid and WebRTC Video Engine.
-// Note that this class provides the first remote stream.
-class CONTENT_EXPORT RTCVideoDecoderBridgeTv
- : NON_EXPORTED_BASE(public webrtc::VideoDecoder) {
- public:
- explicit RTCVideoDecoderBridgeTv(RTCVideoDecoderFactoryTv* factory);
- virtual ~RTCVideoDecoderBridgeTv();
-
- // webrtc::VideoDecoder implementation.
- virtual int32_t InitDecode(const webrtc::VideoCodec* codec_settings,
- int32_t number_of_cores) OVERRIDE;
- virtual int32_t Decode(
- const webrtc::EncodedImage& input_image,
- bool missing_frames,
- const webrtc::RTPFragmentationHeader* fragmentation,
- const webrtc::CodecSpecificInfo* codec_specific_info,
- int64_t render_time_ms) OVERRIDE;
- virtual int32_t RegisterDecodeCompleteCallback(
- webrtc::DecodedImageCallback* callback) OVERRIDE;
- virtual int32_t Release() OVERRIDE;
- virtual int32_t Reset() OVERRIDE;
-
- private:
- // The factory outlives this object, so weak pointer is fine.
- RTCVideoDecoderFactoryTv* factory_;
-
- gfx::Size size_;
- bool is_initialized_;
- bool first_frame_;
- int64_t timestamp_offset_millis_;
-
- DISALLOW_COPY_AND_ASSIGN(RTCVideoDecoderBridgeTv);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_BRIDGE_TV_H_
diff --git a/chromium/content/renderer/media/rtc_video_decoder_factory.cc b/chromium/content/renderer/media/rtc_video_decoder_factory.cc
index 57b6a580c3a..f59a38fa9b3 100644
--- a/chromium/content/renderer/media/rtc_video_decoder_factory.cc
+++ b/chromium/content/renderer/media/rtc_video_decoder_factory.cc
@@ -6,13 +6,13 @@
#include "base/location.h"
#include "base/memory/scoped_ptr.h"
-#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
#include "content/renderer/media/rtc_video_decoder.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
namespace content {
RTCVideoDecoderFactory::RTCVideoDecoderFactory(
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories)
: gpu_factories_(gpu_factories) {
DVLOG(2) << "RTCVideoDecoderFactory";
}
@@ -24,19 +24,15 @@ RTCVideoDecoderFactory::~RTCVideoDecoderFactory() {
webrtc::VideoDecoder* RTCVideoDecoderFactory::CreateVideoDecoder(
webrtc::VideoCodecType type) {
DVLOG(2) << "CreateVideoDecoder";
- // GpuVideoAcceleratorFactories is not thread safe. It cannot be shared
- // by different decoders. This method runs on Chrome_libJingle_WorkerThread
- // and the child thread is blocked while this runs. We cannot create new gpu
- // factories here. Clone one instead.
scoped_ptr<RTCVideoDecoder> decoder =
- RTCVideoDecoder::Create(type, gpu_factories_->Clone());
+ RTCVideoDecoder::Create(type, gpu_factories_);
return decoder.release();
}
void RTCVideoDecoderFactory::DestroyVideoDecoder(
webrtc::VideoDecoder* decoder) {
DVLOG(2) << "DestroyVideoDecoder";
- gpu_factories_->GetMessageLoop()->DeleteSoon(FROM_HERE, decoder);
+ gpu_factories_->GetTaskRunner()->DeleteSoon(FROM_HERE, decoder);
}
} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_decoder_factory.h b/chromium/content/renderer/media/rtc_video_decoder_factory.h
index f7a42a3cd47..d40203f002d 100644
--- a/chromium/content/renderer/media/rtc_video_decoder_factory.h
+++ b/chromium/content/renderer/media/rtc_video_decoder_factory.h
@@ -13,17 +13,20 @@
namespace webrtc {
class VideoDecoder;
-}
+} // namespace webrtc
+
+namespace media {
+class GpuVideoAcceleratorFactories;
+} // namespace media
namespace content {
-class RendererGpuVideoAcceleratorFactories;
// TODO(wuchengli): add unittest.
class CONTENT_EXPORT RTCVideoDecoderFactory
: NON_EXPORTED_BASE(public cricket::WebRtcVideoDecoderFactory) {
public:
explicit RTCVideoDecoderFactory(
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories);
virtual ~RTCVideoDecoderFactory();
// Runs on Chrome_libJingle_WorkerThread. The child thread is blocked while
@@ -36,7 +39,7 @@ class CONTENT_EXPORT RTCVideoDecoderFactory
virtual void DestroyVideoDecoder(webrtc::VideoDecoder* decoder) OVERRIDE;
private:
- scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+ scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
DISALLOW_COPY_AND_ASSIGN(RTCVideoDecoderFactory);
};
diff --git a/chromium/content/renderer/media/rtc_video_decoder_factory_tv.cc b/chromium/content/renderer/media/rtc_video_decoder_factory_tv.cc
deleted file mode 100644
index 64c14adf313..00000000000
--- a/chromium/content/renderer/media/rtc_video_decoder_factory_tv.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/rtc_video_decoder_factory_tv.h"
-
-#include "base/callback_helpers.h"
-#include "content/renderer/media/rtc_video_decoder_bridge_tv.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/video_decoder_config.h"
-#include "third_party/libjingle/source/talk/base/ratetracker.h"
-
-using media::DemuxerStream;
-
-namespace content {
-
-// RTCDemuxerStream ------------------------------------------------------------
-
-class RTCDemuxerStream : public DemuxerStream {
- public:
- explicit RTCDemuxerStream(const gfx::Size& size);
- virtual ~RTCDemuxerStream();
-
- // DemuxerStream implementation.
- virtual void Read(const ReadCB& read_cb) OVERRIDE;
- virtual media::AudioDecoderConfig audio_decoder_config() OVERRIDE;
- virtual media::VideoDecoderConfig video_decoder_config() OVERRIDE;
- virtual Type type() OVERRIDE;
- virtual void EnableBitstreamConverter() OVERRIDE;
-
- void QueueBuffer(scoped_refptr<media::DecoderBuffer> buffer,
- const gfx::Size& new_size);
- void Destroy();
-
- private:
- struct BufferEntry {
- BufferEntry(const scoped_refptr<media::DecoderBuffer>& decoder_buffer_param,
- const gfx::Size& new_size_param)
- : decoder_buffer(decoder_buffer_param),
- new_size(new_size_param) {}
-
- scoped_refptr<media::DecoderBuffer> decoder_buffer;
- // When |!new_size.isEmpty()|, it means that config change with new size
- // |new_size| happened.
- gfx::Size new_size;
- };
-
- void RunReadCallback_Locked();
-
- base::Lock lock_;
- bool is_destroyed_;
- std::queue<BufferEntry> buffer_queue_;
- ReadCB read_cb_;
-
- media::AudioDecoderConfig dummy_audio_decoder_config_;
- media::VideoDecoderConfig video_decoder_config_;
- talk_base::RateTracker frame_rate_tracker_;
-};
-
-RTCDemuxerStream::RTCDemuxerStream(const gfx::Size& size)
- : is_destroyed_(false),
- video_decoder_config_(media::kCodecVP8,
- media::VP8PROFILE_MAIN,
- media::VideoFrame::NATIVE_TEXTURE,
- size,
- gfx::Rect(size),
- size,
- NULL,
- 0,
- false) {}
-
-RTCDemuxerStream::~RTCDemuxerStream() { DCHECK(is_destroyed_); }
-
-media::AudioDecoderConfig RTCDemuxerStream::audio_decoder_config() {
- NOTIMPLEMENTED() << "Does not support audio.";
- return dummy_audio_decoder_config_;
-}
-
-media::VideoDecoderConfig RTCDemuxerStream::video_decoder_config() {
- base::AutoLock lock(lock_);
- return video_decoder_config_;
-}
-
-DemuxerStream::Type RTCDemuxerStream::type() { return DemuxerStream::VIDEO; }
-
-void RTCDemuxerStream::EnableBitstreamConverter() { NOTREACHED(); }
-
-void RTCDemuxerStream::QueueBuffer(scoped_refptr<media::DecoderBuffer> buffer,
- const gfx::Size& new_size) {
- base::AutoLock lock(lock_);
- if (is_destroyed_)
- return;
- buffer_queue_.push(BufferEntry(buffer, new_size));
- if (buffer)
- frame_rate_tracker_.Update(1);
- DVLOG(1) << "frame rate received : " << frame_rate_tracker_.units_second();
- RunReadCallback_Locked();
-}
-
-void RTCDemuxerStream::Read(const ReadCB& read_cb) {
- base::AutoLock lock(lock_);
- DCHECK(read_cb_.is_null());
- if (is_destroyed_) {
- media::BindToLoop(base::MessageLoopProxy::current(), read_cb)
- .Run(DemuxerStream::kAborted, NULL);
- return;
- }
- read_cb_ = media::BindToLoop(base::MessageLoopProxy::current(), read_cb);
- RunReadCallback_Locked();
-}
-
-void RTCDemuxerStream::Destroy() {
- base::AutoLock lock(lock_);
- DCHECK(!is_destroyed_);
- is_destroyed_ = true;
- if (!read_cb_.is_null())
- base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kAborted, NULL);
- while (!buffer_queue_.empty())
- buffer_queue_.pop();
-}
-
-void RTCDemuxerStream::RunReadCallback_Locked() {
- if (read_cb_.is_null() || buffer_queue_.empty())
- return;
-
- BufferEntry& front = buffer_queue_.front();
- if (!front.new_size.IsEmpty()) {
- // No VideoFrame actually reaches GL renderer in Google TV case. We just
- // make coded_size == visible_rect == natural_size here.
- video_decoder_config_.Initialize(media::kCodecVP8,
- media::VP8PROFILE_MAIN,
- media::VideoFrame::NATIVE_TEXTURE,
- front.new_size,
- gfx::Rect(front.new_size),
- front.new_size,
- NULL,
- 0,
- false,
- false);
- base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kConfigChanged, NULL);
- front.new_size.SetSize(0, 0);
- return;
- }
- base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kOk, front.decoder_buffer);
- buffer_queue_.pop();
-}
-
-// RTCVideoDecoderFactoryTv ----------------------------------------------------
-
-RTCVideoDecoderFactoryTv::RTCVideoDecoderFactoryTv() : is_acquired_(false) {}
-RTCVideoDecoderFactoryTv::~RTCVideoDecoderFactoryTv() {}
-
-webrtc::VideoDecoder* RTCVideoDecoderFactoryTv::CreateVideoDecoder(
- webrtc::VideoCodecType type) {
- base::AutoLock lock(lock_);
- // One decoder at a time!
- if (decoder_)
- return NULL;
- // Only VP8 is supported --- returning NULL will make WebRTC fall back to SW
- // decoder.
- if (type != webrtc::kVideoCodecVP8)
- return NULL;
- decoder_.reset(new RTCVideoDecoderBridgeTv(this));
- return decoder_.get();
-}
-
-void RTCVideoDecoderFactoryTv::DestroyVideoDecoder(
- webrtc::VideoDecoder* decoder) {
- base::AutoLock lock(lock_);
- DCHECK_EQ(decoder_.get(), decoder);
- decoder_.reset();
-}
-
-bool RTCVideoDecoderFactoryTv::AcquireDemuxer() {
- base::AutoLock lock(lock_);
- if (is_acquired_)
- return false;
- is_acquired_ = true;
- return true;
-}
-
-void RTCVideoDecoderFactoryTv::ReleaseDemuxer() {
- base::AutoLock lock(lock_);
- DCHECK(is_acquired_);
- is_acquired_ = false;
- // Clean up internal state as a demuxer.
- init_cb_.Reset();
- if (stream_) {
- stream_->Destroy();
- stream_.reset();
- }
-}
-
-void RTCVideoDecoderFactoryTv::Initialize(media::DemuxerHost* /*host*/,
- const media::PipelineStatusCB& cb,
- bool /*enable_text_tracks*/) {
- base::AutoLock lock(lock_);
- init_cb_ = media::BindToLoop(base::MessageLoopProxy::current(), cb);
- if (stream_)
- base::ResetAndReturn(&init_cb_).Run(media::PIPELINE_OK);
-}
-
-void RTCVideoDecoderFactoryTv::Seek(base::TimeDelta time,
- const media::PipelineStatusCB& status_cb) {
- DCHECK(!status_cb.is_null());
- status_cb.Run(media::PIPELINE_OK);
-}
-
-void RTCVideoDecoderFactoryTv::Stop(const base::Closure& callback) {
- DCHECK(!callback.is_null());
- callback.Run();
-}
-
-void RTCVideoDecoderFactoryTv::OnAudioRendererDisabled() {
-}
-
-DemuxerStream* RTCVideoDecoderFactoryTv::GetStream(DemuxerStream::Type type) {
- base::AutoLock lock(lock_);
- if (type == DemuxerStream::VIDEO)
- return stream_.get();
- return NULL;
-}
-
-base::TimeDelta RTCVideoDecoderFactoryTv::GetStartTime() const {
- return base::TimeDelta();
-}
-
-void RTCVideoDecoderFactoryTv::InitializeStream(const gfx::Size& size) {
- base::AutoLock lock(lock_);
- DCHECK(!stream_);
- stream_.reset(new RTCDemuxerStream(size));
- if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(media::PIPELINE_OK);
-}
-
-void RTCVideoDecoderFactoryTv::QueueBuffer(
- scoped_refptr<media::DecoderBuffer> buffer,
- const gfx::Size& new_size) {
- base::AutoLock lock(lock_);
- DCHECK(stream_);
- stream_->QueueBuffer(buffer, new_size);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_decoder_factory_tv.h b/chromium/content/renderer/media/rtc_video_decoder_factory_tv.h
deleted file mode 100644
index cd51cdcf862..00000000000
--- a/chromium/content/renderer/media/rtc_video_decoder_factory_tv.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_FACTORY_TV_H_
-#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_FACTORY_TV_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
-#include "content/common/content_export.h"
-#include "media/base/demuxer.h"
-#include "third_party/libjingle/source/talk/media/webrtc/webrtcvideodecoderfactory.h"
-#include "ui/gfx/size.h"
-
-namespace webrtc {
-class VideoDecoder;
-}
-
-namespace content {
-
-class MediaStreamDependencyFactory;
-class RTCDemuxerStream;
-class RTCVideoDecoderBridgeTv;
-
-// A factory object generating |RTCVideoDecoderBridgeTv| objects. This object
-// also functions as a |media::Demuxer| object to receive encoded streams from
-// the |RTCVideoDecoderBridgeTv| object (which inherits from
-// |webrtc::VideoDecoder|).
-class CONTENT_EXPORT RTCVideoDecoderFactoryTv
- : NON_EXPORTED_BASE(public cricket::WebRtcVideoDecoderFactory),
- public media::Demuxer {
- public:
- RTCVideoDecoderFactoryTv();
- virtual ~RTCVideoDecoderFactoryTv();
-
- // cricket::WebRtcVideoDecoderFactory implementation.
- virtual webrtc::VideoDecoder* CreateVideoDecoder(
- webrtc::VideoCodecType type) OVERRIDE;
- virtual void DestroyVideoDecoder(webrtc::VideoDecoder* decoder) OVERRIDE;
-
- // Acquires and releases the demuxer functionality of this object. Only one
- // client object can access demuxer functionality at a time. No calls to
- // |media::Demuxer| implementations should be made without acquiring it first.
- bool AcquireDemuxer();
- void ReleaseDemuxer();
-
- // media::Demuxer implementation.
- virtual void Initialize(media::DemuxerHost* host,
- const media::PipelineStatusCB& cb,
- bool enable_text_tracks) OVERRIDE;
- virtual void Seek(base::TimeDelta time,
- const media::PipelineStatusCB& status_cb) OVERRIDE;
- virtual void Stop(const base::Closure& callback) OVERRIDE;
- virtual void OnAudioRendererDisabled() OVERRIDE;
- virtual media::DemuxerStream* GetStream(
- media::DemuxerStream::Type type) OVERRIDE;
- virtual base::TimeDelta GetStartTime() const OVERRIDE;
-
- // For RTCVideoDecoderBridgeTv to talk to RTCDemuxerStream.
- void InitializeStream(const gfx::Size& size);
- void QueueBuffer(scoped_refptr<media::DecoderBuffer> buffer,
- const gfx::Size& size);
-
- private:
- // All private variables are lock protected.
- base::Lock lock_;
- scoped_ptr<RTCVideoDecoderBridgeTv> decoder_;
-
- media::PipelineStatusCB init_cb_;
- scoped_ptr<RTCDemuxerStream> stream_;
-
- bool is_acquired_;
-
- DISALLOW_COPY_AND_ASSIGN(RTCVideoDecoderFactoryTv);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_RTC_VIDEO_DECODER_FACTORY_TV_H_
diff --git a/chromium/content/renderer/media/rtc_video_decoder_factory_tv_unittest.cc b/chromium/content/renderer/media/rtc_video_decoder_factory_tv_unittest.cc
deleted file mode 100644
index 31b3143488a..00000000000
--- a/chromium/content/renderer/media/rtc_video_decoder_factory_tv_unittest.cc
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/synchronization/waitable_event.h"
-#include "base/task_runner_util.h"
-#include "base/threading/thread.h"
-#include "content/renderer/media/rtc_video_decoder_factory_tv.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/video_decoder_config.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
-#include "ui/gfx/rect.h"
-
-using ::testing::_;
-using ::testing::Return;
-
-namespace content {
-
-class RTCVideoDecoderFactoryTvTest : public ::testing::Test {
- public:
- RTCVideoDecoderFactoryTvTest()
- : factory_(new RTCVideoDecoderFactoryTv),
- decoder_(NULL),
- is_demuxer_acquired_(false),
- video_stream_(NULL),
- size_(1280, 720),
- input_image_(&data_, sizeof(data_), sizeof(data_)),
- data_('a'),
- read_event_(false, false),
- decoder_thread_("Test decoder thread"),
- decoder_thread_event_(false, false) {
- memset(&codec_, 0, sizeof(codec_));
- message_loop_proxy_ = base::MessageLoopProxy::current();
- input_image_._frameType = webrtc::kKeyFrame;
- input_image_._encodedWidth = size_.width();
- input_image_._encodedHeight = size_.height();
- input_image_._completeFrame = true;
- decoder_thread_.Start();
- }
-
- virtual ~RTCVideoDecoderFactoryTvTest() {
- if (is_demuxer_acquired_) {
- factory_->ReleaseDemuxer();
- is_demuxer_acquired_ = false;
- }
- if (decoder_) {
- factory_->DestroyVideoDecoder(decoder_);
- decoder_ = NULL;
- }
-
- decoder_thread_.Stop();
- }
-
- void ReadCallback(media::DemuxerStream::Status status,
- const scoped_refptr<media::DecoderBuffer>& decoder_buffer) {
- switch (status) {
- case media::DemuxerStream::kOk:
- EXPECT_TRUE(decoder_buffer);
- break;
- case media::DemuxerStream::kConfigChanged:
- case media::DemuxerStream::kAborted:
- EXPECT_FALSE(decoder_buffer);
- break;
- }
- last_decoder_buffer_ = decoder_buffer;
- read_event_.Signal();
- }
-
- void ExpectEqualsAndSignal(int32_t expected, int32_t actual) {
- EXPECT_EQ(expected, actual);
- decoder_thread_event_.Signal();
- }
-
- void ExpectNotEqualsAndSignal(int32_t unexpected, int32_t actual) {
- EXPECT_NE(unexpected, actual);
- decoder_thread_event_.Signal();
- }
-
- protected:
- base::Callback<void(int32_t)> BindExpectEquals(int32_t expected) {
- return base::Bind(&RTCVideoDecoderFactoryTvTest::ExpectEqualsAndSignal,
- base::Unretained(this),
- expected);
- }
-
- base::Callback<void(int32_t)> BindExpectNotEquals(int32_t unexpected) {
- return base::Bind(&RTCVideoDecoderFactoryTvTest::ExpectNotEqualsAndSignal,
- base::Unretained(this),
- unexpected);
- }
-
- base::Callback<int32_t(void)> BindInitDecode(const webrtc::VideoCodec* codec,
- int32_t num_cores) {
- return base::Bind(&webrtc::VideoDecoder::InitDecode,
- base::Unretained(decoder_),
- codec,
- num_cores);
- }
-
- base::Callback<int32_t(void)> BindDecode(
- const webrtc::EncodedImage& input_image,
- bool missing_frames,
- const webrtc::RTPFragmentationHeader* fragmentation,
- const webrtc::CodecSpecificInfo* info,
- int64_t render_time_ms) {
- return base::Bind(&webrtc::VideoDecoder::Decode,
- base::Unretained(decoder_),
- input_image,
- missing_frames,
- fragmentation,
- info,
- render_time_ms);
- }
-
- void CreateDecoderAndAcquireDemuxer() {
- decoder_ = factory_->CreateVideoDecoder(webrtc::kVideoCodecVP8);
- ASSERT_TRUE(decoder_);
- ASSERT_TRUE(factory_->AcquireDemuxer());
- is_demuxer_acquired_ = true;
- }
-
- void InitDecode() {
- codec_.codecType = webrtc::kVideoCodecVP8;
- codec_.width = size_.width();
- codec_.height = size_.height();
- base::PostTaskAndReplyWithResult(decoder_thread_.message_loop_proxy(),
- FROM_HERE,
- BindInitDecode(&codec_, 1),
- BindExpectEquals(WEBRTC_VIDEO_CODEC_OK));
- decoder_thread_event_.Wait();
- base::PostTaskAndReplyWithResult(
- decoder_thread_.message_loop_proxy(),
- FROM_HERE,
- base::Bind(&webrtc::VideoDecoder::RegisterDecodeCompleteCallback,
- base::Unretained(decoder_),
- &decode_complete_callback_),
- BindExpectEquals(WEBRTC_VIDEO_CODEC_OK));
- decoder_thread_event_.Wait();
- }
-
- void GetVideoStream() {
- video_stream_ = factory_->GetStream(media::DemuxerStream::VIDEO);
- ASSERT_TRUE(video_stream_);
- EXPECT_EQ(media::kCodecVP8, video_stream_->video_decoder_config().codec());
- EXPECT_EQ(size_, video_stream_->video_decoder_config().coded_size());
- EXPECT_EQ(gfx::Rect(size_),
- video_stream_->video_decoder_config().visible_rect());
- EXPECT_EQ(size_, video_stream_->video_decoder_config().natural_size());
- }
-
- void PostDecodeAndWait(int32_t expected,
- const webrtc::EncodedImage& input_image,
- bool missing_frames,
- const webrtc::RTPFragmentationHeader* fragmentation,
- const webrtc::CodecSpecificInfo* info,
- int64_t render_time_ms) {
- base::PostTaskAndReplyWithResult(
- decoder_thread_.message_loop_proxy(),
- FROM_HERE,
- BindDecode(
- input_image, missing_frames, fragmentation, info, render_time_ms),
- BindExpectEquals(expected));
- decoder_thread_event_.Wait();
- }
-
- RTCVideoDecoderFactoryTv* factory_;
- webrtc::VideoDecoder* decoder_;
- bool is_demuxer_acquired_;
- base::MessageLoopProxy* message_loop_proxy_;
- media::DemuxerStream* video_stream_;
- webrtc::VideoCodec codec_;
- gfx::Size size_;
- webrtc::EncodedImage input_image_;
- unsigned char data_;
- webrtc::MockDecodedImageCallback decode_complete_callback_;
- base::WaitableEvent read_event_;
- base::Thread decoder_thread_;
- base::WaitableEvent decoder_thread_event_;
- scoped_refptr<media::DecoderBuffer> last_decoder_buffer_;
-};
-
-TEST_F(RTCVideoDecoderFactoryTvTest, CreateAndDestroyDecoder) {
- // Only VP8 decoder is supported.
- ASSERT_FALSE(factory_->CreateVideoDecoder(webrtc::kVideoCodecI420));
- decoder_ = factory_->CreateVideoDecoder(webrtc::kVideoCodecVP8);
- ASSERT_TRUE(decoder_);
- // Only one decoder at a time will be created.
- ASSERT_FALSE(factory_->CreateVideoDecoder(webrtc::kVideoCodecVP8));
- factory_->DestroyVideoDecoder(decoder_);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, AcquireDemuxerAfterCreateDecoder) {
- decoder_ = factory_->CreateVideoDecoder(webrtc::kVideoCodecVP8);
- ASSERT_TRUE(decoder_);
- ASSERT_TRUE(factory_->AcquireDemuxer());
- is_demuxer_acquired_ = true;
- // Demuxer can be acquired only once.
- ASSERT_FALSE(factory_->AcquireDemuxer());
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, AcquireDemuxerBeforeCreateDecoder) {
- ASSERT_TRUE(factory_->AcquireDemuxer());
- is_demuxer_acquired_ = true;
- decoder_ = factory_->CreateVideoDecoder(webrtc::kVideoCodecVP8);
- ASSERT_TRUE(decoder_);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, InitDecodeReturnsErrorOnNonVP8Codec) {
- CreateDecoderAndAcquireDemuxer();
- codec_.codecType = webrtc::kVideoCodecI420;
- base::PostTaskAndReplyWithResult(decoder_thread_.message_loop_proxy(),
- FROM_HERE,
- BindInitDecode(&codec_, 1),
- BindExpectNotEquals(WEBRTC_VIDEO_CODEC_OK));
- decoder_thread_event_.Wait();
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, InitDecodeReturnsErrorOnFeedbackMode) {
- CreateDecoderAndAcquireDemuxer();
- codec_.codecType = webrtc::kVideoCodecVP8;
- codec_.codecSpecific.VP8.feedbackModeOn = true;
- base::PostTaskAndReplyWithResult(decoder_thread_.message_loop_proxy(),
- FROM_HERE,
- BindInitDecode(&codec_, 1),
- BindExpectNotEquals(WEBRTC_VIDEO_CODEC_OK));
- decoder_thread_event_.Wait();
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodeReturnsErrorBeforeInitDecode) {
- CreateDecoderAndAcquireDemuxer();
- PostDecodeAndWait(
- WEBRTC_VIDEO_CODEC_UNINITIALIZED, input_image_, false, NULL, NULL, 0);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodeReturnsErrorOnDamagedBitstream) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- input_image_._completeFrame = false;
- PostDecodeAndWait(
- WEBRTC_VIDEO_CODEC_ERROR, input_image_, false, NULL, NULL, 0);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodeReturnsErrorOnMissingFrames) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- PostDecodeAndWait(
- WEBRTC_VIDEO_CODEC_ERROR, input_image_, true, NULL, NULL, 0);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, GetNonVideoStreamFails) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- EXPECT_FALSE(factory_->GetStream(media::DemuxerStream::AUDIO));
- EXPECT_FALSE(factory_->GetStream(media::DemuxerStream::UNKNOWN));
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, GetVideoStreamSucceeds) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- GetVideoStream();
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodeReturnsErrorOnNonKeyFrameAtFirst) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- GetVideoStream();
- input_image_._frameType = webrtc::kDeltaFrame;
- PostDecodeAndWait(
- WEBRTC_VIDEO_CODEC_ERROR, input_image_, false, NULL, NULL, 0);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodeUpdatesVideoSizeOnKeyFrame) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- GetVideoStream();
- gfx::Size new_size(320, 240);
- input_image_._encodedWidth = new_size.width();
- input_image_._encodedHeight = new_size.height();
- PostDecodeAndWait(WEBRTC_VIDEO_CODEC_OK, input_image_, false, NULL, NULL, 0);
- EXPECT_EQ(new_size, video_stream_->video_decoder_config().coded_size());
- EXPECT_EQ(gfx::Rect(new_size),
- video_stream_->video_decoder_config().visible_rect());
- EXPECT_EQ(new_size, video_stream_->video_decoder_config().natural_size());
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodeAdjustsTimestampFromZero) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- GetVideoStream();
- PostDecodeAndWait(
- WEBRTC_VIDEO_CODEC_OK, input_image_, false, NULL, NULL, 10000);
- video_stream_->Read(base::Bind(&RTCVideoDecoderFactoryTvTest::ReadCallback,
- base::Unretained(this)));
- read_event_.Wait();
- EXPECT_EQ(base::TimeDelta::FromMilliseconds(0),
- last_decoder_buffer_->GetTimestamp());
- PostDecodeAndWait(
- WEBRTC_VIDEO_CODEC_OK, input_image_, false, NULL, NULL, 10033);
- video_stream_->Read(base::Bind(&RTCVideoDecoderFactoryTvTest::ReadCallback,
- base::Unretained(this)));
- read_event_.Wait();
- EXPECT_EQ(base::TimeDelta::FromMilliseconds(33),
- last_decoder_buffer_->GetTimestamp());
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, DecodePassesDataCorrectly) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- GetVideoStream();
- video_stream_->Read(base::Bind(&RTCVideoDecoderFactoryTvTest::ReadCallback,
- base::Unretained(this)));
- PostDecodeAndWait(WEBRTC_VIDEO_CODEC_OK, input_image_, false, NULL, NULL, 0);
- read_event_.Wait();
- EXPECT_EQ(static_cast<int>(sizeof(data_)),
- last_decoder_buffer_->GetDataSize());
- EXPECT_EQ(data_, last_decoder_buffer_->GetData()[0]);
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, NextReadTriggersDecodeCompleteCallback) {
- EXPECT_CALL(decode_complete_callback_, Decoded(_))
- .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
-
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- GetVideoStream();
- video_stream_->Read(base::Bind(&RTCVideoDecoderFactoryTvTest::ReadCallback,
- base::Unretained(this)));
- PostDecodeAndWait(WEBRTC_VIDEO_CODEC_OK, input_image_, false, NULL, NULL, 0);
- read_event_.Wait();
- video_stream_->Read(base::Bind(&RTCVideoDecoderFactoryTvTest::ReadCallback,
- base::Unretained(this)));
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, ResetReturnsOk) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Reset());
-}
-
-TEST_F(RTCVideoDecoderFactoryTvTest, ReleaseReturnsOk) {
- CreateDecoderAndAcquireDemuxer();
- InitDecode();
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
-}
-
-} // content
diff --git a/chromium/content/renderer/media/rtc_video_decoder_unittest.cc b/chromium/content/renderer/media/rtc_video_decoder_unittest.cc
index ba27a576765..e6dfe3eeaa1 100644
--- a/chromium/content/renderer/media/rtc_video_decoder_unittest.cc
+++ b/chromium/content/renderer/media/rtc_video_decoder_unittest.cc
@@ -33,40 +33,35 @@ class RTCVideoDecoderTest : public ::testing::Test,
virtual void SetUp() OVERRIDE {
ASSERT_TRUE(vda_thread_.Start());
- vda_loop_proxy_ = vda_thread_.message_loop_proxy();
+ vda_task_runner_ = vda_thread_.message_loop_proxy();
mock_vda_ = new media::MockVideoDecodeAccelerator;
- EXPECT_CALL(*mock_gpu_factories_, GetMessageLoop())
- .WillRepeatedly(Return(vda_loop_proxy_));
- EXPECT_CALL(*mock_gpu_factories_, DoCreateVideoDecodeAccelerator(_, _))
- .WillRepeatedly(
- Return(static_cast<media::VideoDecodeAccelerator*>(NULL)));
- EXPECT_CALL(*mock_gpu_factories_,
- DoCreateVideoDecodeAccelerator(media::VP8PROFILE_MAIN, _))
+ EXPECT_CALL(*mock_gpu_factories_, GetTaskRunner())
+ .WillRepeatedly(Return(vda_task_runner_));
+ EXPECT_CALL(*mock_gpu_factories_, DoCreateVideoDecodeAccelerator())
.WillRepeatedly(Return(mock_vda_));
- EXPECT_CALL(*mock_gpu_factories_, Abort()).WillRepeatedly(Return());
EXPECT_CALL(*mock_gpu_factories_, CreateSharedMemory(_))
.WillRepeatedly(Return(static_cast<base::SharedMemory*>(NULL)));
- EXPECT_CALL(*mock_vda_, Destroy());
+ EXPECT_CALL(*mock_vda_, Initialize(_, _))
+ .Times(1)
+ .WillRepeatedly(Return(true));
+ EXPECT_CALL(*mock_vda_, Destroy()).Times(1);
rtc_decoder_ =
RTCVideoDecoder::Create(webrtc::kVideoCodecVP8, mock_gpu_factories_);
}
virtual void TearDown() OVERRIDE {
VLOG(2) << "TearDown";
- if (vda_thread_.IsRunning()) {
- RunUntilIdle(); // Wait until all callbascks complete.
- vda_loop_proxy_->DeleteSoon(FROM_HERE, rtc_decoder_.release());
- // Make sure the decoder is released before stopping the thread.
- RunUntilIdle();
- vda_thread_.Stop();
- } else {
- rtc_decoder_.reset();
- }
+ EXPECT_TRUE(vda_thread_.IsRunning());
+    RunUntilIdle();  // Wait until all callbacks complete.
+ vda_task_runner_->DeleteSoon(FROM_HERE, rtc_decoder_.release());
+ // Make sure the decoder is released before stopping the thread.
+ RunUntilIdle();
+ vda_thread_.Stop();
}
virtual int32_t Decoded(webrtc::I420VideoFrame& decoded_image) OVERRIDE {
VLOG(2) << "Decoded";
- EXPECT_EQ(vda_loop_proxy_, base::MessageLoopProxy::current());
+ EXPECT_EQ(vda_task_runner_, base::MessageLoopProxy::current());
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -80,16 +75,17 @@ class RTCVideoDecoderTest : public ::testing::Test,
void NotifyResetDone() {
VLOG(2) << "NotifyResetDone";
- vda_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&RTCVideoDecoder::NotifyResetDone,
- base::Unretained(rtc_decoder_.get())));
+ vda_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoDecoder::NotifyResetDone,
+ base::Unretained(rtc_decoder_.get())));
}
void RunUntilIdle() {
VLOG(2) << "RunUntilIdle";
- vda_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&base::WaitableEvent::Signal,
- base::Unretained(&idle_waiter_)));
+ vda_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&idle_waiter_)));
idle_waiter_.Wait();
}
@@ -101,7 +97,7 @@ class RTCVideoDecoderTest : public ::testing::Test,
base::Thread vda_thread_;
private:
- scoped_refptr<base::MessageLoopProxy> vda_loop_proxy_;
+ scoped_refptr<base::SingleThreadTaskRunner> vda_task_runner_;
base::Lock lock_;
base::WaitableEvent idle_waiter_;
@@ -165,8 +161,6 @@ TEST_F(RTCVideoDecoderTest, InitDecodeAfterRelease) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, rtc_decoder_->Release());
}
-TEST_F(RTCVideoDecoderTest, VdaThreadStops) { vda_thread_.Stop(); }
-
TEST_F(RTCVideoDecoderTest, IsBufferAfterReset) {
EXPECT_TRUE(rtc_decoder_->IsBufferAfterReset(0, RTCVideoDecoder::ID_INVALID));
EXPECT_TRUE(rtc_decoder_->IsBufferAfterReset(RTCVideoDecoder::ID_LAST,
diff --git a/chromium/content/renderer/media/rtc_video_encoder.cc b/chromium/content/renderer/media/rtc_video_encoder.cc
index 4b54db2342d..22f17a17836 100644
--- a/chromium/content/renderer/media/rtc_video_encoder.cc
+++ b/chromium/content/renderer/media/rtc_video_encoder.cc
@@ -10,8 +10,8 @@
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
#include "base/synchronization/waitable_event.h"
-#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
@@ -42,9 +42,8 @@ class RTCVideoEncoder::Impl
: public media::VideoEncodeAccelerator::Client,
public base::RefCountedThreadSafe<RTCVideoEncoder::Impl> {
public:
- Impl(
- const base::WeakPtr<RTCVideoEncoder>& weak_encoder,
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+ Impl(const base::WeakPtr<RTCVideoEncoder>& weak_encoder,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories);
// Create the VEA and call Initialize() on it. Called once per instantiation,
// and then the instance is bound forevermore to whichever thread made the
@@ -77,7 +76,6 @@ class RTCVideoEncoder::Impl
void Destroy();
// media::VideoEncodeAccelerator::Client implementation.
- virtual void NotifyInitializeDone() OVERRIDE;
virtual void RequireBitstreamBuffers(unsigned int input_count,
const gfx::Size& input_coded_size,
size_t output_buffer_size) OVERRIDE;
@@ -119,7 +117,7 @@ class RTCVideoEncoder::Impl
const scoped_refptr<base::MessageLoopProxy> encoder_message_loop_proxy_;
// Factory for creating VEAs, shared memory buffers, etc.
- const scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+ const scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
// webrtc::VideoEncoder expects InitEncode() and Encode() to be synchronous.
// Do this by waiting on the |async_waiter_| and returning the return value in
@@ -150,20 +148,30 @@ class RTCVideoEncoder::Impl
// we don't care about ordering.
std::vector<int> input_buffers_free_;
+ // The number of output buffers ready to be filled with output from the
+ // encoder.
+ int output_buffers_free_count_;
+
+ // 15 bits running index of the VP8 frames. See VP8 RTP spec for details.
+ uint16 picture_id_;
+
DISALLOW_COPY_AND_ASSIGN(Impl);
};
RTCVideoEncoder::Impl::Impl(
const base::WeakPtr<RTCVideoEncoder>& weak_encoder,
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories)
: weak_encoder_(weak_encoder),
encoder_message_loop_proxy_(base::MessageLoopProxy::current()),
gpu_factories_(gpu_factories),
async_waiter_(NULL),
async_retval_(NULL),
input_next_frame_(NULL),
- input_next_frame_keyframe_(false) {
+ input_next_frame_keyframe_(false),
+ output_buffers_free_count_(0) {
thread_checker_.DetachFromThread();
+ // Picture ID should start on a random number.
+ picture_id_ = static_cast<uint16_t>(base::RandInt(0, 0x7FFF));
}
void RTCVideoEncoder::Impl::CreateAndInitializeVEA(
@@ -183,14 +191,20 @@ void RTCVideoEncoder::Impl::CreateAndInitializeVEA(
return;
}
- video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator(this).Pass();
+ video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator().Pass();
if (!video_encoder_) {
NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
return;
}
input_visible_size_ = input_visible_size;
- video_encoder_->Initialize(
- media::VideoFrame::I420, input_visible_size_, profile, bitrate * 1000);
+ if (!video_encoder_->Initialize(media::VideoFrame::I420,
+ input_visible_size_,
+ profile,
+ bitrate * 1000,
+ this)) {
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kInvalidArgumentError);
+ return;
+ }
}
void RTCVideoEncoder::Impl::Enqueue(const webrtc::I420VideoFrame* input_frame,
@@ -202,6 +216,29 @@ void RTCVideoEncoder::Impl::Enqueue(const webrtc::I420VideoFrame* input_frame,
DCHECK(!input_next_frame_);
RegisterAsyncWaiter(async_waiter, async_retval);
+ // If there are no free input and output buffers, drop the frame to avoid a
+ // deadlock. If there is a free input buffer, EncodeOneFrame will run and
+ // unblock Encode(). If there are no free input buffers but there is a free
+ // output buffer, EncodeFrameFinished will be called later to unblock
+ // Encode().
+ //
+ // The caller of Encode() holds a webrtc lock. The deadlock happens when:
+ // (1) Encode() is waiting for the frame to be encoded in EncodeOneFrame().
+ // (2) There are no free input buffers and they cannot be freed because
+ // the encoder has no output buffers.
+ // (3) Output buffers cannot be freed because ReturnEncodedImage is queued
+ // on libjingle worker thread to be run. But the worker thread is waiting
+ // for the same webrtc lock held by the caller of Encode().
+ //
+ // Dropping a frame is fine. The encoder has been filled with all input
+ // buffers. Returning an error in Encode() is not fatal and WebRTC will just
+ // continue. If this is a key frame, WebRTC will request a key frame again.
+ // Besides, webrtc will drop a frame if Encode() blocks too long.
+ if (input_buffers_free_.empty() && output_buffers_free_count_ == 0) {
+ DVLOG(2) << "Run out of input and output buffers. Drop the frame.";
+ SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_ERROR);
+ return;
+ }
input_next_frame_ = input_frame;
input_next_frame_keyframe_ = force_keyframe;
@@ -219,6 +256,7 @@ void RTCVideoEncoder::Impl::UseOutputBitstreamBufferId(
bitstream_buffer_id,
output_buffers_[bitstream_buffer_id]->handle(),
output_buffers_[bitstream_buffer_id]->mapped_size()));
+ output_buffers_free_count_++;
}
}
@@ -241,13 +279,7 @@ void RTCVideoEncoder::Impl::RequestEncodingParametersChange(uint32 bitrate,
void RTCVideoEncoder::Impl::Destroy() {
DVLOG(3) << "Impl::Destroy()";
DCHECK(thread_checker_.CalledOnValidThread());
- if (video_encoder_)
- video_encoder_.release()->Destroy();
-}
-
-void RTCVideoEncoder::Impl::NotifyInitializeDone() {
- DVLOG(3) << "Impl::NotifyInitializeDone()";
- DCHECK(thread_checker_.CalledOnValidThread());
+ video_encoder_.reset();
}
void RTCVideoEncoder::Impl::RequireBitstreamBuffers(
@@ -294,6 +326,7 @@ void RTCVideoEncoder::Impl::RequireBitstreamBuffers(
for (size_t i = 0; i < output_buffers_.size(); ++i) {
video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer(
i, output_buffers_[i]->handle(), output_buffers_[i]->mapped_size()));
+ output_buffers_free_count_++;
}
SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_OK);
}
@@ -321,10 +354,15 @@ void RTCVideoEncoder::Impl::BitstreamBufferReady(int32 bitstream_buffer_id,
NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
return;
}
+ output_buffers_free_count_--;
// Use webrtc timestamps to ensure correct RTP sender behavior.
- // TODO(hshi): obtain timestamp from the capturer, see crbug.com/284783.
- const int64 capture_time_ms = webrtc::TickTime::MillisecondTimestamp();
+ // TODO(hshi): obtain timestamp from the capturer, see crbug.com/350106.
+ const int64 capture_time_us = webrtc::TickTime::MicrosecondTimestamp();
+
+ // Derive the capture time (in ms) and RTP timestamp (in 90KHz ticks).
+ int64 capture_time_ms = capture_time_us / 1000;
+ uint32_t rtp_timestamp = static_cast<uint32_t>(capture_time_us * 90 / 1000);
scoped_ptr<webrtc::EncodedImage> image(new webrtc::EncodedImage(
reinterpret_cast<uint8_t*>(output_buffer->memory()),
@@ -332,8 +370,7 @@ void RTCVideoEncoder::Impl::BitstreamBufferReady(int32 bitstream_buffer_id,
output_buffer->mapped_size()));
image->_encodedWidth = input_visible_size_.width();
image->_encodedHeight = input_visible_size_.height();
- // Convert capture time to 90 kHz RTP timestamp.
- image->_timeStamp = static_cast<uint32_t>(90 * capture_time_ms);
+ image->_timeStamp = rtp_timestamp;
image->capture_time_ms_ = capture_time_ms;
image->_frameType = (key_frame ? webrtc::kKeyFrame : webrtc::kDeltaFrame);
image->_completeFrame = true;
@@ -343,7 +380,10 @@ void RTCVideoEncoder::Impl::BitstreamBufferReady(int32 bitstream_buffer_id,
base::Bind(&RTCVideoEncoder::ReturnEncodedImage,
weak_encoder_,
base::Passed(&image),
- bitstream_buffer_id));
+ bitstream_buffer_id,
+ picture_id_));
+ // Picture ID must wrap after reaching the maximum.
+ picture_id_ = (picture_id_ + 1) & 0x7FFF;
}
void RTCVideoEncoder::Impl::NotifyError(
@@ -359,8 +399,7 @@ void RTCVideoEncoder::Impl::NotifyError(
retval = WEBRTC_VIDEO_CODEC_ERROR;
}
- if (video_encoder_)
- video_encoder_.release()->Destroy();
+ video_encoder_.reset();
if (async_waiter_) {
SignalAsyncWaiter(retval);
@@ -471,13 +510,13 @@ void RTCVideoEncoder::Impl::SignalAsyncWaiter(int32_t retval) {
RTCVideoEncoder::RTCVideoEncoder(
webrtc::VideoCodecType type,
media::VideoCodecProfile profile,
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories)
: video_codec_type_(type),
video_codec_profile_(profile),
gpu_factories_(gpu_factories),
encoded_image_callback_(NULL),
impl_status_(WEBRTC_VIDEO_CODEC_UNINITIALIZED),
- weak_this_factory_(this) {
+ weak_factory_(this) {
DVLOG(1) << "RTCVideoEncoder(): profile=" << profile;
}
@@ -497,11 +536,11 @@ int32_t RTCVideoEncoder::InitEncode(const webrtc::VideoCodec* codec_settings,
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!impl_);
- weak_this_factory_.InvalidateWeakPtrs();
- impl_ = new Impl(weak_this_factory_.GetWeakPtr(), gpu_factories_);
+ weak_factory_.InvalidateWeakPtrs();
+ impl_ = new Impl(weak_factory_.GetWeakPtr(), gpu_factories_);
base::WaitableEvent initialization_waiter(true, false);
int32_t initialization_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
- gpu_factories_->GetMessageLoop()->PostTask(
+ gpu_factories_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&RTCVideoEncoder::Impl::CreateAndInitializeVEA,
impl_,
@@ -529,14 +568,16 @@ int32_t RTCVideoEncoder::Encode(
return impl_status_;
}
+ bool want_key_frame = frame_types && frame_types->size() &&
+ frame_types->front() == webrtc::kKeyFrame;
base::WaitableEvent encode_waiter(true, false);
int32_t encode_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
- gpu_factories_->GetMessageLoop()->PostTask(
+ gpu_factories_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&RTCVideoEncoder::Impl::Enqueue,
impl_,
&input_image,
- (frame_types->front() == webrtc::kKeyFrame),
+ want_key_frame,
&encode_waiter,
&encode_retval));
@@ -563,14 +604,11 @@ int32_t RTCVideoEncoder::Release() {
DVLOG(3) << "Release()";
DCHECK(thread_checker_.CalledOnValidThread());
- // Reset the gpu_factory_, in case we reuse this encoder.
- gpu_factories_->Abort();
- gpu_factories_ = gpu_factories_->Clone();
if (impl_) {
- gpu_factories_->GetMessageLoop()->PostTask(
+ gpu_factories_->GetTaskRunner()->PostTask(
FROM_HERE, base::Bind(&RTCVideoEncoder::Impl::Destroy, impl_));
impl_ = NULL;
- weak_this_factory_.InvalidateWeakPtrs();
+ weak_factory_.InvalidateWeakPtrs();
impl_status_ = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
return WEBRTC_VIDEO_CODEC_OK;
@@ -593,7 +631,7 @@ int32_t RTCVideoEncoder::SetRates(uint32_t new_bit_rate, uint32_t frame_rate) {
return impl_status_;
}
- gpu_factories_->GetMessageLoop()->PostTask(
+ gpu_factories_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&RTCVideoEncoder::Impl::RequestEncodingParametersChange,
impl_,
@@ -603,10 +641,12 @@ int32_t RTCVideoEncoder::SetRates(uint32_t new_bit_rate, uint32_t frame_rate) {
}
void RTCVideoEncoder::ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
- int32 bitstream_buffer_id) {
+ int32 bitstream_buffer_id,
+ uint16 picture_id) {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(3) << "ReturnEncodedImage(): "
- "bitstream_buffer_id=" << bitstream_buffer_id;
+ << "bitstream_buffer_id=" << bitstream_buffer_id
+ << ", picture_id=" << picture_id;
if (!encoded_image_callback_)
return;
@@ -615,7 +655,7 @@ void RTCVideoEncoder::ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
memset(&info, 0, sizeof(info));
info.codecType = video_codec_type_;
if (video_codec_type_ == webrtc::kVideoCodecVP8) {
- info.codecSpecific.VP8.pictureId = -1;
+ info.codecSpecific.VP8.pictureId = picture_id;
info.codecSpecific.VP8.tl0PicIdx = -1;
info.codecSpecific.VP8.keyIdx = -1;
}
@@ -637,7 +677,7 @@ void RTCVideoEncoder::ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
// The call through webrtc::EncodedImageCallback is synchronous, so we can
// immediately recycle the output buffer back to the Impl.
- gpu_factories_->GetMessageLoop()->PostTask(
+ gpu_factories_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&RTCVideoEncoder::Impl::UseOutputBitstreamBufferId,
impl_,
@@ -649,7 +689,7 @@ void RTCVideoEncoder::NotifyError(int32_t error) {
DVLOG(1) << "NotifyError(): error=" << error;
impl_status_ = error;
- gpu_factories_->GetMessageLoop()->PostTask(
+ gpu_factories_->GetTaskRunner()->PostTask(
FROM_HERE, base::Bind(&RTCVideoEncoder::Impl::Destroy, impl_));
impl_ = NULL;
}
@@ -660,7 +700,7 @@ void RTCVideoEncoder::RecordInitEncodeUMA(int32_t init_retval) {
if (init_retval == WEBRTC_VIDEO_CODEC_OK) {
UMA_HISTOGRAM_ENUMERATION("Media.RTCVideoEncoderProfile",
video_codec_profile_,
- media::VIDEO_CODEC_PROFILE_MAX);
+ media::VIDEO_CODEC_PROFILE_MAX + 1);
}
}
diff --git a/chromium/content/renderer/media/rtc_video_encoder.h b/chromium/content/renderer/media/rtc_video_encoder.h
index 4e36ece1c1d..eb1677ea5f6 100644
--- a/chromium/content/renderer/media/rtc_video_encoder.h
+++ b/chromium/content/renderer/media/rtc_video_encoder.h
@@ -23,9 +23,13 @@ class MessageLoopProxy;
} // namespace base
-namespace content {
+namespace media {
+
+class GpuVideoAcceleratorFactories;
-class RendererGpuVideoAcceleratorFactories;
+} // namespace media
+
+namespace content {
// RTCVideoEncoder uses a media::VideoEncodeAccelerator to implement a
// webrtc::VideoEncoder class for WebRTC. Internally, VEA methods are
@@ -41,7 +45,7 @@ class CONTENT_EXPORT RTCVideoEncoder
RTCVideoEncoder(
webrtc::VideoCodecType type,
media::VideoCodecProfile profile,
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories);
virtual ~RTCVideoEncoder();
// webrtc::VideoEncoder implementation. Tasks are posted to |impl_| using the
@@ -65,7 +69,8 @@ class CONTENT_EXPORT RTCVideoEncoder
// Return an encoded output buffer to WebRTC.
void ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
- int32 bitstream_buffer_id);
+ int32 bitstream_buffer_id,
+ uint16 picture_id);
void NotifyError(int32_t error);
@@ -80,7 +85,7 @@ class CONTENT_EXPORT RTCVideoEncoder
const media::VideoCodecProfile video_codec_profile_;
// Factory for creating VEAs, shared memory buffers, etc.
- scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+ scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
// webrtc::VideoEncoder encode complete callback.
webrtc::EncodedImageCallback* encoded_image_callback_;
@@ -96,7 +101,8 @@ class CONTENT_EXPORT RTCVideoEncoder
// Weak pointer factory for posting back VEA::Client notifications to
// RTCVideoEncoder.
- base::WeakPtrFactory<RTCVideoEncoder> weak_this_factory_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<RTCVideoEncoder> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(RTCVideoEncoder);
};
diff --git a/chromium/content/renderer/media/rtc_video_encoder_factory.cc b/chromium/content/renderer/media/rtc_video_encoder_factory.cc
index 3ff42728df2..777d69f83c9 100644
--- a/chromium/content/renderer/media/rtc_video_encoder_factory.cc
+++ b/chromium/content/renderer/media/rtc_video_encoder_factory.cc
@@ -5,8 +5,8 @@
#include "content/renderer/media/rtc_video_encoder_factory.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
-#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
#include "content/renderer/media/rtc_video_encoder.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "media/video/video_encode_accelerator.h"
namespace content {
@@ -59,7 +59,7 @@ media::VideoCodecProfile WebRTCCodecToVideoCodecProfile(
} // anonymous namespace
RTCVideoEncoderFactory::RTCVideoEncoderFactory(
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories)
: gpu_factories_(gpu_factories) {
// Query media::VideoEncodeAccelerator (statically) for our supported codecs.
std::vector<media::VideoEncodeAccelerator::SupportedProfile> profiles =
@@ -84,11 +84,8 @@ webrtc::VideoEncoder* RTCVideoEncoderFactory::CreateVideoEncoder(
}
if (!found)
return NULL;
- // GpuVideoAcceleratorFactories is not thread safe. It cannot be shared
- // by different encoders. Since we aren't running on the child thread and
- // cannot create a new factory, clone one instead.
return new RTCVideoEncoder(
- type, WebRTCCodecToVideoCodecProfile(type), gpu_factories_->Clone());
+ type, WebRTCCodecToVideoCodecProfile(type), gpu_factories_);
}
void RTCVideoEncoderFactory::AddObserver(Observer* observer) {
diff --git a/chromium/content/renderer/media/rtc_video_encoder_factory.h b/chromium/content/renderer/media/rtc_video_encoder_factory.h
index b07ccda0043..b9971bfeb0a 100644
--- a/chromium/content/renderer/media/rtc_video_encoder_factory.h
+++ b/chromium/content/renderer/media/rtc_video_encoder_factory.h
@@ -12,9 +12,13 @@
#include "content/common/content_export.h"
#include "third_party/libjingle/source/talk/media/webrtc/webrtcvideoencoderfactory.h"
-namespace content {
+namespace media {
+
+class GpuVideoAcceleratorFactories;
-class RendererGpuVideoAcceleratorFactories;
+} // namespace media
+
+namespace content {
// This class creates RTCVideoEncoder instances (each wrapping a
// media::VideoEncodeAccelerator) on behalf of the WebRTC stack.
@@ -22,7 +26,7 @@ class CONTENT_EXPORT RTCVideoEncoderFactory
: NON_EXPORTED_BASE(public cricket::WebRtcVideoEncoderFactory) {
public:
explicit RTCVideoEncoderFactory(
- const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories);
virtual ~RTCVideoEncoderFactory();
// cricket::WebRtcVideoEncoderFactory implementation.
@@ -34,7 +38,7 @@ class CONTENT_EXPORT RTCVideoEncoderFactory
virtual void DestroyVideoEncoder(webrtc::VideoEncoder* encoder) OVERRIDE;
private:
- const scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+ const scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
// Codec support list of cricket::WebRtcVideoEncoderFactory::VideoCodec
// instances.
diff --git a/chromium/content/renderer/media/rtc_video_renderer.cc b/chromium/content/renderer/media/rtc_video_renderer.cc
index 013fcad8dfb..c45a24a2f68 100644
--- a/chromium/content/renderer/media/rtc_video_renderer.cc
+++ b/chromium/content/renderer/media/rtc_video_renderer.cc
@@ -6,9 +6,12 @@
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
+const int kMinFrameSize = 2;
+
namespace content {
RTCVideoRenderer::RTCVideoRenderer(
@@ -19,8 +22,9 @@ RTCVideoRenderer::RTCVideoRenderer(
repaint_cb_(repaint_cb),
message_loop_proxy_(base::MessageLoopProxy::current()),
state_(STOPPED),
- first_frame_rendered_(false),
- video_track_(video_track) {
+ frame_size_(kMinFrameSize, kMinFrameSize),
+ video_track_(video_track),
+ weak_factory_(this) {
}
RTCVideoRenderer::~RTCVideoRenderer() {
@@ -29,15 +33,20 @@ RTCVideoRenderer::~RTCVideoRenderer() {
void RTCVideoRenderer::Start() {
DCHECK(message_loop_proxy_->BelongsToCurrentThread());
DCHECK_EQ(state_, STOPPED);
- DCHECK(!first_frame_rendered_);
- AddToVideoTrack(this, video_track_);
+ AddToVideoTrack(
+ this,
+ media::BindToCurrentLoop(
+ base::Bind(
+ &RTCVideoRenderer::OnVideoFrame,
+ weak_factory_.GetWeakPtr())),
+ video_track_);
state_ = STARTED;
if (video_track_.source().readyState() ==
blink::WebMediaStreamSource::ReadyStateEnded ||
!video_track_.isEnabled()) {
- MaybeRenderSignalingFrame();
+ RenderSignalingFrame();
}
}
@@ -45,8 +54,10 @@ void RTCVideoRenderer::Stop() {
DCHECK(message_loop_proxy_->BelongsToCurrentThread());
DCHECK(state_ == STARTED || state_ == PAUSED);
RemoveFromVideoTrack(this, video_track_);
+ weak_factory_.InvalidateWeakPtrs();
state_ = STOPPED;
- first_frame_rendered_ = false;
+ frame_size_.set_width(kMinFrameSize);
+ frame_size_.set_height(kMinFrameSize);
}
void RTCVideoRenderer::Play() {
@@ -67,43 +78,44 @@ void RTCVideoRenderer::OnReadyStateChanged(
blink::WebMediaStreamSource::ReadyState state) {
DCHECK(message_loop_proxy_->BelongsToCurrentThread());
if (state == blink::WebMediaStreamSource::ReadyStateEnded)
- MaybeRenderSignalingFrame();
+ RenderSignalingFrame();
}
void RTCVideoRenderer::OnEnabledChanged(bool enabled) {
DCHECK(message_loop_proxy_->BelongsToCurrentThread());
if (!enabled)
- MaybeRenderSignalingFrame();
+ RenderSignalingFrame();
}
void RTCVideoRenderer::OnVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame) {
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
DCHECK(message_loop_proxy_->BelongsToCurrentThread());
if (state_ != STARTED) {
return;
}
+ frame_size_ = frame->natural_size();
+
TRACE_EVENT_INSTANT1("rtc_video_renderer",
"OnVideoFrame",
TRACE_EVENT_SCOPE_THREAD,
"timestamp",
- frame->GetTimestamp().InMilliseconds());
+ frame->timestamp().InMilliseconds());
repaint_cb_.Run(frame);
- first_frame_rendered_ = true;
}
-void RTCVideoRenderer::MaybeRenderSignalingFrame() {
- // Render a small black frame if no frame has been rendered.
+void RTCVideoRenderer::RenderSignalingFrame() {
// This is necessary to make sure audio can play if the video tag src is
// a MediaStream video track that has been rejected, ended or disabled.
- if (first_frame_rendered_)
- return;
-
- const int kMinFrameSize = 2;
- const gfx::Size size(kMinFrameSize, kMinFrameSize);
+ // It also ensure that the renderer don't hold a reference to a real video
+ // frame if no more frames are provided. This is since there might be a
+ // finite number of available buffers. E.g, video that
+ // originates from a video camera.
scoped_refptr<media::VideoFrame> video_frame =
- media::VideoFrame::CreateBlackFrame(size);
- OnVideoFrame(video_frame);
+ media::VideoFrame::CreateBlackFrame(frame_size_);
+ OnVideoFrame(video_frame, media::VideoCaptureFormat(), base::TimeTicks());
}
} // namespace content
diff --git a/chromium/content/renderer/media/rtc_video_renderer.h b/chromium/content/renderer/media/rtc_video_renderer.h
index b72a5860ee8..e205b6b5f8d 100644
--- a/chromium/content/renderer/media/rtc_video_renderer.h
+++ b/chromium/content/renderer/media/rtc_video_renderer.h
@@ -6,7 +6,9 @@
#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_RENDERER_H_
#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
+#include "content/common/media/video_capture.h"
#include "content/public/renderer/media_stream_video_sink.h"
#include "content/renderer/media/video_frame_provider.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
@@ -52,21 +54,24 @@ class CONTENT_EXPORT RTCVideoRenderer
STOPPED,
};
+ void OnVideoFrame(const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
// VideoTrackSink implementation. Called on the main thread.
- virtual void OnVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame) OVERRIDE;
virtual void OnReadyStateChanged(
blink::WebMediaStreamSource::ReadyState state) OVERRIDE;
virtual void OnEnabledChanged(bool enabled) OVERRIDE;
- void MaybeRenderSignalingFrame();
+ void RenderSignalingFrame();
base::Closure error_cb_;
RepaintCB repaint_cb_;
scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
State state_;
- bool first_frame_rendered_;
+ gfx::Size frame_size_;
blink::WebMediaStreamTrack video_track_;
+ base::WeakPtrFactory<RTCVideoRenderer> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(RTCVideoRenderer);
};
diff --git a/chromium/content/renderer/media/texttrack_impl.cc b/chromium/content/renderer/media/texttrack_impl.cc
index 3df473b593a..1e24f9bad65 100644
--- a/chromium/content/renderer/media/texttrack_impl.cc
+++ b/chromium/content/renderer/media/texttrack_impl.cc
@@ -8,7 +8,7 @@
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "content/renderer/media/webinbandtexttrack_impl.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "third_party/WebKit/public/platform/WebInbandTextTrackClient.h"
#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
diff --git a/chromium/content/renderer/media/video_capture_impl.cc b/chromium/content/renderer/media/video_capture_impl.cc
index 30b43ddfcf4..a9d6240ea1c 100644
--- a/chromium/content/renderer/media/video_capture_impl.cc
+++ b/chromium/content/renderer/media/video_capture_impl.cc
@@ -1,6 +1,13 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Notes about usage of this object by VideoCaptureImplManager.
+//
+// VideoCaptureImplManager access this object by using a Unretained()
+// binding and tasks on the IO thread. It is then important that
+// VideoCaptureImpl never post task to itself. All operations must be
+// synchronous.
#include "content/renderer/media/video_capture_impl.h"
@@ -8,7 +15,7 @@
#include "base/stl_util.h"
#include "content/child/child_process.h"
#include "content/common/media/video_capture_messages.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"
@@ -32,143 +39,81 @@ class VideoCaptureImpl::ClientBuffer
DISALLOW_COPY_AND_ASSIGN(ClientBuffer);
};
-bool VideoCaptureImpl::CaptureStarted() {
- return state_ == VIDEO_CAPTURE_STATE_STARTED;
-}
-
-int VideoCaptureImpl::CaptureFrameRate() {
- return last_frame_format_.frame_rate;
-}
+VideoCaptureImpl::ClientInfo::ClientInfo() {}
+VideoCaptureImpl::ClientInfo::~ClientInfo() {}
VideoCaptureImpl::VideoCaptureImpl(
const media::VideoCaptureSessionId session_id,
- base::MessageLoopProxy* capture_message_loop_proxy,
VideoCaptureMessageFilter* filter)
- : VideoCapture(),
- message_filter_(filter),
- capture_message_loop_proxy_(capture_message_loop_proxy),
- io_message_loop_proxy_(ChildProcess::current()->io_message_loop_proxy()),
+ : message_filter_(filter),
device_id_(0),
session_id_(session_id),
suspended_(false),
state_(VIDEO_CAPTURE_STATE_STOPPED),
- weak_this_factory_(this) {
+ weak_factory_(this) {
DCHECK(filter);
+ thread_checker_.DetachFromThread();
}
-VideoCaptureImpl::~VideoCaptureImpl() {}
-
-void VideoCaptureImpl::Init() {
- if (!io_message_loop_proxy_->BelongsToCurrentThread()) {
- io_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::AddDelegateOnIOThread,
- base::Unretained(this)));
- } else {
- AddDelegateOnIOThread();
- }
-}
-
-void VideoCaptureImpl::DeInit(base::Closure task) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoDeInitOnCaptureThread,
- base::Unretained(this), task));
-}
-
-void VideoCaptureImpl::StartCapture(
- media::VideoCapture::EventHandler* handler,
- const media::VideoCaptureParams& params) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoStartCaptureOnCaptureThread,
- base::Unretained(this), handler, params));
-}
-
-void VideoCaptureImpl::StopCapture(media::VideoCapture::EventHandler* handler) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoStopCaptureOnCaptureThread,
- base::Unretained(this), handler));
-}
-
-void VideoCaptureImpl::OnBufferCreated(
- base::SharedMemoryHandle handle,
- int length, int buffer_id) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoBufferCreatedOnCaptureThread,
- base::Unretained(this), handle, length, buffer_id));
-}
-
-void VideoCaptureImpl::OnBufferDestroyed(int buffer_id) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoBufferDestroyedOnCaptureThread,
- base::Unretained(this), buffer_id));
-}
-
-void VideoCaptureImpl::OnBufferReceived(
- int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoBufferReceivedOnCaptureThread,
- base::Unretained(this), buffer_id, timestamp, format));
+VideoCaptureImpl::~VideoCaptureImpl() {
+ DCHECK(thread_checker_.CalledOnValidThread());
}
-void VideoCaptureImpl::OnStateChanged(VideoCaptureState state) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoStateChangedOnCaptureThread,
- base::Unretained(this), state));
-}
-
-void VideoCaptureImpl::OnDelegateAdded(int32 device_id) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoDelegateAddedOnCaptureThread,
- base::Unretained(this), device_id));
-}
-
-void VideoCaptureImpl::SuspendCapture(bool suspend) {
- capture_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::DoSuspendCaptureOnCaptureThread,
- base::Unretained(this), suspend));
+void VideoCaptureImpl::Init() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ message_filter_->AddDelegate(this);
}
-void VideoCaptureImpl::DoDeInitOnCaptureThread(base::Closure task) {
+void VideoCaptureImpl::DeInit() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (state_ == VIDEO_CAPTURE_STATE_STARTED)
Send(new VideoCaptureHostMsg_Stop(device_id_));
+ message_filter_->RemoveDelegate(this);
+}
- io_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureImpl::RemoveDelegateOnIOThread,
- base::Unretained(this), task));
+void VideoCaptureImpl::SuspendCapture(bool suspend) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ suspended_ = suspend;
}
-void VideoCaptureImpl::DoStartCaptureOnCaptureThread(
- media::VideoCapture::EventHandler* handler,
- const media::VideoCaptureParams& params) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+void VideoCaptureImpl::StartCapture(
+ int client_id,
+ const media::VideoCaptureParams& params,
+ const VideoCaptureStateUpdateCB& state_update_cb,
+ const VideoCaptureDeliverFrameCB& deliver_frame_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ ClientInfo client_info;
+ client_info.params = params;
+ client_info.state_update_cb = state_update_cb;
+ client_info.deliver_frame_cb = deliver_frame_cb;
if (state_ == VIDEO_CAPTURE_STATE_ERROR) {
- handler->OnError(this, 1);
- handler->OnRemoved(this);
- } else if ((clients_pending_on_filter_.find(handler) !=
- clients_pending_on_filter_.end()) ||
- (clients_pending_on_restart_.find(handler) !=
- clients_pending_on_restart_.end()) ||
- clients_.find(handler) != clients_.end() ) {
- // This client has started.
+ state_update_cb.Run(VIDEO_CAPTURE_STATE_ERROR);
+ } else if (clients_pending_on_filter_.count(client_id) ||
+ clients_pending_on_restart_.count(client_id) ||
+ clients_.count(client_id)) {
+ LOG(FATAL) << "This client has already started.";
} else if (!device_id_) {
- clients_pending_on_filter_[handler] = params;
+ clients_pending_on_filter_[client_id] = client_info;
} else {
- handler->OnStarted(this);
+ // Note: |state_| might not be started at this point. But we tell
+ // client that we have started.
+ state_update_cb.Run(VIDEO_CAPTURE_STATE_STARTED);
if (state_ == VIDEO_CAPTURE_STATE_STARTED) {
- clients_[handler] = params;
+ clients_[client_id] = client_info;
+ // TODO(sheu): Allowing resolution change will require that all
+ // outstanding clients of a capture session support resolution change.
+ DCHECK_EQ(params_.allow_resolution_change,
+ params.allow_resolution_change);
} else if (state_ == VIDEO_CAPTURE_STATE_STOPPING) {
- clients_pending_on_restart_[handler] = params;
+ clients_pending_on_restart_[client_id] = client_info;
DVLOG(1) << "StartCapture: Got new resolution "
<< params.requested_format.frame_size.ToString()
<< " during stopping.";
} else {
- // TODO(sheu): Allowing resolution change will require that all
- // outstanding clients of a capture session support resolution change.
- DCHECK(!params.allow_resolution_change);
- clients_[handler] = params;
- DCHECK_EQ(1ul, clients_.size());
+ clients_[client_id] = client_info;
+ if (state_ == VIDEO_CAPTURE_STATE_STARTED)
+ return;
params_ = params;
if (params_.requested_format.frame_rate >
media::limits::kMaxFramesPerSecond) {
@@ -177,35 +122,54 @@ void VideoCaptureImpl::DoStartCaptureOnCaptureThread(
}
DVLOG(1) << "StartCapture: starting with first resolution "
<< params_.requested_format.frame_size.ToString();
-
+ first_frame_timestamp_ = base::TimeTicks();
StartCaptureInternal();
}
}
}
-void VideoCaptureImpl::DoStopCaptureOnCaptureThread(
- media::VideoCapture::EventHandler* handler) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+void VideoCaptureImpl::StopCapture(int client_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
- // A handler can be in only one client list.
- // If this handler is in any client list, we can just remove it from
+ // A client ID can be in only one client list.
+ // If this ID is in any client list, we can just remove it from
// that client list and don't have to run the other following RemoveClient().
- RemoveClient(handler, &clients_pending_on_filter_) ||
- RemoveClient(handler, &clients_pending_on_restart_) ||
- RemoveClient(handler, &clients_);
+ if (!RemoveClient(client_id, &clients_pending_on_filter_)) {
+ if (!RemoveClient(client_id, &clients_pending_on_restart_)) {
+ RemoveClient(client_id, &clients_);
+ }
+ }
if (clients_.empty()) {
DVLOG(1) << "StopCapture: No more client, stopping ...";
StopDevice();
client_buffers_.clear();
- weak_this_factory_.InvalidateWeakPtrs();
+ weak_factory_.InvalidateWeakPtrs();
}
}
-void VideoCaptureImpl::DoBufferCreatedOnCaptureThread(
+void VideoCaptureImpl::GetDeviceSupportedFormats(
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ device_formats_cb_queue_.push_back(callback);
+ if (device_formats_cb_queue_.size() == 1)
+ Send(new VideoCaptureHostMsg_GetDeviceSupportedFormats(device_id_,
+ session_id_));
+}
+
+void VideoCaptureImpl::GetDeviceFormatsInUse(
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ device_formats_in_use_cb_queue_.push_back(callback);
+ if (device_formats_in_use_cb_queue_.size() == 1)
+ Send(
+ new VideoCaptureHostMsg_GetDeviceFormatsInUse(device_id_, session_id_));
+}
+
+void VideoCaptureImpl::OnBufferCreated(
base::SharedMemoryHandle handle,
int length, int buffer_id) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
// In case client calls StopCapture before the arrival of created buffer,
// just close this buffer and return.
@@ -216,7 +180,7 @@ void VideoCaptureImpl::DoBufferCreatedOnCaptureThread(
scoped_ptr<base::SharedMemory> shm(new base::SharedMemory(handle, false));
if (!shm->Map(length)) {
- DLOG(ERROR) << "DoBufferCreatedOnCaptureThread: Map() failed.";
+ DLOG(ERROR) << "OnBufferCreated: Map failed.";
return;
}
@@ -228,8 +192,8 @@ void VideoCaptureImpl::DoBufferCreatedOnCaptureThread(
DCHECK(inserted);
}
-void VideoCaptureImpl::DoBufferDestroyedOnCaptureThread(int buffer_id) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+void VideoCaptureImpl::OnBufferDestroyed(int buffer_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
ClientBufferMap::iterator iter = client_buffers_.find(buffer_id);
if (iter == client_buffers_.end())
@@ -240,18 +204,30 @@ void VideoCaptureImpl::DoBufferDestroyedOnCaptureThread(int buffer_id) {
client_buffers_.erase(iter);
}
-void VideoCaptureImpl::DoBufferReceivedOnCaptureThread(
- int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+void VideoCaptureImpl::OnBufferReceived(int buffer_id,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // The capture pipeline supports only I420 for now.
+ DCHECK_EQ(format.pixel_format, media::PIXEL_FORMAT_I420);
if (state_ != VIDEO_CAPTURE_STATE_STARTED || suspended_) {
- Send(new VideoCaptureHostMsg_BufferReady(device_id_, buffer_id));
+ Send(new VideoCaptureHostMsg_BufferReady(
+ device_id_, buffer_id, std::vector<uint32>()));
return;
}
last_frame_format_ = format;
+ if (first_frame_timestamp_.is_null())
+ first_frame_timestamp_ = timestamp;
+
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT2(
+ "cast_perf_test", "OnBufferReceived",
+ TRACE_EVENT_SCOPE_THREAD,
+ "timestamp", timestamp.ToInternalValue(),
+ "time_delta", (timestamp - first_frame_timestamp_).ToInternalValue());
ClientBufferMap::iterator iter = client_buffers_.find(buffer_id);
DCHECK(iter != client_buffers_.end());
@@ -265,64 +241,104 @@ void VideoCaptureImpl::DoBufferReceivedOnCaptureThread(
reinterpret_cast<uint8*>(buffer->buffer->memory()),
buffer->buffer_size,
buffer->buffer->handle(),
- // TODO(sheu): convert VideoCaptureMessageFilter::Delegate to use
- // base::TimeTicks instead of base::Time. http://crbug.com/249215
- timestamp - base::Time::UnixEpoch(),
- media::BindToLoop(
- capture_message_loop_proxy_,
- base::Bind(
- &VideoCaptureImpl::DoClientBufferFinishedOnCaptureThread,
- weak_this_factory_.GetWeakPtr(),
- buffer_id,
- buffer)));
-
- for (ClientInfo::iterator it = clients_.begin(); it != clients_.end(); ++it)
- it->first->OnFrameReady(this, frame);
+ timestamp - first_frame_timestamp_,
+ media::BindToCurrentLoop(
+ base::Bind(&VideoCaptureImpl::OnClientBufferFinished,
+ weak_factory_.GetWeakPtr(),
+ buffer_id,
+ buffer,
+ std::vector<uint32>())));
+
+ for (ClientInfoMap::iterator it = clients_.begin(); it != clients_.end();
+ ++it) {
+ it->second.deliver_frame_cb.Run(frame, format, timestamp);
+ }
+}
+
+static void NullReadPixelsCB(const SkBitmap& bitmap) { NOTIMPLEMENTED(); }
+
+void VideoCaptureImpl::OnMailboxBufferReceived(
+ int buffer_id,
+ const gpu::MailboxHolder& mailbox_holder,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (state_ != VIDEO_CAPTURE_STATE_STARTED || suspended_) {
+ Send(new VideoCaptureHostMsg_BufferReady(
+ device_id_, buffer_id, std::vector<uint32>()));
+ return;
+ }
+
+ last_frame_format_ = format;
+ if (first_frame_timestamp_.is_null())
+ first_frame_timestamp_ = timestamp;
+
+ scoped_refptr<media::VideoFrame> frame = media::VideoFrame::WrapNativeTexture(
+ make_scoped_ptr(new gpu::MailboxHolder(mailbox_holder)),
+ media::BindToCurrentLoop(
+ base::Bind(&VideoCaptureImpl::OnClientBufferFinished,
+ weak_factory_.GetWeakPtr(),
+ buffer_id,
+ scoped_refptr<ClientBuffer>())),
+ last_frame_format_.frame_size,
+ gfx::Rect(last_frame_format_.frame_size),
+ last_frame_format_.frame_size,
+ timestamp - first_frame_timestamp_,
+ base::Bind(&NullReadPixelsCB));
+
+ for (ClientInfoMap::iterator it = clients_.begin(); it != clients_.end();
+ ++it) {
+ it->second.deliver_frame_cb.Run(frame, format, timestamp);
+ }
}
-void VideoCaptureImpl::DoClientBufferFinishedOnCaptureThread(
+void VideoCaptureImpl::OnClientBufferFinished(
int buffer_id,
- const scoped_refptr<ClientBuffer>& buffer) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
- Send(new VideoCaptureHostMsg_BufferReady(device_id_, buffer_id));
+ const scoped_refptr<ClientBuffer>& /* ignored_buffer */,
+ const std::vector<uint32>& release_sync_points) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ Send(new VideoCaptureHostMsg_BufferReady(
+ device_id_, buffer_id, release_sync_points));
}
-void VideoCaptureImpl::DoStateChangedOnCaptureThread(VideoCaptureState state) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+void VideoCaptureImpl::OnStateChanged(VideoCaptureState state) {
+ DCHECK(thread_checker_.CalledOnValidThread());
switch (state) {
case VIDEO_CAPTURE_STATE_STARTED:
+ // Camera has started in the browser process. Since we have already
+ // told all clients that we have started there's nothing to do.
break;
case VIDEO_CAPTURE_STATE_STOPPED:
state_ = VIDEO_CAPTURE_STATE_STOPPED;
DVLOG(1) << "OnStateChanged: stopped!, device_id = " << device_id_;
client_buffers_.clear();
- weak_this_factory_.InvalidateWeakPtrs();
+ weak_factory_.InvalidateWeakPtrs();
if (!clients_.empty() || !clients_pending_on_restart_.empty())
RestartCapture();
break;
case VIDEO_CAPTURE_STATE_PAUSED:
- for (ClientInfo::iterator it = clients_.begin();
+ for (ClientInfoMap::iterator it = clients_.begin();
it != clients_.end(); ++it) {
- it->first->OnPaused(this);
+ it->second.state_update_cb.Run(VIDEO_CAPTURE_STATE_PAUSED);
}
break;
case VIDEO_CAPTURE_STATE_ERROR:
DVLOG(1) << "OnStateChanged: error!, device_id = " << device_id_;
- for (ClientInfo::iterator it = clients_.begin();
+ for (ClientInfoMap::iterator it = clients_.begin();
it != clients_.end(); ++it) {
- // TODO(wjia): browser process would send error code.
- it->first->OnError(this, 1);
- it->first->OnRemoved(this);
+ it->second.state_update_cb.Run(VIDEO_CAPTURE_STATE_ERROR);
}
clients_.clear();
state_ = VIDEO_CAPTURE_STATE_ERROR;
break;
case VIDEO_CAPTURE_STATE_ENDED:
DVLOG(1) << "OnStateChanged: ended!, device_id = " << device_id_;
- for (ClientInfo::iterator it = clients_.begin();
+ for (ClientInfoMap::iterator it = clients_.begin();
it != clients_.end(); ++it) {
- it->first->OnRemoved(this);
+ // We'll only notify the client that the stream has stopped.
+ it->second.state_update_cb.Run(VIDEO_CAPTURE_STATE_STOPPED);
}
clients_.clear();
state_ = VIDEO_CAPTURE_STATE_ENDED;
@@ -332,29 +348,43 @@ void VideoCaptureImpl::DoStateChangedOnCaptureThread(VideoCaptureState state) {
}
}
-void VideoCaptureImpl::DoDelegateAddedOnCaptureThread(int32 device_id) {
- DVLOG(1) << "DoDelegateAdded: device_id " << device_id;
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+void VideoCaptureImpl::OnDeviceSupportedFormatsEnumerated(
+ const media::VideoCaptureFormats& supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (size_t i = 0; i < device_formats_cb_queue_.size(); ++i)
+ device_formats_cb_queue_[i].Run(supported_formats);
+ device_formats_cb_queue_.clear();
+}
+
+void VideoCaptureImpl::OnDeviceFormatsInUseReceived(
+ const media::VideoCaptureFormats& formats_in_use) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (size_t i = 0; i < device_formats_in_use_cb_queue_.size(); ++i)
+ device_formats_in_use_cb_queue_[i].Run(formats_in_use);
+ device_formats_in_use_cb_queue_.clear();
+}
+
+void VideoCaptureImpl::OnDelegateAdded(int32 device_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << "OnDelegateAdded: device_id " << device_id;
device_id_ = device_id;
- for (ClientInfo::iterator it = clients_pending_on_filter_.begin();
+ for (ClientInfoMap::iterator it = clients_pending_on_filter_.begin();
it != clients_pending_on_filter_.end(); ) {
- media::VideoCapture::EventHandler* handler = it->first;
- const media::VideoCaptureParams params = it->second;
+ int client_id = it->first;
+ VideoCaptureStateUpdateCB state_update_cb =
+ it->second.state_update_cb;
+ VideoCaptureDeliverFrameCB deliver_frame_cb =
+ it->second.deliver_frame_cb;
+ const media::VideoCaptureParams params = it->second.params;
clients_pending_on_filter_.erase(it++);
- StartCapture(handler, params);
+ StartCapture(client_id, params, state_update_cb,
+ deliver_frame_cb);
}
}
-void VideoCaptureImpl::DoSuspendCaptureOnCaptureThread(bool suspend) {
- DVLOG(1) << "DoSuspendCapture: suspend " << (suspend ? "yes" : "no");
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
-
- suspended_ = suspend;
-}
-
void VideoCaptureImpl::StopDevice() {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
if (state_ == VIDEO_CAPTURE_STATE_STARTED) {
state_ = VIDEO_CAPTURE_STATE_STOPPING;
@@ -364,22 +394,20 @@ void VideoCaptureImpl::StopDevice() {
}
void VideoCaptureImpl::RestartCapture() {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_EQ(state_, VIDEO_CAPTURE_STATE_STOPPED);
int width = 0;
int height = 0;
- for (ClientInfo::iterator it = clients_.begin();
+ clients_.insert(clients_pending_on_restart_.begin(),
+ clients_pending_on_restart_.end());
+ clients_pending_on_restart_.clear();
+ for (ClientInfoMap::iterator it = clients_.begin();
it != clients_.end(); ++it) {
- width = std::max(width, it->second.requested_format.frame_size.width());
- height = std::max(height, it->second.requested_format.frame_size.height());
- }
- for (ClientInfo::iterator it = clients_pending_on_restart_.begin();
- it != clients_pending_on_restart_.end(); ) {
- width = std::max(width, it->second.requested_format.frame_size.width());
- height = std::max(height, it->second.requested_format.frame_size.height());
- clients_[it->first] = it->second;
- clients_pending_on_restart_.erase(it++);
+ width = std::max(width,
+ it->second.params.requested_format.frame_size.width());
+ height = std::max(height,
+ it->second.params.requested_format.frame_size.height());
}
params_.requested_format.frame_size.SetSize(width, height);
DVLOG(1) << "RestartCapture, "
@@ -388,40 +416,25 @@ void VideoCaptureImpl::RestartCapture() {
}
void VideoCaptureImpl::StartCaptureInternal() {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(device_id_);
Send(new VideoCaptureHostMsg_Start(device_id_, session_id_, params_));
state_ = VIDEO_CAPTURE_STATE_STARTED;
}
-void VideoCaptureImpl::AddDelegateOnIOThread() {
- DCHECK(io_message_loop_proxy_->BelongsToCurrentThread());
- message_filter_->AddDelegate(this);
-}
-
-void VideoCaptureImpl::RemoveDelegateOnIOThread(base::Closure task) {
- DCHECK(io_message_loop_proxy_->BelongsToCurrentThread());
- message_filter_->RemoveDelegate(this);
- capture_message_loop_proxy_->PostTask(FROM_HERE, task);
-}
-
void VideoCaptureImpl::Send(IPC::Message* message) {
- io_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(base::IgnoreResult(&VideoCaptureMessageFilter::Send),
- message_filter_.get(), message));
+ DCHECK(thread_checker_.CalledOnValidThread());
+ message_filter_->Send(message);
}
-bool VideoCaptureImpl::RemoveClient(
- media::VideoCapture::EventHandler* handler,
- ClientInfo* clients) {
- DCHECK(capture_message_loop_proxy_->BelongsToCurrentThread());
+bool VideoCaptureImpl::RemoveClient(int client_id, ClientInfoMap* clients) {
+ DCHECK(thread_checker_.CalledOnValidThread());
bool found = false;
- ClientInfo::iterator it = clients->find(handler);
+ ClientInfoMap::iterator it = clients->find(client_id);
if (it != clients->end()) {
- handler->OnStopped(this);
- handler->OnRemoved(this);
+ it->second.state_update_cb.Run(VIDEO_CAPTURE_STATE_STOPPED);
clients->erase(it);
found = true;
}
diff --git a/chromium/content/renderer/media/video_capture_impl.h b/chromium/content/renderer/media/video_capture_impl.h
index 2215cef4336..160f0bf95c8 100644
--- a/chromium/content/renderer/media/video_capture_impl.h
+++ b/chromium/content/renderer/media/video_capture_impl.h
@@ -9,23 +9,13 @@
// VideoCaptureImpl is also a delegate of VideoCaptureMessageFilter which relays
// operation of a capture device to the browser process and receives responses
// from browser process.
-
-// The media::VideoCapture and VideoCaptureMessageFilter::Delegate are
-// asynchronous interfaces, which means callers can call those interfaces
-// from any threads without worrying about thread safety.
-// The |capture_message_loop_proxy_| is the working thread of VideoCaptureImpl.
-// All non-const members are accessed only on that working thread.
//
-// Implementation note: tasks are posted bound to Unretained(this) to both the
-// I/O and Capture threads and this is safe (even though the I/O thread is
-// scoped to the renderer process and the capture_message_loop_proxy_ thread is
-// scoped to the VideoCaptureImplManager) because VideoCaptureImplManager only
-// triggers deletion of its VideoCaptureImpl's by calling DeInit which detours
-// through the capture & I/O threads, so as long as nobody posts tasks after the
-// DeInit() call is made, it is guaranteed none of these Unretained posted tasks
-// will dangle after the delete goes through. The "as long as" is guaranteed by
-// clients of VideoCaptureImplManager not using devices after they've
-// RemoveDevice'd them.
+// VideoCaptureImpl is an IO thread only object. See the comments in
+// video_capture_impl_manager.cc for the lifetime of this object.
+// All methods must be called on the IO thread.
+//
+// This is an internal class used by VideoCaptureImplManager only. Do not access
+// this directly.
#ifndef CONTENT_RENDERER_MEDIA_VIDEO_CAPTURE_IMPL_H_
#define CONTENT_RENDERER_MEDIA_VIDEO_CAPTURE_IMPL_H_
@@ -34,120 +24,163 @@
#include <map>
#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
#include "content/common/media/video_capture.h"
+#include "content/public/renderer/media_stream_video_sink.h"
#include "content/renderer/media/video_capture_message_filter.h"
-#include "media/video/capture/video_capture.h"
#include "media/video/capture/video_capture_types.h"
namespace base {
class MessageLoopProxy;
-}
+} // namespace base
+
+namespace gpu {
+struct MailboxHolder;
+} // namespace gpu
+
+namespace media {
+class VideoFrame;
+} // namespace media
namespace content {
class CONTENT_EXPORT VideoCaptureImpl
- : public media::VideoCapture, public VideoCaptureMessageFilter::Delegate {
+ : public VideoCaptureMessageFilter::Delegate {
public:
- // media::VideoCapture interface.
- virtual void StartCapture(
- media::VideoCapture::EventHandler* handler,
- const media::VideoCaptureParams& params) OVERRIDE;
- virtual void StopCapture(media::VideoCapture::EventHandler* handler) OVERRIDE;
- virtual bool CaptureStarted() OVERRIDE;
- virtual int CaptureFrameRate() OVERRIDE;
+ virtual ~VideoCaptureImpl();
- // VideoCaptureMessageFilter::Delegate interface.
- virtual void OnBufferCreated(base::SharedMemoryHandle handle,
- int length,
- int buffer_id) OVERRIDE;
- virtual void OnBufferDestroyed(int buffer_id) OVERRIDE;
- virtual void OnBufferReceived(
- int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format) OVERRIDE;
- virtual void OnStateChanged(VideoCaptureState state) OVERRIDE;
- virtual void OnDelegateAdded(int32 device_id) OVERRIDE;
+ VideoCaptureImpl(media::VideoCaptureSessionId session_id,
+ VideoCaptureMessageFilter* filter);
+
+ // Start listening to IPC messages.
+ void Init();
+
+ // Stop listening to IPC messages.
+ void DeInit();
// Stop/resume delivering video frames to clients, based on flag |suspend|.
- virtual void SuspendCapture(bool suspend);
+ void SuspendCapture(bool suspend);
+
+ // Start capturing using the provided parameters.
+ // |client_id| must be unique to this object in the render process. It is
+ // used later to stop receiving video frames.
+ // |state_update_cb| will be called when state changes.
+ // |deliver_frame_cb| will be called when a frame is ready.
+ void StartCapture(
+ int client_id,
+ const media::VideoCaptureParams& params,
+ const VideoCaptureStateUpdateCB& state_update_cb,
+ const VideoCaptureDeliverFrameCB& deliver_frame_cb);
+
+ // Stop capturing. |client_id| is the identifier used to call StartCapture.
+ void StopCapture(int client_id);
+
+ // Get capturing formats supported by this device.
+ // |callback| will be invoked with the results.
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDeviceFormatsCB& callback);
+
+ // Get capturing formats currently in use by this device.
+ // |callback| will be invoked with the results.
+ void GetDeviceFormatsInUse(
+ const VideoCaptureDeviceFormatsCB& callback);
+
+ media::VideoCaptureSessionId session_id() const { return session_id_; }
private:
- friend class VideoCaptureImplManager;
friend class VideoCaptureImplTest;
friend class MockVideoCaptureImpl;
+ // Carries a shared memory for transferring video frames from browser to
+ // renderer.
class ClientBuffer;
- typedef std::map<media::VideoCapture::EventHandler*,
- media::VideoCaptureParams> ClientInfo;
- VideoCaptureImpl(media::VideoCaptureSessionId session_id,
- base::MessageLoopProxy* capture_message_loop_proxy,
- VideoCaptureMessageFilter* filter);
- virtual ~VideoCaptureImpl();
+ // Contains information for a video capture client. Including parameters
+ // for capturing and callbacks to the client.
+ struct ClientInfo {
+ ClientInfo();
+ ~ClientInfo();
+ media::VideoCaptureParams params;
+ VideoCaptureStateUpdateCB state_update_cb;
+ VideoCaptureDeliverFrameCB deliver_frame_cb;
+ };
+ typedef std::map<int, ClientInfo> ClientInfoMap;
+
+ // VideoCaptureMessageFilter::Delegate interface.
+ virtual void OnBufferCreated(base::SharedMemoryHandle handle,
+ int length,
+ int buffer_id) OVERRIDE;
+ virtual void OnBufferDestroyed(int buffer_id) OVERRIDE;
+ virtual void OnBufferReceived(int buffer_id,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks) OVERRIDE;
+ virtual void OnMailboxBufferReceived(int buffer_id,
+ const gpu::MailboxHolder& mailbox_holder,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) OVERRIDE;
+ virtual void OnStateChanged(VideoCaptureState state) OVERRIDE;
+ virtual void OnDeviceSupportedFormatsEnumerated(
+ const media::VideoCaptureFormats& supported_formats) OVERRIDE;
+ virtual void OnDeviceFormatsInUseReceived(
+ const media::VideoCaptureFormats& formats_in_use) OVERRIDE;
+ virtual void OnDelegateAdded(int32 device_id) OVERRIDE;
- void DoStartCaptureOnCaptureThread(
- media::VideoCapture::EventHandler* handler,
- const media::VideoCaptureParams& params);
- void DoStopCaptureOnCaptureThread(media::VideoCapture::EventHandler* handler);
- void DoBufferCreatedOnCaptureThread(base::SharedMemoryHandle handle,
- int length,
- int buffer_id);
- void DoBufferDestroyedOnCaptureThread(int buffer_id);
- void DoBufferReceivedOnCaptureThread(
- int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format);
- void DoClientBufferFinishedOnCaptureThread(
- int buffer_id,
- const scoped_refptr<ClientBuffer>& buffer);
- void DoStateChangedOnCaptureThread(VideoCaptureState state);
- void DoDelegateAddedOnCaptureThread(int32 device_id);
-
- void DoSuspendCaptureOnCaptureThread(bool suspend);
+ // Sends an IPC message to browser process when all clients are done with the
+ // buffer.
+ void OnClientBufferFinished(int buffer_id,
+ const scoped_refptr<ClientBuffer>& buffer,
+ const std::vector<uint32>& release_sync_points);
- void Init();
- void DeInit(base::Closure task);
- void DoDeInitOnCaptureThread(base::Closure task);
void StopDevice();
void RestartCapture();
void StartCaptureInternal();
- void AddDelegateOnIOThread();
- void RemoveDelegateOnIOThread(base::Closure task);
+
virtual void Send(IPC::Message* message);
// Helpers.
- bool RemoveClient(media::VideoCapture::EventHandler* handler,
- ClientInfo* clients);
+ bool RemoveClient(int client_id, ClientInfoMap* clients);
const scoped_refptr<VideoCaptureMessageFilter> message_filter_;
- const scoped_refptr<base::MessageLoopProxy> capture_message_loop_proxy_;
- const scoped_refptr<base::MessageLoopProxy> io_message_loop_proxy_;
int device_id_;
const int session_id_;
+ // Vector of callbacks to be notified of device format enumerations, used only
+ // on IO Thread.
+ std::vector<VideoCaptureDeviceFormatsCB> device_formats_cb_queue_;
+ // Vector of callbacks to be notified of a device's in use capture format(s),
+ // used only on IO Thread.
+ std::vector<VideoCaptureDeviceFormatsCB> device_formats_in_use_cb_queue_;
+
// Buffers available for sending to the client.
typedef std::map<int32, scoped_refptr<ClientBuffer> > ClientBufferMap;
ClientBufferMap client_buffers_;
- ClientInfo clients_;
- ClientInfo clients_pending_on_filter_;
- ClientInfo clients_pending_on_restart_;
+ ClientInfoMap clients_;
+ ClientInfoMap clients_pending_on_filter_;
+ ClientInfoMap clients_pending_on_restart_;
// Member params_ represents the video format requested by the
- // client to this class via DoStartCaptureOnCaptureThread.
+ // client to this class via StartCapture().
media::VideoCaptureParams params_;
// The device's video capture format sent from browser process side.
media::VideoCaptureFormat last_frame_format_;
+ // The device's first captured frame timestamp sent from browser process side.
+ base::TimeTicks first_frame_timestamp_;
+
bool suspended_;
VideoCaptureState state_;
+ // |weak_factory_| and |thread_checker_| are bound to the IO thread.
+ base::ThreadChecker thread_checker_;
+
// WeakPtrFactory pointing back to |this| object, for use with
// media::VideoFrames constructed in OnBufferReceived() from buffers cached
// in |client_buffers_|.
- base::WeakPtrFactory<VideoCaptureImpl> weak_this_factory_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<VideoCaptureImpl> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VideoCaptureImpl);
};
diff --git a/chromium/content/renderer/media/video_capture_impl_manager.cc b/chromium/content/renderer/media/video_capture_impl_manager.cc
index 672fbd403c3..686e6ad74c1 100644
--- a/chromium/content/renderer/media/video_capture_impl_manager.cc
+++ b/chromium/content/renderer/media/video_capture_impl_manager.cc
@@ -1,88 +1,190 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Implementation notes about interactions with VideoCaptureImpl.
+//
+// How is VideoCaptureImpl used:
+//
+// VideoCaptureImpl is an IO thread object while VideoCaptureImplManager
+// lives only on the render thread. It is only possible to access an
+// object of VideoCaptureImpl via a task on the IO thread.
+//
+// How is VideoCaptureImpl deleted:
+//
+// A task is posted to the IO thread to delete a VideoCaptureImpl.
+// Immediately after that the pointer to it is dropped. This means no
+// access to this VideoCaptureImpl object is possible on the render
+// thread. Also note that VideoCaptureImpl does not post task to itself.
+//
+// The use of Unretained:
+//
+// We make sure deletion is the last task on the IO thread for a
+// VideoCaptureImpl object. This allows the use of Unretained() binding.
#include "content/renderer/media/video_capture_impl_manager.h"
#include "base/bind.h"
-#include "base/stl_util.h"
+#include "base/bind_helpers.h"
+#include "content/child/child_process.h"
#include "content/renderer/media/video_capture_impl.h"
#include "content/renderer/media/video_capture_message_filter.h"
+#include "media/base/bind_to_current_loop.h"
namespace content {
VideoCaptureImplManager::VideoCaptureImplManager()
- : thread_("VC manager") {
- thread_.Start();
- message_loop_proxy_ = thread_.message_loop_proxy();
- filter_ = new VideoCaptureMessageFilter();
+ : next_client_id_(0),
+ filter_(new VideoCaptureMessageFilter()),
+ weak_factory_(this) {
}
-media::VideoCapture* VideoCaptureImplManager::AddDevice(
- media::VideoCaptureSessionId id,
- media::VideoCapture::EventHandler* handler) {
- DCHECK(handler);
+VideoCaptureImplManager::~VideoCaptureImplManager() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (devices_.empty())
+ return;
+ // Forcibly release all video capture resources.
+ for (VideoCaptureDeviceMap::iterator it = devices_.begin();
+ it != devices_.end(); ++it) {
+ VideoCaptureImpl* impl = it->second.second;
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::DeInit,
+ base::Unretained(impl)));
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<VideoCaptureImpl>,
+ base::Unretained(impl)));
+ }
+ devices_.clear();
+}
+
+base::Closure VideoCaptureImplManager::UseDevice(
+ media::VideoCaptureSessionId id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
- base::AutoLock auto_lock(lock_);
- Devices::iterator it = devices_.find(id);
+ VideoCaptureImpl* impl = NULL;
+ VideoCaptureDeviceMap::iterator it = devices_.find(id);
if (it == devices_.end()) {
- VideoCaptureImpl* vc =
- new VideoCaptureImpl(id, message_loop_proxy_.get(), filter_.get());
- devices_[id] = new Device(vc, handler);
- vc->Init();
- return vc;
+ impl = CreateVideoCaptureImplForTesting(id, filter_.get());
+ if (!impl)
+ impl = new VideoCaptureImpl(id, filter_.get());
+ devices_[id] = std::make_pair(1, impl);
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::Init,
+ base::Unretained(impl)));
+ } else {
+ ++it->second.first;
}
-
- devices_[id]->clients.push_front(handler);
- return it->second->vc;
+ return base::Bind(&VideoCaptureImplManager::UnrefDevice,
+ weak_factory_.GetWeakPtr(), id);
}
-void VideoCaptureImplManager::SuspendDevices(bool suspend) {
- base::AutoLock auto_lock(lock_);
- for (Devices::iterator it = devices_.begin(); it != devices_.end(); ++it)
- it->second->vc->SuspendCapture(suspend);
+base::Closure VideoCaptureImplManager::StartCapture(
+ media::VideoCaptureSessionId id,
+ const media::VideoCaptureParams& params,
+ const VideoCaptureStateUpdateCB& state_update_cb,
+ const VideoCaptureDeliverFrameCB& deliver_frame_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ VideoCaptureDeviceMap::iterator it = devices_.find(id);
+ DCHECK(it != devices_.end());
+ VideoCaptureImpl* impl = it->second.second;
+
+ // This ID is used to identify a client of VideoCaptureImpl.
+ const int client_id = ++next_client_id_;
+
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::StartCapture,
+ base::Unretained(impl),
+ client_id,
+ params,
+ state_update_cb,
+ deliver_frame_cb));
+ return base::Bind(&VideoCaptureImplManager::StopCapture,
+ weak_factory_.GetWeakPtr(),
+ client_id, id);
}
-void VideoCaptureImplManager::RemoveDevice(
+void VideoCaptureImplManager::GetDeviceSupportedFormats(
media::VideoCaptureSessionId id,
- media::VideoCapture::EventHandler* handler) {
- DCHECK(handler);
-
- base::AutoLock auto_lock(lock_);
- Devices::iterator it = devices_.find(id);
- if (it == devices_.end())
- return;
-
- size_t size = it->second->clients.size();
- it->second->clients.remove(handler);
-
- if (size == it->second->clients.size() || size > 1)
- return;
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ VideoCaptureDeviceMap::iterator it = devices_.find(id);
+ DCHECK(it != devices_.end());
+ VideoCaptureImpl* impl = it->second.second;
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::GetDeviceSupportedFormats,
+ base::Unretained(impl), callback));
+}
- devices_[id]->vc->DeInit(base::Bind(&VideoCaptureImplManager::FreeDevice,
- this, devices_[id]->vc));
- delete devices_[id];
- devices_.erase(id);
+void VideoCaptureImplManager::GetDeviceFormatsInUse(
+ media::VideoCaptureSessionId id,
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ VideoCaptureDeviceMap::iterator it = devices_.find(id);
+ DCHECK(it != devices_.end());
+ VideoCaptureImpl* impl = it->second.second;
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::GetDeviceFormatsInUse,
+ base::Unretained(impl), callback));
}
-void VideoCaptureImplManager::FreeDevice(VideoCaptureImpl* vc) {
- delete vc;
+VideoCaptureImpl*
+VideoCaptureImplManager::CreateVideoCaptureImplForTesting(
+ media::VideoCaptureSessionId id,
+ VideoCaptureMessageFilter* filter) const {
+ return NULL;
}
-VideoCaptureImplManager::~VideoCaptureImplManager() {
- thread_.Stop();
- // TODO(wjia): uncomment the line below after collecting enough info for
- // crbug.com/152418.
- // STLDeleteContainerPairSecondPointers(devices_.begin(), devices_.end());
+void VideoCaptureImplManager::StopCapture(
+ int client_id, media::VideoCaptureSessionId id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ VideoCaptureDeviceMap::iterator it = devices_.find(id);
+ DCHECK(it != devices_.end());
+ VideoCaptureImpl* impl = it->second.second;
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::StopCapture,
+ base::Unretained(impl), client_id));
}
-VideoCaptureImplManager::Device::Device(
- VideoCaptureImpl* device,
- media::VideoCapture::EventHandler* handler)
- : vc(device) {
- clients.push_front(handler);
+void VideoCaptureImplManager::UnrefDevice(
+ media::VideoCaptureSessionId id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ VideoCaptureDeviceMap::iterator it = devices_.find(id);
+ DCHECK(it != devices_.end());
+ VideoCaptureImpl* impl = it->second.second;
+
+ // Unref and destroy on the IO thread if there's no more client.
+ DCHECK(it->second.first);
+ --it->second.first;
+ if (!it->second.first) {
+ devices_.erase(id);
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::DeInit,
+ base::Unretained(impl)));
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<VideoCaptureImpl>,
+ base::Unretained(impl)));
+ }
}
-VideoCaptureImplManager::Device::~Device() {}
+void VideoCaptureImplManager::SuspendDevices(bool suspend) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (VideoCaptureDeviceMap::iterator it = devices_.begin();
+ it != devices_.end(); ++it) {
+ VideoCaptureImpl* impl = it->second.second;
+ ChildProcess::current()->io_message_loop_proxy()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureImpl::SuspendCapture,
+ base::Unretained(impl), suspend));
+ }
+}
} // namespace content
diff --git a/chromium/content/renderer/media/video_capture_impl_manager.h b/chromium/content/renderer/media/video_capture_impl_manager.h
index e9422015b02..28b804c8743 100644
--- a/chromium/content/renderer/media/video_capture_impl_manager.h
+++ b/chromium/content/renderer/media/video_capture_impl_manager.h
@@ -2,77 +2,128 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// VideoCaptureImplManager manages video capture devices in renderer process.
-// The video capture clients use AddDevice() to get a pointer to
-// video capture device. VideoCaputreImplManager supports multiple clients
-// accessing same device.
+// TODO(hclam): This class should be renamed to VideoCaptureService.
+
+// This class provides access to a video capture device in the browser
+// process through IPC. The main function is to deliver video frames
+// to a client.
+//
+// THREADING
+//
+// VideoCaptureImplManager lives only on the render thread. All methods
+// must be called on this thread.
+//
+// VideoFrames are delivered on the IO thread. Callbacks provided by
+// a client are also called on the IO thread.
#ifndef CONTENT_RENDERER_MEDIA_VIDEO_CAPTURE_IMPL_MANAGER_H_
#define CONTENT_RENDERER_MEDIA_VIDEO_CAPTURE_IMPL_MANAGER_H_
-#include <list>
#include <map>
+#include "base/callback.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
-#include "base/threading/thread.h"
#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
-#include "media/video/capture/video_capture.h"
+#include "content/common/media/video_capture.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "media/video/capture/video_capture_types.h"
namespace content {
class VideoCaptureImpl;
class VideoCaptureMessageFilter;
-class CONTENT_EXPORT VideoCaptureImplManager
- : public base::RefCountedThreadSafe<VideoCaptureImplManager> {
+class CONTENT_EXPORT VideoCaptureImplManager {
public:
VideoCaptureImplManager();
+ virtual ~VideoCaptureImplManager();
- // Called by video capture client |handler| to add device referenced
- // by |id| to VideoCaptureImplManager's list of opened device list.
- // A pointer to VideoCapture is returned to client so that client can
- // operate on that pointer, such as StartCaptrue, StopCapture.
- virtual media::VideoCapture* AddDevice(
+ // Open a device associated with the session ID.
+ // This method must be called before any other methods with the same ID
+ // are used.
+ // Returns a callback that should be used to release the acquired
+ // resources.
+ base::Closure UseDevice(media::VideoCaptureSessionId id);
+
+ // Start receiving video frames for the given session ID.
+ //
+ // |state_update_cb| will be called on the IO thread when capturing
+ // state changes.
+ // States will be one of the following four:
+ // * VIDEO_CAPTURE_STATE_STARTED
+ // * VIDEO_CAPTURE_STATE_STOPPED
+ // * VIDEO_CAPTURE_STATE_PAUSED
+ // * VIDEO_CAPTURE_STATE_ERROR
+ //
+ // |deliver_frame_cb| will be called on the IO thread when a video
+ // frame is ready.
+ //
+ // Returns a callback that is used to stop capturing. Note that stopping
+ // video capture is not synchronous. Client should handle the case where
+ // callbacks are called after capturing is instructed to stop, typically
+ // by binding the passed callbacks on a WeakPtr.
+ base::Closure StartCapture(
media::VideoCaptureSessionId id,
- media::VideoCapture::EventHandler* handler);
+ const media::VideoCaptureParams& params,
+ const VideoCaptureStateUpdateCB& state_update_cb,
+ const VideoCaptureDeliverFrameCB& deliver_frame_cb);
- // Called by video capture client |handler| to remove device referenced
- // by |id| from VideoCaptureImplManager's list of opened device list.
- virtual void RemoveDevice(media::VideoCaptureSessionId id,
- media::VideoCapture::EventHandler* handler);
+ // Get the formats supported by the device for the given session
+ // ID. |callback| will be called on the IO thread.
+ void GetDeviceSupportedFormats(
+ media::VideoCaptureSessionId id,
+ const VideoCaptureDeviceFormatsCB& callback);
+
+ // Get the formats currently in use for the given session ID.
+ // |callback| will be called on the IO thread.
+ void GetDeviceFormatsInUse(
+ media::VideoCaptureSessionId id,
+ const VideoCaptureDeviceFormatsCB& callback);
// Make all existing VideoCaptureImpl instances stop/resume delivering
// video frames to their clients, depends on flag |suspend|.
- virtual void SuspendDevices(bool suspend);
+ void SuspendDevices(bool suspend);
VideoCaptureMessageFilter* video_capture_message_filter() const {
return filter_.get();
}
protected:
- virtual ~VideoCaptureImplManager();
+ virtual VideoCaptureImpl* CreateVideoCaptureImplForTesting(
+ media::VideoCaptureSessionId id,
+ VideoCaptureMessageFilter* filter) const;
private:
- friend class base::RefCountedThreadSafe<VideoCaptureImplManager>;
-
- struct Device {
- Device(VideoCaptureImpl* device,
- media::VideoCapture::EventHandler* handler);
- ~Device();
+ void StopCapture(int client_id, media::VideoCaptureSessionId id);
+ void UnrefDevice(media::VideoCaptureSessionId id);
+
+ // The int is used to count clients of the corresponding VideoCaptureImpl.
+ // VideoCaptureImpl objects are owned by this object. But they are
+ // destroyed on the IO thread. These are raw pointers because we destroy
+ // them manually.
+ typedef std::map<media::VideoCaptureSessionId,
+ std::pair<int, VideoCaptureImpl*> >
+ VideoCaptureDeviceMap;
+ VideoCaptureDeviceMap devices_;
+
+ // This is an internal ID for identifying clients of VideoCaptureImpl.
+ // The ID is global for the render process.
+ int next_client_id_;
- VideoCaptureImpl* vc;
- std::list<media::VideoCapture::EventHandler*> clients;
- };
+ scoped_refptr<VideoCaptureMessageFilter> filter_;
- void FreeDevice(VideoCaptureImpl* vc);
+ // Bound to the render thread.
+ base::ThreadChecker thread_checker_;
- typedef std::map<media::VideoCaptureSessionId, Device*> Devices;
- Devices devices_;
- base::Lock lock_;
- scoped_refptr<VideoCaptureMessageFilter> filter_;
- base::Thread thread_;
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
+ // Bound to the render thread.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<VideoCaptureImplManager> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VideoCaptureImplManager);
};
diff --git a/chromium/content/renderer/media/video_capture_impl_manager_unittest.cc b/chromium/content/renderer/media/video_capture_impl_manager_unittest.cc
new file mode 100644
index 00000000000..7ab67a45632
--- /dev/null
+++ b/chromium/content/renderer/media/video_capture_impl_manager_unittest.cc
@@ -0,0 +1,177 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/video_capture_impl.h"
+#include "content/renderer/media/video_capture_impl_manager.h"
+#include "content/renderer/media/video_capture_message_filter.h"
+#include "media/base/bind_to_current_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SaveArg;
+using media::BindToCurrentLoop;
+
+namespace content {
+
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
+class MockVideoCaptureImpl : public VideoCaptureImpl {
+ public:
+ MockVideoCaptureImpl(media::VideoCaptureSessionId session_id,
+ VideoCaptureMessageFilter* filter,
+ base::Closure destruct_callback)
+ : VideoCaptureImpl(session_id, filter),
+ destruct_callback_(destruct_callback) {
+ }
+
+ virtual ~MockVideoCaptureImpl() {
+ destruct_callback_.Run();
+ }
+
+ private:
+ base::Closure destruct_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockVideoCaptureImpl);
+};
+
+class MockVideoCaptureImplManager : public VideoCaptureImplManager {
+ public:
+ explicit MockVideoCaptureImplManager(
+ base::Closure destruct_video_capture_callback)
+ : destruct_video_capture_callback_(
+ destruct_video_capture_callback) {}
+ virtual ~MockVideoCaptureImplManager() {}
+
+ protected:
+ virtual VideoCaptureImpl* CreateVideoCaptureImplForTesting(
+ media::VideoCaptureSessionId id,
+ VideoCaptureMessageFilter* filter) const OVERRIDE {
+ return new MockVideoCaptureImpl(id,
+ filter,
+ destruct_video_capture_callback_);
+ }
+
+ private:
+ base::Closure destruct_video_capture_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockVideoCaptureImplManager);
+};
+
+class VideoCaptureImplManagerTest : public ::testing::Test {
+ public:
+ VideoCaptureImplManagerTest()
+ : manager_(new MockVideoCaptureImplManager(
+ BindToCurrentLoop(cleanup_run_loop_.QuitClosure()))) {
+ params_.requested_format = media::VideoCaptureFormat(
+ gfx::Size(176, 144), 30, media::PIXEL_FORMAT_I420);
+ child_process_.reset(new ChildProcess());
+ }
+
+ void FakeChannelSetup() {
+ scoped_refptr<base::MessageLoopProxy> loop =
+ child_process_->io_message_loop_proxy();
+ if (!loop->BelongsToCurrentThread()) {
+ loop->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &VideoCaptureImplManagerTest::FakeChannelSetup,
+ base::Unretained(this)));
+ return;
+ }
+ manager_->video_capture_message_filter()->OnFilterAdded(NULL);
+ }
+
+ protected:
+ MOCK_METHOD3(OnFrameReady,
+ void(const scoped_refptr<media::VideoFrame>&,
+ const media::VideoCaptureFormat&,
+ const base::TimeTicks& estimated_capture_time));
+ MOCK_METHOD0(OnStarted, void());
+ MOCK_METHOD0(OnStopped, void());
+
+ void OnStateUpdate(VideoCaptureState state) {
+ switch (state) {
+ case VIDEO_CAPTURE_STATE_STARTED:
+ OnStarted();
+ break;
+ case VIDEO_CAPTURE_STATE_STOPPED:
+ OnStopped();
+ break;
+ default:
+ NOTREACHED();
+ }
+ }
+
+ base::Closure StartCapture(const media::VideoCaptureParams& params) {
+ return manager_->StartCapture(
+ 0, params,
+ base::Bind(&VideoCaptureImplManagerTest::OnStateUpdate,
+ base::Unretained(this)),
+ base::Bind(&VideoCaptureImplManagerTest::OnFrameReady,
+ base::Unretained(this)));
+ }
+
+ base::MessageLoop message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
+ media::VideoCaptureParams params_;
+ base::RunLoop cleanup_run_loop_;
+ scoped_ptr<MockVideoCaptureImplManager> manager_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureImplManagerTest);
+};
+
+// Multiple clients with the same session id. There is only one
+// media::VideoCapture object.
+TEST_F(VideoCaptureImplManagerTest, MultipleClients) {
+ base::Closure release_cb1 = manager_->UseDevice(0);
+ base::Closure release_cb2 = manager_->UseDevice(0);
+ base::Closure stop_cb1, stop_cb2;
+ {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = BindToCurrentLoop(
+ run_loop.QuitClosure());
+ EXPECT_CALL(*this, OnStarted()).WillOnce(
+ RunClosure(quit_closure));
+ EXPECT_CALL(*this, OnStarted()).RetiresOnSaturation();
+ stop_cb1 = StartCapture(params_);
+ stop_cb2 = StartCapture(params_);
+ FakeChannelSetup();
+ run_loop.Run();
+ }
+
+ {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = BindToCurrentLoop(
+ run_loop.QuitClosure());
+ EXPECT_CALL(*this, OnStopped()).WillOnce(
+ RunClosure(quit_closure));
+ EXPECT_CALL(*this, OnStopped()).RetiresOnSaturation();
+ stop_cb1.Run();
+ stop_cb2.Run();
+ run_loop.Run();
+ }
+
+ release_cb1.Run();
+ release_cb2.Run();
+ cleanup_run_loop_.Run();
+}
+
+TEST_F(VideoCaptureImplManagerTest, NoLeak) {
+ manager_->UseDevice(0).Reset();
+ manager_.reset();
+ cleanup_run_loop_.Run();
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_capture_impl_unittest.cc b/chromium/content/renderer/media/video_capture_impl_unittest.cc
index 713b3a06d4f..19e481d92aa 100644
--- a/chromium/content/renderer/media/video_capture_impl_unittest.cc
+++ b/chromium/content/renderer/media/video_capture_impl_unittest.cc
@@ -6,12 +6,15 @@
#include "content/child/child_process.h"
#include "content/common/media/video_capture_messages.h"
#include "content/renderer/media/video_capture_impl.h"
+#include "media/base/bind_to_current_loop.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
using ::testing::AtLeast;
+using ::testing::InvokeWithoutArgs;
using ::testing::Return;
+using ::testing::SaveArg;
namespace content {
@@ -29,36 +32,14 @@ class MockVideoCaptureMessageFilter : public VideoCaptureMessageFilter {
DISALLOW_COPY_AND_ASSIGN(MockVideoCaptureMessageFilter);
};
-class MockVideoCaptureClient : public media::VideoCapture::EventHandler {
- public:
- MockVideoCaptureClient() {}
- virtual ~MockVideoCaptureClient() {}
-
- // EventHandler implementation.
- MOCK_METHOD1(OnStarted, void(media::VideoCapture* capture));
- MOCK_METHOD1(OnStopped, void(media::VideoCapture* capture));
- MOCK_METHOD1(OnPaused, void(media::VideoCapture* capture));
- MOCK_METHOD2(OnError, void(media::VideoCapture* capture, int error_code));
- MOCK_METHOD1(OnRemoved, void(media::VideoCapture* capture));
- MOCK_METHOD2(OnFrameReady,
- void(media::VideoCapture* capture,
- const scoped_refptr<media::VideoFrame>& frame));
- MOCK_METHOD2(OnDeviceInfoReceived,
- void(media::VideoCapture* capture,
- const media::VideoCaptureFormat& device_info));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockVideoCaptureClient);
-};
-
class VideoCaptureImplTest : public ::testing::Test {
public:
class MockVideoCaptureImpl : public VideoCaptureImpl {
public:
MockVideoCaptureImpl(const media::VideoCaptureSessionId id,
- scoped_refptr<base::MessageLoopProxy> ml_proxy,
VideoCaptureMessageFilter* filter)
- : VideoCaptureImpl(id, ml_proxy.get(), filter) {}
+ : VideoCaptureImpl(id, filter) {
+ }
virtual ~MockVideoCaptureImpl() {}
// Override Send() to mimic device to send events.
@@ -74,6 +55,10 @@ class VideoCaptureImplTest : public ::testing::Test {
IPC_MESSAGE_HANDLER(VideoCaptureHostMsg_Stop, DeviceStopCapture)
IPC_MESSAGE_HANDLER(VideoCaptureHostMsg_BufferReady,
DeviceReceiveEmptyBuffer)
+ IPC_MESSAGE_HANDLER(VideoCaptureHostMsg_GetDeviceSupportedFormats,
+ DeviceGetSupportedFormats)
+ IPC_MESSAGE_HANDLER(VideoCaptureHostMsg_GetDeviceFormatsInUse,
+ DeviceGetFormatsInUse)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
EXPECT_TRUE(handled);
@@ -84,6 +69,7 @@ class VideoCaptureImplTest : public ::testing::Test {
media::VideoCaptureSessionId session_id,
const media::VideoCaptureParams& params) {
OnStateChanged(VIDEO_CAPTURE_STATE_STARTED);
+ capture_params_ = params;
}
void DevicePauseCapture(int device_id) {}
@@ -92,7 +78,33 @@ class VideoCaptureImplTest : public ::testing::Test {
OnStateChanged(VIDEO_CAPTURE_STATE_STOPPED);
}
- void DeviceReceiveEmptyBuffer(int device_id, int buffer_id) {}
+ void DeviceReceiveEmptyBuffer(int device_id,
+ int buffer_id,
+ const std::vector<uint32>& sync_points) {}
+
+ void DeviceGetSupportedFormats(int device_id,
+ media::VideoCaptureSessionId session_id) {
+ // When the mock message filter receives a request for the device's
+ // supported formats, it replies immediately with an empty format list.
+ OnDeviceSupportedFormatsEnumerated(
+ media::VideoCaptureFormats());
+ }
+
+ void DeviceGetFormatsInUse(int device_id,
+ media::VideoCaptureSessionId session_id) {
+ OnDeviceFormatsInUseReceived(media::VideoCaptureFormats());
+ }
+
+ void ReceiveStateChangeMessage(VideoCaptureState state) {
+ OnStateChanged(state);
+ }
+
+ const media::VideoCaptureParams& capture_params() const {
+ return capture_params_;
+ }
+
+ private:
+ media::VideoCaptureParams capture_params_;
};
VideoCaptureImplTest() {
@@ -102,30 +114,74 @@ class VideoCaptureImplTest : public ::testing::Test {
params_large_.requested_format = media::VideoCaptureFormat(
gfx::Size(320, 240), 30, media::PIXEL_FORMAT_I420);
- message_loop_.reset(new base::MessageLoop(base::MessageLoop::TYPE_IO));
- message_loop_proxy_ = base::MessageLoopProxy::current().get();
child_process_.reset(new ChildProcess());
message_filter_ = new MockVideoCaptureMessageFilter;
session_id_ = 1;
- video_capture_impl_ = new MockVideoCaptureImpl(
- session_id_, message_loop_proxy_, message_filter_.get());
+ video_capture_impl_.reset(new MockVideoCaptureImpl(
+ session_id_, message_filter_.get()));
video_capture_impl_->device_id_ = 2;
}
virtual ~VideoCaptureImplTest() {
- delete video_capture_impl_;
}
protected:
- scoped_ptr<base::MessageLoop> message_loop_;
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
+ MOCK_METHOD3(OnFrameReady,
+ void(const scoped_refptr<media::VideoFrame>&,
+ const media::VideoCaptureFormat&,
+ const base::TimeTicks&));
+ MOCK_METHOD1(OnStateUpdate, void(VideoCaptureState));
+ MOCK_METHOD1(OnDeviceFormatsInUse,
+ void(const media::VideoCaptureFormats&));
+ MOCK_METHOD1(OnDeviceSupportedFormats,
+ void(const media::VideoCaptureFormats&));
+
+ void Init() {
+ video_capture_impl_->Init();
+ }
+
+ void StartCapture(int client_id,
+ const media::VideoCaptureParams& params) {
+ video_capture_impl_->StartCapture(
+ client_id, params,
+ base::Bind(&VideoCaptureImplTest::OnStateUpdate,
+ base::Unretained(this)),
+ base::Bind(&VideoCaptureImplTest::OnFrameReady,
+ base::Unretained(this)));
+ }
+
+ void StopCapture(int client_id) {
+ video_capture_impl_->StopCapture(client_id);
+ }
+
+ void DeInit() {
+ video_capture_impl_->DeInit();
+ }
+
+ void GetDeviceSupportedFormats() {
+ const base::Callback<void(const media::VideoCaptureFormats&)>
+ callback = base::Bind(
+ &VideoCaptureImplTest::OnDeviceSupportedFormats,
+ base::Unretained(this));
+ video_capture_impl_->GetDeviceSupportedFormats(callback);
+ }
+
+ void GetDeviceFormatsInUse() {
+ const base::Callback<void(const media::VideoCaptureFormats&)>
+ callback = base::Bind(
+ &VideoCaptureImplTest::OnDeviceFormatsInUse,
+ base::Unretained(this));
+ video_capture_impl_->GetDeviceFormatsInUse(callback);
+ }
+
+ base::MessageLoop message_loop_;
scoped_ptr<ChildProcess> child_process_;
scoped_refptr<MockVideoCaptureMessageFilter> message_filter_;
media::VideoCaptureSessionId session_id_;
- MockVideoCaptureImpl* video_capture_impl_;
+ scoped_ptr<MockVideoCaptureImpl> video_capture_impl_;
media::VideoCaptureParams params_small_;
media::VideoCaptureParams params_large_;
@@ -135,141 +191,123 @@ class VideoCaptureImplTest : public ::testing::Test {
TEST_F(VideoCaptureImplTest, Simple) {
// Execute SetCapture() and StopCapture() for one client.
- scoped_ptr<MockVideoCaptureClient> client(new MockVideoCaptureClient);
-
- EXPECT_CALL(*client, OnStarted(_))
- .WillOnce(Return());
-
- video_capture_impl_->StartCapture(client.get(), params_small_);
- message_loop_->RunUntilIdle();
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED));
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STOPPED));
- EXPECT_CALL(*client, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client, OnRemoved(_))
- .WillOnce(Return());
-
- video_capture_impl_->StopCapture(client.get());
- message_loop_->RunUntilIdle();
+ Init();
+ StartCapture(0, params_small_);
+ StopCapture(0);
+ DeInit();
}
TEST_F(VideoCaptureImplTest, TwoClientsInSequence) {
- // Execute SetCapture() and StopCapture() for 2 clients in sequence.
- scoped_ptr<MockVideoCaptureClient> client(new MockVideoCaptureClient);
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED)).Times(2);
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STOPPED)).Times(2);
+
+ Init();
+ StartCapture(0, params_small_);
+ StopCapture(0);
+ StartCapture(1, params_small_);
+ StopCapture(1);
+ DeInit();
+}
- EXPECT_CALL(*client, OnStarted(_))
- .WillOnce(Return());
+TEST_F(VideoCaptureImplTest, LargeAndSmall) {
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED)).Times(2);
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STOPPED)).Times(2);
+
+ Init();
+ StartCapture(0, params_large_);
+ StopCapture(0);
+ StartCapture(1, params_small_);
+ StopCapture(1);
+ DeInit();
+}
- video_capture_impl_->StartCapture(client.get(), params_small_);
- message_loop_->RunUntilIdle();
+TEST_F(VideoCaptureImplTest, SmallAndLarge) {
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED)).Times(2);
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STOPPED)).Times(2);
+
+ Init();
+ StartCapture(0, params_small_);
+ StopCapture(0);
+ StartCapture(1, params_large_);
+ StopCapture(1);
+ DeInit();
+}
- EXPECT_CALL(*client, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client, OnRemoved(_))
- .WillOnce(Return());
+// Check that a request to GetDeviceSupportedFormats() ends up eventually in the
+// provided callback.
+TEST_F(VideoCaptureImplTest, GetDeviceFormats) {
+ EXPECT_CALL(*this, OnDeviceSupportedFormats(_));
- video_capture_impl_->StopCapture(client.get());
- message_loop_->RunUntilIdle();
+ Init();
+ GetDeviceSupportedFormats();
+ DeInit();
+}
- EXPECT_CALL(*client, OnStarted(_))
- .WillOnce(Return());
+// Check that two requests to GetDeviceSupportedFormats() end up eventually
+// calling the provided callbacks.
+TEST_F(VideoCaptureImplTest, TwoClientsGetDeviceFormats) {
+ EXPECT_CALL(*this, OnDeviceSupportedFormats(_)).Times(2);
- video_capture_impl_->StartCapture(client.get(), params_small_);
- message_loop_->RunUntilIdle();
+ Init();
+ GetDeviceSupportedFormats();
+ GetDeviceSupportedFormats();
+ DeInit();
+}
- EXPECT_CALL(*client, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client, OnRemoved(_))
- .WillOnce(Return());
+// Check that a request to GetDeviceFormatsInUse() ends up eventually in the
+// provided callback.
+TEST_F(VideoCaptureImplTest, GetDeviceFormatsInUse) {
+ EXPECT_CALL(*this, OnDeviceFormatsInUse(_));
- video_capture_impl_->StopCapture(client.get());
- message_loop_->RunUntilIdle();
+ Init();
+ GetDeviceFormatsInUse();
+ DeInit();
}
-TEST_F(VideoCaptureImplTest, LargeAndSmall) {
- // Execute SetCapture() and StopCapture() for 2 clients simultaneously.
- // The large client starts first and stops first.
- scoped_ptr<MockVideoCaptureClient> client_small(new MockVideoCaptureClient);
- scoped_ptr<MockVideoCaptureClient> client_large(new MockVideoCaptureClient);
-
- EXPECT_CALL(*client_large, OnStarted(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_small, OnStarted(_))
- .WillOnce(Return());
-
- video_capture_impl_->StartCapture(client_large.get(), params_large_);
- video_capture_impl_->StartCapture(client_small.get(), params_small_);
- message_loop_->RunUntilIdle();
-
- EXPECT_CALL(*client_large, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_large, OnRemoved(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_small, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_small, OnRemoved(_))
- .WillOnce(Return());
-
- video_capture_impl_->StopCapture(client_large.get());
- video_capture_impl_->StopCapture(client_small.get());
- message_loop_->RunUntilIdle();
+TEST_F(VideoCaptureImplTest, AlreadyStarted) {
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED)).Times(2);
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STOPPED)).Times(2);
+
+ Init();
+ StartCapture(0, params_small_);
+ StartCapture(1, params_large_);
+ StopCapture(0);
+ StopCapture(1);
+ DeInit();
+ DCHECK(video_capture_impl_->capture_params().requested_format
+ .frame_size ==
+ params_small_.requested_format.frame_size);
}
-TEST_F(VideoCaptureImplTest, SmallAndLarge) {
- // Execute SetCapture() and StopCapture() for 2 clients simultaneously.
- // The small client starts first and stops first.
- scoped_ptr<MockVideoCaptureClient> client_small(new MockVideoCaptureClient);
- scoped_ptr<MockVideoCaptureClient> client_large(new MockVideoCaptureClient);
-
- EXPECT_CALL(*client_large, OnStarted(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_small, OnStarted(_))
- .WillOnce(Return());
-
- video_capture_impl_->StartCapture(client_small.get(), params_small_);
- video_capture_impl_->StartCapture(client_large.get(), params_large_);
- message_loop_->RunUntilIdle();
-
- EXPECT_CALL(*client_large, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_large, OnRemoved(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_small, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client_small, OnRemoved(_))
- .WillOnce(Return());
-
- video_capture_impl_->StopCapture(client_small.get());
- video_capture_impl_->StopCapture(client_large.get());
- message_loop_->RunUntilIdle();
+TEST_F(VideoCaptureImplTest, EndedBeforeStop) {
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED));
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STOPPED));
+
+ Init();
+ StartCapture(0, params_small_);
+
+ // Receive state change message from browser.
+ video_capture_impl_->ReceiveStateChangeMessage(VIDEO_CAPTURE_STATE_ENDED);
+
+ StopCapture(0);
+ DeInit();
}
-TEST_F(VideoCaptureImplTest, TwoClientsWithSameSize) {
- // Execute SetCapture() and StopCapture() for 2 clients simultaneously.
- // The client1 starts first and stops first.
- scoped_ptr<MockVideoCaptureClient> client1(new MockVideoCaptureClient);
- scoped_ptr<MockVideoCaptureClient> client2(new MockVideoCaptureClient);
-
- EXPECT_CALL(*client1, OnStarted(_))
- .WillOnce(Return());
- EXPECT_CALL(*client2, OnStarted(_))
- .WillOnce(Return());
-
- video_capture_impl_->StartCapture(client1.get(), params_small_);
- video_capture_impl_->StartCapture(client2.get(), params_small_);
- message_loop_->RunUntilIdle();
-
- EXPECT_CALL(*client1, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client1, OnRemoved(_))
- .WillOnce(Return());
- EXPECT_CALL(*client2, OnStopped(_))
- .WillOnce(Return());
- EXPECT_CALL(*client2, OnRemoved(_))
- .WillOnce(Return());
-
- video_capture_impl_->StopCapture(client1.get());
- video_capture_impl_->StopCapture(client2.get());
- message_loop_->RunUntilIdle();
+TEST_F(VideoCaptureImplTest, ErrorBeforeStop) {
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_STARTED));
+ EXPECT_CALL(*this, OnStateUpdate(VIDEO_CAPTURE_STATE_ERROR));
+
+ Init();
+ StartCapture(0, params_small_);
+
+ // Receive state change message from browser.
+ video_capture_impl_->ReceiveStateChangeMessage(VIDEO_CAPTURE_STATE_ERROR);
+
+ StopCapture(0);
+ DeInit();
}
} // namespace content
diff --git a/chromium/content/renderer/media/video_capture_message_filter.cc b/chromium/content/renderer/media/video_capture_message_filter.cc
index 52847d91744..50345260b18 100644
--- a/chromium/content/renderer/media/video_capture_message_filter.cc
+++ b/chromium/content/renderer/media/video_capture_message_filter.cc
@@ -6,12 +6,13 @@
#include "content/common/media/video_capture_messages.h"
#include "content/common/view_messages.h"
+#include "ipc/ipc_sender.h"
namespace content {
VideoCaptureMessageFilter::VideoCaptureMessageFilter()
: last_device_id_(0),
- channel_(NULL) {
+ sender_(NULL) {
}
void VideoCaptureMessageFilter::AddDelegate(Delegate* delegate) {
@@ -20,7 +21,7 @@ void VideoCaptureMessageFilter::AddDelegate(Delegate* delegate) {
while (delegates_.find(last_device_id_) != delegates_.end())
last_device_id_++;
- if (channel_) {
+ if (sender_) {
delegates_[last_device_id_] = delegate;
delegate->OnDelegateAdded(last_device_id_);
} else {
@@ -46,29 +47,35 @@ void VideoCaptureMessageFilter::RemoveDelegate(Delegate* delegate) {
}
bool VideoCaptureMessageFilter::Send(IPC::Message* message) {
- if (!channel_) {
+ if (!sender_) {
delete message;
return false;
}
- return channel_->Send(message);
+ return sender_->Send(message);
}
bool VideoCaptureMessageFilter::OnMessageReceived(const IPC::Message& message) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(VideoCaptureMessageFilter, message)
IPC_MESSAGE_HANDLER(VideoCaptureMsg_BufferReady, OnBufferReceived)
+ IPC_MESSAGE_HANDLER(VideoCaptureMsg_MailboxBufferReady,
+ OnMailboxBufferReceived)
IPC_MESSAGE_HANDLER(VideoCaptureMsg_StateChanged, OnDeviceStateChanged)
IPC_MESSAGE_HANDLER(VideoCaptureMsg_NewBuffer, OnBufferCreated)
IPC_MESSAGE_HANDLER(VideoCaptureMsg_FreeBuffer, OnBufferDestroyed)
+ IPC_MESSAGE_HANDLER(VideoCaptureMsg_DeviceSupportedFormatsEnumerated,
+ OnDeviceSupportedFormatsEnumerated)
+ IPC_MESSAGE_HANDLER(VideoCaptureMsg_DeviceFormatsInUseReceived,
+ OnDeviceFormatsInUseReceived)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
}
-void VideoCaptureMessageFilter::OnFilterAdded(IPC::Channel* channel) {
+void VideoCaptureMessageFilter::OnFilterAdded(IPC::Sender* sender) {
DVLOG(1) << "VideoCaptureMessageFilter::OnFilterAdded()";
- channel_ = channel;
+ sender_ = sender;
for (Delegates::iterator it = pending_delegates_.begin();
it != pending_delegates_.end(); it++) {
@@ -79,11 +86,11 @@ void VideoCaptureMessageFilter::OnFilterAdded(IPC::Channel* channel) {
}
void VideoCaptureMessageFilter::OnFilterRemoved() {
- channel_ = NULL;
+ sender_ = NULL;
}
void VideoCaptureMessageFilter::OnChannelClosing() {
- channel_ = NULL;
+ sender_ = NULL;
}
VideoCaptureMessageFilter::~VideoCaptureMessageFilter() {}
@@ -101,13 +108,14 @@ void VideoCaptureMessageFilter::OnBufferCreated(
int buffer_id) {
Delegate* delegate = find_delegate(device_id);
if (!delegate) {
- DLOG(WARNING) << "OnBufferCreated: Got video frame buffer for a "
- "non-existent or removed video capture.";
+ DLOG(WARNING) << "OnBufferCreated: Got video SHM buffer for a "
+ "non-existent or removed video capture.";
// Send the buffer back to Host in case it's waiting for all buffers
// to be returned.
base::SharedMemory::CloseHandle(handle);
- Send(new VideoCaptureHostMsg_BufferReady(device_id, buffer_id));
+ Send(new VideoCaptureHostMsg_BufferReady(
+ device_id, buffer_id, std::vector<uint32>()));
return;
}
@@ -117,20 +125,44 @@ void VideoCaptureMessageFilter::OnBufferCreated(
void VideoCaptureMessageFilter::OnBufferReceived(
int device_id,
int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format) {
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) {
Delegate* delegate = find_delegate(device_id);
if (!delegate) {
- DLOG(WARNING) << "OnBufferReceived: Got video frame buffer for a "
- "non-existent or removed video capture.";
+ DLOG(WARNING) << "OnBufferReceived: Got video SHM buffer for a "
+ "non-existent or removed video capture.";
+
+ // Send the buffer back to Host in case it's waiting for all buffers
+ // to be returned.
+ Send(new VideoCaptureHostMsg_BufferReady(
+ device_id, buffer_id, std::vector<uint32>()));
+ return;
+ }
+
+ delegate->OnBufferReceived(buffer_id, format, timestamp);
+}
+
+void VideoCaptureMessageFilter::OnMailboxBufferReceived(
+ int device_id,
+ int buffer_id,
+ const gpu::MailboxHolder& mailbox_holder,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) {
+ Delegate* delegate = find_delegate(device_id);
+
+ if (!delegate) {
+ DLOG(WARNING) << "OnMailboxBufferReceived: Got video mailbox buffer for a "
+ "non-existent or removed video capture.";
// Send the buffer back to Host in case it's waiting for all buffers
// to be returned.
- Send(new VideoCaptureHostMsg_BufferReady(device_id, buffer_id));
+ Send(new VideoCaptureHostMsg_BufferReady(
+ device_id, buffer_id, std::vector<uint32>()));
return;
}
- delegate->OnBufferReceived(buffer_id, timestamp, format);
+ delegate->OnMailboxBufferReceived(
+ buffer_id, mailbox_holder, format, timestamp);
}
void VideoCaptureMessageFilter::OnBufferDestroyed(
@@ -158,4 +190,26 @@ void VideoCaptureMessageFilter::OnDeviceStateChanged(
delegate->OnStateChanged(state);
}
+void VideoCaptureMessageFilter::OnDeviceSupportedFormatsEnumerated(
+ int device_id,
+ const media::VideoCaptureFormats& supported_formats) {
+ Delegate* delegate = find_delegate(device_id);
+ if (!delegate) {
+ DLOG(WARNING) << "OnDeviceFormatsEnumerated: unknown device";
+ return;
+ }
+ delegate->OnDeviceSupportedFormatsEnumerated(supported_formats);
+}
+
+void VideoCaptureMessageFilter::OnDeviceFormatsInUseReceived(
+ int device_id,
+ const media::VideoCaptureFormats& formats_in_use) {
+ Delegate* delegate = find_delegate(device_id);
+ if (!delegate) {
+ DLOG(WARNING) << "OnDeviceFormatInUse: unknown device";
+ return;
+ }
+ delegate->OnDeviceFormatsInUseReceived(formats_in_use);
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/video_capture_message_filter.h b/chromium/content/renderer/media/video_capture_message_filter.h
index 024c1bd91b3..b6faa03fa97 100644
--- a/chromium/content/renderer/media/video_capture_message_filter.h
+++ b/chromium/content/renderer/media/video_capture_message_filter.h
@@ -15,13 +15,16 @@
#include "base/memory/shared_memory.h"
#include "content/common/content_export.h"
#include "content/common/media/video_capture.h"
-#include "ipc/ipc_channel_proxy.h"
-#include "media/video/capture/video_capture.h"
+#include "ipc/message_filter.h"
+#include "media/video/capture/video_capture_types.h"
+
+namespace gpu {
+struct MailboxHolder;
+} // namespace gpu
namespace content {
-class CONTENT_EXPORT VideoCaptureMessageFilter
- : public IPC::ChannelProxy::MessageFilter {
+class CONTENT_EXPORT VideoCaptureMessageFilter : public IPC::MessageFilter {
public:
class CONTENT_EXPORT Delegate {
public:
@@ -34,13 +37,28 @@ class CONTENT_EXPORT VideoCaptureMessageFilter
// Called when a video frame buffer is received from the browser process.
virtual void OnBufferReceived(int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format) = 0;
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) = 0;
+
+ // Called when a video mailbox buffer is received from the browser process.
+ virtual void OnMailboxBufferReceived(
+ int buffer_id,
+ const gpu::MailboxHolder& mailbox_holder,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp) = 0;
// Called when state of a video capture device has changed in the browser
// process.
virtual void OnStateChanged(VideoCaptureState state) = 0;
+ // Called upon reception of device's supported formats back from browser.
+ virtual void OnDeviceSupportedFormatsEnumerated(
+ const media::VideoCaptureFormats& supported_formats) = 0;
+
+ // Called upon reception of format(s) in use by a device back from browser.
+ virtual void OnDeviceFormatsInUseReceived(
+ const media::VideoCaptureFormats& formats_in_use) = 0;
+
// Called when the delegate has been added to filter's delegate list.
// |device_id| is the device id for the delegate.
virtual void OnDelegateAdded(int32 device_id) = 0;
@@ -60,9 +78,9 @@ class CONTENT_EXPORT VideoCaptureMessageFilter
// Send a message asynchronously.
virtual bool Send(IPC::Message* message);
- // IPC::ChannelProxy::MessageFilter override. Called on IO thread.
+ // IPC::MessageFilter override. Called on IO thread.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
- virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE;
+ virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE;
virtual void OnFilterRemoved() OVERRIDE;
virtual void OnChannelClosing() OVERRIDE;
@@ -85,12 +103,29 @@ class CONTENT_EXPORT VideoCaptureMessageFilter
// Receive a filled buffer from browser process.
void OnBufferReceived(int device_id,
int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format);
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp);
+
+ // Receive a filled texture mailbox buffer from browser process.
+ void OnMailboxBufferReceived(int device_id,
+ int buffer_id,
+ const gpu::MailboxHolder& mailbox_holder,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp);
// State of browser process' video capture device has changed.
void OnDeviceStateChanged(int device_id, VideoCaptureState state);
+ // Receive a device's supported formats back from browser process.
+ void OnDeviceSupportedFormatsEnumerated(
+ int device_id,
+ const media::VideoCaptureFormats& supported_formats);
+
+ // Receive the formats in-use by a device back from browser process.
+ void OnDeviceFormatsInUseReceived(
+ int device_id,
+ const media::VideoCaptureFormats& formats_in_use);
+
// Finds the delegate associated with |device_id|, NULL if not found.
Delegate* find_delegate(int device_id) const;
@@ -99,7 +134,7 @@ class CONTENT_EXPORT VideoCaptureMessageFilter
Delegates pending_delegates_;
int32 last_device_id_;
- IPC::Channel* channel_;
+ IPC::Sender* sender_;
DISALLOW_COPY_AND_ASSIGN(VideoCaptureMessageFilter);
};
diff --git a/chromium/content/renderer/media/video_capture_message_filter_unittest.cc b/chromium/content/renderer/media/video_capture_message_filter_unittest.cc
index 366bcb03041..742de87e14f 100644
--- a/chromium/content/renderer/media/video_capture_message_filter_unittest.cc
+++ b/chromium/content/renderer/media/video_capture_message_filter_unittest.cc
@@ -29,10 +29,20 @@ class MockVideoCaptureDelegate : public VideoCaptureMessageFilter::Delegate {
int length,
int buffer_id));
MOCK_METHOD1(OnBufferDestroyed, void(int buffer_id));
- MOCK_METHOD3(OnBufferReceived, void(int buffer_id,
- base::Time timestamp,
- const media::VideoCaptureFormat& format));
+ MOCK_METHOD3(OnBufferReceived,
+ void(int buffer_id,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp));
+ MOCK_METHOD4(OnMailboxBufferReceived,
+ void(int buffer_id,
+ const gpu::MailboxHolder& mailbox_holder,
+ const media::VideoCaptureFormat& format,
+ base::TimeTicks timestamp));
MOCK_METHOD1(OnStateChanged, void(VideoCaptureState state));
+ MOCK_METHOD1(OnDeviceSupportedFormatsEnumerated,
+ void(const media::VideoCaptureFormats& formats));
+ MOCK_METHOD1(OnDeviceFormatsInUseReceived,
+ void(const media::VideoCaptureFormats& formats_in_use));
virtual void OnDelegateAdded(int32 device_id) OVERRIDE {
ASSERT_TRUE(device_id != 0);
@@ -79,19 +89,49 @@ TEST(VideoCaptureMessageFilterTest, Basic) {
// VideoCaptureMsg_BufferReady
int buffer_id = 22;
- base::Time timestamp = base::Time::FromInternalValue(1);
+ base::TimeTicks timestamp = base::TimeTicks::FromInternalValue(1);
- media::VideoCaptureFormat format(
+ const media::VideoCaptureFormat shm_format(
gfx::Size(234, 512), 30, media::PIXEL_FORMAT_I420);
media::VideoCaptureFormat saved_format;
- EXPECT_CALL(delegate, OnBufferReceived(buffer_id, timestamp, _))
- .WillRepeatedly(SaveArg<2>(&saved_format));
+ EXPECT_CALL(delegate, OnBufferReceived(buffer_id, _, timestamp))
+ .WillRepeatedly(SaveArg<1>(&saved_format));
filter->OnMessageReceived(VideoCaptureMsg_BufferReady(
- delegate.device_id(), buffer_id, timestamp, format));
+ delegate.device_id(), buffer_id, shm_format, timestamp));
Mock::VerifyAndClearExpectations(&delegate);
- EXPECT_EQ(234, saved_format.frame_size.width());
- EXPECT_EQ(512, saved_format.frame_size.height());
- EXPECT_EQ(30, saved_format.frame_rate);
+ EXPECT_EQ(shm_format.frame_size, saved_format.frame_size);
+ EXPECT_EQ(shm_format.frame_rate, saved_format.frame_rate);
+ EXPECT_EQ(shm_format.pixel_format, saved_format.pixel_format);
+
+ // VideoCaptureMsg_MailboxBufferReady
+ buffer_id = 33;
+ timestamp = base::TimeTicks::FromInternalValue(2);
+
+ const media::VideoCaptureFormat mailbox_format(
+ gfx::Size(234, 512), 30, media::PIXEL_FORMAT_TEXTURE);
+ gpu::Mailbox mailbox;
+ const int8 mailbox_name[arraysize(mailbox.name)] = "TEST MAILBOX";
+ mailbox.SetName(mailbox_name);
+ unsigned int syncpoint = 44;
+ gpu::MailboxHolder saved_mailbox_holder;
+ EXPECT_CALL(delegate, OnMailboxBufferReceived(buffer_id, _, _, timestamp))
+ .WillRepeatedly(
+ DoAll(SaveArg<1>(&saved_mailbox_holder), SaveArg<2>(&saved_format)));
+ gpu::MailboxHolder mailbox_holder(mailbox, 0, syncpoint);
+ filter->OnMessageReceived(
+ VideoCaptureMsg_MailboxBufferReady(delegate.device_id(),
+ buffer_id,
+ mailbox_holder,
+ mailbox_format,
+ timestamp));
+ Mock::VerifyAndClearExpectations(&delegate);
+ EXPECT_EQ(mailbox_format.frame_size, saved_format.frame_size);
+ EXPECT_EQ(mailbox_format.frame_rate, saved_format.frame_rate);
+ EXPECT_EQ(mailbox_format.pixel_format, saved_format.pixel_format);
+ EXPECT_EQ(memcmp(mailbox.name,
+ saved_mailbox_holder.mailbox.name,
+ sizeof(mailbox.name)),
+ 0);
// VideoCaptureMsg_FreeBuffer
EXPECT_CALL(delegate, OnBufferDestroyed(buffer_id));
@@ -140,4 +180,35 @@ TEST(VideoCaptureMessageFilterTest, Delegates) {
VIDEO_CAPTURE_STATE_ENDED));
}
+TEST(VideoCaptureMessageFilterTest, GetSomeDeviceSupportedFormats) {
+ scoped_refptr<VideoCaptureMessageFilter> filter(
+ new VideoCaptureMessageFilter());
+
+ IPC::TestSink channel;
+ filter->OnFilterAdded(&channel);
+ MockVideoCaptureDelegate delegate;
+ filter->AddDelegate(&delegate);
+ ASSERT_EQ(1, delegate.device_id());
+
+ EXPECT_CALL(delegate, OnDeviceSupportedFormatsEnumerated(_));
+ media::VideoCaptureFormats supported_formats;
+ filter->OnMessageReceived(VideoCaptureMsg_DeviceSupportedFormatsEnumerated(
+ delegate.device_id(), supported_formats));
+}
+
+TEST(VideoCaptureMessageFilterTest, GetSomeDeviceFormatInUse) {
+ scoped_refptr<VideoCaptureMessageFilter> filter(
+ new VideoCaptureMessageFilter());
+
+ IPC::TestSink channel;
+ filter->OnFilterAdded(&channel);
+ MockVideoCaptureDelegate delegate;
+ filter->AddDelegate(&delegate);
+ ASSERT_EQ(1, delegate.device_id());
+
+ EXPECT_CALL(delegate, OnDeviceFormatsInUseReceived(_));
+ media::VideoCaptureFormats formats_in_use;
+ filter->OnMessageReceived(VideoCaptureMsg_DeviceFormatsInUseReceived(
+ delegate.device_id(), formats_in_use));
+}
} // namespace content
diff --git a/chromium/content/renderer/media/video_destination_handler.cc b/chromium/content/renderer/media/video_destination_handler.cc
deleted file mode 100644
index 692efeff86f..00000000000
--- a/chromium/content/renderer/media/video_destination_handler.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/video_destination_handler.h"
-
-#include <string>
-
-#include "base/base64.h"
-#include "base/logging.h"
-#include "base/rand_util.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
-#include "content/renderer/media/media_stream_registry_interface.h"
-#include "content/renderer/pepper/ppb_image_data_impl.h"
-#include "content/renderer/render_thread_impl.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/WebKit/public/web/WebMediaStreamRegistry.h"
-
-using cricket::CaptureState;
-using cricket::VideoFormat;
-using webrtc::VideoTrackInterface;
-using webrtc::VideoTrackVector;
-
-static const cricket::FourCC kEffectColorFormat = cricket::FOURCC_BGRA;
-
-namespace content {
-
-PpFrameWriter::PpFrameWriter()
- : started_(false) {}
-
-PpFrameWriter::~PpFrameWriter() {}
-
-CaptureState PpFrameWriter::Start(const VideoFormat& capture_format) {
- base::AutoLock auto_lock(lock_);
- if (started_) {
- LOG(ERROR) << "PpFrameWriter::Start - "
- << "Got a StartCapture when already started!";
- return cricket::CS_FAILED;
- }
- started_ = true;
- return cricket::CS_STARTING;
-}
-
-void PpFrameWriter::Stop() {
- base::AutoLock auto_lock(lock_);
- started_ = false;
- SignalStateChange(this, cricket::CS_STOPPED);
-}
-
-bool PpFrameWriter::IsRunning() {
- return started_;
-}
-
-bool PpFrameWriter::GetPreferredFourccs(std::vector<uint32>* fourccs) {
- if (!fourccs) {
- LOG(ERROR) << "PpFrameWriter::GetPreferredFourccs - "
- << "fourccs is NULL.";
- return false;
- }
- // The effects plugin output BGRA.
- fourccs->push_back(kEffectColorFormat);
- return true;
-}
-
-bool PpFrameWriter::GetBestCaptureFormat(const VideoFormat& desired,
- VideoFormat* best_format) {
- if (!best_format) {
- LOG(ERROR) << "PpFrameWriter::GetBestCaptureFormat - "
- << "best_format is NULL.";
- return false;
- }
-
- // Use the desired format as the best format.
- best_format->width = desired.width;
- best_format->height = desired.height;
- best_format->fourcc = kEffectColorFormat;
- best_format->interval = desired.interval;
- return true;
-}
-
-bool PpFrameWriter::IsScreencast() const {
- return false;
-}
-
-void PpFrameWriter::PutFrame(PPB_ImageData_Impl* image_data,
- int64 time_stamp_ns) {
- base::AutoLock auto_lock(lock_);
- // This assumes the handler of the SignalFrameCaptured won't call Start/Stop.
- // TODO(ronghuawu): Avoid the using of lock. One way is to post this call to
- // libjingle worker thread, which will require an extra copy of |image_data|.
- // However if pepper host can hand over the ownership of |image_data|
- // then we can avoid this extra copy.
- if (!started_) {
- LOG(ERROR) << "PpFrameWriter::PutFrame - "
- << "Called when capturer is not started.";
- return;
- }
- if (!image_data) {
- LOG(ERROR) << "PpFrameWriter::PutFrame - Called with NULL image_data.";
- return;
- }
- ImageDataAutoMapper mapper(image_data);
- if (!mapper.is_valid()) {
- LOG(ERROR) << "PpFrameWriter::PutFrame - "
- << "The image could not be mapped and is unusable.";
- return;
- }
- const SkBitmap* bitmap = image_data->GetMappedBitmap();
- if (!bitmap) {
- LOG(ERROR) << "PpFrameWriter::PutFrame - "
- << "The image_data's mapped bitmap is NULL.";
- return;
- }
-
- cricket::CapturedFrame frame;
- frame.elapsed_time = 0;
- frame.time_stamp = time_stamp_ns;
- frame.pixel_height = 1;
- frame.pixel_width = 1;
- frame.width = bitmap->width();
- frame.height = bitmap->height();
- if (image_data->format() == PP_IMAGEDATAFORMAT_BGRA_PREMUL) {
- frame.fourcc = cricket::FOURCC_BGRA;
- } else {
- LOG(ERROR) << "PpFrameWriter::PutFrame - Got RGBA which is not supported.";
- return;
- }
- frame.data_size = bitmap->getSize();
- frame.data = bitmap->getPixels();
-
- // This signals to libJingle that a new VideoFrame is available.
- // libJingle have no assumptions on what thread this signal come from.
- SignalFrameCaptured(this, &frame);
-}
-
-// PpFrameWriterProxy is a helper class to make sure the user won't use
-// PpFrameWriter after it is released (IOW its owner - WebMediaStreamTrack -
-// is released).
-class PpFrameWriterProxy : public FrameWriterInterface {
- public:
- PpFrameWriterProxy(VideoTrackInterface* track,
- PpFrameWriter* writer)
- : track_(track),
- writer_(writer) {
- DCHECK(writer_ != NULL);
- }
-
- virtual ~PpFrameWriterProxy() {}
-
- virtual void PutFrame(PPB_ImageData_Impl* image_data,
- int64 time_stamp_ns) OVERRIDE {
- writer_->PutFrame(image_data, time_stamp_ns);
- }
-
- private:
- scoped_refptr<VideoTrackInterface> track_;
- PpFrameWriter* writer_;
-
- DISALLOW_COPY_AND_ASSIGN(PpFrameWriterProxy);
-};
-
-bool VideoDestinationHandler::Open(
- MediaStreamDependencyFactory* factory,
- MediaStreamRegistryInterface* registry,
- const std::string& url,
- FrameWriterInterface** frame_writer) {
- if (!factory) {
- factory = RenderThreadImpl::current()->GetMediaStreamDependencyFactory();
- DCHECK(factory != NULL);
- }
- blink::WebMediaStream stream;
- if (registry) {
- stream = registry->GetMediaStream(url);
- } else {
- stream =
- blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(GURL(url));
- }
- if (stream.isNull() || !stream.extraData()) {
- LOG(ERROR) << "VideoDestinationHandler::Open - invalid url: " << url;
- return false;
- }
-
- // Create a new native video track and add it to |stream|.
- std::string track_id;
- // According to spec, a media stream track's id should be globally unique.
- // There's no easy way to strictly achieve that. The id generated with this
- // method should be unique for most of the cases but theoretically it's
- // possible we can get an id that's duplicated with the existing tracks.
- base::Base64Encode(base::RandBytesAsString(64), &track_id);
- PpFrameWriter* writer = new PpFrameWriter();
- if (!factory->AddNativeVideoMediaTrack(track_id, &stream, writer)) {
- delete writer;
- return false;
- }
-
- // Gets a handler to the native video track, which owns the |writer|.
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(stream.extraData());
- webrtc::MediaStreamInterface* native_stream = extra_data->stream().get();
- DCHECK(native_stream);
- VideoTrackVector video_tracks = native_stream->GetVideoTracks();
- // Currently one supports one video track per media stream.
- DCHECK(video_tracks.size() == 1);
-
- *frame_writer = new PpFrameWriterProxy(video_tracks[0].get(), writer);
- return true;
-}
-
-} // namespace content
-
diff --git a/chromium/content/renderer/media/video_destination_handler_unittest.cc b/chromium/content/renderer/media/video_destination_handler_unittest.cc
deleted file mode 100644
index a89ad614386..00000000000
--- a/chromium/content/renderer/media/video_destination_handler_unittest.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_extra_data.h"
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
-#include "content/renderer/media/mock_media_stream_registry.h"
-#include "content/renderer/media/video_destination_handler.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/WebKit/public/platform/WebString.h"
-
-using cricket::CapturedFrame;
-using cricket::CaptureState;
-using cricket::VideoCapturer;
-using cricket::VideoFormat;
-using cricket::VideoFormatPod;
-
-namespace content {
-
-static const std::string kTestStreamUrl = "stream_url";
-static const std::string kUnknownStreamUrl = "unknown_stream_url";
-static const VideoFormatPod kTestFormat = {
- 640, 360, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY
-};
-
-class PpFrameWriterTest
- : public ::testing::Test,
- public sigslot::has_slots<> {
- public:
- PpFrameWriterTest()
- : last_capture_state_(cricket::CS_FAILED),
- captured_frame_count_(0),
- captured_frame_(NULL) {
- writer_.SignalStateChange.connect(this, &PpFrameWriterTest::OnStateChange);
- writer_.SignalFrameCaptured.connect(
- this, &PpFrameWriterTest::OnFrameCaptured);
- }
-
- void OnStateChange(VideoCapturer* capturer, CaptureState state) {
- last_capture_state_ = state;
- }
-
- void OnFrameCaptured(VideoCapturer* capturer, const CapturedFrame* frame) {
- ++captured_frame_count_;
- captured_frame_ = const_cast<CapturedFrame*>(frame);
- }
-
- protected:
- PpFrameWriter writer_;
- CaptureState last_capture_state_;
- int captured_frame_count_;
- CapturedFrame* captured_frame_;
-};
-
-class VideoDestinationHandlerTest : public ::testing::Test {
- public:
- VideoDestinationHandlerTest() : registry_(&factory_) {
- factory_.EnsurePeerConnectionFactory();
- registry_.Init(kTestStreamUrl);
- }
-
- protected:
- MockMediaStreamDependencyFactory factory_;
- MockMediaStreamRegistry registry_;
-};
-
-TEST_F(PpFrameWriterTest, StartStop) {
- EXPECT_FALSE(writer_.IsRunning());
- EXPECT_EQ(cricket::CS_STARTING, writer_.Start(VideoFormat(kTestFormat)));
- EXPECT_TRUE(writer_.IsRunning());
- EXPECT_EQ(cricket::CS_FAILED, writer_.Start(VideoFormat(kTestFormat)));
- writer_.Stop();
- EXPECT_EQ(cricket::CS_STOPPED, last_capture_state_);
-}
-
-TEST_F(PpFrameWriterTest, GetPreferredFourccs) {
- std::vector<uint32> fourccs;
- EXPECT_TRUE(writer_.GetPreferredFourccs(&fourccs));
- EXPECT_EQ(1u, fourccs.size());
- EXPECT_EQ(cricket::FOURCC_BGRA, fourccs[0]);
-}
-
-TEST_F(PpFrameWriterTest, GetBestCaptureFormat) {
- VideoFormat desired(kTestFormat);
- VideoFormat best_format;
- EXPECT_FALSE(writer_.GetBestCaptureFormat(desired, NULL));
- EXPECT_TRUE(writer_.GetBestCaptureFormat(desired, &best_format));
- EXPECT_EQ(cricket::FOURCC_BGRA, best_format.fourcc);
-
- desired.fourcc = best_format.fourcc;
- EXPECT_EQ(desired, best_format);
-}
-
-TEST_F(VideoDestinationHandlerTest, Open) {
- FrameWriterInterface* frame_writer = NULL;
- // Unknow url will return false.
- EXPECT_FALSE(VideoDestinationHandler::Open(&factory_, &registry_,
- kUnknownStreamUrl, &frame_writer));
- EXPECT_TRUE(VideoDestinationHandler::Open(&factory_, &registry_,
- kTestStreamUrl, &frame_writer));
- EXPECT_TRUE(frame_writer);
-
- // Verify the video track has been added.
- const blink::WebMediaStream test_stream = registry_.test_stream();
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- test_stream.videoTracks(video_tracks);
- EXPECT_EQ(1u, video_tracks.size());
-
- // Verify the native video track has been added.
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(test_stream.extraData());
- DCHECK(extra_data);
- webrtc::MediaStreamInterface* native_stream = extra_data->stream().get();
- DCHECK(native_stream);
- webrtc::VideoTrackVector native_video_tracks =
- native_stream->GetVideoTracks();
- EXPECT_EQ(1u, native_video_tracks.size());
-
- delete frame_writer;
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/video_frame_compositor.cc b/chromium/content/renderer/media/video_frame_compositor.cc
new file mode 100644
index 00000000000..3fa8042f1f2
--- /dev/null
+++ b/chromium/content/renderer/media/video_frame_compositor.cc
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/video_frame_compositor.h"
+
+#include "media/base/video_frame.h"
+
+namespace content {
+
+static bool IsOpaque(const scoped_refptr<media::VideoFrame>& frame) {
+ switch (frame->format()) {
+ case media::VideoFrame::UNKNOWN:
+ case media::VideoFrame::YV12:
+ case media::VideoFrame::YV12J:
+ case media::VideoFrame::YV16:
+ case media::VideoFrame::I420:
+ case media::VideoFrame::YV24:
+ case media::VideoFrame::NV12:
+ return true;
+
+ case media::VideoFrame::YV12A:
+#if defined(VIDEO_HOLE)
+ case media::VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case media::VideoFrame::NATIVE_TEXTURE:
+ break;
+ }
+ return false;
+}
+
+VideoFrameCompositor::VideoFrameCompositor(
+ const base::Callback<void(gfx::Size)>& natural_size_changed_cb,
+ const base::Callback<void(bool)>& opacity_changed_cb)
+ : natural_size_changed_cb_(natural_size_changed_cb),
+ opacity_changed_cb_(opacity_changed_cb),
+ client_(NULL) {
+}
+
+VideoFrameCompositor::~VideoFrameCompositor() {
+ if (client_)
+ client_->StopUsingProvider();
+}
+
+void VideoFrameCompositor::SetVideoFrameProviderClient(
+ cc::VideoFrameProvider::Client* client) {
+ if (client_)
+ client_->StopUsingProvider();
+ client_ = client;
+}
+
+scoped_refptr<media::VideoFrame> VideoFrameCompositor::GetCurrentFrame() {
+ return current_frame_;
+}
+
+void VideoFrameCompositor::PutCurrentFrame(
+ const scoped_refptr<media::VideoFrame>& frame) {
+}
+
+void VideoFrameCompositor::UpdateCurrentFrame(
+ const scoped_refptr<media::VideoFrame>& frame) {
+ if (current_frame_ &&
+ current_frame_->natural_size() != frame->natural_size()) {
+ natural_size_changed_cb_.Run(frame->natural_size());
+ }
+
+ if (!current_frame_ || IsOpaque(current_frame_) != IsOpaque(frame)) {
+ opacity_changed_cb_.Run(IsOpaque(frame));
+ }
+
+ current_frame_ = frame;
+
+ if (client_)
+ client_->DidReceiveFrame();
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_frame_compositor.h b/chromium/content/renderer/media/video_frame_compositor.h
new file mode 100644
index 00000000000..91e5d0cd17e
--- /dev/null
+++ b/chromium/content/renderer/media/video_frame_compositor.h
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_VIDEO_FRAME_COMPOSITOR_H_
+#define CONTENT_RENDERER_MEDIA_VIDEO_FRAME_COMPOSITOR_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "cc/layers/video_frame_provider.h"
+#include "content/common/content_export.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+class VideoFrame;
+}
+
+namespace content {
+
+// VideoFrameCompositor handles incoming frames by notifying the compositor and
+// dispatching callbacks when detecting changes in video frames.
+//
+// Typical usage is to deliver ready-to-be-displayed video frames to
+// UpdateCurrentFrame() so that VideoFrameCompositor can take care of tracking
+// changes in video frames and firing callbacks as needed.
+//
+// VideoFrameCompositor must live on the same thread as the compositor.
+class CONTENT_EXPORT VideoFrameCompositor
+ : NON_EXPORTED_BASE(public cc::VideoFrameProvider) {
+ public:
+ // |natural_size_changed_cb| is run with the new natural size of the video
+ // frame whenever a change in natural size is detected. It is not called the
+ // first time UpdateCurrentFrame() is called. Run on the same thread as the
+ // caller of UpdateCurrentFrame().
+ //
+ // |opacity_changed_cb| is run when a change in opacity is detected. It *is*
+ // called the first time UpdateCurrentFrame() is called. Run on the same
+ // thread as the caller of UpdateCurrentFrame().
+ //
+ // TODO(scherkus): Investigate the inconsistency between the callbacks with
+ // respect to why we don't call |natural_size_changed_cb| on the first frame.
+ // I suspect it was for historical reasons that no longer make sense.
+ VideoFrameCompositor(
+ const base::Callback<void(gfx::Size)>& natural_size_changed_cb,
+ const base::Callback<void(bool)>& opacity_changed_cb);
+ virtual ~VideoFrameCompositor();
+
+ // cc::VideoFrameProvider implementation.
+ virtual void SetVideoFrameProviderClient(
+ cc::VideoFrameProvider::Client* client) OVERRIDE;
+ virtual scoped_refptr<media::VideoFrame> GetCurrentFrame() OVERRIDE;
+ virtual void PutCurrentFrame(
+ const scoped_refptr<media::VideoFrame>& frame) OVERRIDE;
+
+ // Updates the current frame and notifies the compositor.
+ void UpdateCurrentFrame(const scoped_refptr<media::VideoFrame>& frame);
+
+ private:
+ base::Callback<void(gfx::Size)> natural_size_changed_cb_;
+ base::Callback<void(bool)> opacity_changed_cb_;
+
+ cc::VideoFrameProvider::Client* client_;
+
+ scoped_refptr<media::VideoFrame> current_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameCompositor);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_VIDEO_FRAME_COMPOSITOR_H_
diff --git a/chromium/content/renderer/media/video_frame_compositor_unittest.cc b/chromium/content/renderer/media/video_frame_compositor_unittest.cc
new file mode 100644
index 00000000000..6669825e913
--- /dev/null
+++ b/chromium/content/renderer/media/video_frame_compositor_unittest.cc
@@ -0,0 +1,162 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "cc/layers/video_frame_provider.h"
+#include "content/renderer/media/video_frame_compositor.h"
+#include "media/base/video_frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+using media::VideoFrame;
+
+class VideoFrameCompositorTest : public testing::Test,
+ public cc::VideoFrameProvider::Client {
+ public:
+ VideoFrameCompositorTest()
+ : compositor_(new VideoFrameCompositor(
+ base::Bind(&VideoFrameCompositorTest::NaturalSizeChanged,
+ base::Unretained(this)),
+ base::Bind(&VideoFrameCompositorTest::OpacityChanged,
+ base::Unretained(this)))),
+ did_receive_frame_count_(0),
+ natural_size_changed_count_(0),
+ opacity_changed_count_(0),
+ opaque_(false) {
+ compositor_->SetVideoFrameProviderClient(this);
+ }
+
+ virtual ~VideoFrameCompositorTest() {
+ compositor_->SetVideoFrameProviderClient(NULL);
+ }
+
+ VideoFrameCompositor* compositor() { return compositor_.get(); }
+ int did_receive_frame_count() { return did_receive_frame_count_; }
+ int natural_size_changed_count() { return natural_size_changed_count_; }
+ gfx::Size natural_size() { return natural_size_; }
+
+ int opacity_changed_count() { return opacity_changed_count_; }
+ bool opaque() { return opaque_; }
+
+ private:
+ // cc::VideoFrameProvider::Client implementation.
+ virtual void StopUsingProvider() OVERRIDE {}
+ virtual void DidReceiveFrame() OVERRIDE {
+ ++did_receive_frame_count_;
+ }
+ virtual void DidUpdateMatrix(const float* matrix) OVERRIDE {}
+
+ void NaturalSizeChanged(gfx::Size natural_size) {
+ ++natural_size_changed_count_;
+ natural_size_ = natural_size;
+ }
+
+ void OpacityChanged(bool opaque) {
+ ++opacity_changed_count_;
+ opaque_ = opaque;
+ }
+
+ scoped_ptr<VideoFrameCompositor> compositor_;
+ int did_receive_frame_count_;
+ int natural_size_changed_count_;
+ gfx::Size natural_size_;
+ int opacity_changed_count_;
+ bool opaque_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameCompositorTest);
+};
+
+TEST_F(VideoFrameCompositorTest, InitialValues) {
+ EXPECT_FALSE(compositor()->GetCurrentFrame());
+}
+
+TEST_F(VideoFrameCompositorTest, UpdateCurrentFrame) {
+ scoped_refptr<VideoFrame> expected = VideoFrame::CreateEOSFrame();
+
+ // Should notify compositor synchronously.
+ EXPECT_EQ(0, did_receive_frame_count());
+ compositor()->UpdateCurrentFrame(expected);
+ scoped_refptr<VideoFrame> actual = compositor()->GetCurrentFrame();
+ EXPECT_EQ(expected, actual);
+ EXPECT_EQ(1, did_receive_frame_count());
+}
+
+TEST_F(VideoFrameCompositorTest, NaturalSizeChanged) {
+ gfx::Size initial_size(8, 8);
+ scoped_refptr<VideoFrame> initial_frame =
+ VideoFrame::CreateBlackFrame(initial_size);
+
+ gfx::Size larger_size(16, 16);
+ scoped_refptr<VideoFrame> larger_frame =
+ VideoFrame::CreateBlackFrame(larger_size);
+
+ // Initial expectations.
+ EXPECT_EQ(0, natural_size().width());
+ EXPECT_EQ(0, natural_size().height());
+ EXPECT_EQ(0, natural_size_changed_count());
+
+ // Callback isn't fired for the first frame.
+ compositor()->UpdateCurrentFrame(initial_frame);
+ EXPECT_EQ(0, natural_size().width());
+ EXPECT_EQ(0, natural_size().height());
+ EXPECT_EQ(0, natural_size_changed_count());
+
+ // Callback should be fired once.
+ compositor()->UpdateCurrentFrame(larger_frame);
+ EXPECT_EQ(larger_size.width(), natural_size().width());
+ EXPECT_EQ(larger_size.height(), natural_size().height());
+ EXPECT_EQ(1, natural_size_changed_count());
+
+ compositor()->UpdateCurrentFrame(larger_frame);
+ EXPECT_EQ(larger_size.width(), natural_size().width());
+ EXPECT_EQ(larger_size.height(), natural_size().height());
+ EXPECT_EQ(1, natural_size_changed_count());
+
+ // Callback is fired once more when switching back to initial size.
+ compositor()->UpdateCurrentFrame(initial_frame);
+ EXPECT_EQ(initial_size.width(), natural_size().width());
+ EXPECT_EQ(initial_size.height(), natural_size().height());
+ EXPECT_EQ(2, natural_size_changed_count());
+
+ compositor()->UpdateCurrentFrame(initial_frame);
+ EXPECT_EQ(initial_size.width(), natural_size().width());
+ EXPECT_EQ(initial_size, natural_size());
+ EXPECT_EQ(2, natural_size_changed_count());
+}
+
+TEST_F(VideoFrameCompositorTest, OpacityChanged) {
+ gfx::Size size(8, 8);
+ gfx::Rect rect(gfx::Point(0, 0), size);
+ scoped_refptr<VideoFrame> opaque_frame = VideoFrame::CreateFrame(
+ VideoFrame::YV12, size, rect, size, base::TimeDelta());
+ scoped_refptr<VideoFrame> not_opaque_frame = VideoFrame::CreateFrame(
+ VideoFrame::YV12A, size, rect, size, base::TimeDelta());
+
+ // Initial expectations.
+ EXPECT_FALSE(opaque());
+ EXPECT_EQ(0, opacity_changed_count());
+
+ // Callback is fired for the first frame.
+ compositor()->UpdateCurrentFrame(not_opaque_frame);
+ EXPECT_FALSE(opaque());
+ EXPECT_EQ(1, opacity_changed_count());
+
+ // Callback shouldn't be first subsequent times with same opaqueness.
+ compositor()->UpdateCurrentFrame(not_opaque_frame);
+ EXPECT_FALSE(opaque());
+ EXPECT_EQ(1, opacity_changed_count());
+
+ // Callback is fired when using opacity changes.
+ compositor()->UpdateCurrentFrame(opaque_frame);
+ EXPECT_TRUE(opaque());
+ EXPECT_EQ(2, opacity_changed_count());
+
+ // Callback shouldn't be first subsequent times with same opaqueness.
+ compositor()->UpdateCurrentFrame(opaque_frame);
+ EXPECT_TRUE(opaque());
+ EXPECT_EQ(2, opacity_changed_count());
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_frame_deliverer.cc b/chromium/content/renderer/media/video_frame_deliverer.cc
new file mode 100644
index 00000000000..05c77496790
--- /dev/null
+++ b/chromium/content/renderer/media/video_frame_deliverer.cc
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/video_frame_deliverer.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+
+namespace content {
+namespace {
+void ResetCallback(scoped_ptr<VideoCaptureDeliverFrameCB> callback) {
+ // |callback| will be deleted when this exits.
+}
+} // namespace
+
+VideoFrameDeliverer::VideoFrameDeliverer(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
+ : io_message_loop_(io_message_loop) {
+ DCHECK(io_message_loop_);
+}
+
+VideoFrameDeliverer::~VideoFrameDeliverer() {
+ DCHECK(callbacks_.empty());
+}
+
+void VideoFrameDeliverer::AddCallback(
+ void* id,
+ const VideoCaptureDeliverFrameCB& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoFrameDeliverer::AddCallbackOnIO,
+ this, id, callback));
+}
+
+void VideoFrameDeliverer::AddCallbackOnIO(
+ void* id,
+ const VideoCaptureDeliverFrameCB& callback) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ callbacks_.push_back(std::make_pair(id, callback));
+}
+
+void VideoFrameDeliverer::RemoveCallback(void* id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoFrameDeliverer::RemoveCallbackOnIO,
+ this, id, base::MessageLoopProxy::current()));
+}
+
+void VideoFrameDeliverer::RemoveCallbackOnIO(
+ void* id, const scoped_refptr<base::MessageLoopProxy>& message_loop) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ std::vector<VideoIdCallbackPair>::iterator it = callbacks_.begin();
+ for (; it != callbacks_.end(); ++it) {
+ if (it->first == id) {
+ // Callback is copied to heap and then deleted on the target thread.
+ // The following code ensures that the callback is not referenced on
+ // the stack.
+ scoped_ptr<VideoCaptureDeliverFrameCB> callback;
+ {
+ callback.reset(new VideoCaptureDeliverFrameCB(it->second));
+ callbacks_.erase(it);
+ }
+ message_loop->PostTask(
+ FROM_HERE, base::Bind(&ResetCallback, base::Passed(&callback)));
+ return;
+ }
+ }
+}
+
+void VideoFrameDeliverer::DeliverFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ for (std::vector<VideoIdCallbackPair>::iterator it = callbacks_.begin();
+ it != callbacks_.end(); ++it) {
+ it->second.Run(frame, format, estimated_capture_time);
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_frame_deliverer.h b/chromium/content/renderer/media/video_frame_deliverer.h
new file mode 100644
index 00000000000..a1f4b9d4f6a
--- /dev/null
+++ b/chromium/content/renderer/media/video_frame_deliverer.h
@@ -0,0 +1,82 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_VIDEO_FRAME_DELIVERER_H_
+#define CONTENT_RENDERER_MEDIA_VIDEO_FRAME_DELIVERER_H_
+
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/threading/thread_checker.h"
+#include "content/common/media/video_capture.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "media/base/video_frame.h"
+
+namespace content {
+
+// VideoFrameDeliverer is a helper class used for registering
+// VideoCaptureDeliverFrameCB on the main render thread to receive video frames
+// on the IO-thread.
+// It's used by MediaStreamVideoTrack.
+class VideoFrameDeliverer
+ : public base::RefCountedThreadSafe<VideoFrameDeliverer> {
+ public:
+ explicit VideoFrameDeliverer(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop);
+
+ // Add |callback| to receive video frames on the IO-thread.
+ // Must be called on the main render thread.
+ void AddCallback(void* id, const VideoCaptureDeliverFrameCB& callback);
+
+ // Removes |callback| associated with |id| from receiving video frames if |id|
+ // has been added. It is ok to call RemoveCallback even if the |id| has not
+ // been added. Note that the added callback will be reset on the main thread.
+ // Must be called on the main render thread.
+ void RemoveCallback(void* id);
+
+ // Triggers all registered callbacks with |frame|, |format| and
+ // |estimated_capture_time| as parameters. Must be called on the IO-thread.
+ virtual void DeliverFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop() const {
+ return io_message_loop_;
+ }
+
+ protected:
+ void AddCallbackOnIO(void* id, const VideoCaptureDeliverFrameCB& callback);
+
+ // Callback will be removed and then reset on the designated message loop.
+ // It is ok to call RemoveCallback even if |id| has not been added.
+ void RemoveCallbackOnIO(
+ void* id, const scoped_refptr<base::MessageLoopProxy>& message_loop);
+
+ protected:
+ virtual ~VideoFrameDeliverer();
+ const base::ThreadChecker& thread_checker() const {
+ return thread_checker_;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<VideoFrameDeliverer>;
+
+ // Used to DCHECK that AddCallback and RemoveCallback are called on the main
+ // render thread.
+ base::ThreadChecker thread_checker_;
+ scoped_refptr<base::MessageLoopProxy> io_message_loop_;
+
+ typedef std::pair<void*, VideoCaptureDeliverFrameCB> VideoIdCallbackPair;
+ std::vector<VideoIdCallbackPair> callbacks_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameDeliverer);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_VIDEO_FRAME_DELIVERER_H_
diff --git a/chromium/content/renderer/media/video_source_handler.cc b/chromium/content/renderer/media/video_source_handler.cc
index 69b83557939..a91ac3b0884 100644
--- a/chromium/content/renderer/media/video_source_handler.cc
+++ b/chromium/content/renderer/media/video_source_handler.cc
@@ -7,50 +7,64 @@
#include <string>
#include "base/logging.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "content/renderer/media/media_stream.h"
#include "content/renderer/media/media_stream_registry_interface.h"
-#include "content/renderer/render_thread_impl.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/video/capture/video_capture_types.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
+#include "third_party/WebKit/public/platform/WebURL.h"
#include "third_party/WebKit/public/web/WebMediaStreamRegistry.h"
-#include "third_party/libjingle/source/talk/media/base/videoframe.h"
-#include "third_party/libjingle/source/talk/media/base/videorenderer.h"
-
-using cricket::VideoFrame;
-using cricket::VideoRenderer;
-using webrtc::VideoSourceInterface;
+#include "url/gurl.h"
namespace content {
-// PpFrameReceiver implements cricket::VideoRenderer so that it can be attached
-// to native video track's video source to receive the captured frame.
+// PpFrameReceiver implements MediaStreamVideoSink so that it can be attached
+// to video track to receive the captured frame.
// It can be attached to a FrameReaderInterface to output the received frame.
-class PpFrameReceiver : public cricket::VideoRenderer {
+class PpFrameReceiver : public MediaStreamVideoSink {
public:
- PpFrameReceiver() : reader_(NULL) {}
+ PpFrameReceiver(blink::WebMediaStreamTrack track)
+ : track_(track),
+ reader_(NULL),
+ weak_factory_(this) {
+ }
+
virtual ~PpFrameReceiver() {}
- // Implements VideoRenderer.
- virtual bool SetSize(int width, int height, int reserved) OVERRIDE {
- return true;
- }
- virtual bool RenderFrame(const cricket::VideoFrame* frame) OVERRIDE {
- base::AutoLock auto_lock(lock_);
- if (reader_) {
- // Make a shallow copy of the frame as the |reader_| may need to queue it.
- // Both frames will share a single reference-counted frame buffer.
- reader_->GotFrame(frame->Copy());
+ void SetReader(FrameReaderInterface* reader) {
+ if (reader) {
+ DCHECK(!reader_);
+ MediaStreamVideoSink::AddToVideoTrack(
+ this,
+ media::BindToCurrentLoop(
+ base::Bind(
+ &PpFrameReceiver::OnVideoFrame,
+ weak_factory_.GetWeakPtr())),
+ track_);
+ } else {
+ DCHECK(reader_);
+ MediaStreamVideoSink::RemoveFromVideoTrack(this, track_);
+ weak_factory_.InvalidateWeakPtrs();
}
- return true;
+ reader_ = reader;
}
- void SetReader(FrameReaderInterface* reader) {
- base::AutoLock auto_lock(lock_);
- reader_ = reader;
+ void OnVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ if (reader_) {
+ reader_->GotFrame(frame);
+ }
}
private:
+ blink::WebMediaStreamTrack track_;
FrameReaderInterface* reader_;
- base::Lock lock_;
+ base::WeakPtrFactory<PpFrameReceiver> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(PpFrameReceiver);
};
@@ -61,48 +75,37 @@ VideoSourceHandler::VideoSourceHandler(
}
VideoSourceHandler::~VideoSourceHandler() {
- // All the opened readers should have been closed by now.
- DCHECK(reader_to_receiver_.empty());
+ for (SourceInfoMap::iterator it = reader_to_receiver_.begin();
+ it != reader_to_receiver_.end();
+ ++it) {
+ delete it->second;
+ }
}
bool VideoSourceHandler::Open(const std::string& url,
FrameReaderInterface* reader) {
- scoped_refptr<webrtc::VideoSourceInterface> source = GetFirstVideoSource(url);
- if (!source.get()) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const blink::WebMediaStreamTrack& track = GetFirstVideoTrack(url);
+ if (track.isNull()) {
return false;
}
- PpFrameReceiver* receiver = new PpFrameReceiver();
- receiver->SetReader(reader);
- source->AddSink(receiver);
- reader_to_receiver_[reader] = receiver;
+ reader_to_receiver_[reader] = new SourceInfo(track, reader);
return true;
}
-bool VideoSourceHandler::Close(const std::string& url,
- FrameReaderInterface* reader) {
- scoped_refptr<webrtc::VideoSourceInterface> source = GetFirstVideoSource(url);
- if (!source.get()) {
- LOG(ERROR) << "VideoSourceHandler::Close - Failed to get the video source "
- << "from MediaStream with url: " << url;
- return false;
- }
- PpFrameReceiver* receiver =
- static_cast<PpFrameReceiver*>(GetReceiver(reader));
- if (!receiver) {
- LOG(ERROR) << "VideoSourceHandler::Close - Failed to find receiver that "
- << "is associated with the given reader.";
+bool VideoSourceHandler::Close(FrameReaderInterface* reader) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+ SourceInfoMap::iterator it = reader_to_receiver_.find(reader);
+ if (it == reader_to_receiver_.end()) {
return false;
}
- receiver->SetReader(NULL);
- source->RemoveSink(receiver);
- reader_to_receiver_.erase(reader);
- delete receiver;
+ delete it->second;
+ reader_to_receiver_.erase(it);
return true;
}
-scoped_refptr<VideoSourceInterface> VideoSourceHandler::GetFirstVideoSource(
+blink::WebMediaStreamTrack VideoSourceHandler::GetFirstVideoTrack(
const std::string& url) {
- scoped_refptr<webrtc::VideoSourceInterface> source;
blink::WebMediaStream stream;
if (registry_) {
stream = registry_->GetMediaStream(url);
@@ -110,42 +113,45 @@ scoped_refptr<VideoSourceInterface> VideoSourceHandler::GetFirstVideoSource(
stream =
blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(GURL(url));
}
- if (stream.isNull() || !stream.extraData()) {
+
+ if (stream.isNull()) {
LOG(ERROR) << "GetFirstVideoSource - invalid url: " << url;
- return source;
+ return blink::WebMediaStreamTrack();
}
// Get the first video track from the stream.
- MediaStreamExtraData* extra_data =
- static_cast<MediaStreamExtraData*>(stream.extraData());
- if (!extra_data) {
- LOG(ERROR) << "GetFirstVideoSource - MediaStreamExtraData is NULL.";
- return source;
- }
- webrtc::MediaStreamInterface* native_stream = extra_data->stream().get();
- if (!native_stream) {
- LOG(ERROR) << "GetFirstVideoSource - native stream is NULL.";
- return source;
- }
- webrtc::VideoTrackVector native_video_tracks =
- native_stream->GetVideoTracks();
- if (native_video_tracks.empty()) {
- LOG(ERROR) << "GetFirstVideoSource - stream has no video track.";
- return source;
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ stream.videoTracks(video_tracks);
+ if (video_tracks.isEmpty()) {
+    LOG(ERROR) << "GetFirstVideoSource - no video tracks available."
+ << " url: " << url;
+ return blink::WebMediaStreamTrack();
}
- source = native_video_tracks[0]->GetSource();
- return source;
+
+ return video_tracks[0];
}
-VideoRenderer* VideoSourceHandler::GetReceiver(
- FrameReaderInterface* reader) {
- std::map<FrameReaderInterface*, VideoRenderer*>::iterator it;
- it = reader_to_receiver_.find(reader);
+void VideoSourceHandler::DeliverFrameForTesting(
+ FrameReaderInterface* reader,
+ const scoped_refptr<media::VideoFrame>& frame) {
+ SourceInfoMap::iterator it = reader_to_receiver_.find(reader);
if (it == reader_to_receiver_.end()) {
- return NULL;
+ return;
}
- return it->second;
+ PpFrameReceiver* receiver = it->second->receiver_.get();
+ receiver->OnVideoFrame(frame, media::VideoCaptureFormat(),
+ base::TimeTicks());
}
-} // namespace content
+VideoSourceHandler::SourceInfo::SourceInfo(
+ const blink::WebMediaStreamTrack& blink_track,
+ FrameReaderInterface* reader)
+ : receiver_(new PpFrameReceiver(blink_track)) {
+ receiver_->SetReader(reader);
+}
+VideoSourceHandler::SourceInfo::~SourceInfo() {
+ receiver_->SetReader(NULL);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_source_handler.h b/chromium/content/renderer/media/video_source_handler.h
index 7025d922ccc..ead167a0a5b 100644
--- a/chromium/content/renderer/media/video_source_handler.h
+++ b/chromium/content/renderer/media/video_source_handler.h
@@ -9,33 +9,32 @@
#include <string>
#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
-#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
-
-namespace cricket {
-class VideoFrame;
-}
+#include "media/base/video_frame.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
namespace content {
-class MediaStreamDependencyFactory;
class MediaStreamRegistryInterface;
+class MediaStreamVideoSink;
+class PpFrameReceiver;
// Interface used by the effects pepper plugin to get captured frame
// from the video track.
class CONTENT_EXPORT FrameReaderInterface {
public:
// Got a new captured frame.
- // The ownership of the |frame| is transfered to the caller. So the caller
- // must delete |frame| when done with it.
- virtual bool GotFrame(cricket::VideoFrame* frame) = 0;
+ virtual bool GotFrame(const scoped_refptr<media::VideoFrame>& frame) = 0;
protected:
virtual ~FrameReaderInterface() {}
};
-// VideoSourceHandler is a glue class between the webrtc MediaStream and
+// VideoSourceHandler is a glue class between MediaStreamVideoTrack and
// the effects pepper plugin host.
class CONTENT_EXPORT VideoSourceHandler {
public:
@@ -47,22 +46,35 @@ class CONTENT_EXPORT VideoSourceHandler {
// the received frames will be delivered via |reader|.
// Returns true on success and false on failure.
bool Open(const std::string& url, FrameReaderInterface* reader);
- // Closes |reader|'s connection with the first video track in
- // the MediaStream specified by |url|, i.e. stops receiving frames from the
- // video track.
+ // Closes |reader|'s connection with the video track, i.e. stops receiving
+ // frames from the video track.
// Returns true on success and false on failure.
- bool Close(const std::string& url, FrameReaderInterface* reader);
-
- // Gets the VideoRenderer associated with |reader|.
- // Made it public only for testing purpose.
- cricket::VideoRenderer* GetReceiver(FrameReaderInterface* reader);
+ bool Close(FrameReaderInterface* reader);
private:
- scoped_refptr<webrtc::VideoSourceInterface> GetFirstVideoSource(
- const std::string& url);
+ FRIEND_TEST_ALL_PREFIXES(VideoSourceHandlerTest, OpenClose);
+
+ struct SourceInfo {
+ SourceInfo(const blink::WebMediaStreamTrack& blink_track,
+ FrameReaderInterface* reader);
+ ~SourceInfo();
+
+ scoped_ptr<PpFrameReceiver> receiver_;
+ };
+
+ typedef std::map<FrameReaderInterface*, SourceInfo*> SourceInfoMap;
+
+ // Deliver VideoFrame to the MediaStreamVideoSink associated with
+ // |reader|. For testing only.
+ void DeliverFrameForTesting(FrameReaderInterface* reader,
+ const scoped_refptr<media::VideoFrame>& frame);
+
+ blink::WebMediaStreamTrack GetFirstVideoTrack(const std::string& url);
MediaStreamRegistryInterface* registry_;
- std::map<FrameReaderInterface*, cricket::VideoRenderer*> reader_to_receiver_;
+ SourceInfoMap reader_to_receiver_;
+
+ base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(VideoSourceHandler);
};
@@ -70,4 +82,3 @@ class CONTENT_EXPORT VideoSourceHandler {
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_VIDEO_SOURCE_HANDLER_H_
-
diff --git a/chromium/content/renderer/media/video_source_handler_unittest.cc b/chromium/content/renderer/media/video_source_handler_unittest.cc
index f00a277827b..94dce46a195 100644
--- a/chromium/content/renderer/media/video_source_handler_unittest.cc
+++ b/chromium/content/renderer/media/video_source_handler_unittest.cc
@@ -4,20 +4,19 @@
#include <string>
+#include "base/message_loop/message_loop.h"
#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_extra_data.h"
+#include "content/child/child_process.h"
+#include "content/common/media/video_capture.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "content/renderer/media/media_stream.h"
#include "content/renderer/media/media_stream_registry_interface.h"
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
#include "content/renderer/media/mock_media_stream_registry.h"
#include "content/renderer/media/video_source_handler.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/WebKit/public/platform/WebString.h"
-#include "third_party/libjingle/source/talk/media/base/videorenderer.h"
-#include "third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.h"
-
-using cricket::VideoFrame;
namespace content {
@@ -27,31 +26,34 @@ static const std::string kUnknownStreamUrl = "unknown_stream_url";
class FakeFrameReader : public FrameReaderInterface {
public:
- virtual bool GotFrame(VideoFrame* frame) OVERRIDE {
- last_frame_.reset(frame);
+ virtual bool GotFrame(
+ const scoped_refptr<media::VideoFrame>& frame) OVERRIDE {
+ last_frame_ = frame;
return true;
}
- const VideoFrame* last_frame() {
+ const media::VideoFrame* last_frame() {
return last_frame_.get();
}
private:
- scoped_ptr<VideoFrame> last_frame_;
+ scoped_refptr<media::VideoFrame> last_frame_;
};
class VideoSourceHandlerTest : public ::testing::Test {
public:
- VideoSourceHandlerTest() : registry_(&dependency_factory_) {
+ VideoSourceHandlerTest()
+ : child_process_(new ChildProcess()),
+ registry_() {
handler_.reset(new VideoSourceHandler(&registry_));
- dependency_factory_.EnsurePeerConnectionFactory();
registry_.Init(kTestStreamUrl);
registry_.AddVideoTrack(kTestVideoTrackId);
}
protected:
+ base::MessageLoop message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
scoped_ptr<VideoSourceHandler> handler_;
- MockMediaStreamDependencyFactory dependency_factory_;
MockMediaStreamRegistry registry_;
};
@@ -60,30 +62,35 @@ TEST_F(VideoSourceHandlerTest, OpenClose) {
// Unknow url will return false.
EXPECT_FALSE(handler_->Open(kUnknownStreamUrl, &reader));
EXPECT_TRUE(handler_->Open(kTestStreamUrl, &reader));
- cricket::WebRtcVideoFrame test_frame;
+
int width = 640;
int height = 360;
- int64 et = 123456;
- int64 ts = 789012;
- test_frame.InitToBlack(width, height, 1, 1, et, ts);
- cricket::VideoRenderer* receiver = handler_->GetReceiver(&reader);
- ASSERT(receiver != NULL);
- receiver->RenderFrame(&test_frame);
-
- const VideoFrame* frame = reader.last_frame();
+ base::TimeDelta ts = base::TimeDelta::FromInternalValue(789012);
+
+ // A new frame is captured.
+ scoped_refptr<media::VideoFrame> captured_frame =
+ media::VideoFrame::CreateBlackFrame(gfx::Size(width, height));
+ captured_frame->set_timestamp(ts);
+
+ // The frame is delivered to VideoSourceHandler.
+ handler_->DeliverFrameForTesting(&reader, captured_frame);
+
+ // Compare |frame| to |captured_frame|.
+ const media::VideoFrame* frame = reader.last_frame();
ASSERT_TRUE(frame != NULL);
+ EXPECT_EQ(width, frame->coded_size().width());
+ EXPECT_EQ(height, frame->coded_size().height());
+ EXPECT_EQ(ts, frame->timestamp());
+ EXPECT_EQ(captured_frame->data(media::VideoFrame::kYPlane),
+ frame->data(media::VideoFrame::kYPlane));
- // Compare |frame| to |test_frame|.
- EXPECT_EQ(test_frame.GetWidth(), frame->GetWidth());
- EXPECT_EQ(test_frame.GetHeight(), frame->GetHeight());
- EXPECT_EQ(test_frame.GetElapsedTime(), frame->GetElapsedTime());
- EXPECT_EQ(test_frame.GetTimeStamp(), frame->GetTimeStamp());
- EXPECT_EQ(test_frame.GetYPlane(), frame->GetYPlane());
- EXPECT_EQ(test_frame.GetUPlane(), frame->GetUPlane());
- EXPECT_EQ(test_frame.GetVPlane(), frame->GetVPlane());
-
- EXPECT_TRUE(handler_->Close(kTestStreamUrl, &reader));
- EXPECT_TRUE(handler_->GetReceiver(&reader) == NULL);
+ EXPECT_FALSE(handler_->Close(NULL));
+ EXPECT_TRUE(handler_->Close(&reader));
+}
+
+TEST_F(VideoSourceHandlerTest, OpenWithoutClose) {
+ FakeFrameReader reader;
+ EXPECT_TRUE(handler_->Open(kTestStreamUrl, &reader));
}
} // namespace content
diff --git a/chromium/content/renderer/media/video_track_adapter.cc b/chromium/content/renderer/media/video_track_adapter.cc
new file mode 100644
index 00000000000..461b16638f6
--- /dev/null
+++ b/chromium/content/renderer/media/video_track_adapter.cc
@@ -0,0 +1,340 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/video_track_adapter.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/location.h"
+#include "media/base/video_util.h"
+
+namespace content {
+
+namespace {
+
+// Empty method used for keeping a reference to the original media::VideoFrame
+// in VideoFrameResolutionAdapter::DeliverFrame if cropping is needed.
+// The reference to |frame| is kept in the closure that calls this method.
+void ReleaseOriginalFrame(
+ const scoped_refptr<media::VideoFrame>& frame) {
+}
+
+void ResetCallbackOnMainRenderThread(
+ scoped_ptr<VideoCaptureDeliverFrameCB> callback) {
+ // |callback| will be deleted when this exits.
+}
+
+} // anonymous namespace
+
+// VideoFrameResolutionAdapter is created on and lives on
+// on the IO-thread. It does the resolution adaptation and delivers frames to
+// all registered tracks on the IO-thread.
+// All method calls must be on the IO-thread.
+class VideoTrackAdapter::VideoFrameResolutionAdapter
+ : public base::RefCountedThreadSafe<VideoFrameResolutionAdapter> {
+ public:
+ VideoFrameResolutionAdapter(
+ scoped_refptr<base::SingleThreadTaskRunner> render_message_loop,
+ int max_width,
+ int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio);
+
+ // Add |callback| to receive video frames on the IO-thread.
+ // |callback| will however be released on the main render thread.
+ void AddCallback(const MediaStreamVideoTrack* track,
+ const VideoCaptureDeliverFrameCB& callback);
+
+ // Removes |callback| associated with |track| from receiving video frames if
+ // |track| has been added. It is ok to call RemoveCallback even if the |track|
+ // has not been added. The |callback| is released on the main render thread.
+ void RemoveCallback(const MediaStreamVideoTrack* track);
+
+ void DeliverFrame(const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
+ // Returns true if all arguments match with the output of this adapter.
+ bool ConstraintsMatch(int max_width,
+ int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio) const;
+
+ bool IsEmpty() const;
+
+ private:
+ virtual ~VideoFrameResolutionAdapter();
+ friend class base::RefCountedThreadSafe<VideoFrameResolutionAdapter>;
+
+ virtual void DoDeliverFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
+ // Bound to the IO-thread.
+ base::ThreadChecker io_thread_checker_;
+
+ // The task runner where we will release VideoCaptureDeliverFrameCB
+ // registered in AddCallback.
+ scoped_refptr<base::SingleThreadTaskRunner> renderer_task_runner_;
+
+ gfx::Size max_frame_size_;
+ double min_aspect_ratio_;
+ double max_aspect_ratio_;
+
+ typedef std::pair<const void*, VideoCaptureDeliverFrameCB>
+ VideoIdCallbackPair;
+ std::vector<VideoIdCallbackPair> callbacks_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameResolutionAdapter);
+};
+
+VideoTrackAdapter::
+VideoFrameResolutionAdapter::VideoFrameResolutionAdapter(
+ scoped_refptr<base::SingleThreadTaskRunner> render_message_loop,
+ int max_width,
+ int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio)
+ : renderer_task_runner_(render_message_loop),
+ max_frame_size_(max_width, max_height),
+ min_aspect_ratio_(min_aspect_ratio),
+ max_aspect_ratio_(max_aspect_ratio) {
+ DCHECK(renderer_task_runner_);
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK_GE(max_aspect_ratio_, min_aspect_ratio_);
+ CHECK_NE(0, max_aspect_ratio_);
+ DVLOG(3) << "VideoFrameResolutionAdapter("
+ << "{ max_width =" << max_width << "}, "
+ << "{ max_height =" << max_height << "}, "
+ << "{ min_aspect_ratio =" << min_aspect_ratio << "}, "
+ << "{ max_aspect_ratio_ =" << max_aspect_ratio_ << "}) ";
+}
+
+VideoTrackAdapter::
+VideoFrameResolutionAdapter::~VideoFrameResolutionAdapter() {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(callbacks_.empty());
+}
+
+void VideoTrackAdapter::VideoFrameResolutionAdapter::DeliverFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ // TODO(perkj): Allow cropping / scaling of textures once
+ // http://crbug/362521 is fixed.
+ if (frame->format() == media::VideoFrame::NATIVE_TEXTURE) {
+ DoDeliverFrame(frame, format, estimated_capture_time);
+ return;
+ }
+ scoped_refptr<media::VideoFrame> video_frame(frame);
+ double input_ratio =
+ static_cast<double>(frame->natural_size().width()) /
+ frame->natural_size().height();
+
+ // If |frame| has larger width or height than requested, or the aspect ratio
+ // does not match the requested, we want to create a wrapped version of this
+ // frame with a size that fulfills the constraints.
+ if (frame->natural_size().width() > max_frame_size_.width() ||
+ frame->natural_size().height() > max_frame_size_.height() ||
+ input_ratio > max_aspect_ratio_ ||
+ input_ratio < min_aspect_ratio_) {
+ int desired_width = std::min(max_frame_size_.width(),
+ frame->natural_size().width());
+ int desired_height = std::min(max_frame_size_.height(),
+ frame->natural_size().height());
+
+ double resulting_ratio =
+ static_cast<double>(desired_width) / desired_height;
+ double requested_ratio = resulting_ratio;
+
+ if (requested_ratio > max_aspect_ratio_)
+ requested_ratio = max_aspect_ratio_;
+ else if (requested_ratio < min_aspect_ratio_)
+ requested_ratio = min_aspect_ratio_;
+
+ if (resulting_ratio < requested_ratio) {
+ desired_height = static_cast<int>((desired_height * resulting_ratio) /
+ requested_ratio);
+ // Make sure we scale to an even height to avoid rounding errors
+ desired_height = (desired_height + 1) & ~1;
+ } else if (resulting_ratio > requested_ratio) {
+ desired_width = static_cast<int>((desired_width * requested_ratio) /
+ resulting_ratio);
+ // Make sure we scale to an even width to avoid rounding errors.
+ desired_width = (desired_width + 1) & ~1;
+ }
+
+ gfx::Size desired_size(desired_width, desired_height);
+
+ // Get the largest centered rectangle with the same aspect ratio of
+ // |desired_size| that fits entirely inside of |frame->visible_rect()|.
+ // This will be the rect we need to crop the original frame to.
+ // From this rect, the original frame can be scaled down to |desired_size|.
+ gfx::Rect region_in_frame =
+ media::ComputeLetterboxRegion(frame->visible_rect(), desired_size);
+
+ video_frame = media::VideoFrame::WrapVideoFrame(
+ frame,
+ region_in_frame,
+ desired_size,
+ base::Bind(&ReleaseOriginalFrame, frame));
+
+ DVLOG(3) << "desired size " << desired_size.ToString()
+ << " output natural size "
+ << video_frame->natural_size().ToString()
+ << " output visible rect "
+ << video_frame->visible_rect().ToString();
+ }
+ DoDeliverFrame(video_frame, format, estimated_capture_time);
+}
+
+void VideoTrackAdapter::
+VideoFrameResolutionAdapter::DoDeliverFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ for (std::vector<VideoIdCallbackPair>::const_iterator it = callbacks_.begin();
+ it != callbacks_.end(); ++it) {
+ it->second.Run(frame, format, estimated_capture_time);
+ }
+}
+
+void VideoTrackAdapter::VideoFrameResolutionAdapter::AddCallback(
+ const MediaStreamVideoTrack* track,
+ const VideoCaptureDeliverFrameCB& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ callbacks_.push_back(std::make_pair(track, callback));
+}
+
+void VideoTrackAdapter::VideoFrameResolutionAdapter::RemoveCallback(
+ const MediaStreamVideoTrack* track) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ std::vector<VideoIdCallbackPair>::iterator it = callbacks_.begin();
+ for (; it != callbacks_.end(); ++it) {
+ if (it->first == track) {
+ // Make sure the VideoCaptureDeliverFrameCB is released on the main
+ // render thread since it was added on the main render thread in
+ // VideoTrackAdapter::AddTrack.
+ scoped_ptr<VideoCaptureDeliverFrameCB> callback(
+ new VideoCaptureDeliverFrameCB(it->second));
+ callbacks_.erase(it);
+ renderer_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&ResetCallbackOnMainRenderThread,
+ base::Passed(&callback)));
+
+ return;
+ }
+ }
+}
+
+bool VideoTrackAdapter::VideoFrameResolutionAdapter::ConstraintsMatch(
+ int max_width,
+ int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio) const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ return max_frame_size_.width() == max_width &&
+ max_frame_size_.height() == max_height &&
+ min_aspect_ratio_ == min_aspect_ratio &&
+ max_aspect_ratio_ == max_aspect_ratio;
+}
+
+bool VideoTrackAdapter::VideoFrameResolutionAdapter::IsEmpty() const {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ return callbacks_.empty();
+}
+
+VideoTrackAdapter::VideoTrackAdapter(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
+ : io_message_loop_(io_message_loop),
+ renderer_task_runner_(base::MessageLoopProxy::current()) {
+ DCHECK(io_message_loop_);
+}
+
+VideoTrackAdapter::~VideoTrackAdapter() {
+ DCHECK(adapters_.empty());
+}
+
+void VideoTrackAdapter::AddTrack(const MediaStreamVideoTrack* track,
+ VideoCaptureDeliverFrameCB frame_callback,
+ int max_width,
+ int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoTrackAdapter::AddTrackOnIO,
+ this, track, frame_callback, max_width, max_height,
+ min_aspect_ratio, max_aspect_ratio));
+}
+
+void VideoTrackAdapter::AddTrackOnIO(
+ const MediaStreamVideoTrack* track,
+ VideoCaptureDeliverFrameCB frame_callback,
+ int max_width,
+ int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ scoped_refptr<VideoFrameResolutionAdapter> adapter;
+ for (FrameAdapters::const_iterator it = adapters_.begin();
+ it != adapters_.end(); ++it) {
+ if ((*it)->ConstraintsMatch(max_width, max_height, min_aspect_ratio,
+ max_aspect_ratio)) {
+ adapter = it->get();
+ break;
+ }
+ }
+ if (!adapter) {
+ adapter = new VideoFrameResolutionAdapter(renderer_task_runner_,
+ max_width,
+ max_height,
+ min_aspect_ratio,
+ max_aspect_ratio);
+ adapters_.push_back(adapter);
+ }
+
+ adapter->AddCallback(track, frame_callback);
+}
+
+void VideoTrackAdapter::RemoveTrack(const MediaStreamVideoTrack* track) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoTrackAdapter::RemoveTrackOnIO, this, track));
+}
+
+void VideoTrackAdapter::RemoveTrackOnIO(const MediaStreamVideoTrack* track) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ for (FrameAdapters::iterator it = adapters_.begin();
+ it != adapters_.end(); ++it) {
+ (*it)->RemoveCallback(track);
+ if ((*it)->IsEmpty()) {
+ adapters_.erase(it);
+ break;
+ }
+ }
+}
+
+void VideoTrackAdapter::DeliverFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ TRACE_EVENT0("video", "VideoTrackAdapter::DeliverFrameOnIO");
+ for (FrameAdapters::iterator it = adapters_.begin();
+ it != adapters_.end(); ++it) {
+ (*it)->DeliverFrame(frame, format, estimated_capture_time);
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_track_adapter.h b/chromium/content/renderer/media/video_track_adapter.h
new file mode 100644
index 00000000000..6060708ca51
--- /dev/null
+++ b/chromium/content/renderer/media/video_track_adapter.h
@@ -0,0 +1,90 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_VIDEO_TRACK_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_VIDEO_TRACK_ADAPTER_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "media/base/video_frame.h"
+
+namespace content {
+
+// VideoTrackAdapter is a helper class used by MediaStreamVideoSource used for
+// adapting the video resolution from a source implementation to the resolution
+// a track requires. Different tracks can have different resolution constraints.
+// The constraints can be set as max width and height as well as max and min
+// aspect ratio.
+// Video frames are delivered to a track using a VideoCaptureDeliverFrameCB on
+// the IO-thread.
+// Adaptations is done by wrapping the original media::VideoFrame in a new
+// media::VideoFrame with a new visible_rect and natural_size.
+class VideoTrackAdapter
+ : public base::RefCountedThreadSafe<VideoTrackAdapter> {
+ public:
+ explicit VideoTrackAdapter(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop);
+
+ // Register |track| to receive video frames in |frame_callback| with
+ // a resolution within the boundaries of the arguments.
+ // Must be called on the main render thread. |frame_callback| is guaranteed to
+ // be released on the main render thread.
+ void AddTrack(const MediaStreamVideoTrack* track,
+ VideoCaptureDeliverFrameCB frame_callback,
+ int max_width, int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio);
+ void RemoveTrack(const MediaStreamVideoTrack* track);
+
+ // Delivers |frame| to all tracks that have registered a callback.
+ // Must be called on the IO-thread.
+ void DeliverFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return io_message_loop_;
+ }
+
+ private:
+ virtual ~VideoTrackAdapter();
+ friend class base::RefCountedThreadSafe<VideoTrackAdapter>;
+
+ void AddTrackOnIO(
+ const MediaStreamVideoTrack* track,
+ VideoCaptureDeliverFrameCB frame_callback,
+ int max_width, int max_height,
+ double min_aspect_ratio,
+ double max_aspect_ratio);
+ void RemoveTrackOnIO(const MediaStreamVideoTrack* track);
+
+ // |thread_checker_| is bound to the main render thread.
+ base::ThreadChecker thread_checker_;
+
+ scoped_refptr<base::MessageLoopProxy> io_message_loop_;
+
+ // |renderer_task_runner_| is used to ensure that
+ // VideoCaptureDeliverFrameCB is released on the main render thread.
+ scoped_refptr<base::SingleThreadTaskRunner> renderer_task_runner_;
+
+ // VideoFrameResolutionAdapter is an inner class that is created on the main
+ // render thread but operates on the IO-thread. It does the resolution
+ // adaptation and delivers frames to all registered tracks on the IO-thread.
+ class VideoFrameResolutionAdapter;
+ typedef std::vector<scoped_refptr<VideoFrameResolutionAdapter> >
+ FrameAdapters;
+ FrameAdapters adapters_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoTrackAdapter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_VIDEO_TRACK_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webaudio_capturer_source.cc b/chromium/content/renderer/media/webaudio_capturer_source.cc
index 1f1192a9791..7076c24f329 100644
--- a/chromium/content/renderer/media/webaudio_capturer_source.cc
+++ b/chromium/content/renderer/media/webaudio_capturer_source.cc
@@ -54,6 +54,8 @@ void WebAudioCapturerSource::setFormat(
wrapper_bus_ = AudioBus::CreateWrapper(params_.channels());
capture_bus_ = AudioBus::Create(params_);
+ audio_data_.reset(
+ new int16[params_.frames_per_buffer() * params_.channels()]);
fifo_.reset(new AudioFifo(
params_.channels(),
kMaxNumberOfBuffersInFifo * params_.frames_per_buffer()));
@@ -112,10 +114,20 @@ void WebAudioCapturerSource::consumeAudio(
if (capturer_) {
capturer_->GetAudioProcessingParams(&delay, &volume, &key_pressed);
}
+
+ // Turn off audio processing if the delay value is 0, since in such case,
+ // it indicates the data is not from microphone.
+ // TODO(xians): remove the flag when supporting one APM per audio track.
+ // See crbug/264611 for details.
+ bool need_audio_processing = (delay.InMilliseconds() != 0);
while (fifo_->frames() >= capture_frames) {
fifo_->Consume(capture_bus_.get(), 0, capture_frames);
- track_->Capture(capture_bus_.get(), delay.InMilliseconds(),
- volume, key_pressed);
+ // TODO(xians): Avoid this interleave/deinterleave operation.
+ capture_bus_->ToInterleaved(capture_bus_->frames(),
+ params_.bits_per_sample() / 8,
+ audio_data_.get());
+ track_->Capture(audio_data_.get(), delay, volume, key_pressed,
+ need_audio_processing);
}
}
diff --git a/chromium/content/renderer/media/webaudio_capturer_source.h b/chromium/content/renderer/media/webaudio_capturer_source.h
index 2993531db18..fdd3f9c68ff 100644
--- a/chromium/content/renderer/media/webaudio_capturer_source.h
+++ b/chromium/content/renderer/media/webaudio_capturer_source.h
@@ -81,6 +81,9 @@ class WebAudioCapturerSource
// Handles mismatch between WebAudio buffer size and WebRTC.
scoped_ptr<media::AudioFifo> fifo_;
+ // Buffer to pass audio data to WebRtc.
+ scoped_ptr<int16[]> audio_data_;
+
// Synchronizes HandleCapture() with AudioCapturerSource calls.
base::Lock lock_;
bool started_;
diff --git a/chromium/content/renderer/media/webaudiosourceprovider_impl.cc b/chromium/content/renderer/media/webaudiosourceprovider_impl.cc
index ec1683daae4..4d878ce4ba7 100644
--- a/chromium/content/renderer/media/webaudiosourceprovider_impl.cc
+++ b/chromium/content/renderer/media/webaudiosourceprovider_impl.cc
@@ -9,7 +9,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "third_party/WebKit/public/platform/WebAudioSourceProviderClient.h"
using blink::WebVector;
@@ -48,15 +48,14 @@ class AutoTryLock {
WebAudioSourceProviderImpl::WebAudioSourceProviderImpl(
const scoped_refptr<media::AudioRendererSink>& sink)
- : weak_this_(this),
- channels_(0),
+ : channels_(0),
sample_rate_(0),
volume_(1.0),
state_(kStopped),
renderer_(NULL),
client_(NULL),
- sink_(sink) {
-}
+ sink_(sink),
+ weak_factory_(this) {}
WebAudioSourceProviderImpl::~WebAudioSourceProviderImpl() {
}
@@ -71,9 +70,8 @@ void WebAudioSourceProviderImpl::setClient(
// The client will now take control by calling provideInput() periodically.
client_ = client;
- set_format_cb_ = media::BindToCurrentLoop(
- base::Bind(&WebAudioSourceProviderImpl::OnSetFormat,
- weak_this_.GetWeakPtr()));
+ set_format_cb_ = media::BindToCurrentLoop(base::Bind(
+ &WebAudioSourceProviderImpl::OnSetFormat, weak_factory_.GetWeakPtr()));
// If |renderer_| is set, then run |set_format_cb_| to send |client_|
// the current format info. If |renderer_| is not set, then |set_format_cb_|
@@ -116,7 +114,9 @@ void WebAudioSourceProviderImpl::provideInput(
DCHECK(renderer_);
DCHECK(client_);
DCHECK_EQ(channels_, bus_wrapper_->channels());
- renderer_->Render(bus_wrapper_.get(), 0);
+ const size_t frames = renderer_->Render(bus_wrapper_.get(), 0);
+ if (frames < number_of_frames)
+ bus_wrapper_->ZeroFramesPartial(frames, number_of_frames - frames);
bus_wrapper_->Scale(volume_);
}
diff --git a/chromium/content/renderer/media/webaudiosourceprovider_impl.h b/chromium/content/renderer/media/webaudiosourceprovider_impl.h
index d1a6fe72322..aca9d830021 100644
--- a/chromium/content/renderer/media/webaudiosourceprovider_impl.h
+++ b/chromium/content/renderer/media/webaudiosourceprovider_impl.h
@@ -56,8 +56,6 @@ class CONTENT_EXPORT WebAudioSourceProviderImpl
// Calls setFormat() on |client_| from the Blink renderer thread.
void OnSetFormat();
- base::WeakPtrFactory<WebAudioSourceProviderImpl> weak_this_;
-
// Closure that posts a task to call OnSetFormat() on the renderer thread.
base::Closure set_format_cb_;
@@ -81,6 +79,9 @@ class CONTENT_EXPORT WebAudioSourceProviderImpl
scoped_refptr<media::AudioRendererSink> sink_;
scoped_ptr<media::AudioBus> bus_wrapper_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<WebAudioSourceProviderImpl> weak_factory_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(WebAudioSourceProviderImpl);
};
diff --git a/chromium/content/renderer/media/webaudiosourceprovider_impl_unittest.cc b/chromium/content/renderer/media/webaudiosourceprovider_impl_unittest.cc
index e4786a5a07a..8a59822debc 100644
--- a/chromium/content/renderer/media/webaudiosourceprovider_impl_unittest.cc
+++ b/chromium/content/renderer/media/webaudiosourceprovider_impl_unittest.cc
@@ -212,15 +212,27 @@ TEST_F(WebAudioSourceProviderImplTest, ProvideInput) {
wasp_impl_->provideInput(audio_data, params_.frames_per_buffer());
ASSERT_TRUE(CompareBusses(bus1.get(), bus2.get()));
+ // Ensure if a renderer properly fill silence for partial Render() calls by
+ // configuring the fake callback to return half the data. After these calls
+ // bus1 is full of junk data, and bus2 is partially filled.
+ wasp_impl_->SetVolume(1);
+ fake_callback_.Render(bus1.get(), 0);
+ fake_callback_.reset();
+ fake_callback_.Render(bus2.get(), 0);
+ bus2->ZeroFramesPartial(bus2->frames() / 2,
+ bus2->frames() - bus2->frames() / 2);
+ fake_callback_.reset();
+ fake_callback_.set_half_fill(true);
wasp_impl_->Play();
- // Play should return real audio data again...
+ // Play should return real audio data again, but the last half should be zero.
wasp_impl_->provideInput(audio_data, params_.frames_per_buffer());
- ASSERT_FALSE(CompareBusses(bus1.get(), bus2.get()));
+ ASSERT_TRUE(CompareBusses(bus1.get(), bus2.get()));
// Stop() should return silence.
wasp_impl_->Stop();
bus1->channel(0)[0] = 1;
+ bus2->Zero();
wasp_impl_->provideInput(audio_data, params_.frames_per_buffer());
ASSERT_TRUE(CompareBusses(bus1.get(), bus2.get()));
}
diff --git a/chromium/content/renderer/media/webcontentdecryptionmodule_impl.cc b/chromium/content/renderer/media/webcontentdecryptionmodule_impl.cc
index 14bbc8d5fe9..c1e455e56fe 100644
--- a/chromium/content/renderer/media/webcontentdecryptionmodule_impl.cc
+++ b/chromium/content/renderer/media/webcontentdecryptionmodule_impl.cc
@@ -9,185 +9,76 @@
#include "base/basictypes.h"
#include "base/bind.h"
-#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "base/memory/weak_ptr.h"
#include "base/strings/string_util.h"
-#include "content/renderer/media/crypto/content_decryption_module_factory.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/cdm_session_adapter.h"
+#include "content/renderer/media/crypto/key_systems.h"
#include "content/renderer/media/webcontentdecryptionmodulesession_impl.h"
#include "media/base/media_keys.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+#include "third_party/WebKit/public/web/WebSecurityOrigin.h"
#include "url/gurl.h"
-namespace content {
-
-// Forwards the session ID-based callbacks of the MediaKeys interface to the
-// appropriate session object.
-class SessionIdAdapter {
- public:
- SessionIdAdapter();
- ~SessionIdAdapter();
-
- // On success, creates a MediaKeys, returns it in |media_keys|, returns true.
- bool Initialize(const std::string& key_system,
- scoped_ptr<media::MediaKeys>* media_keys);
-
- // Generates a unique internal session id.
- uint32 GenerateSessionId();
-
- // Adds a session to the internal map. Does not take ownership of the session.
- void AddSession(uint32 session_id,
- WebContentDecryptionModuleSessionImpl* session);
-
- // Removes a session from the internal map.
- void RemoveSession(uint32 session_id);
-
- private:
- typedef std::map<uint32, WebContentDecryptionModuleSessionImpl*> SessionMap;
-
- // Callbacks for firing session events.
- void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
- void OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url);
- void OnSessionReady(uint32 session_id);
- void OnSessionClosed(uint32 session_id);
- void OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code);
-
- // Helper function of the callbacks.
- WebContentDecryptionModuleSessionImpl* GetSession(uint32 session_id);
-
- base::WeakPtrFactory<SessionIdAdapter> weak_ptr_factory_;
-
- SessionMap sessions_;
-
- // Session ID should be unique per renderer process for debugging purposes.
- static uint32 next_session_id_;
-
- DISALLOW_COPY_AND_ASSIGN(SessionIdAdapter);
-};
-
-const uint32 kStartingSessionId = 1;
-uint32 SessionIdAdapter::next_session_id_ = kStartingSessionId;
-COMPILE_ASSERT(kStartingSessionId > media::MediaKeys::kInvalidSessionId,
- invalid_starting_value);
-
-SessionIdAdapter::SessionIdAdapter()
- : weak_ptr_factory_(this) {
-}
-
-SessionIdAdapter::~SessionIdAdapter() {
-}
-
-bool SessionIdAdapter::Initialize(const std::string& key_system,
- scoped_ptr<media::MediaKeys>* media_keys) {
- DCHECK(media_keys);
- DCHECK(!*media_keys);
-
- base::WeakPtr<SessionIdAdapter> weak_this = weak_ptr_factory_.GetWeakPtr();
- scoped_ptr<media::MediaKeys> created_media_keys =
- ContentDecryptionModuleFactory::Create(
- // TODO(ddorwin): Address lower in the stack: http://crbug.com/252065
- "webkit-" + key_system,
#if defined(ENABLE_PEPPER_CDMS)
- // TODO(ddorwin): Support Pepper-based CDMs: http://crbug.com/250049
- NULL,
- NULL,
- base::Closure(),
-#elif defined(OS_ANDROID)
- // TODO(xhwang): Support Android.
- NULL,
- 0,
- // TODO(ddorwin): Get the URL for the frame containing the MediaKeys.
- GURL(),
-#endif // defined(ENABLE_PEPPER_CDMS)
- base::Bind(&SessionIdAdapter::OnSessionCreated, weak_this),
- base::Bind(&SessionIdAdapter::OnSessionMessage, weak_this),
- base::Bind(&SessionIdAdapter::OnSessionReady, weak_this),
- base::Bind(&SessionIdAdapter::OnSessionClosed, weak_this),
- base::Bind(&SessionIdAdapter::OnSessionError, weak_this));
- if (!created_media_keys)
- return false;
-
- *media_keys = created_media_keys.Pass();
- return true;
-}
-
-uint32 SessionIdAdapter::GenerateSessionId() {
- return next_session_id_++;
-}
-
-void SessionIdAdapter::AddSession(
- uint32 session_id,
- WebContentDecryptionModuleSessionImpl* session) {
- DCHECK(sessions_.find(session_id) == sessions_.end());
- sessions_[session_id] = session;
-}
-
-void SessionIdAdapter::RemoveSession(uint32 session_id) {
- DCHECK(sessions_.find(session_id) != sessions_.end());
- sessions_.erase(session_id);
-}
-
-void SessionIdAdapter::OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) {
- GetSession(session_id)->OnSessionCreated(web_session_id);
-}
-
-void SessionIdAdapter::OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) {
- GetSession(session_id)->OnSessionMessage(message, destination_url);
-}
-
-void SessionIdAdapter::OnSessionReady(uint32 session_id) {
- GetSession(session_id)->OnSessionReady();
-}
+#include "content/renderer/media/crypto/pepper_cdm_wrapper_impl.h"
+#endif
-void SessionIdAdapter::OnSessionClosed(uint32 session_id) {
- GetSession(session_id)->OnSessionClosed();
-}
-
-void SessionIdAdapter::OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
- GetSession(session_id)->OnSessionError(error_code, system_code);
-}
-
-WebContentDecryptionModuleSessionImpl* SessionIdAdapter::GetSession(
- uint32 session_id) {
- DCHECK(sessions_.find(session_id) != sessions_.end());
- return sessions_[session_id];
-}
+namespace content {
-//------------------------------------------------------------------------------
+WebContentDecryptionModuleImpl* WebContentDecryptionModuleImpl::Create(
+#if defined(ENABLE_PEPPER_CDMS)
+ blink::WebLocalFrame* frame,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
+#endif
+ const blink::WebSecurityOrigin& security_origin,
+ const base::string16& key_system) {
+#if defined(ENABLE_PEPPER_CDMS)
+ DCHECK(frame);
+#elif defined(ENABLE_BROWSER_CDMS)
+ DCHECK(manager);
+#endif
+ DCHECK(!security_origin.isNull());
+ DCHECK(!key_system.empty());
-WebContentDecryptionModuleImpl*
-WebContentDecryptionModuleImpl::Create(const base::string16& key_system) {
// TODO(ddorwin): Guard against this in supported types check and remove this.
// Chromium only supports ASCII key systems.
- if (!IsStringASCII(key_system)) {
+ if (!base::IsStringASCII(key_system)) {
NOTREACHED();
return NULL;
}
- // SessionIdAdapter creates the MediaKeys so it can provide its callbacks to
- // during creation of the MediaKeys.
- scoped_ptr<media::MediaKeys> media_keys;
- scoped_ptr<SessionIdAdapter> adapter(new SessionIdAdapter());
- if (!adapter->Initialize(UTF16ToASCII(key_system), &media_keys))
+ std::string key_system_ascii = base::UTF16ToASCII(key_system);
+ if (!IsConcreteSupportedKeySystem(key_system_ascii))
return NULL;
- return new WebContentDecryptionModuleImpl(media_keys.Pass(), adapter.Pass());
+ // If unique security origin, don't try to create the CDM.
+ if (security_origin.isUnique() || security_origin.toString() == "null") {
+ DLOG(ERROR) << "CDM use not allowed for unique security origin.";
+ return NULL;
+ }
+
+ scoped_refptr<CdmSessionAdapter> adapter(new CdmSessionAdapter());
+ GURL security_origin_as_gurl(security_origin.toString());
+
+ if (!adapter->Initialize(
+#if defined(ENABLE_PEPPER_CDMS)
+ base::Bind(&PepperCdmWrapperImpl::Create, frame),
+#elif defined(ENABLE_BROWSER_CDMS)
+ manager,
+#endif
+ key_system_ascii,
+ security_origin_as_gurl)) {
+ return NULL;
+ }
+
+ return new WebContentDecryptionModuleImpl(adapter);
}
WebContentDecryptionModuleImpl::WebContentDecryptionModuleImpl(
- scoped_ptr<media::MediaKeys> media_keys,
- scoped_ptr<SessionIdAdapter> adapter)
- : media_keys_(media_keys.Pass()),
- adapter_(adapter.Pass()) {
-}
+ scoped_refptr<CdmSessionAdapter> adapter)
+ : adapter_(adapter) {}
WebContentDecryptionModuleImpl::~WebContentDecryptionModuleImpl() {
}
@@ -196,22 +87,17 @@ WebContentDecryptionModuleImpl::~WebContentDecryptionModuleImpl() {
blink::WebContentDecryptionModuleSession*
WebContentDecryptionModuleImpl::createSession(
blink::WebContentDecryptionModuleSession::Client* client) {
- DCHECK(media_keys_);
- uint32 session_id = adapter_->GenerateSessionId();
- WebContentDecryptionModuleSessionImpl* session =
- new WebContentDecryptionModuleSessionImpl(
- session_id,
- media_keys_.get(),
- client,
- base::Bind(&WebContentDecryptionModuleImpl::OnSessionClosed,
- base::Unretained(this)));
-
- adapter_->AddSession(session_id, session);
- return session;
+ return adapter_->CreateSession(client);
+}
+
+media::Decryptor* WebContentDecryptionModuleImpl::GetDecryptor() {
+ return adapter_->GetDecryptor();
}
-void WebContentDecryptionModuleImpl::OnSessionClosed(uint32 session_id) {
- adapter_->RemoveSession(session_id);
+#if defined(ENABLE_BROWSER_CDMS)
+int WebContentDecryptionModuleImpl::GetCdmId() const {
+ return adapter_->GetCdmId();
}
+#endif // defined(ENABLE_BROWSER_CDMS)
} // namespace content
diff --git a/chromium/content/renderer/media/webcontentdecryptionmodule_impl.h b/chromium/content/renderer/media/webcontentdecryptionmodule_impl.h
index ecd5198efda..871ca27f4a9 100644
--- a/chromium/content/renderer/media/webcontentdecryptionmodule_impl.h
+++ b/chromium/content/renderer/media/webcontentdecryptionmodule_impl.h
@@ -7,45 +7,76 @@
#include <string>
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string16.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModule.h"
+namespace blink {
+#if defined(ENABLE_PEPPER_CDMS)
+class WebLocalFrame;
+#endif
+class WebSecurityOrigin;
+}
+
namespace media {
+class Decryptor;
class MediaKeys;
}
namespace content {
+class CdmSessionAdapter;
+#if defined(ENABLE_BROWSER_CDMS)
+class RendererCdmManager;
+#endif
class WebContentDecryptionModuleSessionImpl;
-class SessionIdAdapter;
class WebContentDecryptionModuleImpl
: public blink::WebContentDecryptionModule {
public:
static WebContentDecryptionModuleImpl* Create(
+#if defined(ENABLE_PEPPER_CDMS)
+ blink::WebLocalFrame* frame,
+#elif defined(ENABLE_BROWSER_CDMS)
+ RendererCdmManager* manager,
+#endif
+ const blink::WebSecurityOrigin& security_origin,
const base::string16& key_system);
virtual ~WebContentDecryptionModuleImpl();
+ // Returns the Decryptor associated with this CDM. May be NULL if no
+ // Decryptor associated with the MediaKeys object.
+ // TODO(jrummell): Figure out lifetimes, as WMPI may still use the decryptor
+ // after WebContentDecryptionModule is freed. http://crbug.com/330324
+ media::Decryptor* GetDecryptor();
+
+#if defined(ENABLE_BROWSER_CDMS)
+ // Returns the CDM ID associated with this object. May be kInvalidCdmId if no
+ // CDM ID is associated, such as when Clear Key is used.
+ int GetCdmId() const;
+#endif // defined(ENABLE_BROWSER_CDMS)
+
// blink::WebContentDecryptionModule implementation.
virtual blink::WebContentDecryptionModuleSession* createSession(
blink::WebContentDecryptionModuleSession::Client* client);
private:
- // Takes ownership of |media_keys| and |adapter|.
- WebContentDecryptionModuleImpl(scoped_ptr<media::MediaKeys> media_keys,
- scoped_ptr<SessionIdAdapter> adapter);
+ // Takes reference to |adapter|.
+ WebContentDecryptionModuleImpl(scoped_refptr<CdmSessionAdapter> adapter);
- // Called when a WebContentDecryptionModuleSessionImpl is closed.
- void OnSessionClosed(uint32 session_id);
-
- scoped_ptr<media::MediaKeys> media_keys_;
- scoped_ptr<SessionIdAdapter> adapter_;
+ scoped_refptr<CdmSessionAdapter> adapter_;
DISALLOW_COPY_AND_ASSIGN(WebContentDecryptionModuleImpl);
};
+// Allow typecasting from blink type as this is the only implementation.
+inline WebContentDecryptionModuleImpl* ToWebContentDecryptionModuleImpl(
+ blink::WebContentDecryptionModule* cdm) {
+ return static_cast<WebContentDecryptionModuleImpl*>(cdm);
+}
+
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_WEBCONTENTDECRYPTIONMODULE_IMPL_H_
diff --git a/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.cc b/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.cc
index bc74d35dc6a..50ebcd09f85 100644
--- a/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.cc
+++ b/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.cc
@@ -4,95 +4,132 @@
#include "content/renderer/media/webcontentdecryptionmodulesession_impl.h"
+#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/cdm_session_adapter.h"
+#include "media/base/cdm_promise.h"
#include "third_party/WebKit/public/platform/WebURL.h"
namespace content {
WebContentDecryptionModuleSessionImpl::WebContentDecryptionModuleSessionImpl(
- uint32 session_id,
- media::MediaKeys* media_keys,
Client* client,
- const SessionClosedCB& session_closed_cb)
- : media_keys_(media_keys),
+ const scoped_refptr<CdmSessionAdapter>& adapter)
+ : adapter_(adapter),
client_(client),
- session_closed_cb_(session_closed_cb),
- session_id_(session_id) {
- DCHECK(media_keys_);
+ is_closed_(false),
+ weak_ptr_factory_(this) {
}
WebContentDecryptionModuleSessionImpl::
-~WebContentDecryptionModuleSessionImpl() {
+ ~WebContentDecryptionModuleSessionImpl() {
+ if (!web_session_id_.empty())
+ adapter_->RemoveSession(web_session_id_);
}
blink::WebString WebContentDecryptionModuleSessionImpl::sessionId() const {
- return web_session_id_;
+ return blink::WebString::fromUTF8(web_session_id_);
}
-void WebContentDecryptionModuleSessionImpl::generateKeyRequest(
- const blink::WebString& mime_type,
- const uint8* init_data, size_t init_data_length) {
+void WebContentDecryptionModuleSessionImpl::initializeNewSession(
+ const blink::WebString& init_data_type,
+ const uint8* init_data,
+ size_t init_data_length) {
// TODO(ddorwin): Guard against this in supported types check and remove this.
// Chromium only supports ASCII MIME types.
- if (!IsStringASCII(mime_type)) {
+ if (!base::IsStringASCII(init_data_type)) {
NOTREACHED();
- OnSessionError(media::MediaKeys::kUnknownError, 0);
+ OnSessionError(media::MediaKeys::NOT_SUPPORTED_ERROR,
+ 0,
+ "The initialization data type " + init_data_type.utf8() +
+ " is not supported by the key system.");
return;
}
- media_keys_->CreateSession(
- session_id_, UTF16ToASCII(mime_type), init_data, init_data_length);
+ std::string init_data_type_as_ascii = base::UTF16ToASCII(init_data_type);
+ DLOG_IF(WARNING, init_data_type_as_ascii.find('/') != std::string::npos)
+ << "init_data_type '" << init_data_type_as_ascii
+ << "' may be a MIME type";
+
+ scoped_ptr<media::NewSessionCdmPromise> promise(
+ new media::NewSessionCdmPromise(
+ base::Bind(&WebContentDecryptionModuleSessionImpl::SessionCreated,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&WebContentDecryptionModuleSessionImpl::OnSessionError,
+ weak_ptr_factory_.GetWeakPtr())));
+ adapter_->InitializeNewSession(init_data_type_as_ascii,
+ init_data,
+ init_data_length,
+ media::MediaKeys::TEMPORARY_SESSION,
+ promise.Pass());
}
void WebContentDecryptionModuleSessionImpl::update(const uint8* response,
size_t response_length) {
DCHECK(response);
- media_keys_->UpdateSession(session_id_, response, response_length);
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::SimpleCdmPromise(
+ base::Bind(&WebContentDecryptionModuleSessionImpl::OnSessionReady,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&WebContentDecryptionModuleSessionImpl::OnSessionError,
+ weak_ptr_factory_.GetWeakPtr())));
+ adapter_->UpdateSession(
+ web_session_id_, response, response_length, promise.Pass());
}
-void WebContentDecryptionModuleSessionImpl::close() {
- media_keys_->ReleaseSession(session_id_);
-}
-
-void WebContentDecryptionModuleSessionImpl::OnSessionCreated(
- const std::string& web_session_id) {
- // Due to heartbeat messages, OnSessionCreated() can get called multiple
- // times.
- // TODO(jrummell): Once all CDMs are updated to support reference ids,
- // OnSessionCreated() should only be called once, and the second check can be
- // removed.
- blink::WebString id = blink::WebString::fromUTF8(web_session_id);
- DCHECK(web_session_id_.isEmpty() || web_session_id_ == id)
- << "Session ID may not be changed once set.";
- web_session_id_ = id;
+void WebContentDecryptionModuleSessionImpl::release() {
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::SimpleCdmPromise(
+ base::Bind(&WebContentDecryptionModuleSessionImpl::OnSessionClosed,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&WebContentDecryptionModuleSessionImpl::OnSessionError,
+ weak_ptr_factory_.GetWeakPtr())));
+ adapter_->ReleaseSession(web_session_id_, promise.Pass());
}
void WebContentDecryptionModuleSessionImpl::OnSessionMessage(
const std::vector<uint8>& message,
- const std::string& destination_url) {
- client_->keyMessage(message.empty() ? NULL : &message[0],
- message.size(),
- GURL(destination_url));
+ const GURL& destination_url) {
+ client_->message(
+ message.empty() ? NULL : &message[0], message.size(), destination_url);
}
void WebContentDecryptionModuleSessionImpl::OnSessionReady() {
- // TODO(jrummell): Blink APIs need to be updated to the new EME API. For now,
- // convert the response to the old v0.1b API.
- client_->keyAdded();
+ client_->ready();
}
void WebContentDecryptionModuleSessionImpl::OnSessionClosed() {
- if (!session_closed_cb_.is_null())
- base::ResetAndReturn(&session_closed_cb_).Run(session_id_);
+ if (!is_closed_) {
+ is_closed_ = true;
+ client_->close();
+ }
}
void WebContentDecryptionModuleSessionImpl::OnSessionError(
- media::MediaKeys::KeyError error_code,
- int system_code) {
- client_->keyError(static_cast<Client::MediaKeyErrorCode>(error_code),
- system_code);
+ media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ // Convert |exception_code| back to MediaKeyErrorCode if possible.
+ // TODO(jrummell): Update this conversion when promises flow
+ // back into blink:: (as blink:: will have its own error definition).
+ switch (exception_code) {
+ case media::MediaKeys::CLIENT_ERROR:
+ client_->error(Client::MediaKeyErrorCodeClient, system_code);
+ break;
+ default:
+ // This will include all other CDM4 errors and any error generated
+ // by CDM5 or later.
+ client_->error(Client::MediaKeyErrorCodeUnknown, system_code);
+ break;
+ }
+}
+
+void WebContentDecryptionModuleSessionImpl::SessionCreated(
+ const std::string& web_session_id) {
+ DCHECK(web_session_id_.empty()) << "Session ID may not be changed once set.";
+ web_session_id_ = web_session_id;
+ adapter_->RegisterSession(web_session_id_, weak_ptr_factory_.GetWeakPtr());
}
} // namespace content
diff --git a/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.h b/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.h
index 6ff091c876a..f1d889bb09d 100644
--- a/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.h
+++ b/chromium/content/renderer/media/webcontentdecryptionmodulesession_impl.h
@@ -10,6 +10,8 @@
#include "base/basictypes.h"
#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
#include "media/base/media_keys.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModuleSession.h"
#include "third_party/WebKit/public/platform/WebString.h"
@@ -19,49 +21,55 @@ class MediaKeys;
}
namespace content {
+class CdmSessionAdapter;
class WebContentDecryptionModuleSessionImpl
: public blink::WebContentDecryptionModuleSession {
public:
- typedef base::Callback<void(uint32 session_id)> SessionClosedCB;
-
WebContentDecryptionModuleSessionImpl(
- uint32 session_id,
- media::MediaKeys* media_keys,
Client* client,
- const SessionClosedCB& session_closed_cb);
+ const scoped_refptr<CdmSessionAdapter>& adapter);
virtual ~WebContentDecryptionModuleSessionImpl();
// blink::WebContentDecryptionModuleSession implementation.
- virtual blink::WebString sessionId() const OVERRIDE;
- virtual void generateKeyRequest(const blink::WebString& mime_type,
- const uint8* init_data,
- size_t init_data_length) OVERRIDE;
- virtual void update(const uint8* response, size_t response_length) OVERRIDE;
- virtual void close() OVERRIDE;
+ virtual blink::WebString sessionId() const;
+ virtual void initializeNewSession(const blink::WebString& mime_type,
+ const uint8* init_data,
+ size_t init_data_length);
+ virtual void update(const uint8* response, size_t response_length);
+ virtual void release();
// Callbacks.
- void OnSessionCreated(const std::string& web_session_id);
void OnSessionMessage(const std::vector<uint8>& message,
- const std::string& destination_url);
+ const GURL& destination_url);
void OnSessionReady();
void OnSessionClosed();
- void OnSessionError(media::MediaKeys::KeyError error_code, int system_code);
+ void OnSessionError(media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
private:
- // Non-owned pointers.
- media::MediaKeys* media_keys_;
- Client* client_;
+ void SessionCreated(const std::string& web_session_id);
- SessionClosedCB session_closed_cb_;
+ scoped_refptr<CdmSessionAdapter> adapter_;
- // Web session ID is the app visible ID for this session generated by the CDM.
- // This value is not set until the CDM calls OnSessionCreated().
- blink::WebString web_session_id_;
+ // Non-owned pointer.
+ Client* client_;
- // Session ID is used to uniquely track this object so that CDM callbacks
- // can get routed to the correct object.
- const uint32 session_id_;
+ // Web session ID is the app visible ID for this session generated by the CDM.
+ // This value is not set until the CDM resolves the initializeNewSession()
+ // promise.
+ std::string web_session_id_;
+
+ // Don't pass more than 1 close() event to blink::
+ // TODO(jrummell): Remove this once blink tests handle close() promise and
+ // closed() event.
+ bool is_closed_;
+
+ // Since promises will live until they are fired, use a weak reference when
+ // creating a promise in case this class disappears before the promise
+ // actually fires.
+ base::WeakPtrFactory<WebContentDecryptionModuleSessionImpl> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(WebContentDecryptionModuleSessionImpl);
};
diff --git a/chromium/content/renderer/media/webmediaplayer_impl.cc b/chromium/content/renderer/media/webmediaplayer_impl.cc
index ee637a6879a..12153eea808 100644
--- a/chromium/content/renderer/media/webmediaplayer_impl.cc
+++ b/chromium/content/renderer/media/webmediaplayer_impl.cc
@@ -11,28 +11,38 @@
#include "base/bind.h"
#include "base/callback.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
+#include "base/debug/alias.h"
#include "base/debug/crash_logging.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/waitable_event.h"
#include "cc/layers/video_layer.h"
#include "content/public/common/content_switches.h"
+#include "content/public/renderer/render_frame.h"
+#include "content/renderer/compositor_bindings/web_layer_impl.h"
#include "content/renderer/media/buffered_data_source.h"
#include "content/renderer/media/crypto/key_systems.h"
+#include "content/renderer/media/render_media_log.h"
#include "content/renderer/media/texttrack_impl.h"
#include "content/renderer/media/webaudiosourceprovider_impl.h"
+#include "content/renderer/media/webcontentdecryptionmodule_impl.h"
#include "content/renderer/media/webinbandtexttrack_impl.h"
#include "content/renderer/media/webmediaplayer_delegate.h"
#include "content/renderer/media/webmediaplayer_params.h"
#include "content/renderer/media/webmediaplayer_util.h"
#include "content/renderer/media/webmediasource_impl.h"
#include "content/renderer/pepper/pepper_webplugin_impl.h"
+#include "content/renderer/render_thread_impl.h"
#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/audio/null_audio_sink.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/audio_hardware_config.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/filter_collection.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
@@ -50,16 +60,22 @@
#include "media/filters/opus_audio_decoder.h"
#include "media/filters/video_renderer_impl.h"
#include "media/filters/vpx_video_decoder.h"
+#include "third_party/WebKit/public/platform/WebContentDecryptionModule.h"
#include "third_party/WebKit/public/platform/WebMediaSource.h"
#include "third_party/WebKit/public/platform/WebRect.h"
#include "third_party/WebKit/public/platform/WebSize.h"
#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/platform/WebURL.h"
#include "third_party/WebKit/public/web/WebDocument.h"
+#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebRuntimeFeatures.h"
+#include "third_party/WebKit/public/web/WebSecurityOrigin.h"
#include "third_party/WebKit/public/web/WebView.h"
#include "v8/include/v8.h"
-#include "webkit/renderer/compositor_bindings/web_layer_impl.h"
+
+#if defined(ENABLE_PEPPER_CDMS)
+#include "content/renderer/media/crypto/pepper_cdm_wrapper_impl.h"
+#endif
using blink::WebCanvas;
using blink::WebMediaPlayer;
@@ -106,6 +122,8 @@ const char* kMediaEme = "Media.EME.";
namespace content {
+class BufferedDataSourceHostImpl;
+
#define COMPILE_ASSERT_MATCHING_ENUM(name) \
COMPILE_ASSERT(static_cast<int>(WebMediaPlayer::CORSMode ## name) == \
static_cast<int>(BufferedResourceLoader::k ## name), \
@@ -116,13 +134,8 @@ COMPILE_ASSERT_MATCHING_ENUM(UseCredentials);
#undef COMPILE_ASSERT_MATCHING_ENUM
#define BIND_TO_RENDER_LOOP(function) \
- media::BindToLoop(main_loop_, base::Bind(function, AsWeakPtr()))
-
-#define BIND_TO_RENDER_LOOP_1(function, arg1) \
- media::BindToLoop(main_loop_, base::Bind(function, AsWeakPtr(), arg1))
-
-#define BIND_TO_RENDER_LOOP_2(function, arg1, arg2) \
- media::BindToLoop(main_loop_, base::Bind(function, AsWeakPtr(), arg1, arg2))
+ (DCHECK(main_loop_->BelongsToCurrentThread()), \
+ media::BindToCurrentLoop(base::Bind(function, AsWeakPtr())))
static void LogMediaSourceError(const scoped_refptr<media::MediaLog>& media_log,
const std::string& error) {
@@ -130,17 +143,19 @@ static void LogMediaSourceError(const scoped_refptr<media::MediaLog>& media_log,
}
WebMediaPlayerImpl::WebMediaPlayerImpl(
- content::RenderView* render_view,
- blink::WebFrame* frame,
+ blink::WebLocalFrame* frame,
blink::WebMediaPlayerClient* client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
const WebMediaPlayerParams& params)
- : content::RenderViewObserver(render_view),
- frame_(frame),
+ : frame_(frame),
network_state_(WebMediaPlayer::NetworkStateEmpty),
ready_state_(WebMediaPlayer::ReadyStateHaveNothing),
main_loop_(base::MessageLoopProxy::current()),
- media_loop_(params.message_loop_proxy()),
+ media_loop_(
+ RenderThreadImpl::current()->GetMediaThreadMessageLoopProxy()),
+ media_log_(new RenderMediaLog()),
+ pipeline_(media_loop_, media_log_.get()),
+ opaque_(false),
paused_(true),
seeking_(false),
playback_rate_(0.0f),
@@ -149,29 +164,28 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
client_(client),
delegate_(delegate),
defer_load_cb_(params.defer_load_cb()),
- media_log_(params.media_log()),
accelerated_compositing_reported_(false),
incremented_externally_allocated_memory_(false),
- gpu_factories_(params.gpu_factories()),
- is_local_source_(false),
+ gpu_factories_(RenderThreadImpl::current()->GetGpuFactories()),
supports_save_(true),
- starting_(false),
chunk_demuxer_(NULL),
- current_frame_painted_(false),
- frames_dropped_before_paint_(0),
- pending_repaint_(false),
- pending_size_change_(false),
- video_frame_provider_client_(NULL),
- text_track_index_(0) {
+ // Threaded compositing isn't enabled universally yet.
+ compositor_task_runner_(
+ RenderThreadImpl::current()->compositor_message_loop_proxy()
+ ? RenderThreadImpl::current()->compositor_message_loop_proxy()
+ : base::MessageLoopProxy::current()),
+ compositor_(new VideoFrameCompositor(
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnNaturalSizeChanged),
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnOpacityChanged))),
+ text_track_index_(0),
+ web_cdm_(NULL) {
media_log_->AddEvent(
media_log_->CreateEvent(media::MediaLogEvent::WEBMEDIAPLAYER_CREATED));
- pipeline_.reset(new media::Pipeline(media_loop_, media_log_.get()));
-
// |gpu_factories_| requires that its entry points be called on its
- // |GetMessageLoop()|. Since |pipeline_| will own decoders created from the
+ // |GetTaskRunner()|. Since |pipeline_| will own decoders created from the
// factories, require that their message loops are identical.
- DCHECK(!gpu_factories_ || (gpu_factories_->GetMessageLoop() == media_loop_));
+ DCHECK(!gpu_factories_ || (gpu_factories_->GetTaskRunner() == media_loop_));
// Let V8 know we started new thread if we did not do it yet.
// Made separate task to avoid deletion of player currently being created.
@@ -184,17 +198,6 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
base::Bind(&WebMediaPlayerImpl::IncrementExternallyAllocatedMemory,
AsWeakPtr()));
- if (blink::WebRuntimeFeatures::isPrefixedEncryptedMediaEnabled()) {
- decryptor_.reset(new ProxyDecryptor(
-#if defined(ENABLE_PEPPER_CDMS)
- client,
- frame,
-#endif
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnKeyAdded),
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnKeyError),
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnKeyMessage)));
- }
-
// Use the null sink if no sink was provided.
audio_source_provider_ = new WebAudioSourceProviderImpl(
params.audio_renderer_sink().get()
@@ -203,8 +206,7 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
}
WebMediaPlayerImpl::~WebMediaPlayerImpl() {
- SetVideoFrameProviderClient(NULL);
- GetClient()->setWebLayer(NULL);
+ client_->setWebLayer(NULL);
DCHECK(main_loop_->BelongsToCurrentThread());
media_log_->AddEvent(
@@ -213,45 +215,37 @@ WebMediaPlayerImpl::~WebMediaPlayerImpl() {
if (delegate_.get())
delegate_->PlayerGone(this);
- Destroy();
-}
+ // Abort any pending IO so stopping the pipeline doesn't get blocked.
+ if (data_source_)
+ data_source_->Abort();
+ if (chunk_demuxer_) {
+ chunk_demuxer_->Shutdown();
+ chunk_demuxer_ = NULL;
+ }
-namespace {
+ gpu_factories_ = NULL;
-// Helper enum for reporting scheme histograms.
-enum URLSchemeForHistogram {
- kUnknownURLScheme,
- kMissingURLScheme,
- kHttpURLScheme,
- kHttpsURLScheme,
- kFtpURLScheme,
- kChromeExtensionURLScheme,
- kJavascriptURLScheme,
- kFileURLScheme,
- kBlobURLScheme,
- kDataURLScheme,
- kFileSystemScheme,
- kMaxURLScheme = kFileSystemScheme // Must be equal to highest enum value.
-};
+ // Make sure to kill the pipeline so there's no more media threads running.
+ // Note: stopping the pipeline might block for a long time.
+ base::WaitableEvent waiter(false, false);
+ pipeline_.Stop(
+ base::Bind(&base::WaitableEvent::Signal, base::Unretained(&waiter)));
+ waiter.Wait();
-URLSchemeForHistogram URLScheme(const GURL& url) {
- if (!url.has_scheme()) return kMissingURLScheme;
- if (url.SchemeIs("http")) return kHttpURLScheme;
- if (url.SchemeIs("https")) return kHttpsURLScheme;
- if (url.SchemeIs("ftp")) return kFtpURLScheme;
- if (url.SchemeIs("chrome-extension")) return kChromeExtensionURLScheme;
- if (url.SchemeIs("javascript")) return kJavascriptURLScheme;
- if (url.SchemeIs("file")) return kFileURLScheme;
- if (url.SchemeIs("blob")) return kBlobURLScheme;
- if (url.SchemeIs("data")) return kDataURLScheme;
- if (url.SchemeIs("filesystem")) return kFileSystemScheme;
- return kUnknownURLScheme;
-}
+ compositor_task_runner_->DeleteSoon(FROM_HERE, compositor_);
-} // anonymous namespace
+ // Let V8 know we are not using extra resources anymore.
+ if (incremented_externally_allocated_memory_) {
+ v8::Isolate::GetCurrent()->AdjustAmountOfExternalAllocatedMemory(
+ -kPlayerExtraMemory);
+ incremented_externally_allocated_memory_ = false;
+ }
+}
void WebMediaPlayerImpl::load(LoadType load_type, const blink::WebURL& url,
CORSMode cors_mode) {
+ DVLOG(1) << __FUNCTION__ << "(" << load_type << ", " << url << ", "
+ << cors_mode << ")";
if (!defer_load_cb_.is_null()) {
defer_load_cb_.Run(base::Bind(
&WebMediaPlayerImpl::DoLoad, AsWeakPtr(), load_type, url, cors_mode));
@@ -266,7 +260,7 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
DCHECK(main_loop_->BelongsToCurrentThread());
GURL gurl(url);
- UMA_HISTOGRAM_ENUMERATION("Media.URLScheme", URLScheme(gurl), kMaxURLScheme);
+ ReportMediaSchemeUma(gurl);
// Set subresource URL for crash reporting.
base::debug::SetCrashKeyValue("subresource_url", gurl.spec());
@@ -274,8 +268,8 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
load_type_ = load_type;
// Handle any volume/preload changes that occurred before load().
- setVolume(GetClient()->volume());
- setPreload(GetClient()->preload());
+ setVolume(client_->volume());
+ setPreload(client_->preload());
SetNetworkState(WebMediaPlayer::NetworkStateLoading);
SetReadyState(WebMediaPlayer::ReadyStateHaveNothing);
@@ -290,24 +284,23 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
// Otherwise it's a regular request which requires resolving the URL first.
data_source_.reset(new BufferedDataSource(
+ url,
+ static_cast<BufferedResourceLoader::CORSMode>(cors_mode),
main_loop_,
frame_,
media_log_.get(),
+ &buffered_data_source_host_,
base::Bind(&WebMediaPlayerImpl::NotifyDownloading, AsWeakPtr())));
data_source_->Initialize(
- url, static_cast<BufferedResourceLoader::CORSMode>(cors_mode),
- base::Bind(
- &WebMediaPlayerImpl::DataSourceInitialized,
- AsWeakPtr(), gurl));
-
- is_local_source_ = !gurl.SchemeIsHTTPOrHTTPS();
+ base::Bind(&WebMediaPlayerImpl::DataSourceInitialized, AsWeakPtr()));
}
void WebMediaPlayerImpl::play() {
+ DVLOG(1) << __FUNCTION__;
DCHECK(main_loop_->BelongsToCurrentThread());
paused_ = false;
- pipeline_->SetPlaybackRate(playback_rate_);
+ pipeline_.SetPlaybackRate(playback_rate_);
if (data_source_)
data_source_->MediaIsPlaying();
@@ -318,13 +311,14 @@ void WebMediaPlayerImpl::play() {
}
void WebMediaPlayerImpl::pause() {
+ DVLOG(1) << __FUNCTION__;
DCHECK(main_loop_->BelongsToCurrentThread());
paused_ = true;
- pipeline_->SetPlaybackRate(0.0f);
+ pipeline_.SetPlaybackRate(0.0f);
if (data_source_)
data_source_->MediaIsPaused();
- paused_time_ = pipeline_->GetMediaTime();
+ paused_time_ = pipeline_.GetMediaTime();
media_log_->AddEvent(media_log_->CreateEvent(media::MediaLogEvent::PAUSE));
@@ -332,17 +326,13 @@ void WebMediaPlayerImpl::pause() {
delegate_->DidPause(this);
}
-bool WebMediaPlayerImpl::supportsFullscreen() const {
- DCHECK(main_loop_->BelongsToCurrentThread());
- return true;
-}
-
bool WebMediaPlayerImpl::supportsSave() const {
DCHECK(main_loop_->BelongsToCurrentThread());
return supports_save_;
}
void WebMediaPlayerImpl::seek(double seconds) {
+ DVLOG(1) << __FUNCTION__ << "(" << seconds << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
if (ready_state_ > WebMediaPlayer::ReadyStateHaveMetadata)
@@ -350,7 +340,7 @@ void WebMediaPlayerImpl::seek(double seconds) {
base::TimeDelta seek_time = ConvertSecondsToTimestamp(seconds);
- if (starting_ || seeking_) {
+ if (seeking_) {
pending_seek_ = true;
pending_seek_seconds_ = seconds;
if (chunk_demuxer_)
@@ -370,12 +360,13 @@ void WebMediaPlayerImpl::seek(double seconds) {
chunk_demuxer_->StartWaitingForSeek(seek_time);
// Kick off the asynchronous seek!
- pipeline_->Seek(
+ pipeline_.Seek(
seek_time,
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineSeek));
}
void WebMediaPlayerImpl::setRate(double rate) {
+ DVLOG(1) << __FUNCTION__ << "(" << rate << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
// TODO(kylep): Remove when support for negatives is added. Also, modify the
@@ -393,16 +384,17 @@ void WebMediaPlayerImpl::setRate(double rate) {
playback_rate_ = rate;
if (!paused_) {
- pipeline_->SetPlaybackRate(rate);
+ pipeline_.SetPlaybackRate(rate);
if (data_source_)
data_source_->MediaPlaybackRateChanged(rate);
}
}
void WebMediaPlayerImpl::setVolume(double volume) {
+ DVLOG(1) << __FUNCTION__ << "(" << volume << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
- pipeline_->SetVolume(volume);
+ pipeline_.SetVolume(volume);
}
#define COMPILE_ASSERT_MATCHING_ENUM(webkit_name, chromium_name) \
@@ -415,6 +407,7 @@ COMPILE_ASSERT_MATCHING_ENUM(PreloadAuto, AUTO);
#undef COMPILE_ASSERT_MATCHING_ENUM
void WebMediaPlayerImpl::setPreload(WebMediaPlayer::Preload preload) {
+ DVLOG(1) << __FUNCTION__ << "(" << preload << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
if (data_source_)
@@ -424,27 +417,25 @@ void WebMediaPlayerImpl::setPreload(WebMediaPlayer::Preload preload) {
bool WebMediaPlayerImpl::hasVideo() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- return pipeline_->HasVideo();
+ return pipeline_metadata_.has_video;
}
bool WebMediaPlayerImpl::hasAudio() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- return pipeline_->HasAudio();
+ return pipeline_metadata_.has_audio;
}
blink::WebSize WebMediaPlayerImpl::naturalSize() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- gfx::Size size;
- pipeline_->GetNaturalVideoSize(&size);
- return blink::WebSize(size);
+ return blink::WebSize(pipeline_metadata_.natural_size);
}
bool WebMediaPlayerImpl::paused() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- return pipeline_->GetPlaybackRate() == 0.0f;
+ return pipeline_.GetPlaybackRate() == 0.0f;
}
bool WebMediaPlayerImpl::seeking() const {
@@ -465,9 +456,18 @@ double WebMediaPlayerImpl::duration() const {
return GetPipelineDuration();
}
+double WebMediaPlayerImpl::timelineOffset() const {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+
+ if (pipeline_metadata_.timeline_offset.is_null())
+ return std::numeric_limits<double>::quiet_NaN();
+
+ return pipeline_metadata_.timeline_offset.ToJsTime();
+}
+
double WebMediaPlayerImpl::currentTime() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- return (paused_ ? paused_time_ : pipeline_->GetMediaTime()).InSecondsF();
+ return (paused_ ? paused_time_ : pipeline_.GetMediaTime()).InSecondsF();
}
WebMediaPlayer::NetworkState WebMediaPlayerImpl::networkState() const {
@@ -480,12 +480,13 @@ WebMediaPlayer::ReadyState WebMediaPlayerImpl::readyState() const {
return ready_state_;
}
-const blink::WebTimeRanges& WebMediaPlayerImpl::buffered() {
+blink::WebTimeRanges WebMediaPlayerImpl::buffered() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- blink::WebTimeRanges web_ranges(
- ConvertToWebTimeRanges(pipeline_->GetBufferedTimeRanges()));
- buffered_.swap(web_ranges);
- return buffered_;
+ media::Ranges<base::TimeDelta> buffered_time_ranges =
+ pipeline_.GetBufferedTimeRanges();
+ buffered_data_source_host_.AddBufferedTimeRanges(
+ &buffered_time_ranges, pipeline_.GetMediaDuration());
+ return ConvertToWebTimeRanges(buffered_time_ranges);
}
double WebMediaPlayerImpl::maxTimeSeekable() const {
@@ -502,15 +503,18 @@ double WebMediaPlayerImpl::maxTimeSeekable() const {
return duration();
}
-bool WebMediaPlayerImpl::didLoadingProgress() const {
+bool WebMediaPlayerImpl::didLoadingProgress() {
DCHECK(main_loop_->BelongsToCurrentThread());
- return pipeline_->DidLoadingProgress();
+ bool pipeline_progress = pipeline_.DidLoadingProgress();
+ bool data_progress = buffered_data_source_host_.DidLoadingProgress();
+ return pipeline_progress || data_progress;
}
void WebMediaPlayerImpl::paint(WebCanvas* canvas,
const WebRect& rect,
unsigned char alpha) {
DCHECK(main_loop_->BelongsToCurrentThread());
+ TRACE_EVENT0("media", "WebMediaPlayerImpl:paint");
if (!accelerated_compositing_reported_) {
accelerated_compositing_reported_ = true;
@@ -522,15 +526,14 @@ void WebMediaPlayerImpl::paint(WebCanvas* canvas,
frame_->view()->isAcceleratedCompositingActive());
}
- // Avoid locking and potentially blocking the video rendering thread while
- // painting in software.
- scoped_refptr<media::VideoFrame> video_frame;
- {
- base::AutoLock auto_lock(lock_);
- DoneWaitingForPaint(true);
- video_frame = current_frame_;
- }
- TRACE_EVENT0("media", "WebMediaPlayerImpl:paint");
+ // TODO(scherkus): Clarify paint() API contract to better understand when and
+ // why it's being called. For example, today paint() is called when:
+ // - We haven't reached HAVE_CURRENT_DATA and need to paint black
+ // - We're painting to a canvas
+ // See http://crbug.com/341225 http://crbug.com/342621 for details.
+ scoped_refptr<media::VideoFrame> video_frame =
+ GetCurrentFrameFromCompositor();
+
gfx::Rect gfx_rect(rect);
skcanvas_video_renderer_.Paint(video_frame.get(), canvas, gfx_rect, alpha);
}
@@ -554,63 +557,31 @@ double WebMediaPlayerImpl::mediaTimeForTimeValue(double timeValue) const {
unsigned WebMediaPlayerImpl::decodedFrameCount() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- media::PipelineStatistics stats = pipeline_->GetStatistics();
+ media::PipelineStatistics stats = pipeline_.GetStatistics();
return stats.video_frames_decoded;
}
unsigned WebMediaPlayerImpl::droppedFrameCount() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- media::PipelineStatistics stats = pipeline_->GetStatistics();
-
- base::AutoLock auto_lock(lock_);
- unsigned frames_dropped =
- stats.video_frames_dropped + frames_dropped_before_paint_;
- DCHECK_LE(frames_dropped, stats.video_frames_decoded);
- return frames_dropped;
+ media::PipelineStatistics stats = pipeline_.GetStatistics();
+ return stats.video_frames_dropped;
}
unsigned WebMediaPlayerImpl::audioDecodedByteCount() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- media::PipelineStatistics stats = pipeline_->GetStatistics();
+ media::PipelineStatistics stats = pipeline_.GetStatistics();
return stats.audio_bytes_decoded;
}
unsigned WebMediaPlayerImpl::videoDecodedByteCount() const {
DCHECK(main_loop_->BelongsToCurrentThread());
- media::PipelineStatistics stats = pipeline_->GetStatistics();
+ media::PipelineStatistics stats = pipeline_.GetStatistics();
return stats.video_bytes_decoded;
}
-void WebMediaPlayerImpl::SetVideoFrameProviderClient(
- cc::VideoFrameProvider::Client* client) {
- // This is called from both the main renderer thread and the compositor
- // thread (when the main thread is blocked).
- if (video_frame_provider_client_)
- video_frame_provider_client_->StopUsingProvider();
- video_frame_provider_client_ = client;
-}
-
-scoped_refptr<media::VideoFrame> WebMediaPlayerImpl::GetCurrentFrame() {
- base::AutoLock auto_lock(lock_);
- DoneWaitingForPaint(true);
- TRACE_EVENT_ASYNC_BEGIN0(
- "media", "WebMediaPlayerImpl:compositing", this);
- return current_frame_;
-}
-
-void WebMediaPlayerImpl::PutCurrentFrame(
- const scoped_refptr<media::VideoFrame>& frame) {
- if (!accelerated_compositing_reported_) {
- accelerated_compositing_reported_ = true;
- DCHECK(frame_->view()->isAcceleratedCompositingActive());
- UMA_HISTOGRAM_BOOLEAN("Media.AcceleratedCompositingActive", true);
- }
- TRACE_EVENT_ASYNC_END0("media", "WebMediaPlayerImpl:compositing", this);
-}
-
bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
blink::WebGraphicsContext3D* web_graphics_context,
unsigned int texture,
@@ -619,19 +590,18 @@ bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
unsigned int type,
bool premultiply_alpha,
bool flip_y) {
- scoped_refptr<media::VideoFrame> video_frame;
- {
- base::AutoLock auto_lock(lock_);
- video_frame = current_frame_;
- }
-
TRACE_EVENT0("media", "WebMediaPlayerImpl:copyVideoTextureToPlatformTexture");
+ scoped_refptr<media::VideoFrame> video_frame =
+ GetCurrentFrameFromCompositor();
+
if (!video_frame)
return false;
if (video_frame->format() != media::VideoFrame::NATIVE_TEXTURE)
return false;
- if (video_frame->texture_target() != GL_TEXTURE_2D)
+
+ const gpu::MailboxHolder* mailbox_holder = video_frame->mailbox_holder();
+ if (mailbox_holder->texture_target != GL_TEXTURE_2D)
return false;
// Since this method changes which texture is bound to the TEXTURE_2D target,
@@ -640,21 +610,18 @@ bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
// avoid it in user builds. As a result assume (below) that |texture| is
// bound when this method is called, and only verify this fact when
// DCHECK_IS_ON.
- if (DCHECK_IS_ON()) {
- GLint bound_texture = 0;
- web_graphics_context->getIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture);
- DCHECK_EQ(static_cast<GLuint>(bound_texture), texture);
- }
-
- media::VideoFrame::MailboxHolder* mailbox_holder =
- video_frame->texture_mailbox();
+#if DCHECK_IS_ON
+ GLint bound_texture = 0;
+ web_graphics_context->getIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture);
+ DCHECK_EQ(static_cast<GLuint>(bound_texture), texture);
+#endif
uint32 source_texture = web_graphics_context->createTexture();
- web_graphics_context->waitSyncPoint(mailbox_holder->sync_point());
+ web_graphics_context->waitSyncPoint(mailbox_holder->sync_point);
web_graphics_context->bindTexture(GL_TEXTURE_2D, source_texture);
web_graphics_context->consumeTextureCHROMIUM(GL_TEXTURE_2D,
- mailbox_holder->mailbox().name);
+ mailbox_holder->mailbox.name);
// The video is stored in a unmultiplied format, so premultiply
// if necessary.
@@ -679,10 +646,8 @@ bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
web_graphics_context->bindTexture(GL_TEXTURE_2D, texture);
web_graphics_context->deleteTexture(source_texture);
-
- // The flush() operation is not necessary here. It is kept since the
- // performance will be better when it is added than not.
web_graphics_context->flush();
+ video_frame->AppendReleaseSyncPoint(web_graphics_context->insertSyncPoint());
return true;
}
@@ -691,7 +656,7 @@ bool WebMediaPlayerImpl::copyVideoTextureToPlatformTexture(
// UMA_HISTOGRAM_COUNTS. The reason that we cannot use those macros directly is
// that UMA_* macros require the names to be constant throughout the process'
// lifetime.
-static void EmeUMAHistogramEnumeration(const blink::WebString& key_system,
+static void EmeUMAHistogramEnumeration(const std::string& key_system,
const std::string& method,
int sample,
int boundary_value) {
@@ -701,7 +666,7 @@ static void EmeUMAHistogramEnumeration(const blink::WebString& key_system,
base::Histogram::kUmaTargetedHistogramFlag)->Add(sample);
}
-static void EmeUMAHistogramCounts(const blink::WebString& key_system,
+static void EmeUMAHistogramCounts(const std::string& key_system,
const std::string& method,
int sample) {
// Use the same parameters as UMA_HISTOGRAM_COUNTS.
@@ -735,54 +700,99 @@ static MediaKeyException MediaKeyExceptionForUMA(
// Helper for converting |key_system| name and exception |e| to a pair of enum
// values from above, for reporting to UMA.
-static void ReportMediaKeyExceptionToUMA(
- const std::string& method,
- const WebString& key_system,
- WebMediaPlayer::MediaKeyException e) {
+static void ReportMediaKeyExceptionToUMA(const std::string& method,
+ const std::string& key_system,
+ WebMediaPlayer::MediaKeyException e) {
MediaKeyException result_id = MediaKeyExceptionForUMA(e);
DCHECK_NE(result_id, kUnknownResultId) << e;
EmeUMAHistogramEnumeration(
key_system, method, result_id, kMaxMediaKeyException);
}
+// Convert a WebString to ASCII, falling back on an empty string in the case
+// of a non-ASCII string.
+static std::string ToASCIIOrEmpty(const blink::WebString& string) {
+ return base::IsStringASCII(string) ? base::UTF16ToASCII(string)
+ : std::string();
+}
+
WebMediaPlayer::MediaKeyException
WebMediaPlayerImpl::generateKeyRequest(const WebString& key_system,
const unsigned char* init_data,
unsigned init_data_length) {
+ DVLOG(1) << "generateKeyRequest: " << base::string16(key_system) << ": "
+ << std::string(reinterpret_cast<const char*>(init_data),
+ static_cast<size_t>(init_data_length));
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+
WebMediaPlayer::MediaKeyException e =
- GenerateKeyRequestInternal(key_system, init_data, init_data_length);
- ReportMediaKeyExceptionToUMA("generateKeyRequest", key_system, e);
+ GenerateKeyRequestInternal(ascii_key_system, init_data, init_data_length);
+ ReportMediaKeyExceptionToUMA("generateKeyRequest", ascii_key_system, e);
return e;
}
+// Guess the type of |init_data|. This is only used to handle some corner cases
+// so we keep it as simple as possible without breaking major use cases.
+static std::string GuessInitDataType(const unsigned char* init_data,
+ unsigned init_data_length) {
+ // Most WebM files use KeyId of 16 bytes. MP4 init data are always >16 bytes.
+ if (init_data_length == 16)
+ return "video/webm";
+
+ return "video/mp4";
+}
+
WebMediaPlayer::MediaKeyException
-WebMediaPlayerImpl::GenerateKeyRequestInternal(
- const WebString& key_system,
- const unsigned char* init_data,
- unsigned init_data_length) {
- DVLOG(1) << "generateKeyRequest: " << key_system.utf8().data() << ": "
- << std::string(reinterpret_cast<const char*>(init_data),
- static_cast<size_t>(init_data_length));
+WebMediaPlayerImpl::GenerateKeyRequestInternal(const std::string& key_system,
+ const unsigned char* init_data,
+ unsigned init_data_length) {
+ DCHECK(main_loop_->BelongsToCurrentThread());
if (!IsConcreteSupportedKeySystem(key_system))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
// We do not support run-time switching between key systems for now.
- if (current_key_system_.isEmpty()) {
- if (!decryptor_->InitializeCDM(key_system.utf8(), frame_->document().url()))
+ if (current_key_system_.empty()) {
+ if (!proxy_decryptor_) {
+ proxy_decryptor_.reset(new ProxyDecryptor(
+#if defined(ENABLE_PEPPER_CDMS)
+ // Create() must be called synchronously as |frame_| may not be
+ // valid afterwards.
+ base::Bind(&PepperCdmWrapperImpl::Create, frame_),
+#elif defined(ENABLE_BROWSER_CDMS)
+#error Browser side CDM in WMPI for prefixed EME API not supported yet.
+#endif
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnKeyAdded),
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnKeyError),
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnKeyMessage)));
+ }
+
+ GURL security_origin(frame_->document().securityOrigin().toString());
+ if (!proxy_decryptor_->InitializeCDM(key_system, security_origin))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
+
+ if (proxy_decryptor_ && !decryptor_ready_cb_.is_null()) {
+ base::ResetAndReturn(&decryptor_ready_cb_)
+ .Run(proxy_decryptor_->GetDecryptor());
+ }
+
current_key_system_ = key_system;
- }
- else if (key_system != current_key_system_) {
+ } else if (key_system != current_key_system_) {
return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
}
+ std::string init_data_type = init_data_type_;
+ if (init_data_type.empty())
+ init_data_type = GuessInitDataType(init_data, init_data_length);
+
// TODO(xhwang): We assume all streams are from the same container (thus have
// the same "type") for now. In the future, the "type" should be passed down
// from the application.
- if (!decryptor_->GenerateKeyRequest(init_data_type_,
- init_data, init_data_length)) {
- current_key_system_.reset();
+ if (!proxy_decryptor_->GenerateKeyRequest(
+ init_data_type, init_data, init_data_length)) {
+ current_key_system_.clear();
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
}
@@ -796,94 +806,101 @@ WebMediaPlayer::MediaKeyException WebMediaPlayerImpl::addKey(
const unsigned char* init_data,
unsigned init_data_length,
const WebString& session_id) {
- WebMediaPlayer::MediaKeyException e = AddKeyInternal(
- key_system, key, key_length, init_data, init_data_length, session_id);
- ReportMediaKeyExceptionToUMA("addKey", key_system, e);
+ DVLOG(1) << "addKey: " << base::string16(key_system) << ": "
+ << std::string(reinterpret_cast<const char*>(key),
+ static_cast<size_t>(key_length)) << ", "
+ << std::string(reinterpret_cast<const char*>(init_data),
+ static_cast<size_t>(init_data_length)) << " ["
+ << base::string16(session_id) << "]";
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+ std::string ascii_session_id = ToASCIIOrEmpty(session_id);
+
+ WebMediaPlayer::MediaKeyException e = AddKeyInternal(ascii_key_system,
+ key,
+ key_length,
+ init_data,
+ init_data_length,
+ ascii_session_id);
+ ReportMediaKeyExceptionToUMA("addKey", ascii_key_system, e);
return e;
}
WebMediaPlayer::MediaKeyException WebMediaPlayerImpl::AddKeyInternal(
- const WebString& key_system,
+ const std::string& key_system,
const unsigned char* key,
unsigned key_length,
const unsigned char* init_data,
unsigned init_data_length,
- const WebString& session_id) {
+ const std::string& session_id) {
DCHECK(key);
DCHECK_GT(key_length, 0u);
- DVLOG(1) << "addKey: " << key_system.utf8().data() << ": "
- << std::string(reinterpret_cast<const char*>(key),
- static_cast<size_t>(key_length)) << ", "
- << std::string(reinterpret_cast<const char*>(init_data),
- static_cast<size_t>(init_data_length))
- << " [" << session_id.utf8().data() << "]";
-
if (!IsConcreteSupportedKeySystem(key_system))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
- if (current_key_system_.isEmpty() || key_system != current_key_system_)
+ if (current_key_system_.empty() || key_system != current_key_system_)
return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
- decryptor_->AddKey(key, key_length,
- init_data, init_data_length, session_id.utf8());
+ proxy_decryptor_->AddKey(
+ key, key_length, init_data, init_data_length, session_id);
return WebMediaPlayer::MediaKeyExceptionNoError;
}
WebMediaPlayer::MediaKeyException WebMediaPlayerImpl::cancelKeyRequest(
const WebString& key_system,
const WebString& session_id) {
+ DVLOG(1) << "cancelKeyRequest: " << base::string16(key_system) << ": "
+ << " [" << base::string16(session_id) << "]";
+
+ std::string ascii_key_system =
+ GetUnprefixedKeySystemName(ToASCIIOrEmpty(key_system));
+ std::string ascii_session_id = ToASCIIOrEmpty(session_id);
+
WebMediaPlayer::MediaKeyException e =
- CancelKeyRequestInternal(key_system, session_id);
- ReportMediaKeyExceptionToUMA("cancelKeyRequest", key_system, e);
+ CancelKeyRequestInternal(ascii_key_system, ascii_session_id);
+ ReportMediaKeyExceptionToUMA("cancelKeyRequest", ascii_key_system, e);
return e;
}
-WebMediaPlayer::MediaKeyException
-WebMediaPlayerImpl::CancelKeyRequestInternal(
- const WebString& key_system,
- const WebString& session_id) {
+WebMediaPlayer::MediaKeyException WebMediaPlayerImpl::CancelKeyRequestInternal(
+ const std::string& key_system,
+ const std::string& session_id) {
if (!IsConcreteSupportedKeySystem(key_system))
return WebMediaPlayer::MediaKeyExceptionKeySystemNotSupported;
- if (current_key_system_.isEmpty() || key_system != current_key_system_)
+ if (current_key_system_.empty() || key_system != current_key_system_)
return WebMediaPlayer::MediaKeyExceptionInvalidPlayerState;
- decryptor_->CancelKeyRequest(session_id.utf8());
+ proxy_decryptor_->CancelKeyRequest(session_id);
return WebMediaPlayer::MediaKeyExceptionNoError;
}
-void WebMediaPlayerImpl::OnDestruct() {
- Destroy();
+void WebMediaPlayerImpl::setContentDecryptionModule(
+ blink::WebContentDecryptionModule* cdm) {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+
+ // TODO(xhwang): Support setMediaKeys(0) if necessary: http://crbug.com/330324
+ if (!cdm)
+ return;
+
+ web_cdm_ = ToWebContentDecryptionModuleImpl(cdm);
+
+ if (web_cdm_ && !decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&decryptor_ready_cb_).Run(web_cdm_->GetDecryptor());
}
-void WebMediaPlayerImpl::Repaint() {
+void WebMediaPlayerImpl::InvalidateOnMainThread() {
DCHECK(main_loop_->BelongsToCurrentThread());
- TRACE_EVENT0("media", "WebMediaPlayerImpl:repaint");
-
- bool size_changed = false;
- {
- base::AutoLock auto_lock(lock_);
- std::swap(pending_size_change_, size_changed);
- if (pending_repaint_) {
- TRACE_EVENT_ASYNC_END0(
- "media", "WebMediaPlayerImpl:repaintPending", this);
- pending_repaint_ = false;
- }
- }
+ TRACE_EVENT0("media", "WebMediaPlayerImpl::InvalidateOnMainThread");
- if (size_changed) {
- TRACE_EVENT0("media", "WebMediaPlayerImpl:clientSizeChanged");
- GetClient()->sizeChanged();
- }
-
- TRACE_EVENT0("media", "WebMediaPlayerImpl:clientRepaint");
- GetClient()->repaint();
+ client_->repaint();
}
void WebMediaPlayerImpl::OnPipelineSeek(PipelineStatus status) {
+ DVLOG(1) << __FUNCTION__ << "(" << status << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
- starting_ = false;
seeking_ = false;
if (pending_seek_) {
pending_seek_ = false;
@@ -898,14 +915,15 @@ void WebMediaPlayerImpl::OnPipelineSeek(PipelineStatus status) {
// Update our paused time.
if (paused_)
- paused_time_ = pipeline_->GetMediaTime();
+ paused_time_ = pipeline_.GetMediaTime();
- GetClient()->timeChanged();
+ client_->timeChanged();
}
void WebMediaPlayerImpl::OnPipelineEnded() {
+ DVLOG(1) << __FUNCTION__;
DCHECK(main_loop_->BelongsToCurrentThread());
- GetClient()->timeChanged();
+ client_->timeChanged();
}
void WebMediaPlayerImpl::OnPipelineError(PipelineStatus error) {
@@ -916,7 +934,10 @@ void WebMediaPlayerImpl::OnPipelineError(PipelineStatus error) {
// Any error that occurs before reaching ReadyStateHaveMetadata should
// be considered a format error.
SetNetworkState(WebMediaPlayer::NetworkStateFormatError);
- Repaint();
+
+ // TODO(scherkus): This should be handled by HTMLMediaElement and controls
+ // should know when to invalidate themselves http://crbug.com/337015
+ InvalidateOnMainThread();
return;
}
@@ -925,49 +946,59 @@ void WebMediaPlayerImpl::OnPipelineError(PipelineStatus error) {
if (error == media::PIPELINE_ERROR_DECRYPT)
EmeUMAHistogramCounts(current_key_system_, "DecryptError", 1);
- // Repaint to trigger UI update.
- Repaint();
-}
-
-void WebMediaPlayerImpl::OnPipelineBufferingState(
- media::Pipeline::BufferingState buffering_state) {
- DVLOG(1) << "OnPipelineBufferingState(" << buffering_state << ")";
-
- switch (buffering_state) {
- case media::Pipeline::kHaveMetadata:
- SetReadyState(WebMediaPlayer::ReadyStateHaveMetadata);
-
- if (hasVideo() && GetClient()->needsWebLayerForVideo()) {
- DCHECK(!video_weblayer_);
- video_weblayer_.reset(
- new webkit::WebLayerImpl(cc::VideoLayer::Create(this)));
- GetClient()->setWebLayer(video_weblayer_.get());
- }
- break;
- case media::Pipeline::kPrerollCompleted:
- // Only transition to ReadyStateHaveEnoughData if we don't have
- // any pending seeks because the transition can cause Blink to
- // report that the most recent seek has completed.
- if (!pending_seek_)
- SetReadyState(WebMediaPlayer::ReadyStateHaveEnoughData);
- break;
+ // TODO(scherkus): This should be handled by HTMLMediaElement and controls
+ // should know when to invalidate themselves http://crbug.com/337015
+ InvalidateOnMainThread();
+}
+
+void WebMediaPlayerImpl::OnPipelineMetadata(
+ media::PipelineMetadata metadata) {
+ DVLOG(1) << __FUNCTION__;
+
+ pipeline_metadata_ = metadata;
+
+ SetReadyState(WebMediaPlayer::ReadyStateHaveMetadata);
+
+ if (hasVideo()) {
+ DCHECK(!video_weblayer_);
+ video_weblayer_.reset(
+ new WebLayerImpl(cc::VideoLayer::Create(compositor_)));
+ video_weblayer_->setOpaque(opaque_);
+ client_->setWebLayer(video_weblayer_.get());
}
- // Repaint to trigger UI update.
- Repaint();
+ // TODO(scherkus): This should be handled by HTMLMediaElement and controls
+ // should know when to invalidate themselves http://crbug.com/337015
+ InvalidateOnMainThread();
+}
+
+void WebMediaPlayerImpl::OnPipelinePrerollCompleted() {
+ DVLOG(1) << __FUNCTION__;
+
+ // Only transition to ReadyStateHaveEnoughData if we don't have
+ // any pending seeks because the transition can cause Blink to
+ // report that the most recent seek has completed.
+ if (!pending_seek_) {
+ SetReadyState(WebMediaPlayer::ReadyStateHaveEnoughData);
+
+ // TODO(scherkus): This should be handled by HTMLMediaElement and controls
+ // should know when to invalidate themselves http://crbug.com/337015
+ InvalidateOnMainThread();
+ }
}
void WebMediaPlayerImpl::OnDemuxerOpened() {
DCHECK(main_loop_->BelongsToCurrentThread());
- GetClient()->mediaSourceOpened(new WebMediaSourceImpl(
+ client_->mediaSourceOpened(new WebMediaSourceImpl(
chunk_demuxer_, base::Bind(&LogMediaSourceError, media_log_)));
}
void WebMediaPlayerImpl::OnKeyAdded(const std::string& session_id) {
DCHECK(main_loop_->BelongsToCurrentThread());
EmeUMAHistogramCounts(current_key_system_, "KeyAdded", 1);
- GetClient()->keyAdded(current_key_system_,
- WebString::fromUTF8(session_id));
+ client_->keyAdded(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id));
}
void WebMediaPlayerImpl::OnNeedKey(const std::string& type,
@@ -975,8 +1006,10 @@ void WebMediaPlayerImpl::OnNeedKey(const std::string& type,
DCHECK(main_loop_->BelongsToCurrentThread());
// Do not fire NeedKey event if encrypted media is not enabled.
- if (!decryptor_)
+ if (!blink::WebRuntimeFeatures::isPrefixedEncryptedMediaEnabled() &&
+ !blink::WebRuntimeFeatures::isEncryptedMediaEnabled()) {
return;
+ }
UMA_HISTOGRAM_COUNTS(kMediaEme + std::string("NeedKey"), 1);
@@ -985,10 +1018,8 @@ void WebMediaPlayerImpl::OnNeedKey(const std::string& type,
init_data_type_ = type;
const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
- GetClient()->keyNeeded(WebString(),
- WebString(),
- init_data_ptr,
- init_data.size());
+ client_->keyNeeded(
+ WebString::fromUTF8(type), init_data_ptr, init_data.size());
}
void WebMediaPlayerImpl::OnAddTextTrack(
@@ -1010,54 +1041,58 @@ void WebMediaPlayerImpl::OnAddTextTrack(
text_track_index_++));
scoped_ptr<media::TextTrack> text_track(
- new TextTrackImpl(main_loop_, GetClient(), web_inband_text_track.Pass()));
+ new TextTrackImpl(main_loop_, client_, web_inband_text_track.Pass()));
done_cb.Run(text_track.Pass());
}
void WebMediaPlayerImpl::OnKeyError(const std::string& session_id,
media::MediaKeys::KeyError error_code,
- int system_code) {
+ uint32 system_code) {
DCHECK(main_loop_->BelongsToCurrentThread());
EmeUMAHistogramEnumeration(current_key_system_, "KeyError",
error_code, media::MediaKeys::kMaxKeyError);
- GetClient()->keyError(
- current_key_system_,
+ unsigned short short_system_code = 0;
+ if (system_code > std::numeric_limits<unsigned short>::max()) {
+ LOG(WARNING) << "system_code exceeds unsigned short limit.";
+ short_system_code = std::numeric_limits<unsigned short>::max();
+ } else {
+ short_system_code = static_cast<unsigned short>(system_code);
+ }
+
+ client_->keyError(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
WebString::fromUTF8(session_id),
static_cast<blink::WebMediaPlayerClient::MediaKeyErrorCode>(error_code),
- system_code);
+ short_system_code);
}
void WebMediaPlayerImpl::OnKeyMessage(const std::string& session_id,
const std::vector<uint8>& message,
- const std::string& default_url) {
+ const GURL& destination_url) {
DCHECK(main_loop_->BelongsToCurrentThread());
- const GURL default_url_gurl(default_url);
- DLOG_IF(WARNING, !default_url.empty() && !default_url_gurl.is_valid())
- << "Invalid URL in default_url: " << default_url;
-
- GetClient()->keyMessage(current_key_system_,
- WebString::fromUTF8(session_id),
- message.empty() ? NULL : &message[0],
- message.size(),
- default_url_gurl);
-}
-
-void WebMediaPlayerImpl::SetOpaque(bool opaque) {
- DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK(destination_url.is_empty() || destination_url.is_valid());
- GetClient()->setOpaque(opaque);
+ client_->keyMessage(
+ WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
+ WebString::fromUTF8(session_id),
+ message.empty() ? NULL : &message[0],
+ message.size(),
+ destination_url);
}
-void WebMediaPlayerImpl::DataSourceInitialized(const GURL& gurl, bool success) {
+void WebMediaPlayerImpl::DataSourceInitialized(bool success) {
DCHECK(main_loop_->BelongsToCurrentThread());
if (!success) {
SetNetworkState(WebMediaPlayer::NetworkStateFormatError);
- Repaint();
+
+ // TODO(scherkus): This should be handled by HTMLMediaElement and controls
+ // should know when to invalidate themselves http://crbug.com/337015
+ InvalidateOnMainThread();
return;
}
@@ -1076,12 +1111,15 @@ void WebMediaPlayerImpl::NotifyDownloading(bool is_downloading) {
}
void WebMediaPlayerImpl::StartPipeline() {
+ DCHECK(main_loop_->BelongsToCurrentThread());
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
// Keep track if this is a MSE or non-MSE playback.
UMA_HISTOGRAM_BOOLEAN("Media.MSE.Playback",
(load_type_ == LoadTypeMediaSource));
+ media::LogCB mse_log_cb;
+
// Figure out which demuxer to use.
if (load_type_ != LoadTypeMediaSource) {
DCHECK(!chunk_demuxer_);
@@ -1095,10 +1133,13 @@ void WebMediaPlayerImpl::StartPipeline() {
DCHECK(!chunk_demuxer_);
DCHECK(!data_source_);
+ mse_log_cb = base::Bind(&LogMediaSourceError, media_log_);
+
chunk_demuxer_ = new media::ChunkDemuxer(
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnDemuxerOpened),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnNeedKey),
- base::Bind(&LogMediaSourceError, media_log_));
+ mse_log_cb,
+ true);
demuxer_.reset(chunk_demuxer_);
}
@@ -1106,25 +1147,21 @@ void WebMediaPlayerImpl::StartPipeline() {
new media::FilterCollection());
filter_collection->SetDemuxer(demuxer_.get());
- // Figure out if EME is enabled.
- media::SetDecryptorReadyCB set_decryptor_ready_cb;
- if (decryptor_) {
- set_decryptor_ready_cb = base::Bind(&ProxyDecryptor::SetDecryptorReadyCB,
- base::Unretained(decryptor_.get()));
- }
+ media::SetDecryptorReadyCB set_decryptor_ready_cb =
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::SetDecryptorReadyCB);
// Create our audio decoders and renderer.
ScopedVector<media::AudioDecoder> audio_decoders;
- audio_decoders.push_back(new media::FFmpegAudioDecoder(media_loop_));
- if (!cmd_line->HasSwitch(switches::kDisableOpusPlayback)) {
- audio_decoders.push_back(new media::OpusAudioDecoder(media_loop_));
- }
-
- scoped_ptr<media::AudioRenderer> audio_renderer(
- new media::AudioRendererImpl(media_loop_,
- audio_source_provider_.get(),
- audio_decoders.Pass(),
- set_decryptor_ready_cb));
+ audio_decoders.push_back(new media::FFmpegAudioDecoder(media_loop_,
+ mse_log_cb));
+ audio_decoders.push_back(new media::OpusAudioDecoder(media_loop_));
+
+ scoped_ptr<media::AudioRenderer> audio_renderer(new media::AudioRendererImpl(
+ media_loop_,
+ audio_source_provider_.get(),
+ audio_decoders.Pass(),
+ set_decryptor_ready_cb,
+ RenderThreadImpl::current()->GetAudioHardwareConfig()));
filter_collection->SetAudioRenderer(audio_renderer.Pass());
// Create our video decoders and renderer.
@@ -1147,7 +1184,6 @@ void WebMediaPlayerImpl::StartPipeline() {
video_decoders.Pass(),
set_decryptor_ready_cb,
base::Bind(&WebMediaPlayerImpl::FrameReady, base::Unretained(this)),
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::SetOpaque),
true));
filter_collection->SetVideoRenderer(video_renderer.Pass());
@@ -1161,78 +1197,37 @@ void WebMediaPlayerImpl::StartPipeline() {
}
// ... and we're ready to go!
- starting_ = true;
- pipeline_->Start(
+ seeking_ = true;
+ pipeline_.Start(
filter_collection.Pass(),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineEnded),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineError),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineSeek),
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineBufferingState),
- BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnDurationChange));
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineMetadata),
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelinePrerollCompleted),
+ BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnDurationChanged));
}
void WebMediaPlayerImpl::SetNetworkState(WebMediaPlayer::NetworkState state) {
+ DVLOG(1) << __FUNCTION__ << "(" << state << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
- DVLOG(1) << "SetNetworkState: " << state;
network_state_ = state;
// Always notify to ensure client has the latest value.
- GetClient()->networkStateChanged();
+ client_->networkStateChanged();
}
void WebMediaPlayerImpl::SetReadyState(WebMediaPlayer::ReadyState state) {
+ DVLOG(1) << __FUNCTION__ << "(" << state << ")";
DCHECK(main_loop_->BelongsToCurrentThread());
- DVLOG(1) << "SetReadyState: " << state;
- if (state == WebMediaPlayer::ReadyStateHaveEnoughData &&
- is_local_source_ &&
+ if (state == WebMediaPlayer::ReadyStateHaveEnoughData && data_source_ &&
+ data_source_->assume_fully_buffered() &&
network_state_ == WebMediaPlayer::NetworkStateLoading)
SetNetworkState(WebMediaPlayer::NetworkStateLoaded);
ready_state_ = state;
// Always notify to ensure client has the latest value.
- GetClient()->readyStateChanged();
-}
-
-void WebMediaPlayerImpl::Destroy() {
- DCHECK(main_loop_->BelongsToCurrentThread());
-
- // Abort any pending IO so stopping the pipeline doesn't get blocked.
- if (data_source_)
- data_source_->Abort();
- if (chunk_demuxer_) {
- chunk_demuxer_->Shutdown();
- chunk_demuxer_ = NULL;
- }
-
- if (gpu_factories_.get()) {
- gpu_factories_->Abort();
- gpu_factories_ = NULL;
- }
-
- // Make sure to kill the pipeline so there's no more media threads running.
- // Note: stopping the pipeline might block for a long time.
- base::WaitableEvent waiter(false, false);
- pipeline_->Stop(base::Bind(
- &base::WaitableEvent::Signal, base::Unretained(&waiter)));
- waiter.Wait();
-
- // Let V8 know we are not using extra resources anymore.
- if (incremented_externally_allocated_memory_) {
- v8::Isolate::GetCurrent()->AdjustAmountOfExternalAllocatedMemory(
- -kPlayerExtraMemory);
- incremented_externally_allocated_memory_ = false;
- }
-
- // Release any final references now that everything has stopped.
- pipeline_.reset();
- demuxer_.reset();
- data_source_.reset();
-}
-
-blink::WebMediaPlayerClient* WebMediaPlayerImpl::GetClient() {
- DCHECK(main_loop_->BelongsToCurrentThread());
- DCHECK(client_);
- return client_;
+ client_->readyStateChanged();
}
blink::WebAudioSourceProvider* WebMediaPlayerImpl::audioSourceProvider() {
@@ -1247,7 +1242,7 @@ void WebMediaPlayerImpl::IncrementExternallyAllocatedMemory() {
}
double WebMediaPlayerImpl::GetPipelineDuration() const {
- base::TimeDelta duration = pipeline_->GetMediaDuration();
+ base::TimeDelta duration = pipeline_.GetMediaDuration();
// Return positive infinity if the resource is unbounded.
// http://www.whatwg.org/specs/web-apps/current-work/multipage/video.html#dom-media-duration
@@ -1257,62 +1252,103 @@ double WebMediaPlayerImpl::GetPipelineDuration() const {
return duration.InSecondsF();
}
-void WebMediaPlayerImpl::OnDurationChange() {
+void WebMediaPlayerImpl::OnDurationChanged() {
if (ready_state_ == WebMediaPlayer::ReadyStateHaveNothing)
return;
- GetClient()->durationChanged();
+ client_->durationChanged();
}
-void WebMediaPlayerImpl::FrameReady(
- const scoped_refptr<media::VideoFrame>& frame) {
- base::AutoLock auto_lock(lock_);
+void WebMediaPlayerImpl::OnNaturalSizeChanged(gfx::Size size) {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK_NE(ready_state_, WebMediaPlayer::ReadyStateHaveNothing);
+ TRACE_EVENT0("media", "WebMediaPlayerImpl::OnNaturalSizeChanged");
- if (current_frame_ &&
- current_frame_->natural_size() != frame->natural_size() &&
- !pending_size_change_) {
- pending_size_change_ = true;
- }
+ media_log_->AddEvent(
+ media_log_->CreateVideoSizeSetEvent(size.width(), size.height()));
+ pipeline_metadata_.natural_size = size;
- DoneWaitingForPaint(false);
+ client_->sizeChanged();
+}
- current_frame_ = frame;
- current_frame_painted_ = false;
- TRACE_EVENT_FLOW_BEGIN0("media", "WebMediaPlayerImpl:waitingForPaint", this);
+void WebMediaPlayerImpl::OnOpacityChanged(bool opaque) {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+ DCHECK_NE(ready_state_, WebMediaPlayer::ReadyStateHaveNothing);
- if (pending_repaint_)
- return;
+ opaque_ = opaque;
+ if (video_weblayer_)
+ video_weblayer_->setOpaque(opaque_);
+}
- TRACE_EVENT_ASYNC_BEGIN0("media", "WebMediaPlayerImpl:repaintPending", this);
- pending_repaint_ = true;
- main_loop_->PostTask(FROM_HERE, base::Bind(
- &WebMediaPlayerImpl::Repaint, AsWeakPtr()));
+void WebMediaPlayerImpl::FrameReady(
+ const scoped_refptr<media::VideoFrame>& frame) {
+ compositor_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoFrameCompositor::UpdateCurrentFrame,
+ base::Unretained(compositor_),
+ frame));
}
-void WebMediaPlayerImpl::DoneWaitingForPaint(bool painting_frame) {
- lock_.AssertAcquired();
- if (!current_frame_ || current_frame_painted_)
+void WebMediaPlayerImpl::SetDecryptorReadyCB(
+ const media::DecryptorReadyCB& decryptor_ready_cb) {
+ DCHECK(main_loop_->BelongsToCurrentThread());
+
+ // Cancels the previous decryptor request.
+ if (decryptor_ready_cb.is_null()) {
+ if (!decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&decryptor_ready_cb_).Run(NULL);
return;
+ }
+
+ // TODO(xhwang): Support multiple decryptor notification request (e.g. from
+ // video and audio). The current implementation is okay for the current
+ // media pipeline since we initialize audio and video decoders in sequence.
+ // But WebMediaPlayerImpl should not depend on media pipeline's implementation
+ // detail.
+ DCHECK(decryptor_ready_cb_.is_null());
- TRACE_EVENT_FLOW_END0("media", "WebMediaPlayerImpl:waitingForPaint", this);
+ // Mixed use of prefixed and unprefixed EME APIs is disallowed by Blink.
+ DCHECK(!proxy_decryptor_ || !web_cdm_);
- if (painting_frame) {
- current_frame_painted_ = true;
+ if (proxy_decryptor_) {
+ decryptor_ready_cb.Run(proxy_decryptor_->GetDecryptor());
return;
}
- // The frame wasn't painted, but we aren't waiting for a Repaint() call so
- // assume that the frame wasn't painted because the video wasn't visible.
- if (!pending_repaint_)
+ if (web_cdm_) {
+ decryptor_ready_cb.Run(web_cdm_->GetDecryptor());
return;
+ }
- // The |current_frame_| wasn't painted, it is being replaced, and we haven't
- // even gotten the chance to request a repaint for it yet. Mark it as dropped.
- TRACE_EVENT0("media", "WebMediaPlayerImpl:frameDropped");
- DVLOG(1) << "Frame dropped before being painted: "
- << current_frame_->GetTimestamp().InSecondsF();
- if (frames_dropped_before_paint_ < kuint32max)
- frames_dropped_before_paint_++;
+ decryptor_ready_cb_ = decryptor_ready_cb;
+}
+
+static void GetCurrentFrameAndSignal(
+ VideoFrameCompositor* compositor,
+ scoped_refptr<media::VideoFrame>* video_frame_out,
+ base::WaitableEvent* event) {
+ TRACE_EVENT0("media", "GetCurrentFrameAndSignal");
+ *video_frame_out = compositor->GetCurrentFrame();
+ event->Signal();
+}
+
+scoped_refptr<media::VideoFrame>
+WebMediaPlayerImpl::GetCurrentFrameFromCompositor() {
+ TRACE_EVENT0("media", "WebMediaPlayerImpl::GetCurrentFrameFromCompositor");
+ if (compositor_task_runner_->BelongsToCurrentThread())
+ return compositor_->GetCurrentFrame();
+
+ // Use a posted task and waitable event instead of a lock otherwise
+ // WebGL/Canvas can see different content than what the compositor is seeing.
+ scoped_refptr<media::VideoFrame> video_frame;
+ base::WaitableEvent event(false, false);
+ compositor_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&GetCurrentFrameAndSignal,
+ base::Unretained(compositor_),
+ &video_frame,
+ &event));
+ event.Wait();
+ return video_frame;
}
} // namespace content
diff --git a/chromium/content/renderer/media/webmediaplayer_impl.h b/chromium/content/renderer/media/webmediaplayer_impl.h
index 50ee39b1429..ad932b885ae 100644
--- a/chromium/content/renderer/media/webmediaplayer_impl.h
+++ b/chromium/content/renderer/media/webmediaplayer_impl.h
@@ -2,21 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Delegate calls from WebCore::MediaPlayerPrivate to Chrome's video player.
-// It contains Pipeline which is the actual media player pipeline, it glues
-// the media player pipeline, data source, audio renderer and renderer.
-// Pipeline would creates multiple threads and access some public methods
-// of this class, so we need to be extra careful about concurrent access of
-// methods and members.
-//
-// Other issues:
-// During tear down of the whole browser or a tab, the DOM tree may not be
-// destructed nicely, and there will be some dangling media threads trying to
-// the main thread, so we need this class to listen to destruction event of the
-// main thread and cleanup the media threads when the even is received. Also
-// at destruction of this class we will need to unhook it from destruction event
-// list of the main thread.
-
#ifndef CONTENT_RENDERER_MEDIA_WEBMEDIAPLAYER_IMPL_H_
#define CONTENT_RENDERER_MEDIA_WEBMEDIAPLAYER_IMPL_H_
@@ -28,11 +13,12 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
-#include "cc/layers/video_frame_provider.h"
-#include "content/public/renderer/render_view_observer.h"
+#include "content/renderer/media/buffered_data_source_host_impl.h"
#include "content/renderer/media/crypto/proxy_decryptor.h"
+#include "content/renderer/media/video_frame_compositor.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/decryptor.h"
+// TODO(xhwang): Remove when we remove prefixed EME implementation.
#include "media/base/media_keys.h"
#include "media/base/pipeline.h"
#include "media/base/text_track.h"
@@ -47,7 +33,8 @@
class RenderAudioSourceProvider;
namespace blink {
-class WebFrame;
+class WebContentDecryptionModule;
+class WebLocalFrame;
}
namespace base {
@@ -60,49 +47,45 @@ class GpuVideoAcceleratorFactories;
class MediaLog;
}
-namespace webkit {
-class WebLayerImpl;
-}
namespace content {
class BufferedDataSource;
+class VideoFrameCompositor;
class WebAudioSourceProviderImpl;
+class WebContentDecryptionModuleImpl;
+class WebLayerImpl;
class WebMediaPlayerDelegate;
class WebMediaPlayerParams;
class WebTextTrackImpl;
+// The canonical implementation of blink::WebMediaPlayer that's backed by
+// media::Pipeline. Handles normal resource loading, Media Source, and
+// Encrypted Media.
class WebMediaPlayerImpl
: public blink::WebMediaPlayer,
- public cc::VideoFrameProvider,
- public content::RenderViewObserver,
public base::SupportsWeakPtr<WebMediaPlayerImpl> {
public:
// Constructs a WebMediaPlayer implementation using Chromium's media stack.
- // |render_view| is passed only for the purpose of registering |this| as an
- // observer of it.
// |delegate| may be null.
- WebMediaPlayerImpl(
- content::RenderView* render_view,
- blink::WebFrame* frame,
- blink::WebMediaPlayerClient* client,
- base::WeakPtr<WebMediaPlayerDelegate> delegate,
- const WebMediaPlayerParams& params);
+ WebMediaPlayerImpl(blink::WebLocalFrame* frame,
+ blink::WebMediaPlayerClient* client,
+ base::WeakPtr<WebMediaPlayerDelegate> delegate,
+ const WebMediaPlayerParams& params);
virtual ~WebMediaPlayerImpl();
virtual void load(LoadType load_type,
const blink::WebURL& url,
- CORSMode cors_mode) OVERRIDE;
+ CORSMode cors_mode);
// Playback controls.
virtual void play();
virtual void pause();
- virtual bool supportsFullscreen() const;
virtual bool supportsSave() const;
virtual void seek(double seconds);
virtual void setRate(double rate);
virtual void setVolume(double volume);
virtual void setPreload(blink::WebMediaPlayer::Preload preload);
- virtual const blink::WebTimeRanges& buffered();
+ virtual blink::WebTimeRanges buffered() const;
virtual double maxTimeSeekable() const;
// Methods for painting.
@@ -121,6 +104,7 @@ class WebMediaPlayerImpl
virtual bool paused() const;
virtual bool seeking() const;
virtual double duration() const;
+ virtual double timelineOffset() const;
virtual double currentTime() const;
// Internal states of loading and network.
@@ -129,7 +113,7 @@ class WebMediaPlayerImpl
virtual blink::WebMediaPlayer::NetworkState networkState() const;
virtual blink::WebMediaPlayer::ReadyState readyState() const;
- virtual bool didLoadingProgress() const;
+ virtual bool didLoadingProgress();
virtual bool hasSingleSecurityOrigin() const;
virtual bool didPassCORSAccessCheck() const;
@@ -141,13 +125,6 @@ class WebMediaPlayerImpl
virtual unsigned audioDecodedByteCount() const;
virtual unsigned videoDecodedByteCount() const;
- // cc::VideoFrameProvider implementation.
- virtual void SetVideoFrameProviderClient(
- cc::VideoFrameProvider::Client* client) OVERRIDE;
- virtual scoped_refptr<media::VideoFrame> GetCurrentFrame() OVERRIDE;
- virtual void PutCurrentFrame(const scoped_refptr<media::VideoFrame>& frame)
- OVERRIDE;
-
virtual bool copyVideoTextureToPlatformTexture(
blink::WebGraphicsContext3D* web_graphics_context,
unsigned int texture,
@@ -175,29 +152,34 @@ class WebMediaPlayerImpl
const blink::WebString& key_system,
const blink::WebString& session_id);
- // content::RenderViewObserver implementation.
- virtual void OnDestruct() OVERRIDE;
+ virtual void setContentDecryptionModule(
+ blink::WebContentDecryptionModule* cdm);
- void Repaint();
+ // Notifies blink that the entire media element region has been invalidated.
+ // This path is slower than notifying the compositor directly as it performs
+ // more work and can trigger layouts. It should only be used in two cases:
+ // 1) Major state changes (e.g., first frame available, run time error
+ // occured)
+ // 2) Compositing not available
+ void InvalidateOnMainThread();
void OnPipelineSeek(media::PipelineStatus status);
void OnPipelineEnded();
void OnPipelineError(media::PipelineStatus error);
- void OnPipelineBufferingState(
- media::Pipeline::BufferingState buffering_state);
+ void OnPipelineMetadata(media::PipelineMetadata metadata);
+ void OnPipelinePrerollCompleted();
void OnDemuxerOpened();
void OnKeyAdded(const std::string& session_id);
void OnKeyError(const std::string& session_id,
media::MediaKeys::KeyError error_code,
- int system_code);
+ uint32 system_code);
void OnKeyMessage(const std::string& session_id,
const std::vector<uint8>& message,
- const std::string& default_url);
+ const GURL& destination_url);
void OnNeedKey(const std::string& type,
const std::vector<uint8>& init_data);
void OnAddTextTrack(const media::TextTrackConfig& config,
const media::AddTextTrackDoneCB& done_cb);
- void SetOpaque(bool);
private:
// Called after |defer_load_cb_| has decided to allow the load. If
@@ -207,7 +189,7 @@ class WebMediaPlayerImpl
CORSMode cors_mode);
// Called after asynchronous initialization of a data source completed.
- void DataSourceInitialized(const GURL& gurl, bool success);
+ void DataSourceInitialized(bool success);
// Called when the data source is downloading or paused.
void NotifyDownloading(bool is_downloading);
@@ -220,70 +202,72 @@ class WebMediaPlayerImpl
void SetNetworkState(blink::WebMediaPlayer::NetworkState state);
void SetReadyState(blink::WebMediaPlayer::ReadyState state);
- // Destroy resources held.
- void Destroy();
-
- // Getter method to |client_|.
- blink::WebMediaPlayerClient* GetClient();
-
// Lets V8 know that player uses extra resources not managed by V8.
void IncrementExternallyAllocatedMemory();
// Actually do the work for generateKeyRequest/addKey so they can easily
// report results to UMA.
- MediaKeyException GenerateKeyRequestInternal(
- const blink::WebString& key_system,
- const unsigned char* init_data,
- unsigned init_data_length);
- MediaKeyException AddKeyInternal(const blink::WebString& key_system,
+ MediaKeyException GenerateKeyRequestInternal(const std::string& key_system,
+ const unsigned char* init_data,
+ unsigned init_data_length);
+ MediaKeyException AddKeyInternal(const std::string& key_system,
const unsigned char* key,
unsigned key_length,
const unsigned char* init_data,
unsigned init_data_length,
- const blink::WebString& session_id);
- MediaKeyException CancelKeyRequestInternal(
- const blink::WebString& key_system,
- const blink::WebString& session_id);
+ const std::string& session_id);
+ MediaKeyException CancelKeyRequestInternal(const std::string& key_system,
+ const std::string& session_id);
// Gets the duration value reported by the pipeline.
double GetPipelineDuration() const;
- // Notifies WebKit of the duration change.
- void OnDurationChange();
+ // Callbacks from |pipeline_| that are forwarded to |client_|.
+ void OnDurationChanged();
+ void OnNaturalSizeChanged(gfx::Size size);
+ void OnOpacityChanged(bool opaque);
// Called by VideoRendererImpl on its internal thread with the new frame to be
// painted.
void FrameReady(const scoped_refptr<media::VideoFrame>& frame);
- // Called when a paint or a new frame arrives to indicate that we are
- // no longer waiting for |current_frame_| to be painted.
- // |painting_frame| is set to true if |current_frame_| is being painted.
- // False indicates |current_frame_| is being replaced with a new frame.
- void DoneWaitingForPaint(bool painting_frame);
+ // Requests that this object notifies when a decryptor is ready through the
+ // |decryptor_ready_cb| provided.
+ // If |decryptor_ready_cb| is null, the existing callback will be fired with
+ // NULL immediately and reset.
+ void SetDecryptorReadyCB(const media::DecryptorReadyCB& decryptor_ready_cb);
+
+ // Returns the current video frame from |compositor_|. Blocks until the
+ // compositor can return the frame.
+ scoped_refptr<media::VideoFrame> GetCurrentFrameFromCompositor();
- blink::WebFrame* frame_;
+ blink::WebLocalFrame* frame_;
// TODO(hclam): get rid of these members and read from the pipeline directly.
blink::WebMediaPlayer::NetworkState network_state_;
blink::WebMediaPlayer::ReadyState ready_state_;
- // Keep a list of buffered time ranges.
- blink::WebTimeRanges buffered_;
-
// Message loops for posting tasks on Chrome's main thread. Also used
// for DCHECKs so methods calls won't execute in the wrong thread.
const scoped_refptr<base::MessageLoopProxy> main_loop_;
- scoped_ptr<media::Pipeline> pipeline_;
scoped_refptr<base::MessageLoopProxy> media_loop_;
+ scoped_refptr<media::MediaLog> media_log_;
+ media::Pipeline pipeline_;
// The currently selected key system. Empty string means that no key system
// has been selected.
- blink::WebString current_key_system_;
+ std::string current_key_system_;
// The LoadType passed in the |load_type| parameter of the load() call.
LoadType load_type_;
+ // Cache of metadata for answering hasAudio(), hasVideo(), and naturalSize().
+ media::PipelineMetadata pipeline_metadata_;
+
+ // Whether the video is known to be opaque or not.
+ bool opaque_;
+
// Playback state.
//
// TODO(scherkus): we have these because Pipeline favours the simplicity of a
@@ -312,8 +296,6 @@ class WebMediaPlayerImpl
base::Callback<void(const base::Closure&)> defer_load_cb_;
- scoped_refptr<media::MediaLog> media_log_;
-
// Since accelerated compositing status is only known after the first layout,
// we delay reporting it to UMA until that time.
bool accelerated_compositing_reported_;
@@ -326,14 +308,8 @@ class WebMediaPlayerImpl
// Routes audio playback to either AudioRendererSink or WebAudio.
scoped_refptr<WebAudioSourceProviderImpl> audio_source_provider_;
- bool is_local_source_;
bool supports_save_;
- // The decryptor that manages decryption keys and decrypts encrypted frames.
- scoped_ptr<ProxyDecryptor> decryptor_;
-
- bool starting_;
-
// These two are mutually exclusive:
// |data_source_| is used for regular resource loads.
// |chunk_demuxer_| is used for Media Source resource loads.
@@ -344,35 +320,33 @@ class WebMediaPlayerImpl
scoped_ptr<media::Demuxer> demuxer_;
media::ChunkDemuxer* chunk_demuxer_;
+ BufferedDataSourceHostImpl buffered_data_source_host_;
+
// Temporary for EME v0.1. In the future the init data type should be passed
// through GenerateKeyRequest() directly from WebKit.
std::string init_data_type_;
- // Video frame rendering members.
- //
- // |lock_| protects |current_frame_|, |current_frame_painted_|, and
- // |frames_dropped_before_paint_| since new frames arrive on the video
- // rendering thread, yet are accessed for rendering on either the main thread
- // or compositing thread depending on whether accelerated compositing is used.
- mutable base::Lock lock_;
+ // Video rendering members.
+ scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner_;
+ VideoFrameCompositor* compositor_; // Deleted on |compositor_task_runner_|.
media::SkCanvasVideoRenderer skcanvas_video_renderer_;
- scoped_refptr<media::VideoFrame> current_frame_;
- bool current_frame_painted_;
- uint32 frames_dropped_before_paint_;
- bool pending_repaint_;
- bool pending_size_change_;
// The compositor layer for displaying the video content when using composited
// playback.
- scoped_ptr<webkit::WebLayerImpl> video_weblayer_;
-
- // A pointer back to the compositor to inform it about state changes. This is
- // not NULL while the compositor is actively using this webmediaplayer.
- cc::VideoFrameProvider::Client* video_frame_provider_client_;
+ scoped_ptr<WebLayerImpl> video_weblayer_;
// Text track objects get a unique index value when they're created.
int text_track_index_;
+ // Manages decryption keys and decrypts encrypted frames.
+ scoped_ptr<ProxyDecryptor> proxy_decryptor_;
+
+ // Non-owned pointer to the CDM. Updated via calls to
+ // setContentDecryptionModule().
+ WebContentDecryptionModuleImpl* web_cdm_;
+
+ media::DecryptorReadyCB decryptor_ready_cb_;
+
DISALLOW_COPY_AND_ASSIGN(WebMediaPlayerImpl);
};
diff --git a/chromium/content/renderer/media/webmediaplayer_ms.cc b/chromium/content/renderer/media/webmediaplayer_ms.cc
index 3f64ff150ef..b466d3168aa 100644
--- a/chromium/content/renderer/media/webmediaplayer_ms.cc
+++ b/chromium/content/renderer/media/webmediaplayer_ms.cc
@@ -11,52 +11,94 @@
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "cc/layers/video_layer.h"
+#include "content/public/renderer/render_view.h"
+#include "content/renderer/compositor_bindings/web_layer_impl.h"
#include "content/renderer/media/media_stream_audio_renderer.h"
-#include "content/renderer/media/media_stream_client.h"
+#include "content/renderer/media/media_stream_renderer_factory.h"
#include "content/renderer/media/video_frame_provider.h"
#include "content/renderer/media/webmediaplayer_delegate.h"
#include "content/renderer/media/webmediaplayer_util.h"
+#include "content/renderer/render_frame_impl.h"
#include "media/base/media_log.h"
#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
#include "third_party/WebKit/public/platform/WebRect.h"
#include "third_party/WebKit/public/platform/WebSize.h"
#include "third_party/WebKit/public/platform/WebURL.h"
#include "third_party/WebKit/public/web/WebFrame.h"
#include "third_party/WebKit/public/web/WebView.h"
-#include "webkit/renderer/compositor_bindings/web_layer_impl.h"
+#include "third_party/skia/include/core/SkBitmap.h"
using blink::WebCanvas;
using blink::WebMediaPlayer;
using blink::WebRect;
using blink::WebSize;
+namespace {
+
+// This function copies a YV12 or NATIVE_TEXTURE to a new YV12
+// media::VideoFrame.
+scoped_refptr<media::VideoFrame> CopyFrameToYV12(
+ const scoped_refptr<media::VideoFrame>& frame) {
+ DCHECK(frame->format() == media::VideoFrame::YV12 ||
+ frame->format() == media::VideoFrame::I420 ||
+ frame->format() == media::VideoFrame::NATIVE_TEXTURE);
+ scoped_refptr<media::VideoFrame> new_frame =
+ media::VideoFrame::CreateFrame(media::VideoFrame::YV12,
+ frame->coded_size(),
+ frame->visible_rect(),
+ frame->natural_size(),
+ frame->timestamp());
+
+ if (frame->format() == media::VideoFrame::NATIVE_TEXTURE) {
+ SkBitmap bitmap;
+ bitmap.allocN32Pixels(frame->visible_rect().width(),
+ frame->visible_rect().height());
+ frame->ReadPixelsFromNativeTexture(bitmap);
+
+ media::CopyRGBToVideoFrame(
+ reinterpret_cast<uint8*>(bitmap.getPixels()),
+ bitmap.rowBytes(),
+ frame->visible_rect(),
+ new_frame.get());
+ } else {
+ size_t number_of_planes =
+ media::VideoFrame::NumPlanes(frame->format());
+ for (size_t i = 0; i < number_of_planes; ++i) {
+ media::CopyPlane(i, frame->data(i), frame->stride(i),
+ frame->rows(i), new_frame.get());
+ }
+ }
+ return new_frame;
+}
+
+} // anonymous namespace
+
namespace content {
WebMediaPlayerMS::WebMediaPlayerMS(
blink::WebFrame* frame,
blink::WebMediaPlayerClient* client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
- MediaStreamClient* media_stream_client,
- media::MediaLog* media_log)
+ media::MediaLog* media_log,
+ scoped_ptr<MediaStreamRendererFactory> factory)
: frame_(frame),
network_state_(WebMediaPlayer::NetworkStateEmpty),
ready_state_(WebMediaPlayer::ReadyStateHaveNothing),
buffered_(static_cast<size_t>(1)),
client_(client),
delegate_(delegate),
- media_stream_client_(media_stream_client),
paused_(true),
current_frame_used_(false),
pending_repaint_(false),
video_frame_provider_client_(NULL),
received_first_frame_(false),
- sequence_started_(false),
total_frame_count_(0),
dropped_frame_count_(0),
- media_log_(media_log) {
+ media_log_(media_log),
+ renderer_factory_(factory.Pass()) {
DVLOG(1) << "WebMediaPlayerMS::ctor";
- DCHECK(media_stream_client);
media_log_->AddEvent(
media_log_->CreateEvent(media::MediaLogEvent::WEBMEDIAPLAYER_CREATED));
}
@@ -98,16 +140,18 @@ void WebMediaPlayerMS::load(LoadType load_type,
SetReadyState(WebMediaPlayer::ReadyStateHaveNothing);
media_log_->AddEvent(media_log_->CreateLoadEvent(url.spec()));
- // Check if this url is media stream.
- video_frame_provider_ = media_stream_client_->GetVideoFrameProvider(
+ video_frame_provider_ = renderer_factory_->GetVideoFrameProvider(
url,
base::Bind(&WebMediaPlayerMS::OnSourceError, AsWeakPtr()),
base::Bind(&WebMediaPlayerMS::OnFrameAvailable, AsWeakPtr()));
- audio_renderer_ = media_stream_client_->GetAudioRenderer(url);
+ RenderFrame* frame = RenderFrame::FromWebFrame(frame_);
+ audio_renderer_ = renderer_factory_->GetAudioRenderer(
+ url,
+ frame->GetRenderView()->GetRoutingID(),
+ frame->GetRoutingID());
if (video_frame_provider_.get() || audio_renderer_.get()) {
- GetClient()->setOpaque(true);
if (audio_renderer_.get())
audio_renderer_->Start();
@@ -162,11 +206,17 @@ void WebMediaPlayerMS::pause() {
paused_ = true;
media_log_->AddEvent(media_log_->CreateEvent(media::MediaLogEvent::PAUSE));
-}
-bool WebMediaPlayerMS::supportsFullscreen() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return true;
+ if (!current_frame_)
+ return;
+
+ // Copy the frame so that rendering can show the last received frame.
+ // The original frame must not be referenced when the player is paused since
+ // there might be a finite number of available buffers. E.g, video that
+ // originates from a video camera.
+ scoped_refptr<media::VideoFrame> new_frame = CopyFrameToYV12(current_frame_);
+ base::AutoLock auto_lock(current_frame_lock_);
+ current_frame_ = new_frame;
}
bool WebMediaPlayerMS::supportsSave() const {
@@ -231,8 +281,8 @@ double WebMediaPlayerMS::duration() const {
double WebMediaPlayerMS::currentTime() const {
DCHECK(thread_checker_.CalledOnValidThread());
- if (current_frame_.get()) {
- return current_frame_->GetTimestamp().InSecondsF();
+ if (current_time_.ToInternalValue() != 0) {
+ return current_time_.InSecondsF();
} else if (audio_renderer_.get()) {
return audio_renderer_->GetCurrentRenderTime().InSecondsF();
}
@@ -251,7 +301,7 @@ WebMediaPlayer::ReadyState WebMediaPlayerMS::readyState() const {
return ready_state_;
}
-const blink::WebTimeRanges& WebMediaPlayerMS::buffered() {
+blink::WebTimeRanges WebMediaPlayerMS::buffered() const {
DCHECK(thread_checker_.CalledOnValidThread());
return buffered_;
}
@@ -261,7 +311,7 @@ double WebMediaPlayerMS::maxTimeSeekable() const {
return 0.0;
}
-bool WebMediaPlayerMS::didLoadingProgress() const {
+bool WebMediaPlayerMS::didLoadingProgress() {
DCHECK(thread_checker_.CalledOnValidThread());
return true;
}
@@ -363,9 +413,9 @@ void WebMediaPlayerMS::OnFrameAvailable(
SetReadyState(WebMediaPlayer::ReadyStateHaveEnoughData);
GetClient()->sizeChanged();
- if (video_frame_provider_.get() && GetClient()->needsWebLayerForVideo()) {
- video_weblayer_.reset(
- new webkit::WebLayerImpl(cc::VideoLayer::Create(this)));
+ if (video_frame_provider_) {
+ video_weblayer_.reset(new WebLayerImpl(cc::VideoLayer::Create(this)));
+ video_weblayer_->setOpaque(true);
GetClient()->setWebLayer(video_weblayer_.get());
}
}
@@ -374,10 +424,6 @@ void WebMediaPlayerMS::OnFrameAvailable(
if (paused_)
return;
- if (!sequence_started_) {
- sequence_started_ = true;
- start_time_ = frame->GetTimestamp();
- }
bool size_changed = !current_frame_.get() ||
current_frame_->natural_size() != frame->natural_size();
@@ -386,7 +432,7 @@ void WebMediaPlayerMS::OnFrameAvailable(
if (!current_frame_used_ && current_frame_.get())
++dropped_frame_count_;
current_frame_ = frame;
- current_frame_->SetTimestamp(frame->GetTimestamp() - start_time_);
+ current_time_ = frame->timestamp();
current_frame_used_ = false;
}
diff --git a/chromium/content/renderer/media/webmediaplayer_ms.h b/chromium/content/renderer/media/webmediaplayer_ms.h
index 8fa5ef5e1e2..7e8944ad920 100644
--- a/chromium/content/renderer/media/webmediaplayer_ms.h
+++ b/chromium/content/renderer/media/webmediaplayer_ms.h
@@ -25,14 +25,12 @@ namespace media {
class MediaLog;
}
-namespace webkit {
-class WebLayerImpl;
-}
namespace content {
class MediaStreamAudioRenderer;
-class MediaStreamClient;
+class MediaStreamRendererFactory;
class VideoFrameProvider;
+class WebLayerImpl;
class WebMediaPlayerDelegate;
// WebMediaPlayerMS delegates calls from WebCore::MediaPlayerPrivate to
@@ -59,59 +57,58 @@ class WebMediaPlayerMS
WebMediaPlayerMS(blink::WebFrame* frame,
blink::WebMediaPlayerClient* client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
- MediaStreamClient* media_stream_client,
- media::MediaLog* media_log);
+ media::MediaLog* media_log,
+ scoped_ptr<MediaStreamRendererFactory> factory);
virtual ~WebMediaPlayerMS();
virtual void load(LoadType load_type,
const blink::WebURL& url,
- CORSMode cors_mode) OVERRIDE;
+ CORSMode cors_mode);
// Playback controls.
- virtual void play() OVERRIDE;
- virtual void pause() OVERRIDE;
- virtual bool supportsFullscreen() const OVERRIDE;
- virtual bool supportsSave() const OVERRIDE;
+ virtual void play();
+ virtual void pause();
+ virtual bool supportsSave() const;
virtual void seek(double seconds);
virtual void setRate(double rate);
virtual void setVolume(double volume);
- virtual void setPreload(blink::WebMediaPlayer::Preload preload) OVERRIDE;
- virtual const blink::WebTimeRanges& buffered() OVERRIDE;
+ virtual void setPreload(blink::WebMediaPlayer::Preload preload);
+ virtual blink::WebTimeRanges buffered() const;
virtual double maxTimeSeekable() const;
// Methods for painting.
virtual void paint(blink::WebCanvas* canvas,
const blink::WebRect& rect,
- unsigned char alpha) OVERRIDE;
+ unsigned char alpha);
// True if the loaded media has a playable video/audio track.
- virtual bool hasVideo() const OVERRIDE;
- virtual bool hasAudio() const OVERRIDE;
+ virtual bool hasVideo() const;
+ virtual bool hasAudio() const;
// Dimensions of the video.
- virtual blink::WebSize naturalSize() const OVERRIDE;
+ virtual blink::WebSize naturalSize() const;
// Getters of playback state.
- virtual bool paused() const OVERRIDE;
- virtual bool seeking() const OVERRIDE;
+ virtual bool paused() const;
+ virtual bool seeking() const;
virtual double duration() const;
virtual double currentTime() const;
// Internal states of loading and network.
- virtual blink::WebMediaPlayer::NetworkState networkState() const OVERRIDE;
- virtual blink::WebMediaPlayer::ReadyState readyState() const OVERRIDE;
+ virtual blink::WebMediaPlayer::NetworkState networkState() const;
+ virtual blink::WebMediaPlayer::ReadyState readyState() const;
- virtual bool didLoadingProgress() const OVERRIDE;
+ virtual bool didLoadingProgress();
- virtual bool hasSingleSecurityOrigin() const OVERRIDE;
- virtual bool didPassCORSAccessCheck() const OVERRIDE;
+ virtual bool hasSingleSecurityOrigin() const;
+ virtual bool didPassCORSAccessCheck() const;
virtual double mediaTimeForTimeValue(double timeValue) const;
- virtual unsigned decodedFrameCount() const OVERRIDE;
- virtual unsigned droppedFrameCount() const OVERRIDE;
- virtual unsigned audioDecodedByteCount() const OVERRIDE;
- virtual unsigned videoDecodedByteCount() const OVERRIDE;
+ virtual unsigned decodedFrameCount() const;
+ virtual unsigned droppedFrameCount() const;
+ virtual unsigned audioDecodedByteCount() const;
+ virtual unsigned videoDecodedByteCount() const;
// VideoFrameProvider implementation.
virtual void SetVideoFrameProviderClient(
@@ -151,32 +148,32 @@ class WebMediaPlayerMS
base::WeakPtr<WebMediaPlayerDelegate> delegate_;
- MediaStreamClient* media_stream_client_;
-
// Specify content:: to disambiguate from cc::.
scoped_refptr<content::VideoFrameProvider> video_frame_provider_;
bool paused_;
// |current_frame_| is updated only on main thread. The object it holds
// can be freed on the compositor thread if it is the last to hold a
- // reference but media::VideoFrame is a thread-safe ref-pointer.
+ // reference but media::VideoFrame is a thread-safe ref-pointer. It is
+ // however read on the compositing thread so locking is required around all
+ // modifications on the main thread, and all reads on the compositing thread.
scoped_refptr<media::VideoFrame> current_frame_;
// |current_frame_used_| is updated on both main and compositing thread.
// It's used to track whether |current_frame_| was painted for detecting
// when to increase |dropped_frame_count_|.
bool current_frame_used_;
+ // |current_frame_lock_| protects |current_frame_used_| and |current_frame_|.
base::Lock current_frame_lock_;
bool pending_repaint_;
- scoped_ptr<webkit::WebLayerImpl> video_weblayer_;
+ scoped_ptr<WebLayerImpl> video_weblayer_;
// A pointer back to the compositor to inform it about state changes. This is
// not NULL while the compositor is actively using this webmediaplayer.
cc::VideoFrameProvider::Client* video_frame_provider_client_;
bool received_first_frame_;
- bool sequence_started_;
- base::TimeDelta start_time_;
+ base::TimeDelta current_time_;
unsigned total_frame_count_;
unsigned dropped_frame_count_;
media::SkCanvasVideoRenderer video_renderer_;
@@ -185,6 +182,8 @@ class WebMediaPlayerMS
scoped_refptr<media::MediaLog> media_log_;
+ scoped_ptr<MediaStreamRendererFactory> renderer_factory_;
+
DISALLOW_COPY_AND_ASSIGN(WebMediaPlayerMS);
};
diff --git a/chromium/content/renderer/media/webmediaplayer_params.cc b/chromium/content/renderer/media/webmediaplayer_params.cc
index a05abbfa571..ddcd21341a3 100644
--- a/chromium/content/renderer/media/webmediaplayer_params.cc
+++ b/chromium/content/renderer/media/webmediaplayer_params.cc
@@ -4,25 +4,15 @@
#include "content/renderer/media/webmediaplayer_params.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "media/base/audio_renderer_sink.h"
-#include "media/base/media_log.h"
-#include "media/filters/gpu_video_accelerator_factories.h"
namespace content {
WebMediaPlayerParams::WebMediaPlayerParams(
- const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
const base::Callback<void(const base::Closure&)>& defer_load_cb,
- const scoped_refptr<media::AudioRendererSink>& audio_renderer_sink,
- const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories,
- const scoped_refptr<media::MediaLog>& media_log)
- : message_loop_proxy_(message_loop_proxy),
- defer_load_cb_(defer_load_cb),
- audio_renderer_sink_(audio_renderer_sink),
- gpu_factories_(gpu_factories),
- media_log_(media_log) {
- DCHECK(media_log_.get());
+ const scoped_refptr<media::AudioRendererSink>& audio_renderer_sink)
+ : defer_load_cb_(defer_load_cb),
+ audio_renderer_sink_(audio_renderer_sink) {
}
WebMediaPlayerParams::~WebMediaPlayerParams() {}
diff --git a/chromium/content/renderer/media/webmediaplayer_params.h b/chromium/content/renderer/media/webmediaplayer_params.h
index bf398642f77..9fa96e1af26 100644
--- a/chromium/content/renderer/media/webmediaplayer_params.h
+++ b/chromium/content/renderer/media/webmediaplayer_params.h
@@ -8,14 +8,8 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
-namespace base {
-class MessageLoopProxy;
-}
-
namespace media {
class AudioRendererSink;
-class GpuVideoAcceleratorFactories;
-class MediaLog;
}
namespace content {
@@ -24,20 +18,12 @@ namespace content {
// to plumb arguments through various abstraction layers.
class WebMediaPlayerParams {
public:
- // |message_loop_proxy| and |media_log| are the only required parameters;
- // all others may be null.
+ // Parameters may be null.
WebMediaPlayerParams(
- const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
const base::Callback<void(const base::Closure&)>& defer_load_cb,
- const scoped_refptr<media::AudioRendererSink>& audio_renderer_sink,
- const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories,
- const scoped_refptr<media::MediaLog>& media_log);
+ const scoped_refptr<media::AudioRendererSink>& audio_renderer_sink);
~WebMediaPlayerParams();
- const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy() const {
- return message_loop_proxy_;
- }
-
base::Callback<void(const base::Closure&)> defer_load_cb() const {
return defer_load_cb_;
}
@@ -46,21 +32,9 @@ class WebMediaPlayerParams {
return audio_renderer_sink_;
}
- const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories()
- const {
- return gpu_factories_;
- }
-
- const scoped_refptr<media::MediaLog>& media_log() const {
- return media_log_;
- }
-
private:
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
base::Callback<void(const base::Closure&)> defer_load_cb_;
scoped_refptr<media::AudioRendererSink> audio_renderer_sink_;
- scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
- scoped_refptr<media::MediaLog> media_log_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WebMediaPlayerParams);
};
diff --git a/chromium/content/renderer/media/webmediaplayer_util.cc b/chromium/content/renderer/media/webmediaplayer_util.cc
index a245f038b0a..6d1b1e25aa8 100644
--- a/chromium/content/renderer/media/webmediaplayer_util.cc
+++ b/chromium/content/renderer/media/webmediaplayer_util.cc
@@ -6,6 +6,7 @@
#include <math.h>
+#include "base/metrics/histogram.h"
#include "media/base/media_keys.h"
#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
@@ -31,7 +32,7 @@ base::TimeDelta ConvertSecondsToTimestamp(double seconds) {
blink::WebTimeRanges ConvertToWebTimeRanges(
const media::Ranges<base::TimeDelta>& ranges) {
blink::WebTimeRanges result(ranges.size());
- for (size_t i = 0; i < ranges.size(); i++) {
+ for (size_t i = 0; i < ranges.size(); ++i) {
result[i].start = ranges.start(i).InSecondsF();
result[i].end = ranges.end(i).InSecondsF();
}
@@ -71,10 +72,48 @@ blink::WebMediaPlayer::NetworkState PipelineErrorToNetworkState(
return blink::WebMediaPlayer::NetworkStateDecodeError;
case media::PIPELINE_OK:
- case media::PIPELINE_STATUS_MAX:
NOTREACHED() << "Unexpected status! " << error;
}
return blink::WebMediaPlayer::NetworkStateFormatError;
}
+namespace {
+
+// Helper enum for reporting scheme histograms.
+enum URLSchemeForHistogram {
+ kUnknownURLScheme,
+ kMissingURLScheme,
+ kHttpURLScheme,
+ kHttpsURLScheme,
+ kFtpURLScheme,
+ kChromeExtensionURLScheme,
+ kJavascriptURLScheme,
+ kFileURLScheme,
+ kBlobURLScheme,
+ kDataURLScheme,
+ kFileSystemScheme,
+ kMaxURLScheme = kFileSystemScheme // Must be equal to highest enum value.
+};
+
+URLSchemeForHistogram URLScheme(const GURL& url) {
+ if (!url.has_scheme()) return kMissingURLScheme;
+ if (url.SchemeIs("http")) return kHttpURLScheme;
+ if (url.SchemeIs("https")) return kHttpsURLScheme;
+ if (url.SchemeIs("ftp")) return kFtpURLScheme;
+ if (url.SchemeIs("chrome-extension")) return kChromeExtensionURLScheme;
+ if (url.SchemeIs("javascript")) return kJavascriptURLScheme;
+ if (url.SchemeIs("file")) return kFileURLScheme;
+ if (url.SchemeIs("blob")) return kBlobURLScheme;
+ if (url.SchemeIs("data")) return kDataURLScheme;
+ if (url.SchemeIs("filesystem")) return kFileSystemScheme;
+
+ return kUnknownURLScheme;
+}
+
+} // namespace
+
+void ReportMediaSchemeUma(const GURL& url) {
+ UMA_HISTOGRAM_ENUMERATION("Media.URLScheme", URLScheme(url), kMaxURLScheme);
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/webmediaplayer_util.h b/chromium/content/renderer/media/webmediaplayer_util.h
index 3c2dacc6eab..d401c39089c 100644
--- a/chromium/content/renderer/media/webmediaplayer_util.h
+++ b/chromium/content/renderer/media/webmediaplayer_util.h
@@ -10,6 +10,7 @@
#include "media/base/ranges.h"
#include "third_party/WebKit/public/platform/WebMediaPlayer.h"
#include "third_party/WebKit/public/platform/WebTimeRange.h"
+#include "url/gurl.h"
namespace content {
@@ -25,6 +26,9 @@ blink::WebTimeRanges ConvertToWebTimeRanges(
blink::WebMediaPlayer::NetworkState PipelineErrorToNetworkState(
media::PipelineStatus error);
+// Report the scheme of Media URIs.
+void ReportMediaSchemeUma(const GURL& url);
+
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_WEBMEDIAPLAYER_UTIL_H_
diff --git a/chromium/content/renderer/media/webmediasource_impl.cc b/chromium/content/renderer/media/webmediasource_impl.cc
index d1a2dfbc9ed..b3078f7edcd 100644
--- a/chromium/content/renderer/media/webmediasource_impl.cc
+++ b/chromium/content/renderer/media/webmediasource_impl.cc
@@ -41,6 +41,7 @@ WebMediaSource::AddStatus WebMediaSourceImpl::addSourceBuffer(
std::vector<std::string> new_codecs(codecs.size());
for (size_t i = 0; i < codecs.size(); ++i)
new_codecs[i] = codecs[i].utf8().data();
+
WebMediaSource::AddStatus result =
static_cast<WebMediaSource::AddStatus>(
demuxer_->AddId(id, type.utf8().data(), new_codecs));
@@ -73,8 +74,6 @@ void WebMediaSourceImpl::markEndOfStream(
case WebMediaSource::EndOfStreamStatusDecodeError:
pipeline_status = media::PIPELINE_ERROR_DECODE;
break;
- default:
- NOTIMPLEMENTED();
}
demuxer_->MarkEndOfStream(pipeline_status);
diff --git a/chromium/content/renderer/media/webmediasource_impl.h b/chromium/content/renderer/media/webmediasource_impl.h
index 42d5eef9405..c788a663079 100644
--- a/chromium/content/renderer/media/webmediasource_impl.h
+++ b/chromium/content/renderer/media/webmediasource_impl.h
@@ -26,11 +26,11 @@ class WebMediaSourceImpl : public blink::WebMediaSource {
virtual AddStatus addSourceBuffer(
const blink::WebString& type,
const blink::WebVector<blink::WebString>& codecs,
- blink::WebSourceBuffer** source_buffer) OVERRIDE;
- virtual double duration() OVERRIDE;
- virtual void setDuration(double duration) OVERRIDE;
- virtual void markEndOfStream(EndOfStreamStatus status) OVERRIDE;
- virtual void unmarkEndOfStream() OVERRIDE;
+ blink::WebSourceBuffer** source_buffer);
+ virtual double duration();
+ virtual void setDuration(double duration);
+ virtual void markEndOfStream(EndOfStreamStatus status);
+ virtual void unmarkEndOfStream();
private:
media::ChunkDemuxer* demuxer_; // Owned by WebMediaPlayerImpl.
diff --git a/chromium/content/renderer/media/webrtc/DEPS b/chromium/content/renderer/media/webrtc/DEPS
new file mode 100644
index 00000000000..9d1e38610a8
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ # For video cropping and scaling.
+ "+third_party/libyuv",
+]
+
diff --git a/chromium/content/renderer/media/webrtc/OWNERS b/chromium/content/renderer/media/webrtc/OWNERS
index dbd443f2610..7aa733c9da7 100644
--- a/chromium/content/renderer/media/webrtc/OWNERS
+++ b/chromium/content/renderer/media/webrtc/OWNERS
@@ -2,9 +2,8 @@
xians@chromium.org
perkj@chromium.org
hclam@chromium.org
-joi@chromium.org
# To be able to roll new libjingle releases.
ronghuawu@chromium.org
mallinath@chromium.org
-sergeyu@chromium.org \ No newline at end of file
+sergeyu@chromium.org
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc
new file mode 100644
index 00000000000..3b93cdf5d3c
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc
@@ -0,0 +1,193 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/media_stream_remote_video_source.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "content/renderer/media/native_handle_impl.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_frame_pool.h"
+#include "media/base/video_util.h"
+#include "third_party/libjingle/source/talk/media/base/videoframe.h"
+
+namespace content {
+
+// Internal class used for receiving frames from the webrtc track on a
+// libjingle thread and forward it to the IO-thread.
+class MediaStreamRemoteVideoSource::RemoteVideoSourceDelegate
+ : public base::RefCountedThreadSafe<RemoteVideoSourceDelegate>,
+ public webrtc::VideoRendererInterface {
+ public:
+ RemoteVideoSourceDelegate(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop,
+ const VideoCaptureDeliverFrameCB& new_frame_callback);
+
+ protected:
+ friend class base::RefCountedThreadSafe<RemoteVideoSourceDelegate>;
+ virtual ~RemoteVideoSourceDelegate();
+
+ // Implements webrtc::VideoRendererInterface used for receiving video frames
+ // from the PeerConnection video track. May be called on a libjingle internal
+ // thread.
+ virtual void SetSize(int width, int height) OVERRIDE;
+ virtual void RenderFrame(const cricket::VideoFrame* frame) OVERRIDE;
+
+ void DoRenderFrameOnIOThread(scoped_refptr<media::VideoFrame> video_frame,
+ const media::VideoCaptureFormat& format);
+ private:
+ // Bound to the render thread.
+ base::ThreadChecker thread_checker_;
+
+ scoped_refptr<base::MessageLoopProxy> io_message_loop_;
+ // |frame_pool_| is only accessed on whatever
+ // thread webrtc::VideoRendererInterface::RenderFrame is called on.
+ media::VideoFramePool frame_pool_;
+
+ // |frame_callback_| is accessed on the IO thread.
+ VideoCaptureDeliverFrameCB frame_callback_;
+};
+
+MediaStreamRemoteVideoSource::
+RemoteVideoSourceDelegate::RemoteVideoSourceDelegate(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop,
+ const VideoCaptureDeliverFrameCB& new_frame_callback)
+ : io_message_loop_(io_message_loop),
+ frame_callback_(new_frame_callback) {
+}
+
+MediaStreamRemoteVideoSource::
+RemoteVideoSourceDelegate::~RemoteVideoSourceDelegate() {
+}
+
+void MediaStreamRemoteVideoSource::
+RemoteVideoSourceDelegate::SetSize(int width, int height) {
+}
+
+void MediaStreamRemoteVideoSource::
+RemoteVideoSourceDelegate::RenderFrame(
+ const cricket::VideoFrame* frame) {
+ base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(
+ frame->GetElapsedTime() / talk_base::kNumNanosecsPerMicrosec);
+
+ scoped_refptr<media::VideoFrame> video_frame;
+ if (frame->GetNativeHandle() != NULL) {
+ NativeHandleImpl* handle =
+ static_cast<NativeHandleImpl*>(frame->GetNativeHandle());
+ video_frame = static_cast<media::VideoFrame*>(handle->GetHandle());
+ video_frame->set_timestamp(timestamp);
+ } else {
+ gfx::Size size(frame->GetWidth(), frame->GetHeight());
+ video_frame = frame_pool_.CreateFrame(
+ media::VideoFrame::YV12, size, gfx::Rect(size), size, timestamp);
+
+ // Non-square pixels are unsupported.
+ DCHECK_EQ(frame->GetPixelWidth(), 1u);
+ DCHECK_EQ(frame->GetPixelHeight(), 1u);
+
+ int y_rows = frame->GetHeight();
+ int uv_rows = frame->GetChromaHeight();
+ CopyYPlane(
+ frame->GetYPlane(), frame->GetYPitch(), y_rows, video_frame.get());
+ CopyUPlane(
+ frame->GetUPlane(), frame->GetUPitch(), uv_rows, video_frame.get());
+ CopyVPlane(
+ frame->GetVPlane(), frame->GetVPitch(), uv_rows, video_frame.get());
+ }
+
+ media::VideoPixelFormat pixel_format =
+ (video_frame->format() == media::VideoFrame::YV12) ?
+ media::PIXEL_FORMAT_YV12 : media::PIXEL_FORMAT_TEXTURE;
+
+ media::VideoCaptureFormat format(
+ gfx::Size(video_frame->natural_size().width(),
+ video_frame->natural_size().height()),
+ MediaStreamVideoSource::kDefaultFrameRate,
+ pixel_format);
+
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RemoteVideoSourceDelegate::DoRenderFrameOnIOThread,
+ this, video_frame, format));
+}
+
+void MediaStreamRemoteVideoSource::
+RemoteVideoSourceDelegate::DoRenderFrameOnIOThread(
+ scoped_refptr<media::VideoFrame> video_frame,
+ const media::VideoCaptureFormat& format) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ // TODO(hclam): Give the estimated capture time.
+ frame_callback_.Run(video_frame, format, base::TimeTicks());
+}
+
+MediaStreamRemoteVideoSource::MediaStreamRemoteVideoSource(
+ webrtc::VideoTrackInterface* remote_track)
+ : remote_track_(remote_track),
+ last_state_(remote_track->state()) {
+ remote_track_->RegisterObserver(this);
+}
+
+MediaStreamRemoteVideoSource::~MediaStreamRemoteVideoSource() {
+ remote_track_->UnregisterObserver(this);
+}
+
+void MediaStreamRemoteVideoSource::GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ media::VideoCaptureFormats formats;
+ // Since the remote end is free to change the resolution at any point in time
+ // the supported formats are unknown.
+ callback.Run(formats);
+}
+
+void MediaStreamRemoteVideoSource::StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!delegate_);
+ delegate_ = new RemoteVideoSourceDelegate(io_message_loop(), frame_callback);
+ remote_track_->AddRenderer(delegate_);
+ OnStartDone(true);
+}
+
+void MediaStreamRemoteVideoSource::StopSourceImpl() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(state() != MediaStreamVideoSource::ENDED);
+ remote_track_->RemoveRenderer(delegate_);
+}
+
+webrtc::VideoRendererInterface*
+MediaStreamRemoteVideoSource::RenderInterfaceForTest() {
+ return delegate_;
+}
+
+void MediaStreamRemoteVideoSource::OnChanged() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ webrtc::MediaStreamTrackInterface::TrackState state = remote_track_->state();
+ if (state != last_state_) {
+ last_state_ = state;
+ switch (state) {
+ case webrtc::MediaStreamTrackInterface::kInitializing:
+ // Ignore the kInitializing state since there is no match in
+ // WebMediaStreamSource::ReadyState.
+ break;
+ case webrtc::MediaStreamTrackInterface::kLive:
+ SetReadyState(blink::WebMediaStreamSource::ReadyStateLive);
+ break;
+ case webrtc::MediaStreamTrackInterface::kEnded:
+ SetReadyState(blink::WebMediaStreamSource::ReadyStateEnded);
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.h b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.h
new file mode 100644
index 00000000000..76041ee8d79
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.h
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_REMOTE_VIDEO_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_REMOTE_VIDEO_SOURCE_H_
+
+#include "base/threading/thread_checker.h"
+#include "content/common/content_export.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+// MediaStreamRemoteVideoSource implements the MediaStreamVideoSource interface
+// for video tracks received on a PeerConnection. The purpose of the class is
+// to make sure there is no difference between a video track where the source is
+// a local source and a video track where the source is a remote video track.
+class CONTENT_EXPORT MediaStreamRemoteVideoSource
+    : public MediaStreamVideoSource,
+      NON_EXPORTED_BASE(public webrtc::ObserverInterface) {
+ public:
+  explicit MediaStreamRemoteVideoSource(
+      webrtc::VideoTrackInterface* remote_track);
+  virtual ~MediaStreamRemoteVideoSource();
+
+ protected:
+  // Implements MediaStreamVideoSource.
+  virtual void GetCurrentSupportedFormats(
+      int max_requested_width,
+      int max_requested_height,
+      const VideoCaptureDeviceFormatsCB& callback) OVERRIDE;
+
+  virtual void StartSourceImpl(
+      const media::VideoCaptureParams& params,
+      const VideoCaptureDeliverFrameCB& frame_callback) OVERRIDE;
+
+  virtual void StopSourceImpl() OVERRIDE;
+
+  // Used by tests to test that a frame can be received and that the
+  // MediaStreamRemoteVideoSource behaves as expected.
+  webrtc::VideoRendererInterface* RenderInterfaceForTest();
+
+ private:
+  // webrtc::ObserverInterface implementation.
+  virtual void OnChanged() OVERRIDE;
+
+  // The PeerConnection track this source wraps.
+  scoped_refptr<webrtc::VideoTrackInterface> remote_track_;
+  // Last track state observed; used to suppress duplicate notifications.
+  webrtc::MediaStreamTrackInterface::TrackState last_state_;
+
+  // Internal class used for receiving frames from the webrtc track on a
+  // libjingle thread and forward it to the IO-thread.
+  class RemoteVideoSourceDelegate;
+  scoped_refptr<RemoteVideoSourceDelegate> delegate_;
+
+  // Bound to the render thread.
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(MediaStreamRemoteVideoSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_REMOTE_VIDEO_SOURCE_H_
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc
new file mode 100644
index 00000000000..c6b50ec25eb
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_stream_video_sink.h"
+#include "content/renderer/media/webrtc/media_stream_remote_video_source.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "media/base/video_frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.h"
+
+namespace content {
+
+// Action that runs |closure| when the expectation fires; used below to quit
+// the run loop once a frame has been delivered to the sink.
+// NOTE(review): ACTION_P is a gmock macro, but only gtest is included
+// directly here -- confirm gmock is reachable via an included header.
+ACTION_P(RunClosure, closure) {
+  closure.Run();
+}
+
+// Subclass that exposes the protected RenderInterfaceForTest() accessor so
+// tests can inject frames directly into the source.
+class MediaStreamRemoteVideoSourceUnderTest
+    : public MediaStreamRemoteVideoSource {
+ public:
+  MediaStreamRemoteVideoSourceUnderTest(webrtc::VideoTrackInterface* track)
+      : MediaStreamRemoteVideoSource(track) {
+  }
+  using MediaStreamRemoteVideoSource::RenderInterfaceForTest;
+};
+
+// Fixture that wires a mock webrtc video track into a
+// MediaStreamRemoteVideoSource owned by a blink WebMediaStreamSource.
+class MediaStreamRemoteVideoSourceTest
+    : public ::testing::Test {
+ public:
+  MediaStreamRemoteVideoSourceTest()
+      : child_process_(new ChildProcess()),
+        mock_factory_(new MockPeerConnectionDependencyFactory()),
+        webrtc_video_track_(
+            mock_factory_->CreateLocalVideoTrack(
+                "test",
+                static_cast<cricket::VideoCapturer*>(NULL))),
+        remote_source_(
+            new MediaStreamRemoteVideoSourceUnderTest(webrtc_video_track_)),
+        number_of_successful_constraints_applied_(0),
+        number_of_failed_constraints_applied_(0) {
+    webkit_source_.initialize(base::UTF8ToUTF16("dummy_source_id"),
+                              blink::WebMediaStreamSource::TypeVideo,
+                              base::UTF8ToUTF16("dummy_source_name"));
+    // setExtraData transfers ownership of |remote_source_| to the
+    // blink source.
+    webkit_source_.setExtraData(remote_source_);
+  }
+
+  MediaStreamRemoteVideoSourceUnderTest* source() {
+    return remote_source_;
+  }
+
+  // Creates a track backed by source(); the caller takes ownership.
+  MediaStreamVideoTrack* CreateTrack() {
+    bool enabled = true;
+    blink::WebMediaConstraints constraints;
+    constraints.initialize();
+    return new MediaStreamVideoTrack(
+        source(),
+        constraints,
+        base::Bind(
+            &MediaStreamRemoteVideoSourceTest::OnConstraintsApplied,
+            base::Unretained(this)),
+        enabled);
+  }
+
+  int NumberOfSuccessConstraintsCallbacks() const {
+    return number_of_successful_constraints_applied_;
+  }
+
+  int NumberOfFailedConstraintsCallbacks() const {
+    return number_of_failed_constraints_applied_;
+  }
+
+  // Simulates the remote peer ending the track.
+  void StopWebRtcTrack() {
+    static_cast<MockWebRtcVideoTrack*>(webrtc_video_track_.get())->set_state(
+        webrtc::MediaStreamTrackInterface::kEnded);
+  }
+
+  const blink::WebMediaStreamSource& webkit_source() const {
+    return webkit_source_;
+  }
+
+ private:
+  // Counts constraint-application results delivered by the track.
+  void OnConstraintsApplied(MediaStreamSource* source, bool success) {
+    ASSERT_EQ(source, remote_source_);
+    if (success)
+      ++number_of_successful_constraints_applied_;
+    else
+      ++number_of_failed_constraints_applied_;
+  }
+
+  base::MessageLoopForUI message_loop_;
+  scoped_ptr<ChildProcess> child_process_;
+  scoped_ptr<MockPeerConnectionDependencyFactory> mock_factory_;
+  scoped_refptr<webrtc::VideoTrackInterface> webrtc_video_track_;
+  // |remote_source_| is owned by |webkit_source_|.
+  MediaStreamRemoteVideoSourceUnderTest* remote_source_;
+  blink::WebMediaStreamSource webkit_source_;
+  int number_of_successful_constraints_applied_;
+  int number_of_failed_constraints_applied_;
+};
+
+// Verifies that a frame injected through the source's render interface is
+// delivered to a sink attached to a track on that source.
+TEST_F(MediaStreamRemoteVideoSourceTest, StartTrack) {
+  scoped_ptr<MediaStreamVideoTrack> track(CreateTrack());
+  EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
+
+  MockMediaStreamVideoSink sink;
+  track->AddSink(&sink, sink.GetDeliverFrameCB());
+  base::RunLoop run_loop;
+  base::Closure quit_closure = run_loop.QuitClosure();
+  // Frame delivery hops to the IO thread, so spin the loop until the sink
+  // reports a frame.
+  EXPECT_CALL(sink, OnVideoFrame()).WillOnce(
+      RunClosure(quit_closure));
+  cricket::WebRtcVideoFrame webrtc_frame;
+  webrtc_frame.InitToBlack(320, 240, 1, 1, 0, 1);
+  source()->RenderInterfaceForTest()->RenderFrame(&webrtc_frame);
+  run_loop.Run();
+
+  EXPECT_EQ(1, sink.number_of_frames());
+  track->RemoveSink(&sink);
+}
+
+// Verifies that ending the remote webrtc track propagates ReadyStateEnded to
+// both the blink source and attached sinks.
+TEST_F(MediaStreamRemoteVideoSourceTest, RemoteTrackStop) {
+  scoped_ptr<MediaStreamVideoTrack> track(CreateTrack());
+
+  MockMediaStreamVideoSink sink;
+  track->AddSink(&sink, sink.GetDeliverFrameCB());
+  EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive, sink.state());
+  EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive,
+            webkit_source().readyState());
+  StopWebRtcTrack();
+  EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded,
+            webkit_source().readyState());
+  EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateEnded, sink.state());
+
+  track->RemoveSink(&sink);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc b/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc
new file mode 100644
index 00000000000..736cac3ffc0
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc
@@ -0,0 +1,332 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/media_stream_track_metrics.h"
+
+#include <inttypes.h>
+#include <set>
+#include <string>
+
+#include "base/md5.h"
+#include "content/common/media/media_stream_track_metrics_host_messages.h"
+#include "content/renderer/render_thread_impl.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+using webrtc::AudioTrackVector;
+using webrtc::MediaStreamInterface;
+using webrtc::MediaStreamTrackInterface;
+using webrtc::PeerConnectionInterface;
+using webrtc::VideoTrackVector;
+
+namespace content {
+
+// Watches one MediaStream for track additions/removals and forwards
+// per-track lifetime events to its owning MediaStreamTrackMetrics.
+class MediaStreamTrackMetricsObserver : public webrtc::ObserverInterface {
+ public:
+  MediaStreamTrackMetricsObserver(
+      MediaStreamTrackMetrics::StreamType stream_type,
+      MediaStreamInterface* stream,
+      MediaStreamTrackMetrics* owner);
+  virtual ~MediaStreamTrackMetricsObserver();
+
+  // Sends begin/end messages for all tracks currently tracked.
+  void SendLifetimeMessages(MediaStreamTrackMetrics::LifetimeEvent event);
+
+  // Accessors used to identify this observer (see ObserverFinder).
+  MediaStreamInterface* stream() { return stream_; }
+  MediaStreamTrackMetrics::StreamType stream_type() { return stream_type_; }
+
+ private:
+  typedef std::set<std::string> IdSet;
+
+  // webrtc::ObserverInterface implementation.
+  virtual void OnChanged() OVERRIDE;
+
+  // Collects the IDs of |tracks| (audio or video) into a set.
+  template <class T>
+  IdSet GetTrackIds(const std::vector<talk_base::scoped_refptr<T> >& tracks) {
+    IdSet track_ids;
+    typename std::vector<talk_base::scoped_refptr<T> >::const_iterator it =
+        tracks.begin();
+    for (; it != tracks.end(); ++it) {
+      track_ids.insert((*it)->id());
+    }
+    return track_ids;
+  }
+
+  void ReportAddedAndRemovedTracks(
+      const IdSet& new_ids,
+      const IdSet& old_ids,
+      MediaStreamTrackMetrics::TrackType track_type);
+
+  // Sends a lifetime message for the given tracks. OK to call with an
+  // empty |ids|, in which case the method has no side effects.
+  void ReportTracks(const IdSet& ids,
+                    MediaStreamTrackMetrics::TrackType track_type,
+                    MediaStreamTrackMetrics::LifetimeEvent event);
+
+  // False until start/end of lifetime messages have been sent.
+  bool has_reported_start_;
+  bool has_reported_end_;
+
+  // IDs of audio and video tracks in the stream being observed.
+  IdSet audio_track_ids_;
+  IdSet video_track_ids_;
+
+  MediaStreamTrackMetrics::StreamType stream_type_;
+  talk_base::scoped_refptr<MediaStreamInterface> stream_;
+
+  // Non-owning.
+  MediaStreamTrackMetrics* owner_;
+};
+
+namespace {
+
+// Predicate for std::find_if: matches the observer that is watching
+// |stream| for the given |stream_type|.
+struct ObserverFinder {
+  ObserverFinder(MediaStreamTrackMetrics::StreamType stream_type,
+                 MediaStreamInterface* stream)
+      : stream_type_(stream_type), stream_(stream) {}
+  bool operator()(MediaStreamTrackMetricsObserver* observer) {
+    return stream_ == observer->stream() &&
+           stream_type_ == observer->stream_type();
+  }
+  // Members use the trailing-underscore convention consistently.
+  MediaStreamTrackMetrics::StreamType stream_type_;
+  MediaStreamInterface* stream_;
+};
+
+}  // namespace
+
+MediaStreamTrackMetricsObserver::MediaStreamTrackMetricsObserver(
+    MediaStreamTrackMetrics::StreamType stream_type,
+    MediaStreamInterface* stream,
+    MediaStreamTrackMetrics* owner)
+    : has_reported_start_(false),
+      has_reported_end_(false),
+      stream_type_(stream_type),
+      stream_(stream),
+      owner_(owner) {
+  // Snapshot the tracks already present in |stream| before subscribing to
+  // change notifications.
+  OnChanged();  // To populate initial tracks.
+  stream_->RegisterObserver(this);
+}
+
+MediaStreamTrackMetricsObserver::~MediaStreamTrackMetricsObserver() {
+  stream_->UnregisterObserver(this);
+  // Make sure end-of-lifetime is reported for anything still connected;
+  // SendLifetimeMessages is a no-op if end was already reported.
+  SendLifetimeMessages(MediaStreamTrackMetrics::DISCONNECTED);
+}
+
+// Sends a lifetime message for every tracked audio/video track, with
+// dedup logic so each begin/end is reported at most once per connection.
+void MediaStreamTrackMetricsObserver::SendLifetimeMessages(
+    MediaStreamTrackMetrics::LifetimeEvent event) {
+  if (event == MediaStreamTrackMetrics::CONNECTED) {
+    // Both ICE CONNECTED and COMPLETED can trigger the first
+    // start-of-life event, so we only report the first.
+    if (has_reported_start_)
+      return;
+    DCHECK(!has_reported_start_ && !has_reported_end_);
+    has_reported_start_ = true;
+  } else {
+    DCHECK(event == MediaStreamTrackMetrics::DISCONNECTED);
+
+    // We only report the first end-of-life event, since there are
+    // several cases where end-of-life can be reached. We also don't
+    // report end unless we've reported start.
+    if (has_reported_end_ || !has_reported_start_)
+      return;
+    has_reported_end_ = true;
+  }
+
+  ReportTracks(audio_track_ids_, MediaStreamTrackMetrics::AUDIO_TRACK, event);
+  ReportTracks(video_track_ids_, MediaStreamTrackMetrics::VIDEO_TRACK, event);
+
+  if (event == MediaStreamTrackMetrics::DISCONNECTED) {
+    // After disconnection, we can get reconnected, so we need to
+    // forget that we've sent lifetime events, while retaining all
+    // other state.
+    DCHECK(has_reported_start_ && has_reported_end_);
+    has_reported_start_ = false;
+    has_reported_end_ = false;
+  }
+}
+
+// Called by webrtc when the observed stream changes; diffs the current
+// track sets against the previously-seen ones.
+void MediaStreamTrackMetricsObserver::OnChanged() {
+  // Take fresh snapshots of the track IDs currently in the stream.
+  IdSet current_audio_ids = GetTrackIds(stream_->GetAudioTracks());
+  IdSet current_video_ids = GetTrackIds(stream_->GetVideoTracks());
+
+  // Changes are only reported while connected, i.e. after the initial
+  // lifetime report and before the final one.
+  if (has_reported_start_ && !has_reported_end_) {
+    ReportAddedAndRemovedTracks(current_audio_ids,
+                                audio_track_ids_,
+                                MediaStreamTrackMetrics::AUDIO_TRACK);
+    ReportAddedAndRemovedTracks(current_video_ids,
+                                video_track_ids_,
+                                MediaStreamTrackMetrics::VIDEO_TRACK);
+  }
+
+  // Remember the new snapshots regardless of whether they were reported.
+  audio_track_ids_ = current_audio_ids;
+  video_track_ids_ = current_video_ids;
+}
+
+void MediaStreamTrackMetricsObserver::ReportAddedAndRemovedTracks(
+    const IdSet& new_ids,
+    const IdSet& old_ids,
+    MediaStreamTrackMetrics::TrackType track_type) {
+  DCHECK(has_reported_start_ && !has_reported_end_);
+
+  // Tracks in |new_ids| but not |old_ids| were added, and vice versa.
+  // NOTE(review): base::STLSetDifference lives in base/stl_util.h, which is
+  // not included directly here -- confirm it is reachable transitively.
+  IdSet added_tracks = base::STLSetDifference<IdSet>(new_ids, old_ids);
+  IdSet removed_tracks = base::STLSetDifference<IdSet>(old_ids, new_ids);
+
+  ReportTracks(added_tracks, track_type, MediaStreamTrackMetrics::CONNECTED);
+  ReportTracks(
+      removed_tracks, track_type, MediaStreamTrackMetrics::DISCONNECTED);
+}
+
+// Forwards one lifetime message per track ID to the owner; a no-op for an
+// empty |ids| set.
+void MediaStreamTrackMetricsObserver::ReportTracks(
+    const IdSet& ids,
+    MediaStreamTrackMetrics::TrackType track_type,
+    MediaStreamTrackMetrics::LifetimeEvent event) {
+  IdSet::const_iterator id_it = ids.begin();
+  const IdSet::const_iterator id_end = ids.end();
+  while (id_it != id_end) {
+    owner_->SendLifetimeMessage(*id_it, track_type, event, stream_type_);
+    ++id_it;
+  }
+}
+
+// Starts in kIceConnectionNew: no lifetime messages are sent until an ICE
+// state change indicates the connection is up.
+MediaStreamTrackMetrics::MediaStreamTrackMetrics()
+    : ice_state_(webrtc::PeerConnectionInterface::kIceConnectionNew) {}
+
+MediaStreamTrackMetrics::~MediaStreamTrackMetrics() {
+  // Flush end-of-lifetime messages for all observed streams; the observers
+  // themselves are deleted when |observers_| (a ScopedVector) goes away.
+  for (ObserverVector::iterator it = observers_.begin(); it != observers_.end();
+       ++it) {
+    (*it)->SendLifetimeMessages(DISCONNECTED);
+  }
+}
+
+// Begins observing |stream|; ownership of the new observer is transferred
+// to |observers_|.
+void MediaStreamTrackMetrics::AddStream(StreamType type,
+                                        MediaStreamInterface* stream) {
+  DCHECK(CalledOnValidThread());
+  MediaStreamTrackMetricsObserver* observer =
+      new MediaStreamTrackMetricsObserver(type, stream, this);
+  observers_.push_back(observer);
+  SendLifeTimeMessageDependingOnIceState(observer);
+}
+
+void MediaStreamTrackMetrics::RemoveStream(StreamType type,
+                                           MediaStreamInterface* stream) {
+  DCHECK(CalledOnValidThread());
+  ObserverVector::iterator it = std::find_if(
+      observers_.begin(), observers_.end(), ObserverFinder(type, stream));
+  if (it == observers_.end()) {
+    // Since external apps could call removeStream with a stream they
+    // never added, this can happen without it being an error.
+    return;
+  }
+
+  // erase() on a ScopedVector also deletes the observer, whose destructor
+  // sends any pending end-of-lifetime messages.
+  observers_.erase(it);
+}
+
+// Records the new ICE state and (re-)evaluates lifetime reporting for every
+// observed stream.
+void MediaStreamTrackMetrics::IceConnectionChange(
+    PeerConnectionInterface::IceConnectionState new_state) {
+  DCHECK(CalledOnValidThread());
+  ice_state_ = new_state;
+  for (ObserverVector::iterator it = observers_.begin(); it != observers_.end();
+       ++it) {
+    SendLifeTimeMessageDependingOnIceState(*it);
+  }
+}
+// Translates |ice_state_| into a CONNECTED or DISCONNECTED lifetime event
+// for |observer|, or does nothing for states that don't affect lifetimes.
+void MediaStreamTrackMetrics::SendLifeTimeMessageDependingOnIceState(
+    MediaStreamTrackMetricsObserver* observer) {
+  // There is a state transition diagram for these states at
+  // http://dev.w3.org/2011/webrtc/editor/webrtc.html#idl-def-RTCIceConnectionState
+  switch (ice_state_) {
+    case PeerConnectionInterface::kIceConnectionConnected:
+    case PeerConnectionInterface::kIceConnectionCompleted:
+      observer->SendLifetimeMessages(CONNECTED);
+      break;
+
+    case PeerConnectionInterface::kIceConnectionFailed:
+      // We don't really need to handle FAILED (it is only supposed
+      // to be preceded by CHECKING so we wouldn't yet have sent a
+      // lifetime message) but we might as well use belt and
+      // suspenders and handle it the same as the other "end call"
+      // states. It will be ignored anyway if the call is not
+      // already connected.
+    case PeerConnectionInterface::kIceConnectionNew:
+      // It's a bit weird to count NEW as an end-lifetime event, but
+      // it's possible to transition directly from a connected state
+      // (CONNECTED or COMPLETED) to NEW, which can then be followed
+      // by a new connection. The observer will ignore the end
+      // lifetime event if it was not preceded by a begin-lifetime
+      // event.
+    case PeerConnectionInterface::kIceConnectionDisconnected:
+    case PeerConnectionInterface::kIceConnectionClosed:
+      observer->SendLifetimeMessages(DISCONNECTED);
+      break;
+
+    default:
+      // We ignore the remaining state (CHECKING) as it is never
+      // involved in a transition from connected to disconnected or
+      // vice versa.
+      break;
+  }
+}
+
+void MediaStreamTrackMetrics::SendLifetimeMessage(const std::string& track_id,
+                                                  TrackType track_type,
+                                                  LifetimeEvent event,
+                                                  StreamType stream_type) {
+  RenderThreadImpl* render_thread = RenderThreadImpl::current();
+  // |render_thread| can be NULL in certain cases when running as part
+  // of a unit test.
+  if (render_thread) {
+    // Use the cached pointer rather than re-fetching the thread-local
+    // current() for each Send().
+    if (event == CONNECTED) {
+      render_thread->Send(
+          new MediaStreamTrackMetricsHost_AddTrack(
+              MakeUniqueId(track_id, stream_type),
+              track_type == AUDIO_TRACK,
+              stream_type == RECEIVED_STREAM));
+    } else {
+      DCHECK_EQ(DISCONNECTED, event);
+      render_thread->Send(
+          new MediaStreamTrackMetricsHost_RemoveTrack(
+              MakeUniqueId(track_id, stream_type)));
+    }
+  }
+}
+
+uint64 MediaStreamTrackMetrics::MakeUniqueIdImpl(uint64 pc_id,
+                                                 const std::string& track_id,
+                                                 StreamType stream_type) {
+  // We use a hash over the |track| pointer and the PeerConnection ID,
+  // plus a boolean flag indicating whether the track is remote (since
+  // you might conceivably have a remote track added back as a sent
+  // track) as the unique ID.
+  //
+  // We don't need a cryptographically secure hash (which MD5 should
+  // no longer be considered), just one with virtually zero chance of
+  // collisions when faced with non-malicious data.
+  std::string unique_id_string =
+      base::StringPrintf("%" PRIu64 " %s %d",
+                         pc_id,
+                         track_id.c_str(),
+                         stream_type == RECEIVED_STREAM ? 1 : 0);
+
+  base::MD5Context ctx;
+  base::MD5Init(&ctx);
+  base::MD5Update(&ctx, unique_id_string);
+  base::MD5Digest digest;
+  base::MD5Final(&digest, &ctx);
+
+  // Truncate the 16-byte MD5 digest to 64 bits; the assert guarantees the
+  // read below stays within the digest buffer.
+  COMPILE_ASSERT(sizeof(digest.a) > sizeof(uint64), NeedBiggerDigest);
+  return *reinterpret_cast<uint64*>(digest.a);
+}
+
+uint64 MediaStreamTrackMetrics::MakeUniqueId(const std::string& track_id,
+                                             StreamType stream_type) {
+  // |this| uniquely identifies the PeerConnection, since there is exactly
+  // one MediaStreamTrackMetrics per RTCPeerConnectionHandler.
+  return MakeUniqueIdImpl(
+      reinterpret_cast<uint64>(reinterpret_cast<void*>(this)),
+      track_id,
+      stream_type);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/media_stream_track_metrics.h b/chromium/content/renderer/media/webrtc/media_stream_track_metrics.h
new file mode 100644
index 00000000000..aaf669d0d0c
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/media_stream_track_metrics.h
@@ -0,0 +1,101 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_TRACK_METRICS_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_TRACK_METRICS_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_vector.h"
+#include "base/threading/non_thread_safe.h"
+#include "content/common/content_export.h"
+#include "third_party/libjingle/source/talk/app/webrtc/peerconnectioninterface.h"
+
+namespace webrtc {
+class MediaStreamInterface;
+class MediaStreamTrackInterface;
+}
+
+namespace content {
+
+class MediaStreamTrackMetricsObserver;
+class RTCPeerConnectionHandler;
+
+// Responsible for observing the connected lifetimes of tracks going
+// over a PeerConnection, and sending messages to the browser process
+// about lifetime events.
+//
+// There should be exactly one of these objects owned by each
+// RTCPeerConnectionHandler, and its lifetime should match the
+// lifetime of its owner.
+class CONTENT_EXPORT MediaStreamTrackMetrics : public base::NonThreadSafe {
+ public:
+  // NOTE: |explicit| is meaningless on a zero-argument constructor, so it
+  // is intentionally omitted.
+  MediaStreamTrackMetrics();
+  ~MediaStreamTrackMetrics();
+
+  enum StreamType { SENT_STREAM, RECEIVED_STREAM };
+
+  enum TrackType { AUDIO_TRACK, VIDEO_TRACK };
+
+  enum LifetimeEvent { CONNECTED, DISCONNECTED };
+
+  // Starts tracking lifetimes of all the tracks in |stream| and any
+  // tracks added or removed to/from the stream until |RemoveStream|
+  // is called or this object's lifetime ends.
+  void AddStream(StreamType type, webrtc::MediaStreamInterface* stream);
+
+  // Stops tracking lifetimes of tracks in |stream|.
+  void RemoveStream(StreamType type, webrtc::MediaStreamInterface* stream);
+
+  // Called to indicate changes in the ICE connection state for the
+  // PeerConnection this object is associated with. Used to generate
+  // the connected/disconnected lifetime events for these tracks.
+  void IceConnectionChange(
+      webrtc::PeerConnectionInterface::IceConnectionState new_state);
+
+  // Send a lifetime message to the browser process. Virtual so that
+  // it can be overridden in unit tests.
+  //
+  // |track_id| is the ID of the track that just got connected or
+  // disconnected.
+  //
+  // |track_type| is AUDIO_TRACK for an audio track, VIDEO_TRACK for a
+  // video track.
+  //
+  // |lifetime_event| is CONNECTED to indicate that the track just got
+  // connected, DISCONNECTED to indicate it is no longer connected.
+  //
+  // |stream_type| is RECEIVED_STREAM for remote streams (received
+  // over a PeerConnection), SENT_STREAM for local streams (sent over
+  // a PeerConnection).
+  virtual void SendLifetimeMessage(const std::string& track_id,
+                                   TrackType track_type,
+                                   LifetimeEvent lifetime_event,
+                                   StreamType stream_type);
+
+ protected:
+  // Calls SendLifetimeMessage for |observer| depending on |ice_state_|.
+  void SendLifeTimeMessageDependingOnIceState(
+      MediaStreamTrackMetricsObserver* observer);
+
+  // Implements MakeUniqueId. |pc_id| is a cast of this object's
+  // |this| pointer to a 64-bit integer, which is usable as a unique
+  // ID for the PeerConnection this object is attached to (since there
+  // is a one-to-one relationship).
+  uint64 MakeUniqueIdImpl(uint64 pc_id,
+                          const std::string& track,
+                          StreamType stream_type);
+
+ private:
+  // Make a unique ID for the given track, that is valid while the
+  // track object and the PeerConnection it is attached to both exist.
+  uint64 MakeUniqueId(const std::string& track, StreamType stream_type);
+
+  typedef ScopedVector<MediaStreamTrackMetricsObserver> ObserverVector;
+  ObserverVector observers_;
+
+  // Last ICE state seen via IceConnectionChange().
+  webrtc::PeerConnectionInterface::IceConnectionState ice_state_;
+};
+
+} // namespace
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_TRACK_METRICS_H_
diff --git a/chromium/content/renderer/media/webrtc/media_stream_track_metrics_unittest.cc b/chromium/content/renderer/media/webrtc/media_stream_track_metrics_unittest.cc
new file mode 100644
index 00000000000..343ab3033ec
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/media_stream_track_metrics_unittest.cc
@@ -0,0 +1,543 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/media_stream_track_metrics.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+using webrtc::AudioSourceInterface;
+using webrtc::AudioTrackInterface;
+using webrtc::AudioTrackSinkInterface;
+using webrtc::MediaStreamInterface;
+using webrtc::ObserverInterface;
+using webrtc::PeerConnectionInterface;
+using webrtc::VideoRendererInterface;
+using webrtc::VideoSourceInterface;
+using webrtc::VideoTrackInterface;
+
+namespace content {
+
+// A very simple mock that implements only the id() method.
+// A very simple mock that implements only the id() method.
+class MockAudioTrackInterface : public AudioTrackInterface {
+ public:
+  explicit MockAudioTrackInterface(const std::string& id) : id_(id) {}
+  virtual ~MockAudioTrackInterface() {}
+
+  virtual std::string id() const OVERRIDE { return id_; }
+
+  // All other interface methods are uninteresting gmock stubs.
+  MOCK_METHOD1(RegisterObserver, void(ObserverInterface*));
+  MOCK_METHOD1(UnregisterObserver, void(ObserverInterface*));
+  MOCK_CONST_METHOD0(kind, std::string());
+  MOCK_CONST_METHOD0(enabled, bool());
+  MOCK_CONST_METHOD0(state, TrackState());
+  MOCK_METHOD1(set_enabled, bool(bool));
+  MOCK_METHOD1(set_state, bool(TrackState));
+  MOCK_CONST_METHOD0(GetSource, AudioSourceInterface*());
+  MOCK_METHOD1(AddSink, void(AudioTrackSinkInterface*));
+  MOCK_METHOD1(RemoveSink, void(AudioTrackSinkInterface*));
+
+ private:
+  std::string id_;
+};
+
+// A very simple mock that implements only the id() method.
+// A very simple mock that implements only the id() method.
+class MockVideoTrackInterface : public VideoTrackInterface {
+ public:
+  explicit MockVideoTrackInterface(const std::string& id) : id_(id) {}
+  virtual ~MockVideoTrackInterface() {}
+
+  virtual std::string id() const OVERRIDE { return id_; }
+
+  // All other interface methods are uninteresting gmock stubs.
+  MOCK_METHOD1(RegisterObserver, void(ObserverInterface*));
+  MOCK_METHOD1(UnregisterObserver, void(ObserverInterface*));
+  MOCK_CONST_METHOD0(kind, std::string());
+  MOCK_CONST_METHOD0(enabled, bool());
+  MOCK_CONST_METHOD0(state, TrackState());
+  MOCK_METHOD1(set_enabled, bool(bool));
+  MOCK_METHOD1(set_state, bool(TrackState));
+  MOCK_METHOD1(AddRenderer, void(VideoRendererInterface*));
+  MOCK_METHOD1(RemoveRenderer, void(VideoRendererInterface*));
+  MOCK_CONST_METHOD0(GetSource, VideoSourceInterface*());
+
+ private:
+  std::string id_;
+};
+
+// Overrides the virtual SendLifetimeMessage so tests can set expectations on
+// outgoing lifetime events instead of sending IPC.
+class MockMediaStreamTrackMetrics : public MediaStreamTrackMetrics {
+ public:
+  virtual ~MockMediaStreamTrackMetrics() {}
+
+  MOCK_METHOD4(SendLifetimeMessage,
+               void(const std::string&, TrackType, LifetimeEvent, StreamType));
+
+  // Expose the protected ID helper for direct testing.
+  using MediaStreamTrackMetrics::MakeUniqueIdImpl;
+};
+
+// Fixture providing a fresh mock metrics object and mock stream per test.
+class MediaStreamTrackMetricsTest : public testing::Test {
+ public:
+  virtual void SetUp() OVERRIDE {
+    metrics_.reset(new MockMediaStreamTrackMetrics());
+    stream_ = new talk_base::RefCountedObject<MockMediaStream>("stream");
+  }
+
+  virtual void TearDown() OVERRIDE {
+    metrics_.reset();
+    stream_ = NULL;
+  }
+
+  scoped_refptr<MockAudioTrackInterface> MakeAudioTrack(std::string id) {
+    return new talk_base::RefCountedObject<MockAudioTrackInterface>(id);
+  }
+
+  scoped_refptr<MockVideoTrackInterface> MakeVideoTrack(std::string id) {
+    return new talk_base::RefCountedObject<MockVideoTrackInterface>(id);
+  }
+
+  scoped_ptr<MockMediaStreamTrackMetrics> metrics_;
+  scoped_refptr<MediaStreamInterface> stream_;
+};
+
+TEST_F(MediaStreamTrackMetricsTest, MakeUniqueId) {
+  // The important testable properties of the unique ID are that it
+  // should differ when any of the three constituents differ
+  // (PeerConnection pointer, track ID, remote or not). Also, testing
+  // that the implementation does not discard the upper 32 bits of the
+  // PeerConnection pointer is important.
+  //
+  // The important hard-to-test property is that the ID be generated
+  // using a hash function with virtually zero chance of
+  // collisions. We don't test this, we rely on MD5 having this
+  // property.
+
+  // Lower 32 bits the same, upper 32 differ.
+  EXPECT_NE(
+      metrics_->MakeUniqueIdImpl(
+          0x1000000000000001, "x", MediaStreamTrackMetrics::RECEIVED_STREAM),
+      metrics_->MakeUniqueIdImpl(
+          0x2000000000000001, "x", MediaStreamTrackMetrics::RECEIVED_STREAM));
+
+  // Track ID differs.
+  EXPECT_NE(metrics_->MakeUniqueIdImpl(
+                42, "x", MediaStreamTrackMetrics::RECEIVED_STREAM),
+            metrics_->MakeUniqueIdImpl(
+                42, "y", MediaStreamTrackMetrics::RECEIVED_STREAM));
+
+  // Remote vs. local track differs.
+  EXPECT_NE(metrics_->MakeUniqueIdImpl(
+                42, "x", MediaStreamTrackMetrics::RECEIVED_STREAM),
+            metrics_->MakeUniqueIdImpl(
+                42, "x", MediaStreamTrackMetrics::SENT_STREAM));
+}
+
+// ICE connect then disconnect should report CONNECTED then DISCONNECTED for
+// each track of a received stream.
+TEST_F(MediaStreamTrackMetricsTest, BasicRemoteStreams) {
+  scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+  scoped_refptr<MockVideoTrackInterface> video(MakeVideoTrack("video"));
+  stream_->AddTrack(audio);
+  stream_->AddTrack(video);
+  metrics_->AddStream(MediaStreamTrackMetrics::RECEIVED_STREAM, stream_);
+
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("audio",
+                                  MediaStreamTrackMetrics::AUDIO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::RECEIVED_STREAM));
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("video",
+                                  MediaStreamTrackMetrics::VIDEO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::RECEIVED_STREAM));
+  metrics_->IceConnectionChange(
+      PeerConnectionInterface::kIceConnectionConnected);
+
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("audio",
+                                  MediaStreamTrackMetrics::AUDIO_TRACK,
+                                  MediaStreamTrackMetrics::DISCONNECTED,
+                                  MediaStreamTrackMetrics::RECEIVED_STREAM));
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("video",
+                                  MediaStreamTrackMetrics::VIDEO_TRACK,
+                                  MediaStreamTrackMetrics::DISCONNECTED,
+                                  MediaStreamTrackMetrics::RECEIVED_STREAM));
+  metrics_->IceConnectionChange(
+      PeerConnectionInterface::kIceConnectionDisconnected);
+}
+
+// Same as BasicRemoteStreams, but for a sent stream, and ending the call via
+// the FAILED state instead of DISCONNECTED.
+TEST_F(MediaStreamTrackMetricsTest, BasicLocalStreams) {
+  scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+  scoped_refptr<MockVideoTrackInterface> video(MakeVideoTrack("video"));
+  stream_->AddTrack(audio);
+  stream_->AddTrack(video);
+  metrics_->AddStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("audio",
+                                  MediaStreamTrackMetrics::AUDIO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::SENT_STREAM));
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("video",
+                                  MediaStreamTrackMetrics::VIDEO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::SENT_STREAM));
+  metrics_->IceConnectionChange(
+      PeerConnectionInterface::kIceConnectionConnected);
+
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("audio",
+                                  MediaStreamTrackMetrics::AUDIO_TRACK,
+                                  MediaStreamTrackMetrics::DISCONNECTED,
+                                  MediaStreamTrackMetrics::SENT_STREAM));
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("video",
+                                  MediaStreamTrackMetrics::VIDEO_TRACK,
+                                  MediaStreamTrackMetrics::DISCONNECTED,
+                                  MediaStreamTrackMetrics::SENT_STREAM));
+  metrics_->IceConnectionChange(PeerConnectionInterface::kIceConnectionFailed);
+}
+
+// A stream added while ICE is already connected should report CONNECTED for
+// its tracks immediately at AddStream time.
+// NOTE(review): "Afer" in the test name is a typo for "After"; it is left
+// as-is here since renaming would change the gtest filter name.
+TEST_F(MediaStreamTrackMetricsTest, LocalStreamAddedAferIceConnect) {
+  metrics_->IceConnectionChange(
+      PeerConnectionInterface::kIceConnectionConnected);
+
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("audio",
+                                  MediaStreamTrackMetrics::AUDIO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::SENT_STREAM));
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("video",
+                                  MediaStreamTrackMetrics::VIDEO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::SENT_STREAM));
+
+  scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+  scoped_refptr<MockVideoTrackInterface> video(MakeVideoTrack("video"));
+  stream_->AddTrack(audio);
+  stream_->AddTrack(video);
+  metrics_->AddStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+}
+
+// Same as LocalStreamAddedAferIceConnect, for a received stream.
+TEST_F(MediaStreamTrackMetricsTest, RemoteStreamAddedAferIceConnect) {
+  metrics_->IceConnectionChange(
+      PeerConnectionInterface::kIceConnectionConnected);
+
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("audio",
+                                  MediaStreamTrackMetrics::AUDIO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::RECEIVED_STREAM));
+  EXPECT_CALL(*metrics_,
+              SendLifetimeMessage("video",
+                                  MediaStreamTrackMetrics::VIDEO_TRACK,
+                                  MediaStreamTrackMetrics::CONNECTED,
+                                  MediaStreamTrackMetrics::RECEIVED_STREAM));
+
+  scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+  scoped_refptr<MockVideoTrackInterface> video(MakeVideoTrack("video"));
+  stream_->AddTrack(audio);
+  stream_->AddTrack(video);
+  metrics_->AddStream(MediaStreamTrackMetrics::RECEIVED_STREAM, stream_);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, RemoteStreamTrackAdded) {
+ scoped_refptr<MockAudioTrackInterface> initial(MakeAudioTrack("initial"));
+ scoped_refptr<MockAudioTrackInterface> added(MakeAudioTrack("added"));
+ stream_->AddTrack(initial);
+ metrics_->AddStream(MediaStreamTrackMetrics::RECEIVED_STREAM, stream_);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("initial",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("added",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ stream_->AddTrack(added);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("initial",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("added",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ metrics_->IceConnectionChange(PeerConnectionInterface::kIceConnectionFailed);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, LocalStreamTrackRemoved) {
+ scoped_refptr<MockAudioTrackInterface> first(MakeAudioTrack("first"));
+ scoped_refptr<MockAudioTrackInterface> second(MakeAudioTrack("second"));
+ stream_->AddTrack(first);
+ stream_->AddTrack(second);
+ metrics_->AddStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("first",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("second",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("first",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->RemoveTrack(first);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("second",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->IceConnectionChange(PeerConnectionInterface::kIceConnectionFailed);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, LocalStreamModificationsBeforeAndAfter) {
+ scoped_refptr<MockAudioTrackInterface> first(MakeAudioTrack("first"));
+ scoped_refptr<MockAudioTrackInterface> second(MakeAudioTrack("second"));
+ stream_->AddTrack(first);
+ metrics_->AddStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+
+ // This gets added after we start observing, but no lifetime message
+ // should be sent at this point since the call is not connected. It
+ // should get sent only once it gets connected.
+ stream_->AddTrack(second);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("first",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("second",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("first",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("second",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->IceConnectionChange(PeerConnectionInterface::kIceConnectionFailed);
+
+ // This happens after the call is disconnected so no lifetime
+ // message should be sent.
+ stream_->RemoveTrack(first);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, RemoteStreamMultipleDisconnects) {
+ scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+ stream_->AddTrack(audio);
+ metrics_->AddStream(MediaStreamTrackMetrics::RECEIVED_STREAM, stream_);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionDisconnected);
+ metrics_->IceConnectionChange(PeerConnectionInterface::kIceConnectionFailed);
+ stream_->RemoveTrack(audio);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, RemoteStreamConnectDisconnectTwice) {
+ scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+ stream_->AddTrack(audio);
+ metrics_->AddStream(MediaStreamTrackMetrics::RECEIVED_STREAM, stream_);
+
+ for (size_t i = 0; i < 2; ++i) {
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::RECEIVED_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionDisconnected);
+ }
+
+ stream_->RemoveTrack(audio);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, LocalStreamRemovedNoDisconnect) {
+ scoped_refptr<MockAudioTrackInterface> audio(MakeAudioTrack("audio"));
+ scoped_refptr<MockVideoTrackInterface> video(MakeVideoTrack("video"));
+ stream_->AddTrack(audio);
+ stream_->AddTrack(video);
+ metrics_->AddStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->RemoveStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+}
+
+TEST_F(MediaStreamTrackMetricsTest, LocalStreamLargerTest) {
+ scoped_refptr<MockAudioTrackInterface> audio1(MakeAudioTrack("audio1"));
+ scoped_refptr<MockAudioTrackInterface> audio2(MakeAudioTrack("audio2"));
+ scoped_refptr<MockAudioTrackInterface> audio3(MakeAudioTrack("audio3"));
+ scoped_refptr<MockVideoTrackInterface> video1(MakeVideoTrack("video1"));
+ scoped_refptr<MockVideoTrackInterface> video2(MakeVideoTrack("video2"));
+ scoped_refptr<MockVideoTrackInterface> video3(MakeVideoTrack("video3"));
+ stream_->AddTrack(audio1);
+ stream_->AddTrack(video1);
+ metrics_->AddStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio1",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video1",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->IceConnectionChange(
+ PeerConnectionInterface::kIceConnectionConnected);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio2",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->AddTrack(audio2);
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video2",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->AddTrack(video2);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio1",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->RemoveTrack(audio1);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio3",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->AddTrack(audio3);
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video3",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->AddTrack(video3);
+
+ // Add back audio1
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio1",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::CONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->AddTrack(audio1);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio2",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->RemoveTrack(audio2);
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video2",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->RemoveTrack(video2);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio1",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->RemoveTrack(audio1);
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video1",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ stream_->RemoveTrack(video1);
+
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("audio3",
+ MediaStreamTrackMetrics::AUDIO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ EXPECT_CALL(*metrics_,
+ SendLifetimeMessage("video3",
+ MediaStreamTrackMetrics::VIDEO_TRACK,
+ MediaStreamTrackMetrics::DISCONNECTED,
+ MediaStreamTrackMetrics::SENT_STREAM));
+ metrics_->RemoveStream(MediaStreamTrackMetrics::SENT_STREAM, stream_);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_stream_dependency_factory.cc b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc
index 8de0e2b43dd..7048d935d45 100644
--- a/chromium/content/renderer/media/mock_media_stream_dependency_factory.cc
+++ b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc
@@ -1,15 +1,18 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/renderer/media/mock_media_stream_dependency_factory.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "content/renderer/media/mock_peer_connection_impl.h"
#include "content/renderer/media/webaudio_capturer_source.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
#include "third_party/libjingle/source/talk/base/scoped_ref_ptr.h"
#include "third_party/libjingle/source/talk/media/base/videocapturer.h"
@@ -41,84 +44,140 @@ static typename V::iterator FindTrack(V* vector,
return it;
};
-class MockMediaStream : public webrtc::MediaStreamInterface {
- public:
- explicit MockMediaStream(const std::string& label)
- : label_(label),
- observer_(NULL) {
- }
- virtual bool AddTrack(AudioTrackInterface* track) OVERRIDE {
- audio_track_vector_.push_back(track);
- if (observer_)
- observer_->OnChanged();
- return true;
- }
- virtual bool AddTrack(VideoTrackInterface* track) OVERRIDE {
- video_track_vector_.push_back(track);
- if (observer_)
- observer_->OnChanged();
- return true;
- }
- virtual bool RemoveTrack(AudioTrackInterface* track) OVERRIDE {
- AudioTrackVector::iterator it = FindTrack(&audio_track_vector_,
- track->id());
- if (it == audio_track_vector_.end())
- return false;
- audio_track_vector_.erase(it);
- if (observer_)
- observer_->OnChanged();
- return true;
- }
- virtual bool RemoveTrack(VideoTrackInterface* track) OVERRIDE {
- VideoTrackVector::iterator it = FindTrack(&video_track_vector_,
- track->id());
- if (it == video_track_vector_.end())
- return false;
- video_track_vector_.erase(it);
- if (observer_)
- observer_->OnChanged();
- return true;
- }
- virtual std::string label() const OVERRIDE { return label_; }
- virtual AudioTrackVector GetAudioTracks() OVERRIDE {
- return audio_track_vector_;
- }
- virtual VideoTrackVector GetVideoTracks() OVERRIDE {
- return video_track_vector_;
+MockMediaStream::MockMediaStream(const std::string& label) : label_(label) {}
+
+bool MockMediaStream::AddTrack(AudioTrackInterface* track) {
+ audio_track_vector_.push_back(track);
+ NotifyObservers();
+ return true;
+}
+
+bool MockMediaStream::AddTrack(VideoTrackInterface* track) {
+ video_track_vector_.push_back(track);
+ NotifyObservers();
+ return true;
+}
+
+bool MockMediaStream::RemoveTrack(AudioTrackInterface* track) {
+ AudioTrackVector::iterator it = FindTrack(&audio_track_vector_,
+ track->id());
+ if (it == audio_track_vector_.end())
+ return false;
+ audio_track_vector_.erase(it);
+ NotifyObservers();
+ return true;
+}
+
+bool MockMediaStream::RemoveTrack(VideoTrackInterface* track) {
+ VideoTrackVector::iterator it = FindTrack(&video_track_vector_,
+ track->id());
+ if (it == video_track_vector_.end())
+ return false;
+ video_track_vector_.erase(it);
+ NotifyObservers();
+ return true;
+}
+
+std::string MockMediaStream::label() const {
+ return label_;
+}
+
+AudioTrackVector MockMediaStream::GetAudioTracks() {
+ return audio_track_vector_;
+}
+
+VideoTrackVector MockMediaStream::GetVideoTracks() {
+ return video_track_vector_;
+}
+
+talk_base::scoped_refptr<AudioTrackInterface> MockMediaStream::FindAudioTrack(
+ const std::string& track_id) {
+ AudioTrackVector::iterator it = FindTrack(&audio_track_vector_, track_id);
+ return it == audio_track_vector_.end() ? NULL : *it;
+}
+
+talk_base::scoped_refptr<VideoTrackInterface> MockMediaStream::FindVideoTrack(
+ const std::string& track_id) {
+ VideoTrackVector::iterator it = FindTrack(&video_track_vector_, track_id);
+ return it == video_track_vector_.end() ? NULL : *it;
+}
+
+void MockMediaStream::RegisterObserver(ObserverInterface* observer) {
+ DCHECK(observers_.find(observer) == observers_.end());
+ observers_.insert(observer);
+}
+
+void MockMediaStream::UnregisterObserver(ObserverInterface* observer) {
+ ObserverSet::iterator it = observers_.find(observer);
+ DCHECK(it != observers_.end());
+ observers_.erase(it);
+}
+
+void MockMediaStream::NotifyObservers() {
+ for (ObserverSet::iterator it = observers_.begin(); it != observers_.end();
+ ++it) {
+ (*it)->OnChanged();
}
- virtual talk_base::scoped_refptr<AudioTrackInterface>
- FindAudioTrack(const std::string& track_id) OVERRIDE {
- AudioTrackVector::iterator it = FindTrack(&audio_track_vector_, track_id);
- return it == audio_track_vector_.end() ? NULL : *it;
+}
+
+MockMediaStream::~MockMediaStream() {}
+
+class MockRtcVideoCapturer : public WebRtcVideoCapturerAdapter {
+ public:
+ explicit MockRtcVideoCapturer(bool is_screencast)
+ : WebRtcVideoCapturerAdapter(is_screencast),
+ number_of_capturered_frames_(0),
+ width_(0),
+ height_(0) {
}
- virtual talk_base::scoped_refptr<VideoTrackInterface>
- FindVideoTrack(const std::string& track_id) OVERRIDE {
- VideoTrackVector::iterator it = FindTrack(&video_track_vector_, track_id);
- return it == video_track_vector_.end() ? NULL : *it;
+
+ virtual void OnFrameCaptured(
+ const scoped_refptr<media::VideoFrame>& frame) OVERRIDE {
+ ++number_of_capturered_frames_;
+ width_ = frame->visible_rect().width();
+ height_ = frame->visible_rect().height();
}
- virtual void RegisterObserver(ObserverInterface* observer) OVERRIDE {
- DCHECK(!observer_);
- observer_ = observer;
+
+ int GetLastFrameWidth() const {
+ return width_;
}
- virtual void UnregisterObserver(ObserverInterface* observer) OVERRIDE {
- DCHECK(observer_ == observer);
- observer_ = NULL;
+
+ int GetLastFrameHeight() const {
+ return height_;
}
- protected:
- virtual ~MockMediaStream() {}
+ int GetFrameNum() const {
+ return number_of_capturered_frames_;
+ }
private:
- std::string label_;
- AudioTrackVector audio_track_vector_;
- VideoTrackVector video_track_vector_;
- webrtc::ObserverInterface* observer_;
+ int number_of_capturered_frames_;
+ int width_;
+ int height_;
};
+MockVideoRenderer::MockVideoRenderer()
+ : width_(0),
+ height_(0),
+ num_(0) {}
+
+MockVideoRenderer::~MockVideoRenderer() {}
+
+bool MockVideoRenderer::SetSize(int width, int height, int reserved) {
+ width_ = width;
+ height_ = height;
+ return true;
+}
+
+bool MockVideoRenderer::RenderFrame(const cricket::VideoFrame* frame) {
+ ++num_;
+ return true;
+}
+
MockAudioSource::MockAudioSource(
const webrtc::MediaConstraintsInterface* constraints)
: observer_(NULL),
- state_(MediaSourceInterface::kInitializing),
+ state_(MediaSourceInterface::kLive),
optional_constraints_(constraints->GetOptional()),
mandatory_constraints_(constraints->GetMandatory()) {
}
@@ -134,21 +193,6 @@ void MockAudioSource::UnregisterObserver(webrtc::ObserverInterface* observer) {
observer_ = NULL;
}
-void MockAudioSource::SetLive() {
- DCHECK(state_ == MediaSourceInterface::kInitializing ||
- state_ == MediaSourceInterface::kLive);
- state_ = MediaSourceInterface::kLive;
- if (observer_)
- observer_->OnChanged();
-}
-
-void MockAudioSource::SetEnded() {
- DCHECK_NE(MediaSourceInterface::kEnded, state_);
- state_ = MediaSourceInterface::kEnded;
- if (observer_)
- observer_->OnChanged();
-}
-
webrtc::MediaSourceInterface::SourceState MockAudioSource::state() const {
return state_;
}
@@ -176,8 +220,7 @@ void MockVideoSource::RemoveSink(cricket::VideoRenderer* output) {
}
cricket::VideoRenderer* MockVideoSource::FrameInput() {
- NOTIMPLEMENTED();
- return NULL;
+ return &renderer_;
}
void MockVideoSource::RegisterObserver(webrtc::ObserverInterface* observer) {
@@ -224,60 +267,81 @@ const cricket::VideoOptions* MockVideoSource::options() const {
return NULL;
}
-MockLocalVideoTrack::MockLocalVideoTrack(std::string id,
- webrtc::VideoSourceInterface* source)
+int MockVideoSource::GetLastFrameWidth() const {
+ DCHECK(capturer_);
+ return
+ static_cast<MockRtcVideoCapturer*>(capturer_.get())->GetLastFrameWidth();
+}
+
+int MockVideoSource::GetLastFrameHeight() const {
+ DCHECK(capturer_);
+ return
+ static_cast<MockRtcVideoCapturer*>(capturer_.get())->GetLastFrameHeight();
+}
+
+int MockVideoSource::GetFrameNum() const {
+ DCHECK(capturer_);
+ return static_cast<MockRtcVideoCapturer*>(capturer_.get())->GetFrameNum();
+}
+
+MockWebRtcVideoTrack::MockWebRtcVideoTrack(
+ const std::string& id,
+ webrtc::VideoSourceInterface* source)
: enabled_(false),
id_(id),
state_(MediaStreamTrackInterface::kLive),
source_(source),
- observer_(NULL) {
+ observer_(NULL),
+ renderer_(NULL) {
}
-MockLocalVideoTrack::~MockLocalVideoTrack() {}
+MockWebRtcVideoTrack::~MockWebRtcVideoTrack() {}
-void MockLocalVideoTrack::AddRenderer(VideoRendererInterface* renderer) {
- NOTIMPLEMENTED();
+void MockWebRtcVideoTrack::AddRenderer(VideoRendererInterface* renderer) {
+ DCHECK(!renderer_);
+ renderer_ = renderer;
}
-void MockLocalVideoTrack::RemoveRenderer(VideoRendererInterface* renderer) {
- NOTIMPLEMENTED();
+void MockWebRtcVideoTrack::RemoveRenderer(VideoRendererInterface* renderer) {
+ DCHECK_EQ(renderer_, renderer);
+ renderer_ = NULL;
}
-std::string MockLocalVideoTrack::kind() const {
+std::string MockWebRtcVideoTrack::kind() const {
NOTIMPLEMENTED();
return std::string();
}
-std::string MockLocalVideoTrack::id() const { return id_; }
+std::string MockWebRtcVideoTrack::id() const { return id_; }
-bool MockLocalVideoTrack::enabled() const { return enabled_; }
+bool MockWebRtcVideoTrack::enabled() const { return enabled_; }
-MockLocalVideoTrack::TrackState MockLocalVideoTrack::state() const {
+MockWebRtcVideoTrack::TrackState MockWebRtcVideoTrack::state() const {
return state_;
}
-bool MockLocalVideoTrack::set_enabled(bool enable) {
+bool MockWebRtcVideoTrack::set_enabled(bool enable) {
enabled_ = enable;
return true;
}
-bool MockLocalVideoTrack::set_state(TrackState new_state) {
+bool MockWebRtcVideoTrack::set_state(TrackState new_state) {
state_ = new_state;
if (observer_)
observer_->OnChanged();
return true;
}
-void MockLocalVideoTrack::RegisterObserver(ObserverInterface* observer) {
+void MockWebRtcVideoTrack::RegisterObserver(ObserverInterface* observer) {
observer_ = observer;
}
-void MockLocalVideoTrack::UnregisterObserver(ObserverInterface* observer) {
+void MockWebRtcVideoTrack::UnregisterObserver(ObserverInterface* observer) {
DCHECK(observer_ == observer);
observer_ = NULL;
}
-VideoSourceInterface* MockLocalVideoTrack::GetSource() const {
+VideoSourceInterface* MockWebRtcVideoTrack::GetSource() const {
return source_.get();
}
@@ -366,103 +430,81 @@ class MockIceCandidate : public IceCandidateInterface {
std::string sdp_;
};
-MockMediaStreamDependencyFactory::MockMediaStreamDependencyFactory()
- : MediaStreamDependencyFactory(NULL, NULL),
- mock_pc_factory_created_(false) {
-}
-
-MockMediaStreamDependencyFactory::~MockMediaStreamDependencyFactory() {}
-
-bool MockMediaStreamDependencyFactory::EnsurePeerConnectionFactory() {
- mock_pc_factory_created_ = true;
- return true;
+MockPeerConnectionDependencyFactory::MockPeerConnectionDependencyFactory()
+ : PeerConnectionDependencyFactory(NULL),
+ fail_to_create_next_audio_capturer_(false) {
}
-bool MockMediaStreamDependencyFactory::PeerConnectionFactoryCreated() {
- return mock_pc_factory_created_;
-}
+MockPeerConnectionDependencyFactory::~MockPeerConnectionDependencyFactory() {}
scoped_refptr<webrtc::PeerConnectionInterface>
-MockMediaStreamDependencyFactory::CreatePeerConnection(
+MockPeerConnectionDependencyFactory::CreatePeerConnection(
const webrtc::PeerConnectionInterface::IceServers& ice_servers,
const webrtc::MediaConstraintsInterface* constraints,
blink::WebFrame* frame,
webrtc::PeerConnectionObserver* observer) {
- DCHECK(mock_pc_factory_created_);
return new talk_base::RefCountedObject<MockPeerConnectionImpl>(this);
}
scoped_refptr<webrtc::AudioSourceInterface>
-MockMediaStreamDependencyFactory::CreateLocalAudioSource(
+MockPeerConnectionDependencyFactory::CreateLocalAudioSource(
const webrtc::MediaConstraintsInterface* constraints) {
last_audio_source_ =
new talk_base::RefCountedObject<MockAudioSource>(constraints);
return last_audio_source_;
}
+WebRtcVideoCapturerAdapter*
+MockPeerConnectionDependencyFactory::CreateVideoCapturer(
+ bool is_screen_capture) {
+ return new MockRtcVideoCapturer(is_screen_capture);
+}
+
scoped_refptr<webrtc::VideoSourceInterface>
-MockMediaStreamDependencyFactory::CreateLocalVideoSource(
- int video_session_id,
- bool is_screencast,
- const webrtc::MediaConstraintsInterface* constraints) {
+MockPeerConnectionDependencyFactory::CreateVideoSource(
+ cricket::VideoCapturer* capturer,
+ const blink::WebMediaConstraints& constraints) {
last_video_source_ = new talk_base::RefCountedObject<MockVideoSource>();
+ last_video_source_->SetVideoCapturer(capturer);
return last_video_source_;
}
scoped_refptr<WebAudioCapturerSource>
-MockMediaStreamDependencyFactory::CreateWebAudioSource(
- blink::WebMediaStreamSource* source,
- RTCMediaConstraints* constraints) {
+MockPeerConnectionDependencyFactory::CreateWebAudioSource(
+ blink::WebMediaStreamSource* source) {
return NULL;
}
scoped_refptr<webrtc::MediaStreamInterface>
-MockMediaStreamDependencyFactory::CreateLocalMediaStream(
+MockPeerConnectionDependencyFactory::CreateLocalMediaStream(
const std::string& label) {
- DCHECK(mock_pc_factory_created_);
return new talk_base::RefCountedObject<MockMediaStream>(label);
}
scoped_refptr<webrtc::VideoTrackInterface>
-MockMediaStreamDependencyFactory::CreateLocalVideoTrack(
+MockPeerConnectionDependencyFactory::CreateLocalVideoTrack(
const std::string& id,
webrtc::VideoSourceInterface* source) {
- DCHECK(mock_pc_factory_created_);
scoped_refptr<webrtc::VideoTrackInterface> track(
- new talk_base::RefCountedObject<MockLocalVideoTrack>(
+ new talk_base::RefCountedObject<MockWebRtcVideoTrack>(
id, source));
return track;
}
scoped_refptr<webrtc::VideoTrackInterface>
-MockMediaStreamDependencyFactory::CreateLocalVideoTrack(
+MockPeerConnectionDependencyFactory::CreateLocalVideoTrack(
const std::string& id,
cricket::VideoCapturer* capturer) {
- DCHECK(mock_pc_factory_created_);
-
scoped_refptr<MockVideoSource> source =
new talk_base::RefCountedObject<MockVideoSource>();
source->SetVideoCapturer(capturer);
- return new talk_base::RefCountedObject<MockLocalVideoTrack>(id, source.get());
-}
-
-scoped_refptr<webrtc::AudioTrackInterface>
-MockMediaStreamDependencyFactory::CreateLocalAudioTrack(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* source,
- const webrtc::MediaConstraintsInterface* constraints) {
- DCHECK(mock_pc_factory_created_);
- DCHECK(!capturer.get());
- return WebRtcLocalAudioTrack::Create(
- id, WebRtcAudioCapturer::CreateCapturer(), webaudio_source,
- source, constraints);
+ return
+ new talk_base::RefCountedObject<MockWebRtcVideoTrack>(id, source.get());
}
SessionDescriptionInterface*
-MockMediaStreamDependencyFactory::CreateSessionDescription(
+MockPeerConnectionDependencyFactory::CreateSessionDescription(
const std::string& type,
const std::string& sdp,
webrtc::SdpParseError* error) {
@@ -470,7 +512,7 @@ MockMediaStreamDependencyFactory::CreateSessionDescription(
}
webrtc::IceCandidateInterface*
-MockMediaStreamDependencyFactory::CreateIceCandidate(
+MockPeerConnectionDependencyFactory::CreateIceCandidate(
const std::string& sdp_mid,
int sdp_mline_index,
const std::string& sdp) {
@@ -478,9 +520,22 @@ MockMediaStreamDependencyFactory::CreateIceCandidate(
}
scoped_refptr<WebRtcAudioCapturer>
-MockMediaStreamDependencyFactory::MaybeCreateAudioCapturer(
- int render_view_id, const StreamDeviceInfo& device_info) {
- return WebRtcAudioCapturer::CreateCapturer();
+MockPeerConnectionDependencyFactory::CreateAudioCapturer(
+ int render_view_id, const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ MediaStreamAudioSource* audio_source) {
+ if (fail_to_create_next_audio_capturer_) {
+ fail_to_create_next_audio_capturer_ = false;
+ return NULL;
+ }
+ DCHECK(audio_source);
+ return WebRtcAudioCapturer::CreateCapturer(-1, device_info,
+ constraints, NULL, audio_source);
+}
+
+void MockPeerConnectionDependencyFactory::StartLocalAudioTrack(
+ WebRtcLocalAudioTrack* audio_track) {
+ audio_track->Start();
}
} // namespace content
diff --git a/chromium/content/renderer/media/mock_media_stream_dependency_factory.h b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h
index bd73ec7bd9b..12f4d24d863 100644
--- a/chromium/content/renderer/media/mock_media_stream_dependency_factory.h
+++ b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h
@@ -1,21 +1,40 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_DEPENDENCY_FACTORY_H_
-#define CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_DEPENDENCY_FACTORY_H_
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_MOCK_PEER_CONNECTION_DEPENDENCY_FACTORY_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_MOCK_PEER_CONNECTION_DEPENDENCY_FACTORY_H_
+#include <set>
#include <string>
#include <vector>
#include "base/compiler_specific.h"
-#include "content/renderer/media/media_stream_dependency_factory.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
+#include "third_party/libjingle/source/talk/media/base/videorenderer.h"
namespace content {
class WebAudioCapturerSource;
+class MockVideoRenderer : public cricket::VideoRenderer {
+ public:
+ MockVideoRenderer();
+ virtual ~MockVideoRenderer();
+ virtual bool SetSize(int width, int height, int reserved) OVERRIDE;
+ virtual bool RenderFrame(const cricket::VideoFrame* frame) OVERRIDE;
+
+ int width() const { return width_; }
+ int height() const { return height_; }
+ int num() const { return num_; }
+
+ private:
+ int width_;
+ int height_;
+ int num_;
+};
+
class MockVideoSource : public webrtc::VideoSourceInterface {
public:
MockVideoSource();
@@ -36,6 +55,11 @@ class MockVideoSource : public webrtc::VideoSourceInterface {
// Set the video capturer.
void SetVideoCapturer(cricket::VideoCapturer* capturer);
+ // Test helpers.
+ int GetLastFrameWidth() const;
+ int GetLastFrameHeight() const;
+ int GetFrameNum() const;
+
protected:
virtual ~MockVideoSource();
@@ -45,6 +69,7 @@ class MockVideoSource : public webrtc::VideoSourceInterface {
std::vector<webrtc::ObserverInterface*> observers_;
MediaSourceInterface::SourceState state_;
scoped_ptr<cricket::VideoCapturer> capturer_;
+ MockVideoRenderer renderer_;
};
class MockAudioSource : public webrtc::AudioSourceInterface {
@@ -80,9 +105,9 @@ class MockAudioSource : public webrtc::AudioSourceInterface {
webrtc::MediaConstraintsInterface::Constraints mandatory_constraints_;
};
-class MockLocalVideoTrack : public webrtc::VideoTrackInterface {
+class MockWebRtcVideoTrack : public webrtc::VideoTrackInterface {
public:
- MockLocalVideoTrack(std::string id,
+ MockWebRtcVideoTrack(const std::string& id,
webrtc::VideoSourceInterface* source);
virtual void AddRenderer(webrtc::VideoRendererInterface* renderer) OVERRIDE;
virtual void RemoveRenderer(
@@ -98,7 +123,7 @@ class MockLocalVideoTrack : public webrtc::VideoTrackInterface {
virtual webrtc::VideoSourceInterface* GetSource() const OVERRIDE;
protected:
- virtual ~MockLocalVideoTrack();
+ virtual ~MockWebRtcVideoTrack();
private:
bool enabled_;
@@ -106,14 +131,48 @@ class MockLocalVideoTrack : public webrtc::VideoTrackInterface {
TrackState state_;
scoped_refptr<webrtc::VideoSourceInterface> source_;
webrtc::ObserverInterface* observer_;
+ webrtc::VideoRendererInterface* renderer_;
+};
+
+class MockMediaStream : public webrtc::MediaStreamInterface {
+ public:
+ explicit MockMediaStream(const std::string& label);
+
+ virtual bool AddTrack(webrtc::AudioTrackInterface* track) OVERRIDE;
+ virtual bool AddTrack(webrtc::VideoTrackInterface* track) OVERRIDE;
+ virtual bool RemoveTrack(webrtc::AudioTrackInterface* track) OVERRIDE;
+ virtual bool RemoveTrack(webrtc::VideoTrackInterface* track) OVERRIDE;
+ virtual std::string label() const OVERRIDE;
+ virtual webrtc::AudioTrackVector GetAudioTracks() OVERRIDE;
+ virtual webrtc::VideoTrackVector GetVideoTracks() OVERRIDE;
+ virtual talk_base::scoped_refptr<webrtc::AudioTrackInterface>
+ FindAudioTrack(const std::string& track_id) OVERRIDE;
+ virtual talk_base::scoped_refptr<webrtc::VideoTrackInterface>
+ FindVideoTrack(const std::string& track_id) OVERRIDE;
+ virtual void RegisterObserver(webrtc::ObserverInterface* observer) OVERRIDE;
+ virtual void UnregisterObserver(webrtc::ObserverInterface* observer) OVERRIDE;
+
+ protected:
+ virtual ~MockMediaStream();
+
+ private:
+ void NotifyObservers();
+
+ std::string label_;
+ webrtc::AudioTrackVector audio_track_vector_;
+ webrtc::VideoTrackVector video_track_vector_;
+
+ typedef std::set<webrtc::ObserverInterface*> ObserverSet;
+ ObserverSet observers_;
};
// A mock factory for creating different objects for
-// RTC MediaStreams and PeerConnections.
-class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
+// RTC PeerConnections.
+class MockPeerConnectionDependencyFactory
+ : public PeerConnectionDependencyFactory {
public:
- MockMediaStreamDependencyFactory();
- virtual ~MockMediaStreamDependencyFactory();
+ MockPeerConnectionDependencyFactory();
+ virtual ~MockPeerConnectionDependencyFactory();
virtual scoped_refptr<webrtc::PeerConnectionInterface> CreatePeerConnection(
const webrtc::PeerConnectionInterface::IceServers& ice_servers,
@@ -123,14 +182,14 @@ class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
virtual scoped_refptr<webrtc::AudioSourceInterface>
CreateLocalAudioSource(
const webrtc::MediaConstraintsInterface* constraints) OVERRIDE;
+ virtual WebRtcVideoCapturerAdapter* CreateVideoCapturer(
+ bool is_screen_capture) OVERRIDE;
virtual scoped_refptr<webrtc::VideoSourceInterface>
- CreateLocalVideoSource(
- int video_session_id,
- bool is_screencast,
- const webrtc::MediaConstraintsInterface* constraints) OVERRIDE;
+ CreateVideoSource(
+ cricket::VideoCapturer* capturer,
+ const blink::WebMediaConstraints& constraints) OVERRIDE;
virtual scoped_refptr<WebAudioCapturerSource> CreateWebAudioSource(
- blink::WebMediaStreamSource* source,
- RTCMediaConstraints* constraints) OVERRIDE;
+ blink::WebMediaStreamSource* source) OVERRIDE;
virtual scoped_refptr<webrtc::MediaStreamInterface>
CreateLocalMediaStream(const std::string& label) OVERRIDE;
virtual scoped_refptr<webrtc::VideoTrackInterface>
@@ -139,12 +198,6 @@ class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
virtual scoped_refptr<webrtc::VideoTrackInterface>
CreateLocalVideoTrack(const std::string& id,
cricket::VideoCapturer* capturer) OVERRIDE;
- virtual scoped_refptr<webrtc::AudioTrackInterface> CreateLocalAudioTrack(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* source,
- const webrtc::MediaConstraintsInterface* constraints) OVERRIDE;
virtual webrtc::SessionDescriptionInterface* CreateSessionDescription(
const std::string& type,
const std::string& sdp,
@@ -154,23 +207,28 @@ class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
int sdp_mline_index,
const std::string& sdp) OVERRIDE;
- virtual bool EnsurePeerConnectionFactory() OVERRIDE;
- virtual bool PeerConnectionFactoryCreated() OVERRIDE;
+ virtual scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
+ int render_view_id, const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ MediaStreamAudioSource* audio_source) OVERRIDE;
+ void FailToCreateNextAudioCapturer() {
+ fail_to_create_next_audio_capturer_ = true;
+ }
- virtual scoped_refptr<WebRtcAudioCapturer> MaybeCreateAudioCapturer(
- int render_view_id, const StreamDeviceInfo& device_info) OVERRIDE;
+ virtual void StartLocalAudioTrack(
+ WebRtcLocalAudioTrack* audio_track) OVERRIDE;
MockAudioSource* last_audio_source() { return last_audio_source_.get(); }
MockVideoSource* last_video_source() { return last_video_source_.get(); }
private:
- bool mock_pc_factory_created_;
+ bool fail_to_create_next_audio_capturer_;
scoped_refptr <MockAudioSource> last_audio_source_;
scoped_refptr <MockVideoSource> last_video_source_;
- DISALLOW_COPY_AND_ASSIGN(MockMediaStreamDependencyFactory);
+ DISALLOW_COPY_AND_ASSIGN(MockPeerConnectionDependencyFactory);
};
} // namespace content
-#endif // CONTENT_RENDERER_MEDIA_MOCK_MEDIA_STREAM_DEPENDENCY_FACTORY_H_
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_MOCK_PEER_CONNECTION_DEPENDENCY_FACTORY_H_
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
new file mode 100644
index 00000000000..f2f7f1d109b
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
@@ -0,0 +1,657 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/waitable_event.h"
+#include "content/common/media/media_stream_messages.h"
+#include "content/public/common/content_switches.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
+#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/peer_connection_identity_service.h"
+#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/rtc_peer_connection_handler.h"
+#include "content/renderer/media/rtc_video_decoder_factory.h"
+#include "content/renderer/media/rtc_video_encoder_factory.h"
+#include "content/renderer/media/webaudio_capturer_source.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "content/renderer/media/webrtc_uma_histograms.h"
+#include "content/renderer/p2p/ipc_network_manager.h"
+#include "content/renderer/p2p/ipc_socket_factory.h"
+#include "content/renderer/p2p/port_allocator.h"
+#include "content/renderer/render_thread_impl.h"
+#include "jingle/glue/thread_wrapper.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/WebKit/public/platform/WebMediaStream.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/WebKit/public/platform/WebURL.h"
+#include "third_party/WebKit/public/web/WebDocument.h"
+#include "third_party/WebKit/public/web/WebFrame.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
+
+#if defined(USE_OPENSSL)
+#include "third_party/libjingle/source/talk/base/ssladapter.h"
+#else
+#include "net/socket/nss_ssl_util.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "media/base/android/media_codec_bridge.h"
+#endif
+
+namespace content {
+
+// Map of corresponding media constraints and platform effects.
+struct {
+ const char* constraint;
+ const media::AudioParameters::PlatformEffectsMask effect;
+} const kConstraintEffectMap[] = {
+ { content::kMediaStreamAudioDucking,
+ media::AudioParameters::DUCKING },
+ { webrtc::MediaConstraintsInterface::kEchoCancellation,
+ media::AudioParameters::ECHO_CANCELLER },
+};
+
+// If any platform effects are available, check them against the constraints.
+// Disable effects to match false constraints, but if a constraint is true, set
+// the constraint to false to later disable the software effect.
+//
+// This function may modify both |constraints| and |effects|.
+void HarmonizeConstraintsAndEffects(RTCMediaConstraints* constraints,
+ int* effects) {
+ if (*effects != media::AudioParameters::NO_EFFECTS) {
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kConstraintEffectMap); ++i) {
+ bool value;
+ size_t is_mandatory = 0;
+ if (!webrtc::FindConstraint(constraints,
+ kConstraintEffectMap[i].constraint,
+ &value,
+ &is_mandatory) || !value) {
+ // If the constraint is false, or does not exist, disable the platform
+ // effect.
+ *effects &= ~kConstraintEffectMap[i].effect;
+ DVLOG(1) << "Disabling platform effect: "
+ << kConstraintEffectMap[i].effect;
+ } else if (*effects & kConstraintEffectMap[i].effect) {
+ // If the constraint is true, leave the platform effect enabled, and
+ // set the constraint to false to later disable the software effect.
+ if (is_mandatory) {
+ constraints->AddMandatory(kConstraintEffectMap[i].constraint,
+ webrtc::MediaConstraintsInterface::kValueFalse, true);
+ } else {
+ constraints->AddOptional(kConstraintEffectMap[i].constraint,
+ webrtc::MediaConstraintsInterface::kValueFalse, true);
+ }
+ DVLOG(1) << "Disabling constraint: "
+ << kConstraintEffectMap[i].constraint;
+ }
+ }
+ }
+}
+
+class P2PPortAllocatorFactory : public webrtc::PortAllocatorFactoryInterface {
+ public:
+ P2PPortAllocatorFactory(
+ P2PSocketDispatcher* socket_dispatcher,
+ talk_base::NetworkManager* network_manager,
+ talk_base::PacketSocketFactory* socket_factory,
+ blink::WebFrame* web_frame)
+ : socket_dispatcher_(socket_dispatcher),
+ network_manager_(network_manager),
+ socket_factory_(socket_factory),
+ web_frame_(web_frame) {
+ }
+
+ virtual cricket::PortAllocator* CreatePortAllocator(
+ const std::vector<StunConfiguration>& stun_servers,
+ const std::vector<TurnConfiguration>& turn_configurations) OVERRIDE {
+ CHECK(web_frame_);
+ P2PPortAllocator::Config config;
+ if (stun_servers.size() > 0) {
+ config.stun_server = stun_servers[0].server.hostname();
+ config.stun_server_port = stun_servers[0].server.port();
+ }
+ config.legacy_relay = false;
+ for (size_t i = 0; i < turn_configurations.size(); ++i) {
+ P2PPortAllocator::Config::RelayServerConfig relay_config;
+ relay_config.server_address = turn_configurations[i].server.hostname();
+ relay_config.port = turn_configurations[i].server.port();
+ relay_config.username = turn_configurations[i].username;
+ relay_config.password = turn_configurations[i].password;
+ relay_config.transport_type = turn_configurations[i].transport_type;
+ relay_config.secure = turn_configurations[i].secure;
+ config.relays.push_back(relay_config);
+ }
+
+ // Use first turn server as the stun server.
+ if (turn_configurations.size() > 0) {
+ config.stun_server = config.relays[0].server_address;
+ config.stun_server_port = config.relays[0].port;
+ }
+
+ return new P2PPortAllocator(
+ web_frame_, socket_dispatcher_.get(), network_manager_,
+ socket_factory_, config);
+ }
+
+ protected:
+ virtual ~P2PPortAllocatorFactory() {}
+
+ private:
+ scoped_refptr<P2PSocketDispatcher> socket_dispatcher_;
+ // |network_manager_| and |socket_factory_| are a weak references, owned by
+ // PeerConnectionDependencyFactory.
+ talk_base::NetworkManager* network_manager_;
+ talk_base::PacketSocketFactory* socket_factory_;
+ // Raw ptr to the WebFrame that created the P2PPortAllocatorFactory.
+ blink::WebFrame* web_frame_;
+};
+
+PeerConnectionDependencyFactory::PeerConnectionDependencyFactory(
+ P2PSocketDispatcher* p2p_socket_dispatcher)
+ : network_manager_(NULL),
+ p2p_socket_dispatcher_(p2p_socket_dispatcher),
+ signaling_thread_(NULL),
+ worker_thread_(NULL),
+ chrome_worker_thread_("Chrome_libJingle_WorkerThread") {
+}
+
+PeerConnectionDependencyFactory::~PeerConnectionDependencyFactory() {
+ CleanupPeerConnectionFactory();
+ if (aec_dump_message_filter_)
+ aec_dump_message_filter_->RemoveDelegate(this);
+}
+
+blink::WebRTCPeerConnectionHandler*
+PeerConnectionDependencyFactory::CreateRTCPeerConnectionHandler(
+ blink::WebRTCPeerConnectionHandlerClient* client) {
+ // Save histogram data so we can see how much PeerConnetion is used.
+ // The histogram counts the number of calls to the JS API
+ // webKitRTCPeerConnection.
+ UpdateWebRTCMethodCount(WEBKIT_RTC_PEER_CONNECTION);
+
+ return new RTCPeerConnectionHandler(client, this);
+}
+
+bool PeerConnectionDependencyFactory::InitializeMediaStreamAudioSource(
+ int render_view_id,
+ const blink::WebMediaConstraints& audio_constraints,
+ MediaStreamAudioSource* source_data) {
+ DVLOG(1) << "InitializeMediaStreamAudioSources()";
+
+ // Do additional source initialization if the audio source is a valid
+ // microphone or tab audio.
+ RTCMediaConstraints native_audio_constraints(audio_constraints);
+ MediaAudioConstraints::ApplyFixedAudioConstraints(&native_audio_constraints);
+
+ StreamDeviceInfo device_info = source_data->device_info();
+ RTCMediaConstraints constraints = native_audio_constraints;
+ // May modify both |constraints| and |effects|.
+ HarmonizeConstraintsAndEffects(&constraints,
+ &device_info.device.input.effects);
+
+ scoped_refptr<WebRtcAudioCapturer> capturer(
+ CreateAudioCapturer(render_view_id, device_info, audio_constraints,
+ source_data));
+ if (!capturer.get()) {
+ DLOG(WARNING) << "Failed to create the capturer for device "
+ << device_info.device.id;
+ // TODO(xians): Don't we need to check if source_observer is observing
+ // something? If not, then it looks like we have a leak here.
+ // OTOH, if it _is_ observing something, then the callback might
+ // be called multiple times which is likely also a bug.
+ return false;
+ }
+ source_data->SetAudioCapturer(capturer);
+
+ // Creates a LocalAudioSource object which holds audio options.
+ // TODO(xians): The option should apply to the track instead of the source.
+ // TODO(perkj): Move audio constraints parsing to Chrome.
+ // Currently there are a few constraints that are parsed by libjingle and
+ // the state is set to ended if parsing fails.
+ scoped_refptr<webrtc::AudioSourceInterface> rtc_source(
+ CreateLocalAudioSource(&constraints).get());
+ if (rtc_source->state() != webrtc::MediaSourceInterface::kLive) {
+ DLOG(WARNING) << "Failed to create rtc LocalAudioSource.";
+ return false;
+ }
+ source_data->SetLocalAudioSource(rtc_source);
+ return true;
+}
+
+WebRtcVideoCapturerAdapter*
+PeerConnectionDependencyFactory::CreateVideoCapturer(
+ bool is_screeencast) {
+ // We need to make sure the libjingle thread wrappers have been created
+ // before we can use an instance of a WebRtcVideoCapturerAdapter. This is
+ // since the base class of WebRtcVideoCapturerAdapter is a
+ // cricket::VideoCapturer and it uses the libjingle thread wrappers.
+ if (!GetPcFactory())
+ return NULL;
+ return new WebRtcVideoCapturerAdapter(is_screeencast);
+}
+
+scoped_refptr<webrtc::VideoSourceInterface>
+PeerConnectionDependencyFactory::CreateVideoSource(
+ cricket::VideoCapturer* capturer,
+ const blink::WebMediaConstraints& constraints) {
+ RTCMediaConstraints webrtc_constraints(constraints);
+ scoped_refptr<webrtc::VideoSourceInterface> source =
+ GetPcFactory()->CreateVideoSource(capturer, &webrtc_constraints).get();
+ return source;
+}
+
+const scoped_refptr<webrtc::PeerConnectionFactoryInterface>&
+PeerConnectionDependencyFactory::GetPcFactory() {
+ if (!pc_factory_)
+ CreatePeerConnectionFactory();
+ CHECK(pc_factory_);
+ return pc_factory_;
+}
+
+void PeerConnectionDependencyFactory::CreatePeerConnectionFactory() {
+ DCHECK(!pc_factory_.get());
+ DCHECK(!signaling_thread_);
+ DCHECK(!worker_thread_);
+ DCHECK(!network_manager_);
+ DCHECK(!socket_factory_);
+ DCHECK(!chrome_worker_thread_.IsRunning());
+
+ DVLOG(1) << "PeerConnectionDependencyFactory::CreatePeerConnectionFactory()";
+
+ jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
+ jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
+ signaling_thread_ = jingle_glue::JingleThreadWrapper::current();
+ CHECK(signaling_thread_);
+
+ CHECK(chrome_worker_thread_.Start());
+
+ base::WaitableEvent start_worker_event(true, false);
+ chrome_worker_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
+ &PeerConnectionDependencyFactory::InitializeWorkerThread,
+ base::Unretained(this),
+ &worker_thread_,
+ &start_worker_event));
+ start_worker_event.Wait();
+ CHECK(worker_thread_);
+
+ base::WaitableEvent create_network_manager_event(true, false);
+ chrome_worker_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
+ &PeerConnectionDependencyFactory::CreateIpcNetworkManagerOnWorkerThread,
+ base::Unretained(this),
+ &create_network_manager_event));
+ create_network_manager_event.Wait();
+
+ socket_factory_.reset(
+ new IpcPacketSocketFactory(p2p_socket_dispatcher_.get()));
+
+ // Init SSL, which will be needed by PeerConnection.
+#if defined(USE_OPENSSL)
+ if (!talk_base::InitializeSSL()) {
+ LOG(ERROR) << "Failed on InitializeSSL.";
+ NOTREACHED();
+ return;
+ }
+#else
+ // TODO(ronghuawu): Replace this call with InitializeSSL.
+ net::EnsureNSSSSLInit();
+#endif
+
+ scoped_ptr<cricket::WebRtcVideoDecoderFactory> decoder_factory;
+ scoped_ptr<cricket::WebRtcVideoEncoderFactory> encoder_factory;
+
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories =
+ RenderThreadImpl::current()->GetGpuFactories();
+ if (!cmd_line->HasSwitch(switches::kDisableWebRtcHWDecoding)) {
+ if (gpu_factories)
+ decoder_factory.reset(new RTCVideoDecoderFactory(gpu_factories));
+ }
+
+ if (!cmd_line->HasSwitch(switches::kDisableWebRtcHWEncoding)) {
+ if (gpu_factories)
+ encoder_factory.reset(new RTCVideoEncoderFactory(gpu_factories));
+ }
+
+#if defined(OS_ANDROID)
+ if (!media::MediaCodecBridge::SupportsSetParameters())
+ encoder_factory.reset();
+#endif
+
+ EnsureWebRtcAudioDeviceImpl();
+
+ scoped_refptr<webrtc::PeerConnectionFactoryInterface> factory(
+ webrtc::CreatePeerConnectionFactory(worker_thread_,
+ signaling_thread_,
+ audio_device_.get(),
+ encoder_factory.release(),
+ decoder_factory.release()));
+ CHECK(factory);
+
+ pc_factory_ = factory;
+ webrtc::PeerConnectionFactoryInterface::Options factory_options;
+ factory_options.disable_sctp_data_channels = false;
+ factory_options.disable_encryption =
+ cmd_line->HasSwitch(switches::kDisableWebRtcEncryption);
+ pc_factory_->SetOptions(factory_options);
+
+ // TODO(xians): Remove the following code after kDisableAudioTrackProcessing
+ // is removed.
+ if (!MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled()) {
+ aec_dump_message_filter_ = AecDumpMessageFilter::Get();
+ // In unit tests not creating a message filter, |aec_dump_message_filter_|
+ // will be NULL. We can just ignore that. Other unit tests and browser tests
+ // ensure that we do get the filter when we should.
+ if (aec_dump_message_filter_)
+ aec_dump_message_filter_->AddDelegate(this);
+ }
+}
+
+bool PeerConnectionDependencyFactory::PeerConnectionFactoryCreated() {
+ return pc_factory_.get() != NULL;
+}
+
+scoped_refptr<webrtc::PeerConnectionInterface>
+PeerConnectionDependencyFactory::CreatePeerConnection(
+ const webrtc::PeerConnectionInterface::IceServers& ice_servers,
+ const webrtc::MediaConstraintsInterface* constraints,
+ blink::WebFrame* web_frame,
+ webrtc::PeerConnectionObserver* observer) {
+ CHECK(web_frame);
+ CHECK(observer);
+ if (!GetPcFactory())
+ return NULL;
+
+ scoped_refptr<P2PPortAllocatorFactory> pa_factory =
+ new talk_base::RefCountedObject<P2PPortAllocatorFactory>(
+ p2p_socket_dispatcher_.get(),
+ network_manager_,
+ socket_factory_.get(),
+ web_frame);
+
+ PeerConnectionIdentityService* identity_service =
+ new PeerConnectionIdentityService(
+ GURL(web_frame->document().url().spec()).GetOrigin());
+
+ return GetPcFactory()->CreatePeerConnection(ice_servers,
+ constraints,
+ pa_factory.get(),
+ identity_service,
+ observer).get();
+}
+
+scoped_refptr<webrtc::MediaStreamInterface>
+PeerConnectionDependencyFactory::CreateLocalMediaStream(
+ const std::string& label) {
+ return GetPcFactory()->CreateLocalMediaStream(label).get();
+}
+
+scoped_refptr<webrtc::AudioSourceInterface>
+PeerConnectionDependencyFactory::CreateLocalAudioSource(
+ const webrtc::MediaConstraintsInterface* constraints) {
+ scoped_refptr<webrtc::AudioSourceInterface> source =
+ GetPcFactory()->CreateAudioSource(constraints).get();
+ return source;
+}
+
+void PeerConnectionDependencyFactory::CreateLocalAudioTrack(
+ const blink::WebMediaStreamTrack& track) {
+ blink::WebMediaStreamSource source = track.source();
+ DCHECK_EQ(source.type(), blink::WebMediaStreamSource::TypeAudio);
+ MediaStreamAudioSource* source_data =
+ static_cast<MediaStreamAudioSource*>(source.extraData());
+
+ scoped_refptr<WebAudioCapturerSource> webaudio_source;
+ if (!source_data) {
+ if (source.requiresAudioConsumer()) {
+ // We're adding a WebAudio MediaStream.
+ // Create a specific capturer for each WebAudio consumer.
+ webaudio_source = CreateWebAudioSource(&source);
+ source_data =
+ static_cast<MediaStreamAudioSource*>(source.extraData());
+ } else {
+ // TODO(perkj): Implement support for sources from
+ // remote MediaStreams.
+ NOTIMPLEMENTED();
+ return;
+ }
+ }
+
+ // Creates an adapter to hold all the libjingle objects.
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(track.id().utf8(),
+ source_data->local_audio_source()));
+ static_cast<webrtc::AudioTrackInterface*>(adapter.get())->set_enabled(
+ track.isEnabled());
+
+ // TODO(xians): Merge |source| to the capturer(). We can't do this today
+ // because only one capturer() is supported while one |source| is created
+ // for each audio track.
+ scoped_ptr<WebRtcLocalAudioTrack> audio_track(
+ new WebRtcLocalAudioTrack(adapter,
+ source_data->GetAudioCapturer(),
+ webaudio_source));
+
+ StartLocalAudioTrack(audio_track.get());
+
+ // Pass the ownership of the native local audio track to the blink track.
+ blink::WebMediaStreamTrack writable_track = track;
+ writable_track.setExtraData(audio_track.release());
+}
+
+void PeerConnectionDependencyFactory::StartLocalAudioTrack(
+ WebRtcLocalAudioTrack* audio_track) {
+ // Add the WebRtcAudioDevice as the sink to the local audio track.
+ // TODO(xians): Remove the following line of code after the APM in WebRTC is
+ // completely deprecated. See http://crbug/365672.
+ if (!MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled())
+ audio_track->AddSink(GetWebRtcAudioDevice());
+
+ // Start the audio track. This will hook the |audio_track| to the capturer
+ // as the sink of the audio, and only start the source of the capturer if
+ // it is the first audio track connecting to the capturer.
+ audio_track->Start();
+}
+
+scoped_refptr<WebAudioCapturerSource>
+PeerConnectionDependencyFactory::CreateWebAudioSource(
+ blink::WebMediaStreamSource* source) {
+ DVLOG(1) << "PeerConnectionDependencyFactory::CreateWebAudioSource()";
+
+ scoped_refptr<WebAudioCapturerSource>
+ webaudio_capturer_source(new WebAudioCapturerSource());
+ MediaStreamAudioSource* source_data = new MediaStreamAudioSource();
+
+ // Use the current default capturer for the WebAudio track so that the
+ // WebAudio track can pass a valid delay value and |need_audio_processing|
+ // flag to PeerConnection.
+ // TODO(xians): Remove this after moving APM to Chrome.
+ if (GetWebRtcAudioDevice()) {
+ source_data->SetAudioCapturer(
+ GetWebRtcAudioDevice()->GetDefaultCapturer());
+ }
+
+ // Create a LocalAudioSource object which holds audio options.
+ // SetLocalAudioSource() affects core audio parts in third_party/Libjingle.
+ source_data->SetLocalAudioSource(CreateLocalAudioSource(NULL).get());
+ source->setExtraData(source_data);
+
+ // Replace the default source with WebAudio as source instead.
+ source->addAudioConsumer(webaudio_capturer_source.get());
+
+ return webaudio_capturer_source;
+}
+
+scoped_refptr<webrtc::VideoTrackInterface>
+PeerConnectionDependencyFactory::CreateLocalVideoTrack(
+ const std::string& id,
+ webrtc::VideoSourceInterface* source) {
+ return GetPcFactory()->CreateVideoTrack(id, source).get();
+}
+
+scoped_refptr<webrtc::VideoTrackInterface>
+PeerConnectionDependencyFactory::CreateLocalVideoTrack(
+ const std::string& id, cricket::VideoCapturer* capturer) {
+ if (!capturer) {
+ LOG(ERROR) << "CreateLocalVideoTrack called with null VideoCapturer.";
+ return NULL;
+ }
+
+ // Create video source from the |capturer|.
+ scoped_refptr<webrtc::VideoSourceInterface> source =
+ GetPcFactory()->CreateVideoSource(capturer, NULL).get();
+
+ // Create native track from the source.
+ return GetPcFactory()->CreateVideoTrack(id, source.get()).get();
+}
+
+webrtc::SessionDescriptionInterface*
+PeerConnectionDependencyFactory::CreateSessionDescription(
+ const std::string& type,
+ const std::string& sdp,
+ webrtc::SdpParseError* error) {
+ return webrtc::CreateSessionDescription(type, sdp, error);
+}
+
+webrtc::IceCandidateInterface*
+PeerConnectionDependencyFactory::CreateIceCandidate(
+ const std::string& sdp_mid,
+ int sdp_mline_index,
+ const std::string& sdp) {
+ return webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, sdp);
+}
+
+WebRtcAudioDeviceImpl*
+PeerConnectionDependencyFactory::GetWebRtcAudioDevice() {
+ return audio_device_.get();
+}
+
+void PeerConnectionDependencyFactory::InitializeWorkerThread(
+ talk_base::Thread** thread,
+ base::WaitableEvent* event) {
+ jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
+ jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
+ *thread = jingle_glue::JingleThreadWrapper::current();
+ event->Signal();
+}
+
+void PeerConnectionDependencyFactory::CreateIpcNetworkManagerOnWorkerThread(
+ base::WaitableEvent* event) {
+ DCHECK_EQ(base::MessageLoop::current(), chrome_worker_thread_.message_loop());
+ network_manager_ = new IpcNetworkManager(p2p_socket_dispatcher_.get());
+ event->Signal();
+}
+
+void PeerConnectionDependencyFactory::DeleteIpcNetworkManager() {
+ DCHECK_EQ(base::MessageLoop::current(), chrome_worker_thread_.message_loop());
+ delete network_manager_;
+ network_manager_ = NULL;
+}
+
+void PeerConnectionDependencyFactory::CleanupPeerConnectionFactory() {
+ pc_factory_ = NULL;
+ if (network_manager_) {
+ // The network manager needs to free its resources on the thread they were
+ // created, which is the worked thread.
+ if (chrome_worker_thread_.IsRunning()) {
+ chrome_worker_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
+ &PeerConnectionDependencyFactory::DeleteIpcNetworkManager,
+ base::Unretained(this)));
+ // Stopping the thread will wait until all tasks have been
+ // processed before returning. We wait for the above task to finish before
+ // letting the the function continue to avoid any potential race issues.
+ chrome_worker_thread_.Stop();
+ } else {
+ NOTREACHED() << "Worker thread not running.";
+ }
+ }
+}
+
+scoped_refptr<WebRtcAudioCapturer>
+PeerConnectionDependencyFactory::CreateAudioCapturer(
+ int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ MediaStreamAudioSource* audio_source) {
+ // TODO(xians): Handle the cases when gUM is called without a proper render
+ // view, for example, by an extension.
+ DCHECK_GE(render_view_id, 0);
+
+ EnsureWebRtcAudioDeviceImpl();
+ DCHECK(GetWebRtcAudioDevice());
+ return WebRtcAudioCapturer::CreateCapturer(render_view_id, device_info,
+ constraints,
+ GetWebRtcAudioDevice(),
+ audio_source);
+}
+
+void PeerConnectionDependencyFactory::AddNativeAudioTrackToBlinkTrack(
+ webrtc::MediaStreamTrackInterface* native_track,
+ const blink::WebMediaStreamTrack& webkit_track,
+ bool is_local_track) {
+ DCHECK(!webkit_track.isNull() && !webkit_track.extraData());
+ DCHECK_EQ(blink::WebMediaStreamSource::TypeAudio,
+ webkit_track.source().type());
+ blink::WebMediaStreamTrack track = webkit_track;
+
+ DVLOG(1) << "AddNativeTrackToBlinkTrack() audio";
+ track.setExtraData(
+ new MediaStreamTrack(
+ static_cast<webrtc::AudioTrackInterface*>(native_track),
+ is_local_track));
+}
+
+scoped_refptr<base::MessageLoopProxy>
+PeerConnectionDependencyFactory::GetWebRtcWorkerThread() const {
+ DCHECK(CalledOnValidThread());
+ return chrome_worker_thread_.message_loop_proxy();
+}
+
+void PeerConnectionDependencyFactory::OnAecDumpFile(
+ const IPC::PlatformFileForTransit& file_handle) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled());
+ DCHECK(PeerConnectionFactoryCreated());
+
+ base::File file = IPC::PlatformFileForTransitToFile(file_handle);
+ DCHECK(file.IsValid());
+
+ // |pc_factory_| always takes ownership of |aec_dump_file|. If StartAecDump()
+ // fails, |aec_dump_file| will be closed.
+ if (!GetPcFactory()->StartAecDump(file.TakePlatformFile()))
+ VLOG(1) << "Could not start AEC dump.";
+}
+
+void PeerConnectionDependencyFactory::OnDisableAecDump() {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled());
+ // Do nothing. We never disable AEC dump for non-track-processing case.
+}
+
+void PeerConnectionDependencyFactory::OnIpcClosing() {
+ DCHECK(CalledOnValidThread());
+ aec_dump_message_filter_ = NULL;
+}
+
+void PeerConnectionDependencyFactory::EnsureWebRtcAudioDeviceImpl() {
+ if (audio_device_)
+ return;
+
+ audio_device_ = new WebRtcAudioDeviceImpl();
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h
new file mode 100644
index 00000000000..51699359eb0
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h
@@ -0,0 +1,218 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_PEER_CONNECTION_DEPENDENCY_FACTORY_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_PEER_CONNECTION_DEPENDENCY_FACTORY_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/files/file.h"
+#include "base/threading/thread.h"
+#include "content/common/content_export.h"
+#include "content/public/renderer/render_process_observer.h"
+#include "content/renderer/media/aec_dump_message_filter.h"
+#include "content/renderer/p2p/socket_dispatcher.h"
+#include "ipc/ipc_platform_file.h"
+#include "third_party/libjingle/source/talk/app/webrtc/peerconnectioninterface.h"
+#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
+
+namespace base {
+class WaitableEvent;
+}
+
+namespace talk_base {
+class NetworkManager;
+class PacketSocketFactory;
+class Thread;
+}
+
+namespace blink {
+class WebFrame;
+class WebMediaConstraints;
+class WebMediaStream;
+class WebMediaStreamSource;
+class WebMediaStreamTrack;
+class WebRTCPeerConnectionHandler;
+class WebRTCPeerConnectionHandlerClient;
+}
+
+namespace content {
+
+class IpcNetworkManager;
+class IpcPacketSocketFactory;
+class MediaStreamAudioSource;
+class RTCMediaConstraints;
+class WebAudioCapturerSource;
+class WebRtcAudioCapturer;
+class WebRtcAudioDeviceImpl;
+class WebRtcLocalAudioTrack;
+class WebRtcLoggingHandlerImpl;
+class WebRtcLoggingMessageFilter;
+class WebRtcVideoCapturerAdapter;
+struct StreamDeviceInfo;
+
+// Object factory for RTC PeerConnections.
+class CONTENT_EXPORT PeerConnectionDependencyFactory
+ : NON_EXPORTED_BASE(public base::NonThreadSafe),
+ NON_EXPORTED_BASE(public AecDumpMessageFilter::AecDumpDelegate) {
+ public:
+ PeerConnectionDependencyFactory(
+ P2PSocketDispatcher* p2p_socket_dispatcher);
+ virtual ~PeerConnectionDependencyFactory();
+
+ // Create a RTCPeerConnectionHandler object that implements the
+ // WebKit WebRTCPeerConnectionHandler interface.
+ blink::WebRTCPeerConnectionHandler* CreateRTCPeerConnectionHandler(
+ blink::WebRTCPeerConnectionHandlerClient* client);
+
+ // Asks the PeerConnection factory to create a Local MediaStream object.
+ virtual scoped_refptr<webrtc::MediaStreamInterface>
+ CreateLocalMediaStream(const std::string& label);
+
+  // InitializeMediaStreamAudioSource initializes a MediaStream source object
+ // for audio input.
+ bool InitializeMediaStreamAudioSource(
+ int render_view_id,
+ const blink::WebMediaConstraints& audio_constraints,
+ MediaStreamAudioSource* source_data);
+
+ // Creates an implementation of a cricket::VideoCapturer object that can be
+ // used when creating a libjingle webrtc::VideoSourceInterface object.
+ virtual WebRtcVideoCapturerAdapter* CreateVideoCapturer(
+ bool is_screen_capture);
+
+ // Create an instance of WebRtcLocalAudioTrack and store it
+ // in the extraData field of |track|.
+ void CreateLocalAudioTrack(const blink::WebMediaStreamTrack& track);
+
+ // Asks the PeerConnection factory to create a Local VideoTrack object.
+ virtual scoped_refptr<webrtc::VideoTrackInterface>
+ CreateLocalVideoTrack(const std::string& id,
+ webrtc::VideoSourceInterface* source);
+
+ // Asks the PeerConnection factory to create a Video Source.
+ // The video source takes ownership of |capturer|.
+ virtual scoped_refptr<webrtc::VideoSourceInterface>
+ CreateVideoSource(cricket::VideoCapturer* capturer,
+ const blink::WebMediaConstraints& constraints);
+
+ // Asks the libjingle PeerConnection factory to create a libjingle
+ // PeerConnection object.
+ // The PeerConnection object is owned by PeerConnectionHandler.
+ virtual scoped_refptr<webrtc::PeerConnectionInterface>
+ CreatePeerConnection(
+ const webrtc::PeerConnectionInterface::IceServers& ice_servers,
+ const webrtc::MediaConstraintsInterface* constraints,
+ blink::WebFrame* web_frame,
+ webrtc::PeerConnectionObserver* observer);
+
+ // Creates a libjingle representation of a Session description. Used by a
+ // RTCPeerConnectionHandler instance.
+ virtual webrtc::SessionDescriptionInterface* CreateSessionDescription(
+ const std::string& type,
+ const std::string& sdp,
+ webrtc::SdpParseError* error);
+
+ // Creates a libjingle representation of an ice candidate.
+ virtual webrtc::IceCandidateInterface* CreateIceCandidate(
+ const std::string& sdp_mid,
+ int sdp_mline_index,
+ const std::string& sdp);
+
+ WebRtcAudioDeviceImpl* GetWebRtcAudioDevice();
+
+ static void AddNativeAudioTrackToBlinkTrack(
+ webrtc::MediaStreamTrackInterface* native_track,
+ const blink::WebMediaStreamTrack& webkit_track,
+ bool is_local_track);
+
+ scoped_refptr<base::MessageLoopProxy> GetWebRtcWorkerThread() const;
+
+ // AecDumpMessageFilter::AecDumpDelegate implementation.
+ // TODO(xians): Remove when option to disable audio track processing is
+ // removed.
+ virtual void OnAecDumpFile(
+ const IPC::PlatformFileForTransit& file_handle) OVERRIDE;
+ virtual void OnDisableAecDump() OVERRIDE;
+ virtual void OnIpcClosing() OVERRIDE;
+
+ protected:
+ // Asks the PeerConnection factory to create a Local Audio Source.
+ virtual scoped_refptr<webrtc::AudioSourceInterface>
+ CreateLocalAudioSource(
+ const webrtc::MediaConstraintsInterface* constraints);
+
+ // Creates a media::AudioCapturerSource with an implementation that is
+ // specific for a WebAudio source. The created WebAudioCapturerSource
+ // instance will function as audio source instead of the default
+ // WebRtcAudioCapturer.
+ virtual scoped_refptr<WebAudioCapturerSource> CreateWebAudioSource(
+ blink::WebMediaStreamSource* source);
+
+ // Asks the PeerConnection factory to create a Local VideoTrack object with
+ // the video source using |capturer|.
+ virtual scoped_refptr<webrtc::VideoTrackInterface>
+ CreateLocalVideoTrack(const std::string& id,
+ cricket::VideoCapturer* capturer);
+
+ virtual const scoped_refptr<webrtc::PeerConnectionFactoryInterface>&
+ GetPcFactory();
+ virtual bool PeerConnectionFactoryCreated();
+
+ // Returns a new capturer or existing capturer based on the |render_view_id|
+ // and |device_info|. When the |render_view_id| and |device_info| are valid,
+  // it reuses an existing capturer if any; otherwise it creates a new capturer.
+ virtual scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
+ int render_view_id, const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ MediaStreamAudioSource* audio_source);
+
+ // Adds the audio device as a sink to the audio track and starts the local
+ // audio track. This is virtual for test purposes since no real audio device
+ // exist in unit tests.
+ virtual void StartLocalAudioTrack(WebRtcLocalAudioTrack* audio_track);
+
+ private:
+ // Creates |pc_factory_|, which in turn is used for
+ // creating PeerConnection objects.
+ void CreatePeerConnectionFactory();
+
+ void InitializeWorkerThread(talk_base::Thread** thread,
+ base::WaitableEvent* event);
+
+ void CreateIpcNetworkManagerOnWorkerThread(base::WaitableEvent* event);
+ void DeleteIpcNetworkManager();
+ void CleanupPeerConnectionFactory();
+
+ // Helper method to create a WebRtcAudioDeviceImpl.
+ void EnsureWebRtcAudioDeviceImpl();
+
+ // We own network_manager_, must be deleted on the worker thread.
+ // The network manager uses |p2p_socket_dispatcher_|.
+ IpcNetworkManager* network_manager_;
+ scoped_ptr<IpcPacketSocketFactory> socket_factory_;
+
+ scoped_refptr<webrtc::PeerConnectionFactoryInterface> pc_factory_;
+
+ scoped_refptr<P2PSocketDispatcher> p2p_socket_dispatcher_;
+ scoped_refptr<WebRtcAudioDeviceImpl> audio_device_;
+
+ // This is only used if audio track processing is disabled.
+ // TODO(xians): Remove when option to disable audio track processing is
+ // removed.
+ scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_;
+
+ // PeerConnection threads. signaling_thread_ is created from the
+ // "current" chrome thread.
+ talk_base::Thread* signaling_thread_;
+ talk_base::Thread* worker_thread_;
+ base::Thread chrome_worker_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeerConnectionDependencyFactory);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_PEER_CONNECTION_DEPENDENCY_FACTORY_H_
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory_unittest.cc b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory_unittest.cc
new file mode 100644
index 00000000000..72f564e40ad
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory_unittest.cc
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/mock_web_rtc_peer_connection_handler_client.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebRTCPeerConnectionHandler.h"
+
+namespace content {
+
+class PeerConnectionDependencyFactoryTest : public ::testing::Test {
+ public:
+ virtual void SetUp() {
+ dependency_factory_.reset(new MockPeerConnectionDependencyFactory());
+ }
+
+ protected:
+ scoped_ptr<MockPeerConnectionDependencyFactory> dependency_factory_;
+};
+
+TEST_F(PeerConnectionDependencyFactoryTest, CreateRTCPeerConnectionHandler) {
+ MockWebRTCPeerConnectionHandlerClient client_jsep;
+ scoped_ptr<blink::WebRTCPeerConnectionHandler> pc_handler(
+ dependency_factory_->CreateRTCPeerConnectionHandler(&client_jsep));
+ EXPECT_TRUE(pc_handler.get() != NULL);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/video_destination_handler.cc b/chromium/content/renderer/media/webrtc/video_destination_handler.cc
new file mode 100644
index 00000000000..84594bbaff5
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/video_destination_handler.cc
@@ -0,0 +1,235 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/video_destination_handler.h"
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_registry_interface.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/pepper/ppb_image_data_impl.h"
+#include "content/renderer/render_thread_impl.h"
+#include "media/video/capture/video_capture_types.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/WebKit/public/platform/WebURL.h"
+#include "third_party/WebKit/public/web/WebMediaStreamRegistry.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "url/gurl.h"
+
+namespace content {
+
+class PpFrameWriter::FrameWriterDelegate
+ : public base::RefCountedThreadSafe<FrameWriterDelegate> {
+ public:
+ FrameWriterDelegate(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy,
+ const VideoCaptureDeliverFrameCB& new_frame_callback);
+
+ void DeliverFrame(const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format);
+ private:
+ friend class base::RefCountedThreadSafe<FrameWriterDelegate>;
+ virtual ~FrameWriterDelegate();
+
+ void DeliverFrameOnIO(const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format);
+
+ scoped_refptr<base::MessageLoopProxy> io_message_loop_;
+ VideoCaptureDeliverFrameCB new_frame_callback_;
+};
+
+PpFrameWriter::FrameWriterDelegate::FrameWriterDelegate(
+ const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy,
+ const VideoCaptureDeliverFrameCB& new_frame_callback)
+ : io_message_loop_(io_message_loop_proxy),
+ new_frame_callback_(new_frame_callback) {
+}
+
+PpFrameWriter::FrameWriterDelegate::~FrameWriterDelegate() {
+}
+
+void PpFrameWriter::FrameWriterDelegate::DeliverFrame(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format) {
+ io_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&FrameWriterDelegate::DeliverFrameOnIO,
+ this, frame, format));
+}
+
+void PpFrameWriter::FrameWriterDelegate::DeliverFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format) {
+ DCHECK(io_message_loop_->BelongsToCurrentThread());
+ // The local time when this frame is generated is unknown so give a null
+ // value to |estimated_capture_time|.
+ new_frame_callback_.Run(frame, format, base::TimeTicks());
+}
+
+PpFrameWriter::PpFrameWriter() {
+ DVLOG(3) << "PpFrameWriter ctor";
+}
+
+PpFrameWriter::~PpFrameWriter() {
+ DVLOG(3) << "PpFrameWriter dtor";
+}
+
+void PpFrameWriter::GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) {
+ DCHECK(CalledOnValidThread());
+ DVLOG(3) << "PpFrameWriter::GetCurrentSupportedFormats()";
+ // Since the input is free to change the resolution at any point in time
+ // the supported formats are unknown.
+ media::VideoCaptureFormats formats;
+ callback.Run(formats);
+}
+
+void PpFrameWriter::StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!delegate_);
+ DVLOG(3) << "PpFrameWriter::StartSourceImpl()";
+ delegate_ = new FrameWriterDelegate(io_message_loop(), frame_callback);
+ OnStartDone(true);
+}
+
+void PpFrameWriter::StopSourceImpl() {
+ DCHECK(CalledOnValidThread());
+}
+
+void PpFrameWriter::PutFrame(PPB_ImageData_Impl* image_data,
+ int64 time_stamp_ns) {
+ DCHECK(CalledOnValidThread());
+ DVLOG(3) << "PpFrameWriter::PutFrame()";
+
+ if (!image_data) {
+ LOG(ERROR) << "PpFrameWriter::PutFrame - Called with NULL image_data.";
+ return;
+ }
+ ImageDataAutoMapper mapper(image_data);
+ if (!mapper.is_valid()) {
+ LOG(ERROR) << "PpFrameWriter::PutFrame - "
+ << "The image could not be mapped and is unusable.";
+ return;
+ }
+ const SkBitmap* bitmap = image_data->GetMappedBitmap();
+ if (!bitmap) {
+ LOG(ERROR) << "PpFrameWriter::PutFrame - "
+ << "The image_data's mapped bitmap is NULL.";
+ return;
+ }
+
+ const gfx::Size frame_size(bitmap->width(), bitmap->height());
+
+ if (state() != MediaStreamVideoSource::STARTED)
+ return;
+
+ const base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(
+ time_stamp_ns / base::Time::kNanosecondsPerMicrosecond);
+
+ // TODO(perkj): It would be more efficient to use I420 here. Using YV12 will
+ // force a copy into a tightly packed I420 frame in
+ // WebRtcVideoCapturerAdapter before the frame is delivered to libJingle.
+ // crbug/359587.
+ scoped_refptr<media::VideoFrame> new_frame =
+ frame_pool_.CreateFrame(media::VideoFrame::YV12, frame_size,
+ gfx::Rect(frame_size), frame_size, timestamp);
+ media::VideoCaptureFormat format(
+ frame_size,
+ MediaStreamVideoSource::kDefaultFrameRate,
+ media::PIXEL_FORMAT_YV12);
+
+ libyuv::BGRAToI420(reinterpret_cast<uint8*>(bitmap->getPixels()),
+ bitmap->rowBytes(),
+ new_frame->data(media::VideoFrame::kYPlane),
+ new_frame->stride(media::VideoFrame::kYPlane),
+ new_frame->data(media::VideoFrame::kUPlane),
+ new_frame->stride(media::VideoFrame::kUPlane),
+ new_frame->data(media::VideoFrame::kVPlane),
+ new_frame->stride(media::VideoFrame::kVPlane),
+ frame_size.width(), frame_size.height());
+
+ delegate_->DeliverFrame(new_frame, format);
+}
+
+// PpFrameWriterProxy is a helper class to make sure the user won't use
+// PpFrameWriter after it is released (IOW its owner - WebMediaStreamSource -
+// is released).
+class PpFrameWriterProxy : public FrameWriterInterface {
+ public:
+ explicit PpFrameWriterProxy(const base::WeakPtr<PpFrameWriter>& writer)
+ : writer_(writer) {
+ DCHECK(writer_ != NULL);
+ }
+
+ virtual ~PpFrameWriterProxy() {}
+
+ virtual void PutFrame(PPB_ImageData_Impl* image_data,
+ int64 time_stamp_ns) OVERRIDE {
+ writer_->PutFrame(image_data, time_stamp_ns);
+ }
+
+ private:
+ base::WeakPtr<PpFrameWriter> writer_;
+
+ DISALLOW_COPY_AND_ASSIGN(PpFrameWriterProxy);
+};
+
+bool VideoDestinationHandler::Open(
+ MediaStreamRegistryInterface* registry,
+ const std::string& url,
+ FrameWriterInterface** frame_writer) {
+ DVLOG(3) << "VideoDestinationHandler::Open";
+ blink::WebMediaStream stream;
+ if (registry) {
+ stream = registry->GetMediaStream(url);
+ } else {
+ stream =
+ blink::WebMediaStreamRegistry::lookupMediaStreamDescriptor(GURL(url));
+ }
+ if (stream.isNull()) {
+ LOG(ERROR) << "VideoDestinationHandler::Open - invalid url: " << url;
+ return false;
+ }
+
+ // Create a new native video track and add it to |stream|.
+ std::string track_id;
+ // According to spec, a media stream source's id should be unique per
+ // application. There's no easy way to strictly achieve that. The id
+ // generated with this method should be unique for most of the cases but
+ // theoretically it's possible we can get an id that's duplicated with the
+ // existing sources.
+ base::Base64Encode(base::RandBytesAsString(64), &track_id);
+
+ PpFrameWriter* writer = new PpFrameWriter();
+
+ // Create a new webkit video track.
+ blink::WebMediaStreamSource webkit_source;
+ blink::WebMediaStreamSource::Type type =
+ blink::WebMediaStreamSource::TypeVideo;
+ blink::WebString webkit_track_id = base::UTF8ToUTF16(track_id);
+ webkit_source.initialize(webkit_track_id, type, webkit_track_id);
+ webkit_source.setExtraData(writer);
+
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ bool track_enabled = true;
+
+ stream.addTrack(MediaStreamVideoTrack::CreateVideoTrack(
+ writer, constraints, MediaStreamVideoSource::ConstraintsCallback(),
+ track_enabled));
+
+ *frame_writer = new PpFrameWriterProxy(writer->AsWeakPtr());
+ return true;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/video_destination_handler.h b/chromium/content/renderer/media/webrtc/video_destination_handler.h
index 578fe223202..e84fdd603df 100644
--- a/chromium/content/renderer/media/video_destination_handler.h
+++ b/chromium/content/renderer/media/webrtc/video_destination_handler.h
@@ -2,20 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CONTENT_RENDERER_MEDIA_VIDEO_DESTINATION_HANDLER_H_
-#define CONTENT_RENDERER_MEDIA_VIDEO_DESTINATION_HANDLER_H_
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_VIDEO_DESTINATION_HANDLER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_VIDEO_DESTINATION_HANDLER_H_
#include <string>
#include <vector>
#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
+#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
-#include "third_party/libjingle/source/talk/media/base/videocapturer.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "media/base/video_frame_pool.h"
namespace content {
-class MediaStreamDependencyFactory;
+class PeerConnectionDependencyFactory;
class MediaStreamRegistryInterface;
class PPB_ImageData_Impl;
@@ -30,57 +31,54 @@ class CONTENT_EXPORT FrameWriterInterface {
virtual ~FrameWriterInterface() {}
};
-// PpFrameWriter implements cricket::VideoCapturer so that it can be used in
-// the native video track's video source. It also implements
+// PpFrameWriter implements MediaStreamVideoSource and can therefore provide
+// video frames to MediaStreamVideoTracks. It also implements
// FrameWriterInterface, which will be used by the effects pepper plugin to
// inject the processed frame.
class CONTENT_EXPORT PpFrameWriter
- : public NON_EXPORTED_BASE(cricket::VideoCapturer),
- public FrameWriterInterface {
+ : NON_EXPORTED_BASE(public MediaStreamVideoSource),
+ public FrameWriterInterface,
+ NON_EXPORTED_BASE(public base::SupportsWeakPtr<PpFrameWriter>) {
public:
PpFrameWriter();
virtual ~PpFrameWriter();
- // cricket::VideoCapturer implementation.
- // These methods are accessed from a libJingle worker thread.
- virtual cricket::CaptureState Start(
- const cricket::VideoFormat& capture_format) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual bool IsRunning() OVERRIDE;
- virtual bool GetPreferredFourccs(std::vector<uint32>* fourccs) OVERRIDE;
- virtual bool GetBestCaptureFormat(const cricket::VideoFormat& desired,
- cricket::VideoFormat* best_format) OVERRIDE;
- virtual bool IsScreencast() const OVERRIDE;
-
// FrameWriterInterface implementation.
// This method will be called by the Pepper host from render thread.
virtual void PutFrame(PPB_ImageData_Impl* image_data,
int64 time_stamp_ns) OVERRIDE;
+ protected:
+ // MediaStreamVideoSource implementation.
+ virtual void GetCurrentSupportedFormats(
+ int max_requested_width,
+ int max_requested_height,
+ const VideoCaptureDeviceFormatsCB& callback) OVERRIDE;
+ virtual void StartSourceImpl(
+ const media::VideoCaptureParams& params,
+ const VideoCaptureDeliverFrameCB& frame_callback) OVERRIDE;
+ virtual void StopSourceImpl() OVERRIDE;
private:
- bool started_;
- // |lock_| is used to protect |started_| which will be accessed from different
- // threads - libjingle worker thread and render thread.
- base::Lock lock_;
+ media::VideoFramePool frame_pool_;
+
+ class FrameWriterDelegate;
+ scoped_refptr<FrameWriterDelegate> delegate_;
DISALLOW_COPY_AND_ASSIGN(PpFrameWriter);
};
-// VideoDestinationHandler is a glue class between the webrtc MediaStream and
+// VideoDestinationHandler is a glue class between the content MediaStream and
// the effects pepper plugin host.
class CONTENT_EXPORT VideoDestinationHandler {
public:
// Instantiates and adds a new video track to the MediaStream specified by
// |url|. Returns a handler for delivering frames to the new video track as
// |frame_writer|.
- // If |factory| is NULL the MediaStreamDependencyFactory owned by
- // RenderThreadImpl::current() will be used.
// If |registry| is NULL the global blink::WebMediaStreamRegistry will be
// used to look up the media stream.
// The caller of the function takes the ownership of |frame_writer|.
// Returns true on success and false on failure.
- static bool Open(MediaStreamDependencyFactory* factory,
- MediaStreamRegistryInterface* registry,
+ static bool Open(MediaStreamRegistryInterface* registry,
const std::string& url,
FrameWriterInterface** frame_writer);
diff --git a/chromium/content/renderer/media/webrtc/video_destination_handler_unittest.cc b/chromium/content/renderer/media/webrtc/video_destination_handler_unittest.cc
new file mode 100644
index 00000000000..49e9cf7ebcd
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/video_destination_handler_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_stream_registry.h"
+#include "content/renderer/media/mock_media_stream_video_sink.h"
+#include "content/renderer/media/webrtc/video_destination_handler.h"
+#include "content/renderer/pepper/pepper_plugin_instance_impl.h"
+#include "content/renderer/pepper/ppb_image_data_impl.h"
+#include "content/test/ppapi_unittest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+
+using ::testing::_;
+
+namespace content {
+
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
+static const std::string kTestStreamUrl = "stream_url";
+static const std::string kUnknownStreamUrl = "unknown_stream_url";
+
+class VideoDestinationHandlerTest : public PpapiUnittest {
+ public:
+ VideoDestinationHandlerTest()
+ : child_process_(new ChildProcess()),
+ registry_(new MockMediaStreamRegistry()) {
+ registry_->Init(kTestStreamUrl);
+ }
+
+ virtual void TearDown() {
+ registry_.reset();
+ PpapiUnittest::TearDown();
+ }
+
+ base::MessageLoop* io_message_loop() const {
+ return child_process_->io_message_loop();
+ }
+
+ protected:
+ scoped_ptr<ChildProcess> child_process_;
+ scoped_ptr<MockMediaStreamRegistry> registry_;
+};
+
+TEST_F(VideoDestinationHandlerTest, Open) {
+ FrameWriterInterface* frame_writer = NULL;
+  // Unknown url will return false.
+ EXPECT_FALSE(VideoDestinationHandler::Open(registry_.get(),
+ kUnknownStreamUrl, &frame_writer));
+ EXPECT_TRUE(VideoDestinationHandler::Open(registry_.get(),
+ kTestStreamUrl, &frame_writer));
+  // The |frame_writer| is a proxy and is owned by whoever calls Open.
+ delete frame_writer;
+}
+
+TEST_F(VideoDestinationHandlerTest, PutFrame) {
+ FrameWriterInterface* frame_writer = NULL;
+ EXPECT_TRUE(VideoDestinationHandler::Open(registry_.get(),
+ kTestStreamUrl, &frame_writer));
+ ASSERT_TRUE(frame_writer);
+
+ // Verify the video track has been added.
+ const blink::WebMediaStream test_stream = registry_->test_stream();
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ test_stream.videoTracks(video_tracks);
+ ASSERT_EQ(1u, video_tracks.size());
+
+ // Verify the native video track has been added.
+ MediaStreamVideoTrack* native_track =
+ MediaStreamVideoTrack::GetVideoTrack(video_tracks[0]);
+ ASSERT_TRUE(native_track != NULL);
+
+ MockMediaStreamVideoSink sink;
+ native_track->AddSink(&sink, sink.GetDeliverFrameCB());
+ scoped_refptr<PPB_ImageData_Impl> image(
+ new PPB_ImageData_Impl(instance()->pp_instance(),
+ PPB_ImageData_Impl::ForTest()));
+ image->Init(PP_IMAGEDATAFORMAT_BGRA_PREMUL, 640, 360, true);
+ {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+
+ EXPECT_CALL(sink, OnVideoFrame()).WillOnce(
+ RunClosure(quit_closure));
+ frame_writer->PutFrame(image, 10);
+ run_loop.Run();
+ }
+ // TODO(perkj): Verify that the track output I420 when
+ // https://codereview.chromium.org/213423006/ is landed.
+ EXPECT_EQ(1, sink.number_of_frames());
+ native_track->RemoveSink(&sink);
+
+  // The |frame_writer| is a proxy and is owned by whoever calls Open.
+ delete frame_writer;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc
new file mode 100644
index 00000000000..593f4d95777
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc
@@ -0,0 +1,40 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "content/renderer/media/webrtc/webrtc_audio_sink_adapter.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+WebRtcAudioSinkAdapter::WebRtcAudioSinkAdapter(
+ webrtc::AudioTrackSinkInterface* sink)
+ : sink_(sink) {
+ DCHECK(sink);
+}
+
+WebRtcAudioSinkAdapter::~WebRtcAudioSinkAdapter() {
+}
+
+bool WebRtcAudioSinkAdapter::IsEqual(
+ const webrtc::AudioTrackSinkInterface* other) const {
+ return (other == sink_);
+}
+
+void WebRtcAudioSinkAdapter::OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames) {
+ sink_->OnData(audio_data, 16, sample_rate, number_of_channels,
+ number_of_frames);
+}
+
+void WebRtcAudioSinkAdapter::OnSetFormat(
+ const media::AudioParameters& params) {
+ // No need to forward the OnSetFormat() callback to
+ // webrtc::AudioTrackSinkInterface sink since the sink will handle the
+ // format change in OnData().
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h
new file mode 100644
index 00000000000..c1be09a3dac
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h
@@ -0,0 +1,45 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_ADAPTER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
+
+namespace webrtc {
+class AudioTrackSinkInterface;
+} // namespace webrtc
+
+namespace content {
+
+// Adapter to the webrtc::AudioTrackSinkInterface of the audio track.
+// This class is used in between the MediaStreamAudioSink and
+// webrtc::AudioTrackSinkInterface. It gets data callback via the
+// MediaStreamAudioSink::OnData() interface and pass the data to
+// webrtc::AudioTrackSinkInterface.
+class WebRtcAudioSinkAdapter : public MediaStreamAudioSink {
+ public:
+ explicit WebRtcAudioSinkAdapter(
+ webrtc::AudioTrackSinkInterface* sink);
+ virtual ~WebRtcAudioSinkAdapter();
+
+ bool IsEqual(const webrtc::AudioTrackSinkInterface* other) const;
+
+ private:
+ // MediaStreamAudioSink implementation.
+ virtual void OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames) OVERRIDE;
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
+
+ webrtc::AudioTrackSinkInterface* const sink_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebRtcAudioSinkAdapter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc
new file mode 100644
index 00000000000..d94edb83d47
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc
@@ -0,0 +1,158 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+
+#include "base/logging.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
+#include "content/renderer/media/webrtc/webrtc_audio_sink_adapter.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+static const char kAudioTrackKind[] = "audio";
+
+scoped_refptr<WebRtcLocalAudioTrackAdapter>
+WebRtcLocalAudioTrackAdapter::Create(
+ const std::string& label,
+ webrtc::AudioSourceInterface* track_source) {
+ talk_base::RefCountedObject<WebRtcLocalAudioTrackAdapter>* adapter =
+ new talk_base::RefCountedObject<WebRtcLocalAudioTrackAdapter>(
+ label, track_source);
+ return adapter;
+}
+
+WebRtcLocalAudioTrackAdapter::WebRtcLocalAudioTrackAdapter(
+ const std::string& label,
+ webrtc::AudioSourceInterface* track_source)
+ : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
+ owner_(NULL),
+ track_source_(track_source),
+ signal_level_(0) {
+}
+
+WebRtcLocalAudioTrackAdapter::~WebRtcLocalAudioTrackAdapter() {
+}
+
+void WebRtcLocalAudioTrackAdapter::Initialize(WebRtcLocalAudioTrack* owner) {
+ DCHECK(!owner_);
+ DCHECK(owner);
+ owner_ = owner;
+}
+
+void WebRtcLocalAudioTrackAdapter::SetAudioProcessor(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+ base::AutoLock auto_lock(lock_);
+ audio_processor_ = processor;
+}
+
+std::string WebRtcLocalAudioTrackAdapter::kind() const {
+ return kAudioTrackKind;
+}
+
+void WebRtcLocalAudioTrackAdapter::AddSink(
+ webrtc::AudioTrackSinkInterface* sink) {
+ DCHECK(sink);
+#ifndef NDEBUG
+ // Verify that |sink| has not been added.
+ for (ScopedVector<WebRtcAudioSinkAdapter>::const_iterator it =
+ sink_adapters_.begin();
+ it != sink_adapters_.end(); ++it) {
+ DCHECK(!(*it)->IsEqual(sink));
+ }
+#endif
+
+ scoped_ptr<WebRtcAudioSinkAdapter> adapter(
+ new WebRtcAudioSinkAdapter(sink));
+ owner_->AddSink(adapter.get());
+ sink_adapters_.push_back(adapter.release());
+}
+
+void WebRtcLocalAudioTrackAdapter::RemoveSink(
+ webrtc::AudioTrackSinkInterface* sink) {
+ DCHECK(sink);
+ for (ScopedVector<WebRtcAudioSinkAdapter>::iterator it =
+ sink_adapters_.begin();
+ it != sink_adapters_.end(); ++it) {
+ if ((*it)->IsEqual(sink)) {
+ owner_->RemoveSink(*it);
+ sink_adapters_.erase(it);
+ return;
+ }
+ }
+}
+
+bool WebRtcLocalAudioTrackAdapter::GetSignalLevel(int* level) {
+ base::AutoLock auto_lock(lock_);
+ // It is required to provide the signal level after audio processing. In
+ // case the audio processing is not enabled for the track, we return
+ // false here in order not to overwrite the value from WebRTC.
+ // TODO(xians): Remove this after we turn on the APM in Chrome by default.
+ // http://crbug/365672 .
+ if (!MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled())
+ return false;
+
+ *level = signal_level_;
+ return true;
+}
+
+talk_base::scoped_refptr<webrtc::AudioProcessorInterface>
+WebRtcLocalAudioTrackAdapter::GetAudioProcessor() {
+ base::AutoLock auto_lock(lock_);
+ return audio_processor_.get();
+}
+
+std::vector<int> WebRtcLocalAudioTrackAdapter::VoeChannels() const {
+ base::AutoLock auto_lock(lock_);
+ return voe_channels_;
+}
+
+void WebRtcLocalAudioTrackAdapter::SetSignalLevel(int signal_level) {
+ base::AutoLock auto_lock(lock_);
+ signal_level_ = signal_level;
+}
+
+void WebRtcLocalAudioTrackAdapter::AddChannel(int channel_id) {
+ DVLOG(1) << "WebRtcLocalAudioTrack::AddChannel(channel_id="
+ << channel_id << ")";
+ base::AutoLock auto_lock(lock_);
+ if (std::find(voe_channels_.begin(), voe_channels_.end(), channel_id) !=
+ voe_channels_.end()) {
+ // We need to handle the case when the same channel is connected to the
+ // track more than once.
+ return;
+ }
+
+ voe_channels_.push_back(channel_id);
+}
+
+void WebRtcLocalAudioTrackAdapter::RemoveChannel(int channel_id) {
+ DVLOG(1) << "WebRtcLocalAudioTrack::RemoveChannel(channel_id="
+ << channel_id << ")";
+ base::AutoLock auto_lock(lock_);
+ std::vector<int>::iterator iter =
+ std::find(voe_channels_.begin(), voe_channels_.end(), channel_id);
+ DCHECK(iter != voe_channels_.end());
+ voe_channels_.erase(iter);
+}
+
+webrtc::AudioSourceInterface* WebRtcLocalAudioTrackAdapter::GetSource() const {
+ return track_source_;
+}
+
+cricket::AudioRenderer* WebRtcLocalAudioTrackAdapter::GetRenderer() {
+  // When the audio track processing is enabled, return NULL so that capture
+ // data goes through Libjingle LocalAudioTrackHandler::LocalAudioSinkAdapter
+ // ==> WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer ==> WebRTC.
+ // When the audio track processing is disabled, WebRtcLocalAudioTrackAdapter
+ // is used to pass the channel ids to WebRtcAudioDeviceImpl, the data flow
+ // becomes WebRtcAudioDeviceImpl ==> WebRTC.
+ // TODO(xians): Only return NULL after the APM in WebRTC is deprecated.
+  // See http://crbug/365672 for details.
+ return MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled()?
+ NULL : this;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h
new file mode 100644
index 00000000000..b35ad4a01ea
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h
@@ -0,0 +1,108 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_LOCAL_AUDIO_TRACK_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_LOCAL_AUDIO_TRACK_ADAPTER_H_
+
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "base/synchronization/lock.h"
+#include "content/common/content_export.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreamtrack.h"
+#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
+
+namespace cricket {
+class AudioRenderer;
+}
+
+namespace webrtc {
+class AudioSourceInterface;
+class AudioProcessorInterface;
+}
+
+namespace content {
+
+class MediaStreamAudioProcessor;
+class WebRtcAudioSinkAdapter;
+class WebRtcLocalAudioTrack;
+
+class CONTENT_EXPORT WebRtcLocalAudioTrackAdapter
+ : NON_EXPORTED_BASE(public cricket::AudioRenderer),
+ NON_EXPORTED_BASE(
+ public webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>) {
+ public:
+ static scoped_refptr<WebRtcLocalAudioTrackAdapter> Create(
+ const std::string& label,
+ webrtc::AudioSourceInterface* track_source);
+
+ WebRtcLocalAudioTrackAdapter(
+ const std::string& label,
+ webrtc::AudioSourceInterface* track_source);
+
+ virtual ~WebRtcLocalAudioTrackAdapter();
+
+ void Initialize(WebRtcLocalAudioTrack* owner);
+
+ std::vector<int> VoeChannels() const;
+
+ // Called on the audio thread by the WebRtcLocalAudioTrack to set the signal
+ // level of the audio data.
+ void SetSignalLevel(int signal_level);
+
+ // Method called by the WebRtcLocalAudioTrack to set the processor that
+ // applies signal processing on the data of the track.
+ // This class will keep a reference of the |processor|.
+ // Called on the main render thread.
+ void SetAudioProcessor(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor);
+
+ private:
+ // webrtc::MediaStreamTrack implementation.
+ virtual std::string kind() const OVERRIDE;
+
+ // webrtc::AudioTrackInterface implementation.
+ virtual void AddSink(webrtc::AudioTrackSinkInterface* sink) OVERRIDE;
+ virtual void RemoveSink(webrtc::AudioTrackSinkInterface* sink) OVERRIDE;
+ virtual bool GetSignalLevel(int* level) OVERRIDE;
+ virtual talk_base::scoped_refptr<webrtc::AudioProcessorInterface>
+ GetAudioProcessor() OVERRIDE;
+
+ // cricket::AudioCapturer implementation.
+ virtual void AddChannel(int channel_id) OVERRIDE;
+ virtual void RemoveChannel(int channel_id) OVERRIDE;
+
+ // webrtc::AudioTrackInterface implementation.
+ virtual webrtc::AudioSourceInterface* GetSource() const OVERRIDE;
+ virtual cricket::AudioRenderer* GetRenderer() OVERRIDE;
+
+ // Weak reference.
+ WebRtcLocalAudioTrack* owner_;
+
+ // The source of the audio track which handles the audio constraints.
+ // TODO(xians): merge |track_source_| to |capturer_| in WebRtcLocalAudioTrack.
+ talk_base::scoped_refptr<webrtc::AudioSourceInterface> track_source_;
+
+  // The audio processor that applies audio processing on the data of the
+  // audio track.
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
+
+ // A vector of WebRtc VoE channels that the capturer sends data to.
+ std::vector<int> voe_channels_;
+
+ // A vector of the peer connection sink adapters which receive the audio data
+ // from the audio track.
+ ScopedVector<WebRtcAudioSinkAdapter> sink_adapters_;
+
+ // The amplitude of the signal.
+ int signal_level_;
+
+ // Protects |voe_channels_|, |audio_processor_| and |signal_level_|.
+ mutable base::Lock lock_;
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_LOCAL_AUDIO_TRACK_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc
new file mode 100644
index 00000000000..693067ac939
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc
@@ -0,0 +1,99 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "content/public/common/content_switches.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+
+namespace content {
+
+namespace {
+
+class MockWebRtcAudioSink : public webrtc::AudioTrackSinkInterface {
+ public:
+ MockWebRtcAudioSink() {}
+ ~MockWebRtcAudioSink() {}
+ MOCK_METHOD5(OnData, void(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames));
+};
+
+} // namespace
+
+class WebRtcLocalAudioTrackAdapterTest : public ::testing::Test {
+ public:
+ WebRtcLocalAudioTrackAdapterTest()
+ : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480),
+ adapter_(WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)) {
+ MockMediaConstraintFactory constraint_factory;
+ capturer_ = WebRtcAudioCapturer::CreateCapturer(
+ -1, StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE, "", ""),
+ constraint_factory.CreateWebMediaConstraints(), NULL, NULL);
+ track_.reset(new WebRtcLocalAudioTrack(adapter_, capturer_, NULL));
+ }
+
+ protected:
+ virtual void SetUp() OVERRIDE {
+ track_->OnSetFormat(params_);
+ EXPECT_TRUE(track_->GetAudioAdapter()->enabled());
+ }
+
+ media::AudioParameters params_;
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_;
+ scoped_refptr<WebRtcAudioCapturer> capturer_;
+ scoped_ptr<WebRtcLocalAudioTrack> track_;
+};
+
+// Adds and Removes a WebRtcAudioSink to a local audio track.
+TEST_F(WebRtcLocalAudioTrackAdapterTest, AddAndRemoveSink) {
+ // Add a sink to the webrtc track.
+ scoped_ptr<MockWebRtcAudioSink> sink(new MockWebRtcAudioSink());
+ webrtc::AudioTrackInterface* webrtc_track =
+ static_cast<webrtc::AudioTrackInterface*>(adapter_.get());
+ webrtc_track->AddSink(sink.get());
+
+  // Send a packet via |track_| and its data should reach the sink of the
+ // |adapter_|.
+ const int length = params_.frames_per_buffer() * params_.channels();
+ scoped_ptr<int16[]> data(new int16[length]);
+ // Initialize the data to 0 to avoid Memcheck:Uninitialized warning.
+ memset(data.get(), 0, length * sizeof(data[0]));
+
+ EXPECT_CALL(*sink,
+ OnData(_, 16, params_.sample_rate(), params_.channels(),
+ params_.frames_per_buffer()));
+ track_->Capture(data.get(), base::TimeDelta(), 255, false, false);
+
+ // Remove the sink from the webrtc track.
+ webrtc_track->RemoveSink(sink.get());
+ sink.reset();
+
+ // Verify that no more callback gets into the sink.
+ track_->Capture(data.get(), base::TimeDelta(), 255, false, false);
+}
+
+TEST_F(WebRtcLocalAudioTrackAdapterTest, GetSignalLevel) {
+ webrtc::AudioTrackInterface* webrtc_track =
+ static_cast<webrtc::AudioTrackInterface*>(adapter_.get());
+ int signal_level = 0;
+ EXPECT_TRUE(webrtc_track->GetSignalLevel(&signal_level));
+
+ // Disable the audio processing in the audio track.
+ CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kDisableAudioTrackProcessing);
+ EXPECT_FALSE(webrtc_track->GetSignalLevel(&signal_level));
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc
new file mode 100644
index 00000000000..0751238daf3
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/webrtc_media_stream_adapter.h"
+
+#include "base/logging.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/media_stream_track.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/WebKit/public/platform/WebString.h"
+
+namespace content {
+
+WebRtcMediaStreamAdapter::WebRtcMediaStreamAdapter(
+ const blink::WebMediaStream& web_stream,
+ PeerConnectionDependencyFactory* factory)
+ : web_stream_(web_stream),
+ factory_(factory) {
+ webrtc_media_stream_ =
+ factory_->CreateLocalMediaStream(web_stream.id().utf8());
+
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ web_stream_.audioTracks(audio_tracks);
+ for (size_t i = 0; i < audio_tracks.size(); ++i)
+ CreateAudioTrack(audio_tracks[i]);
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ web_stream_.videoTracks(video_tracks);
+ for (size_t i = 0; i < video_tracks.size(); ++i)
+ CreateVideoTrack(video_tracks[i]);
+
+ MediaStream* native_stream = MediaStream::GetMediaStream(web_stream_);
+ native_stream->AddObserver(this);
+}
+
+WebRtcMediaStreamAdapter::~WebRtcMediaStreamAdapter() {
+ MediaStream* native_stream = MediaStream::GetMediaStream(web_stream_);
+ native_stream->RemoveObserver(this);
+}
+
+void WebRtcMediaStreamAdapter::TrackAdded(
+ const blink::WebMediaStreamTrack& track) {
+ if (track.source().type() == blink::WebMediaStreamSource::TypeAudio) {
+ CreateAudioTrack(track);
+ } else {
+ CreateVideoTrack(track);
+ }
+}
+
+void WebRtcMediaStreamAdapter::TrackRemoved(
+ const blink::WebMediaStreamTrack& track) {
+ const std::string track_id = track.id().utf8();
+ if (track.source().type() == blink::WebMediaStreamSource::TypeAudio) {
+ webrtc_media_stream_->RemoveTrack(
+ webrtc_media_stream_->FindAudioTrack(track_id));
+ } else {
+ DCHECK_EQ(track.source().type(), blink::WebMediaStreamSource::TypeVideo);
+ scoped_refptr<webrtc::VideoTrackInterface> webrtc_track =
+ webrtc_media_stream_->FindVideoTrack(track_id).get();
+ webrtc_media_stream_->RemoveTrack(webrtc_track.get());
+
+ for (ScopedVector<WebRtcVideoTrackAdapter>::iterator it =
+ video_adapters_.begin(); it != video_adapters_.end(); ++it) {
+ if ((*it)->webrtc_video_track() == webrtc_track) {
+ video_adapters_.erase(it);
+ break;
+ }
+ }
+ }
+}
+
+void WebRtcMediaStreamAdapter::CreateAudioTrack(
+ const blink::WebMediaStreamTrack& track) {
+ DCHECK_EQ(track.source().type(), blink::WebMediaStreamSource::TypeAudio);
+ // A media stream is connected to a peer connection, enable the
+ // peer connection mode for the sources.
+ MediaStreamTrack* native_track = MediaStreamTrack::GetTrack(track);
+ if (!native_track || !native_track->is_local_track()) {
+ // We don't support connecting remote audio tracks to PeerConnection yet.
+ // See issue http://crbug/344303.
+ // TODO(xians): Remove this after we support connecting remote audio track
+ // to PeerConnection.
+ DLOG(ERROR) << "webrtc audio track can not be created from a remote audio"
+ << " track.";
+ NOTIMPLEMENTED();
+ return;
+ }
+
+ // This is a local audio track.
+ const blink::WebMediaStreamSource& source = track.source();
+ MediaStreamAudioSource* audio_source =
+ static_cast<MediaStreamAudioSource*>(source.extraData());
+ if (audio_source && audio_source->GetAudioCapturer())
+ audio_source->GetAudioCapturer()->EnablePeerConnectionMode();
+
+ webrtc_media_stream_->AddTrack(native_track->GetAudioAdapter());
+}
+
+void WebRtcMediaStreamAdapter::CreateVideoTrack(
+ const blink::WebMediaStreamTrack& track) {
+ DCHECK_EQ(track.source().type(), blink::WebMediaStreamSource::TypeVideo);
+ WebRtcVideoTrackAdapter* adapter =
+ new WebRtcVideoTrackAdapter(track, factory_);
+ video_adapters_.push_back(adapter);
+ webrtc_media_stream_->AddTrack(adapter->webrtc_video_track());
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h
new file mode 100644
index 00000000000..d5983224ed3
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h
@@ -0,0 +1,67 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_MEDIA_STREAM_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_MEDIA_STREAM_ADAPTER_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "content/common/content_export.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/webrtc/webrtc_video_track_adapter.h"
+#include "third_party/WebKit/public/platform/WebMediaStream.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+namespace content {
+
+class PeerConnectionDependencyFactory;
+
+// WebRtcMediaStreamAdapter is an adapter between a blink::WebMediaStream
+// object and a webrtc MediaStream that is currently sent on a PeerConnection.
+// The responsibility of the class is to create and own a representation of a
+// webrtc MediaStream that can be added to and removed from an
+// RTCPeerConnection. An instance of WebRtcMediaStreamAdapter is created when
+// a MediaStream is added to an RTCPeerConnection object.
+// Instances of this class are owned by the RTCPeerConnectionHandler object
+// that created it.
+class CONTENT_EXPORT WebRtcMediaStreamAdapter
+ : NON_EXPORTED_BASE(public MediaStreamObserver) {
+ public:
+ WebRtcMediaStreamAdapter(const blink::WebMediaStream& web_stream,
+ PeerConnectionDependencyFactory* factory);
+ virtual ~WebRtcMediaStreamAdapter();
+
+ bool IsEqual(const blink::WebMediaStream& web_stream) {
+ return web_stream_.extraData() == web_stream.extraData();
+ }
+
+ webrtc::MediaStreamInterface* webrtc_media_stream() {
+ return webrtc_media_stream_.get();
+ }
+
+ protected:
+ // MediaStreamObserver implementation.
+ virtual void TrackAdded(const blink::WebMediaStreamTrack& track) OVERRIDE;
+ virtual void TrackRemoved(const blink::WebMediaStreamTrack& track) OVERRIDE;
+
+ private:
+ void CreateAudioTrack(const blink::WebMediaStreamTrack& track);
+ void CreateVideoTrack(const blink::WebMediaStreamTrack& track);
+
+ blink::WebMediaStream web_stream_;
+
+ // Pointer to a PeerConnectionDependencyFactory, owned by the RenderThread.
+ // It's valid for the lifetime of RenderThread.
+ PeerConnectionDependencyFactory* factory_;
+
+ scoped_refptr<webrtc::MediaStreamInterface> webrtc_media_stream_;
+ ScopedVector<WebRtcVideoTrackAdapter> video_adapters_;
+
+ DISALLOW_COPY_AND_ASSIGN (WebRtcMediaStreamAdapter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_MEDIA_STREAM_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc
new file mode 100644
index 00000000000..a5eced1d87a
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc
@@ -0,0 +1,155 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "content/child/child_process.h"
+#include "content/renderer/media/media_stream.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_media_stream_video_source.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc/webrtc_media_stream_adapter.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaStream.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/WebKit/public/platform/WebVector.h"
+
+namespace content {
+
+class WebRtcMediaStreamAdapterTest : public ::testing::Test {
+ public:
+ virtual void SetUp() {
+ child_process_.reset(new ChildProcess());
+ dependency_factory_.reset(new MockPeerConnectionDependencyFactory());
+ }
+
+ blink::WebMediaStream CreateBlinkMediaStream(bool audio, bool video) {
+ blink::WebVector<blink::WebMediaStreamTrack> audio_track_vector(
+ audio ? static_cast<size_t>(1) : 0);
+ if (audio) {
+ blink::WebMediaStreamSource audio_source;
+ audio_source.initialize("audio",
+ blink::WebMediaStreamSource::TypeAudio,
+ "audio");
+ audio_source.setExtraData(new MediaStreamAudioSource());
+
+ audio_track_vector[0].initialize(audio_source);
+ MediaStreamTrack* native_track =
+ new MediaStreamTrack(
+ WebRtcLocalAudioTrackAdapter::Create(
+ audio_track_vector[0].id().utf8(), NULL),
+ true);
+ audio_track_vector[0].setExtraData(native_track);
+ }
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_track_vector(
+ video ? static_cast<size_t>(1) : 0);
+ MediaStreamSource::SourceStoppedCallback dummy_callback;
+ if (video) {
+ blink::WebMediaStreamSource video_source;
+ video_source.initialize("video",
+ blink::WebMediaStreamSource::TypeVideo,
+ "video");
+ MediaStreamVideoSource* native_source =
+ new MockMediaStreamVideoSource(false);
+ video_source.setExtraData(native_source);
+ blink::WebMediaConstraints constraints;
+ constraints.initialize();
+ video_track_vector[0] = MediaStreamVideoTrack::CreateVideoTrack(
+ native_source, constraints,
+ MediaStreamVideoSource::ConstraintsCallback(), true);
+ }
+
+ blink::WebMediaStream stream_desc;
+ stream_desc.initialize("media stream",
+ audio_track_vector,
+ video_track_vector);
+ stream_desc.setExtraData(new MediaStream(stream_desc));
+ return stream_desc;
+ }
+
+ void CreateWebRtcMediaStream(const blink::WebMediaStream& blink_stream,
+ size_t expected_number_of_audio_tracks,
+ size_t expected_number_of_video_tracks) {
+ adapter_.reset(new WebRtcMediaStreamAdapter(
+ blink_stream, dependency_factory_.get()));
+
+ EXPECT_EQ(expected_number_of_audio_tracks,
+ adapter_->webrtc_media_stream()->GetAudioTracks().size());
+ EXPECT_EQ(expected_number_of_video_tracks,
+ adapter_->webrtc_media_stream()->GetVideoTracks().size());
+ EXPECT_EQ(blink_stream.id().utf8(),
+ adapter_->webrtc_media_stream()->label());
+ }
+
+ webrtc::MediaStreamInterface* webrtc_stream() {
+ return adapter_->webrtc_media_stream();
+ }
+
+ protected:
+ base::MessageLoop message_loop_;
+ scoped_ptr<ChildProcess> child_process_;
+ scoped_ptr<MockPeerConnectionDependencyFactory> dependency_factory_;
+ scoped_ptr<WebRtcMediaStreamAdapter> adapter_;
+};
+
+TEST_F(WebRtcMediaStreamAdapterTest, CreateWebRtcMediaStream) {
+ blink::WebMediaStream blink_stream = CreateBlinkMediaStream(true, true);
+ CreateWebRtcMediaStream(blink_stream, 1, 1);
+}
+
+// Test that we don't crash if a MediaStream is created in Blink with an
+// unknown audio source. This can happen if a MediaStream is created with a
+// remote audio track.
+TEST_F(WebRtcMediaStreamAdapterTest,
+ CreateWebRtcMediaStreamWithoutAudioSource) {
+ // Create a blink MediaStream description.
+ blink::WebMediaStreamSource audio_source;
+ audio_source.initialize("audio source",
+ blink::WebMediaStreamSource::TypeAudio,
+ "something");
+
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks(
+ static_cast<size_t>(1));
+ audio_tracks[0].initialize(audio_source.id(), audio_source);
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks(
+ static_cast<size_t>(0));
+
+ blink::WebMediaStream blink_stream;
+ blink_stream.initialize("new stream", audio_tracks, video_tracks);
+ blink_stream.setExtraData(
+ new content::MediaStream(blink_stream));
+ CreateWebRtcMediaStream(blink_stream, 0, 0);
+}
+
+TEST_F(WebRtcMediaStreamAdapterTest, RemoveAndAddTrack) {
+ blink::WebMediaStream blink_stream = CreateBlinkMediaStream(true, true);
+ CreateWebRtcMediaStream(blink_stream, 1, 1);
+
+ MediaStream* native_stream = MediaStream::GetMediaStream(blink_stream);
+
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ blink_stream.audioTracks(audio_tracks);
+
+ native_stream->RemoveTrack(audio_tracks[0]);
+ EXPECT_TRUE(webrtc_stream()->GetAudioTracks().empty());
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ blink_stream.videoTracks(video_tracks);
+
+ native_stream->RemoveTrack(video_tracks[0]);
+ EXPECT_TRUE(webrtc_stream()->GetVideoTracks().empty());
+
+ native_stream->AddTrack(audio_tracks[0]);
+ EXPECT_EQ(1u, webrtc_stream()->GetAudioTracks().size());
+
+ native_stream->AddTrack(video_tracks[0]);
+ EXPECT_EQ(1u, webrtc_stream()->GetVideoTracks().size());
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc
new file mode 100644
index 00000000000..603edb3780a
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc
@@ -0,0 +1,193 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/memory/aligned_memory.h"
+#include "media/base/video_frame.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+namespace content {
+
+WebRtcVideoCapturerAdapter::WebRtcVideoCapturerAdapter(bool is_screencast)
+ : is_screencast_(is_screencast),
+ running_(false),
+ buffer_(NULL),
+ buffer_size_(0) {
+ thread_checker_.DetachFromThread();
+}
+
+WebRtcVideoCapturerAdapter::~WebRtcVideoCapturerAdapter() {
+ DVLOG(3) << " WebRtcVideoCapturerAdapter::dtor";
+ base::AlignedFree(buffer_);
+}
+
+cricket::CaptureState WebRtcVideoCapturerAdapter::Start(
+ const cricket::VideoFormat& capture_format) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!running_);
+ DVLOG(3) << " WebRtcVideoCapturerAdapter::Start w = " << capture_format.width
+ << " h = " << capture_format.height;
+
+ running_ = true;
+ return cricket::CS_RUNNING;
+}
+
+void WebRtcVideoCapturerAdapter::Stop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(3) << " WebRtcVideoCapturerAdapter::Stop ";
+ DCHECK(running_);
+ running_ = false;
+ SetCaptureFormat(NULL);
+ SignalStateChange(this, cricket::CS_STOPPED);
+}
+
+bool WebRtcVideoCapturerAdapter::IsRunning() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return running_;
+}
+
+bool WebRtcVideoCapturerAdapter::GetPreferredFourccs(
+ std::vector<uint32>* fourccs) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!fourccs)
+ return false;
+ fourccs->push_back(cricket::FOURCC_I420);
+ return true;
+}
+
+bool WebRtcVideoCapturerAdapter::IsScreencast() const {
+ return is_screencast_;
+}
+
+bool WebRtcVideoCapturerAdapter::GetBestCaptureFormat(
+ const cricket::VideoFormat& desired,
+ cricket::VideoFormat* best_format) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(3) << " GetBestCaptureFormat:: "
+ << " w = " << desired.width
+ << " h = " << desired.height;
+
+ // Capability enumeration is done in MediaStreamVideoSource. The adapter can
+ // just use what is provided.
+ // Use the desired format as the best format.
+ best_format->width = desired.width;
+ best_format->height = desired.height;
+ best_format->fourcc = cricket::FOURCC_I420;
+ best_format->interval = desired.interval;
+ return true;
+}
+
+void WebRtcVideoCapturerAdapter::OnFrameCaptured(
+ const scoped_refptr<media::VideoFrame>& frame) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ TRACE_EVENT0("video", "WebRtcVideoCapturerAdapter::OnFrameCaptured");
+ if (!(media::VideoFrame::I420 == frame->format() ||
+ media::VideoFrame::YV12 == frame->format())) {
+ // Some types of sources support textures as output. Since connecting
+ // sources and sinks do not check the format, we need to just ignore
+ // formats that we can not handle.
+ NOTREACHED();
+ return;
+ }
+
+ if (first_frame_timestamp_ == media::kNoTimestamp())
+ first_frame_timestamp_ = frame->timestamp();
+
+ cricket::CapturedFrame captured_frame;
+ captured_frame.width = frame->natural_size().width();
+ captured_frame.height = frame->natural_size().height();
+ // cricket::CapturedFrame time is in nanoseconds.
+ captured_frame.elapsed_time =
+ (frame->timestamp() - first_frame_timestamp_).InMicroseconds() *
+ base::Time::kNanosecondsPerMicrosecond;
+ captured_frame.time_stamp = frame->timestamp().InMicroseconds() *
+ base::Time::kNanosecondsPerMicrosecond;
+ captured_frame.pixel_height = 1;
+ captured_frame.pixel_width = 1;
+
+ // TODO(perkj):
+ // Libjingle expects contiguous layout of image planes as input.
+ // The only format where that is true in Chrome is I420 where the
+ // coded_size == natural_size().
+ if (frame->format() != media::VideoFrame::I420 ||
+ frame->coded_size() != frame->natural_size()) {
+ // Cropping / Scaling and or switching UV planes is needed.
+ UpdateI420Buffer(frame);
+ captured_frame.data = buffer_;
+ captured_frame.data_size = buffer_size_;
+ captured_frame.fourcc = cricket::FOURCC_I420;
+ } else {
+ captured_frame.fourcc = media::VideoFrame::I420 == frame->format() ?
+ cricket::FOURCC_I420 : cricket::FOURCC_YV12;
+ captured_frame.data = frame->data(0);
+ captured_frame.data_size =
+ media::VideoFrame::AllocationSize(frame->format(), frame->coded_size());
+ }
+
+ // This signals to libJingle that a new VideoFrame is available.
+ // libJingle have no assumptions on what thread this signal come from.
+ SignalFrameCaptured(this, &captured_frame);
+}
+
+void WebRtcVideoCapturerAdapter::UpdateI420Buffer(
+ const scoped_refptr<media::VideoFrame>& src) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const int dst_width = src->natural_size().width();
+ const int dst_height = src->natural_size().height();
+ DCHECK(src->visible_rect().width() >= dst_width &&
+ src->visible_rect().height() >= dst_height);
+
+ const gfx::Rect& visible_rect = src->visible_rect();
+
+ const uint8* src_y = src->data(media::VideoFrame::kYPlane) +
+ visible_rect.y() * src->stride(media::VideoFrame::kYPlane) +
+ visible_rect.x();
+ const uint8* src_u = src->data(media::VideoFrame::kUPlane) +
+ visible_rect.y() / 2 * src->stride(media::VideoFrame::kUPlane) +
+ visible_rect.x() / 2;
+ const uint8* src_v = src->data(media::VideoFrame::kVPlane) +
+ visible_rect.y() / 2 * src->stride(media::VideoFrame::kVPlane) +
+ visible_rect.x() / 2;
+
+ const size_t dst_size =
+ media::VideoFrame::AllocationSize(src->format(), src->natural_size());
+
+ if (dst_size != buffer_size_) {
+ base::AlignedFree(buffer_);
+ buffer_ = reinterpret_cast<uint8*>(
+ base::AlignedAlloc(dst_size + media::VideoFrame::kFrameSizePadding,
+ media::VideoFrame::kFrameAddressAlignment));
+ buffer_size_ = dst_size;
+ }
+
+ uint8* dst_y = buffer_;
+ const int dst_stride_y = dst_width;
+ uint8* dst_u = dst_y + dst_width * dst_height;
+ const int dst_halfwidth = (dst_width + 1) / 2;
+ const int dst_halfheight = (dst_height + 1) / 2;
+ uint8* dst_v = dst_u + dst_halfwidth * dst_halfheight;
+
+ libyuv::I420Scale(src_y,
+ src->stride(media::VideoFrame::kYPlane),
+ src_u,
+ src->stride(media::VideoFrame::kUPlane),
+ src_v,
+ src->stride(media::VideoFrame::kVPlane),
+ visible_rect.width(),
+ visible_rect.height(),
+ dst_y,
+ dst_stride_y,
+ dst_u,
+ dst_halfwidth,
+ dst_v,
+ dst_halfwidth,
+ dst_width,
+ dst_height,
+ libyuv::kFilterBilinear);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.h
new file mode 100644
index 00000000000..0f38f6b9ed7
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter.h
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_CAPTURER_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_CAPTURER_ADAPTER_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/threading/thread_checker.h"
+#include "content/common/content_export.h"
+#include "media/base/video_frame.h"
+#include "media/video/capture/video_capture_types.h"
+#include "third_party/libjingle/source/talk/media/base/videocapturer.h"
+
+namespace content {
+
+// WebRtcVideoCapturerAdapter implements a simple cricket::VideoCapturer that is
+// used for VideoCapturing in libJingle and especially in PeerConnections.
+// The class is created and destroyed on the main render thread.
+// PeerConnection accesses cricket::VideoCapturer from a libJingle worker thread.
+// An instance of WebRtcVideoCapturerAdapter is owned by an instance of
+// webrtc::VideoSourceInterface in libJingle. The implementation of
+// webrtc::VideoSourceInterface guarantees that this object is not deleted
+// while it is still used in libJingle.
+class CONTENT_EXPORT WebRtcVideoCapturerAdapter
+ : NON_EXPORTED_BASE(public cricket::VideoCapturer) {
+ public:
+ explicit WebRtcVideoCapturerAdapter(bool is_screencast);
+ virtual ~WebRtcVideoCapturerAdapter();
+
+ // OnFrameCaptured delivers video frames to libjingle. It must be called on
+ // libjingle's worker thread.
+ // This method is virtual for testing purposes.
+ virtual void OnFrameCaptured(const scoped_refptr<media::VideoFrame>& frame);
+
+ private:
+ // cricket::VideoCapturer implementation.
+ // These methods are accessed from a libJingle worker thread.
+ virtual cricket::CaptureState Start(
+ const cricket::VideoFormat& capture_format) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual bool IsRunning() OVERRIDE;
+ virtual bool GetPreferredFourccs(std::vector<uint32>* fourccs) OVERRIDE;
+ virtual bool GetBestCaptureFormat(const cricket::VideoFormat& desired,
+ cricket::VideoFormat* best_format) OVERRIDE;
+ virtual bool IsScreencast() const OVERRIDE;
+
+ void UpdateI420Buffer(const scoped_refptr<media::VideoFrame>& src);
+
+ // |thread_checker_| is bound to the libjingle worker thread.
+ base::ThreadChecker thread_checker_;
+
+ const bool is_screencast_;
+ bool running_;
+ base::TimeDelta first_frame_timestamp_;
+ // |buffer_| used if cropping is needed. It is created only if needed and
+ // owned by WebRtcVideoCapturerAdapter. If it's created, it exists until
+ // WebRtcVideoCapturerAdapter is destroyed.
+ uint8* buffer_;
+ size_t buffer_size_;
+ scoped_ptr<cricket::CapturedFrame> captured_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebRtcVideoCapturerAdapter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_CAPTURER_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter_unittest.cc b/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter_unittest.cc
new file mode 100644
index 00000000000..250124d0b82
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_video_capturer_adapter_unittest.cc
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
+#include "media/base/video_frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+class WebRtcVideoCapturerAdapterTest
+ : public sigslot::has_slots<>,
+ public ::testing::Test {
+ public:
+ WebRtcVideoCapturerAdapterTest()
+ : adapter_(false),
+ output_frame_width_(0),
+ output_frame_height_(0) {
+ adapter_.SignalFrameCaptured.connect(
+ this, &WebRtcVideoCapturerAdapterTest::OnFrameCaptured);
+ }
+ virtual ~WebRtcVideoCapturerAdapterTest() {}
+
+ void TestSourceCropFrame(int capture_width,
+ int capture_height,
+ int cropped_width,
+ int cropped_height,
+ int natural_width,
+ int natural_height) {
+ const int horiz_crop = ((capture_width - cropped_width) / 2);
+ const int vert_crop = ((capture_height - cropped_height) / 2);
+
+ gfx::Size coded_size(capture_width, capture_height);
+ gfx::Size natural_size(natural_width, natural_height);
+ gfx::Rect view_rect(horiz_crop, vert_crop, cropped_width, cropped_height);
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::CreateFrame(media::VideoFrame::I420,
+ coded_size, view_rect, natural_size,
+ base::TimeDelta());
+ adapter_.OnFrameCaptured(frame);
+ EXPECT_EQ(natural_width, output_frame_width_);
+ EXPECT_EQ(natural_height, output_frame_height_);
+ }
+ protected:
+ void OnFrameCaptured(cricket::VideoCapturer* capturer,
+ const cricket::CapturedFrame* frame) {
+ output_frame_width_ = frame->width;
+ output_frame_height_ = frame->height;
+ }
+
+ private:
+ WebRtcVideoCapturerAdapter adapter_;
+ int output_frame_width_;
+ int output_frame_height_;
+};
+
+TEST_F(WebRtcVideoCapturerAdapterTest, CropFrameTo640360) {
+ TestSourceCropFrame(640, 480, 640, 360, 640, 360);
+}
+
+TEST_F(WebRtcVideoCapturerAdapterTest, CropFrameTo320320) {
+ TestSourceCropFrame(640, 480, 480, 480, 320, 320);
+}
+
+TEST_F(WebRtcVideoCapturerAdapterTest, Scale720To640360) {
+ TestSourceCropFrame(1280, 720, 1280, 720, 640, 360);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.cc
deleted file mode 100644
index 652dec0c846..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/webrtc/webrtc_video_sink_adapter.h"
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "content/renderer/media/native_handle_impl.h"
-#include "media/base/video_frame.h"
-#include "media/base/video_util.h"
-#include "third_party/libjingle/source/talk/media/base/videoframe.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-#include "ui/gfx/size.h"
-
-using media::CopyYPlane;
-using media::CopyUPlane;
-using media::CopyVPlane;
-
-namespace content {
-
-WebRtcVideoSinkAdapter::WebRtcVideoSinkAdapter(
- webrtc::VideoTrackInterface* video_track,
- MediaStreamVideoSink* sink)
- : message_loop_proxy_(base::MessageLoopProxy::current()),
- sink_(sink),
- video_track_(video_track),
- state_(video_track->state()),
- track_enabled_(video_track->enabled()) {
- DCHECK(sink);
- video_track_->AddRenderer(this);
- video_track_->RegisterObserver(this);
- DVLOG(1) << "WebRtcVideoSinkAdapter";
-}
-
-WebRtcVideoSinkAdapter::~WebRtcVideoSinkAdapter() {
- video_track_->RemoveRenderer(this);
- video_track_->UnregisterObserver(this);
- DVLOG(1) << "~WebRtcVideoSinkAdapter";
-}
-
-void WebRtcVideoSinkAdapter::SetSize(int width, int height) {
- NOTIMPLEMENTED();
-}
-
-void WebRtcVideoSinkAdapter::RenderFrame(const cricket::VideoFrame* frame) {
- base::TimeDelta timestamp = base::TimeDelta::FromMilliseconds(
- frame->GetTimeStamp() / talk_base::kNumNanosecsPerMillisec);
-
- scoped_refptr<media::VideoFrame> video_frame;
- if (frame->GetNativeHandle() != NULL) {
- NativeHandleImpl* handle =
- static_cast<NativeHandleImpl*>(frame->GetNativeHandle());
- video_frame = static_cast<media::VideoFrame*>(handle->GetHandle());
- video_frame->SetTimestamp(timestamp);
- } else {
- gfx::Size size(frame->GetWidth(), frame->GetHeight());
- video_frame = media::VideoFrame::CreateFrame(
- media::VideoFrame::YV12, size, gfx::Rect(size), size, timestamp);
-
- // Aspect ratio unsupported; DCHECK when there are non-square pixels.
- DCHECK_EQ(frame->GetPixelWidth(), 1u);
- DCHECK_EQ(frame->GetPixelHeight(), 1u);
-
- int y_rows = frame->GetHeight();
- int uv_rows = frame->GetHeight() / 2; // YV12 format.
- CopyYPlane(
- frame->GetYPlane(), frame->GetYPitch(), y_rows, video_frame.get());
- CopyUPlane(
- frame->GetUPlane(), frame->GetUPitch(), uv_rows, video_frame.get());
- CopyVPlane(
- frame->GetVPlane(), frame->GetVPitch(), uv_rows, video_frame.get());
- }
-
- message_loop_proxy_->PostTask(
- FROM_HERE, base::Bind(&WebRtcVideoSinkAdapter::DoRenderFrameOnMainThread,
- AsWeakPtr(), video_frame));
-}
-
-void WebRtcVideoSinkAdapter::OnChanged() {
- DCHECK(message_loop_proxy_->BelongsToCurrentThread());
-
- // TODO(perkj): OnChanged belongs to the base class of WebRtcVideoSinkAdapter
- // common for both webrtc audio and video.
- webrtc::MediaStreamTrackInterface::TrackState state = video_track_->state();
- if (state != state_) {
- state_ = state;
- switch (state) {
- case webrtc::MediaStreamTrackInterface::kInitializing:
- // Ignore the kInitializing state since there is no match in
- // WebMediaStreamSource::ReadyState.
- break;
- case webrtc::MediaStreamTrackInterface::kLive:
- sink_->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateLive);
- break;
- case webrtc::MediaStreamTrackInterface::kEnded:
- sink_->OnReadyStateChanged(
- blink::WebMediaStreamSource::ReadyStateEnded);
- break;
- default:
- NOTREACHED();
- break;
- }
- }
- if (track_enabled_ != video_track_->enabled()) {
- track_enabled_ = video_track_->enabled();
- sink_->OnEnabledChanged(track_enabled_);
- }
-}
-
-void WebRtcVideoSinkAdapter::DoRenderFrameOnMainThread(
- scoped_refptr<media::VideoFrame> video_frame) {
- DCHECK(message_loop_proxy_->BelongsToCurrentThread());
- sink_->OnVideoFrame(video_frame);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.h
deleted file mode 100644
index 2f75a95ef35..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_video_sink_adapter.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_SINK_ADAPTER_H_
-#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_SINK_ADAPTER_H_
-
-#include "base/memory/weak_ptr.h"
-#include "content/common/content_export.h"
-#include "content/public/renderer/media_stream_video_sink.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace content {
-
-// WebRtcVideoSinkAdapter acts as the middle man between a
-// webrtc:::VideoTrackInterface and a content::MediaStreamVideoSink.
-// It is responsible for translating video data from a libjingle video type
-// to a chrome video type.
-class CONTENT_EXPORT WebRtcVideoSinkAdapter
- : NON_EXPORTED_BASE(public webrtc::VideoRendererInterface),
- NON_EXPORTED_BASE(public webrtc::ObserverInterface),
- public base::SupportsWeakPtr<WebRtcVideoSinkAdapter> {
- public:
- WebRtcVideoSinkAdapter(webrtc::VideoTrackInterface* video_track,
- MediaStreamVideoSink* sink);
- virtual ~WebRtcVideoSinkAdapter();
-
- MediaStreamVideoSink* sink() const { return sink_; }
-
- protected:
- // webrtc::VideoRendererInterface implementation. May be called on
- // a different thread.
- virtual void SetSize(int width, int height) OVERRIDE;
- virtual void RenderFrame(const cricket::VideoFrame* frame) OVERRIDE;
-
- // webrtc::ObserverInterface implementation.
- // TODO(perkj): OnChanged should be implemented on a common base class used
- // for both WebRtc Audio and Video tracks.
- virtual void OnChanged() OVERRIDE;
-
- private:
- void DoRenderFrameOnMainThread(scoped_refptr<media::VideoFrame> video_frame);
-
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
- MediaStreamVideoSink* sink_;
- // The video track the renderer is connected to.
- scoped_refptr<webrtc::VideoTrackInterface> video_track_;
- webrtc::MediaStreamTrackInterface::TrackState state_;
- bool track_enabled_;
-
- DISALLOW_COPY_AND_ASSIGN(WebRtcVideoSinkAdapter);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_SINK_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.cc
new file mode 100644
index 00000000000..6a9c2f4d30d
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.cc
@@ -0,0 +1,185 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/webrtc_video_track_adapter.h"
+
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/lock.h"
+#include "content/common/media/media_stream_options.h"
+#include "content/renderer/media/media_stream_video_source.h"
+#include "content/renderer/media/media_stream_video_track.h"
+
+namespace {
+
+bool ConstraintKeyExists(const blink::WebMediaConstraints& constraints,
+ const blink::WebString& name) {
+ blink::WebString value_str;
+ return constraints.getMandatoryConstraintValue(name, value_str) ||
+ constraints.getOptionalConstraintValue(name, value_str);
+}
+
+} // anonymous namespace
+
+namespace content {
+
+// Simple helper class used for receiving video frames on the IO-thread from
+// a MediaStreamVideoTrack and forwarding the frames to a
+// WebRtcVideoCapturerAdapter on libjingle's worker thread.
+// WebRtcVideoCapturerAdapter implements a video capturer for libjingle.
+class WebRtcVideoTrackAdapter::WebRtcVideoSourceAdapter
+ : public base::RefCountedThreadSafe<WebRtcVideoSourceAdapter> {
+ public:
+ WebRtcVideoSourceAdapter(
+ const scoped_refptr<base::MessageLoopProxy>& libjingle_worker_thread,
+ const scoped_refptr<webrtc::VideoSourceInterface>& source,
+ WebRtcVideoCapturerAdapter* capture_adapter);
+
+ // WebRtcVideoTrackAdapter can be destroyed on the main render thread or
+ // libjingle's worker thread since it posts video frames on that thread. But
+ // |video_source_| must be released on the main render thread before the
+ // PeerConnectionFactory has been destroyed. The only way to ensure that is
+ // to make sure |video_source_| is released when WebRtcVideoTrackAdapter() is
+ // destroyed.
+ void ReleaseSourceOnMainThread();
+
+ void OnVideoFrameOnIO(const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+
+ private:
+ void OnVideoFrameOnWorkerThread(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time);
+ friend class base::RefCountedThreadSafe<WebRtcVideoSourceAdapter>;
+ virtual ~WebRtcVideoSourceAdapter();
+
+ scoped_refptr<base::MessageLoopProxy> render_thread_message_loop_;
+
+ // |render_thread_checker_| is bound to the main render thread.
+ base::ThreadChecker render_thread_checker_;
+ // Used to DCHECK that frames are called on the IO-thread.
+ base::ThreadChecker io_thread_checker_;
+
+ // Used for posting frames to libjingle's worker thread. Accessed on the
+ // IO-thread.
+ scoped_refptr<base::MessageLoopProxy> libjingle_worker_thread_;
+
+ scoped_refptr<webrtc::VideoSourceInterface> video_source_;
+
+ // Used to protect |capture_adapter_|. It is taken by libjingle's worker
+ // thread for each video frame that is delivered but only taken on the
+ // main render thread in ReleaseSourceOnMainThread() when
+ // the owning WebRtcVideoTrackAdapter is being destroyed.
+ base::Lock capture_adapter_stop_lock_;
+ // |capture_adapter_| is owned by |video_source_|
+ WebRtcVideoCapturerAdapter* capture_adapter_;
+};
+
+WebRtcVideoTrackAdapter::WebRtcVideoSourceAdapter::WebRtcVideoSourceAdapter(
+ const scoped_refptr<base::MessageLoopProxy>& libjingle_worker_thread,
+ const scoped_refptr<webrtc::VideoSourceInterface>& source,
+ WebRtcVideoCapturerAdapter* capture_adapter)
+ : render_thread_message_loop_(base::MessageLoopProxy::current()),
+ libjingle_worker_thread_(libjingle_worker_thread),
+ video_source_(source),
+ capture_adapter_(capture_adapter) {
+ io_thread_checker_.DetachFromThread();
+}
+
+WebRtcVideoTrackAdapter::WebRtcVideoSourceAdapter::~WebRtcVideoSourceAdapter() {
+ DVLOG(3) << "~WebRtcVideoSourceAdapter()";
+ DCHECK(!capture_adapter_);
+ // This object can be destroyed on the main render thread or libjingle's
+ // worker thread since it posts video frames on that thread. But
+ // |video_source_| must be released on the main render thread before the
+ // PeerConnectionFactory has been destroyed. The only way to ensure that is
+ // to make sure |video_source_| is released when WebRtcVideoTrackAdapter() is
+ // destroyed.
+}
+
+void WebRtcVideoTrackAdapter::WebRtcVideoSourceAdapter::
+ReleaseSourceOnMainThread() {
+ DCHECK(render_thread_checker_.CalledOnValidThread());
+ // Since frames are posted to the worker thread, this object might be deleted
+ // on that thread. However, since |video_source_| was created on the render
+ // thread, it should be released on the render thread.
+ base::AutoLock auto_lock(capture_adapter_stop_lock_);
+ // |video_source| owns |capture_adapter_|.
+ capture_adapter_ = NULL;
+ video_source_ = NULL;
+}
+
+void WebRtcVideoTrackAdapter::WebRtcVideoSourceAdapter::OnVideoFrameOnIO(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ libjingle_worker_thread_->PostTask(
+ FROM_HERE,
+ base::Bind(&WebRtcVideoSourceAdapter::OnVideoFrameOnWorkerThread,
+ this, frame, format, estimated_capture_time));
+}
+
+void
+WebRtcVideoTrackAdapter::WebRtcVideoSourceAdapter::OnVideoFrameOnWorkerThread(
+ const scoped_refptr<media::VideoFrame>& frame,
+ const media::VideoCaptureFormat& format,
+ const base::TimeTicks& estimated_capture_time) {
+ DCHECK(libjingle_worker_thread_->BelongsToCurrentThread());
+ base::AutoLock auto_lock(capture_adapter_stop_lock_);
+ if (capture_adapter_)
+ capture_adapter_->OnFrameCaptured(frame);
+}
+
+WebRtcVideoTrackAdapter::WebRtcVideoTrackAdapter(
+ const blink::WebMediaStreamTrack& track,
+ PeerConnectionDependencyFactory* factory)
+ : web_track_(track) {
+ const blink::WebMediaConstraints& constraints =
+ MediaStreamVideoTrack::GetVideoTrack(track)->constraints();
+
+ bool is_screencast = ConstraintKeyExists(
+ constraints, base::UTF8ToUTF16(kMediaStreamSource));
+ WebRtcVideoCapturerAdapter* capture_adapter =
+ factory->CreateVideoCapturer(is_screencast);
+
+ // |video_source| owns |capture_adapter|
+ scoped_refptr<webrtc::VideoSourceInterface> video_source(
+ factory->CreateVideoSource(capture_adapter,
+ track.source().constraints()));
+
+ video_track_ = factory->CreateLocalVideoTrack(web_track_.id().utf8(),
+ video_source.get());
+
+ video_track_->set_enabled(web_track_.isEnabled());
+
+ source_adapter_ = new WebRtcVideoSourceAdapter(
+ factory->GetWebRtcWorkerThread(),
+ video_source,
+ capture_adapter);
+
+ AddToVideoTrack(
+ this,
+ base::Bind(&WebRtcVideoSourceAdapter::OnVideoFrameOnIO,
+ source_adapter_),
+ web_track_);
+
+ DVLOG(3) << "WebRtcVideoTrackAdapter ctor() : is_screencast "
+ << is_screencast;
+}
+
+WebRtcVideoTrackAdapter::~WebRtcVideoTrackAdapter() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(3) << "WebRtcVideoTrackAdapter dtor().";
+ RemoveFromVideoTrack(this, web_track_);
+ source_adapter_->ReleaseSourceOnMainThread();
+}
+
+void WebRtcVideoTrackAdapter::OnEnabledChanged(bool enabled) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ video_track_->set_enabled(enabled);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.h
new file mode 100644
index 00000000000..0ff7a6ffd99
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_video_track_adapter.h
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_TRACK_ADAPTER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_TRACK_ADAPTER_H_
+
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+#include "content/public/renderer/media_stream_video_sink.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
+
+namespace content {
+
+class MediaStreamVideoTrack;
+
+// WebRtcVideoTrackAdapter is an adapter between a
+// content::MediaStreamVideoTrack object and a webrtc VideoTrack that is
+// currently sent on a PeerConnection.
+// The responsibility of the class is to create and own a representation of a
+// webrtc VideoTrack that can be added and removed from a RTCPeerConnection.
+// An instance of WebRtcVideoTrackAdapter is created when a VideoTrack is
+// added to an RTCPeerConnection object.
+// Instances of this class is owned by the WebRtcMediaStreamAdapter object that
+// created it.
+class WebRtcVideoTrackAdapter : public MediaStreamVideoSink {
+ public:
+ WebRtcVideoTrackAdapter(const blink::WebMediaStreamTrack& track,
+ PeerConnectionDependencyFactory* factory);
+ virtual ~WebRtcVideoTrackAdapter();
+
+ webrtc::VideoTrackInterface* webrtc_video_track() {
+ return video_track_.get();
+ }
+
+ protected:
+ // Implementation of MediaStreamSink.
+ virtual void OnEnabledChanged(bool enabled) OVERRIDE;
+
+ private:
+ // Used to DCHECK that we are called on the correct thread.
+ base::ThreadChecker thread_checker_;
+
+ scoped_refptr<webrtc::VideoTrackInterface> video_track_;
+ blink::WebMediaStreamTrack web_track_;
+
+ class WebRtcVideoSourceAdapter;
+ scoped_refptr<WebRtcVideoSourceAdapter> source_adapter_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebRtcVideoTrackAdapter);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_VIDEO_TRACK_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc_audio_capturer.cc b/chromium/content/renderer/media/webrtc_audio_capturer.cc
index 391e7d77f59..e47beeada95 100644
--- a/chromium/content/renderer/media/webrtc_audio_capturer.cc
+++ b/chromium/content/renderer/media/webrtc_audio_capturer.cc
@@ -11,6 +11,9 @@
#include "base/strings/stringprintf.h"
#include "content/child/child_process.h"
#include "content/renderer/media/audio_device_factory.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
+#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "content/renderer/media/media_stream_audio_source.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/media/webrtc_logging.h"
@@ -26,7 +29,8 @@ namespace {
// for its current sample rate (set by the user) on Windows and Mac OS X.
// The listed rates below adds restrictions and WebRtcAudioDeviceImpl::Init()
// will fail if the user selects any rate outside these ranges.
-const int kValidInputRates[] = {96000, 48000, 44100, 32000, 16000, 8000};
+const int kValidInputRates[] =
+ {192000, 96000, 48000, 44100, 32000, 16000, 8000};
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
const int kValidInputRates[] = {48000, 44100};
#elif defined(OS_ANDROID)
@@ -35,25 +39,35 @@ const int kValidInputRates[] = {48000, 44100};
const int kValidInputRates[] = {44100};
#endif
+// Time constant for AudioPowerMonitor. See AudioPowerMonitor ctor comments
+// for semantics. This value was arbitrarily chosen, but seems to work well.
+const int kPowerMonitorTimeConstantMs = 10;
+
+// The time between two audio power level samples.
+const int kPowerMonitorLogIntervalSeconds = 10;
+
} // namespace
// Reference counted container of WebRtcLocalAudioTrack delegate.
+// TODO(xians): Switch to MediaStreamAudioSinkOwner.
class WebRtcAudioCapturer::TrackOwner
: public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
public:
explicit TrackOwner(WebRtcLocalAudioTrack* track)
: delegate_(track) {}
- void Capture(media::AudioBus* audio_source,
- int audio_delay_milliseconds,
+ void Capture(const int16* audio_data,
+ base::TimeDelta delay,
double volume,
- bool key_pressed) {
+ bool key_pressed,
+ bool need_audio_processing) {
base::AutoLock lock(lock_);
if (delegate_) {
- delegate_->Capture(audio_source,
- audio_delay_milliseconds,
+ delegate_->Capture(audio_data,
+ delay,
volume,
- key_pressed);
+ key_pressed,
+ need_audio_processing);
}
}
@@ -63,6 +77,13 @@ class WebRtcAudioCapturer::TrackOwner
delegate_->OnSetFormat(params);
}
+ void SetAudioProcessor(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+ base::AutoLock lock(lock_);
+ if (delegate_)
+ delegate_->SetAudioProcessor(processor);
+ }
+
void Reset() {
base::AutoLock lock(lock_);
delegate_ = NULL;
@@ -108,132 +129,127 @@ class WebRtcAudioCapturer::TrackOwner
};
// static
-scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() {
- scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer();
- return capturer;
-}
-
-void WebRtcAudioCapturer::Reconfigure(int sample_rate,
- media::ChannelLayout channel_layout,
- int effects) {
- DCHECK(thread_checker_.CalledOnValidThread());
- int buffer_size = GetBufferSize(sample_rate);
- DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size;
-
- media::AudioParameters::Format format =
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
-
- // bits_per_sample is always 16 for now.
- int bits_per_sample = 16;
- media::AudioParameters params(format, channel_layout, 0, sample_rate,
- bits_per_sample, buffer_size, effects);
- {
- base::AutoLock auto_lock(lock_);
- params_ = params;
-
- // Notify all tracks about the new format.
- tracks_.TagAll();
- }
+scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
+ int render_view_id, const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device,
+ MediaStreamAudioSource* audio_source) {
+ scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(
+ render_view_id, device_info, constraints, audio_device, audio_source);
+ if (capturer->Initialize())
+ return capturer;
+
+ return NULL;
}
-bool WebRtcAudioCapturer::Initialize(int render_view_id,
- media::ChannelLayout channel_layout,
- int sample_rate,
- int buffer_size,
- int session_id,
- const std::string& device_id,
- int paired_output_sample_rate,
- int paired_output_frames_per_buffer,
- int effects) {
+bool WebRtcAudioCapturer::Initialize() {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
-
- DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
- UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
- channel_layout, media::CHANNEL_LAYOUT_MAX);
-
WebRtcLogMessage(base::StringPrintf(
"WAC::Initialize. render_view_id=%d"
", channel_layout=%d, sample_rate=%d, buffer_size=%d"
", session_id=%d, paired_output_sample_rate=%d"
- ", paired_output_frames_per_buffer=%d",
- render_view_id,
- channel_layout,
- sample_rate,
- buffer_size,
- session_id,
- paired_output_sample_rate,
- paired_output_frames_per_buffer));
-
- render_view_id_ = render_view_id;
- session_id_ = session_id;
- device_id_ = device_id;
- hardware_buffer_size_ = buffer_size;
- output_sample_rate_ = paired_output_sample_rate;
- output_frames_per_buffer_= paired_output_frames_per_buffer;
-
- if (render_view_id == -1) {
- // Return true here to allow injecting a new source via SetCapturerSource()
- // at a later state.
+ ", paired_output_frames_per_buffer=%d, effects=%d. ",
+ render_view_id_,
+ device_info_.device.input.channel_layout,
+ device_info_.device.input.sample_rate,
+ device_info_.device.input.frames_per_buffer,
+ device_info_.session_id,
+ device_info_.device.matched_output.sample_rate,
+ device_info_.device.matched_output.frames_per_buffer,
+ device_info_.device.input.effects));
+
+ if (render_view_id_ == -1) {
+ // Return true here to allow injecting a new source via
+ // SetCapturerSourceForTesting() at a later state.
return true;
}
+ MediaAudioConstraints audio_constraints(constraints_,
+ device_info_.device.input.effects);
+ if (!audio_constraints.IsValid())
+ return false;
+
+ media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
+ device_info_.device.input.channel_layout);
+ DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
+ channel_layout, media::CHANNEL_LAYOUT_MAX + 1);
+
// Verify that the reported input channel configuration is supported.
if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
- channel_layout != media::CHANNEL_LAYOUT_STEREO) {
+ channel_layout != media::CHANNEL_LAYOUT_STEREO &&
+ channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
DLOG(ERROR) << channel_layout
<< " is not a supported input channel configuration.";
return false;
}
- DVLOG(1) << "Audio input hardware sample rate: " << sample_rate;
- media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate);
- if (asr != media::kUnexpectedAudioSampleRate) {
+ DVLOG(1) << "Audio input hardware sample rate: "
+ << device_info_.device.input.sample_rate;
+ media::AudioSampleRate asr;
+ if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) {
UMA_HISTOGRAM_ENUMERATION(
- "WebRTC.AudioInputSampleRate", asr, media::kUnexpectedAudioSampleRate);
+ "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
} else {
- UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", sample_rate);
+ UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
+ device_info_.device.input.sample_rate);
}
// Verify that the reported input hardware sample rate is supported
// on the current platform.
if (std::find(&kValidInputRates[0],
&kValidInputRates[0] + arraysize(kValidInputRates),
- sample_rate) ==
+ device_info_.device.input.sample_rate) ==
&kValidInputRates[arraysize(kValidInputRates)]) {
- DLOG(ERROR) << sample_rate << " is not a supported input rate.";
+ DLOG(ERROR) << device_info_.device.input.sample_rate
+ << " is not a supported input rate.";
return false;
}
- // Create and configure the default audio capturing source. The |source_|
- // will be overwritten if an external client later calls SetCapturerSource()
- // providing an alternative media::AudioCapturerSource.
- SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
+ // Create and configure the default audio capturing source.
+ SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_),
channel_layout,
- static_cast<float>(sample_rate),
- effects);
+ static_cast<float>(device_info_.device.input.sample_rate));
+
+ // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
+ // information from the capturer.
+ if (audio_device_)
+ audio_device_->AddAudioCapturer(this);
return true;
}
-WebRtcAudioCapturer::WebRtcAudioCapturer()
- : running_(false),
- render_view_id_(-1),
- hardware_buffer_size_(0),
- session_id_(0),
+WebRtcAudioCapturer::WebRtcAudioCapturer(
+ int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device,
+ MediaStreamAudioSource* audio_source)
+ : constraints_(constraints),
+ audio_processor_(
+ new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+ constraints, device_info.device.input.effects, audio_device)),
+ running_(false),
+ render_view_id_(render_view_id),
+ device_info_(device_info),
volume_(0),
peer_connection_mode_(false),
- output_sample_rate_(0),
- output_frames_per_buffer_(0),
- key_pressed_(false) {
+ key_pressed_(false),
+ need_audio_processing_(false),
+ audio_device_(audio_device),
+ audio_source_(audio_source),
+ audio_power_monitor_(
+ device_info_.device.input.sample_rate,
+ base::TimeDelta::FromMilliseconds(kPowerMonitorTimeConstantMs)) {
DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}
WebRtcAudioCapturer::~WebRtcAudioCapturer() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(tracks_.IsEmpty());
- DCHECK(!running_);
DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
+ Stop();
}
void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
@@ -250,16 +266,11 @@ void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
scoped_refptr<TrackOwner> track_owner(new TrackOwner(track));
tracks_.AddAndTag(track_owner);
}
-
- // Start the source if the first audio track is connected to the capturer.
- // Start() will do nothing if the capturer has already been started.
- Start();
-
}
void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
DCHECK(thread_checker_.CalledOnValidThread());
-
+ DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()";
bool stop_source = false;
{
base::AutoLock auto_lock(lock_);
@@ -270,27 +281,29 @@ void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
// Clear the delegate to ensure that no more capture callbacks will
// be sent to this sink. Also avoids a possible crash which can happen
// if this method is called while capturing is active.
- if (removed_item.get())
+ if (removed_item.get()) {
removed_item->Reset();
-
- // Stop the source if the last audio track is going away.
- stop_source = tracks_.IsEmpty();
+ stop_source = tracks_.IsEmpty();
+ }
+ }
+ if (stop_source) {
+ // Since WebRtcAudioCapturer does not inherit MediaStreamAudioSource,
+ // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer,
+ // we have to call StopSource on the MediaStreamSource. This will call
+ // MediaStreamAudioSource::DoStopSource, which in turn calls
+ // WebRtcAudioCapturer::Stop().
+ audio_source_->StopSource();
}
-
- if (stop_source)
- Stop();
}
void WebRtcAudioCapturer::SetCapturerSource(
const scoped_refptr<media::AudioCapturerSource>& source,
media::ChannelLayout channel_layout,
- float sample_rate,
- int effects) {
+ float sample_rate) {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
<< "sample_rate=" << sample_rate << ")";
scoped_refptr<media::AudioCapturerSource> old_source;
- bool restart_source = false;
{
base::AutoLock auto_lock(lock_);
if (source_.get() == source.get())
@@ -300,7 +313,6 @@ void WebRtcAudioCapturer::SetCapturerSource(
source_ = source;
// Reset the flag to allow starting the new source.
- restart_source = running_;
running_ = false;
}
@@ -308,18 +320,33 @@ void WebRtcAudioCapturer::SetCapturerSource(
if (old_source.get())
old_source->Stop();
- // Dispatch the new parameters both to the sink(s) and to the new source.
+ // Dispatch the new parameters both to the sink(s) and to the new source,
+ // also apply the new |constraints|.
// The idea is to get rid of any dependency of the microphone parameters
// which would normally be used by default.
- Reconfigure(sample_rate, channel_layout, effects);
+ // bits_per_sample is always 16 for now.
+ int buffer_size = GetBufferSize(sample_rate);
+ media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ channel_layout, 0, sample_rate,
+ 16, buffer_size,
+ device_info_.device.input.effects);
+
+ {
+ base::AutoLock auto_lock(lock_);
+ // Notify the |audio_processor_| of the new format.
+ audio_processor_->OnCaptureFormatChanged(params);
+
+ MediaAudioConstraints audio_constraints(constraints_,
+ device_info_.device.input.effects);
+ need_audio_processing_ = audio_constraints.NeedsAudioProcessing();
+ // Notify all tracks about the new format.
+ tracks_.TagAll();
+ }
- // Make sure to grab the new parameters in case they were reconfigured.
- media::AudioParameters params = audio_parameters();
if (source.get())
- source->Initialize(params, this, session_id_);
+ source->Initialize(params, this, session_id());
- if (restart_source)
- Start();
+ Start();
}
void WebRtcAudioCapturer::EnablePeerConnectionMode() {
@@ -331,6 +358,7 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() {
peer_connection_mode_ = true;
int render_view_id = -1;
+ media::AudioParameters input_params;
{
base::AutoLock auto_lock(lock_);
// Simply return if there is no existing source or the |render_view_id_| is
@@ -339,22 +367,24 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() {
return;
render_view_id = render_view_id_;
+ input_params = audio_processor_->InputFormat();
}
// Do nothing if the current buffer size is the WebRtc native buffer size.
- media::AudioParameters params = audio_parameters();
- if (GetBufferSize(params.sample_rate()) == params.frames_per_buffer())
+ if (GetBufferSize(input_params.sample_rate()) ==
+ input_params.frames_per_buffer()) {
return;
+ }
// Create a new audio stream as source which will open the hardware using
// WebRtc native buffer size.
SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
- params.channel_layout(),
- static_cast<float>(params.sample_rate()),
- params.effects());
+ input_params.channel_layout(),
+ static_cast<float>(input_params.sample_rate()));
}
void WebRtcAudioCapturer::Start() {
+ DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcAudioCapturer::Start()";
base::AutoLock auto_lock(lock_);
if (running_ || !source_)
@@ -368,6 +398,7 @@ void WebRtcAudioCapturer::Start() {
}
void WebRtcAudioCapturer::Stop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcAudioCapturer::Stop()";
scoped_refptr<media::AudioCapturerSource> source;
TrackList::ItemList tracks;
@@ -382,6 +413,10 @@ void WebRtcAudioCapturer::Stop() {
running_ = false;
}
+ // Remove the capturer object from the WebRtcAudioDeviceImpl.
+ if (audio_device_)
+ audio_device_->RemoveAudioCapturer(this);
+
for (TrackList::ItemList::const_iterator it = tracks.begin();
it != tracks.end();
++it) {
@@ -390,6 +425,9 @@ void WebRtcAudioCapturer::Stop() {
if (source.get())
source->Stop();
+
+ // Stop the audio processor to avoid feeding render data into the processor.
+ audio_processor_->Stop();
}
void WebRtcAudioCapturer::SetVolume(int volume) {
@@ -410,7 +448,7 @@ int WebRtcAudioCapturer::MaxVolume() const {
return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
}
-void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
+void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) {
@@ -419,7 +457,7 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
// CaptureCallback.
#if defined(OS_WIN) || defined(OS_MACOSX)
DCHECK_LE(volume, 1.0);
-#elif defined(OS_LINUX) || defined(OS_OPENBSD)
+#elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
// We have a special situation on Linux where the microphone volume can be
// "higher than maximum". The input volume slider in the sound preference
// allows the user to set a scaling that is higher than 100%. It means that
@@ -431,41 +469,82 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
TrackList::ItemList tracks;
TrackList::ItemList tracks_to_notify_format;
int current_volume = 0;
- media::AudioParameters params;
+ base::TimeDelta audio_delay;
+ bool need_audio_processing = true;
{
base::AutoLock auto_lock(lock_);
if (!running_)
return;
- // Map internal volume range of [0.0, 1.0] into [0, 255] used by the
- // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the
- // volume is higher than 255.
+ // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
+ // The volume can be higher than 255 on Linux, and it will be cropped to
+ // 255 since AGC does not allow values out of range.
volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
- current_volume = volume_;
- audio_delay_ = base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
+ current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
+ audio_delay = base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
+ audio_delay_ = audio_delay;
key_pressed_ = key_pressed;
tracks = tracks_.Items();
tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
- CHECK(params_.IsValid());
- CHECK_EQ(audio_source->channels(), params_.channels());
- CHECK_EQ(audio_source->frames(), params_.frames_per_buffer());
- params = params_;
+ // Set the flag to turn on the audio processing in PeerConnection level.
+ // Note that, we turn off the audio processing in PeerConnection if the
+ // processor has already processed the data.
+ need_audio_processing = need_audio_processing_ ?
+ !MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled() : false;
}
+ DCHECK(audio_processor_->InputFormat().IsValid());
+ DCHECK_EQ(audio_source->channels(),
+ audio_processor_->InputFormat().channels());
+ DCHECK_EQ(audio_source->frames(),
+ audio_processor_->InputFormat().frames_per_buffer());
+
// Notify the tracks on when the format changes. This will do nothing if
// |tracks_to_notify_format| is empty.
+ media::AudioParameters output_params = audio_processor_->OutputFormat();
for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin();
it != tracks_to_notify_format.end(); ++it) {
- (*it)->OnSetFormat(params);
+ (*it)->OnSetFormat(output_params);
+ (*it)->SetAudioProcessor(audio_processor_);
}
- // Feed the data to the tracks.
- for (TrackList::ItemList::const_iterator it = tracks.begin();
- it != tracks.end();
- ++it) {
- (*it)->Capture(audio_source, audio_delay_milliseconds,
- current_volume, key_pressed);
+ if ((base::TimeTicks::Now() - last_audio_level_log_time_).InSeconds() >
+ kPowerMonitorLogIntervalSeconds) {
+ audio_power_monitor_.Scan(*audio_source, audio_source->frames());
+
+ last_audio_level_log_time_ = base::TimeTicks::Now();
+
+ std::pair<float, bool> result =
+ audio_power_monitor_.ReadCurrentPowerAndClip();
+ WebRtcLogMessage(base::StringPrintf(
+ "WAC::Capture: current_audio_power=%.2fdBFS.", result.first));
+
+ audio_power_monitor_.Reset();
+ }
+
+ // Push the data to the processor for processing.
+ audio_processor_->PushCaptureData(audio_source);
+
+ // Process and consume the data in the processor until there is not enough
+ // data in the processor.
+ int16* output = NULL;
+ int new_volume = 0;
+ while (audio_processor_->ProcessAndConsumeData(
+ audio_delay, current_volume, key_pressed, &new_volume, &output)) {
+ // Feed the post-processed data to the tracks.
+ for (TrackList::ItemList::const_iterator it = tracks.begin();
+ it != tracks.end(); ++it) {
+ (*it)->Capture(output, audio_delay, current_volume, key_pressed,
+ need_audio_processing);
+ }
+
+ if (new_volume) {
+ SetVolume(new_volume);
+
+ // Update the |current_volume| to avoid passing the old volume to AGC.
+ current_volume = new_volume;
+ }
}
}
@@ -473,9 +552,10 @@ void WebRtcAudioCapturer::OnCaptureError() {
NOTIMPLEMENTED();
}
-media::AudioParameters WebRtcAudioCapturer::audio_parameters() const {
+media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const {
base::AutoLock auto_lock(lock_);
- return params_;
+ return audio_processor_ ?
+ audio_processor_->InputFormat() : media::AudioParameters();
}
bool WebRtcAudioCapturer::GetPairedOutputParameters(
@@ -483,12 +563,15 @@ bool WebRtcAudioCapturer::GetPairedOutputParameters(
int* output_sample_rate,
int* output_frames_per_buffer) const {
// Don't set output parameters unless all of them are valid.
- if (session_id_ <= 0 || !output_sample_rate_ || !output_frames_per_buffer_)
+ if (device_info_.session_id <= 0 ||
+ !device_info_.device.matched_output.sample_rate ||
+ !device_info_.device.matched_output.frames_per_buffer)
return false;
- *session_id = session_id_;
- *output_sample_rate = output_sample_rate_;
- *output_frames_per_buffer = output_frames_per_buffer_;
+ *session_id = device_info_.session_id;
+ *output_sample_rate = device_info_.device.matched_output.sample_rate;
+ *output_frames_per_buffer =
+ device_info_.device.matched_output.frames_per_buffer;
return true;
}
@@ -507,9 +590,10 @@ int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
// Use the native hardware buffer size in non peer connection mode when the
// platform is using a native buffer size smaller than the PeerConnection
// buffer size.
- if (!peer_connection_mode_ && hardware_buffer_size_ &&
- hardware_buffer_size_ <= peer_connection_buffer_size) {
- return hardware_buffer_size_;
+ int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
+ if (!peer_connection_mode_ && hardware_buffer_size &&
+ hardware_buffer_size <= peer_connection_buffer_size) {
+ return hardware_buffer_size;
}
return (sample_rate / 100);
@@ -523,4 +607,12 @@ void WebRtcAudioCapturer::GetAudioProcessingParams(
*key_pressed = key_pressed_;
}
+void WebRtcAudioCapturer::SetCapturerSourceForTesting(
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::AudioParameters params) {
+ // Create a new audio stream as source which uses the new source.
+ SetCapturerSource(source, params.channel_layout(),
+ static_cast<float>(params.sample_rate()));
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_audio_capturer.h b/chromium/content/renderer/media/webrtc_audio_capturer.h
index 23391140411..d77a107dbca 100644
--- a/chromium/content/renderer/media/webrtc_audio_capturer.h
+++ b/chromium/content/renderer/media/webrtc_audio_capturer.h
@@ -9,14 +9,17 @@
#include <string>
#include "base/callback.h"
+#include "base/files/file.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
+#include "content/common/media/media_stream_options.h"
#include "content/renderer/media/tagged_list.h"
-#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_input_device.h"
+#include "media/audio/audio_power_monitor.h"
#include "media/base/audio_capturer_source.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
namespace media {
class AudioBus;
@@ -24,13 +27,14 @@ class AudioBus;
namespace content {
+class MediaStreamAudioProcessor;
+class MediaStreamAudioSource;
+class WebRtcAudioDeviceImpl;
class WebRtcLocalAudioRenderer;
class WebRtcLocalAudioTrack;
// This class manages the capture data flow by getting data from its
// |source_|, and passing it to its |tracks_|.
-// It allows clients to inject their own capture data source by calling
-// SetCapturerSource().
// The threading model for this class is rather complex since it will be
// created on the main render thread, captured data is provided on a dedicated
// AudioInputDevice thread, and methods can be called either on the Libjingle
@@ -40,31 +44,29 @@ class CONTENT_EXPORT WebRtcAudioCapturer
: public base::RefCountedThreadSafe<WebRtcAudioCapturer>,
NON_EXPORTED_BASE(public media::AudioCapturerSource::CaptureCallback) {
public:
- // Use to construct the audio capturer.
+ // Used to construct the audio capturer. |render_view_id| specifies the
+ // render view consuming audio for capture, |render_view_id| as -1 is used
+ // by the unittests to skip creating a source via
+ // AudioDeviceFactory::NewInputDevice(), and allow injecting their own source
+ // via SetCapturerSourceForTesting() at a later stage. |device_info|
+ // contains all the device information that the capturer is created for.
+ // |constraints| contains the settings for audio processing.
+ // TODO(xians): Implement the interface for the audio source and move the
+ // |constraints| to ApplyConstraints().
// Called on the main render thread.
- static scoped_refptr<WebRtcAudioCapturer> CreateCapturer();
-
- // Creates and configures the default audio capturing source using the
- // provided audio parameters. |render_view_id| specifies the render view
- // consuming audio for capture. |session_id| is passed to the browser to
- // decide which device to use. |device_id| is used to identify which device
- // the capturer is created for. Called on the main render thread.
- bool Initialize(int render_view_id,
- media::ChannelLayout channel_layout,
- int sample_rate,
- int buffer_size,
- int session_id,
- const std::string& device_id,
- int paired_output_sample_rate,
- int paired_output_frames_per_buffer,
- int effects);
+ static scoped_refptr<WebRtcAudioCapturer> CreateCapturer(
+ int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device,
+ MediaStreamAudioSource* audio_source);
+
// Add a audio track to the sinks of the capturer.
// WebRtcAudioDeviceImpl calls this method on the main render thread but
// other clients may call it from other threads. The current implementation
// does not support multi-thread calling.
// The first AddTrack will implicitly trigger the Start() of this object.
- // Called on the main render thread or libjingle working thread.
void AddTrack(WebRtcLocalAudioTrack* track);
// Remove a audio track from the sinks of the capturer.
@@ -73,16 +75,6 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Called on the main render thread or libjingle working thread.
void RemoveTrack(WebRtcLocalAudioTrack* track);
- // SetCapturerSource() is called if the client on the source side desires to
- // provide their own captured audio data. Client is responsible for calling
- // Start() on its own source to have the ball rolling.
- // Called on the main render thread.
- void SetCapturerSource(
- const scoped_refptr<media::AudioCapturerSource>& source,
- media::ChannelLayout channel_layout,
- float sample_rate,
- int effects);
-
// Called when a stream is connecting to a peer connection. This will set
// up the native buffer size for the stream in order to optimize the
// performance for peer connection.
@@ -94,15 +86,10 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int Volume() const;
int MaxVolume() const;
- bool is_recording() const { return running_; }
-
- // Audio parameters utilized by the audio capturer. Can be utilized by
- // a local renderer to set up a renderer using identical parameters as the
- // capturer.
- // TODO(phoglund): This accessor is inherently unsafe since the returned
- // parameters can become outdated at any time. Think over the implications
- // of this accessor and if we can remove it.
- media::AudioParameters audio_parameters() const;
+ // Audio parameters utilized by the source of the audio capturer.
+ // TODO(phoglund): Think over the implications of this accessor and if we can
+ // remove it.
+ media::AudioParameters source_audio_parameters() const;
// Gets information about the paired output device. Returns true if such a
// device exists.
@@ -110,13 +97,13 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int* output_sample_rate,
int* output_frames_per_buffer) const;
- const std::string& device_id() const { return device_id_; }
- int session_id() const { return session_id_; }
+ const std::string& device_id() const { return device_info_.device.id; }
+ int session_id() const { return device_info_.session_id; }
// Stops recording audio. This method will empty its track lists since
// stopping the capturer will implicitly invalidate all its tracks.
- // This method is exposed to the public because the media stream track can
- // call Stop() on its source.
+ // This method is exposed to the public because the MediaStreamAudioSource can
+ // call Stop() on its source.
void Stop();
// Called by the WebAudioCapturerSource to get the audio processing params.
@@ -125,27 +112,45 @@ class CONTENT_EXPORT WebRtcAudioCapturer
void GetAudioProcessingParams(base::TimeDelta* delay, int* volume,
bool* key_pressed);
+ // Used by the unittests to inject their own source to the capturer.
+ void SetCapturerSourceForTesting(
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::AudioParameters params);
+
protected:
friend class base::RefCountedThreadSafe<WebRtcAudioCapturer>;
- WebRtcAudioCapturer();
virtual ~WebRtcAudioCapturer();
private:
class TrackOwner;
typedef TaggedList<TrackOwner> TrackList;
+ WebRtcAudioCapturer(int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device,
+ MediaStreamAudioSource* audio_source);
+
// AudioCapturerSource::CaptureCallback implementation.
// Called on the AudioInputDevice audio thread.
- virtual void Capture(media::AudioBus* audio_source,
+ virtual void Capture(const media::AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) OVERRIDE;
virtual void OnCaptureError() OVERRIDE;
- // Reconfigures the capturer with a new capture parameters.
- // Must be called without holding the lock.
- void Reconfigure(int sample_rate, media::ChannelLayout channel_layout,
- int effects);
+ // Initializes the default audio capturing source using the provided render
+ // view id and device information. Return true if success, otherwise false.
+ bool Initialize();
+
+ // SetCapturerSource() is called if the client on the source side desires to
+ // provide their own captured audio data. Client is responsible for calling
+ // Start() on its own source to have the ball rolling.
+ // Called on the main render thread.
+ void SetCapturerSource(
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::ChannelLayout channel_layout,
+ float sample_rate);
// Starts recording audio.
// Triggered by AddSink() on the main render thread or a Libjingle working
@@ -171,23 +176,19 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// The audio data source from the browser process.
scoped_refptr<media::AudioCapturerSource> source_;
- // Cached audio parameters for output.
- media::AudioParameters params_;
+ // Cached audio constraints for the capturer.
+ blink::WebMediaConstraints constraints_;
+
+ // Audio processor doing processing like FIFO, AGC, AEC and NS. Its output
+ // data is in a unit of 10 ms data chunk.
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
bool running_;
int render_view_id_;
- // Cached value for the hardware native buffer size, used when
- // |peer_connection_mode_| is set to false.
- int hardware_buffer_size_;
-
- // The media session ID used to identify which input device to be started by
- // the browser.
- int session_id_;
-
- // The device this capturer is given permission to use.
- std::string device_id_;
+ // Cached information of the device used by the capturer.
+ const StreamDeviceInfo device_info_;
// Stores latest microphone volume received in a CaptureData() callback.
// Range is [0, 255].
@@ -196,13 +197,32 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Flag which affects the buffer size used by the capturer.
bool peer_connection_mode_;
- int output_sample_rate_;
- int output_frames_per_buffer_;
-
// Cache value for the audio processing params.
base::TimeDelta audio_delay_;
bool key_pressed_;
+ // Flag to help deciding if the data needs audio processing.
+ bool need_audio_processing_;
+
+ // Raw pointer to the WebRtcAudioDeviceImpl, which is valid for the lifetime
+ // of RenderThread.
+ WebRtcAudioDeviceImpl* audio_device_;
+
+ // Raw pointer to the MediaStreamAudioSource object that holds a reference
+ // to this WebRtcAudioCapturer.
+ // Since |audio_source_| is owned by a blink::WebMediaStreamSource object and
+ // blink guarantees that the blink::WebMediaStreamSource outlives any
+ // blink::WebMediaStreamTrack connected to the source, |audio_source_| is
+ // guaranteed to exist as long as a WebRtcLocalAudioTrack is connected to this
+ // WebRtcAudioCapturer.
+ MediaStreamAudioSource* const audio_source_;
+
+ // Audio power monitor for logging audio power level.
+ media::AudioPowerMonitor audio_power_monitor_;
+
+ // Records when the last time audio power level is logged.
+ base::TimeTicks last_audio_level_log_time_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcAudioCapturer);
};
diff --git a/chromium/content/renderer/media/webrtc_audio_capturer_unittest.cc b/chromium/content/renderer/media/webrtc_audio_capturer_unittest.cc
index 184ba0155ab..0011f763059 100644
--- a/chromium/content/renderer/media/webrtc_audio_capturer_unittest.cc
+++ b/chromium/content/renderer/media/webrtc_audio_capturer_unittest.cc
@@ -2,14 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/command_line.h"
#include "base/logging.h"
-#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/public/common/content_switches.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
using ::testing::_;
using ::testing::AtLeast;
@@ -18,38 +22,6 @@ namespace content {
namespace {
-// TODO(xians): Consolidate the similar methods in different unittests into
-// one.
-void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints) {
- // Constant constraint keys which enables default audio constraints on
- // mediastreams with audio.
- struct {
- const char* key;
- const char* value;
- } static const kDefaultAudioConstraints[] = {
- { webrtc::MediaConstraintsInterface::kEchoCancellation,
- webrtc::MediaConstraintsInterface::kValueTrue },
- #if defined(OS_CHROMEOS) || defined(OS_MACOSX)
- // Enable the extended filter mode AEC on platforms with known echo issues.
- { webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
- webrtc::MediaConstraintsInterface::kValueTrue },
- #endif
- { webrtc::MediaConstraintsInterface::kAutoGainControl,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kNoiseSuppression,
- webrtc::MediaConstraintsInterface::kValueTrue },
- { webrtc::MediaConstraintsInterface::kHighpassFilter,
- webrtc::MediaConstraintsInterface::kValueTrue },
- };
-
- for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
- constraints->AddMandatory(kDefaultAudioConstraints[i].key,
- kDefaultAudioConstraints[i].value, false);
- }
-}
-
class MockCapturerSource : public media::AudioCapturerSource {
public:
MockCapturerSource() {}
@@ -69,16 +41,32 @@ class MockPeerConnectionAudioSink : public PeerConnectionAudioSink {
public:
MockPeerConnectionAudioSink() {}
~MockPeerConnectionAudioSink() {}
- MOCK_METHOD9(OnData, int(const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- const std::vector<int>& channels,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed));
- MOCK_METHOD1(OnSetFormat, void(const media::AudioParameters& params));
+ virtual int OnData(const int16* audio_data, int sample_rate,
+ int number_of_channels, int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds, int current_volume,
+ bool need_audio_processing, bool key_pressed) OVERRIDE {
+ EXPECT_EQ(sample_rate, params_.sample_rate());
+ EXPECT_EQ(number_of_channels, params_.channels());
+ EXPECT_EQ(number_of_frames, params_.frames_per_buffer());
+ OnDataCallback(audio_data, channels, audio_delay_milliseconds,
+ current_volume, need_audio_processing, key_pressed);
+ return 0;
+ }
+ MOCK_METHOD6(OnDataCallback, void(const int16* audio_data,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed));
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE {
+ params_ = params;
+ FormatIsSet();
+ }
+ MOCK_METHOD0(FormatIsSet, void());
+
+ private:
+ media::AudioParameters params_;
};
} // namespace
@@ -94,81 +82,110 @@ class WebRtcAudioCapturerTest : public testing::Test {
: params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 48000, 16, 128) {
#endif
- capturer_ = WebRtcAudioCapturer::CreateCapturer();
- capturer_->Initialize(-1, params_.channel_layout(), params_.sample_rate(),
- params_.frames_per_buffer(), 0, std::string(), 0, 0,
- params_.effects());
- capturer_source_ = new MockCapturerSource();
- EXPECT_CALL(*capturer_source_.get(), Initialize(_, capturer_.get(), 0));
- capturer_->SetCapturerSource(capturer_source_,
- params_.channel_layout(),
- params_.sample_rate(),
- params_.effects());
+ }
+
+ void DisableAudioTrackProcessing() {
+ CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kDisableAudioTrackProcessing);
+ }
+ void VerifyAudioParams(const blink::WebMediaConstraints& constraints,
+ bool need_audio_processing) {
+ capturer_ = WebRtcAudioCapturer::CreateCapturer(
+ -1, StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE,
+ "", "", params_.sample_rate(),
+ params_.channel_layout(),
+ params_.frames_per_buffer()),
+ constraints, NULL, NULL);
+ capturer_source_ = new MockCapturerSource();
+ EXPECT_CALL(*capturer_source_.get(), Initialize(_, capturer_.get(), -1));
EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
EXPECT_CALL(*capturer_source_.get(), Start());
- RTCMediaConstraints constraints;
- ApplyFixedAudioConstraints(&constraints);
- track_ = WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
- NULL, &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_->audio_source_provider())->SetSinkParamsForTesting(params_);
+ capturer_->SetCapturerSourceForTesting(capturer_source_, params_);
+
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ track_.reset(new WebRtcLocalAudioTrack(adapter, capturer_, NULL));
track_->Start();
- EXPECT_TRUE(track_->enabled());
+
+ // Connect a mock sink to the track.
+ scoped_ptr<MockPeerConnectionAudioSink> sink(
+ new MockPeerConnectionAudioSink());
+ track_->AddSink(sink.get());
+
+ int delay_ms = 65;
+ bool key_pressed = true;
+ double volume = 0.9;
+
+ // MaxVolume() in WebRtcAudioCapturer is hard-coded to return 255, we add
+ // 0.5 to do the correct truncation like the production code does.
+ int expected_volume_value = volume * capturer_->MaxVolume() + 0.5;
+ scoped_ptr<media::AudioBus> audio_bus = media::AudioBus::Create(params_);
+ audio_bus->Zero();
+
+ media::AudioCapturerSource::CaptureCallback* callback =
+ static_cast<media::AudioCapturerSource::CaptureCallback*>(capturer_);
+
+ // Verify the sink is getting the correct values.
+ EXPECT_CALL(*sink, FormatIsSet());
+ EXPECT_CALL(*sink,
+ OnDataCallback(_, _, delay_ms, expected_volume_value,
+ need_audio_processing, key_pressed));
+ callback->Capture(audio_bus.get(), delay_ms, volume, key_pressed);
+
+ // Verify the cached values in the capturer fits what we expect.
+ base::TimeDelta cached_delay;
+ int cached_volume = !expected_volume_value;
+ bool cached_key_pressed = !key_pressed;
+ capturer_->GetAudioProcessingParams(&cached_delay, &cached_volume,
+ &cached_key_pressed);
+ EXPECT_EQ(cached_delay.InMilliseconds(), delay_ms);
+ EXPECT_EQ(cached_volume, expected_volume_value);
+ EXPECT_EQ(cached_key_pressed, key_pressed);
+
+ track_->RemoveSink(sink.get());
+ EXPECT_CALL(*capturer_source_.get(), Stop());
+ capturer_->Stop();
}
media::AudioParameters params_;
scoped_refptr<MockCapturerSource> capturer_source_;
scoped_refptr<WebRtcAudioCapturer> capturer_;
- scoped_refptr<WebRtcLocalAudioTrack> track_;
+ scoped_ptr<WebRtcLocalAudioTrack> track_;
};
-// Pass the delay value, vollume and key_pressed info via capture callback, and
+// Pass the delay value, volume and key_pressed info via capture callback, and
// those values should be correctly stored and passed to the track.
-TEST_F(WebRtcAudioCapturerTest, VerifyAudioParams) {
- // Connect a mock sink to the track.
- scoped_ptr<MockPeerConnectionAudioSink> sink(
- new MockPeerConnectionAudioSink());
- track_->AddSink(sink.get());
-
- int delay_ms = 65;
- bool key_pressed = true;
- double volume = 0.9;
- // MaxVolume() in WebRtcAudioCapturer is hard-coded to return 255, we add 0.5
- // to do the correct truncation as how the production code does.
- int expected_volume_value = volume * capturer_->MaxVolume() + 0.5;
- scoped_ptr<media::AudioBus> audio_bus = media::AudioBus::Create(params_);
- audio_bus->Zero();
-#if defined(OS_ANDROID)
- const int expected_buffer_size = params_.sample_rate() / 100;
-#else
- const int expected_buffer_size = params_.frames_per_buffer();
-#endif
- bool expected_need_audio_processing = true;
- media::AudioCapturerSource::CaptureCallback* callback =
- static_cast<media::AudioCapturerSource::CaptureCallback*>(capturer_);
- // Verify the sink is getting the correct values.
- EXPECT_CALL(*sink, OnSetFormat(_));
- EXPECT_CALL(*sink,
- OnData(_, params_.sample_rate(), params_.channels(),
- expected_buffer_size, _, delay_ms,
- expected_volume_value, expected_need_audio_processing,
- key_pressed)).Times(AtLeast(1));
- callback->Capture(audio_bus.get(), delay_ms, volume, key_pressed);
-
- // Verify the cached values in the capturer fits what we expect.
- base::TimeDelta cached_delay;
- int cached_volume = !expected_volume_value;
- bool cached_key_pressed = !key_pressed;
- capturer_->GetAudioProcessingParams(&cached_delay, &cached_volume,
- &cached_key_pressed);
- EXPECT_EQ(cached_delay.InMilliseconds(), delay_ms);
- EXPECT_EQ(cached_volume, expected_volume_value);
- EXPECT_EQ(cached_key_pressed, key_pressed);
-
- track_->RemoveSink(sink.get());
- EXPECT_CALL(*capturer_source_.get(), Stop());
- capturer_->Stop();
+TEST_F(WebRtcAudioCapturerTest, VerifyAudioParamsWithoutAudioProcessing) {
+ DisableAudioTrackProcessing();
+ // Use constraints with default settings.
+ MockMediaConstraintFactory constraint_factory;
+ VerifyAudioParams(constraint_factory.CreateWebMediaConstraints(), true);
}
+TEST_F(WebRtcAudioCapturerTest, VerifyAudioParamsWithAudioProcessing) {
+ // Turn off the default constraints to verify that the sink will get packets
+ // with a buffer size smaller than 10ms.
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.DisableDefaultAudioConstraints();
+ VerifyAudioParams(constraint_factory.CreateWebMediaConstraints(), false);
+}
+
+TEST_F(WebRtcAudioCapturerTest, FailToCreateCapturerWithWrongConstraints) {
+ MockMediaConstraintFactory constraint_factory;
+ const std::string dummy_constraint = "dummy";
+ constraint_factory.AddMandatory(dummy_constraint, true);
+
+ scoped_refptr<WebRtcAudioCapturer> capturer(
+ WebRtcAudioCapturer::CreateCapturer(
+ 0, StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE,
+ "", "", params_.sample_rate(),
+ params_.channel_layout(),
+ params_.frames_per_buffer()),
+ constraint_factory.CreateWebMediaConstraints(), NULL, NULL)
+ );
+ EXPECT_TRUE(capturer == NULL);
+}
+
+
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_audio_device_impl.cc b/chromium/content/renderer/media/webrtc_audio_device_impl.cc
index 1daf048fe86..1339a6f4cb8 100644
--- a/chromium/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/chromium/content/renderer/media/webrtc_audio_device_impl.cc
@@ -8,6 +8,7 @@
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "base/win/windows_version.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_renderer.h"
#include "content/renderer/render_thread_impl.h"
@@ -27,7 +28,9 @@ WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
initialized_(false),
playing_(false),
recording_(false),
- microphone_volume_(0) {
+ microphone_volume_(0),
+ is_audio_track_processing_enabled_(
+ MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled()) {
DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()";
}
@@ -73,15 +76,25 @@ int WebRtcAudioDeviceImpl::OnData(const int16* audio_data,
DVLOG(2) << "total delay: " << input_delay_ms_ + output_delay_ms_;
}
- // Write audio samples in blocks of 10 milliseconds to the registered
+ // Write audio frames in blocks of 10 milliseconds to the registered
// webrtc::AudioTransport sink. Keep writing until our internal byte
// buffer is empty.
const int16* audio_buffer = audio_data;
- const int samples_per_10_msec = (sample_rate / 100);
- CHECK_EQ(number_of_frames % samples_per_10_msec, 0);
- int accumulated_audio_samples = 0;
+ const int frames_per_10_ms = (sample_rate / 100);
+ CHECK_EQ(number_of_frames % frames_per_10_ms, 0);
+ int accumulated_audio_frames = 0;
uint32_t new_volume = 0;
- while (accumulated_audio_samples < number_of_frames) {
+
+ // The lock here is to protect a race in the resampler inside webrtc when
+ // there are more than one input stream calling OnData(), which can happen
+ // when the users setup two getUserMedia, one for the microphone, another
+ // for WebAudio. Currently we don't have a better way to fix it except for
+ // adding a lock here to sequence the call.
+ // TODO(xians): Remove this workaround after we move the
+ // webrtc::AudioProcessing module to Chrome. See http://crbug/264611 for
+ // details.
+ base::AutoLock auto_lock(capture_callback_lock_);
+ while (accumulated_audio_frames < number_of_frames) {
// Deliver 10ms of recorded 16-bit linear PCM audio.
int new_mic_level = audio_transport_callback_->OnDataAvailable(
&channels[0],
@@ -89,14 +102,14 @@ int WebRtcAudioDeviceImpl::OnData(const int16* audio_data,
audio_buffer,
sample_rate,
number_of_channels,
- samples_per_10_msec,
+ frames_per_10_ms,
total_delay_ms,
current_volume,
key_pressed,
need_audio_processing);
- accumulated_audio_samples += samples_per_10_msec;
- audio_buffer += samples_per_10_msec * number_of_channels;
+ accumulated_audio_frames += frames_per_10_ms;
+ audio_buffer += frames_per_10_ms * number_of_channels;
// The latest non-zero new microphone level will be returned.
if (new_mic_level)
@@ -111,11 +124,12 @@ void WebRtcAudioDeviceImpl::OnSetFormat(
DVLOG(1) << "WebRtcAudioDeviceImpl::OnSetFormat()";
}
-void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds) {
- DCHECK_LE(number_of_frames, output_buffer_size());
+void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds,
+ base::TimeDelta* current_time) {
+ render_buffer_.resize(audio_bus->frames() * audio_bus->channels());
+
{
base::AutoLock auto_lock(lock_);
DCHECK(audio_transport_callback_);
@@ -123,43 +137,79 @@ void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data,
output_delay_ms_ = audio_delay_milliseconds;
}
- const int channels = number_of_channels;
- DCHECK_LE(channels, output_channels());
-
- int samples_per_sec = output_sample_rate();
- int samples_per_10_msec = (samples_per_sec / 100);
- int bytes_per_sample = output_audio_parameters_.bits_per_sample() / 8;
- const int bytes_per_10_msec =
- channels * samples_per_10_msec * bytes_per_sample;
+ int frames_per_10_ms = (sample_rate / 100);
+ int bytes_per_sample = sizeof(render_buffer_[0]);
+ const int bytes_per_10_ms =
+ audio_bus->channels() * frames_per_10_ms * bytes_per_sample;
+ DCHECK_EQ(audio_bus->frames() % frames_per_10_ms, 0);
- uint32_t num_audio_samples = 0;
- int accumulated_audio_samples = 0;
-
- // Get audio samples in blocks of 10 milliseconds from the registered
+ // Get audio frames in blocks of 10 milliseconds from the registered
// webrtc::AudioTransport source. Keep reading until our internal buffer
// is full.
- while (accumulated_audio_samples < number_of_frames) {
+ uint32_t num_audio_frames = 0;
+ int accumulated_audio_frames = 0;
+ int16* audio_data = &render_buffer_[0];
+ while (accumulated_audio_frames < audio_bus->frames()) {
// Get 10ms and append output to temporary byte buffer.
- audio_transport_callback_->NeedMorePlayData(samples_per_10_msec,
- bytes_per_sample,
- channels,
- samples_per_sec,
+ int64_t elapsed_time_ms = -1;
+ int64_t ntp_time_ms = -1;
+ if (is_audio_track_processing_enabled_) {
+ // When audio processing is enabled in the audio track, we use
+ // PullRenderData() instead of NeedMorePlayData() to avoid passing the
+ // render data to the APM in WebRTC as reference signal for echo
+ // cancellation.
+ static const int kBitsPerByte = 8;
+ audio_transport_callback_->PullRenderData(bytes_per_sample * kBitsPerByte,
+ sample_rate,
+ audio_bus->channels(),
+ frames_per_10_ms,
audio_data,
- num_audio_samples);
- accumulated_audio_samples += num_audio_samples;
- audio_data += bytes_per_10_msec;
+ &elapsed_time_ms,
+ &ntp_time_ms);
+ accumulated_audio_frames += frames_per_10_ms;
+ } else {
+ // TODO(xians): Remove the following code after the APM in WebRTC is
+ // deprecated.
+ audio_transport_callback_->NeedMorePlayData(frames_per_10_ms,
+ bytes_per_sample,
+ audio_bus->channels(),
+ sample_rate,
+ audio_data,
+ num_audio_frames,
+ &elapsed_time_ms,
+ &ntp_time_ms);
+ accumulated_audio_frames += num_audio_frames;
+ }
+ if (elapsed_time_ms >= 0) {
+ *current_time = base::TimeDelta::FromMilliseconds(elapsed_time_ms);
+ }
+ audio_data += bytes_per_10_ms;
}
-}
-void WebRtcAudioDeviceImpl::SetRenderFormat(const AudioParameters& params) {
- DCHECK(thread_checker_.CalledOnValidThread());
- output_audio_parameters_ = params;
+ // De-interleave each channel and convert to 32-bit floating-point
+ // with nominal range -1.0 -> +1.0 to match the callback format.
+ audio_bus->FromInterleaved(&render_buffer_[0],
+ audio_bus->frames(),
+ bytes_per_sample);
+
+ // Pass the render data to the playout sinks.
+ base::AutoLock auto_lock(lock_);
+ for (PlayoutDataSinkList::const_iterator it = playout_sinks_.begin();
+ it != playout_sinks_.end(); ++it) {
+ (*it)->OnPlayoutData(audio_bus, sample_rate, audio_delay_milliseconds);
+ }
}
void WebRtcAudioDeviceImpl::RemoveAudioRenderer(WebRtcAudioRenderer* renderer) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_EQ(renderer, renderer_);
base::AutoLock auto_lock(lock_);
+ // Notify the playout sink of the change.
+ for (PlayoutDataSinkList::const_iterator it = playout_sinks_.begin();
+ it != playout_sinks_.end(); ++it) {
+ (*it)->OnPlayoutDataSourceChanged();
+ }
+
renderer_ = NULL;
playing_ = false;
}
@@ -199,7 +249,16 @@ int32_t WebRtcAudioDeviceImpl::Terminate() {
DCHECK(!renderer_.get() || !renderer_->IsStarted())
<< "The shared audio renderer shouldn't be running";
- capturers_.clear();
+ // Stop all the capturers to ensure no further OnData() and
+ // RemoveAudioCapturer() callback.
+ // Cache the capturers in a local list since WebRtcAudioCapturer::Stop()
+ // will trigger RemoveAudioCapturer() callback.
+ CapturerList capturers;
+ capturers.swap(capturers_);
+ for (CapturerList::const_iterator iter = capturers.begin();
+ iter != capturers.end(); ++iter) {
+ (*iter)->Stop();
+ }
initialized_ = false;
return 0;
@@ -245,7 +304,6 @@ int32_t WebRtcAudioDeviceImpl::StartPlayout() {
}
playing_ = true;
- start_render_time_ = base::Time::Now();
return 0;
}
@@ -256,13 +314,6 @@ int32_t WebRtcAudioDeviceImpl::StopPlayout() {
return 0;
}
- // Add histogram data to be uploaded as part of an UMA logging event.
- // This histogram keeps track of total playout times.
- if (!start_render_time_.is_null()) {
- base::TimeDelta render_time = base::Time::Now() - start_render_time_;
- UMA_HISTOGRAM_LONG_TIMES("WebRTC.AudioRenderTime", render_time);
- }
-
playing_ = false;
return 0;
}
@@ -287,8 +338,6 @@ int32_t WebRtcAudioDeviceImpl::StartRecording() {
recording_ = true;
}
- start_capture_time_ = base::Time::Now();
-
return 0;
}
@@ -302,13 +351,6 @@ int32_t WebRtcAudioDeviceImpl::StopRecording() {
recording_ = false;
}
- // Add histogram data to be uploaded as part of an UMA logging event.
- // This histogram keeps track of total recording times.
- if (!start_capture_time_.is_null()) {
- base::TimeDelta capture_time = base::Time::Now() - start_capture_time_;
- UMA_HISTOGRAM_LONG_TIMES("WebRTC.AudioCaptureTime", capture_time);
- }
-
return 0;
}
@@ -359,7 +401,7 @@ int32_t WebRtcAudioDeviceImpl::MinMicrophoneVolume(uint32_t* min_volume) const {
int32_t WebRtcAudioDeviceImpl::StereoPlayoutIsAvailable(bool* available) const {
DCHECK(initialized_);
- *available = (output_channels() == 2);
+ *available = renderer_ && renderer_->channels() == 2;
return 0;
}
@@ -372,7 +414,7 @@ int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable(
if (!capturer.get())
return -1;
- *available = (capturer->audio_parameters().channels() == 2);
+ *available = (capturer->source_audio_parameters().channels() == 2);
return 0;
}
@@ -389,20 +431,20 @@ int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const {
}
int32_t WebRtcAudioDeviceImpl::RecordingSampleRate(
- uint32_t* samples_per_sec) const {
+ uint32_t* sample_rate) const {
// We use the default capturer as the recording sample rate.
scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
if (!capturer.get())
return -1;
- *samples_per_sec = static_cast<uint32_t>(
- capturer->audio_parameters().sample_rate());
+ *sample_rate = static_cast<uint32_t>(
+ capturer->source_audio_parameters().sample_rate());
return 0;
}
int32_t WebRtcAudioDeviceImpl::PlayoutSampleRate(
- uint32_t* samples_per_sec) const {
- *samples_per_sec = static_cast<uint32_t>(output_sample_rate());
+ uint32_t* sample_rate) const {
+ *sample_rate = renderer_ ? renderer_->sample_rate() : 0;
return 0;
}
@@ -426,24 +468,62 @@ void WebRtcAudioDeviceImpl::AddAudioCapturer(
DVLOG(1) << "WebRtcAudioDeviceImpl::AddAudioCapturer()";
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(capturer.get());
+ DCHECK(!capturer->device_id().empty());
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(std::find(capturers_.begin(), capturers_.end(), capturer) ==
+ capturers_.end());
+ capturers_.push_back(capturer);
+ }
+}
- // We only support one microphone today, which means the list can contain
- // only one capturer with a valid device id.
- DCHECK(capturer->device_id().empty() || !GetDefaultCapturer());
+void WebRtcAudioDeviceImpl::RemoveAudioCapturer(
+ const scoped_refptr<WebRtcAudioCapturer>& capturer) {
+ DVLOG(1) << "WebRtcAudioDeviceImpl::AddAudioCapturer()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(capturer.get());
base::AutoLock auto_lock(lock_);
- capturers_.push_back(capturer);
+ capturers_.remove(capturer);
}
scoped_refptr<WebRtcAudioCapturer>
WebRtcAudioDeviceImpl::GetDefaultCapturer() const {
base::AutoLock auto_lock(lock_);
- for (CapturerList::const_iterator iter = capturers_.begin();
- iter != capturers_.end(); ++iter) {
- if (!(*iter)->device_id().empty())
- return *iter;
- }
+ // Use the last |capturer| which is from the latest getUserMedia call as
+ // the default capture device.
+ return capturers_.empty() ? NULL : capturers_.back();
+}
+
+void WebRtcAudioDeviceImpl::AddPlayoutSink(
+ WebRtcPlayoutDataSource::Sink* sink) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sink);
+ base::AutoLock auto_lock(lock_);
+ DCHECK(std::find(playout_sinks_.begin(), playout_sinks_.end(), sink) ==
+ playout_sinks_.end());
+ playout_sinks_.push_back(sink);
+}
+
+void WebRtcAudioDeviceImpl::RemovePlayoutSink(
+ WebRtcPlayoutDataSource::Sink* sink) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sink);
+ base::AutoLock auto_lock(lock_);
+ playout_sinks_.remove(sink);
+}
+
+bool WebRtcAudioDeviceImpl::GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id,
+ int* output_sample_rate,
+ int* output_frames_per_buffer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // If there is no capturer or there are more than one open capture devices,
+ // return false.
+ if (capturers_.empty() || capturers_.size() > 1)
+ return false;
- return NULL;
+ return GetDefaultCapturer()->GetPairedOutputParameters(
+ session_id, output_sample_rate, output_frames_per_buffer);
}
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_audio_device_impl.h b/chromium/content/renderer/media/webrtc_audio_device_impl.h
index f515c6e24cb..e53125de036 100644
--- a/chromium/content/renderer/media/webrtc_audio_device_impl.h
+++ b/chromium/content/renderer/media/webrtc_audio_device_impl.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/files/file.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
@@ -17,7 +18,7 @@
#include "content/common/content_export.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_not_impl.h"
-#include "content/renderer/media/webrtc_audio_renderer.h"
+#include "ipc/ipc_platform_file.h"
#include "media/base/audio_capturer_source.h"
#include "media/base/audio_renderer_sink.h"
@@ -185,15 +186,11 @@ class WebRtcAudioRenderer;
// libjingle can own references to the renderer and capturer.
class WebRtcAudioRendererSource {
public:
- // Callback to get the rendered interleaved data.
- // TODO(xians): Change uint8* to int16*.
- virtual void RenderData(uint8* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds) = 0;
-
- // Set the format for the capture audio parameters.
- virtual void SetRenderFormat(const media::AudioParameters& params) = 0;
+ // Callback to get the rendered data.
+ virtual void RenderData(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds,
+ base::TimeDelta* current_time) = 0;
// Callback to notify the client that the renderer is going away.
virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) = 0;
@@ -235,13 +232,46 @@ class PeerConnectionAudioSink {
virtual ~PeerConnectionAudioSink() {}
};
+// TODO(xians): Merge this interface with WebRtcAudioRendererSource.
+// The reason why we could not do it today is that WebRtcAudioRendererSource
+// gets the data by pulling, while the data is pushed into
+// WebRtcPlayoutDataSource::Sink.
+class WebRtcPlayoutDataSource {
+ public:
+ class Sink {
+ public:
+ // Callback to get the playout data.
+ // Called on the render audio thread.
+ virtual void OnPlayoutData(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds) = 0;
+
+ // Callback to notify the sink that the source has changed.
+ // Called on the main render thread.
+ virtual void OnPlayoutDataSourceChanged() = 0;
+
+ protected:
+ virtual ~Sink() {}
+ };
+
+ // Adds/Removes the sink of WebRtcAudioRendererSource to the ADM.
+ // These methods are used by the MediaStreamAudioProcesssor to get the
+ // rendered data for AEC.
+ virtual void AddPlayoutSink(Sink* sink) = 0;
+ virtual void RemovePlayoutSink(Sink* sink) = 0;
+
+ protected:
+ virtual ~WebRtcPlayoutDataSource() {}
+};
+
// Note that this class inherits from webrtc::AudioDeviceModule but due to
// the high number of non-implemented methods, we move the cruft over to the
// WebRtcAudioDeviceNotImpl.
class CONTENT_EXPORT WebRtcAudioDeviceImpl
: NON_EXPORTED_BASE(public PeerConnectionAudioSink),
NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
- NON_EXPORTED_BASE(public WebRtcAudioRendererSource) {
+ NON_EXPORTED_BASE(public WebRtcAudioRendererSource),
+ NON_EXPORTED_BASE(public WebRtcPlayoutDataSource) {
public:
// The maximum volume value WebRtc uses.
static const int kMaxVolumeLevel = 255;
@@ -292,37 +322,41 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
virtual int32_t StereoRecordingIsAvailable(bool* available) const OVERRIDE;
virtual int32_t PlayoutDelay(uint16_t* delay_ms) const OVERRIDE;
virtual int32_t RecordingDelay(uint16_t* delay_ms) const OVERRIDE;
- virtual int32_t RecordingSampleRate(uint32_t* samples_per_sec) const OVERRIDE;
- virtual int32_t PlayoutSampleRate(uint32_t* samples_per_sec) const OVERRIDE;
+ virtual int32_t RecordingSampleRate(uint32_t* sample_rate) const OVERRIDE;
+ virtual int32_t PlayoutSampleRate(uint32_t* sample_rate) const OVERRIDE;
// Sets the |renderer_|, returns false if |renderer_| already exists.
// Called on the main renderer thread.
bool SetAudioRenderer(WebRtcAudioRenderer* renderer);
- // Adds the capturer to the ADM.
+ // Adds/Removes the capturer to the ADM.
+ // TODO(xians): Remove these two methods once the ADM does not need to pass
+ // hardware information up to WebRtc.
void AddAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer);
+ void RemoveAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer);
- // Gets the default capturer, which is the capturer in the list with
- // a valid |device_id|. Microphones are represented by capturers with a valid
- // |device_id|, since only one microphone is supported today, only one
- // capturer in the |capturers_| can have a valid |device_id|.
+ // Gets the default capturer, which is the last capturer in |capturers_|.
+ // The method can be called by both Libjingle thread and main render thread.
scoped_refptr<WebRtcAudioCapturer> GetDefaultCapturer() const;
+ // Gets paired device information of the capture device for the audio
+ // renderer. This is used to pass on a session id, sample rate and buffer
+ // size to a webrtc audio renderer (either local or remote), so that audio
+ // will be rendered to a matching output device.
+ // Returns true if the capture device has a paired output device, otherwise
+ // false. Note that if there are more than one open capture device the
+ // function will not be able to pick an appropriate device and return false.
+ bool GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id, int* output_sample_rate, int* output_buffer_size);
+
const scoped_refptr<WebRtcAudioRenderer>& renderer() const {
return renderer_;
}
- int output_buffer_size() const {
- return output_audio_parameters_.frames_per_buffer();
- }
- int output_channels() const {
- return output_audio_parameters_.channels();
- }
- int output_sample_rate() const {
- return output_audio_parameters_.sample_rate();
- }
private:
typedef std::list<scoped_refptr<WebRtcAudioCapturer> > CapturerList;
+ typedef std::list<WebRtcPlayoutDataSource::Sink*> PlayoutDataSinkList;
+ class RenderBuffer;
// Make destructor private to ensure that we can only be deleted by Release().
virtual ~WebRtcAudioDeviceImpl();
@@ -345,16 +379,19 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
// WebRtcAudioRendererSource implementation.
- // Called on the AudioInputDevice worker thread.
- virtual void RenderData(uint8* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds) OVERRIDE;
+ // Called on the AudioOutputDevice worker thread.
+ virtual void RenderData(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds,
+ base::TimeDelta* current_time) OVERRIDE;
// Called on the main render thread.
- virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE;
virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE;
+ // WebRtcPlayoutDataSource implementation.
+ virtual void AddPlayoutSink(WebRtcPlayoutDataSource::Sink* sink) OVERRIDE;
+ virtual void RemovePlayoutSink(WebRtcPlayoutDataSource::Sink* sink) OVERRIDE;
+
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
@@ -367,14 +404,16 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
// Provides access to the audio renderer in the browser process.
scoped_refptr<WebRtcAudioRenderer> renderer_;
+ // A list of raw pointer of WebRtcPlayoutDataSource::Sink objects which want
+ // to get the playout data, the sink need to call RemovePlayoutSink()
+ // before it goes away.
+ PlayoutDataSinkList playout_sinks_;
+
// Weak reference to the audio callback.
// The webrtc client defines |audio_transport_callback_| by calling
// RegisterAudioCallback().
webrtc::AudioTransport* audio_transport_callback_;
- // Cached values of used output audio parameters. Platform dependent.
- media::AudioParameters output_audio_parameters_;
-
// Cached value of the current audio delay on the input/capture side.
int input_delay_ms_;
@@ -385,18 +424,25 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
// |recording_| and |microphone_volume_|.
mutable base::Lock lock_;
+ // Used to protect the racing of calling OnData() since there can be more
+ // than one input stream calling OnData().
+ mutable base::Lock capture_callback_lock_;
+
bool initialized_;
bool playing_;
bool recording_;
- // Used for histograms of total recording and playout times.
- base::Time start_capture_time_;
- base::Time start_render_time_;
-
// Stores latest microphone volume received in a CaptureData() callback.
// Range is [0, 255].
uint32_t microphone_volume_;
+ // Buffer used for temporary storage during render callback.
+ // It is only accessed by the audio render thread.
+ std::vector<int16> render_buffer_;
+
+ // Flag to tell if audio processing is enabled in MediaStreamAudioProcessor.
+ const bool is_audio_track_processing_enabled_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcAudioDeviceImpl);
};
diff --git a/chromium/content/renderer/media/webrtc_audio_device_not_impl.cc b/chromium/content/renderer/media/webrtc_audio_device_not_impl.cc
index a126acdaaf7..b838c21ebc3 100644
--- a/chromium/content/renderer/media/webrtc_audio_device_not_impl.cc
+++ b/chromium/content/renderer/media/webrtc_audio_device_not_impl.cc
@@ -101,10 +101,6 @@ int32_t WebRtcAudioDeviceNotImpl::WaveOutVolume(
return 0;
}
-int32_t WebRtcAudioDeviceNotImpl::SpeakerIsAvailable(bool* available) {
- return 0;
-}
-
int32_t WebRtcAudioDeviceNotImpl::InitSpeaker() {
return 0;
}
@@ -113,10 +109,6 @@ bool WebRtcAudioDeviceNotImpl::SpeakerIsInitialized() const {
return 0;
}
-int32_t WebRtcAudioDeviceNotImpl::MicrophoneIsAvailable(bool* available) {
- return 0;
-}
-
int32_t WebRtcAudioDeviceNotImpl::InitMicrophone() {
return 0;
}
diff --git a/chromium/content/renderer/media/webrtc_audio_device_not_impl.h b/chromium/content/renderer/media/webrtc_audio_device_not_impl.h
index 041c7d43a8c..fad4f3f6a1f 100644
--- a/chromium/content/renderer/media/webrtc_audio_device_not_impl.h
+++ b/chromium/content/renderer/media/webrtc_audio_device_not_impl.h
@@ -58,10 +58,8 @@ class CONTENT_EXPORT WebRtcAudioDeviceNotImpl
uint16_t volume_right) OVERRIDE;
virtual int32_t WaveOutVolume(uint16_t* volume_left,
uint16_t* volume_right) const OVERRIDE;
- virtual int32_t SpeakerIsAvailable(bool* available) OVERRIDE;
virtual int32_t InitSpeaker() OVERRIDE;
virtual bool SpeakerIsInitialized() const OVERRIDE;
- virtual int32_t MicrophoneIsAvailable(bool* available) OVERRIDE;
virtual int32_t InitMicrophone() OVERRIDE;
virtual bool MicrophoneIsInitialized() const OVERRIDE;
virtual int32_t SpeakerVolumeIsAvailable(bool* available) OVERRIDE;
diff --git a/chromium/content/renderer/media/webrtc_audio_device_unittest.cc b/chromium/content/renderer/media/webrtc_audio_device_unittest.cc
deleted file mode 100644
index d6821f6fba0..00000000000
--- a/chromium/content/renderer/media/webrtc_audio_device_unittest.cc
+++ /dev/null
@@ -1,979 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/environment.h"
-#include "base/file_util.h"
-#include "base/files/file_path.h"
-#include "base/path_service.h"
-#include "base/strings/stringprintf.h"
-#include "base/test/test_timeouts.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/media/webrtc_audio_renderer.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
-#include "content/renderer/render_thread_impl.h"
-#include "content/test/webrtc_audio_device_test.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/base/audio_hardware_config.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
-#include "third_party/webrtc/voice_engine/include/voe_base.h"
-#include "third_party/webrtc/voice_engine/include/voe_codec.h"
-#include "third_party/webrtc/voice_engine/include/voe_external_media.h"
-#include "third_party/webrtc/voice_engine/include/voe_file.h"
-#include "third_party/webrtc/voice_engine/include/voe_network.h"
-
-#if defined(OS_WIN)
-#include "base/win/windows_version.h"
-#endif
-
-using media::AudioParameters;
-using media::CHANNEL_LAYOUT_STEREO;
-using testing::_;
-using testing::AnyNumber;
-using testing::InvokeWithoutArgs;
-using testing::Return;
-using testing::StrEq;
-
-namespace content {
-
-namespace {
-
-const int kRenderViewId = 1;
-
-// The number of packers that RunWebRtcLoopbackTimeTest() uses for measurement.
-const int kNumberOfPacketsForLoopbackTest = 100;
-
-// The hardware latency we feed to WebRtc.
-const int kHardwareLatencyInMs = 50;
-
-scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
- media::AudioManager* manager) {
- const AudioParameters output_parameters =
- manager->GetDefaultOutputStreamParameters();
- const AudioParameters input_parameters =
- manager->GetInputStreamParameters(
- media::AudioManagerBase::kDefaultDeviceId);
-
- return make_scoped_ptr(new media::AudioHardwareConfig(
- input_parameters, output_parameters));
-}
-
-// Return true if at least one element in the array matches |value|.
-bool FindElementInArray(const int* array, int size, int value) {
- return (std::find(&array[0], &array[0] + size, value) != &array[size]);
-}
-
-// This method returns false if a non-supported rate is detected on the
-// input or output side.
-// TODO(henrika): add support for automatic fallback to Windows Wave audio
-// if a non-supported rate is detected. It is probably better to detect
-// invalid audio settings by actually trying to open the audio streams instead
-// of relying on hard coded conditions.
-bool HardwareSampleRatesAreValid() {
- // These are the currently supported hardware sample rates in both directions.
- // The actual WebRTC client can limit these ranges further depending on
- // platform but this is the maximum range we support today.
- int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000};
- int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000};
-
- media::AudioHardwareConfig* hardware_config =
- RenderThreadImpl::current()->GetAudioHardwareConfig();
-
- // Verify the input sample rate.
- int input_sample_rate = hardware_config->GetInputSampleRate();
-
- if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates),
- input_sample_rate)) {
- LOG(WARNING) << "Non-supported input sample rate detected.";
- return false;
- }
-
- // Given that the input rate was OK, verify the output rate as well.
- int output_sample_rate = hardware_config->GetOutputSampleRate();
- if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates),
- output_sample_rate)) {
- LOG(WARNING) << "Non-supported output sample rate detected.";
- return false;
- }
-
- return true;
-}
-
-// Utility method which creates and initializes the audio capturer and adds it
-// to WebRTC audio device. This method should be used in tests where
-// HardwareSampleRatesAreValid() has been called and returned true.
-bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
- DCHECK(webrtc_audio_device);
- scoped_refptr<WebRtcAudioCapturer> capturer(
- WebRtcAudioCapturer::CreateCapturer());
-
- media::AudioHardwareConfig* hardware_config =
- RenderThreadImpl::current()->GetAudioHardwareConfig();
-
- // Use native capture sample rate and channel configuration to get some
- // action in this test.
- int sample_rate = hardware_config->GetInputSampleRate();
- media::ChannelLayout channel_layout =
- hardware_config->GetInputChannelLayout();
- if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1,
- media::AudioManagerBase::kDefaultDeviceId, 0, 0,
- media::AudioParameters::NO_EFFECTS)) {
- return false;
- }
-
- // Add the capturer to the WebRtcAudioDeviceImpl.
- webrtc_audio_device->AddAudioCapturer(capturer);
-
- return true;
-}
-
-// Create and start a local audio track. Starting the audio track will connect
-// the audio track to the capturer and also start the source of the capturer.
-// Also, connect the sink to the audio track.
-scoped_refptr<WebRtcLocalAudioTrack>
-CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
- PeerConnectionAudioSink* sink) {
- scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL));
- local_audio_track->AddSink(sink);
- local_audio_track->Start();
- return local_audio_track;
-}
-
-class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
- public:
- explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
- : event_(event),
- channel_id_(-1),
- type_(webrtc::kPlaybackPerChannel),
- packet_size_(0),
- sample_rate_(0),
- channels_(0) {
- }
- virtual ~WebRTCMediaProcessImpl() {}
-
- // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
- virtual void Process(int channel,
- webrtc::ProcessingTypes type,
- int16_t audio_10ms[],
- int length,
- int sampling_freq,
- bool is_stereo) OVERRIDE {
- base::AutoLock auto_lock(lock_);
- channel_id_ = channel;
- type_ = type;
- packet_size_ = length;
- sample_rate_ = sampling_freq;
- channels_ = (is_stereo ? 2 : 1);
- if (event_) {
- // Signal that a new callback has been received.
- event_->Signal();
- }
- }
-
- int channel_id() const {
- base::AutoLock auto_lock(lock_);
- return channel_id_;
- }
-
- int type() const {
- base::AutoLock auto_lock(lock_);
- return type_;
- }
-
- int packet_size() const {
- base::AutoLock auto_lock(lock_);
- return packet_size_;
- }
-
- int sample_rate() const {
- base::AutoLock auto_lock(lock_);
- return sample_rate_;
- }
-
- private:
- base::WaitableEvent* event_;
- int channel_id_;
- webrtc::ProcessingTypes type_;
- int packet_size_;
- int sample_rate_;
- int channels_;
- mutable base::Lock lock_;
- DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
-};
-
-// TODO(xians): Use MediaStreamAudioSink.
-class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
- public:
- explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
- : event_(event) {
- DCHECK(event_);
- }
- virtual ~MockMediaStreamAudioSink() {}
-
- // PeerConnectionAudioSink implementation.
- virtual int OnData(const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- const std::vector<int>& channels,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE {
- // Signal that a callback has been received.
- event_->Signal();
- return 0;
- }
-
- // Set the format for the capture audio parameters.
- virtual void OnSetFormat(
- const media::AudioParameters& params) OVERRIDE {}
-
- private:
- base::WaitableEvent* event_;
-
- DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
-};
-
-class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
- public:
- explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
- : event_(event) {
- DCHECK(event_);
- }
- virtual ~MockWebRtcAudioRendererSource() {}
-
- // WebRtcAudioRendererSource implementation.
- virtual void RenderData(uint8* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds) OVERRIDE {
- // Signal that a callback has been received.
- // Initialize the memory to zero to avoid uninitialized warning from
- // Valgrind.
- memset(audio_data, 0,
- sizeof(int16) * number_of_channels * number_of_frames);
- event_->Signal();
- }
-
- virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE {
- }
-
- virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {};
-
- private:
- base::WaitableEvent* event_;
-
- DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource);
-};
-
-// Prints numerical information to stdout in a controlled format so we can plot
-// the result.
-void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
- std::string times;
- base::StringAppendF(&times, "%.2f,", time_ms);
- std::string result = base::StringPrintf(
- "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
- trace, "[", times.c_str(), "]", "ms");
-
- fflush(stdout);
- printf("%s", result.c_str());
- fflush(stdout);
-}
-
-void ReadDataFromSpeechFile(char* data, int length) {
- base::FilePath data_file;
- CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file));
- data_file =
- data_file.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test"))
- .Append(FILE_PATH_LITERAL("data"))
- .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
- DCHECK(base::PathExists(data_file));
- int64 data_file_size64 = 0;
- DCHECK(base::GetFileSize(data_file, &data_file_size64));
- EXPECT_EQ(length, base::ReadFile(data_file, data, length));
- DCHECK(data_file_size64 > length);
-}
-
-void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
- // TODO(xians): move the codec as an input param to this function, and add
- // tests for different codecs, also add support to Android and IOS.
-#if !defined(OS_ANDROID) && !defined(OS_IOS)
- webrtc::CodecInst isac;
- strcpy(isac.plname, "ISAC");
- isac.pltype = 104;
- isac.pacsize = 960;
- isac.plfreq = 32000;
- isac.channels = 1;
- isac.rate = -1;
- ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
- EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
- EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
-#endif
-}
-
-// Returns the time in millisecond for sending packets to WebRtc for encoding,
-// signal processing, decoding and receiving them back.
-int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
- bool enable_apm) {
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- EXPECT_TRUE(engine.valid());
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- EXPECT_TRUE(base.valid());
- int err = base->Init(webrtc_audio_device.get());
- EXPECT_EQ(0, err);
-
- // We use OnSetFormat() and SetRenderFormat() to configure the audio
- // parameters so that this test can run on machine without hardware device.
- const media::AudioParameters params = media::AudioParameters(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- 48000, 2, 480);
- PeerConnectionAudioSink* capturer_sink =
- static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
- WebRtcAudioRendererSource* renderer_source =
- static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
- renderer_source->SetRenderFormat(params);
-
- // Turn on/off all the signal processing components like AGC, AEC and NS.
- ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
- EXPECT_TRUE(audio_processing.valid());
- audio_processing->SetAgcStatus(enable_apm);
- audio_processing->SetNsStatus(enable_apm);
- audio_processing->SetEcStatus(enable_apm);
-
- // Create a voice channel for the WebRtc.
- int channel = base->CreateChannel();
- EXPECT_NE(-1, channel);
- SetChannelCodec(engine.get(), channel);
-
- // Use our fake network transmission and start playout and recording.
- ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
- EXPECT_TRUE(network.valid());
- scoped_ptr<WebRTCTransportImpl> transport(
- new WebRTCTransportImpl(network.get()));
- EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
- EXPECT_EQ(0, base->StartPlayout(channel));
- EXPECT_EQ(0, base->StartSend(channel));
-
- // Read speech data from a speech test file.
- const int input_packet_size =
- params.frames_per_buffer() * 2 * params.channels();
- const int num_output_channels = webrtc_audio_device->output_channels();
- const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 *
- num_output_channels;
- const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
- scoped_ptr<char[]> capture_data(new char[length]);
- ReadDataFromSpeechFile(capture_data.get(), length);
-
- // Start the timer.
- scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
- base::Time start_time = base::Time::Now();
- int delay = 0;
- std::vector<int> voe_channels;
- voe_channels.push_back(channel);
- for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
- // Sending fake capture data to WebRtc.
- capturer_sink->OnData(
- reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
- params.sample_rate(),
- params.channels(),
- params.frames_per_buffer(),
- voe_channels,
- kHardwareLatencyInMs,
- 1.0,
- enable_apm,
- false);
-
- // Receiving data from WebRtc.
- renderer_source->RenderData(
- reinterpret_cast<uint8*>(buffer.get()),
- num_output_channels, webrtc_audio_device->output_buffer_size(),
- kHardwareLatencyInMs + delay);
- delay = (base::Time::Now() - start_time).InMilliseconds();
- }
-
- int latency = (base::Time::Now() - start_time).InMilliseconds();
-
- EXPECT_EQ(0, base->StopSend(channel));
- EXPECT_EQ(0, base->StopPlayout(channel));
- EXPECT_EQ(0, base->DeleteChannel(channel));
- EXPECT_EQ(0, base->Terminate());
-
- return latency;
-}
-
-} // namespace
-
-// Trivial test which verifies that one part of the test harness
-// (HardwareSampleRatesAreValid()) works as intended for all supported
-// hardware input sample rates.
-TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidInputRates) {
- int valid_rates[] = {16000, 32000, 44100, 48000, 96000};
-
- // Verify that we will approve all rates listed in |valid_rates|.
- for (size_t i = 0; i < arraysize(valid_rates); ++i) {
- EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
- valid_rates[i]));
- }
-
- // Verify that any value outside the valid range results in negative
- // find results.
- int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000};
- for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
- EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
- invalid_rates[i]));
- }
-}
-
-// Trivial test which verifies that one part of the test harness
-// (HardwareSampleRatesAreValid()) works as intended for all supported
-// hardware output sample rates.
-TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidOutputRates) {
- int valid_rates[] = {44100, 48000, 96000};
-
- // Verify that we will approve all rates listed in |valid_rates|.
- for (size_t i = 0; i < arraysize(valid_rates); ++i) {
- EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
- valid_rates[i]));
- }
-
- // Verify that any value outside the valid range results in negative
- // find results.
- int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000};
- for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
- EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
- invalid_rates[i]));
- }
-}
-
-// Basic test that instantiates and initializes an instance of
-// WebRtcAudioDeviceImpl.
-TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
-#if defined(OS_WIN)
- // This test crashes on Win XP bots.
- if (base::win::GetVersion() <= base::win::VERSION_XP)
- return;
-#endif
-
- AudioParameters input_params(
- AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_MONO,
- 48000,
- 16,
- 480);
-
- AudioParameters output_params(
- AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_STEREO,
- 48000,
- 16,
- 480);
-
- media::AudioHardwareConfig audio_config(input_params, output_params);
- SetAudioHardwareConfig(&audio_config);
-
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
-
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- ASSERT_TRUE(engine.valid());
-
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- int err = base->Init(webrtc_audio_device.get());
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
- EXPECT_EQ(0, err);
- EXPECT_EQ(0, base->Terminate());
-}
-
-// Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
-// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
-// be utilized to implement the actual audio path. The test registers a
-// webrtc::VoEExternalMedia implementation to hijack the output audio and
-// verify that streaming starts correctly.
-// TODO(henrika): include on Android as well as soon as alla race conditions
-// in OpenSLES are resolved.
-#if defined(OS_ANDROID)
-#define MAYBE_StartPlayout DISABLED_StartPlayout
-#else
-#define MAYBE_StartPlayout StartPlayout
-#endif
-TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
- if (!has_output_devices_) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- scoped_ptr<media::AudioHardwareConfig> config =
- CreateRealHardwareConfig(audio_manager_.get());
- SetAudioHardwareConfig(config.get());
-
- if (!HardwareSampleRatesAreValid())
- return;
-
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- ASSERT_TRUE(engine.valid());
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- ASSERT_TRUE(base.valid());
-
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
- int err = base->Init(webrtc_audio_device.get());
- ASSERT_EQ(0, err);
-
- ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
- ASSERT_TRUE(external_media.valid());
- base::WaitableEvent event(false, false);
- scoped_ptr<WebRTCMediaProcessImpl> media_process(
- new WebRTCMediaProcessImpl(&event));
- int ch = base->CreateChannel();
- EXPECT_NE(-1, ch);
- EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
- ch, webrtc::kPlaybackPerChannel, *media_process.get()));
-
- EXPECT_EQ(0, base->StartPlayout(ch));
- scoped_refptr<WebRtcAudioRenderer> renderer(
- CreateDefaultWebRtcAudioRenderer(kRenderViewId));
- scoped_refptr<MediaStreamAudioRenderer> proxy(
- renderer->CreateSharedAudioRendererProxy());
- EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
- proxy->Start();
- proxy->Play();
-
- EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
- WaitForIOThreadCompletion();
-
- EXPECT_TRUE(webrtc_audio_device->Playing());
- EXPECT_FALSE(webrtc_audio_device->Recording());
- EXPECT_EQ(ch, media_process->channel_id());
- EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
- EXPECT_EQ(80, media_process->packet_size());
- EXPECT_EQ(8000, media_process->sample_rate());
-
- EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
- ch, webrtc::kPlaybackPerChannel));
- EXPECT_EQ(0, base->StopPlayout(ch));
- proxy->Stop();
- EXPECT_EQ(0, base->DeleteChannel(ch));
- EXPECT_EQ(0, base->Terminate());
-}
-
-// Verify that a call to webrtc::VoEBase::StartRecording() starts audio input
-// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
-// be utilized to implement the actual audio path. The test registers a
-// webrtc::VoEExternalMedia implementation to hijack the input audio and
-// verify that streaming starts correctly. An external transport implementation
-// is also required to ensure that "sending" can start without actually trying
-// to send encoded packets to the network. Our main interest here is to ensure
-// that the audio capturing starts as it should.
-// Disabled when running headless since the bots don't have the required config.
-
-// TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc,
-// disable this unit test on Android for now.
-#if defined(OS_ANDROID)
-#define MAYBE_StartRecording DISABLED_StartRecording
-#elif defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
-// This test is failing on ARM linux: http://crbug.com/238490
-#define MAYBE_StartRecording DISABLED_StartRecording
-#else
-// Flakily hangs on all other platforms as well: crbug.com/268376.
-// When the flakiness has been fixed, you probably want to leave it disabled
-// on the above platforms.
-#define MAYBE_StartRecording DISABLED_StartRecording
-#endif
-
-TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
- if (!has_input_devices_ || !has_output_devices_) {
- LOG(WARNING) << "Missing audio devices.";
- return;
- }
-
- scoped_ptr<media::AudioHardwareConfig> config =
- CreateRealHardwareConfig(audio_manager_.get());
- SetAudioHardwareConfig(config.get());
-
- if (!HardwareSampleRatesAreValid())
- return;
-
- // TODO(tommi): extend MediaObserver and MockMediaObserver with support
- // for new interfaces, like OnSetAudioStreamRecording(). When done, add
- // EXPECT_CALL() macros here.
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
-
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- ASSERT_TRUE(engine.valid());
-
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- ASSERT_TRUE(base.valid());
- int err = base->Init(webrtc_audio_device.get());
- ASSERT_EQ(0, err);
-
- int ch = base->CreateChannel();
- EXPECT_NE(-1, ch);
-
- ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
- ASSERT_TRUE(external_media.valid());
-
- base::WaitableEvent event(false, false);
- scoped_ptr<WebRTCMediaProcessImpl> media_process(
- new WebRTCMediaProcessImpl(&event));
- EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
- ch, webrtc::kRecordingPerChannel, *media_process.get()));
-
- // We must add an external transport implementation to be able to start
- // recording without actually sending encoded packets to the network. All
- // we want to do here is to verify that audio capturing starts as it should.
- ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
- scoped_ptr<WebRTCTransportImpl> transport(
- new WebRTCTransportImpl(network.get()));
- EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
- EXPECT_EQ(0, base->StartSend(ch));
-
- // Create and initialize the capturer which starts the source of the data
- // flow.
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
-
- // Create and start a local audio track which is bridging the data flow
- // between the capturer and WebRtcAudioDeviceImpl.
- scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
- webrtc_audio_device));
- // connect the VoE voice channel to the audio track
- static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
- GetRenderer()->AddChannel(ch);
-
- // Verify we get the data flow.
- EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
- WaitForIOThreadCompletion();
-
- EXPECT_FALSE(webrtc_audio_device->Playing());
- EXPECT_TRUE(webrtc_audio_device->Recording());
- EXPECT_EQ(ch, media_process->channel_id());
- EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
- EXPECT_EQ(80, media_process->packet_size());
- EXPECT_EQ(8000, media_process->sample_rate());
-
- EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
- ch, webrtc::kRecordingPerChannel));
- EXPECT_EQ(0, base->StopSend(ch));
-
- webrtc_audio_device->GetDefaultCapturer()->Stop();
- EXPECT_EQ(0, base->DeleteChannel(ch));
- EXPECT_EQ(0, base->Terminate());
-}
-
-// Uses WebRtcAudioDeviceImpl to play a local wave file.
-// TODO(henrika): include on Android as well as soon as alla race conditions
-// in OpenSLES are resolved.
-#if defined(OS_ANDROID)
-#define MAYBE_PlayLocalFile DISABLED_PlayLocalFile
-#else
-#define MAYBE_PlayLocalFile PlayLocalFile
-#endif
-TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
- if (!has_output_devices_) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- std::string file_path(
- GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));
-
- scoped_ptr<media::AudioHardwareConfig> config =
- CreateRealHardwareConfig(audio_manager_.get());
- SetAudioHardwareConfig(config.get());
-
- if (!HardwareSampleRatesAreValid())
- return;
-
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- ASSERT_TRUE(engine.valid());
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- ASSERT_TRUE(base.valid());
-
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
- int err = base->Init(webrtc_audio_device.get());
- ASSERT_EQ(0, err);
- int ch = base->CreateChannel();
- EXPECT_NE(-1, ch);
- EXPECT_EQ(0, base->StartPlayout(ch));
- scoped_refptr<WebRtcAudioRenderer> renderer(
- CreateDefaultWebRtcAudioRenderer(kRenderViewId));
- scoped_refptr<MediaStreamAudioRenderer> proxy(
- renderer->CreateSharedAudioRendererProxy());
- EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
- proxy->Start();
- proxy->Play();
-
- ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
- ASSERT_TRUE(file.valid());
- int duration = 0;
- EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
- webrtc::kFileFormatPcm16kHzFile));
- EXPECT_NE(0, duration);
-
- EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
- webrtc::kFileFormatPcm16kHzFile));
-
- // Play 2 seconds worth of audio and then quit.
- message_loop_.PostDelayedTask(FROM_HERE,
- base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromSeconds(2));
- message_loop_.Run();
-
- proxy->Stop();
- EXPECT_EQ(0, base->StopSend(ch));
- EXPECT_EQ(0, base->StopPlayout(ch));
- EXPECT_EQ(0, base->DeleteChannel(ch));
- EXPECT_EQ(0, base->Terminate());
-}
-
-// Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback.
-// An external transport implementation is utilized to feed back RTP packets
-// which are recorded, encoded, packetized into RTP packets and finally
-// "transmitted". The RTP packets are then fed back into the VoiceEngine
-// where they are decoded and played out on the default audio output device.
-// Disabled when running headless since the bots don't have the required config.
-// TODO(henrika): improve quality by using a wideband codec, enabling noise-
-// suppressions etc.
-// FullDuplexAudioWithAGC is flaky on Android, disable it for now.
-// Also flakily hangs on Windows: crbug.com/269348.
-#if defined(OS_ANDROID) || defined(OS_WIN)
-#define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC
-#else
-#define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC
-#endif
-TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
- if (!has_output_devices_ || !has_input_devices_) {
- LOG(WARNING) << "Missing audio devices.";
- return;
- }
-
- scoped_ptr<media::AudioHardwareConfig> config =
- CreateRealHardwareConfig(audio_manager_.get());
- SetAudioHardwareConfig(config.get());
-
- if (!HardwareSampleRatesAreValid())
- return;
-
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- ASSERT_TRUE(engine.valid());
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- ASSERT_TRUE(base.valid());
-
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
- int err = base->Init(webrtc_audio_device.get());
- ASSERT_EQ(0, err);
-
- ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
- ASSERT_TRUE(audio_processing.valid());
-#if defined(OS_ANDROID)
- // On Android, by default AGC is off.
- bool enabled = true;
- webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
- EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
- EXPECT_FALSE(enabled);
-#else
- bool enabled = false;
- webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
- EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
- EXPECT_TRUE(enabled);
- EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);
-#endif
-
- int ch = base->CreateChannel();
- EXPECT_NE(-1, ch);
-
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
- scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
- webrtc_audio_device));
- // connect the VoE voice channel to the audio track
- static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
- GetRenderer()->AddChannel(ch);
-
- ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
- ASSERT_TRUE(network.valid());
- scoped_ptr<WebRTCTransportImpl> transport(
- new WebRTCTransportImpl(network.get()));
- EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
- EXPECT_EQ(0, base->StartPlayout(ch));
- EXPECT_EQ(0, base->StartSend(ch));
- scoped_refptr<WebRtcAudioRenderer> renderer(
- CreateDefaultWebRtcAudioRenderer(kRenderViewId));
- scoped_refptr<MediaStreamAudioRenderer> proxy(
- renderer->CreateSharedAudioRendererProxy());
- EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
- proxy->Start();
- proxy->Play();
-
- VLOG(0) << ">> You should now be able to hear yourself in loopback...";
- message_loop_.PostDelayedTask(FROM_HERE,
- base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromSeconds(2));
- message_loop_.Run();
-
- webrtc_audio_device->GetDefaultCapturer()->Stop();
- proxy->Stop();
- EXPECT_EQ(0, base->StopSend(ch));
- EXPECT_EQ(0, base->StopPlayout(ch));
-
- EXPECT_EQ(0, base->DeleteChannel(ch));
- EXPECT_EQ(0, base->Terminate());
-}
-
-// Test times out on bots, see http://crbug.com/247447
-TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
- if (!has_input_devices_) {
- LOG(WARNING) << "Missing audio capture devices.";
- return;
- }
-
- scoped_ptr<media::AudioHardwareConfig> config =
- CreateRealHardwareConfig(audio_manager_.get());
- SetAudioHardwareConfig(config.get());
-
- if (!HardwareSampleRatesAreValid())
- return;
-
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
-
- WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
- ASSERT_TRUE(engine.valid());
-
- ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
- ASSERT_TRUE(base.valid());
- int err = base->Init(webrtc_audio_device.get());
- ASSERT_EQ(0, err);
-
- int ch = base->CreateChannel();
- EXPECT_NE(-1, ch);
-
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
- base::WaitableEvent event(false, false);
- scoped_ptr<MockMediaStreamAudioSink> sink(
- new MockMediaStreamAudioSink(&event));
-
- // Create and start a local audio track. Starting the audio track will connect
- // the audio track to the capturer and also start the source of the capturer.
- scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- CreateAndStartLocalAudioTrack(
- webrtc_audio_device->GetDefaultCapturer().get(), sink.get()));
-
- // connect the VoE voice channel to the audio track.
- static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
- GetRenderer()->AddChannel(ch);
-
- base::Time start_time = base::Time::Now();
- EXPECT_EQ(0, base->StartSend(ch));
-
- EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
- int delay = (base::Time::Now() - start_time).InMilliseconds();
- PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);
-
- webrtc_audio_device->GetDefaultCapturer()->Stop();
- EXPECT_EQ(0, base->StopSend(ch));
- EXPECT_EQ(0, base->DeleteChannel(ch));
- EXPECT_EQ(0, base->Terminate());
-}
-
-
-// TODO(henrika): include on Android as well as soon as alla race conditions
-// in OpenSLES are resolved.
-#if defined(OS_ANDROID)
-#define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime
-#else
-#define MAYBE_WebRtcPlayoutSetupTime WebRtcPlayoutSetupTime
-#endif
-TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) {
- if (!has_output_devices_) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- scoped_ptr<media::AudioHardwareConfig> config =
- CreateRealHardwareConfig(audio_manager_.get());
- SetAudioHardwareConfig(config.get());
-
- if (!HardwareSampleRatesAreValid())
- return;
-
- base::WaitableEvent event(false, false);
- scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
- new MockWebRtcAudioRendererSource(&event));
-
- scoped_refptr<WebRtcAudioRenderer> renderer(
- CreateDefaultWebRtcAudioRenderer(kRenderViewId));
- renderer->Initialize(renderer_source.get());
- scoped_refptr<MediaStreamAudioRenderer> proxy(
- renderer->CreateSharedAudioRendererProxy());
- proxy->Start();
-
- // Start the timer and playout.
- base::Time start_time = base::Time::Now();
- proxy->Play();
- EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
- int delay = (base::Time::Now() - start_time).InMilliseconds();
- PrintPerfResultMs("webrtc_playout_setup_c", "t", delay);
-
- proxy->Stop();
-}
-
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
-// Timing out on ARM linux bot: http://crbug.com/238490
-#define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
- DISABLED_WebRtcLoopbackTimeWithoutSignalProcessing
-#else
-#define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
- WebRtcLoopbackTimeWithoutSignalProcessing
-#endif
-
-TEST_F(MAYBE_WebRTCAudioDeviceTest,
- MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing) {
-#if defined(OS_WIN)
- // This test hangs on WinXP: see http://crbug.com/318189.
- if (base::win::GetVersion() <= base::win::VERSION_XP) {
- LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
- return;
- }
-#endif
- int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false);
- PrintPerfResultMs("webrtc_loopback_without_sigal_processing (100 packets)",
- "t", latency);
-}
-
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
-// Timing out on ARM linux bot: http://crbug.com/238490
-#define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
- DISABLED_WebRtcLoopbackTimeWithSignalProcessing
-#else
-#define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
- WebRtcLoopbackTimeWithSignalProcessing
-#endif
-
-TEST_F(MAYBE_WebRTCAudioDeviceTest,
- MAYBE_WebRtcLoopbackTimeWithSignalProcessing) {
-#if defined(OS_WIN)
- // This test hangs on WinXP: see http://crbug.com/318189.
- if (base::win::GetVersion() <= base::win::VERSION_XP) {
- LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
- return;
- }
-#endif
- int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
- PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
- "t", latency);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_audio_renderer.cc b/chromium/content/renderer/media/webrtc_audio_renderer.cc
index dae29a506aa..f86c8cc6673 100644
--- a/chromium/content/renderer/media/webrtc_audio_renderer.cc
+++ b/chromium/content/renderer/media/webrtc_audio_renderer.cc
@@ -14,6 +14,9 @@
#include "media/audio/audio_output_device.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/sample_rates.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
+
#if defined(OS_WIN)
#include "base/win/windows_version.h"
@@ -94,8 +97,20 @@ void AddHistogramFramesPerBuffer(int param) {
// (see the play reference count).
class SharedAudioRenderer : public MediaStreamAudioRenderer {
public:
- SharedAudioRenderer(const scoped_refptr<MediaStreamAudioRenderer>& delegate)
- : delegate_(delegate), started_(false), playing_(false) {
+  // Callback definition for a callback that is called when Play(), Pause()
+ // or SetVolume are called (whenever the internal |playing_state_| changes).
+ typedef base::Callback<
+ void(const scoped_refptr<webrtc::MediaStreamInterface>&,
+ WebRtcAudioRenderer::PlayingState*)> OnPlayStateChanged;
+
+ SharedAudioRenderer(
+ const scoped_refptr<MediaStreamAudioRenderer>& delegate,
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
+ const OnPlayStateChanged& on_play_state_changed)
+ : delegate_(delegate), media_stream_(media_stream), started_(false),
+ on_play_state_changed_(on_play_state_changed) {
+ DCHECK(!on_play_state_changed_.is_null());
+ DCHECK(media_stream_.get());
}
protected:
@@ -116,19 +131,19 @@ class SharedAudioRenderer : public MediaStreamAudioRenderer {
virtual void Play() OVERRIDE {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(started_);
- if (playing_)
+ if (playing_state_.playing())
return;
- playing_ = true;
- delegate_->Play();
+ playing_state_.set_playing(true);
+ on_play_state_changed_.Run(media_stream_, &playing_state_);
}
virtual void Pause() OVERRIDE {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(started_);
- if (!playing_)
+ if (!playing_state_.playing())
return;
- playing_ = false;
- delegate_->Pause();
+ playing_state_.set_playing(false);
+ on_play_state_changed_.Run(media_stream_, &playing_state_);
}
virtual void Stop() OVERRIDE {
@@ -142,7 +157,9 @@ class SharedAudioRenderer : public MediaStreamAudioRenderer {
virtual void SetVolume(float volume) OVERRIDE {
DCHECK(thread_checker_.CalledOnValidThread());
- return delegate_->SetVolume(volume);
+ DCHECK(volume >= 0.0f && volume <= 1.0f);
+ playing_state_.set_volume(volume);
+ on_play_state_changed_.Run(media_stream_, &playing_state_);
}
virtual base::TimeDelta GetCurrentRenderTime() const OVERRIDE {
@@ -157,27 +174,35 @@ class SharedAudioRenderer : public MediaStreamAudioRenderer {
private:
base::ThreadChecker thread_checker_;
- scoped_refptr<MediaStreamAudioRenderer> delegate_;
+ const scoped_refptr<MediaStreamAudioRenderer> delegate_;
+ const scoped_refptr<webrtc::MediaStreamInterface> media_stream_;
bool started_;
- bool playing_;
+ WebRtcAudioRenderer::PlayingState playing_state_;
+ OnPlayStateChanged on_play_state_changed_;
};
} // namespace
-WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id,
- int session_id,
- int sample_rate,
- int frames_per_buffer)
+WebRtcAudioRenderer::WebRtcAudioRenderer(
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
+ int source_render_view_id,
+ int source_render_frame_id,
+ int session_id,
+ int sample_rate,
+ int frames_per_buffer)
: state_(UNINITIALIZED),
source_render_view_id_(source_render_view_id),
+ source_render_frame_id_(source_render_frame_id),
session_id_(session_id),
+ media_stream_(media_stream),
source_(NULL),
play_ref_count_(0),
start_ref_count_(0),
audio_delay_milliseconds_(0),
fifo_delay_milliseconds_(0),
- sample_rate_(sample_rate),
- frames_per_buffer_(frames_per_buffer) {
+ sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO, 0, sample_rate, 16,
+ frames_per_buffer, media::AudioParameters::DUCKING) {
WebRtcLogMessage(base::StringPrintf(
"WAR::WAR. source_render_view_id=%d"
", session_id=%d, sample_rate=%d, frames_per_buffer=%d",
@@ -190,7 +215,6 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id,
WebRtcAudioRenderer::~WebRtcAudioRenderer() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_EQ(state_, UNINITIALIZED);
- buffer_.reset();
}
bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
@@ -202,30 +226,25 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
DCHECK(!sink_.get());
DCHECK(!source_);
- // Use stereo output on all platforms.
- media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO;
-
- // TODO(tommi,henrika): Maybe we should just change |sample_rate_| to be
- // immutable and change its value instead of using a temporary?
- int sample_rate = sample_rate_;
- DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
-
// WebRTC does not yet support higher rates than 96000 on the client side
// and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
// we change the rate to 48000 instead. The consequence is that the native
// layer will be opened up at 192kHz but WebRTC will provide data at 48kHz
// which will then be resampled by the audio converted on the browser side
// to match the native audio layer.
+ int sample_rate = sink_params_.sample_rate();
+ DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
if (sample_rate == 192000) {
DVLOG(1) << "Resampling from 48000 to 192000 is required";
sample_rate = 48000;
}
- media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate);
- if (asr != media::kUnexpectedAudioSampleRate) {
+ media::AudioSampleRate asr;
+ if (media::ToAudioSampleRate(sample_rate, &asr)) {
UMA_HISTOGRAM_ENUMERATION(
- "WebRTC.AudioOutputSampleRate", asr, media::kUnexpectedAudioSampleRate);
+ "WebRTC.AudioOutputSampleRate", asr, media::kAudioSampleRateMax + 1);
} else {
- UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate);
+ UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected",
+ sample_rate);
}
// Verify that the reported output hardware sample rate is supported
@@ -243,50 +262,47 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
// The WebRTC client only supports multiples of 10ms as buffer size where
// 10ms is preferred for lowest possible delay.
media::AudioParameters source_params;
- int buffer_size = (sample_rate / 100);
- DVLOG(1) << "Using WebRTC output buffer size: " << buffer_size;
+ const int frames_per_10ms = (sample_rate / 100);
+ DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms;
- int channels = ChannelLayoutToChannelCount(channel_layout);
source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout, channels, 0,
- sample_rate, 16, buffer_size);
+ sink_params_.channel_layout(), sink_params_.channels(), 0,
+ sample_rate, 16, frames_per_10ms);
- // Set up audio parameters for the sink, i.e., the native audio output stream.
+ // Update audio parameters for the sink, i.e., the native audio output stream.
// We strive to open up using native parameters to achieve best possible
// performance and to ensure that no FIFO is needed on the browser side to
// match the client request. Any mismatch between the source and the sink is
// taken care of in this class instead using a pull FIFO.
- media::AudioParameters sink_params;
-
- // Use native output siz as default.
- buffer_size = frames_per_buffer_;
+ // Use native output size as default.
+ int frames_per_buffer = sink_params_.frames_per_buffer();
#if defined(OS_ANDROID)
// TODO(henrika): Keep tuning this scheme and espcicially for low-latency
// cases. Might not be possible to come up with the perfect solution using
// the render side only.
- const int frames_per_10ms = (sample_rate / 100);
- if (buffer_size < 2 * frames_per_10ms) {
+ if (frames_per_buffer < 2 * frames_per_10ms) {
// Examples of low-latency frame sizes and the resulting |buffer_size|:
// Nexus 7 : 240 audio frames => 2*480 = 960
// Nexus 10 : 256 => 2*441 = 882
// Galaxy Nexus: 144 => 2*441 = 882
- buffer_size = 2 * frames_per_10ms;
+ frames_per_buffer = 2 * frames_per_10ms;
DVLOG(1) << "Low-latency output detected on Android";
}
#endif
- DVLOG(1) << "Using sink output buffer size: " << buffer_size;
+ DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer;
- sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout, channels, 0, sample_rate, 16, buffer_size);
+ sink_params_.Reset(sink_params_.format(), sink_params_.channel_layout(),
+ sink_params_.channels(), 0, sample_rate, 16,
+ frames_per_buffer);
// Create a FIFO if re-buffering is required to match the source input with
// the sink request. The source acts as provider here and the sink as
// consumer.
fifo_delay_milliseconds_ = 0;
- if (source_params.frames_per_buffer() != sink_params.frames_per_buffer()) {
+ if (source_params.frames_per_buffer() != sink_params_.frames_per_buffer()) {
DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer()
- << " to " << sink_params.frames_per_buffer();
+ << " to " << sink_params_.frames_per_buffer();
audio_fifo_.reset(new media::AudioPullFifo(
source_params.channels(),
source_params.frames_per_buffer(),
@@ -294,40 +310,28 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
&WebRtcAudioRenderer::SourceCallback,
base::Unretained(this))));
- if (sink_params.frames_per_buffer() > source_params.frames_per_buffer()) {
+ if (sink_params_.frames_per_buffer() > source_params.frames_per_buffer()) {
int frame_duration_milliseconds = base::Time::kMillisecondsPerSecond /
static_cast<double>(source_params.sample_rate());
- fifo_delay_milliseconds_ = (sink_params.frames_per_buffer() -
+ fifo_delay_milliseconds_ = (sink_params_.frames_per_buffer() -
source_params.frames_per_buffer()) * frame_duration_milliseconds;
}
}
- // Allocate local audio buffers based on the parameters above.
- // It is assumed that each audio sample contains 16 bits and each
- // audio frame contains one or two audio samples depending on the
- // number of channels.
- buffer_.reset(
- new int16[source_params.frames_per_buffer() * source_params.channels()]);
-
source_ = source;
- source->SetRenderFormat(source_params);
// Configure the audio rendering client and start rendering.
- sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_);
+ sink_ = AudioDeviceFactory::NewOutputDevice(
+ source_render_view_id_, source_render_frame_id_);
- // TODO(tommi): Rename InitializeUnifiedStream to rather reflect association
- // with a session.
DCHECK_GE(session_id_, 0);
- sink_->InitializeUnifiedStream(sink_params, this, session_id_);
+ sink_->InitializeWithSessionId(sink_params_, this, session_id_);
sink_->Start();
// User must call Play() before any audio can be heard.
state_ = PAUSED;
- UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout",
- source_params.channel_layout(),
- media::CHANNEL_LAYOUT_MAX);
UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer",
source_params.frames_per_buffer(),
kUnexpectedAudioBufferSize);
@@ -337,8 +341,11 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
}
scoped_refptr<MediaStreamAudioRenderer>
-WebRtcAudioRenderer::CreateSharedAudioRendererProxy() {
- return new SharedAudioRenderer(this);
+WebRtcAudioRenderer::CreateSharedAudioRendererProxy(
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream) {
+ content::SharedAudioRenderer::OnPlayStateChanged on_play_state_changed =
+ base::Bind(&WebRtcAudioRenderer::OnPlayStateChanged, this);
+ return new SharedAudioRenderer(this, media_stream, on_play_state_changed);
}
bool WebRtcAudioRenderer::IsStarted() const {
@@ -355,6 +362,18 @@ void WebRtcAudioRenderer::Start() {
void WebRtcAudioRenderer::Play() {
DVLOG(1) << "WebRtcAudioRenderer::Play()";
DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (playing_state_.playing())
+ return;
+
+ playing_state_.set_playing(true);
+
+ OnPlayStateChanged(media_stream_, &playing_state_);
+}
+
+void WebRtcAudioRenderer::EnterPlayState() {
+ DVLOG(1) << "WebRtcAudioRenderer::EnterPlayState()";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_GT(start_ref_count_, 0) << "Did you forget to call Start()?";
base::AutoLock auto_lock(lock_);
if (state_ == UNINITIALIZED)
@@ -376,6 +395,17 @@ void WebRtcAudioRenderer::Play() {
void WebRtcAudioRenderer::Pause() {
DVLOG(1) << "WebRtcAudioRenderer::Pause()";
DCHECK(thread_checker_.CalledOnValidThread());
+ if (!playing_state_.playing())
+ return;
+
+ playing_state_.set_playing(false);
+
+ OnPlayStateChanged(media_stream_, &playing_state_);
+}
+
+void WebRtcAudioRenderer::EnterPauseState() {
+ DVLOG(1) << "WebRtcAudioRenderer::EnterPauseState()";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_GT(start_ref_count_, 0) << "Did you forget to call Start()?";
base::AutoLock auto_lock(lock_);
if (state_ == UNINITIALIZED)
@@ -413,15 +443,16 @@ void WebRtcAudioRenderer::Stop() {
void WebRtcAudioRenderer::SetVolume(float volume) {
DCHECK(thread_checker_.CalledOnValidThread());
- base::AutoLock auto_lock(lock_);
- if (state_ == UNINITIALIZED)
- return;
+ DCHECK(volume >= 0.0f && volume <= 1.0f);
- sink_->SetVolume(volume);
+ playing_state_.set_volume(volume);
+ OnPlayStateChanged(media_stream_, &playing_state_);
}
base::TimeDelta WebRtcAudioRenderer::GetCurrentRenderTime() const {
- return base::TimeDelta();
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoLock auto_lock(lock_);
+ return current_time_;
}
bool WebRtcAudioRenderer::IsLocalRenderer() const {
@@ -465,22 +496,98 @@ void WebRtcAudioRenderer::SourceCallback(
// We need to keep render data for the |source_| regardless of |state_|,
// otherwise the data will be buffered up inside |source_|.
- source_->RenderData(reinterpret_cast<uint8*>(buffer_.get()),
- audio_bus->channels(), audio_bus->frames(),
- output_delay_milliseconds);
+ source_->RenderData(audio_bus, sink_params_.sample_rate(),
+ output_delay_milliseconds,
+ &current_time_);
// Avoid filling up the audio bus if we are not playing; instead
// return here and ensure that the returned value in Render() is 0.
- if (state_ != PLAYING) {
+ if (state_ != PLAYING)
audio_bus->Zero();
- return;
+}
+
+void WebRtcAudioRenderer::UpdateSourceVolume(
+ webrtc::AudioSourceInterface* source) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Note: If there are no playing audio renderers, then the volume will be
+ // set to 0.0.
+ float volume = 0.0f;
+
+ SourcePlayingStates::iterator entry = source_playing_states_.find(source);
+ if (entry != source_playing_states_.end()) {
+ PlayingStates& states = entry->second;
+ for (PlayingStates::const_iterator it = states.begin();
+ it != states.end(); ++it) {
+ if ((*it)->playing())
+ volume += (*it)->volume();
+ }
}
- // De-interleave each channel and convert to 32-bit floating-point
- // with nominal range -1.0 -> +1.0 to match the callback format.
- audio_bus->FromInterleaved(buffer_.get(),
- audio_bus->frames(),
- sizeof(buffer_[0]));
+ // The valid range for volume scaling of a remote webrtc source is
+ // 0.0-10.0 where 1.0 is no attenuation/boost.
+ DCHECK(volume >= 0.0f);
+ if (volume > 10.0f)
+ volume = 10.0f;
+
+ DVLOG(1) << "Setting remote source volume: " << volume;
+ source->SetVolume(volume);
+}
+
+bool WebRtcAudioRenderer::AddPlayingState(
+ webrtc::AudioSourceInterface* source,
+ PlayingState* state) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(state->playing());
+ // Look up or add the |source| to the map.
+ PlayingStates& array = source_playing_states_[source];
+ if (std::find(array.begin(), array.end(), state) != array.end())
+ return false;
+
+ array.push_back(state);
+
+ return true;
+}
+
+bool WebRtcAudioRenderer::RemovePlayingState(
+ webrtc::AudioSourceInterface* source,
+ PlayingState* state) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!state->playing());
+ SourcePlayingStates::iterator found = source_playing_states_.find(source);
+ if (found == source_playing_states_.end())
+ return false;
+
+ PlayingStates& array = found->second;
+ PlayingStates::iterator state_it =
+ std::find(array.begin(), array.end(), state);
+ if (state_it == array.end())
+ return false;
+
+ array.erase(state_it);
+
+ if (array.empty())
+ source_playing_states_.erase(found);
+
+ return true;
+}
+
+void WebRtcAudioRenderer::OnPlayStateChanged(
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
+ PlayingState* state) {
+ webrtc::AudioTrackVector tracks(media_stream->GetAudioTracks());
+ for (webrtc::AudioTrackVector::iterator it = tracks.begin();
+ it != tracks.end(); ++it) {
+ webrtc::AudioSourceInterface* source = (*it)->GetSource();
+ DCHECK(source);
+ if (!state->playing()) {
+ if (RemovePlayingState(source, state))
+ EnterPauseState();
+ } else if (AddPlayingState(source, state)) {
+ EnterPlayState();
+ }
+ UpdateSourceVolume(source);
+ }
}
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_audio_renderer.h b/chromium/content/renderer/media/webrtc_audio_renderer.h
index bdaf1fd9f3b..61b0b24d141 100644
--- a/chromium/content/renderer/media/webrtc_audio_renderer.h
+++ b/chromium/content/renderer/media/webrtc_audio_renderer.h
@@ -7,6 +7,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
+#include "base/threading/non_thread_safe.h"
#include "base/threading/thread_checker.h"
#include "content/renderer/media/media_stream_audio_renderer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
@@ -17,7 +18,12 @@
namespace media {
class AudioOutputDevice;
-}
+} // namespace media
+
+namespace webrtc {
+class AudioSourceInterface;
+class MediaStreamInterface;
+} // namespace webrtc
namespace content {
@@ -29,10 +35,47 @@ class CONTENT_EXPORT WebRtcAudioRenderer
: NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback),
NON_EXPORTED_BASE(public MediaStreamAudioRenderer) {
public:
- WebRtcAudioRenderer(int source_render_view_id,
- int session_id,
- int sample_rate,
- int frames_per_buffer);
+ // This is a little utility class that holds the configured state of an audio
+ // stream.
+ // It is used by both WebRtcAudioRenderer and SharedAudioRenderer (see cc
+ // file) so a part of why it exists is to avoid code duplication and track
+ // the state in the same way in WebRtcAudioRenderer and SharedAudioRenderer.
+ class PlayingState : public base::NonThreadSafe {
+ public:
+ PlayingState() : playing_(false), volume_(1.0f) {}
+
+ bool playing() const {
+ DCHECK(CalledOnValidThread());
+ return playing_;
+ }
+
+ void set_playing(bool playing) {
+ DCHECK(CalledOnValidThread());
+ playing_ = playing;
+ }
+
+ float volume() const {
+ DCHECK(CalledOnValidThread());
+ return volume_;
+ }
+
+ void set_volume(float volume) {
+ DCHECK(CalledOnValidThread());
+ volume_ = volume;
+ }
+
+ private:
+ bool playing_;
+ float volume_;
+ };
+
+ WebRtcAudioRenderer(
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
+ int source_render_view_id,
+ int source_render_frame_id,
+ int session_id,
+ int sample_rate,
+ int frames_per_buffer);
// Initialize function called by clients like WebRtcAudioDeviceImpl.
// Stop() has to be called before |source| is deleted.
@@ -47,11 +90,16 @@ class CONTENT_EXPORT WebRtcAudioRenderer
// When Stop() is called or when the proxy goes out of scope, the proxy
// will ensure that Pause() is called followed by a call to Stop(), which
// is the usage pattern that WebRtcAudioRenderer requires.
- scoped_refptr<MediaStreamAudioRenderer> CreateSharedAudioRendererProxy();
+ scoped_refptr<MediaStreamAudioRenderer> CreateSharedAudioRendererProxy(
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream);
// Used to DCHECK on the expected state.
bool IsStarted() const;
+ // Accessors to the sink audio parameters.
+ int channels() const { return sink_params_.channels(); }
+ int sample_rate() const { return sink_params_.sample_rate(); }
+
private:
// MediaStreamAudioRenderer implementation. This is private since we want
// callers to use proxy objects.
@@ -64,6 +112,16 @@ class CONTENT_EXPORT WebRtcAudioRenderer
virtual base::TimeDelta GetCurrentRenderTime() const OVERRIDE;
virtual bool IsLocalRenderer() const OVERRIDE;
+ // Called when an audio renderer, either the main or a proxy, starts playing.
+ // Here we maintain a reference count of how many renderers are currently
+ // playing so that the shared play state of all the streams can be reflected
+ // correctly.
+ void EnterPlayState();
+
+ // Called when an audio renderer, either the main or a proxy, is paused.
+ // See EnterPlayState for more details.
+ void EnterPauseState();
+
protected:
virtual ~WebRtcAudioRenderer();
@@ -74,6 +132,14 @@ class CONTENT_EXPORT WebRtcAudioRenderer
PAUSED,
};
+  // Holds raw pointers to PlayingState objects. Ownership is managed outside
+ // of this type.
+ typedef std::vector<PlayingState*> PlayingStates;
+ // Maps an audio source to a list of playing states that collectively hold
+ // volume information for that source.
+ typedef std::map<webrtc::AudioSourceInterface*, PlayingStates>
+ SourcePlayingStates;
+
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
@@ -90,22 +156,46 @@ class CONTENT_EXPORT WebRtcAudioRenderer
// This method is called on the AudioOutputDevice worker thread.
void SourceCallback(int fifo_frame_delay, media::AudioBus* audio_bus);
- // The render view in which the audio is rendered into |sink_|.
+ // Goes through all renderers for the |source| and applies the proper
+ // volume scaling for the source based on the volume(s) of the renderer(s).
+ void UpdateSourceVolume(webrtc::AudioSourceInterface* source);
+
+ // Tracks a playing state. The state must be playing when this method
+ // is called.
+ // Returns true if the state was added, false if it was already being tracked.
+ bool AddPlayingState(webrtc::AudioSourceInterface* source,
+ PlayingState* state);
+ // Removes a playing state for an audio source.
+ // Returns true if the state was removed from the internal map, false if
+ // it had already been removed or if the source isn't being rendered.
+ bool RemovePlayingState(webrtc::AudioSourceInterface* source,
+ PlayingState* state);
+
+ // Called whenever the Play/Pause state changes of any of the renderers
+ // or if the volume of any of them is changed.
+ // Here we update the shared Play state and apply volume scaling to all audio
+ // sources associated with the |media_stream| based on the collective volume
+ // of playing renderers.
+ void OnPlayStateChanged(
+ const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
+ PlayingState* state);
+
+ // The render view and frame in which the audio is rendered into |sink_|.
const int source_render_view_id_;
+ const int source_render_frame_id_;
const int session_id_;
// The sink (destination) for rendered audio.
scoped_refptr<media::AudioOutputDevice> sink_;
+ // The media stream that holds the audio tracks that this renderer renders.
+ const scoped_refptr<webrtc::MediaStreamInterface> media_stream_;
+
// Audio data source from the browser process.
WebRtcAudioRendererSource* source_;
- // Buffers used for temporary storage during render callbacks.
- // Allocated during initialization.
- scoped_ptr<int16[]> buffer_;
-
- // Protects access to |state_|, |source_| and |sink_|.
- base::Lock lock_;
+ // Protects access to |state_|, |source_|, |sink_| and |current_time_|.
+ mutable base::Lock lock_;
// Ref count for the MediaPlayers which are playing audio.
int play_ref_count_;
@@ -124,9 +214,20 @@ class CONTENT_EXPORT WebRtcAudioRenderer
// Delay due to the FIFO in milliseconds.
int fifo_delay_milliseconds_;
- // The preferred sample rate and buffer sizes provided via the ctor.
- const int sample_rate_;
- const int frames_per_buffer_;
+ base::TimeDelta current_time_;
+
+ // Saved volume and playing state of the root renderer.
+ PlayingState playing_state_;
+
+ // Audio params used by the sink of the renderer.
+ media::AudioParameters sink_params_;
+
+ // Maps audio sources to a list of active audio renderers.
+ // Pointers to PlayingState objects are only kept in this map while the
+ // associated renderer is actually playing the stream. Ownership of the
+ // state objects lies with the renderers and they must leave the playing state
+ // before being destructed (PlayingState object goes out of scope).
+ SourcePlayingStates source_playing_states_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioRenderer);
};
diff --git a/chromium/content/renderer/media/webrtc_audio_renderer_unittest.cc b/chromium/content/renderer/media/webrtc_audio_renderer_unittest.cc
new file mode 100644
index 00000000000..3cf1b523806
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc_audio_renderer_unittest.cc
@@ -0,0 +1,154 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "content/renderer/media/audio_device_factory.h"
+#include "content/renderer/media/audio_message_filter.h"
+#include "content/renderer/media/media_stream_audio_renderer.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
+#include "content/renderer/media/webrtc_audio_renderer.h"
+#include "media/audio/audio_output_device.h"
+#include "media/audio/audio_output_ipc.h"
+#include "media/base/audio_bus.h"
+#include "media/base/mock_audio_renderer_sink.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+using testing::Return;
+
+namespace content {
+
+namespace {
+
+class MockAudioOutputIPC : public media::AudioOutputIPC {
+ public:
+ MockAudioOutputIPC() {}
+ virtual ~MockAudioOutputIPC() {}
+
+ MOCK_METHOD3(CreateStream, void(media::AudioOutputIPCDelegate* delegate,
+ const media::AudioParameters& params,
+ int session_id));
+ MOCK_METHOD0(PlayStream, void());
+ MOCK_METHOD0(PauseStream, void());
+ MOCK_METHOD0(CloseStream, void());
+ MOCK_METHOD1(SetVolume, void(double volume));
+};
+
+class FakeAudioOutputDevice
+ : NON_EXPORTED_BASE(public media::AudioOutputDevice) {
+ public:
+ FakeAudioOutputDevice(
+ scoped_ptr<media::AudioOutputIPC> ipc,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : AudioOutputDevice(ipc.Pass(),
+ io_task_runner) {}
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(Stop, void());
+ MOCK_METHOD0(Pause, void());
+ MOCK_METHOD0(Play, void());
+ MOCK_METHOD1(SetVolume, bool(double volume));
+
+ protected:
+ virtual ~FakeAudioOutputDevice() {}
+};
+
+class MockAudioDeviceFactory : public AudioDeviceFactory {
+ public:
+ MockAudioDeviceFactory() {}
+ virtual ~MockAudioDeviceFactory() {}
+ MOCK_METHOD1(CreateOutputDevice, media::AudioOutputDevice*(int));
+ MOCK_METHOD1(CreateInputDevice, media::AudioInputDevice*(int));
+};
+
+class MockAudioRendererSource : public WebRtcAudioRendererSource {
+ public:
+ MockAudioRendererSource() {}
+ virtual ~MockAudioRendererSource() {}
+ MOCK_METHOD4(RenderData, void(media::AudioBus* audio_bus,
+ int sample_rate,
+ int audio_delay_milliseconds,
+ base::TimeDelta* current_time));
+ MOCK_METHOD1(RemoveAudioRenderer, void(WebRtcAudioRenderer* renderer));
+};
+
+} // namespace
+
+class WebRtcAudioRendererTest : public testing::Test {
+ protected:
+ WebRtcAudioRendererTest()
+ : message_loop_(new base::MessageLoopForIO),
+ mock_ipc_(new MockAudioOutputIPC()),
+ mock_output_device_(new FakeAudioOutputDevice(
+ scoped_ptr<media::AudioOutputIPC>(mock_ipc_),
+ message_loop_->message_loop_proxy())),
+ factory_(new MockAudioDeviceFactory()),
+ source_(new MockAudioRendererSource()),
+ stream_(new talk_base::RefCountedObject<MockMediaStream>("label")),
+ renderer_(new WebRtcAudioRenderer(stream_, 1, 1, 1, 44100, 441)) {
+ EXPECT_CALL(*factory_.get(), CreateOutputDevice(1))
+ .WillOnce(Return(mock_output_device_));
+ EXPECT_CALL(*mock_output_device_, Start());
+ EXPECT_TRUE(renderer_->Initialize(source_.get()));
+ renderer_proxy_ = renderer_->CreateSharedAudioRendererProxy(stream_);
+ }
+
+ // Used to construct |mock_output_device_|.
+ scoped_ptr<base::MessageLoopForIO> message_loop_;
+ MockAudioOutputIPC* mock_ipc_; // Owned by AudioOuputDevice.
+
+ scoped_refptr<FakeAudioOutputDevice> mock_output_device_;
+ scoped_ptr<MockAudioDeviceFactory> factory_;
+ scoped_ptr<MockAudioRendererSource> source_;
+ scoped_refptr<webrtc::MediaStreamInterface> stream_;
+ scoped_refptr<WebRtcAudioRenderer> renderer_;
+ scoped_refptr<MediaStreamAudioRenderer> renderer_proxy_;
+};
+
+// Verify that the renderer will be stopped if the only proxy is stopped.
+TEST_F(WebRtcAudioRendererTest, StopRenderer) {
+ renderer_proxy_->Start();
+
+ // |renderer_| has only one proxy, stopping the proxy should stop the sink of
+ // |renderer_|.
+ EXPECT_CALL(*mock_output_device_, Stop());
+ EXPECT_CALL(*source_.get(), RemoveAudioRenderer(renderer_.get()));
+ renderer_proxy_->Stop();
+}
+
+// Verify that the renderer will not be stopped unless the last proxy is
+// stopped.
+TEST_F(WebRtcAudioRendererTest, MultipleRenderers) {
+ renderer_proxy_->Start();
+
+ // Create a vector of renderer proxies from the |renderer_|.
+ std::vector<scoped_refptr<MediaStreamAudioRenderer> > renderer_proxies_;
+ static const int kNumberOfRendererProxy = 5;
+ for (int i = 0; i < kNumberOfRendererProxy; ++i) {
+ scoped_refptr<MediaStreamAudioRenderer> renderer_proxy(
+ renderer_->CreateSharedAudioRendererProxy(stream_));
+ renderer_proxy->Start();
+ renderer_proxies_.push_back(renderer_proxy);
+ }
+
+ // Stop the |renderer_proxy_| should not stop the sink since it is used by
+ // other proxies.
+ EXPECT_CALL(*mock_output_device_, Stop()).Times(0);
+ renderer_proxy_->Stop();
+
+ for (int i = 0; i < kNumberOfRendererProxy; ++i) {
+ if (i != kNumberOfRendererProxy -1) {
+ EXPECT_CALL(*mock_output_device_, Stop()).Times(0);
+ } else {
+ // When the last proxy is stopped, the sink will stop.
+ EXPECT_CALL(*source_.get(), RemoveAudioRenderer(renderer_.get()));
+ EXPECT_CALL(*mock_output_device_, Stop());
+ }
+ renderer_proxies_[i]->Stop();
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_local_audio_renderer.cc b/chromium/content/renderer/media/webrtc_local_audio_renderer.cc
index 6a92d906d32..1deb11e26ef 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_renderer.cc
+++ b/chromium/content/renderer/media/webrtc_local_audio_renderer.cc
@@ -102,22 +102,22 @@ void WebRtcLocalAudioRenderer::OnSetFormat(
source_params_ = params;
- sink_params_.Reset(source_params_.format(),
- source_params_.channel_layout(),
- source_params_.channels(),
- source_params_.input_channels(),
- source_params_.sample_rate(),
- source_params_.bits_per_sample(),
+ sink_params_ = media::AudioParameters(source_params_.format(),
+ source_params_.channel_layout(), source_params_.channels(),
+ source_params_.input_channels(), source_params_.sample_rate(),
+ source_params_.bits_per_sample(),
#if defined(OS_ANDROID)
- // On Android, input and output are using same sampling rate. In order to
- // achieve low latency mode, we need use buffer size suggested by
- // AudioManager for the sink paramters which will be used to decide
- // buffer size for shared memory buffer.
- frames_per_buffer_
+ // On Android, input and output use the same sample rate. In order to
+ // use the low latency mode, we need to use the buffer size suggested by
+ // the AudioManager for the sink. It will later be used to decide
+ // the buffer size of the shared memory buffer.
+ frames_per_buffer_,
#else
- 2 * source_params_.frames_per_buffer()
+ 2 * source_params_.frames_per_buffer(),
#endif
- );
+ // If DUCKING is enabled on the source, it needs to be enabled on the
+ // sink as well.
+ source_params_.effects());
// TODO(henrika): we could add a more dynamic solution here but I prefer
// a fixed size combined with bad audio at overflow. The alternative is
@@ -143,10 +143,12 @@ void WebRtcLocalAudioRenderer::OnSetFormat(
WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer(
const blink::WebMediaStreamTrack& audio_track,
int source_render_view_id,
+ int source_render_frame_id,
int session_id,
int frames_per_buffer)
: audio_track_(audio_track),
source_render_view_id_(source_render_view_id),
+ source_render_frame_id_(source_render_frame_id),
session_id_(session_id),
message_loop_(base::MessageLoopProxy::current()),
playing_(false),
@@ -170,7 +172,8 @@ void WebRtcLocalAudioRenderer::Start() {
MediaStreamAudioSink::AddToAudioTrack(this, audio_track_);
// ...and |sink_| will get audio data from us.
DCHECK(!sink_.get());
- sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_);
+ sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_,
+ source_render_frame_id_);
base::AutoLock auto_lock(thread_lock_);
last_render_time_ = base::TimeTicks::Now();
@@ -284,7 +287,7 @@ void WebRtcLocalAudioRenderer::MaybeStartSink() {
return;
DVLOG(1) << "WebRtcLocalAudioRenderer::MaybeStartSink() -- Starting sink_.";
- sink_->InitializeUnifiedStream(sink_params_, this, session_id_);
+ sink_->InitializeWithSessionId(sink_params_, this, session_id_);
sink_->Start();
sink_started_ = true;
UMA_HISTOGRAM_ENUMERATION("Media.LocalRendererSinkStates",
@@ -306,7 +309,8 @@ void WebRtcLocalAudioRenderer::ReconfigureSink(
sink_->Stop();
sink_started_ = false;
}
- sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_);
+ sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_,
+ source_render_frame_id_);
MaybeStartSink();
}
diff --git a/chromium/content/renderer/media/webrtc_local_audio_renderer.h b/chromium/content/renderer/media/webrtc_local_audio_renderer.h
index e9871ece81b..39f13f2520b 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_renderer.h
+++ b/chromium/content/renderer/media/webrtc_local_audio_renderer.h
@@ -52,6 +52,7 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
// Called on the main thread.
WebRtcLocalAudioRenderer(const blink::WebMediaStreamTrack& audio_track,
int source_render_view_id,
+ int source_render_frame_id,
int session_id,
int frames_per_buffer);
@@ -110,8 +111,9 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
// with the audio track.
blink::WebMediaStreamTrack audio_track_;
- // The render view in which the audio is rendered into |sink_|.
+ // The render view and frame in which the audio is rendered into |sink_|.
const int source_render_view_id_;
+ const int source_render_frame_id_;
const int session_id_;
// MessageLoop associated with the single thread that performs all control
diff --git a/chromium/content/renderer/media/webrtc_local_audio_source_provider.cc b/chromium/content/renderer/media/webrtc_local_audio_source_provider.cc
index bc5f80dfe37..10ee943f00f 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_source_provider.cc
+++ b/chromium/content/renderer/media/webrtc_local_audio_source_provider.cc
@@ -22,8 +22,11 @@ static const size_t kMaxNumberOfBuffers = 10;
// static
const size_t WebRtcLocalAudioSourceProvider::kWebAudioRenderBufferSize = 128;
-WebRtcLocalAudioSourceProvider::WebRtcLocalAudioSourceProvider()
- : is_enabled_(false) {
+WebRtcLocalAudioSourceProvider::WebRtcLocalAudioSourceProvider(
+ const blink::WebMediaStreamTrack& track)
+ : is_enabled_(false),
+ track_(track),
+ track_stopped_(false) {
// Get the native audio output hardware sample-rate for the sink.
// We need to check if RenderThreadImpl is valid here since the unittests
// do not have one and they will inject their own |sink_params_| for testing.
@@ -36,11 +39,19 @@ WebRtcLocalAudioSourceProvider::WebRtcLocalAudioSourceProvider()
media::CHANNEL_LAYOUT_STEREO, 2, 0, sample_rate, 16,
kWebAudioRenderBufferSize);
}
+
+ // Connect the source provider to the track as a sink.
+ MediaStreamAudioSink::AddToAudioTrack(this, track_);
}
WebRtcLocalAudioSourceProvider::~WebRtcLocalAudioSourceProvider() {
if (audio_converter_.get())
audio_converter_->RemoveInput(this);
+
+ // If the track is still active, it is necessary to notify the track before
+ // the source provider goes away.
+ if (!track_stopped_)
+ MediaStreamAudioSink::RemoveFromAudioTrack(this, track_);
}
void WebRtcLocalAudioSourceProvider::OnSetFormat(
@@ -68,6 +79,12 @@ void WebRtcLocalAudioSourceProvider::OnSetFormat(
params.frames_per_buffer());
}
+void WebRtcLocalAudioSourceProvider::OnReadyStateChanged(
+ blink::WebMediaStreamSource::ReadyState state) {
+ if (state == blink::WebMediaStreamSource::ReadyStateEnded)
+ track_stopped_ = true;
+}
+
void WebRtcLocalAudioSourceProvider::OnData(
const int16* audio_data,
int sample_rate,
@@ -89,9 +106,9 @@ void WebRtcLocalAudioSourceProvider::OnData(
if (fifo_->frames() + number_of_frames <= fifo_->max_frames()) {
fifo_->Push(input_bus_.get());
} else {
- // This can happen if the data in FIFO is too slowed to be consumed or
+ // This can happen if the data in FIFO is too slowly consumed or
// WebAudio stops consuming data.
- DLOG(WARNING) << "Local source provicer FIFO is full" << fifo_->frames();
+ DVLOG(3) << "Local source provicer FIFO is full" << fifo_->frames();
}
}
diff --git a/chromium/content/renderer/media/webrtc_local_audio_source_provider.h b/chromium/content/renderer/media/webrtc_local_audio_source_provider.h
index eb437fabe59..9abd89f9569 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_source_provider.h
+++ b/chromium/content/renderer/media/webrtc_local_audio_source_provider.h
@@ -15,6 +15,7 @@
#include "content/public/renderer/media_stream_audio_sink.h"
#include "media/base/audio_converter.h"
#include "third_party/WebKit/public/platform/WebAudioSourceProvider.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/WebKit/public/platform/WebVector.h"
namespace media {
@@ -31,12 +32,13 @@ class WebAudioSourceProviderClient;
namespace content {
// WebRtcLocalAudioSourceProvider provides a bridge between classes:
-// WebRtcAudioCapturer ---> blink::WebAudioSourceProvider
+// WebRtcLocalAudioTrack ---> blink::WebAudioSourceProvider
//
-// WebRtcLocalAudioSourceProvider works as a sink to the WebRtcAudiocapturer
+// WebRtcLocalAudioSourceProvider works as a sink to the WebRtcLocalAudioTrack
// and store the capture data to a FIFO. When the media stream is connected to
-// WebAudio as a source provider, WebAudio will periodically call
-// provideInput() to get the data from the FIFO.
+// WebAudio MediaStreamAudioSourceNode as a source provider,
+// MediaStreamAudioSourceNode will periodically call provideInput() to get the
+// data from the FIFO.
//
// All calls are protected by a lock.
class CONTENT_EXPORT WebRtcLocalAudioSourceProvider
@@ -46,7 +48,8 @@ class CONTENT_EXPORT WebRtcLocalAudioSourceProvider
public:
static const size_t kWebAudioRenderBufferSize;
- WebRtcLocalAudioSourceProvider();
+ explicit WebRtcLocalAudioSourceProvider(
+ const blink::WebMediaStreamTrack& track);
virtual ~WebRtcLocalAudioSourceProvider();
// MediaStreamAudioSink implementation.
@@ -55,6 +58,8 @@ class CONTENT_EXPORT WebRtcLocalAudioSourceProvider
int number_of_channels,
int number_of_frames) OVERRIDE;
virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
+ virtual void OnReadyStateChanged(
+ blink::WebMediaStreamSource::ReadyState state) OVERRIDE;
// blink::WebAudioSourceProvider implementation.
virtual void setClient(blink::WebAudioSourceProviderClient* client) OVERRIDE;
@@ -94,6 +99,12 @@ class CONTENT_EXPORT WebRtcLocalAudioSourceProvider
// Used to report the correct delay to |webaudio_source_|.
base::TimeTicks last_fill_;
+ // The audio track that this source provider is connected to.
+ blink::WebMediaStreamTrack track_;
+
+ // Flag to tell if the track has been stopped or not.
+ bool track_stopped_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcLocalAudioSourceProvider);
};
diff --git a/chromium/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc b/chromium/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
index 5b7e8526898..3374b7488d1 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
+++ b/chromium/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
@@ -3,10 +3,16 @@
// found in the LICENSE file.
#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
namespace content {
@@ -23,7 +29,23 @@ class WebRtcLocalAudioSourceProviderTest : public testing::Test {
source_params_.frames_per_buffer() * source_params_.channels();
source_data_.reset(new int16[length]);
sink_bus_ = media::AudioBus::Create(sink_params_);
- source_provider_.reset(new WebRtcLocalAudioSourceProvider());
+ MockMediaConstraintFactory constraint_factory;
+ scoped_refptr<WebRtcAudioCapturer> capturer(
+ WebRtcAudioCapturer::CreateCapturer(
+ -1, StreamDeviceInfo(),
+ constraint_factory.CreateWebMediaConstraints(), NULL, NULL));
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> native_track(
+ new WebRtcLocalAudioTrack(adapter, capturer, NULL));
+ blink::WebMediaStreamSource audio_source;
+ audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
+ blink::WebMediaStreamSource::TypeAudio,
+ base::UTF8ToUTF16("dummy_source_name"));
+ blink_track_.initialize(blink::WebString::fromUTF8("audio_track"),
+ audio_source);
+ blink_track_.setExtraData(native_track.release());
+ source_provider_.reset(new WebRtcLocalAudioSourceProvider(blink_track_));
source_provider_->SetSinkParamsForTesting(sink_params_);
source_provider_->OnSetFormat(source_params_);
}
@@ -32,6 +54,7 @@ class WebRtcLocalAudioSourceProviderTest : public testing::Test {
scoped_ptr<int16[]> source_data_;
media::AudioParameters sink_params_;
scoped_ptr<media::AudioBus> sink_bus_;
+ blink::WebMediaStreamTrack blink_track_;
scoped_ptr<WebRtcLocalAudioSourceProvider> source_provider_;
};
@@ -91,4 +114,25 @@ TEST_F(WebRtcLocalAudioSourceProviderTest, VerifyDataFlow) {
}
}
+TEST_F(WebRtcLocalAudioSourceProviderTest,
+ DeleteSourceProviderBeforeStoppingTrack) {
+ source_provider_.reset();
+
+ // Stop the audio track.
+ WebRtcLocalAudioTrack* native_track = static_cast<WebRtcLocalAudioTrack*>(
+ MediaStreamTrack::GetTrack(blink_track_));
+ native_track->Stop();
+}
+
+TEST_F(WebRtcLocalAudioSourceProviderTest,
+ StopTrackBeforeDeletingSourceProvider) {
+ // Stop the audio track.
+ WebRtcLocalAudioTrack* native_track = static_cast<WebRtcLocalAudioTrack*>(
+ MediaStreamTrack::GetTrack(blink_track_));
+ native_track->Stop();
+
+ // Delete the source provider.
+ source_provider_.reset();
+}
+
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_local_audio_track.cc b/chromium/content/renderer/media/webrtc_local_audio_track.cc
index 8afa06feab2..95f34f64ea3 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_track.cc
+++ b/chromium/content/renderer/media/webrtc_local_audio_track.cc
@@ -5,150 +5,29 @@
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/public/renderer/media_stream_audio_sink.h"
+#include "content/renderer/media/media_stream_audio_level_calculator.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_sink_owner.h"
#include "content/renderer/media/media_stream_audio_track_sink.h"
#include "content/renderer/media/peer_connection_audio_sink_owner.h"
#include "content/renderer/media/webaudio_capturer_source.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_local_audio_source_provider.h"
-#include "media/base/audio_fifo.h"
-#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
namespace content {
-static const size_t kMaxNumberOfBuffersInFifo = 2;
-static const char kAudioTrackKind[] = "audio";
-
-namespace {
-
-using webrtc::MediaConstraintsInterface;
-
-// This helper function checks if any audio constraints are set that require
-// audio processing to be applied. Right now this is a big, single switch for
-// all of the properties, but in the future they'll be handled one by one.
-bool NeedsAudioProcessing(
- const webrtc::MediaConstraintsInterface* constraints) {
- if (!constraints)
- return false;
-
- static const char* kAudioProcessingProperties[] = {
- MediaConstraintsInterface::kEchoCancellation,
- MediaConstraintsInterface::kExperimentalEchoCancellation,
- MediaConstraintsInterface::kAutoGainControl,
- MediaConstraintsInterface::kExperimentalAutoGainControl,
- MediaConstraintsInterface::kNoiseSuppression,
- MediaConstraintsInterface::kHighpassFilter,
- MediaConstraintsInterface::kTypingNoiseDetection,
- };
-
- for (size_t i = 0; i < arraysize(kAudioProcessingProperties); ++i) {
- bool value = false;
- if (webrtc::FindConstraint(constraints, kAudioProcessingProperties[i],
- &value, NULL) &&
- value) {
- return true;
- }
- }
-
- return false;
-}
-
-} // namespace.
-
-// This is a temporary audio buffer with parameters used to send data to
-// callbacks.
-class WebRtcLocalAudioTrack::ConfiguredBuffer {
- public:
- ConfiguredBuffer() {}
- virtual ~ConfiguredBuffer() {}
-
- void Configure(const media::AudioParameters& params) {
- DCHECK(params.IsValid());
-
- // PeerConnection uses 10ms as the sink buffer size as its native packet
- // size. We use the native PeerConnection buffer size to achieve the best
- // performance when a PeerConnection is connected with a track.
- int sink_buffer_size = params.sample_rate() / 100;
- if (params.frames_per_buffer() < sink_buffer_size) {
- // When the source is running with a buffer size smaller than the peer
- // connection buffer size, that means no PeerConnection is connected
- // to the track, use the same buffer size as the incoming format to
- // avoid extra FIFO for WebAudio.
- sink_buffer_size = params.frames_per_buffer();
- }
- params_.Reset(params.format(), params.channel_layout(), params.channels(),
- params.input_channels(), params.sample_rate(),
- params.bits_per_sample(), sink_buffer_size);
-
- audio_wrapper_ = media::AudioBus::Create(params_.channels(),
- params_.frames_per_buffer());
- buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
-
- // The size of the FIFO should be at least twice of the source buffer size
- // or twice of the sink buffer size.
- int buffer_size = std::max(
- kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
- kMaxNumberOfBuffersInFifo * params_.frames_per_buffer());
- fifo_.reset(new media::AudioFifo(params_.channels(), buffer_size));
- }
-
- void Push(media::AudioBus* audio_source) {
- DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
- fifo_->Push(audio_source);
- }
-
- bool Consume() {
- if (fifo_->frames() < audio_wrapper_->frames())
- return false;
-
- fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
- audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
- params_.bits_per_sample() / 8,
- buffer());
- return true;
- }
-
- int16* buffer() const { return buffer_.get(); }
-
- // Format of the output audio buffer.
- const media::AudioParameters& params() const { return params_; }
-
- private:
- media::AudioParameters params_;
- scoped_ptr<media::AudioBus> audio_wrapper_;
- scoped_ptr<media::AudioFifo> fifo_;
- scoped_ptr<int16[]> buffer_;
-};
-
-scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints) {
- talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
- new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
- id, capturer, webaudio_source, track_source, constraints);
- return track;
-}
-
WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
- const std::string& label,
+ WebRtcLocalAudioTrackAdapter* adapter,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints)
- : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
+ WebAudioCapturerSource* webaudio_source)
+ : MediaStreamTrack(adapter, true),
+ adapter_(adapter),
capturer_(capturer),
- webaudio_source_(webaudio_source),
- track_source_(track_source),
- need_audio_processing_(NeedsAudioProcessing(constraints)),
- buffer_(new ConfiguredBuffer()) {
+ webaudio_source_(webaudio_source) {
DCHECK(capturer.get() || webaudio_source);
- if (!webaudio_source_) {
- source_provider_.reset(new WebRtcLocalAudioSourceProvider());
- AddSink(source_provider_.get());
- }
+
+ adapter_->Initialize(this);
+
DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
}
@@ -159,69 +38,58 @@ WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
Stop();
}
-void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
- int audio_delay_milliseconds,
+void WebRtcLocalAudioTrack::Capture(const int16* audio_data,
+ base::TimeDelta delay,
int volume,
- bool key_pressed) {
+ bool key_pressed,
+ bool need_audio_processing) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
+
+ // Calculate the signal level regardless if the track is disabled or enabled.
+ int signal_level = level_calculator_->Calculate(
+ audio_data, audio_parameters_.channels(),
+ audio_parameters_.frames_per_buffer());
+ adapter_->SetSignalLevel(signal_level);
+
scoped_refptr<WebRtcAudioCapturer> capturer;
- std::vector<int> voe_channels;
SinkList::ItemList sinks;
SinkList::ItemList sinks_to_notify_format;
- bool is_webaudio_source = false;
{
base::AutoLock auto_lock(lock_);
capturer = capturer_;
- voe_channels = voe_channels_;
sinks = sinks_.Items();
sinks_.RetrieveAndClearTags(&sinks_to_notify_format);
- is_webaudio_source = (webaudio_source_.get() != NULL);
}
// Notify the tracks on when the format changes. This will do nothing if
// |sinks_to_notify_format| is empty.
for (SinkList::ItemList::const_iterator it = sinks_to_notify_format.begin();
it != sinks_to_notify_format.end(); ++it) {
- (*it)->OnSetFormat(buffer_->params());
+ (*it)->OnSetFormat(audio_parameters_);
}
- // Push the data to the fifo.
- buffer_->Push(audio_source);
-
- // When the source is WebAudio, turn off the audio processing if the delay
- // value is 0 even though the constraint is set to true. In such case, it
- // indicates the data is not from microphone.
- // TODO(xians): remove the flag when supporting one APM per audio track.
- // See crbug/264611 for details.
- bool need_audio_processing = need_audio_processing_;
- if (is_webaudio_source && need_audio_processing)
- need_audio_processing = (audio_delay_milliseconds != 0);
-
- int current_volume = volume;
- while (buffer_->Consume()) {
- // Feed the data to the sinks.
- // TODO (jiayl): we should not pass the real audio data down if the track is
- // disabled. This is currently done so to feed input to WebRTC typing
- // detection and should be changed when audio processing is moved from
- // WebRTC to the track.
- for (SinkList::ItemList::const_iterator it = sinks.begin();
- it != sinks.end();
- ++it) {
- int new_volume = (*it)->OnData(buffer_->buffer(),
- buffer_->params().sample_rate(),
- buffer_->params().channels(),
- buffer_->params().frames_per_buffer(),
- voe_channels,
- audio_delay_milliseconds,
- current_volume,
- need_audio_processing,
- key_pressed);
- if (new_volume != 0 && capturer.get()) {
- // Feed the new volume to WebRtc while changing the volume on the
- // browser.
- capturer->SetVolume(new_volume);
- current_volume = new_volume;
- }
+ // Feed the data to the sinks.
+ // TODO(jiayl): we should not pass the real audio data down if the track is
+ // disabled. This is currently done so to feed input to WebRTC typing
+ // detection and should be changed when audio processing is moved from
+ // WebRTC to the track.
+ std::vector<int> voe_channels = adapter_->VoeChannels();
+ for (SinkList::ItemList::const_iterator it = sinks.begin();
+ it != sinks.end();
+ ++it) {
+ int new_volume = (*it)->OnData(audio_data,
+ audio_parameters_.sample_rate(),
+ audio_parameters_.channels(),
+ audio_parameters_.frames_per_buffer(),
+ voe_channels,
+ delay.InMilliseconds(),
+ volume,
+ need_audio_processing,
+ key_pressed);
+ if (new_volume != 0 && capturer.get() && !webaudio_source_) {
+ // Feed the new volume to WebRtc while changing the volume on the
+ // browser.
+ capturer->SetVolume(new_volume);
}
}
}
@@ -234,49 +102,22 @@ void WebRtcLocalAudioTrack::OnSetFormat(
capture_thread_checker_.DetachFromThread();
DCHECK(capture_thread_checker_.CalledOnValidThread());
- DCHECK(params.IsValid());
- buffer_->Configure(params);
+ audio_parameters_ = params;
+ level_calculator_.reset(new MediaStreamAudioLevelCalculator());
base::AutoLock auto_lock(lock_);
// Remember to notify all sinks of the new format.
sinks_.TagAll();
}
-void WebRtcLocalAudioTrack::AddChannel(int channel_id) {
- DVLOG(1) << "WebRtcLocalAudioTrack::AddChannel(channel_id="
- << channel_id << ")";
- base::AutoLock auto_lock(lock_);
- if (std::find(voe_channels_.begin(), voe_channels_.end(), channel_id) !=
- voe_channels_.end()) {
- // We need to handle the case when the same channel is connected to the
- // track more than once.
- return;
- }
-
- voe_channels_.push_back(channel_id);
-}
-
-void WebRtcLocalAudioTrack::RemoveChannel(int channel_id) {
- DVLOG(1) << "WebRtcLocalAudioTrack::RemoveChannel(channel_id="
- << channel_id << ")";
- base::AutoLock auto_lock(lock_);
- std::vector<int>::iterator iter =
- std::find(voe_channels_.begin(), voe_channels_.end(), channel_id);
- DCHECK(iter != voe_channels_.end());
- voe_channels_.erase(iter);
-}
-
-// webrtc::AudioTrackInterface implementation.
-webrtc::AudioSourceInterface* WebRtcLocalAudioTrack::GetSource() const {
- return track_source_;
-}
-
-cricket::AudioRenderer* WebRtcLocalAudioTrack::GetRenderer() {
- return this;
-}
-
-std::string WebRtcLocalAudioTrack::kind() const {
- return kAudioTrackKind;
+void WebRtcLocalAudioTrack::SetAudioProcessor(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+ // if the |processor| does not have audio processing, which can happen if
+ // kDisableAudioTrackProcessing is set set or all the constraints in
+ // the |processor| are turned off. In such case, we pass NULL to the
+ // adapter to indicate that no stats can be gotten from the processor.
+ adapter_->SetAudioProcessor(processor->has_audio_processing() ?
+ processor : NULL);
}
void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
@@ -354,11 +195,9 @@ void WebRtcLocalAudioTrack::Start() {
// capturer as its sink otherwise two streams in different clock will be
// pushed through the same track.
webaudio_source_->Start(this, capturer_.get());
- return;
- }
-
- if (capturer_.get())
+ } else if (capturer_.get()) {
capturer_->AddTrack(this);
+ }
SinkList::ItemList sinks;
{
diff --git a/chromium/content/renderer/media/webrtc_local_audio_track.h b/chromium/content/renderer/media/webrtc_local_audio_track.h
index 0ad463b2cdf..c2cb81b3703 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_track.h
+++ b/chromium/content/renderer/media/webrtc_local_audio_track.h
@@ -8,49 +8,38 @@
#include <list>
#include <string>
+#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
-#include "content/renderer/media/media_stream_audio_track_sink.h"
+#include "content/renderer/media/media_stream_track.h"
#include "content/renderer/media/tagged_list.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/media/webrtc_local_audio_source_provider.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
-#include "third_party/libjingle/source/talk/app/webrtc/mediastreamtrack.h"
-#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
-
-namespace cricket {
-class AudioRenderer;
-} // namespace cricket
-
-namespace media {
-class AudioBus;
-} // namespace media
namespace content {
+class MediaStreamAudioLevelCalculator;
+class MediaStreamAudioProcessor;
class MediaStreamAudioSink;
class MediaStreamAudioSinkOwner;
+class MediaStreamAudioTrackSink;
class PeerConnectionAudioSink;
class WebAudioCapturerSource;
class WebRtcAudioCapturer;
+class WebRtcLocalAudioTrackAdapter;
// A WebRtcLocalAudioTrack instance contains the implementations of
-// MediaStreamTrack and MediaStreamAudioSink.
+// MediaStreamTrackExtraData.
// When an instance is created, it will register itself as a track to the
// WebRtcAudioCapturer to get the captured data, and forward the data to
// its |sinks_|. The data flow can be stopped by disabling the audio track.
class CONTENT_EXPORT WebRtcLocalAudioTrack
- : NON_EXPORTED_BASE(public cricket::AudioRenderer),
- NON_EXPORTED_BASE(
- public webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>) {
+ : NON_EXPORTED_BASE(public MediaStreamTrack) {
public:
- static scoped_refptr<WebRtcLocalAudioTrack> Create(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints);
+ WebRtcLocalAudioTrack(WebRtcLocalAudioTrackAdapter* adapter,
+ const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source);
+
+ virtual ~WebRtcLocalAudioTrack();
// Add a sink to the track. This function will trigger a OnSetFormat()
// call on the |sink|.
@@ -73,60 +62,41 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
// Stops the local audio track. Called on the main render thread and
// should be called only once when audio track going away.
- void Stop();
+ virtual void Stop() OVERRIDE;
// Method called by the capturer to deliver the capture data.
- // Call on the capture audio thread.
- void Capture(media::AudioBus* audio_source,
- int audio_delay_milliseconds,
+ // Called on the capture audio thread.
+ void Capture(const int16* audio_data,
+ base::TimeDelta delay,
int volume,
- bool key_pressed);
+ bool key_pressed,
+ bool need_audio_processing);
// Method called by the capturer to set the audio parameters used by source
// of the capture data..
- // Call on the capture audio thread.
+ // Called on the capture audio thread.
void OnSetFormat(const media::AudioParameters& params);
- blink::WebAudioSourceProvider* audio_source_provider() const {
- return source_provider_.get();
- }
-
- protected:
- WebRtcLocalAudioTrack(
- const std::string& label,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints);
-
- virtual ~WebRtcLocalAudioTrack();
+ // Method called by the capturer to set the processor that applies signal
+ // processing on the data of the track.
+ // Called on the capture audio thread.
+ void SetAudioProcessor(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor);
private:
typedef TaggedList<MediaStreamAudioTrackSink> SinkList;
- // cricket::AudioCapturer implementation.
- virtual void AddChannel(int channel_id) OVERRIDE;
- virtual void RemoveChannel(int channel_id) OVERRIDE;
-
- // webrtc::AudioTrackInterface implementation.
- virtual webrtc::AudioSourceInterface* GetSource() const OVERRIDE;
- virtual cricket::AudioRenderer* GetRenderer() OVERRIDE;
-
- // webrtc::MediaStreamTrack implementation.
- virtual std::string kind() const OVERRIDE;
+ // All usage of libjingle is through this adapter. The adapter holds
+ // a reference on this object, but not vice versa.
+ WebRtcLocalAudioTrackAdapter* adapter_;
// The provider of captured data to render.
- // The WebRtcAudioCapturer is today created by WebRtcAudioDeviceImpl.
scoped_refptr<WebRtcAudioCapturer> capturer_;
// The source of the audio track which is used by WebAudio, which provides
// data to the audio track when hooking up with WebAudio.
scoped_refptr<WebAudioCapturerSource> webaudio_source_;
- // The source of the audio track which handles the audio constraints.
- // TODO(xians): merge |track_source_| to |capturer_|.
- talk_base::scoped_refptr<webrtc::AudioSourceInterface> track_source_;
-
// A tagged list of sinks that the audio data is fed to. Tags
// indicate tracks that need to be notified that the audio format
// has changed.
@@ -141,19 +111,13 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
// Protects |params_| and |sinks_|.
mutable base::Lock lock_;
- // A vector of WebRtc VoE channels that the capturer sends data to.
- std::vector<int> voe_channels_;
-
- bool need_audio_processing_;
-
- // Buffers used for temporary storage during capture callbacks.
- // Allocated and accessed only on the capture audio thread.
- class ConfiguredBuffer;
- scoped_ptr<ConfiguredBuffer> buffer_;
+ // Audio parameters of the audio capture stream.
+ // Accessed on only the audio capture thread.
+ media::AudioParameters audio_parameters_;
- // The source provider to feed the track data to other clients like
- // WebAudio.
- scoped_ptr<WebRtcLocalAudioSourceProvider> source_provider_;
+ // Used to calculate the signal level that shows in the UI.
+ // Accessed on only the audio thread.
+ scoped_ptr<MediaStreamAudioLevelCalculator> level_calculator_;
DISALLOW_COPY_AND_ASSIGN(WebRtcLocalAudioTrack);
};
diff --git a/chromium/content/renderer/media/webrtc_local_audio_track_unittest.cc b/chromium/content/renderer/media/webrtc_local_audio_track_unittest.cc
index a43577669d6..3f542fb6e8b 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_track_unittest.cc
+++ b/chromium/content/renderer/media/webrtc_local_audio_track_unittest.cc
@@ -4,16 +4,18 @@
#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
-#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_capturer_source.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
using ::testing::_;
@@ -33,12 +35,12 @@ ACTION_P(SignalEvent, event) {
// the |WebRtcAudioCapturer|.
class FakeAudioThread : public base::PlatformThread::Delegate {
public:
- FakeAudioThread(const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ FakeAudioThread(WebRtcAudioCapturer* capturer,
const media::AudioParameters& params)
: capturer_(capturer),
thread_(),
closure_(false, false) {
- DCHECK(capturer.get());
+ DCHECK(capturer);
audio_bus_ = media::AudioBus::Create(params);
}
@@ -52,7 +54,7 @@ class FakeAudioThread : public base::PlatformThread::Delegate {
media::AudioCapturerSource::CaptureCallback* callback =
static_cast<media::AudioCapturerSource::CaptureCallback*>(
- capturer_.get());
+ capturer_);
audio_bus_->Zero();
callback->Capture(audio_bus_.get(), 0, 0, false);
@@ -75,7 +77,7 @@ class FakeAudioThread : public base::PlatformThread::Delegate {
private:
scoped_ptr<media::AudioBus> audio_bus_;
- scoped_refptr<WebRtcAudioCapturer> capturer_;
+ WebRtcAudioCapturer* capturer_;
base::PlatformThreadHandle thread_;
base::WaitableEvent closure_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioThread);
@@ -133,26 +135,32 @@ class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
int current_volume,
bool need_audio_processing,
bool key_pressed) OVERRIDE {
+ EXPECT_EQ(params_.sample_rate(), sample_rate);
+ EXPECT_EQ(params_.channels(), number_of_channels);
+ EXPECT_EQ(params_.frames_per_buffer(), number_of_frames);
CaptureData(channels.size(),
- sample_rate,
- number_of_channels,
- number_of_frames,
audio_delay_milliseconds,
current_volume,
need_audio_processing,
key_pressed);
return 0;
}
- MOCK_METHOD8(CaptureData,
+ MOCK_METHOD5(CaptureData,
void(int number_of_network_channels,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
int audio_delay_milliseconds,
int current_volume,
bool need_audio_processing,
bool key_pressed));
- MOCK_METHOD1(OnSetFormat, void(const media::AudioParameters& params));
+ void OnSetFormat(const media::AudioParameters& params) {
+ params_ = params;
+ FormatIsSet();
+ }
+ MOCK_METHOD0(FormatIsSet, void());
+
+ const media::AudioParameters& audio_params() const { return params_; }
+
+ private:
+ media::AudioParameters params_;
};
} // namespace
@@ -162,17 +170,28 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test {
virtual void SetUp() OVERRIDE {
params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 480);
- capturer_ = WebRtcAudioCapturer::CreateCapturer();
+ MockMediaConstraintFactory constraint_factory;
+ blink_source_.initialize("dummy", blink::WebMediaStreamSource::TypeAudio,
+ "dummy");
+ MediaStreamAudioSource* audio_source = new MediaStreamAudioSource();
+ blink_source_.setExtraData(audio_source);
+
+ StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
+ std::string(), std::string());
+ capturer_ = WebRtcAudioCapturer::CreateCapturer(
+ -1, device, constraint_factory.CreateWebMediaConstraints(), NULL,
+ audio_source);
+ audio_source->SetAudioCapturer(capturer_);
capturer_source_ = new MockCapturerSource(capturer_.get());
- EXPECT_CALL(*capturer_source_.get(), OnInitialize(_, capturer_.get(), 0))
+ EXPECT_CALL(*capturer_source_.get(), OnInitialize(_, capturer_.get(), -1))
.WillOnce(Return());
- capturer_->SetCapturerSource(capturer_source_,
- params_.channel_layout(),
- params_.sample_rate(),
- params_.effects());
+ EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
+ EXPECT_CALL(*capturer_source_.get(), OnStart());
+ capturer_->SetCapturerSourceForTesting(capturer_source_, params_);
}
media::AudioParameters params_;
+ blink::WebMediaStreamSource blink_source_;
scoped_refptr<MockCapturerSource> capturer_source_;
scoped_refptr<WebRtcAudioCapturer> capturer_;
};
@@ -182,39 +201,24 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test {
// get data callback when the track is connected to the capturer but not when
// the track is disconnected from the capturer.
TEST_F(WebRtcLocalAudioTrackTest, ConnectAndDisconnectOneSink) {
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track(
+ new WebRtcLocalAudioTrack(adapter, capturer_, NULL));
track->Start();
- EXPECT_TRUE(track->enabled());
+ EXPECT_TRUE(track->GetAudioAdapter()->enabled());
- // Connect a number of network channels to the audio track.
- static const int kNumberOfNetworkChannels = 4;
- for (int i = 0; i < kNumberOfNetworkChannels; ++i) {
- static_cast<webrtc::AudioTrackInterface*>(track.get())->
- GetRenderer()->AddChannel(i);
- }
scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
- const media::AudioParameters params = capturer_->audio_parameters();
base::WaitableEvent event(false, false);
- EXPECT_CALL(*sink, OnSetFormat(_)).WillOnce(Return());
+ EXPECT_CALL(*sink, FormatIsSet());
EXPECT_CALL(*sink,
- CaptureData(kNumberOfNetworkChannels,
- params.sample_rate(),
- params.channels(),
- params.sample_rate() / 100,
+ CaptureData(0,
0,
0,
- false,
+ _,
false)).Times(AtLeast(1))
.WillRepeatedly(SignalEvent(&event));
track->AddSink(sink.get());
-
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
track->RemoveSink(sink.get());
@@ -231,182 +235,147 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectAndDisconnectOneSink) {
TEST_F(WebRtcLocalAudioTrackTest, DISABLED_DisableEnableAudioTrack) {
EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
EXPECT_CALL(*capturer_source_.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track(
+ new WebRtcLocalAudioTrack(adapter, capturer_, NULL));
track->Start();
- static_cast<webrtc::AudioTrackInterface*>(track.get())->
- GetRenderer()->AddChannel(0);
- EXPECT_TRUE(track->enabled());
- EXPECT_TRUE(track->set_enabled(false));
+ EXPECT_TRUE(track->GetAudioAdapter()->enabled());
+ EXPECT_TRUE(track->GetAudioAdapter()->set_enabled(false));
scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
- const media::AudioParameters params = capturer_->audio_parameters();
+ const media::AudioParameters params = capturer_->source_audio_parameters();
base::WaitableEvent event(false, false);
- EXPECT_CALL(*sink, OnSetFormat(_)).Times(1);
+ EXPECT_CALL(*sink, FormatIsSet()).Times(1);
EXPECT_CALL(*sink,
- CaptureData(1,
- params.sample_rate(),
- params.channels(),
- params.sample_rate() / 100,
- 0,
- 0,
- false,
- false)).Times(0);
+ CaptureData(0, 0, 0, _, false)).Times(0);
+ EXPECT_EQ(sink->audio_params().frames_per_buffer(),
+ params.sample_rate() / 100);
track->AddSink(sink.get());
EXPECT_FALSE(event.TimedWait(TestTimeouts::tiny_timeout()));
event.Reset();
- EXPECT_CALL(*sink,
- CaptureData(1,
- params.sample_rate(),
- params.channels(),
- params.sample_rate() / 100,
- 0,
- 0,
- false,
- false)).Times(AtLeast(1))
+ EXPECT_CALL(*sink, CaptureData(0, 0, 0, _, false)).Times(AtLeast(1))
.WillRepeatedly(SignalEvent(&event));
- EXPECT_TRUE(track->set_enabled(true));
+ EXPECT_TRUE(track->GetAudioAdapter()->set_enabled(true));
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
track->RemoveSink(sink.get());
EXPECT_CALL(*capturer_source_.get(), OnStop()).WillOnce(Return());
capturer_->Stop();
- track = NULL;
+ track.reset();
}
// Create multiple audio tracks and enable/disable them, verify that the audio
// callbacks appear/disappear.
// Flaky due to a data race, see http://crbug.com/295418
TEST_F(WebRtcLocalAudioTrackTest, DISABLED_MultipleAudioTracks) {
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track_1 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_1->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_1(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track_1(
+ new WebRtcLocalAudioTrack(adapter_1, capturer_, NULL));
track_1->Start();
- static_cast<webrtc::AudioTrackInterface*>(track_1.get())->
- GetRenderer()->AddChannel(0);
- EXPECT_TRUE(track_1->enabled());
+ EXPECT_TRUE(track_1->GetAudioAdapter()->enabled());
scoped_ptr<MockMediaStreamAudioSink> sink_1(new MockMediaStreamAudioSink());
- const media::AudioParameters params = capturer_->audio_parameters();
+ const media::AudioParameters params = capturer_->source_audio_parameters();
base::WaitableEvent event_1(false, false);
- EXPECT_CALL(*sink_1, OnSetFormat(_)).WillOnce(Return());
+ EXPECT_CALL(*sink_1, FormatIsSet()).WillOnce(Return());
EXPECT_CALL(*sink_1,
- CaptureData(1,
- params.sample_rate(),
- params.channels(),
- params.sample_rate() / 100,
- 0,
- 0,
- false,
- false)).Times(AtLeast(1))
+ CaptureData(0, 0, 0, _, false)).Times(AtLeast(1))
.WillRepeatedly(SignalEvent(&event_1));
+ EXPECT_EQ(sink_1->audio_params().frames_per_buffer(),
+ params.sample_rate() / 100);
track_1->AddSink(sink_1.get());
EXPECT_TRUE(event_1.TimedWait(TestTimeouts::tiny_timeout()));
- scoped_refptr<WebRtcLocalAudioTrack> track_2 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_2->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_2(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track_2(
+ new WebRtcLocalAudioTrack(adapter_2, capturer_, NULL));
track_2->Start();
- static_cast<webrtc::AudioTrackInterface*>(track_2.get())->
- GetRenderer()->AddChannel(1);
- EXPECT_TRUE(track_2->enabled());
+ EXPECT_TRUE(track_2->GetAudioAdapter()->enabled());
// Verify both |sink_1| and |sink_2| get data.
event_1.Reset();
base::WaitableEvent event_2(false, false);
scoped_ptr<MockMediaStreamAudioSink> sink_2(new MockMediaStreamAudioSink());
- EXPECT_CALL(*sink_2, OnSetFormat(_)).WillOnce(Return());
- EXPECT_CALL(*sink_1,
- CaptureData(1,
- params.sample_rate(),
- params.channels(),
- params.sample_rate() / 100,
- 0,
- 0,
- false,
- false)).Times(AtLeast(1))
+ EXPECT_CALL(*sink_2, FormatIsSet()).WillOnce(Return());
+ EXPECT_CALL(*sink_1, CaptureData(0, 0, 0, _, false)).Times(AtLeast(1))
.WillRepeatedly(SignalEvent(&event_1));
- EXPECT_CALL(*sink_2,
- CaptureData(1,
- params.sample_rate(),
- params.channels(),
- params.sample_rate() / 100,
- 0,
- 0,
- false,
- false)).Times(AtLeast(1))
+ EXPECT_EQ(sink_1->audio_params().frames_per_buffer(),
+ params.sample_rate() / 100);
+ EXPECT_CALL(*sink_2, CaptureData(0, 0, 0, _, false)).Times(AtLeast(1))
.WillRepeatedly(SignalEvent(&event_2));
+ EXPECT_EQ(sink_2->audio_params().frames_per_buffer(),
+ params.sample_rate() / 100);
track_2->AddSink(sink_2.get());
EXPECT_TRUE(event_1.TimedWait(TestTimeouts::tiny_timeout()));
EXPECT_TRUE(event_2.TimedWait(TestTimeouts::tiny_timeout()));
track_1->RemoveSink(sink_1.get());
track_1->Stop();
- track_1 = NULL;
+ track_1.reset();
EXPECT_CALL(*capturer_source_.get(), OnStop()).WillOnce(Return());
track_2->RemoveSink(sink_2.get());
track_2->Stop();
- track_2 = NULL;
-
- capturer_->Stop();
+ track_2.reset();
}
// Start one track and verify the capturer is correctly starting its source.
// And it should be fine to not to call Stop() explicitly.
TEST_F(WebRtcLocalAudioTrackTest, StartOneAudioTrack) {
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track(
+ new WebRtcLocalAudioTrack(adapter, capturer_, NULL));
track->Start();
// When the track goes away, it will automatically stop the
// |capturer_source_|.
EXPECT_CALL(*capturer_source_.get(), OnStop());
- capturer_->Stop();
- track = NULL;
+ track.reset();
+}
+
+// Start two tracks and verify the capturer is correctly starting its source.
+// When the last track connected to the capturer is stopped, the source is
+// stopped.
+TEST_F(WebRtcLocalAudioTrackTest, StartTwoAudioTracks) {
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter1(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track1(
+ new WebRtcLocalAudioTrack(adapter1, capturer_, NULL));
+ track1->Start();
+
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter2(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track2(
+ new WebRtcLocalAudioTrack(adapter2, capturer_, NULL));
+ track2->Start();
+
+ track1->Stop();
+ // When the last track is stopped, it will automatically stop the
+ // |capturer_source_|.
+ EXPECT_CALL(*capturer_source_.get(), OnStop());
+ track2->Stop();
}
// Start/Stop tracks and verify the capturer is correctly starting/stopping
// its source.
TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
- // Starting the first audio track will start the |capturer_source_|.
base::WaitableEvent event(false, false);
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart()).WillOnce(SignalEvent(&event));
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track_1 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<webrtc::AudioTrackInterface*>(track_1.get())->
- GetRenderer()->AddChannel(0);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_1->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_1(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track_1(
+ new WebRtcLocalAudioTrack(adapter_1, capturer_, NULL));
track_1->Start();
- EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
// Verify the data flow by connecting the sink to |track_1|.
scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
event.Reset();
- EXPECT_CALL(*sink, OnSetFormat(_)).WillOnce(SignalEvent(&event));
- EXPECT_CALL(*sink, CaptureData(_, _, _, _, 0, 0, false, false))
+ EXPECT_CALL(*sink, FormatIsSet()).WillOnce(SignalEvent(&event));
+ EXPECT_CALL(*sink, CaptureData(_, 0, 0, _, false))
.Times(AnyNumber()).WillRepeatedly(Return());
track_1->AddSink(sink.get());
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
@@ -414,14 +383,11 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
// Start the second audio track will not start the |capturer_source_|
// since it has been started.
EXPECT_CALL(*capturer_source_.get(), OnStart()).Times(0);
- scoped_refptr<WebRtcLocalAudioTrack> track_2 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_2->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_2(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track_2(
+ new WebRtcLocalAudioTrack(adapter_2, capturer_, NULL));
track_2->Start();
- static_cast<webrtc::AudioTrackInterface*>(track_2.get())->
- GetRenderer()->AddChannel(1);
// Stop the capturer will clear up the track lists in the capturer.
EXPECT_CALL(*capturer_source_.get(), OnStop());
@@ -429,7 +395,7 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
// Adding a new track to the capturer.
track_2->AddSink(sink.get());
- EXPECT_CALL(*sink, OnSetFormat(_)).Times(0);
+ EXPECT_CALL(*sink, FormatIsSet()).Times(0);
// Stop the capturer again will not trigger stopping the source of the
// capturer again..
@@ -438,102 +404,54 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
capturer_->Stop();
}
-// Set new source to the existing capturer.
-TEST_F(WebRtcLocalAudioTrackTest, SetNewSourceForCapturerAfterStartTrack) {
- // Setup the audio track and start the track.
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track->audio_source_provider())->SetSinkParamsForTesting(params_);
- track->Start();
-
- // Setting new source to the capturer and the track should still get packets.
- scoped_refptr<MockCapturerSource> new_source(
- new MockCapturerSource(capturer_.get()));
- EXPECT_CALL(*capturer_source_.get(), OnStop());
- EXPECT_CALL(*new_source.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*new_source.get(), OnInitialize(_, capturer_.get(), 0))
- .WillOnce(Return());
- EXPECT_CALL(*new_source.get(), OnStart());
- capturer_->SetCapturerSource(new_source,
- params_.channel_layout(),
- params_.sample_rate(),
- params_.effects());
-
- // Stop the track.
- EXPECT_CALL(*new_source.get(), OnStop());
- capturer_->Stop();
-}
-
// Create a new capturer with new source, connect it to a new audio track.
TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
// Setup the first audio track and start it.
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track_1 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_1->audio_source_provider())->SetSinkParamsForTesting(params_);
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_1(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track_1(
+ new WebRtcLocalAudioTrack(adapter_1, capturer_, NULL));
track_1->Start();
- // Connect a number of network channels to the |track_1|.
- static const int kNumberOfNetworkChannelsForTrack1 = 2;
- for (int i = 0; i < kNumberOfNetworkChannelsForTrack1; ++i) {
- static_cast<webrtc::AudioTrackInterface*>(track_1.get())->
- GetRenderer()->AddChannel(i);
- }
// Verify the data flow by connecting the |sink_1| to |track_1|.
scoped_ptr<MockMediaStreamAudioSink> sink_1(new MockMediaStreamAudioSink());
- EXPECT_CALL(
- *sink_1.get(),
- CaptureData(
- kNumberOfNetworkChannelsForTrack1, 48000, 2, _, 0, 0, false, false))
+ EXPECT_CALL(*sink_1.get(), CaptureData(0, 0, 0, _, false))
.Times(AnyNumber()).WillRepeatedly(Return());
- EXPECT_CALL(*sink_1.get(), OnSetFormat(_)).Times(AnyNumber());
+ EXPECT_CALL(*sink_1.get(), FormatIsSet()).Times(AnyNumber());
track_1->AddSink(sink_1.get());
// Create a new capturer with new source with different audio format.
+ MockMediaConstraintFactory constraint_factory;
+ StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
+ std::string(), std::string());
scoped_refptr<WebRtcAudioCapturer> new_capturer(
- WebRtcAudioCapturer::CreateCapturer());
+ WebRtcAudioCapturer::CreateCapturer(
+ -1, device, constraint_factory.CreateWebMediaConstraints(), NULL,
+ NULL));
scoped_refptr<MockCapturerSource> new_source(
new MockCapturerSource(new_capturer.get()));
- EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), 0));
- new_capturer->SetCapturerSource(new_source,
- media::CHANNEL_LAYOUT_MONO,
- 44100,
- media::AudioParameters::NO_EFFECTS);
-
- // Setup the second audio track, connect it to the new capturer and start it.
+ EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), -1));
EXPECT_CALL(*new_source.get(), SetAutomaticGainControl(true));
EXPECT_CALL(*new_source.get(), OnStart());
- scoped_refptr<WebRtcLocalAudioTrack> track_2 =
- WebRtcLocalAudioTrack::Create(std::string(), new_capturer, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track_2->audio_source_provider())->SetSinkParamsForTesting(params_);
+
+ media::AudioParameters new_param(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_MONO, 44100, 16, 441);
+ new_capturer->SetCapturerSourceForTesting(new_source, new_param);
+
+ // Setup the second audio track, connect it to the new capturer and start it.
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_2(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track_2(
+ new WebRtcLocalAudioTrack(adapter_2, new_capturer, NULL));
track_2->Start();
- // Connect a number of network channels to the |track_2|.
- static const int kNumberOfNetworkChannelsForTrack2 = 3;
- for (int i = 0; i < kNumberOfNetworkChannelsForTrack2; ++i) {
- static_cast<webrtc::AudioTrackInterface*>(track_2.get())->
- GetRenderer()->AddChannel(i);
- }
// Verify the data flow by connecting the |sink_2| to |track_2|.
scoped_ptr<MockMediaStreamAudioSink> sink_2(new MockMediaStreamAudioSink());
base::WaitableEvent event(false, false);
- EXPECT_CALL(
- *sink_2,
- CaptureData(
- kNumberOfNetworkChannelsForTrack2, 44100, 1, _, 0, 0, false, false))
+ EXPECT_CALL(*sink_2, CaptureData(0, 0, 0, _, false))
.Times(AnyNumber()).WillRepeatedly(Return());
- EXPECT_CALL(*sink_2, OnSetFormat(_)).WillOnce(SignalEvent(&event));
+ EXPECT_CALL(*sink_2, FormatIsSet()).WillOnce(SignalEvent(&event));
track_2->AddSink(sink_2.get());
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
@@ -549,7 +467,6 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
capturer_->Stop();
}
-
// Make sure a audio track can deliver packets with a buffer size smaller than
// 10ms when it is not connected with a peer connection.
TEST_F(WebRtcLocalAudioTrackTest, TrackWorkWithSmallBufferSize) {
@@ -558,33 +475,35 @@ TEST_F(WebRtcLocalAudioTrackTest, TrackWorkWithSmallBufferSize) {
media::CHANNEL_LAYOUT_STEREO, 48000, 16, 128);
// Create a capturer with new source which works with the format above.
+ MockMediaConstraintFactory factory;
+ factory.DisableDefaultAudioConstraints();
scoped_refptr<WebRtcAudioCapturer> capturer(
- WebRtcAudioCapturer::CreateCapturer());
+ WebRtcAudioCapturer::CreateCapturer(
+ -1,
+ StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE,
+ "", "", params.sample_rate(),
+ params.channel_layout(),
+ params.frames_per_buffer()),
+ factory.CreateWebMediaConstraints(),
+ NULL, NULL));
scoped_refptr<MockCapturerSource> source(
new MockCapturerSource(capturer.get()));
- capturer->Initialize(-1, params.channel_layout(), params.sample_rate(),
- params.frames_per_buffer(), 0, std::string(), 0, 0,
- params.effects());
-
- EXPECT_CALL(*source.get(), OnInitialize(_, capturer.get(), 0));
- capturer->SetCapturerSource(source, params.channel_layout(),
- params.sample_rate(), params.effects());
-
- // Setup a audio track, connect it to the capturer and start it.
+ EXPECT_CALL(*source.get(), OnInitialize(_, capturer.get(), -1));
EXPECT_CALL(*source.get(), SetAutomaticGainControl(true));
EXPECT_CALL(*source.get(), OnStart());
- RTCMediaConstraints constraints;
- scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL,
- &constraints);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track->audio_source_provider())->SetSinkParamsForTesting(params);
+ capturer->SetCapturerSourceForTesting(source, params);
+
+ // Setup a audio track, connect it to the capturer and start it.
+ scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+ WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+ scoped_ptr<WebRtcLocalAudioTrack> track(
+ new WebRtcLocalAudioTrack(adapter, capturer, NULL));
track->Start();
// Verify the data flow by connecting the |sink| to |track|.
scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
base::WaitableEvent event(false, false);
- EXPECT_CALL(*sink, OnSetFormat(_)).Times(1);
+ EXPECT_CALL(*sink, FormatIsSet()).Times(1);
// Verify the sinks are getting the packets with an expecting buffer size.
#if defined(OS_ANDROID)
const int expected_buffer_size = params.sample_rate() / 100;
@@ -592,15 +511,19 @@ TEST_F(WebRtcLocalAudioTrackTest, TrackWorkWithSmallBufferSize) {
const int expected_buffer_size = params.frames_per_buffer();
#endif
EXPECT_CALL(*sink, CaptureData(
- 0, params.sample_rate(), params.channels(), expected_buffer_size,
- 0, 0, false, false))
+ 0, 0, 0, _, false))
.Times(AtLeast(1)).WillRepeatedly(SignalEvent(&event));
track->AddSink(sink.get());
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
+ EXPECT_EQ(expected_buffer_size, sink->audio_params().frames_per_buffer());
// Stopping the new source will stop the second track.
EXPECT_CALL(*source, OnStop()).Times(1);
capturer->Stop();
+
+ // Even though this test don't use |capturer_source_| it will be stopped
+ // during teardown of the test harness.
+ EXPECT_CALL(*capturer_source_.get(), OnStop());
}
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_logging.cc b/chromium/content/renderer/media/webrtc_logging.cc
index 2bdc7a33e5a..96868a6836c 100644
--- a/chromium/content/renderer/media/webrtc_logging.cc
+++ b/chromium/content/renderer/media/webrtc_logging.cc
@@ -4,6 +4,7 @@
#include "content/renderer/media/webrtc_logging.h"
+#include "base/time/time.h"
#include "content/public/renderer/webrtc_log_message_delegate.h"
#include "third_party/libjingle/overrides/talk/base/logging.h"
@@ -20,7 +21,8 @@ void InitWebRtcLoggingDelegate(WebRtcLogMessageDelegate* delegate) {
}
void InitWebRtcLogging() {
- talk_base::InitDiagnosticLoggingDelegateFunction(WebRtcLogMessage);
+ // Log messages from Libjingle should not have timestamps.
+ talk_base::InitDiagnosticLoggingDelegateFunction(&WebRtcLogMessage);
}
void WebRtcLogMessage(const std::string& message) {
diff --git a/chromium/content/renderer/media/webrtc_uma_histograms.cc b/chromium/content/renderer/media/webrtc_uma_histograms.cc
new file mode 100644
index 00000000000..16da393073e
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc_uma_histograms.cc
@@ -0,0 +1,60 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc_uma_histograms.h"
+
+#include "base/metrics/histogram.h"
+
+namespace content {
+
+void UpdateWebRTCMethodCount(JavaScriptAPIName api_name) {
+ DVLOG(3) << "Incrementing WebRTC.webkitApiCount for " << api_name;
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.webkitApiCount", api_name, INVALID_NAME);
+ PerSessionWebRTCAPIMetrics::GetInstance()->LogUsageOnlyOnce(api_name);
+}
+
+PerSessionWebRTCAPIMetrics::~PerSessionWebRTCAPIMetrics() {
+}
+
+// static
+PerSessionWebRTCAPIMetrics* PerSessionWebRTCAPIMetrics::GetInstance() {
+ return Singleton<PerSessionWebRTCAPIMetrics>::get();
+}
+
+void PerSessionWebRTCAPIMetrics::IncrementStreamCounter() {
+ DCHECK(CalledOnValidThread());
+ ++num_streams_;
+}
+
+void PerSessionWebRTCAPIMetrics::DecrementStreamCounter() {
+ DCHECK(CalledOnValidThread());
+ if (--num_streams_ == 0) {
+ ResetUsage();
+ }
+}
+
+PerSessionWebRTCAPIMetrics::PerSessionWebRTCAPIMetrics() : num_streams_(0) {
+ ResetUsage();
+}
+
+void PerSessionWebRTCAPIMetrics::LogUsage(JavaScriptAPIName api_name) {
+ DVLOG(3) << "Incrementing WebRTC.webkitApiCountPerSession for " << api_name;
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.webkitApiCountPerSession",
+ api_name, INVALID_NAME);
+}
+
+void PerSessionWebRTCAPIMetrics::LogUsageOnlyOnce(JavaScriptAPIName api_name) {
+ DCHECK(CalledOnValidThread());
+ if (!has_used_api_[api_name]) {
+ has_used_api_[api_name] = true;
+ LogUsage(api_name);
+ }
+}
+
+void PerSessionWebRTCAPIMetrics::ResetUsage() {
+ for (size_t i = 0; i < arraysize(has_used_api_); ++i)
+ has_used_api_[i] = false;
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_uma_histograms.h b/chromium/content/renderer/media/webrtc_uma_histograms.h
index de358f4c8d7..2dfb032db1b 100644
--- a/chromium/content/renderer/media/webrtc_uma_histograms.h
+++ b/chromium/content/renderer/media/webrtc_uma_histograms.h
@@ -5,7 +5,9 @@
#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_UMA_HISTOGRAMS_H_
#define CONTENT_RENDERER_MEDIA_WEBRTC_UMA_HISTOGRAMS_H_
-#include "base/metrics/histogram.h"
+#include "base/memory/singleton.h"
+#include "base/threading/non_thread_safe.h"
+#include "content/common/content_export.h"
namespace content {
@@ -15,15 +17,66 @@ enum JavaScriptAPIName {
WEBKIT_PEER_CONNECTION,
WEBKIT_DEPRECATED_PEER_CONNECTION,
WEBKIT_RTC_PEER_CONNECTION,
+ WEBKIT_GET_MEDIA_DEVICES,
INVALID_NAME
};
// Helper method used to collect information about the number of times
-// different WebRTC API:s are called from JavaScript.
-// The histogram can be viewed at chrome://histograms/WebRTC.webkitApiCount.
-inline void UpdateWebRTCMethodCount(JavaScriptAPIName api_name) {
- UMA_HISTOGRAM_ENUMERATION("WebRTC.webkitApiCount", api_name, INVALID_NAME);
-}
+// different WebRTC APIs are called from JavaScript.
+//
+// This contributes to two histograms; the former is a raw count of
+// the number of times the APIs are called, and be viewed at
+// chrome://histograms/WebRTC.webkitApiCount.
+//
+// The latter is a count of the number of times the APIs are called
+// that gets incremented only once per "session" as established by the
+// PerSessionWebRTCAPIMetrics singleton below. It can be viewed at
+// chrome://histograms/WebRTC.webkitApiCountPerSession.
+void UpdateWebRTCMethodCount(JavaScriptAPIName api_name);
+
+// A singleton that keeps track of the number of MediaStreams being
+// sent over PeerConnections. It uses the transition to zero such
+// streams to demarcate the start of a new "session". Note that this
+// is a rough approximation of sessions, as you could conceivably have
+// multiple tabs using this renderer process, and each of them using
+// PeerConnections.
+//
+// The UpdateWebRTCMethodCount function above uses this class to log a
+// metric at most once per session.
+class CONTENT_EXPORT PerSessionWebRTCAPIMetrics : public base::NonThreadSafe {
+ public:
+ virtual ~PerSessionWebRTCAPIMetrics();
+
+ static PerSessionWebRTCAPIMetrics* GetInstance();
+
+ // Increment/decrement the number of streams being sent or received
+ // over any current PeerConnection.
+ void IncrementStreamCounter();
+ void DecrementStreamCounter();
+
+ protected:
+ friend struct DefaultSingletonTraits<PerSessionWebRTCAPIMetrics>;
+ friend void UpdateWebRTCMethodCount(JavaScriptAPIName);
+
+ // Protected so that unit tests can test without this being a
+ // singleton.
+ PerSessionWebRTCAPIMetrics();
+
+ // Overridable by unit tests.
+ virtual void LogUsage(JavaScriptAPIName api_name);
+
+ // Called by UpdateWebRTCMethodCount above. Protected rather than
+ // private so that unit tests can call it.
+ void LogUsageOnlyOnce(JavaScriptAPIName api_name);
+
+ private:
+ void ResetUsage();
+
+ int num_streams_;
+ bool has_used_api_[INVALID_NAME];
+
+ DISALLOW_COPY_AND_ASSIGN(PerSessionWebRTCAPIMetrics);
+};
} // namespace content
diff --git a/chromium/content/renderer/media/webrtc_uma_histograms_unittest.cc b/chromium/content/renderer/media/webrtc_uma_histograms_unittest.cc
new file mode 100644
index 00000000000..0a090cf6f3c
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc_uma_histograms_unittest.cc
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc_uma_histograms.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+
+namespace content {
+
+class MockPerSessionWebRTCAPIMetrics : public PerSessionWebRTCAPIMetrics {
+ public:
+ MockPerSessionWebRTCAPIMetrics() {}
+
+ using PerSessionWebRTCAPIMetrics::LogUsageOnlyOnce;
+
+ MOCK_METHOD1(LogUsage, void(JavaScriptAPIName));
+};
+
+TEST(PerSessionWebRTCAPIMetrics, NoCallOngoingGetUserMedia) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ EXPECT_CALL(metrics, LogUsage(_)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_GET_USER_MEDIA);
+}
+
+TEST(PerSessionWebRTCAPIMetrics, CallOngoingGetUserMedia) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ metrics.IncrementStreamCounter();
+ EXPECT_CALL(metrics, LogUsage(WEBKIT_GET_USER_MEDIA)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_GET_USER_MEDIA);
+}
+
+TEST(PerSessionWebRTCAPIMetrics, NoCallOngoingGetMediaDevices) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ EXPECT_CALL(metrics, LogUsage(_)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_GET_MEDIA_DEVICES);
+}
+
+TEST(PerSessionWebRTCAPIMetrics, CallOngoingGetMediaDevices) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ metrics.IncrementStreamCounter();
+ EXPECT_CALL(metrics, LogUsage(WEBKIT_GET_MEDIA_DEVICES)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_GET_MEDIA_DEVICES);
+}
+
+TEST(PerSessionWebRTCAPIMetrics, NoCallOngoingRTCPeerConnection) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ EXPECT_CALL(metrics, LogUsage(WEBKIT_RTC_PEER_CONNECTION));
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+}
+
+TEST(PerSessionWebRTCAPIMetrics, NoCallOngoingMultiplePC) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ EXPECT_CALL(metrics, LogUsage(WEBKIT_RTC_PEER_CONNECTION)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+}
+
+TEST(PerSessionWebRTCAPIMetrics, BeforeAfterCallMultiplePC) {
+ MockPerSessionWebRTCAPIMetrics metrics;
+ EXPECT_CALL(metrics, LogUsage(WEBKIT_RTC_PEER_CONNECTION)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.IncrementStreamCounter();
+ metrics.IncrementStreamCounter();
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.DecrementStreamCounter();
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.DecrementStreamCounter();
+ EXPECT_CALL(metrics, LogUsage(WEBKIT_RTC_PEER_CONNECTION)).Times(1);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+ metrics.LogUsageOnlyOnce(WEBKIT_RTC_PEER_CONNECTION);
+}
+
+} // namespace content
diff --git a/chromium/content/renderer/media/websourcebuffer_impl.cc b/chromium/content/renderer/media/websourcebuffer_impl.cc
index c9429779d82..1b8e4fa1435 100644
--- a/chromium/content/renderer/media/websourcebuffer_impl.cc
+++ b/chromium/content/renderer/media/websourcebuffer_impl.cc
@@ -4,6 +4,8 @@
#include "content/renderer/media/websourcebuffer_impl.h"
+#include <limits>
+
#include "base/float_util.h"
#include "media/filters/chunk_demuxer.h"
@@ -11,10 +13,13 @@ namespace content {
static base::TimeDelta DoubleToTimeDelta(double time) {
DCHECK(!base::IsNaN(time));
- DCHECK_GE(time, 0);
+ DCHECK_NE(time, -std::numeric_limits<double>::infinity());
+
if (time == std::numeric_limits<double>::infinity())
return media::kInfiniteDuration();
+ // Don't use base::TimeDelta::Max() here, as we want the largest finite time
+ // delta.
base::TimeDelta max_time = base::TimeDelta::FromInternalValue(kint64max - 1);
double max_time_in_seconds = max_time.InSecondsF();
@@ -28,7 +33,8 @@ static base::TimeDelta DoubleToTimeDelta(double time) {
WebSourceBufferImpl::WebSourceBufferImpl(
const std::string& id, media::ChunkDemuxer* demuxer)
: id_(id),
- demuxer_(demuxer) {
+ demuxer_(demuxer),
+ append_window_end_(media::kInfiniteDuration()) {
DCHECK(demuxer_);
}
@@ -36,6 +42,23 @@ WebSourceBufferImpl::~WebSourceBufferImpl() {
DCHECK(!demuxer_) << "Object destroyed w/o removedFromMediaSource() call";
}
+bool WebSourceBufferImpl::setMode(WebSourceBuffer::AppendMode mode) {
+ if (demuxer_->IsParsingMediaSegment(id_))
+ return false;
+
+ switch (mode) {
+ case WebSourceBuffer::AppendModeSegments:
+ demuxer_->SetSequenceMode(id_, false);
+ return true;
+ case WebSourceBuffer::AppendModeSequence:
+ demuxer_->SetSequenceMode(id_, true);
+ return true;
+ }
+
+ NOTREACHED();
+ return false;
+}
+
blink::WebTimeRanges WebSourceBufferImpl::buffered() {
media::Ranges<base::TimeDelta> ranges = demuxer_->GetBufferedRanges(id_);
blink::WebTimeRanges result(ranges.size());
@@ -46,30 +69,61 @@ blink::WebTimeRanges WebSourceBufferImpl::buffered() {
return result;
}
-void WebSourceBufferImpl::append(const unsigned char* data, unsigned length) {
- demuxer_->AppendData(id_, data, length);
+void WebSourceBufferImpl::append(
+ const unsigned char* data,
+ unsigned length,
+ double* timestamp_offset) {
+ base::TimeDelta old_offset = timestamp_offset_;
+ demuxer_->AppendData(id_, data, length,
+ append_window_start_, append_window_end_,
+ &timestamp_offset_);
+
+ // Coded frame processing may update the timestamp offset. If the caller
+ // provides a non-NULL |timestamp_offset| and frame processing changes the
+ // timestamp offset, report the new offset to the caller. Do not update the
+ // caller's offset otherwise, to preserve any pre-existing value that may have
+ // more than microsecond precision.
+ if (timestamp_offset && old_offset != timestamp_offset_)
+ *timestamp_offset = timestamp_offset_.InSecondsF();
}
void WebSourceBufferImpl::abort() {
- demuxer_->Abort(id_);
+ demuxer_->Abort(id_,
+ append_window_start_, append_window_end_,
+ &timestamp_offset_);
+
+ // TODO(wolenetz): abort should be able to modify the caller timestamp offset
+ // (just like WebSourceBufferImpl::append).
+ // See http://crbug.com/370229 for further details.
}
void WebSourceBufferImpl::remove(double start, double end) {
+ DCHECK_GE(start, 0);
+ DCHECK_GE(end, 0);
demuxer_->Remove(id_, DoubleToTimeDelta(start), DoubleToTimeDelta(end));
}
bool WebSourceBufferImpl::setTimestampOffset(double offset) {
- base::TimeDelta time_offset = base::TimeDelta::FromMicroseconds(
- offset * base::Time::kMicrosecondsPerSecond);
- return demuxer_->SetTimestampOffset(id_, time_offset);
+ if (demuxer_->IsParsingMediaSegment(id_))
+ return false;
+
+ timestamp_offset_ = DoubleToTimeDelta(offset);
+
+ // http://www.w3.org/TR/media-source/#widl-SourceBuffer-timestampOffset
+ // Step 6: If the mode attribute equals "sequence", then set the group start
+ // timestamp to new timestamp offset.
+ demuxer_->SetGroupStartTimestampIfInSequenceMode(id_, timestamp_offset_);
+ return true;
}
void WebSourceBufferImpl::setAppendWindowStart(double start) {
- demuxer_->SetAppendWindowStart(id_, DoubleToTimeDelta(start));
+ DCHECK_GE(start, 0);
+ append_window_start_ = DoubleToTimeDelta(start);
}
void WebSourceBufferImpl::setAppendWindowEnd(double end) {
- demuxer_->SetAppendWindowEnd(id_, DoubleToTimeDelta(end));
+ DCHECK_GE(end, 0);
+ append_window_end_ = DoubleToTimeDelta(end);
}
void WebSourceBufferImpl::removedFromMediaSource() {
diff --git a/chromium/content/renderer/media/websourcebuffer_impl.h b/chromium/content/renderer/media/websourcebuffer_impl.h
index abfec405ad2..b0e4fa686ae 100644
--- a/chromium/content/renderer/media/websourcebuffer_impl.h
+++ b/chromium/content/renderer/media/websourcebuffer_impl.h
@@ -9,6 +9,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/time/time.h"
#include "third_party/WebKit/public/platform/WebSourceBuffer.h"
namespace media {
@@ -23,21 +24,31 @@ class WebSourceBufferImpl : public blink::WebSourceBuffer {
virtual ~WebSourceBufferImpl();
// blink::WebSourceBuffer implementation.
- virtual blink::WebTimeRanges buffered() OVERRIDE;
- virtual void append(const unsigned char* data, unsigned length) OVERRIDE;
- virtual void abort() OVERRIDE;
- // TODO(acolwell): Add OVERRIDE when Blink-side changes land.
+ virtual bool setMode(AppendMode mode);
+ virtual blink::WebTimeRanges buffered();
+ virtual void append(
+ const unsigned char* data,
+ unsigned length,
+ double* timestamp_offset);
+ virtual void abort();
virtual void remove(double start, double end);
- virtual bool setTimestampOffset(double offset) OVERRIDE;
- // TODO(acolwell): Add OVERRIDE when Blink-side changes land.
+ virtual bool setTimestampOffset(double offset);
virtual void setAppendWindowStart(double start);
virtual void setAppendWindowEnd(double end);
- virtual void removedFromMediaSource() OVERRIDE;
+ virtual void removedFromMediaSource();
private:
std::string id_;
media::ChunkDemuxer* demuxer_; // Owned by WebMediaPlayerImpl.
+ // Controls the offset applied to timestamps when processing appended media
+ // segments. It is initially 0, which indicates that no offset is being
+ // applied. Both setTimestampOffset() and append() may update this value.
+ base::TimeDelta timestamp_offset_;
+
+ base::TimeDelta append_window_start_;
+ base::TimeDelta append_window_end_;
+
DISALLOW_COPY_AND_ASSIGN(WebSourceBufferImpl);
};