author    Jocelyn Turcotte <jocelyn.turcotte@digia.com>    2014-08-08 14:30:41 +0200
committer Jocelyn Turcotte <jocelyn.turcotte@digia.com>    2014-08-12 13:49:54 +0200
commit    ab0a50979b9eb4dfa3320eff7e187e41efedf7a9 (patch)
tree      498dfb8a97ff3361a9f7486863a52bb4e26bb898 /chromium/media
parent    4ce69f7403811819800e7c5ae1318b2647e778d1 (diff)
Update Chromium to beta version 37.0.2062.68
Change-Id: I188e3b5aff1bec75566014291b654eb19f5bc8ca
Reviewed-by: Andras Becsi <andras.becsi@digia.com>
Diffstat (limited to 'chromium/media')
-rw-r--r--  chromium/media/DEPS | 3
-rw-r--r--  chromium/media/OWNERS | 1
-rw-r--r--  chromium/media/PRESUBMIT.py | 109
-rw-r--r--  chromium/media/PRESUBMIT_test.py | 150
-rw-r--r--  chromium/media/audio/agc_audio_stream.h | 4
-rw-r--r--  chromium/media/audio/alsa/alsa_input.cc | 20
-rw-r--r--  chromium/media/audio/alsa/alsa_input.h | 5
-rw-r--r--  chromium/media/audio/alsa/alsa_output.cc | 11
-rw-r--r--  chromium/media/audio/alsa/alsa_output.h | 9
-rw-r--r--  chromium/media/audio/alsa/alsa_output_unittest.cc | 21
-rw-r--r--  chromium/media/audio/alsa/audio_manager_alsa.cc | 16
-rw-r--r--  chromium/media/audio/alsa/audio_manager_alsa.h | 3
-rw-r--r--  chromium/media/audio/android/audio_android_unittest.cc | 533
-rw-r--r--  chromium/media/audio/android/audio_manager_android.cc | 209
-rw-r--r--  chromium/media/audio/android/audio_manager_android.h | 28
-rw-r--r--  chromium/media/audio/android/audio_record_input.cc | 18
-rw-r--r--  chromium/media/audio/android/audio_record_input.h | 4
-rw-r--r--  chromium/media/audio/android/opensles_input.cc | 24
-rw-r--r--  chromium/media/audio/android/opensles_input.h | 3
-rw-r--r--  chromium/media/audio/android/opensles_output.cc | 11
-rw-r--r--  chromium/media/audio/android/opensles_output.h | 7
-rw-r--r--  chromium/media/audio/audio_input_controller.cc | 260
-rw-r--r--  chromium/media/audio/audio_input_controller.h | 99
-rw-r--r--  chromium/media/audio/audio_input_controller_unittest.cc | 33
-rw-r--r--  chromium/media/audio/audio_input_device.cc | 61
-rw-r--r--  chromium/media/audio/audio_input_device.h | 9
-rw-r--r--  chromium/media/audio/audio_input_ipc.h | 3
-rw-r--r--  chromium/media/audio/audio_input_unittest.cc | 252
-rw-r--r--  chromium/media/audio/audio_io.h | 23
-rw-r--r--  chromium/media/audio/audio_logging.h | 8
-rw-r--r--  chromium/media/audio/audio_low_latency_input_output_unittest.cc | 31
-rw-r--r--  chromium/media/audio/audio_manager.h | 38
-rw-r--r--  chromium/media/audio/audio_manager_base.cc | 112
-rw-r--r--  chromium/media/audio/audio_manager_base.h | 31
-rw-r--r--  chromium/media/audio/audio_manager_unittest.cc | 45
-rw-r--r--  chromium/media/audio/audio_output_controller.cc | 94
-rw-r--r--  chromium/media/audio/audio_output_controller.h | 53
-rw-r--r--  chromium/media/audio/audio_output_controller_unittest.cc | 20
-rw-r--r--  chromium/media/audio/audio_output_device.cc | 64
-rw-r--r--  chromium/media/audio/audio_output_device.h | 15
-rw-r--r--  chromium/media/audio/audio_output_device_unittest.cc | 41
-rw-r--r--  chromium/media/audio/audio_output_dispatcher.cc | 16
-rw-r--r--  chromium/media/audio/audio_output_dispatcher.h | 20
-rw-r--r--  chromium/media/audio/audio_output_dispatcher_impl.cc | 45
-rw-r--r--  chromium/media/audio/audio_output_dispatcher_impl.h | 4
-rw-r--r--  chromium/media/audio/audio_output_ipc.h | 3
-rw-r--r--  chromium/media/audio/audio_output_proxy_unittest.cc | 136
-rw-r--r--  chromium/media/audio/audio_output_resampler.cc | 106
-rw-r--r--  chromium/media/audio/audio_output_resampler.h | 3
-rw-r--r--  chromium/media/audio/audio_parameters.cc | 2
-rw-r--r--  chromium/media/audio/audio_parameters.h | 4
-rw-r--r--  chromium/media/audio/audio_parameters_unittest.cc | 38
-rw-r--r--  chromium/media/audio/clockless_audio_sink.cc | 16
-rw-r--r--  chromium/media/audio/clockless_audio_sink.h | 2
-rw-r--r--  chromium/media/audio/cras/audio_manager_cras.cc | 47
-rw-r--r--  chromium/media/audio/cras/audio_manager_cras.h | 7
-rw-r--r--  chromium/media/audio/cras/cras_input.cc | 23
-rw-r--r--  chromium/media/audio/cras/cras_input.h | 8
-rw-r--r--  chromium/media/audio/cras/cras_input_unittest.cc | 29
-rw-r--r--  chromium/media/audio/cras/cras_unified.cc | 11
-rw-r--r--  chromium/media/audio/cras/cras_unified.h | 10
-rw-r--r--  chromium/media/audio/cras/cras_unified_unittest.cc | 29
-rw-r--r--  chromium/media/audio/fake_audio_consumer.cc | 29
-rw-r--r--  chromium/media/audio/fake_audio_consumer.h | 13
-rw-r--r--  chromium/media/audio/fake_audio_input_stream.cc | 72
-rw-r--r--  chromium/media/audio/fake_audio_input_stream.h | 3
-rw-r--r--  chromium/media/audio/fake_audio_log_factory.cc | 3
-rw-r--r--  chromium/media/audio/fake_audio_manager.cc | 3
-rw-r--r--  chromium/media/audio/fake_audio_manager.h | 3
-rw-r--r--  chromium/media/audio/fake_audio_output_stream.cc | 14
-rw-r--r--  chromium/media/audio/linux/audio_manager_linux.cc | 8
-rw-r--r--  chromium/media/audio/mac/aggregate_device_manager.cc | 371
-rw-r--r--  chromium/media/audio/mac/aggregate_device_manager.h | 58
-rw-r--r--  chromium/media/audio/mac/audio_auhal_mac.cc | 215
-rw-r--r--  chromium/media/audio/mac/audio_auhal_mac.h | 21
-rw-r--r--  chromium/media/audio/mac/audio_auhal_mac_unittest.cc | 231
-rw-r--r--  chromium/media/audio/mac/audio_device_listener_mac.cc | 1
-rw-r--r--  chromium/media/audio/mac/audio_device_listener_mac_unittest.cc | 13
-rw-r--r--  chromium/media/audio/mac/audio_input_mac.cc | 44
-rw-r--r--  chromium/media/audio/mac/audio_input_mac.h | 12
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_input_mac.cc | 88
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_input_mac.h | 10
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc | 83
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_output_mac.cc | 416
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_output_mac.h | 115
-rw-r--r--  chromium/media/audio/mac/audio_manager_mac.cc | 402
-rw-r--r--  chromium/media/audio/mac/audio_manager_mac.h | 39
-rw-r--r--  chromium/media/audio/mac/audio_synchronized_mac.cc | 976
-rw-r--r--  chromium/media/audio/mac/audio_synchronized_mac.h | 216
-rw-r--r--  chromium/media/audio/mac/audio_unified_mac.cc | 397
-rw-r--r--  chromium/media/audio/mac/audio_unified_mac.h | 100
-rw-r--r--  chromium/media/audio/mock_audio_manager.cc | 32
-rw-r--r--  chromium/media/audio/mock_audio_manager.h | 18
-rw-r--r--  chromium/media/audio/mock_audio_source_callback.cc | 12
-rw-r--r--  chromium/media/audio/mock_audio_source_callback.h | 28
-rw-r--r--  chromium/media/audio/null_audio_sink.cc | 18
-rw-r--r--  chromium/media/audio/null_audio_sink.h | 6
-rw-r--r--  chromium/media/audio/openbsd/audio_manager_openbsd.cc | 9
-rw-r--r--  chromium/media/audio/openbsd/audio_manager_openbsd.h | 3
-rw-r--r--  chromium/media/audio/pulse/audio_manager_pulse.cc | 53
-rw-r--r--  chromium/media/audio/pulse/audio_manager_pulse.h | 5
-rw-r--r--  chromium/media/audio/pulse/pulse.sigs | 12
-rw-r--r--  chromium/media/audio/pulse/pulse_input.cc | 13
-rw-r--r--  chromium/media/audio/pulse/pulse_input.h | 2
-rw-r--r--  chromium/media/audio/pulse/pulse_output.cc | 20
-rw-r--r--  chromium/media/audio/pulse/pulse_output.h | 9
-rw-r--r--  chromium/media/audio/pulse/pulse_unified.cc | 292
-rw-r--r--  chromium/media/audio/pulse/pulse_unified.h | 90
-rw-r--r--  chromium/media/audio/pulse/pulse_util.cc | 13
-rw-r--r--  chromium/media/audio/pulse/pulse_util.h | 1
-rw-r--r--  chromium/media/audio/sample_rates.cc | 51
-rw-r--r--  chromium/media/audio/sample_rates.h | 7
-rw-r--r--  chromium/media/audio/scoped_loop_observer.h | 50
-rw-r--r--  chromium/media/audio/scoped_task_runner_observer.cc (renamed from chromium/media/audio/scoped_loop_observer.cc) | 21
-rw-r--r--  chromium/media/audio/scoped_task_runner_observer.h | 52
-rw-r--r--  chromium/media/audio/simple_sources.cc | 6
-rw-r--r--  chromium/media/audio/simple_sources.h | 3
-rw-r--r--  chromium/media/audio/sounds/audio_stream_handler.cc | 127
-rw-r--r--  chromium/media/audio/sounds/audio_stream_handler.h | 10
-rw-r--r--  chromium/media/audio/sounds/audio_stream_handler_unittest.cc | 8
-rw-r--r--  chromium/media/audio/sounds/sounds_manager.cc | 71
-rw-r--r--  chromium/media/audio/sounds/sounds_manager.h | 4
-rw-r--r--  chromium/media/audio/sounds/sounds_manager_unittest.cc | 2
-rw-r--r--  chromium/media/audio/sounds/wav_audio_handler.cc | 34
-rw-r--r--  chromium/media/audio/sounds/wav_audio_handler.h | 14
-rw-r--r--  chromium/media/audio/sounds/wav_audio_handler_unittest.cc | 20
-rw-r--r--  chromium/media/audio/test_audio_input_controller_factory.cc | 4
-rw-r--r--  chromium/media/audio/virtual_audio_input_stream.cc | 28
-rw-r--r--  chromium/media/audio/virtual_audio_input_stream.h | 12
-rw-r--r--  chromium/media/audio/virtual_audio_input_stream_unittest.cc | 52
-rw-r--r--  chromium/media/audio/virtual_audio_output_stream_unittest.cc | 30
-rw-r--r--  chromium/media/audio/win/audio_device_listener_win.cc | 69
-rw-r--r--  chromium/media/audio/win/audio_device_listener_win.h | 3
-rw-r--r--  chromium/media/audio/win/audio_device_listener_win_unittest.cc | 2
-rw-r--r--  chromium/media/audio/win/audio_low_latency_input_win.cc | 150
-rw-r--r--  chromium/media/audio/win/audio_low_latency_input_win.h | 27
-rw-r--r--  chromium/media/audio/win/audio_low_latency_input_win_unittest.cc | 149
-rw-r--r--  chromium/media/audio/win/audio_low_latency_output_win.cc | 60
-rw-r--r--  chromium/media/audio/win/audio_low_latency_output_win.h | 5
-rw-r--r--  chromium/media/audio/win/audio_low_latency_output_win_unittest.cc | 41
-rw-r--r--  chromium/media/audio/win/audio_manager_win.cc | 127
-rw-r--r--  chromium/media/audio/win/audio_manager_win.h | 15
-rw-r--r--  chromium/media/audio/win/audio_output_win_unittest.cc | 79
-rw-r--r--  chromium/media/audio/win/audio_unified_win.cc | 984
-rw-r--r--  chromium/media/audio/win/audio_unified_win.h | 352
-rw-r--r--  chromium/media/audio/win/audio_unified_win_unittest.cc | 366
-rw-r--r--  chromium/media/audio/win/core_audio_util_win.cc | 47
-rw-r--r--  chromium/media/audio/win/core_audio_util_win.h | 4
-rw-r--r--  chromium/media/audio/win/core_audio_util_win_unittest.cc | 2
-rw-r--r--  chromium/media/audio/win/device_enumeration_win.cc | 11
-rw-r--r--  chromium/media/audio/win/wavein_input_win.cc | 21
-rw-r--r--  chromium/media/audio/win/wavein_input_win.h | 5
-rw-r--r--  chromium/media/base/android/OWNERS | 6
-rw-r--r--  chromium/media/base/android/audio_decoder_job.cc | 130
-rw-r--r--  chromium/media/base/android/audio_decoder_job.h | 56
-rw-r--r--  chromium/media/base/android/browser_cdm_factory_android.cc | 53
-rw-r--r--  chromium/media/base/android/demuxer_android.h | 12
-rw-r--r--  chromium/media/base/android/demuxer_stream_player_params.cc | 3
-rw-r--r--  chromium/media/base/android/demuxer_stream_player_params.h | 15
-rw-r--r--  chromium/media/base/android/media_codec_bridge.cc | 57
-rw-r--r--  chromium/media/base/android/media_codec_bridge.h | 27
-rw-r--r--  chromium/media/base/android/media_decoder_job.cc | 447
-rw-r--r--  chromium/media/base/android/media_decoder_job.h | 245
-rw-r--r--  chromium/media/base/android/media_drm_bridge.cc | 354
-rw-r--r--  chromium/media/base/android/media_drm_bridge.h | 105
-rw-r--r--  chromium/media/base/android/media_drm_bridge_unittest.cc | 96
-rw-r--r--  chromium/media/base/android/media_player_android.cc | 26
-rw-r--r--  chromium/media/base/android/media_player_android.h | 34
-rw-r--r--  chromium/media/base/android/media_player_bridge.cc | 94
-rw-r--r--  chromium/media/base/android/media_player_bridge.h | 30
-rw-r--r--  chromium/media/base/android/media_player_listener.cc | 22
-rw-r--r--  chromium/media/base/android/media_player_listener.h | 8
-rw-r--r--  chromium/media/base/android/media_player_manager.h | 57
-rw-r--r--  chromium/media/base/android/media_resource_getter.h | 5
-rw-r--r--  chromium/media/base/android/media_source_player.cc | 629
-rw-r--r--  chromium/media/base/android/media_source_player.h | 127
-rw-r--r--  chromium/media/base/android/media_source_player_unittest.cc | 1338
-rw-r--r--  chromium/media/base/android/video_decoder_job.cc | 139
-rw-r--r--  chromium/media/base/android/video_decoder_job.h | 75
-rw-r--r--  chromium/media/base/audio_buffer.cc | 143
-rw-r--r--  chromium/media/base/audio_buffer.h | 40
-rw-r--r--  chromium/media/base/audio_buffer_converter.cc | 249
-rw-r--r--  chromium/media/base/audio_buffer_converter.h | 108
-rw-r--r--  chromium/media/base/audio_buffer_converter_unittest.cc | 255
-rw-r--r--  chromium/media/base/audio_buffer_queue_unittest.cc | 409
-rw-r--r--  chromium/media/base/audio_buffer_unittest.cc | 526
-rw-r--r--  chromium/media/base/audio_bus.cc | 11
-rw-r--r--  chromium/media/base/audio_bus.h | 11
-rw-r--r--  chromium/media/base/audio_bus_unittest.cc | 4
-rw-r--r--  chromium/media/base/audio_capturer_source.h | 2
-rw-r--r--  chromium/media/base/audio_converter.cc | 82
-rw-r--r--  chromium/media/base/audio_converter.h | 15
-rw-r--r--  chromium/media/base/audio_converter_unittest.cc | 7
-rw-r--r--  chromium/media/base/audio_decoder.cc | 2
-rw-r--r--  chromium/media/base/audio_decoder.h | 67
-rw-r--r--  chromium/media/base/audio_decoder_config.cc | 36
-rw-r--r--  chromium/media/base/audio_decoder_config.h | 54
-rw-r--r--  chromium/media/base/audio_discard_helper.cc | 199
-rw-r--r--  chromium/media/base/audio_discard_helper.h | 81
-rw-r--r--  chromium/media/base/audio_discard_helper_unittest.cc | 481
-rw-r--r--  chromium/media/base/audio_fifo.cc | 15
-rw-r--r--  chromium/media/base/audio_fifo.h | 8
-rw-r--r--  chromium/media/base/audio_hardware_config.cc | 56
-rw-r--r--  chromium/media/base/audio_hardware_config.h | 4
-rw-r--r--  chromium/media/base/audio_hardware_config_unittest.cc | 39
-rw-r--r--  chromium/media/base/audio_renderer.h | 17
-rw-r--r--  chromium/media/base/audio_renderer_mixer.cc | 51
-rw-r--r--  chromium/media/base/audio_renderer_mixer.h | 21
-rw-r--r--  chromium/media/base/audio_renderer_mixer_input.cc | 33
-rw-r--r--  chromium/media/base/audio_renderer_mixer_input.h | 2
-rw-r--r--  chromium/media/base/audio_renderer_mixer_input_unittest.cc | 10
-rw-r--r--  chromium/media/base/audio_renderer_mixer_unittest.cc | 20
-rw-r--r--  chromium/media/base/audio_renderer_sink.h | 5
-rw-r--r--  chromium/media/base/audio_splicer.cc | 441
-rw-r--r--  chromium/media/base/audio_splicer.h | 108
-rw-r--r--  chromium/media/base/audio_splicer_unittest.cc | 607
-rw-r--r--  chromium/media/base/audio_timestamp_helper.h | 3
-rw-r--r--  chromium/media/base/audio_video_metadata_extractor.cc | 260
-rw-r--r--  chromium/media/base/audio_video_metadata_extractor.h | 106
-rw-r--r--  chromium/media/base/audio_video_metadata_extractor_unittest.cc | 208
-rw-r--r--  chromium/media/base/bind_to_current_loop.h | 162
-rw-r--r--  chromium/media/base/bind_to_current_loop.h.pump (renamed from chromium/media/base/bind_to_loop.h.pump) | 44
-rw-r--r--  chromium/media/base/bind_to_current_loop_unittest.cc (renamed from chromium/media/base/bind_to_loop_unittest.cc) | 69
-rw-r--r--  chromium/media/base/bind_to_loop.h | 168
-rw-r--r--  chromium/media/base/bit_reader.cc | 83
-rw-r--r--  chromium/media/base/bit_reader.h | 77
-rw-r--r--  chromium/media/base/bit_reader_core.cc | 159
-rw-r--r--  chromium/media/base/bit_reader_core.h | 118
-rw-r--r--  chromium/media/base/bit_reader_unittest.cc | 21
-rw-r--r--  chromium/media/base/browser_cdm.cc | 15
-rw-r--r--  chromium/media/base/browser_cdm.h | 63
-rw-r--r--  chromium/media/base/browser_cdm_factory.h | 29
-rw-r--r--  chromium/media/base/buffering_state.h | 26
-rw-r--r--  chromium/media/base/buffers.h | 2
-rw-r--r--  chromium/media/base/callback_holder.h | 2
-rw-r--r--  chromium/media/base/cdm_promise.cc | 74
-rw-r--r--  chromium/media/base/cdm_promise.h | 87
-rw-r--r--  chromium/media/base/channel_layout.cc | 14
-rw-r--r--  chromium/media/base/channel_layout.h | 12
-rw-r--r--  chromium/media/base/channel_mixer.cc | 5
-rw-r--r--  chromium/media/base/channel_mixer_unittest.cc | 8
-rw-r--r--  chromium/media/base/clock.cc | 24
-rw-r--r--  chromium/media/base/clock.h | 7
-rw-r--r--  chromium/media/base/container_names.h | 2
-rw-r--r--  chromium/media/base/data_source.cc | 12
-rw-r--r--  chromium/media/base/data_source.h | 25
-rw-r--r--  chromium/media/base/decoder_buffer.cc | 10
-rw-r--r--  chromium/media/base/decoder_buffer.h | 41
-rw-r--r--  chromium/media/base/decoder_buffer_queue.cc | 14
-rw-r--r--  chromium/media/base/decoder_buffer_queue.h | 6
-rw-r--r--  chromium/media/base/decoder_buffer_queue_unittest.cc | 31
-rw-r--r--  chromium/media/base/decrypt_config.cc | 3
-rw-r--r--  chromium/media/base/decrypt_config.h | 12
-rw-r--r--  chromium/media/base/demuxer.h | 28
-rw-r--r--  chromium/media/base/demuxer_perftest.cc | 6
-rw-r--r--  chromium/media/base/demuxer_stream.h | 12
-rw-r--r--  chromium/media/base/fake_text_track_stream.cc | 6
-rw-r--r--  chromium/media/base/fake_text_track_stream.h | 3
-rw-r--r--  chromium/media/base/media.cc | 5
-rw-r--r--  chromium/media/base/media_file_checker.cc | 20
-rw-r--r--  chromium/media/base/media_file_checker.h | 8
-rw-r--r--  chromium/media/base/media_file_checker_unittest.cc | 17
-rw-r--r--  chromium/media/base/media_keys.h | 80
-rw-r--r--  chromium/media/base/media_log.cc | 19
-rw-r--r--  chromium/media/base/media_log.h | 1
-rw-r--r--  chromium/media/base/media_log_event.h | 4
-rw-r--r--  chromium/media/base/media_switches.cc | 48
-rw-r--r--  chromium/media/base/media_switches.h | 19
-rw-r--r--  chromium/media/base/mock_data_source_host.cc | 13
-rw-r--r--  chromium/media/base/mock_data_source_host.h | 32
-rw-r--r--  chromium/media/base/mock_demuxer_host.h | 7
-rw-r--r--  chromium/media/base/mock_filters.h | 36
-rw-r--r--  chromium/media/base/multi_channel_resampler.cc | 5
-rw-r--r--  chromium/media/base/multi_channel_resampler.h | 4
-rw-r--r--  chromium/media/base/pipeline.cc | 527
-rw-r--r--  chromium/media/base/pipeline.h | 185
-rw-r--r--  chromium/media/base/pipeline_status.h | 3
-rw-r--r--  chromium/media/base/pipeline_unittest.cc | 265
-rw-r--r--  chromium/media/base/player_tracker.cc | 15
-rw-r--r--  chromium/media/base/player_tracker.h | 41
-rw-r--r--  chromium/media/base/run_all_perftests.cc | 51
-rw-r--r--  chromium/media/base/run_all_unittests.cc | 4
-rw-r--r--  chromium/media/base/sample_format.cc | 4
-rw-r--r--  chromium/media/base/sample_format.h | 7
-rw-r--r--  chromium/media/base/seekable_buffer_unittest.cc | 15
-rw-r--r--  chromium/media/base/serial_runner.cc | 47
-rw-r--r--  chromium/media/base/serial_runner.h | 9
-rw-r--r--  chromium/media/base/serial_runner_unittest.cc | 52
-rw-r--r--  chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc | 9
-rw-r--r--  chromium/media/base/simd/convert_yuv_to_rgb.h | 122
-rw-r--r--  chromium/media/base/simd/convert_yuv_to_rgb_c.cc | 89
-rw-r--r--  chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm | 3
-rw-r--r--  chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc | 60
-rw-r--r--  chromium/media/base/simd/convert_yuv_to_rgb_sse.asm | 1
-rw-r--r--  chromium/media/base/simd/convert_yuv_to_rgb_x86.cc | 16
-rw-r--r--  chromium/media/base/simd/convert_yuva_to_argb_mmx.asm | 1
-rw-r--r--  chromium/media/base/simd/convert_yuva_to_argb_mmx.inc | 110
-rw-r--r--  chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm | 1
-rw-r--r--  chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc | 45
-rw-r--r--  chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm | 8
-rw-r--r--  chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm | 1
-rw-r--r--  chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc | 27
-rw-r--r--  chromium/media/base/simd/scale_yuv_to_rgb_sse.asm | 1
-rw-r--r--  chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm | 8
-rw-r--r--  chromium/media/base/simd/sinc_resampler_sse.cc | 48
-rw-r--r--  chromium/media/base/simd/vector_math_sse.cc | 118
-rw-r--r--  chromium/media/base/simd/yuv_to_rgb_table.cc | 331
-rw-r--r--  chromium/media/base/simd/yuv_to_rgb_table.h | 1
-rw-r--r--  chromium/media/base/sinc_resampler.cc | 113
-rw-r--r--  chromium/media/base/sinc_resampler.h | 18
-rw-r--r--  chromium/media/base/sinc_resampler_perftest.cc | 4
-rw-r--r--  chromium/media/base/sinc_resampler_unittest.cc | 5
-rw-r--r--  chromium/media/base/stream_parser.cc | 124
-rw-r--r--  chromium/media/base/stream_parser.h | 92
-rw-r--r--  chromium/media/base/stream_parser_buffer.cc | 145
-rw-r--r--  chromium/media/base/stream_parser_buffer.h | 66
-rw-r--r--  chromium/media/base/stream_parser_unittest.cc | 382
-rw-r--r--  chromium/media/base/test_data_util.cc | 16
-rw-r--r--  chromium/media/base/test_helpers.cc | 136
-rw-r--r--  chromium/media/base/test_helpers.h | 66
-rw-r--r--  chromium/media/base/text_ranges.cc | 141
-rw-r--r--  chromium/media/base/text_ranges.h | 95
-rw-r--r--  chromium/media/base/text_ranges_unittest.cc | 147
-rw-r--r--  chromium/media/base/text_renderer.cc | 62
-rw-r--r--  chromium/media/base/text_renderer.h | 18
-rw-r--r--  chromium/media/base/user_input_monitor_linux.cc | 6
-rw-r--r--  chromium/media/base/user_input_monitor_unittest.cc | 4
-rw-r--r--  chromium/media/base/user_input_monitor_win.cc | 13
-rw-r--r--  chromium/media/base/vector_math.cc | 158
-rw-r--r--  chromium/media/base/vector_math.h | 7
-rw-r--r--  chromium/media/base/vector_math_perftest.cc | 39
-rw-r--r--  chromium/media/base/vector_math_testing.h | 2
-rw-r--r--  chromium/media/base/vector_math_unittest.cc | 24
-rw-r--r--  chromium/media/base/video_decoder.cc | 10
-rw-r--r--  chromium/media/base/video_decoder.h | 65
-rw-r--r--  chromium/media/base/video_decoder_config.cc | 2
-rw-r--r--  chromium/media/base/video_decoder_config.h | 1
-rw-r--r--  chromium/media/base/video_frame.cc | 549
-rw-r--r--  chromium/media/base/video_frame.h | 164
-rw-r--r--  chromium/media/base/video_frame_pool.cc | 7
-rw-r--r--  chromium/media/base/video_frame_pool_unittest.cc | 18
-rw-r--r--  chromium/media/base/video_frame_unittest.cc | 153
-rw-r--r--  chromium/media/base/video_renderer.h | 19
-rw-r--r--  chromium/media/base/yuv_convert.cc | 71
-rw-r--r--  chromium/media/base/yuv_convert.h | 13
-rw-r--r--  chromium/media/base/yuv_convert_perftest.cc | 226
-rw-r--r--  chromium/media/base/yuv_convert_unittest.cc | 104
-rw-r--r--  chromium/media/cast/DEPS | 2
-rw-r--r--  chromium/media/cast/OWNERS | 1
-rw-r--r--  chromium/media/cast/README | 7
-rw-r--r--  chromium/media/cast/audio_receiver/audio_decoder.cc | 161
-rw-r--r--  chromium/media/cast/audio_receiver/audio_decoder.h | 71
-rw-r--r--  chromium/media/cast/audio_receiver/audio_decoder_unittest.cc | 220
-rw-r--r--  chromium/media/cast/audio_receiver/audio_receiver.cc | 490
-rw-r--r--  chromium/media/cast/audio_receiver/audio_receiver.gypi | 29
-rw-r--r--  chromium/media/cast/audio_receiver/audio_receiver.h | 143
-rw-r--r--  chromium/media/cast/audio_receiver/audio_receiver_unittest.cc | 217
-rw-r--r--  chromium/media/cast/audio_sender/audio_encoder.cc | 300
-rw-r--r--  chromium/media/cast/audio_sender/audio_encoder.h | 34
-rw-r--r--  chromium/media/cast/audio_sender/audio_encoder_unittest.cc | 246
-rw-r--r--  chromium/media/cast/audio_sender/audio_sender.cc | 433
-rw-r--r--  chromium/media/cast/audio_sender/audio_sender.gypi | 32
-rw-r--r--  chromium/media/cast/audio_sender/audio_sender.h | 167
-rw-r--r--  chromium/media/cast/audio_sender/audio_sender_unittest.cc | 137
-rw-r--r--  chromium/media/cast/base/clock_drift_smoother.cc | 58
-rw-r--r--  chromium/media/cast/base/clock_drift_smoother.h | 52
-rw-r--r--  chromium/media/cast/cast.gyp | 315
-rw-r--r--  chromium/media/cast/cast_config.cc | 69
-rw-r--r--  chromium/media/cast/cast_config.h | 230
-rw-r--r--  chromium/media/cast/cast_defines.h | 111
-rw-r--r--  chromium/media/cast/cast_environment.cc | 112
-rw-r--r--  chromium/media/cast/cast_environment.h | 68
-rw-r--r--  chromium/media/cast/cast_receiver.gyp | 33
-rw-r--r--  chromium/media/cast/cast_receiver.h | 100
-rw-r--r--  chromium/media/cast/cast_receiver_impl.cc | 175
-rw-r--r--  chromium/media/cast/cast_receiver_impl.h | 50
-rw-r--r--  chromium/media/cast/cast_sender.gyp | 36
-rw-r--r--  chromium/media/cast/cast_sender.h | 103
-rw-r--r--  chromium/media/cast/cast_sender_impl.cc | 274
-rw-r--r--  chromium/media/cast/cast_sender_impl.h | 57
-rw-r--r--  chromium/media/cast/cast_testing.gypi | 276
-rw-r--r--  chromium/media/cast/congestion_control/congestion_control.cc | 231
-rw-r--r--  chromium/media/cast/congestion_control/congestion_control.gypi | 23
-rw-r--r--  chromium/media/cast/congestion_control/congestion_control.h | 60
-rw-r--r--  chromium/media/cast/congestion_control/congestion_control_unittest.cc | 234
-rw-r--r--  chromium/media/cast/framer/cast_message_builder.cc | 90
-rw-r--r--  chromium/media/cast/framer/cast_message_builder.h | 7
-rw-r--r--  chromium/media/cast/framer/cast_message_builder_unittest.cc | 203
-rw-r--r--  chromium/media/cast/framer/frame_buffer.cc | 70
-rw-r--r--  chromium/media/cast/framer/frame_buffer.h | 10
-rw-r--r--  chromium/media/cast/framer/frame_buffer_unittest.cc | 50
-rw-r--r--  chromium/media/cast/framer/frame_id_map.cc | 129
-rw-r--r--  chromium/media/cast/framer/frame_id_map.h | 15
-rw-r--r--  chromium/media/cast/framer/framer.cc | 85
-rw-r--r--  chromium/media/cast/framer/framer.gyp | 27
-rw-r--r--  chromium/media/cast/framer/framer.h | 27
-rw-r--r--  chromium/media/cast/framer/framer_unittest.cc | 419
-rw-r--r--  chromium/media/cast/logging/encoding_event_subscriber.cc | 286
-rw-r--r--  chromium/media/cast/logging/encoding_event_subscriber.h | 122
-rw-r--r--  chromium/media/cast/logging/encoding_event_subscriber_unittest.cc | 668
-rw-r--r--  chromium/media/cast/logging/log_deserializer.cc | 252
-rw-r--r--  chromium/media/cast/logging/log_deserializer.h | 51
-rw-r--r--  chromium/media/cast/logging/log_serializer.cc | 190
-rw-r--r--  chromium/media/cast/logging/log_serializer.h | 37
-rw-r--r--  chromium/media/cast/logging/logging_defines.cc | 110
-rw-r--r--  chromium/media/cast/logging/logging_defines.h | 142
-rw-r--r--  chromium/media/cast/logging/logging_impl.cc | 272
-rw-r--r--  chromium/media/cast/logging/logging_impl.h | 89
-rw-r--r--  chromium/media/cast/logging/logging_impl_unittest.cc | 234
-rw-r--r--  chromium/media/cast/logging/logging_internal.cc | 79
-rw-r--r--  chromium/media/cast/logging/logging_internal.h | 95
-rw-r--r--  chromium/media/cast/logging/logging_raw.cc | 173
-rw-r--r--  chromium/media/cast/logging/logging_raw.h | 94
-rw-r--r--  chromium/media/cast/logging/logging_raw_unittest.cc | 196
-rw-r--r--  chromium/media/cast/logging/logging_stats.cc | 150
-rw-r--r--  chromium/media/cast/logging/logging_stats.h | 75
-rw-r--r--  chromium/media/cast/logging/logging_unittest.cc | 248
-rw-r--r--  chromium/media/cast/logging/proto/proto_utils.cc | 36
-rw-r--r--  chromium/media/cast/logging/proto/proto_utils.h | 21
-rw-r--r--  chromium/media/cast/logging/proto/raw_events.proto | 149
-rw-r--r--  chromium/media/cast/logging/raw_event_subscriber.h | 32
-rw-r--r--  chromium/media/cast/logging/raw_event_subscriber_bundle.cc | 99
-rw-r--r--  chromium/media/cast/logging/raw_event_subscriber_bundle.h | 84
-rw-r--r--  chromium/media/cast/logging/receiver_time_offset_estimator.h | 39
-rw-r--r--  chromium/media/cast/logging/receiver_time_offset_estimator_impl.cc | 129
-rw-r--r--  chromium/media/cast/logging/receiver_time_offset_estimator_impl.h | 64
-rw-r--r--  chromium/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc | 242
-rw-r--r--  chromium/media/cast/logging/serialize_deserialize_test.cc | 214
-rw-r--r--  chromium/media/cast/logging/simple_event_subscriber.cc | 46
-rw-r--r--  chromium/media/cast/logging/simple_event_subscriber.h | 52
-rw-r--r--  chromium/media/cast/logging/simple_event_subscriber_unittest.cc | 87
-rw-r--r--  chromium/media/cast/logging/stats_event_subscriber.cc | 400
-rw-r--r--  chromium/media/cast/logging/stats_event_subscriber.h | 176
-rw-r--r--  chromium/media/cast/logging/stats_event_subscriber_unittest.cc | 401
-rw-r--r--  chromium/media/cast/net/cast_net_defines.h | 81
-rw-r--r--  chromium/media/cast/net/pacing/mock_paced_packet_sender.h | 27
-rw-r--r--  chromium/media/cast/net/pacing/paced_sender.cc | 148
-rw-r--r--  chromium/media/cast/net/pacing/paced_sender.gyp | 22
-rw-r--r--  chromium/media/cast/net/pacing/paced_sender.h | 83
-rw-r--r--  chromium/media/cast/net/pacing/paced_sender_unittest.cc | 257
-rw-r--r--  chromium/media/cast/net/rtp_sender/mock_rtp_sender.h | 34
-rw-r--r--  chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc | 174
-rw-r--r--  chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp | 23
-rw-r--r--  chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h | 55
-rw-r--r--  chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc | 110
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc | 153
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp | 27
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h | 73
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc | 21
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h | 39
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc | 153
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_sender.cc | 145
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_sender.gyp | 26
-rw-r--r--  chromium/media/cast/net/rtp_sender/rtp_sender.h | 66
-rw-r--r--  chromium/media/cast/receiver/audio_decoder.cc | 246
-rw-r--r--  chromium/media/cast/receiver/audio_decoder.h | 64
-rw-r--r--  chromium/media/cast/receiver/audio_decoder_unittest.cc | 241
-rw-r--r--  chromium/media/cast/receiver/cast_receiver_impl.cc | 232
-rw-r--r--  chromium/media/cast/receiver/cast_receiver_impl.h | 122
-rw-r--r--  chromium/media/cast/receiver/frame_receiver.cc | 326
-rw-r--r--  chromium/media/cast/receiver/frame_receiver.h | 184
-rw-r--r--  chromium/media/cast/receiver/frame_receiver_unittest.cc | 419
-rw-r--r--  chromium/media/cast/receiver/video_decoder.cc | 259
-rw-r--r--  chromium/media/cast/receiver/video_decoder.h | 63
-rw-r--r--  chromium/media/cast/receiver/video_decoder_unittest.cc | 183
-rw-r--r--  chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc | 12
-rw-r--r--  chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h | 6
-rw-r--r--  chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc | 6
-rw-r--r--  chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.cc | 96
-rw-r--r--  chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.h | 79
-rw-r--r--  chromium/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc | 131
-rw-r--r--  chromium/media/cast/rtcp/rtcp.cc | 433
-rw-r--r--  chromium/media/cast/rtcp/rtcp.gyp | 46
-rw-r--r--  chromium/media/cast/rtcp/rtcp.h | 132
-rw-r--r--  chromium/media/cast/rtcp/rtcp_defines.cc | 49
-rw-r--r--  chromium/media/cast/rtcp/rtcp_defines.h | 118
-rw-r--r--  chromium/media/cast/rtcp/rtcp_receiver.cc | 197
-rw-r--r--  chromium/media/cast/rtcp/rtcp_receiver.h | 26
-rw-r--r--  chromium/media/cast/rtcp/rtcp_receiver_unittest.cc | 256
-rw-r--r--  chromium/media/cast/rtcp/rtcp_sender.cc | 798
-rw-r--r--  chromium/media/cast/rtcp/rtcp_sender.h | 115
-rw-r--r--  chromium/media/cast/rtcp/rtcp_sender_unittest.cc | 616
-rw-r--r--  chromium/media/cast/rtcp/rtcp_unittest.cc | 509
-rw-r--r--  chromium/media/cast/rtcp/rtcp_utility.cc | 289
-rw-r--r--  chromium/media/cast/rtcp/rtcp_utility.h | 101
-rw-r--r--  chromium/media/cast/rtcp/test_rtcp_packet_builder.cc | 62
-rw-r--r--  chromium/media/cast/rtcp/test_rtcp_packet_builder.h | 30
-rw-r--r--  chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc | 6
-rw-r--r--  chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h | 3
-rw-r--r--  chromium/media/cast/rtp_receiver/receiver_stats.cc | 25
-rw-r--r--  chromium/media/cast/rtp_receiver/receiver_stats.h | 15
-rw-r--r--  chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc | 94
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h | 16
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc | 173
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp | 23
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h | 57
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc | 231
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_receiver.cc | 70
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_receiver.gyp | 26
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_receiver.h | 56
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_receiver_defines.cc | 25
-rw-r--r--  chromium/media/cast/rtp_receiver/rtp_receiver_defines.h | 26
-rw-r--r--  chromium/media/cast/rtp_timestamp_helper.cc | 36
-rw-r--r--  chromium/media/cast/rtp_timestamp_helper.h | 41
-rw-r--r--  chromium/media/cast/test/transport/transport.gyp | 22
-rw-r--r--  chromium/media/cast/test/utility/utility.gyp | 28
-rw-r--r--  chromium/media/cast/transport/cast_transport_config.cc | 82
-rw-r--r--  chromium/media/cast/transport/cast_transport_config.h | 221
-rw-r--r--  chromium/media/cast/transport/cast_transport_defines.h | 169
-rw-r--r--  chromium/media/cast/transport/cast_transport_sender.h | 113
-rw-r--r--  chromium/media/cast/transport/cast_transport_sender_impl.cc | 212
-rw-r--r--  chromium/media/cast/transport/cast_transport_sender_impl.h | 110
-rw-r--r--  chromium/media/cast/transport/cast_transport_sender_impl_unittest.cc | 113
-rw-r--r--  chromium/media/cast/transport/frame_id_wrap_helper_test.cc (renamed from chromium/media/cast/net/frame_id_wrap_helper_test.cc) | 14
-rw-r--r--  chromium/media/cast/transport/pacing/mock_paced_packet_sender.cc (renamed from chromium/media/cast/net/pacing/mock_paced_packet_sender.cc) | 10
-rw-r--r--  chromium/media/cast/transport/pacing/mock_paced_packet_sender.h | 31
-rw-r--r--  chromium/media/cast/transport/pacing/paced_sender.cc | 260
-rw-r--r--  chromium/media/cast/transport/pacing/paced_sender.h | 147
-rw-r--r--  chromium/media/cast/transport/pacing/paced_sender_unittest.cc | 351
-rw-r--r--  chromium/media/cast/transport/rtcp/rtcp_builder.cc | 197
-rw-r--r--  chromium/media/cast/transport/rtcp/rtcp_builder.h | 49
-rw-r--r--  chromium/media/cast/transport/rtcp/rtcp_builder_unittest.cc | 164
-rw-r--r--  chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.cc | 65
-rw-r--r--  chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.h | 62
-rw-r--r--  chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc | 115
-rw-r--r--  chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc | 137
-rw-r--r--  chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h | 86
-rw-r--r--  chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc | 175
-rw-r--r--  chromium/media/cast/transport/rtp_sender/rtp_sender.cc | 150
-rw-r--r--  chromium/media/cast/transport/rtp_sender/rtp_sender.h | 85
-rw-r--r--  chromium/media/cast/transport/transport/udp_transport.cc | 242
-rw-r--r--  chromium/media/cast/transport/transport/udp_transport.h | 97
-rw-r--r--  chromium/media/cast/transport/transport/udp_transport_unittest.cc | 100
-rw-r--r--  chromium/media/cast/transport/utility/transport_encryption_handler.cc | 76
-rw-r--r--  chromium/media/cast/transport/utility/transport_encryption_handler.h | 58
-rw-r--r--  chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc | 106
-rw-r--r--  chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp | 25
-rw-r--r--  chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h | 46
-rw-r--r--  chromium/media/cast/video_receiver/video_decoder.cc | 44
-rw-r--r--  chromium/media/cast/video_receiver/video_decoder.h | 43
-rw-r--r--  chromium/media/cast/video_receiver/video_decoder_unittest.cc | 94
-rw-r--r--  chromium/media/cast/video_receiver/video_receiver.cc | 465
-rw-r--r--  chromium/media/cast/video_receiver/video_receiver.gypi | 31
-rw-r--r--  chromium/media/cast/video_receiver/video_receiver.h | 133
-rw-r--r--  chromium/media/cast/video_receiver/video_receiver_unittest.cc | 169
-rw-r--r--  chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc | 193
-rw-r--r--  chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi | 20
-rw-r--r--  chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h | 34
-rw-r--r--  chromium/media/cast/video_sender/external_video_encoder.cc | 436
-rw-r--r--  chromium/media/cast/video_sender/external_video_encoder.h | 86
-rw-r--r--  chromium/media/cast/video_sender/external_video_encoder_unittest.cc | 191
-rw-r--r--  chromium/media/cast/video_sender/fake_software_video_encoder.cc | 69
-rw-r--r--  chromium/media/cast/video_sender/fake_software_video_encoder.h | 38
-rw-r--r--  chromium/media/cast/video_sender/mock_video_encoder_controller.cc | 17
-rw-r--r--  chromium/media/cast/video_sender/mock_video_encoder_controller.h | 34
-rw-r--r--  chromium/media/cast/video_sender/software_video_encoder.h | 46
-rw-r--r--  chromium/media/cast/video_sender/video_encoder.cc | 123
-rw-r--r--  chromium/media/cast/video_sender/video_encoder.h | 66
-rw-r--r--  chromium/media/cast/video_sender/video_encoder_impl.cc | 139
-rw-r--r--  chromium/media/cast/video_sender/video_encoder_impl.h | 72
-rw-r--r--  chromium/media/cast/video_sender/video_encoder_impl_unittest.cc | 260
-rw-r--r--  chromium/media/cast/video_sender/video_encoder_unittest.cc | 247
-rw-r--r--  chromium/media/cast/video_sender/video_sender.cc | 632
-rw-r--r--  chromium/media/cast/video_sender/video_sender.gypi | 34
-rw-r--r--  chromium/media/cast/video_sender/video_sender.h | 198
-rw-r--r--  chromium/media/cast/video_sender/video_sender_unittest.cc | 483
-rw-r--r--  chromium/media/cdm/aes_decryptor.cc | 171
-rw-r--r--  chromium/media/cdm/aes_decryptor.h | 41
-rw-r--r--  chromium/media/cdm/aes_decryptor_unittest.cc | 341
-rw-r--r--  chromium/media/cdm/json_web_key.cc | 15
-rw-r--r--  chromium/media/cdm/key_system_names.cc | 4
-rw-r--r--  chromium/media/cdm/key_system_names.h | 7
-rw-r--r--  chromium/media/cdm/player_tracker_impl.cc | 57
-rw-r--r--  chromium/media/cdm/player_tracker_impl.h | 54
-rw-r--r--  chromium/media/cdm/ppapi/api/content_decryption_module.h | 568
-rw-r--r--  chromium/media/cdm/ppapi/cdm_adapter.cc | 556
-rw-r--r--  chromium/media/cdm/ppapi/cdm_adapter.h | 156
-rw-r--r--  chromium/media/cdm/ppapi/cdm_file_io_impl.cc | 457
-rw-r--r--  chromium/media/cdm/ppapi/cdm_file_io_impl.h | 166
-rw-r--r--  chromium/media/cdm/ppapi/cdm_file_io_test.cc | 454
-rw-r--r--  chromium/media/cdm/ppapi/cdm_file_io_test.h | 157
-rw-r--r--  chromium/media/cdm/ppapi/cdm_helpers.cc | 58
-rw-r--r--  chromium/media/cdm/ppapi/cdm_helpers.h | 54
-rw-r--r--  chromium/media/cdm/ppapi/cdm_wrapper.h | 431
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc (renamed from chromium/media/cdm/ppapi/cdm_video_decoder.cc) | 11
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/cdm_video_decoder.h (renamed from chromium/media/cdm/ppapi/cdm_video_decoder.h) | 8
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc (renamed from chromium/media/cdm/ppapi/clear_key_cdm.cc) | 458
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h (renamed from chromium/media/cdm/ppapi/clear_key_cdm.h) | 139
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h (renamed from chromium/media/cdm/ppapi/clear_key_cdm_common.h) | 8
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.cc (renamed from chromium/media/cdm/ppapi/fake_cdm_video_decoder.cc) | 2
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h (renamed from chromium/media/cdm/ppapi/fake_cdm_video_decoder.h) | 8
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc (renamed from chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc) | 4
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h (renamed from chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h) | 15
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc (renamed from chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc) | 12
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h (renamed from chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h) | 18
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc (renamed from chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc) | 4
-rw-r--r--  chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h (renamed from chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h) | 8
-rw-r--r--  chromium/media/cdm/ppapi/supported_cdm_versions.h | 14
-rw-r--r--  chromium/media/ffmpeg/ffmpeg_common.cc | 60
-rw-r--r--  chromium/media/ffmpeg/ffmpeg_common.h | 94
-rw-r--r--  chromium/media/ffmpeg/ffmpeg_common_unittest.cc | 54
-rw-r--r--  chromium/media/ffmpeg/ffmpeg_deleters.h | 42
-rw-r--r--  chromium/media/ffmpeg/ffmpeg_regression_tests.cc | 1
-rw-r--r--  chromium/media/ffmpeg/ffmpeg_unittest.cc | 8
-rw-r--r--  chromium/media/filters/audio_clock.cc | 135
-rw-r--r--  chromium/media/filters/audio_clock.h | 76
-rw-r--r--  chromium/media/filters/audio_clock_unittest.cc | 177
-rw-r--r--  chromium/media/filters/audio_decoder_selector.cc | 187
-rw-r--r--  chromium/media/filters/audio_decoder_selector.h | 91
-rw-r--r--  chromium/media/filters/audio_decoder_selector_unittest.cc | 50
-rw-r--r--  chromium/media/filters/audio_file_reader.cc | 70
-rw-r--r--  chromium/media/filters/audio_file_reader.h | 28
-rw-r--r--  chromium/media/filters/audio_file_reader_unittest.cc | 36
-rw-r--r--  chromium/media/filters/audio_renderer_algorithm_unittest.cc | 83
-rw-r--r--  chromium/media/filters/audio_renderer_impl.cc | 478
-rw-r--r--  chromium/media/filters/audio_renderer_impl.h | 118
-rw-r--r--  chromium/media/filters/audio_renderer_impl_unittest.cc | 583
-rw-r--r--  chromium/media/filters/chunk_demuxer.cc | 1136
-rw-r--r--  chromium/media/filters/chunk_demuxer.h | 194
-rw-r--r--  chromium/media/filters/chunk_demuxer_unittest.cc | 1297
-rw-r--r--  chromium/media/filters/clockless_video_frame_scheduler.cc | 34
-rw-r--r--  chromium/media/filters/clockless_video_frame_scheduler.h | 34
-rw-r--r--  chromium/media/filters/decoder_selector.cc | 242
-rw-r--r--  chromium/media/filters/decoder_selector.h | 104
-rw-r--r--  chromium/media/filters/decoder_stream.cc | 598
-rw-r--r--  chromium/media/filters/decoder_stream.h | 223
-rw-r--r--  chromium/media/filters/decoder_stream_traits.cc | 109
-rw-r--r--  chromium/media/filters/decoder_stream_traits.h | 74
-rw-r--r--  chromium/media/filters/decrypting_audio_decoder.cc | 332
-rw-r--r--  chromium/media/filters/decrypting_audio_decoder.h | 66
-rw-r--r--  chromium/media/filters/decrypting_audio_decoder_unittest.cc | 411
-rw-r--r--  chromium/media/filters/decrypting_demuxer_stream.cc | 61
-rw-r--r--  chromium/media/filters/decrypting_demuxer_stream.h | 15
-rw-r--r--  chromium/media/filters/decrypting_demuxer_stream_unittest.cc | 9
-rw-r--r--  chromium/media/filters/decrypting_video_decoder.cc | 105
-rw-r--r--  chromium/media/filters/decrypting_video_decoder.h | 21
-rw-r--r--  chromium/media/filters/decrypting_video_decoder_unittest.cc | 155
-rw-r--r--  chromium/media/filters/fake_demuxer_stream.cc | 70
-rw-r--r--  chromium/media/filters/fake_demuxer_stream.h | 27
-rw-r--r--  chromium/media/filters/fake_demuxer_stream_unittest.cc | 76
-rw-r--r--  chromium/media/filters/fake_video_decoder.cc | 233
-rw-r--r--  chromium/media/filters/fake_video_decoder.h | 66
-rw-r--r--  chromium/media/filters/fake_video_decoder_unittest.cc | 318
-rw-r--r--  chromium/media/filters/ffmpeg_audio_decoder.cc | 555
-rw-r--r--  chromium/media/filters/ffmpeg_audio_decoder.h | 116
-rw-r--r--  chromium/media/filters/ffmpeg_audio_decoder_unittest.cc | 154
-rw-r--r--  chromium/media/filters/ffmpeg_demuxer.cc | 324
-rw-r--r--  chromium/media/filters/ffmpeg_demuxer.h | 39
-rw-r--r--  chromium/media/filters/ffmpeg_demuxer_unittest.cc | 153
-rw-r--r--  chromium/media/filters/ffmpeg_glue.h | 5
-rw-r--r--  chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc | 74
-rw-r--r--  chromium/media/filters/ffmpeg_video_decoder.cc | 294
-rw-r--r--  chromium/media/filters/ffmpeg_video_decoder.h | 46
-rw-r--r--  chromium/media/filters/ffmpeg_video_decoder_unittest.cc | 165
-rw-r--r--  chromium/media/filters/file_data_source.cc | 34
-rw-r--r--  chromium/media/filters/file_data_source.h | 8
-rw-r--r--  chromium/media/filters/file_data_source_unittest.cc | 17
-rw-r--r--  chromium/media/filters/frame_processor.cc | 373
-rw-r--r--  chromium/media/filters/frame_processor.h | 57
-rw-r--r--  chromium/media/filters/frame_processor_base.cc | 214
-rw-r--r--  chromium/media/filters/frame_processor_base.h | 234
-rw-r--r--  chromium/media/filters/frame_processor_unittest.cc | 658
-rw-r--r--  chromium/media/filters/gpu_video_accelerator_factories.h | 65
-rw-r--r--  chromium/media/filters/gpu_video_decoder.cc | 368
-rw-r--r--  chromium/media/filters/gpu_video_decoder.h | 73
-rw-r--r--  chromium/media/filters/h264_bit_reader.cc | 113
-rw-r--r--  chromium/media/filters/h264_bit_reader.h | 79
-rw-r--r--  chromium/media/filters/h264_bit_reader_unittest.cc | 73
-rw-r--r--  chromium/media/filters/h264_parser.cc | 1264
-rw-r--r--  chromium/media/filters/h264_parser.h | 410
-rw-r--r--  chromium/media/filters/h264_parser_unittest.cc | 72
-rw-r--r--  chromium/media/filters/h264_to_annex_b_bitstream_converter.cc | 235
-rw-r--r--  chromium/media/filters/h264_to_annex_b_bitstream_converter.h | 113
-rw-r--r--  chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc | 138
-rw-r--r--  chromium/media/filters/in_memory_url_protocol.cc | 14
-rw-r--r--  chromium/media/filters/in_memory_url_protocol_unittest.cc | 51
-rw-r--r--  chromium/media/filters/mock_gpu_video_accelerator_factories.cc | 13
-rw-r--r--  chromium/media/filters/mock_gpu_video_accelerator_factories.h | 34
-rw-r--r--  chromium/media/filters/opus_audio_decoder.cc | 284
-rw-r--r--  chromium/media/filters/opus_audio_decoder.h | 60
-rw-r--r--  chromium/media/filters/opus_audio_decoder_unittest.cc | 219
-rw-r--r--  chromium/media/filters/pipeline_integration_test.cc | 625
-rw-r--r--  chromium/media/filters/pipeline_integration_test_base.cc | 56
-rw-r--r--  chromium/media/filters/pipeline_integration_test_base.h | 7
-rw-r--r--  chromium/media/filters/skcanvas_video_renderer.cc | 266
-rw-r--r--  chromium/media/filters/skcanvas_video_renderer_unittest.cc | 224
-rw-r--r--  chromium/media/filters/source_buffer_stream.cc | 525
-rw-r--r--  chromium/media/filters/source_buffer_stream.h | 68
-rw-r--r--  chromium/media/filters/source_buffer_stream_unittest.cc | 681
-rw-r--r--  chromium/media/filters/stream_parser_factory.cc | 67
-rw-r--r--  chromium/media/filters/test_video_frame_scheduler.cc | 66
-rw-r--r--  chromium/media/filters/test_video_frame_scheduler.h | 57
-rw-r--r--  chromium/media/filters/video_decoder_selector.cc | 184
-rw-r--r--  chromium/media/filters/video_decoder_selector.h | 88
-rw-r--r--  chromium/media/filters/video_decoder_selector_unittest.cc | 82
-rw-r--r--  chromium/media/filters/video_frame_scheduler.h | 48
-rw-r--r--  chromium/media/filters/video_frame_scheduler_impl.cc | 105
-rw-r--r--  chromium/media/filters/video_frame_scheduler_impl.h | 74
-rw-r--r--  chromium/media/filters/video_frame_scheduler_impl_unittest.cc | 150
-rw-r--r--  chromium/media/filters/video_frame_scheduler_proxy.cc | 48
-rw-r--r--  chromium/media/filters/video_frame_scheduler_proxy.h | 51
-rw-r--r--  chromium/media/filters/video_frame_scheduler_unittest.cc | 80
-rw-r--r--  chromium/media/filters/video_frame_stream.cc | 455
-rw-r--r--  chromium/media/filters/video_frame_stream.h | 162
-rw-r--r--  chromium/media/filters/video_frame_stream_unittest.cc | 319
-rw-r--r--  chromium/media/filters/video_renderer_impl.cc | 133
-rw-r--r--  chromium/media/filters/video_renderer_impl.h | 75
-rw-r--r--  chromium/media/filters/video_renderer_impl_unittest.cc | 587
-rw-r--r--  chromium/media/filters/vpx_video_decoder.cc | 272
-rw-r--r--  chromium/media/filters/vpx_video_decoder.h | 27
-rw-r--r--  chromium/media/filters/wsola_internals.cc | 28
-rw-r--r--  chromium/media/filters/wsola_internals.h | 20
-rw-r--r--  chromium/media/formats/common/offset_byte_queue.cc (renamed from chromium/media/mp4/offset_byte_queue.cc) | 4
-rw-r--r--  chromium/media/formats/common/offset_byte_queue.h (renamed from chromium/media/mp4/offset_byte_queue.h) | 8
-rw-r--r--  chromium/media/formats/common/offset_byte_queue_unittest.cc (renamed from chromium/media/mp4/offset_byte_queue_unittest.cc) | 4
-rw-r--r--  chromium/media/formats/common/stream_parser_test_base.cc | 128
-rw-r--r--  chromium/media/formats/common/stream_parser_test_base.h | 74
-rw-r--r--  chromium/media/formats/mp2t/es_parser.h (renamed from chromium/media/mp2t/es_parser.h) | 6
-rw-r--r--  chromium/media/formats/mp2t/es_parser_adts.cc (renamed from chromium/media/mp2t/es_parser_adts.cc) | 98
-rw-r--r--  chromium/media/formats/mp2t/es_parser_adts.h (renamed from chromium/media/mp2t/es_parser_adts.h) | 8
-rw-r--r--  chromium/media/formats/mp2t/es_parser_h264.cc | 332
-rw-r--r--  chromium/media/formats/mp2t/es_parser_h264.h (renamed from chromium/media/mp2t/es_parser_h264.h) | 65
-rw-r--r--  chromium/media/formats/mp2t/es_parser_h264_unittest.cc | 300
-rw-r--r--  chromium/media/formats/mp2t/mp2t_common.h (renamed from chromium/media/mp2t/mp2t_common.h) | 6
-rw-r--r--  chromium/media/formats/mp2t/mp2t_stream_parser.cc (renamed from chromium/media/mp2t/mp2t_stream_parser.cc) | 94
-rw-r--r--  chromium/media/formats/mp2t/mp2t_stream_parser.h (renamed from chromium/media/mp2t/mp2t_stream_parser.h) | 19
-rw-r--r--  chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc (renamed from chromium/media/mp2t/mp2t_stream_parser_unittest.cc) | 37
-rw-r--r--  chromium/media/formats/mp2t/ts_packet.cc (renamed from chromium/media/mp2t/ts_packet.cc) | 6
-rw-r--r--  chromium/media/formats/mp2t/ts_packet.h (renamed from chromium/media/mp2t/ts_packet.h) | 6
-rw-r--r--  chromium/media/formats/mp2t/ts_section.h (renamed from chromium/media/mp2t/ts_section.h) | 6
-rw-r--r--  chromium/media/formats/mp2t/ts_section_pat.cc (renamed from chromium/media/mp2t/ts_section_pat.cc) | 6
-rw-r--r--  chromium/media/formats/mp2t/ts_section_pat.h (renamed from chromium/media/mp2t/ts_section_pat.h) | 8
-rw-r--r--  chromium/media/formats/mp2t/ts_section_pes.cc (renamed from chromium/media/mp2t/ts_section_pes.cc) | 8
-rw-r--r--  chromium/media/formats/mp2t/ts_section_pes.h (renamed from chromium/media/mp2t/ts_section_pes.h) | 8
-rw-r--r--  chromium/media/formats/mp2t/ts_section_pmt.cc (renamed from chromium/media/mp2t/ts_section_pmt.cc) | 6
-rw-r--r--  chromium/media/formats/mp2t/ts_section_pmt.h (renamed from chromium/media/mp2t/ts_section_pmt.h) | 8
-rw-r--r--  chromium/media/formats/mp2t/ts_section_psi.cc (renamed from chromium/media/mp2t/ts_section_psi.cc) | 6
-rw-r--r--  chromium/media/formats/mp2t/ts_section_psi.h (renamed from chromium/media/mp2t/ts_section_psi.h) | 8
-rw-r--r--  chromium/media/formats/mp4/aac.cc (renamed from chromium/media/mp4/aac.cc) | 69
-rw-r--r--  chromium/media/formats/mp4/aac.h (renamed from chromium/media/mp4/aac.h) | 14
-rw-r--r--  chromium/media/formats/mp4/aac_unittest.cc | 146
-rw-r--r--  chromium/media/formats/mp4/avc.cc | 310
-rw-r--r--  chromium/media/formats/mp4/avc.h | 52
-rw-r--r--  chromium/media/formats/mp4/avc_unittest.cc | 372
-rw-r--r--  chromium/media/formats/mp4/box_definitions.cc (renamed from chromium/media/mp4/box_definitions.cc) | 187
-rw-r--r--  chromium/media/formats/mp4/box_definitions.h (renamed from chromium/media/mp4/box_definitions.h) | 78
-rw-r--r--  chromium/media/formats/mp4/box_reader.cc (renamed from chromium/media/mp4/box_reader.cc) | 20
-rw-r--r--  chromium/media/formats/mp4/box_reader.h (renamed from chromium/media/mp4/box_reader.h) | 22
-rw-r--r--  chromium/media/formats/mp4/box_reader_unittest.cc (renamed from chromium/media/mp4/box_reader_unittest.cc) | 52
-rw-r--r--  chromium/media/formats/mp4/cenc.cc (renamed from chromium/media/mp4/cenc.cc) | 8
-rw-r--r--  chromium/media/formats/mp4/cenc.h (renamed from chromium/media/mp4/cenc.h) | 8
-rw-r--r--  chromium/media/formats/mp4/es_descriptor.cc (renamed from chromium/media/mp4/es_descriptor.cc) | 6
-rw-r--r--  chromium/media/formats/mp4/es_descriptor.h (renamed from chromium/media/mp4/es_descriptor.h) | 11
-rw-r--r--  chromium/media/formats/mp4/es_descriptor_unittest.cc (renamed from chromium/media/mp4/es_descriptor_unittest.cc) | 4
-rw-r--r--  chromium/media/formats/mp4/fourccs.h (renamed from chromium/media/mp4/fourccs.h) | 13
-rw-r--r--  chromium/media/formats/mp4/mp4_stream_parser.cc (renamed from chromium/media/mp4/mp4_stream_parser.cc) | 175
-rw-r--r--  chromium/media/formats/mp4/mp4_stream_parser.h (renamed from chromium/media/mp4/mp4_stream_parser.h) | 37
-rw-r--r--  chromium/media/formats/mp4/mp4_stream_parser_unittest.cc (renamed from chromium/media/mp4/mp4_stream_parser_unittest.cc) | 64
-rw-r--r--  chromium/media/formats/mp4/rcheck.h (renamed from chromium/media/mp4/rcheck.h) | 8
-rw-r--r--  chromium/media/formats/mp4/sample_to_group_iterator.cc | 47
-rw-r--r--  chromium/media/formats/mp4/sample_to_group_iterator.h | 49
-rw-r--r--  chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc | 65
-rw-r--r--  chromium/media/formats/mp4/track_run_iterator.cc (renamed from chromium/media/mp4/track_run_iterator.cc) | 162
-rw-r--r--  chromium/media/formats/mp4/track_run_iterator.h (renamed from chromium/media/mp4/track_run_iterator.h) | 22
-rw-r--r--  chromium/media/formats/mp4/track_run_iterator_unittest.cc (renamed from chromium/media/mp4/track_run_iterator_unittest.cc) | 275
-rw-r--r--  chromium/media/formats/mpeg/adts_constants.cc | 27
-rw-r--r--  chromium/media/formats/mpeg/adts_constants.h | 28
-rw-r--r--  chromium/media/formats/mpeg/adts_stream_parser.cc | 99
-rw-r--r--  chromium/media/formats/mpeg/adts_stream_parser.h | 34
-rw-r--r--  chromium/media/formats/mpeg/adts_stream_parser_unittest.cc | 59
-rw-r--r--  chromium/media/formats/mpeg/mp3_stream_parser.cc | 280
-rw-r--r--  chromium/media/formats/mpeg/mp3_stream_parser.h | 34
-rw-r--r--  chromium/media/formats/mpeg/mp3_stream_parser_unittest.cc | 95
-rw-r--r--  chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc | 421
-rw-r--r--  chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h (renamed from chromium/media/mp3/mp3_stream_parser.h) | 126
-rw-r--r--  chromium/media/formats/webm/chromeos/DEPS (renamed from chromium/media/webm/chromeos/DEPS) | 0
-rw-r--r--  chromium/media/formats/webm/chromeos/ebml_writer.cc (renamed from chromium/media/webm/chromeos/ebml_writer.cc) | 4
-rw-r--r--  chromium/media/formats/webm/chromeos/ebml_writer.h (renamed from chromium/media/webm/chromeos/ebml_writer.h) | 8
-rw-r--r--  chromium/media/formats/webm/chromeos/webm_encoder.cc (renamed from chromium/media/webm/chromeos/webm_encoder.cc) | 4
-rw-r--r--  chromium/media/formats/webm/chromeos/webm_encoder.h (renamed from chromium/media/webm/chromeos/webm_encoder.h) | 10
-rw-r--r--  chromium/media/formats/webm/cluster_builder.cc (renamed from chromium/media/webm/cluster_builder.cc) | 65
-rw-r--r--  chromium/media/formats/webm/cluster_builder.h (renamed from chromium/media/webm/cluster_builder.h) | 14
-rw-r--r--  chromium/media/formats/webm/tracks_builder.cc | 384
-rw-r--r--  chromium/media/formats/webm/tracks_builder.h | 91
-rw-r--r--  chromium/media/formats/webm/webm_audio_client.cc (renamed from chromium/media/webm/webm_audio_client.cc) | 24
-rw-r--r--  chromium/media/formats/webm/webm_audio_client.h (renamed from chromium/media/webm/webm_audio_client.h) | 10
-rw-r--r--  chromium/media/formats/webm/webm_cluster_parser.cc | 686
-rw-r--r--  chromium/media/formats/webm/webm_cluster_parser.h | 275
-rw-r--r--  chromium/media/formats/webm/webm_cluster_parser_unittest.cc | 957
-rw-r--r--  chromium/media/formats/webm/webm_constants.cc (renamed from chromium/media/webm/webm_constants.cc) | 4
-rw-r--r--  chromium/media/formats/webm/webm_constants.h (renamed from chromium/media/webm/webm_constants.h) | 10
-rw-r--r--  chromium/media/formats/webm/webm_content_encodings.cc (renamed from chromium/media/webm/webm_content_encodings.cc) | 4
-rw-r--r--  chromium/media/formats/webm/webm_content_encodings.h (renamed from chromium/media/webm/webm_content_encodings.h) | 8
-rw-r--r--  chromium/media/formats/webm/webm_content_encodings_client.cc (renamed from chromium/media/webm/webm_content_encodings_client.cc) | 6
-rw-r--r--  chromium/media/formats/webm/webm_content_encodings_client.h (renamed from chromium/media/webm/webm_content_encodings_client.h) | 12
-rw-r--r--  chromium/media/formats/webm/webm_content_encodings_client_unittest.cc (renamed from chromium/media/webm/webm_content_encodings_client_unittest.cc) | 8
-rw-r--r--  chromium/media/formats/webm/webm_crypto_helpers.cc (renamed from chromium/media/webm/webm_crypto_helpers.cc) | 24
-rw-r--r--  chromium/media/formats/webm/webm_crypto_helpers.h | 33
-rw-r--r--  chromium/media/formats/webm/webm_info_parser.cc (renamed from chromium/media/webm/webm_info_parser.cc) | 25
-rw-r--r--  chromium/media/formats/webm/webm_info_parser.h (renamed from chromium/media/webm/webm_info_parser.h) | 13
-rw-r--r--  chromium/media/formats/webm/webm_parser.cc (renamed from chromium/media/webm/webm_parser.cc) | 16
-rw-r--r--  chromium/media/formats/webm/webm_parser.h (renamed from chromium/media/webm/webm_parser.h) | 8
-rw-r--r--  chromium/media/formats/webm/webm_parser_unittest.cc (renamed from chromium/media/webm/webm_parser_unittest.cc) | 83
-rw-r--r--  chromium/media/formats/webm/webm_stream_parser.cc (renamed from chromium/media/webm/webm_stream_parser.cc) | 138
-rw-r--r--  chromium/media/formats/webm/webm_stream_parser.h (renamed from chromium/media/webm/webm_stream_parser.h) | 16
-rw-r--r--  chromium/media/formats/webm/webm_tracks_parser.cc (renamed from chromium/media/webm/webm_tracks_parser.cc) | 57
-rw-r--r--  chromium/media/formats/webm/webm_tracks_parser.h (renamed from chromium/media/webm/webm_tracks_parser.h) | 30
-rw-r--r--  chromium/media/formats/webm/webm_tracks_parser_unittest.cc | 185
-rw-r--r--  chromium/media/formats/webm/webm_video_client.cc (renamed from chromium/media/webm/webm_video_client.cc) | 14
-rw-r--r--  chromium/media/formats/webm/webm_video_client.h (renamed from chromium/media/webm/webm_video_client.h) | 10
-rw-r--r--  chromium/media/formats/webm/webm_webvtt_parser.cc (renamed from chromium/media/webm/webm_webvtt_parser.cc) | 4
-rw-r--r--  chromium/media/formats/webm/webm_webvtt_parser.h (renamed from chromium/media/webm/webm_webvtt_parser.h) | 8
-rw-r--r--  chromium/media/formats/webm/webm_webvtt_parser_unittest.cc (renamed from chromium/media/webm/webm_webvtt_parser_unittest.cc) | 4
-rw-r--r--  chromium/media/media.gyp | 624
-rw-r--r--  chromium/media/media_cdm.gypi | 202
-rw-r--r--  chromium/media/media_cdm_adapter.gyp | 67
-rw-r--r--  chromium/media/media_nacl.gyp | 73
-rw-r--r--  chromium/media/media_untrusted.gyp | 45
-rw-r--r--  chromium/media/midi/OWNERS | 5
-rw-r--r--  chromium/media/midi/midi_manager.cc | 134
-rw-r--r--  chromium/media/midi/midi_manager.h | 138
-rw-r--r--  chromium/media/midi/midi_manager_alsa.cc | 452
-rw-r--r--  chromium/media/midi/midi_manager_alsa.h | 69
-rw-r--r--  chromium/media/midi/midi_manager_android.cc | 16
-rw-r--r--  chromium/media/midi/midi_manager_mac.cc | 64
-rw-r--r--  chromium/media/midi/midi_manager_mac.h | 24
-rw-r--r--  chromium/media/midi/midi_manager_unittest.cc | 267
-rw-r--r--  chromium/media/midi/midi_manager_usb.cc | 110
-rw-r--r--  chromium/media/midi/midi_manager_usb.h | 86
-rw-r--r--  chromium/media/midi/midi_manager_usb_unittest.cc | 357
-rw-r--r--  chromium/media/midi/midi_manager_win.cc | 112
-rw-r--r--  chromium/media/midi/midi_manager_win.h | 14
-rw-r--r--  chromium/media/midi/midi_message_queue.cc | 12
-rw-r--r--  chromium/media/midi/midi_message_queue.h | 10
-rw-r--r--  chromium/media/midi/midi_message_queue_unittest.cc | 18
-rw-r--r--  chromium/media/midi/midi_message_util.cc | 2
-rw-r--r--  chromium/media/midi/midi_message_util.h | 10
-rw-r--r--  chromium/media/midi/midi_message_util_unittest.cc | 16
-rw-r--r--  chromium/media/midi/midi_port_info.cc | 8
-rw-r--r--  chromium/media/midi/midi_port_info.h | 12
-rw-r--r--  chromium/media/midi/midi_result.h | 24
-rw-r--r--  chromium/media/midi/usb_midi_descriptor_parser.cc | 235
-rw-r--r--  chromium/media/midi/usb_midi_descriptor_parser.h | 60
-rw-r--r--  chromium/media/midi/usb_midi_descriptor_parser_unittest.cc | 101
-rw-r--r--  chromium/media/midi/usb_midi_device.h | 71
-rw-r--r--  chromium/media/midi/usb_midi_device_android.cc | 66
-rw-r--r--  chromium/media/midi/usb_midi_device_android.h | 51
-rw-r--r--  chromium/media/midi/usb_midi_device_factory_android.cc | 77
-rw-r--r--  chromium/media/midi/usb_midi_device_factory_android.h | 48
-rw-r--r--  chromium/media/midi/usb_midi_input_stream.cc | 105
-rw-r--r--  chromium/media/midi/usb_midi_input_stream.h | 84
-rw-r--r--  chromium/media/midi/usb_midi_input_stream_unittest.cc | 178
-rw-r--r--  chromium/media/midi/usb_midi_jack.h | 51
-rw-r--r--  chromium/media/midi/usb_midi_output_stream.cc | 187
-rw-r--r--  chromium/media/midi/usb_midi_output_stream.h | 57
-rw-r--r--  chromium/media/midi/usb_midi_output_stream_unittest.cc | 276
-rw-r--r--  chromium/media/mp2t/es_parser_h264.cc | 505
-rw-r--r--  chromium/media/mp3/mp3_stream_parser.cc | 597
-rw-r--r--  chromium/media/mp3/mp3_stream_parser_unittest.cc | 165
-rw-r--r--  chromium/media/mp4/aac_unittest.cc | 146
-rw-r--r--  chromium/media/mp4/avc.cc | 91
-rw-r--r--  chromium/media/mp4/avc.h | 30
-rw-r--r--  chromium/media/mp4/avc_unittest.cc | 95
-rw-r--r--  chromium/media/ozone/media_ozone_platform.cc | 93
-rw-r--r--  chromium/media/ozone/media_ozone_platform.h | 47
-rwxr-xr-x  chromium/media/tools/layout_tests/layouttest_analyzer.py | 4
-rw-r--r--  chromium/media/tools/layout_tests/test_expectations.py | 22
-rw-r--r--  chromium/media/tools/player_x11/data_source_logger.cc | 5
-rw-r--r--  chromium/media/tools/player_x11/data_source_logger.h | 1
-rw-r--r--  chromium/media/tools/player_x11/gl_video_renderer.cc | 10
-rw-r--r--  chromium/media/tools/player_x11/gl_video_renderer.h | 2
-rw-r--r--  chromium/media/tools/player_x11/player_x11.cc | 89
-rw-r--r--  chromium/media/tools/player_x11/x11_video_renderer.cc | 11
-rw-r--r--  chromium/media/tools/player_x11/x11_video_renderer.h | 2
-rw-r--r--  chromium/media/video/capture/android/imageformat_list.h | 4
-rw-r--r--  chromium/media/video/capture/android/video_capture_device_android.cc | 112
-rw-r--r--  chromium/media/video/capture/android/video_capture_device_android.h | 24
-rw-r--r--  chromium/media/video/capture/android/video_capture_device_factory_android.cc | 130
-rw-r--r--  chromium/media/video/capture/android/video_capture_device_factory_android.h | 48
-rw-r--r--  chromium/media/video/capture/fake_video_capture_device.cc | 119
-rw-r--r--  chromium/media/video/capture/fake_video_capture_device.h | 39
-rw-r--r--  chromium/media/video/capture/fake_video_capture_device_factory.cc | 54
-rw-r--r--  chromium/media/video/capture/fake_video_capture_device_factory.h | 44
-rw-r--r--  chromium/media/video/capture/fake_video_capture_device_unittest.cc | 208
-rw-r--r--  chromium/media/video/capture/file_video_capture_device.cc | 110
-rw-r--r--  chromium/media/video/capture/file_video_capture_device.h | 22
-rw-r--r--  chromium/media/video/capture/file_video_capture_device_factory.cc | 67
-rw-r--r--  chromium/media/video/capture/file_video_capture_device_factory.h | 31
-rw-r--r--  chromium/media/video/capture/linux/video_capture_device_chromeos.cc | 116
-rw-r--r--  chromium/media/video/capture/linux/video_capture_device_chromeos.h | 36
-rw-r--r--  chromium/media/video/capture/linux/video_capture_device_factory_linux.cc | 187
-rw-r--r--  chromium/media/video/capture/linux/video_capture_device_factory_linux.h | 38
-rw-r--r--  chromium/media/video/capture/linux/video_capture_device_linux.cc | 295
-rw-r--r--  chromium/media/video/capture/linux/video_capture_device_linux.h | 19
-rw-r--r--  chromium/media/video/capture/mac/avfoundation_glue.h | 28
-rw-r--r--  chromium/media/video/capture/mac/avfoundation_glue.mm | 123
-rw-r--r--  chromium/media/video/capture/mac/coremedia_glue.h | 20
-rw-r--r--  chromium/media/video/capture/mac/coremedia_glue.mm | 44
-rw-r--r--  chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h | 9
-rw-r--r--  chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm | 147
-rw-r--r--  chromium/media/video/capture/mac/video_capture_device_factory_mac.h | 42
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_factory_mac.mm177
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm44
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.h41
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.mm444
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h8
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm73
-rw-r--r--chromium/media/video/capture/video_capture.h76
-rw-r--r--chromium/media/video/capture/video_capture_device.cc22
-rw-r--r--chromium/media/video/capture/video_capture_device.h102
-rw-r--r--chromium/media/video/capture/video_capture_device_factory.cc76
-rw-r--r--chromium/media/video/capture/video_capture_device_factory.h54
-rw-r--r--chromium/media/video/capture/video_capture_device_unittest.cc287
-rw-r--r--chromium/media/video/capture/video_capture_proxy.cc135
-rw-r--r--chromium/media/video/capture/video_capture_proxy.h85
-rw-r--r--chromium/media/video/capture/video_capture_types.cc8
-rw-r--r--chromium/media/video/capture/video_capture_types.h9
-rw-r--r--chromium/media/video/capture/win/capability_list_win.cc2
-rw-r--r--chromium/media/video/capture/win/capability_list_win.h2
-rw-r--r--chromium/media/video/capture/win/filter_base_win.cc2
-rw-r--r--chromium/media/video/capture/win/video_capture_device_factory_win.cc436
-rw-r--r--chromium/media/video/capture/win/video_capture_device_factory_win.h39
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.cc192
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.h25
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.cc315
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.h41
-rw-r--r--chromium/media/video/mock_video_decode_accelerator.h3
-rw-r--r--chromium/media/video/video_decode_accelerator.cc20
-rw-r--r--chromium/media/video/video_decode_accelerator.h59
-rw-r--r--chromium/media/video/video_encode_accelerator.cc11
-rw-r--r--chromium/media/video/video_encode_accelerator.h45
-rw-r--r--chromium/media/webm/tracks_builder.cc216
-rw-r--r--chromium/media/webm/tracks_builder.h59
-rw-r--r--chromium/media/webm/webm_cluster_parser.cc463
-rw-r--r--chromium/media/webm/webm_cluster_parser.h159
-rw-r--r--chromium/media/webm/webm_cluster_parser_unittest.cc530
-rw-r--r--chromium/media/webm/webm_crypto_helpers.h32
-rw-r--r--chromium/media/webm/webm_tracks_parser_unittest.cc122
938 files changed, 65246 insertions, 38247 deletions
diff --git a/chromium/media/DEPS b/chromium/media/DEPS
index b46ee56b501..8f61ef2f4a8 100644
--- a/chromium/media/DEPS
+++ b/chromium/media/DEPS
@@ -1,13 +1,14 @@
include_rules = [
"+gpu",
"+jni",
- "+net/http",
"+third_party/ffmpeg",
"+third_party/libvpx",
+ "+third_party/libyuv",
"+third_party/opus",
"+third_party/skia",
"+ui/base",
"+ui/events",
"+ui/gfx",
"+ui/gl",
+ "+ui/ozone",
]
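These include_rules feed the checkdeps tool: roughly, an #include that reaches
outside media/ must be covered by a "+" entry. Under the updated rules,
includes like the following would pass or fail the check (header paths are
illustrative, not part of this change):

#include "third_party/libyuv/include/libyuv.h"  // allowed by "+third_party/libyuv"
#include "ui/ozone/public/ozone_platform.h"     // allowed by "+ui/ozone"
#include "net/http/http_response_headers.h"     // rejected now that "+net/http" is gone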
diff --git a/chromium/media/OWNERS b/chromium/media/OWNERS
index eeb9125c71e..b5706fc4eef 100644
--- a/chromium/media/OWNERS
+++ b/chromium/media/OWNERS
@@ -1,7 +1,6 @@
acolwell@chromium.org
dalecurtis@chromium.org
ddorwin@chromium.org
-fischman@chromium.org
scherkus@chromium.org
shadi@chromium.org
vrk@chromium.org
diff --git a/chromium/media/PRESUBMIT.py b/chromium/media/PRESUBMIT.py
index aafe23a2f9f..279115ccb2f 100644
--- a/chromium/media/PRESUBMIT.py
+++ b/chromium/media/PRESUBMIT.py
@@ -8,15 +8,15 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
+def _FilterFile(affected_file):
+ """Return true if the file could contain code requiring a presubmit check."""
+ return affected_file.LocalPath().endswith(
+ ('.h', '.cc', '.cpp', '.cxx', '.mm'))
+
def _CheckForUseOfWrongClock(input_api, output_api):
"""Make sure new lines of media code don't use a clock susceptible to skew."""
- def FilterFile(affected_file):
- """Return true if the file could contain code referencing base::Time."""
- return affected_file.LocalPath().endswith(
- ('.h', '.cc', '.cpp', '.cxx', '.mm'))
-
# Regular expression that should detect any explicit references to the
# base::Time type (or base::Clock/DefaultClock), whether in using decls,
# typedefs, or to call static methods.
@@ -45,7 +45,7 @@ def _CheckForUseOfWrongClock(input_api, output_api):
r'(' + using_base_time_decl_pattern + r')|(' +
base_time_konstant_pattern + r')')
problems = []
- for f in input_api.AffectedSourceFiles(FilterFile):
+ for f in input_api.AffectedSourceFiles(_FilterFile):
for line_number, line in f.ChangedContents():
if problem_re.search(line):
if not exception_re.search(line):
@@ -64,9 +64,106 @@ def _CheckForUseOfWrongClock(input_api, output_api):
return []
+def _CheckForMessageLoopProxy(input_api, output_api):
+ """Make sure media code only uses MessageLoopProxy for accessing the current
+ loop."""
+
+ message_loop_proxy_re = input_api.re.compile(
+ r'\bMessageLoopProxy(?!::current\(\))')
+
+ problems = []
+ for f in input_api.AffectedSourceFiles(_FilterFile):
+ for line_number, line in f.ChangedContents():
+ if message_loop_proxy_re.search(line):
+ problems.append('%s:%d' % (f.LocalPath(), line_number))
+
+ if problems:
+ return [output_api.PresubmitError(
+ 'MessageLoopProxy should only be used for accessing the current loop.\n'
+ 'Use the TaskRunner interfaces instead as they are more explicit about\n'
+ 'the run-time characteristics. In most cases, SingleThreadTaskRunner\n'
+ 'is a drop-in replacement for MessageLoopProxy.', problems)]
+
+ return []
+
+
+def _CheckForHistogramOffByOne(input_api, output_api):
+ """Make sure histogram enum maxes are used properly"""
+
+ # A general-purpose chunk of regex to match whitespace and/or comments
+ # that may be interspersed with the code we're interested in:
+ comment = r'/\*.*?\*/|//[^\n]*'
+ whitespace = r'(?:[\n\t ]|(?:' + comment + r'))*'
+
+ # The name is assumed to be a literal string.
+ histogram_name = r'"[^"]*"'
+
+ # This can be an arbitrary expression, so just ensure it isn't a ; to prevent
+ # matching past the end of this statement.
+ histogram_value = r'[^;]*'
+
+ # In parens so we can retrieve it for further checks.
+ histogram_max = r'([^;,]*)'
+
+ # This should match a uma histogram enumeration macro expression.
+ uma_macro_re = input_api.re.compile(
+ r'\bUMA_HISTOGRAM_ENUMERATION\(' + whitespace + histogram_name + r',' +
+ whitespace + histogram_value + r',' + whitespace + histogram_max +
+ whitespace + r'\)' + whitespace + r';(?:' + whitespace +
+ r'\/\/ (PRESUBMIT_IGNORE_UMA_MAX))?')
+
+ uma_max_re = input_api.re.compile(r'.*(?:Max|MAX).* \+ 1')
+
+ problems = []
+
+ for f in input_api.AffectedSourceFiles(_FilterFile):
+ contents = input_api.ReadFile(f)
+
+ # We want to match across lines, but still report a line number, so we keep
+ # track of the line we're on as we search through the file.
+ line_number = 1
+
+ # We search the entire file, then check if any violations are in the changed
+  # areas; this is inefficient, but simple. A UMA_HISTOGRAM_ENUMERATION call
+ # will often span multiple lines, so finding a match looking just at the
+ # deltas line-by-line won't catch problems.
+ match = uma_macro_re.search(contents)
+ while match:
+ line_number += contents.count('\n', 0, match.start())
+ max_arg = match.group(1) # The third argument.
+
+ if (not uma_max_re.match(max_arg) and match.group(2) !=
+ 'PRESUBMIT_IGNORE_UMA_MAX'):
+ uma_range = range(match.start(), match.end() + 1)
+ # Check if any part of the match is in the changed lines:
+ for num, line in f.ChangedContents():
+ if line_number <= num <= line_number + match.group().count('\n'):
+          problems.append('%s:%d' % (f.LocalPath(), line_number))
+ break
+
+ # Strip off the file contents up to the end of the match and update the
+ # line number.
+ contents = contents[match.end():]
+ line_number += match.group().count('\n')
+ match = uma_macro_re.search(contents)
+
+ if problems:
+ return [output_api.PresubmitError(
+ 'UMA_HISTOGRAM_ENUMERATION reports in src/media/ are expected to adhere\n'
+ 'to the following guidelines:\n'
+ ' - The max value (3rd argument) should be an enum value equal to the\n'
+ ' last valid value, e.g. FOO_MAX = LAST_VALID_FOO.\n'
+ ' - 1 must be added to that max value.\n'
+      'Contact rileya@chromium.org if you have questions.', problems)]
+
+ return []
+
+
def _CheckChange(input_api, output_api):
results = []
results.extend(_CheckForUseOfWrongClock(input_api, output_api))
+ results.extend(_CheckForMessageLoopProxy(input_api, output_api))
+ results.extend(_CheckForHistogramOffByOne(input_api, output_api))
return results
diff --git a/chromium/media/PRESUBMIT_test.py b/chromium/media/PRESUBMIT_test.py
new file mode 100644
index 00000000000..f537a6d5090
--- /dev/null
+++ b/chromium/media/PRESUBMIT_test.py
@@ -0,0 +1,150 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import re
+import unittest
+
+import PRESUBMIT
+
+class MockInputApi(object):
+ def __init__(self):
+ self.re = re
+ self.os_path = os.path
+ self.files = []
+ self.is_committing = False
+
+ def AffectedFiles(self):
+ return self.files
+
+ def AffectedSourceFiles(self, fn):
+    # We'll just pretend everything is a source file for the sake of simplicity.
+ return self.files
+
+ def ReadFile(self, f):
+ return f.NewContents()
+
+
+class MockOutputApi(object):
+ class PresubmitResult(object):
+ def __init__(self, message, items=None, long_text=''):
+ self.message = message
+ self.items = items
+ self.long_text = long_text
+
+ class PresubmitError(PresubmitResult):
+ def __init__(self, message, items, long_text=''):
+ MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
+ self.type = 'error'
+
+ class PresubmitPromptWarning(PresubmitResult):
+ def __init__(self, message, items, long_text=''):
+ MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
+ self.type = 'warning'
+
+ class PresubmitNotifyResult(PresubmitResult):
+ def __init__(self, message, items, long_text=''):
+ MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
+ self.type = 'notify'
+
+ class PresubmitPromptOrNotify(PresubmitResult):
+ def __init__(self, message, items, long_text=''):
+ MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
+ self.type = 'promptOrNotify'
+
+
+class MockFile(object):
+ def __init__(self, local_path, new_contents):
+ self._local_path = local_path
+ self._new_contents = new_contents
+ self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
+
+ def ChangedContents(self):
+ return self._changed_contents
+
+ def NewContents(self):
+ return self._new_contents
+
+ def LocalPath(self):
+ return self._local_path
+
+
+class MockChange(object):
+ def __init__(self, changed_files):
+ self._changed_files = changed_files
+
+ def LocalPaths(self):
+ return self._changed_files
+
+
+class HistogramOffByOneTest(unittest.TestCase):
+
+  # Take an input and make sure the number of problems found equals the expectation.
+ def simpleCheck(self, contents, expected_errors):
+ input_api = MockInputApi()
+ input_api.files.append(MockFile('test.cc', contents))
+ results = PRESUBMIT._CheckForHistogramOffByOne(input_api, MockOutputApi())
+ if expected_errors:
+ self.assertEqual(1, len(results))
+ self.assertEqual(expected_errors, len(results[0].items))
+ else:
+ self.assertEqual(0, len(results))
+
+ def testValid(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFooMax + 1);', 0)
+
+ def testValidComments(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", /*...*/ kFoo, /*...*/'
+ 'kFooMax + 1);', 0)
+
+ def testValidMultiLine(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test",\n'
+ ' kFoo,\n'
+ ' kFooMax + 1);', 0)
+
+ def testValidMultiLineComments(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", // This is the name\n'
+ ' kFoo, /* The value */\n'
+ ' kFooMax + 1 /* The max */ );',
+ 0)
+
+ def testNoPlusOne(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFooMax);', 1)
+
+ def testInvalidWithIgnore(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFooMax); '
+ '// PRESUBMIT_IGNORE_UMA_MAX', 0)
+
+ def testNoMax(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFoo + 1);', 1)
+
+ def testNoMaxNoPlusOne(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFoo);', 1)
+
+ def testMultipleErrors(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFoo);\n'
+ 'printf("hello, world!");\n'
+ 'UMA_HISTOGRAM_ENUMERATION("test", kBar, kBarMax);', 2)
+
+ def testValidAndInvalid(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFoo);\n'
+ 'UMA_HISTOGRAM_ENUMERATION("test", kFoo, kFooMax + 1);'
+ 'UMA_HISTOGRAM_ENUMERATION("test", kBar, kBarMax);', 2)
+
+ def testInvalidMultiLine(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test",\n'
+ ' kFoo,\n'
+ ' kFooMax + 2);', 1)
+
+ def testInvalidComments(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", /*...*/, val, /*...*/,'
+ 'Max);\n', 1)
+
+ def testInvalidMultiLineComments(self):
+ self.simpleCheck('UMA_HISTOGRAM_ENUMERATION("test", // This is the name\n'
+ ' kFoo, /* The value */\n'
+ ' kFooMax + 2 /* The max */ );',
+ 1)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/chromium/media/audio/agc_audio_stream.h b/chromium/media/audio/agc_audio_stream.h
index b289a0b15e9..940d96412c2 100644
--- a/chromium/media/audio/agc_audio_stream.h
+++ b/chromium/media/audio/agc_audio_stream.h
@@ -73,12 +73,10 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
AgcAudioStream()
: agc_is_enabled_(false), max_volume_(0.0), normalized_volume_(0.0) {
- DVLOG(1) << __FUNCTION__;
}
virtual ~AgcAudioStream() {
DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(1) << __FUNCTION__;
}
protected:
@@ -87,7 +85,6 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// The timer is only started if AGC mode is first enabled using the
// SetAutomaticGainControl() method.
void StartAgc() {
- DVLOG(1) << "StartAgc()";
DCHECK(thread_checker_.CalledOnValidThread());
if (!agc_is_enabled_ || timer_.IsRunning())
return;
@@ -105,7 +102,6 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// Stops the periodic timer which periodically checks and updates the
// current microphone volume level.
void StopAgc() {
- DVLOG(1) << "StopAgc()";
DCHECK(thread_checker_.CalledOnValidThread());
if (timer_.IsRunning())
timer_.Stop();
diff --git a/chromium/media/audio/alsa/alsa_input.cc b/chromium/media/audio/alsa/alsa_input.cc
index 9dcbf2b8662..0bc9f314d45 100644
--- a/chromium/media/audio/alsa/alsa_input.cc
+++ b/chromium/media/audio/alsa/alsa_input.cc
@@ -32,7 +32,8 @@ AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerBase* audio_manager,
device_name_(device_name),
params_(params),
bytes_per_buffer_(params.frames_per_buffer() *
- (params.channels() * params.bits_per_sample()) / 8),
+ (params.channels() * params.bits_per_sample()) /
+ 8),
wrapper_(wrapper),
buffer_duration_(base::TimeDelta::FromMicroseconds(
params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
@@ -41,8 +42,9 @@ AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerBase* audio_manager,
device_handle_(NULL),
mixer_handle_(NULL),
mixer_element_handle_(NULL),
- weak_factory_(this),
- read_callback_behind_schedule_(false) {
+ read_callback_behind_schedule_(false),
+ audio_bus_(AudioBus::Create(params)),
+ weak_factory_(this) {
}
AlsaPcmInputStream::~AlsaPcmInputStream() {}
@@ -208,8 +210,11 @@ void AlsaPcmInputStream::ReadAudio() {
int frames_read = wrapper_->PcmReadi(device_handle_, audio_buffer_.get(),
params_.frames_per_buffer());
if (frames_read == params_.frames_per_buffer()) {
- callback_->OnData(this, audio_buffer_.get(), bytes_per_buffer_,
- hardware_delay_bytes, normalized_volume);
+ audio_bus_->FromInterleaved(audio_buffer_.get(),
+ audio_bus_->frames(),
+ params_.bits_per_sample() / 8);
+ callback_->OnData(
+ this, audio_bus_.get(), hardware_delay_bytes, normalized_volume);
} else {
LOG(WARNING) << "PcmReadi returning less than expected frames: "
<< frames_read << " vs. " << params_.frames_per_buffer()
@@ -245,6 +250,8 @@ void AlsaPcmInputStream::Stop() {
int error = wrapper_->PcmDrop(device_handle_);
if (error < 0)
HandleError("PcmDrop", error);
+
+ callback_ = NULL;
}
void AlsaPcmInputStream::Close() {
@@ -261,9 +268,6 @@ void AlsaPcmInputStream::Close() {
device_handle_ = NULL;
mixer_handle_ = NULL;
mixer_element_handle_ = NULL;
-
- if (callback_)
- callback_->OnClose(this);
}
audio_manager_->ReleaseInputStream(this);
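With this change the ALSA input path hands out deinterleaved samples through
an AudioBus instead of a raw interleaved byte buffer, and OnClose() is no
longer invoked from Close(); Stop() clears the callback instead. A minimal
sketch of a sink written against the new AudioInputCallback shape (an
illustrative class, not part of this change):

class LoggingSink : public AudioInputStream::AudioInputCallback {
 public:
  virtual void OnData(AudioInputStream* stream,
                      const AudioBus* src,
                      uint32 hardware_delay_bytes,
                      double volume) OVERRIDE {
    // |src| holds one deinterleaved channel per AudioBus channel; sinks that
    // still need packed int16 samples must convert explicitly, as the Android
    // tests below do with AudioBus::ToInterleaved().
    VLOG(1) << "frames=" << src->frames() << " channels=" << src->channels()
            << " delay_bytes=" << hardware_delay_bytes << " volume=" << volume;
  }
  virtual void OnError(AudioInputStream* stream) OVERRIDE {}
};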
diff --git a/chromium/media/audio/alsa/alsa_input.h b/chromium/media/audio/alsa/alsa_input.h
index 6e9aad9056e..c26f3e2b66e 100644
--- a/chromium/media/audio/alsa/alsa_input.h
+++ b/chromium/media/audio/alsa/alsa_input.h
@@ -80,9 +80,12 @@ class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
snd_pcm_t* device_handle_; // Handle to the ALSA PCM recording device.
snd_mixer_t* mixer_handle_; // Handle to the ALSA microphone mixer.
snd_mixer_elem_t* mixer_element_handle_; // Handle to the capture element.
- base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
scoped_ptr<uint8[]> audio_buffer_; // Buffer used for reading audio data.
bool read_callback_behind_schedule_;
+ scoped_ptr<AudioBus> audio_bus_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(AlsaPcmInputStream);
};
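The reordering matters because C++ destroys members in reverse declaration
order: placing the WeakPtrFactory last means it is destroyed first, so
outstanding weak pointers are invalidated before the members a still-pending
task might dereference are torn down. A generic sketch of the pattern (a
hypothetical Worker class, not code from this change):

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"

class Worker {
 public:
  Worker() : weak_factory_(this) {}

  void PostWork() {
    // A task bound to a weak pointer is silently dropped if the Worker is
    // destroyed before the task gets to run.
    task_runner_->PostTask(
        FROM_HERE, base::Bind(&Worker::DoWork, weak_factory_.GetWeakPtr()));
  }

 private:
  void DoWork() {}

  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;

  // Declared last so it is destroyed first, per the NOTE above.
  base::WeakPtrFactory<Worker> weak_factory_;
};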
diff --git a/chromium/media/audio/alsa/alsa_output.cc b/chromium/media/audio/alsa/alsa_output.cc
index eccf8ee28a8..690d738f195 100644
--- a/chromium/media/audio/alsa/alsa_output.cc
+++ b/chromium/media/audio/alsa/alsa_output.cc
@@ -39,7 +39,6 @@
#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
#include "base/stl_util.h"
#include "base/time/time.h"
#include "media/audio/alsa/alsa_util.h"
@@ -154,12 +153,12 @@ AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
message_loop_(base::MessageLoop::current()),
playback_handle_(NULL),
frames_per_packet_(packet_size_ / bytes_per_frame_),
- weak_factory_(this),
state_(kCreated),
volume_(1.0f),
source_callback_(NULL),
- audio_bus_(AudioBus::Create(params)) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ audio_bus_(AudioBus::Create(params)),
+ weak_factory_(this) {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
DCHECK_EQ(audio_bus_->frames() * bytes_per_frame_, packet_size_);
// Sanity check input values.
@@ -536,13 +535,13 @@ std::string AlsaPcmOutputStream::FindDeviceForChannels(uint32 channels) {
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
// Only examine devices that are output capable. Valid values are
// "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(
+ scoped_ptr<char, base::FreeDeleter> io(
wrapper_->DeviceNameGetHint(*hint_iter, kIoHintName));
if (io != NULL && strcmp(io.get(), "Input") == 0)
continue;
// Attempt to select the closest device for number of channels.
- scoped_ptr_malloc<char> name(
+ scoped_ptr<char, base::FreeDeleter> name(
wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
if (strncmp(wanted_device, name.get(), strlen(wanted_device)) == 0) {
guessed_device = name.get();
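These hunks also migrate from the deprecated scoped_ptr_malloc to scoped_ptr
with base::FreeDeleter, which releases malloc()-allocated memory (such as the
strings returned by ALSA's snd_device_name_get_hint()) with free() rather than
delete. A minimal standalone sketch of the idiom:

#include <string.h>
#include "base/memory/scoped_ptr.h"

void FreeDeleterDemo() {
  // strdup() allocates with malloc(), so the smart pointer must call free(),
  // not delete[]; base::FreeDeleter does exactly that on destruction.
  scoped_ptr<char, base::FreeDeleter> name(strdup("front:CARD=0"));
  // free(name.get()) runs automatically when |name| goes out of scope.
}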
diff --git a/chromium/media/audio/alsa/alsa_output.h b/chromium/media/audio/alsa/alsa_output.h
index 65a23f75124..180564584c2 100644
--- a/chromium/media/audio/alsa/alsa_output.h
+++ b/chromium/media/audio/alsa/alsa_output.h
@@ -201,10 +201,6 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
scoped_ptr<media::SeekableBuffer> buffer_;
uint32 frames_per_packet_;
- // Allows us to run tasks on the AlsaPcmOutputStream instance which are
- // bound by its lifetime.
- base::WeakPtrFactory<AlsaPcmOutputStream> weak_factory_;
-
InternalState state_;
float volume_; // Volume level from 0.0 to 1.0.
@@ -217,6 +213,11 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
scoped_ptr<ChannelMixer> channel_mixer_;
scoped_ptr<AudioBus> mixed_audio_bus_;
+ // Allows us to run tasks on the AlsaPcmOutputStream instance which are
+ // bound by its lifetime.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<AlsaPcmOutputStream> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(AlsaPcmOutputStream);
};
diff --git a/chromium/media/audio/alsa/alsa_output_unittest.cc b/chromium/media/audio/alsa/alsa_output_unittest.cc
index 99ae8b02e0a..8b0aeaea4c6 100644
--- a/chromium/media/audio/alsa/alsa_output_unittest.cc
+++ b/chromium/media/audio/alsa/alsa_output_unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
#include "media/audio/alsa/alsa_output.h"
#include "media/audio/alsa/alsa_wrapper.h"
#include "media/audio/alsa/audio_manager_alsa.h"
#include "media/audio/fake_audio_log_factory.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/base/data_buffer.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -67,16 +67,6 @@ class MockAlsaWrapper : public AlsaWrapper {
MOCK_METHOD1(StrError, const char*(int errnum));
};
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
class MockAudioManagerAlsa : public AudioManagerAlsa {
public:
MockAudioManagerAlsa() : AudioManagerAlsa(&fake_audio_log_factory_) {}
@@ -85,10 +75,9 @@ class MockAudioManagerAlsa : public AudioManagerAlsa {
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id));
+ const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
@@ -102,8 +91,8 @@ class MockAudioManagerAlsa : public AudioManagerAlsa {
}
// We don't mock this method since all tests will do the same thing
- // and use the current message loop.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
+ // and use the current task runner.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE {
return base::MessageLoop::current()->message_loop_proxy();
}
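The hand-rolled mock was replaced by a shared gmock header. Judging from the
definition deleted above and the expectations the tests still set, the shared
media/audio/mock_audio_source_callback.h presumably looks roughly like this
(a reconstruction, not the actual header):

#include "media/audio/audio_io.h"
#include "testing/gmock/include/gmock/gmock.h"

namespace media {

class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
 public:
  MOCK_METHOD2(OnMoreData,
               int(AudioBus* audio_bus, AudioBuffersState buffers_state));
  MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
};

}  // namespace media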
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.cc b/chromium/media/audio/alsa/audio_manager_alsa.cc
index ac61a5fa974..beb60bad88b 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.cc
+++ b/chromium/media/audio/alsa/audio_manager_alsa.cc
@@ -152,8 +152,8 @@ void AudioManagerAlsa::GetAlsaDevicesInfo(
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
// Only examine devices of the right type. Valid values are
// "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
+ scoped_ptr<char, base::FreeDeleter> io(wrapper_->DeviceNameGetHint(
+ *hint_iter, kIoHintName));
if (io != NULL && strcmp(unwanted_device_type, io.get()) == 0)
continue;
@@ -169,13 +169,13 @@ void AudioManagerAlsa::GetAlsaDevicesInfo(
}
// Get the unique device name for the device.
- scoped_ptr_malloc<char> unique_device_name(
+ scoped_ptr<char, base::FreeDeleter> unique_device_name(
wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
// Find out if the device is available.
if (IsAlsaDeviceAvailable(type, unique_device_name.get())) {
// Get the description for the device.
- scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
+ scoped_ptr<char, base::FreeDeleter> desc(wrapper_->DeviceNameGetHint(
*hint_iter, kDescriptionHintName));
media::AudioDeviceName name;
@@ -252,8 +252,8 @@ bool AudioManagerAlsa::HasAnyAlsaAudioDevice(
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
// Only examine devices that are |stream| capable. Valid values are
// "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
+ scoped_ptr<char, base::FreeDeleter> io(wrapper_->DeviceNameGetHint(
+ *hint_iter, kIoHintName));
const char* unwanted_type = UnwantedDeviceTypeWhenEnumerating(stream);
if (io != NULL && strcmp(unwanted_type, io.get()) == 0)
continue; // Wrong type, skip the device.
@@ -283,11 +283,9 @@ AudioOutputStream* AudioManagerAlsa::MakeLinearOutputStream(
AudioOutputStream* AudioManagerAlsa::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- // TODO(xians): Use input_device_id for unified IO.
return MakeOutputStream(params);
}
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.h b/chromium/media/audio/alsa/audio_manager_alsa.h
index 155089f06bc..d08c3ba1f27 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.h
+++ b/chromium/media/audio/alsa/audio_manager_alsa.h
@@ -37,8 +37,7 @@ class MEDIA_EXPORT AudioManagerAlsa : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
index e7913265269..a356d9c25de 100644
--- a/chromium/media/audio/android/audio_android_unittest.cc
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
+#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
@@ -17,6 +18,7 @@
#include "media/audio/android/audio_manager_android.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/base/decoder_buffer.h"
#include "media/base/seekable_buffer.h"
#include "media/base/test_data_util.h"
@@ -85,6 +87,47 @@ static double ExpectedTimeBetweenCallbacks(AudioParameters params) {
static_cast<double>(params.sample_rate()))).InMillisecondsF();
}
+// Helper method which verifies that the device list starts with a valid
+// default device name followed by non-default device names.
+static void CheckDeviceNames(const AudioDeviceNames& device_names) {
+ VLOG(2) << "Got " << device_names.size() << " audio devices.";
+ if (device_names.empty()) {
+ // Log a warning so we can see the status on the build bots. No need to
+ // break the test though since this does successfully test the code and
+ // some failure cases.
+ LOG(WARNING) << "No input devices detected";
+ return;
+ }
+
+ AudioDeviceNames::const_iterator it = device_names.begin();
+
+ // The first device in the list should always be the default device.
+ EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceName),
+ it->device_name);
+ EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
+ ++it;
+
+ // Other devices should have non-empty name and id and should not contain
+ // default name or id.
+ while (it != device_names.end()) {
+ EXPECT_FALSE(it->device_name.empty());
+ EXPECT_FALSE(it->unique_id.empty());
+ VLOG(2) << "Device ID(" << it->unique_id
+ << "), label: " << it->device_name;
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
+ it->device_name);
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
+ it->unique_id);
+ ++it;
+ }
+}
+
+// We clear the data bus to ensure that the test does not cause noise.
+static int RealOnMoreData(AudioBus* dest, AudioBuffersState buffers_state) {
+ dest->Zero();
+ return dest->frames();
+}
+
std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
using namespace std;
os << endl << "format: " << FormatToString(params.format()) << endl
@@ -105,34 +148,14 @@ std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
// Gmock implementation of AudioInputStream::AudioInputCallback.
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData,
+ MOCK_METHOD4(OnData,
void(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
-// Gmock implementation of AudioOutputStream::AudioSourceCallback.
-class MockAudioOutputCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData,
- int(AudioBus* dest, AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData,
- int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-
- // We clear the data bus to ensure that the test does not cause noise.
- int RealOnMoreData(AudioBus* dest, AudioBuffersState buffers_state) {
- dest->Zero();
- return dest->frames();
- }
-};
-
// Implements AudioOutputStream::AudioSourceCallback and provides audio data
// by reading from a data file.
class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
@@ -183,13 +206,6 @@ class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
return frames;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
int file_size() { return file_->data_size(); }
@@ -247,18 +263,22 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
// AudioInputStream::AudioInputCallback implementation.
virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(*interleaved);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+
// Store the data in a temporary buffer to avoid making blocking
// fwrite() calls in the audio callback. The complete buffer will be
// written to file in the destructor.
- if (!buffer_->Append(src, size))
+ const int size = bytes_per_sample * num_samples;
+ if (!buffer_->Append((const uint8*)interleaved.get(), size))
event_->Signal();
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
private:
@@ -291,13 +311,19 @@ class FullDuplexAudioSinkSource
// AudioInputStream::AudioInputCallback implementation
virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
const base::TimeTicks now_time = base::TimeTicks::Now();
const int diff = (now_time - previous_time_).InMilliseconds();
+ EXPECT_EQ(params_.bits_per_sample(), 16);
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(*interleaved);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+ const int size = bytes_per_sample * num_samples;
+
base::AutoLock lock(lock_);
if (diff > 1000) {
started_ = true;
@@ -318,13 +344,12 @@ class FullDuplexAudioSinkSource
// Append new data to the FIFO and extend the size if the max capacity
// was exceeded. Flush the FIFO when extended just in case.
- if (!fifo_->Append(src, size)) {
+ if (!fifo_->Append((const uint8*)interleaved.get(), size)) {
fifo_->set_forward_capacity(2 * fifo_->forward_capacity());
fifo_->Clear();
}
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
// AudioOutputStream::AudioSourceCallback implementation
@@ -357,13 +382,6 @@ class FullDuplexAudioSinkSource
return dest->frames();
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
private:
@@ -389,21 +407,76 @@ class FullDuplexAudioSinkSource
// Test fixture class for tests which only exercise the output path.
class AudioAndroidOutputTest : public testing::Test {
public:
- AudioAndroidOutputTest() {}
-
- protected:
- virtual void SetUp() {
- audio_manager_.reset(AudioManager::CreateForTesting());
- loop_.reset(new base::MessageLoopForUI());
+ AudioAndroidOutputTest()
+ : loop_(new base::MessageLoopForUI()),
+ audio_manager_(AudioManager::CreateForTesting()),
+ audio_output_stream_(NULL) {
}
- virtual void TearDown() {}
+ virtual ~AudioAndroidOutputTest() {
+ }
+ protected:
AudioManager* audio_manager() { return audio_manager_.get(); }
base::MessageLoopForUI* loop() { return loop_.get(); }
+ const AudioParameters& audio_output_parameters() {
+ return audio_output_parameters_;
+ }
- AudioParameters GetDefaultOutputStreamParameters() {
- return audio_manager()->GetDefaultOutputStreamParameters();
+ // Synchronously runs the provided callback/closure on the audio thread.
+ void RunOnAudioThread(const base::Closure& closure) {
+ if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ base::WaitableEvent event(false, false);
+ audio_manager()->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioAndroidOutputTest::RunOnAudioThreadImpl,
+ base::Unretained(this),
+ closure,
+ &event));
+ event.Wait();
+ } else {
+ closure.Run();
+ }
+ }
+
+ void RunOnAudioThreadImpl(const base::Closure& closure,
+ base::WaitableEvent* event) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ closure.Run();
+ event->Signal();
+ }
+
+ void GetDefaultOutputStreamParametersOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::GetDefaultOutputStreamParameters,
+ base::Unretained(this)));
+ }
+
+ void MakeAudioOutputStreamOnAudioThread(const AudioParameters& params) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::MakeOutputStream,
+ base::Unretained(this),
+ params));
+ }
+
+ void OpenAndCloseAudioOutputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::OpenAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenAndStartAudioOutputStreamOnAudioThread(
+ AudioOutputStream::AudioSourceCallback* source) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::OpenAndStart,
+ base::Unretained(this),
+ source));
+ }
+
+ void StopAndCloseAudioOutputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::StopAndClose,
+ base::Unretained(this)));
}
double AverageTimeBetweenCallbacks(int num_callbacks) const {
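// Usage sketch (illustrative; mirrors the tests later in this file): every
// AudioManager call that must run on the audio thread is wrapped in a closure
// and funneled through RunOnAudioThread(), which blocks the calling thread on
// the WaitableEvent until the closure has executed:
//
//   AudioDeviceNames devices;
//   RunOnAudioThread(base::Bind(&AudioManager::GetAudioInputDeviceNames,
//                               base::Unretained(audio_manager()),
//                               &devices));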
@@ -416,28 +489,25 @@ class AudioAndroidOutputTest : public testing::Test {
ExpectedTimeBetweenCallbacks(params);
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
- AudioOutputStream* stream = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(stream);
+ MakeAudioOutputStreamOnAudioThread(params);
int count = 0;
- MockAudioOutputCallback source;
+ MockAudioSourceCallback source;
EXPECT_CALL(source, OnMoreData(NotNull(), _))
.Times(AtLeast(num_callbacks))
.WillRepeatedly(
DoAll(CheckCountAndPostQuitTask(&count, num_callbacks, loop()),
- Invoke(&source, &MockAudioOutputCallback::RealOnMoreData)));
- EXPECT_CALL(source, OnError(stream)).Times(0);
- EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+ Invoke(RealOnMoreData)));
+ EXPECT_CALL(source, OnError(audio_output_stream_)).Times(0);
+
+ OpenAndStartAudioOutputStreamOnAudioThread(&source);
- EXPECT_TRUE(stream->Open());
- stream->Start(&source);
start_time_ = base::TimeTicks::Now();
loop()->Run();
end_time_ = base::TimeTicks::Now();
- stream->Stop();
- stream->Close();
+
+ StopAndCloseAudioOutputStreamOnAudioThread();
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
@@ -448,11 +518,47 @@ class AudioAndroidOutputTest : public testing::Test {
EXPECT_GE(average_time_between_callbacks_ms,
0.70 * expected_time_between_callbacks_ms);
EXPECT_LE(average_time_between_callbacks_ms,
- 1.30 * expected_time_between_callbacks_ms);
+ 1.35 * expected_time_between_callbacks_ms);
+ }
+
+ void GetDefaultOutputStreamParameters() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_output_parameters_ =
+ audio_manager()->GetDefaultOutputStreamParameters();
+ EXPECT_TRUE(audio_output_parameters_.IsValid());
+ }
+
+ void MakeOutputStream(const AudioParameters& params) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_output_stream_ = audio_manager()->MakeAudioOutputStream(
+ params, std::string());
+ EXPECT_TRUE(audio_output_stream_);
+ }
+
+ void OpenAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_output_stream_->Open());
+ audio_output_stream_->Close();
+ audio_output_stream_ = NULL;
+ }
+
+ void OpenAndStart(AudioOutputStream::AudioSourceCallback* source) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_output_stream_->Open());
+ audio_output_stream_->Start(source);
+ }
+
+ void StopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_output_stream_->Stop();
+ audio_output_stream_->Close();
+ audio_output_stream_ = NULL;
}
scoped_ptr<base::MessageLoopForUI> loop_;
scoped_ptr<AudioManager> audio_manager_;
+ AudioParameters audio_output_parameters_;
+ AudioOutputStream* audio_output_stream_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
@@ -476,53 +582,87 @@ std::vector<bool> RunAudioRecordInputPathTests() {
class AudioAndroidInputTest : public AudioAndroidOutputTest,
public testing::WithParamInterface<bool> {
public:
- AudioAndroidInputTest() {}
+ AudioAndroidInputTest() : audio_input_stream_(NULL) {}
protected:
+ const AudioParameters& audio_input_parameters() {
+ return audio_input_parameters_;
+ }
+
AudioParameters GetInputStreamParameters() {
- AudioParameters input_params = audio_manager()->GetInputStreamParameters(
- AudioManagerBase::kDefaultDeviceId);
+ GetDefaultInputStreamParametersOnAudioThread();
+
// Override the platform effects setting to use the AudioRecord or OpenSLES
// path as requested.
int effects = GetParam() ? AudioParameters::ECHO_CANCELLER :
AudioParameters::NO_EFFECTS;
- AudioParameters params(input_params.format(),
- input_params.channel_layout(),
- input_params.input_channels(),
- input_params.sample_rate(),
- input_params.bits_per_sample(),
- input_params.frames_per_buffer(),
+ AudioParameters params(audio_input_parameters().format(),
+ audio_input_parameters().channel_layout(),
+ audio_input_parameters().input_channels(),
+ audio_input_parameters().sample_rate(),
+ audio_input_parameters().bits_per_sample(),
+ audio_input_parameters().frames_per_buffer(),
effects);
return params;
}
+ void GetDefaultInputStreamParametersOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::GetDefaultInputStreamParameters,
+ base::Unretained(this)));
+ }
+
+ void MakeAudioInputStreamOnAudioThread(const AudioParameters& params) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::MakeInputStream,
+ base::Unretained(this),
+ params));
+ }
+
+ void OpenAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::OpenAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenAndStartAudioInputStreamOnAudioThread(
+ AudioInputStream::AudioInputCallback* sink) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::OpenAndStart,
+ base::Unretained(this),
+ sink));
+ }
+
+ void StopAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::StopAndClose,
+ base::Unretained(this)));
+ }
+
void StartInputStreamCallbacks(const AudioParameters& params) {
double expected_time_between_callbacks_ms =
ExpectedTimeBetweenCallbacks(params);
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
- AudioInputStream* stream = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(stream);
+
+ MakeAudioInputStreamOnAudioThread(params);
int count = 0;
MockAudioInputCallback sink;
- EXPECT_CALL(sink,
- OnData(stream, NotNull(), params.GetBytesPerBuffer(), _, _))
+ EXPECT_CALL(sink, OnData(audio_input_stream_, NotNull(), _, _))
.Times(AtLeast(num_callbacks))
.WillRepeatedly(
- CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
- EXPECT_CALL(sink, OnError(stream)).Times(0);
- EXPECT_CALL(sink, OnClose(stream)).Times(1);
+ CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
+ EXPECT_CALL(sink, OnError(audio_input_stream_)).Times(0);
+
+ OpenAndStartAudioInputStreamOnAudioThread(&sink);
- EXPECT_TRUE(stream->Open());
- stream->Start(&sink);
start_time_ = base::TimeTicks::Now();
loop()->Run();
end_time_ = base::TimeTicks::Now();
- stream->Stop();
- stream->Close();
+
+ StopAndCloseAudioInputStreamOnAudioThread();
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
@@ -536,6 +676,41 @@ class AudioAndroidInputTest : public AudioAndroidOutputTest,
1.30 * expected_time_between_callbacks_ms);
}
+ void GetDefaultInputStreamParameters() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_parameters_ = audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ }
+
+ void MakeInputStream(const AudioParameters& params) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_stream_ = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(audio_input_stream_);
+ }
+
+ void OpenAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ void OpenAndStart(AudioInputStream::AudioInputCallback* sink) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Start(sink);
+ }
+
+ void StopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_stream_->Stop();
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ AudioInputStream* audio_input_stream_;
+ AudioParameters audio_input_parameters_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioAndroidInputTest);
@@ -545,35 +720,48 @@ class AudioAndroidInputTest : public AudioAndroidOutputTest,
TEST_P(AudioAndroidInputTest, GetDefaultInputStreamParameters) {
// We don't go through AudioAndroidInputTest::GetInputStreamParameters() here
// so that we can log the real (non-overridden) values of the effects.
- AudioParameters params = audio_manager()->GetInputStreamParameters(
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(params.IsValid());
- VLOG(1) << params;
+ GetDefaultInputStreamParametersOnAudioThread();
+ EXPECT_TRUE(audio_input_parameters().IsValid());
+ VLOG(1) << audio_input_parameters();
}
// Get the default audio output parameters and log the result.
TEST_F(AudioAndroidOutputTest, GetDefaultOutputStreamParameters) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- EXPECT_TRUE(params.IsValid());
- VLOG(1) << params;
+ GetDefaultOutputStreamParametersOnAudioThread();
+ VLOG(1) << audio_output_parameters();
+}
+
+// Verify input device enumeration.
+TEST_F(AudioAndroidInputTest, GetAudioInputDeviceNames) {
+ if (!audio_manager()->HasAudioInputDevices())
+ return;
+ AudioDeviceNames devices;
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioInputDeviceNames,
+ base::Unretained(audio_manager()),
+ &devices));
+ CheckDeviceNames(devices);
}
-// Check if low-latency output is supported and log the result as output.
-TEST_F(AudioAndroidOutputTest, IsAudioLowLatencySupported) {
- AudioManagerAndroid* manager =
- static_cast<AudioManagerAndroid*>(audio_manager());
- bool low_latency = manager->IsAudioLowLatencySupported();
- low_latency ? VLOG(0) << "Low latency output is supported"
- : VLOG(0) << "Low latency output is *not* supported";
+// Verify output device enumeration.
+TEST_F(AudioAndroidOutputTest, GetAudioOutputDeviceNames) {
+ if (!audio_manager()->HasAudioOutputDevices())
+ return;
+ AudioDeviceNames devices;
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioOutputDeviceNames,
+ base::Unretained(audio_manager()),
+ &devices));
+ CheckDeviceNames(devices);
}
// Ensure that a default input stream can be created and closed.
TEST_P(AudioAndroidInputTest, CreateAndCloseInputStream) {
AudioParameters params = GetInputStreamParameters();
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- ais->Close();
+ MakeAudioInputStreamOnAudioThread(params);
+ RunOnAudioThread(
+ base::Bind(&AudioInputStream::Close,
+ base::Unretained(audio_input_stream_)));
}
// Ensure that a default output stream can be created and closed.
@@ -581,45 +769,39 @@ TEST_P(AudioAndroidInputTest, CreateAndCloseInputStream) {
// to communication mode, and calls RegisterHeadsetReceiver, the first time
// it is called?
TEST_F(AudioAndroidOutputTest, CreateAndCloseOutputStream) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(aos);
- aos->Close();
+ GetDefaultOutputStreamParametersOnAudioThread();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
+ RunOnAudioThread(
+ base::Bind(&AudioOutputStream::Close,
+ base::Unretained(audio_output_stream_)));
}
// Ensure that a default input stream can be opened and closed.
TEST_P(AudioAndroidInputTest, OpenAndCloseInputStream) {
AudioParameters params = GetInputStreamParameters();
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- EXPECT_TRUE(ais->Open());
- ais->Close();
+ MakeAudioInputStreamOnAudioThread(params);
+ OpenAndCloseAudioInputStreamOnAudioThread();
}
// Ensure that a default output stream can be opened and closed.
TEST_F(AudioAndroidOutputTest, OpenAndCloseOutputStream) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(aos);
- EXPECT_TRUE(aos->Open());
- aos->Close();
+ GetDefaultOutputStreamParametersOnAudioThread();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
+ OpenAndCloseAudioOutputStreamOnAudioThread();
}
// Start input streaming using default input parameters and ensure that the
// callback sequence is sane.
-TEST_P(AudioAndroidInputTest, StartInputStreamCallbacks) {
- AudioParameters params = GetInputStreamParameters();
- StartInputStreamCallbacks(params);
+TEST_P(AudioAndroidInputTest, DISABLED_StartInputStreamCallbacks) {
+ AudioParameters native_params = GetInputStreamParameters();
+ StartInputStreamCallbacks(native_params);
}
// Start input streaming using non-default input parameters and ensure that the
// callback sequence is sane. The only change we make in this test is to select
// a 10ms buffer size instead of the default size.
-// TODO(henrika): possibly add support for more variations.
-TEST_P(AudioAndroidInputTest, StartInputStreamCallbacksNonDefaultParameters) {
+TEST_P(AudioAndroidInputTest,
+ DISABLED_StartInputStreamCallbacksNonDefaultParameters) {
AudioParameters native_params = GetInputStreamParameters();
AudioParameters params(native_params.format(),
native_params.channel_layout(),
@@ -634,8 +816,8 @@ TEST_P(AudioAndroidInputTest, StartInputStreamCallbacksNonDefaultParameters) {
// Start output streaming using default output parameters and ensure that the
// callback sequence is sane.
TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacks) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- StartOutputStreamCallbacks(params);
+ GetDefaultOutputStreamParametersOnAudioThread();
+ StartOutputStreamCallbacks(audio_output_parameters());
}
// Start output streaming using non-default output parameters and ensure that
@@ -643,13 +825,13 @@ TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacks) {
// select a 10ms buffer size instead of the default size and to open up the
// device in mono.
// TODO(henrika): possibly add support for more variations.
-TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
- AudioParameters native_params = GetDefaultOutputStreamParameters();
- AudioParameters params(native_params.format(),
+TEST_F(AudioAndroidOutputTest,
+       DISABLED_StartOutputStreamCallbacksNonDefaultParameters) {
+ GetDefaultOutputStreamParametersOnAudioThread();
+ AudioParameters params(audio_output_parameters().format(),
CHANNEL_LAYOUT_MONO,
- native_params.sample_rate(),
- native_params.bits_per_sample(),
- native_params.sample_rate() / 100);
+ audio_output_parameters().sample_rate(),
+ audio_output_parameters().bits_per_sample(),
+ audio_output_parameters().sample_rate() / 100);
StartOutputStreamCallbacks(params);
}
@@ -658,13 +840,12 @@ TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
// NOTE: this test requires user interaction and is not designed to run as an
// automated test on bots.
TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- VLOG(1) << params;
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(aos);
+ GetDefaultOutputStreamParametersOnAudioThread();
+ VLOG(1) << audio_output_parameters();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
std::string file_name;
+ const AudioParameters params = audio_output_parameters();
if (params.sample_rate() == 48000 && params.channels() == 2) {
file_name = kSpeechFile_16b_s_48k;
} else if (params.sample_rate() == 48000 && params.channels() == 1) {
@@ -681,13 +862,10 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
base::WaitableEvent event(false, false);
FileAudioSource source(&event, file_name);
- EXPECT_TRUE(aos->Open());
- aos->SetVolume(1.0);
- aos->Start(&source);
+ OpenAndStartAudioOutputStreamOnAudioThread(&source);
VLOG(0) << ">> Verify that the file is played out correctly...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- aos->Stop();
- aos->Close();
+ StopAndCloseAudioOutputStreamOnAudioThread();
}
// Start input streaming and run it for ten seconds while recording to a
@@ -697,9 +875,7 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
AudioParameters params = GetInputStreamParameters();
VLOG(1) << params;
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
+ MakeAudioInputStreamOnAudioThread(params);
std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
params.sample_rate(),
@@ -709,12 +885,10 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
base::WaitableEvent event(false, false);
FileAudioSink sink(&event, params, file_name);
- EXPECT_TRUE(ais->Open());
- ais->Start(&sink);
+ OpenAndStartAudioInputStreamOnAudioThread(&sink);
VLOG(0) << ">> Speak into the microphone to record audio...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- ais->Stop();
- ais->Close();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
// Same test as RunSimplexInputStreamWithFileAsSink but this time output
@@ -723,15 +897,12 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
// automated test on bots.
TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
AudioParameters in_params = GetInputStreamParameters();
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- in_params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
+ VLOG(1) << in_params;
+ MakeAudioInputStreamOnAudioThread(in_params);
- AudioParameters out_params =
- audio_manager()->GetDefaultOutputStreamParameters();
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- out_params, std::string(), std::string());
- EXPECT_TRUE(aos);
+ GetDefaultOutputStreamParametersOnAudioThread();
+ VLOG(1) << audio_output_parameters();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
in_params.sample_rate(),
@@ -740,23 +911,18 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
base::WaitableEvent event(false, false);
FileAudioSink sink(&event, in_params, file_name);
- MockAudioOutputCallback source;
+ MockAudioSourceCallback source;
- EXPECT_CALL(source, OnMoreData(NotNull(), _)).WillRepeatedly(
- Invoke(&source, &MockAudioOutputCallback::RealOnMoreData));
- EXPECT_CALL(source, OnError(aos)).Times(0);
- EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+ EXPECT_CALL(source, OnMoreData(NotNull(), _))
+ .WillRepeatedly(Invoke(RealOnMoreData));
+ EXPECT_CALL(source, OnError(audio_output_stream_)).Times(0);
- EXPECT_TRUE(ais->Open());
- EXPECT_TRUE(aos->Open());
- ais->Start(&sink);
- aos->Start(&source);
+ OpenAndStartAudioInputStreamOnAudioThread(&sink);
+ OpenAndStartAudioOutputStreamOnAudioThread(&source);
VLOG(0) << ">> Speak into the microphone to record audio";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- aos->Stop();
- ais->Stop();
- aos->Close();
- ais->Close();
+ StopAndCloseAudioOutputStreamOnAudioThread();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
// Start audio in both directions while feeding captured data into a FIFO so
@@ -776,18 +942,17 @@ TEST_P(AudioAndroidInputTest,
// audio on Android.
AudioParameters io_params(default_input_params.format(),
default_input_params.channel_layout(),
+ ChannelLayoutToChannelCount(
+ default_input_params.channel_layout()),
default_input_params.sample_rate(),
default_input_params.bits_per_sample(),
- default_input_params.sample_rate() / 100);
+ default_input_params.sample_rate() / 100,
+ default_input_params.effects());
VLOG(1) << io_params;
// Create input and output streams using the common audio parameters.
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- io_params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- io_params, std::string(), std::string());
- EXPECT_TRUE(aos);
+ MakeAudioInputStreamOnAudioThread(io_params);
+ MakeAudioOutputStreamOnAudioThread(io_params);
FullDuplexAudioSinkSource full_duplex(io_params);
@@ -795,20 +960,16 @@ TEST_P(AudioAndroidInputTest,
// delay we should expect from the FIFO. If real-time delay measurements are
// performed, the result should be reduced by this extra delay since it is
// something that has been added by the test.
- EXPECT_TRUE(ais->Open());
- EXPECT_TRUE(aos->Open());
- ais->Start(&full_duplex);
- aos->Start(&full_duplex);
+ OpenAndStartAudioInputStreamOnAudioThread(&full_duplex);
+ OpenAndStartAudioOutputStreamOnAudioThread(&full_duplex);
VLOG(1) << "HINT: an estimate of the extra FIFO delay will be updated "
<< "once per second during this test.";
VLOG(0) << ">> Speak into the mic and listen to the audio in loopback...";
fflush(stdout);
base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20));
printf("\n");
- aos->Stop();
- ais->Stop();
- aos->Close();
- ais->Close();
+ StopAndCloseAudioOutputStreamOnAudioThread();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
INSTANTIATE_TEST_CASE_P(AudioAndroidInputTest, AudioAndroidInputTest,
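
The *OnAudioThread helpers used by these tests post each stream operation to
the audio manager's task runner and block the test thread until it has run;
their bodies fall outside this hunk. A hedged sketch of the post-and-wait
pattern they presumably follow (RunOnAudioThread and RunAndSignal are
invented names, not part of the patch):

#include "base/bind.h"
#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager.h"

// Runs |closure| on the audio thread and signals |event| when done.
static void RunAndSignal(const base::Closure& closure,
                         base::WaitableEvent* event) {
  closure.Run();
  event->Signal();
}

// Posts |closure| to the audio thread and blocks until it has executed.
static void RunOnAudioThread(media::AudioManager* audio_manager,
                             const base::Closure& closure) {
  base::WaitableEvent event(false /* manual_reset */,
                            false /* initially_signaled */);
  audio_manager->GetTaskRunner()->PostTask(
      FROM_HERE, base::Bind(&RunAndSignal, closure, &event));
  event.Wait();
}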
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index 3464d89a30f..48f203ab74e 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -9,6 +9,7 @@
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "jni/AudioManagerAndroid_jni.h"
#include "media/audio/android/audio_record_input.h"
@@ -37,9 +38,6 @@ static void AddDefaultDevice(AudioDeviceNames* device_names) {
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 10;
-static const int kAudioModeNormal = 0x00000000;
-static const int kAudioModeInCommunication = 0x00000003;
-
static const int kDefaultInputBufferSize = 1024;
static const int kDefaultOutputBufferSize = 2048;
@@ -48,19 +46,26 @@ AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
}
AudioManagerAndroid::AudioManagerAndroid(AudioLogFactory* audio_log_factory)
- : AudioManagerBase(audio_log_factory) {
+ : AudioManagerBase(audio_log_factory),
+ communication_mode_is_on_(false) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
- j_audio_manager_.Reset(
- Java_AudioManagerAndroid_createAudioManagerAndroid(
- base::android::AttachCurrentThread(),
- base::android::GetApplicationContext(),
- reinterpret_cast<intptr_t>(this)));
- Init();
+ // WARNING: This is executed on the UI loop, do not add any code here which
+ // loads libraries or attempts to call out into the OS. Instead add such code
+ // to the InitializeOnAudioThread() method below.
+
+ // Task must be posted last to avoid races from handing out "this" to the
+ // audio thread.
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerAndroid::InitializeOnAudioThread,
+ base::Unretained(this)));
}
AudioManagerAndroid::~AudioManagerAndroid() {
- Close();
+ // It's safe to post a task here since Shutdown() will wait for all tasks to
+ // complete before returning.
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerAndroid::ShutdownOnAudioThread, base::Unretained(this)));
Shutdown();
}
@@ -74,13 +79,22 @@ bool AudioManagerAndroid::HasAudioInputDevices() {
void AudioManagerAndroid::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
// Always add default device parameters as first element.
+ DCHECK(device_names->empty());
AddDefaultDevice(device_names);
+ // Get list of available audio devices.
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobjectArray> j_device_array =
Java_AudioManagerAndroid_getAudioInputDeviceNames(
env, j_audio_manager_.obj());
+ if (j_device_array.is_null()) {
+    // The most probable reason for a NULL result here is that the process
+    // lacks MODIFY_AUDIO_SETTINGS or RECORD_AUDIO permissions.
+ return;
+ }
jsize len = env->GetArrayLength(j_device_array.obj());
AudioDeviceName device;
for (jsize i = 0; i < len; ++i) {
@@ -104,76 +118,96 @@ void AudioManagerAndroid::GetAudioOutputDeviceNames(
AudioParameters AudioManagerAndroid::GetInputStreamParameters(
const std::string& device_id) {
- JNIEnv* env = AttachCurrentThread();
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
// Use mono as preferred number of input channels on Android to save
// resources. Using mono also avoids a driver issue seen on Samsung
// Galaxy S3 and S4 devices. See http://crbug.com/256851 for details.
+ JNIEnv* env = AttachCurrentThread();
ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
int buffer_size = Java_AudioManagerAndroid_getMinInputFrameSize(
env, GetNativeOutputSampleRate(),
ChannelLayoutToChannelCount(channel_layout));
+ buffer_size = buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size;
int effects = AudioParameters::NO_EFFECTS;
effects |= Java_AudioManagerAndroid_shouldUseAcousticEchoCanceler(env) ?
AudioParameters::ECHO_CANCELLER : AudioParameters::NO_EFFECTS;
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
AudioParameters params(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, 0,
- GetNativeOutputSampleRate(), 16,
- buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size, effects);
+ GetNativeOutputSampleRate(), 16, buffer_size, effects);
return params;
}
AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
AudioOutputStream* stream =
- AudioManagerBase::MakeAudioOutputStream(params, std::string(),
- std::string());
- if (stream && output_stream_count() == 1) {
- SetAudioMode(kAudioModeInCommunication);
- }
-
- {
- base::AutoLock lock(streams_lock_);
- streams_.insert(static_cast<OpenSLESOutputStream*>(stream));
- }
-
+ AudioManagerBase::MakeAudioOutputStream(params, std::string());
+ streams_.insert(static_cast<OpenSLESOutputStream*>(stream));
return stream;
}
AudioInputStream* AudioManagerAndroid::MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ bool has_no_input_streams = HasNoAudioInputStreams();
AudioInputStream* stream =
AudioManagerBase::MakeAudioInputStream(params, device_id);
+
+ // The audio manager for Android creates streams intended for real-time
+ // VoIP sessions and therefore sets the audio mode to MODE_IN_COMMUNICATION.
+ // If a Bluetooth headset is used, the audio stream will use the SCO
+  // channel and therefore have a limited bandwidth (8 kHz).
+ if (stream && has_no_input_streams) {
+ communication_mode_is_on_ = true;
+ SetCommunicationAudioModeOn(true);
+ }
return stream;
}
void AudioManagerAndroid::ReleaseOutputStream(AudioOutputStream* stream) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
AudioManagerBase::ReleaseOutputStream(stream);
- if (!output_stream_count()) {
- SetAudioMode(kAudioModeNormal);
- }
- base::AutoLock lock(streams_lock_);
streams_.erase(static_cast<OpenSLESOutputStream*>(stream));
}
void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(!j_audio_manager_.is_null());
AudioManagerBase::ReleaseInputStream(stream);
+
+ // Restore the audio mode which was used before the first communication-
+ // mode stream was created.
+ if (HasNoAudioInputStreams()) {
+ communication_mode_is_on_ = false;
+ SetCommunicationAudioModeOn(false);
+ }
}
AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new OpenSLESOutputStream(this, params);
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ return new OpenSLESOutputStream(this, params, SL_ANDROID_STREAM_MEDIA);
}
AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return new OpenSLESOutputStream(this, params);
+
+ // Set stream type which matches the current system-wide audio mode used by
+ // the Android audio manager.
+ const SLint32 stream_type = communication_mode_is_on_ ?
+ SL_ANDROID_STREAM_VOICE : SL_ANDROID_STREAM_MEDIA;
+ return new OpenSLESOutputStream(this, params, stream_type);
}
AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
@@ -187,13 +221,18 @@ AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
DLOG_IF(ERROR, device_id.empty()) << "Invalid device ID!";
- // Utilize the device ID to select the correct input device.
+
+ // Use the device ID to select the correct input device.
// Note that the input device is always associated with a certain output
// device, i.e., this selection does also switch the output device.
// All input and output streams will be affected by the device selection.
- SetAudioDevice(device_id);
+ if (!SetAudioDevice(device_id)) {
+ LOG(ERROR) << "Unable to select audio device!";
+ return NULL;
+ }
if (params.effects() != AudioParameters::NO_EFFECTS) {
// Platform effects can only be enabled through the AudioRecord path.
@@ -211,22 +250,25 @@ AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
return new OpenSLESInputStream(this, params);
}
-int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
- int channels) {
- if (IsAudioLowLatencySupported()) {
- return GetAudioLowLatencyOutputFrameSize();
- } else {
- return std::max(kDefaultOutputBufferSize,
- Java_AudioManagerAndroid_getMinOutputFrameSize(
- base::android::AttachCurrentThread(),
- sample_rate, channels));
- }
+// static
+bool AudioManagerAndroid::RegisterAudioManager(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+void AudioManagerAndroid::SetMute(JNIEnv* env, jobject obj, jboolean muted) {
+ GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioManagerAndroid::DoSetMuteOnAudioThread,
+ base::Unretained(this),
+ muted));
}
AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = GetNativeOutputSampleRate();
@@ -252,57 +294,55 @@ AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
-// static
-bool AudioManagerAndroid::RegisterAudioManager(JNIEnv* env) {
- return RegisterNativesImpl(env);
+bool AudioManagerAndroid::HasNoAudioInputStreams() {
+ return input_stream_count() == 0;
}
-void AudioManagerAndroid::Init() {
+void AudioManagerAndroid::InitializeOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
+ // Create the Android audio manager on the audio thread.
+ DVLOG(2) << "Creating Java part of the audio manager";
+ j_audio_manager_.Reset(
+ Java_AudioManagerAndroid_createAudioManagerAndroid(
+ base::android::AttachCurrentThread(),
+ base::android::GetApplicationContext(),
+ reinterpret_cast<intptr_t>(this)));
+
+ // Prepare the list of audio devices and register receivers for device
+ // notifications.
Java_AudioManagerAndroid_init(
base::android::AttachCurrentThread(),
j_audio_manager_.obj());
}
-void AudioManagerAndroid::Close() {
+void AudioManagerAndroid::ShutdownOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ DVLOG(2) << "Destroying Java part of the audio manager";
Java_AudioManagerAndroid_close(
base::android::AttachCurrentThread(),
j_audio_manager_.obj());
+ j_audio_manager_.Reset();
}
-void AudioManagerAndroid::SetMute(JNIEnv* env, jobject obj, jboolean muted) {
- GetMessageLoop()->PostTask(
- FROM_HERE,
- base::Bind(
- &AudioManagerAndroid::DoSetMuteOnAudioThread,
- base::Unretained(this),
- muted));
-}
-
-void AudioManagerAndroid::DoSetMuteOnAudioThread(bool muted) {
- base::AutoLock lock(streams_lock_);
- for (OutputStreams::iterator it = streams_.begin();
- it != streams_.end(); ++it) {
- (*it)->SetMute(muted);
- }
-}
-
-void AudioManagerAndroid::SetAudioMode(int mode) {
- Java_AudioManagerAndroid_setMode(
+void AudioManagerAndroid::SetCommunicationAudioModeOn(bool on) {
+ Java_AudioManagerAndroid_setCommunicationAudioModeOn(
base::android::AttachCurrentThread(),
- j_audio_manager_.obj(), mode);
+ j_audio_manager_.obj(), on);
}
-void AudioManagerAndroid::SetAudioDevice(const std::string& device_id) {
- JNIEnv* env = AttachCurrentThread();
+bool AudioManagerAndroid::SetAudioDevice(const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
// Send the unique device ID to the Java audio manager and make the
// device switch. Provide an empty string to the Java audio manager
// if the default device is selected.
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_device_id = ConvertUTF8ToJavaString(
env,
device_id == AudioManagerBase::kDefaultDeviceId ?
std::string() : device_id);
- Java_AudioManagerAndroid_setDevice(
+ return Java_AudioManagerAndroid_setDevice(
env, j_audio_manager_.obj(), j_device_id.obj());
}
@@ -324,4 +364,23 @@ int AudioManagerAndroid::GetAudioLowLatencyOutputFrameSize() {
j_audio_manager_.obj());
}
+int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
+ int channels) {
+ if (IsAudioLowLatencySupported())
+ return GetAudioLowLatencyOutputFrameSize();
+
+ return std::max(kDefaultOutputBufferSize,
+ Java_AudioManagerAndroid_getMinOutputFrameSize(
+ base::android::AttachCurrentThread(),
+ sample_rate, channels));
+}
+
+void AudioManagerAndroid::DoSetMuteOnAudioThread(bool muted) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ for (OutputStreams::iterator it = streams_.begin();
+ it != streams_.end(); ++it) {
+ (*it)->SetMute(muted);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
index 2900c0f8e29..ee5ad28e36e 100644
--- a/chromium/media/audio/android/audio_manager_android.h
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -10,6 +10,7 @@
#include "base/android/jni_android.h"
#include "base/gtest_prod_util.h"
#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager_base.h"
namespace media {
@@ -33,8 +34,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params,
const std::string& device_id) OVERRIDE;
@@ -46,8 +46,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params,
const std::string& device_id) OVERRIDE;
@@ -67,10 +66,12 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
- void Init();
- void Close();
- void SetAudioMode(int mode);
- void SetAudioDevice(const std::string& device_id);
+ void InitializeOnAudioThread();
+ void ShutdownOnAudioThread();
+
+ bool HasNoAudioInputStreams();
+ void SetCommunicationAudioModeOn(bool on);
+ bool SetAudioDevice(const std::string& device_id);
int GetNativeOutputSampleRate();
bool IsAudioLowLatencySupported();
int GetAudioLowLatencyOutputFrameSize();
@@ -78,18 +79,15 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
void DoSetMuteOnAudioThread(bool muted);
- // Allow the AudioAndroidTest to access private methods.
- FRIEND_TEST_ALL_PREFIXES(AudioAndroidOutputTest, IsAudioLowLatencySupported);
-
// Java AudioManager instance.
base::android::ScopedJavaGlobalRef<jobject> j_audio_manager_;
typedef std::set<OpenSLESOutputStream*> OutputStreams;
OutputStreams streams_;
- // TODO(wjia): remove this lock once unit test modules are fixed to call
- // AudioManager::MakeAudioOutputStream on the audio thread. For now, this
- // lock is used to guard access to |streams_|.
- base::Lock streams_lock_;
+
+  // Set to true when the first input stream is created and to false when
+  // the last input stream is destroyed. Also affects the stream type of
+  // output streams.
+ bool communication_mode_is_on_;
DISALLOW_COPY_AND_ASSIGN(AudioManagerAndroid);
};
diff --git a/chromium/media/audio/android/audio_record_input.cc b/chromium/media/audio/android/audio_record_input.cc
index 15a0c3d3b7b..3f19588b4a6 100644
--- a/chromium/media/audio/android/audio_record_input.cc
+++ b/chromium/media/audio/android/audio_record_input.cc
@@ -7,14 +7,18 @@
#include "base/logging.h"
#include "jni/AudioRecordInput_jni.h"
#include "media/audio/android/audio_manager_android.h"
+#include "media/base/audio_bus.h"
namespace media {
AudioRecordInputStream::AudioRecordInputStream(
- AudioManagerAndroid* audio_manager, const AudioParameters& params)
+ AudioManagerAndroid* audio_manager,
+ const AudioParameters& params)
: audio_manager_(audio_manager),
callback_(NULL),
- direct_buffer_address_(NULL) {
+ direct_buffer_address_(NULL),
+ audio_bus_(media::AudioBus::Create(params)),
+ bytes_per_sample_(params.bits_per_sample() / 8) {
DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(params.IsValid());
j_audio_record_.Reset(
@@ -48,10 +52,13 @@ bool AudioRecordInputStream::RegisterAudioRecordInput(JNIEnv* env) {
void AudioRecordInputStream::OnData(JNIEnv* env, jobject obj, jint size,
jint hardware_delay_bytes) {
DCHECK(direct_buffer_address_);
+ DCHECK_EQ(size,
+ audio_bus_->frames() * audio_bus_->channels() * bytes_per_sample_);
// Passing zero as the volume parameter indicates there is no access to a
// hardware volume slider.
- callback_->OnData(this, direct_buffer_address_, size, hardware_delay_bytes,
- 0.0);
+ audio_bus_->FromInterleaved(
+ direct_buffer_address_, audio_bus_->frames(), bytes_per_sample_);
+ callback_->OnData(this, audio_bus_.get(), hardware_delay_bytes, 0.0);
}
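
AudioBus::FromInterleaved() deinterleaves the recorded Java byte buffer into
per-channel float arrays before invoking the callback. A self-contained
sketch of the equivalent conversion for 16-bit samples, written without the
media:: types (all names here are illustrative, and the exact scaling used
by AudioBus may differ slightly):

#include <cstdint>
#include <vector>

// Deinterleave 16-bit PCM into per-channel float buffers scaled to roughly
// [-1.0, 1.0), approximating AudioBus::FromInterleaved() with
// bytes_per_sample == 2.
void Deinterleave16(const int16_t* interleaved, int frames, int channels,
                    std::vector<std::vector<float> >* planar) {
  planar->assign(channels, std::vector<float>(frames));
  for (int f = 0; f < frames; ++f) {
    for (int c = 0; c < channels; ++c) {
      (*planar)[c][f] = interleaved[f * channels + c] / 32768.0f;
    }
  }
}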
bool AudioRecordInputStream::Open() {
@@ -90,8 +97,7 @@ void AudioRecordInputStream::Stop() {
base::android::AttachCurrentThread(), j_audio_record_.obj());
// The Java thread must have been stopped at this point, so we are free to
- // set |callback_|.
- callback_->OnClose(this);
+ // clear |callback_|.
callback_ = NULL;
}
diff --git a/chromium/media/audio/android/audio_record_input.h b/chromium/media/audio/android/audio_record_input.h
index 0a2578b1079..c240038360b 100644
--- a/chromium/media/audio/android/audio_record_input.h
+++ b/chromium/media/audio/android/audio_record_input.h
@@ -12,6 +12,7 @@
namespace media {
+class AudioBus;
class AudioManagerAndroid;
// Implements PCM audio input support for Android using the Java AudioRecord
@@ -64,6 +65,9 @@ class MEDIA_EXPORT AudioRecordInputStream : public AudioInputStream {
// Owned by j_audio_record_.
uint8* direct_buffer_address_;
+ scoped_ptr<media::AudioBus> audio_bus_;
+ int bytes_per_sample_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRecordInputStream);
};
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
index e51ba4f3a97..1ef3aaca5ef 100644
--- a/chromium/media/audio/android/opensles_input.cc
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -7,6 +7,7 @@
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "media/audio/android/audio_manager_android.h"
+#include "media/base/audio_bus.h"
#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
do { \
@@ -27,7 +28,8 @@ OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
simple_buffer_queue_(NULL),
active_buffer_index_(0),
buffer_size_bytes_(0),
- started_(false) {
+ started_(false),
+ audio_bus_(media::AudioBus::Create(params)) {
DVLOG(2) << __PRETTY_FUNCTION__;
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
@@ -132,6 +134,7 @@ void OpenSLESInputStream::Stop() {
(*simple_buffer_queue_)->Clear(simple_buffer_queue_));
started_ = false;
+ callback_ = NULL;
}
void OpenSLESInputStream::Close() {
@@ -141,15 +144,9 @@ void OpenSLESInputStream::Close() {
// Stop the stream if it is still recording.
Stop();
{
+ // TODO(henrika): Do we need to hold the lock here?
base::AutoLock lock(lock_);
- // TODO(henrika): we use |callback_| in Close() but |callback_| is set
- // in Start(). Hence, it should be cleared in Stop() and not used here.
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
-
// Destroy the buffer queue recorder object and invalidate all associated
// interfaces.
recorder_object_.Reset();
@@ -300,13 +297,14 @@ void OpenSLESInputStream::ReadBufferQueue() {
TRACE_EVENT0("audio", "OpenSLESOutputStream::ReadBufferQueue");
+ // Convert from interleaved format to deinterleaved audio bus format.
+ audio_bus_->FromInterleaved(audio_data_[active_buffer_index_],
+ audio_bus_->frames(),
+ format_.bitsPerSample / 8);
+
// TODO(henrika): Investigate if it is possible to get an accurate
// delay estimation.
- callback_->OnData(this,
- audio_data_[active_buffer_index_],
- buffer_size_bytes_,
- buffer_size_bytes_,
- 0.0);
+ callback_->OnData(this, audio_bus_.get(), buffer_size_bytes_, 0.0);
// Done with this buffer. Send it to device for recording.
SLresult err =
diff --git a/chromium/media/audio/android/opensles_input.h b/chromium/media/audio/android/opensles_input.h
index cb07d51f78b..288ab43425e 100644
--- a/chromium/media/audio/android/opensles_input.h
+++ b/chromium/media/audio/android/opensles_input.h
@@ -17,6 +17,7 @@
namespace media {
+class AudioBus;
class AudioManagerAndroid;
// Implements PCM audio input support for Android using the OpenSLES API.
@@ -94,6 +95,8 @@ class OpenSLESInputStream : public AudioInputStream {
bool started_;
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(OpenSLESInputStream);
};
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index b71680f0a7e..41c03c7867a 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -20,8 +20,10 @@
namespace media {
OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
- const AudioParameters& params)
+ const AudioParameters& params,
+ SLint32 stream_type)
: audio_manager_(manager),
+ stream_type_(stream_type),
callback_(NULL),
player_(NULL),
simple_buffer_queue_(NULL),
@@ -30,7 +32,8 @@ OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
started_(false),
muted_(false),
volume_(1.0) {
- DVLOG(2) << "OpenSLESOutputStream::OpenSLESOutputStream()";
+ DVLOG(2) << "OpenSLESOutputStream::OpenSLESOutputStream("
+ << "stream_type=" << stream_type << ")";
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
// Provides sampling rate in milliHertz to OpenSLES.
@@ -248,11 +251,11 @@ bool OpenSLESOutputStream::CreatePlayer() {
player_object_.Get(), SL_IID_ANDROIDCONFIGURATION, &player_config),
false);
- SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+ // Set configuration using the stream type provided at construction.
LOG_ON_FAILURE_AND_RETURN(
(*player_config)->SetConfiguration(player_config,
SL_ANDROID_KEY_STREAM_TYPE,
- &stream_type,
+ &stream_type_,
sizeof(SLint32)),
false);
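
SL_ANDROID_KEY_STREAM_TYPE has to be applied through the Android
configuration interface before the player object is realized, which is why
the stream type is now fixed at construction time. A minimal standalone
sketch of that OpenSL ES sequence (engine and sink setup omitted; error
handling reduced to early returns):

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

// Select the VOICE stream type on an unrealized player object, then realize.
bool ConfigureVoiceStreamType(SLObjectItf player_object) {
  SLAndroidConfigurationItf player_config;
  if ((*player_object)->GetInterface(player_object,
                                     SL_IID_ANDROIDCONFIGURATION,
                                     &player_config) != SL_RESULT_SUCCESS)
    return false;
  SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
  if ((*player_config)->SetConfiguration(
          player_config, SL_ANDROID_KEY_STREAM_TYPE, &stream_type,
          sizeof(SLint32)) != SL_RESULT_SUCCESS)
    return false;
  return (*player_object)->Realize(player_object, SL_BOOLEAN_FALSE) ==
         SL_RESULT_SUCCESS;
}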
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
index 623b0193894..b0b678cea6e 100644
--- a/chromium/media/audio/android/opensles_output.h
+++ b/chromium/media/audio/android/opensles_output.h
@@ -28,7 +28,8 @@ class OpenSLESOutputStream : public AudioOutputStream {
static const int kMaxNumOfBuffersInQueue = 2;
OpenSLESOutputStream(AudioManagerAndroid* manager,
- const AudioParameters& params);
+ const AudioParameters& params,
+ SLint32 stream_type);
virtual ~OpenSLESOutputStream();
@@ -77,6 +78,10 @@ class OpenSLESOutputStream : public AudioOutputStream {
AudioManagerAndroid* audio_manager_;
+ // Audio playback stream type.
+ // See SLES/OpenSLES_Android.h for details.
+ SLint32 stream_type_;
+
AudioSourceCallback* callback_;
// Shared engine interfaces for the app.
diff --git a/chromium/media/audio/audio_input_controller.cc b/chromium/media/audio/audio_input_controller.cc
index ef94d1274d6..490c62b3c16 100644
--- a/chromium/media/audio/audio_input_controller.cc
+++ b/chromium/media/audio/audio_input_controller.cc
@@ -5,24 +5,46 @@
#include "media/audio/audio_input_controller.h"
#include "base/bind.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
#include "media/base/limits.h"
#include "media/base/scoped_histogram_timer.h"
#include "media/base/user_input_monitor.h"
+using base::TimeDelta;
+
namespace {
-const int kMaxInputChannels = 2;
+const int kMaxInputChannels = 3;
// TODO(henrika): remove usage of timers and add support for proper
// notification of when the input device is removed. This was originally added
// to resolve http://crbug.com/79936 for Windows platforms. This then caused
// breakage (very hard to repro bugs!) on other platforms: See
// http://crbug.com/226327 and http://crbug.com/230972.
+// Note also that the timer is now disabled on Mac due to
+// crbug.com/357501.
const int kTimerResetIntervalSeconds = 1;
// We have received reports that the timer can be too trigger-happy on some
// Mac devices and the initial timer interval has therefore been increased
// from 1 second to 5 seconds.
const int kTimerInitialIntervalSeconds = 5;
+
+#if defined(AUDIO_POWER_MONITORING)
+// Time constant for AudioPowerMonitor.
+// The utilized smoothing factor (alpha) in the exponential filter is given
+// by 1-exp(-1/(fs*ts)), where fs is the sample rate in Hz and ts is the time
+// constant given by |kPowerMeasurementTimeConstantMilliseconds|.
+// Example: fs=44100, ts=10e-3 => alpha~0.022420
+// fs=44100, ts=20e-3 => alpha~0.165903
+// A large smoothing factor corresponds to a faster filter response to input
+// changes since y(n)=alpha*x(n)+(1-alpha)*y(n-1), where x(n) is the input
+// and y(n) is the output.
+const int kPowerMeasurementTimeConstantMilliseconds = 10;
+
+// Time in seconds between two successive measurements of audio power levels.
+const int kPowerMonitorLogIntervalSeconds = 5;
+#endif
}
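
The constants above parameterize a first-order exponential smoother,
y(n) = alpha * x(n) + (1 - alpha) * y(n-1) with
alpha = 1 - exp(-1 / (fs * ts)). A hedged sketch of that recurrence on its
own, separate from the actual AudioPowerMonitor implementation:

#include <cmath>

// One-pole smoother as described in the comment above; |alpha_| follows
// directly from the sample rate and the chosen time constant.
class ExponentialSmoother {
 public:
  ExponentialSmoother(int sample_rate_hz, double time_constant_seconds)
      : alpha_(1.0 -
               std::exp(-1.0 / (sample_rate_hz * time_constant_seconds))),
        y_(0.0) {}

  double Filter(double x) {
    y_ = alpha_ * x + (1.0 - alpha_) * y_;
    return y_;
  }

 private:
  const double alpha_;
  double y_;
};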
namespace media {
@@ -33,20 +55,20 @@ AudioInputController::Factory* AudioInputController::factory_ = NULL;
AudioInputController::AudioInputController(EventHandler* handler,
SyncWriter* sync_writer,
UserInputMonitor* user_input_monitor)
- : creator_loop_(base::MessageLoopProxy::current()),
+ : creator_task_runner_(base::MessageLoopProxy::current()),
handler_(handler),
stream_(NULL),
data_is_active_(false),
- state_(kEmpty),
+ state_(CLOSED),
sync_writer_(sync_writer),
max_volume_(0.0),
user_input_monitor_(user_input_monitor),
prev_key_down_count_(0) {
- DCHECK(creator_loop_.get());
+ DCHECK(creator_task_runner_.get());
}
AudioInputController::~AudioInputController() {
- DCHECK(kClosed == state_ || kCreated == state_ || kEmpty == state_);
+ DCHECK_EQ(state_, CLOSED);
}
// static
@@ -68,11 +90,11 @@ scoped_refptr<AudioInputController> AudioInputController::Create(
scoped_refptr<AudioInputController> controller(
new AudioInputController(event_handler, NULL, user_input_monitor));
- controller->message_loop_ = audio_manager->GetMessageLoop();
+ controller->task_runner_ = audio_manager->GetTaskRunner();
// Create and open a new audio input stream from the existing
// audio-device thread.
- if (!controller->message_loop_->PostTask(FROM_HERE,
+ if (!controller->task_runner_->PostTask(FROM_HERE,
base::Bind(&AudioInputController::DoCreate, controller,
base::Unretained(audio_manager), params, device_id))) {
controller = NULL;
@@ -99,11 +121,11 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
// the audio-manager thread.
scoped_refptr<AudioInputController> controller(
new AudioInputController(event_handler, sync_writer, user_input_monitor));
- controller->message_loop_ = audio_manager->GetMessageLoop();
+ controller->task_runner_ = audio_manager->GetTaskRunner();
// Create and open a new audio input stream from the existing
// audio-device thread. Use the provided audio-input device.
- if (!controller->message_loop_->PostTask(FROM_HERE,
+ if (!controller->task_runner_->PostTask(FROM_HERE,
base::Bind(&AudioInputController::DoCreate, controller,
base::Unretained(audio_manager), params, device_id))) {
controller = NULL;
@@ -114,7 +136,7 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
// static
scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
EventHandler* event_handler,
AudioInputStream* stream,
SyncWriter* sync_writer,
@@ -126,14 +148,14 @@ scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
// the audio-manager thread.
scoped_refptr<AudioInputController> controller(
new AudioInputController(event_handler, sync_writer, user_input_monitor));
- controller->message_loop_ = message_loop;
+ controller->task_runner_ = task_runner;
// TODO(miu): See TODO at top of file. Until that's resolved, we need to
// disable the error auto-detection here (since the audio mirroring
// implementation will reliably report error and close events). Note, of
// course, that we're assuming CreateForStream() has been called for the audio
// mirroring use case only.
- if (!controller->message_loop_->PostTask(
+ if (!controller->task_runner_->PostTask(
FROM_HERE,
base::Bind(&AudioInputController::DoCreateForStream, controller,
stream, false))) {
@@ -144,33 +166,45 @@ scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
}
void AudioInputController::Record() {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoRecord, this));
}
void AudioInputController::Close(const base::Closure& closed_task) {
DCHECK(!closed_task.is_null());
- DCHECK(creator_loop_->BelongsToCurrentThread());
+ DCHECK(creator_task_runner_->BelongsToCurrentThread());
- message_loop_->PostTaskAndReply(
+ task_runner_->PostTaskAndReply(
FROM_HERE, base::Bind(&AudioInputController::DoClose, this), closed_task);
}
void AudioInputController::SetVolume(double volume) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoSetVolume, this, volume));
}
void AudioInputController::SetAutomaticGainControl(bool enabled) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoSetAutomaticGainControl, this, enabled));
}
void AudioInputController::DoCreate(AudioManager* audio_manager,
const AudioParameters& params,
const std::string& device_id) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CreateTime");
+
+#if defined(AUDIO_POWER_MONITORING)
+ // Create the audio (power) level meter given the provided audio parameters.
+ // An AudioBus is also needed to wrap the raw data buffer from the native
+ // layer to match AudioPowerMonitor::Scan().
+ // TODO(henrika): Remove use of extra AudioBus. See http://crbug.com/375155.
+ audio_level_.reset(new media::AudioPowerMonitor(
+ params.sample_rate(),
+ TimeDelta::FromMilliseconds(kPowerMeasurementTimeConstantMilliseconds)));
+ audio_params_ = params;
+#endif
+
// TODO(miu): See TODO at top of file. Until that's resolved, assume all
// platform audio input requires the |no_data_timer_| be used to auto-detect
// errors. In reality, probably only Windows needs to be treated as
@@ -181,38 +215,49 @@ void AudioInputController::DoCreate(AudioManager* audio_manager,
void AudioInputController::DoCreateForStream(
AudioInputStream* stream_to_control, bool enable_nodata_timer) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!stream_);
stream_ = stream_to_control;
if (!stream_) {
- handler_->OnError(this);
+ if (handler_)
+ handler_->OnError(this, STREAM_CREATE_ERROR);
return;
}
if (stream_ && !stream_->Open()) {
stream_->Close();
stream_ = NULL;
- handler_->OnError(this);
+ if (handler_)
+ handler_->OnError(this, STREAM_OPEN_ERROR);
return;
}
DCHECK(!no_data_timer_.get());
+
+ // The timer is enabled for logging purposes. The NO_DATA_ERROR triggered
+ // from the timer must be ignored by the EventHandler.
+ // TODO(henrika): remove usage of timer when it has been verified on Canary
+ // that we are safe doing so. Goal is to get rid of |no_data_timer_| and
+ // everything that is tied to it. crbug.com/357569.
+ enable_nodata_timer = true;
+
if (enable_nodata_timer) {
- // Create the data timer which will call DoCheckForNoData(). The timer
+ // Create the data timer which will call FirstCheckForNoData(). The timer
// is started in DoRecord() and restarted in each DoCheckForNoData()
// callback.
no_data_timer_.reset(new base::Timer(
FROM_HERE, base::TimeDelta::FromSeconds(kTimerInitialIntervalSeconds),
- base::Bind(&AudioInputController::DoCheckForNoData,
+ base::Bind(&AudioInputController::FirstCheckForNoData,
base::Unretained(this)), false));
} else {
DVLOG(1) << "Disabled: timer check for no data.";
}
- state_ = kCreated;
- handler_->OnCreated(this);
+ state_ = CREATED;
+ if (handler_)
+ handler_->OnCreated(this);
if (user_input_monitor_) {
user_input_monitor_->EnableKeyPressMonitoring();
@@ -221,60 +266,62 @@ void AudioInputController::DoCreateForStream(
}
void AudioInputController::DoRecord() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.RecordTime");
- if (state_ != kCreated)
+ if (state_ != CREATED)
return;
{
base::AutoLock auto_lock(lock_);
- state_ = kRecording;
+ state_ = RECORDING;
}
if (no_data_timer_) {
// Start the data timer. Once |kTimerResetIntervalSeconds| have passed,
- // a callback to DoCheckForNoData() is made.
+ // a callback to FirstCheckForNoData() is made.
no_data_timer_->Reset();
}
stream_->Start(this);
- handler_->OnRecording(this);
+ if (handler_)
+ handler_->OnRecording(this);
}
void AudioInputController::DoClose() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CloseTime");
+ if (state_ == CLOSED)
+ return;
+
// Delete the timer on the same thread that created it.
no_data_timer_.reset();
- if (state_ != kClosed) {
- DoStopCloseAndClearStream(NULL);
- SetDataIsActive(false);
+ DoStopCloseAndClearStream();
+ SetDataIsActive(false);
- if (LowLatencyMode()) {
- sync_writer_->Close();
- }
+ if (SharedMemoryAndSyncSocketMode())
+ sync_writer_->Close();
- state_ = kClosed;
+ if (user_input_monitor_)
+ user_input_monitor_->DisableKeyPressMonitoring();
- if (user_input_monitor_)
- user_input_monitor_->DisableKeyPressMonitoring();
- }
+ state_ = CLOSED;
}
void AudioInputController::DoReportError() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- handler_->OnError(this);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (handler_)
+ handler_->OnError(this, STREAM_ERROR);
}
void AudioInputController::DoSetVolume(double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GE(volume, 0);
DCHECK_LE(volume, 1.0);
- if (state_ != kCreated && state_ != kRecording)
+ if (state_ != CREATED && state_ != RECORDING)
return;
// Only ask for the maximum volume at first call and use cached value
@@ -293,25 +340,32 @@ void AudioInputController::DoSetVolume(double volume) {
}
void AudioInputController::DoSetAutomaticGainControl(bool enabled) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_NE(state_, kRecording);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(state_, RECORDING);
// Ensure that the AGC state only can be modified before streaming starts.
- if (state_ != kCreated || state_ == kRecording)
+ if (state_ != CREATED)
return;
stream_->SetAutomaticGainControl(enabled);
}
+void AudioInputController::FirstCheckForNoData() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ UMA_HISTOGRAM_BOOLEAN("Media.AudioInputControllerCaptureStartupSuccess",
+ GetDataIsActive());
+ DoCheckForNoData();
+}
+
void AudioInputController::DoCheckForNoData() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!GetDataIsActive()) {
// The data-is-active marker will be false only if it has been more than
// one second since a data packet was recorded. This can happen if a
// capture device has been removed or disabled.
- handler_->OnError(this);
- return;
+ if (handler_)
+ handler_->OnError(this, NO_DATA_ERROR);
}
// Mark data as non-active. The flag will be re-enabled in OnData() each
@@ -328,13 +382,16 @@ void AudioInputController::DoCheckForNoData() {
}
void AudioInputController::OnData(AudioInputStream* stream,
- const uint8* data,
- uint32 size,
+ const AudioBus* source,
uint32 hardware_delay_bytes,
double volume) {
+ // Mark data as active to ensure that the periodic calls to
+  // DoCheckForNoData() do not report an error to the event handler.
+ SetDataIsActive(true);
+
{
base::AutoLock auto_lock(lock_);
- if (state_ != kRecording)
+ if (state_ != RECORDING)
return;
}
@@ -346,36 +403,92 @@ void AudioInputController::OnData(AudioInputStream* stream,
DVLOG_IF(6, key_pressed) << "Detected keypress.";
}
- // Mark data as active to ensure that the periodic calls to
- // DoCheckForNoData() does not report an error to the event handler.
- SetDataIsActive(true);
-
- // Use SyncSocket if we are in a low-latency mode.
- if (LowLatencyMode()) {
- sync_writer_->Write(data, size, volume, key_pressed);
+ // Use SharedMemory and SyncSocket if the client has created a SyncWriter.
+ // Used by all low-latency clients except WebSpeech.
+ if (SharedMemoryAndSyncSocketMode()) {
+ sync_writer_->Write(source, volume, key_pressed);
sync_writer_->UpdateRecordedBytes(hardware_delay_bytes);
+
+#if defined(AUDIO_POWER_MONITORING)
+ // Only do power-level measurements if an AudioPowerMonitor object has
+ // been created. Done in DoCreate() but not DoCreateForStream(), hence
+ // logging will mainly be done for WebRTC and WebSpeech clients.
+ if (!audio_level_)
+ return;
+
+ // Perform periodic audio (power) level measurements.
+ if ((base::TimeTicks::Now() - last_audio_level_log_time_).InSeconds() >
+ kPowerMonitorLogIntervalSeconds) {
+ // Wrap data into an AudioBus to match AudioPowerMonitor::Scan.
+ // TODO(henrika): remove this section when capture side uses AudioBus.
+ // See http://crbug.com/375155 for details.
+ audio_level_->Scan(*source, source->frames());
+
+ // Get current average power level and add it to the log.
+ // Possible range is given by [-inf, 0] dBFS.
+ std::pair<float, bool> result = audio_level_->ReadCurrentPowerAndClip();
+
+ // Use event handler on the audio thread to relay a message to the ARIH
+ // in content which does the actual logging on the IO thread.
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioInputController::DoLogAudioLevel, this, result.first));
+
+ last_audio_level_log_time_ = base::TimeTicks::Now();
+
+ // Reset the average power level (since we don't log continuously).
+ audio_level_->Reset();
+ }
+#endif
return;
}
- handler_->OnData(this, data, size);
+ // TODO(henrika): Investigate if we can avoid the extra copy here.
+ // (see http://crbug.com/249316 for details). AFAIK, this scope is only
+ // active for WebSpeech clients.
+ scoped_ptr<AudioBus> audio_data =
+ AudioBus::Create(source->channels(), source->frames());
+ source->CopyTo(audio_data.get());
+
+  // The posted callback owns the audio buffer until it runs, at which
+  // point ownership is passed on to DoOnData().
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioInputController::DoOnData, this, base::Passed(&audio_data)));
+}
+
+void AudioInputController::DoOnData(scoped_ptr<AudioBus> data) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (handler_)
+ handler_->OnData(this, data.get());
}
-void AudioInputController::OnClose(AudioInputStream* stream) {
- DVLOG(1) << "AudioInputController::OnClose()";
- // TODO(satish): Sometimes the device driver closes the input stream without
- // us asking for it (may be if the device was unplugged?). Check how to handle
- // such cases here.
+void AudioInputController::DoLogAudioLevel(float level_dbfs) {
+#if defined(AUDIO_POWER_MONITORING)
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (!handler_)
+ return;
+
+ std::string log_string = base::StringPrintf(
+ "AIC::OnData: average audio level=%.2f dBFS", level_dbfs);
+ static const float kSilenceThresholdDBFS = -72.24719896f;
+ if (level_dbfs < kSilenceThresholdDBFS)
+ log_string += " <=> no audio input!";
+
+ handler_->OnLog(this, log_string);
+#endif
}
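
The silence threshold of -72.24719896 dBFS appears to correspond to a
full-scale amplitude ratio of 1/4096 (2^-12), since
20 * log10(2^-12) ~= -72.2472. A quick numeric check under that assumption:

#include <cmath>
#include <cstdio>

int main() {
  // 20 * log10(2^-12) = -12 * 20 * log10(2), approximately -72.2472 dBFS.
  std::printf("%.8f\n", 20.0 * std::log10(std::pow(2.0, -12.0)));
  return 0;
}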
void AudioInputController::OnError(AudioInputStream* stream) {
// Handle error on the audio-manager thread.
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoReportError, this));
}
-void AudioInputController::DoStopCloseAndClearStream(
- base::WaitableEvent* done) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioInputController::DoStopCloseAndClearStream() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Allow calling unconditionally and bail if we don't have a stream to close.
if (stream_ != NULL) {
@@ -384,9 +497,8 @@ void AudioInputController::DoStopCloseAndClearStream(
stream_ = NULL;
}
- // Should be last in the method, do not touch "this" from here on.
- if (done != NULL)
- done->Signal();
+ // The event handler should not be touched after the stream has been closed.
+ handler_ = NULL;
}
void AudioInputController::SetDataIsActive(bool enabled) {
diff --git a/chromium/media/audio/audio_input_controller.h b/chromium/media/audio/audio_input_controller.h
index 6b40459ded6..f2771c7e9c2 100644
--- a/chromium/media/audio/audio_input_controller.h
+++ b/chromium/media/audio/audio_input_controller.h
@@ -16,6 +16,9 @@
#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_power_monitor.h"
+#include "media/base/audio_bus.h"
// An AudioInputController controls an AudioInputStream and records data
// from this input stream. The two main methods are Record() and Close() and
@@ -72,21 +75,52 @@
//
namespace media {
+// Only do power monitoring for non-mobile platforms to save resources.
+#if !defined(OS_ANDROID) && !defined(OS_IOS)
+#define AUDIO_POWER_MONITORING
+#endif
+
class UserInputMonitor;
class MEDIA_EXPORT AudioInputController
: public base::RefCountedThreadSafe<AudioInputController>,
public AudioInputStream::AudioInputCallback {
public:
+
+  // Error codes to make native logging clearer. These error codes are
+  // added to generic error strings to provide a higher degree of detail.
+  // Changing these values can lead to problems when matching native debug
+  // logs with the actual cause of an error.
+ enum ErrorCode {
+    // An unspecified error occurred.
+ UNKNOWN_ERROR = 0,
+
+ // Failed to create an audio input stream.
+ STREAM_CREATE_ERROR, // = 1
+
+ // Failed to open an audio input stream.
+ STREAM_OPEN_ERROR, // = 2
+
+ // Native input stream reports an error. Exact reason differs between
+ // platforms.
+ STREAM_ERROR, // = 3
+
+ // This can happen if a capture device has been removed or disabled.
+ NO_DATA_ERROR, // = 4
+ };
+
// An event handler that receives events from the AudioInputController. The
// following methods are all called on the audio thread.
class MEDIA_EXPORT EventHandler {
public:
virtual void OnCreated(AudioInputController* controller) = 0;
virtual void OnRecording(AudioInputController* controller) = 0;
- virtual void OnError(AudioInputController* controller) = 0;
- virtual void OnData(AudioInputController* controller, const uint8* data,
- uint32 size) = 0;
+ virtual void OnError(AudioInputController* controller,
+ ErrorCode error_code) = 0;
+ virtual void OnData(AudioInputController* controller,
+ const AudioBus* data) = 0;
+ virtual void OnLog(AudioInputController* controller,
+ const std::string& message) = 0;
protected:
virtual ~EventHandler() {}
@@ -102,12 +136,10 @@ class MEDIA_EXPORT AudioInputController
// soundcard which has been recorded.
virtual void UpdateRecordedBytes(uint32 bytes) = 0;
- // Write certain amount of data from |data|. This method returns
- // number of written bytes.
- virtual uint32 Write(const void* data,
- uint32 size,
- double volume,
- bool key_pressed) = 0;
+  // Write a certain amount of data from |data|.
+ virtual void Write(const AudioBus* data,
+ double volume,
+ bool key_pressed) = 0;
// Close this synchronous writer.
virtual void Close() = 0;
@@ -165,7 +197,7 @@ class MEDIA_EXPORT AudioInputController
// OnCreated() call from that same thread. |user_input_monitor| is used for
// typing detection and can be NULL.
static scoped_refptr<AudioInputController> CreateForStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
EventHandler* event_handler,
AudioInputStream* stream,
// External synchronous writer for audio controller.
@@ -196,23 +228,22 @@ class MEDIA_EXPORT AudioInputController
// AudioInputCallback implementation. Threading details depends on the
// device-specific implementation.
- virtual void OnData(AudioInputStream* stream, const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE;
- virtual void OnClose(AudioInputStream* stream) OVERRIDE;
+ virtual void OnData(AudioInputStream* stream,
+ const AudioBus* source,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE;
virtual void OnError(AudioInputStream* stream) OVERRIDE;
- bool LowLatencyMode() const { return sync_writer_ != NULL; }
+ bool SharedMemoryAndSyncSocketMode() const { return sync_writer_ != NULL; }
protected:
friend class base::RefCountedThreadSafe<AudioInputController>;
// Internal state of the source.
enum State {
- kEmpty,
- kCreated,
- kRecording,
- kClosed,
- kError
+ CREATED,
+ RECORDING,
+ CLOSED
};
AudioInputController(EventHandler* handler,
@@ -230,23 +261,28 @@ class MEDIA_EXPORT AudioInputController
void DoReportError();
void DoSetVolume(double volume);
void DoSetAutomaticGainControl(bool enabled);
+ void DoOnData(scoped_ptr<AudioBus> data);
+ void DoLogAudioLevel(float level_dbfs);
+
+  // Checks whether recorded data was received after the stream started and
+  // logs the result to UMA.
+ void FirstCheckForNoData();
// Method which ensures that OnError() is triggered when data recording
// times out. Called on the audio thread.
void DoCheckForNoData();
// Helper method that stops, closes, and clears |*stream_|.
- // Signals event when done if the event is not NULL.
- void DoStopCloseAndClearStream(base::WaitableEvent* done);
+ void DoStopCloseAndClearStream();
void SetDataIsActive(bool enabled);
bool GetDataIsActive();
- // Gives access to the message loop of the creating thread.
- scoped_refptr<base::MessageLoopProxy> creator_loop_;
+ // Gives access to the task runner of the creating thread.
+ scoped_refptr<base::SingleThreadTaskRunner> creator_task_runner_;
- // The message loop of audio-manager thread that this object runs on.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ // The task runner of audio-manager thread that this object runs on.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Contains the AudioInputController::EventHandler which receives state
// notifications from this class.
@@ -256,8 +292,8 @@ class MEDIA_EXPORT AudioInputController
AudioInputStream* stream_;
// |no_data_timer_| is used to call OnError() when we stop receiving
- // OnData() calls without an OnClose() call. This can occur
- // when an audio input device is unplugged whilst recording on Windows.
+ // OnData() calls. This can occur when an audio input device is unplugged
+ // whilst recording on Windows.
// See http://crbug.com/79936 for details.
// This member is only touched by the audio thread.
scoped_ptr<base::Timer> no_data_timer_;
@@ -284,6 +320,15 @@ class MEDIA_EXPORT AudioInputController
UserInputMonitor* user_input_monitor_;
+#if defined(AUDIO_POWER_MONITORING)
+ // Scans audio samples from OnData() as input to compute audio levels.
+ scoped_ptr<AudioPowerMonitor> audio_level_;
+
+ // We need these to be able to feed data to the AudioPowerMonitor.
+ media::AudioParameters audio_params_;
+ base::TimeTicks last_audio_level_log_time_;
+#endif
+
size_t prev_key_down_count_;
DISALLOW_COPY_AND_ASSIGN(AudioInputController);
diff --git a/chromium/media/audio/audio_input_controller_unittest.cc b/chromium/media/audio/audio_input_controller_unittest.cc
index a7bb600aaf4..e71232d5730 100644
--- a/chromium/media/audio/audio_input_controller_unittest.cc
+++ b/chromium/media/audio/audio_input_controller_unittest.cc
@@ -51,9 +51,13 @@ class MockAudioInputControllerEventHandler
MOCK_METHOD1(OnCreated, void(AudioInputController* controller));
MOCK_METHOD1(OnRecording, void(AudioInputController* controller));
- MOCK_METHOD1(OnError, void(AudioInputController* controller));
- MOCK_METHOD3(OnData, void(AudioInputController* controller,
- const uint8* data, uint32 size));
+ MOCK_METHOD2(OnError, void(AudioInputController* controller,
+ AudioInputController::ErrorCode error_code));
+ MOCK_METHOD2(OnData,
+ void(AudioInputController* controller, const AudioBus* data));
+ MOCK_METHOD2(OnLog,
+ void(AudioInputController* controller,
+ const std::string& message));
private:
DISALLOW_COPY_AND_ASSIGN(MockAudioInputControllerEventHandler);
@@ -113,10 +117,10 @@ TEST_F(AudioInputControllerTest, RecordAndClose) {
.Times(Exactly(1));
// OnData() shall be called ten times.
- EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
+ EXPECT_CALL(event_handler, OnData(NotNull(), NotNull()))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
- message_loop_.message_loop_proxy()));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, message_loop_.message_loop_proxy()));
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
@@ -142,9 +146,11 @@ TEST_F(AudioInputControllerTest, RecordAndClose) {
}
// Test that the AudioInputController reports an error when the input stream
-// stops without an OnClose() callback. This can happen when the underlying
-// audio layer stops feeding data as a result of a removed microphone device.
-TEST_F(AudioInputControllerTest, RecordAndError) {
+// stops. This can happen when the underlying audio layer stops feeding data as
+// a result of a removed microphone device.
+// Disabled due to crbug.com/357569 and crbug.com/357501.
+// TODO(henrika): Remove the test when the timer workaround has been removed.
+TEST_F(AudioInputControllerTest, DISABLED_RecordAndError) {
MockAudioInputControllerEventHandler event_handler;
int count = 0;
@@ -157,14 +163,15 @@ TEST_F(AudioInputControllerTest, RecordAndError) {
.Times(Exactly(1));
// OnData() shall be called ten times.
- EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
+ EXPECT_CALL(event_handler, OnData(NotNull(), NotNull()))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
- message_loop_.message_loop_proxy()));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, message_loop_.message_loop_proxy()));
// OnError() will be called after the data stream stops while the
// controller is in a recording state.
- EXPECT_CALL(event_handler, OnError(NotNull()))
+ EXPECT_CALL(event_handler, OnError(NotNull(),
+ AudioInputController::NO_DATA_ERROR))
.Times(Exactly(1))
.WillOnce(QuitMessageLoop(&message_loop_));
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index d1a6ab89f9f..bf140cbad4e 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -6,7 +6,7 @@
#include "base/basictypes.h"
#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
+#include "base/memory/scoped_vector.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "media/audio/audio_manager_base.h"
@@ -40,15 +40,16 @@ class AudioInputDevice::AudioThreadCallback
private:
int current_segment_id_;
+ ScopedVector<media::AudioBus> audio_buses_;
CaptureCallback* capture_callback_;
- scoped_ptr<AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
};
AudioInputDevice::AudioInputDevice(
scoped_ptr<AudioInputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop)
- : ScopedLoopObserver(io_loop),
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : ScopedTaskRunnerObserver(io_task_runner),
callback_(NULL),
ipc_(ipc.Pass()),
state_(IDLE),
@@ -78,7 +79,7 @@ void AudioInputDevice::Initialize(const AudioParameters& params,
void AudioInputDevice::Start() {
DCHECK(callback_) << "Initialize hasn't been called";
DVLOG(1) << "Start()";
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::StartUpOnIOThread, this));
}
@@ -91,7 +92,7 @@ void AudioInputDevice::Stop() {
stopping_hack_ = true;
}
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::ShutDownOnIOThread, this));
}
@@ -101,13 +102,13 @@ void AudioInputDevice::SetVolume(double volume) {
return;
}
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::SetVolumeOnIOThread, this, volume));
}
void AudioInputDevice::SetAutomaticGainControl(bool enabled) {
DVLOG(1) << "SetAutomaticGainControl(enabled=" << enabled << ")";
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::SetAutomaticGainControlOnIOThread,
this, enabled));
}
@@ -117,7 +118,7 @@ void AudioInputDevice::OnStreamCreated(
base::SyncSocket::Handle socket_handle,
int length,
int total_segments) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
#if defined(OS_WIN)
DCHECK(handle);
DCHECK(socket_handle);
@@ -153,7 +154,7 @@ void AudioInputDevice::OnVolume(double volume) {
void AudioInputDevice::OnStateChanged(
AudioInputIPCDelegate::State state) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Do nothing if the stream has been closed.
if (state_ < CREATING_STREAM)
@@ -186,7 +187,7 @@ void AudioInputDevice::OnStateChanged(
}
void AudioInputDevice::OnIPCClosed() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
state_ = IPC_CLOSED;
ipc_.reset();
}
@@ -198,7 +199,7 @@ AudioInputDevice::~AudioInputDevice() {
}
void AudioInputDevice::StartUpOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Make sure we don't call Start() more than once.
if (state_ != IDLE)
@@ -215,7 +216,7 @@ void AudioInputDevice::StartUpOnIOThread() {
}
void AudioInputDevice::ShutDownOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Close the stream, if we haven't already.
if (state_ >= CREATING_STREAM) {
@@ -240,13 +241,13 @@ void AudioInputDevice::ShutDownOnIOThread() {
}
void AudioInputDevice::SetVolumeOnIOThread(double volume) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ >= CREATING_STREAM)
ipc_->SetVolume(volume);
}
void AudioInputDevice::SetAutomaticGainControlOnIOThread(bool enabled) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ >= CREATING_STREAM) {
DLOG(WARNING) << "The AGC state can not be modified after starting.";
@@ -274,7 +275,6 @@ AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
total_segments),
current_segment_id_(0),
capture_callback_(capture_callback) {
- audio_bus_ = AudioBus::Create(audio_parameters_);
}
AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
@@ -282,6 +282,17 @@ AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
void AudioInputDevice::AudioThreadCallback::MapSharedMemory() {
shared_memory_.Map(memory_length_);
+
+ // Create vector of audio buses by wrapping existing blocks of memory.
+ uint8* ptr = static_cast<uint8*>(shared_memory_.memory());
+ for (int i = 0; i < total_segments_; ++i) {
+ media::AudioInputBuffer* buffer =
+ reinterpret_cast<media::AudioInputBuffer*>(ptr);
+ scoped_ptr<media::AudioBus> audio_bus =
+ media::AudioBus::WrapMemory(audio_parameters_, buffer->audio);
+ audio_buses_.push_back(audio_bus.release());
+ ptr += segment_length_;
+ }
}
void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
@@ -298,21 +309,17 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
double volume = buffer->params.volume;
bool key_pressed = buffer->params.key_pressed;
- int audio_delay_milliseconds = pending_data / bytes_per_ms_;
- int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]);
- const int bytes_per_sample = sizeof(memory[0]);
-
- if (++current_segment_id_ >= total_segments_)
- current_segment_id_ = 0;
-
- // Deinterleave each channel and convert to 32-bit floating-point
- // with nominal range -1.0 -> +1.0.
- audio_bus_->FromInterleaved(memory, audio_bus_->frames(), bytes_per_sample);
+ // Use pre-allocated audio bus wrapping existing block of shared memory.
+ media::AudioBus* audio_bus = audio_buses_[current_segment_id_];
// Deliver captured data to the client in floating point format
// and update the audio-delay measurement.
+ int audio_delay_milliseconds = pending_data / bytes_per_ms_;
capture_callback_->Capture(
- audio_bus_.get(), audio_delay_milliseconds, volume, key_pressed);
+ audio_bus, audio_delay_milliseconds, volume, key_pressed);
+
+ if (++current_segment_id_ >= total_segments_)
+ current_segment_id_ = 0;
}
} // namespace media
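
The wrapping done in MapSharedMemory() assumes the shared region is split into |total_segments_| fixed-size segments, each an AudioInputBuffer header followed by the samples for one AudioBus. A sketch of the layout arithmetic, assuming AudioBus::CalculateMemorySize() and the AudioInputBufferParameters struct from audio_parameters.h (the real code derives |segment_length_| from the IPC parameters):

// Layout per segment, repeated total_segments_ times:
//   [ AudioInputBufferParameters | deinterleaved float audio payload ]
//
// Wrapping each payload once up front means Process() never allocates or
// copies; it only indexes the pre-built |audio_buses_| vector.
uint32 data_size = AudioBus::CalculateMemorySize(audio_parameters_);
uint32 segment_length = sizeof(AudioInputBufferParameters) + data_size;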
diff --git a/chromium/media/audio/audio_input_device.h b/chromium/media/audio/audio_input_device.h
index bb7d0ff4f71..0c390028d27 100644
--- a/chromium/media/audio/audio_input_device.h
+++ b/chromium/media/audio/audio_input_device.h
@@ -62,7 +62,7 @@
#include "media/audio/audio_device_thread.h"
#include "media/audio/audio_input_ipc.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/scoped_loop_observer.h"
+#include "media/audio/scoped_task_runner_observer.h"
#include "media/base/audio_capturer_source.h"
#include "media/base/media_export.h"
@@ -77,11 +77,12 @@ namespace media {
class MEDIA_EXPORT AudioInputDevice
: NON_EXPORTED_BASE(public AudioCapturerSource),
NON_EXPORTED_BASE(public AudioInputIPCDelegate),
- NON_EXPORTED_BASE(public ScopedLoopObserver) {
+ NON_EXPORTED_BASE(public ScopedTaskRunnerObserver) {
public:
// NOTE: Clients must call Initialize() before using.
- AudioInputDevice(scoped_ptr<AudioInputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop);
+ AudioInputDevice(
+ scoped_ptr<AudioInputIPC> ipc,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
// AudioCapturerSource implementation.
virtual void Initialize(const AudioParameters& params,
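
A call site constructing the device changes accordingly; a hypothetical example, where |ipc|, |io_task_runner|, |capture_callback|, and |session_id| stand in for whatever the embedder provides and the Initialize() arguments follow the AudioCapturerSource interface as assumed here:

scoped_refptr<AudioInputDevice> device(
    new AudioInputDevice(ipc.Pass(), io_task_runner));
device->Initialize(params, capture_callback, session_id);
device->Start();
// ... later:
device->Stop();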
diff --git a/chromium/media/audio/audio_input_ipc.h b/chromium/media/audio/audio_input_ipc.h
index 0e6f2c34c4c..ce7b034d6a5 100644
--- a/chromium/media/audio/audio_input_ipc.h
+++ b/chromium/media/audio/audio_input_ipc.h
@@ -21,7 +21,8 @@ class MEDIA_EXPORT AudioInputIPCDelegate {
enum State {
kRecording,
kStopped,
- kError
+ kError,
+ kStateLast = kError
};
// Called when an AudioInputController has been created.
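
Adding a kStateLast alias is the usual prerequisite for range-validated IPC enum serialization; presumably the message definitions pair it with Chromium's IPC_ENUM_TRAITS_MAX_VALUE macro, along these lines (sketch):

IPC_ENUM_TRAITS_MAX_VALUE(media::AudioInputIPCDelegate::State,
                          media::AudioInputIPCDelegate::kStateLast)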
diff --git a/chromium/media/audio/audio_input_unittest.cc b/chromium/media/audio/audio_input_unittest.cc
index 838cab3867a..0bae9db7c4e 100644
--- a/chromium/media/audio/audio_input_unittest.cc
+++ b/chromium/media/audio/audio_input_unittest.cc
@@ -6,6 +6,8 @@
#include "base/environment.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
@@ -13,32 +15,20 @@
namespace media {
-static const int kSamplingRate = 8000;
-static const int kSamplesPerPacket = kSamplingRate / 20;
-
// This class allows us to find out if the callbacks are occurring as
// expected and if any error has been reported.
class TestInputCallback : public AudioInputStream::AudioInputCallback {
public:
- explicit TestInputCallback(int max_data_bytes)
+ explicit TestInputCallback()
: callback_count_(0),
- had_error_(0),
- max_data_bytes_(max_data_bytes) {
+ had_error_(0) {
}
virtual void OnData(AudioInputStream* stream,
- const uint8* data,
- uint32 size,
+ const AudioBus* source,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
++callback_count_;
- // Read the first byte to make sure memory is good.
- if (size) {
- ASSERT_LE(static_cast<int>(size), max_data_bytes_);
- int value = data[0];
- EXPECT_GE(value, 0);
- }
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {
++had_error_;
}
@@ -54,68 +44,144 @@ class TestInputCallback : public AudioInputStream::AudioInputCallback {
private:
int callback_count_;
int had_error_;
- int max_data_bytes_;
};
-static bool CanRunAudioTests(AudioManager* audio_man) {
- bool has_input = audio_man->HasAudioInputDevices();
+class AudioInputTest : public testing::Test {
+ public:
+ AudioInputTest() :
+ message_loop_(base::MessageLoop::TYPE_UI),
+ audio_manager_(AudioManager::CreateForTesting()),
+ audio_input_stream_(NULL) {
+ // Wait for the AudioManager to finish any initialization on the audio loop.
+ base::RunLoop().RunUntilIdle();
+ }
- if (!has_input)
- LOG(WARNING) << "No input devices detected";
+ virtual ~AudioInputTest() {
+ base::RunLoop().RunUntilIdle();
+ }
- return has_input;
-}
+ protected:
+ AudioManager* audio_manager() { return audio_manager_.get(); }
-static AudioInputStream* CreateTestAudioInputStream(AudioManager* audio_man) {
- AudioInputStream* ais = audio_man->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- kSamplingRate, 16, kSamplesPerPacket),
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(NULL != ais);
- return ais;
-}
+ bool CanRunAudioTests() {
+ bool has_input = audio_manager()->HasAudioInputDevices();
+ LOG_IF(WARNING, !has_input) << "No input devices detected";
+ return has_input;
+ }
-// Test that AudioInputStream rejects out of range parameters.
-TEST(AudioInputTest, SanityOnMakeParams) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
- return;
+ void MakeAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::MakeAudioInputStream,
+ base::Unretained(this)));
+ }
- AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_7_1, 8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
- 1000 * kSamplesPerPacket),
- AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, -16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 16, -1024),
- AudioManagerBase::kDefaultDeviceId));
-}
+ void CloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputStream::Close,
+ base::Unretained(audio_input_stream_)));
+ audio_input_stream_ = NULL;
+ }
+
+ void OpenAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::OpenAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenStopAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::OpenStopAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenAndStartAudioInputStreamOnAudioThread(
+ AudioInputStream::AudioInputCallback* sink) {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::OpenAndStart,
+ base::Unretained(this),
+ sink));
+ }
+
+ void StopAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::StopAndClose,
+ base::Unretained(this)));
+ }
+
+ void MakeAudioInputStream() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ AudioParameters params = audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ audio_input_stream_ = audio_manager()->MakeAudioInputStream(params,
+ AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(audio_input_stream_);
+ }
+
+ void OpenAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ void OpenAndStart(AudioInputStream::AudioInputCallback* sink) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Start(sink);
+ }
+
+ void OpenStopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Stop();
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ void StopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_stream_->Stop();
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ // Synchronously runs the provided callback/closure on the audio thread.
+ void RunOnAudioThread(const base::Closure& closure) {
+ if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ base::WaitableEvent event(false, false);
+ audio_manager()->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioInputTest::RunOnAudioThreadImpl,
+ base::Unretained(this),
+ closure,
+ &event));
+ event.Wait();
+ } else {
+ closure.Run();
+ }
+ }
+
+ void RunOnAudioThreadImpl(const base::Closure& closure,
+ base::WaitableEvent* event) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ closure.Run();
+ event->Signal();
+ }
+
+ base::MessageLoop message_loop_;
+ scoped_ptr<AudioManager> audio_manager_;
+ AudioInputStream* audio_input_stream_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioInputTest);
+};
// Test create and close of an AudioInputStream without recording audio.
-TEST(AudioInputTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+TEST_F(AudioInputTest, CreateAndClose) {
+ if (!CanRunAudioTests())
return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- ais->Close();
+ MakeAudioInputStreamOnAudioThread();
+ CloseAudioInputStreamOnAudioThread();
}
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
@@ -125,13 +191,11 @@ TEST(AudioInputTest, CreateAndClose) {
#define MAYBE_OpenAndClose OpenAndClose
#endif
// Test create, open and close of an AudioInputStream without recording audio.
-TEST(AudioInputTest, MAYBE_OpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+TEST_F(AudioInputTest, MAYBE_OpenAndClose) {
+ if (!CanRunAudioTests())
return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
- ais->Close();
+ MakeAudioInputStreamOnAudioThread();
+ OpenAndCloseAudioInputStreamOnAudioThread();
}
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
@@ -141,14 +205,11 @@ TEST(AudioInputTest, MAYBE_OpenAndClose) {
#define MAYBE_OpenStopAndClose OpenStopAndClose
#endif
// Test create, open, stop and close of an AudioInputStream without recording.
-TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+TEST_F(AudioInputTest, MAYBE_OpenStopAndClose) {
+ if (!CanRunAudioTests())
return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
- ais->Stop();
- ais->Close();
+ MakeAudioInputStreamOnAudioThread();
+ OpenStopAndCloseAudioInputStreamOnAudioThread();
}
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
@@ -158,28 +219,25 @@ TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
#define MAYBE_Record Record
#endif
// Test a normal recording sequence using an AudioInputStream.
-TEST(AudioInputTest, MAYBE_Record) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+// Very simple test which captures for half a second and verifies that
+// recording starts.
+TEST_F(AudioInputTest, MAYBE_Record) {
+ if (!CanRunAudioTests())
return;
- base::MessageLoop message_loop(base::MessageLoop::TYPE_DEFAULT);
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
-
- TestInputCallback test_callback(kSamplesPerPacket * 4);
- ais->Start(&test_callback);
- // Verify at least 500ms worth of audio was recorded, after giving sufficient
- // extra time.
- message_loop.PostDelayedTask(
+ MakeAudioInputStreamOnAudioThread();
+
+ TestInputCallback test_callback;
+ OpenAndStartAudioInputStreamOnAudioThread(&test_callback);
+
+ message_loop_.PostDelayedTask(
FROM_HERE,
base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(690));
- message_loop.Run();
- EXPECT_GE(test_callback.callback_count(), 1);
+ base::TimeDelta::FromMilliseconds(500));
+ message_loop_.Run();
+ EXPECT_GE(test_callback.callback_count(), 2);
EXPECT_FALSE(test_callback.had_error());
- ais->Stop();
- ais->Close();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
} // namespace media
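
The fixture's RunOnAudioThread()/RunOnAudioThreadImpl() pair is a general post-and-wait idiom; distilled into free functions it amounts to the following sketch (not code from the patch):

void RunAndSignal(const base::Closure& closure, base::WaitableEvent* done) {
  closure.Run();
  done->Signal();
}

void PostAndWait(base::SingleThreadTaskRunner* task_runner,
                 const base::Closure& closure) {
  // Automatic reset, initially unsignaled.
  base::WaitableEvent done(false, false);
  task_runner->PostTask(FROM_HERE,
                        base::Bind(&RunAndSignal, closure, &done));
  done.Wait();  // Blocks the calling thread until |closure| has run.
}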
diff --git a/chromium/media/audio/audio_io.h b/chromium/media/audio/audio_io.h
index 473af0d512f..1e1eba420b1 100644
--- a/chromium/media/audio/audio_io.h
+++ b/chromium/media/audio/audio_io.h
@@ -63,10 +63,6 @@ class MEDIA_EXPORT AudioOutputStream {
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) = 0;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) = 0;
-
// There was an error while playing a buffer. Audio source cannot be
// destroyed yet. No direct action needed by the AudioStream, but it is
  // a good place to stop accumulating sound data since it is likely that
@@ -113,13 +109,18 @@ class MEDIA_EXPORT AudioInputStream {
// Called by the audio recorder when a full packet of audio data is
// available. This is called from a special audio thread and the
// implementation should return as soon as possible.
- virtual void OnData(AudioInputStream* stream, const uint8* src,
- uint32 size, uint32 hardware_delay_bytes,
- double volume) = 0;
-
- // The stream is done with this callback, the last call received by this
- // audio sink.
- virtual void OnClose(AudioInputStream* stream) = 0;
+ // TODO(henrika): should be pure virtual when old OnData() is phased out.
+ virtual void OnData(AudioInputStream* stream,
+ const AudioBus* source,
+ uint32 hardware_delay_bytes,
+                      double volume) {}
+
+ // TODO(henrika): don't use; to be removed.
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src,
+ uint32 size,
+ uint32 hardware_delay_bytes,
+                      double volume) {}
// There was an error while recording audio. The audio sink cannot be
// destroyed yet. No direct action needed by the AudioInputStream, but it
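
Both OnData() overloads carry empty default bodies during the transition, so sinks can migrate one at a time. A sink adopting the AudioBus form might look like this sketch (the RMS logging is illustrative, not from the patch; std::sqrt is from <cmath>):

class RmsLoggingSink : public AudioInputStream::AudioInputCallback {
 public:
  virtual void OnData(AudioInputStream* stream,
                      const AudioBus* source,
                      uint32 hardware_delay_bytes,
                      double volume) OVERRIDE {
    // Samples are already deinterleaved floats in [-1.0, 1.0].
    const float* ch0 = source->channel(0);
    double sum_squares = 0.0;
    for (int i = 0; i < source->frames(); ++i)
      sum_squares += ch0[i] * ch0[i];
    DVLOG(2) << "RMS(ch0)=" << std::sqrt(sum_squares / source->frames());
  }
  virtual void OnError(AudioInputStream* stream) OVERRIDE {}
};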
diff --git a/chromium/media/audio/audio_logging.h b/chromium/media/audio/audio_logging.h
index 1d8366bad75..913b8ec4433 100644
--- a/chromium/media/audio/audio_logging.h
+++ b/chromium/media/audio/audio_logging.h
@@ -20,13 +20,11 @@ class AudioLog {
virtual ~AudioLog() {}
// Called when an audio component is created. |params| are the parameters of
- // the created stream. |input_device_id| and |output_device_id| are the
- // respective device ids for input and output. Either one or both may be
- // specified.
+ // the created stream. |device_id| is the id of the audio device opened by
+ // the created stream.
virtual void OnCreated(int component_id,
const media::AudioParameters& params,
- const std::string& input_device_id,
- const std::string& output_device_id) = 0;
+ const std::string& device_id) = 0;
// Called when an audio component is started, generally this is synonymous
// with "playing."
diff --git a/chromium/media/audio/audio_low_latency_input_output_unittest.cc b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
index c0cfa6937cf..eefd3800aab 100644
--- a/chromium/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
@@ -6,7 +6,6 @@
#include "base/environment.h"
#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/synchronization/lock.h"
#include "base/test/test_timeouts.h"
@@ -88,7 +87,7 @@ class MockAudioManager : public AudioManagerAnyPlatform {
MockAudioManager() : AudioManagerAnyPlatform(&fake_audio_log_factory_) {}
virtual ~MockAudioManager() {}
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE {
return base::MessageLoop::current()->message_loop_proxy();
}
@@ -185,7 +184,7 @@ class FullDuplexAudioSinkSource
// AudioInputStream::AudioInputCallback.
virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
base::AutoLock lock(lock_);
@@ -204,17 +203,17 @@ class FullDuplexAudioSinkSource
++input_elements_to_write_;
}
+ // TODO(henrika): fix this and use AudioFifo instead.
// Store the captured audio packet in a seekable media buffer.
- if (!buffer_->Append(src, size)) {
- // An attempt to write outside the buffer limits has been made.
- // Double the buffer capacity to ensure that we have a buffer large
- // enough to handle the current sample test scenario.
- buffer_->set_forward_capacity(2 * buffer_->forward_capacity());
- buffer_->Clear();
- }
+ // if (!buffer_->Append(src, size)) {
+ // An attempt to write outside the buffer limits has been made.
+ // Double the buffer capacity to ensure that we have a buffer large
+ // enough to handle the current sample test scenario.
+ // buffer_->set_forward_capacity(2 * buffer_->forward_capacity());
+ // buffer_->Clear();
+ // }
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
// AudioOutputStream::AudioSourceCallback.
@@ -256,13 +255,6 @@ class FullDuplexAudioSinkSource
return 0;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
protected:
@@ -314,8 +306,7 @@ class AudioOutputStreamTraits {
static StreamType* CreateStream(AudioManager* audio_manager,
const AudioParameters& params) {
- return audio_manager->MakeAudioOutputStream(params, std::string(),
- std::string());
+ return audio_manager->MakeAudioOutputStream(params, std::string());
}
};
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
index 0ca468ed4dd..915308ef77d 100644
--- a/chromium/media/audio/audio_manager.h
+++ b/chromium/media/audio/audio_manager.h
@@ -15,8 +15,7 @@
#include "media/audio/audio_parameters.h"
namespace base {
-class MessageLoop;
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -68,14 +67,14 @@ class MEDIA_EXPORT AudioManager {
// recording.
//
// Not threadsafe; in production this should only be called from the
- // Audio IO thread (see GetMessageLoop).
+ // Audio worker thread (see GetWorkerTaskRunner()).
virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
// Appends a list of available output devices to |device_names|,
// which must initially be empty.
//
// Not threadsafe; in production this should only be called from the
- // Audio IO thread (see GetMessageLoop).
+ // Audio worker thread (see GetWorkerTaskRunner()).
virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) = 0;
// Factory for all the supported stream formats. |params| defines parameters
@@ -89,11 +88,6 @@ class MEDIA_EXPORT AudioManager {
// To create a stream for the default output device, pass an empty string
// for |device_id|, otherwise the specified audio device will be opened.
//
- // The |input_device_id| is used for low-latency unified streams
- // (input+output) only and then only if the audio parameters specify a >0
- // input channel count. In other cases this id is ignored and should be
- // empty.
- //
// Returns NULL if the combination of the parameters is not supported, or if
// we have reached some other platform specific limit.
//
@@ -106,8 +100,7 @@ class MEDIA_EXPORT AudioManager {
// Do not free the returned AudioOutputStream. It is owned by AudioManager.
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) = 0;
+ const std::string& device_id) = 0;
// Creates new audio output proxy. A proxy implements
// AudioOutputStream interface, but unlike regular output stream
@@ -115,8 +108,7 @@ class MEDIA_EXPORT AudioManager {
// sound is actually playing.
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) = 0;
+ const std::string& device_id) = 0;
// Factory to create audio recording streams.
// |channels| can be 1 or 2.
@@ -133,13 +125,13 @@ class MEDIA_EXPORT AudioManager {
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) = 0;
- // Returns message loop used for audio IO.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() = 0;
+ // Returns the task runner used for audio IO.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
- // Heavyweight tasks should use GetWorkerLoop() instead of GetMessageLoop().
- // On most platforms they are the same, but some share the UI loop with the
- // audio IO loop.
- virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() = 0;
+ // Heavyweight tasks should use GetWorkerTaskRunner() instead of
+ // GetTaskRunner(). On most platforms they are the same, but some share the
+ // UI loop with the audio IO loop.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner() = 0;
// Allows clients to listen for device state changes; e.g. preferred sample
// rate or channel layout changes. The typical response to receiving this
@@ -175,7 +167,8 @@ class MEDIA_EXPORT AudioManager {
// If the hardware has only an input device (e.g. a webcam), the return value
// will be empty (which the caller can then interpret to be the default output
  // device). Implementations that don't yet support this feature must return
- // an empty string.
+ // an empty string. Must be called on the audio worker thread (see
+ // GetWorkerTaskRunner()).
virtual std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) = 0;
@@ -184,11 +177,6 @@ class MEDIA_EXPORT AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) = 0;
- // Called when a component has detected a OS level audio wedge. Shuts down
- // all active audio streams and then restarts them transparently. See
- // http://crbug.com/160920
- virtual void FixWedgedAudio() = 0;
-
protected:
AudioManager();
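
In practice the split is: stream control on GetTaskRunner(), heavyweight work such as device enumeration on GetWorkerTaskRunner(). A sketch of a caller respecting that division, assuming |audio_manager| outlives the posted task:

void EnumerateInputDevicesOnWorkerThread(AudioManager* audio_manager) {
  AudioDeviceNames device_names;
  audio_manager->GetAudioInputDeviceNames(&device_names);
  // Forward |device_names| to the requester, e.g. via another PostTask.
}

audio_manager->GetWorkerTaskRunner()->PostTask(
    FROM_HERE,
    base::Bind(&EnumerateInputDevicesOnWorkerThread, audio_manager));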
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index f7b590ae724..3b36b106fa6 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "media/audio/audio_output_dispatcher_impl.h"
@@ -29,7 +28,7 @@ static const int kDefaultMaxOutputStreams = 16;
// for all platforms.
static const int kDefaultMaxInputStreams = 16;
-static const int kMaxInputChannels = 2;
+static const int kMaxInputChannels = 3;
const char AudioManagerBase::kDefaultDeviceName[] = "Default";
const char AudioManagerBase::kDefaultDeviceId[] = "default";
@@ -38,17 +37,14 @@ const char AudioManagerBase::kLoopbackInputDeviceId[] = "loopback";
struct AudioManagerBase::DispatcherParams {
DispatcherParams(const AudioParameters& input,
const AudioParameters& output,
- const std::string& output_device_id,
- const std::string& input_device_id)
+ const std::string& output_device_id)
: input_params(input),
output_params(output),
- input_device_id(input_device_id),
output_device_id(output_device_id) {}
~DispatcherParams() {}
const AudioParameters input_params;
const AudioParameters output_params;
- const std::string input_device_id;
const std::string output_device_id;
scoped_refptr<AudioOutputDispatcher> dispatcher;
@@ -64,13 +60,11 @@ class AudioManagerBase::CompareByParams {
// We will reuse the existing dispatcher when:
// 1) Unified IO is not used, input_params and output_params of the
// existing dispatcher are the same as the requested dispatcher.
- // 2) Unified IO is used, input_params, output_params and input_device_id
- // of the existing dispatcher are the same as the request dispatcher.
+ // 2) Unified IO is used, input_params and output_params of the existing
+  //    dispatcher are the same as the requested dispatcher.
return (dispatcher_->input_params == dispatcher_in->input_params &&
dispatcher_->output_params == dispatcher_in->output_params &&
- dispatcher_->output_device_id == dispatcher_in->output_device_id &&
- (!dispatcher_->input_params.input_channels() ||
- dispatcher_->input_device_id == dispatcher_in->input_device_id));
+ dispatcher_->output_device_id == dispatcher_in->output_device_id);
}
private:
@@ -95,17 +89,14 @@ AudioManagerBase::AudioManagerBase(AudioLogFactory* audio_log_factory)
// case is sadly the browser UI thread. Failure to execute calls on the right
// thread leads to crashes and odd behavior. See http://crbug.com/158170.
// TODO(dalecurtis): We should require the message loop to be passed in.
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kDisableMainThreadAudio) &&
- base::MessageLoopProxy::current().get() &&
- base::MessageLoop::current()->IsType(base::MessageLoop::TYPE_UI)) {
- message_loop_ = base::MessageLoopProxy::current();
+ if (base::MessageLoopForUI::IsCurrent()) {
+ task_runner_ = base::MessageLoopProxy::current();
return;
}
#endif
CHECK(audio_thread_.Start());
- message_loop_ = audio_thread_.message_loop_proxy();
+ task_runner_ = audio_thread_.message_loop_proxy();
}
AudioManagerBase::~AudioManagerBase() {
@@ -125,11 +116,12 @@ base::string16 AudioManagerBase::GetAudioInputDeviceModel() {
return base::string16();
}
-scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetMessageLoop() {
- return message_loop_;
+scoped_refptr<base::SingleThreadTaskRunner> AudioManagerBase::GetTaskRunner() {
+ return task_runner_;
}
-scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
+scoped_refptr<base::SingleThreadTaskRunner>
+AudioManagerBase::GetWorkerTaskRunner() {
// Lazily start the worker thread.
if (!audio_thread_.IsRunning())
CHECK(audio_thread_.Start());
@@ -139,11 +131,10 @@ scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
// TODO(miu): Fix ~50 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
- // DCHECK(message_loop_->BelongsToCurrentThread());
+ // DCHECK(task_runner_->BelongsToCurrentThread());
if (!params.IsValid()) {
DLOG(ERROR) << "Audio parameters are invalid";
@@ -170,7 +161,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
stream = MakeLinearOutputStream(params);
break;
case AudioParameters::AUDIO_PCM_LOW_LATENCY:
- stream = MakeLowLatencyOutputStream(params, device_id, input_device_id);
+ stream = MakeLowLatencyOutputStream(params, device_id);
break;
case AudioParameters::AUDIO_FAKE:
stream = FakeAudioOutputStream::MakeFakeStream(this, params);
@@ -192,7 +183,7 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
const std::string& device_id) {
// TODO(miu): Fix ~20 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
- // DCHECK(message_loop_->BelongsToCurrentThread());
+ // DCHECK(task_runner_->BelongsToCurrentThread());
if (!params.IsValid() || (params.channels() > kMaxInputChannels) ||
device_id.empty()) {
@@ -232,9 +223,8 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ const std::string& device_id) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
// If the caller supplied an empty device id to select the default device,
// we fetch the actual device id of the default device so that the lookup
@@ -273,8 +263,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
}
DispatcherParams* dispatcher_params =
- new DispatcherParams(params, output_params, output_device_id,
- input_device_id);
+ new DispatcherParams(params, output_params, output_device_id);
AudioOutputDispatchers::iterator it =
std::find_if(output_dispatchers_.begin(), output_dispatchers_.end(),
@@ -289,12 +278,12 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
scoped_refptr<AudioOutputDispatcher> dispatcher;
if (output_params.format() != AudioParameters::AUDIO_FAKE) {
dispatcher = new AudioOutputResampler(this, params, output_params,
- output_device_id, input_device_id,
+ output_device_id,
kCloseDelay);
} else {
dispatcher = new AudioOutputDispatcherImpl(this, output_params,
output_device_id,
- input_device_id, kCloseDelay);
+ kCloseDelay);
}
dispatcher_params->dispatcher = dispatcher;
@@ -332,10 +321,10 @@ void AudioManagerBase::ReleaseInputStream(AudioInputStream* stream) {
void AudioManagerBase::Shutdown() {
// Only true when we're sharing the UI message loop with the browser. The UI
// loop is no longer running at this time and browser destruction is imminent.
- if (message_loop_->BelongsToCurrentThread()) {
+ if (task_runner_->BelongsToCurrentThread()) {
ShutdownOnAudioThread();
} else {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioManagerBase::ShutdownOnAudioThread, base::Unretained(this)));
}
@@ -344,39 +333,27 @@ void AudioManagerBase::Shutdown() {
}
void AudioManagerBase::ShutdownOnAudioThread() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- for (; it != output_dispatchers_.end(); ++it) {
- scoped_refptr<AudioOutputDispatcher>& dispatcher = (*it)->dispatcher;
- dispatcher->Shutdown();
-
- // All AudioOutputProxies must have been freed before Shutdown is called.
- // If they still exist, things will go bad. They have direct pointers to
- // both physical audio stream objects that belong to the dispatcher as
- // well as the message loop of the audio thread that will soon go away.
- // So, better crash now than later.
- DCHECK(dispatcher->HasOneRef()) << "AudioOutputProxies are still alive";
- dispatcher = NULL;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ while (!output_dispatchers_.empty()) {
+ output_dispatchers_.back()->dispatcher->Shutdown();
+ output_dispatchers_.pop_back();
}
-
- output_dispatchers_.clear();
}
void AudioManagerBase::AddOutputDeviceChangeListener(
AudioDeviceListener* listener) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
output_listeners_.AddObserver(listener);
}
void AudioManagerBase::RemoveOutputDeviceChangeListener(
AudioDeviceListener* listener) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
output_listeners_.RemoveObserver(listener);
}
void AudioManagerBase::NotifyAllOutputDeviceChangeListeners() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DVLOG(1) << "Firing OnDeviceChange() notifications.";
FOR_EACH_OBSERVER(AudioDeviceListener, output_listeners_, OnDeviceChange());
}
@@ -400,7 +377,6 @@ AudioParameters AudioManagerBase::GetInputStreamParameters(
std::string AudioManagerBase::GetAssociatedOutputDeviceID(
const std::string& input_device_id) {
- NOTIMPLEMENTED();
return "";
}
@@ -424,32 +400,4 @@ scoped_ptr<AudioLog> AudioManagerBase::CreateAudioLog(
return audio_log_factory_->CreateAudioLog(component);
}
-void AudioManagerBase::FixWedgedAudio() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-#if defined(OS_MACOSX)
- // Through trial and error, we've found that one way to restore audio after a
- // hang is to close all outstanding audio streams. Once all streams have been
- // closed, new streams appear to work correctly.
- //
- // In Chrome terms, this means we need to ask all AudioOutputDispatchers to
- // close all Open()'d streams. Once all streams across all dispatchers have
- // been closed, we ask for all previously Start()'d streams to be recreated
- // using the same AudioSourceCallback they had before.
- //
- // Since this operation takes place on the audio thread we can be sure that no
- // other state-changing stream operations will take place while the fix is in
- // progress.
- //
- // See http://crbug.com/160920 for additional details.
- for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- it != output_dispatchers_.end(); ++it) {
- (*it)->dispatcher->CloseStreamsForWedgeFix();
- }
- for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- it != output_dispatchers_.end(); ++it) {
- (*it)->dispatcher->RestartStreamsForWedgeFix();
- }
-#endif
-}
-
} // namespace media
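
Shutdown() above uses the run-or-post idiom that recurs throughout this patch, needed because the manager may share the browser UI loop; isolated, it is simply (sketch):

void RunOrPost(base::SingleThreadTaskRunner* task_runner,
               const base::Closure& task) {
  if (task_runner->BelongsToCurrentThread())
    task.Run();  // Shared UI-loop case: already on the audio thread.
  else
    task_runner->PostTask(FROM_HERE, task);
}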
diff --git a/chromium/media/audio/audio_manager_base.h b/chromium/media/audio/audio_manager_base.h
index 09b021a0d2b..bc13ee5a167 100644
--- a/chromium/media/audio/audio_manager_base.h
+++ b/chromium/media/audio/audio_manager_base.h
@@ -48,8 +48,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual ~AudioManagerBase();
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner()
+ OVERRIDE;
virtual base::string16 GetAudioInputDeviceModel() OVERRIDE;
@@ -63,16 +64,14 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
// Called internally by the audio stream when it has been closed.
virtual void ReleaseOutputStream(AudioOutputStream* stream);
@@ -84,11 +83,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
const AudioParameters& params) = 0;
// Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
- // |input_device_id| is used by unified IO to open the correct input device.
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) = 0;
+ const std::string& device_id) = 0;
// Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
// name is also from |AUDIO_PCM_LINEAR|.
@@ -99,7 +96,7 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) = 0;
- // Listeners will be notified on the AudioManager::GetMessageLoop() loop.
+ // Listeners will be notified on the GetTaskRunner() task runner.
virtual void AddOutputDeviceChangeListener(
AudioDeviceListener* listener) OVERRIDE;
virtual void RemoveOutputDeviceChangeListener(
@@ -118,7 +115,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) OVERRIDE;
- virtual void FixWedgedAudio() OVERRIDE;
+ // Get number of input or output streams.
+ int input_stream_count() const { return num_input_streams_; }
+ int output_stream_count() const { return num_output_streams_; }
protected:
AudioManagerBase(AudioLogFactory* audio_log_factory);
@@ -155,10 +154,6 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// Implementations that don't yet support this should return an empty string.
virtual std::string GetDefaultOutputDeviceID();
- // Get number of input or output streams.
- int input_stream_count() { return num_input_streams_; }
- int output_stream_count() { return num_output_streams_; }
-
private:
struct DispatcherParams;
typedef ScopedVector<DispatcherParams> AudioOutputDispatchers;
@@ -187,10 +182,10 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// Thread used to interact with audio streams created by this audio manager.
base::Thread audio_thread_;
- // The message loop of the audio thread this object runs on. Used for internal
+ // The task runner of the audio thread this object runs on. Used for internal
// tasks which run on the audio thread even after Shutdown() has been started
- // and GetMessageLoop() starts returning NULL.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ // and GetTaskRunner() starts returning NULL.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Map of cached AudioOutputDispatcher instances. Must only be touched
// from the audio thread (no locking).
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index 8c6cc10b423..902618ebd92 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -5,6 +5,7 @@
#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/fake_audio_log_factory.h"
@@ -28,8 +29,7 @@ namespace media {
// Test fixture which allows us to override the default enumeration API on
// Windows.
-class AudioManagerTest
- : public ::testing::Test {
+class AudioManagerTest : public ::testing::Test {
protected:
AudioManagerTest()
: audio_manager_(AudioManager::CreateForTesting())
@@ -37,8 +37,16 @@ class AudioManagerTest
, com_init_(base::win::ScopedCOMInitializer::kMTA)
#endif
{
+ // Wait for audio thread initialization to complete. Otherwise the
+ // enumeration type may not have been set yet.
+ base::WaitableEvent event(false, false);
+ audio_manager_->GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &base::WaitableEvent::Signal, base::Unretained(&event)));
+ event.Wait();
}
+  AudioManager* audio_manager() { return audio_manager_.get(); }
+
#if defined(OS_WIN)
bool SetMMDeviceEnumeration() {
AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
@@ -122,6 +130,29 @@ class AudioManagerTest
}
#endif
+ // Synchronously runs the provided callback/closure on the audio thread.
+ void RunOnAudioThread(const base::Closure& closure) {
+ if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ base::WaitableEvent event(false, false);
+ audio_manager_->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioManagerTest::RunOnAudioThreadImpl,
+ base::Unretained(this),
+ closure,
+ &event));
+ event.Wait();
+ } else {
+ closure.Run();
+ }
+ }
+
+ void RunOnAudioThreadImpl(const base::Closure& closure,
+ base::WaitableEvent* event) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ closure.Run();
+ event->Signal();
+ }
+
FakeAudioLogFactory fake_audio_log_factory_;
scoped_ptr<AudioManager> audio_manager_;
@@ -137,7 +168,10 @@ TEST_F(AudioManagerTest, EnumerateInputDevices) {
return;
AudioDeviceNames device_names;
- audio_manager_->GetAudioInputDeviceNames(&device_names);
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioInputDeviceNames,
+ base::Unretained(audio_manager()),
+ &device_names));
CheckDeviceNames(device_names);
}
@@ -147,7 +181,10 @@ TEST_F(AudioManagerTest, EnumerateOutputDevices) {
return;
AudioDeviceNames device_names;
- audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioOutputDeviceNames,
+ base::Unretained(audio_manager()),
+ &device_names));
CheckDeviceNames(device_names);
}
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index 92f9f25de53..232b77d727d 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -6,7 +6,6 @@
#include "base/bind.h"
#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/task_runner_util.h"
#include "base/threading/platform_thread.h"
@@ -14,7 +13,6 @@
#include "build/build_config.h"
#include "media/base/scoped_histogram_timer.h"
-using base::Time;
using base::TimeDelta;
namespace media {
@@ -23,35 +21,24 @@ namespace media {
// Time constant for AudioPowerMonitor. See AudioPowerMonitor ctor comments for
// semantics. This value was arbitrarily chosen, but seems to work well.
static const int kPowerMeasurementTimeConstantMillis = 10;
-
-// Desired frequency of calls to EventHandler::OnPowerMeasured() for reporting
-// power levels in the audio signal.
-static const int kPowerMeasurementsPerSecond = 4;
#endif
-// Polling-related constants.
-const int AudioOutputController::kPollNumAttempts = 3;
-const int AudioOutputController::kPollPauseInMilliseconds = 3;
-
AudioOutputController::AudioOutputController(
AudioManager* audio_manager,
EventHandler* handler,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
SyncReader* sync_reader)
: audio_manager_(audio_manager),
params_(params),
handler_(handler),
output_device_id_(output_device_id),
- input_device_id_(input_device_id),
stream_(NULL),
diverting_to_stream_(NULL),
volume_(1.0),
state_(kEmpty),
- num_allowed_io_(0),
sync_reader_(sync_reader),
- message_loop_(audio_manager->GetMessageLoop()),
+ message_loop_(audio_manager->GetTaskRunner()),
#if defined(AUDIO_POWER_MONITORING)
power_monitor_(
params.sample_rate(),
@@ -74,7 +61,6 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
EventHandler* event_handler,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
SyncReader* sync_reader) {
DCHECK(audio_manager);
DCHECK(sync_reader);
@@ -83,8 +69,7 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
return NULL;
scoped_refptr<AudioOutputController> controller(new AudioOutputController(
- audio_manager, event_handler, params, output_device_id, input_device_id,
- sync_reader));
+ audio_manager, event_handler, params, output_device_id, sync_reader));
controller->message_loop_->PostTask(FROM_HERE, base::Bind(
&AudioOutputController::DoCreate, controller, false));
return controller;
@@ -143,8 +128,7 @@ void AudioOutputController::DoCreate(bool is_for_device_change) {
stream_ = diverting_to_stream_ ?
diverting_to_stream_ :
- audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_,
- input_device_id_);
+ audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_);
if (!stream_) {
state_ = kError;
handler_->OnError();
@@ -188,18 +172,6 @@ void AudioOutputController::DoPlay() {
state_ = kPlaying;
-#if defined(AUDIO_POWER_MONITORING)
- power_monitor_.Reset();
- power_poll_callback_.Reset(
- base::Bind(&AudioOutputController::ReportPowerMeasurementPeriodically,
- this));
- // Run the callback to send an initial notification that we're starting in
- // silence, and to schedule periodic callbacks.
- power_poll_callback_.callback().Run();
-#endif
-
- on_more_io_data_called_ = 0;
- AllowEntryToOnMoreIOData();
stream_->Start(this);
// For UMA tracking purposes, start the wedge detection timer. This allows us
@@ -221,28 +193,17 @@ void AudioOutputController::DoPlay() {
handler_->OnPlaying();
}
-#if defined(AUDIO_POWER_MONITORING)
-void AudioOutputController::ReportPowerMeasurementPeriodically() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- const std::pair<float, bool>& reading =
- power_monitor_.ReadCurrentPowerAndClip();
- handler_->OnPowerMeasured(reading.first, reading.second);
- message_loop_->PostDelayedTask(
- FROM_HERE, power_poll_callback_.callback(),
- TimeDelta::FromSeconds(1) / kPowerMeasurementsPerSecond);
-}
-#endif
-
void AudioOutputController::StopStream() {
DCHECK(message_loop_->BelongsToCurrentThread());
if (state_ == kPlaying) {
wedge_timer_.reset();
stream_->Stop();
- DisallowEntryToOnMoreIOData();
#if defined(AUDIO_POWER_MONITORING)
- power_poll_callback_.Cancel();
+    // A stopped stream is silent, and power_monitor_.Scan() is no longer being
+    // called, so we must reset the power monitor.
+ power_monitor_.Reset();
#endif
state_ = kPaused;
@@ -264,11 +225,6 @@ void AudioOutputController::DoPause() {
// a better way to know when it should exit PPB_Audio_Shared::Run().
sync_reader_->UpdatePendingBytes(-1);
-#if defined(AUDIO_POWER_MONITORING)
- // Paused means silence follows.
- handler_->OnPowerMeasured(AudioPowerMonitor::zero_power(), false);
-#endif
-
handler_->OnPaused();
}
@@ -335,14 +291,7 @@ void AudioOutputController::DoReportError() {
int AudioOutputController::OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) {
- return OnMoreIOData(NULL, dest, buffers_state);
-}
-
-int AudioOutputController::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- DisallowEntryToOnMoreIOData();
- TRACE_EVENT0("audio", "AudioOutputController::OnMoreIOData");
+ TRACE_EVENT0("audio", "AudioOutputController::OnMoreData");
// Indicate that we haven't wedged (at least not indefinitely, WedgeCheck()
// may have already fired if OnMoreIOData() took an abnormal amount of time).
@@ -351,7 +300,7 @@ int AudioOutputController::OnMoreIOData(AudioBus* source,
if (base::AtomicRefCountIsZero(&on_more_io_data_called_))
base::AtomicRefCountInc(&on_more_io_data_called_);
- sync_reader_->Read(source, dest);
+ sync_reader_->Read(dest);
const int frames = dest->frames();
sync_reader_->UpdatePendingBytes(
@@ -361,7 +310,6 @@ int AudioOutputController::OnMoreIOData(AudioBus* source,
power_monitor_.Scan(*dest, frames);
#endif
- AllowEntryToOnMoreIOData();
return frames;
}
@@ -463,14 +411,13 @@ void AudioOutputController::DoStopDiverting() {
DCHECK(!diverting_to_stream_);
}
-void AudioOutputController::AllowEntryToOnMoreIOData() {
- DCHECK(base::AtomicRefCountIsZero(&num_allowed_io_));
- base::AtomicRefCountInc(&num_allowed_io_);
-}
-
-void AudioOutputController::DisallowEntryToOnMoreIOData() {
- const bool is_zero = !base::AtomicRefCountDec(&num_allowed_io_);
- DCHECK(is_zero);
+std::pair<float, bool> AudioOutputController::ReadCurrentPowerAndClip() {
+#if defined(AUDIO_POWER_MONITORING)
+ return power_monitor_.ReadCurrentPowerAndClip();
+#else
+ NOTREACHED();
+ return std::make_pair(AudioPowerMonitor::zero_power(), false);
+#endif
}
void AudioOutputController::WedgeCheck() {
@@ -478,15 +425,8 @@ void AudioOutputController::WedgeCheck() {
// If we should be playing and we haven't, that's a wedge.
if (state_ == kPlaying) {
- const bool playback_success =
- base::AtomicRefCountIsOne(&on_more_io_data_called_);
-
- UMA_HISTOGRAM_BOOLEAN(
- "Media.AudioOutputControllerPlaybackStartupSuccess", playback_success);
-
- // Let the AudioManager try and fix it.
- if (!playback_success)
- audio_manager_->FixWedgedAudio();
+ UMA_HISTOGRAM_BOOLEAN("Media.AudioOutputControllerPlaybackStartupSuccess",
+ base::AtomicRefCountIsOne(&on_more_io_data_called_));
}
}
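
With FixWedgedAudio() removed, wedge detection is purely diagnostic: the device thread raises a one-way atomic flag on the first OnMoreData(), and the delayed WedgeCheck() task samples it for UMA. The handshake, condensed as a sketch using the member names from the patch:

// Device thread, inside OnMoreData():
if (base::AtomicRefCountIsZero(&on_more_io_data_called_))
  base::AtomicRefCountInc(&on_more_io_data_called_);  // 0 -> 1, set once.

// Audio manager thread, in WedgeCheck() after the startup timer fires:
if (state_ == kPlaying) {
  UMA_HISTOGRAM_BOOLEAN("Media.AudioOutputControllerPlaybackStartupSuccess",
                        base::AtomicRefCountIsOne(&on_more_io_data_called_));
}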
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index d16ce9e79b6..0b02ee2bbb2 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -7,7 +7,6 @@
#include "base/atomic_ref_count.h"
#include "base/callback.h"
-#include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
@@ -70,7 +69,6 @@ class MEDIA_EXPORT AudioOutputController
public:
virtual void OnCreated() = 0;
virtual void OnPlaying() = 0;
- virtual void OnPowerMeasured(float power_dbfs, bool clipped) = 0;
virtual void OnPaused() = 0;
virtual void OnError() = 0;
virtual void OnDeviceChange(int new_buffer_size, int new_sample_rate) = 0;
@@ -93,9 +91,8 @@ class MEDIA_EXPORT AudioOutputController
virtual void UpdatePendingBytes(uint32 bytes) = 0;
// Attempts to completely fill |dest|, zeroing |dest| if the request can not
- // be fulfilled (due to timeout). |source| may optionally be provided for
- // input data.
- virtual void Read(const AudioBus* source, AudioBus* dest) = 0;
+ // be fulfilled (due to timeout).
+ virtual void Read(AudioBus* dest) = 0;
// Close this synchronous reader.
virtual void Close() = 0;
@@ -107,13 +104,11 @@ class MEDIA_EXPORT AudioOutputController
// OnCreated() call from the same audio manager thread. |audio_manager| must
// outlive AudioOutputController.
// The |output_device_id| can be either empty (default device) or specify a
- // specific hardware device for audio output. The |input_device_id| is
- // used only for unified audio when opening up input and output at the same
- // time (controlled by |params.input_channel_count()|).
+ // specific hardware device for audio output.
static scoped_refptr<AudioOutputController> Create(
AudioManager* audio_manager, EventHandler* event_handler,
const AudioParameters& params, const std::string& output_device_id,
- const std::string& input_device_id, SyncReader* sync_reader);
+ SyncReader* sync_reader);
// Methods to control playback of the stream.
@@ -155,9 +150,6 @@ class MEDIA_EXPORT AudioOutputController
// AudioSourceCallback implementation.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
virtual void OnError(AudioOutputStream* stream) OVERRIDE;
// AudioDeviceListener implementation. When called AudioOutputController will
@@ -171,6 +163,10 @@ class MEDIA_EXPORT AudioOutputController
virtual void StartDiverting(AudioOutputStream* to_stream) OVERRIDE;
virtual void StopDiverting() OVERRIDE;
+ // Accessor for AudioPowerMonitor::ReadCurrentPowerAndClip(). See comments in
+ // audio_power_monitor.h for usage. This may be called on any thread.
+ std::pair<float, bool> ReadCurrentPowerAndClip();
+
protected:
// Internal state of the source.
enum State {
@@ -186,14 +182,9 @@ class MEDIA_EXPORT AudioOutputController
virtual ~AudioOutputController();
private:
- // We are polling sync reader if data became available.
- static const int kPollNumAttempts;
- static const int kPollPauseInMilliseconds;
-
AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
SyncReader* sync_reader);
// The following methods are executed on the audio manager thread.
@@ -208,22 +199,13 @@ class MEDIA_EXPORT AudioOutputController
void DoStartDiverting(AudioOutputStream* to_stream);
void DoStopDiverting();
- // Calls EventHandler::OnPowerMeasured() with the current power level and then
- // schedules itself to be called again later.
- void ReportPowerMeasurementPeriodically();
-
// Helper method that stops the physical stream.
void StopStream();
// Helper method that stops, closes, and NULLs |*stream_|.
void DoStopCloseAndClearStream();
- // Sanity-check that entry/exit to OnMoreIOData() by the hardware audio thread
- // happens only between AudioOutputStream::Start() and Stop().
- void AllowEntryToOnMoreIOData();
- void DisallowEntryToOnMoreIOData();
-
- // Checks if a stream was started successfully but never calls OnMoreIOData().
+ // Checks if a stream was started successfully but never calls OnMoreData().
void WedgeCheck();
AudioManager* const audio_manager_;
@@ -234,9 +216,6 @@ class MEDIA_EXPORT AudioOutputController
// default output device.
std::string output_device_id_;
- // Used by the unified IO to open the correct input device.
- const std::string input_device_id_;
-
AudioOutputStream* stream_;
// When non-NULL, audio is being diverted to this stream.
@@ -250,25 +229,15 @@ class MEDIA_EXPORT AudioOutputController
// is not required for reading on the audio manager thread.
State state_;
- // Binary semaphore, used to ensure that only one thread enters the
- // OnMoreIOData() method, and only when it is valid to do so. This is for
- // sanity-checking the behavior of platform implementations of
- // AudioOutputStream. In other words, multiple contention is not expected,
- // nor in the design here.
- base::AtomicRefCount num_allowed_io_;
-
// SyncReader is used only in low latency mode for synchronous reading.
SyncReader* const sync_reader_;
// The message loop of audio manager thread that this object runs on.
- const scoped_refptr<base::MessageLoopProxy> message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> message_loop_;
#if defined(AUDIO_POWER_MONITORING)
- // Scans audio samples from OnMoreIOData() as input to compute power levels.
+ // Scans audio samples from OnMoreData() as input to compute power levels.
AudioPowerMonitor power_monitor_;
-
- // Periodic callback to report power levels during playback.
- base::CancelableClosure power_poll_callback_;
#endif
// Flags when we've asked for a stream to start but it never did.
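
The push-based OnPowerMeasured() callbacks removed above are replaced by the pull-based ReadCurrentPowerAndClip() accessor, so interested clients now poll at their own cadence. A minimal sketch of that polling pattern, with a stand-in PowerSource class and a made-up -30 dBFS reading in place of the real controller:

#include <cstdio>
#include <utility>

// Stand-in for AudioOutputController; only the accessor's shape matters here.
class PowerSource {
 public:
  // Returns <average power in dBFS, whether any sample clipped>.
  std::pair<float, bool> ReadCurrentPowerAndClip() { return {-30.0f, false}; }
};

int main() {
  PowerSource controller;
  // A UI layer would call this from its own timer instead of being pushed to.
  const std::pair<float, bool> reading = controller.ReadCurrentPowerAndClip();
  std::printf("power=%.1f dBFS clipped=%d\n", reading.first, reading.second);
}
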
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index 457265ec970..125763c9bfe 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -39,7 +39,6 @@ class MockAudioOutputControllerEventHandler
MOCK_METHOD0(OnCreated, void());
MOCK_METHOD0(OnPlaying, void());
- MOCK_METHOD2(OnPowerMeasured, void(float power_dbfs, bool clipped));
MOCK_METHOD0(OnPaused, void());
MOCK_METHOD0(OnError, void());
MOCK_METHOD2(OnDeviceChange, void(int new_buffer_size, int new_sample_rate));
@@ -54,7 +53,7 @@ class MockAudioOutputControllerSyncReader
MockAudioOutputControllerSyncReader() {}
MOCK_METHOD1(UpdatePendingBytes, void(uint32 bytes));
- MOCK_METHOD2(Read, void(const AudioBus* source, AudioBus* dest));
+ MOCK_METHOD1(Read, void(AudioBus* dest));
MOCK_METHOD0(Close, void());
private:
@@ -84,10 +83,10 @@ ACTION_P(SignalEvent, event) {
static const float kBufferNonZeroData = 1.0f;
ACTION(PopulateBuffer) {
- arg1->Zero();
+ arg0->Zero();
// Note: To confirm the buffer will be populated in these tests, it's
// sufficient that only the first float in channel 0 is set to the value.
- arg1->channel(0)[0] = kBufferNonZeroData;
+ arg0->channel(0)[0] = kBufferNonZeroData;
}
class AudioOutputControllerTest : public testing::Test {
@@ -121,7 +120,7 @@ class AudioOutputControllerTest : public testing::Test {
controller_ = AudioOutputController::Create(
audio_manager_.get(), &mock_event_handler_, params_, std::string(),
- std::string(), &mock_sync_reader_);
+ &mock_sync_reader_);
if (controller_.get())
controller_->SetVolume(kTestVolume);
@@ -129,20 +128,15 @@ class AudioOutputControllerTest : public testing::Test {
}
void Play() {
- // Expect the event handler to receive one OnPlaying() call and one or more
- // OnPowerMeasured() calls.
+ // Expect the event handler to receive one OnPlaying() call.
EXPECT_CALL(mock_event_handler_, OnPlaying())
.WillOnce(SignalEvent(&play_event_));
-#if defined(AUDIO_POWER_MONITORING)
- EXPECT_CALL(mock_event_handler_, OnPowerMeasured(_, false))
- .Times(AtLeast(1));
-#endif
// During playback, the mock pretends to provide audio data rendered and
// sent from the render process.
EXPECT_CALL(mock_sync_reader_, UpdatePendingBytes(_))
.Times(AtLeast(1));
- EXPECT_CALL(mock_sync_reader_, Read(_, _))
+ EXPECT_CALL(mock_sync_reader_, Read(_))
.WillRepeatedly(DoAll(PopulateBuffer(),
SignalEvent(&read_event_)));
controller_->Play();
@@ -166,7 +160,7 @@ class AudioOutputControllerTest : public testing::Test {
// Simulate a device change event to AudioOutputController from the
// AudioManager.
- audio_manager_->GetMessageLoop()->PostTask(
+ audio_manager_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioOutputController::OnDeviceChange, controller_));
}
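
Dropping |source| from SyncReader::Read() is why PopulateBuffer switches from arg1 to arg0: gmock ACTIONs number the mocked call's arguments from zero, so |dest| is now the first argument. A compilable sketch of the same idea, using a hypothetical StubBus instead of media::AudioBus (links against gmock/gtest with gmock_main):

#include <gmock/gmock.h>
#include <gtest/gtest.h>

struct StubBus {
  float data[16] = {};
  void Zero() { for (float& f : data) f = 0.0f; }
  float* channel(int) { return data; }
};

class MockReader {
 public:
  MOCK_METHOD1(Read, void(StubBus* dest));
};

// |dest| is the only (zeroth) argument now, hence arg0 rather than arg1.
ACTION(PopulateBufferSketch) {
  arg0->Zero();
  arg0->channel(0)[0] = 1.0f;  // non-zero marker proves the buffer was filled
}

TEST(ReaderSketch, PopulatesBuffer) {
  MockReader reader;
  StubBus bus;
  EXPECT_CALL(reader, Read(testing::_)).WillOnce(PopulateBufferSketch());
  reader.Read(&bus);
  EXPECT_EQ(1.0f, bus.channel(0)[0]);
}
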
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index 1f9efc185bd..5cc602197eb 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -6,7 +6,6 @@
#include "base/basictypes.h"
#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "media/audio/audio_output_controller.h"
@@ -33,15 +32,14 @@ class AudioOutputDevice::AudioThreadCallback
private:
AudioRendererSink::RenderCallback* render_callback_;
- scoped_ptr<AudioBus> input_bus_;
scoped_ptr<AudioBus> output_bus_;
DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
};
AudioOutputDevice::AudioOutputDevice(
scoped_ptr<AudioOutputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop)
- : ScopedLoopObserver(io_loop),
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : ScopedTaskRunnerObserver(io_task_runner),
callback_(NULL),
ipc_(ipc.Pass()),
state_(IDLE),
@@ -58,10 +56,10 @@ AudioOutputDevice::AudioOutputDevice(
COMPILE_ASSERT(PAUSED < PLAYING, invalid_enum_value_assignment_3);
}
-void AudioOutputDevice::InitializeUnifiedStream(const AudioParameters& params,
+void AudioOutputDevice::InitializeWithSessionId(const AudioParameters& params,
RenderCallback* callback,
int session_id) {
- DCHECK(!callback_) << "Calling InitializeUnifiedStream() twice?";
+ DCHECK(!callback_) << "Calling InitializeWithSessionId() twice?";
DCHECK(params.IsValid());
audio_parameters_ = params;
callback_ = callback;
@@ -70,7 +68,7 @@ void AudioOutputDevice::InitializeUnifiedStream(const AudioParameters& params,
void AudioOutputDevice::Initialize(const AudioParameters& params,
RenderCallback* callback) {
- InitializeUnifiedStream(params, callback, 0);
+ InitializeWithSessionId(params, callback, 0);
}
AudioOutputDevice::~AudioOutputDevice() {
@@ -81,7 +79,7 @@ AudioOutputDevice::~AudioOutputDevice() {
void AudioOutputDevice::Start() {
DCHECK(callback_) << "Initialize hasn't been called";
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::CreateStreamOnIOThread, this,
audio_parameters_));
}
@@ -93,17 +91,17 @@ void AudioOutputDevice::Stop() {
stopping_hack_ = true;
}
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::ShutDownOnIOThread, this));
}
void AudioOutputDevice::Play() {
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::PlayOnIOThread, this));
}
void AudioOutputDevice::Pause() {
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::PauseOnIOThread, this));
}
@@ -111,7 +109,7 @@ bool AudioOutputDevice::SetVolume(double volume) {
if (volume < 0 || volume > 1.0)
return false;
- if (!message_loop()->PostTask(FROM_HERE,
+ if (!task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::SetVolumeOnIOThread, this, volume))) {
return false;
}
@@ -120,7 +118,7 @@ bool AudioOutputDevice::SetVolume(double volume) {
}
void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == IDLE) {
state_ = CREATING_STREAM;
ipc_->CreateStream(this, params, session_id_);
@@ -128,7 +126,7 @@ void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
}
void AudioOutputDevice::PlayOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == PAUSED) {
ipc_->PlayStream();
state_ = PLAYING;
@@ -139,7 +137,7 @@ void AudioOutputDevice::PlayOnIOThread() {
}
void AudioOutputDevice::PauseOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == PLAYING) {
ipc_->PauseStream();
state_ = PAUSED;
@@ -148,7 +146,7 @@ void AudioOutputDevice::PauseOnIOThread() {
}
void AudioOutputDevice::ShutDownOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Close the stream, if we haven't already.
if (state_ >= CREATING_STREAM) {
@@ -172,13 +170,13 @@ void AudioOutputDevice::ShutDownOnIOThread() {
}
void AudioOutputDevice::SetVolumeOnIOThread(double volume) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ >= CREATING_STREAM)
ipc_->SetVolume(volume);
}
void AudioOutputDevice::OnStateChanged(AudioOutputIPCDelegate::State state) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Do nothing if the stream has been closed.
if (state_ < CREATING_STREAM)
@@ -211,7 +209,7 @@ void AudioOutputDevice::OnStreamCreated(
base::SharedMemoryHandle handle,
base::SyncSocket::Handle socket_handle,
int length) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
#if defined(OS_WIN)
DCHECK(handle);
DCHECK(socket_handle);
@@ -254,7 +252,7 @@ void AudioOutputDevice::OnStreamCreated(
}
void AudioOutputDevice::OnIPCClosed() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
state_ = IPC_CLOSED;
ipc_.reset();
}
@@ -280,26 +278,10 @@ AudioOutputDevice::AudioThreadCallback::~AudioThreadCallback() {
void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() {
CHECK_EQ(total_segments_, 1);
CHECK(shared_memory_.Map(memory_length_));
-
- // Calculate output and input memory size.
- int output_memory_size = AudioBus::CalculateMemorySize(audio_parameters_);
- int input_channels = audio_parameters_.input_channels();
- int frames = audio_parameters_.frames_per_buffer();
- int input_memory_size = AudioBus::CalculateMemorySize(input_channels, frames);
-
- int io_size = output_memory_size + input_memory_size;
-
- DCHECK_EQ(memory_length_, io_size);
+ DCHECK_EQ(memory_length_, AudioBus::CalculateMemorySize(audio_parameters_));
output_bus_ =
AudioBus::WrapMemory(audio_parameters_, shared_memory_.memory());
-
- if (input_channels > 0) {
- // The input data is after the output data.
- char* input_data =
- static_cast<char*>(shared_memory_.memory()) + output_memory_size;
- input_bus_ = AudioBus::WrapMemory(input_channels, frames, input_data);
- }
}
// Called whenever we receive notifications about pending data.
@@ -316,13 +298,7 @@ void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
// Update the audio-delay measurement then ask client to render audio. Since
// |output_bus_| is wrapping the shared memory the Render() call is writing
// directly into the shared memory.
- int input_channels = audio_parameters_.input_channels();
- if (input_bus_ && input_channels > 0) {
- render_callback_->RenderIO(
- input_bus_.get(), output_bus_.get(), audio_delay_milliseconds);
- } else {
- render_callback_->Render(output_bus_.get(), audio_delay_milliseconds);
- }
+ render_callback_->Render(output_bus_.get(), audio_delay_milliseconds);
}
} // namespace media.
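
With unified IO gone, the shared segment carries exactly one output bus, which is why MapSharedMemory() collapses to a single length check before wrapping. A self-contained restatement of that contract, using a plain float buffer as a stand-in for base::SharedMemory and a simplified size formula in place of AudioBus::CalculateMemorySize() (the real one also accounts for alignment):

#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for AudioBus::CalculateMemorySize(): one float per channel-frame.
size_t CalculateMemorySize(int channels, int frames) {
  return static_cast<size_t>(channels) * frames * sizeof(float);
}

float* WrapOutputBus(std::vector<char>& shared, int channels, int frames) {
  // The whole mapping is the output bus; no input region is appended anymore.
  assert(shared.size() == CalculateMemorySize(channels, frames));
  return reinterpret_cast<float*>(shared.data());
}

int main() {
  std::vector<char> shared(CalculateMemorySize(2, 1024));
  float* bus = WrapOutputBus(shared, 2, 1024);
  bus[0] = 0.5f;  // Render() writes straight into the shared memory
}
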
diff --git a/chromium/media/audio/audio_output_device.h b/chromium/media/audio/audio_output_device.h
index 66f78972f46..8449e9faa14 100644
--- a/chromium/media/audio/audio_output_device.h
+++ b/chromium/media/audio/audio_output_device.h
@@ -62,11 +62,10 @@
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/shared_memory.h"
-#include "base/message_loop/message_loop.h"
#include "media/audio/audio_device_thread.h"
#include "media/audio/audio_output_ipc.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/scoped_loop_observer.h"
+#include "media/audio/scoped_task_runner_observer.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_export.h"
@@ -75,21 +74,19 @@ namespace media {
class MEDIA_EXPORT AudioOutputDevice
: NON_EXPORTED_BASE(public AudioRendererSink),
NON_EXPORTED_BASE(public AudioOutputIPCDelegate),
- NON_EXPORTED_BASE(public ScopedLoopObserver) {
+ NON_EXPORTED_BASE(public ScopedTaskRunnerObserver) {
public:
// NOTE: Clients must call Initialize() before using.
- AudioOutputDevice(scoped_ptr<AudioOutputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop);
+ AudioOutputDevice(
+ scoped_ptr<AudioOutputIPC> ipc,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
// Initialize function for clients wishing to have unified input and
// output, |params| may specify |input_channels| > 0, representing a
// number of input channels which will be at the same sample-rate
// and buffer-size as the output as specified in |params|. |session_id| is
// used for the browser to select the correct input device.
- // In this case, the callback's RenderIO() method will be called instead
- // of Render(), providing the synchronized input data at the same time as
- // when new output data is to be rendered.
- void InitializeUnifiedStream(const AudioParameters& params,
+ void InitializeWithSessionId(const AudioParameters& params,
RenderCallback* callback,
int session_id);
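
As the .cc change above shows, Initialize() now simply forwards to InitializeWithSessionId() with a session id of 0. A sketch of that delegation with hypothetical Params and Callback stand-ins, not the real media types:

struct Params {};
struct Callback {};

class Device {
 public:
  void InitializeWithSessionId(const Params& params, Callback* cb,
                               int session_id) {
    callback_ = cb;
    session_id_ = session_id;  // lets the browser pick the matching input device
    (void)params;
  }
  // Plain output-only initialization is just the session_id == 0 case.
  void Initialize(const Params& params, Callback* cb) {
    InitializeWithSessionId(params, cb, 0);
  }
 private:
  Callback* callback_ = nullptr;
  int session_id_ = 0;
};

int main() { Device d; Params p; Callback cb; d.Initialize(p, &cb); }
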
diff --git a/chromium/media/audio/audio_output_device_unittest.cc b/chromium/media/audio/audio_output_device_unittest.cc
index 7aca2627745..92d94250b4a 100644
--- a/chromium/media/audio/audio_output_device_unittest.cc
+++ b/chromium/media/audio/audio_output_device_unittest.cc
@@ -37,9 +37,6 @@ class MockRenderCallback : public AudioRendererSink::RenderCallback {
virtual ~MockRenderCallback() {}
MOCK_METHOD2(Render, int(AudioBus* dest, int audio_delay_milliseconds));
- MOCK_METHOD3(RenderIO, void(AudioBus* source,
- AudioBus* dest,
- int audio_delay_milliseconds));
MOCK_METHOD0(OnRenderError, void());
};
@@ -114,8 +111,6 @@ class AudioOutputDeviceTest
private:
int CalculateMemorySize();
- const bool synchronized_io_;
- const int input_channels_;
SharedMemory shared_memory_;
CancelableSyncSocket browser_socket_;
CancelableSyncSocket renderer_socket_;
@@ -124,24 +119,14 @@ class AudioOutputDeviceTest
};
int AudioOutputDeviceTest::CalculateMemorySize() {
- // Calculate output and input memory size.
- int output_memory_size =
- AudioBus::CalculateMemorySize(default_audio_parameters_);
-
- int frames = default_audio_parameters_.frames_per_buffer();
- int input_memory_size =
- AudioBus::CalculateMemorySize(input_channels_, frames);
-
- return output_memory_size + input_memory_size;
+ // Calculate output memory size.
+ return AudioBus::CalculateMemorySize(default_audio_parameters_);
}
-AudioOutputDeviceTest::AudioOutputDeviceTest()
- : synchronized_io_(GetParam()),
- input_channels_(synchronized_io_ ? 2 : 0) {
+AudioOutputDeviceTest::AudioOutputDeviceTest() {
default_audio_parameters_.Reset(
AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 2, input_channels_,
- 48000, 16, 1024);
+ CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 1024);
audio_output_ipc_ = new MockAudioOutputIPC();
audio_device_ = new AudioOutputDevice(
@@ -207,18 +192,11 @@ void AudioOutputDeviceTest::ExpectRenderCallback() {
// writing the interleaved audio data into the shared memory section.
// So, for the sake of this test, we consider the call to Render a sign
// of success and quit the loop.
- if (synchronized_io_) {
- // For synchronized I/O, we expect RenderIO().
- EXPECT_CALL(callback_, RenderIO(_, _, _))
- .WillOnce(QuitLoop(io_loop_.message_loop_proxy()));
- } else {
- // For output only we expect Render().
- const int kNumberOfFramesToProcess = 0;
- EXPECT_CALL(callback_, Render(_, _))
- .WillOnce(DoAll(
- QuitLoop(io_loop_.message_loop_proxy()),
- Return(kNumberOfFramesToProcess)));
- }
+ const int kNumberOfFramesToProcess = 0;
+ EXPECT_CALL(callback_, Render(_, _))
+ .WillOnce(DoAll(
+ QuitLoop(io_loop_.message_loop_proxy()),
+ Return(kNumberOfFramesToProcess)));
}
void AudioOutputDeviceTest::WaitUntilRenderCallback() {
@@ -280,6 +258,5 @@ TEST_P(AudioOutputDeviceTest, CreateStream) {
}
INSTANTIATE_TEST_CASE_P(Render, AudioOutputDeviceTest, Values(false));
-INSTANTIATE_TEST_CASE_P(RenderIO, AudioOutputDeviceTest, Values(true));
} // namespace media.
diff --git a/chromium/media/audio/audio_output_dispatcher.cc b/chromium/media/audio/audio_output_dispatcher.cc
index 89912c07dce..7f3dd10e39a 100644
--- a/chromium/media/audio/audio_output_dispatcher.cc
+++ b/chromium/media/audio/audio_output_dispatcher.cc
@@ -4,27 +4,25 @@
#include "media/audio/audio_output_dispatcher.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
namespace media {
AudioOutputDispatcher::AudioOutputDispatcher(
AudioManager* audio_manager,
const AudioParameters& params,
- const std::string& output_device_id,
- const std::string& input_device_id)
+ const std::string& device_id)
: audio_manager_(audio_manager),
- message_loop_(audio_manager->GetMessageLoop()),
+ task_runner_(audio_manager->GetTaskRunner()),
params_(params),
- output_device_id_(output_device_id),
- input_device_id_(input_device_id) {
+ device_id_(device_id) {
// We expect to be instantiated on the audio thread. Otherwise the
- // message_loop_ member will point to the wrong message loop!
- DCHECK(audio_manager->GetMessageLoop()->BelongsToCurrentThread());
+ // |task_runner_| member will point to the wrong message loop!
+ DCHECK(audio_manager->GetTaskRunner()->BelongsToCurrentThread());
}
AudioOutputDispatcher::~AudioOutputDispatcher() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
}
} // namespace media
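
The recurring message_loop_ to task_runner_ substitution in this and the following files keeps the same thread-affinity discipline: capture the runner at construction, then DCHECK BelongsToCurrentThread() in every method. The equivalent check, sketched with std::thread in place of base::SingleThreadTaskRunner:

#include <cassert>
#include <thread>

class Dispatcher {
 public:
  Dispatcher() : owner_(std::this_thread::get_id()) {}  // the "audio thread"
  void OpenStream() {
    // Stands in for DCHECK(task_runner_->BelongsToCurrentThread()).
    assert(std::this_thread::get_id() == owner_);
  }
 private:
  const std::thread::id owner_;
};

int main() {
  Dispatcher d;
  d.OpenStream();  // fine: called on the same thread that constructed |d|
}
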
diff --git a/chromium/media/audio/audio_output_dispatcher.h b/chromium/media/audio/audio_output_dispatcher.h
index d707aff14b6..079cba0ed74 100644
--- a/chromium/media/audio/audio_output_dispatcher.h
+++ b/chromium/media/audio/audio_output_dispatcher.h
@@ -26,7 +26,7 @@
#include "media/audio/audio_parameters.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -38,8 +38,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
public:
AudioOutputDispatcher(AudioManager* audio_manager,
const AudioParameters& params,
- const std::string& output_device_id,
- const std::string& input_device_id);
+ const std::string& device_id);
// Called by AudioOutputProxy to open the stream.
// Returns false, if it fails to open it.
@@ -66,15 +65,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
// Called on the audio thread when the AudioManager is shutting down.
virtual void Shutdown() = 0;
- // Called by the AudioManager to restart streams when a wedge is detected. A
- // wedge means the OS failed to request any audio after StartStream(). When a
- // wedge is detected all streams across all dispatchers must be closed. After
- // all streams are closed, streams are restarted. See http://crbug.com/160920
- virtual void CloseStreamsForWedgeFix() = 0;
- virtual void RestartStreamsForWedgeFix() = 0;
-
- // Accessor to the input device id used by unified IO.
- const std::string& input_device_id() const { return input_device_id_; }
+ const std::string& device_id() const { return device_id_; }
protected:
friend class base::RefCountedThreadSafe<AudioOutputDispatcher>;
@@ -83,10 +74,9 @@ class MEDIA_EXPORT AudioOutputDispatcher
// A no-reference-held pointer (we don't want circular references) back to the
// AudioManager that owns this object.
AudioManager* audio_manager_;
- const scoped_refptr<base::MessageLoopProxy> message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
const AudioParameters params_;
- std::string output_device_id_;
- const std::string input_device_id_;
+ std::string device_id_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcher);
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index 5118bef71e9..0cb3db85cad 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -8,7 +8,7 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_output_proxy.h"
@@ -19,12 +19,10 @@ AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay)
: AudioOutputDispatcher(audio_manager,
params,
- output_device_id,
- input_device_id),
+ output_device_id),
idle_proxies_(0),
close_timer_(FROM_HERE,
close_delay,
@@ -41,7 +39,7 @@ AudioOutputDispatcherImpl::~AudioOutputDispatcherImpl() {
}
bool AudioOutputDispatcherImpl::OpenStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Ensure that there is at least one open stream.
if (idle_streams_.empty() && !CreateAndOpenStream())
@@ -55,7 +53,7 @@ bool AudioOutputDispatcherImpl::OpenStream() {
bool AudioOutputDispatcherImpl::StartStream(
AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(proxy_to_physical_map_.find(stream_proxy) ==
proxy_to_physical_map_.end());
@@ -82,7 +80,7 @@ bool AudioOutputDispatcherImpl::StartStream(
}
void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
DCHECK(it != proxy_to_physical_map_.end());
@@ -99,7 +97,7 @@ void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
if (it != proxy_to_physical_map_.end()) {
AudioOutputStream* physical_stream = it->second;
@@ -109,7 +107,7 @@ void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
}
void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GT(idle_proxies_, 0u);
--idle_proxies_;
@@ -121,17 +119,21 @@ void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputDispatcherImpl::Shutdown() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Close all idle streams immediately. The |close_timer_| will handle
// invalidating any outstanding tasks upon its destruction.
CloseAllIdleStreams();
+
+ // No AudioOutputProxy objects should hold a reference to us when we get
+ // to this stage.
+ DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
}
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
- params_, output_device_id_, input_device_id_);
+ params_, device_id_);
if (!stream)
return false;
@@ -143,19 +145,19 @@ bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
const int stream_id = audio_stream_id_++;
audio_stream_ids_[stream] = stream_id;
audio_log_->OnCreated(
- stream_id, params_, input_device_id_, output_device_id_);
+ stream_id, params_, device_id_);
idle_streams_.push_back(stream);
return true;
}
void AudioOutputDispatcherImpl::CloseAllIdleStreams() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CloseIdleStreams(0);
}
void AudioOutputDispatcherImpl::CloseIdleStreams(size_t keep_alive) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (idle_streams_.size() <= keep_alive)
return;
for (size_t i = keep_alive; i < idle_streams_.size(); ++i) {
@@ -170,17 +172,4 @@ void AudioOutputDispatcherImpl::CloseIdleStreams(size_t keep_alive) {
idle_streams_.erase(idle_streams_.begin() + keep_alive, idle_streams_.end());
}
-void AudioOutputDispatcherImpl::CloseStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- CloseAllIdleStreams();
-}
-
-void AudioOutputDispatcherImpl::RestartStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Should only be called when the dispatcher is used with fake streams which
- // don't need to be shutdown or restarted.
- CHECK_EQ(params_.format(), AudioParameters::AUDIO_FAKE);
-}
-
} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index 037e11466f1..52d647a3be6 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -36,7 +36,6 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
AudioOutputDispatcherImpl(AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay);
// Opens a new physical stream if there are no pending streams in
@@ -62,9 +61,6 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
virtual void Shutdown() OVERRIDE;
- virtual void CloseStreamsForWedgeFix() OVERRIDE;
- virtual void RestartStreamsForWedgeFix() OVERRIDE;
-
private:
friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
virtual ~AudioOutputDispatcherImpl();
diff --git a/chromium/media/audio/audio_output_ipc.h b/chromium/media/audio/audio_output_ipc.h
index 3353735b085..f85d8e01953 100644
--- a/chromium/media/audio/audio_output_ipc.h
+++ b/chromium/media/audio/audio_output_ipc.h
@@ -23,7 +23,8 @@ class MEDIA_EXPORT AudioOutputIPCDelegate {
enum State {
kPlaying,
kPaused,
- kError
+ kError,
+ kStateLast = kError
};
// Called when state of an audio stream has changed.
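
The new kStateLast sentinel pins the enum's upper bound so code receiving a State over IPC can range-check the wire value without naming the final enumerator. A minimal sketch of that validation; IsValidState() is a hypothetical helper, not part of the header:

#include <cassert>

enum State { kPlaying, kPaused, kError, kStateLast = kError };

// Hypothetical helper: accept only values inside the enum's range.
bool IsValidState(int wire_value) {
  return wire_value >= kPlaying && wire_value <= kStateLast;
}

int main() {
  assert(IsValidState(kPaused));
  assert(!IsValidState(kStateLast + 1));  // out-of-range wire data is rejected
}
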
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
index cea098820aa..b8f23acaa92 100644
--- a/chromium/media/audio/audio_output_proxy_unittest.cc
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -5,7 +5,6 @@
#include <string>
#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/run_loop.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
@@ -94,27 +93,25 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD0(GetAudioInputDeviceModel, base::string16());
- MOCK_METHOD3(MakeAudioOutputStream, AudioOutputStream*(
+ MOCK_METHOD2(MakeAudioOutputStream, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id));
- MOCK_METHOD3(MakeAudioOutputStreamProxy, AudioOutputStream*(
+ const std::string& device_id));
+ MOCK_METHOD2(MakeAudioOutputStreamProxy, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id));
+ const std::string& device_id));
MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD0(ShowAudioInputSettings, void());
- MOCK_METHOD0(GetMessageLoop, scoped_refptr<base::MessageLoopProxy>());
- MOCK_METHOD0(GetWorkerLoop, scoped_refptr<base::MessageLoopProxy>());
+ MOCK_METHOD0(GetTaskRunner, scoped_refptr<base::SingleThreadTaskRunner>());
+ MOCK_METHOD0(GetWorkerTaskRunner,
+ scoped_refptr<base::SingleThreadTaskRunner>());
MOCK_METHOD1(GetAudioInputDeviceNames, void(
media::AudioDeviceNames* device_name));
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& device_id,
- const std::string& input_device_id));
+ MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
@@ -132,10 +129,6 @@ class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
audio_bus->Zero();
return audio_bus->frames();
}
- int OnMoreIOData(AudioBus* source, AudioBus* dest,
- AudioBuffersState buffers_state) {
- return OnMoreData(dest, buffers_state);
- }
MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
};
@@ -146,9 +139,9 @@ namespace media {
class AudioOutputProxyTest : public testing::Test {
protected:
virtual void SetUp() {
- EXPECT_CALL(manager_, GetMessageLoop())
+ EXPECT_CALL(manager_, GetTaskRunner())
.WillRepeatedly(Return(message_loop_.message_loop_proxy()));
- EXPECT_CALL(manager_, GetWorkerLoop())
+ EXPECT_CALL(manager_, GetWorkerTaskRunner())
.WillRepeatedly(Return(message_loop_.message_loop_proxy()));
// Use a low sample rate and large buffer size when testing otherwise the
// FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
@@ -168,7 +161,6 @@ class AudioOutputProxyTest : public testing::Test {
dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
params_,
std::string(),
- std::string(),
close_delay);
}
@@ -199,7 +191,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenAndClose(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -213,7 +205,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartAndStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -236,7 +228,7 @@ class AudioOutputProxyTest : public testing::Test {
void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -261,7 +253,7 @@ class AudioOutputProxyTest : public testing::Test {
void TwoStreams(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -280,7 +272,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(false));
@@ -297,7 +289,7 @@ class AudioOutputProxyTest : public testing::Test {
void CreateAndWait(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -314,7 +306,7 @@ class AudioOutputProxyTest : public testing::Test {
void OneStream_TwoPlays(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
@@ -346,7 +338,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -383,7 +375,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -394,7 +386,7 @@ class AudioOutputProxyTest : public testing::Test {
WaitForCloseTimer(&stream);
// |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(2)
.WillRepeatedly(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
@@ -434,8 +426,7 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
16000, 16, 1024);
resampler_ = new AudioOutputResampler(
- &manager(), params_, resampler_params_, std::string(), std::string(),
- close_delay);
+ &manager(), params_, resampler_params_, std::string(), close_delay);
}
virtual void OnStart() OVERRIDE {
@@ -535,7 +526,7 @@ TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_); }
// ensure AudioOutputResampler falls back to the high latency path.
TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(2)
.WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
.WillRepeatedly(Return(&stream));
@@ -552,7 +543,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
MockAudioOutputStream failed_stream(&manager_, params_);
MockAudioOutputStream okay_stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(2)
.WillOnce(Return(&failed_stream))
.WillRepeatedly(Return(&okay_stream));
@@ -580,7 +571,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
#else
static const int kFallbackCount = 1;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -591,7 +582,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
testing::Property(&AudioParameters::sample_rate, params_.sample_rate()),
testing::Property(
&AudioParameters::frames_per_buffer, params_.frames_per_buffer())),
- _, _))
+ _))
.Times(1)
.WillOnce(Return(&okay_stream));
EXPECT_CALL(okay_stream, Open())
@@ -613,7 +604,7 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
#else
static const int kFallbackCount = 2;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -629,7 +620,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
MockAudioOutputStream stream2(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -678,75 +669,4 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
EXPECT_TRUE(stream2.start_called());
}
-// Ensures the methods used to fix audio output wedges are working correctly.
-TEST_F(AudioOutputResamplerTest, WedgeFix) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
- MockAudioOutputStream stream3(&manager_, params_);
-
- // Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2))
- .WillOnce(Return(&stream3));
-
- // Stream1 should be able to successfully open and start.
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, SetVolume(_));
- EXPECT_CALL(stream2, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream2, SetVolume(_));
-
- // Open and start the first proxy and stream.
- AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy1->Open());
- proxy1->Start(&callback_);
- OnStart();
-
- // Open, but do not start the second proxy.
- AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy2->Open());
-
- // Open, start and then stop the third proxy.
- AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy3->Open());
- proxy3->Start(&callback_);
- OnStart();
- proxy3->Stop();
-
- // Wait for stream to timeout and shutdown.
- WaitForCloseTimer(&stream2);
-
- EXPECT_CALL(stream1, Close());
- resampler_->CloseStreamsForWedgeFix();
-
- // Don't pump the MessageLoop between CloseStreamsForWedgeFix() and
- // RestartStreamsForWedgeFix() to simulate intended usage. The OnStart() call
- // will take care of necessary work.
-
- // Stream3 should take Stream1's place after RestartStreamsForWedgeFix(). No
- // additional streams should be opened for proxy2 and proxy3.
- EXPECT_CALL(stream3, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream3, SetVolume(_));
-
- resampler_->RestartStreamsForWedgeFix();
- OnStart();
-
- // Perform the required Stop()/Close() shutdown dance for each proxy.
- proxy3->Close();
- proxy2->Close();
- proxy1->Stop();
- CloseAndWaitForCloseTimer(proxy1, &stream3);
-
- // Wait for all of the messages to fly and then verify stream behavior.
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_TRUE(stream2.stop_called());
- EXPECT_TRUE(stream2.start_called());
- EXPECT_TRUE(stream3.stop_called());
- EXPECT_TRUE(stream3.start_called());
-}
-
} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index c53f3e089ce..15633bb2017 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -7,8 +7,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/audio/audio_io.h"
@@ -31,9 +31,6 @@ class OnMoreDataConverter
// AudioSourceCallback interface.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
virtual void OnError(AudioOutputStream* stream) OVERRIDE;
// Sets |source_callback_|. If this is not a new object, then Stop() must be
@@ -72,20 +69,24 @@ class OnMoreDataConverter
// Record UMA statistics for hardware output configuration.
static void RecordStats(const AudioParameters& output_params) {
+ // Note the 'PRESUBMIT_IGNORE_UMA_MAX's below; these silence the PRESUBMIT.py
+ // check for UMA enum max usage, since we're abusing UMA_HISTOGRAM_ENUMERATION
+ // to report a discrete value.
UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioBitsPerChannel", output_params.bits_per_sample(),
- limits::kMaxBitsPerSample);
+ "Media.HardwareAudioBitsPerChannel",
+ output_params.bits_per_sample(),
+ limits::kMaxBitsPerSample); // PRESUBMIT_IGNORE_UMA_MAX
UMA_HISTOGRAM_ENUMERATION(
"Media.HardwareAudioChannelLayout", output_params.channel_layout(),
- CHANNEL_LAYOUT_MAX);
+ CHANNEL_LAYOUT_MAX + 1);
UMA_HISTOGRAM_ENUMERATION(
"Media.HardwareAudioChannelCount", output_params.channels(),
- limits::kMaxChannels);
+ limits::kMaxChannels); // PRESUBMIT_IGNORE_UMA_MAX
- AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
- if (asr != kUnexpectedAudioSampleRate) {
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(output_params.sample_rate(), &asr)) {
UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioSamplesPerSecond", asr, kUnexpectedAudioSampleRate);
+ "Media.HardwareAudioSamplesPerSecond", asr, kAudioSampleRateMax + 1);
} else {
UMA_HISTOGRAM_COUNTS(
"Media.HardwareAudioSamplesPerSecondUnexpected",
@@ -96,21 +97,25 @@ static void RecordStats(const AudioParameters& output_params) {
// Record UMA statistics for hardware output configuration after fallback.
static void RecordFallbackStats(const AudioParameters& output_params) {
UMA_HISTOGRAM_BOOLEAN("Media.FallbackToHighLatencyAudioPath", true);
+ // Note the 'PRESUBMIT_IGNORE_UMA_MAX's below; these silence the PRESUBMIT.py
+ // check for UMA enum max usage, since we're abusing UMA_HISTOGRAM_ENUMERATION
+ // to report a discrete value.
UMA_HISTOGRAM_ENUMERATION(
"Media.FallbackHardwareAudioBitsPerChannel",
- output_params.bits_per_sample(), limits::kMaxBitsPerSample);
+ output_params.bits_per_sample(),
+ limits::kMaxBitsPerSample); // PRESUBMIT_IGNORE_UMA_MAX
UMA_HISTOGRAM_ENUMERATION(
"Media.FallbackHardwareAudioChannelLayout",
- output_params.channel_layout(), CHANNEL_LAYOUT_MAX);
+ output_params.channel_layout(), CHANNEL_LAYOUT_MAX + 1);
UMA_HISTOGRAM_ENUMERATION(
- "Media.FallbackHardwareAudioChannelCount",
- output_params.channels(), limits::kMaxChannels);
+ "Media.FallbackHardwareAudioChannelCount", output_params.channels(),
+ limits::kMaxChannels); // PRESUBMIT_IGNORE_UMA_MAX
- AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
- if (asr != kUnexpectedAudioSampleRate) {
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(output_params.sample_rate(), &asr)) {
UMA_HISTOGRAM_ENUMERATION(
"Media.FallbackHardwareAudioSamplesPerSecond",
- asr, kUnexpectedAudioSampleRate);
+ asr, kAudioSampleRateMax + 1);
} else {
UMA_HISTOGRAM_COUNTS(
"Media.FallbackHardwareAudioSamplesPerSecondUnexpected",
@@ -135,7 +140,7 @@ void AudioOutputResampler::SetupFallbackParams() {
AudioParameters::AUDIO_PCM_LINEAR, params_.channel_layout(),
params_.sample_rate(), params_.bits_per_sample(),
frames_per_buffer);
- output_device_id_ = "";
+ device_id_ = "";
Initialize();
#endif
}
@@ -144,10 +149,8 @@ AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, input_params, output_device_id,
- input_device_id),
+ : AudioOutputDispatcher(audio_manager, input_params, output_device_id),
close_delay_(close_delay),
output_params_(output_params),
streams_opened_(false) {
@@ -169,12 +172,11 @@ void AudioOutputResampler::Initialize() {
DCHECK(!streams_opened_);
DCHECK(callbacks_.empty());
dispatcher_ = new AudioOutputDispatcherImpl(
- audio_manager_, output_params_, output_device_id_, input_device_id_,
- close_delay_);
+ audio_manager_, output_params_, device_id_, close_delay_);
}
bool AudioOutputResampler::OpenStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (dispatcher_->OpenStream()) {
// Only record the UMA statistic if we didn't fallback during construction
@@ -233,7 +235,7 @@ bool AudioOutputResampler::OpenStream() {
bool AudioOutputResampler::StartStream(
AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
OnMoreDataConverter* resampler_callback = NULL;
CallbackMap::iterator it = callbacks_.find(stream_proxy);
@@ -253,12 +255,12 @@ bool AudioOutputResampler::StartStream(
void AudioOutputResampler::StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
dispatcher_->StreamVolumeSet(stream_proxy, volume);
}
void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
dispatcher_->StopStream(stream_proxy);
// Now that StopStream() has completed the underlying physical stream should
@@ -270,7 +272,7 @@ void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
dispatcher_->CloseStream(stream_proxy);
// We assume that StopStream() is always called prior to CloseStream(), so
@@ -283,7 +285,7 @@ void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputResampler::Shutdown() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// No AudioOutputProxy objects should hold a reference to us when we get
// to this stage.
@@ -293,37 +295,6 @@ void AudioOutputResampler::Shutdown() {
DCHECK(callbacks_.empty());
}
-void AudioOutputResampler::CloseStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Stop and close all active streams. Once all streams across all dispatchers
- // have been closed the AudioManager will call RestartStreamsForWedgeFix().
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- if (it->second->started())
- dispatcher_->StopStream(it->first);
- dispatcher_->CloseStream(it->first);
- }
-
- // Close all idle streams as well.
- dispatcher_->CloseStreamsForWedgeFix();
-}
-
-void AudioOutputResampler::RestartStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- // By opening all streams first and then starting them one by one we ensure
- // the dispatcher only opens streams for those which will actually be used.
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- dispatcher_->OpenStream();
- }
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- if (it->second->started())
- dispatcher_->StartStream(it->second, it->first);
- }
-}
-
OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params,
const AudioParameters& output_params)
: io_ratio_(static_cast<double>(input_params.GetBytesPerSecond()) /
@@ -357,16 +328,6 @@ void OnMoreDataConverter::Stop() {
int OnMoreDataConverter::OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) {
- return OnMoreIOData(NULL, dest, buffers_state);
-}
-
-int OnMoreDataConverter::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- // Note: The input portion of OnMoreIOData() is not supported when a converter
- // has been injected. Downstream clients prefer silence to potentially split
- // apart input data.
-
current_buffers_state_ = buffers_state;
audio_converter_.Convert(dest);
@@ -386,8 +347,7 @@ double OnMoreDataConverter::ProvideInput(AudioBus* dest,
buffer_delay.InSecondsF() * input_bytes_per_second_);
// Retrieve data from the original callback.
- const int frames = source_callback_->OnMoreIOData(
- NULL, dest, new_buffers_state);
+ const int frames = source_callback_->OnMoreData(dest, new_buffers_state);
// Zero any unfilled frames if anything was filled, otherwise we'll just
// return a volume of zero and let AudioConverter drop the output.
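
The "+ 1" boundaries introduced above follow UMA_HISTOGRAM_ENUMERATION's contract that the boundary argument is exclusive: a sample equal to the enum's maximum only lands in a bucket if the boundary is max + 1, while the PRESUBMIT_IGNORE_UMA_MAX tags mark the spots where a raw maximum is deliberately reported as a value. The arithmetic restated with a hand-rolled recorder and illustrative enumerators:

#include <cassert>

enum SampleRate { k8000Hz, k44100Hz, k48000Hz, kAudioSampleRateMax = k48000Hz };

// Stand-in for the histogram macro's range check: the boundary is exclusive.
void RecordEnumeration(int sample, int exclusive_boundary) {
  assert(0 <= sample && sample < exclusive_boundary);
  // ... bucket |sample| ...
}

int main() {
  // With a boundary of kAudioSampleRateMax the maximum value itself would
  // fail the check; kAudioSampleRateMax + 1 admits every enumerator.
  RecordEnumeration(kAudioSampleRateMax, kAudioSampleRateMax + 1);
}
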
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
index a8fca232470..fa488aa1956 100644
--- a/chromium/media/audio/audio_output_resampler.h
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -41,7 +41,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
const AudioParameters& input_params,
const AudioParameters& output_params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay);
// AudioOutputDispatcher interface.
@@ -53,8 +52,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
double volume) OVERRIDE;
virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
virtual void Shutdown() OVERRIDE;
- virtual void CloseStreamsForWedgeFix() OVERRIDE;
- virtual void RestartStreamsForWedgeFix() OVERRIDE;
private:
friend class base::RefCountedThreadSafe<AudioOutputResampler>;
diff --git a/chromium/media/audio/audio_parameters.cc b/chromium/media/audio/audio_parameters.cc
index fff815610fe..62b825ff0e5 100644
--- a/chromium/media/audio/audio_parameters.cc
+++ b/chromium/media/audio/audio_parameters.cc
@@ -85,7 +85,7 @@ bool AudioParameters::IsValid() const {
(channels_ > 0) &&
(channels_ <= media::limits::kMaxChannels) &&
(channel_layout_ > CHANNEL_LAYOUT_UNSUPPORTED) &&
- (channel_layout_ < CHANNEL_LAYOUT_MAX) &&
+ (channel_layout_ <= CHANNEL_LAYOUT_MAX) &&
(input_channels_ >= 0) &&
(input_channels_ <= media::limits::kMaxChannels) &&
(sample_rate_ >= media::limits::kMinSampleRate) &&
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
index 62ff4fd48f1..b23d26fdcae 100644
--- a/chromium/media/audio/audio_parameters.h
+++ b/chromium/media/audio/audio_parameters.h
@@ -48,7 +48,9 @@ class MEDIA_EXPORT AudioParameters {
// effects should be enabled.
enum PlatformEffectsMask {
NO_EFFECTS = 0x0,
- ECHO_CANCELLER = 0x1
+ ECHO_CANCELLER = 0x1,
+ DUCKING = 0x2, // Enables ducking if the OS supports it.
+ KEYBOARD_MIC = 0x4,
};
AudioParameters();
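
PlatformEffectsMask stays a bitmask, so the new DUCKING and KEYBOARD_MIC values keep to distinct powers of two and combine with bitwise OR. An illustrative check, independent of the rest of the class:

#include <cstdio>

enum PlatformEffectsMask {
  NO_EFFECTS = 0x0,
  ECHO_CANCELLER = 0x1,
  DUCKING = 0x2,  // enables ducking if the OS supports it
  KEYBOARD_MIC = 0x4,
};

int main() {
  const int effects = ECHO_CANCELLER | DUCKING;  // request two effects at once
  if (effects & DUCKING)
    std::printf("ducking requested\n");
  if (!(effects & KEYBOARD_MIC))
    std::printf("keyboard mic not requested\n");
}
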
diff --git a/chromium/media/audio/audio_parameters_unittest.cc b/chromium/media/audio/audio_parameters_unittest.cc
index f0d37129eb9..390b205a091 100644
--- a/chromium/media/audio/audio_parameters_unittest.cc
+++ b/chromium/media/audio/audio_parameters_unittest.cc
@@ -63,6 +63,10 @@ TEST(AudioParameters, GetBytesPerBuffer) {
EXPECT_EQ(800, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_STEREO, 1000, 16, 200)
.GetBytesPerBuffer());
+ EXPECT_EQ(300, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
+ 1000, 8, 100)
+ .GetBytesPerBuffer());
}
TEST(AudioParameters, GetBytesPerSecond) {
@@ -119,6 +123,23 @@ TEST(AudioParameters, Compare) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
2000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 200),
+
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
1000, 8, 100),
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
@@ -152,6 +173,23 @@ TEST(AudioParameters, Compare) {
CHANNEL_LAYOUT_STEREO, 2000, 16, 100),
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_STEREO, 2000, 16, 200),
+
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 200),
};
for (size_t i = 0; i < arraysize(values); ++i) {
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
index ff809d0541d..89f43bf42fd 100644
--- a/chromium/media/audio/clockless_audio_sink.cc
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -73,16 +73,12 @@ void ClocklessAudioSink::Initialize(const AudioParameters& params,
}
void ClocklessAudioSink::Start() {
+ DCHECK(initialized_);
DCHECK(!playing_);
}
void ClocklessAudioSink::Stop() {
- DCHECK(initialized_);
-
- if (!playing_)
- return;
-
- playback_time_ = thread_->Stop();
+ Pause();
}
void ClocklessAudioSink::Play() {
@@ -96,7 +92,13 @@ void ClocklessAudioSink::Play() {
}
void ClocklessAudioSink::Pause() {
- Stop();
+ DCHECK(initialized_);
+
+ if (!playing_)
+ return;
+
+ playing_ = false;
+ playback_time_ = thread_->Stop();
}
bool ClocklessAudioSink::SetVolume(double volume) {
diff --git a/chromium/media/audio/clockless_audio_sink.h b/chromium/media/audio/clockless_audio_sink.h
index 9e73b1a8817..bf68896c7aa 100644
--- a/chromium/media/audio/clockless_audio_sink.h
+++ b/chromium/media/audio/clockless_audio_sink.h
@@ -10,7 +10,7 @@
#include "media/base/audio_renderer_sink.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
index 876d6ce8136..abc78dd4b78 100644
--- a/chromium/media/audio/cras/audio_manager_cras.cc
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -4,6 +4,8 @@
#include "media/audio/cras/audio_manager_cras.h"
+#include <algorithm>
+
#include "base/command_line.h"
#include "base/environment.h"
#include "base/logging.h"
@@ -13,6 +15,11 @@
#include "media/audio/cras/cras_unified.h"
#include "media/base/channel_layout.h"
+// cras_util.h headers pull in min/max macros...
+// TODO(dgreid): Fix headers such that these aren't imported.
+#undef min
+#undef max
+
namespace media {
static void AddDefaultDevice(AudioDeviceNames* device_names) {
@@ -30,6 +37,13 @@ static const int kMaxOutputStreams = 50;
// Default sample rate for input and output streams.
static const int kDefaultSampleRate = 48000;
+// Define bounds for the output buffer size.
+static const int kMinimumOutputBufferSize = 512;
+static const int kMaximumOutputBufferSize = 8192;
+
+// Default input buffer size.
+static const int kDefaultInputBufferSize = 1024;
+
bool AudioManagerCras::HasAudioOutputDevices() {
return true;
}
@@ -63,12 +77,15 @@ void AudioManagerCras::GetAudioOutputDeviceNames(
AudioParameters AudioManagerCras::GetInputStreamParameters(
const std::string& device_id) {
- static const int kDefaultInputBufferSize = 1024;
+ int user_buffer_size = GetUserBufferSize();
+ int buffer_size = user_buffer_size ?
+ user_buffer_size : kDefaultInputBufferSize;
+
// TODO(hshi): Fine-tune audio parameters based on |device_id|. The optimal
// parameters for the loopback stream may differ from the default.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultInputBufferSize);
+ kDefaultSampleRate, 16, buffer_size);
}
AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
@@ -79,8 +96,7 @@ AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
AudioOutputStream* AudioManagerCras::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// TODO(dgreid): Open the correct input device for unified IO.
@@ -104,11 +120,9 @@ AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
- static const int kDefaultOutputBufferSize = 512;
-
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = kDefaultSampleRate;
- int buffer_size = kDefaultOutputBufferSize;
+ int buffer_size = kMinimumOutputBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
if (input_params.IsValid()) {
@@ -116,7 +130,9 @@ AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
bits_per_sample = input_params.bits_per_sample();
channel_layout = input_params.channel_layout();
input_channels = input_params.input_channels();
- buffer_size = input_params.frames_per_buffer();
+ buffer_size =
+ std::min(kMaximumOutputBufferSize,
+ std::max(buffer_size, input_params.frames_per_buffer()));
}
int user_buffer_size = GetUserBufferSize();
@@ -138,4 +154,19 @@ AudioInputStream* AudioManagerCras::MakeInputStream(
return new CrasInputStream(params, this, device_id);
}
+snd_pcm_format_t AudioManagerCras::BitsToFormat(int bits_per_sample) {
+ switch (bits_per_sample) {
+ case 8:
+ return SND_PCM_FORMAT_U8;
+ case 16:
+ return SND_PCM_FORMAT_S16;
+ case 24:
+ return SND_PCM_FORMAT_S24;
+ case 32:
+ return SND_PCM_FORMAT_S32;
+ default:
+ return SND_PCM_FORMAT_UNKNOWN;
+ }
+}
+
} // namespace media
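
The preferred-output-parameters change above clamps the client-requested
buffer size between the two new bounds. A standalone sketch of the rule,
assuming only the constants defined in this file:

    #include <algorithm>

    int ClampOutputBufferSize(int requested_frames) {
      const int kMinimumOutputBufferSize = 512;   // from audio_manager_cras.cc
      const int kMaximumOutputBufferSize = 8192;  // from audio_manager_cras.cc
      return std::min(kMaximumOutputBufferSize,
                      std::max(kMinimumOutputBufferSize, requested_frames));
    }

    // ClampOutputBufferSize(256)   -> 512   (too small; raised to the floor)
    // ClampOutputBufferSize(2048)  -> 2048  (in range; passed through)
    // ClampOutputBufferSize(16000) -> 8192  (too large; capped at the ceiling)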
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
index 589374ae0b9..a9abd6c2deb 100644
--- a/chromium/media/audio/cras/audio_manager_cras.h
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
#define MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
+#include <cras_types.h>
+
#include <string>
#include "base/compiler_specific.h"
@@ -33,13 +35,14 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ static snd_pcm_format_t BitsToFormat(int bits_per_sample);
+
protected:
virtual ~AudioManagerCras();
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
index c41f3645efd..afdabb21d1f 100644
--- a/chromium/media/audio/cras/cras_input.cc
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -7,10 +7,8 @@
#include <math.h>
#include "base/basictypes.h"
-#include "base/bind.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/audio/alsa/alsa_util.h"
#include "media/audio/audio_manager.h"
#include "media/audio/cras/audio_manager_cras.h"
@@ -29,6 +27,7 @@ CrasInputStream::CrasInputStream(const AudioParameters& params,
stream_direction_(device_id == AudioManagerBase::kLoopbackInputDeviceId ?
CRAS_STREAM_POST_MIX_PRE_DSP : CRAS_STREAM_INPUT) {
DCHECK(audio_manager_);
+ audio_bus_ = AudioBus::Create(params_);
}
CrasInputStream::~CrasInputStream() {
@@ -54,7 +53,7 @@ bool CrasInputStream::Open() {
}
snd_pcm_format_t pcm_format =
- alsa_util::BitsToFormat(params_.bits_per_sample());
+ AudioManagerCras::BitsToFormat(params_.bits_per_sample());
if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
DLOG(WARNING) << "Unsupported bits/sample: " << params_.bits_per_sample();
return false;
@@ -86,17 +85,14 @@ bool CrasInputStream::Open() {
}
void CrasInputStream::Close() {
+ Stop();
+
if (client_) {
cras_client_stop(client_);
cras_client_destroy(client_);
client_ = NULL;
}
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
-
// Signal to the manager that we're closed and can be removed.
// Should be last call in the method as it deletes "this".
audio_manager_->ReleaseInputStream(this);
@@ -117,7 +113,7 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
cras_audio_format* audio_format = cras_audio_format_create(
- alsa_util::BitsToFormat(params_.bits_per_sample()),
+ AudioManagerCras::BitsToFormat(params_.bits_per_sample()),
params_.sample_rate(),
params_.channels());
if (!audio_format) {
@@ -177,6 +173,7 @@ void CrasInputStream::Stop() {
cras_client_rm_stream(client_, stream_id_);
started_ = false;
+ callback_ = NULL;
}
// Static callback asking for samples. Run on high priority thread.
@@ -226,11 +223,9 @@ void CrasInputStream::ReadAudio(size_t frames,
double normalized_volume = 0.0;
GetAgcVolume(&normalized_volume);
- callback_->OnData(this,
- buffer,
- frames * bytes_per_frame_,
- bytes_latency,
- normalized_volume);
+ audio_bus_->FromInterleaved(
+ buffer, audio_bus_->frames(), params_.bits_per_sample() / 8);
+ callback_->OnData(this, audio_bus_.get(), bytes_latency, normalized_volume);
}
void CrasInputStream::NotifyStreamError(int err) {
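
The ReadAudio() change above moves the capture path from a raw interleaved
byte buffer to a planar AudioBus. A sketch of the conversion step, assuming
16-bit samples so bits_per_sample() / 8 == 2:

    // |audio_bus_| is created once from |params_| in the constructor:
    //   audio_bus_ = AudioBus::Create(params_);
    // Each callback then deinterleaves the CRAS buffer into it:
    audio_bus_->FromInterleaved(
        buffer,                          // interleaved samples from CRAS
        audio_bus_->frames(),            // frame count, not byte count
        params_.bits_per_sample() / 8);  // bytes per sample
    callback_->OnData(this, audio_bus_.get(), bytes_latency, normalized_volume);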
diff --git a/chromium/media/audio/cras/cras_input.h b/chromium/media/audio/cras/cras_input.h
index dd2cb5474a4..1919224d9a9 100644
--- a/chromium/media/audio/cras/cras_input.h
+++ b/chromium/media/audio/cras/cras_input.h
@@ -10,8 +10,6 @@
#include <string>
#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
#include "media/audio/agc_audio_stream.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
@@ -53,7 +51,7 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
const timespec* sample_ts,
void* arg);
- // Handles notificaiton that there was an error with the playback stream.
+ // Handles notification that there was an error with the playback stream.
static int StreamError(cras_client* client,
cras_stream_id_t stream_id,
int err,
@@ -100,9 +98,11 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
// Direction of the stream.
const CRAS_STREAM_DIRECTION stream_direction_;
+ scoped_ptr<AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(CrasInputStream);
};
} // namespace media
-#endif // MEDIA_AUDIO_CRAS_ALSA_INPUT_H_
+#endif // MEDIA_AUDIO_CRAS_CRAS_INPUT_H_
diff --git a/chromium/media/audio/cras/cras_input_unittest.cc b/chromium/media/audio/cras/cras_input_unittest.cc
index 27ea9858ba1..7081a98e907 100644
--- a/chromium/media/audio/cras/cras_input_unittest.cc
+++ b/chromium/media/audio/cras/cras_input_unittest.cc
@@ -2,18 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <unistd.h>
-
#include <string>
#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "media/audio/cras/audio_manager_cras.h"
-#include "media/audio/cras/cras_input.h"
+#include "media/audio/fake_audio_log_factory.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+// cras_util.h defines custom min/max macros which break compilation, so ensure
+// it's not included until last. #if avoids presubmit errors.
+#if defined(USE_CRAS)
+#include "media/audio/cras/cras_input.h"
+#endif
+
using testing::_;
using testing::AtLeast;
using testing::Ge;
@@ -24,14 +28,15 @@ namespace media {
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData, void(
- AudioInputStream*, const uint8*, uint32, uint32, double));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream*, const AudioBus*, uint32, double));
MOCK_METHOD1(OnError, void(AudioInputStream*));
- MOCK_METHOD1(OnClose, void(AudioInputStream*));
};
class MockAudioManagerCrasInput : public AudioManagerCras {
public:
+ MockAudioManagerCrasInput() : AudioManagerCras(&fake_audio_log_factory_) {}
+
// We need to override this function in order to skip checking the number
// of active output streams. It is because the number of active streams
// is managed inside MakeAudioInputStream, and we don't use
@@ -40,6 +45,9 @@ class MockAudioManagerCrasInput : public AudioManagerCras {
DCHECK(stream);
delete stream;
}
+
+ private:
+ FakeAudioLogFactory fake_audio_log_factory_;
};
class CrasInputStreamTest : public testing::Test {
@@ -77,14 +85,9 @@ class CrasInputStreamTest : public testing::Test {
// samples can be provided when doing non-integer SRC. For example
// converting from 192k to 44.1k is a ratio of 4.35 to 1.
MockAudioInputCallback mock_callback;
- unsigned int expected_size = (kTestFramesPerPacket - 8) *
- params.channels() *
- params.bits_per_sample() / 8;
-
base::WaitableEvent event(false, false);
- EXPECT_CALL(mock_callback,
- OnData(test_stream, _, Ge(expected_size), _, _))
+ EXPECT_CALL(mock_callback, OnData(test_stream, _, _, _))
.WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
test_stream->Start(&mock_callback);
@@ -93,8 +96,6 @@ class CrasInputStreamTest : public testing::Test {
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
test_stream->Stop();
-
- EXPECT_CALL(mock_callback, OnClose(test_stream)).Times(1);
test_stream->Close();
}
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
index c85cf59dd5f..25af3837ad6 100644
--- a/chromium/media/audio/cras/cras_unified.cc
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -4,11 +4,7 @@
#include "media/audio/cras/cras_unified.h"
-#include <cras_client.h>
-
-#include "base/command_line.h"
#include "base/logging.h"
-#include "media/audio/alsa/alsa_util.h"
#include "media/audio/cras/audio_manager_cras.h"
namespace media {
@@ -116,7 +112,7 @@ bool CrasUnifiedStream::Open() {
return false;
}
- if (alsa_util::BitsToFormat(params_.bits_per_sample()) ==
+ if (AudioManagerCras::BitsToFormat(params_.bits_per_sample()) ==
SND_PCM_FORMAT_UNKNOWN) {
LOG(WARNING) << "Unsupported pcm format";
return false;
@@ -187,7 +183,7 @@ void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
cras_audio_format* audio_format = cras_audio_format_create(
- alsa_util::BitsToFormat(params_.bits_per_sample()),
+ AudioManagerCras::BitsToFormat(params_.bits_per_sample()),
params_.sample_rate(),
params_.channels());
if (!audio_format) {
@@ -361,8 +357,7 @@ uint32 CrasUnifiedStream::ReadWriteAudio(size_t frames,
cras_client_calc_playback_latency(output_ts, &latency_ts);
total_delay_bytes += GetBytesLatency(latency_ts);
- int frames_filled = source_callback_->OnMoreIOData(
- input_bus_.get(),
+ int frames_filled = source_callback_->OnMoreData(
output_bus_.get(),
AudioBuffersState(0, total_delay_bytes));
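
Since the stream is now output-only (see the TODO added in cras_unified.h
below), the render callback no longer receives an input bus. The call-site
change, condensed:

    // Before (synchronized I/O):
    //   source_callback_->OnMoreIOData(input_bus_.get(), output_bus_.get(),
    //                                  AudioBuffersState(0, total_delay_bytes));
    // After (output only):
    int frames_filled = source_callback_->OnMoreData(
        output_bus_.get(), AudioBuffersState(0, total_delay_bytes));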
diff --git a/chromium/media/audio/cras/cras_unified.h b/chromium/media/audio/cras/cras_unified.h
index 818763efb49..db1d9feb648 100644
--- a/chromium/media/audio/cras/cras_unified.h
+++ b/chromium/media/audio/cras/cras_unified.h
@@ -10,21 +10,21 @@
#ifndef MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
#define MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
-#include <alsa/asoundlib.h>
#include <cras_client.h>
#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
namespace media {
class AudioManagerCras;
-class AudioParameters;
// Implementation of AudioOuputStream for Chrome OS using the Chrome OS audio
// server.
+// TODO(dgreid): This class is used for only output, either remove all the
+// relevant input code and change the class to CrasOutputStream or merge
+// cras_input.cc into this unified implementation.
class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
public:
// The ctor takes all the usual parameters, plus |manager| which is the
@@ -57,7 +57,7 @@ class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
const timespec* output_ts,
void* arg);
- // Handles notificaiton that there was an error with the playback stream.
+ // Handles notification that there was an error with the playback stream.
static int StreamError(cras_client* client,
cras_stream_id_t stream_id,
int err,
@@ -107,7 +107,7 @@ class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
// Callback to get audio samples.
AudioSourceCallback* source_callback_;
- // Container for exchanging data with AudioSourceCallback::OnMoreIOData().
+ // Container for exchanging data with AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> input_bus_;
scoped_ptr<AudioBus> output_bus_;
diff --git a/chromium/media/audio/cras/cras_unified_unittest.cc b/chromium/media/audio/cras/cras_unified_unittest.cc
index 7083eca427e..9d282bb7505 100644
--- a/chromium/media/audio/cras/cras_unified_unittest.cc
+++ b/chromium/media/audio/cras/cras_unified_unittest.cc
@@ -8,10 +8,17 @@
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "media/audio/cras/audio_manager_cras.h"
-#include "media/audio/cras/cras_unified.h"
+#include "media/audio/fake_audio_log_factory.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+// cras_util.h defines custom min/max macros which break compilation, so ensure
+// it's not included until last. #if avoids presubmit errors.
+#if defined(USE_CRAS)
+#include "media/audio/cras/cras_unified.h"
+#endif
+
using testing::_;
using testing::DoAll;
using testing::InvokeWithoutArgs;
@@ -21,25 +28,18 @@ using testing::StrictMock;
namespace media {
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
class MockAudioManagerCras : public AudioManagerCras {
public:
+ MockAudioManagerCras() : AudioManagerCras(&fake_audio_log_factory_) {}
+
MOCK_METHOD0(Init, void());
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD1(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params));
+ MOCK_METHOD2(MakeLowLatencyOutputStream,
+ AudioOutputStream*(const AudioParameters& params,
+ const std::string& device_id));
MOCK_METHOD2(MakeLinearOutputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
@@ -53,6 +53,9 @@ class MockAudioManagerCras : public AudioManagerCras {
DCHECK(stream);
delete stream;
}
+
+ private:
+ FakeAudioLogFactory fake_audio_log_factory_;
};
class CrasUnifiedStreamTest : public testing::Test {
diff --git a/chromium/media/audio/fake_audio_consumer.cc b/chromium/media/audio/fake_audio_consumer.cc
index 55c439ad9f3..ca99424f419 100644
--- a/chromium/media/audio/fake_audio_consumer.cc
+++ b/chromium/media/audio/fake_audio_consumer.cc
@@ -7,10 +7,10 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/cancelable_callback.h"
+#include "base/location.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
@@ -22,7 +22,7 @@ namespace media {
class FakeAudioConsumer::Worker
: public base::RefCountedThreadSafe<FakeAudioConsumer::Worker> {
public:
- Worker(const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ Worker(const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params);
bool IsStopped();
@@ -44,7 +44,7 @@ class FakeAudioConsumer::Worker
// the worker loop.
void DoRead();
- const scoped_refptr<base::MessageLoopProxy> worker_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
const scoped_ptr<AudioBus> audio_bus_;
const base::TimeDelta buffer_duration_;
@@ -61,9 +61,9 @@ class FakeAudioConsumer::Worker
};
FakeAudioConsumer::FakeAudioConsumer(
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params)
- : worker_(new Worker(worker_loop, params)) {
+ : worker_(new Worker(worker_task_runner, params)) {
}
FakeAudioConsumer::~FakeAudioConsumer() {
@@ -80,9 +80,9 @@ void FakeAudioConsumer::Stop() {
}
FakeAudioConsumer::Worker::Worker(
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params)
- : worker_loop_(worker_loop),
+ : worker_task_runner_(worker_task_runner),
audio_bus_(AudioBus::Create(params)),
buffer_duration_(base::TimeDelta::FromMicroseconds(
params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
@@ -111,11 +111,11 @@ void FakeAudioConsumer::Worker::Start(const ReadCB& read_cb) {
DCHECK(read_cb_.is_null());
read_cb_ = read_cb;
}
- worker_loop_->PostTask(FROM_HERE, base::Bind(&Worker::DoStart, this));
+ worker_task_runner_->PostTask(FROM_HERE, base::Bind(&Worker::DoStart, this));
}
void FakeAudioConsumer::Worker::DoStart() {
- DCHECK(worker_loop_->BelongsToCurrentThread());
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
next_read_time_ = base::TimeTicks::Now();
read_task_cb_.Reset(base::Bind(&Worker::DoRead, this));
read_task_cb_.callback().Run();
@@ -129,16 +129,16 @@ void FakeAudioConsumer::Worker::Stop() {
return;
read_cb_.Reset();
}
- worker_loop_->PostTask(FROM_HERE, base::Bind(&Worker::DoCancel, this));
+ worker_task_runner_->PostTask(FROM_HERE, base::Bind(&Worker::DoCancel, this));
}
void FakeAudioConsumer::Worker::DoCancel() {
- DCHECK(worker_loop_->BelongsToCurrentThread());
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
read_task_cb_.Cancel();
}
void FakeAudioConsumer::Worker::DoRead() {
- DCHECK(worker_loop_->BelongsToCurrentThread());
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
{
base::AutoLock scoped_lock(read_cb_lock_);
@@ -156,7 +156,8 @@ void FakeAudioConsumer::Worker::DoRead() {
delay += buffer_duration_ * (-delay / buffer_duration_ + 1);
next_read_time_ = now + delay;
- worker_loop_->PostDelayedTask(FROM_HERE, read_task_cb_.callback(), delay);
+ worker_task_runner_->PostDelayedTask(
+ FROM_HERE, read_task_cb_.callback(), delay);
}
} // namespace media
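
Besides the MessageLoopProxy-to-SingleThreadTaskRunner migration, DoRead()
keeps its catch-up rule for late callbacks. A worked example of that rule,
using plain integer milliseconds in place of base::TimeDelta (TimeDelta
division truncates the same way) and assuming, as in the surrounding code,
that the adjustment only applies once the delay has gone negative:

    // If a read lands 25 ms late on a 10 ms buffer cadence:
    //   delay = -25
    //   -delay / buffer = 25 / 10 = 2          (integer division)
    //   delay += buffer * (2 + 1) = -25 + 30 = 5
    // so the next read is posted 5 ms out, back on the 10 ms grid.
    int CatchUpDelayMs(int delay_ms, int buffer_ms) {
      if (delay_ms < 0)
        delay_ms += buffer_ms * (-delay_ms / buffer_ms + 1);
      return delay_ms;
    }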
diff --git a/chromium/media/audio/fake_audio_consumer.h b/chromium/media/audio/fake_audio_consumer.h
index 50373565d00..18c552ad97b 100644
--- a/chromium/media/audio/fake_audio_consumer.h
+++ b/chromium/media/audio/fake_audio_consumer.h
@@ -10,7 +10,7 @@
#include "media/base/media_export.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -21,12 +21,13 @@ class AudioParameters;
// simulate a real time consumer of audio data.
class MEDIA_EXPORT FakeAudioConsumer {
public:
- // |worker_loop| is the loop on which the ReadCB provided to Start() will be
- // executed on. This may or may not be the be for the same thread that
- // invokes the Start/Stop methods.
+ // |worker_task_runner| is the task runner on which the ReadCB provided to
+ // Start() will be executed. This may or may not be the same thread that
+ // invokes the Start/Stop methods.
// |params| is used to determine the frequency of callbacks.
- FakeAudioConsumer(const scoped_refptr<base::MessageLoopProxy>& worker_loop,
- const AudioParameters& params);
+ FakeAudioConsumer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
+ const AudioParameters& params);
~FakeAudioConsumer();
// Start executing |read_cb| at regular intervals. Stop() must be called by
diff --git a/chromium/media/audio/fake_audio_input_stream.cc b/chromium/media/audio/fake_audio_input_stream.cc
index a00a9b62001..384adcb411c 100644
--- a/chromium/media/audio/fake_audio_input_stream.cc
+++ b/chromium/media/audio/fake_audio_input_stream.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/lazy_instance.h"
#include "media/audio/audio_manager_base.h"
+#include "media/base/audio_bus.h"
using base::TimeTicks;
using base::TimeDelta;
@@ -20,10 +21,16 @@ namespace {
const int kBeepDurationMilliseconds = 20;
const int kBeepFrequency = 400;
+// Interval between two automatic beeps.
+const int kAutomaticBeepIntervalInMs = 500;
+
+// An automatic beep is triggered every |kAutomaticBeepIntervalInMs| unless
+// users explicitly call BeepOnce(), which disables the automatic beep.
struct BeepContext {
- BeepContext() : beep_once(false) {}
+ BeepContext() : beep_once(false), automatic(true) {}
base::Lock beep_lock;
bool beep_once;
+ bool automatic;
};
static base::LazyInstance<BeepContext> g_beep_context =
@@ -42,17 +49,20 @@ FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
: audio_manager_(manager),
callback_(NULL),
buffer_size_((params.channels() * params.bits_per_sample() *
- params.frames_per_buffer()) / 8),
+ params.frames_per_buffer()) /
+ 8),
params_(params),
thread_("FakeAudioRecordingThread"),
callback_interval_(base::TimeDelta::FromMilliseconds(
(params.frames_per_buffer() * 1000) / params.sample_rate())),
- beep_duration_in_buffers_(
- kBeepDurationMilliseconds * params.sample_rate() /
- params.frames_per_buffer() / 1000),
+ beep_duration_in_buffers_(kBeepDurationMilliseconds *
+ params.sample_rate() /
+ params.frames_per_buffer() /
+ 1000),
beep_generated_in_buffers_(0),
beep_period_in_frames_(params.sample_rate() / kBeepFrequency),
- frames_elapsed_(0) {
+ frames_elapsed_(0),
+ audio_bus_(AudioBus::Create(params)) {
}
FakeAudioInputStream::~FakeAudioInputStream() {}
@@ -60,11 +70,13 @@ FakeAudioInputStream::~FakeAudioInputStream() {}
bool FakeAudioInputStream::Open() {
buffer_.reset(new uint8[buffer_size_]);
memset(buffer_.get(), 0, buffer_size_);
+ audio_bus_->Zero();
return true;
}
void FakeAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(!thread_.IsRunning());
+ DCHECK(!callback_);
callback_ = callback;
last_callback_time_ = TimeTicks::Now();
thread_.Start();
@@ -77,14 +89,37 @@ void FakeAudioInputStream::Start(AudioInputCallback* callback) {
void FakeAudioInputStream::DoCallback() {
DCHECK(callback_);
+ const TimeTicks now = TimeTicks::Now();
+ base::TimeDelta next_callback_time =
+ last_callback_time_ + callback_interval_ * 2 - now;
+
+ // If we are falling behind, try to catch up as much as we can in the next
+ // callback.
+ if (next_callback_time < base::TimeDelta())
+ next_callback_time = base::TimeDelta();
+
+ // Accumulate the time from the last beep.
+ interval_from_last_beep_ += now - last_callback_time_;
+
+ last_callback_time_ = now;
+
memset(buffer_.get(), 0, buffer_size_);
bool should_beep = false;
{
BeepContext* beep_context = g_beep_context.Pointer();
base::AutoLock auto_lock(beep_context->beep_lock);
- should_beep = beep_context->beep_once;
- beep_context->beep_once = false;
+ if (beep_context->automatic) {
+ base::TimeDelta delta = interval_from_last_beep_ -
+ TimeDelta::FromMilliseconds(kAutomaticBeepIntervalInMs);
+ if (delta > base::TimeDelta()) {
+ should_beep = true;
+ interval_from_last_beep_ = delta;
+ }
+ } else {
+ should_beep = beep_context->beep_once;
+ beep_context->beep_once = false;
+ }
}
// If this object was instructed to generate a beep or has started to
@@ -102,7 +137,6 @@ void FakeAudioInputStream::DoCallback() {
while (position + high_bytes <= buffer_size_) {
// Write high values first.
memset(buffer_.get() + position, 128, high_bytes);
-
// Then leave low values in the buffer with |high_bytes|.
position += high_bytes * 2;
}
@@ -112,19 +146,11 @@ void FakeAudioInputStream::DoCallback() {
beep_generated_in_buffers_ = 0;
}
- callback_->OnData(this, buffer_.get(), buffer_size_, buffer_size_, 1.0);
+ audio_bus_->FromInterleaved(
+ buffer_.get(), audio_bus_->frames(), params_.bits_per_sample() / 8);
+ callback_->OnData(this, audio_bus_.get(), buffer_size_, 1.0);
frames_elapsed_ += params_.frames_per_buffer();
- const TimeTicks now = TimeTicks::Now();
- base::TimeDelta next_callback_time =
- last_callback_time_ + callback_interval_ * 2 - now;
-
- // If we are falling behind, try to catch up as much as we can in the next
- // callback.
- if (next_callback_time < base::TimeDelta())
- next_callback_time = base::TimeDelta();
-
- last_callback_time_ = now;
thread_.message_loop()->PostDelayedTask(
FROM_HERE,
base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
@@ -133,13 +159,10 @@ void FakeAudioInputStream::DoCallback() {
void FakeAudioInputStream::Stop() {
thread_.Stop();
+ callback_ = NULL;
}
void FakeAudioInputStream::Close() {
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
audio_manager_->ReleaseInputStream(this);
}
@@ -165,6 +188,7 @@ void FakeAudioInputStream::BeepOnce() {
BeepContext* beep_context = g_beep_context.Pointer();
base::AutoLock auto_lock(beep_context->beep_lock);
beep_context->beep_once = true;
+ beep_context->automatic = false;
}
} // namespace media
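
A condensed sketch of the automatic-beep scheduling introduced above, using
integer milliseconds in place of base::TimeDelta (the 500 ms constant is the
file's kAutomaticBeepIntervalInMs):

    bool ShouldBeepAutomatic(int* ms_since_last_beep, int elapsed_ms) {
      const int kAutomaticBeepIntervalInMs = 500;
      *ms_since_last_beep += elapsed_ms;
      int overshoot = *ms_since_last_beep - kAutomaticBeepIntervalInMs;
      if (overshoot <= 0)
        return false;                     // Interval not reached; keep accumulating.
      *ms_since_last_beep = overshoot;    // Carry remainder to stay on cadence.
      return true;
    }

Calling BeepOnce() flips the shared BeepContext to manual mode, after which
only explicit requests beep.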
diff --git a/chromium/media/audio/fake_audio_input_stream.h b/chromium/media/audio/fake_audio_input_stream.h
index 5879ab39763..e6c625e6b3a 100644
--- a/chromium/media/audio/fake_audio_input_stream.h
+++ b/chromium/media/audio/fake_audio_input_stream.h
@@ -18,6 +18,7 @@
namespace media {
+class AudioBus;
class AudioManagerBase;
class MEDIA_EXPORT FakeAudioInputStream
@@ -63,10 +64,12 @@ class MEDIA_EXPORT FakeAudioInputStream
base::Thread thread_;
base::TimeTicks last_callback_time_;
base::TimeDelta callback_interval_;
+ base::TimeDelta interval_from_last_beep_;
int beep_duration_in_buffers_;
int beep_generated_in_buffers_;
int beep_period_in_frames_;
int frames_elapsed_;
+ scoped_ptr<media::AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioInputStream);
};
diff --git a/chromium/media/audio/fake_audio_log_factory.cc b/chromium/media/audio/fake_audio_log_factory.cc
index 6f752e559fd..5e2d134520c 100644
--- a/chromium/media/audio/fake_audio_log_factory.cc
+++ b/chromium/media/audio/fake_audio_log_factory.cc
@@ -12,8 +12,7 @@ class FakeAudioLogImpl : public AudioLog {
virtual ~FakeAudioLogImpl() {}
virtual void OnCreated(int component_id,
const media::AudioParameters& params,
- const std::string& input_device_id,
- const std::string& output_device_id) OVERRIDE {}
+ const std::string& device_id) OVERRIDE {}
virtual void OnStarted(int component_id) OVERRIDE {}
virtual void OnStopped(int component_id) OVERRIDE {}
virtual void OnClosed(int component_id) OVERRIDE {}
diff --git a/chromium/media/audio/fake_audio_manager.cc b/chromium/media/audio/fake_audio_manager.cc
index bfe9a0a7ff3..e5d9bd4d8c8 100644
--- a/chromium/media/audio/fake_audio_manager.cc
+++ b/chromium/media/audio/fake_audio_manager.cc
@@ -33,8 +33,7 @@ AudioOutputStream* FakeAudioManager::MakeLinearOutputStream(
AudioOutputStream* FakeAudioManager::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
return FakeAudioOutputStream::MakeFakeStream(this, params);
}
diff --git a/chromium/media/audio/fake_audio_manager.h b/chromium/media/audio/fake_audio_manager.h
index b5c45201ed1..9fbf140c6c0 100644
--- a/chromium/media/audio/fake_audio_manager.h
+++ b/chromium/media/audio/fake_audio_manager.h
@@ -26,8 +26,7 @@ class MEDIA_EXPORT FakeAudioManager : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(const AudioParameters& params,
const std::string& device_id)
OVERRIDE;
diff --git a/chromium/media/audio/fake_audio_output_stream.cc b/chromium/media/audio/fake_audio_output_stream.cc
index fb460ab6805..0448c23f8b7 100644
--- a/chromium/media/audio/fake_audio_output_stream.cc
+++ b/chromium/media/audio/fake_audio_output_stream.cc
@@ -7,7 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager_base.h"
namespace media {
@@ -22,7 +22,7 @@ FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
const AudioParameters& params)
: audio_manager_(manager),
callback_(NULL),
- fake_consumer_(manager->GetWorkerLoop(), params) {
+ fake_consumer_(manager->GetWorkerTaskRunner(), params) {
}
FakeAudioOutputStream::~FakeAudioOutputStream() {
@@ -30,26 +30,26 @@ FakeAudioOutputStream::~FakeAudioOutputStream() {
}
bool FakeAudioOutputStream::Open() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return true;
}
void FakeAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
callback_ = callback;
fake_consumer_.Start(base::Bind(
&FakeAudioOutputStream::CallOnMoreData, base::Unretained(this)));
}
void FakeAudioOutputStream::Stop() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
fake_consumer_.Stop();
callback_ = NULL;
}
void FakeAudioOutputStream::Close() {
DCHECK(!callback_);
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
audio_manager_->ReleaseOutputStream(this);
}
@@ -60,7 +60,7 @@ void FakeAudioOutputStream::GetVolume(double* volume) {
};
void FakeAudioOutputStream::CallOnMoreData(AudioBus* audio_bus) {
- DCHECK(audio_manager_->GetWorkerLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetWorkerTaskRunner()->BelongsToCurrentThread());
callback_->OnMoreData(audio_bus, AudioBuffersState());
}
diff --git a/chromium/media/audio/linux/audio_manager_linux.cc b/chromium/media/audio/linux/audio_manager_linux.cc
index eaeb2f332b9..e7824b4d6c8 100644
--- a/chromium/media/audio/linux/audio_manager_linux.cc
+++ b/chromium/media/audio/linux/audio_manager_linux.cc
@@ -23,13 +23,13 @@ enum LinuxAudioIO {
kPulse,
kAlsa,
kCras,
- kAudioIOMax // Must always be last!
+ kAudioIOMax = kCras // Must always be equal to largest logged entry.
};
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
#if defined(USE_CRAS)
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
- UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax + 1);
return new AudioManagerCras(audio_log_factory);
}
#endif
@@ -37,13 +37,13 @@ AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
#if defined(USE_PULSEAUDIO)
AudioManager* manager = AudioManagerPulse::Create(audio_log_factory);
if (manager) {
- UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kPulse, kAudioIOMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kPulse, kAudioIOMax + 1);
return manager;
}
#endif
#if defined(USE_ALSA)
- UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kAlsa, kAudioIOMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kAlsa, kAudioIOMax + 1);
return new AudioManagerAlsa(audio_log_factory);
#else
return new FakeAudioManager(audio_log_factory);
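
The histogram change above switches from a trailing sentinel to the
"max equals last real value" convention: UMA_HISTOGRAM_ENUMERATION takes an
exclusive upper bound, so each call now passes kAudioIOMax + 1:

    enum LinuxAudioIO {
      kPulse,
      kAlsa,
      kCras,
      kAudioIOMax = kCras  // Must always be equal to largest logged entry.
    };

    // Boundary is exclusive, hence the + 1.
    UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax + 1);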
diff --git a/chromium/media/audio/mac/aggregate_device_manager.cc b/chromium/media/audio/mac/aggregate_device_manager.cc
deleted file mode 100644
index c7f323322e7..00000000000
--- a/chromium/media/audio/mac/aggregate_device_manager.cc
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/aggregate_device_manager.h"
-
-#include <CoreAudio/AudioHardware.h>
-#include <string>
-
-#include "base/mac/mac_logging.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/mac/audio_manager_mac.h"
-
-using base::ScopedCFTypeRef;
-
-namespace media {
-
-AggregateDeviceManager::AggregateDeviceManager()
- : plugin_id_(kAudioObjectUnknown),
- input_device_(kAudioDeviceUnknown),
- output_device_(kAudioDeviceUnknown),
- aggregate_device_(kAudioObjectUnknown) {
-}
-
-AggregateDeviceManager::~AggregateDeviceManager() {
- DestroyAggregateDevice();
-}
-
-AudioDeviceID AggregateDeviceManager::GetDefaultAggregateDevice() {
- AudioDeviceID current_input_device;
- AudioDeviceID current_output_device;
- AudioManagerMac::GetDefaultInputDevice(&current_input_device);
- AudioManagerMac::GetDefaultOutputDevice(&current_output_device);
-
- if (AudioManagerMac::HardwareSampleRateForDevice(current_input_device) !=
- AudioManagerMac::HardwareSampleRateForDevice(current_output_device)) {
- // TODO(crogers): with some extra work we can make aggregate devices work
- // if the clock domain is the same but the sample-rate differ.
- // For now we fallback to the synchronized path.
- return kAudioDeviceUnknown;
- }
-
- // Use a lazily created aggregate device if it's already available
- // and still appropriate.
- if (aggregate_device_ != kAudioObjectUnknown) {
- // TODO(crogers): handle default device changes for synchronized I/O.
- // For now, we check to make sure the default devices haven't changed
- // since we lazily created the aggregate device.
- if (current_input_device == input_device_ &&
- current_output_device == output_device_)
- return aggregate_device_;
-
- // For now, once lazily created don't attempt to create another
- // aggregate device.
- return kAudioDeviceUnknown;
- }
-
- input_device_ = current_input_device;
- output_device_ = current_output_device;
-
- // Only create an aggregrate device if the clock domains match.
- UInt32 input_clockdomain = GetClockDomain(input_device_);
- UInt32 output_clockdomain = GetClockDomain(output_device_);
- DVLOG(1) << "input_clockdomain: " << input_clockdomain;
- DVLOG(1) << "output_clockdomain: " << output_clockdomain;
-
- if (input_clockdomain == 0 || input_clockdomain != output_clockdomain)
- return kAudioDeviceUnknown;
-
- OSStatus result = CreateAggregateDevice(
- input_device_,
- output_device_,
- &aggregate_device_);
- if (result != noErr)
- DestroyAggregateDevice();
-
- return aggregate_device_;
-}
-
-CFStringRef AggregateDeviceManager::GetDeviceUID(AudioDeviceID id) {
- static const AudioObjectPropertyAddress kDeviceUIDAddress = {
- kAudioDevicePropertyDeviceUID,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- // As stated in the CoreAudio header (AudioHardwareBase.h),
- // the caller is responsible for releasing the device_UID.
- CFStringRef device_UID;
- UInt32 size = sizeof(device_UID);
- OSStatus result = AudioObjectGetPropertyData(
- id,
- &kDeviceUIDAddress,
- 0,
- 0,
- &size,
- &device_UID);
-
- return (result == noErr) ? device_UID : NULL;
-}
-
-void AggregateDeviceManager::GetDeviceName(
- AudioDeviceID id, char* name, UInt32 size) {
- static const AudioObjectPropertyAddress kDeviceNameAddress = {
- kAudioDevicePropertyDeviceName,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- OSStatus result = AudioObjectGetPropertyData(
- id,
- &kDeviceNameAddress,
- 0,
- 0,
- &size,
- name);
-
- if (result != noErr && size > 0)
- name[0] = 0;
-}
-
-UInt32 AggregateDeviceManager::GetClockDomain(AudioDeviceID device_id) {
- static const AudioObjectPropertyAddress kClockDomainAddress = {
- kAudioDevicePropertyClockDomain,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 clockdomain = 0;
- UInt32 size = sizeof(UInt32);
- OSStatus result = AudioObjectGetPropertyData(
- device_id,
- &kClockDomainAddress,
- 0,
- 0,
- &size,
- &clockdomain);
-
- return (result == noErr) ? clockdomain : 0;
-}
-
-OSStatus AggregateDeviceManager::GetPluginID(AudioObjectID* id) {
- DCHECK(id);
-
- // Get the audio hardware plugin.
- CFStringRef bundle_name = CFSTR("com.apple.audio.CoreAudio");
-
- AudioValueTranslation plugin_translation;
- plugin_translation.mInputData = &bundle_name;
- plugin_translation.mInputDataSize = sizeof(bundle_name);
- plugin_translation.mOutputData = id;
- plugin_translation.mOutputDataSize = sizeof(*id);
-
- static const AudioObjectPropertyAddress kPlugInAddress = {
- kAudioHardwarePropertyPlugInForBundleID,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 size = sizeof(plugin_translation);
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &kPlugInAddress,
- 0,
- 0,
- &size,
- &plugin_translation);
-
- DVLOG(1) << "CoreAudio plugin ID: " << *id;
-
- return result;
-}
-
-CFMutableDictionaryRef
-AggregateDeviceManager::CreateAggregateDeviceDictionary(
- AudioDeviceID input_id,
- AudioDeviceID output_id) {
- CFMutableDictionaryRef aggregate_device_dict = CFDictionaryCreateMutable(
- NULL,
- 0,
- &kCFTypeDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks);
- if (!aggregate_device_dict)
- return NULL;
-
- const CFStringRef kAggregateDeviceName =
- CFSTR("ChromeAggregateAudioDevice");
- const CFStringRef kAggregateDeviceUID =
- CFSTR("com.google.chrome.AggregateAudioDevice");
-
- // Add name and UID of the device to the dictionary.
- CFDictionaryAddValue(
- aggregate_device_dict,
- CFSTR(kAudioAggregateDeviceNameKey),
- kAggregateDeviceName);
- CFDictionaryAddValue(
- aggregate_device_dict,
- CFSTR(kAudioAggregateDeviceUIDKey),
- kAggregateDeviceUID);
-
- // Add a "private aggregate key" to the dictionary.
- // The 1 value means that the created aggregate device will
- // only be accessible from the process that created it, and
- // won't be visible to outside processes.
- int value = 1;
- ScopedCFTypeRef<CFNumberRef> aggregate_device_number(CFNumberCreate(
- NULL,
- kCFNumberIntType,
- &value));
- CFDictionaryAddValue(
- aggregate_device_dict,
- CFSTR(kAudioAggregateDeviceIsPrivateKey),
- aggregate_device_number);
-
- return aggregate_device_dict;
-}
-
-CFMutableArrayRef
-AggregateDeviceManager::CreateSubDeviceArray(
- CFStringRef input_device_UID, CFStringRef output_device_UID) {
- CFMutableArrayRef sub_devices_array = CFArrayCreateMutable(
- NULL,
- 0,
- &kCFTypeArrayCallBacks);
-
- CFArrayAppendValue(sub_devices_array, input_device_UID);
- CFArrayAppendValue(sub_devices_array, output_device_UID);
-
- return sub_devices_array;
-}
-
-OSStatus AggregateDeviceManager::CreateAggregateDevice(
- AudioDeviceID input_id,
- AudioDeviceID output_id,
- AudioDeviceID* aggregate_device) {
- DCHECK(aggregate_device);
-
- const size_t kMaxDeviceNameLength = 256;
-
- scoped_ptr<char[]> input_device_name(new char[kMaxDeviceNameLength]);
- GetDeviceName(
- input_id,
- input_device_name.get(),
- sizeof(input_device_name));
- DVLOG(1) << "Input device: \n" << input_device_name;
-
- scoped_ptr<char[]> output_device_name(new char[kMaxDeviceNameLength]);
- GetDeviceName(
- output_id,
- output_device_name.get(),
- sizeof(output_device_name));
- DVLOG(1) << "Output device: \n" << output_device_name;
-
- OSStatus result = GetPluginID(&plugin_id_);
- if (result != noErr)
- return result;
-
- // Create a dictionary for the aggregate device.
- ScopedCFTypeRef<CFMutableDictionaryRef> aggregate_device_dict(
- CreateAggregateDeviceDictionary(input_id, output_id));
- if (!aggregate_device_dict)
- return -1;
-
- // Create the aggregate device.
- static const AudioObjectPropertyAddress kCreateAggregateDeviceAddress = {
- kAudioPlugInCreateAggregateDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 size = sizeof(*aggregate_device);
- result = AudioObjectGetPropertyData(
- plugin_id_,
- &kCreateAggregateDeviceAddress,
- sizeof(aggregate_device_dict),
- &aggregate_device_dict,
- &size,
- aggregate_device);
- if (result != noErr) {
- DLOG(ERROR) << "Error creating aggregate audio device!";
- return result;
- }
-
- // Set the sub-devices for the aggregate device.
- // In this case we use two: the input and output devices.
-
- ScopedCFTypeRef<CFStringRef> input_device_UID(GetDeviceUID(input_id));
- ScopedCFTypeRef<CFStringRef> output_device_UID(GetDeviceUID(output_id));
- if (!input_device_UID || !output_device_UID) {
- DLOG(ERROR) << "Error getting audio device UID strings.";
- return -1;
- }
-
- ScopedCFTypeRef<CFMutableArrayRef> sub_devices_array(
- CreateSubDeviceArray(input_device_UID, output_device_UID));
- if (sub_devices_array == NULL) {
- DLOG(ERROR) << "Error creating sub-devices array.";
- return -1;
- }
-
- static const AudioObjectPropertyAddress kSetSubDevicesAddress = {
- kAudioAggregateDevicePropertyFullSubDeviceList,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- size = sizeof(CFMutableArrayRef);
- result = AudioObjectSetPropertyData(
- *aggregate_device,
- &kSetSubDevicesAddress,
- 0,
- NULL,
- size,
- &sub_devices_array);
- if (result != noErr) {
- DLOG(ERROR) << "Error setting aggregate audio device sub-devices!";
- return result;
- }
-
- // Use the input device as the master device.
- static const AudioObjectPropertyAddress kSetMasterDeviceAddress = {
- kAudioAggregateDevicePropertyMasterSubDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- size = sizeof(CFStringRef);
- result = AudioObjectSetPropertyData(
- *aggregate_device,
- &kSetMasterDeviceAddress,
- 0,
- NULL,
- size,
- &input_device_UID);
- if (result != noErr) {
- DLOG(ERROR) << "Error setting aggregate audio device master device!";
- return result;
- }
-
- DVLOG(1) << "New aggregate device: " << *aggregate_device;
- return noErr;
-}
-
-void AggregateDeviceManager::DestroyAggregateDevice() {
- if (aggregate_device_ == kAudioObjectUnknown)
- return;
-
- static const AudioObjectPropertyAddress kDestroyAddress = {
- kAudioPlugInDestroyAggregateDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 size = sizeof(aggregate_device_);
- OSStatus result = AudioObjectGetPropertyData(
- plugin_id_,
- &kDestroyAddress,
- 0,
- NULL,
- &size,
- &aggregate_device_);
- if (result != noErr) {
- DLOG(ERROR) << "Error destroying aggregate audio device!";
- return;
- }
-
- aggregate_device_ = kAudioObjectUnknown;
-}
-
-} // namespace media
diff --git a/chromium/media/audio/mac/aggregate_device_manager.h b/chromium/media/audio/mac/aggregate_device_manager.h
deleted file mode 100644
index 7b8b71ff655..00000000000
--- a/chromium/media/audio/mac/aggregate_device_manager.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
-#define MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
-
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class MEDIA_EXPORT AggregateDeviceManager {
- public:
- AggregateDeviceManager();
- ~AggregateDeviceManager();
-
- // Lazily creates an aggregate device based on the default
- // input and output devices.
- // It will either return a valid device or kAudioDeviceUnknown
- // if the default devices are not suitable for aggregate devices.
- AudioDeviceID GetDefaultAggregateDevice();
-
- private:
- // The caller is responsible for releasing the CFStringRef.
- static CFStringRef GetDeviceUID(AudioDeviceID id);
-
- static void GetDeviceName(AudioDeviceID id, char* name, UInt32 size);
- static UInt32 GetClockDomain(AudioDeviceID device_id);
- static OSStatus GetPluginID(AudioObjectID* id);
-
- CFMutableDictionaryRef CreateAggregateDeviceDictionary(
- AudioDeviceID input_id,
- AudioDeviceID output_id);
-
- CFMutableArrayRef CreateSubDeviceArray(CFStringRef input_device_UID,
- CFStringRef output_device_UID);
-
- OSStatus CreateAggregateDevice(AudioDeviceID input_id,
- AudioDeviceID output_id,
- AudioDeviceID* aggregate_device);
- void DestroyAggregateDevice();
-
- AudioObjectID plugin_id_;
- AudioDeviceID input_device_;
- AudioDeviceID output_device_;
-
- AudioDeviceID aggregate_device_;
-
- DISALLOW_COPY_AND_ASSIGN(AggregateDeviceManager);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
index 9fcd46a6a95..41fc57c553f 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -7,22 +7,17 @@
#include <CoreServices/CoreServices.h>
#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
+#include "base/time/time.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/audio_pull_fifo.h"
namespace media {
-static void ZeroBufferList(AudioBufferList* buffer_list) {
- for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i) {
- memset(buffer_list->mBuffers[i].mData,
- 0,
- buffer_list->mBuffers[i].mDataByteSize);
- }
-}
-
static void WrapBufferList(AudioBufferList* buffer_list,
AudioBus* bus,
int frames) {
@@ -48,7 +43,6 @@ AUHALStream::AUHALStream(
AudioDeviceID device)
: manager_(manager),
params_(params),
- input_channels_(params_.input_channels()),
output_channels_(params_.channels()),
number_of_frames_(params_.frames_per_buffer()),
source_(NULL),
@@ -57,14 +51,12 @@ AUHALStream::AUHALStream(
volume_(1),
hardware_latency_frames_(0),
stopped_(false),
- input_buffer_list_(NULL),
current_hardware_pending_bytes_(0) {
// We must have a manager.
DCHECK(manager_);
VLOG(1) << "AUHALStream::AUHALStream()";
VLOG(1) << "Device: " << device;
- VLOG(1) << "Input channels: " << input_channels_;
VLOG(1) << "Output channels: " << output_channels_;
VLOG(1) << "Sample rate: " << params_.sample_rate();
VLOG(1) << "Buffer size: " << number_of_frames_;
@@ -74,27 +66,15 @@ AUHALStream::~AUHALStream() {
}
bool AUHALStream::Open() {
- // Get the total number of input and output channels that the
+ // Get the total number of output channels that the
// hardware supports.
- int device_input_channels;
- bool got_input_channels = AudioManagerMac::GetDeviceChannels(
- device_,
- kAudioDevicePropertyScopeInput,
- &device_input_channels);
-
int device_output_channels;
bool got_output_channels = AudioManagerMac::GetDeviceChannels(
device_,
kAudioDevicePropertyScopeOutput,
&device_output_channels);
- // Sanity check the requested I/O channels.
- if (!got_input_channels ||
- input_channels_ < 0 || input_channels_ > device_input_channels) {
- LOG(ERROR) << "AudioDevice does not support requested input channels.";
- return false;
- }
-
+ // Sanity check the requested output channels.
if (!got_output_channels ||
output_channels_ <= 0 || output_channels_ > device_output_channels) {
LOG(ERROR) << "AudioDevice does not support requested output channels.";
@@ -110,7 +90,10 @@ bool AUHALStream::Open() {
return false;
}
- CreateIOBusses();
+ // The output bus will wrap the AudioBufferList given to us in
+ // the Render() callback.
+ DCHECK_GT(output_channels_, 0);
+ output_bus_ = AudioBus::CreateWrapper(output_channels_);
bool configured = ConfigureAUHAL();
if (configured)
@@ -120,13 +103,6 @@ bool AUHALStream::Open() {
}
void AUHALStream::Close() {
- if (input_buffer_list_) {
- input_buffer_list_storage_.reset();
- input_buffer_list_ = NULL;
- input_bus_.reset(NULL);
- output_bus_.reset(NULL);
- }
-
if (audio_unit_) {
OSStatus result = AudioUnitUninitialize(audio_unit_);
OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
@@ -148,6 +124,18 @@ void AUHALStream::Start(AudioSourceCallback* callback) {
return;
}
+ // Check if we should defer Start() for http://crbug.com/160920.
+ if (manager_->ShouldDeferStreamStart()) {
+ // Use a cancellable closure so that if Stop() is called before Start()
+ // actually runs, we can cancel the pending start.
+ deferred_start_cb_.Reset(
+ base::Bind(&AUHALStream::Start, base::Unretained(this), callback));
+ manager_->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE, deferred_start_cb_.callback(), base::TimeDelta::FromSeconds(
+ AudioManagerMac::kStartDelayInSecsForPowerEvents));
+ return;
+ }
+
stopped_ = false;
audio_fifo_.reset();
{
@@ -165,6 +153,7 @@ void AUHALStream::Start(AudioSourceCallback* callback) {
}
void AUHALStream::Stop() {
+ deferred_start_cb_.Cancel();
if (stopped_)
return;
@@ -196,43 +185,25 @@ OSStatus AUHALStream::Render(
const AudioTimeStamp* output_time_stamp,
UInt32 bus_number,
UInt32 number_of_frames,
- AudioBufferList* io_data) {
+ AudioBufferList* data) {
TRACE_EVENT0("audio", "AUHALStream::Render");
// If the stream parameters change for any reason, we need to insert a FIFO
- // since the OnMoreData() pipeline can't handle frame size changes. Generally
- // this is a temporary situation which can occur after a device change has
- // occurred but the AudioManager hasn't received the notification yet.
+ // since the OnMoreData() pipeline can't handle frame size changes.
if (number_of_frames != number_of_frames_) {
// Create a FIFO on the fly to handle any discrepancies in callback rates.
if (!audio_fifo_) {
- VLOG(1) << "Audio frame size change detected; adding FIFO to compensate.";
+ VLOG(1) << "Audio frame size changed from " << number_of_frames_ << " to "
+ << number_of_frames << "; adding FIFO to compensate.";
audio_fifo_.reset(new AudioPullFifo(
output_channels_,
number_of_frames_,
base::Bind(&AUHALStream::ProvideInput, base::Unretained(this))));
}
-
- // Synchronous IO is not supported in this state.
- if (input_channels_ > 0)
- input_bus_->Zero();
- } else {
- if (input_channels_ > 0 && input_buffer_list_) {
- // Get the input data. |input_buffer_list_| is wrapped
- // to point to the data allocated in |input_bus_|.
- OSStatus result = AudioUnitRender(audio_unit_,
- flags,
- output_time_stamp,
- 1,
- number_of_frames,
- input_buffer_list_);
- if (result != noErr)
- ZeroBufferList(input_buffer_list_);
- }
}
// Make |output_bus_| wrap the output AudioBufferList.
- WrapBufferList(io_data, output_bus_.get(), number_of_frames);
+ WrapBufferList(data, output_bus_.get(), number_of_frames);
// Update the playout latency.
const double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
@@ -255,8 +226,7 @@ void AUHALStream::ProvideInput(int frame_delay, AudioBus* dest) {
}
// Supply the input data and render the output data.
- source_->OnMoreIOData(
- input_bus_.get(),
+ source_->OnMoreData(
dest,
AudioBuffersState(0,
current_hardware_pending_bytes_ +
@@ -355,52 +325,6 @@ double AUHALStream::GetPlayoutLatency(
return (delay_frames + hardware_latency_frames_);
}
-void AUHALStream::CreateIOBusses() {
- if (input_channels_ > 0) {
- // Allocate storage for the AudioBufferList used for the
- // input data from the input AudioUnit.
- // We allocate enough space for with one AudioBuffer per channel.
- size_t buffer_list_size = offsetof(AudioBufferList, mBuffers[0]) +
- (sizeof(AudioBuffer) * input_channels_);
- input_buffer_list_storage_.reset(new uint8[buffer_list_size]);
-
- input_buffer_list_ =
- reinterpret_cast<AudioBufferList*>(input_buffer_list_storage_.get());
- input_buffer_list_->mNumberBuffers = input_channels_;
-
- // |input_bus_| allocates the storage for the PCM input data.
- input_bus_ = AudioBus::Create(input_channels_, number_of_frames_);
-
- // Make the AudioBufferList point to the memory in |input_bus_|.
- UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
- for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
- input_buffer_list_->mBuffers[i].mNumberChannels = 1;
- input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
- input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
- }
- }
-
- // The output bus will wrap the AudioBufferList given to us in
- // the Render() callback.
- DCHECK_GT(output_channels_, 0);
- output_bus_ = AudioBus::CreateWrapper(output_channels_);
-}
-
-bool AUHALStream::EnableIO(bool enable, UInt32 scope) {
- // See Apple technote for details about the EnableIO property.
- // Note that we use bus 1 for input and bus 0 for output:
- // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
- UInt32 enable_IO = enable ? 1 : 0;
- OSStatus result = AudioUnitSetProperty(
- audio_unit_,
- kAudioOutputUnitProperty_EnableIO,
- scope,
- (scope == kAudioUnitScope_Input) ? 1 : 0,
- &enable_IO,
- sizeof(enable_IO));
- return (result == noErr);
-}
-
bool AUHALStream::SetStreamFormat(
AudioStreamBasicDescription* desc,
int channels,
@@ -431,8 +355,7 @@ bool AUHALStream::SetStreamFormat(
}
bool AUHALStream::ConfigureAUHAL() {
- if (device_ == kAudioObjectUnknown ||
- (input_channels_ == 0 && output_channels_ == 0))
+ if (device_ == kAudioObjectUnknown || output_channels_ == 0)
return false;
AudioComponentDescription desc = {
@@ -452,10 +375,19 @@ bool AUHALStream::ConfigureAUHAL() {
return false;
}
- // Enable input and output as appropriate.
- if (!EnableIO(input_channels_ > 0, kAudioUnitScope_Input))
- return false;
- if (!EnableIO(output_channels_ > 0, kAudioUnitScope_Output))
+ // Enable output as appropriate.
+ // See Apple technote for details about the EnableIO property.
+ // Note that we use bus 1 for input and bus 0 for output:
+ // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
+ UInt32 enable_IO = 1;
+ result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0,
+ &enable_IO,
+ sizeof(enable_IO));
+ if (result != noErr)
return false;
// Set the device to be used with the AUHAL AudioUnit.
@@ -475,42 +407,49 @@ bool AUHALStream::ConfigureAUHAL() {
// (element) numbers:
// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
- if (input_channels_ > 0) {
- if (!SetStreamFormat(&input_format_,
- input_channels_,
- kAudioUnitScope_Output,
- 1))
- return false;
- }
-
- if (output_channels_ > 0) {
- if (!SetStreamFormat(&output_format_,
- output_channels_,
- kAudioUnitScope_Input,
- 0))
- return false;
+ if (!SetStreamFormat(&output_format_,
+ output_channels_,
+ kAudioUnitScope_Input,
+ 0)) {
+ return false;
}
// Set the buffer frame size.
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as the frames_per_buffer() returned by
- // GetDefaultOutputStreamParameters().
- // See http://crbug.com/154352 for details.
- UInt32 buffer_size = number_of_frames_;
- result = AudioUnitSetProperty(
- audio_unit_,
- kAudioDevicePropertyBufferFrameSize,
- kAudioUnitScope_Output,
- 0,
- &buffer_size,
- sizeof(buffer_size));
+ // WARNING: Setting this value changes the frame size for all output audio
+ // units in the current process. As a result, the AURenderCallback must be
+ // able to handle arbitrary buffer sizes and FIFO appropriately.
+ UInt32 buffer_size = 0;
+ UInt32 property_size = sizeof(buffer_size);
+ result = AudioUnitGetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 0,
+ &buffer_size,
+ &property_size);
if (result != noErr) {
OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
+ << "AudioUnitGetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
return false;
}
+ // Only set the buffer size if we're the only active stream or the
+ // requested size is lower than the current buffer size.
+ if (manager_->output_stream_count() == 1 || number_of_frames_ < buffer_size) {
+ buffer_size = number_of_frames_;
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 0,
+ &buffer_size,
+ sizeof(buffer_size));
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "AudioUnitSetProperty("
+ "kAudioDevicePropertyBufferFrameSize) "
+ "failed. Size: " << number_of_frames_;
+ return false;
+ }
+ }
+
// Setup callback.
AURenderCallbackStruct callback;
callback.inputProc = InputProc;
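
A minimal standalone sketch of the get-then-set buffer negotiation introduced in the hunks above, assuming an initialized AUHAL unit; MaybeSetBufferFrameSize and its parameters are hypothetical names, the property calls are the same CoreAudio APIs used by the patch:

#include <AudioUnit/AudioUnit.h>

static bool MaybeSetBufferFrameSize(AudioUnit unit,
                                    UInt32 desired_frames,
                                    bool only_active_stream) {
  UInt32 current_frames = 0;
  UInt32 size = sizeof(current_frames);
  OSStatus result = AudioUnitGetProperty(unit,
                                         kAudioDevicePropertyBufferFrameSize,
                                         kAudioUnitScope_Output,
                                         0,  // Output element (bus 0).
                                         &current_frames,
                                         &size);
  if (result != noErr)
    return false;
  // Lowering the shared frame size is safe for concurrent streams; raising
  // it behind their back is not, so only the sole stream may do that.
  if (only_active_stream || desired_frames < current_frames) {
    result = AudioUnitSetProperty(unit,
                                  kAudioDevicePropertyBufferFrameSize,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desired_frames,
                                  sizeof(desired_frames));
    return result == noErr;
  }
  return true;
}
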
diff --git a/chromium/media/audio/mac/audio_auhal_mac.h b/chromium/media/audio/mac/audio_auhal_mac.h
index b488b73c0d1..8903ea3df4d 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.h
+++ b/chromium/media/audio/mac/audio_auhal_mac.h
@@ -20,6 +20,7 @@
#include <AudioUnit/AudioUnit.h>
#include <CoreAudio/CoreAudio.h>
+#include "base/cancelable_callback.h"
#include "base/compiler_specific.h"
#include "base/synchronization/lock.h"
#include "media/audio/audio_io.h"
@@ -32,8 +33,7 @@ class AudioPullFifo;
// Implementation of AudioOutputStream for Mac OS X using the
// AUHAL Audio Unit present in OS 10.4 and later.
-// It is useful for low-latency output with optional synchronized
-// input.
+// It is useful for low-latency output.
//
// Overview of operation:
// 1) An object of AUHALStream is created by the AudioManager
@@ -87,9 +87,6 @@ class AUHALStream : public AudioOutputStream {
// Called by either |audio_fifo_| or Render() to provide audio data.
void ProvideInput(int frame_delay, AudioBus* dest);
- // Helper method to enable input and output.
- bool EnableIO(bool enable, UInt32 scope);
-
// Sets the stream format on the AUHAL to PCM Float32 non-interleaved
// for the given number of channels on the given scope and element.
// The created stream description will be stored in |desc|.
@@ -116,7 +113,6 @@ class AUHALStream : public AudioOutputStream {
const AudioParameters params_;
// For convenience - same as in params_.
- const int input_channels_;
const int output_channels_;
// Buffer-size.
@@ -130,7 +126,6 @@ class AUHALStream : public AudioOutputStream {
base::Lock source_lock_;
// Holds the stream format details such as bitrate.
- AudioStreamBasicDescription input_format_;
AudioStreamBasicDescription output_format_;
// The audio device to use with the AUHAL.
@@ -149,14 +144,7 @@ class AUHALStream : public AudioOutputStream {
// The flag used to stop the streaming.
bool stopped_;
- // The input AudioUnit renders its data here.
- scoped_ptr<uint8[]> input_buffer_list_storage_;
- AudioBufferList* input_buffer_list_;
-
- // Holds the actual data for |input_buffer_list_|.
- scoped_ptr<AudioBus> input_bus_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
+ // Container for retrieving data from AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> output_bus_;
// Dynamically allocated FIFO used when CoreAudio asks for unexpected frame
@@ -166,6 +154,9 @@ class AUHALStream : public AudioOutputStream {
// Current buffer delay. Set by Render().
uint32 current_hardware_pending_bytes_;
+ // Used to defer Start() to work around http://crbug.com/160920.
+ base::CancelableClosure deferred_start_cb_;
+
DISALLOW_COPY_AND_ASSIGN(AUHALStream);
};
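
The new deferred_start_cb_ member carries the start-deferral pattern used throughout this patch. A compact sketch of the idiom under the era's base:: API; DeferredStarter, ShouldDefer() and the 5-second delay are hypothetical stand-ins:

#include "base/bind.h"
#include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"

class DeferredStarter {
 public:
  void Start() {
    if (ShouldDefer()) {
      // Reset() drops any previously posted start; Cancel() in Stop() keeps
      // a late-running task from starting a stream the client stopped.
      deferred_start_cb_.Reset(
          base::Bind(&DeferredStarter::Start, base::Unretained(this)));
      task_runner_->PostDelayedTask(FROM_HERE,
                                    deferred_start_cb_.callback(),
                                    base::TimeDelta::FromSeconds(5));
      return;
    }
    // Actually start the stream here.
  }

  void Stop() { deferred_start_cb_.Cancel(); }

 private:
  bool ShouldDefer();  // E.g. AudioManagerMac::ShouldDeferStreamStart().
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  base::CancelableClosure deferred_start_cb_;
};
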
diff --git a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
index d709554dfaf..69179d56078 100644
--- a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -4,216 +4,103 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::DoAll;
-using ::testing::Field;
-using ::testing::InSequence;
-using ::testing::Invoke;
-using ::testing::NiceMock;
-using ::testing::NotNull;
-using ::testing::Return;
-
-static const int kBitsPerSample = 16;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
// TODO(crogers): Most of these tests can be made platform agnostic.
// http://crbug.com/223242
namespace media {
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
-// Convenience method which creates a default AudioOutputStream object but
-// also allows the user to modify the default settings.
-class AudioOutputStreamWrapper {
- public:
- explicit AudioOutputStreamWrapper()
- : audio_man_(AudioManager::CreateForTesting()),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- bits_per_sample_(kBitsPerSample) {
- AudioParameters preferred_params =
- audio_man_->GetDefaultOutputStreamParameters();
- channel_layout_ = preferred_params.channel_layout();
- channels_ = preferred_params.channels();
- sample_rate_ = preferred_params.sample_rate();
- samples_per_packet_ = preferred_params.frames_per_buffer();
- }
-
- ~AudioOutputStreamWrapper() {}
+ACTION(ZeroBuffer) {
+ arg0->Zero();
+}
- // Creates AudioOutputStream object using default parameters.
- AudioOutputStream* Create() {
- return CreateOutputStream();
- }
+ACTION_P(SignalEvent, event) {
+ event->Signal();
+}
- // Creates AudioOutputStream object using non-default parameters where the
- // frame size is modified.
- AudioOutputStream* Create(int samples_per_packet) {
- samples_per_packet_ = samples_per_packet;
- return CreateOutputStream();
+class AUHALStreamTest : public testing::Test {
+ public:
+ AUHALStreamTest()
+ : message_loop_(base::MessageLoop::TYPE_UI),
+ manager_(AudioManager::CreateForTesting()) {
+ // Wait for the AudioManager to finish any initialization on the audio loop.
+ base::RunLoop().RunUntilIdle();
}
- // Creates AudioOutputStream object using non-default parameters where the
- // sample rate is modified.
- AudioOutputStream* CreateWithSampleRate(int sample_rate) {
- sample_rate_ = sample_rate;
- return CreateOutputStream();
+ virtual ~AUHALStreamTest() {
+ base::RunLoop().RunUntilIdle();
}
- // Creates AudioOutputStream object using non-default parameters where the
- // channel layout is modified.
- AudioOutputStream* CreateWithLayout(ChannelLayout layout) {
- channel_layout_ = layout;
- channels_ = ChannelLayoutToChannelCount(layout);
- return CreateOutputStream();
+ AudioOutputStream* Create() {
+ return manager_->MakeAudioOutputStream(
+ manager_->GetDefaultOutputStreamParameters(), "");
}
- AudioParameters::Format format() const { return format_; }
- int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
-
bool CanRunAudioTests() {
- return audio_man_->HasAudioOutputDevices();
+ return manager_->HasAudioOutputDevices();
}
- private:
- AudioOutputStream* CreateOutputStream() {
- AudioParameters params;
- params.Reset(format_, channel_layout_,
- channels_, 0,
- sample_rate_, bits_per_sample_,
- samples_per_packet_);
-
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params,
- std::string(), std::string());
- EXPECT_TRUE(aos);
- return aos;
- }
+ protected:
+ base::MessageLoop message_loop_;
+ scoped_ptr<AudioManager> manager_;
+ MockAudioSourceCallback source_;
- scoped_ptr<AudioManager> audio_man_;
-
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int channels_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AUHALStreamTest);
};
-// Test that we can get the hardware sample-rate.
-TEST(AUHALStreamTest, HardwareSampleRate) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, HardwareSampleRate) {
+ if (!CanRunAudioTests())
return;
-
- int sample_rate = aosw.sample_rate();
- EXPECT_GE(sample_rate, 16000);
- EXPECT_LE(sample_rate, 192000);
-}
-
-// Test Create(), Close() calling sequence.
-TEST(AUHALStreamTest, CreateAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
- return;
-
- AudioOutputStream* aos = aosw.Create();
- aos->Close();
-}
-
-// Test Open(), Close() calling sequence.
-TEST(AUHALStreamTest, OpenAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
- return;
-
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
- aos->Close();
+ const AudioParameters preferred_params =
+ manager_->GetDefaultOutputStreamParameters();
+ EXPECT_GE(preferred_params.sample_rate(), 16000);
+ EXPECT_LE(preferred_params.sample_rate(), 192000);
}
-// Test Open(), Start(), Close() calling sequence.
-TEST(AUHALStreamTest, OpenStartAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, CreateClose) {
+ if (!CanRunAudioTests())
return;
-
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
- MockAudioSourceCallback source;
- EXPECT_CALL(source, OnError(aos))
- .Times(0);
- aos->Start(&source);
- aos->Close();
-}
-
-// Test Open(), Start(), Stop(), Close() calling sequence.
-TEST(AUHALStreamTest, OpenStartStopAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
- return;
-
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
- MockAudioSourceCallback source;
- EXPECT_CALL(source, OnError(aos))
- .Times(0);
- aos->Start(&source);
- aos->Stop();
- aos->Close();
+ Create()->Close();
}
-// This test produces actual audio for 0.5 seconds on the default audio device
-// at the hardware sample-rate (usually 44.1KHz).
-// Parameters have been chosen carefully so you should not hear
-// pops or noises while the sound is playing.
-TEST(AUHALStreamTest, AUHALStreamPlay200HzTone) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, CreateOpenClose) {
+ if (!CanRunAudioTests())
return;
-
- AudioOutputStream* aos = aosw.CreateWithLayout(CHANNEL_LAYOUT_MONO);
-
- EXPECT_TRUE(aos->Open());
-
- SineWaveAudioSource source(1, 200.0, aosw.sample_rate());
- aos->Start(&source);
- usleep(500000);
-
- aos->Stop();
- aos->Close();
+ AudioOutputStream* stream = Create();
+ EXPECT_TRUE(stream->Open());
+ stream->Close();
}
-// Test that Open() will fail with a sample-rate which isn't the hardware
-// sample-rate.
-TEST(AUHALStreamTest, AUHALStreamInvalidSampleRate) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, CreateOpenStartStopClose) {
+ if (!CanRunAudioTests())
return;
- int non_default_sample_rate = aosw.sample_rate() == 44100 ?
- 48000 : 44100;
- AudioOutputStream* aos = aosw.CreateWithSampleRate(non_default_sample_rate);
+ AudioOutputStream* stream = Create();
+ EXPECT_TRUE(stream->Open());
- EXPECT_FALSE(aos->Open());
+ // Wait for the first data callback from the OS.
+ base::WaitableEvent event(false, false);
+ EXPECT_CALL(source_, OnMoreData(_, _))
+ .WillOnce(DoAll(ZeroBuffer(), SignalEvent(&event), Return(0)));
+ EXPECT_CALL(source_, OnError(_)).Times(0);
+ stream->Start(&source_);
+ event.Wait();
- aos->Close();
+ stream->Stop();
+ stream->Close();
}
} // namespace media
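
OnMoreData() fires on a real-time CoreAudio thread, so the rewritten tests above block on a base::WaitableEvent that a gmock action signals from the callback. The same idiom in isolation, reusing the fixture from this file; the test name is hypothetical:

TEST_F(AUHALStreamTest, WaitForFirstRenderCallback) {
  if (!CanRunAudioTests())
    return;
  AudioOutputStream* stream = Create();
  EXPECT_TRUE(stream->Open());
  base::WaitableEvent event(false /* automatic reset */,
                            false /* not initially signaled */);
  // Zero the buffer so no stale data is played, then unblock the test.
  EXPECT_CALL(source_, OnMoreData(_, _))
      .WillOnce(DoAll(ZeroBuffer(), SignalEvent(&event), Return(0)));
  stream->Start(&source_);
  event.Wait();  // Returns once the OS delivers the first callback.
  stream->Stop();
  stream->Close();
}
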
diff --git a/chromium/media/audio/mac/audio_device_listener_mac.cc b/chromium/media/audio/mac/audio_device_listener_mac.cc
index 5c5ca355b9a..ef8bdd5b96c 100644
--- a/chromium/media/audio/mac/audio_device_listener_mac.cc
+++ b/chromium/media/audio/mac/audio_device_listener_mac.cc
@@ -11,7 +11,6 @@
#include "base/mac/mac_util.h"
#include "base/message_loop/message_loop.h"
#include "base/pending_task.h"
-#include "media/audio/mac/audio_low_latency_output_mac.h"
namespace media {
diff --git a/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc b/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
index 7efb3297172..12c88b651a7 100644
--- a/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
@@ -9,7 +9,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "media/audio/mac/audio_device_listener_mac.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,12 +36,11 @@ class AudioDeviceListenerMacTest : public testing::Test {
}
void CreateDeviceListener() {
- // Force a post task using BindToLoop to ensure device listener internals
- // are working correctly.
- output_device_listener_.reset(new AudioDeviceListenerMac(BindToLoop(
- message_loop_.message_loop_proxy(), base::Bind(
- &AudioDeviceListenerMacTest::OnDeviceChange,
- base::Unretained(this)))));
+ // Force a post task using BindToCurrentLoop() to ensure device listener
+ // internals are working correctly.
+ output_device_listener_.reset(new AudioDeviceListenerMac(BindToCurrentLoop(
+ base::Bind(&AudioDeviceListenerMacTest::OnDeviceChange,
+ base::Unretained(this)))));
}
void DestroyDeviceListener() {
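
BindToCurrentLoop() wraps a callback so that running it from any thread posts the invocation back to the loop that was current at bind time. A minimal sketch of the behavior the test above relies on, assuming media/base/bind_to_current_loop.h; OnDeviceChange and Example are hypothetical:

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "media/base/bind_to_current_loop.h"

void OnDeviceChange();  // Hypothetical free-function handler.

void Example() {
  base::MessageLoop loop;
  // |cb| may now be run from a CoreAudio notification thread; the bound
  // callback will still execute on |loop|, not on the calling thread.
  base::Closure cb = media::BindToCurrentLoop(base::Bind(&OnDeviceChange));
  cb.Run();                        // Posts OnDeviceChange() to |loop|...
  base::RunLoop().RunUntilIdle();  // ...which runs it here.
}
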
diff --git a/chromium/media/audio/mac/audio_input_mac.cc b/chromium/media/audio/mac/audio_input_mac.cc
index 4aee1179cfa..b7f6e173109 100644
--- a/chromium/media/audio/mac/audio_input_mac.cc
+++ b/chromium/media/audio/mac/audio_input_mac.cc
@@ -9,18 +9,20 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "media/audio/audio_manager_base.h"
-
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/audio_bus.h"
namespace media {
PCMQueueInAudioInputStream::PCMQueueInAudioInputStream(
- AudioManagerBase* manager, const AudioParameters& params)
+ AudioManagerMac* manager,
+ const AudioParameters& params)
: manager_(manager),
callback_(NULL),
audio_queue_(NULL),
buffer_size_bytes_(0),
- started_(false) {
+ started_(false),
+ audio_bus_(media::AudioBus::Create(params)) {
// We must have a manager.
DCHECK(manager_);
// A frame is one sample across all channels. In interleaved audio the per
@@ -65,6 +67,21 @@ void PCMQueueInAudioInputStream::Start(AudioInputCallback* callback) {
DLOG_IF(ERROR, !audio_queue_) << "Open() has not been called successfully";
if (callback_ || !audio_queue_)
return;
+
+ // Check if we should defer Start() for http://crbug.com/160920.
+ if (manager_->ShouldDeferStreamStart()) {
+ // Use a cancelable closure so that if Stop() is called before Start()
+ // actually runs, we can cancel the pending start.
+ deferred_start_cb_.Reset(base::Bind(
+ &PCMQueueInAudioInputStream::Start, base::Unretained(this), callback));
+ manager_->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE,
+ deferred_start_cb_.callback(),
+ base::TimeDelta::FromSeconds(
+ AudioManagerMac::kStartDelayInSecsForPowerEvents));
+ return;
+ }
+
callback_ = callback;
OSStatus err = AudioQueueStart(audio_queue_, NULL);
if (err != noErr) {
@@ -75,6 +92,7 @@ void PCMQueueInAudioInputStream::Start(AudioInputCallback* callback) {
}
void PCMQueueInAudioInputStream::Stop() {
+ deferred_start_cb_.Cancel();
if (!audio_queue_ || !started_)
return;
@@ -85,9 +103,12 @@ void PCMQueueInAudioInputStream::Stop() {
HandleError(err);
started_ = false;
+ callback_ = NULL;
}
void PCMQueueInAudioInputStream::Close() {
+ Stop();
+
// It is valid to call Close() before calling Open() or Start(), thus
// |audio_queue_| and |callback_| might be NULL.
if (audio_queue_) {
@@ -96,10 +117,7 @@ void PCMQueueInAudioInputStream::Close() {
if (err != noErr)
HandleError(err);
}
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
+
manager_->ReleaseInputStream(this);
// CARE: This object may now be destroyed.
}
@@ -200,11 +218,11 @@ void PCMQueueInAudioInputStream::HandleInputBuffer(
if (elapsed < kMinDelay)
base::PlatformThread::Sleep(kMinDelay - elapsed);
- callback_->OnData(this,
- reinterpret_cast<const uint8*>(audio_buffer->mAudioData),
- audio_buffer->mAudioDataByteSize,
- audio_buffer->mAudioDataByteSize,
- 0.0);
+ uint8* audio_data = reinterpret_cast<uint8*>(audio_buffer->mAudioData);
+ audio_bus_->FromInterleaved(
+ audio_data, audio_bus_->frames(), format_.mBitsPerChannel / 8);
+ callback_->OnData(
+ this, audio_bus_.get(), audio_buffer->mAudioDataByteSize, 0.0);
last_fill_ = base::TimeTicks::Now();
}
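
The OnData() change above replaces the raw byte pointer with a media::AudioBus. A short sketch of the deinterleaving step, assuming fixed-point interleaved capture data as configured by this stream; the helper name is hypothetical:

#include "media/base/audio_bus.h"

// Sketch: convert |bytes_per_sample|-wide interleaved PCM into the planar
// float format AudioBus stores, one plane per channel.
void DeinterleaveForCallback(const uint8* interleaved_data,
                             int bytes_per_sample,  // 2 for 16-bit PCM.
                             media::AudioBus* bus) {
  bus->FromInterleaved(interleaved_data, bus->frames(), bytes_per_sample);
}
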
diff --git a/chromium/media/audio/mac/audio_input_mac.h b/chromium/media/audio/mac/audio_input_mac.h
index 77eb65b0315..a6e897e0610 100644
--- a/chromium/media/audio/mac/audio_input_mac.h
+++ b/chromium/media/audio/mac/audio_input_mac.h
@@ -8,6 +8,7 @@
#include <AudioToolbox/AudioFormat.h>
#include <AudioToolbox/AudioQueue.h>
+#include "base/cancelable_callback.h"
#include "base/compiler_specific.h"
#include "base/time/time.h"
#include "media/audio/audio_io.h"
@@ -15,14 +16,15 @@
namespace media {
-class AudioManagerBase;
+class AudioBus;
+class AudioManagerMac;
// Implementation of AudioInputStream for Mac OS X using the audio queue service
// present in OS 10.5 and later. Design reflects PCMQueueOutAudioOutputStream.
class PCMQueueInAudioInputStream : public AudioInputStream {
public:
// Parameters as per AudioManager::MakeAudioInputStream.
- PCMQueueInAudioInputStream(AudioManagerBase* manager,
+ PCMQueueInAudioInputStream(AudioManagerMac* manager,
const AudioParameters& params);
virtual ~PCMQueueInAudioInputStream();
@@ -66,7 +68,7 @@ class PCMQueueInAudioInputStream : public AudioInputStream {
static const int kNumberBuffers = 3;
// Manager that owns this stream, used for closing down.
- AudioManagerBase* manager_;
+ AudioManagerMac* manager_;
// We use the callback mostly to periodically supply the recorded audio data.
AudioInputCallback* callback_;
// Structure that holds the stream format details such as bitrate.
@@ -79,6 +81,10 @@ class PCMQueueInAudioInputStream : public AudioInputStream {
bool started_;
// Used to determine if we need to slow down |callback_| calls.
base::TimeTicks last_fill_;
+ // Used to defer Start() to work around http://crbug.com/160920.
+ base::CancelableClosure deferred_start_cb_;
+
+ scoped_ptr<media::AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(PCMQueueInAudioInputStream);
};
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
index dbc75bfea31..d7a3430f6d8 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/audio_bus.h"
#include "media/base/data_buffer.h"
namespace media {
@@ -31,11 +32,10 @@ static std::ostream& operator<<(std::ostream& os,
// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
// for more details and background regarding this implementation.
-AUAudioInputStream::AUAudioInputStream(
- AudioManagerMac* manager,
- const AudioParameters& input_params,
- const AudioParameters& output_params,
- AudioDeviceID audio_device_id)
+AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ AudioDeviceID audio_device_id)
: manager_(manager),
sink_(NULL),
audio_unit_(0),
@@ -43,7 +43,8 @@ AUAudioInputStream::AUAudioInputStream(
started_(false),
hardware_latency_frames_(0),
fifo_delay_bytes_(0),
- number_of_channels_in_frame_(0) {
+ number_of_channels_in_frame_(0),
+ audio_bus_(media::AudioBus::Create(input_params)) {
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
@@ -64,9 +65,6 @@ AUAudioInputStream::AUAudioInputStream(
// Set number of sample frames per callback used by the internal audio layer.
// An internal FIFO is then utilized to adapt the internal size to the size
// requested by the client.
- // Note that we use the same native buffer size as for the output side here
- // since the AUHAL implementation requires that both capture and render side
- // use the same buffer size. See http://crbug.com/154352 for more details.
number_of_frames_ = output_params.frames_per_buffer();
DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
@@ -233,23 +231,38 @@ bool AUAudioInputStream::Open() {
}
// Set the desired number of frames in the IO buffer (output scope).
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as the frames_per_buffer() returned by
- // GetInputStreamParameters().
- // TODO(henrika): Due to http://crrev.com/159666 this is currently not true
- // and should be fixed, a CHECK() should be added at that time.
- result = AudioUnitSetProperty(audio_unit_,
+ // WARNING: Setting this value changes the frame size for all input audio
+ // units in the current process. As a result, the AURenderCallback must be
+ // able to handle arbitrary buffer sizes, using a FIFO where necessary.
+ UInt32 buffer_size = 0;
+ UInt32 property_size = sizeof(buffer_size);
+ result = AudioUnitGetProperty(audio_unit_,
kAudioDevicePropertyBufferFrameSize,
kAudioUnitScope_Output,
1,
- &number_of_frames_, // size is set in the ctor
- sizeof(number_of_frames_));
- if (result) {
+ &buffer_size,
+ &property_size);
+ if (result != noErr) {
HandleError(result);
return false;
}
+ // Only set the buffer size if we're the only active stream or the
+ // requested size is lower than the current buffer size.
+ if (manager_->input_stream_count() == 1 || number_of_frames_ < buffer_size) {
+ buffer_size = number_of_frames_;
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 1,
+ &buffer_size,
+ sizeof(buffer_size));
+ if (result != noErr) {
+ HandleError(result);
+ return false;
+ }
+ }
+
// Finally, initialize the audio unit and ensure that it is ready to render.
// Allocates memory according to the maximum number of audio frames
// it can produce in response to a single render call.
@@ -274,6 +287,21 @@ void AUAudioInputStream::Start(AudioInputCallback* callback) {
DLOG_IF(ERROR, !audio_unit_) << "Open() has not been called successfully";
if (started_ || !audio_unit_)
return;
+
+ // Check if we should defer Start() for http://crbug.com/160920.
+ if (manager_->ShouldDeferStreamStart()) {
+ // Use a cancelable closure so that if Stop() is called before Start()
+ // actually runs, we can cancel the pending start.
+ deferred_start_cb_.Reset(base::Bind(
+ &AUAudioInputStream::Start, base::Unretained(this), callback));
+ manager_->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE,
+ deferred_start_cb_.callback(),
+ base::TimeDelta::FromSeconds(
+ AudioManagerMac::kStartDelayInSecsForPowerEvents));
+ return;
+ }
+
sink_ = callback;
StartAgc();
OSStatus result = AudioOutputUnitStart(audio_unit_);
@@ -289,9 +317,10 @@ void AUAudioInputStream::Stop() {
return;
StopAgc();
OSStatus result = AudioOutputUnitStop(audio_unit_);
- if (result == noErr) {
- started_ = false;
- }
+ DCHECK_EQ(result, noErr);
+ started_ = false;
+ sink_ = NULL;
+
OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
<< "Failed to stop acquiring data";
}
@@ -310,10 +339,6 @@ void AUAudioInputStream::Close() {
CloseComponent(audio_unit_);
audio_unit_ = 0;
}
- if (sink_) {
- sink_->OnClose(this);
- sink_ = NULL;
- }
// Inform the audio manager that we have been closed. This can cause our
// destruction.
@@ -518,12 +543,13 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
// Read from FIFO into temporary data buffer.
fifo_->Read(data_->writable_data(), requested_size_bytes_);
+ // Copy captured (and interleaved) data into deinterleaved audio bus.
+ audio_bus_->FromInterleaved(
+ data_->data(), audio_bus_->frames(), format_.mBitsPerChannel / 8);
+
// Deliver data packet, delay estimation and volume level to the user.
- sink_->OnData(this,
- data_->data(),
- requested_size_bytes_,
- capture_delay_bytes,
- normalized_volume);
+ sink_->OnData(
+ this, audio_bus_.get(), capture_delay_bytes, normalized_volume);
}
return noErr;
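
The input path above hinges on AUHAL's bus numbering from TN2091: element 1 is the capture bus, element 0 the render bus, and captured samples are pulled from the output scope of element 1. A sketch of configuring a capture-only unit under those conventions; the function name is hypothetical:

#include <AudioUnit/AudioUnit.h>

static bool ConfigureCaptureOnly(AudioUnit unit) {
  // Enable IO on the input element (bus 1, input scope)...
  UInt32 enable = 1;
  OSStatus result = AudioUnitSetProperty(unit,
                                         kAudioOutputUnitProperty_EnableIO,
                                         kAudioUnitScope_Input,
                                         1,
                                         &enable,
                                         sizeof(enable));
  if (result != noErr)
    return false;
  // ...and disable the render side (bus 0, output scope).
  enable = 0;
  result = AudioUnitSetProperty(unit,
                                kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Output,
                                0,
                                &enable,
                                sizeof(enable));
  return result == noErr;
}
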
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.h b/chromium/media/audio/mac/audio_low_latency_input_mac.h
index 04592d2cecf..7726227eae5 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.h
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.h
@@ -39,7 +39,7 @@
#include <AudioUnit/AudioUnit.h>
#include <CoreAudio/CoreAudio.h>
-#include "base/atomicops.h"
+#include "base/cancelable_callback.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "media/audio/agc_audio_stream.h"
@@ -49,6 +49,7 @@
namespace media {
+class AudioBus;
class AudioManagerMac;
class DataBuffer;
@@ -162,6 +163,13 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// OnData() callbacks where each callback contains this amount of bytes.
int requested_size_bytes_;
+ // Used to defer Start() to work around http://crbug.com/160920.
+ base::CancelableClosure deferred_start_cb_;
+
+ // Extra audio bus used for storage of deinterleaved data for the OnData
+ // callback.
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(AUAudioInputStream);
};
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
index 9360befe575..79721d4f37b 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
@@ -5,6 +5,7 @@
#include "base/basictypes.h"
#include "base/environment.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "media/audio/audio_io.h"
@@ -22,18 +23,19 @@ using ::testing::NotNull;
namespace media {
-ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
+ACTION_P4(CheckCountAndPostQuitTask, count, limit, loop, closure) {
if (++*count >= limit) {
- loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+ loop->PostTask(FROM_HERE, closure);
}
}
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
@@ -74,17 +76,23 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
// AudioInputStream::AudioInputCallback implementation.
virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE {
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16[]> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(interleaved[0]);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+
// Store the data in a temporary buffer to avoid making blocking
// fwrite() calls in the audio callback. The complete buffer will be
// written to file in the destructor.
- if (buffer_.Append(src, size)) {
+ const int size = bytes_per_sample * num_samples;
+ if (buffer_.Append(reinterpret_cast<const uint8*>(interleaved.get()), size)) {
bytes_to_write_ += size;
}
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
private:
@@ -95,8 +103,16 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
class MacAudioInputTest : public testing::Test {
protected:
- MacAudioInputTest() : audio_manager_(AudioManager::CreateForTesting()) {}
- virtual ~MacAudioInputTest() {}
+ MacAudioInputTest()
+ : message_loop_(base::MessageLoop::TYPE_UI),
+ audio_manager_(AudioManager::CreateForTesting()) {
+ // Wait for the AudioManager to finish any initialization on the audio loop.
+ base::RunLoop().RunUntilIdle();
+ }
+
+ virtual ~MacAudioInputTest() {
+ base::RunLoop().RunUntilIdle();
+ }
// Convenience method which ensures that we are not running on the build
// bots and that at least one valid input device can be found.
@@ -134,6 +150,7 @@ class MacAudioInputTest : public testing::Test {
return ais;
}
+ base::MessageLoop message_loop_;
scoped_ptr<AudioManager> audio_manager_;
};
@@ -162,8 +179,6 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartAndClose) {
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -176,8 +191,6 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartStopAndClose) {
MockAudioInputCallback sink;
ais->Start(&sink);
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -206,8 +219,6 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamMiscCallingSequences) {
ais->Stop();
EXPECT_FALSE(auais->started());
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -217,32 +228,24 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyMonoRecording) {
return;
int count = 0;
- base::MessageLoopForUI loop;
// Create an audio input stream which records in mono.
AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_MONO);
EXPECT_TRUE(ais->Open());
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- int bits_per_sample = 16;
- uint32 bytes_per_packet = samples_per_packet * (bits_per_sample / 8);
-
MockAudioInputCallback sink;
// We use 10ms packets and will run the test until ten packets are received.
// All should contain valid packets of the same size and a valid delay
// estimate.
- EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
+ base::RunLoop run_loop;
+ EXPECT_CALL(sink, OnData(ais, NotNull(), _, _))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, &message_loop_, run_loop.QuitClosure()));
ais->Start(&sink);
- loop.Run();
+ run_loop.Run();
ais->Stop();
-
- // Verify that the sink receieves OnClose() call when calling Close().
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -252,17 +255,11 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
return;
int count = 0;
- base::MessageLoopForUI loop;
// Create an audio input stream which records in stereo.
AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_STEREO);
EXPECT_TRUE(ais->Open());
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- int bits_per_sample = 16;
- uint32 bytes_per_packet = 2 * samples_per_packet * (bits_per_sample / 8);
-
MockAudioInputCallback sink;
// We use 10ms packets and will run the test until ten packets are received.
@@ -275,16 +272,14 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
// parameter #4 no longer passes. I am removing this restriction here to
// ensure that we can land the patch but will revisit this test again when
// more analysis of the delay estimates is done.
- EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
+ base::RunLoop run_loop;
+ EXPECT_CALL(sink, OnData(ais, NotNull(), _, _))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, &message_loop_, run_loop.QuitClosure()));
ais->Start(&sink);
- loop.Run();
+ run_loop.Run();
ais->Stop();
-
- // Verify that the sink receieves OnClose() call when calling Close().
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
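
WriteToFileAudioSink above performs the reverse conversion, flattening the planar AudioBus back to interleaved int16 before buffering. The round trip in isolation; the helper name is hypothetical and |out| is assumed large enough for frames() * channels() samples:

#include "media/base/audio_bus.h"

// Sketch: planar float -> interleaved 16-bit PCM, as done before Append().
int InterleaveForFile(const media::AudioBus* src, int16* out) {
  const int num_samples = src->frames() * src->channels();
  src->ToInterleaved(src->frames(), sizeof(*out), out);
  return num_samples * sizeof(*out);  // Bytes written to |out|.
}
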
diff --git a/chromium/media/audio/mac/audio_low_latency_output_mac.cc b/chromium/media/audio/mac/audio_low_latency_output_mac.cc
deleted file mode 100644
index afa480aefb9..00000000000
--- a/chromium/media/audio/mac/audio_low_latency_output_mac.cc
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/media_switches.h"
-
-namespace media {
-
-static std::ostream& operator<<(std::ostream& os,
- const AudioStreamBasicDescription& format) {
- os << "sample rate : " << format.mSampleRate << std::endl
- << "format ID : " << format.mFormatID << std::endl
- << "format flags : " << format.mFormatFlags << std::endl
- << "bytes per packet : " << format.mBytesPerPacket << std::endl
- << "frames per packet : " << format.mFramesPerPacket << std::endl
- << "bytes per frame : " << format.mBytesPerFrame << std::endl
- << "channels per frame: " << format.mChannelsPerFrame << std::endl
- << "bits per channel : " << format.mBitsPerChannel;
- return os;
-}
-
-static AudioObjectPropertyAddress kDefaultOutputDeviceAddress = {
- kAudioHardwarePropertyDefaultOutputDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
-};
-
-// Overview of operation:
-// 1) An object of AUAudioOutputStream is created by the AudioManager
-// factory: audio_man->MakeAudioStream().
-// 2) Next some thread will call Open(), at that point the underlying
-// default output Audio Unit is created and configured.
-// 3) Then some thread will call Start(source).
-// Then the Audio Unit is started which creates its own thread which
-// periodically will call the source for more data as buffers are being
-// consumed.
-// 4) At some point some thread will call Stop(), which we handle by directly
-// stopping the default output Audio Unit.
-// 6) The same thread that called stop will call Close() where we cleanup
-// and notify the audio manager, which likely will destroy this object.
-
-AUAudioOutputStream::AUAudioOutputStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : manager_(manager),
- source_(NULL),
- output_unit_(0),
- output_device_id_(kAudioObjectUnknown),
- volume_(1),
- hardware_latency_frames_(0),
- stopped_(false),
- audio_bus_(AudioBus::Create(params)) {
- // We must have a manager.
- DCHECK(manager_);
-
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- DVLOG(1) << "Desired ouput format: " << format_;
-
- // Calculate the number of sample frames per callback.
- number_of_frames_ = params.frames_per_buffer();
- DVLOG(1) << "Number of frames per callback: " << number_of_frames_;
-}
-
-AUAudioOutputStream::~AUAudioOutputStream() {
-}
-
-bool AUAudioOutputStream::Open() {
- // Obtain the current input device selected by the user.
- UInt32 size = sizeof(output_device_id_);
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &kDefaultOutputDeviceAddress,
- 0,
- 0,
- &size,
- &output_device_id_);
- if (result != noErr || output_device_id_ == kAudioObjectUnknown) {
- OSSTATUS_DLOG(ERROR, result)
- << "Could not get default audio output device.";
- return false;
- }
-
- // Open and initialize the DefaultOutputUnit.
- AudioComponent comp;
- AudioComponentDescription desc;
-
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_DefaultOutput;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
- comp = AudioComponentFindNext(0, &desc);
- if (!comp)
- return false;
-
- result = AudioComponentInstanceNew(comp, &output_unit_);
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result) << "AudioComponentInstanceNew() failed.";
- return false;
- }
-
- result = AudioUnitInitialize(output_unit_);
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result) << "AudioUnitInitialize() failed.";
- return false;
- }
-
- hardware_latency_frames_ = GetHardwareLatency();
-
- return Configure();
-}
-
-bool AUAudioOutputStream::Configure() {
- // Set the render callback.
- AURenderCallbackStruct input;
- input.inputProc = InputProc;
- input.inputProcRefCon = this;
- OSStatus result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Global,
- 0,
- &input,
- sizeof(input));
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed.";
- return false;
- }
-
- // Set the stream format.
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &format_,
- sizeof(format_));
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed.";
- return false;
- }
-
- // Set the buffer frame size.
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as the frames_per_buffer() returned by
- // GetDefaultOutputStreamParameters.
- // See http://crbug.com/154352 for details.
- const AudioParameters hw_params =
- manager_->GetDefaultOutputStreamParameters();
- if (number_of_frames_ != static_cast<size_t>(hw_params.frames_per_buffer())) {
- DLOG(ERROR) << "Audio buffer size does not match hardware buffer size.";
- return false;
- }
-
- UInt32 buffer_size = number_of_frames_;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioDevicePropertyBufferFrameSize,
- kAudioUnitScope_Output,
- 0,
- &buffer_size,
- sizeof(buffer_size));
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
- return false;
- }
-
- return true;
-}
-
-void AUAudioOutputStream::Close() {
- if (output_unit_)
- AudioComponentInstanceDispose(output_unit_);
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AUAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- if (!output_unit_) {
- DLOG(ERROR) << "Open() has not been called successfully";
- return;
- }
-
- stopped_ = false;
- {
- base::AutoLock auto_lock(source_lock_);
- source_ = callback;
- }
-
- AudioOutputUnitStart(output_unit_);
-}
-
-void AUAudioOutputStream::Stop() {
- if (stopped_)
- return;
-
- AudioOutputUnitStop(output_unit_);
-
- base::AutoLock auto_lock(source_lock_);
- source_ = NULL;
- stopped_ = true;
-}
-
-void AUAudioOutputStream::SetVolume(double volume) {
- if (!output_unit_)
- return;
- volume_ = static_cast<float>(volume);
-
- // TODO(crogers): set volume property
-}
-
-void AUAudioOutputStream::GetVolume(double* volume) {
- if (!output_unit_)
- return;
- *volume = volume_;
-}
-
-// Pulls on our provider to get rendered audio stream.
-// Note to future hackers of this function: Do not add locks here because this
-// is running on a real-time thread (for low-latency).
-OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames,
- AudioBufferList* io_data,
- const AudioTimeStamp* output_time_stamp) {
- // Update the playout latency.
- double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
-
- AudioBuffer& buffer = io_data->mBuffers[0];
- uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
- uint32 hardware_pending_bytes = static_cast<uint32>
- ((playout_latency_frames + 0.5) * format_.mBytesPerFrame);
-
- // Unfortunately AUAudioInputStream and AUAudioOutputStream share the frame
- // size set by kAudioDevicePropertyBufferFrameSize above on a per process
- // basis. What this means is that the |number_of_frames| value may be larger
- // or smaller than the value set during Configure(). In this case either
- // audio input or audio output will be broken, so just output silence.
- // TODO(crogers): Figure out what can trigger a change in |number_of_frames|.
- // See http://crbug.com/154352 for details.
- if (number_of_frames != static_cast<UInt32>(audio_bus_->frames())) {
- memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
- return noErr;
- }
-
- int frames_filled = 0;
- {
- // Render() shouldn't be called except between AudioOutputUnitStart() and
- // AudioOutputUnitStop() calls, but crash reports have shown otherwise:
- // http://crbug.com/178765. We use |source_lock_| to prevent races and
- // crashes in Render() when |source_| is cleared.
- base::AutoLock auto_lock(source_lock_);
- if (!source_) {
- memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
- return noErr;
- }
-
- frames_filled = source_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, hardware_pending_bytes));
- }
-
- // Note: If this ever changes to output raw float the data must be clipped and
- // sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->Scale(volume_);
- audio_bus_->ToInterleaved(
- frames_filled, format_.mBitsPerChannel / 8, audio_data);
-
- return noErr;
-}
-
-// DefaultOutputUnit callback
-OSStatus AUAudioOutputStream::InputProc(void* user_data,
- AudioUnitRenderActionFlags*,
- const AudioTimeStamp* output_time_stamp,
- UInt32,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AUAudioOutputStream* audio_output =
- static_cast<AUAudioOutputStream*>(user_data);
- if (!audio_output)
- return -1;
-
- return audio_output->Render(number_of_frames, io_data, output_time_stamp);
-}
-
-int AUAudioOutputStream::HardwareSampleRate() {
- // Determine the default output device's sample-rate.
- AudioDeviceID device_id = kAudioObjectUnknown;
- UInt32 info_size = sizeof(device_id);
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &kDefaultOutputDeviceAddress,
- 0,
- 0,
- &info_size,
- &device_id);
- if (result != noErr || device_id == kAudioObjectUnknown) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default audio output device.";
- return 0;
- }
-
- Float64 nominal_sample_rate;
- info_size = sizeof(nominal_sample_rate);
-
- AudioObjectPropertyAddress nominal_sample_rate_address = {
- kAudioDevicePropertyNominalSampleRate,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- result = AudioObjectGetPropertyData(device_id,
- &nominal_sample_rate_address,
- 0,
- 0,
- &info_size,
- &nominal_sample_rate);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default sample rate for device: " << device_id;
- return 0;
- }
-
- return static_cast<int>(nominal_sample_rate);
-}
-
-double AUAudioOutputStream::GetHardwareLatency() {
- if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) {
- DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
- return 0.0;
- }
-
- // Get audio unit latency.
- Float64 audio_unit_latency_sec = 0.0;
- UInt32 size = sizeof(audio_unit_latency_sec);
- OSStatus result = AudioUnitGetProperty(output_unit_,
- kAudioUnitProperty_Latency,
- kAudioUnitScope_Global,
- 0,
- &audio_unit_latency_sec,
- &size);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
- return 0.0;
- }
-
- // Get output audio device latency.
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyLatency,
- kAudioDevicePropertyScopeOutput,
- kAudioObjectPropertyElementMaster
- };
- UInt32 device_latency_frames = 0;
- size = sizeof(device_latency_frames);
- result = AudioObjectGetPropertyData(output_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &device_latency_frames);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
- return 0.0;
- }
-
- return static_cast<double>((audio_unit_latency_sec *
- format_.mSampleRate) + device_latency_frames);
-}
-
-double AUAudioOutputStream::GetPlayoutLatency(
- const AudioTimeStamp* output_time_stamp) {
- // Ensure mHostTime is valid.
- if ((output_time_stamp->mFlags & kAudioTimeStampHostTimeValid) == 0)
- return 0;
-
- // Get the delay between the moment getting the callback and the scheduled
- // time stamp that tells when the data is going to be played out.
- UInt64 output_time_ns = AudioConvertHostTimeToNanos(
- output_time_stamp->mHostTime);
- UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
-
- // Prevent overflow leading to huge delay information; occurs regularly on
- // the bots, probably less so in the wild.
- if (now_ns > output_time_ns)
- return 0;
-
- double delay_frames = static_cast<double>
- (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate);
-
- return (delay_frames + hardware_latency_frames_);
-}
-
-} // namespace media
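
For reference, the playout-delay arithmetic that the deleted Render()/GetPlayoutLatency() pair implemented, restated compactly with the same CoreAudio host-time helpers; the function name is hypothetical:

#include <CoreAudio/CoreAudio.h>

// Sketch: frames of delay between "now" and the scheduled playout time.
static double PlayoutDelayFrames(const AudioTimeStamp* ts,
                                 double sample_rate,
                                 double hardware_latency_frames) {
  if ((ts->mFlags & kAudioTimeStampHostTimeValid) == 0)
    return 0.0;
  UInt64 output_ns = AudioConvertHostTimeToNanos(ts->mHostTime);
  UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
  if (now_ns > output_ns)  // Guard against overflow on late callbacks.
    return 0.0;
  return 1e-9 * (output_ns - now_ns) * sample_rate + hardware_latency_frames;
}
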
diff --git a/chromium/media/audio/mac/audio_low_latency_output_mac.h b/chromium/media/audio/mac/audio_low_latency_output_mac.h
deleted file mode 100644
index 27f3b3a837a..00000000000
--- a/chromium/media/audio/mac/audio_low_latency_output_mac.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Implementation notes:
-//
-// - It is recommended to first acquire the native sample rate of the default
-// output device and then use the same rate when creating this object.
-// Use AUAudioOutputStream::HardwareSampleRate() to retrieve the sample rate.
-// - Calling Close() also leads to self destruction.
-// - The latency consists of two parts:
-// 1) Hardware latency, which includes Audio Unit latency, audio device
-// latency;
-// 2) The delay between the moment getting the callback and the scheduled time
-// stamp that tells when the data is going to be played out.
-//
-#ifndef MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
-
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-
-// Implementation of AudioOuputStream for Mac OS X using the
-// default output Audio Unit present in OS 10.4 and later.
-// The default output Audio Unit is for low-latency audio I/O.
-class AUAudioOutputStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // the audio manager who is creating this object.
- AUAudioOutputStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~AUAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- static int HardwareSampleRate();
-
- private:
- // DefaultOutputUnit callback.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus Render(UInt32 number_of_frames, AudioBufferList* io_data,
- const AudioTimeStamp* output_time_stamp);
-
- // Sets up the stream format for the default output Audio Unit.
- bool Configure();
-
- // Gets the fixed playout device hardware latency and stores it. Returns 0
- // if not available.
- double GetHardwareLatency();
-
- // Gets the current playout latency value.
- double GetPlayoutLatency(const AudioTimeStamp* output_time_stamp);
-
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
-
- size_t number_of_frames_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Protects |source_|. Necessary since Render() calls seem to be in flight
- // when |output_unit_| is supposedly stopped. See http://crbug.com/178765.
- base::Lock source_lock_;
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
-
- // The default output Audio Unit which talks to the audio hardware.
- AudioUnit output_unit_;
-
- // The UID refers to the current output audio device.
- AudioDeviceID output_device_id_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Fixed playout hardware latency in frames.
- double hardware_latency_frames_;
-
- // The flag used to stop the streaming.
- bool stopped_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AUAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index be7dddd5bb6..970720679ab 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -11,15 +11,15 @@
#include "base/command_line.h"
#include "base/mac/mac_logging.h"
#include "base/mac/scoped_cftyperef.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_observer.h"
#include "base/strings/sys_string_conversions.h"
+#include "base/threading/thread_checker.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/mac/audio_auhal_mac.h"
#include "media/audio/mac/audio_input_mac.h"
#include "media/audio/mac/audio_low_latency_input_mac.h"
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-#include "media/audio/mac/audio_synchronized_mac.h"
-#include "media/audio/mac/audio_unified_mac.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
@@ -29,8 +29,9 @@ namespace media {
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 50;
-// Default buffer size in samples for low-latency input and output streams.
-static const int kDefaultLowLatencyBufferSize = 128;
+// Define bounds for low-latency input and output streams.
+static const int kMinimumInputOutputBufferSize = 128;
+static const int kMaximumInputOutputBufferSize = 4096;
// Default sample-rate on most Apple hardware.
static const int kFallbackSampleRate = 44100;
@@ -53,16 +54,6 @@ static bool HasAudioHardware(AudioObjectPropertySelector selector) {
output_device_id != kAudioObjectUnknown;
}
-// Returns true if the default input device is the same as
-// the default output device.
-bool AudioManagerMac::HasUnifiedDefaultIO() {
- AudioDeviceID input_id, output_id;
- if (!GetDefaultInputDevice(&input_id) || !GetDefaultOutputDevice(&output_id))
- return false;
-
- return input_id == output_id;
-}
-
// Retrieves information on audio devices, and prepends the default
// device to the list if the list is non-empty.
static void GetAudioDeviceInfo(bool is_input,
@@ -86,8 +77,8 @@ static void GetAudioDeviceInfo(bool is_input,
// Get the array of device ids for all the devices, which includes both
// input devices and output devices.
- scoped_ptr_malloc<AudioDeviceID>
- devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+ scoped_ptr<AudioDeviceID, base::FreeDeleter>
+ devices(static_cast<AudioDeviceID*>(malloc(size)));
AudioDeviceID* device_ids = devices.get();
result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property_address,
@@ -220,28 +211,87 @@ static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
return audio_device_id;
}
+template <class T>
+void StopStreams(std::list<T*>* streams) {
+ for (typename std::list<T*>::iterator it = streams->begin();
+ it != streams->end();
+ ++it) {
+ // Stop() is safe to call multiple times, so it doesn't matter if a stream
+ // has already been stopped.
+ (*it)->Stop();
+ }
+ streams->clear();
+}
+
+class AudioManagerMac::AudioPowerObserver : public base::PowerObserver {
+ public:
+ AudioPowerObserver()
+ : is_suspending_(false),
+ is_monitoring_(base::PowerMonitor::Get()) {
+ // The PowerMonitor requires significant setup (a CFRunLoop and preallocated
+ // IO ports) so it's not available under unit tests. See the OSX impl of
+ // base::PowerMonitorDeviceSource for more details.
+ if (!is_monitoring_)
+ return;
+ base::PowerMonitor::Get()->AddObserver(this);
+ }
+
+ virtual ~AudioPowerObserver() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!is_monitoring_)
+ return;
+ base::PowerMonitor::Get()->RemoveObserver(this);
+ }
+
+ bool ShouldDeferStreamStart() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Start() should be deferred if the system is in the middle of a suspend or
+ // has recently started the process of resuming.
+ return is_suspending_ || base::TimeTicks::Now() < earliest_start_time_;
+ }
+
+ private:
+ virtual void OnSuspend() OVERRIDE {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ is_suspending_ = true;
+ }
+
+ virtual void OnResume() OVERRIDE {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ is_suspending_ = false;
+ earliest_start_time_ = base::TimeTicks::Now() +
+ base::TimeDelta::FromSeconds(kStartDelayInSecsForPowerEvents);
+ }
+
+ bool is_suspending_;
+ const bool is_monitoring_;
+ base::TimeTicks earliest_start_time_;
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioPowerObserver);
+};
+
AudioManagerMac::AudioManagerMac(AudioLogFactory* audio_log_factory)
: AudioManagerBase(audio_log_factory),
- current_sample_rate_(0) {
- current_output_device_ = kAudioDeviceUnknown;
-
+ current_sample_rate_(0),
+ current_output_device_(kAudioDeviceUnknown) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
// Task must be posted last to avoid races from handing out "this" to the
// audio thread. Always PostTask even if we're on the right thread since
// AudioManager creation is on the startup path and this may be slow.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::CreateDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerMac::InitializeOnAudioThread, base::Unretained(this)));
}
AudioManagerMac::~AudioManagerMac() {
- if (GetMessageLoop()->BelongsToCurrentThread()) {
- DestroyDeviceListener();
+ if (GetTaskRunner()->BelongsToCurrentThread()) {
+ ShutdownOnAudioThread();
} else {
// It's safe to post a task here since Shutdown() will wait for all tasks to
// complete before returning.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::DestroyDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerMac::ShutdownOnAudioThread, base::Unretained(this)));
}
Shutdown();
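
The AudioPowerObserver added above gates stream starts on system power state. A condensed sketch of just its decision logic; StartGate is a hypothetical name and the 5-second delay stands in for kStartDelayInSecsForPowerEvents declared elsewhere in this patch:

#include "base/time/time.h"

// Sketch: defer while suspending, and for a grace period after resume.
class StartGate {
 public:
  StartGate() : is_suspending_(false) {}

  void OnSuspend() { is_suspending_ = true; }
  void OnResume() {
    is_suspending_ = false;
    earliest_start_time_ =
        base::TimeTicks::Now() + base::TimeDelta::FromSeconds(5);
  }
  bool ShouldDeferStreamStart() const {
    return is_suspending_ || base::TimeTicks::Now() < earliest_start_time_;
  }

 private:
  bool is_suspending_;
  base::TimeTicks earliest_start_time_;
};
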
@@ -257,18 +307,15 @@ bool AudioManagerMac::HasAudioInputDevices() {
// TODO(xians): There are several places on the OSX specific code which
// could benefit from these helper functions.
-bool AudioManagerMac::GetDefaultInputDevice(
- AudioDeviceID* device) {
+bool AudioManagerMac::GetDefaultInputDevice(AudioDeviceID* device) {
return GetDefaultDevice(device, true);
}
-bool AudioManagerMac::GetDefaultOutputDevice(
- AudioDeviceID* device) {
+bool AudioManagerMac::GetDefaultOutputDevice(AudioDeviceID* device) {
return GetDefaultDevice(device, false);
}
-bool AudioManagerMac::GetDefaultDevice(
- AudioDeviceID* device, bool input) {
+bool AudioManagerMac::GetDefaultDevice(AudioDeviceID* device, bool input) {
CHECK(device);
// Obtain the current output device selected by the user.
@@ -279,14 +326,12 @@ bool AudioManagerMac::GetDefaultDevice(
pa.mElement = kAudioObjectPropertyElementMaster;
UInt32 size = sizeof(*device);
-
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- device);
+ OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ device);
if ((result != kAudioHardwareNoError) || (*device == kAudioDeviceUnknown)) {
DLOG(ERROR) << "Error getting default AudioDevice.";
@@ -296,21 +341,16 @@ bool AudioManagerMac::GetDefaultDevice(
return true;
}
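These static helpers hide the standard CoreAudio property-query boilerplate behind a single call; a short usage sketch:

// Editor's sketch: resolving the default output device via the helper above.
AudioDeviceID device;
if (media::AudioManagerMac::GetDefaultOutputDevice(&device))
  VLOG(1) << "Default output AudioDeviceID: " << device;
else
  DLOG(ERROR) << "No default output device available.";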
-bool AudioManagerMac::GetDefaultOutputChannels(
- int* channels) {
+bool AudioManagerMac::GetDefaultOutputChannels(int* channels) {
AudioDeviceID device;
if (!GetDefaultOutputDevice(&device))
return false;
-
- return GetDeviceChannels(device,
- kAudioDevicePropertyScopeOutput,
- channels);
+ return GetDeviceChannels(device, kAudioDevicePropertyScopeOutput, channels);
}
-bool AudioManagerMac::GetDeviceChannels(
- AudioDeviceID device,
- AudioObjectPropertyScope scope,
- int* channels) {
+bool AudioManagerMac::GetDeviceChannels(AudioDeviceID device,
+ AudioObjectPropertyScope scope,
+ int* channels) {
CHECK(channels);
// Get stream configuration.
@@ -329,13 +369,7 @@ bool AudioManagerMac::GetDeviceChannels(
AudioBufferList& buffer_list =
*reinterpret_cast<AudioBufferList*>(list_storage.get());
- result = AudioObjectGetPropertyData(
- device,
- &pa,
- 0,
- 0,
- &size,
- &buffer_list);
+ result = AudioObjectGetPropertyData(device, &pa, 0, 0, &size, &buffer_list);
if (result != noErr)
return false;
@@ -362,13 +396,12 @@ int AudioManagerMac::HardwareSampleRateForDevice(AudioDeviceID device_id) {
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
- OSStatus result = AudioObjectGetPropertyData(
- device_id,
- &kNominalSampleRateAddress,
- 0,
- 0,
- &info_size,
- &nominal_sample_rate);
+ OSStatus result = AudioObjectGetPropertyData(device_id,
+ &kNominalSampleRateAddress,
+ 0,
+ 0,
+ &info_size,
+ &nominal_sample_rate);
if (result != noErr) {
OSSTATUS_DLOG(WARNING, result)
<< "Could not get default sample rate for device: " << device_id;
@@ -401,16 +434,12 @@ void AudioManagerMac::GetAudioOutputDeviceNames(
AudioParameters AudioManagerMac::GetInputStreamParameters(
const std::string& device_id) {
- // Due to the sharing of the input and output buffer sizes, we need to choose
- // the input buffer size based on the output sample rate. See
- // http://crbug.com/154352.
- const int buffer_size = ChooseBufferSize(
- AUAudioOutputStream::HardwareSampleRate());
-
AudioDeviceID device = GetAudioDeviceIdByUId(true, device_id);
if (device == kAudioObjectUnknown) {
DLOG(ERROR) << "Invalid device " << device_id;
- return AudioParameters();
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kFallbackSampleRate, 16, ChooseBufferSize(kFallbackSampleRate));
}
int channels = 0;
@@ -427,6 +456,11 @@ AudioParameters AudioManagerMac::GetInputStreamParameters(
if (!sample_rate)
sample_rate = kFallbackSampleRate;
+ // Due to the sharing of the input and output buffer sizes, we need to choose
+ // the input buffer size based on the output sample rate. See
+ // http://crbug.com/154352.
+ const int buffer_size = ChooseBufferSize(sample_rate);
+
// TODO(xians): query the native channel layout for the specific device.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
@@ -450,8 +484,8 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
return std::string();
int device_count = size / sizeof(AudioDeviceID);
- scoped_ptr_malloc<AudioDeviceID>
- devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+ scoped_ptr<AudioDeviceID, base::FreeDeleter>
+ devices(static_cast<AudioDeviceID*>(malloc(size)));
result = AudioObjectGetPropertyData(
device, &pa, 0, NULL, &size, devices.get());
if (result)
@@ -514,72 +548,38 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- return MakeLowLatencyOutputStream(params, std::string(), std::string());
+ return MakeLowLatencyOutputStream(params, std::string());
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- // Handle basic output with no input channels.
- if (params.input_channels() == 0) {
- AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
- if (device == kAudioObjectUnknown) {
- DLOG(ERROR) << "Failed to open output device: " << device_id;
- return NULL;
- }
- return new AUHALStream(this, params, device);
- }
-
- DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
-
- // TODO(xians): support more than stereo input.
- if (params.input_channels() != 2) {
- // WebAudio is currently hard-coded to 2 channels so we should not
- // see this case.
- NOTREACHED() << "Only stereo input is currently supported!";
+ const std::string& device_id) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Failed to open output device: " << device_id;
return NULL;
}
- AudioDeviceID device = kAudioObjectUnknown;
- if (HasUnifiedDefaultIO()) {
- // For I/O, the simplest case is when the default input and output
- // devices are the same.
- GetDefaultOutputDevice(&device);
- VLOG(0) << "UNIFIED: default input and output devices are identical";
- } else {
- // Some audio hardware is presented as separate input and output devices
- // even though they are really the same physical hardware and
- // share the same "clock domain" at the lowest levels of the driver.
- // A common of example of this is the "built-in" audio hardware:
- // "Built-in Line Input"
- // "Built-in Output"
- // We would like to use an "aggregate" device for these situations, since
- // CoreAudio will make the most efficient use of the shared "clock domain"
- // so we get the lowest latency and use fewer threads.
- device = aggregate_device_manager_.GetDefaultAggregateDevice();
- if (device != kAudioObjectUnknown)
- VLOG(0) << "Using AGGREGATE audio device";
+ // Lazily create the audio device listener on the first stream creation.
+ if (!output_device_listener_) {
+ // NOTE: Use BindToCurrentLoop() to ensure the callback is always PostTask'd
+ // even if OSX calls us on the right thread. Some CoreAudio drivers will
+ // fire the callbacks during stream creation, leading to re-entrancy issues
+ // otherwise. See http://crbug.com/349604
+ output_device_listener_.reset(
+ new AudioDeviceListenerMac(BindToCurrentLoop(base::Bind(
+ &AudioManagerMac::HandleDeviceChanges, base::Unretained(this)))));
+ // Only set the current output device for the default device.
+ if (device_id == AudioManagerBase::kDefaultDeviceId || device_id.empty())
+ current_output_device_ = device;
+ // Just use the current sample rate since we don't allow non-native sample
+ // rates on OSX.
+ current_sample_rate_ = params.sample_rate();
}
- if (device != kAudioObjectUnknown &&
- input_device_id == AudioManagerBase::kDefaultDeviceId)
- return new AUHALStream(this, params, device);
-
- // Fallback to AudioSynchronizedStream which will handle completely
- // different and arbitrary combinations of input and output devices
- // even running at different sample-rates.
- // kAudioDeviceUnknown translates to "use default" here.
- // TODO(xians): consider tracking UMA stats on AUHALStream
- // versus AudioSynchronizedStream.
- AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
- if (audio_device_id == kAudioObjectUnknown)
- return NULL;
-
- return new AudioSynchronizedStream(this,
- params,
- audio_device_id,
- kAudioDeviceUnknown);
+ AudioOutputStream* stream = new AUHALStream(this, params, device);
+ output_streams_.push_back(stream);
+ return stream;
}
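The BindToCurrentLoop() wrapper above is the crux of the crbug.com/349604 fix: some CoreAudio drivers fire device-change notifications synchronously during stream creation, and wrapping the callback turns those into posted tasks instead of re-entrant calls. A condensed sketch of the guarantee being relied on, mirroring media/base/bind_to_current_loop.h as this editor understands it:

// Editor's sketch: the wrapped closure never runs in the caller's stack; it
// is always PostTask'd back to the loop it was bound on, even when invoked
// from that very loop.
base::Closure guarded = media::BindToCurrentLoop(base::Bind(
    &AudioManagerMac::HandleDeviceChanges, base::Unretained(this)));
guarded.Run();  // Queues HandleDeviceChanges(); does not call it directly.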
std::string AudioManagerMac::GetDefaultOutputDeviceID() {
@@ -612,7 +612,9 @@ std::string AudioManagerMac::GetDefaultOutputDeviceID() {
AudioInputStream* AudioManagerMac::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new PCMQueueInAudioInputStream(this, params);
+ AudioInputStream* stream = new PCMQueueInAudioInputStream(this, params);
+ input_streams_.push_back(stream);
+ return stream;
}
AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
@@ -635,6 +637,7 @@ AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
params);
stream = new AUAudioInputStream(this, params, output_params,
audio_device_id);
+ input_streams_.push_back(stream);
}
return stream;
@@ -643,81 +646,87 @@ AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
- AudioDeviceID device = GetAudioDeviceIdByUId(false, output_device_id);
+ const AudioDeviceID device = GetAudioDeviceIdByUId(false, output_device_id);
if (device == kAudioObjectUnknown) {
DLOG(ERROR) << "Invalid output device " << output_device_id;
- return AudioParameters();
+ return input_params.IsValid() ? input_params : AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kFallbackSampleRate, 16, ChooseBufferSize(kFallbackSampleRate));
+ }
+
+ const bool has_valid_input_params = input_params.IsValid();
+ const int hardware_sample_rate = HardwareSampleRateForDevice(device);
+
+ // Allow pass-through buffer sizes. If concurrent input and output streams
+ // exist, they will use the smallest buffer size among them; as such, each
+ // stream must be able to FIFO requests appropriately when this happens.
+ int buffer_size = ChooseBufferSize(hardware_sample_rate);
+ if (has_valid_input_params) {
+ buffer_size =
+ std::min(kMaximumInputOutputBufferSize,
+ std::max(input_params.frames_per_buffer(), buffer_size));
}
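A worked example of the clamp above, with hypothetical values since both bound constants are defined outside this hunk:

// Editor's sketch, assuming kMinimumInputOutputBufferSize == 128,
// kMaximumInputOutputBufferSize == 4096, and ChooseBufferSize(rate) == 256:
//   input requests 441 frames  -> max(441, 256)  = 441  (passed through)
//   input requests 8192 frames -> max(8192, 256) -> clamped to 4096
//   input requests 64 frames   -> max(64, 256)   = 256  (never below default)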
- int hardware_channels = 2;
+ int hardware_channels;
if (!GetDeviceChannels(device, kAudioDevicePropertyScopeOutput,
&hardware_channels)) {
- // Fallback to stereo.
hardware_channels = 2;
}
- ChannelLayout channel_layout = GuessChannelLayout(hardware_channels);
+ // Use the input channel count and channel layout if possible. Let OSX take
+ // care of remapping the channels; this lets user-specified channel layouts
+ // work correctly.
+ int output_channels = input_params.channels();
+ ChannelLayout channel_layout = input_params.channel_layout();
+ if (!has_valid_input_params || output_channels > hardware_channels) {
+ output_channels = hardware_channels;
+ channel_layout = GuessChannelLayout(output_channels);
+ if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
+ channel_layout = CHANNEL_LAYOUT_DISCRETE;
+ }
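The fallback above fires only when the input parameters are invalid or ask for more channels than the hardware exposes; otherwise the caller's count and layout pass through and OSX handles the remapping. As a comment sketch with illustrative values:

// Editor's sketch of the decision above:
//   input 2ch STEREO, hardware 8ch -> keep 2ch STEREO (OSX remaps)
//   input 6ch 5.1,    hardware 2ch -> fall back to 2ch, GuessChannelLayout()
//   input invalid                  -> hardware count, guessed layout,
//                                     CHANNEL_LAYOUT_DISCRETE if the guess
//                                     is unsupported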
- const int hardware_sample_rate = HardwareSampleRateForDevice(device);
- const int buffer_size = ChooseBufferSize(hardware_sample_rate);
-
- int input_channels = 0;
- if (input_params.IsValid()) {
- input_channels = input_params.input_channels();
-
- if (input_channels > 0) {
- // TODO(xians): given the limitations of the AudioOutputStream
- // back-ends used with synchronized I/O, we hard-code to stereo.
- // Specifically, this is a limitation of AudioSynchronizedStream which
- // can be removed as part of the work to consolidate these back-ends.
- channel_layout = CHANNEL_LAYOUT_STEREO;
- }
+ const int input_channels =
+ has_valid_input_params ? input_params.input_channels() : 0;
+ if (input_channels > 0) {
+ // TODO(xians): given the limitations of the AudioOutputStream
+ // back-ends used with synchronized I/O, we hard-code to stereo.
+ // Specifically, this is a limitation of AudioSynchronizedStream which
+ // can be removed as part of the work to consolidate these back-ends.
+ channel_layout = CHANNEL_LAYOUT_STEREO;
}
- if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
- channel_layout = CHANNEL_LAYOUT_DISCRETE;
- else
- hardware_channels = ChannelLayoutToChannelCount(channel_layout);
-
- AudioParameters params(
- AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout,
- hardware_channels,
- input_channels,
- hardware_sample_rate,
- 16,
- buffer_size,
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, output_channels,
+ input_channels, hardware_sample_rate, 16, buffer_size,
AudioParameters::NO_EFFECTS);
-
- return params;
}
-void AudioManagerMac::CreateDeviceListener() {
- DCHECK(GetMessageLoop()->BelongsToCurrentThread());
-
- // Get a baseline for the sample-rate and current device,
- // so we can intelligently handle device notifications only when necessary.
- current_sample_rate_ = HardwareSampleRate();
- if (!GetDefaultOutputDevice(&current_output_device_))
- current_output_device_ = kAudioDeviceUnknown;
-
- output_device_listener_.reset(new AudioDeviceListenerMac(base::Bind(
- &AudioManagerMac::HandleDeviceChanges, base::Unretained(this))));
+void AudioManagerMac::InitializeOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ power_observer_.reset(new AudioPowerObserver());
}
-void AudioManagerMac::DestroyDeviceListener() {
- DCHECK(GetMessageLoop()->BelongsToCurrentThread());
+void AudioManagerMac::ShutdownOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
output_device_listener_.reset();
+ power_observer_.reset();
+
+ // Since CoreAudio calls have to run on the UI thread and browser shutdown
+ // doesn't wait for outstanding tasks to complete, we may have input/output
+ // streams still running at shutdown.
+ //
+ // To avoid calls into destructed classes, we need to stop the OS callbacks
+ // by stopping the streams. Note: The streams are leaked since process
+ // destruction is imminent.
+ //
+ // See http://crbug.com/354139 for crash details.
+ StopStreams(&input_streams_);
+ StopStreams(&output_streams_);
}
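StopStreams() itself is defined outside this hunk; given the std::list tracking members this patch adds, a plausible shape (an assumption, not the patch's actual code) is:

// Editor's sketch of an assumed StopStreams(): halt every tracked stream so
// no OS callback can fire into destructed state during shutdown.
template <typename StreamType>
static void StopStreams(std::list<StreamType*>* streams) {
  for (typename std::list<StreamType*>::iterator it = streams->begin();
       it != streams->end(); ++it) {
    (*it)->Stop();
  }
}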
void AudioManagerMac::HandleDeviceChanges() {
- if (!GetMessageLoop()->BelongsToCurrentThread()) {
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::HandleDeviceChanges, base::Unretained(this)));
- return;
- }
-
- int new_sample_rate = HardwareSampleRate();
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ const int new_sample_rate = HardwareSampleRate();
AudioDeviceID new_output_device;
GetDefaultOutputDevice(&new_output_device);
@@ -731,7 +740,7 @@ void AudioManagerMac::HandleDeviceChanges() {
}
int AudioManagerMac::ChooseBufferSize(int output_sample_rate) {
- int buffer_size = kDefaultLowLatencyBufferSize;
+ int buffer_size = kMinimumInputOutputBufferSize;
const int user_buffer_size = GetUserBufferSize();
if (user_buffer_size) {
buffer_size = user_buffer_size;
@@ -739,14 +748,29 @@ int AudioManagerMac::ChooseBufferSize(int output_sample_rate) {
// The default buffer size is too small for higher sample rates and may lead
// to glitching. Adjust upwards by multiples of the default size.
if (output_sample_rate <= 96000)
- buffer_size = 2 * kDefaultLowLatencyBufferSize;
+ buffer_size = 2 * kMinimumInputOutputBufferSize;
else if (output_sample_rate <= 192000)
- buffer_size = 4 * kDefaultLowLatencyBufferSize;
+ buffer_size = 4 * kMinimumInputOutputBufferSize;
}
return buffer_size;
}
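Worked through with assumed values, since the constant and the surrounding sample-rate guard live outside this hunk:

// Editor's sketch, assuming kMinimumInputOutputBufferSize == 128 and that
// the elided context gates this branch on output_sample_rate > 48000:
//   <= 48 kHz        -> 128 frames (~2.7 ms at 48 kHz)
//   48 kHz..96 kHz   -> 2 * 128 = 256 frames
//   96 kHz..192 kHz  -> 4 * 128 = 512 frames
//   a nonzero GetUserBufferSize() overrides all of the above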
+bool AudioManagerMac::ShouldDeferStreamStart() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ return power_observer_->ShouldDeferStreamStart();
+}
+
+void AudioManagerMac::ReleaseOutputStream(AudioOutputStream* stream) {
+ output_streams_.remove(stream);
+ AudioManagerBase::ReleaseOutputStream(stream);
+}
+
+void AudioManagerMac::ReleaseInputStream(AudioInputStream* stream) {
+ input_streams_.remove(stream);
+ AudioManagerBase::ReleaseInputStream(stream);
+}
+
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
return new AudioManagerMac(audio_log_factory);
}
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
index fb521c940de..490b0b6bbdd 100644
--- a/chromium/media/audio/mac/audio_manager_mac.h
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -6,13 +6,12 @@
#define MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
#include <CoreAudio/AudioHardware.h>
+#include <list>
#include <string>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "media/audio/audio_manager_base.h"
-#include "media/audio/mac/aggregate_device_manager.h"
#include "media/audio/mac/audio_device_listener_mac.h"
namespace media {
@@ -41,14 +40,17 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual std::string GetDefaultOutputDeviceID() OVERRIDE;
+ // Used to track destruction of input and output streams.
+ virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
+ virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
+
static bool GetDefaultInputDevice(AudioDeviceID* device);
static bool GetDefaultOutputDevice(AudioDeviceID* device);
static bool GetDefaultDevice(AudioDeviceID* device, bool input);
@@ -62,6 +64,17 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
static int HardwareSampleRateForDevice(AudioDeviceID device_id);
static int HardwareSampleRate();
+ // OSX has issues with starting streams as the system goes into suspend and
+ // immediately after it wakes up from resume. See http://crbug.com/160920.
+ // As a workaround, we delay Start() when it occurs after suspend and for a
+ // small amount of time after resume.
+ //
+ // Streams should consult ShouldDeferStreamStart() and, if it returns true,
+ // check the value again after |kStartDelayInSecsForPowerEvents| has elapsed.
+ // If false, the stream may be started immediately.
+ enum { kStartDelayInSecsForPowerEvents = 1 };
+ bool ShouldDeferStreamStart();
+
protected:
virtual ~AudioManagerMac();
@@ -70,11 +83,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
- bool HasUnifiedDefaultIO();
-
- // Helper methods for constructing AudioDeviceListenerMac on the audio thread.
- void CreateDeviceListener();
- void DestroyDeviceListener();
+ void InitializeOnAudioThread();
+ void ShutdownOnAudioThread();
int ChooseBufferSize(int output_sample_rate);
@@ -89,7 +99,16 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
int current_sample_rate_;
AudioDeviceID current_output_device_;
- AggregateDeviceManager aggregate_device_manager_;
+ // Helper class which monitors power events to determine if output streams
+ // should defer Start() calls. Required to work around an OSX bug. See
+ // http://crbug.com/160920 for more details.
+ class AudioPowerObserver;
+ scoped_ptr<AudioPowerObserver> power_observer_;
+
+ // Tracks all constructed input and output streams so they can be stopped at
+ // shutdown. See ShutdownOnAudioThread() for more details.
+ std::list<AudioInputStream*> input_streams_;
+ std::list<AudioOutputStream*> output_streams_;
DISALLOW_COPY_AND_ASSIGN(AudioManagerMac);
};
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.cc b/chromium/media/audio/mac/audio_synchronized_mac.cc
deleted file mode 100644
index a9bc88e2bd3..00000000000
--- a/chromium/media/audio/mac/audio_synchronized_mac.cc
+++ /dev/null
@@ -1,976 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_synchronized_mac.h"
-
-#include <CoreServices/CoreServices.h>
-#include <algorithm>
-
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/channel_mixer.h"
-
-namespace media {
-
-static const int kHardwareBufferSize = 128;
-static const int kFifoSize = 16384;
-
-// TODO(crogers): handle the non-stereo case.
-static const int kChannels = 2;
-
-// This value was determined empirically for minimum latency while still
-// guarding against FIFO under-runs.
-static const int kBaseTargetFifoFrames = 256 + 64;
-
-// If the input and output sample-rate don't match, then we need to maintain
-// an additional safety margin due to the callback timing jitter and the
-// varispeed buffering. This value was empirically tuned.
-static const int kAdditionalTargetFifoFrames = 128;
-
-static void ZeroBufferList(AudioBufferList* buffer_list) {
- for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
- memset(buffer_list->mBuffers[i].mData,
- 0,
- buffer_list->mBuffers[i].mDataByteSize);
-}
-
-static void WrapBufferList(AudioBufferList* buffer_list,
- AudioBus* bus,
- int frames) {
- DCHECK(buffer_list);
- DCHECK(bus);
- int channels = bus->channels();
- int buffer_list_channels = buffer_list->mNumberBuffers;
-
- // Copy pointers from AudioBufferList.
- int source_idx = 0;
- for (int i = 0; i < channels; ++i) {
- bus->SetChannelData(
- i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
-
- // It's ok to pass in a |buffer_list| with fewer channels, in which
- // case we just duplicate the last channel.
- if (source_idx < buffer_list_channels - 1)
- ++source_idx;
- }
-
- // Finally set the actual length.
- bus->set_frames(frames);
-}
-
-AudioSynchronizedStream::AudioSynchronizedStream(
- AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID input_id,
- AudioDeviceID output_id)
- : manager_(manager),
- params_(params),
- input_sample_rate_(0),
- output_sample_rate_(0),
- input_id_(input_id),
- output_id_(output_id),
- input_buffer_list_(NULL),
- fifo_(kChannels, kFifoSize),
- target_fifo_frames_(kBaseTargetFifoFrames),
- average_delta_(0.0),
- fifo_rate_compensation_(1.0),
- input_unit_(0),
- varispeed_unit_(0),
- output_unit_(0),
- first_input_time_(-1),
- is_running_(false),
- hardware_buffer_size_(kHardwareBufferSize),
- channels_(kChannels) {
- VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
-}
-
-AudioSynchronizedStream::~AudioSynchronizedStream() {
- DCHECK(!input_unit_);
- DCHECK(!output_unit_);
- DCHECK(!varispeed_unit_);
-}
-
-bool AudioSynchronizedStream::Open() {
- if (params_.channels() != kChannels) {
- LOG(ERROR) << "Only stereo output is currently supported.";
- return false;
- }
-
- // Create the input, output, and varispeed AudioUnits.
- OSStatus result = CreateAudioUnits();
- if (result != noErr) {
- LOG(ERROR) << "Cannot create AudioUnits.";
- return false;
- }
-
- result = SetupInput(input_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error configuring input AudioUnit.";
- return false;
- }
-
- result = SetupOutput(output_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error configuring output AudioUnit.";
- return false;
- }
-
- result = SetupCallbacks();
- if (result != noErr) {
- LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
- return false;
- }
-
- result = SetupStreamFormats();
- if (result != noErr) {
- LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
- return false;
- }
-
- AllocateInputData();
-
- // Final initialization of the AudioUnits.
- result = AudioUnitInitialize(input_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing input AudioUnit.";
- return false;
- }
-
- result = AudioUnitInitialize(output_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing output AudioUnit.";
- return false;
- }
-
- result = AudioUnitInitialize(varispeed_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing varispeed AudioUnit.";
- return false;
- }
-
- if (input_sample_rate_ != output_sample_rate_) {
- // Add extra safety margin.
- target_fifo_frames_ += kAdditionalTargetFifoFrames;
- }
-
- // Buffer initial silence corresponding to target I/O buffering.
- fifo_.Clear();
- scoped_ptr<AudioBus> silence =
- AudioBus::Create(channels_, target_fifo_frames_);
- silence->Zero();
- fifo_.Push(silence.get());
-
- return true;
-}
-
-void AudioSynchronizedStream::Close() {
- DCHECK(!is_running_);
-
- if (input_buffer_list_) {
- free(input_buffer_list_);
- input_buffer_list_ = 0;
- input_bus_.reset(NULL);
- wrapper_bus_.reset(NULL);
- }
-
- if (input_unit_) {
- AudioUnitUninitialize(input_unit_);
- CloseComponent(input_unit_);
- }
-
- if (output_unit_) {
- AudioUnitUninitialize(output_unit_);
- CloseComponent(output_unit_);
- }
-
- if (varispeed_unit_) {
- AudioUnitUninitialize(varispeed_unit_);
- CloseComponent(varispeed_unit_);
- }
-
- input_unit_ = NULL;
- output_unit_ = NULL;
- varispeed_unit_ = NULL;
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK(input_unit_);
- DCHECK(output_unit_);
- DCHECK(varispeed_unit_);
-
- if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
- return;
-
- source_ = callback;
-
- // Reset state variables each time we Start().
- fifo_rate_compensation_ = 1.0;
- average_delta_ = 0.0;
-
- OSStatus result = noErr;
-
- if (!is_running_) {
- first_input_time_ = -1;
-
- result = AudioOutputUnitStart(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- result = AudioOutputUnitStart(output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
- }
-
- is_running_ = true;
-}
-
-void AudioSynchronizedStream::Stop() {
- OSStatus result = noErr;
- if (is_running_) {
- result = AudioOutputUnitStop(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- result = AudioOutputUnitStop(output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
- }
-
- if (result == noErr)
- is_running_ = false;
-}
-
-bool AudioSynchronizedStream::IsRunning() {
- return is_running_;
-}
-
-// TODO(crogers): implement - or remove from AudioOutputStream.
-void AudioSynchronizedStream::SetVolume(double volume) {}
-void AudioSynchronizedStream::GetVolume(double* volume) {}
-
-OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
- AudioDeviceID output_id) {
- OSStatus result = noErr;
-
- // Get the default output device if device is unknown.
- if (output_id == kAudioDeviceUnknown) {
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(output_id);
-
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &output_id);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
- }
-
- // Set the render frame size.
- UInt32 frame_size = hardware_buffer_size_;
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- output_id,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- output_info_.Initialize(output_id, false);
-
- // Set the Current Device to the Default Output Unit.
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &output_info_.id_,
- sizeof(output_info_.id_));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
- AudioDeviceID input_id) {
- OSStatus result = noErr;
-
- // Get the default input device if device is unknown.
- if (input_id == kAudioDeviceUnknown) {
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(input_id);
-
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &input_id);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
- }
-
- // Set the render frame size.
- UInt32 frame_size = hardware_buffer_size_;
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- input_id,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- input_info_.Initialize(input_id, true);
-
- // Set the Current Device to the AUHAL.
- // This should be done only after I/O has been enabled on the AUHAL.
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &input_info_.id_,
- sizeof(input_info_.id_));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::CreateAudioUnits() {
- // Q: Why do we need a varispeed unit?
- // A: If the input device and the output device are running at
- // different sample rates and/or on different clocks, we will need
- // to compensate to avoid a pitch change and
- // to avoid buffer under and over runs.
- ComponentDescription varispeed_desc;
- varispeed_desc.componentType = kAudioUnitType_FormatConverter;
- varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
- varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- varispeed_desc.componentFlags = 0;
- varispeed_desc.componentFlagsMask = 0;
-
- Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
- if (varispeed_comp == NULL)
- return -1;
-
- OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Open input AudioUnit.
- ComponentDescription input_desc;
- input_desc.componentType = kAudioUnitType_Output;
- input_desc.componentSubType = kAudioUnitSubType_HALOutput;
- input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- input_desc.componentFlags = 0;
- input_desc.componentFlagsMask = 0;
-
- Component input_comp = FindNextComponent(NULL, &input_desc);
- if (input_comp == NULL)
- return -1;
-
- result = OpenAComponent(input_comp, &input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Open output AudioUnit.
- ComponentDescription output_desc;
- output_desc.componentType = kAudioUnitType_Output;
- output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
- output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- output_desc.componentFlags = 0;
- output_desc.componentFlagsMask = 0;
-
- Component output_comp = FindNextComponent(NULL, &output_desc);
- if (output_comp == NULL)
- return -1;
-
- result = OpenAComponent(output_comp, &output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
- // The AUHAL used for input needs to be initialized
- // before anything is done to it.
- OSStatus result = AudioUnitInitialize(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // We must enable the Audio Unit (AUHAL) for input and disable output
- // BEFORE setting the AUHAL's current device.
- result = EnableIO();
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = SetInputDeviceAsCurrent(input_id);
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::EnableIO() {
- // Enable input on the AUHAL.
- UInt32 enable_io = 1;
- OSStatus result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- 1, // input element
- &enable_io,
- sizeof(enable_io));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Disable Output on the AUHAL.
- enable_io = 0;
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output,
- 0, // output element
- &enable_io,
- sizeof(enable_io));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
- OSStatus result = noErr;
-
- result = SetOutputDeviceAsCurrent(output_id);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Tell the output unit not to reset timestamps.
- // Otherwise sample rate changes will cause sync loss.
- UInt32 start_at_zero = 0;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioOutputUnitProperty_StartTimestampsAtZero,
- kAudioUnitScope_Global,
- 0,
- &start_at_zero,
- sizeof(start_at_zero));
-
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupCallbacks() {
- // Set the input callback.
- AURenderCallbackStruct callback;
- callback.inputProc = InputProc;
- callback.inputProcRefCon = this;
- OSStatus result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the output callback.
- callback.inputProc = OutputProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the varispeed callback.
- callback.inputProc = VarispeedProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupStreamFormats() {
- AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
-
- // Get the Stream Format (Output client side).
- UInt32 property_size = sizeof(asbd_dev1_in);
- OSStatus result = AudioUnitGetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 1,
- &asbd_dev1_in,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Get the Stream Format (client side).
- property_size = sizeof(asbd);
- result = AudioUnitGetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &asbd,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Get the Stream Format (Output client side).
- property_size = sizeof(asbd_dev2_out);
- result = AudioUnitGetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 0,
- &asbd_dev2_out,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the format of all the AUs to the input/output devices channel count.
- // For a simple case, you want to set this to
- // the lower of count of the channels in the input device vs output device.
- asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
- asbd_dev2_out.mChannelsPerFrame);
-
- // We must get the sample rate of the input device and set it to the
- // stream format of AUHAL.
- Float64 rate = 0;
- property_size = sizeof(rate);
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectGetPropertyData(
- input_info_.id_,
- &pa,
- 0,
- 0,
- &property_size,
- &rate);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- input_sample_rate_ = rate;
-
- asbd.mSampleRate = rate;
- property_size = sizeof(asbd);
-
- // Set the new formats to the AUs...
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the correct sample rate for the output device,
- // but keep the channel count the same.
- property_size = sizeof(rate);
-
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectGetPropertyData(
- output_info_.id_,
- &pa,
- 0,
- 0,
- &property_size,
- &rate);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- output_sample_rate_ = rate;
-
- // The requested sample-rate must match the hardware sample-rate.
- if (output_sample_rate_ != params_.sample_rate()) {
- LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
- << " must match the hardware sample-rate: " << output_sample_rate_;
- return kAudioDeviceUnsupportedFormatError;
- }
-
- asbd.mSampleRate = rate;
- property_size = sizeof(asbd);
-
- // Set the new audio stream formats for the rest of the AUs...
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-void AudioSynchronizedStream::AllocateInputData() {
- // Get the native number of input channels that the hardware supports.
- int hardware_channels = 0;
- bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
- input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
- if (!got_hardware_channels || hardware_channels > 2) {
- // Only mono and stereo are supported on the input side. When it fails to
- // get the native channel number or the native channel number is bigger
- // than 2, we open the device in stereo mode.
- hardware_channels = 2;
- }
-
- // Allocate storage for the AudioBufferList used for the
- // input data from the input AudioUnit.
- // We allocate enough space for with one AudioBuffer per channel.
- size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
- (sizeof(AudioBuffer) * hardware_channels);
-
- input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
- input_buffer_list_->mNumberBuffers = hardware_channels;
-
- input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
- wrapper_bus_ = AudioBus::CreateWrapper(channels_);
- if (hardware_channels != params_.input_channels()) {
- ChannelLayout hardware_channel_layout =
- GuessChannelLayout(hardware_channels);
- ChannelLayout requested_channel_layout =
- GuessChannelLayout(params_.input_channels());
- channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
- requested_channel_layout));
- mixer_bus_ = AudioBus::Create(params_.input_channels(),
- hardware_buffer_size_);
- }
-
- // Allocate buffers for AudioBufferList.
- UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
- for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
- input_buffer_list_->mBuffers[i].mNumberChannels = 1;
- input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
- input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
- }
-}
-
-OSStatus AudioSynchronizedStream::HandleInputCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
-
- if (first_input_time_ < 0.0)
- first_input_time_ = time_stamp->mSampleTime;
-
- // Get the new audio input data.
- OSStatus result = AudioUnitRender(
- input_unit_,
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- input_buffer_list_);
-
- // TODO(xians): Add back the DCHECK after synchronize IO supports all
- // combination of input and output params. See http://issue/246521.
- if (result != noErr)
- return result;
-
- // Buffer input into FIFO.
- int available_frames = fifo_.max_frames() - fifo_.frames();
- if (input_bus_->frames() <= available_frames) {
- if (channel_mixer_) {
- channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
- fifo_.Push(mixer_bus_.get());
- } else {
- fifo_.Push(input_bus_.get());
- }
- }
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- // Create a wrapper bus on the AudioBufferList.
- WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
-
- if (fifo_.frames() < static_cast<int>(number_of_frames)) {
- // We don't DCHECK here, since this is a possible run-time condition
- // if the machine is bogged down.
- wrapper_bus_->Zero();
- return noErr;
- }
-
- // Read from the FIFO to feed the varispeed.
- fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::HandleOutputCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- // Input callback hasn't run yet or we've suddenly changed sample-rates
- // -> silence.
- if (first_input_time_ < 0.0 ||
- static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
- ZeroBufferList(io_data);
- return noErr;
- }
-
- // Use the varispeed playback rate to offset small discrepancies
- // in hardware clocks, and also any differences in sample-rate
- // between input and output devices.
-
- // Calculate a varispeed rate scalar factor to compensate for drift between
- // input and output. We use the actual number of frames still in the FIFO
- // compared with the ideal value of |target_fifo_frames_|.
- int delta = fifo_.frames() - target_fifo_frames_;
-
- // Average |delta| because it can jitter back/forth quite frequently
- // by +/- the hardware buffer-size *if* the input and output callbacks are
- // happening at almost exactly the same time. Also, if the input and output
- // sample-rates are different then |delta| will jitter quite a bit due to
- // the rate conversion happening in the varispeed, plus the jittering of
- // the callbacks. The average value is what's important here.
- average_delta_ += (delta - average_delta_) * 0.1;
-
- // Compute a rate compensation which always attracts us back to the
- // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
- const double kCorrectionTimeSeconds = 0.1;
- double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
- fifo_rate_compensation_ =
- (correction_time_frames + average_delta_) / correction_time_frames;
-
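Although this file is being deleted, the drift-compensation math above deserves one worked pass; with the constants defined earlier in this file:

// Editor's worked example of the (now-removed) varispeed compensation:
//   output_sample_rate_ = 48000, kCorrectionTimeSeconds = 0.1
//     -> correction_time_frames = 4800
//   FIFO averaging 48 frames over target (average_delta_ = +48):
//     fifo_rate_compensation_ = (4800 + 48) / 4800 = 1.01
//   i.e. playback speeds up 1% and drains the surplus over roughly 0.1 s.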
- // Adjust for FIFO drift.
- OSStatus result = AudioUnitSetParameter(
- varispeed_unit_,
- kVarispeedParam_PlaybackRate,
- kAudioUnitScope_Global,
- 0,
- fifo_rate_compensation_,
- 0);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Render to the output using the varispeed.
- result = AudioUnitRender(
- varispeed_unit_,
- io_action_flags,
- time_stamp,
- 0,
- number_of_frames,
- io_data);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Create a wrapper bus on the AudioBufferList.
- WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
-
- // Process in-place!
- source_->OnMoreIOData(wrapper_bus_.get(),
- wrapper_bus_.get(),
- AudioBuffersState(0, 0));
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::InputProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleInputCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-OSStatus AudioSynchronizedStream::VarispeedProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleVarispeedCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-OSStatus AudioSynchronizedStream::OutputProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleOutputCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
- AudioDeviceID id, bool is_input) {
- id_ = id;
- is_input_ = is_input;
- if (id_ == kAudioDeviceUnknown)
- return;
-
- UInt32 property_size = sizeof(buffer_size_frames_);
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- OSStatus result = AudioObjectGetPropertyData(
- id_,
- &pa,
- 0,
- 0,
- &property_size,
- &buffer_size_frames_);
-
- OSSTATUS_DCHECK(result == noErr, result);
-}
-
-} // namespace media
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.h b/chromium/media/audio/mac/audio_synchronized_mac.h
deleted file mode 100644
index a6db48e3037..00000000000
--- a/chromium/media/audio/mac/audio_synchronized_mac.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
-
-#include <AudioToolbox/AudioToolbox.h>
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/audio_fifo.h"
-
-namespace media {
-
-class AudioManagerMac;
-class ChannelMixer;
-
-// AudioSynchronizedStream allows arbitrary combinations of input and output
-// devices running off different clocks and using different drivers, with
-// potentially differing sample-rates. It implements AudioOutputStream
-// and shuttles its synchronized I/O data using AudioSourceCallback.
-//
-// It is required to first acquire the native sample rate of the selected
-// output device and then use the same rate when creating this object.
-//
-// ............................................................................
-// Theory of Operation:
-// .
-// INPUT THREAD . OUTPUT THREAD
-// +-----------------+ +------+ .
-// | Input AudioUnit | --> | | .
-// +-----------------+ | | .
-// | FIFO | .
-// | | +-----------+
-// | | -----> | Varispeed |
-// | | +-----------+
-// +------+ . |
-// . | +-----------+
-// . OnMoreIOData() --> | Output AU |
-// . +-----------+
-//
-// The input AudioUnit's InputProc is called on one thread which feeds the
-// FIFO. The output AudioUnit's OutputProc is called on a second thread
-// which pulls on the varispeed to get the current input data. The varispeed
-// handles mismatches between input and output sample-rate and also clock drift
-// between the input and output drivers. The varispeed consumes its data from
-// the FIFO and adjusts its rate dynamically according to the amount
-// of data buffered in the FIFO. If the FIFO starts getting too much data
-// buffered then the varispeed will speed up slightly to compensate
-// and similarly if the FIFO doesn't have enough data buffered then the
-// varispeed will slow down slightly.
-//
-// Finally, once the input data is available then OnMoreIOData() is called
-// which is given this input, and renders the output which is finally sent
-// to the Output AudioUnit.
-class AudioSynchronizedStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // the audio manager who is creating this object.
- AudioSynchronizedStream(AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID input_id,
- AudioDeviceID output_id);
-
- virtual ~AudioSynchronizedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
-
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
- OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
- AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
- AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
-
- bool IsRunning();
-
- private:
- // Initialization.
- OSStatus CreateAudioUnits();
- OSStatus SetupInput(AudioDeviceID input_id);
- OSStatus EnableIO();
- OSStatus SetupOutput(AudioDeviceID output_id);
- OSStatus SetupCallbacks();
- OSStatus SetupStreamFormats();
- void AllocateInputData();
-
- // Handlers for the AudioUnit callbacks.
- OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // AudioUnit callbacks.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- static OSStatus VarispeedProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- static OSStatus OutputProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // Our creator.
- AudioManagerMac* manager_;
-
- // Client parameters.
- AudioParameters params_;
-
- double input_sample_rate_;
- double output_sample_rate_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Values used in Open().
- AudioDeviceID input_id_;
- AudioDeviceID output_id_;
-
- // The input AudioUnit renders its data here.
- AudioBufferList* input_buffer_list_;
-
- // Holds the actual data for |input_buffer_list_|.
- scoped_ptr<AudioBus> input_bus_;
-
- // Used to overlay AudioBufferLists.
- scoped_ptr<AudioBus> wrapper_bus_;
-
- class AudioDeviceInfo {
- public:
- AudioDeviceInfo()
- : id_(kAudioDeviceUnknown),
- is_input_(false),
- buffer_size_frames_(0) {}
- void Initialize(AudioDeviceID inID, bool isInput);
- bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
-
- AudioDeviceID id_;
- bool is_input_;
- UInt32 buffer_size_frames_;
- };
-
- AudioDeviceInfo input_info_;
- AudioDeviceInfo output_info_;
-
- // Used for input to output buffering.
- AudioFifo fifo_;
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- int target_fifo_frames_;
-
- // A running average of the measured delta between actual number of frames
- // in the FIFO versus |target_fifo_frames_|.
- double average_delta_;
-
- // A varispeed rate scalar which is calculated based on FIFO drift.
- double fifo_rate_compensation_;
-
- // AudioUnits.
- AudioUnit input_unit_;
- AudioUnit varispeed_unit_;
- AudioUnit output_unit_;
-
- double first_input_time_;
-
- bool is_running_;
- int hardware_buffer_size_;
- int channels_;
-
- // Channel mixer used to transform mono to stereo data. It is only created
- // if the input_hardware_channels is mono.
- scoped_ptr<ChannelMixer> channel_mixer_;
- scoped_ptr<AudioBus> mixer_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
diff --git a/chromium/media/audio/mac/audio_unified_mac.cc b/chromium/media/audio/mac/audio_unified_mac.cc
deleted file mode 100644
index d1dc007e6a8..00000000000
--- a/chromium/media/audio/mac/audio_unified_mac.cc
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_unified_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/mac/audio_manager_mac.h"
-
-namespace media {
-
-// TODO(crogers): support more than hard-coded stereo input.
-// Ideally we would like to receive this value as a constructor argument.
-static const int kDefaultInputChannels = 2;
-
-AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : manager_(manager),
- source_(NULL),
- client_input_channels_(kDefaultInputChannels),
- volume_(1.0f),
- input_channels_(0),
- output_channels_(0),
- input_channels_per_frame_(0),
- output_channels_per_frame_(0),
- io_proc_id_(0),
- device_(kAudioObjectUnknown),
- is_playing_(false) {
- DCHECK(manager_);
-
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- // Calculate the number of sample frames per callback.
- number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
-
- input_bus_ = AudioBus::Create(client_input_channels_,
- params.frames_per_buffer());
- output_bus_ = AudioBus::Create(params);
-}
-
-AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
- DCHECK_EQ(device_, kAudioObjectUnknown);
-}
-
-bool AudioHardwareUnifiedStream::Open() {
- // Obtain the current output device selected by the user.
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- UInt32 size = sizeof(device_);
-
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &device_);
-
- if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
- LOG(ERROR) << "Cannot open unified AudioDevice.";
- return false;
- }
-
- // The requested sample-rate must match the hardware sample-rate.
- Float64 sample_rate = 0.0;
- size = sizeof(sample_rate);
-
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &sample_rate);
-
- if (result != noErr || sample_rate != format_.mSampleRate) {
- LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
- << " must match the hardware sample-rate: " << sample_rate;
- return false;
- }
-
- // Configure buffer frame size.
- UInt32 frame_size = number_of_frames_;
-
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- if (result != noErr) {
- LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
- return false;
- }
-
- pa.mScope = kAudioDevicePropertyScopeOutput;
- result = AudioObjectSetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- if (result != noErr) {
- LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
- return false;
- }
-
- DVLOG(1) << "Sample rate: " << sample_rate;
- DVLOG(1) << "Frame size: " << frame_size;
-
- // Determine the number of input and output channels.
- // We handle both the interleaved and non-interleaved cases.
-
- // Get input stream configuration.
- pa.mSelector = kAudioDevicePropertyStreamConfiguration;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr && size > 0) {
- // Allocate storage.
- scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
- AudioBufferList& input_list =
- *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &input_list);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- // Determine number of input channels.
- input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
- input_list.mBuffers[0].mNumberChannels : 0;
- if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
- // Non-interleaved.
- input_channels_ = input_list.mNumberBuffers;
- } else {
- // Interleaved.
- input_channels_ = input_channels_per_frame_;
- }
- }
- }
-
- DVLOG(1) << "Input channels: " << input_channels_;
- DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
-
- // The hardware must have at least the requested input channels.
- if (result != noErr || client_input_channels_ > input_channels_) {
- LOG(ERROR) << "AudioDevice does not support requested input channels.";
- return false;
- }
-
- // Get output stream configuration.
- pa.mSelector = kAudioDevicePropertyStreamConfiguration;
- pa.mScope = kAudioDevicePropertyScopeOutput;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr && size > 0) {
- // Allocate storage.
- scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
- AudioBufferList& output_list =
- *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &output_list);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- // Determine number of output channels.
- output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels;
- if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
- // Non-interleaved.
- output_channels_ = output_list.mNumberBuffers;
- } else {
- // Interleaved.
- output_channels_ = output_channels_per_frame_;
- }
- }
- }
-
- DVLOG(1) << "Output channels: " << output_channels_;
- DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
-
- // The hardware must have at least the requested output channels.
- if (result != noErr ||
- output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
- LOG(ERROR) << "AudioDevice does not support requested output channels.";
- return false;
- }
-
- // Setup the I/O proc.
- result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error creating IOProc.";
- return false;
- }
-
- return true;
-}
-
-void AudioHardwareUnifiedStream::Close() {
- DCHECK(!is_playing_);
-
- OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- io_proc_id_ = 0;
- device_ = kAudioObjectUnknown;
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK_NE(device_, kAudioObjectUnknown);
- DCHECK(!is_playing_);
- if (device_ == kAudioObjectUnknown || is_playing_)
- return;
-
- source_ = callback;
-
- OSStatus result = AudioDeviceStart(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr)
- is_playing_ = true;
-}
-
-void AudioHardwareUnifiedStream::Stop() {
- if (!is_playing_)
- return;
-
- if (device_ != kAudioObjectUnknown) {
- OSStatus result = AudioDeviceStop(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
-
- is_playing_ = false;
- source_ = NULL;
-}
-
-void AudioHardwareUnifiedStream::SetVolume(double volume) {
- volume_ = static_cast<float>(volume);
- // TODO(crogers): set volume property
-}
-
-void AudioHardwareUnifiedStream::GetVolume(double* volume) {
- *volume = volume_;
-}
-
-// Pulls on our provider with optional input, asking it to render output.
-// Note to future hackers of this function: Do not add locks here because this
-// is running on a real-time thread (for low-latency).
-OSStatus AudioHardwareUnifiedStream::Render(
- AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time) {
- // Convert the input data accounting for possible interleaving.
- // TODO(crogers): it's better to simply memcpy() if source is already planar.
- if (input_channels_ >= client_input_channels_) {
- for (int channel_index = 0; channel_index < client_input_channels_;
- ++channel_index) {
- float* source;
-
- int source_channel_index = channel_index;
-
- if (input_channels_per_frame_ > 1) {
- // Interleaved.
- source = static_cast<float*>(input_data->mBuffers[0].mData) +
- source_channel_index;
- } else {
- // Non-interleaved.
- source = static_cast<float*>(
- input_data->mBuffers[source_channel_index].mData);
- }
-
- float* p = input_bus_->channel(channel_index);
- for (int i = 0; i < number_of_frames_; ++i) {
- p[i] = *source;
- source += input_channels_per_frame_;
- }
- }
- } else if (input_channels_) {
- input_bus_->Zero();
- }
-
- // Give the client optional input data and have it render the output data.
- source_->OnMoreIOData(input_bus_.get(),
- output_bus_.get(),
- AudioBuffersState(0, 0));
-
- // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
-
- // Handle interleaving as necessary.
- // TODO(crogers): it's better to simply memcpy() if dest is already planar.
-
- for (int channel_index = 0;
- channel_index < static_cast<int>(format_.mChannelsPerFrame);
- ++channel_index) {
- float* dest;
-
- int dest_channel_index = channel_index;
-
- if (output_channels_per_frame_ > 1) {
- // Interleaved.
- dest = static_cast<float*>(output_data->mBuffers[0].mData) +
- dest_channel_index;
- } else {
- // Non-interleaved.
- dest = static_cast<float*>(
- output_data->mBuffers[dest_channel_index].mData);
- }
-
- float* p = output_bus_->channel(channel_index);
- for (int i = 0; i < number_of_frames_; ++i) {
- *dest = p[i];
- dest += output_channels_per_frame_;
- }
- }
-
- return noErr;
-}
-
-OSStatus AudioHardwareUnifiedStream::RenderProc(
- AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time,
- void* user_data) {
- AudioHardwareUnifiedStream* audio_output =
- static_cast<AudioHardwareUnifiedStream*>(user_data);
- DCHECK(audio_output);
- if (!audio_output)
- return -1;
-
- return audio_output->Render(
- device,
- now,
- input_data,
- input_time,
- output_data,
- output_time);
-}
-
-} // namespace media
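
The deleted Render() above hand-rolls conversion between interleaved and planar sample layouts. A minimal standalone sketch of that de-interleave step (illustrative names, not Chromium code):

    // Copy one channel out of an interleaved float buffer into planar
    // storage, as the deleted Render() loop does once per channel.
    // |stride| is the number of channels per interleaved frame.
    void DeinterleaveChannel(const float* interleaved, float* planar,
                             int frames, int stride, int channel) {
      const float* src = interleaved + channel;
      for (int i = 0; i < frames; ++i) {
        planar[i] = *src;
        src += stride;
      }
    }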
diff --git a/chromium/media/audio/mac/audio_unified_mac.h b/chromium/media/audio/mac/audio_unified_mac.h
deleted file mode 100644
index ff090e3be1a..00000000000
--- a/chromium/media/audio/mac/audio_unified_mac.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
-
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-
-// Implementation of AudioOutputStream for Mac OS X using the
-// CoreAudio AudioHardware API suitable for low-latency unified audio I/O
-// when using devices which support *both* input and output
-// in the same driver. This is the case with professional
-// USB and Firewire devices.
-//
-// Please note that it's required to first get the native sample-rate of the
-// default output device and use that sample-rate when creating this object.
-class AudioHardwareUnifiedStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager|, which is the
- // audio manager that is creating this object.
- AudioHardwareUnifiedStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~AudioHardwareUnifiedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- int input_channels() const { return input_channels_; }
- int output_channels() const { return output_channels_; }
-
- private:
- OSStatus Render(AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time);
-
- static OSStatus RenderProc(AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time,
- void* user_data);
-
- // Our creator, the audio manager, needs to be notified when we close.
- AudioManagerMac* manager_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
-
- // Hardware buffer size.
- int number_of_frames_;
-
- // Number of audio channels provided to the client via OnMoreIOData().
- int client_input_channels_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Number of input and output channels queried from the hardware.
- int input_channels_;
- int output_channels_;
- int input_channels_per_frame_;
- int output_channels_per_frame_;
-
- AudioDeviceIOProcID io_proc_id_;
- AudioDeviceID device_;
- bool is_playing_;
-
- // Intermediate buffers used with call to OnMoreIOData().
- scoped_ptr<AudioBus> input_bus_;
- scoped_ptr<AudioBus> output_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioHardwareUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
diff --git a/chromium/media/audio/mock_audio_manager.cc b/chromium/media/audio/mock_audio_manager.cc
index f2074d65357..7183405a190 100644
--- a/chromium/media/audio/mock_audio_manager.cc
+++ b/chromium/media/audio/mock_audio_manager.cc
@@ -5,14 +5,14 @@
#include "media/audio/mock_audio_manager.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_parameters.h"
namespace media {
-MockAudioManager::MockAudioManager(base::MessageLoopProxy* message_loop_proxy)
- : message_loop_proxy_(message_loop_proxy) {
-}
+MockAudioManager::MockAudioManager(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner) {}
MockAudioManager::~MockAudioManager() {
}
@@ -34,11 +34,8 @@ void MockAudioManager::ShowAudioInputSettings() {
void MockAudioManager::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- device_names->push_back(media::AudioDeviceName("fake_device_name_1",
- "fake_device_id_1"));
- device_names->push_back(media::AudioDeviceName("fake_device_name_2",
- "fake_device_id_2"));
+ // Do not inject fake devices here, use
+ // AudioInputDeviceManager::GetFakeDeviceNames() instead.
}
void MockAudioManager::GetAudioOutputDeviceNames(
@@ -47,16 +44,14 @@ void MockAudioManager::GetAudioOutputDeviceNames(
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
NOTREACHED();
return NULL;
}
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
NOTREACHED();
return NULL;
}
@@ -68,12 +63,13 @@ media::AudioInputStream* MockAudioManager::MakeAudioInputStream(
return NULL;
}
-scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetMessageLoop() {
- return message_loop_proxy_;
+scoped_refptr<base::SingleThreadTaskRunner> MockAudioManager::GetTaskRunner() {
+ return task_runner_;
}
-scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetWorkerLoop() {
- return message_loop_proxy_;
+scoped_refptr<base::SingleThreadTaskRunner>
+MockAudioManager::GetWorkerTaskRunner() {
+ return task_runner_;
}
void MockAudioManager::AddOutputDeviceChangeListener(
@@ -108,6 +104,4 @@ scoped_ptr<AudioLog> MockAudioManager::CreateAudioLog(
return scoped_ptr<AudioLog>();
}
-void MockAudioManager::FixWedgedAudio() {}
-
} // namespace media.
diff --git a/chromium/media/audio/mock_audio_manager.h b/chromium/media/audio/mock_audio_manager.h
index 2d71fe8493f..520205d21ba 100644
--- a/chromium/media/audio/mock_audio_manager.h
+++ b/chromium/media/audio/mock_audio_manager.h
@@ -21,7 +21,8 @@ namespace media {
// synchronization purposes).
class MockAudioManager : public media::AudioManager {
public:
- explicit MockAudioManager(base::MessageLoopProxy* message_loop_proxy);
+ explicit MockAudioManager(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
virtual bool HasAudioOutputDevices() OVERRIDE;
@@ -39,20 +40,19 @@ class MockAudioManager : public media::AudioManager {
virtual media::AudioOutputStream* MakeAudioOutputStream(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual media::AudioInputStream* MakeAudioInputStream(
const media::AudioParameters& params,
const std::string& device_id) OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner()
+ OVERRIDE;
virtual void AddOutputDeviceChangeListener(
AudioDeviceListener* listener) OVERRIDE;
@@ -70,13 +70,11 @@ class MockAudioManager : public media::AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) OVERRIDE;
- virtual void FixWedgedAudio() OVERRIDE;
-
protected:
virtual ~MockAudioManager();
private:
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
};
diff --git a/chromium/media/audio/mock_audio_source_callback.cc b/chromium/media/audio/mock_audio_source_callback.cc
new file mode 100644
index 00000000000..da2be1cfa41
--- /dev/null
+++ b/chromium/media/audio/mock_audio_source_callback.cc
@@ -0,0 +1,12 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mock_audio_source_callback.h"
+
+namespace media {
+
+MockAudioSourceCallback::MockAudioSourceCallback() {}
+MockAudioSourceCallback::~MockAudioSourceCallback() {}
+
+} // namespace media
diff --git a/chromium/media/audio/mock_audio_source_callback.h b/chromium/media/audio/mock_audio_source_callback.h
new file mode 100644
index 00000000000..d24ce44c5f8
--- /dev/null
+++ b/chromium/media/audio/mock_audio_source_callback.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MOCK_AUDIO_SOURCE_CALLBACK_H_
+#define MEDIA_AUDIO_MOCK_AUDIO_SOURCE_CALLBACK_H_
+
+#include "media/audio/audio_io.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MockAudioSourceCallback();
+ virtual ~MockAudioSourceCallback();
+
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioSourceCallback);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MOCK_AUDIO_SOURCE_CALLBACK_H_
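
For reference, a hypothetical test snippet using the new mock (the stream under test and its setup are assumptions, not part of this patch):

    #include "media/audio/mock_audio_source_callback.h"
    #include "testing/gmock/include/gmock/gmock.h"
    #include "testing/gtest/include/gtest/gtest.h"

    using ::testing::_;
    using ::testing::Return;

    TEST(AudioOutputStreamTest, PullsDataFromSource) {
      media::MockAudioSourceCallback source;
      // Expect the stream to pull data; returning 0 frames yields silence.
      EXPECT_CALL(source, OnMoreData(_, _)).WillRepeatedly(Return(0));
      // stream->Start(&source); ... stream->Stop();  // Stream setup elided.
    }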
diff --git a/chromium/media/audio/null_audio_sink.cc b/chromium/media/audio/null_audio_sink.cc
index 607d7d861e2..dfd07fcee6a 100644
--- a/chromium/media/audio/null_audio_sink.cc
+++ b/chromium/media/audio/null_audio_sink.cc
@@ -5,18 +5,18 @@
#include "media/audio/null_audio_sink.h"
#include "base/bind.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/fake_audio_consumer.h"
#include "media/base/audio_hash.h"
namespace media {
NullAudioSink::NullAudioSink(
- const scoped_refptr<base::MessageLoopProxy>& message_loop)
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
: initialized_(false),
playing_(false),
callback_(NULL),
- message_loop_(message_loop) {
+ task_runner_(task_runner) {
}
NullAudioSink::~NullAudioSink() {}
@@ -24,18 +24,18 @@ NullAudioSink::~NullAudioSink() {}
void NullAudioSink::Initialize(const AudioParameters& params,
RenderCallback* callback) {
DCHECK(!initialized_);
- fake_consumer_.reset(new FakeAudioConsumer(message_loop_, params));
+ fake_consumer_.reset(new FakeAudioConsumer(task_runner_, params));
callback_ = callback;
initialized_ = true;
}
void NullAudioSink::Start() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!playing_);
}
void NullAudioSink::Stop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Stop may be called at any time, so we have to check before stopping.
if (fake_consumer_)
@@ -43,7 +43,7 @@ void NullAudioSink::Stop() {
}
void NullAudioSink::Play() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(initialized_);
if (playing_)
@@ -55,7 +55,7 @@ void NullAudioSink::Play() {
}
void NullAudioSink::Pause() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!playing_)
return;
@@ -70,7 +70,7 @@ bool NullAudioSink::SetVolume(double volume) {
}
void NullAudioSink::CallRender(AudioBus* audio_bus) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
int frames_received = callback_->Render(audio_bus, 0);
if (!audio_hash_ || frames_received <= 0)
diff --git a/chromium/media/audio/null_audio_sink.h b/chromium/media/audio/null_audio_sink.h
index 072414606ff..c28a2cfac06 100644
--- a/chromium/media/audio/null_audio_sink.h
+++ b/chromium/media/audio/null_audio_sink.h
@@ -11,7 +11,7 @@
#include "media/base/audio_renderer_sink.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -22,7 +22,7 @@ class FakeAudioConsumer;
class MEDIA_EXPORT NullAudioSink
: NON_EXPORTED_BASE(public AudioRendererSink) {
public:
- NullAudioSink(const scoped_refptr<base::MessageLoopProxy>& message_loop);
+ NullAudioSink(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
// AudioRendererSink implementation.
virtual void Initialize(const AudioParameters& params,
@@ -53,7 +53,7 @@ class MEDIA_EXPORT NullAudioSink
// Controls whether or not a running hash is computed for audio frames.
scoped_ptr<AudioHash> audio_hash_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_ptr<FakeAudioConsumer> fake_consumer_;
DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
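
A sketch of the updated construction pattern after the MessageLoopProxy-to-task-runner migration (assumes a task runner is bound to the current thread; |params| and |cb| are placeholders):

    #include "base/thread_task_runner_handle.h"
    #include "media/audio/null_audio_sink.h"

    void CreateSinkOnAudioThread(const media::AudioParameters& params,
                                 media::AudioRendererSink::RenderCallback* cb) {
      scoped_refptr<media::NullAudioSink> sink(
          new media::NullAudioSink(base::ThreadTaskRunnerHandle::Get()));
      sink->Initialize(params, cb);
      sink->Start();
      sink->Play();
    }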
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.cc b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
index b378b02d0cd..ec482d8b7cd 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.cc
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
@@ -59,9 +59,13 @@ AudioParameters AudioManagerOpenBSD::GetInputStreamParameters(
const std::string& device_id) {
static const int kDefaultInputBufferSize = 1024;
+ int user_buffer_size = GetUserBufferSize();
+ int buffer_size = user_buffer_size ?
+ user_buffer_size : kDefaultInputBufferSize;
+
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultInputBufferSize);
+ kDefaultSampleRate, 16, buffer_size);
}
AudioManagerOpenBSD::AudioManagerOpenBSD(AudioLogFactory* audio_log_factory)
@@ -92,8 +96,7 @@ AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
return MakeOutputStream(params);
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
index 113f5915ae1..53b7dfb725f 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.h
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.h
@@ -27,8 +27,7 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
index d369d135bef..412f2a421b0 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.cc
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -10,11 +10,12 @@
#include "base/logging.h"
#include "base/nix/xdg_util.h"
#include "base/stl_util.h"
+#if defined(USE_ALSA)
#include "media/audio/alsa/audio_manager_alsa.h"
+#endif
#include "media/audio/audio_parameters.h"
#include "media/audio/pulse/pulse_input.h"
#include "media/audio/pulse/pulse_output.h"
-#include "media/audio/pulse/pulse_unified.h"
#include "media/audio/pulse/pulse_util.h"
#include "media/base/channel_layout.h"
@@ -34,6 +35,13 @@ using pulse::WaitForOperationCompletion;
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 50;
+// Define bounds for the output buffer size.
+static const int kMinimumOutputBufferSize = 512;
+static const int kMaximumOutputBufferSize = 8192;
+
+// Default input buffer size.
+static const int kDefaultInputBufferSize = 1024;
+
static const base::FilePath::CharType kPulseLib[] =
FILE_PATH_LITERAL("libpulse.so.0");
@@ -78,7 +86,9 @@ bool AudioManagerPulse::HasAudioInputDevices() {
}
void AudioManagerPulse::ShowAudioInputSettings() {
+#if defined(USE_ALSA)
AudioManagerAlsa::ShowLinuxAudioInputSettings();
+#endif
}
void AudioManagerPulse::GetAudioDeviceNames(
@@ -118,27 +128,29 @@ void AudioManagerPulse::GetAudioOutputDeviceNames(
AudioParameters AudioManagerPulse::GetInputStreamParameters(
const std::string& device_id) {
- static const int kDefaultInputBufferSize = 1024;
+ int user_buffer_size = GetUserBufferSize();
+ int buffer_size = user_buffer_size ?
+ user_buffer_size : kDefaultInputBufferSize;
// TODO(xians): add support for querying native channel layout for pulse.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- GetNativeSampleRate(), 16, kDefaultInputBufferSize);
+ GetNativeSampleRate(), 16, buffer_size);
}
AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeOutputStream(params, std::string());
+ return MakeOutputStream(params, AudioManagerBase::kDefaultDeviceId);
}
AudioOutputStream* AudioManagerPulse::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+ const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return MakeOutputStream(params, input_device_id);
+ return MakeOutputStream(
+ params,
+ device_id.empty() ? AudioManagerBase::kDefaultDeviceId : device_id);
}
AudioInputStream* AudioManagerPulse::MakeLinearInputStream(
@@ -157,11 +169,10 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
- DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
- static const int kDefaultOutputBufferSize = 512;
+ VLOG_IF(0, !output_device_id.empty()) << "Not implemented!";
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- int buffer_size = kDefaultOutputBufferSize;
+ int buffer_size = kMinimumOutputBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
int sample_rate;
@@ -169,7 +180,9 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
bits_per_sample = input_params.bits_per_sample();
channel_layout = input_params.channel_layout();
input_channels = input_params.input_channels();
- buffer_size = std::min(buffer_size, input_params.frames_per_buffer());
+ buffer_size =
+ std::min(kMaximumOutputBufferSize,
+ std::max(buffer_size, input_params.frames_per_buffer()));
sample_rate = input_params.sample_rate();
} else {
sample_rate = GetNativeSampleRate();
@@ -185,12 +198,10 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
}
AudioOutputStream* AudioManagerPulse::MakeOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
- if (params.input_channels()) {
- return new PulseAudioUnifiedStream(params, input_device_id, this);
- }
-
- return new PulseAudioOutputStream(params, this);
+ const AudioParameters& params,
+ const std::string& device_id) {
+ DCHECK(!device_id.empty());
+ return new PulseAudioOutputStream(params, device_id, this);
}
AudioInputStream* AudioManagerPulse::MakeInputStream(
@@ -219,7 +230,7 @@ bool AudioManagerPulse::Init() {
// Check if the pulse library is available.
paths[kModulePulse].push_back(kPulseLib);
if (!InitializeStubs(paths)) {
- DLOG(WARNING) << "Failed on loading the Pulse library and symbols";
+ VLOG(1) << "Failed on loading the Pulse library and symbols";
return false;
}
#endif // defined(DLOPEN_PULSEAUDIO)
@@ -247,8 +258,8 @@ bool AudioManagerPulse::Init() {
pa_context_set_state_callback(input_context_, &pulse::ContextStateCallback,
input_mainloop_);
if (pa_context_connect(input_context_, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL)) {
- DLOG(ERROR) << "Failed to connect to the context. Error: "
- << pa_strerror(pa_context_errno(input_context_));
+ VLOG(0) << "Failed to connect to the context. Error: "
+ << pa_strerror(pa_context_errno(input_context_));
return false;
}
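
The new bounds above clamp the client-requested frame count into [512, 8192]. As standalone arithmetic (constants copied from the patch):

    #include <algorithm>

    int ClampOutputBufferSize(int requested_frames) {
      const int kMinimumOutputBufferSize = 512;
      const int kMaximumOutputBufferSize = 8192;
      return std::min(kMaximumOutputBufferSize,
                      std::max(kMinimumOutputBufferSize, requested_frames));
    }
    // e.g. ClampOutputBufferSize(256) == 512,
    //      ClampOutputBufferSize(16384) == 8192.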
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.h b/chromium/media/audio/pulse/audio_manager_pulse.h
index 45fb8cb56fa..150ea51a3e9 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.h
+++ b/chromium/media/audio/pulse/audio_manager_pulse.h
@@ -37,8 +37,7 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -71,7 +70,7 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
// Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
AudioOutputStream* MakeOutputStream(const AudioParameters& params,
- const std::string& input_device_id);
+ const std::string& device_id);
// Called by MakeLinearInputStream and MakeLowLatencyInputStream.
AudioInputStream* MakeInputStream(const AudioParameters& params,
diff --git a/chromium/media/audio/pulse/pulse.sigs b/chromium/media/audio/pulse/pulse.sigs
index b5d927c754c..8d2dab70c60 100644
--- a/chromium/media/audio/pulse/pulse.sigs
+++ b/chromium/media/audio/pulse/pulse.sigs
@@ -1,10 +1,10 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#------------------------------------------------
-# Functions from pulse used in media code.
-#------------------------------------------------
+//------------------------------------------------
+// Functions from pulse used in media code.
+//------------------------------------------------
pa_mainloop_api* pa_threaded_mainloop_get_api(pa_threaded_mainloop* m);
void pa_threaded_mainloop_free(pa_threaded_mainloop* m);
pa_threaded_mainloop* pa_threaded_mainloop_new();
diff --git a/chromium/media/audio/pulse/pulse_input.cc b/chromium/media/audio/pulse/pulse_input.cc
index 54dfc1e05ab..d5cb94ece22 100644
--- a/chromium/media/audio/pulse/pulse_input.cc
+++ b/chromium/media/audio/pulse/pulse_input.cc
@@ -34,6 +34,8 @@ PulseAudioInputStream::PulseAudioInputStream(AudioManagerPulse* audio_manager,
context_state_changed_(false) {
DCHECK(mainloop);
DCHECK(context);
+ CHECK(params_.IsValid());
+ audio_bus_ = AudioBus::Create(params_);
}
PulseAudioInputStream::~PulseAudioInputStream() {
@@ -105,6 +107,7 @@ void PulseAudioInputStream::Stop() {
operation = pa_stream_cork(handle_, 1, &pulse::StreamSuccessCallback,
pa_mainloop_);
WaitForOperationCompletion(pa_mainloop_, operation);
+ callback_ = NULL;
}
void PulseAudioInputStream::Close() {
@@ -125,9 +128,6 @@ void PulseAudioInputStream::Close() {
}
}
- if (callback_)
- callback_->OnClose(this);
-
// Signal to the manager that we're closed and can be removed.
// This should be the last call in the function as it deletes "this".
audio_manager_->ReleaseInputStream(this);
@@ -274,8 +274,11 @@ void PulseAudioInputStream::ReadData() {
int packet_size = params_.GetBytesPerBuffer();
while (buffer_->forward_bytes() >= packet_size) {
buffer_->Read(audio_data_buffer_.get(), packet_size);
- callback_->OnData(this, audio_data_buffer_.get(), packet_size,
- hardware_delay, normalized_volume);
+ audio_bus_->FromInterleaved(audio_data_buffer_.get(),
+ audio_bus_->frames(),
+ params_.bits_per_sample() / 8);
+ callback_->OnData(
+ this, audio_bus_.get(), hardware_delay, normalized_volume);
if (buffer_->forward_bytes() < packet_size)
break;
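
The OnData() change above moves from raw interleaved bytes to a planar AudioBus. The conversion pattern in isolation (a sketch: the patch reuses a member |audio_bus_| rather than allocating per packet, and |raw_bytes|, |stream|, |hardware_delay| and |normalized_volume| stand in for the surrounding code):

    // Convert one packet of int16 interleaved samples to a planar float
    // AudioBus and hand it to the callback (the new OnData() shape).
    void DeliverPacket(media::AudioInputStream* stream,
                       media::AudioInputStream::AudioInputCallback* callback,
                       const media::AudioParameters& params,
                       const uint8* raw_bytes,
                       uint32 hardware_delay,
                       double normalized_volume) {
      scoped_ptr<media::AudioBus> bus = media::AudioBus::Create(params);
      bus->FromInterleaved(raw_bytes, bus->frames(),
                           params.bits_per_sample() / 8);  // Bytes per sample.
      callback->OnData(stream, bus.get(), hardware_delay, normalized_volume);
    }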
diff --git a/chromium/media/audio/pulse/pulse_input.h b/chromium/media/audio/pulse/pulse_input.h
index 7566eacf10b..7e64bb296a8 100644
--- a/chromium/media/audio/pulse/pulse_input.h
+++ b/chromium/media/audio/pulse/pulse_input.h
@@ -75,6 +75,8 @@ class PulseAudioInputStream : public AgcAudioStream<AudioInputStream> {
// Flag indicating the state of the context has been changed.
bool context_state_changed_;
+ scoped_ptr<AudioBus> audio_bus_;
+
base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(PulseAudioInputStream);
diff --git a/chromium/media/audio/pulse/pulse_output.cc b/chromium/media/audio/pulse/pulse_output.cc
index c40d4f65051..19fc47b8be5 100644
--- a/chromium/media/audio/pulse/pulse_output.cc
+++ b/chromium/media/audio/pulse/pulse_output.cc
@@ -6,7 +6,7 @@
#include <pulse/pulseaudio.h>
-#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/pulse/pulse_util.h"
@@ -39,16 +39,16 @@ void PulseAudioOutputStream::StreamRequestCallback(pa_stream* s, size_t len,
}
PulseAudioOutputStream::PulseAudioOutputStream(const AudioParameters& params,
+ const std::string& device_id,
AudioManagerBase* manager)
: params_(params),
+ device_id_(device_id),
manager_(manager),
pa_context_(NULL),
pa_mainloop_(NULL),
pa_stream_(NULL),
volume_(1.0f),
source_callback_(NULL) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
CHECK(params_.IsValid());
audio_bus_ = AudioBus::Create(params_);
}
@@ -62,9 +62,9 @@ PulseAudioOutputStream::~PulseAudioOutputStream() {
}
bool PulseAudioOutputStream::Open() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
return pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &pa_stream_,
- params_, &StreamNotifyCallback,
+ params_, device_id_, &StreamNotifyCallback,
&StreamRequestCallback, this);
}
@@ -107,7 +107,7 @@ void PulseAudioOutputStream::Reset() {
}
void PulseAudioOutputStream::Close() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
Reset();
@@ -157,7 +157,7 @@ void PulseAudioOutputStream::FulfillWriteRequest(size_t requested_bytes) {
}
void PulseAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
CHECK(callback);
CHECK(pa_stream_);
@@ -179,7 +179,7 @@ void PulseAudioOutputStream::Start(AudioSourceCallback* callback) {
}
void PulseAudioOutputStream::Stop() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
// Cork (pause) the stream. Waiting for the main loop lock will ensure
// outstanding callbacks have completed.
@@ -202,13 +202,13 @@ void PulseAudioOutputStream::Stop() {
}
void PulseAudioOutputStream::SetVolume(double volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
volume_ = static_cast<float>(volume);
}
void PulseAudioOutputStream::GetVolume(double* volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
*volume = volume_;
}
diff --git a/chromium/media/audio/pulse/pulse_output.h b/chromium/media/audio/pulse/pulse_output.h
index 583cce7e5bd..e1c00455563 100644
--- a/chromium/media/audio/pulse/pulse_output.h
+++ b/chromium/media/audio/pulse/pulse_output.h
@@ -20,7 +20,10 @@
#ifndef MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
#define MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
+#include <string>
+
#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
@@ -35,6 +38,7 @@ class AudioManagerBase;
class PulseAudioOutputStream : public AudioOutputStream {
public:
PulseAudioOutputStream(const AudioParameters& params,
+ const std::string& device_id,
AudioManagerBase* manager);
virtual ~PulseAudioOutputStream();
@@ -66,6 +70,9 @@ class PulseAudioOutputStream : public AudioOutputStream {
// AudioParameters from the constructor.
const AudioParameters params_;
+ // The device ID for the device to open.
+ const std::string device_id_;
+
// Audio manager that created us. Used to report that we've closed.
AudioManagerBase* manager_;
@@ -84,6 +91,8 @@ class PulseAudioOutputStream : public AudioOutputStream {
// Container for retrieving data from AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> audio_bus_;
+ base::ThreadChecker thread_checker_;
+
DISALLOW_COPY_AND_ASSIGN(PulseAudioOutputStream);
};
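
The DCHECKs in pulse_output.cc now use base::ThreadChecker instead of comparing against the manager's message loop. The pattern in miniature (a sketch, not the actual class):

    #include "base/logging.h"
    #include "base/threading/thread_checker.h"

    class SingleThreadedStream {
     public:
      void Open() { DCHECK(thread_checker_.CalledOnValidThread()); }
      void Close() { DCHECK(thread_checker_.CalledOnValidThread()); }

     private:
      // Binds to the first thread that calls into it; every checked
      // method must then run on that same thread.
      base::ThreadChecker thread_checker_;
    };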
diff --git a/chromium/media/audio/pulse/pulse_unified.cc b/chromium/media/audio/pulse/pulse_unified.cc
deleted file mode 100644
index c68a797469f..00000000000
--- a/chromium/media/audio/pulse/pulse_unified.cc
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/pulse/pulse_unified.h"
-
-#include "base/message_loop/message_loop.h"
-#include "base/time/time.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/pulse/pulse_util.h"
-#include "media/base/seekable_buffer.h"
-
-namespace media {
-
-using pulse::AutoPulseLock;
-using pulse::WaitForOperationCompletion;
-
-static const int kFifoSizeInPackets = 10;
-
-// static, pa_stream_notify_cb
-void PulseAudioUnifiedStream::StreamNotifyCallback(pa_stream* s,
- void* user_data) {
- PulseAudioUnifiedStream* stream =
- static_cast<PulseAudioUnifiedStream*>(user_data);
-
- // Forward unexpected failures to the AudioSourceCallback if available. All
- // these variables are only modified under pa_threaded_mainloop_lock() so this
- // should be thread safe.
- if (s && stream->source_callback_ &&
- pa_stream_get_state(s) == PA_STREAM_FAILED) {
- stream->source_callback_->OnError(stream);
- }
-
- pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
-}
-
-// static, used by pa_stream_set_read_callback.
-void PulseAudioUnifiedStream::ReadCallback(pa_stream* handle, size_t length,
- void* user_data) {
- static_cast<PulseAudioUnifiedStream*>(user_data)->ReadData();
-}
-
-PulseAudioUnifiedStream::PulseAudioUnifiedStream(
- const AudioParameters& params,
- const std::string& input_device_id,
- AudioManagerBase* manager)
- : params_(params),
- input_device_id_(input_device_id),
- manager_(manager),
- pa_context_(NULL),
- pa_mainloop_(NULL),
- input_stream_(NULL),
- output_stream_(NULL),
- volume_(1.0f),
- source_callback_(NULL) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- CHECK(params_.IsValid());
- input_bus_ = AudioBus::Create(params_);
- output_bus_ = AudioBus::Create(params_);
-}
-
-PulseAudioUnifiedStream::~PulseAudioUnifiedStream() {
- // All internal structures should already have been freed in Close(), which
- // calls AudioManagerBase::ReleaseOutputStream() which deletes this object.
- DCHECK(!input_stream_);
- DCHECK(!output_stream_);
- DCHECK(!pa_context_);
- DCHECK(!pa_mainloop_);
-}
-
-bool PulseAudioUnifiedStream::Open() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- // Prepare the recording buffers for the callbacks.
- fifo_.reset(new media::SeekableBuffer(
- 0, kFifoSizeInPackets * params_.GetBytesPerBuffer()));
- input_data_buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
-
- if (!pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &output_stream_,
- params_, &StreamNotifyCallback, NULL, this))
- return false;
-
- if (!pulse::CreateInputStream(pa_mainloop_, pa_context_, &input_stream_,
- params_, input_device_id_,
- &StreamNotifyCallback, this))
- return false;
-
- DCHECK(pa_mainloop_);
- DCHECK(pa_context_);
- DCHECK(input_stream_);
- DCHECK(output_stream_);
- return true;
-}
-
-void PulseAudioUnifiedStream::Reset() {
- if (!pa_mainloop_) {
- DCHECK(!input_stream_);
- DCHECK(!output_stream_);
- DCHECK(!pa_context_);
- return;
- }
-
- {
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Close the input stream.
- if (input_stream_) {
- // Disable all the callbacks before disconnecting.
- pa_stream_set_state_callback(input_stream_, NULL, NULL);
- pa_stream_flush(input_stream_, NULL, NULL);
- pa_stream_disconnect(input_stream_);
-
- // Release PulseAudio structures.
- pa_stream_unref(input_stream_);
- input_stream_ = NULL;
- }
-
- // Close the output stream.
- if (output_stream_) {
- // Release PulseAudio output stream structures.
- pa_stream_set_state_callback(output_stream_, NULL, NULL);
- pa_stream_disconnect(output_stream_);
- pa_stream_unref(output_stream_);
- output_stream_ = NULL;
- }
-
- if (pa_context_) {
- pa_context_disconnect(pa_context_);
- pa_context_set_state_callback(pa_context_, NULL, NULL);
- pa_context_unref(pa_context_);
- pa_context_ = NULL;
- }
- }
-
- pa_threaded_mainloop_stop(pa_mainloop_);
- pa_threaded_mainloop_free(pa_mainloop_);
- pa_mainloop_ = NULL;
-}
-
-void PulseAudioUnifiedStream::Close() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- Reset();
-
- // Signal to the manager that we're closed and can be removed.
- // This should be the last call in the function as it deletes "this".
- manager_->ReleaseOutputStream(this);
-}
-
-void PulseAudioUnifiedStream::WriteData(size_t requested_bytes) {
- CHECK_EQ(requested_bytes, static_cast<size_t>(params_.GetBytesPerBuffer()));
-
- void* buffer = NULL;
- int frames_filled = 0;
- if (source_callback_) {
- CHECK_GE(pa_stream_begin_write(
- output_stream_, &buffer, &requested_bytes), 0);
- uint32 hardware_delay = pulse::GetHardwareLatencyInBytes(
- output_stream_, params_.sample_rate(),
- params_.GetBytesPerFrame());
- fifo_->Read(input_data_buffer_.get(), requested_bytes);
- input_bus_->FromInterleaved(
- input_data_buffer_.get(), params_.frames_per_buffer(), 2);
-
- frames_filled = source_callback_->OnMoreIOData(
- input_bus_.get(),
- output_bus_.get(),
- AudioBuffersState(0, hardware_delay));
- }
-
- // Zero the unfilled data so it plays back as silence.
- if (frames_filled < output_bus_->frames()) {
- output_bus_->ZeroFramesPartial(
- frames_filled, output_bus_->frames() - frames_filled);
- }
-
- // Note: If this ever changes to output raw float the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- output_bus_->Scale(volume_);
- output_bus_->ToInterleaved(
- output_bus_->frames(), params_.bits_per_sample() / 8, buffer);
-
- if (pa_stream_write(output_stream_, buffer, requested_bytes, NULL, 0LL,
- PA_SEEK_RELATIVE) < 0) {
- if (source_callback_) {
- source_callback_->OnError(this);
- }
- }
-}
-
-void PulseAudioUnifiedStream::ReadData() {
- do {
- size_t length = 0;
- const void* data = NULL;
- pa_stream_peek(input_stream_, &data, &length);
- if (!data || length == 0)
- break;
-
- fifo_->Append(reinterpret_cast<const uint8*>(data), length);
-
- // Deliver the recording data to the renderer and drive the playout.
- int packet_size = params_.GetBytesPerBuffer();
- while (fifo_->forward_bytes() >= packet_size) {
- WriteData(packet_size);
- }
-
- // Checks if we still have data.
- pa_stream_drop(input_stream_);
- } while (pa_stream_readable_size(input_stream_) > 0);
-
- pa_threaded_mainloop_signal(pa_mainloop_, 0);
-}
-
-void PulseAudioUnifiedStream::Start(AudioSourceCallback* callback) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- CHECK(callback);
- CHECK(input_stream_);
- CHECK(output_stream_);
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Ensure the context and stream are ready.
- if (pa_context_get_state(pa_context_) != PA_CONTEXT_READY &&
- pa_stream_get_state(output_stream_) != PA_STREAM_READY &&
- pa_stream_get_state(input_stream_) != PA_STREAM_READY) {
- callback->OnError(this);
- return;
- }
-
- source_callback_ = callback;
-
- fifo_->Clear();
-
- // Uncork (resume) the input stream.
- pa_stream_set_read_callback(input_stream_, &ReadCallback, this);
- pa_stream_readable_size(input_stream_);
- pa_operation* operation = pa_stream_cork(input_stream_, 0, NULL, NULL);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- // Uncork (resume) the output stream.
- // We use the recording stream to drive the playback, so we do not need to
- // register the write callback using pa_stream_set_write_callback().
- operation = pa_stream_cork(output_stream_, 0,
- &pulse::StreamSuccessCallback, pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-}
-
-void PulseAudioUnifiedStream::Stop() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- // Cork (pause) the stream. Waiting for the main loop lock will ensure
- // outstanding callbacks have completed.
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Set |source_callback_| to NULL so all FulfillWriteRequest() calls which may
- // occur while waiting on the flush and cork exit immediately.
- source_callback_ = NULL;
-
- // Set the read callback to NULL before flushing the stream, otherwise it
- // will cause deadlock on the operation.
- pa_stream_set_read_callback(input_stream_, NULL, NULL);
- pa_operation* operation = pa_stream_flush(
- input_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- operation = pa_stream_cork(input_stream_, 1, &pulse::StreamSuccessCallback,
- pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- // Flush the stream prior to corking; doing so after will cause hangs. Write
- // callbacks are suspended while inside pa_threaded_mainloop_lock() so this
- // is all thread safe.
- operation = pa_stream_flush(
- output_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- operation = pa_stream_cork(output_stream_, 1, &pulse::StreamSuccessCallback,
- pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-}
-
-void PulseAudioUnifiedStream::SetVolume(double volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- volume_ = static_cast<float>(volume);
-}
-
-void PulseAudioUnifiedStream::GetVolume(double* volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- *volume = volume_;
-}
-
-} // namespace media
diff --git a/chromium/media/audio/pulse/pulse_unified.h b/chromium/media/audio/pulse/pulse_unified.h
deleted file mode 100644
index a800d099a10..00000000000
--- a/chromium/media/audio/pulse/pulse_unified.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
-#define MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
-
-#include <pulse/pulseaudio.h>
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_fifo.h"
-
-namespace media {
-
-class AudioManagerBase;
-class SeekableBuffer;
-
-class PulseAudioUnifiedStream : public AudioOutputStream {
- public:
- PulseAudioUnifiedStream(const AudioParameters& params,
- const std::string& input_device_id,
- AudioManagerBase* manager);
-
- virtual ~PulseAudioUnifiedStream();
-
- // Implementation of PulseAudioUnifiedStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- private:
- // Called by PulseAudio when |pa_stream_| changes state. If an unexpected
- // failure state change happens and |source_callback_| is set,
- // this method will forward the error via OnError().
- static void StreamNotifyCallback(pa_stream* s, void* user_data);
-
- // Called by PulseAudio recording stream when it has data.
- static void ReadCallback(pa_stream* s, size_t length, void* user_data);
-
- // Helpers for ReadCallback() to read and write data.
- void WriteData(size_t requested_bytes);
- void ReadData();
-
- // Close() helper function to free internal structs.
- void Reset();
-
- // AudioParameters from the constructor.
- const AudioParameters params_;
-
- // Device unique ID of the input device.
- const std::string input_device_id_;
-
- // Audio manager that created us. Used to report that we've closed.
- AudioManagerBase* manager_;
-
- // PulseAudio API structs.
- pa_context* pa_context_;
- pa_threaded_mainloop* pa_mainloop_;
- pa_stream* input_stream_;
- pa_stream* output_stream_;
-
- // Float representation of volume from 0.0 to 1.0.
- float volume_;
-
- // Callback to audio data source. Must only be modified while holding a lock
- // on |pa_mainloop_| via pa_threaded_mainloop_lock().
- AudioSourceCallback* source_callback_;
-
- scoped_ptr<AudioBus> input_bus_;
- scoped_ptr<AudioBus> output_bus_;
-
- // Used for input to output buffering.
- scoped_ptr<media::SeekableBuffer> fifo_;
-
- // Temporary storage for recorded data. It gets a packet of data from
- // |fifo_| and delivers the data to the OnMoreIOData() callback.
- scoped_ptr<uint8[]> input_data_buffer_;
-
- DISALLOW_COPY_AND_ASSIGN(PulseAudioUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
diff --git a/chromium/media/audio/pulse/pulse_util.cc b/chromium/media/audio/pulse/pulse_util.cc
index 96831cfabe3..66f52c2c700 100644
--- a/chromium/media/audio/pulse/pulse_util.cc
+++ b/chromium/media/audio/pulse/pulse_util.cc
@@ -41,8 +41,6 @@ pa_channel_position ChromiumToPAChannelPosition(Channels channel) {
return PA_CHANNEL_POSITION_SIDE_LEFT;
case SIDE_RIGHT:
return PA_CHANNEL_POSITION_SIDE_RIGHT;
- case CHANNELS_MAX:
- return PA_CHANNEL_POSITION_INVALID;
default:
NOTREACHED() << "Invalid channel: " << channel;
return PA_CHANNEL_POSITION_INVALID;
@@ -86,7 +84,7 @@ pa_channel_map ChannelLayoutToPAChannelMap(ChannelLayout channel_layout) {
pa_channel_map_init(&channel_map);
channel_map.channels = ChannelLayoutToChannelCount(channel_layout);
- for (Channels ch = LEFT; ch < CHANNELS_MAX;
+ for (Channels ch = LEFT; ch <= CHANNELS_MAX;
ch = static_cast<Channels>(ch + 1)) {
int channel_index = ChannelOrder(channel_layout, ch);
if (channel_index < 0)
@@ -205,6 +203,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
pa_context** context,
pa_stream** stream,
const AudioParameters& params,
+ const std::string& device_id,
pa_stream_notify_cb_t stream_callback,
pa_stream_request_cb_t write_callback,
void* user_data) {
@@ -287,12 +286,16 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
// and error.
RETURN_ON_FAILURE(
pa_stream_connect_playback(
- *stream, NULL, &pa_buffer_attributes,
+ *stream,
+ device_id == AudioManagerBase::kDefaultDeviceId ?
+ NULL : device_id.c_str(),
+ &pa_buffer_attributes,
static_cast<pa_stream_flags_t>(
PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY |
PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NOT_MONOTONIC |
PA_STREAM_START_CORKED),
- NULL, NULL) == 0,
+ NULL,
+ NULL) == 0,
"pa_stream_connect_playback FAILED ");
// Wait for the stream to be ready.
diff --git a/chromium/media/audio/pulse/pulse_util.h b/chromium/media/audio/pulse/pulse_util.h
index da0cb0f42d7..791d6ade83a 100644
--- a/chromium/media/audio/pulse/pulse_util.h
+++ b/chromium/media/audio/pulse/pulse_util.h
@@ -69,6 +69,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
pa_context** context,
pa_stream** stream,
const AudioParameters& params,
+ const std::string& device_id,
pa_stream_notify_cb_t stream_callback,
pa_stream_request_cb_t write_callback,
void* user_data);
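
How the new |device_id| parameter reaches PulseAudio, restated in isolation (mirrors the pa_stream_connect_playback() change in pulse_util.cc above):

    // NULL asks PulseAudio for the server's default sink; any other value
    // is treated as an explicit sink name.
    const char* pa_device =
        device_id == media::AudioManagerBase::kDefaultDeviceId
            ? NULL
            : device_id.c_str();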
diff --git a/chromium/media/audio/sample_rates.cc b/chromium/media/audio/sample_rates.cc
index a082a938ab8..7fa62a79ed1 100644
--- a/chromium/media/audio/sample_rates.cc
+++ b/chromium/media/audio/sample_rates.cc
@@ -4,23 +4,48 @@
#include "media/audio/sample_rates.h"
+#include "base/logging.h"
+
namespace media {
-AudioSampleRate AsAudioSampleRate(int sample_rate) {
+bool ToAudioSampleRate(int sample_rate, AudioSampleRate* asr) {
+ DCHECK(asr);
switch (sample_rate) {
- case 8000: return k8000Hz;
- case 16000: return k16000Hz;
- case 32000: return k32000Hz;
- case 48000: return k48000Hz;
- case 96000: return k96000Hz;
- case 11025: return k11025Hz;
- case 22050: return k22050Hz;
- case 44100: return k44100Hz;
- case 88200: return k88200Hz;
- case 176400: return k176400Hz;
- case 192000: return k192000Hz;
+ case 8000:
+ *asr = k8000Hz;
+ return true;
+ case 16000:
+ *asr = k16000Hz;
+ return true;
+ case 32000:
+ *asr = k32000Hz;
+ return true;
+ case 48000:
+ *asr = k48000Hz;
+ return true;
+ case 96000:
+ *asr = k96000Hz;
+ return true;
+ case 11025:
+ *asr = k11025Hz;
+ return true;
+ case 22050:
+ *asr = k22050Hz;
+ return true;
+ case 44100:
+ *asr = k44100Hz;
+ return true;
+ case 88200:
+ *asr = k88200Hz;
+ return true;
+ case 176400:
+ *asr = k176400Hz;
+ return true;
+ case 192000:
+ *asr = k192000Hz;
+ return true;
}
- return kUnexpectedAudioSampleRate;
+ return false;
}
} // namespace media
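
Callers of the renamed helper now use an out-parameter plus a bool instead of checking a sentinel. A sketch of the new calling convention:

    void ReportSampleRate(int sample_rate) {
      media::AudioSampleRate asr;
      if (media::ToAudioSampleRate(sample_rate, &asr)) {
        // |asr| is a valid enum value, e.g. media::k44100Hz for 44100.
      } else {
        // Unexpected rate; take the fallback path explicitly rather than
        // relying on the old kUnexpectedAudioSampleRate sentinel.
      }
    }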
diff --git a/chromium/media/audio/sample_rates.h b/chromium/media/audio/sample_rates.h
index 7c29e548b34..482ec0fdc8b 100644
--- a/chromium/media/audio/sample_rates.h
+++ b/chromium/media/audio/sample_rates.h
@@ -23,12 +23,13 @@ enum AudioSampleRate {
k88200Hz = 8,
k176400Hz = 9,
k192000Hz = 10,
- kUnexpectedAudioSampleRate // Must always be last!
+ // Must always equal the largest value ever reported:
+ kAudioSampleRateMax = k192000Hz,
};
// Helper method to convert integral values to their respective enum values,
-// or kUnexpectedAudioSampleRate if no match exists.
-MEDIA_EXPORT AudioSampleRate AsAudioSampleRate(int sample_rate);
+// returns false for unexpected sample rates.
+MEDIA_EXPORT bool ToAudioSampleRate(int sample_rate, AudioSampleRate* asr);
} // namespace media
diff --git a/chromium/media/audio/scoped_loop_observer.h b/chromium/media/audio/scoped_loop_observer.h
deleted file mode 100644
index 7aaab542225..00000000000
--- a/chromium/media/audio/scoped_loop_observer.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
-#define MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
-
-namespace base {
-class WaitableEvent;
-}
-
-namespace media {
-
-// A common base class for AudioOutputDevice and AudioInputDevice that manages
-// a message loop pointer and monitors it for destruction. If the object goes
-// out of scope before the message loop, the object will automatically remove
-// itself from the message loop's list of destruction observers.
-// NOTE: The class that inherits from this class must implement the
-// WillDestroyCurrentMessageLoop virtual method from DestructionObserver.
-class ScopedLoopObserver
- : public base::MessageLoop::DestructionObserver {
- public:
- explicit ScopedLoopObserver(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
-
- protected:
- virtual ~ScopedLoopObserver();
-
- // Accessor to the loop that's used by the derived class.
- const scoped_refptr<base::MessageLoopProxy>& message_loop() { return loop_; }
-
- private:
- // Call to add or remove ourselves from the list of destruction observers for
- // the message loop.
- void ObserveLoopDestruction(bool enable, base::WaitableEvent* done);
-
- // A pointer to the message loop's proxy. In case the loop gets destroyed
- // before this object goes out of scope, PostTask etc will fail but not crash.
- scoped_refptr<base::MessageLoopProxy> loop_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedLoopObserver);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
diff --git a/chromium/media/audio/scoped_loop_observer.cc b/chromium/media/audio/scoped_task_runner_observer.cc
index 01187ec8f99..9f4eac28511 100644
--- a/chromium/media/audio/scoped_loop_observer.cc
+++ b/chromium/media/audio/scoped_task_runner_observer.cc
@@ -2,27 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/scoped_loop_observer.h"
+#include "media/audio/scoped_task_runner_observer.h"
#include "base/bind.h"
#include "base/synchronization/waitable_event.h"
namespace media {
-ScopedLoopObserver::ScopedLoopObserver(
- const scoped_refptr<base::MessageLoopProxy>& loop)
- : loop_(loop) {
+ScopedTaskRunnerObserver::ScopedTaskRunnerObserver(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner) {
ObserveLoopDestruction(true, NULL);
}
-ScopedLoopObserver::~ScopedLoopObserver() {
+ScopedTaskRunnerObserver::~ScopedTaskRunnerObserver() {
ObserveLoopDestruction(false, NULL);
}
-void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
- base::WaitableEvent* done) {
+void ScopedTaskRunnerObserver::ObserveLoopDestruction(
+ bool enable,
+ base::WaitableEvent* done) {
// Note: |done| may be NULL.
- if (loop_->BelongsToCurrentThread()) {
+ if (task_runner_->BelongsToCurrentThread()) {
base::MessageLoop* loop = base::MessageLoop::current();
if (enable) {
loop->AddDestructionObserver(this);
@@ -31,8 +32,8 @@ void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
}
} else {
base::WaitableEvent event(false, false);
- if (loop_->PostTask(FROM_HERE,
- base::Bind(&ScopedLoopObserver::ObserveLoopDestruction,
+ if (task_runner_->PostTask(FROM_HERE,
+ base::Bind(&ScopedTaskRunnerObserver::ObserveLoopDestruction,
base::Unretained(this), enable, &event))) {
event.Wait();
} else {
diff --git a/chromium/media/audio/scoped_task_runner_observer.h b/chromium/media/audio/scoped_task_runner_observer.h
new file mode 100644
index 00000000000..ce9adf96713
--- /dev/null
+++ b/chromium/media/audio/scoped_task_runner_observer.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SCOPED_TASK_RUNNER_OBSERVER_H_
+#define MEDIA_AUDIO_SCOPED_TASK_RUNNER_OBSERVER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+class WaitableEvent;
+}
+
+namespace media {
+
+// A common base class for AudioOutputDevice and AudioInputDevice that manages
+// a task runner and monitors it for destruction. If the object goes out of
+// scope before the task runner, the object will automatically remove itself
+// from the task runner's list of destruction observers.
+// NOTE: The class that inherits from this class must implement the
+// WillDestroyCurrentMessageLoop virtual method from DestructionObserver.
+class ScopedTaskRunnerObserver
+ : public base::MessageLoop::DestructionObserver {
+ public:
+ explicit ScopedTaskRunnerObserver(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+
+ protected:
+ virtual ~ScopedTaskRunnerObserver();
+
+  // Accessor to the task runner that's used by the derived class.
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner() {
+ return task_runner_;
+ }
+
+ private:
+ // Call to add or remove ourselves from the list of destruction observers for
+ // the message loop.
+ void ObserveLoopDestruction(bool enable, base::WaitableEvent* done);
+
+  // A pointer to the task runner. In case it gets destroyed before this
+  // object goes out of scope, PostTask() etc. will fail but not crash.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunnerObserver);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SCOPED_TASK_RUNNER_OBSERVER_H_
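
A minimal subclass sketch for the renamed base class (class name hypothetical); the only obligation it inherits is the DestructionObserver virtual noted in the header above:

    #include "base/compiler_specific.h"
    #include "base/memory/ref_counted.h"
    #include "base/single_thread_task_runner.h"
    #include "media/audio/scoped_task_runner_observer.h"

    class HypotheticalAudioDevice : public media::ScopedTaskRunnerObserver {
     public:
      explicit HypotheticalAudioDevice(
          const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
          : media::ScopedTaskRunnerObserver(task_runner) {}

     protected:
      virtual ~HypotheticalAudioDevice() {}

      // Required by base::MessageLoop::DestructionObserver.
      virtual void WillDestroyCurrentMessageLoop() OVERRIDE {
        // Drop any state tied to the soon-to-die task runner's thread.
      }
    };
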
diff --git a/chromium/media/audio/simple_sources.cc b/chromium/media/audio/simple_sources.cc
index 275413a232c..039029e5388 100644
--- a/chromium/media/audio/simple_sources.cc
+++ b/chromium/media/audio/simple_sources.cc
@@ -48,12 +48,6 @@ int SineWaveAudioSource::OnMoreData(AudioBus* audio_bus,
return max_frames;
}
-int SineWaveAudioSource::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) {
- return OnMoreData(dest, audio_buffers);
-}
-
void SineWaveAudioSource::OnError(AudioOutputStream* stream) {
errors_++;
}
diff --git a/chromium/media/audio/simple_sources.h b/chromium/media/audio/simple_sources.h
index 449f875b5d6..6303386ead1 100644
--- a/chromium/media/audio/simple_sources.h
+++ b/chromium/media/audio/simple_sources.h
@@ -29,9 +29,6 @@ class MEDIA_EXPORT SineWaveAudioSource
// Implementation of AudioSourceCallback.
virtual int OnMoreData(AudioBus* audio_bus,
AudioBuffersState audio_buffers) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) OVERRIDE;
virtual void OnError(AudioOutputStream* stream) OVERRIDE;
// The number of OnMoreData() and OnError() calls respectively.
diff --git a/chromium/media/audio/sounds/audio_stream_handler.cc b/chromium/media/audio/sounds/audio_stream_handler.cc
index 08608ac4187..645fcb366a3 100644
--- a/chromium/media/audio/sounds/audio_stream_handler.cc
+++ b/chromium/media/audio/sounds/audio_stream_handler.cc
@@ -6,8 +6,11 @@
#include <string>
+#include "base/cancelable_callback.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
#include "media/base/channel_layout.h"
@@ -22,6 +25,9 @@ const double kOutputVolumePercent = 0.8;
// The number of frames each OnMoreData() call will request.
const int kDefaultFrameCount = 1024;
+// Keep-alive timeout for the audio stream.
+const int kKeepAliveMs = 1500;
+
AudioStreamHandler::TestObserver* g_observer_for_testing = NULL;
AudioOutputStream::AudioSourceCallback* g_audio_source_for_testing = NULL;
@@ -30,36 +36,53 @@ AudioOutputStream::AudioSourceCallback* g_audio_source_for_testing = NULL;
class AudioStreamHandler::AudioStreamContainer
: public AudioOutputStream::AudioSourceCallback {
public:
- AudioStreamContainer(const WavAudioHandler& wav_audio,
- const AudioParameters& params)
- : stream_(NULL),
- wav_audio_(wav_audio),
- params_(params),
- cursor_(0) {
- }
+ AudioStreamContainer(const WavAudioHandler& wav_audio)
+ : started_(false),
+ stream_(NULL),
+ cursor_(0),
+ delayed_stop_posted_(false),
+ wav_audio_(wav_audio) {}
virtual ~AudioStreamContainer() {
- DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
}
void Play() {
- DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
if (!stream_) {
- stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params_,
- std::string(),
+ const AudioParameters& p = wav_audio_.params();
+ const AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ p.channel_layout(),
+ p.sample_rate(),
+ p.bits_per_sample(),
+ kDefaultFrameCount);
+ stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params,
std::string());
if (!stream_ || !stream_->Open()) {
LOG(ERROR) << "Failed to open an output stream.";
return;
}
stream_->SetVolume(kOutputVolumePercent);
- } else {
- // TODO (ygorshenin@): implement smart stream rewind.
- stream_->Stop();
}
- cursor_ = 0;
+ {
+ base::AutoLock al(state_lock_);
+
+ delayed_stop_posted_ = false;
+ stop_closure_.Reset(base::Bind(&AudioStreamContainer::StopStream,
+ base::Unretained(this)));
+
+ if (started_) {
+ if (wav_audio_.AtEnd(cursor_))
+ cursor_ = 0;
+ return;
+ }
+
+ cursor_ = 0;
+ }
+
+ started_ = true;
if (g_audio_source_for_testing)
stream_->Start(g_audio_source_for_testing);
else
@@ -70,15 +93,12 @@ class AudioStreamHandler::AudioStreamContainer
}
void Stop() {
- DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
- if (!stream_)
- return;
- stream_->Stop();
- stream_->Close();
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
+ StopStream();
+ if (stream_)
+ stream_->Close();
stream_ = NULL;
-
- if (g_observer_for_testing)
- g_observer_for_testing->OnStop(cursor_);
+ stop_closure_.Cancel();
}
private:
@@ -86,35 +106,51 @@ class AudioStreamHandler::AudioStreamContainer
// Following methods could be called from *ANY* thread.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState /* state */) OVERRIDE {
+ base::AutoLock al(state_lock_);
size_t bytes_written = 0;
+
if (wav_audio_.AtEnd(cursor_) ||
!wav_audio_.CopyTo(dest, cursor_, &bytes_written)) {
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ if (delayed_stop_posted_)
+ return 0;
+ delayed_stop_posted_ = true;
+ AudioManager::Get()->GetTaskRunner()->PostDelayedTask(
FROM_HERE,
- base::Bind(&AudioStreamContainer::Stop, base::Unretained(this)));
+ stop_closure_.callback(),
+ base::TimeDelta::FromMilliseconds(kKeepAliveMs));
return 0;
}
cursor_ += bytes_written;
-
return dest->frames();
}
- virtual int OnMoreIOData(AudioBus* /* source */,
- AudioBus* dest,
- AudioBuffersState state) OVERRIDE {
- return OnMoreData(dest, state);
- }
-
virtual void OnError(AudioOutputStream* /* stream */) OVERRIDE {
LOG(ERROR) << "Error during system sound reproduction.";
}
- AudioOutputStream* stream_;
+ void StopStream() {
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
- const WavAudioHandler wav_audio_;
- const AudioParameters params_;
+ if (stream_ && started_) {
+ // Do not hold the |state_lock_| while stopping the output stream.
+ stream_->Stop();
+ if (g_observer_for_testing)
+ g_observer_for_testing->OnStop(cursor_);
+ }
+
+ started_ = false;
+ }
+ // Must only be accessed on the AudioManager::GetTaskRunner() thread.
+ bool started_;
+ AudioOutputStream* stream_;
+
+  // All variables below must be accessed under |state_lock_| while
+  // |started_| is true.
+ base::Lock state_lock_;
size_t cursor_;
+ bool delayed_stop_posted_;
+ const WavAudioHandler wav_audio_;
+ base::CancelableClosure stop_closure_;
DISALLOW_COPY_AND_ASSIGN(AudioStreamContainer);
};
@@ -127,26 +163,21 @@ AudioStreamHandler::AudioStreamHandler(const base::StringPiece& wav_data)
LOG(ERROR) << "Can't get access to audio manager.";
return;
}
- AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- GuessChannelLayout(wav_audio_.num_channels()),
- wav_audio_.sample_rate(),
- wav_audio_.bits_per_sample(),
- kDefaultFrameCount);
- if (!params.IsValid()) {
+ if (!wav_audio_.params().IsValid()) {
LOG(ERROR) << "Audio params are invalid.";
return;
}
- stream_.reset(new AudioStreamContainer(wav_audio_, params));
+ stream_.reset(new AudioStreamContainer(wav_audio_));
initialized_ = true;
}
AudioStreamHandler::~AudioStreamHandler() {
DCHECK(CalledOnValidThread());
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioStreamContainer::Stop, base::Unretained(stream_.get())));
- AudioManager::Get()->GetMessageLoop()->DeleteSoon(FROM_HERE,
- stream_.release());
+ AudioManager::Get()->GetTaskRunner()->DeleteSoon(FROM_HERE,
+ stream_.release());
}
bool AudioStreamHandler::IsInitialized() const {
@@ -160,7 +191,7 @@ bool AudioStreamHandler::Play() {
if (!IsInitialized())
return false;
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(base::IgnoreResult(&AudioStreamContainer::Play),
base::Unretained(stream_.get())));
@@ -169,7 +200,7 @@ bool AudioStreamHandler::Play() {
void AudioStreamHandler::Stop() {
DCHECK(CalledOnValidThread());
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioStreamContainer::Stop, base::Unretained(stream_.get())));
}
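
The delayed-stop logic above reduces to a re-armable base::CancelableClosure plus PostDelayedTask(); a condensed sketch of the pattern under the same 1500 ms timeout (StopStream here is a hypothetical free-function stand-in for the member above):

    #include "base/bind.h"
    #include "base/cancelable_callback.h"
    #include "base/location.h"
    #include "base/single_thread_task_runner.h"
    #include "base/time/time.h"

    void StopStream();  // Hypothetical; stops the underlying output stream.

    void ArmDelayedStop(
        const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
        base::CancelableClosure* stop_closure) {
      // Reset() re-arms the closure; cancelling it later (e.g. on the next
      // Play() or an explicit Stop()) turns the already-posted task into a
      // no-op, which is what keeps the stream alive across rapid replays.
      stop_closure->Reset(base::Bind(&StopStream));
      task_runner->PostDelayedTask(
          FROM_HERE, stop_closure->callback(),
          base::TimeDelta::FromMilliseconds(1500));
    }
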
diff --git a/chromium/media/audio/sounds/audio_stream_handler.h b/chromium/media/audio/sounds/audio_stream_handler.h
index 7c63a24f034..f814aaef599 100644
--- a/chromium/media/audio/sounds/audio_stream_handler.h
+++ b/chromium/media/audio/sounds/audio_stream_handler.h
@@ -42,10 +42,12 @@ class MEDIA_EXPORT AudioStreamHandler : public base::NonThreadSafe {
// Returns true iff AudioStreamHandler is correctly initialized.
bool IsInitialized() const;
- // Stops any previous playback if it's still not completed and
- // starts new playback. Volume level will be set according to
- // current settings and won't be changed during playback. Returns
- // true iff new playback was successfully started.
+ // Plays sound. Volume level will be set according to current settings
+ // and won't be changed during playback. Returns true iff new playback
+ // was successfully started.
+ //
+  // NOTE: if the current playback isn't at the end of the stream, the new
+  // playback request is dropped, but true is still returned.
bool Play();
// Stops current playback.
diff --git a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
index 50bc301c38a..acf472a0e48 100644
--- a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
+++ b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
@@ -74,7 +74,7 @@ TEST_F(AudioStreamHandlerTest, Play) {
ASSERT_EQ(4, observer.cursor());
}
-TEST_F(AudioStreamHandlerTest, Rewind) {
+TEST_F(AudioStreamHandlerTest, ConsecutivePlayRequests) {
base::RunLoop run_loop;
TestObserver observer(run_loop.QuitClosure());
SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 8000);
@@ -89,19 +89,19 @@ TEST_F(AudioStreamHandlerTest, Rewind) {
FROM_HERE,
base::Bind(base::IgnoreResult(&AudioStreamHandler::Play),
base::Unretained(audio_stream_handler())),
- base::TimeDelta::FromSeconds(3));
+ base::TimeDelta::FromSeconds(1));
base::MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&AudioStreamHandler::Stop,
base::Unretained(audio_stream_handler())),
- base::TimeDelta::FromSeconds(6));
+ base::TimeDelta::FromSeconds(2));
run_loop.Run();
SetObserverForTesting(NULL);
SetAudioSourceForTesting(NULL);
- ASSERT_EQ(2, observer.num_play_requests());
+ ASSERT_EQ(1, observer.num_play_requests());
ASSERT_EQ(1, observer.num_stop_requests());
}
diff --git a/chromium/media/audio/sounds/sounds_manager.cc b/chromium/media/audio/sounds/sounds_manager.cc
index e93dc6588dd..e80843685cc 100644
--- a/chromium/media/audio/sounds/sounds_manager.cc
+++ b/chromium/media/audio/sounds/sounds_manager.cc
@@ -4,21 +4,20 @@
#include "media/audio/sounds/sounds_manager.h"
-#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager.h"
#include "media/audio/sounds/audio_stream_handler.h"
-#include "media/base/media_switches.h"
namespace media {
namespace {
SoundsManager* g_instance = NULL;
+bool g_initialized_for_testing = false;
// SoundsManagerImpl ---------------------------------------------------
@@ -35,13 +34,14 @@ class SoundsManagerImpl : public SoundsManager {
private:
base::hash_map<SoundKey, linked_ptr<AudioStreamHandler> > handlers_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DISALLOW_COPY_AND_ASSIGN(SoundsManagerImpl);
};
SoundsManagerImpl::SoundsManagerImpl()
- : message_loop_(AudioManager::Get()->GetMessageLoop()) {}
+ : task_runner_(AudioManager::Get()->GetTaskRunner()) {
+}
SoundsManagerImpl::~SoundsManagerImpl() { DCHECK(CalledOnValidThread()); }
@@ -74,46 +74,7 @@ base::TimeDelta SoundsManagerImpl::GetDuration(SoundKey key) {
return base::TimeDelta();
}
const WavAudioHandler& wav_audio = handlers_[key]->wav_audio_handler();
- const int64 size = wav_audio.size();
- const int64 rate = wav_audio.byte_rate();
- return base::TimeDelta::FromMicroseconds(size * 1000000 / rate);
-}
-
-// SoundsManagerStub ---------------------------------------------------
-
-class SoundsManagerStub : public SoundsManager {
- public:
- SoundsManagerStub();
- virtual ~SoundsManagerStub();
-
- // SoundsManager implementation:
- virtual bool Initialize(SoundKey key,
- const base::StringPiece& data) OVERRIDE;
- virtual bool Play(SoundKey key) OVERRIDE;
- virtual base::TimeDelta GetDuration(SoundKey key) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SoundsManagerStub);
-};
-
-SoundsManagerStub::SoundsManagerStub() {}
-
-SoundsManagerStub::~SoundsManagerStub() { DCHECK(CalledOnValidThread()); }
-
-bool SoundsManagerStub::Initialize(SoundKey /* key */,
- const base::StringPiece& /* data */) {
- DCHECK(CalledOnValidThread());
- return false;
-}
-
-bool SoundsManagerStub::Play(SoundKey /* key */) {
- DCHECK(CalledOnValidThread());
- return false;
-}
-
-base::TimeDelta SoundsManagerStub::GetDuration(SoundKey /* key */) {
- DCHECK(CalledOnValidThread());
- return base::TimeDelta();
+ return wav_audio.params().GetBufferDuration();
}
} // namespace
@@ -124,13 +85,11 @@ SoundsManager::~SoundsManager() { DCHECK(CalledOnValidThread()); }
// static
void SoundsManager::Create() {
- CHECK(!g_instance) << "SoundsManager::Create() is called twice";
- const bool enabled = !CommandLine::ForCurrentProcess()->HasSwitch(
- ::switches::kDisableSystemSoundsManager);
- if (enabled)
- g_instance = new SoundsManagerImpl();
- else
- g_instance = new SoundsManagerStub();
+ CHECK(!g_instance || g_initialized_for_testing)
+ << "SoundsManager::Create() is called twice";
+ if (g_initialized_for_testing)
+ return;
+ g_instance = new SoundsManagerImpl();
}
// static
@@ -147,4 +106,12 @@ SoundsManager* SoundsManager::Get() {
return g_instance;
}
+// static
+void SoundsManager::InitializeForTesting(SoundsManager* manager) {
+ CHECK(!g_instance) << "SoundsManager is already initialized.";
+ CHECK(manager);
+ g_instance = manager;
+ g_initialized_for_testing = true;
+}
+
} // namespace media
diff --git a/chromium/media/audio/sounds/sounds_manager.h b/chromium/media/audio/sounds/sounds_manager.h
index 7ff6aafffdc..71184da3522 100644
--- a/chromium/media/audio/sounds/sounds_manager.h
+++ b/chromium/media/audio/sounds/sounds_manager.h
@@ -29,6 +29,10 @@ class MEDIA_EXPORT SoundsManager : public base::NonThreadSafe {
// Returns a pointer to a singleton instance of the SoundsManager.
static SoundsManager* Get();
+  // Initializes the SoundsManager for testing. The |manager| will be owned
+  // by the singleton and deleted by Shutdown().
+ static void InitializeForTesting(SoundsManager* manager);
+
// Initializes SoundsManager with the wav data for the system
// sounds. Returns true if SoundsManager was successfully
// initialized.
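
A sketch of the intended test-side usage (FakeSoundsManager is hypothetical; includes omitted, test-file context assumed):

    class FakeSoundsManager : public media::SoundsManager {
     public:
      virtual bool Initialize(SoundKey key,
                              const base::StringPiece& data) OVERRIDE {
        return true;
      }
      virtual bool Play(SoundKey key) OVERRIDE { return true; }
      virtual base::TimeDelta GetDuration(SoundKey key) OVERRIDE {
        return base::TimeDelta();
      }
    };

    // In test setup. Ownership passes to the singleton; Shutdown() deletes
    // the fake, and a later SoundsManager::Create() becomes a no-op.
    media::SoundsManager::InitializeForTesting(new FakeSoundsManager);
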
diff --git a/chromium/media/audio/sounds/sounds_manager_unittest.cc b/chromium/media/audio/sounds/sounds_manager_unittest.cc
index 5aa3694e838..78f564ec8b9 100644
--- a/chromium/media/audio/sounds/sounds_manager_unittest.cc
+++ b/chromium/media/audio/sounds/sounds_manager_unittest.cc
@@ -54,7 +54,7 @@ TEST_F(SoundsManagerTest, Play) {
ASSERT_TRUE(SoundsManager::Get()->Initialize(
kTestAudioKey,
base::StringPiece(kTestAudioData, arraysize(kTestAudioData))));
- ASSERT_EQ(41,
+ ASSERT_EQ(20,
SoundsManager::Get()->GetDuration(kTestAudioKey).InMicroseconds());
ASSERT_TRUE(SoundsManager::Get()->Play(kTestAudioKey));
run_loop.Run();
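
The 41 -> 20 change follows from the test data visible in this patch: the WAV payload is 4 bytes (see the wav_audio_handler_unittest.cc hunk below) at 2 channels x 16 bits per sample, i.e. exactly one frame, so params().GetBufferDuration() yields 1 / 48000 s ~= 20.8 us, truncated to 20 by InMicroseconds(). The old value came from the header's (stale) byte rate: 4 * 10^6 / 96000 ~= 41 us.
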
diff --git a/chromium/media/audio/sounds/wav_audio_handler.cc b/chromium/media/audio/sounds/wav_audio_handler.cc
index 20eab8be437..b87baa8fd3f 100644
--- a/chromium/media/audio/sounds/wav_audio_handler.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler.cc
@@ -33,14 +33,14 @@ const size_t kFmtChunkMinimumSize = 16;
const size_t kAudioFormatOffset = 0;
const size_t kChannelOffset = 2;
const size_t kSampleRateOffset = 4;
-const size_t kByteRateOffset = 8;
const size_t kBitsPerSampleOffset = 14;
// Some constants for audio format.
const int kAudioFormatPCM = 1;
// Reads an integer from |data| with |offset|.
-template<typename T> T ReadInt(const base::StringPiece& data, size_t offset) {
+template <typename T>
+T ReadInt(const base::StringPiece& data, size_t offset) {
CHECK_LE(offset + sizeof(T), data.size());
T result;
memcpy(&result, data.data() + offset, sizeof(T));
@@ -57,7 +57,6 @@ namespace media {
WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
: num_channels_(0),
sample_rate_(0),
- byte_rate_(0),
bits_per_sample_(0) {
CHECK_LE(kWavFileHeaderSize, wav_data.size()) << "wav data is too small";
CHECK(wav_data.starts_with(kChunkId) &&
@@ -72,11 +71,17 @@ WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
CHECK_LE(0, length) << "can't parse wav sub-chunk";
offset += length;
}
-}
-WavAudioHandler::~WavAudioHandler() {
+ const int frame_count = data_.size() * 8 / num_channels_ / bits_per_sample_;
+ params_ = AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ GuessChannelLayout(num_channels_),
+ sample_rate_,
+ bits_per_sample_,
+ frame_count);
}
+WavAudioHandler::~WavAudioHandler() {}
+
bool WavAudioHandler::AtEnd(size_t cursor) const {
return data_.size() <= cursor;
}
@@ -86,18 +91,20 @@ bool WavAudioHandler::CopyTo(AudioBus* bus,
size_t* bytes_written) const {
if (!bus)
return false;
- if (bus->channels() != num_channels_) {
- LOG(ERROR) << "Number of channel mismatch.";
+ if (bus->channels() != params_.channels()) {
+ DVLOG(1) << "Number of channel mismatch.";
return false;
}
if (AtEnd(cursor)) {
bus->Zero();
return true;
}
- const int remaining_frames = (data_.size() - cursor) / bytes_per_frame_;
+ const int remaining_frames =
+ (data_.size() - cursor) / params_.GetBytesPerFrame();
const int frames = std::min(bus->frames(), remaining_frames);
- bus->FromInterleaved(data_.data() + cursor, frames, bytes_per_sample_);
- *bytes_written = frames * bytes_per_frame_;
+ bus->FromInterleaved(data_.data() + cursor, frames,
+ params_.bits_per_sample() / 8);
+ *bytes_written = frames * params_.GetBytesPerFrame();
bus->ZeroFramesPartial(frames, bus->frames() - frames);
return true;
}
@@ -113,23 +120,20 @@ int WavAudioHandler::ParseSubChunk(const base::StringPiece& data) {
if (!ParseDataChunk(data.substr(kChunkHeaderSize, chunk_length)))
return -1;
} else {
- LOG(ERROR) << "Unknown data chunk: " << data.substr(0, 4) << ".";
+ DVLOG(1) << "Unknown data chunk: " << data.substr(0, 4) << ".";
}
return chunk_length + kChunkHeaderSize;
}
bool WavAudioHandler::ParseFmtChunk(const base::StringPiece& data) {
if (data.size() < kFmtChunkMinimumSize) {
- LOG(ERROR) << "Data size " << data.size() << " is too short.";
+ DLOG(ERROR) << "Data size " << data.size() << " is too short.";
return false;
}
DCHECK_EQ(ReadInt<uint16>(data, kAudioFormatOffset), kAudioFormatPCM);
num_channels_ = ReadInt<uint16>(data, kChannelOffset);
sample_rate_ = ReadInt<uint32>(data, kSampleRateOffset);
- byte_rate_ = ReadInt<uint32>(data, kByteRateOffset);
bits_per_sample_ = ReadInt<uint16>(data, kBitsPerSampleOffset);
- bytes_per_sample_ = bits_per_sample_ >> 3;
- bytes_per_frame_ = num_channels_ * bytes_per_sample_;
return true;
}
diff --git a/chromium/media/audio/sounds/wav_audio_handler.h b/chromium/media/audio/sounds/wav_audio_handler.h
index a2c3e023650..82b5cc5f842 100644
--- a/chromium/media/audio/sounds/wav_audio_handler.h
+++ b/chromium/media/audio/sounds/wav_audio_handler.h
@@ -6,6 +6,8 @@
#define MEDIA_AUDIO_SOUNDS_WAV_AUDIO_HANDLER_H_
#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
#include "media/base/media_export.h"
namespace media {
@@ -27,11 +29,8 @@ class MEDIA_EXPORT WavAudioHandler {
// |bytes_written|. |bytes_written| should not be NULL.
bool CopyTo(AudioBus* bus, size_t cursor, size_t* bytes_written) const;
- int size() const { return data_.size(); }
- uint16 num_channels() const { return num_channels_; }
- uint32 sample_rate() const { return sample_rate_; }
- uint32 byte_rate() const { return byte_rate_; }
- uint16 bits_per_sample() const { return bits_per_sample_; }
+ const AudioParameters& params() const { return params_; }
+ const base::StringPiece& data() const { return data_; }
private:
// Parses a chunk of wav format data. Returns the length of the chunk.
@@ -46,12 +45,11 @@ class MEDIA_EXPORT WavAudioHandler {
// Data part of the |wav_data_|.
base::StringPiece data_;
+ AudioParameters params_;
+
uint16 num_channels_;
uint32 sample_rate_;
- uint32 byte_rate_;
uint16 bits_per_sample_;
- int bytes_per_sample_;
- int bytes_per_frame_;
};
} // namespace media
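
A minimal consumer sketch of the slimmed-down interface, assuming |wav_data| holds a valid PCM WAV (function name hypothetical):

    #include "base/memory/scoped_ptr.h"
    #include "base/strings/string_piece.h"
    #include "media/audio/sounds/wav_audio_handler.h"
    #include "media/base/audio_bus.h"

    void FillOneBuffer(const base::StringPiece& wav_data) {
      media::WavAudioHandler handler(wav_data);
      // params() now carries everything the removed accessors exposed
      // (channels, sample rate, bits per sample, frame count).
      scoped_ptr<media::AudioBus> bus =
          media::AudioBus::Create(handler.params());
      size_t cursor = 0;
      size_t bytes_written = 0;
      if (handler.CopyTo(bus.get(), cursor, &bytes_written))
        cursor += bytes_written;
    }
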
diff --git a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
index a7f8728be35..6098b9399e0 100644
--- a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
@@ -16,18 +16,22 @@ namespace media {
TEST(WavAudioHandlerTest, SampleDataTest) {
WavAudioHandler handler(base::StringPiece(kTestAudioData,
arraysize(kTestAudioData)));
- ASSERT_EQ(static_cast<uint16>(2), handler.num_channels());
- ASSERT_EQ(static_cast<uint16>(16), handler.bits_per_sample());
- ASSERT_EQ(static_cast<uint32>(48000), handler.sample_rate());
- ASSERT_EQ(static_cast<uint32>(96000), handler.byte_rate());
+ const AudioParameters& params = handler.params();
+ ASSERT_EQ(2, params.channels());
+ ASSERT_EQ(16, params.bits_per_sample());
+ ASSERT_EQ(48000, params.sample_rate());
+ ASSERT_EQ(192000, params.GetBytesPerSecond());
+
+ ASSERT_EQ(4U, handler.data().size());
+ const char kData[] = "\x01\x00\x01\x00";
+ ASSERT_EQ(base::StringPiece(kData, arraysize(kData) - 1), handler.data());
- ASSERT_EQ(4, handler.size());
scoped_ptr<AudioBus> bus = AudioBus::Create(
- handler.num_channels(),
- handler.size() / handler.num_channels());
+ params.channels(), handler.data().size() / params.channels());
+
size_t bytes_written;
ASSERT_TRUE(handler.CopyTo(bus.get(), 0, &bytes_written));
- ASSERT_EQ(static_cast<size_t>(handler.size()), bytes_written);
+ ASSERT_EQ(static_cast<size_t>(handler.data().size()), bytes_written);
}
} // namespace media
diff --git a/chromium/media/audio/test_audio_input_controller_factory.cc b/chromium/media/audio/test_audio_input_controller_factory.cc
index 3aeb7773366..4490dc9ac84 100644
--- a/chromium/media/audio/test_audio_input_controller_factory.cc
+++ b/chromium/media/audio/test_audio_input_controller_factory.cc
@@ -18,7 +18,7 @@ TestAudioInputController::TestAudioInputController(
audio_parameters_(audio_parameters),
factory_(factory),
event_handler_(event_handler) {
- message_loop_ = audio_manager->GetMessageLoop();
+ task_runner_ = audio_manager->GetTaskRunner();
}
TestAudioInputController::~TestAudioInputController() {
@@ -32,7 +32,7 @@ void TestAudioInputController::Record() {
}
void TestAudioInputController::Close(const base::Closure& closed_task) {
- message_loop_->PostTask(FROM_HERE, closed_task);
+ task_runner_->PostTask(FROM_HERE, closed_task);
if (factory_->delegate_)
factory_->delegate_->TestAudioControllerClosed(this);
}
diff --git a/chromium/media/audio/virtual_audio_input_stream.cc b/chromium/media/audio/virtual_audio_input_stream.cc
index 9c4e7a1f16f..f660b9c9521 100644
--- a/chromium/media/audio/virtual_audio_input_stream.cc
+++ b/chromium/media/audio/virtual_audio_input_stream.cc
@@ -8,8 +8,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/virtual_audio_output_stream.h"
namespace media {
@@ -50,18 +49,18 @@ class LoopbackAudioConverter : public AudioConverter::InputCallback {
VirtualAudioInputStream::VirtualAudioInputStream(
const AudioParameters& params,
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AfterCloseCallback& after_close_cb)
- : worker_loop_(worker_loop),
+ : worker_task_runner_(worker_task_runner),
after_close_cb_(after_close_cb),
callback_(NULL),
buffer_(new uint8[params.GetBytesPerBuffer()]),
params_(params),
mixer_(params_, params_, false),
num_attached_output_streams_(0),
- fake_consumer_(worker_loop_, params_) {
+ fake_consumer_(worker_task_runner_, params_) {
DCHECK(params_.IsValid());
- DCHECK(worker_loop_.get());
+ DCHECK(worker_task_runner_.get());
// VAIS can be constructed on any thread, but will DCHECK that all
// AudioInputStream methods are called from the same thread.
@@ -97,6 +96,7 @@ void VirtualAudioInputStream::Start(AudioInputCallback* callback) {
void VirtualAudioInputStream::Stop() {
DCHECK(thread_checker_.CalledOnValidThread());
fake_consumer_.Stop();
+ callback_ = NULL;
}
void VirtualAudioInputStream::AddOutputStream(
@@ -133,31 +133,19 @@ void VirtualAudioInputStream::RemoveOutputStream(
}
void VirtualAudioInputStream::PumpAudio(AudioBus* audio_bus) {
- DCHECK(worker_loop_->BelongsToCurrentThread());
- DCHECK(callback_);
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
{
base::AutoLock scoped_lock(converter_network_lock_);
mixer_.Convert(audio_bus);
}
- audio_bus->ToInterleaved(params_.frames_per_buffer(),
- params_.bits_per_sample() / 8,
- buffer_.get());
- callback_->OnData(this,
- buffer_.get(),
- params_.GetBytesPerBuffer(),
- params_.GetBytesPerBuffer(),
- 1.0);
+ callback_->OnData(this, audio_bus, params_.GetBytesPerBuffer(), 1.0);
}
void VirtualAudioInputStream::Close() {
DCHECK(thread_checker_.CalledOnValidThread());
Stop(); // Make sure callback_ is no longer being used.
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
// If a non-null AfterCloseCallback was provided to the constructor, invoke it
// here. The callback is moved to a stack-local first since |this| could be
diff --git a/chromium/media/audio/virtual_audio_input_stream.h b/chromium/media/audio/virtual_audio_input_stream.h
index 53a10738732..17e2730d535 100644
--- a/chromium/media/audio/virtual_audio_input_stream.h
+++ b/chromium/media/audio/virtual_audio_input_stream.h
@@ -18,7 +18,7 @@
#include "media/base/audio_converter.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -36,12 +36,12 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
AfterCloseCallback;
// Construct a target for audio loopback which mixes multiple data streams
- // into a single stream having the given |params|. |worker_loop| is the loop
- // on which AudioInputCallback methods are called and may or may not be the
- // single thread that invokes the AudioInputStream methods.
+ // into a single stream having the given |params|. |worker_task_runner| is
+ // the task runner on which AudioInputCallback methods are called and may or
+ // may not be the single thread that invokes the AudioInputStream methods.
VirtualAudioInputStream(
const AudioParameters& params,
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AfterCloseCallback& after_close_cb);
virtual ~VirtualAudioInputStream();
@@ -78,7 +78,7 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
// Invoked on the worker thread.
void PumpAudio(AudioBus* audio_bus);
- const scoped_refptr<base::MessageLoopProxy> worker_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
AfterCloseCallback after_close_cb_;
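
For reference, a sketch of a sink written against the new AudioBus-based callback surface that PumpAudio() above now targets (class name hypothetical; the signature matches the mock in the unittest below):

    #include "base/basictypes.h"
    #include "base/compiler_specific.h"
    #include "media/audio/audio_io.h"
    #include "media/base/audio_bus.h"

    class HypotheticalSink
        : public media::AudioInputStream::AudioInputCallback {
     public:
      virtual void OnData(media::AudioInputStream* stream,
                          const media::AudioBus* source,
                          uint32 hardware_delay_bytes,
                          double volume) OVERRIDE {
        // |source| arrives already deinterleaved, so the old
        // (const uint8*, uint32 size) byte bookkeeping disappears, and
        // there is no OnClose() to implement anymore.
      }
      virtual void OnError(media::AudioInputStream* stream) OVERRIDE {}
    };
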
diff --git a/chromium/media/audio/virtual_audio_input_stream_unittest.cc b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
index aab67cca571..3aa87b0a179 100644
--- a/chromium/media/audio/virtual_audio_input_stream_unittest.cc
+++ b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
@@ -6,7 +6,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
#include "base/rand_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
@@ -33,17 +32,17 @@ class MockInputCallback : public AudioInputStream::AudioInputCallback {
public:
MockInputCallback()
: data_pushed_(false, false) {
- ON_CALL(*this, OnData(_, _, _, _, _))
- .WillByDefault(InvokeWithoutArgs(&data_pushed_,
- &base::WaitableEvent::Signal));
+ ON_CALL(*this, OnData(_, _, _, _)).WillByDefault(
+ InvokeWithoutArgs(&data_pushed_, &base::WaitableEvent::Signal));
}
virtual ~MockInputCallback() {}
- MOCK_METHOD5(OnData, void(AudioInputStream* stream, const uint8* data,
- uint32 size, uint32 hardware_delay_bytes,
- double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream* stream,
+ const AudioBus* source,
+ uint32 hardware_delay_bytes,
+ double volume));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
void WaitForDataPushes() {
@@ -74,15 +73,6 @@ class TestAudioSource : public SineWaveAudioSource {
return ret;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) OVERRIDE {
- const int ret =
- SineWaveAudioSource::OnMoreIOData(source, dest, audio_buffers);
- data_pulled_.Signal();
- return ret;
- }
-
void WaitForDataPulls() {
for (int i = 0; i < 3; ++i) {
data_pulled_.Wait();
@@ -105,7 +95,7 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
stream_(NULL),
closed_stream_(false, false) {
audio_thread_->Start();
- audio_message_loop_ = audio_thread_->message_loop_proxy();
+ audio_task_runner_ = audio_thread_->message_loop_proxy();
}
virtual ~VirtualAudioInputStreamTest() {
@@ -118,15 +108,13 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
void Create() {
const bool worker_is_separate_thread = GetParam();
stream_ = new VirtualAudioInputStream(
- kParams, GetWorkerLoop(worker_is_separate_thread),
+ kParams, GetWorkerTaskRunner(worker_is_separate_thread),
base::Bind(&base::DeletePointer<VirtualAudioInputStream>));
stream_->Open();
}
void Start() {
- EXPECT_CALL(input_callback_, OnClose(_));
- EXPECT_CALL(input_callback_, OnData(_, NotNull(), _, _, _))
- .Times(AtLeast(1));
+ EXPECT_CALL(input_callback_, OnData(_, NotNull(), _, _)).Times(AtLeast(1));
ASSERT_TRUE(!!stream_);
stream_->Start(&input_callback_);
@@ -209,36 +197,36 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
stopped_output_streams_.clear();
}
- const scoped_refptr<base::MessageLoopProxy>& audio_message_loop() const {
- return audio_message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner>& audio_task_runner() const {
+ return audio_task_runner_;
}
- const scoped_refptr<base::MessageLoopProxy>& GetWorkerLoop(
+ const scoped_refptr<base::SingleThreadTaskRunner>& GetWorkerTaskRunner(
bool worker_is_separate_thread) {
if (worker_is_separate_thread) {
if (!worker_thread_->IsRunning()) {
worker_thread_->Start();
- worker_message_loop_ = worker_thread_->message_loop_proxy();
+ worker_task_runner_ = worker_thread_->message_loop_proxy();
}
- return worker_message_loop_;
+ return worker_task_runner_;
} else {
- return audio_message_loop_;
+ return audio_task_runner_;
}
}
private:
void SyncWithAudioThread() {
base::WaitableEvent done(false, false);
- audio_message_loop_->PostTask(
+ audio_task_runner_->PostTask(
FROM_HERE,
base::Bind(&base::WaitableEvent::Signal, base::Unretained(&done)));
done.Wait();
}
scoped_ptr<base::Thread> audio_thread_;
- scoped_refptr<base::MessageLoopProxy> audio_message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner_;
scoped_ptr<base::Thread> worker_thread_;
- scoped_refptr<base::MessageLoopProxy> worker_message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
VirtualAudioInputStream* stream_;
MockInputCallback input_callback_;
@@ -252,7 +240,7 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
};
#define RUN_ON_AUDIO_THREAD(method) \
- audio_message_loop()->PostTask( \
+ audio_task_runner()->PostTask( \
FROM_HERE, base::Bind(&VirtualAudioInputStreamTest::method, \
base::Unretained(this)))
diff --git a/chromium/media/audio/virtual_audio_output_stream_unittest.cc b/chromium/media/audio/virtual_audio_output_stream_unittest.cc
index 1e3abd1c6bb..72e794d6feb 100644
--- a/chromium/media/audio/virtual_audio_output_stream_unittest.cc
+++ b/chromium/media/audio/virtual_audio_output_stream_unittest.cc
@@ -4,8 +4,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
@@ -27,10 +25,10 @@ const AudioParameters kParams(
class MockVirtualAudioInputStream : public VirtualAudioInputStream {
public:
explicit MockVirtualAudioInputStream(
- const scoped_refptr<base::MessageLoopProxy>& worker_loop)
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner)
: VirtualAudioInputStream(
kParams,
- worker_loop,
+ worker_task_runner,
base::Bind(&base::DeletePointer<VirtualAudioInputStream>)) {}
~MockVirtualAudioInputStream() {}
@@ -53,16 +51,16 @@ class VirtualAudioOutputStreamTest : public testing::Test {
VirtualAudioOutputStreamTest()
: audio_thread_(new base::Thread("AudioThread")) {
audio_thread_->Start();
- audio_message_loop_ = audio_thread_->message_loop_proxy();
+ audio_task_runner_ = audio_thread_->message_loop_proxy();
}
- const scoped_refptr<base::MessageLoopProxy>& audio_message_loop() const {
- return audio_message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner>& audio_task_runner() const {
+ return audio_task_runner_;
}
void SyncWithAudioThread() {
base::WaitableEvent done(false, false);
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&base::WaitableEvent::Signal,
base::Unretained(&done)));
done.Wait();
@@ -70,7 +68,7 @@ class VirtualAudioOutputStreamTest : public testing::Test {
private:
scoped_ptr<base::Thread> audio_thread_;
- scoped_refptr<base::MessageLoopProxy> audio_message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner_;
DISALLOW_COPY_AND_ASSIGN(VirtualAudioOutputStreamTest);
};
@@ -79,8 +77,8 @@ TEST_F(VirtualAudioOutputStreamTest, StartStopStartStop) {
static const int kCycles = 3;
MockVirtualAudioInputStream* const input_stream =
- new MockVirtualAudioInputStream(audio_message_loop());
- audio_message_loop()->PostTask(
+ new MockVirtualAudioInputStream(audio_task_runner());
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(
base::IgnoreResult(&MockVirtualAudioInputStream::Open),
base::Unretained(input_stream)));
@@ -95,24 +93,24 @@ TEST_F(VirtualAudioOutputStreamTest, StartStopStartStop) {
EXPECT_CALL(*input_stream, RemoveOutputStream(output_stream, _))
.Times(kCycles);
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(base::IgnoreResult(&VirtualAudioOutputStream::Open),
base::Unretained(output_stream)));
SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 128);
for (int i = 0; i < kCycles; ++i) {
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&VirtualAudioOutputStream::Start,
base::Unretained(output_stream),
&source));
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&VirtualAudioOutputStream::Stop,
base::Unretained(output_stream)));
}
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&VirtualAudioOutputStream::Close,
base::Unretained(output_stream)));
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&MockVirtualAudioInputStream::Close,
base::Unretained(input_stream)));
diff --git a/chromium/media/audio/win/audio_device_listener_win.cc b/chromium/media/audio/win/audio_device_listener_win.cc
index adbc9a82e4d..ecf83874867 100644
--- a/chromium/media/audio/win/audio_device_listener_win.cc
+++ b/chromium/media/audio/win/audio_device_listener_win.cc
@@ -30,6 +30,27 @@ static std::string RoleToString(ERole role) {
}
}
+static std::string GetDeviceId(EDataFlow flow,
+ ERole role) {
+ ScopedComPtr<IMMDevice> device =
+ CoreAudioUtil::CreateDefaultDevice(flow, role);
+ if (!device) {
+ // Most probable reason for ending up here is that all audio devices are
+ // disabled or unplugged.
+ DVLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
+ return std::string();
+ }
+
+ AudioDeviceName device_name;
+ HRESULT hr = CoreAudioUtil::GetDeviceName(device, &device_name);
+ if (FAILED(hr)) {
+ DVLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
+ return std::string();
+ }
+
+ return device_name.unique_id;
+}
+
AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
: listener_cb_(listener_cb) {
CHECK(CoreAudioUtil::IsSupported());
@@ -48,22 +69,12 @@ AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
device_enumerator_ = device_enumerator;
- ScopedComPtr<IMMDevice> device =
- CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- if (!device) {
- // Most probable reason for ending up here is that all audio devices are
- // disabled or unplugged.
- VLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
- return;
- }
-
- AudioDeviceName device_name;
- hr = CoreAudioUtil::GetDeviceName(device, &device_name);
- if (FAILED(hr)) {
- VLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
- return;
- }
- default_render_device_id_ = device_name.unique_id;
+ default_render_device_id_ = GetDeviceId(eRender, eConsole);
+ default_capture_device_id_ = GetDeviceId(eCapture, eConsole);
+ default_communications_render_device_id_ =
+ GetDeviceId(eRender, eCommunications);
+ default_communications_capture_device_id_ =
+ GetDeviceId(eCapture, eCommunications);
}
AudioDeviceListenerWin::~AudioDeviceListenerWin() {
@@ -126,14 +137,29 @@ STDMETHODIMP AudioDeviceListenerWin::OnDeviceStateChanged(LPCWSTR device_id,
STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
- // Only listen for output device changes right now...
- if (flow != eConsole && role != eRender)
+ // Only listen for console and communication device changes.
+ if ((role != eConsole && role != eCommunications) ||
+ (flow != eRender && flow != eCapture)) {
return S_OK;
+ }
+
+ // Grab a pointer to the appropriate ID member.
+ // Note that there are three "?:"'s here to select the right ID.
+ std::string* current_device_id =
+      flow == eRender ? (
+          role == eConsole ?
+              &default_render_device_id_ :
+              &default_communications_render_device_id_
+      ) : (
+          role == eConsole ?
+              &default_capture_device_id_ :
+              &default_communications_capture_device_id_
+      );
// If no device is now available, |new_default_device_id| will be NULL.
std::string new_device_id;
if (new_default_device_id)
- new_device_id = WideToUTF8(new_default_device_id);
+ new_device_id = base::WideToUTF8(new_default_device_id);
VLOG(1) << "OnDefaultDeviceChanged() "
<< "new_default_device: "
@@ -146,10 +172,11 @@ STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
// TODO(dalecurtis): This still seems to fire an extra event on my machine for
// an unplug event (probably others too); e.g., we get two transitions to a
// new default device id.
- if (new_device_id.compare(default_render_device_id_) == 0)
+ if (new_device_id.compare(*current_device_id) == 0)
return S_OK;
- default_render_device_id_ = new_device_id;
+  // Store the new id in the member that |current_device_id| points to.
+ *current_device_id = new_device_id;
listener_cb_.Run();
return S_OK;
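
For readability, the (flow, role) -> member mapping implemented by the nested conditionals above (note that |flow| is an EDataFlow, eRender/eCapture, and |role| an ERole, eConsole/eCommunications):

    flow      role             member updated
    eRender   eConsole         default_render_device_id_
    eRender   eCommunications  default_communications_render_device_id_
    eCapture  eConsole         default_capture_device_id_
    eCapture  eCommunications  default_communications_capture_device_id_
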
diff --git a/chromium/media/audio/win/audio_device_listener_win.h b/chromium/media/audio/win/audio_device_listener_win.h
index 6a312519af9..92777a12a0d 100644
--- a/chromium/media/audio/win/audio_device_listener_win.h
+++ b/chromium/media/audio/win/audio_device_listener_win.h
@@ -49,6 +49,9 @@ class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
base::Closure listener_cb_;
ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
std::string default_render_device_id_;
+ std::string default_capture_device_id_;
+ std::string default_communications_render_device_id_;
+ std::string default_communications_capture_device_id_;
// AudioDeviceListenerWin must be constructed and destructed on one thread.
base::ThreadChecker thread_checker_;
diff --git a/chromium/media/audio/win/audio_device_listener_win_unittest.cc b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
index 3076fff2513..14b70a8fe90 100644
--- a/chromium/media/audio/win/audio_device_listener_win_unittest.cc
+++ b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
@@ -47,7 +47,7 @@ class AudioDeviceListenerWinTest : public testing::Test {
bool SimulateDefaultOutputDeviceChange(const char* new_device_id) {
return output_device_listener_->OnDefaultDeviceChanged(
static_cast<EDataFlow>(eConsole), static_cast<ERole>(eRender),
- ASCIIToWide(new_device_id).c_str()) == S_OK;
+ base::ASCIIToWide(new_device_id).c_str()) == S_OK;
}
void SetOutputDeviceId(std::string new_device_id) {
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index b16ef130a9f..c43ed22977c 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -9,22 +9,48 @@
#include "base/strings/utf_string_conversions.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/base/audio_bus.h"
using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
namespace media {
+namespace {
+
+// Returns true if |device| represents the default communication capture device.
+bool IsDefaultCommunicationDevice(IMMDeviceEnumerator* enumerator,
+ IMMDevice* device) {
+ ScopedComPtr<IMMDevice> communications;
+ if (FAILED(enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
+ communications.Receive()))) {
+ return false;
+ }
+
+ base::win::ScopedCoMem<WCHAR> communications_id, device_id;
+ device->GetId(&device_id);
+ communications->GetId(&communications_id);
+ return lstrcmpW(communications_id, device_id) == 0;
+}
-WASAPIAudioInputStream::WASAPIAudioInputStream(
- AudioManagerWin* manager, const AudioParameters& params,
- const std::string& device_id)
+} // namespace
+
+WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ const std::string& device_id)
: manager_(manager),
capture_thread_(NULL),
opened_(false),
started_(false),
+ frame_size_(0),
+ packet_size_frames_(0),
+ packet_size_bytes_(0),
endpoint_buffer_size_frames_(0),
+ effects_(params.effects()),
device_id_(device_id),
- sink_(NULL) {
+ perf_count_to_100ns_units_(0.0),
+ ms_to_frame_count_(0.0),
+ sink_(NULL),
+ audio_bus_(media::AudioBus::Create(params)) {
DCHECK(manager_);
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
@@ -67,8 +93,7 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(
perf_count_to_100ns_units_ =
(10000000.0 / static_cast<double>(performance_frequency.QuadPart));
} else {
- LOG(ERROR) << "High-resolution performance counters are not supported.";
- perf_count_to_100ns_units_ = 0.0;
+ DLOG(ERROR) << "High-resolution performance counters are not supported.";
}
}
@@ -123,6 +148,7 @@ void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
if (started_)
return;
+ DCHECK(!sink_);
sink_ = callback;
// Starts periodic AGC microphone measurements if the AGC has been enabled
@@ -173,6 +199,7 @@ void WASAPIAudioInputStream::Stop() {
}
started_ = false;
+ sink_ = NULL;
}
void WASAPIAudioInputStream::Close() {
@@ -180,10 +207,6 @@ void WASAPIAudioInputStream::Close() {
// It is valid to call Close() before calling open or Start().
// It is also valid to call Close() after Start() has been called.
Stop();
- if (sink_) {
- sink_->OnClose(this);
- sink_ = NULL;
- }
// Inform the audio manager that we have been closed. This will cause our
// destruction.
@@ -240,30 +263,32 @@ double WASAPIAudioInputStream::GetVolume() {
}
// static
-int WASAPIAudioInputStream::HardwareSampleRate(
+AudioParameters WASAPIAudioInputStream::GetInputStreamParameters(
const std::string& device_id) {
- base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
- if (FAILED(hr))
- return 0;
+ int sample_rate = 48000;
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
-}
-
-// static
-uint32 WASAPIAudioInputStream::HardwareChannelCount(
- const std::string& device_id) {
base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
- if (FAILED(hr))
- return 0;
+ int effects = AudioParameters::NO_EFFECTS;
+ if (SUCCEEDED(GetMixFormat(device_id, &audio_engine_mix_format, &effects))) {
+ sample_rate = static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+ channel_layout = audio_engine_mix_format->nChannels == 1 ?
+ CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ }
- return static_cast<uint32>(audio_engine_mix_format->nChannels);
+ // Use 10ms frame size as default.
+ int frames_per_buffer = sample_rate / 100;
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, 0, sample_rate,
+ 16, frames_per_buffer, effects);
}
// static
HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
- WAVEFORMATEX** device_format) {
+ WAVEFORMATEX** device_format,
+ int* effects) {
+ DCHECK(effects);
+
// It is assumed that this static method is called from a COM thread, i.e.,
// CoInitializeEx() is not called here to avoid STA/MTA conflicts.
ScopedComPtr<IMMDeviceEnumerator> enumerator;
@@ -278,18 +303,22 @@ HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
endpoint_device.Receive());
} else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
+ // Get the mix format of the default playback stream.
hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
endpoint_device.Receive());
} else {
// Retrieve a capture endpoint device that is specified by an endpoint
// device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
+ hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id).c_str(),
endpoint_device.Receive());
}
+
if (FAILED(hr))
return hr;
+ *effects = IsDefaultCommunicationDevice(enumerator, endpoint_device) ?
+ AudioParameters::DUCKING : AudioParameters::NO_EFFECTS;
+
ScopedComPtr<IAudioClient> audio_client;
hr = endpoint_device->Activate(__uuidof(IAudioClient),
CLSCTX_INPROC_SERVER,
@@ -408,16 +437,15 @@ void WASAPIAudioInputStream::Run() {
// size which was specified at construction.
uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
while (buffer_frame_index >= packet_size_frames_) {
- uint8* audio_data =
- reinterpret_cast<uint8*>(capture_buffer.get());
+ // Copy data to audio bus to match the OnData interface.
+ uint8* audio_data = reinterpret_cast<uint8*>(capture_buffer.get());
+ audio_bus_->FromInterleaved(
+ audio_data, audio_bus_->frames(), format_.wBitsPerSample / 8);
// Deliver data packet, delay estimation and volume level to
// the user.
- sink_->OnData(this,
- audio_data,
- packet_size_bytes_,
- delay_frames * frame_size_,
- volume);
+ sink_->OnData(
+ this, audio_bus_.get(), delay_frames * frame_size_, volume);
// Store parts of the recorded data which can't be delivered
// using the current packet size. The stored section will be used
@@ -458,6 +486,8 @@ void WASAPIAudioInputStream::HandleError(HRESULT err) {
}
HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
+ DCHECK(!endpoint_device_);
+
ScopedComPtr<IMMDeviceEnumerator> enumerator;
HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_INPROC_SERVER);
@@ -466,22 +496,42 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
// Retrieve the IMMDevice by using the specified role or the specified
// unique endpoint device-identification string.
- // TODO(henrika): possibly add support for the eCommunications as well.
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
- endpoint_device_.Receive());
- } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+
+ if (effects_ & AudioParameters::DUCKING) {
+ // Ducking has been requested and it is only supported for the default
+ // communication device. So, let's open up the communication device and
+ // see if the ID of that device matches the requested ID.
+  // Both kDefaultDeviceId and an explicit device id that matches the
+  // communication device's id are treated as valid matches.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
endpoint_device_.Receive());
- } else {
- // Retrieve a capture endpoint device that is specified by an endpoint
- // device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
- endpoint_device_.Receive());
+ if (endpoint_device_ && device_id_ != AudioManagerBase::kDefaultDeviceId) {
+ base::win::ScopedCoMem<WCHAR> communications_id;
+ endpoint_device_->GetId(&communications_id);
+ if (device_id_ !=
+ base::WideToUTF8(static_cast<WCHAR*>(communications_id))) {
+        DLOG(WARNING) << "Ducking has been requested for a non-default "
+                         "device. Not supported.";
+ endpoint_device_.Release(); // Fall back on code below.
+ }
+ }
+ }
+
+ if (!endpoint_device_) {
+ if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint for the specified role.
+ // Note that, in Windows Vista, the MMDevice API supports device roles
+ // but the system-supplied user interface programs do not.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+ endpoint_device_.Receive());
+ } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ // Capture the default playback stream.
+ hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+ endpoint_device_.Receive());
+ } else {
+ hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
+ endpoint_device_.Receive());
+ }
}
if (FAILED(hr))
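
A sketch of a caller of the consolidated query, which replaces the two removed static getters (function name hypothetical):

    #include <string>

    #include "media/audio/win/audio_low_latency_input_win.h"

    void HypotheticalCaller(const std::string& device_id) {
      media::AudioParameters params =
          media::WASAPIAudioInputStream::GetInputStreamParameters(device_id);
      if (params.effects() & media::AudioParameters::DUCKING) {
        // |device_id| resolved to the default communication capture device.
      }
    }
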
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
index 99e1604925a..a33a582c976 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.h
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -75,6 +75,7 @@
namespace media {
+class AudioBus;
class AudioManagerWin;
// AudioInputStream implementation using Windows Core Audio APIs.
@@ -88,6 +89,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
WASAPIAudioInputStream(AudioManagerWin* manager,
const AudioParameters& params,
const std::string& device_id);
+
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
virtual ~WASAPIAudioInputStream();
@@ -101,16 +103,11 @@ class MEDIA_EXPORT WASAPIAudioInputStream
virtual void SetVolume(double volume) OVERRIDE;
virtual double GetVolume() OVERRIDE;
- // Retrieves the sample rate used by the audio engine for its internal
- // processing/mixing of shared-mode streams given a specifed device.
- static int HardwareSampleRate(const std::string& device_id);
-
- // Retrieves the number of audio channels used by the audio engine for its
- // internal processing/mixing of shared-mode streams given a specified device.
- static uint32 HardwareChannelCount(const std::string& device_id);
-
bool started() const { return started_; }
+  // Returns the default hardware audio parameters of the specified device.
+ static AudioParameters GetInputStreamParameters(const std::string& device_id);
+
private:
// DelegateSimpleThread::Delegate implementation.
virtual void Run() OVERRIDE;
@@ -127,8 +124,11 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Retrieves the stream format that the audio engine uses for its internal
// processing/mixing of shared-mode streams.
+ // |effects| is an AudioParameters::effects() flag that will have the
+ // DUCKING flag raised only for the default communication device.
static HRESULT GetMixFormat(const std::string& device_id,
- WAVEFORMATEX** device_format);
+ WAVEFORMATEX** device_format,
+ int* effects);
// Our creator, the audio manager needs to be notified when we close.
AudioManagerWin* manager_;
@@ -157,6 +157,9 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
+ // A copy of the supplied AudioParameter's |effects|.
+ const int effects_;
+
// Contains the unique name of the selected endpoint device.
// Note that AudioManagerBase::kDefaultDeviceId represents the default
// device role and is not a valid ID as such.
@@ -178,7 +181,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// An IMMDevice interface which represents an audio endpoint device.
base::win::ScopedComPtr<IMMDevice> endpoint_device_;
- // Windows Audio Session API (WASAP) interfaces.
+ // Windows Audio Session API (WASAPI) interfaces.
// An IAudioClient interface which enables a client to create and initialize
// an audio stream between an audio application and the audio engine.
@@ -209,6 +212,10 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// This event will be signaled when capturing shall stop.
base::win::ScopedHandle stop_capture_event_;
+ // Extra audio bus used for storage of deinterleaved data for the OnData
+ // callback.
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream);
};
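
The new |audio_bus_| member exists because the capture thread now hands planar float data to OnData() instead of a raw interleaved byte buffer. Below is a hypothetical standalone sketch of that deinterleave step; Chromium's actual implementation is AudioBus::FromInterleaved(), and the 1/32768 scale for 16-bit samples is an assumption based on that behavior.

// Hypothetical illustration of the deinterleave step enabled by |audio_bus_|:
// 16-bit interleaved capture data is split into per-channel float arrays
// before OnData() is invoked.
#include <stdint.h>
#include <vector>

void DeinterleaveToPlanarFloat(const int16_t* interleaved,
                               int frames,
                               int channels,
                               std::vector<std::vector<float> >* planar) {
  planar->assign(channels, std::vector<float>(frames));
  for (int f = 0; f < frames; ++f) {
    for (int ch = 0; ch < channels; ++ch) {
      // Scale the signed 16-bit sample into the [-1.0, 1.0) float range,
      // mirroring what AudioBus::FromInterleaved() is expected to do for
      // 2-byte samples.
      (*planar)[ch][f] = interleaved[f * channels + ch] / 32768.0f;
    }
  }
}
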
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
index 54bd3f71b26..eee18873f6c 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -38,24 +38,23 @@ ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
FakeAudioInputCallback()
- : closed_(false),
- error_(false),
- data_event_(false, false) {
- }
+ : error_(false),
+ data_event_(false, false),
+ num_received_audio_frames_(0) {}
- const std::vector<uint8>& received_data() const { return received_data_; }
- bool closed() const { return closed_; }
bool error() const { return error_; }
+ int num_received_audio_frames() const { return num_received_audio_frames_; }
// Waits until OnData() is called on another thread.
void WaitForData() {
@@ -63,24 +62,21 @@ class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
}
virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE {
- received_data_.insert(received_data_.end(), src, src + size);
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ EXPECT_NE(hardware_delay_bytes, 0u);
+ num_received_audio_frames_ += src->frames();
data_event_.Signal();
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {
- closed_ = true;
- }
-
virtual void OnError(AudioInputStream* stream) OVERRIDE {
error_ = true;
}
private:
- std::vector<uint8> received_data_;
+ int num_received_audio_frames_;
base::WaitableEvent data_event_;
- bool closed_;
bool error_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioInputCallback);
@@ -94,8 +90,9 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
// 2 bytes per sample, 2 channels, 10ms @ 48kHz, 10 seconds <=> 1920000 bytes.
static const size_t kMaxBufferSize = 2 * 2 * 480 * 100 * 10;
- explicit WriteToFileAudioSink(const char* file_name)
- : buffer_(0, kMaxBufferSize),
+ explicit WriteToFileAudioSink(const char* file_name, int bits_per_sample)
+ : bits_per_sample_(bits_per_sample),
+ buffer_(0, kMaxBufferSize),
bytes_to_write_(0) {
base::FilePath file_path;
EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_path));
@@ -103,6 +100,7 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
binary_file_ = base::OpenFile(file_path, "wb");
DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
VLOG(0) << ">> Output file: " << file_path.value() << " has been created.";
+ VLOG(0) << "bits_per_sample_:" << bits_per_sample_;
}
virtual ~WriteToFileAudioSink() {
@@ -125,22 +123,28 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
// AudioInputStream::AudioInputCallback implementation.
virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) {
+ EXPECT_EQ(bits_per_sample_, 16);
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16[]> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(interleaved[0]);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+
// Store the data in a temporary buffer to avoid making blocking
// fwrite() calls in the audio callback. The complete buffer will be
// written to file in the destructor.
- if (buffer_.Append(src, size)) {
+ const int size = bytes_per_sample * num_samples;
+ if (buffer_.Append(reinterpret_cast<const uint8*>(interleaved.get()),
+ size)) {
bytes_to_write_ += size;
}
}
- virtual void OnClose(AudioInputStream* stream) {}
virtual void OnError(AudioInputStream* stream) {}
private:
+ int bits_per_sample_;
media::SeekableBuffer buffer_;
FILE* binary_file_;
size_t bytes_to_write_;
@@ -169,14 +173,13 @@ class AudioInputStreamWrapper {
explicit AudioInputStreamWrapper(AudioManager* audio_manager)
: com_init_(ScopedCOMInitializer::kMTA),
audio_man_(audio_manager),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- channel_layout_(CHANNEL_LAYOUT_STEREO),
- bits_per_sample_(16) {
- // Use native/mixing sample rate and 10ms frame size as default.
- sample_rate_ = static_cast<int>(
- WASAPIAudioInputStream::HardwareSampleRate(
- AudioManagerBase::kDefaultDeviceId));
- samples_per_packet_ = sample_rate_ / 100;
+ default_params_(
+ audio_manager->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId)) {
+ EXPECT_EQ(format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
+ frames_per_buffer_ = default_params_.frames_per_buffer();
+ // We expect the default buffer size to be a 10ms buffer.
+ EXPECT_EQ(frames_per_buffer_, sample_rate() / 100);
}
~AudioInputStreamWrapper() {}
@@ -188,36 +191,35 @@ class AudioInputStreamWrapper {
// Creates AudioInputStream object using non-default parameters where the
// frame size is modified.
- AudioInputStream* Create(int samples_per_packet) {
- samples_per_packet_ = samples_per_packet;
+ AudioInputStream* Create(int frames_per_buffer) {
+ frames_per_buffer_ = frames_per_buffer;
return CreateInputStream();
}
- AudioParameters::Format format() const { return format_; }
+ AudioParameters::Format format() const { return default_params_.format(); }
int channels() const {
- return ChannelLayoutToChannelCount(channel_layout_);
+ return ChannelLayoutToChannelCount(default_params_.channel_layout());
}
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
+ int bits_per_sample() const { return default_params_.bits_per_sample(); }
+ int sample_rate() const { return default_params_.sample_rate(); }
+ int frames_per_buffer() const { return frames_per_buffer_; }
private:
AudioInputStream* CreateInputStream() {
AudioInputStream* ais = audio_man_->MakeAudioInputStream(
- AudioParameters(format_, channel_layout_, sample_rate_,
- bits_per_sample_, samples_per_packet_),
- AudioManagerBase::kDefaultDeviceId);
+ AudioParameters(format(), default_params_.channel_layout(),
+ default_params_.input_channels(),
+ sample_rate(), bits_per_sample(), frames_per_buffer_,
+ default_params_.effects()),
+ AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
return ais;
}
ScopedCOMInitializer com_init_;
AudioManager* audio_man_;
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
+ const AudioParameters default_params_;
+ int frames_per_buffer_;
};
// Convenience method which creates a default AudioInputStream object.
@@ -278,10 +280,9 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
for (media::AudioDeviceNames::const_iterator it = device_names.begin();
it != device_names.end(); ++it) {
// Retrieve the hardware sample rate given a specified audio input device.
- // TODO(tommi): ensure that we don't have to cast here.
- int fs = static_cast<int>(WASAPIAudioInputStream::HardwareSampleRate(
- it->unique_id));
- EXPECT_GE(fs, 0);
+ AudioParameters params = WASAPIAudioInputStream::GetInputStreamParameters(
+ it->unique_id);
+ EXPECT_GE(params.sample_rate(), 0);
}
}
@@ -316,8 +317,6 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
@@ -332,8 +331,6 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
MockAudioInputCallback sink;
ais->Start(&sink);
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
@@ -364,9 +361,6 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
EXPECT_FALSE(wais->started());
ais->Stop();
EXPECT_FALSE(wais->started());
-
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
@@ -389,14 +383,13 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
MockAudioInputCallback sink;
// Derive the expected size in bytes of each recorded packet.
- uint32 bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+ uint32 bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
// We use 10ms packets and will run the test until ten packets are received.
// All should contain valid packets of the same size and a valid delay
// estimate.
- EXPECT_CALL(sink, OnData(
- ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
@@ -404,54 +397,44 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
ais->Stop();
// Store current packet size (to be used in the subsequent tests).
- int samples_per_packet_10ms = aisw.samples_per_packet();
+ int frames_per_buffer_10ms = aisw.frames_per_buffer();
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
// 20 ms packet size.
count = 0;
- ais.Reset(aisw.Create(2 * samples_per_packet_10ms));
+ ais.Reset(aisw.Create(2 * frames_per_buffer_10ms));
EXPECT_TRUE(ais->Open());
- bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+ bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
- EXPECT_CALL(sink, OnData(
- ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
loop.Run();
ais->Stop();
-
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
// 5 ms packet size.
count = 0;
- ais.Reset(aisw.Create(samples_per_packet_10ms / 2));
+ ais.Reset(aisw.Create(frames_per_buffer_10ms / 2));
EXPECT_TRUE(ais->Open());
- bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+ bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
- EXPECT_CALL(sink, OnData(
- ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
loop.Run();
ais->Stop();
-
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
-// Test that we can capture loopback stream.
+// Test that we can capture a stream in loopback.
TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!audio_manager->HasAudioOutputDevices() || !CoreAudioUtil::IsSupported())
@@ -459,6 +442,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
AudioParameters params = audio_manager->GetInputStreamParameters(
AudioManagerBase::kLoopbackInputDeviceId);
+ EXPECT_EQ(params.effects(), 0);
AudioParameters output_params =
audio_manager->GetOutputStreamParameters(std::string());
@@ -475,8 +459,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
sink.WaitForData();
stream.Close();
- EXPECT_FALSE(sink.received_data().empty());
- EXPECT_TRUE(sink.closed());
+ EXPECT_GT(sink.num_received_audio_frames(), 0);
EXPECT_FALSE(sink.error());
}
@@ -501,7 +484,7 @@ TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
EXPECT_TRUE(ais->Open());
VLOG(0) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
- WriteToFileAudioSink file_sink(file_name);
+ WriteToFileAudioSink file_sink(file_name, aisw.bits_per_sample());
VLOG(0) << ">> Speak into the default microphone while recording.";
ais->Start(&file_sink);
base::PlatformThread::Sleep(TestTimeouts::action_timeout());
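
For reference, the packet-size arithmetic used throughout these tests and the int16 re-interleaving done by WriteToFileAudioSink::OnData() can be shown in isolation. This is a sketch with hypothetical helper names, not the AudioBus API; the clamp-and-scale constants mirror what ToInterleaved() is expected to do for 2-byte samples.

// Example packet-size derivation used by the tests above:
// 2 channels * 480 frames (10 ms @ 48 kHz) * 2 bytes = 1920 bytes per packet.
#include <stdint.h>
#include <vector>

size_t BytesPerPacket(int channels, int frames_per_buffer,
                      int bits_per_sample) {
  return static_cast<size_t>(channels) * frames_per_buffer *
         (bits_per_sample / 8);
}

// Inverse of the deinterleave step: planar float back to interleaved int16,
// as the file sink does before buffering data for fwrite().
void InterleaveToInt16(const std::vector<std::vector<float> >& planar,
                       int frames,
                       std::vector<int16_t>* interleaved) {
  const int channels = static_cast<int>(planar.size());
  interleaved->resize(static_cast<size_t>(frames) * channels);
  for (int f = 0; f < frames; ++f) {
    for (int ch = 0; ch < channels; ++ch) {
      float s = planar[ch][f];
      // Clamp to [-1, 1], then scale to the int16 range.
      if (s > 1.0f) s = 1.0f;
      if (s < -1.0f) s = -1.0f;
      (*interleaved)[f * channels + ch] = static_cast<int16_t>(s * 32767.0f);
    }
  }
}
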
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index a10e67a46cb..6aad434f6e6 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -25,19 +25,6 @@ using base::win::ScopedCoMem;
namespace media {
-// Compare two sets of audio parameters and return true if they are equal.
-// Note that bits_per_sample() is excluded from this comparison since Core
-// Audio can deal with most bit depths. As an example, if the native/mixing
-// bit depth is 32 bits (default), opening at 16 or 24 still works fine and
-// the audio engine will do the required conversion for us. Channel count is
-// excluded since Open() will fail anyways and it doesn't impact buffering.
-static bool CompareAudioParametersNoBitDepthOrChannels(
- const media::AudioParameters& a, const media::AudioParameters& b) {
- return (a.format() == b.format() &&
- a.sample_rate() == b.sample_rate() &&
- a.frames_per_buffer() == b.frames_per_buffer());
-}
-
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -73,7 +60,6 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
manager_(manager),
format_(),
opened_(false),
- audio_parameters_are_valid_(false),
volume_(1.0),
packet_size_frames_(0),
packet_size_bytes_(0),
@@ -89,23 +75,6 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // Verify that the input audio parameters are identical (bit depth and
- // channel count are excluded) to the preferred (native) audio parameters.
- // Open() will fail if this is not the case.
- AudioParameters preferred_params;
- HRESULT hr = device_id_.empty() ?
- CoreAudioUtil::GetPreferredAudioParameters(eRender, device_role,
- &preferred_params) :
- CoreAudioUtil::GetPreferredAudioParameters(device_id_,
- &preferred_params);
- audio_parameters_are_valid_ = SUCCEEDED(hr) &&
- CompareAudioParametersNoBitDepthOrChannels(params, preferred_params);
- LOG_IF(WARNING, !audio_parameters_are_valid_)
- << "Input and preferred parameters are not identical. "
- << "Device id: " << device_id_;
- }
-
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
bool avrt_init = avrt::Initialize();
DCHECK(avrt_init) << "Failed to load the avrt.dll";
@@ -159,18 +128,10 @@ bool WASAPIAudioOutputStream::Open() {
if (opened_)
return true;
- // Audio parameters must be identical to the preferred set of parameters
- // if shared mode (default) is utilized.
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!audio_parameters_are_valid_) {
- LOG(ERROR) << "Audio parameters are not valid.";
- return false;
- }
- }
-
// Create an IAudioClient interface for the default rendering IMMDevice.
ScopedComPtr<IAudioClient> audio_client;
- if (device_id_.empty()) {
+ if (device_id_.empty() ||
+ CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
@@ -186,6 +147,7 @@ bool WASAPIAudioOutputStream::Open() {
if (!CoreAudioUtil::IsFormatSupported(audio_client,
share_mode_,
&format_)) {
+ LOG(ERROR) << "Audio parameters are not supported.";
return false;
}
@@ -201,10 +163,13 @@ bool WASAPIAudioOutputStream::Open() {
// We know from experience that the best possible callback sequence is
// achieved when the packet size (given by the native device period)
- // is an even multiple of the endpoint buffer size.
+ // is an even divisor of the endpoint buffer size.
// Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
- LOG(ERROR) << "Bailing out due to non-perfect timing.";
+ LOG(ERROR)
+ << "Bailing out due to non-perfect timing. Buffer size of "
+ << packet_size_frames_ << " is not an even divisor of "
+ << endpoint_buffer_size_frames_;
return false;
}
} else {
@@ -287,8 +252,7 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
// Start streaming data between the endpoint buffer and the audio engine.
HRESULT hr = audio_client_->Start();
if (FAILED(hr)) {
- LOG_GETLASTERROR(ERROR)
- << "Failed to start output streaming: " << std::hex << hr;
+ PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
StopThread();
callback->OnError(this);
}
@@ -303,8 +267,7 @@ void WASAPIAudioOutputStream::Stop() {
// Stop output audio streaming.
HRESULT hr = audio_client_->Stop();
if (FAILED(hr)) {
- LOG_GETLASTERROR(ERROR)
- << "Failed to stop output streaming: " << std::hex << hr;
+ PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
source_->OnError(this);
}
@@ -315,8 +278,7 @@ void WASAPIAudioOutputStream::Stop() {
// Flush all pending data and reset the audio clock stream position to 0.
hr = audio_client_->Reset();
if (FAILED(hr)) {
- LOG_GETLASTERROR(ERROR)
- << "Failed to reset streaming: " << std::hex << hr;
+ PLOG(ERROR) << "Failed to reset streaming: " << std::hex << hr;
callback->OnError(this);
}
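
The Open() change above tightens the old "non-perfect timing" bailout: the packet size (the native device period in frames) must evenly divide the endpoint buffer size, or the stream refuses to open. A minimal sketch of that check, using the 48 kHz example from the comment:

// Minimal sketch of the timing check performed in Open() above: the packet
// size must evenly divide the endpoint buffer size, otherwise callbacks
// drift against the engine's buffer consumption.
#include <stdio.h>

bool PacketSizeDividesEndpointBuffer(int endpoint_buffer_frames,
                                     int packet_frames) {
  return packet_frames > 0 && endpoint_buffer_frames % packet_frames == 0;
}

int main() {
  // 48 kHz example from the comment above: 960 % 480 == 0, so this passes.
  printf("%d\n", PacketSizeDividesEndpointBuffer(960, 480));  // prints 1
  // A 441-frame packet against a 960-frame buffer would be rejected.
  printf("%d\n", PacketSizeDividesEndpointBuffer(960, 441));  // prints 0
  return 0;
}
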
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index 2baf6f1ac9a..c118947d94f 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -190,11 +190,6 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Set to true when stream is successfully opened.
bool opened_;
- // We check if the input audio parameters are identical (bit depth is
- // excluded) to the preferred (native) audio parameters during construction.
- // Open() will fail if |audio_parameters_are_valid_| is false.
- bool audio_parameters_are_valid_;
-
// Volume level from 0 to 1.
float volume_;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index 5fda4b14509..ed03d2b714d 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -16,6 +16,7 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/decoder_buffer.h"
@@ -60,16 +61,6 @@ ACTION_P(QuitLoop, loop) {
loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
}
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
// This audio source implementation should be used for manual tests only since
// it takes about 20 seconds to play out a file.
class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
@@ -139,13 +130,6 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
return frames;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) {}
int file_size() { return file_->data_size(); }
@@ -233,7 +217,7 @@ class AudioOutputStreamWrapper {
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
AudioParameters(format_, channel_layout_, sample_rate_,
bits_per_sample_, samples_per_packet_),
- std::string(), std::string());
+ std::string());
EXPECT_TRUE(aos);
return aos;
}
@@ -432,27 +416,6 @@ TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
aos->Close();
}
-// Use a non-preferred packet size and verify that Open() fails.
-TEST(WASAPIAudioOutputStreamTest, InvalidPacketSize) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- if (ExclusiveModeIsEnabled())
- return;
-
- AudioParameters preferred_params;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &preferred_params)));
- int too_large_packet_size = 2 * preferred_params.frames_per_buffer();
-
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create(too_large_packet_size);
- EXPECT_FALSE(aos->Open());
-
- aos->Close();
-}
-
// This test is intended for manual tests and should only be enabled
// when it is required to play out data from a local PCM file.
// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index 242813a8c65..eb05ca0547b 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -26,12 +26,11 @@
#include "media/audio/win/audio_low_latency_input_win.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/audio_unified_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/audio/win/device_enumeration_win.h"
#include "media/audio/win/wavein_input_win.h"
#include "media/audio/win/waveout_output_win.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
@@ -128,28 +127,30 @@ static int NumberOfWaveOutBuffers() {
}
AudioManagerWin::AudioManagerWin(AudioLogFactory* audio_log_factory)
- : AudioManagerBase(audio_log_factory) {
- if (!CoreAudioUtil::IsSupported()) {
- // Use the Wave API for device enumeration if XP or lower.
- enumeration_type_ = kWaveEnumeration;
- } else {
- // Use the MMDevice API for device enumeration if Vista or higher.
- enumeration_type_ = kMMDeviceEnumeration;
- }
-
+ : AudioManagerBase(audio_log_factory),
+ // |CoreAudioUtil::IsSupported()| uses static variables to avoid
+ // performing multiple initializations, which is not thread safe.
+ // Therefore we call it explicitly here, before the audio thread is
+ // kicked off and before any other work is done.
+ enumeration_type_(CoreAudioUtil::IsSupported() ?
+ kMMDeviceEnumeration : kWaveEnumeration) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+ // WARNING: This is executed on the UI loop; do not add any code here that
+ // loads libraries or attempts to call out into the OS. Instead, add such
+ // code to the InitializeOnAudioThread() method below.
+
// Task must be posted last to avoid races from handing out "this" to the
// audio thread.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerWin::CreateDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerWin::InitializeOnAudioThread, base::Unretained(this)));
}
AudioManagerWin::~AudioManagerWin() {
// It's safe to post a task here since Shutdown() will wait for all tasks to
// complete before returning.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerWin::DestroyDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerWin::ShutdownOnAudioThread, base::Unretained(this)));
Shutdown();
}
@@ -161,18 +162,20 @@ bool AudioManagerWin::HasAudioInputDevices() {
return (::waveInGetNumDevs() != 0);
}
-void AudioManagerWin::CreateDeviceListener() {
- // AudioDeviceListenerWin must be initialized on a COM thread and should only
- // be used if WASAPI / Core Audio is supported.
- if (CoreAudioUtil::IsSupported()) {
- output_device_listener_.reset(new AudioDeviceListenerWin(BindToLoop(
- GetMessageLoop(), base::Bind(
- &AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
- base::Unretained(this)))));
+void AudioManagerWin::InitializeOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
+ if (core_audio_supported()) {
+ // AudioDeviceListenerWin must be initialized on a COM thread and should
+ // only be used if WASAPI / Core Audio is supported.
+ output_device_listener_.reset(new AudioDeviceListenerWin(BindToCurrentLoop(
+ base::Bind(&AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
+ base::Unretained(this)))));
}
}
-void AudioManagerWin::DestroyDeviceListener() {
+void AudioManagerWin::ShutdownOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
output_device_listener_.reset();
}
@@ -243,7 +246,7 @@ base::string16 AudioManagerWin::GetAudioInputDeviceModel() {
void AudioManagerWin::ShowAudioInputSettings() {
std::wstring program;
std::string argument;
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
program = L"sndvol32.exe";
argument = "-R";
} else {
@@ -263,7 +266,6 @@ void AudioManagerWin::GetAudioDeviceNamesImpl(
bool input,
AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
- DCHECK(enumeration_type() != kUninitializedEnumeration);
// Enumerate all active audio-endpoint capture devices.
if (enumeration_type() == kWaveEnumeration) {
// Utilize the Wave API for Windows XP.
@@ -299,26 +301,30 @@ void AudioManagerWin::GetAudioOutputDeviceNames(
AudioParameters AudioManagerWin::GetInputStreamParameters(
const std::string& device_id) {
- int sample_rate = 48000;
- ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- if (CoreAudioUtil::IsSupported()) {
- int hw_sample_rate = WASAPIAudioInputStream::HardwareSampleRate(device_id);
- if (hw_sample_rate)
- sample_rate = hw_sample_rate;
- channel_layout =
- WASAPIAudioInputStream::HardwareChannelCount(device_id) == 1 ?
- CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ AudioParameters parameters;
+ if (!core_audio_supported()) {
+ // Windows Wave implementation is being used.
+ parameters = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 0, 48000,
+ 16, kFallbackBufferSize, AudioParameters::NO_EFFECTS);
+ } else {
+ parameters = WASAPIAudioInputStream::GetInputStreamParameters(device_id);
}
- // TODO(Henrika): improve the default buffer size value for input stream.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, 16, kFallbackBufferSize);
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size) {
+ parameters.Reset(parameters.format(), parameters.channel_layout(),
+ parameters.channels(), parameters.input_channels(),
+ parameters.sample_rate(), parameters.bits_per_sample(),
+ user_buffer_size);
+ }
+
+ return parameters;
}
std::string AudioManagerWin::GetAssociatedOutputDeviceID(
const std::string& input_device_id) {
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
NOTIMPLEMENTED()
<< "GetAssociatedOutputDeviceID is not supported on this OS";
return std::string();
@@ -348,13 +354,12 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
if (params.channels() > kWinMaxChannels)
return NULL;
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
DLOG_IF(ERROR, !device_id.empty() &&
device_id != AudioManagerBase::kDefaultDeviceId)
@@ -364,22 +369,14 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
this, params, NumberOfWaveOutBuffers(), WAVE_MAPPER);
}
- // TODO(rtoy): support more than stereo input.
- if (params.input_channels() > 0) {
- DVLOG(1) << "WASAPIUnifiedStream is created.";
- DLOG_IF(ERROR, !device_id.empty() &&
- device_id != AudioManagerBase::kDefaultDeviceId)
- << "Opening by device id not supported by WASAPIUnifiedStream";
- return new WASAPIUnifiedStream(this, params, input_device_id);
- }
-
// Pass an empty string to indicate that we want the default device
// since we consistently only check for an empty string in
// WASAPIAudioOutputStream.
return new WASAPIAudioOutputStream(this,
device_id == AudioManagerBase::kDefaultDeviceId ?
std::string() : device_id,
- params, eConsole);
+ params,
+ params.effects() & AudioParameters::DUCKING ? eCommunications : eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
@@ -395,8 +392,9 @@ AudioInputStream* AudioManagerWin::MakeLinearInputStream(
AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ DVLOG(1) << "MakeLowLatencyInputStream: " << device_id;
AudioInputStream* stream = NULL;
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista.";
stream = CreatePCMWaveInAudioInputStream(params, device_id);
@@ -408,7 +406,7 @@ AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
}
std::string AudioManagerWin::GetDefaultOutputDeviceID() {
- if (!CoreAudioUtil::IsSupported())
+ if (!core_audio_supported())
return std::string();
return CoreAudioUtil::GetDefaultOutputDeviceID();
}
@@ -416,8 +414,7 @@ std::string AudioManagerWin::GetDefaultOutputDeviceID() {
AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
- const bool core_audio_supported = CoreAudioUtil::IsSupported();
- DLOG_IF(ERROR, !core_audio_supported && !output_device_id.empty())
+ DLOG_IF(ERROR, !core_audio_supported() && !output_device_id.empty())
<< "CoreAudio is required to open non-default devices.";
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -426,8 +423,9 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
int buffer_size = kFallbackBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
- bool use_input_params = !core_audio_supported;
- if (core_audio_supported) {
+ int effects = AudioParameters::NO_EFFECTS;
+ bool use_input_params = !core_audio_supported();
+ if (core_audio_supported()) {
if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
// TODO(rtoy): tune these values for best possible WebAudio
// performance. WebRTC works well at 48kHz and a buffer size of 480
@@ -449,7 +447,13 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
buffer_size = params.frames_per_buffer();
channel_layout = params.channel_layout();
sample_rate = params.sample_rate();
+ effects = params.effects();
} else {
+ // TODO(tommi): This should really never happen, and it's not clear that
+ // setting |use_input_params| is the right thing to do, since WASAPI is
+ // definitely supported (see core_audio_supported() above) and
+ // |use_input_params| is only meant for cases where it isn't supported.
+ DLOG(ERROR) << "GetPreferredAudioParameters failed: " << std::hex << hr;
use_input_params = true;
}
}
@@ -459,7 +463,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
// If the user has enabled checking supported channel layouts or we don't
// have a valid channel layout yet, try to use the input layout. See bugs
// http://crbug.com/259165 and http://crbug.com/311906 for more details.
- if (core_audio_supported &&
+ if (core_audio_supported() &&
(cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts) ||
channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)) {
// Check if it is possible to open up at the specified input channel
@@ -484,6 +488,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
}
}
input_channels = input_params.input_channels();
+ effects |= input_params.effects();
if (use_input_params) {
// If WASAPI isn't supported we'll fallback to WaveOut, which will take
// care of resampling and bits per sample changes. By setting these
@@ -503,7 +508,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ sample_rate, bits_per_sample, buffer_size, effects);
}
AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
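
Two of the changes in this file thread AudioParameters::effects() through the manager: GetPreferredOutputStreamParameters() now ORs in input_params.effects() instead of hardcoding NO_EFFECTS, and MakeLowLatencyOutputStream() maps the DUCKING bit to the eCommunications role. A sketch of that flag plumbing, with stand-in constants (the actual enum values in AudioParameters are not reproduced here):

// Stand-in constants mirroring AudioParameters::NO_EFFECTS / DUCKING;
// the concrete bit values are assumptions for illustration only.
enum Effects {
  NO_EFFECTS = 0,
  DUCKING = 1 << 0,
};

// Stand-ins for the WASAPI eConsole / eCommunications endpoint roles.
enum Role { CONSOLE, COMMUNICATIONS };

// GetPreferredOutputStreamParameters() now ORs the input stream's effects
// into the output parameters instead of discarding them.
int CombineEffects(int output_effects, int input_effects) {
  return output_effects | input_effects;
}

// MakeLowLatencyOutputStream() picks the endpoint role from the DUCKING bit.
Role RoleForEffects(int effects) {
  return (effects & DUCKING) ? COMMUNICATIONS : CONSOLE;
}
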
diff --git a/chromium/media/audio/win/audio_manager_win.h b/chromium/media/audio/win/audio_manager_win.h
index 01044da40a0..d8d51844c05 100644
--- a/chromium/media/audio/win/audio_manager_win.h
+++ b/chromium/media/audio/win/audio_manager_win.h
@@ -39,8 +39,7 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -56,7 +55,6 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
private:
enum EnumerationType {
- kUninitializedEnumeration = 0,
kMMDeviceEnumeration,
kWaveEnumeration,
};
@@ -70,6 +68,10 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
enumeration_type_ = type;
}
+ inline bool core_audio_supported() const {
+ return enumeration_type_ == kMMDeviceEnumeration;
+ }
+
// Returns a PCMWaveInAudioInputStream instance or NULL on failure.
// This method converts MMDevice-style device ID to WaveIn-style device ID if
// necessary.
@@ -79,9 +81,10 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
const AudioParameters& params,
const std::string& device_id);
- // Helper methods for constructing AudioDeviceListenerWin on the audio thread.
- void CreateDeviceListener();
- void DestroyDeviceListener();
+ // Helper methods for performing expensive initialization tasks on the audio
+ // thread instead of on the UI thread on which AudioManager is constructed.
+ void InitializeOnAudioThread();
+ void ShutdownOnAudioThread();
void GetAudioDeviceNamesImpl(bool input, AudioDeviceNames* device_names);
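
The renamed helpers reflect a broader pattern: the constructor runs on the UI thread, so anything expensive or COM-bound is posted to the audio thread, and the thread-affine methods assert where they run. Below is a generic, non-Chromium sketch of that pattern using std::thread; Chromium's real mechanism is GetTaskRunner()->PostTask() plus BelongsToCurrentThread() checks, and real code must also manage object lifetime across the posted tasks (Chromium drains them in Shutdown()).

// Generic sketch: construct on one thread, post expensive initialization to
// a dedicated worker thread, assert thread affinity where it matters.
#include <cassert>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class WorkerThread {
 public:
  WorkerThread() : done_(false), thread_(&WorkerThread::Run, this) {}
  ~WorkerThread() {
    Post([this] { done_ = true; });  // Quit task runs on the worker thread.
    thread_.join();
  }
  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push(std::move(task));
    cv_.notify_one();
  }
  bool BelongsToCurrentThread() const {
    return std::this_thread::get_id() == thread_.get_id();
  }

 private:
  void Run() {
    while (!done_) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        task = std::move(tasks_.front());
        tasks_.pop();
      }
      task();  // Executed on the worker thread, outside the lock.
    }
  }

  bool done_;
  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> tasks_;
  std::thread thread_;  // Declared last so Run() sees initialized members.
};

// Usage mirroring AudioManagerWin: post initialization from the constructor
// and assert affinity in the method that must run on the audio thread.
struct Manager {
  WorkerThread audio_thread;
  Manager() {
    // For illustration only; real code must guarantee |this| outlives the
    // posted task.
    audio_thread.Post([this] { InitializeOnAudioThread(); });
  }
  void InitializeOnAudioThread() {
    assert(audio_thread.BelongsToCurrentThread());
    // ...create device listeners, load DLLs, etc.
  }
};
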
diff --git a/chromium/media/audio/win/audio_output_win_unittest.cc b/chromium/media/audio/win/audio_output_win_unittest.cc
index 2b8036d52a2..29026577ad8 100644
--- a/chromium/media/audio/win/audio_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_output_win_unittest.cc
@@ -15,6 +15,7 @@
#include "media/base/limits.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/simple_sources.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,6 +37,11 @@ namespace media {
static const wchar_t kAudioFile1_16b_m_16K[]
= L"media\\test\\data\\sweep02_16b_mono_16KHz.raw";
+static int ClearData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
+ audio_bus->Zero();
+ return audio_bus->frames();
+}
+
// This class allows us to find out if the callbacks are occurring as
// expected and if any error has been reported.
class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
@@ -52,12 +58,6 @@ class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
audio_bus->Zero();
return audio_bus->frames();
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- }
// AudioSourceCallback::OnError implementation:
virtual void OnError(AudioOutputStream* stream) {
++had_error_;
@@ -102,21 +102,6 @@ class TestSourceLaggy : public TestSourceBasic {
int lag_in_ms_;
};
-class MockAudioSource : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-
- static int ClearData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
- audio_bus->Zero();
- return audio_bus->frames();
- }
-};
-
// Helper class to memory map an entire file. The mapping is read-only. Don't
// use for gigabyte-sized files. Attempts to write to this memory generate
// memory access violations.
@@ -184,7 +169,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
oas->Close();
}
@@ -200,29 +185,29 @@ TEST(WinAudioTest, SanityOnMakeParams) {
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
media::limits::kMaxSamplesPerPacket + 1),
- std::string(), std::string()));
+ std::string()));
}
// Test that it can be opened and closed.
@@ -236,7 +221,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
oas->Close();
@@ -253,7 +238,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 1024 * 1024 * 1024),
- std::string(), std::string());
+ std::string());
EXPECT_TRUE(NULL == oas);
if (oas)
oas->Close();
@@ -272,7 +257,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
16000, 16, 256),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
TestSourceLaggy test_laggy(2, 90);
EXPECT_TRUE(oas->Open());
@@ -301,7 +286,7 @@ TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -332,7 +317,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -361,7 +346,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate / 2, 16,
samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
@@ -401,7 +386,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
kSampleRate, 16, kSamples100ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
@@ -438,7 +423,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -485,7 +470,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_MONO, sample_rate,
16, n * samples_10_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200, sample_rate);
@@ -519,10 +504,10 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
- NiceMock<MockAudioSource> source;
+ NiceMock<MockAudioSourceCallback> source;
EXPECT_TRUE(oas->Open());
uint32 bytes_100_ms = samples_100_ms * 2;
@@ -537,18 +522,18 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes, 0)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
+ .WillOnce(Invoke(ClearData));
// Note: If AudioManagerWin::NumberOfWaveOutBuffers() ever changes, or if this
// test is run on Vista, these expectations will fail.
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
+ .WillOnce(Invoke(ClearData));
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
2 * bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
+ .WillOnce(Invoke(ClearData));
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
2 * bytes_100_ms)))
@@ -606,7 +591,7 @@ class SyncSocketSource : public AudioOutputStream::AudioSourceCallback {
private:
base::SyncSocket* socket_;
int data_size_;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<float, base::AlignedFreeDeleter> data_;
scoped_ptr<AudioBus> audio_bus_;
};
@@ -629,7 +614,7 @@ DWORD __stdcall SyncSocketThread(void* context) {
SyncThreadContext& ctx = *(reinterpret_cast<SyncThreadContext*>(context));
// Setup AudioBus wrapping data we'll pass over the sync socket.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
+ scoped_ptr<float, base::AlignedFreeDeleter> data(static_cast<float*>(
base::AlignedAlloc(ctx.packet_size_bytes, AudioBus::kChannelAlignment)));
scoped_ptr<AudioBus> audio_bus = AudioBus::WrapMemory(
ctx.channels, ctx.frames, data.get());
@@ -673,7 +658,7 @@ TEST(WinAudioTest, SyncSocketBasic) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params,
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
ASSERT_TRUE(oas->Open());
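
The SyncSocket test above ships raw float sample data across a socket and views the same aligned memory block as an AudioBus on both ends. Below is a standalone sketch of that wrap-aligned-memory idea using the MSVC CRT's _aligned_malloc(); the 16-byte alignment, and the requirement that each channel start stays aligned, are assumptions mirroring AudioBus::kChannelAlignment and AudioBus::WrapMemory().

// Standalone sketch of the "wrap an aligned block as per-channel pointers"
// idea. Assumes |frames| is a multiple of 4 so every channel start within
// the planar block remains 16-byte aligned (4-byte floats).
#include <malloc.h>  // _aligned_malloc / _aligned_free (MSVC CRT).
#include <stddef.h>
#include <vector>

struct WrappedBus {
  float* block;
  std::vector<float*> channels;
  int frames;
};

bool WrapAlignedBlock(int channels, int frames, WrappedBus* bus) {
  const size_t kAlignment = 16;  // Assumption: AudioBus::kChannelAlignment.
  bus->frames = frames;
  bus->block = static_cast<float*>(
      _aligned_malloc(sizeof(float) * channels * frames, kAlignment));
  if (!bus->block)
    return false;
  bus->channels.resize(channels);
  for (int ch = 0; ch < channels; ++ch)
    bus->channels[ch] = bus->block + ch * frames;  // Planar layout.
  return true;
}

void ReleaseWrappedBlock(WrappedBus* bus) {
  _aligned_free(bus->block);
  bus->block = NULL;
}
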
diff --git a/chromium/media/audio/win/audio_unified_win.cc b/chromium/media/audio/win/audio_unified_win.cc
deleted file mode 100644
index 901c8b897fa..00000000000
--- a/chromium/media/audio/win/audio_unified_win.cc
+++ /dev/null
@@ -1,984 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/audio_unified_win.h"
-
-#include <Functiondiscoverykeys_devpkey.h>
-
-#include "base/debug/trace_event.h"
-#ifndef NDEBUG
-#include "base/file_util.h"
-#include "base/path_service.h"
-#endif
-#include "base/time/time.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/avrt_wrapper_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-
-using base::win::ScopedComPtr;
-using base::win::ScopedCOMInitializer;
-using base::win::ScopedCoMem;
-
-// Smoothing factor in exponential smoothing filter where 0 < alpha < 1.
-// Larger values of alpha reduce the level of smoothing.
-// See http://en.wikipedia.org/wiki/Exponential_smoothing for details.
-static const double kAlpha = 0.1;
-
-// Compute a rate compensation which always attracts us back to a specified
-// target level over a period of |kCorrectionTimeSeconds|.
-static const double kCorrectionTimeSeconds = 0.1;
-
-#ifndef NDEBUG
-// Max number of columns in the output text file |kUnifiedAudioDebugFileName|.
-// See LogElementNames enumerator for details on what each column represents.
-static const size_t kMaxNumSampleTypes = 4;
-
-static const size_t kMaxNumParams = 2;
-
-// Max number of rows in the output file |kUnifiedAudioDebugFileName|.
-// Each row corresponds to one set of sample values for (approximately) the
-// same time instant (stored in the first column).
-static const size_t kMaxFileSamples = 10000;
-
-// Name of output debug file used for off-line analysis of measurements which
-// can be utilized for performance tuning of this class.
-static const char kUnifiedAudioDebugFileName[] = "unified_win_debug.txt";
-
-// Name of output debug file used for off-line analysis of measurements.
-// This file will contain a list of audio parameters.
-static const char kUnifiedAudioParamsFileName[] = "unified_win_params.txt";
-#endif
-
-// Use the acquired IAudioClock interface to derive a time stamp of the audio
-// sample which is currently playing through the speakers.
-static double SpeakerStreamPosInMilliseconds(IAudioClock* clock) {
- UINT64 device_frequency = 0, position = 0;
- if (FAILED(clock->GetFrequency(&device_frequency)) ||
- FAILED(clock->GetPosition(&position, NULL))) {
- return 0.0;
- }
- return base::Time::kMillisecondsPerSecond *
- (static_cast<double>(position) / device_frequency);
-}
-
-// Get a time stamp in milliseconds given number of audio frames in |num_frames|
-// using the current sample rate |fs| as scale factor.
-// Example: |num_frames| = 960 and |fs| = 48000 => 20 [ms].
-static double CurrentStreamPosInMilliseconds(UINT64 num_frames, DWORD fs) {
- return base::Time::kMillisecondsPerSecond *
- (static_cast<double>(num_frames) / fs);
-}
-
-// Convert a timestamp in milliseconds to byte units given the audio format
-// in |format|.
-// Example: |ts_milliseconds| equals 10, sample rate is 48000 and frame size
-// is 4 bytes per audio frame => 480 * 4 = 1920 [bytes].
-static int MillisecondsToBytes(double ts_milliseconds,
- const WAVEFORMATPCMEX& format) {
- double seconds = ts_milliseconds / base::Time::kMillisecondsPerSecond;
- return static_cast<int>(seconds * format.Format.nSamplesPerSec *
- format.Format.nBlockAlign + 0.5);
-}
-
-// Convert frame count to milliseconds given the audio format in |format|.
-static double FrameCountToMilliseconds(int num_frames,
- const WAVEFORMATPCMEX& format) {
- return (base::Time::kMillisecondsPerSecond * num_frames) /
- static_cast<double>(format.Format.nSamplesPerSec);
-}
-
-namespace media {
-
-WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
- const AudioParameters& params,
- const std::string& input_device_id)
- : creating_thread_id_(base::PlatformThread::CurrentId()),
- manager_(manager),
- params_(params),
- input_channels_(params.input_channels()),
- output_channels_(params.channels()),
- input_device_id_(input_device_id),
- share_mode_(CoreAudioUtil::GetShareMode()),
- opened_(false),
- volume_(1.0),
- output_buffer_size_frames_(0),
- input_buffer_size_frames_(0),
- endpoint_render_buffer_size_frames_(0),
- endpoint_capture_buffer_size_frames_(0),
- num_written_frames_(0),
- total_delay_ms_(0.0),
- total_delay_bytes_(0),
- source_(NULL),
- input_callback_received_(false),
- io_sample_rate_ratio_(1),
- target_fifo_frames_(0),
- average_delta_(0),
- fifo_rate_compensation_(1),
- update_output_delay_(false),
- capture_delay_ms_(0) {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::WASAPIUnifiedStream");
- VLOG(1) << "WASAPIUnifiedStream::WASAPIUnifiedStream()";
- DCHECK(manager_);
-
- VLOG(1) << "Input channels : " << input_channels_;
- VLOG(1) << "Output channels: " << output_channels_;
- VLOG(1) << "Sample rate : " << params_.sample_rate();
- VLOG(1) << "Buffer size : " << params.frames_per_buffer();
-
-#ifndef NDEBUG
- input_time_stamps_.reset(new int64[kMaxFileSamples]);
- num_frames_in_fifo_.reset(new int[kMaxFileSamples]);
- resampler_margin_.reset(new int[kMaxFileSamples]);
- fifo_rate_comps_.reset(new double[kMaxFileSamples]);
- num_elements_.reset(new int[kMaxNumSampleTypes]);
- std::fill(num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes, 0);
- input_params_.reset(new int[kMaxNumParams]);
- output_params_.reset(new int[kMaxNumParams]);
-#endif
-
- DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
- << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
-
- // Load the Avrt DLL if not already loaded. Required to support MMCSS.
- bool avrt_init = avrt::Initialize();
- DCHECK(avrt_init) << "Failed to load the avrt.dll";
-
- // All events are auto-reset events and non-signaled initially.
-
- // Create the event which the audio engine will signal each time a buffer
- // has been recorded.
- capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-
- // Create the event which will be set in Stop() when streaming shall stop.
- stop_streaming_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-}
-
-WASAPIUnifiedStream::~WASAPIUnifiedStream() {
- VLOG(1) << "WASAPIUnifiedStream::~WASAPIUnifiedStream()";
-#ifndef NDEBUG
- base::FilePath data_file_name;
- PathService::Get(base::DIR_EXE, &data_file_name);
- data_file_name = data_file_name.AppendASCII(kUnifiedAudioDebugFileName);
- data_file_ = base::OpenFile(data_file_name, "wt");
- DVLOG(1) << ">> Output file " << data_file_name.value() << " is created.";
-
- size_t n = 0;
- size_t elements_to_write = *std::min_element(
- num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes);
- while (n < elements_to_write) {
- fprintf(data_file_, "%I64d %d %d %10.9f\n",
- input_time_stamps_[n],
- num_frames_in_fifo_[n],
- resampler_margin_[n],
- fifo_rate_comps_[n]);
- ++n;
- }
- base::CloseFile(data_file_);
-
- base::FilePath param_file_name;
- PathService::Get(base::DIR_EXE, &param_file_name);
- param_file_name = param_file_name.AppendASCII(kUnifiedAudioParamsFileName);
- param_file_ = base::OpenFile(param_file_name, "wt");
- DVLOG(1) << ">> Output file " << param_file_name.value() << " is created.";
- fprintf(param_file_, "%d %d\n", input_params_[0], input_params_[1]);
- fprintf(param_file_, "%d %d\n", output_params_[0], output_params_[1]);
- base::CloseFile(param_file_);
-#endif
-}
-
-bool WASAPIUnifiedStream::Open() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Open");
- DVLOG(1) << "WASAPIUnifiedStream::Open()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (opened_)
- return true;
-
- AudioParameters hw_output_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &hw_output_params);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get preferred output audio parameters.";
- return false;
- }
-
- AudioParameters hw_input_params;
- if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Query native parameters for the default capture device.
- hr = CoreAudioUtil::GetPreferredAudioParameters(
- eCapture, eConsole, &hw_input_params);
- } else {
- // Query native parameters for the capture device given by
- // |input_device_id_|.
- hr = CoreAudioUtil::GetPreferredAudioParameters(
- input_device_id_, &hw_input_params);
- }
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get preferred input audio parameters.";
- return false;
- }
-
- // It is currently only possible to open up the output audio device using
- // the native number of channels.
- if (output_channels_ != hw_output_params.channels()) {
- LOG(ERROR) << "Audio device does not support requested output channels.";
- return false;
- }
-
- // It is currently only possible to open up the input audio device using
- // the native number of channels. If the client asks for a higher channel
- // count, we will do channel upmixing in this class. The most typical
- // example is that the client provides stereo but the hardware can only be
- // opened in mono mode. We will do mono to stereo conversion in this case.
- if (input_channels_ < hw_input_params.channels()) {
- LOG(ERROR) << "Audio device does not support requested input channels.";
- return false;
- } else if (input_channels_ > hw_input_params.channels()) {
- ChannelLayout input_layout =
- GuessChannelLayout(hw_input_params.channels());
- ChannelLayout output_layout = GuessChannelLayout(input_channels_);
- channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
- DVLOG(1) << "Remixing input channel layout from " << input_layout
- << " to " << output_layout << "; from "
- << hw_input_params.channels() << " channels to "
- << input_channels_;
- }
-
- if (hw_output_params.sample_rate() != params_.sample_rate()) {
- LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
- << " must match the hardware sample-rate: "
- << hw_output_params.sample_rate();
- return false;
- }
-
- if (hw_output_params.frames_per_buffer() != params_.frames_per_buffer()) {
- LOG(ERROR) << "Requested buffer size: " << params_.frames_per_buffer()
- << " must match the hardware buffer size: "
- << hw_output_params.frames_per_buffer();
- return false;
- }
-
- // Set up WAVEFORMATPCMEX structures for input and output given the specified
- // audio parameters.
- SetIOFormats(hw_input_params, params_);
-
- // Create the input and output busses.
- input_bus_ = AudioBus::Create(
- hw_input_params.channels(), input_buffer_size_frames_);
- output_bus_ = AudioBus::Create(params_);
-
- // One extra bus is needed for the input channel mixing case.
- if (channel_mixer_) {
- DCHECK_LT(hw_input_params.channels(), input_channels_);
- // The size of the |channel_bus_| must be the same as the size of the
- // output bus to ensure that the channel manager can deal with both
- // resampled and non-resampled data as input.
- channel_bus_ = AudioBus::Create(
- input_channels_, params_.frames_per_buffer());
- }
-
- // Check if FIFO and resampling is required to match the input rate to the
- // output rate. If so, a special thread loop, optimized for this case, will
- // be used. This mode is also called varispeed mode.
- // Note that we can also use this mode when input and output rates are the
- // same but native buffer sizes differ (can happen if two different audio
- // devices are used). For this case, the resampler uses a target ratio of
- // 1.0 but SetRatio is called to compensate for clock-drift. The FIFO is
- // required to compensate for the difference in buffer sizes.
- // TODO(henrika): we could perhaps improve the performance for the second
- // case here by only using the FIFO and avoiding resampling. It is not clear
- // how much that would gain, and we would risk not compensating for clock
- // drift.
- if (hw_input_params.sample_rate() != params_.sample_rate() ||
- hw_input_params.frames_per_buffer() != params_.frames_per_buffer()) {
- DoVarispeedInitialization(hw_input_params, params_);
- }
-
- // Render side (event driven only in varispeed mode):
-
- ScopedComPtr<IAudioClient> audio_output_client =
- CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- if (!audio_output_client)
- return false;
-
- if (!CoreAudioUtil::IsFormatSupported(audio_output_client,
- share_mode_,
- &output_format_)) {
- return false;
- }
-
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // The |render_event_| will be NULL unless varispeed mode is utilized.
- hr = CoreAudioUtil::SharedModeInitialize(
- audio_output_client, &output_format_, render_event_.Get(),
- &endpoint_render_buffer_size_frames_);
- } else {
- // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
- }
- if (FAILED(hr))
- return false;
-
- ScopedComPtr<IAudioRenderClient> audio_render_client =
- CoreAudioUtil::CreateRenderClient(audio_output_client);
- if (!audio_render_client)
- return false;
-
- // Capture side (always event driven but format depends on varispeed or not):
-
- ScopedComPtr<IAudioClient> audio_input_client;
- if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
- audio_input_client = CoreAudioUtil::CreateDefaultClient(eCapture, eConsole);
- } else {
- ScopedComPtr<IMMDevice> audio_input_device(
- CoreAudioUtil::CreateDevice(input_device_id_));
- audio_input_client = CoreAudioUtil::CreateClient(audio_input_device);
- }
- if (!audio_input_client)
- return false;
-
- if (!CoreAudioUtil::IsFormatSupported(audio_input_client,
- share_mode_,
- &input_format_)) {
- return false;
- }
-
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // Include valid event handle for event-driven initialization.
- // The input side is always event driven independent of whether varispeed
- // is used or not.
- hr = CoreAudioUtil::SharedModeInitialize(
- audio_input_client, &input_format_, capture_event_.Get(),
- &endpoint_capture_buffer_size_frames_);
- } else {
- // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
- }
- if (FAILED(hr))
- return false;
-
- ScopedComPtr<IAudioCaptureClient> audio_capture_client =
- CoreAudioUtil::CreateCaptureClient(audio_input_client);
- if (!audio_capture_client)
- return false;
-
- // Varispeed mode requires additional preparations.
- if (VarispeedMode())
- ResetVarispeed();
-
- // Store all valid COM interfaces.
- audio_output_client_ = audio_output_client;
- audio_render_client_ = audio_render_client;
- audio_input_client_ = audio_input_client;
- audio_capture_client_ = audio_capture_client;
-
- opened_ = true;
- return SUCCEEDED(hr);
-}
-
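The upmix path prepared in Open() above reduces to a ChannelMixer transform between two AudioBus objects. A minimal sketch of that step, using only the ChannelMixer and AudioBus calls already present in this file (the 256-frame bus size is illustrative):

  media::ChannelMixer mixer(media::CHANNEL_LAYOUT_MONO,
                            media::CHANNEL_LAYOUT_STEREO);
  scoped_ptr<media::AudioBus> mono_bus = media::AudioBus::Create(1, 256);
  scoped_ptr<media::AudioBus> stereo_bus = media::AudioBus::Create(2, 256);
  mono_bus->Zero();  // Stands in for freshly captured input samples.
  mixer.Transform(mono_bus.get(), stereo_bus.get());  // 1 -> 2 upmix.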
-void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Start");
- DVLOG(1) << "WASAPIUnifiedStream::Start()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- CHECK(callback);
- CHECK(opened_);
-
- if (audio_io_thread_) {
- CHECK_EQ(callback, source_);
- return;
- }
-
- source_ = callback;
-
- if (VarispeedMode()) {
- ResetVarispeed();
- fifo_rate_compensation_ = 1.0;
- average_delta_ = 0.0;
- input_callback_received_ = false;
- update_output_delay_ = false;
- }
-
- // Create and start the thread that will listen for capture events.
- // We will also listen on render events on the same thread if varispeed
- // mode is utilized.
- audio_io_thread_.reset(
- new base::DelegateSimpleThread(this, "wasapi_io_thread"));
- audio_io_thread_->Start();
- if (!audio_io_thread_->HasBeenStarted()) {
- DLOG(ERROR) << "Failed to start WASAPI IO thread.";
- return;
- }
-
- // Start input streaming data between the endpoint buffer and the audio
- // engine.
- HRESULT hr = audio_input_client_->Start();
- if (FAILED(hr)) {
- StopAndJoinThread(hr);
- return;
- }
-
- // Ensure that the endpoint buffer is prepared with silence.
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- audio_output_client_, audio_render_client_)) {
- DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
- return;
- }
- }
- num_written_frames_ = endpoint_render_buffer_size_frames_;
-
- // Start output streaming data between the endpoint buffer and the audio
- // engine.
- hr = audio_output_client_->Start();
- if (FAILED(hr)) {
- StopAndJoinThread(hr);
- return;
- }
-}
-
-void WASAPIUnifiedStream::Stop() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Stop");
- DVLOG(1) << "WASAPIUnifiedStream::Stop()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (!audio_io_thread_)
- return;
-
- // Stop input audio streaming.
- HRESULT hr = audio_input_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop input streaming: " << std::hex << hr;
- }
-
- // Stop output audio streaming.
- hr = audio_output_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop output streaming: " << std::hex << hr;
- }
-
- // Wait until the thread completes and perform cleanup.
- SetEvent(stop_streaming_event_.Get());
- audio_io_thread_->Join();
- audio_io_thread_.reset();
-
- // Ensure that we don't quit the main thread loop immediately next
- // time Start() is called.
- ResetEvent(stop_streaming_event_.Get());
-
- // Clear source callback, it'll be set again on the next Start() call.
- source_ = NULL;
-
- // Flush all pending data and reset the audio clock stream position to 0.
- hr = audio_output_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset output streaming: " << std::hex << hr;
- }
-
- audio_input_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset input streaming: " << std::hex << hr;
- }
-
- // Extra safety check to ensure that the buffers are cleared.
- // If the buffers are not cleared correctly, the next call to Start()
- // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
- // TODO(henrika): this check is only needed for shared-mode streams.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK_EQ(0u, num_queued_frames);
-}
-
-void WASAPIUnifiedStream::Close() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Close");
- DVLOG(1) << "WASAPIUnifiedStream::Close()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
-
- // It is valid to call Close() before calling Open() or Start().
- // It is also valid to call Close() after Start() has been called.
- Stop();
-
- // Inform the audio manager that we have been closed. This will cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void WASAPIUnifiedStream::SetVolume(double volume) {
- DVLOG(1) << "SetVolume(volume=" << volume << ")";
- if (volume < 0 || volume > 1)
- return;
- volume_ = volume;
-}
-
-void WASAPIUnifiedStream::GetVolume(double* volume) {
- DVLOG(1) << "GetVolume()";
- *volume = static_cast<double>(volume_);
-}
-
-void WASAPIUnifiedStream::ProvideInput(int frame_delay, AudioBus* audio_bus) {
- // TODO(henrika): utilize frame_delay?
- // A non-zero frame delay means multiple callbacks were necessary to
- // fulfill the requested number of frames.
- if (frame_delay > 0)
- DVLOG(3) << "frame_delay: " << frame_delay;
-
-#ifndef NDEBUG
- resampler_margin_[num_elements_[RESAMPLER_MARGIN]] =
- fifo_->frames() - audio_bus->frames();
- num_elements_[RESAMPLER_MARGIN]++;
-#endif
-
- if (fifo_->frames() < audio_bus->frames()) {
- DLOG(ERROR) << "Not enough data in the FIFO ("
- << fifo_->frames() << " < " << audio_bus->frames() << ")";
- audio_bus->Zero();
- return;
- }
-
- fifo_->Consume(audio_bus, 0, audio_bus->frames());
-}
-
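ProvideInput() above is the consumer half of a producer/consumer pair built around media::AudioFifo; ProcessInputAudio() further down is the producer. A self-contained sketch of that contract, restricted to the AudioFifo and AudioBus calls made in this file (channel count and frame sizes are illustrative):

  media::AudioFifo fifo(2, 16384);  // channels, max frames (cf. kFifoSize).
  scoped_ptr<media::AudioBus> in = media::AudioBus::Create(2, 441);
  in->Zero();  // Stands in for captured data.
  if (in->frames() <= fifo.max_frames() - fifo.frames())
    fifo.Push(in.get());  // Capture side.
  scoped_ptr<media::AudioBus> out = media::AudioBus::Create(2, 441);
  if (fifo.frames() >= out->frames())
    fifo.Consume(out.get(), 0, out->frames());  // Render side.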
-void WASAPIUnifiedStream::SetIOFormats(const AudioParameters& input_params,
- const AudioParameters& output_params) {
- for (int n = 0; n < 2; ++n) {
- const AudioParameters& params = (n == 0) ? input_params : output_params;
- WAVEFORMATPCMEX* xformat = (n == 0) ? &input_format_ : &output_format_;
- WAVEFORMATEX* format = &xformat->Format;
-
- // Begin with the WAVEFORMATEX structure that specifies the basic format.
- format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format->nChannels = params.channels();
- format->nSamplesPerSec = params.sample_rate();
- format->wBitsPerSample = params.bits_per_sample();
- format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
- format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
- format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
-
- // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
- // Note that we always open up using the native channel layout.
- (*xformat).Samples.wValidBitsPerSample = format->wBitsPerSample;
- (*xformat).dwChannelMask =
- CoreAudioUtil::GetChannelConfig(
- std::string(), n == 0 ? eCapture : eRender);
- (*xformat).SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
- }
-
- input_buffer_size_frames_ = input_params.frames_per_buffer();
- output_buffer_size_frames_ = output_params.frames_per_buffer();
- VLOG(1) << "#audio frames per input buffer : " << input_buffer_size_frames_;
- VLOG(1) << "#audio frames per output buffer: " << output_buffer_size_frames_;
-
-#ifndef NDEBUG
- input_params_[0] = input_format_.Format.nSamplesPerSec;
- input_params_[1] = input_buffer_size_frames_;
- output_params_[0] = output_format_.Format.nSamplesPerSec;
- output_params_[1] = output_buffer_size_frames_;
-#endif
-}
-
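As a worked example of the field derivation above: 16-bit stereo at 48000 Hz yields nBlockAlign = (16 / 8) * 2 = 4 bytes per frame and nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second. The same arithmetic in isolation (values are illustrative, not tied to any device):

  WAVEFORMATEX format = {0};
  format.nChannels = 2;
  format.nSamplesPerSec = 48000;
  format.wBitsPerSample = 16;
  format.nBlockAlign = (format.wBitsPerSample / 8) * format.nChannels;  // 4
  format.nAvgBytesPerSec =
      format.nSamplesPerSec * format.nBlockAlign;  // 192000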
-void WASAPIUnifiedStream::DoVarispeedInitialization(
- const AudioParameters& input_params, const AudioParameters& output_params) {
- DVLOG(1) << "WASAPIUnifiedStream::DoVarispeedInitialization()";
-
- // A FIFO is required in this mode for input to output buffering.
- // Note that it will add some latency.
- fifo_.reset(new AudioFifo(input_params.channels(), kFifoSize));
- VLOG(1) << "Using FIFO of size " << fifo_->max_frames()
- << " (#channels=" << input_params.channels() << ")";
-
- // Create the multi channel resampler using the initial sample rate ratio.
- // We will call MultiChannelResampler::SetRatio() during runtime to
- // allow arbitrary combinations of input and output devices running off
- // different clocks and using different drivers, with potentially
- // differing sample-rates. Note that the requested block size is given by
- // the native input buffer size |input_buffer_size_frames_|.
- io_sample_rate_ratio_ = input_params.sample_rate() /
- static_cast<double>(output_params.sample_rate());
- DVLOG(2) << "io_sample_rate_ratio: " << io_sample_rate_ratio_;
- resampler_.reset(new MultiChannelResampler(
- input_params.channels(), io_sample_rate_ratio_, input_buffer_size_frames_,
- base::Bind(&WASAPIUnifiedStream::ProvideInput, base::Unretained(this))));
- VLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
- << output_params.sample_rate();
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- // The actual size will vary but the goal is to ensure that the average size
- // is given by this value.
- target_fifo_frames_ = kTargetFifoSafetyFactor * input_buffer_size_frames_;
- VLOG(1) << "Target FIFO size: " << target_fifo_frames_;
-
- // Create the event which the audio engine will signal each time it
- // wants an audio buffer to render.
- render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-
- // Allocate memory for temporary audio bus used to store resampled input
- // audio.
- resampled_bus_ = AudioBus::Create(
- input_params.channels(), output_buffer_size_frames_);
-
- // Buffer initial silence corresponding to target I/O buffering.
- ResetVarispeed();
-}
-
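To make the initialization above concrete: capturing at 44100 Hz while rendering at 48000 Hz gives an initial resampler ratio of 44100 / 48000 = 0.91875, and a native input buffer of 441 frames puts the FIFO target at kTargetFifoSafetyFactor * 441 = 882 frames. A standalone sketch of the same arithmetic (the rates and buffer size are illustrative):

  const int kInputRate = 44100;
  const int kOutputRate = 48000;
  const int kInputBufferFrames = 441;
  const double ratio =
      kInputRate / static_cast<double>(kOutputRate);      // 0.91875
  const int target_fifo_frames = 2 * kInputBufferFrames;  // 882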
-void WASAPIUnifiedStream::ResetVarispeed() {
- DCHECK(VarispeedMode());
-
- // Buffer initial silence corresponding to target I/O buffering.
- fifo_->Clear();
- scoped_ptr<AudioBus> silence =
- AudioBus::Create(input_format_.Format.nChannels,
- target_fifo_frames_);
- silence->Zero();
- fifo_->Push(silence.get());
- resampler_->Flush();
-}
-
-void WASAPIUnifiedStream::Run() {
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Increase the thread priority.
- audio_io_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
-
- // Enable MMCSS to ensure that this thread receives prioritized access to
- // CPU resources.
- // TODO(henrika): investigate if it is possible to include these additional
- // settings in SetThreadPriority() as well.
- DWORD task_index = 0;
- HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
- &task_index);
- bool mmcss_is_ok =
- (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
- if (!mmcss_is_ok) {
- // Failed to enable MMCSS on this thread. It is not fatal but can lead
- // to reduced QoS at high load.
- DWORD err = GetLastError();
- LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
- }
-
- // The IAudioClock interface enables us to monitor a stream's data
- // rate and the current position in the stream. Allocate it before we
- // start spinning.
- ScopedComPtr<IAudioClock> audio_output_clock;
- HRESULT hr = audio_output_client_->GetService(
- __uuidof(IAudioClock), audio_output_clock.ReceiveVoid());
- LOG_IF(WARNING, FAILED(hr)) << "Failed to create IAudioClock: "
- << std::hex << hr;
-
- bool streaming = true;
- bool error = false;
-
- HANDLE wait_array[3];
- size_t num_handles = 0;
- wait_array[num_handles++] = stop_streaming_event_;
- wait_array[num_handles++] = capture_event_;
- if (render_event_) {
- // One extra event handle is needed in varispeed mode.
- wait_array[num_handles++] = render_event_;
- }
-
- // Keep streaming audio until stop event is signaled.
- // Capture events are always used but render events are only active in
- // varispeed mode.
- while (streaming && !error) {
- // Wait for a close-down event, or a new capture event.
- DWORD wait_result = WaitForMultipleObjects(num_handles,
- wait_array,
- FALSE,
- INFINITE);
- switch (wait_result) {
- case WAIT_OBJECT_0 + 0:
- // |stop_streaming_event_| has been set.
- streaming = false;
- break;
- case WAIT_OBJECT_0 + 1:
- // |capture_event_| has been set
- if (VarispeedMode()) {
- ProcessInputAudio();
- } else {
- ProcessInputAudio();
- ProcessOutputAudio(audio_output_clock);
- }
- break;
- case WAIT_OBJECT_0 + 2:
- DCHECK(VarispeedMode());
- // |render_event_| has been set
- ProcessOutputAudio(audio_output_clock);
- break;
- default:
- error = true;
- break;
- }
- }
-
- if (streaming && error) {
- // Stop audio streaming since something has gone wrong in our main thread
- // loop. Note that we are still in a "started" state, hence a Stop() call
- // is required to join the thread properly.
- audio_input_client_->Stop();
- audio_output_client_->Stop();
- PLOG(ERROR) << "WASAPI streaming failed.";
- }
-
- // Disable MMCSS.
- if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
- PLOG(WARNING) << "Failed to disable MMCSS";
- }
-}
-
-void WASAPIUnifiedStream::ProcessInputAudio() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessInputAudio");
-
- BYTE* data_ptr = NULL;
- UINT32 num_captured_frames = 0;
- DWORD flags = 0;
- UINT64 device_position = 0;
- UINT64 capture_time_stamp = 0;
-
- const int bytes_per_sample = input_format_.Format.wBitsPerSample >> 3;
-
- base::TimeTicks now_tick = base::TimeTicks::HighResNow();
-
-#ifndef NDEBUG
- if (VarispeedMode()) {
- input_time_stamps_[num_elements_[INPUT_TIME_STAMP]] =
- now_tick.ToInternalValue();
- num_elements_[INPUT_TIME_STAMP]++;
- }
-#endif
-
- // Retrieve the amount of data in the capture endpoint buffer.
- // |endpoint_capture_time_stamp| is the value of the performance
- // counter at the time that the audio endpoint device recorded
- // the device position of the first audio frame in the data packet.
- HRESULT hr = audio_capture_client_->GetBuffer(&data_ptr,
- &num_captured_frames,
- &flags,
- &device_position,
- &capture_time_stamp);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to get data from the capture buffer";
- return;
- }
-
- if (hr == AUDCLNT_S_BUFFER_EMPTY) {
- // The return code is a success code but a new packet is *not* available
- // and none of the output parameters in the GetBuffer() call contains valid
- // values. Best we can do is to deliver silence and avoid setting
- // |input_callback_received_| since this only seems to happen for the
- // initial event(s) on some devices.
- input_bus_->Zero();
- } else {
- // Valid data has been recorded and it is now OK to set the flag which
- // informs the render side that capturing has started.
- input_callback_received_ = true;
- }
-
- if (num_captured_frames != 0) {
- if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
- // Clear out the capture buffer since silence is reported.
- input_bus_->Zero();
- } else {
- // Store captured data in an audio bus after de-interleaving
- // the data to match the audio bus structure.
- input_bus_->FromInterleaved(
- data_ptr, num_captured_frames, bytes_per_sample);
- }
- }
-
- hr = audio_capture_client_->ReleaseBuffer(num_captured_frames);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
-
- // Buffer input into FIFO if varispeed mode is used. The render event
- // will drive resampling of this data to match the output side.
- if (VarispeedMode()) {
- int available_frames = fifo_->max_frames() - fifo_->frames();
- if (input_bus_->frames() <= available_frames) {
- fifo_->Push(input_bus_.get());
- }
-#ifndef NDEBUG
- num_frames_in_fifo_[num_elements_[NUM_FRAMES_IN_FIFO]] =
- fifo_->frames();
- num_elements_[NUM_FRAMES_IN_FIFO]++;
-#endif
- }
-
- // Save resources by not asking for new delay estimates each time.
- // These estimates are fairly stable and it is perfectly safe to only
- // sample at a rate of ~1Hz.
- // TODO(henrika): we might have to increase the update rate in varispeed
- // mode since the delay variations are higher in this mode.
- if ((now_tick - last_delay_sample_time_).InMilliseconds() >
- kTimeDiffInMillisecondsBetweenDelayMeasurements &&
- input_callback_received_) {
- // Calculate the estimated capture delay, i.e., the latency between
- // the recording time and the time we when we are notified about
- // the recorded data. Note that the capture time stamp is given in
- // 100-nanosecond (0.1 microseconds) units.
- base::TimeDelta diff =
- now_tick - base::TimeTicks::FromInternalValue(0.1 * capture_time_stamp);
- capture_delay_ms_ = diff.InMillisecondsF();
-
- last_delay_sample_time_ = now_tick;
- update_output_delay_ = true;
- }
-}
-
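The 0.1 factor above is purely a unit conversion: |capture_time_stamp| arrives in 100-nanosecond units while base::TimeTicks counts microseconds internally, and one microsecond equals ten 100-ns ticks. The same conversion written out explicitly (the time-stamp value is illustrative):

  const UINT64 capture_time_stamp = 10000000;  // 100-ns units == 1 second.
  const int64 capture_time_us = capture_time_stamp / 10;  // To microseconds.
  const base::TimeTicks capture_time =
      base::TimeTicks::FromInternalValue(capture_time_us);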
-void WASAPIUnifiedStream::ProcessOutputAudio(IAudioClock* audio_output_clock) {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessOutputAudio");
-
- if (!input_callback_received_) {
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- audio_output_client_, audio_render_client_))
- DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
- }
- return;
- }
-
- // Rate adjusted resampling is required in varispeed mode. It means that
- // recorded audio samples will be read from the FIFO, resampled to match the
- // output sample-rate and then stored in |resampled_bus_|.
- if (VarispeedMode()) {
- // Calculate a varispeed rate scalar factor to compensate for drift between
- // input and output. We use the actual number of frames still in the FIFO
- // compared with the ideal value of |target_fifo_frames_|.
- int delta = fifo_->frames() - target_fifo_frames_;
-
- // Average |delta| because it can jitter back/forth quite frequently
- // by +/- the hardware buffer-size *if* the input and output callbacks are
- // happening at almost exactly the same time. Also, if the input and output
- // sample-rates are different then |delta| will jitter quite a bit due to
- // the rate conversion happening in the varispeed, plus the jittering of
- // the callbacks. The average value is what's important here.
- // We use an exponential smoothing filter to reduce the variations.
- average_delta_ += kAlpha * (delta - average_delta_);
-
- // Compute a rate compensation which always attracts us back to the
- // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
- double correction_time_frames =
- kCorrectionTimeSeconds * output_format_.Format.nSamplesPerSec;
- fifo_rate_compensation_ =
- (correction_time_frames + average_delta_) / correction_time_frames;
-
-#ifndef NDEBUG
- fifo_rate_comps_[num_elements_[RATE_COMPENSATION]] =
- fifo_rate_compensation_;
- num_elements_[RATE_COMPENSATION]++;
-#endif
-
- // Adjust for FIFO drift.
- const double new_ratio = io_sample_rate_ratio_ * fifo_rate_compensation_;
- resampler_->SetRatio(new_ratio);
- // Get resampled input audio from FIFO where the size is given by the
- // output side.
- resampler_->Resample(resampled_bus_->frames(), resampled_bus_.get());
- }
-
- // Derive a new total delay estimate if the capture side has set the
- // |update_output_delay_| flag.
- if (update_output_delay_) {
- // Calculate the estimated render delay, i.e., the time difference
- // between the time when data is added to the endpoint buffer and
- // when the data is played out on the actual speaker.
- const double stream_pos = CurrentStreamPosInMilliseconds(
- num_written_frames_ + output_buffer_size_frames_,
- output_format_.Format.nSamplesPerSec);
- const double speaker_pos =
- SpeakerStreamPosInMilliseconds(audio_output_clock);
- const double render_delay_ms = stream_pos - speaker_pos;
- const double fifo_delay_ms = VarispeedMode() ?
- FrameCountToMilliseconds(target_fifo_frames_, input_format_) : 0;
-
- // Derive the total delay, i.e., the sum of the input and output
- // delays. Also convert the value into byte units. An extra FIFO delay
- // is added for varispeed usage cases.
- total_delay_ms_ = VarispeedMode() ?
- capture_delay_ms_ + render_delay_ms + fifo_delay_ms :
- capture_delay_ms_ + render_delay_ms;
- DVLOG(2) << "total_delay_ms : " << total_delay_ms_;
- DVLOG(3) << " capture_delay_ms: " << capture_delay_ms_;
- DVLOG(3) << " render_delay_ms : " << render_delay_ms;
- DVLOG(3) << " fifo_delay_ms : " << fifo_delay_ms;
- total_delay_bytes_ = MillisecondsToBytes(total_delay_ms_, output_format_);
-
- // Wait for new signal from the capture side.
- update_output_delay_ = false;
- }
-
- // Select source depending on whether varispeed is utilized or not.
- // Also, the source might be the output of a channel mixer if channel mixing
- // is required to match the native input channels to the number of input
- // channels used by the client (given by |input_channels_| in this case).
- AudioBus* input_bus = VarispeedMode() ?
- resampled_bus_.get() : input_bus_.get();
- if (channel_mixer_) {
- DCHECK_EQ(input_bus->frames(), channel_bus_->frames());
- // Most common case is 1->2 channel upmixing.
- channel_mixer_->Transform(input_bus, channel_bus_.get());
- // Use the output from the channel mixer as new input bus.
- input_bus = channel_bus_.get();
- }
-
- // Prepare for rendering by calling OnMoreIOData().
- int frames_filled = source_->OnMoreIOData(
- input_bus,
- output_bus_.get(),
- AudioBuffersState(0, total_delay_bytes_));
- DCHECK_EQ(frames_filled, output_bus_->frames());
-
- // Keep track of the number of rendered frames since we need it for
- // our delay calculations.
- num_written_frames_ += frames_filled;
-
- // Derive the amount of available space in the endpoint buffer.
- // Avoid render attempt if there is no room for a captured packet.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- if (endpoint_render_buffer_size_frames_ - num_queued_frames <
- output_buffer_size_frames_)
- return;
-
- // Grab all available space in the rendering endpoint buffer
- // into which the client can write a data packet.
- uint8* audio_data = NULL;
- HRESULT hr = audio_render_client_->GetBuffer(output_buffer_size_frames_,
- &audio_data);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to access render buffer";
- return;
- }
-
- const int bytes_per_sample = output_format_.Format.wBitsPerSample >> 3;
-
- // Convert the audio bus content to interleaved integer data using
- // |audio_data| as destination.
- output_bus_->Scale(volume_);
- output_bus_->ToInterleaved(
- output_buffer_size_frames_, bytes_per_sample, audio_data);
-
- // Release the buffer space acquired in the GetBuffer() call.
- audio_render_client_->ReleaseBuffer(output_buffer_size_frames_, 0);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release render buffer";
-
- return;
-}
-
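To put numbers on the drift compensation above (kAlpha and kCorrectionTimeSeconds are defined elsewhere in this file; the 0.1 values below are stand-ins, not their actual values): with target_fifo_frames_ = 882 and 1000 frames queued, delta = 118. At 48000 Hz a 0.1 s correction window spans 4800 frames, so once the average settles near delta the compensation approaches (4800 + 118) / 4800 ≈ 1.0246, i.e. the resampler consumes input roughly 2.5% faster until the FIFO drains back toward its target. A sketch of one update step:

  const double kAlphaGuess = 0.1;              // Assumed value.
  const double kCorrectionSecondsGuess = 0.1;  // Assumed value.
  const int delta = 1000 - 882;                // FIFO frames minus target.
  double average_delta = 0.0;
  average_delta += kAlphaGuess * (delta - average_delta);  // 11.8 after 1 step.
  const double correction_frames = kCorrectionSecondsGuess * 48000;  // 4800
  const double compensation =
      (correction_frames + average_delta) / correction_frames;  // ~1.0025 here.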
-void WASAPIUnifiedStream::HandleError(HRESULT err) {
- CHECK((started() && GetCurrentThreadId() == audio_io_thread_->tid()) ||
- (!started() && GetCurrentThreadId() == creating_thread_id_));
- NOTREACHED() << "Error code: " << std::hex << err;
- if (source_)
- source_->OnError(this);
-}
-
-void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
- CHECK(GetCurrentThreadId() == creating_thread_id_);
- DCHECK(audio_io_thread_.get());
- SetEvent(stop_streaming_event_.Get());
- audio_io_thread_->Join();
- audio_io_thread_.reset();
- HandleError(err);
-}
-
-} // namespace media
diff --git a/chromium/media/audio/win/audio_unified_win.h b/chromium/media/audio/win/audio_unified_win.h
deleted file mode 100644
index 76c53297b51..00000000000
--- a/chromium/media/audio/win/audio_unified_win.h
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
-
-#include <Audioclient.h>
-#include <MMDeviceAPI.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/simple_thread.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_comptr.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_fifo.h"
-#include "media/base/channel_mixer.h"
-#include "media/base/media_export.h"
-#include "media/base/multi_channel_resampler.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-// Implementation of AudioOutputStream for Windows using the Core Audio API
- // where both capturing and rendering take place on the same thread to enable
-// audio I/O. This class allows arbitrary combinations of input and output
-// devices running off different clocks and using different drivers, with
-// potentially differing sample-rates.
-//
-// It is required to first acquire the native sample rate of the selected
-// output device and then use the same rate when creating this object.
-// The inner operation depends on the input sample rate which is determined
-// during construction. Three different main modes are supported:
-//
-// 1) input rate == output rate => input side drives output side directly.
-// 2) input rate != output rate => both sides are driven independently by
-// events and a FIFO plus a resampling unit is used to compensate for
-// differences in sample rates between the two sides.
-// 3) input rate == output rate but native buffer sizes are not identical =>
-// same inner functionality as in (2) to compensate for the differences
-// in buffer sizes and also compensate for any potential clock drift
-// between the two devices.
-//
- // Mode detection is done at construction, and using mode (1) leads to the
- // best performance (lower delay and no "varispeed distortion"), i.e., it is
- // recommended to use the same sample rate for input and output. Mode (2) uses a
-// resampler which supports rate adjustments to fine tune for things like
-// clock drift and differences in sample rates between different devices.
- // Mode (2) - which uses a FIFO and an adjustable multi-channel resampler -
- // is also called the varispeed mode; it is used for case (3) as well, mainly
- // to compensate for the difference in buffer sizes.
-// Mode (3) can happen if two different audio devices are used.
- // As an example: some devices need a buffer size of 441 @ 44.1kHz and others
- // 448 @ 44.1kHz. This is a rare case and will only happen for sample rates
- // which are integer multiples of 11025 Hz (11025, 22050, 44100, 88200 etc.).
-//
-// Implementation notes:
-//
-// - Open() can fail if the input and output parameters do not fulfill
-// certain conditions. See source for Open() for more details.
- // - Channel mixing will be performed if the client asks for a larger
-// number of channels than the native audio layer provides.
-// Example: client wants stereo but audio layer provides mono. In this case
-// upmixing from mono to stereo (1->2) will be done.
-//
-// TODO(henrika):
-//
-// - Add support for exclusive mode.
-// - Add support for KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, i.e., 32-bit float
-// as internal sample-value representation.
-// - Perform fine-tuning for non-matching sample rates to reduce latency.
-//
-class MEDIA_EXPORT WASAPIUnifiedStream
- : public AudioOutputStream,
- public base::DelegateSimpleThread::Delegate {
- public:
- // The ctor takes all the usual parameters, plus |manager|, which is the
- // audio manager that is creating this object.
- WASAPIUnifiedStream(AudioManagerWin* manager,
- const AudioParameters& params,
- const std::string& input_device_id);
-
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~WASAPIUnifiedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- bool started() const {
- return audio_io_thread_.get() != NULL;
- }
-
- // Returns true if input sample rate differs from the output sample rate.
- // A FIFO and an adjustable multi-channel resampler are utilized in this mode.
- bool VarispeedMode() const { return (fifo_ && resampler_); }
-
- private:
- enum {
- // Time in milliseconds between two successive delay measurements.
- // We save resources by not updating the delay estimates for each capture
- // event (typically 100Hz rate).
- kTimeDiffInMillisecondsBetweenDelayMeasurements = 1000,
-
- // Max possible FIFO size.
- kFifoSize = 16384,
-
- // This value was determined empirically for minimum latency while still
- // guarding against FIFO under-runs. The actual target size will be equal
- // to kTargetFifoSafetyFactor * (native input buffer size).
- // TODO(henrika): tune this value for lowest possible latency for all
- // possible sample rate combinations.
- kTargetFifoSafetyFactor = 2
- };
-
- // Additional initialization required when the input and output sample rates
- // differ. Allocates resources for |fifo_|, |resampler_|, |render_event_| and
- // |resampled_bus_| given the provided input and output audio parameters.
- void DoVarispeedInitialization(const AudioParameters& input_params,
- const AudioParameters& output_params);
-
- // Clears varispeed related components such as the FIFO and the resampler.
- void ResetVarispeed();
-
- // Builds WAVEFORMATEX structures for input and output based on input and
- // output audio parameters.
- void SetIOFormats(const AudioParameters& input_params,
- const AudioParameters& output_params);
-
- // DelegateSimpleThread::Delegate implementation.
- virtual void Run() OVERRIDE;
-
- // MultiChannelResampler::MultiChannelAudioSourceProvider implementation.
- // Callback for providing more data into the resampler.
- // Only used in varispeed mode, i.e., when input rate != output rate.
- virtual void ProvideInput(int frame_delay, AudioBus* audio_bus);
-
- // Issues the OnError() callback to the |source_|.
- void HandleError(HRESULT err);
-
- // Stops and joins the audio thread in case of an error.
- void StopAndJoinThread(HRESULT err);
-
- // Converts unique endpoint ID to user-friendly device name.
- std::string GetDeviceName(LPCWSTR device_id) const;
-
- // Called on the audio IO thread for each capture event.
- // Buffers captured audio into a FIFO if varispeed is used or into an audio
- // bus if input and output sample rates are identical.
- void ProcessInputAudio();
-
- // Called on the audio IO thread for each render event when varispeed is
- // active or for each capture event when varispeed is not used.
- // In varispeed mode, it triggers a resampling callback, which reads from the
- // FIFO, and calls AudioSourceCallback::OnMoreIOData using the resampled
- // input signal and at the same time asks for data to play out.
- // If input and output rates are the same - instead of reading from the FIFO
- // and resampling - we read directly from the audio bus used to store
- // captured data in ProcessInputAudio.
- void ProcessOutputAudio(IAudioClock* audio_output_clock);
-
- // Contains the thread ID of the creating thread.
- base::PlatformThreadId creating_thread_id_;
-
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerWin* manager_;
-
- // Contains the audio parameter structure provided at construction.
- AudioParameters params_;
- // For convenience, same as in params_.
- int input_channels_;
- int output_channels_;
-
- // Unique ID of the input device to be opened.
- const std::string input_device_id_;
-
- // The sharing mode for the streams.
- // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
- // where AUDCLNT_SHAREMODE_SHARED is the default.
- AUDCLNT_SHAREMODE share_mode_;
-
- // Rendering and capturing is driven by this thread (no message loop).
- // All OnMoreIOData() callbacks will be called from this thread.
- scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
-
- // Contains the desired audio output format which is set up at construction.
- // It is required to first acquire the native sample rate of the selected
- // output device and then use the same rate when creating this object.
- WAVEFORMATPCMEX output_format_;
-
- // Contains the native audio input format which is set up at construction
- // if varispeed mode is utilized.
- WAVEFORMATPCMEX input_format_;
-
- // True when successfully opened.
- bool opened_;
-
- // Volume level from 0 to 1 used for output scaling.
- double volume_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the destination is expected to
- // receive in each OnMoreIOData() callback.
- size_t output_buffer_size_frames_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the source is expected to
- // deliver in each OnMoreIOData() callback.
- size_t input_buffer_size_frames_;
-
- // Length of the audio endpoint buffer.
- uint32 endpoint_render_buffer_size_frames_;
- uint32 endpoint_capture_buffer_size_frames_;
-
- // Counts the number of audio frames written to the endpoint buffer.
- uint64 num_written_frames_;
-
- // Time stamp for last delay measurement.
- base::TimeTicks last_delay_sample_time_;
-
- // Contains the total (sum of render and capture) delay in milliseconds.
- double total_delay_ms_;
-
- // Contains the total (sum of render and capture and possibly FIFO) delay
- // in bytes. The update frequency is set by a constant called
- // |kTimeDiffInMillisecondsBetweenDelayMeasurements|.
- int total_delay_bytes_;
-
- // Pointer to the client that will deliver audio samples to be played out.
- AudioSourceCallback* source_;
-
- // IMMDevice interfaces which represent audio endpoint devices.
- base::win::ScopedComPtr<IMMDevice> endpoint_render_device_;
- base::win::ScopedComPtr<IMMDevice> endpoint_capture_device_;
-
- // IAudioClient interfaces which enable a client to create and initialize
- // an audio stream between an audio application and the audio engine.
- base::win::ScopedComPtr<IAudioClient> audio_output_client_;
- base::win::ScopedComPtr<IAudioClient> audio_input_client_;
-
- // The IAudioRenderClient interface enables a client to write output
- // data to a rendering endpoint buffer.
- base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
-
- // The IAudioCaptureClient interface enables a client to read input
- // data from a capturing endpoint buffer.
- base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
-
- // The audio engine will signal this event each time a buffer has been
- // recorded.
- base::win::ScopedHandle capture_event_;
-
- // The audio engine will signal this event each time it needs a new
- // audio buffer to play out.
- // Only utilized in varispeed mode.
- base::win::ScopedHandle render_event_;
-
- // This event will be signaled when streaming shall stop.
- base::win::ScopedHandle stop_streaming_event_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
- scoped_ptr<AudioBus> output_bus_;
-
- // Container for sending data to AudioSourceCallback::OnMoreIOData().
- scoped_ptr<AudioBus> input_bus_;
-
- // Container for storing output from the channel mixer.
- scoped_ptr<AudioBus> channel_bus_;
-
- // All members below are only allocated, or used, in varispeed mode:
-
- // Temporary storage of resampled input audio data.
- scoped_ptr<AudioBus> resampled_bus_;
-
- // Set to true first time a capture event has been received in varispeed
- // mode.
- bool input_callback_received_;
-
- // MultiChannelResampler is a multi channel wrapper for SincResampler;
- // allowing high quality sample rate conversion of multiple channels at once.
- scoped_ptr<MultiChannelResampler> resampler_;
-
- // Resampler I/O ratio.
- double io_sample_rate_ratio_;
-
- // Used for input to output buffering.
- scoped_ptr<AudioFifo> fifo_;
-
- // The channel mixer is only created and utilized if the number of input
- // channels is larger than the native number of input channels (e.g., the
- // client wants stereo but the audio device only supports mono).
- scoped_ptr<ChannelMixer> channel_mixer_;
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- int target_fifo_frames_;
-
- // A running average of the measured delta between actual number of frames
- // in the FIFO versus |target_fifo_frames_|.
- double average_delta_;
-
- // A varispeed rate scalar which is calculated based on FIFO drift.
- double fifo_rate_compensation_;
-
- // Set to true when input side signals output side that a new delay
- // estimate is needed.
- bool update_output_delay_;
-
- // Capture side stores its delay estimate so the sum can be derived in
- // the render side.
- double capture_delay_ms_;
-
- // TODO(henrika): possibly remove these members once the performance is
- // properly tuned. Only used for off-line debugging.
-#ifndef NDEBUG
- enum LogElementNames {
- INPUT_TIME_STAMP,
- NUM_FRAMES_IN_FIFO,
- RESAMPLER_MARGIN,
- RATE_COMPENSATION
- };
-
- scoped_ptr<int64[]> input_time_stamps_;
- scoped_ptr<int[]> num_frames_in_fifo_;
- scoped_ptr<int[]> resampler_margin_;
- scoped_ptr<double[]> fifo_rate_comps_;
- scoped_ptr<int[]> num_elements_;
- scoped_ptr<int[]> input_params_;
- scoped_ptr<int[]> output_params_;
-
- FILE* data_file_;
- FILE* param_file_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(WASAPIUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
diff --git a/chromium/media/audio/win/audio_unified_win_unittest.cc b/chromium/media/audio/win/audio_unified_win_unittest.cc
deleted file mode 100644
index 15573aec76a..00000000000
--- a/chromium/media/audio/win/audio_unified_win_unittest.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/file_util.h"
-#include "base/message_loop/message_loop.h"
-#include "base/path_service.h"
-#include "base/test/test_timeouts.h"
-#include "base/time/time.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/win/audio_unified_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/base/channel_mixer.h"
-#include "media/base/media_switches.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AtLeast;
-using ::testing::Between;
-using ::testing::DoAll;
-using ::testing::NotNull;
-using ::testing::Return;
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-static const size_t kMaxDeltaSamples = 1000;
-static const char kDeltaTimeMsFileName[] = "unified_delta_times_ms.txt";
-
-// Verify that the delay estimate in the OnMoreIOData() callback is larger
- // than an expected minimum value.
-MATCHER_P(DelayGreaterThan, value, "") {
- return (arg.hardware_delay_bytes > value.hardware_delay_bytes);
-}
-
-// Used to terminate a loop from a different thread than the loop belongs to.
-// |loop| should be a MessageLoopProxy.
-ACTION_P(QuitLoop, loop) {
- loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-class MockUnifiedSourceCallback
- : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
-// AudioOutputStream::AudioSourceCallback implementation which enables audio
-// play-through. It also creates a text file that contains times between two
-// successive callbacks. Units are in milliseconds. This file can be used for
-// off-line analysis of the callback sequence.
-class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- UnifiedSourceCallback()
- : previous_call_time_(base::TimeTicks::Now()),
- text_file_(NULL),
- elements_to_write_(0) {
- delta_times_.reset(new int[kMaxDeltaSamples]);
- }
-
- virtual ~UnifiedSourceCallback() {
- base::FilePath file_name;
- EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
- file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
-
- EXPECT_TRUE(!text_file_);
- text_file_ = base::OpenFile(file_name, "wt");
- DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
- VLOG(0) << ">> Output file " << file_name.value() << " has been created.";
-
- // Write the array which contains delta times to a text file.
- if (text_file_) {
- size_t elements_written = 0;
- while (elements_written < elements_to_write_) {
- fprintf(text_file_, "%d\n", delta_times_[elements_written]);
- ++elements_written;
- }
- base::CloseFile(text_file_);
- }
- }
-
- virtual int OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- }
-
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- // Store time between this callback and the previous callback.
- const base::TimeTicks now_time = base::TimeTicks::Now();
- const int diff = (now_time - previous_call_time_).InMilliseconds();
- previous_call_time_ = now_time;
- if (elements_to_write_ < kMaxDeltaSamples) {
- delta_times_[elements_to_write_] = diff;
- ++elements_to_write_;
- }
-
- // Play out the recorded audio samples in loopback. Perform channel mixing
- // if required using a channel mixer which is created only if needed.
- if (source->channels() == dest->channels()) {
- source->CopyTo(dest);
- } else {
- // A channel mixer is required for converting audio between two different
- // channel layouts.
- if (!channel_mixer_) {
- // Guessing the channel layout will work OK for this unit test.
- // Main thing is that the number of channels is correct.
- ChannelLayout input_layout = GuessChannelLayout(source->channels());
- ChannelLayout output_layout = GuessChannelLayout(dest->channels());
- channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
- DVLOG(1) << "Remixing channel layout from " << input_layout
- << " to " << output_layout << "; from "
- << source->channels() << " channels to "
- << dest->channels() << " channels.";
- }
- if (channel_mixer_)
- channel_mixer_->Transform(source, dest);
- }
- return source->frames();
- }
-
- virtual void OnError(AudioOutputStream* stream) {
- NOTREACHED();
- }
-
- private:
- base::TimeTicks previous_call_time_;
- scoped_ptr<int[]> delta_times_;
- FILE* text_file_;
- size_t elements_to_write_;
- scoped_ptr<ChannelMixer> channel_mixer_;
-};
-
-// Convenience method which ensures that we fulfill all required conditions
-// to run unified audio tests on Windows.
-static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
- if (!CoreAudioUtil::IsSupported()) {
- LOG(WARNING) << "This tests requires Windows Vista or higher.";
- return false;
- }
-
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output devices detected.";
- return false;
- }
-
- if (!audio_man->HasAudioInputDevices()) {
- LOG(WARNING) << "No input devices detected.";
- return false;
- }
-
- return true;
-}
-
-// Convenience class which simplifies creation of a unified AudioOutputStream
-// object.
-class AudioUnifiedStreamWrapper {
- public:
- explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
- : com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager) {
- // We open up both sides (input and output) using the preferred set of
- // audio parameters. These parameters correspond to the mix format that
- // the audio engine uses internally for processing of shared-mode
- // output streams.
- AudioParameters out_params;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &out_params)));
-
- // WebAudio is the only real user of unified audio and it always asks
- // for stereo.
- // TODO(henrika): extend support to other input channel layouts as well.
- const int kInputChannels = 2;
-
- params_.Reset(out_params.format(),
- out_params.channel_layout(),
- out_params.channels(),
- kInputChannels,
- out_params.sample_rate(),
- out_params.bits_per_sample(),
- out_params.frames_per_buffer());
- }
-
- ~AudioUnifiedStreamWrapper() {}
-
- // Creates an AudioOutputStream object using default parameters.
- WASAPIUnifiedStream* Create() {
- return static_cast<WASAPIUnifiedStream*>(CreateOutputStream());
- }
-
- // Creates an AudioOutputStream object using default parameters but a
- // specified input device.
- WASAPIUnifiedStream* Create(const std::string& device_id) {
- return static_cast<WASAPIUnifiedStream*>(CreateOutputStream(device_id));
- }
-
- AudioParameters::Format format() const { return params_.format(); }
- int channels() const { return params_.channels(); }
- int bits_per_sample() const { return params_.bits_per_sample(); }
- int sample_rate() const { return params_.sample_rate(); }
- int frames_per_buffer() const { return params_.frames_per_buffer(); }
- int bytes_per_buffer() const { return params_.GetBytesPerBuffer(); }
- int input_channels() const { return params_.input_channels(); }
-
- private:
- AudioOutputStream* CreateOutputStream() {
- // Get the unique device ID of the default capture device instead of using
- // AudioManagerBase::kDefaultDeviceId since it provides slightly better
- // test coverage and will utilize the same code path as if a non-default
- // input device was used.
- ScopedComPtr<IMMDevice> audio_device =
- CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
- AudioDeviceName name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
- const std::string& input_device_id = name.unique_id;
- EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole,
- input_device_id));
-
- // Create the unified audio I/O stream using the default input device.
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- "", input_device_id);
- EXPECT_TRUE(aos);
- return aos;
- }
-
- AudioOutputStream* CreateOutputStream(const std::string& input_device_id) {
- // Create the unified audio I/O stream using the specified input device.
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- "", input_device_id);
- EXPECT_TRUE(aos);
- return aos;
- }
-
- ScopedCOMInitializer com_init_;
- AudioManager* audio_man_;
- AudioParameters params_;
-};
-
-// Convenience method which creates a default WASAPIUnifiedStream object.
-static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
- AudioManager* audio_manager) {
- AudioUnifiedStreamWrapper aosw(audio_manager);
- return aosw.Create();
-}
-
-// Convenience method which creates a default WASAPIUnifiedStream object but
-// with a specified audio input device.
-static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
- AudioManager* audio_manager, const std::string& device_id) {
- AudioUnifiedStreamWrapper aosw(audio_manager);
- return aosw.Create(device_id);
-}
-
-// Test Open(), Close() calling sequence.
-TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
- EXPECT_TRUE(wus->Open());
- wus->Close();
-}
-
-// Test Open(), Close() calling sequence for all available capture devices.
-TEST(WASAPIUnifiedStreamTest, OpenAndCloseForAllInputDevices) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- AudioDeviceNames device_names;
- audio_manager->GetAudioInputDeviceNames(&device_names);
- for (AudioDeviceNames::iterator i = device_names.begin();
- i != device_names.end(); ++i) {
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(
- audio_manager.get(), i->unique_id);
- EXPECT_TRUE(wus->Open());
- wus->Close();
- }
-}
-
-// Test Open(), Start(), Close() calling sequence.
-TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- MockUnifiedSourceCallback source;
- AudioUnifiedStreamWrapper ausw(audio_manager.get());
- WASAPIUnifiedStream* wus = ausw.Create();
-
- EXPECT_TRUE(wus->Open());
- EXPECT_CALL(source, OnError(wus))
- .Times(0);
- EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
- .Times(Between(0, 1))
- .WillOnce(Return(ausw.frames_per_buffer()));
- wus->Start(&source);
- wus->Close();
-}
-
- // Verify that IO callbacks start as they should.
-TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- base::MessageLoopForUI loop;
- MockUnifiedSourceCallback source;
- AudioUnifiedStreamWrapper ausw(audio_manager.get());
- WASAPIUnifiedStream* wus = ausw.Create();
-
- // Set up the expected minimum delay estimate: we use a minimum delay equal
- // to the sum of the render and capture buffer sizes. We can never reach a
- // delay lower than this value.
- AudioBuffersState min_total_audio_delay(0, 2 * ausw.bytes_per_buffer());
-
- EXPECT_TRUE(wus->Open());
- EXPECT_CALL(source, OnError(wus))
- .Times(0);
- EXPECT_CALL(source, OnMoreIOData(
- NotNull(), NotNull(), DelayGreaterThan(min_total_audio_delay)))
- .Times(AtLeast(2))
- .WillOnce(Return(ausw.frames_per_buffer()))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(ausw.frames_per_buffer())));
- wus->Start(&source);
- loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- wus->Stop();
- wus->Close();
-}
-
-// Perform a real-time test in loopback where the recorded audio is echoed
-// back to the speaker. This test allows the user to verify that the audio
-// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
-TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- base::MessageLoopForUI loop;
- UnifiedSourceCallback source;
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
-
- EXPECT_TRUE(wus->Open());
- wus->Start(&source);
- loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(10000));
- loop.Run();
- wus->Close();
-}
-
-} // namespace media
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index 790b2b140f7..71e8d717f62 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -146,7 +146,7 @@ static std::string GetDeviceID(IMMDevice* device) {
ScopedCoMem<WCHAR> device_id_com;
std::string device_id;
if (SUCCEEDED(device->GetId(&device_id_com)))
- WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
+ base::WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
return device_id;
}
@@ -154,7 +154,7 @@ bool CoreAudioUtil::IsSupported() {
// It is possible to force usage of WaveXxx APIs by using a command line flag.
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
- LOG(WARNING) << "Forcing usage of Windows WaveXxx APIs";
+ DVLOG(1) << "Forcing usage of Windows WaveXxx APIs";
return false;
}
@@ -230,6 +230,18 @@ ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_INPROC_SERVER);
+ if (hr == CO_E_NOTINITIALIZED) {
+ LOG(ERROR) << "CoCreateInstance fails with CO_E_NOTINITIALIZED";
+ // We have seen crashes which indicate that this method can in fact
+ // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
+ // modules. Calling CoInitializeEx is an attempt to resolve the reported
+ // issues. See http://crbug.com/378465 for details.
+ hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
+ if (SUCCEEDED(hr)) {
+ hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+ }
+ }
CHECK(SUCCEEDED(hr));
return device_enumerator;
}
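The retry above is a fallback; the intended calling pattern is that every thread touching Core Audio initializes COM first, so CoCreateInstance never returns CO_E_NOTINITIALIZED. A sketch of that pattern using the helper already employed elsewhere in media:

  base::win::ScopedCOMInitializer com_init(
      base::win::ScopedCOMInitializer::kMTA);
  ScopedComPtr<IMMDeviceEnumerator> enumerator =
      CoreAudioUtil::CreateDeviceEnumerator();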
@@ -288,8 +300,8 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
// Retrieve an audio device specified by an endpoint device-identification
// string.
- HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
- endpoint_device.Receive());
+ HRESULT hr = device_enumerator->GetDevice(
+ base::UTF8ToUTF16(device_id).c_str(), endpoint_device.Receive());
DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
<< std::hex << hr;
return endpoint_device;
@@ -316,9 +328,9 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
if (FAILED(hr))
return hr;
if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
- WideToUTF8(friendly_name.get().pwszVal,
- wcslen(friendly_name.get().pwszVal),
- &device_name.device_name);
+ base::WideToUTF8(friendly_name.get().pwszVal,
+ wcslen(friendly_name.get().pwszVal),
+ &device_name.device_name);
}
*name = device_name;
@@ -367,9 +379,9 @@ std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
}
std::string controller_id;
- WideToUTF8(instance_id.get().pwszVal,
- wcslen(instance_id.get().pwszVal),
- &controller_id);
+ base::WideToUTF8(instance_id.get().pwszVal,
+ wcslen(instance_id.get().pwszVal),
+ &controller_id);
return controller_id;
}
@@ -672,7 +684,20 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
// actual error code. The exact value is not important here.
return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
}
- return GetPreferredAudioParameters(client, params);
+
+ HRESULT hr = GetPreferredAudioParameters(client, params);
+ if (FAILED(hr))
+ return hr;
+
+ if (role == eCommunications) {
+ // Raise the 'DUCKING' flag for default communication devices.
+ *params = AudioParameters(params->format(), params->channel_layout(),
+ params->channels(), params->input_channels(), params->sample_rate(),
+ params->bits_per_sample(), params->frames_per_buffer(),
+ params->effects() | AudioParameters::DUCKING);
+ }
+
+ return hr;
}
HRESULT CoreAudioUtil::GetPreferredAudioParameters(
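The CO_E_NOTINITIALIZED fallback added above is the interesting part of this hunk. A minimal sketch of the same retry pattern using raw COM calls (hypothetical standalone form; the patch itself goes through ScopedComPtr):

#include <mmdeviceapi.h>
#include <objbase.h>

// Sketch only: retry CoCreateInstance after re-initializing COM, mirroring
// the CO_E_NOTINITIALIZED workaround from crbug.com/378465.
IMMDeviceEnumerator* CreateEnumeratorWithRetry() {
  IMMDeviceEnumerator* enumerator = NULL;
  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL,
                                CLSCTX_INPROC_SERVER,
                                IID_PPV_ARGS(&enumerator));
  if (hr == CO_E_NOTINITIALIZED) {
    // Some third-party modules leave the thread without COM initialization;
    // initialize it and try exactly once more.
    hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (SUCCEEDED(hr)) {
      hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL,
                            CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&enumerator));
    }
  }
  return SUCCEEDED(hr) ? enumerator : NULL;
}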
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index a210af906ea..8727f97b51c 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -4,7 +4,7 @@
// Utility methods for the Core Audio API on Windows.
// Always ensure that Core Audio is supported before using these methods.
-// Use media::CoreAudioIsSupported() for this purpose.
+// Use media::CoreAudioUtil::IsSupported() for this purpose.
// Also, all methods must be called on a valid COM thread. This can be done
// by using the base::win::ScopedCOMInitializer helper class.
@@ -37,6 +37,8 @@ class MEDIA_EXPORT CoreAudioUtil {
// Returns true if Windows Core Audio is supported.
// Always verify that this method returns true before using any of the
// methods in this class.
+ // WARNING: This function must be called once from the main thread before
+ // it is safe to call from other threads.
static bool IsSupported();
 // Converts between reference time and base::TimeDelta.
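A hypothetical call site honoring the contract documented above: COM initialized on the calling thread, and IsSupported() checked before any other CoreAudioUtil method.

#include "base/win/scoped_com_initializer.h"
#include "media/audio/win/core_audio_util_win.h"

// Sketch of a correct call site; the function name and fallback comment are
// illustrative, not part of the patch.
void UseCoreAudioOrFallBack() {
  base::win::ScopedCOMInitializer com_initializer;  // Valid COM thread.
  if (!media::CoreAudioUtil::IsSupported()) {
    // A WaveXxx-based fallback path would go here.
    return;
  }
  // Safe to use the other CoreAudioUtil methods on this thread now.
}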
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index e9ed0c4f597..f18878cb06a 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -504,7 +504,7 @@ TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
base::win::ScopedCoMem<WCHAR> wide_id;
device->GetId(&wide_id);
std::string id;
- WideToUTF8(wide_id, wcslen(wide_id), &id);
+ base::WideToUTF8(wide_id, wcslen(wide_id), &id);
found_a_pair = !CoreAudioUtil::GetMatchingOutputDeviceID(id).empty();
}
diff --git a/chromium/media/audio/win/device_enumeration_win.cc b/chromium/media/audio/win/device_enumeration_win.cc
index aa66afb12b1..1beddbbe939 100644
--- a/chromium/media/audio/win/device_enumeration_win.cc
+++ b/chromium/media/audio/win/device_enumeration_win.cc
@@ -68,7 +68,8 @@ static bool GetDeviceNamesWinImpl(EDataFlow data_flow,
// Store the unique name.
ScopedCoMem<WCHAR> endpoint_device_id;
audio_device->GetId(&endpoint_device_id);
- device.unique_id = WideToUTF8(static_cast<WCHAR*>(endpoint_device_id));
+ device.unique_id =
+ base::WideToUTF8(static_cast<WCHAR*>(endpoint_device_id));
// Retrieve user-friendly name of endpoint device.
// Example: "Microphone (Realtek High Definition Audio)".
@@ -82,7 +83,7 @@ static bool GetDeviceNamesWinImpl(EDataFlow data_flow,
// Store the user-friendly name.
if (SUCCEEDED(hr) &&
friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
- device.device_name = WideToUTF8(friendly_name.get().pwszVal);
+ device.device_name = base::WideToUTF8(friendly_name.get().pwszVal);
}
}
@@ -124,7 +125,7 @@ static bool GetDeviceNamesWinXPImpl(AudioDeviceNames* device_names) {
// Store the user-friendly name. Max length is MAXPNAMELEN(=32)
    // characters and the name can be truncated on XP.
// Example: "Microphone (Realtek High Defini".
- device.device_name = WideToUTF8(capabilities.szPname);
+ device.device_name = base::WideToUTF8(capabilities.szPname);
// Store the "unique" name (we use same as friendly name on Windows XP).
device.unique_id = device.device_name;
@@ -181,7 +182,7 @@ std::string ConvertToWinXPInputDeviceId(const std::string& device_id) {
if (result != MMSYSERR_NOERROR)
continue;
- std::string utf8_id = WideToUTF8(static_cast<WCHAR*>(id));
+ std::string utf8_id = base::WideToUTF8(static_cast<WCHAR*>(id));
// Check whether the endpoint ID string of this waveIn device matches that
// of the audio endpoint device.
if (device_id == utf8_id)
@@ -195,7 +196,7 @@ std::string ConvertToWinXPInputDeviceId(const std::string& device_id) {
result = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
if (result == MMSYSERR_NOERROR)
- return WideToUTF8(capabilities.szPname);
+ return base::WideToUTF8(capabilities.szPname);
}
return std::string();
diff --git a/chromium/media/audio/win/wavein_input_win.cc b/chromium/media/audio/win/wavein_input_win.cc
index 05771250e01..f12bcf244c5 100644
--- a/chromium/media/audio/win/wavein_input_win.cc
+++ b/chromium/media/audio/win/wavein_input_win.cc
@@ -10,6 +10,7 @@
#include "media/audio/audio_io.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/device_enumeration_win.h"
+#include "media/base/audio_bus.h"
namespace media {
@@ -20,7 +21,9 @@ static WAVEHDR* GetNextBuffer(WAVEHDR* current) {
}
PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
- AudioManagerWin* manager, const AudioParameters& params, int num_buffers,
+ AudioManagerWin* manager,
+ const AudioParameters& params,
+ int num_buffers,
const std::string& device_id)
: state_(kStateEmpty),
manager_(manager),
@@ -29,7 +32,8 @@ PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
callback_(NULL),
num_buffers_(num_buffers),
buffer_(NULL),
- channels_(params.channels()) {
+ channels_(params.channels()),
+ audio_bus_(media::AudioBus::Create(params)) {
DCHECK_GT(num_buffers_, 0);
format_.wFormatTag = WAVE_FORMAT_PCM;
format_.nChannels = params.channels() > 2 ? 2 : params.channels();
@@ -224,7 +228,8 @@ bool PCMWaveInAudioInputStream::GetAutomaticGainControl() {
void PCMWaveInAudioInputStream::HandleError(MMRESULT error) {
DLOG(WARNING) << "PCMWaveInAudio error " << error;
- callback_->OnError(this);
+ if (callback_)
+ callback_->OnError(this);
}
void PCMWaveInAudioInputStream::QueueNextPacket(WAVEHDR *buffer) {
@@ -289,11 +294,11 @@ void PCMWaveInAudioInputStream::WaveCallback(HWAVEIN hwi, UINT msg,
// there is currently no support for controlling the microphone volume
// level.
WAVEHDR* buffer = reinterpret_cast<WAVEHDR*>(param1);
- obj->callback_->OnData(obj,
- reinterpret_cast<const uint8*>(buffer->lpData),
- buffer->dwBytesRecorded,
- buffer->dwBytesRecorded,
- 0.0);
+ obj->audio_bus_->FromInterleaved(reinterpret_cast<uint8*>(buffer->lpData),
+ obj->audio_bus_->frames(),
+ obj->format_.wBitsPerSample / 8);
+ obj->callback_->OnData(
+ obj, obj->audio_bus_.get(), buffer->dwBytesRecorded, 0.0);
// Queue the finished buffer back with the audio driver. Since we are
// reusing the same buffers we can get away without calling
diff --git a/chromium/media/audio/win/wavein_input_win.h b/chromium/media/audio/win/wavein_input_win.h
index df5ce4d129b..5b1edd59fb3 100644
--- a/chromium/media/audio/win/wavein_input_win.h
+++ b/chromium/media/audio/win/wavein_input_win.h
@@ -20,6 +20,7 @@
namespace media {
+class AudioBus;
class AudioManagerWin;
class PCMWaveInAudioInputStream : public AudioInputStream {
@@ -123,6 +124,10 @@ class PCMWaveInAudioInputStream : public AudioInputStream {
// Lock used to avoid conflicts when Stop() is called during a callback.
base::Lock lock_;
+ // Extra audio bus used for storage of deinterleaved data for the OnData
+ // callback.
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(PCMWaveInAudioInputStream);
};
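The wavein changes above hand OnData() a deinterleaved AudioBus instead of raw interleaved bytes. A rough sketch of what that conversion amounts to for 16-bit PCM (illustrative only; the names and scaling are assumptions, not AudioBus's actual implementation):

#include <stdint.h>
#include <vector>

// Sketch: split interleaved 16-bit PCM into per-channel float planes in
// [-1, 1], approximating what AudioBus::FromInterleaved() provides here.
void DeinterleaveS16(const int16_t* interleaved, int frames, int channels,
                     std::vector<std::vector<float> >* planes) {
  planes->assign(channels, std::vector<float>(frames));
  for (int frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch) {
      (*planes)[ch][frame] = interleaved[frame * channels + ch] / 32768.0f;
    }
  }
}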
diff --git a/chromium/media/base/android/OWNERS b/chromium/media/base/android/OWNERS
index b896e286436..4f6695a84b1 100644
--- a/chromium/media/base/android/OWNERS
+++ b/chromium/media/base/android/OWNERS
@@ -1,2 +1,6 @@
-bulach@chromium.org
+# Preferred reviewers.
qinmin@chromium.org
+wjia@chromium.org
+
+# JNI or last resort.
+bulach@chromium.org
diff --git a/chromium/media/base/android/audio_decoder_job.cc b/chromium/media/base/android/audio_decoder_job.cc
index d089796ccc6..cb882cee42e 100644
--- a/chromium/media/base/android/audio_decoder_job.cc
+++ b/chromium/media/base/android/audio_decoder_job.cc
@@ -8,6 +8,14 @@
#include "base/lazy_instance.h"
#include "base/threading/thread.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/audio_timestamp_helper.h"
+
+namespace {
+
+// Use 16-bit PCM for audio output. Keep this value in sync with the output
+// format we pass to AudioTrack in MediaCodecBridge.
+const int kBytesPerAudioOutputSample = 2;
+}
namespace media {
@@ -24,54 +32,118 @@ class AudioDecoderThread : public base::Thread {
base::LazyInstance<AudioDecoderThread>::Leaky
g_audio_decoder_thread = LAZY_INSTANCE_INITIALIZER;
-AudioDecoderJob* AudioDecoderJob::Create(
- const AudioCodec audio_codec,
- int sample_rate,
- int channel_count,
- const uint8* extra_data,
- size_t extra_data_size,
- jobject media_crypto,
- const base::Closure& request_data_cb) {
- scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(audio_codec));
- if (codec && codec->Start(audio_codec, sample_rate, channel_count, extra_data,
- extra_data_size, true, media_crypto)) {
- return new AudioDecoderJob(codec.Pass(), request_data_cb);
- }
-
- LOG(ERROR) << "Failed to create AudioDecoderJob.";
- return NULL;
-}
-
AudioDecoderJob::AudioDecoderJob(
- scoped_ptr<AudioCodecBridge> audio_codec_bridge,
- const base::Closure& request_data_cb)
+ const base::Closure& request_data_cb,
+ const base::Closure& on_demuxer_config_changed_cb)
: MediaDecoderJob(g_audio_decoder_thread.Pointer()->message_loop_proxy(),
- audio_codec_bridge.get(), request_data_cb),
- audio_codec_bridge_(audio_codec_bridge.Pass()) {
+ request_data_cb,
+ on_demuxer_config_changed_cb),
+ audio_codec_(kUnknownAudioCodec),
+ num_channels_(0),
+ sampling_rate_(0),
+ volume_(-1.0),
+ bytes_per_frame_(0) {
}
-AudioDecoderJob::~AudioDecoderJob() {
+AudioDecoderJob::~AudioDecoderJob() {}
+
+bool AudioDecoderJob::HasStream() const {
+ return audio_codec_ != kUnknownAudioCodec;
}
void AudioDecoderJob::SetVolume(double volume) {
- audio_codec_bridge_->SetVolume(volume);
+ volume_ = volume;
+ SetVolumeInternal();
+}
+
+void AudioDecoderJob::SetBaseTimestamp(base::TimeDelta base_timestamp) {
+ DCHECK(!is_decoding());
+ base_timestamp_ = base_timestamp;
+ if (audio_timestamp_helper_)
+ audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_);
}
void AudioDecoderJob::ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
bool render_output,
+ base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) {
- size_t size_to_render = render_output ? size : 0u;
- if (size_to_render)
- audio_codec_bridge_->PlayOutputBuffer(output_buffer_index, size_to_render);
- audio_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, false);
+ render_output = render_output && (size != 0u);
+ if (render_output) {
+ int64 head_position = (static_cast<AudioCodecBridge*>(
+ media_codec_bridge_.get()))->PlayOutputBuffer(
+ output_buffer_index, size);
+ audio_timestamp_helper_->AddFrames(size / bytes_per_frame_);
+ int64 frames_to_play =
+ audio_timestamp_helper_->frame_count() - head_position;
+ DCHECK_GE(frames_to_play, 0);
+ current_presentation_timestamp =
+ audio_timestamp_helper_->GetTimestamp() -
+ audio_timestamp_helper_->GetFrameDuration(frames_to_play);
+ } else {
+ current_presentation_timestamp = kNoTimestamp();
+ }
+ media_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, false);
- callback.Run(size_to_render);
+ callback.Run(current_presentation_timestamp,
+ audio_timestamp_helper_->GetTimestamp());
}
bool AudioDecoderJob::ComputeTimeToRender() const {
return false;
}
+void AudioDecoderJob::UpdateDemuxerConfigs(const DemuxerConfigs& configs) {
+  // TODO(qinmin): split DemuxerConfig for audio and video separately so we
+  // can simply store the structure here.
+ audio_codec_ = configs.audio_codec;
+ num_channels_ = configs.audio_channels;
+ sampling_rate_ = configs.audio_sampling_rate;
+ set_is_content_encrypted(configs.is_audio_encrypted);
+ audio_extra_data_ = configs.audio_extra_data;
+ bytes_per_frame_ = kBytesPerAudioOutputSample * num_channels_;
+}
+
+bool AudioDecoderJob::AreDemuxerConfigsChanged(
+ const DemuxerConfigs& configs) const {
+ return audio_codec_ != configs.audio_codec ||
+ num_channels_ != configs.audio_channels ||
+ sampling_rate_ != configs.audio_sampling_rate ||
+ is_content_encrypted() != configs.is_audio_encrypted ||
+ audio_extra_data_.size() != configs.audio_extra_data.size() ||
+ !std::equal(audio_extra_data_.begin(),
+ audio_extra_data_.end(),
+ configs.audio_extra_data.begin());
+}
+
+bool AudioDecoderJob::CreateMediaCodecBridgeInternal() {
+ media_codec_bridge_.reset(AudioCodecBridge::Create(audio_codec_));
+ if (!media_codec_bridge_)
+ return false;
+
+ if (!(static_cast<AudioCodecBridge*>(media_codec_bridge_.get()))->Start(
+ audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
+ audio_extra_data_.size(), true, GetMediaCrypto().obj())) {
+ media_codec_bridge_.reset();
+ return false;
+ }
+
+ SetVolumeInternal();
+
+ // Need to pass the base timestamp to the new decoder.
+ if (audio_timestamp_helper_)
+ base_timestamp_ = audio_timestamp_helper_->GetTimestamp();
+ audio_timestamp_helper_.reset(new AudioTimestampHelper(sampling_rate_));
+ audio_timestamp_helper_->SetBaseTimestamp(base_timestamp_);
+ return true;
+}
+
+void AudioDecoderJob::SetVolumeInternal() {
+ if (media_codec_bridge_) {
+ static_cast<AudioCodecBridge*>(media_codec_bridge_.get())->SetVolume(
+ volume_);
+ }
+}
+
} // namespace media
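The rewritten ReleaseOutputBuffer() derives the audible timestamp from the AudioTrack playback head: everything written so far minus what is still queued. The same arithmetic as a standalone sketch (hypothetical names, seconds instead of base::TimeDelta):

#include <stdint.h>

// Sketch of the A/V-sync math above. |frames_written| counts frames handed
// to AudioTrack since the base timestamp; |head_position| is the playback
// head reported by PlayOutputBuffer().
double CurrentPresentationSeconds(int64_t frames_written,
                                  int64_t head_position,
                                  int sample_rate,
                                  double base_timestamp_seconds) {
  int64_t frames_to_play = frames_written - head_position;  // Still queued.
  double written_seconds =
      base_timestamp_seconds +
      static_cast<double>(frames_written) / sample_rate;
  // What is audible right now: everything written minus what is queued.
  return written_seconds - static_cast<double>(frames_to_play) / sample_rate;
}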
diff --git a/chromium/media/base/android/audio_decoder_job.h b/chromium/media/base/android/audio_decoder_job.h
index 3d1b21f4b4a..f3bb091a7d7 100644
--- a/chromium/media/base/android/audio_decoder_job.h
+++ b/chromium/media/base/android/audio_decoder_job.h
@@ -6,48 +6,68 @@
#define MEDIA_BASE_ANDROID_AUDIO_DECODER_JOB_H_
#include <jni.h>
+#include <vector>
+#include "base/callback.h"
#include "media/base/android/media_decoder_job.h"
namespace media {
class AudioCodecBridge;
+class AudioTimestampHelper;
// Class for managing audio decoding jobs.
class AudioDecoderJob : public MediaDecoderJob {
public:
- virtual ~AudioDecoderJob();
-
// Creates a new AudioDecoderJob instance for decoding audio.
- // |audio_codec| - The audio format the object needs to decode.
- // |sample_rate| - The sample rate of the decoded output.
- // |channel_count| - The number of channels in the decoded output.
- // |extra_data|, |extra_data_size| - Extra data buffer needed for initializing
- // the decoder.
- // |media_crypto| - Handle to a Java object that handles the encryption for
- // the audio data.
// |request_data_cb| - Callback used to request more data for the decoder.
- static AudioDecoderJob* Create(
- const AudioCodec audio_codec, int sample_rate, int channel_count,
- const uint8* extra_data, size_t extra_data_size, jobject media_crypto,
- const base::Closure& request_data_cb);
+ // |on_demuxer_config_changed_cb| - Callback used to inform the caller that
+ // demuxer config has changed.
+ AudioDecoderJob(const base::Closure& request_data_cb,
+ const base::Closure& on_demuxer_config_changed_cb);
+ virtual ~AudioDecoderJob();
+ // MediaDecoderJob implementation.
+ virtual bool HasStream() const OVERRIDE;
+
+ // Sets the volume of the audio output.
void SetVolume(double volume);
- private:
- AudioDecoderJob(scoped_ptr<AudioCodecBridge> audio_decoder_bridge,
- const base::Closure& request_data_cb);
+ // Sets the base timestamp for |audio_timestamp_helper_|.
+ void SetBaseTimestamp(base::TimeDelta base_timestamp);
+ private:
// MediaDecoderJob implementation.
virtual void ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
bool render_output,
+ base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) OVERRIDE;
-
virtual bool ComputeTimeToRender() const OVERRIDE;
+ virtual bool AreDemuxerConfigsChanged(
+ const DemuxerConfigs& configs) const OVERRIDE;
+ virtual void UpdateDemuxerConfigs(const DemuxerConfigs& configs) OVERRIDE;
+ virtual bool CreateMediaCodecBridgeInternal() OVERRIDE;
+
+ // Helper method to set the audio output volume.
+ void SetVolumeInternal();
+
+ // Audio configs from the demuxer.
+ AudioCodec audio_codec_;
+ int num_channels_;
+ int sampling_rate_;
+ std::vector<uint8> audio_extra_data_;
+ double volume_;
+ int bytes_per_frame_;
+
+ // Base timestamp for the |audio_timestamp_helper_|.
+ base::TimeDelta base_timestamp_;
+
+ // Object to calculate the current audio timestamp for A/V sync.
+ scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
- scoped_ptr<AudioCodecBridge> audio_codec_bridge_;
+ DISALLOW_COPY_AND_ASSIGN(AudioDecoderJob);
};
} // namespace media
diff --git a/chromium/media/base/android/browser_cdm_factory_android.cc b/chromium/media/base/android/browser_cdm_factory_android.cc
new file mode 100644
index 00000000000..2f438f054af
--- /dev/null
+++ b/chromium/media/base/android/browser_cdm_factory_android.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/browser_cdm_factory.h"
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ const std::string& key_system,
+ const BrowserCdm::SessionCreatedCB& session_created_cb,
+ const BrowserCdm::SessionMessageCB& session_message_cb,
+ const BrowserCdm::SessionReadyCB& session_ready_cb,
+ const BrowserCdm::SessionClosedCB& session_closed_cb,
+ const BrowserCdm::SessionErrorCB& session_error_cb) {
+ if (!MediaDrmBridge::IsKeySystemSupported(key_system)) {
+ NOTREACHED() << "Unsupported key system: " << key_system;
+ return scoped_ptr<BrowserCdm>();
+ }
+
+ scoped_ptr<MediaDrmBridge> cdm(MediaDrmBridge::Create(key_system,
+ session_created_cb,
+ session_message_cb,
+ session_ready_cb,
+ session_closed_cb,
+ session_error_cb));
+ if (!cdm) {
+ NOTREACHED() << "MediaDrmBridge cannot be created for " << key_system;
+ return scoped_ptr<BrowserCdm>();
+ }
+
+ // TODO(xhwang/ddorwin): Pass the security level from key system.
+ MediaDrmBridge::SecurityLevel security_level =
+ MediaDrmBridge::SECURITY_LEVEL_3;
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kMediaDrmEnableNonCompositing)) {
+ security_level = MediaDrmBridge::SECURITY_LEVEL_1;
+ }
+ if (!cdm->SetSecurityLevel(security_level)) {
+ DVLOG(1) << "failed to set security level " << security_level;
+ return scoped_ptr<BrowserCdm>();
+ }
+
+ return cdm.PassAs<BrowserCdm>();
+}
+
+} // namespace media
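The new factory follows the pre-C++11 Chromium ownership idiom: build the concrete type in a scoped_ptr, return an empty pointer on failure, and upcast with PassAs<>. A stripped-down sketch of the same shape (the types are placeholders):

#include "base/memory/scoped_ptr.h"

// Sketch of the ownership pattern used by CreateBrowserCdm() above.
struct Cdm { virtual ~Cdm() {} };
struct AndroidCdm : Cdm {};

scoped_ptr<Cdm> CreateCdmSketch(bool supported) {
  if (!supported)
    return scoped_ptr<Cdm>();  // An empty pointer signals failure.
  scoped_ptr<AndroidCdm> cdm(new AndroidCdm);
  return cdm.PassAs<Cdm>();  // Transfer ownership with an upcast.
}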
diff --git a/chromium/media/base/android/demuxer_android.h b/chromium/media/base/android/demuxer_android.h
index 865dc9d33f2..c8e6ffca63b 100644
--- a/chromium/media/base/android/demuxer_android.h
+++ b/chromium/media/base/android/demuxer_android.h
@@ -25,9 +25,6 @@ class MEDIA_EXPORT DemuxerAndroid {
// Must be called prior to calling any other methods.
virtual void Initialize(DemuxerAndroidClient* client) = 0;
- // Called to request the current audio/video decoder configurations.
- virtual void RequestDemuxerConfigs() = 0;
-
// Called to request additional data from the demuxer.
virtual void RequestDemuxerData(media::DemuxerStream::Type type) = 0;
@@ -44,12 +41,7 @@ class MEDIA_EXPORT DemuxerAndroid {
// Defines the client callback interface.
class MEDIA_EXPORT DemuxerAndroidClient {
public:
- // Called in response to RequestDemuxerConfigs() and also when the demuxer has
- // initialized.
- //
- // TODO(scherkus): Perhaps clients should be required to call
- // RequestDemuxerConfigs() to initialize themselves instead of the demuxer
- // calling this method without being prompted.
+ // Called when the demuxer has initialized.
virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) = 0;
// Called in response to RequestDemuxerData().
@@ -62,7 +54,7 @@ class MEDIA_EXPORT DemuxerAndroidClient {
// For regular demuxer seeks, |actual_browser_seek_time| is kNoTimestamp() and
// should be ignored by browser player.
virtual void OnDemuxerSeekDone(
- const base::TimeDelta& actual_browser_seek_time) = 0;
+ base::TimeDelta actual_browser_seek_time) = 0;
// Called whenever the demuxer has detected a duration change.
virtual void OnDemuxerDurationChanged(base::TimeDelta duration) = 0;
diff --git a/chromium/media/base/android/demuxer_stream_player_params.cc b/chromium/media/base/android/demuxer_stream_player_params.cc
index d5021a968c7..e95881eb4af 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.cc
+++ b/chromium/media/base/android/demuxer_stream_player_params.cc
@@ -12,8 +12,7 @@ DemuxerConfigs::DemuxerConfigs()
audio_sampling_rate(0),
is_audio_encrypted(false),
video_codec(kUnknownVideoCodec),
- is_video_encrypted(false),
- duration_ms(0) {}
+ is_video_encrypted(false) {}
DemuxerConfigs::~DemuxerConfigs() {}
diff --git a/chromium/media/base/android/demuxer_stream_player_params.h b/chromium/media/base/android/demuxer_stream_player_params.h
index 4a3a04d10e0..0b8886eb3b1 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.h
+++ b/chromium/media/base/android/demuxer_stream_player_params.h
@@ -5,9 +5,6 @@
#ifndef MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
#define MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
-#if defined(GOOGLE_TV)
-#include <string>
-#endif // defined(GOOGLE_TV)
#include <vector>
#include "media/base/audio_decoder_config.h"
@@ -34,11 +31,7 @@ struct MEDIA_EXPORT DemuxerConfigs {
bool is_video_encrypted;
std::vector<uint8> video_extra_data;
- int duration_ms;
-
-#if defined(GOOGLE_TV)
- std::string key_system;
-#endif // defined(GOOGLE_TV)
+ base::TimeDelta duration;
};
struct MEDIA_EXPORT AccessUnit {
@@ -61,6 +54,12 @@ struct MEDIA_EXPORT DemuxerData {
DemuxerStream::Type type;
std::vector<AccessUnit> access_units;
+ // If the last entry in |access_units| has a status equal to |kConfigChanged|,
+ // a corresponding DemuxerConfigs is added into this vector. The
+  // DemuxerConfigs should only contain information about the stream
+  // specified by |type|. This avoids the need for multiple IPCs when
+  // demuxer configs change.
+ std::vector<DemuxerConfigs> demuxer_configs;
};
}; // namespace media
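The new demuxer_configs field piggybacks config changes on the data message, as the comment above describes. A consumer-side sketch of that convention (a hypothetical helper, not part of the patch):

#include "media/base/android/demuxer_stream_player_params.h"

// Sketch: when the last access unit carries kConfigChanged, the new configs
// for the stream given by |data.type| ride along in demuxer_configs[0].
bool GetChangedConfigs(const media::DemuxerData& data,
                       media::DemuxerConfigs* configs_out) {
  if (data.access_units.empty())
    return false;
  if (data.access_units.back().status != media::DemuxerStream::kConfigChanged)
    return false;
  *configs_out = data.demuxer_configs[0];
  return true;
}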
diff --git a/chromium/media/base/android/media_codec_bridge.cc b/chromium/media/base/android/media_codec_bridge.cc
index 6e7987fd055..92b2791506d 100644
--- a/chromium/media/base/android/media_codec_bridge.cc
+++ b/chromium/media/base/android/media_codec_bridge.cc
@@ -14,7 +14,7 @@
#include "base/basictypes.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/safe_numerics.h"
+#include "base/numerics/safe_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "jni/MediaCodecBridge_jni.h"
@@ -116,12 +116,11 @@ bool MediaCodecBridge::SupportsSetParameters() {
// static
std::vector<MediaCodecBridge::CodecsInfo> MediaCodecBridge::GetCodecsInfo() {
std::vector<CodecsInfo> codecs_info;
- JNIEnv* env = AttachCurrentThread();
if (!IsAvailable())
return codecs_info;
+ JNIEnv* env = AttachCurrentThread();
std::string mime_type;
- std::string codec_name;
ScopedJavaLocalRef<jobjectArray> j_codec_info_array =
Java_MediaCodecBridge_getCodecsInfo(env);
jsize len = env->GetArrayLength(j_codec_info_array.obj());
@@ -145,6 +144,9 @@ std::vector<MediaCodecBridge::CodecsInfo> MediaCodecBridge::GetCodecsInfo() {
// static
bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
+ if (!IsAvailable())
+ return false;
+
JNIEnv* env = AttachCurrentThread();
std::string mime = CodecTypeToAndroidMimeType(codec);
if (mime.empty())
@@ -162,6 +164,9 @@ bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
// static
bool MediaCodecBridge::IsKnownUnaccelerated(const std::string& mime_type,
MediaCodecDirection direction) {
+ if (!IsAvailable())
+ return true;
+
std::string codec_type = AndroidMimeTypeToCodecType(mime_type);
std::vector<media::MediaCodecBridge::CodecsInfo> codecs_info =
MediaCodecBridge::GetCodecsInfo();
@@ -225,7 +230,7 @@ MediaCodecStatus MediaCodecBridge::QueueInputBuffer(
size_t data_size,
const base::TimeDelta& presentation_time) {
DVLOG(3) << __PRETTY_FUNCTION__ << index << ": " << data_size;
- if (data_size > base::checked_numeric_cast<size_t>(kint32max))
+ if (data_size > base::checked_cast<size_t>(kint32max))
return MEDIA_CODEC_ERROR;
if (data && !FillInputBuffer(index, data, data_size))
return MEDIA_CODEC_ERROR;
@@ -252,7 +257,7 @@ MediaCodecStatus MediaCodecBridge::QueueSecureInputBuffer(
int subsamples_size,
const base::TimeDelta& presentation_time) {
DVLOG(3) << __PRETTY_FUNCTION__ << index << ": " << data_size;
- if (data_size > base::checked_numeric_cast<size_t>(kint32max))
+ if (data_size > base::checked_cast<size_t>(kint32max))
return MEDIA_CODEC_ERROR;
if (data && !FillInputBuffer(index, data, data_size))
return MEDIA_CODEC_ERROR;
@@ -349,9 +354,9 @@ MediaCodecStatus MediaCodecBridge::DequeueOutputBuffer(
Java_MediaCodecBridge_dequeueOutputBuffer(
env, j_media_codec_.obj(), timeout.InMicroseconds());
*index = Java_DequeueOutputResult_index(env, result.obj());
- *offset = base::checked_numeric_cast<size_t>(
+ *offset = base::checked_cast<size_t>(
Java_DequeueOutputResult_offset(env, result.obj()));
- *size = base::checked_numeric_cast<size_t>(
+ *size = base::checked_cast<size_t>(
Java_DequeueOutputResult_numBytes(env, result.obj()));
if (presentation_time) {
*presentation_time = base::TimeDelta::FromMicroseconds(
@@ -408,7 +413,7 @@ void MediaCodecBridge::GetInputBuffer(int input_buffer_index,
ScopedJavaLocalRef<jobject> j_buffer(Java_MediaCodecBridge_getInputBuffer(
env, j_media_codec_.obj(), input_buffer_index));
*data = static_cast<uint8*>(env->GetDirectBufferAddress(j_buffer.obj()));
- *capacity = base::checked_numeric_cast<size_t>(
+ *capacity = base::checked_cast<size_t>(
env->GetDirectBufferCapacity(j_buffer.obj()));
}
@@ -588,9 +593,9 @@ bool AudioCodecBridge::ConfigureMediaFormat(jobject j_format,
return true;
}
-void AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
+int64 AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
DCHECK_LE(0, index);
- int numBytes = base::checked_numeric_cast<int>(size);
+ int numBytes = base::checked_cast<int>(size);
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> buf =
Java_MediaCodecBridge_getOutputBuffer(env, media_codec(), index);
@@ -598,7 +603,8 @@ void AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
ScopedJavaLocalRef<jbyteArray> byte_array =
base::android::ToJavaByteArray(env, buffer, numBytes);
- Java_MediaCodecBridge_playOutputBuffer(env, media_codec(), byte_array.obj());
+ return Java_MediaCodecBridge_playOutputBuffer(
+ env, media_codec(), byte_array.obj());
}
void AudioCodecBridge::SetVolume(double volume) {
@@ -606,7 +612,11 @@ void AudioCodecBridge::SetVolume(double volume) {
Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
}
+// static
AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec& codec) {
+ if (!MediaCodecBridge::IsAvailable())
+ return NULL;
+
const std::string mime = AudioCodecToAndroidMimeType(codec);
return mime.empty() ? NULL : new AudioCodecBridge(mime);
}
@@ -624,12 +634,15 @@ bool VideoCodecBridge::IsKnownUnaccelerated(const VideoCodec& codec,
VideoCodecToAndroidMimeType(codec), direction);
}
+// static
VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
bool is_secure,
const gfx::Size& size,
jobject surface,
jobject media_crypto) {
- JNIEnv* env = AttachCurrentThread();
+ if (!MediaCodecBridge::IsAvailable())
+ return NULL;
+
const std::string mime = VideoCodecToAndroidMimeType(codec);
if (mime.empty())
return NULL;
@@ -639,6 +652,7 @@ VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
if (!bridge->media_codec())
return NULL;
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createVideoDecoderFormat(
@@ -656,13 +670,16 @@ VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
return bridge->StartInternal() ? bridge.release() : NULL;
}
+// static
VideoCodecBridge* VideoCodecBridge::CreateEncoder(const VideoCodec& codec,
const gfx::Size& size,
int bit_rate,
int frame_rate,
int i_frame_interval,
int color_format) {
- JNIEnv* env = AttachCurrentThread();
+ if (!MediaCodecBridge::IsAvailable())
+ return NULL;
+
const std::string mime = VideoCodecToAndroidMimeType(codec);
if (mime.empty())
return NULL;
@@ -672,6 +689,7 @@ VideoCodecBridge* VideoCodecBridge::CreateEncoder(const VideoCodec& codec,
if (!bridge->media_codec())
return NULL;
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
ScopedJavaLocalRef<jobject> j_format(
Java_MediaCodecBridge_createVideoEncoderFormat(env,
@@ -698,7 +716,8 @@ VideoCodecBridge* VideoCodecBridge::CreateEncoder(const VideoCodec& codec,
VideoCodecBridge::VideoCodecBridge(const std::string& mime,
bool is_secure,
MediaCodecDirection direction)
- : MediaCodecBridge(mime, is_secure, direction) {}
+ : MediaCodecBridge(mime, is_secure, direction),
+ adaptive_playback_supported_for_testing_(-1) {}
void VideoCodecBridge::SetVideoBitrate(int bps) {
JNIEnv* env = AttachCurrentThread();
@@ -710,6 +729,16 @@ void VideoCodecBridge::RequestKeyFrameSoon() {
Java_MediaCodecBridge_requestKeyFrameSoon(env, media_codec());
}
+bool VideoCodecBridge::IsAdaptivePlaybackSupported(int width, int height) {
+ if (adaptive_playback_supported_for_testing_ == 0)
+ return false;
+ else if (adaptive_playback_supported_for_testing_ > 0)
+ return true;
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaCodecBridge_isAdaptivePlaybackSupported(
+ env, media_codec(), width, height);
+}
+
bool MediaCodecBridge::RegisterMediaCodecBridge(JNIEnv* env) {
return RegisterNativesImpl(env);
}
diff --git a/chromium/media/base/android/media_codec_bridge.h b/chromium/media/base/android/media_codec_bridge.h
index e71f67f918e..2d046465d4e 100644
--- a/chromium/media/base/android/media_codec_bridge.h
+++ b/chromium/media/base/android/media_codec_bridge.h
@@ -51,6 +51,8 @@ enum MediaCodecDirection {
class MEDIA_EXPORT MediaCodecBridge {
public:
// Returns true if MediaCodec is available on the device.
+ // All other static methods check IsAvailable() internally. There's no need
+ // to check IsAvailable() explicitly before calling them.
static bool IsAvailable();
// Returns true if MediaCodec.setParameters() is available on the device.
@@ -61,8 +63,8 @@ class MEDIA_EXPORT MediaCodecBridge {
static bool CanDecode(const std::string& codec, bool is_secure);
// Represents supported codecs on android.
- // TODO(qinmin): Curretly the codecs string only contains one codec, do we
- // need more specific codecs separated by comma. (e.g. "vp8" -> "vp8, vp8.0")
+ // TODO(qinmin): Currently the codecs string only contains one codec. Do we
+ // need to support codecs separated by comma. (e.g. "vp8" -> "vp8, vp8.0")?
struct CodecsInfo {
std::string codecs; // E.g. "vp8" or "avc1".
std::string name; // E.g. "OMX.google.vp8.decoder".
@@ -220,8 +222,9 @@ class AudioCodecBridge : public MediaCodecBridge {
bool play_audio, jobject media_crypto) WARN_UNUSED_RESULT;
// Play the output buffer. This call must be called after
- // DequeueOutputBuffer() and before ReleaseOutputBuffer.
- void PlayOutputBuffer(int index, size_t size);
+ // DequeueOutputBuffer() and before ReleaseOutputBuffer. Returns the playback
+ // head position expressed in frames.
+ int64 PlayOutputBuffer(int index, size_t size);
// Set the volume of the audio output.
void SetVolume(double volume);
@@ -260,10 +263,26 @@ class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
void SetVideoBitrate(int bps);
void RequestKeyFrameSoon();
+ // Returns whether adaptive playback is supported for this object given
+ // the new size.
+ bool IsAdaptivePlaybackSupported(int width, int height);
+
+ // Test-only method to set the return value of IsAdaptivePlaybackSupported().
+  // Without this function, the return value of IsAdaptivePlaybackSupported()
+  // is device-dependent. If |adaptive_playback_supported| is 0, the return
+  // value will be false; if it is greater than 0, it will be true.
+ void set_adaptive_playback_supported_for_testing(
+ int adaptive_playback_supported) {
+ adaptive_playback_supported_for_testing_ = adaptive_playback_supported;
+ }
+
private:
VideoCodecBridge(const std::string& mime,
bool is_secure,
MediaCodecDirection direction);
+
+ int adaptive_playback_supported_for_testing_;
};
} // namespace media
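The testing hook above packs three states into one int. Its decision logic, reduced to a standalone sketch:

// Sketch of the tri-state override behind IsAdaptivePlaybackSupported():
// a negative value defers to the device, 0 forces false, positive forces true.
bool ResolveOverride(int override_for_testing, bool device_supports_it) {
  if (override_for_testing == 0)
    return false;
  if (override_for_testing > 0)
    return true;
  return device_supports_it;  // Negative: device-dependent.
}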
diff --git a/chromium/media/base/android/media_decoder_job.cc b/chromium/media/base/android/media_decoder_job.cc
index c6ad9bbe6f7..4f720f8cbd2 100644
--- a/chromium/media/base/android/media_decoder_job.cc
+++ b/chromium/media/base/android/media_decoder_job.cc
@@ -7,9 +7,10 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
#include "media/base/android/media_codec_bridge.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/buffers.h"
namespace media {
@@ -20,57 +21,87 @@ namespace media {
static const int kMediaCodecTimeoutInMilliseconds = 250;
MediaDecoderJob::MediaDecoderJob(
- const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
- MediaCodecBridge* media_codec_bridge,
- const base::Closure& request_data_cb)
- : ui_loop_(base::MessageLoopProxy::current()),
- decoder_loop_(decoder_loop),
- media_codec_bridge_(media_codec_bridge),
+ const scoped_refptr<base::SingleThreadTaskRunner>& decoder_task_runner,
+ const base::Closure& request_data_cb,
+ const base::Closure& config_changed_cb)
+ : ui_task_runner_(base::MessageLoopProxy::current()),
+ decoder_task_runner_(decoder_task_runner),
needs_flush_(false),
input_eos_encountered_(false),
output_eos_encountered_(false),
skip_eos_enqueue_(true),
prerolling_(true),
- weak_this_(this),
request_data_cb_(request_data_cb),
- access_unit_index_(0),
+ config_changed_cb_(config_changed_cb),
+ current_demuxer_data_index_(0),
input_buf_index_(-1),
+ is_content_encrypted_(false),
stop_decode_pending_(false),
- destroy_pending_(false) {
+ destroy_pending_(false),
+ is_requesting_demuxer_data_(false),
+ is_incoming_data_invalid_(false),
+ release_resources_pending_(false),
+ drm_bridge_(NULL),
+ drain_decoder_(false) {
+ InitializeReceivedData();
+ eos_unit_.end_of_stream = true;
}
-MediaDecoderJob::~MediaDecoderJob() {}
+MediaDecoderJob::~MediaDecoderJob() {
+ ReleaseMediaCodecBridge();
+}
void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
DVLOG(1) << __FUNCTION__ << ": " << data.access_units.size() << " units";
- DCHECK(ui_loop_->BelongsToCurrentThread());
- DCHECK(!on_data_received_cb_.is_null());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(NoAccessUnitsRemainingInChunk(false));
TRACE_EVENT_ASYNC_END2(
"media", "MediaDecoderJob::RequestData", this,
"Data type", data.type == media::DemuxerStream::AUDIO ? "AUDIO" : "VIDEO",
"Units read", data.access_units.size());
- base::Closure done_cb = base::ResetAndReturn(&on_data_received_cb_);
+ if (is_incoming_data_invalid_) {
+ is_incoming_data_invalid_ = false;
+
+ // If there is a pending callback, need to request the data again to get
+ // valid data.
+ if (!data_received_cb_.is_null())
+ request_data_cb_.Run();
+ else
+ is_requesting_demuxer_data_ = false;
+ return;
+ }
+
+ size_t next_demuxer_data_index = inactive_demuxer_data_index();
+ received_data_[next_demuxer_data_index] = data;
+ access_unit_index_[next_demuxer_data_index] = 0;
+ is_requesting_demuxer_data_ = false;
+
+ base::Closure done_cb = base::ResetAndReturn(&data_received_cb_);
+
+ // If this data request is for the inactive chunk, or |data_received_cb_|
+ // was set to null by Flush() or Release(), do nothing.
+ if (done_cb.is_null())
+ return;
if (stop_decode_pending_) {
- OnDecodeCompleted(MEDIA_CODEC_STOPPED, kNoTimestamp(), 0);
+ DCHECK(is_decoding());
+ OnDecodeCompleted(MEDIA_CODEC_STOPPED, kNoTimestamp(), kNoTimestamp());
return;
}
- access_unit_index_ = 0;
- received_data_ = data;
done_cb.Run();
}
void MediaDecoderJob::Prefetch(const base::Closure& prefetch_cb) {
- DCHECK(ui_loop_->BelongsToCurrentThread());
- DCHECK(on_data_received_cb_.is_null());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(data_received_cb_.is_null());
DCHECK(decode_cb_.is_null());
if (HasData()) {
DVLOG(1) << __FUNCTION__ << " : using previously received data";
- ui_loop_->PostTask(FROM_HERE, prefetch_cb);
+ ui_task_runner_->PostTask(FROM_HERE, prefetch_cb);
return;
}
@@ -79,73 +110,122 @@ void MediaDecoderJob::Prefetch(const base::Closure& prefetch_cb) {
}
bool MediaDecoderJob::Decode(
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
+ base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp,
const DecoderCallback& callback) {
DCHECK(decode_cb_.is_null());
- DCHECK(on_data_received_cb_.is_null());
- DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(data_received_cb_.is_null());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ if (!media_codec_bridge_ || need_to_reconfig_decoder_job_) {
+ need_to_reconfig_decoder_job_ = !CreateMediaCodecBridge();
+ if (drain_decoder_) {
+ // Decoder has been recreated, stop draining.
+ drain_decoder_ = false;
+ input_eos_encountered_ = false;
+ output_eos_encountered_ = false;
+ access_unit_index_[current_demuxer_data_index_]++;
+ }
+ skip_eos_enqueue_ = true;
+ if (need_to_reconfig_decoder_job_)
+ return false;
+ }
decode_cb_ = callback;
if (!HasData()) {
- RequestData(base::Bind(&MediaDecoderJob::DecodeNextAccessUnit,
+ RequestData(base::Bind(&MediaDecoderJob::DecodeCurrentAccessUnit,
base::Unretained(this),
start_time_ticks,
start_presentation_timestamp));
return true;
}
- if (DemuxerStream::kConfigChanged ==
- received_data_.access_units[access_unit_index_].status) {
- // Clear received data because we need to handle a config change.
- decode_cb_.Reset();
- received_data_ = DemuxerData();
- access_unit_index_ = 0;
- return false;
- }
-
- DecodeNextAccessUnit(start_time_ticks, start_presentation_timestamp);
+ DecodeCurrentAccessUnit(start_time_ticks, start_presentation_timestamp);
return true;
}
void MediaDecoderJob::StopDecode() {
- DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
DCHECK(is_decoding());
stop_decode_pending_ = true;
}
+bool MediaDecoderJob::OutputEOSReached() const {
+ return !drain_decoder_ && output_eos_encountered_;
+}
+
+void MediaDecoderJob::SetDrmBridge(MediaDrmBridge* drm_bridge) {
+ drm_bridge_ = drm_bridge;
+ need_to_reconfig_decoder_job_ = true;
+}
+
void MediaDecoderJob::Flush() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(data_received_cb_.is_null());
DCHECK(decode_cb_.is_null());
+ // Clean up the received data.
+ current_demuxer_data_index_ = 0;
+ InitializeReceivedData();
+ if (is_requesting_demuxer_data_)
+ is_incoming_data_invalid_ = true;
+ input_eos_encountered_ = false;
+ output_eos_encountered_ = false;
+ drain_decoder_ = false;
+
// Do nothing, flush when the next Decode() happens.
needs_flush_ = true;
- received_data_ = DemuxerData();
- input_eos_encountered_ = false;
- access_unit_index_ = 0;
- on_data_received_cb_.Reset();
}
-void MediaDecoderJob::BeginPrerolling(
- const base::TimeDelta& preroll_timestamp) {
+void MediaDecoderJob::BeginPrerolling(base::TimeDelta preroll_timestamp) {
DVLOG(1) << __FUNCTION__ << "(" << preroll_timestamp.InSecondsF() << ")";
- DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
DCHECK(!is_decoding());
preroll_timestamp_ = preroll_timestamp;
prerolling_ = true;
}
+void MediaDecoderJob::ReleaseDecoderResources() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ if (decode_cb_.is_null()) {
+ DCHECK(!drain_decoder_);
+ // Since the decoder job is not decoding data, we can safely destroy
+ // |media_codec_bridge_|.
+ ReleaseMediaCodecBridge();
+ return;
+ }
+
+ // Release |media_codec_bridge_| once decoding is completed.
+ release_resources_pending_ = true;
+}
+
+bool MediaDecoderJob::SetDemuxerConfigs(const DemuxerConfigs& configs) {
+ bool config_changed = AreDemuxerConfigsChanged(configs);
+ if (config_changed)
+ UpdateDemuxerConfigs(configs);
+ return config_changed;
+}
+
+base::android::ScopedJavaLocalRef<jobject> MediaDecoderJob::GetMediaCrypto() {
+ base::android::ScopedJavaLocalRef<jobject> media_crypto;
+ if (drm_bridge_)
+ media_crypto = drm_bridge_->GetMediaCrypto();
+ return media_crypto;
+}
+
void MediaDecoderJob::Release() {
- DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
- // If the decoder job is not waiting for data, and is still decoding, we
- // cannot delete the job immediately.
- destroy_pending_ = on_data_received_cb_.is_null() && is_decoding();
+ // If the decoder job is still decoding, we cannot delete the job immediately.
+ destroy_pending_ = is_decoding();
request_data_cb_.Reset();
- on_data_received_cb_.Reset();
+ data_received_cb_.Reset();
decode_cb_.Reset();
if (destroy_pending_) {
@@ -158,7 +238,7 @@ void MediaDecoderJob::Release() {
MediaCodecStatus MediaDecoderJob::QueueInputBuffer(const AccessUnit& unit) {
DVLOG(1) << __FUNCTION__;
- DCHECK(decoder_loop_->BelongsToCurrentThread());
+ DCHECK(decoder_task_runner_->BelongsToCurrentThread());
TRACE_EVENT0("media", __FUNCTION__);
int input_buf_index = input_buf_index_;
@@ -207,70 +287,93 @@ MediaCodecStatus MediaDecoderJob::QueueInputBuffer(const AccessUnit& unit) {
}
bool MediaDecoderJob::HasData() const {
- DCHECK(ui_loop_->BelongsToCurrentThread());
- // When |input_eos_encountered_| is set, |access_units| must not be empty and
- // |access_unit_index_| must be pointing to an EOS unit. We'll reuse this
- // unit to flush the decoder until we hit output EOS.
- DCHECK(!input_eos_encountered_ ||
- (received_data_.access_units.size() > 0 &&
- access_unit_index_ < received_data_.access_units.size()))
- << " (access_units.size(): " << received_data_.access_units.size()
- << ", access_unit_index_: " << access_unit_index_ << ")";
- return access_unit_index_ < received_data_.access_units.size() ||
- input_eos_encountered_;
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ // When |input_eos_encountered_| is set, |access_unit_index_| and
+ // |current_demuxer_data_index_| must be pointing to an EOS unit,
+ // or a |kConfigChanged| unit if |drain_decoder_| is true. In both cases,
+ // we'll feed an EOS input unit to drain the decoder until we hit output EOS.
+ DCHECK(!input_eos_encountered_ || !NoAccessUnitsRemainingInChunk(true));
+ return !NoAccessUnitsRemainingInChunk(true) ||
+ !NoAccessUnitsRemainingInChunk(false);
}
void MediaDecoderJob::RequestData(const base::Closure& done_cb) {
DVLOG(1) << __FUNCTION__;
- DCHECK(ui_loop_->BelongsToCurrentThread());
- DCHECK(on_data_received_cb_.is_null());
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(data_received_cb_.is_null());
DCHECK(!input_eos_encountered_);
+ DCHECK(NoAccessUnitsRemainingInChunk(false));
TRACE_EVENT_ASYNC_BEGIN0("media", "MediaDecoderJob::RequestData", this);
- received_data_ = DemuxerData();
- access_unit_index_ = 0;
- on_data_received_cb_ = done_cb;
+ data_received_cb_ = done_cb;
+
+ // If we are already expecting new data, just set the callback and do
+ // nothing.
+ if (is_requesting_demuxer_data_)
+ return;
+
+ // The new incoming data will be stored as the next demuxer data chunk, since
+ // the decoder might still be decoding the current one.
+ size_t next_demuxer_data_index = inactive_demuxer_data_index();
+ received_data_[next_demuxer_data_index] = DemuxerData();
+ access_unit_index_[next_demuxer_data_index] = 0;
+ is_requesting_demuxer_data_ = true;
request_data_cb_.Run();
}
-void MediaDecoderJob::DecodeNextAccessUnit(
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp) {
- DCHECK(ui_loop_->BelongsToCurrentThread());
+void MediaDecoderJob::DecodeCurrentAccessUnit(
+ base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
DCHECK(!decode_cb_.is_null());
- // If the first access unit is a config change, request the player to dequeue
- // the input buffer again so that it can request config data.
- if (received_data_.access_units[access_unit_index_].status ==
- DemuxerStream::kConfigChanged) {
- ui_loop_->PostTask(FROM_HERE,
- base::Bind(&MediaDecoderJob::OnDecodeCompleted,
- base::Unretained(this),
- MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER,
- kNoTimestamp(),
- 0));
- return;
+ RequestCurrentChunkIfEmpty();
+ const AccessUnit& access_unit = CurrentAccessUnit();
+ if (CurrentAccessUnit().status == DemuxerStream::kConfigChanged) {
+ int index = CurrentReceivedDataChunkIndex();
+ const DemuxerConfigs& configs = received_data_[index].demuxer_configs[0];
+ bool reconfigure_needed = IsCodecReconfigureNeeded(configs);
+ // TODO(qinmin): |config_changed_cb_| should be run after draining finishes.
+ // http://crbug.com/381975.
+ if (SetDemuxerConfigs(configs))
+ config_changed_cb_.Run();
+ if (!drain_decoder_) {
+ // If we haven't decoded any data yet, just skip the current access unit
+ // and request the MediaCodec to be recreated on next Decode().
+ if (skip_eos_enqueue_ || !reconfigure_needed) {
+ need_to_reconfig_decoder_job_ =
+ need_to_reconfig_decoder_job_ || reconfigure_needed;
+ ui_task_runner_->PostTask(FROM_HERE, base::Bind(
+ &MediaDecoderJob::OnDecodeCompleted, base::Unretained(this),
+ MEDIA_CODEC_OUTPUT_FORMAT_CHANGED, kNoTimestamp(), kNoTimestamp()));
+ return;
+ }
+ // Start draining the decoder so that all the remaining frames are
+ // rendered.
+ drain_decoder_ = true;
+ }
}
- decoder_loop_->PostTask(FROM_HERE, base::Bind(
+ DCHECK(!(needs_flush_ && drain_decoder_));
+ decoder_task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaDecoderJob::DecodeInternal, base::Unretained(this),
- received_data_.access_units[access_unit_index_],
+ drain_decoder_ ? eos_unit_ : access_unit,
start_time_ticks, start_presentation_timestamp, needs_flush_,
- media::BindToLoop(ui_loop_, base::Bind(
+ media::BindToCurrentLoop(base::Bind(
&MediaDecoderJob::OnDecodeCompleted, base::Unretained(this)))));
needs_flush_ = false;
}
void MediaDecoderJob::DecodeInternal(
const AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
+ base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback) {
DVLOG(1) << __FUNCTION__;
- DCHECK(decoder_loop_->BelongsToCurrentThread());
+ DCHECK(decoder_task_runner_->BelongsToCurrentThread());
TRACE_EVENT0("media", __FUNCTION__);
if (needs_flush) {
@@ -279,7 +382,7 @@ void MediaDecoderJob::DecodeInternal(
output_eos_encountered_ = false;
MediaCodecStatus reset_status = media_codec_bridge_->Reset();
if (MEDIA_CODEC_OK != reset_status) {
- callback.Run(reset_status, kNoTimestamp(), 0);
+ callback.Run(reset_status, kNoTimestamp(), kNoTimestamp());
return;
}
}
@@ -292,7 +395,7 @@ void MediaDecoderJob::DecodeInternal(
// For aborted access unit, just skip it and inform the player.
if (unit.status == DemuxerStream::kAborted) {
// TODO(qinmin): use a new enum instead of MEDIA_CODEC_STOPPED.
- callback.Run(MEDIA_CODEC_STOPPED, kNoTimestamp(), 0);
+ callback.Run(MEDIA_CODEC_STOPPED, kNoTimestamp(), kNoTimestamp());
return;
}
@@ -300,7 +403,8 @@ void MediaDecoderJob::DecodeInternal(
if (unit.end_of_stream || unit.data.empty()) {
input_eos_encountered_ = true;
output_eos_encountered_ = true;
- callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, kNoTimestamp(), 0);
+ callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, kNoTimestamp(),
+ kNoTimestamp());
return;
}
@@ -313,7 +417,7 @@ void MediaDecoderJob::DecodeInternal(
if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM) {
input_eos_encountered_ = true;
} else if (input_status != MEDIA_CODEC_OK) {
- callback.Run(input_status, kNoTimestamp(), 0);
+ callback.Run(input_status, kNoTimestamp(), kNoTimestamp());
return;
}
}
@@ -340,24 +444,15 @@ void MediaDecoderJob::DecodeInternal(
!media_codec_bridge_->GetOutputBuffers()) {
status = MEDIA_CODEC_ERROR;
}
- callback.Run(status, kNoTimestamp(), 0);
+ callback.Run(status, kNoTimestamp(), kNoTimestamp());
return;
}
// TODO(xhwang/qinmin): This logic is correct but strange. Clean it up.
if (output_eos_encountered_)
status = MEDIA_CODEC_OUTPUT_END_OF_STREAM;
- else if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM)
- status = MEDIA_CODEC_INPUT_END_OF_STREAM;
-
- // Check whether we need to render the output.
- // TODO(qinmin): comparing most recently queued input's |unit.timestamp| with
- // |preroll_timestamp_| is not accurate due to data reordering and possible
- // input queueing without immediate dequeue when |input_status| !=
- // |MEDIA_CODEC_OK|. Need to use the |presentation_timestamp| for video, and
- // use |size| to calculate the timestamp for audio. See
- // http://crbug.com/310823 and http://b/11356652.
- bool render_output = unit.timestamp >= preroll_timestamp_ &&
+
+ bool render_output = presentation_timestamp >= preroll_timestamp_ &&
(status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u);
base::TimeDelta time_to_render;
DCHECK(!start_time_ticks.is_null());
@@ -367,11 +462,15 @@ void MediaDecoderJob::DecodeInternal(
}
if (time_to_render > base::TimeDelta()) {
- decoder_loop_->PostDelayedTask(
+ decoder_task_runner_->PostDelayedTask(
FROM_HERE,
base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
- weak_this_.GetWeakPtr(), buffer_index, size, render_output,
- base::Bind(callback, status, presentation_timestamp)),
+ base::Unretained(this),
+ buffer_index,
+ size,
+ render_output,
+ presentation_timestamp,
+ base::Bind(callback, status)),
time_to_render);
return;
}
@@ -390,14 +489,15 @@ void MediaDecoderJob::DecodeInternal(
presentation_timestamp = kNoTimestamp();
}
ReleaseOutputCompletionCallback completion_callback = base::Bind(
- callback, status, presentation_timestamp);
- ReleaseOutputBuffer(buffer_index, size, render_output, completion_callback);
+ callback, status);
+ ReleaseOutputBuffer(buffer_index, size, render_output, presentation_timestamp,
+ completion_callback);
}
void MediaDecoderJob::OnDecodeCompleted(
- MediaCodecStatus status, const base::TimeDelta& presentation_timestamp,
- size_t audio_output_bytes) {
- DCHECK(ui_loop_->BelongsToCurrentThread());
+ MediaCodecStatus status, base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
if (destroy_pending_) {
DVLOG(1) << __FUNCTION__ << " : completing pending deletion";
@@ -405,10 +505,13 @@ void MediaDecoderJob::OnDecodeCompleted(
return;
}
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM)
+ output_eos_encountered_ = true;
+
DCHECK(!decode_cb_.is_null());
// If output was queued for rendering, then we have completed prerolling.
- if (presentation_timestamp != kNoTimestamp())
+ if (current_presentation_timestamp != kNoTimestamp())
prerolling_ = false;
switch (status) {
@@ -417,8 +520,11 @@ void MediaDecoderJob::OnDecodeCompleted(
case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
case MEDIA_CODEC_OUTPUT_END_OF_STREAM:
- if (!input_eos_encountered_)
- access_unit_index_++;
+ if (!input_eos_encountered_) {
+ CurrentDataConsumed(
+ CurrentAccessUnit().status == DemuxerStream::kConfigChanged);
+ access_unit_index_[current_demuxer_data_index_]++;
+ }
break;
case MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
@@ -430,9 +536,118 @@ void MediaDecoderJob::OnDecodeCompleted(
break;
};
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM && drain_decoder_) {
+ OnDecoderDrained();
+ status = MEDIA_CODEC_OK;
+ }
+
+ if (release_resources_pending_) {
+ ReleaseMediaCodecBridge();
+ release_resources_pending_ = false;
+ if (drain_decoder_)
+ OnDecoderDrained();
+ }
+
stop_decode_pending_ = false;
- base::ResetAndReturn(&decode_cb_).Run(status, presentation_timestamp,
- audio_output_bytes);
+ base::ResetAndReturn(&decode_cb_).Run(
+ status, current_presentation_timestamp, max_presentation_timestamp);
+}
+
+const AccessUnit& MediaDecoderJob::CurrentAccessUnit() const {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(HasData());
+ size_t index = CurrentReceivedDataChunkIndex();
+ return received_data_[index].access_units[access_unit_index_[index]];
+}
+
+size_t MediaDecoderJob::CurrentReceivedDataChunkIndex() const {
+ return NoAccessUnitsRemainingInChunk(true) ?
+ inactive_demuxer_data_index() : current_demuxer_data_index_;
+}
+
+bool MediaDecoderJob::NoAccessUnitsRemainingInChunk(
+ bool is_active_chunk) const {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ size_t index = is_active_chunk ? current_demuxer_data_index_ :
+ inactive_demuxer_data_index();
+ return received_data_[index].access_units.size() <= access_unit_index_[index];
+}
+
+void MediaDecoderJob::RequestCurrentChunkIfEmpty() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(HasData());
+ if (!NoAccessUnitsRemainingInChunk(true))
+ return;
+
+  // Requests new data if the last access unit of the next chunk is not EOS.
+ current_demuxer_data_index_ = inactive_demuxer_data_index();
+ const AccessUnit last_access_unit =
+ received_data_[current_demuxer_data_index_].access_units.back();
+ if (!last_access_unit.end_of_stream &&
+ last_access_unit.status != DemuxerStream::kAborted) {
+ RequestData(base::Closure());
+ }
+}
+
+void MediaDecoderJob::InitializeReceivedData() {
+ for (size_t i = 0; i < 2; ++i) {
+ received_data_[i] = DemuxerData();
+ access_unit_index_[i] = 0;
+ }
+}
+
+void MediaDecoderJob::OnDecoderDrained() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(drain_decoder_);
+
+ input_eos_encountered_ = false;
+ output_eos_encountered_ = false;
+ drain_decoder_ = false;
+ ReleaseMediaCodecBridge();
+ // Increase the access unit index so that the new decoder will not handle
+ // the config change again.
+ access_unit_index_[current_demuxer_data_index_]++;
+ CurrentDataConsumed(true);
+}
+
+bool MediaDecoderJob::CreateMediaCodecBridge() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_cb_.is_null());
+
+ if (!HasStream()) {
+ ReleaseMediaCodecBridge();
+ return false;
+ }
+
+ // Create |media_codec_bridge_| only if config changes.
+ if (media_codec_bridge_ && !need_to_reconfig_decoder_job_)
+ return true;
+
+ base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
+ if (is_content_encrypted_ && media_crypto.is_null())
+ return false;
+
+ ReleaseMediaCodecBridge();
+ DVLOG(1) << __FUNCTION__ << " : creating new media codec bridge";
+
+ return CreateMediaCodecBridgeInternal();
+}
+
+bool MediaDecoderJob::IsCodecReconfigureNeeded(
+ const DemuxerConfigs& configs) const {
+ if (!AreDemuxerConfigsChanged(configs))
+ return false;
+ return true;
+}
+
+void MediaDecoderJob::ReleaseMediaCodecBridge() {
+ if (!media_codec_bridge_)
+ return;
+
+ media_codec_bridge_.reset();
+ OnMediaCodecBridgeReleased();
}
} // namespace media
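The two-chunk buffering threaded through this file flips between slots 0 and 1: decode from the active chunk while the inactive one receives data. A sketch of the indexing convention (assuming inactive_demuxer_data_index() simply names the other slot):

#include <stddef.h>

// Sketch of the active/inactive chunk indexing used by MediaDecoderJob.
struct ChunkIndex {
  ChunkIndex() : current(0) {}
  size_t inactive() const { return 1u - current; }  // The other slot.
  void PromoteInactive() { current = inactive(); }  // Active chunk drained.
  size_t current;
};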
diff --git a/chromium/media/base/android/media_decoder_job.h b/chromium/media/base/android/media_decoder_job.h
index 6ee086dea03..433e0359529 100644
--- a/chromium/media/base/android/media_decoder_job.h
+++ b/chromium/media/base/android/media_decoder_job.h
@@ -10,15 +10,21 @@
#include "base/time/time.h"
#include "media/base/android/demuxer_stream_player_params.h"
#include "media/base/android/media_codec_bridge.h"
+#include "ui/gl/android/scoped_java_surface.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
+class MediaDrmBridge;
+
// Class for managing all the decoding tasks. Each decoding task will be posted
// onto the same thread. The thread will be stopped once Stop() is called.
+// Data is stored in two chunks. New data is always stored in the inactive
+// chunk; when the current active chunk becomes empty, a new data request is
+// sent to the renderer.
class MediaDecoderJob {
public:
struct Deleter {
@@ -26,15 +32,18 @@ class MediaDecoderJob {
};
// Callback when a decoder job finishes its work. Args: whether decode
- // finished successfully, presentation time, audio output bytes.
- // If the presentation time is equal to kNoTimestamp(), the decoder job
- // skipped rendering of the decoded output and the callback target should
- // update its clock to avoid introducing extra delays to the next frame.
- typedef base::Callback<void(MediaCodecStatus, const base::TimeDelta&,
- size_t)> DecoderCallback;
+ // finished successfully, current presentation time, max presentation time.
+ // If the current presentation time is equal to kNoTimestamp(), the decoder
+ // job skipped rendering of the decoded output and the callback target should
+ // ignore the timestamps provided.
+ typedef base::Callback<void(MediaCodecStatus, base::TimeDelta,
+ base::TimeDelta)> DecoderCallback;
// Callback when a decoder job finishes releasing the output buffer.
- // Args: audio output bytes, must be 0 for video.
- typedef base::Callback<void(size_t)> ReleaseOutputCompletionCallback;
+ // Args: current presentation time, max presentation time.
+ // If the current presentation time is equal to kNoTimestamp(), the callback
+ // target should ignore the timestamps provided.
+ typedef base::Callback<void(base::TimeDelta, base::TimeDelta)>
+ ReleaseOutputCompletionCallback;
virtual ~MediaDecoderJob();
@@ -50,10 +59,10 @@ class MediaDecoderJob {
//
// Returns true if the next decode was started and |callback| will be
// called when the decode operation is complete.
- // Returns false if a config change is needed. |callback| is ignored
- // and will not be called.
- bool Decode(const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
+ // Returns false if |media_codec_bridge_| cannot be created; |callback| is
+ // ignored and will not be called.
+ bool Decode(base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp,
const DecoderCallback& callback);
// Called to stop the last Decode() early.
@@ -66,20 +75,42 @@ class MediaDecoderJob {
// reflects whether data was actually decoded or the decode terminated early.
void StopDecode();
- // Flush the decoder.
- void Flush();
+ // Flushes the decoder and abandons all the data that is being decoded.
+ virtual void Flush();
+
+ // Enters prerolling state. The job must not currently be decoding.
+ void BeginPrerolling(base::TimeDelta preroll_timestamp);
+
+ // Releases all the decoder resources as the current tab is going background.
+ virtual void ReleaseDecoderResources();
+
+ // Sets the demuxer configs. Returns true if the configs have changed, or
+ // false otherwise.
+ bool SetDemuxerConfigs(const DemuxerConfigs& configs);
+
+ // Returns whether the decoder has finished decoding all the data.
+ bool OutputEOSReached() const;
- // Enter prerolling state. The job must not currently be decoding.
- void BeginPrerolling(const base::TimeDelta& preroll_timestamp);
+ // Returns true if the audio/video stream is available. Implemented by child
+ // classes.
+ virtual bool HasStream() const = 0;
- bool prerolling() const { return prerolling_; }
+ void SetDrmBridge(MediaDrmBridge* drm_bridge);
bool is_decoding() const { return !decode_cb_.is_null(); }
+ bool is_content_encrypted() const { return is_content_encrypted_; }
+
protected:
- MediaDecoderJob(const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
- MediaCodecBridge* media_codec_bridge,
- const base::Closure& request_data_cb);
+ // Creates a new MediaDecoderJob instance.
+ // |decoder_task_runner| - Thread on which the decoder task will run.
+ // |request_data_cb| - Callback to request more data for the decoder.
+ // |config_changed_cb| - Callback to inform the caller that
+ // demuxer config has changed.
+ MediaDecoderJob(
+ const scoped_refptr<base::SingleThreadTaskRunner>& decoder_task_runner,
+ const base::Closure& request_data_cb,
+ const base::Closure& config_changed_cb);
// Release the output buffer at index |output_buffer_index| and render it if
// |render_output| is true. Upon completion, |callback| will be called.
@@ -87,16 +118,36 @@ class MediaDecoderJob {
int output_buffer_index,
size_t size,
bool render_output,
+ base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) = 0;
// Returns true if the "time to render" needs to be computed for frames in
// this decoder job.
virtual bool ComputeTimeToRender() const = 0;
+ // Gets MediaCrypto object from |drm_bridge_|.
+ base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
+
+ // Releases the |media_codec_bridge_|.
+ void ReleaseMediaCodecBridge();
+
+ MediaDrmBridge* drm_bridge() { return drm_bridge_; }
+
+ void set_is_content_encrypted(bool is_content_encrypted) {
+ is_content_encrypted_ = is_content_encrypted;
+ }
+
+ bool need_to_reconfig_decoder_job_;
+
+ scoped_ptr<MediaCodecBridge> media_codec_bridge_;
+
private:
+ friend class MediaSourcePlayerTest;
+
// Causes this instance to be deleted on the thread it is bound to.
void Release();
+ // Queues an access unit into |media_codec_bridge_|'s input buffer.
MediaCodecStatus QueueInputBuffer(const AccessUnit& unit);
// Returns true if this object has data to decode.
@@ -106,20 +157,25 @@ class MediaDecoderJob {
// |done_cb| is called when more data is available in |received_data_|.
void RequestData(const base::Closure& done_cb);
- // Posts a task to start decoding the next access unit in |received_data_|.
- void DecodeNextAccessUnit(
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp);
-
- // Helper function to decoder data on |thread_|. |unit| contains all the data
- // to be decoded. |start_time_ticks| and |start_presentation_timestamp|
- // represent the system time and the presentation timestamp when the first
- // frame is rendered. We use these information to estimate when the current
- // frame should be rendered. If |needs_flush| is true, codec needs to be
- // flushed at the beginning of this call.
+ // Posts a task to start decoding the current access unit in |received_data_|.
+ void DecodeCurrentAccessUnit(
+ base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp);
+
+ // Helper function to decode data on |decoder_task_runner_|. |unit| contains
+ // the data to be decoded. |start_time_ticks| and
+ // |start_presentation_timestamp| represent the system time and the
+ // presentation timestamp when the first frame is rendered. We use this
+ // information to estimate when the current frame should be rendered.
+ // If |needs_flush| is true, codec needs to be flushed at the beginning of
+ // this call.
+ // It is possible that |stop_decode_pending_| or |release_resources_pending_|
+ // becomes true while DecodeInternal() is running. However, they should have
+ // no impact on DecodeInternal(). They will be handled after DecodeInternal()
+ // finishes and OnDecodeCompleted() is posted on the UI thread.
void DecodeInternal(const AccessUnit& unit,
- const base::TimeTicks& start_time_ticks,
- const base::TimeDelta& start_presentation_timestamp,
+ base::TimeTicks start_time_ticks,
+ base::TimeDelta start_presentation_timestamp,
bool needs_flush,
const DecoderCallback& callback);
@@ -127,18 +183,69 @@ class MediaDecoderJob {
// Completes any pending job destruction or any pending decode stop. If
// destruction was not pending, passes its arguments to |decode_cb_|.
void OnDecodeCompleted(MediaCodecStatus status,
- const base::TimeDelta& presentation_timestamp,
- size_t audio_output_bytes);
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp);
- // The UI message loop where callbacks should be dispatched.
- scoped_refptr<base::MessageLoopProxy> ui_loop_;
+ // Helper function to get the current access unit that is being decoded.
+ const AccessUnit& CurrentAccessUnit() const;
+
+ // Helper function to get the current data chunk index that is being decoded.
+ size_t CurrentReceivedDataChunkIndex() const;
+
+ // Checks whether a chunk has no remaining access units to decode. If
+ // |is_active_chunk| is true, this function returns whether the decoder has
+ // consumed all data in |received_data_[current_demuxer_data_index_]|.
+ // Otherwise, it returns whether the decoder has consumed all data in the
+ // inactive chunk.
+ bool NoAccessUnitsRemainingInChunk(bool is_active_chunk) const;
+
+ // Requests new data for the current chunk if it runs out of data.
+ void RequestCurrentChunkIfEmpty();
+
+ // Initializes |received_data_| and |access_unit_index_|.
+ void InitializeReceivedData();
+
+ // Called when the decoder is completely drained and is ready to be released.
+ void OnDecoderDrained();
+
+ // Creates |media_codec_bridge_| for decoding purposes. Returns true if it is
+ // created, or false otherwise.
+ bool CreateMediaCodecBridge();
- // The message loop that decoder job runs on.
- scoped_refptr<base::MessageLoopProxy> decoder_loop_;
+ // Called when an access unit is consumed by the decoder. |is_config_change|
+ // indicates whether the current access unit is a config change. If it is
+ // true, the next access unit is guaranteed to be an I-frame.
+ virtual void CurrentDataConsumed(bool is_config_change) {}
- // The media codec bridge used for decoding. Owned by derived class.
- // NOTE: This MUST NOT be accessed in the destructor.
- MediaCodecBridge* media_codec_bridge_;
+ // Called when |media_codec_bridge_| is released.
+ virtual void OnMediaCodecBridgeReleased() {}
+
+ // Implemented by the child class to create |media_codec_bridge_| for a
+ // particular stream. Returns true if it is created, or false otherwise.
+ virtual bool CreateMediaCodecBridgeInternal() = 0;
+
+ // Returns true if |configs| doesn't match the current demuxer configs that
+ // the decoder job has.
+ virtual bool AreDemuxerConfigsChanged(
+ const DemuxerConfigs& configs) const = 0;
+
+ // Updates the demuxer configs.
+ virtual void UpdateDemuxerConfigs(const DemuxerConfigs& configs) = 0;
+
+ // Returns true if |media_codec_bridge_| needs to be reconfigured for the
+ // new DemuxerConfigs, or false otherwise.
+ virtual bool IsCodecReconfigureNeeded(const DemuxerConfigs& configs) const;
+
+ // Returns the index into |received_data_| that is not currently being decoded.
+ size_t inactive_demuxer_data_index() const {
+ return 1 - current_demuxer_data_index_;
+ }
+
+ // The UI task runner where callbacks should be dispatched.
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+
+ // The task runner that the decoder job runs on.
+ scoped_refptr<base::SingleThreadTaskRunner> decoder_task_runner_;
// Whether the decoder needs to be flushed.
bool needs_flush_;
@@ -169,29 +276,41 @@ class MediaDecoderJob {
// is not very accurate.
bool prerolling_;
- // Weak pointer passed to media decoder jobs for callbacks. It is bounded to
- // the decoder thread.
- base::WeakPtrFactory<MediaDecoderJob> weak_this_;
-
// Callback used to request more data.
base::Closure request_data_cb_;
+ // Callback to notify the caller that the config has changed.
+ base::Closure config_changed_cb_;
+
// Callback to run when new data has been received.
- base::Closure on_data_received_cb_;
+ base::Closure data_received_cb_;
// Callback to run when the current Decode() operation completes.
DecoderCallback decode_cb_;
- // The current access unit being processed.
- size_t access_unit_index_;
-
// Data received over IPC from last RequestData() operation.
- DemuxerData received_data_;
+ // We keep 2 chunks at the same time to reduce the IPC latency between chunks.
+ // Once all data inside the current chunk is decoded, we request a new
+ // chunk from the demuxer and swap the current chunk with the other one.
+ // New data is always stored in the other chunk since the current
+ // one may still be in use.
+ DemuxerData received_data_[2];
+
+ // Index of the current data chunk that is being decoded.
+ size_t current_demuxer_data_index_;
+
+ // Index of the access unit inside each data chunk that is being decoded.
+ size_t access_unit_index_[2];
// The index of input buffer that can be used by QueueInputBuffer().
// If the index is uninitialized or invalid, it must be -1.
int input_buf_index_;
+ // Indicates whether content is encrypted.
+ bool is_content_encrypted_;
+
+ // Indicates the decoder job should stop after decoding the current access
+ // unit.
bool stop_decode_pending_;
// Indicates that this object should be destroyed once the current
@@ -199,6 +318,28 @@ class MediaDecoderJob {
// while there is a decode in progress.
bool destroy_pending_;
+ // Indicates whether the decoder is in the middle of requesting new data.
+ bool is_requesting_demuxer_data_;
+
+ // Indicates whether the incoming data should be ignored.
+ bool is_incoming_data_invalid_;
+
+ // Indicates that |media_codec_bridge_| should be released once the current
+ // Decode() has completed. This gets set when ReleaseDecoderResources() gets
+ // called while there is a decode in progress.
+ bool release_resources_pending_;
+
+ // Pointer to a DRM object that will be used for encrypted streams.
+ MediaDrmBridge* drm_bridge_;
+
+ // Indicates whether |media_codec_bridge_| is in the middle of being drained
+ // due to a config change.
+ bool drain_decoder_;
+
+ // This access unit is passed to the decoder during config changes to drain
+ // the decoder.
+ AccessUnit eos_unit_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MediaDecoderJob);
};
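
The DecoderCallback contract documented above — ignore both timestamps whenever
the current presentation time equals kNoTimestamp() — implies a consumer along
the following lines. This is a hedged sketch only; OnDecodeDone is a
hypothetical handler, not part of this patch.

void OnDecodeDone(MediaCodecStatus status,
                  base::TimeDelta current_presentation_timestamp,
                  base::TimeDelta max_presentation_timestamp) {
  if (status != MEDIA_CODEC_OK)
    return;  // Error and abort handling elided in this sketch.

  // Per the header comment: kNoTimestamp() means rendering of the decoded
  // output was skipped, so neither timestamp is meaningful.
  if (current_presentation_timestamp == kNoTimestamp())
    return;

  // Otherwise both values are valid; a real consumer might advance its
  // playback clock to |current_presentation_timestamp| here.
}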
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index 95085fea4ed..3cb5bace517 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -4,16 +4,22 @@
#include "media/base/android/media_drm_bridge.h"
+#include <algorithm>
+
#include "base/android/build_info.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/callback_helpers.h"
+#include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/strings/string_util.h"
+#include "base/sys_byteorder.h"
#include "jni/MediaDrmBridge_jni.h"
-#include "media/base/android/media_player_manager.h"
+
+#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
using base::android::AttachCurrentThread;
using base::android::ConvertUTF8ToJavaString;
@@ -50,13 +56,70 @@ static uint64 ReadUint64(const uint8_t* data) {
// uint32 DataSize
// uint8[DataSize] Data
// }
-static const int kBoxHeaderSize = 8; // Box's header contains Size and Type.
-static const int kBoxLargeSizeSize = 8;
-static const int kPsshVersionFlagSize = 4;
-static const int kPsshSystemIdSize = 16;
-static const int kPsshDataSizeSize = 4;
-static const uint32 kTencType = 0x74656e63;
-static const uint32 kPsshType = 0x70737368;
+const int kBoxHeaderSize = 8; // Box's header contains Size and Type.
+const int kBoxLargeSizeSize = 8;
+const int kPsshVersionFlagSize = 4;
+const int kPsshSystemIdSize = 16;
+const int kPsshDataSizeSize = 4;
+const uint32 kTencType = 0x74656e63;
+const uint32 kPsshType = 0x70737368;
+const uint8 kWidevineUuid[16] = {
+ 0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
+ 0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
+
+typedef std::vector<uint8> UUID;
+
+class KeySystemUuidManager {
+ public:
+ KeySystemUuidManager();
+ UUID GetUUID(const std::string& key_system);
+ void AddMapping(const std::string& key_system, const UUID& uuid);
+ std::vector<std::string> GetPlatformKeySystemNames();
+
+ private:
+ typedef base::hash_map<std::string, UUID> KeySystemUuidMap;
+
+ KeySystemUuidMap key_system_uuid_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeySystemUuidManager);
+};
+
+KeySystemUuidManager::KeySystemUuidManager() {
+ // Widevine is always supported on Android.
+ key_system_uuid_map_[kWidevineKeySystem] =
+ UUID(kWidevineUuid, kWidevineUuid + arraysize(kWidevineUuid));
+}
+
+UUID KeySystemUuidManager::GetUUID(const std::string& key_system) {
+ KeySystemUuidMap::iterator it = key_system_uuid_map_.find(key_system);
+ if (it == key_system_uuid_map_.end())
+ return UUID();
+ return it->second;
+}
+
+void KeySystemUuidManager::AddMapping(const std::string& key_system,
+ const UUID& uuid) {
+ KeySystemUuidMap::iterator it = key_system_uuid_map_.find(key_system);
+ DCHECK(it == key_system_uuid_map_.end())
+ << "Shouldn't overwrite an existing key system.";
+ if (it != key_system_uuid_map_.end())
+ return;
+ key_system_uuid_map_[key_system] = uuid;
+}
+
+std::vector<std::string> KeySystemUuidManager::GetPlatformKeySystemNames() {
+ std::vector<std::string> key_systems;
+ for (KeySystemUuidMap::iterator it = key_system_uuid_map_.begin();
+ it != key_system_uuid_map_.end(); ++it) {
+ // Rule out key systems that are handled by Chrome explicitly.
+ if (it->first != kWidevineKeySystem)
+ key_systems.push_back(it->first);
+ }
+ return key_systems;
+}
+
+base::LazyInstance<KeySystemUuidManager>::Leaky g_key_system_uuid_manager =
+ LAZY_INSTANCE_INITIALIZER;
// Tries to find a PSSH box whose "SystemId" is |uuid| in |data|, parses the
// "Data" of the box and put it in |pssh_data|. Returns true if such a box is
@@ -66,7 +129,7 @@ static const uint32 kPsshType = 0x70737368;
// will be set in |pssh_data|.
// 2, Only PSSH and TENC boxes are allowed in |data|. TENC boxes are skipped.
static bool GetPsshData(const uint8* data, int data_size,
- const std::vector<uint8>& uuid,
+ const UUID& uuid,
std::vector<uint8>* pssh_data) {
const uint8* cur = data;
const uint8* data_end = data + data_size;
@@ -152,24 +215,41 @@ static MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
return MediaDrmBridge::SECURITY_LEVEL_NONE;
}
-// static
-scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
- int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- const GURL& frame_url,
- const std::string& security_level,
- MediaPlayerManager* manager) {
- scoped_ptr<MediaDrmBridge> media_drm_bridge;
-
- if (IsAvailable() && !scheme_uuid.empty()) {
- // TODO(qinmin): check whether the uuid is valid.
- media_drm_bridge.reset(new MediaDrmBridge(
- media_keys_id, scheme_uuid, frame_url, security_level, manager));
- if (media_drm_bridge->j_media_drm_.is_null())
- media_drm_bridge.reset();
+static std::string GetSecurityLevelString(
+ MediaDrmBridge::SecurityLevel security_level) {
+ switch (security_level) {
+ case MediaDrmBridge::SECURITY_LEVEL_NONE:
+ return "";
+ case MediaDrmBridge::SECURITY_LEVEL_1:
+ return "L1";
+ case MediaDrmBridge::SECURITY_LEVEL_3:
+ return "L3";
}
+ return "";
+}
- return media_drm_bridge.Pass();
+// Checks whether |key_system| is supported with |container_mime_type|. Only
+// checks |key_system| support if |container_mime_type| is empty.
+// TODO(xhwang): The |container_mime_type| is not the same as contentType in
+// the EME spec. Revisit this once the spec issue with initData type is
+// resolved.
+static bool IsKeySystemSupportedWithTypeImpl(
+ const std::string& key_system,
+ const std::string& container_mime_type) {
+ if (!MediaDrmBridge::IsAvailable())
+ return false;
+
+ UUID scheme_uuid = g_key_system_uuid_manager.Get().GetUUID(key_system);
+ if (scheme_uuid.empty())
+ return false;
+
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
+ base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ ScopedJavaLocalRef<jstring> j_container_mime_type =
+ ConvertUTF8ToJavaString(env, container_mime_type);
+ return Java_MediaDrmBridge_isCryptoSchemeSupported(
+ env, j_scheme_uuid.obj(), j_container_mime_type.obj());
}
// static
@@ -178,85 +258,187 @@ bool MediaDrmBridge::IsAvailable() {
}
// static
-bool MediaDrmBridge::IsSecureDecoderRequired(
- const std::string& security_level_str) {
- return IsSecureDecoderRequired(
- GetSecurityLevelFromString(security_level_str));
+bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
+ DCHECK(IsAvailable());
+ return SECURITY_LEVEL_1 == security_level;
+}
+
+// static
+bool MediaDrmBridge::IsSecurityLevelSupported(const std::string& key_system,
+ SecurityLevel security_level) {
+ if (!IsAvailable())
+ return false;
+
+ scoped_ptr<MediaDrmBridge> media_drm_bridge =
+ MediaDrmBridge::CreateSessionless(key_system);
+ if (!media_drm_bridge)
+ return false;
+
+ return media_drm_bridge->SetSecurityLevel(security_level);
+}
+
+static void AddKeySystemUuidMapping(JNIEnv* env, jclass clazz,
+ jstring j_key_system,
+ jobject j_buffer) {
+ std::string key_system = ConvertJavaStringToUTF8(env, j_key_system);
+ uint8* buffer = static_cast<uint8*>(env->GetDirectBufferAddress(j_buffer));
+ UUID uuid(buffer, buffer + 16);
+ g_key_system_uuid_manager.Get().AddMapping(key_system, uuid);
+}
+
+// static
+std::vector<std::string> MediaDrmBridge::GetPlatformKeySystemNames() {
+ return g_key_system_uuid_manager.Get().GetPlatformKeySystemNames();
}
-bool MediaDrmBridge::IsSecurityLevelSupported(
- const std::vector<uint8>& scheme_uuid,
- const std::string& security_level) {
- // Pass 0 as |media_keys_id| and NULL as |manager| as they are not used in
- // creation time of MediaDrmBridge.
- return MediaDrmBridge::Create(0, scheme_uuid, GURL(), security_level, NULL) !=
- NULL;
+// static
+bool MediaDrmBridge::IsKeySystemSupported(const std::string& key_system) {
+ DCHECK(!key_system.empty());
+ return IsKeySystemSupportedWithTypeImpl(key_system, "");
}
-bool MediaDrmBridge::IsCryptoSchemeSupported(
- const std::vector<uint8>& scheme_uuid,
+// static
+bool MediaDrmBridge::IsKeySystemSupportedWithType(
+ const std::string& key_system,
const std::string& container_mime_type) {
- JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
- base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
- ScopedJavaLocalRef<jstring> j_container_mime_type =
- ConvertUTF8ToJavaString(env, container_mime_type);
- return Java_MediaDrmBridge_isCryptoSchemeSupported(
- env, j_scheme_uuid.obj(), j_container_mime_type.obj());
+ DCHECK(!key_system.empty() && !container_mime_type.empty());
+ return IsKeySystemSupportedWithTypeImpl(key_system, container_mime_type);
}
bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
return RegisterNativesImpl(env);
}
-MediaDrmBridge::MediaDrmBridge(int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- const GURL& frame_url,
- const std::string& security_level,
- MediaPlayerManager* manager)
- : media_keys_id_(media_keys_id),
- scheme_uuid_(scheme_uuid),
- frame_url_(frame_url),
- manager_(manager) {
+MediaDrmBridge::MediaDrmBridge(const std::vector<uint8>& scheme_uuid,
+ const SessionCreatedCB& session_created_cb,
+ const SessionMessageCB& session_message_cb,
+ const SessionReadyCB& session_ready_cb,
+ const SessionClosedCB& session_closed_cb,
+ const SessionErrorCB& session_error_cb)
+ : scheme_uuid_(scheme_uuid),
+ session_created_cb_(session_created_cb),
+ session_message_cb_(session_message_cb),
+ session_ready_cb_(session_ready_cb),
+ session_closed_cb_(session_closed_cb),
+ session_error_cb_(session_error_cb) {
JNIEnv* env = AttachCurrentThread();
CHECK(env);
ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
- ScopedJavaLocalRef<jstring> j_security_level =
- ConvertUTF8ToJavaString(env, security_level);
j_media_drm_.Reset(Java_MediaDrmBridge_create(
- env, j_scheme_uuid.obj(), j_security_level.obj(),
- reinterpret_cast<intptr_t>(this)));
+ env, j_scheme_uuid.obj(), reinterpret_cast<intptr_t>(this)));
}
MediaDrmBridge::~MediaDrmBridge() {
JNIEnv* env = AttachCurrentThread();
+ player_tracker_.NotifyCdmUnset();
if (!j_media_drm_.is_null())
Java_MediaDrmBridge_release(env, j_media_drm_.obj());
}
+// static
+scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
+ const std::string& key_system,
+ const SessionCreatedCB& session_created_cb,
+ const SessionMessageCB& session_message_cb,
+ const SessionReadyCB& session_ready_cb,
+ const SessionClosedCB& session_closed_cb,
+ const SessionErrorCB& session_error_cb) {
+ scoped_ptr<MediaDrmBridge> media_drm_bridge;
+ if (!IsAvailable())
+ return media_drm_bridge.Pass();
+
+ UUID scheme_uuid = g_key_system_uuid_manager.Get().GetUUID(key_system);
+ if (scheme_uuid.empty())
+ return media_drm_bridge.Pass();
+
+ media_drm_bridge.reset(new MediaDrmBridge(scheme_uuid,
+ session_created_cb,
+ session_message_cb,
+ session_ready_cb,
+ session_closed_cb,
+ session_error_cb));
+
+ if (media_drm_bridge->j_media_drm_.is_null())
+ media_drm_bridge.reset();
+
+ return media_drm_bridge.Pass();
+}
+
+// static
+scoped_ptr<MediaDrmBridge> MediaDrmBridge::CreateSessionless(
+ const std::string& key_system) {
+ return MediaDrmBridge::Create(key_system,
+ SessionCreatedCB(),
+ SessionMessageCB(),
+ SessionReadyCB(),
+ SessionClosedCB(),
+ SessionErrorCB());
+}
+
+bool MediaDrmBridge::SetSecurityLevel(SecurityLevel security_level) {
+ JNIEnv* env = AttachCurrentThread();
+
+ std::string security_level_str = GetSecurityLevelString(security_level);
+ if (security_level_str.empty())
+ return false;
+
+ ScopedJavaLocalRef<jstring> j_security_level =
+ ConvertUTF8ToJavaString(env, security_level_str);
+ return Java_MediaDrmBridge_setSecurityLevel(
+ env, j_media_drm_.obj(), j_security_level.obj());
+}
+
bool MediaDrmBridge::CreateSession(uint32 session_id,
- const std::string& type,
+ const std::string& content_type,
const uint8* init_data,
int init_data_length) {
- std::vector<uint8> pssh_data;
- if (!GetPsshData(init_data, init_data_length, scheme_uuid_, &pssh_data))
- return false;
+ DVLOG(1) << __FUNCTION__;
+
+ DCHECK(!session_created_cb_.is_null())
+ << "CreateSession called on a sessionless MediaDrmBridge object.";
JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jbyteArray> j_pssh_data =
- base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
- ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, type);
+ ScopedJavaLocalRef<jbyteArray> j_init_data;
+ // Caller should always use "video/*" content types.
+ DCHECK_EQ(0u, content_type.find("video/"));
+
+ // The Widevine MediaDrm plugin accepts only the "data" part of the PSSH box
+ // as the init data when using the MP4 container.
+ if (std::equal(scheme_uuid_.begin(), scheme_uuid_.end(), kWidevineUuid) &&
+ content_type == "video/mp4") {
+ std::vector<uint8> pssh_data;
+ if (!GetPsshData(init_data, init_data_length, scheme_uuid_, &pssh_data))
+ return false;
+ j_init_data =
+ base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
+ } else {
+ j_init_data =
+ base::android::ToJavaByteArray(env, init_data, init_data_length);
+ }
+
+ ScopedJavaLocalRef<jstring> j_mime =
+ ConvertUTF8ToJavaString(env, content_type);
Java_MediaDrmBridge_createSession(
- env, j_media_drm_.obj(), session_id, j_pssh_data.obj(), j_mime.obj());
+ env, j_media_drm_.obj(), session_id, j_init_data.obj(), j_mime.obj());
return true;
}
+void MediaDrmBridge::LoadSession(uint32 session_id,
+ const std::string& web_session_id) {
+ // MediaDrmBridge doesn't support loading sessions.
+ NOTREACHED();
+}
+
void MediaDrmBridge::UpdateSession(uint32 session_id,
const uint8* response,
int response_length) {
DVLOG(1) << __FUNCTION__;
+
+ DCHECK(!session_ready_cb_.is_null())
+ << __FUNCTION__ << " called on a sessionless MediaDrmBridge object.";
+
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_response =
base::android::ToJavaByteArray(env, response, response_length);
@@ -266,10 +448,23 @@ void MediaDrmBridge::UpdateSession(uint32 session_id,
void MediaDrmBridge::ReleaseSession(uint32 session_id) {
DVLOG(1) << __FUNCTION__;
+
+ DCHECK(!session_closed_cb_.is_null())
+ << __FUNCTION__ << " called on a sessionless MediaDrmBridge object.";
+
JNIEnv* env = AttachCurrentThread();
Java_MediaDrmBridge_releaseSession(env, j_media_drm_.obj(), session_id);
}
+int MediaDrmBridge::RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) {
+ return player_tracker_.RegisterPlayer(new_key_cb, cdm_unset_cb);
+}
+
+void MediaDrmBridge::UnregisterPlayer(int registration_id) {
+ player_tracker_.UnregisterPlayer(registration_id);
+}
+
void MediaDrmBridge::SetMediaCryptoReadyCB(const base::Closure& closure) {
if (closure.is_null()) {
media_crypto_ready_cb_.Reset();
@@ -298,7 +493,7 @@ void MediaDrmBridge::OnSessionCreated(JNIEnv* env,
jstring j_web_session_id) {
uint32 session_id = j_session_id;
std::string web_session_id = ConvertJavaStringToUTF8(env, j_web_session_id);
- manager_->OnSessionCreated(media_keys_id_, session_id, web_session_id);
+ session_created_cb_.Run(session_id, web_session_id);
}
void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
@@ -309,31 +504,37 @@ void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
uint32 session_id = j_session_id;
std::vector<uint8> message;
JavaByteArrayToByteVector(env, j_message, &message);
- std::string destination_url = ConvertJavaStringToUTF8(env, j_destination_url);
- manager_->OnSessionMessage(
- media_keys_id_, session_id, message, destination_url);
+ GURL destination_gurl = GURL(ConvertJavaStringToUTF8(env, j_destination_url));
+ if (!destination_gurl.is_valid() && !destination_gurl.is_empty()) {
+ DLOG(WARNING) << "SessionMessage destination_url is invalid : "
+ << destination_gurl.possibly_invalid_spec();
+ destination_gurl = GURL::EmptyGURL(); // Replace invalid destination_url.
+ }
+ session_message_cb_.Run(session_id, message, destination_gurl);
}
void MediaDrmBridge::OnSessionReady(JNIEnv* env,
jobject j_media_drm,
jint j_session_id) {
uint32 session_id = j_session_id;
- manager_->OnSessionReady(media_keys_id_, session_id);
+ session_ready_cb_.Run(session_id);
+ // TODO(xhwang/jrummell): Move this when usableKeyIds/keyschange are
+ // implemented.
+ player_tracker_.NotifyNewKey();
}
void MediaDrmBridge::OnSessionClosed(JNIEnv* env,
jobject j_media_drm,
jint j_session_id) {
uint32 session_id = j_session_id;
- manager_->OnSessionClosed(media_keys_id_, session_id);
+ session_closed_cb_.Run(session_id);
}
void MediaDrmBridge::OnSessionError(JNIEnv* env,
jobject j_media_drm,
jint j_session_id) {
uint32 session_id = j_session_id;
- manager_->OnSessionError(
- media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
+ session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
}
ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
@@ -341,11 +542,6 @@ ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
return Java_MediaDrmBridge_getMediaCrypto(env, j_media_drm_.obj());
}
-// static
-bool MediaDrmBridge::IsSecureDecoderRequired(SecurityLevel security_level) {
- return MediaDrmBridge::SECURITY_LEVEL_1 == security_level;
-}
-
MediaDrmBridge::SecurityLevel MediaDrmBridge::GetSecurityLevel() {
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_security_level =
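
The PSSH walking above relies on the standard ISO BMFF box layout: a 32-bit
big-endian Size followed by a 32-bit Type, with Size == 1 signalling that a
64-bit "large size" follows (which is what kBoxLargeSizeSize accounts for).
Below is a minimal sketch of reading one box header; ReadBoxHeader and
ReadUint32BE are illustrative helpers, not the functions from this file.

#include <stdint.h>
#include <cstddef>

static uint32_t ReadUint32BE(const uint8_t* data) {
  uint32_t value = 0;
  for (int i = 0; i < 4; ++i)
    value = (value << 8) | data[i];
  return value;
}

// Reads the fixed 8-byte header (Size + Type) of one ISO BMFF box. Returns
// false if fewer than 8 bytes remain.
static bool ReadBoxHeader(const uint8_t* data,
                          size_t size,
                          uint32_t* box_size,
                          uint32_t* box_type) {
  const size_t kHeaderSize = 8;  // Matches kBoxHeaderSize in the diff above.
  if (size < kHeaderSize)
    return false;
  *box_size = ReadUint32BE(data);      // 0 = "to end of file", 1 = large size.
  *box_type = ReadUint32BE(data + 4);  // e.g. 0x70737368 for 'pssh'.
  return true;
}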
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 76149a6a609..3c0bfb3e640 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -6,16 +6,15 @@
#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
#include <jni.h>
-#include <map>
-#include <queue>
#include <string>
#include <vector>
#include "base/android/scoped_java_ref.h"
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/browser_cdm.h"
#include "media/base/media_export.h"
-#include "media/base/media_keys.h"
+#include "media/cdm/player_tracker_impl.h"
#include "url/gurl.h"
class GURL;
@@ -26,7 +25,7 @@ class MediaPlayerManager;
// This class provides DRM services for android EME implementation.
// TODO(qinmin): implement all the functions in this class.
-class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
+class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
public:
enum SecurityLevel {
SECURITY_LEVEL_NONE = 0,
@@ -38,37 +37,70 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
virtual ~MediaDrmBridge();
- // Returns a MediaDrmBridge instance if |scheme_uuid| is supported, or a NULL
- // pointer otherwise.
- static scoped_ptr<MediaDrmBridge> Create(
- int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- const GURL& frame_url,
- const std::string& security_level,
- MediaPlayerManager* manager);
-
// Checks whether MediaDRM is available.
+ // All other static methods check IsAvailable() internally. There's no need
+ // to check IsAvailable() explicitly before calling them.
static bool IsAvailable();
- static bool IsSecurityLevelSupported(const std::vector<uint8>& scheme_uuid,
- const std::string& security_level);
+ static bool IsSecurityLevelSupported(const std::string& key_system,
+ SecurityLevel security_level);
+
+ // Checks whether |key_system| is supported.
+ static bool IsKeySystemSupported(const std::string& key_system);
- static bool IsCryptoSchemeSupported(const std::vector<uint8>& scheme_uuid,
- const std::string& container_mime_type);
+ // Returns the list of platform-supported key system names that are not
+ // handled by Chrome explicitly.
+ static std::vector<std::string> GetPlatformKeySystemNames();
- static bool IsSecureDecoderRequired(const std::string& security_level_str);
+ // Checks whether |key_system| is supported with |container_mime_type|.
+ // |container_mime_type| must not be empty.
+ static bool IsKeySystemSupportedWithType(
+ const std::string& key_system,
+ const std::string& container_mime_type);
+
+ static bool IsSecureDecoderRequired(SecurityLevel security_level);
static bool RegisterMediaDrmBridge(JNIEnv* env);
- // MediaKeys implementations.
+ // Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
+ // pointer otherwise.
+ static scoped_ptr<MediaDrmBridge> Create(
+ const std::string& key_system,
+ const SessionCreatedCB& session_created_cb,
+ const SessionMessageCB& session_message_cb,
+ const SessionReadyCB& session_ready_cb,
+ const SessionClosedCB& session_closed_cb,
+ const SessionErrorCB& session_error_cb);
+
+ // Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
+ // pointer otherwise. No session callbacks are provided. This is used when we
+ // need to use MediaDrmBridge without creating any sessions.
+ static scoped_ptr<MediaDrmBridge> CreateSessionless(
+ const std::string& key_system);
+
+ // Returns true if |security_level| is successfully set, or false otherwise.
+ // Call this function right after Create() and before any other calls.
+ // Note:
+ // - If this function is not called, the default security level of the device
+ // will be used.
+ // - It's recommended to call this function only once on a MediaDrmBridge
+ // object. Calling this function multiple times may cause errors.
+ bool SetSecurityLevel(SecurityLevel security_level);
+
+ // BrowserCdm implementations.
virtual bool CreateSession(uint32 session_id,
- const std::string& type,
+ const std::string& content_type,
const uint8* init_data,
int init_data_length) OVERRIDE;
+ virtual void LoadSession(uint32 session_id,
+ const std::string& web_session_id) OVERRIDE;
virtual void UpdateSession(uint32 session_id,
const uint8* response,
int response_length) OVERRIDE;
virtual void ReleaseSession(uint32 session_id) OVERRIDE;
+ virtual int RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) OVERRIDE;
+ virtual void UnregisterPlayer(int registration_id) OVERRIDE;
// Returns a MediaCrypto object if it's already created. Returns a null object
// otherwise.
@@ -105,41 +137,36 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
// video playback.
bool IsProtectedSurfaceRequired();
- int media_keys_id() const { return media_keys_id_; }
-
- GURL frame_url() const { return frame_url_; }
-
private:
- static bool IsSecureDecoderRequired(SecurityLevel security_level);
-
- MediaDrmBridge(int media_keys_id,
- const std::vector<uint8>& scheme_uuid,
- const GURL& frame_url,
- const std::string& security_level,
- MediaPlayerManager* manager);
+ MediaDrmBridge(const std::vector<uint8>& scheme_uuid,
+ const SessionCreatedCB& session_created_cb,
+ const SessionMessageCB& session_message_cb,
+ const SessionReadyCB& session_ready_cb,
+ const SessionClosedCB& session_closed_cb,
+ const SessionErrorCB& session_error_cb);
// Get the security level of the media.
SecurityLevel GetSecurityLevel();
- // ID of the MediaKeys object.
- int media_keys_id_;
-
// UUID of the key system.
std::vector<uint8> scheme_uuid_;
- // media stream's frame URL.
- const GURL frame_url_;
-
// Java MediaDrm instance.
base::android::ScopedJavaGlobalRef<jobject> j_media_drm_;
- // Non-owned pointer.
- MediaPlayerManager* manager_;
+ // Callbacks for firing session events.
+ SessionCreatedCB session_created_cb_;
+ SessionMessageCB session_message_cb_;
+ SessionReadyCB session_ready_cb_;
+ SessionClosedCB session_closed_cb_;
+ SessionErrorCB session_error_cb_;
base::Closure media_crypto_ready_cb_;
ResetCredentialsCB reset_credentials_cb_;
+ PlayerTrackerImpl player_tracker_;
+
DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
};
diff --git a/chromium/media/base/android/media_drm_bridge_unittest.cc b/chromium/media/base/android/media_drm_bridge_unittest.cc
new file mode 100644
index 00000000000..a838203304a
--- /dev/null
+++ b/chromium/media/base/android/media_drm_bridge_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
+
+namespace media {
+
+#define EXPECT_TRUE_IF_AVAILABLE(a) \
+ do { \
+ if (!MediaDrmBridge::IsAvailable()) { \
+ VLOG(0) << "MediaDrm not supported on device."; \
+ EXPECT_FALSE(a); \
+ } else { \
+ EXPECT_TRUE(a); \
+ } \
+ } while (0)
+
+const char kAudioMp4[] = "audio/mp4";
+const char kVideoMp4[] = "video/mp4";
+const char kAudioWebM[] = "audio/webm";
+const char kVideoWebM[] = "video/webm";
+const char kInvalidKeySystem[] = "invalid.keysystem";
+const MediaDrmBridge::SecurityLevel kLNone =
+ MediaDrmBridge::SECURITY_LEVEL_NONE;
+const MediaDrmBridge::SecurityLevel kL1 = MediaDrmBridge::SECURITY_LEVEL_1;
+const MediaDrmBridge::SecurityLevel kL3 = MediaDrmBridge::SECURITY_LEVEL_3;
+
+// Helper functions to avoid typing "MediaDrmBridge::" in tests.
+
+static bool IsKeySystemSupported(const std::string& key_system) {
+ return MediaDrmBridge::IsKeySystemSupported(key_system);
+}
+
+static bool IsKeySystemSupportedWithType(
+ const std::string& key_system,
+ const std::string& container_mime_type) {
+ return MediaDrmBridge::IsKeySystemSupportedWithType(key_system,
+ container_mime_type);
+}
+
+static bool IsSecurityLevelSupported(
+ const std::string& key_system,
+ MediaDrmBridge::SecurityLevel security_level) {
+ return MediaDrmBridge::IsSecurityLevelSupported(key_system, security_level);
+}
+
+TEST(MediaDrmBridgeTest, IsSecurityLevelSupported_Widevine) {
+ EXPECT_FALSE(IsSecurityLevelSupported(kWidevineKeySystem, kLNone));
+ // We test "L3" fully. But for "L1" we don't check the result as it depends on
+ // whether the test device supports "L1".
+ EXPECT_TRUE_IF_AVAILABLE(IsSecurityLevelSupported(kWidevineKeySystem, kL3));
+ IsSecurityLevelSupported(kWidevineKeySystem, kL1);
+}
+
+// Invalid key systems are NOT supported regardless of whether MediaDrm is
+// available.
+TEST(MediaDrmBridgeTest, IsSecurityLevelSupported_InvalidKeySystem) {
+ EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kLNone));
+ EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kL1));
+ EXPECT_FALSE(IsSecurityLevelSupported(kInvalidKeySystem, kL3));
+}
+
+TEST(MediaDrmBridgeTest, IsKeySystemSupported_Widevine) {
+ EXPECT_TRUE_IF_AVAILABLE(IsKeySystemSupported(kWidevineKeySystem));
+
+ // TODO(xhwang): Enable when b/13564917 is fixed.
+ // EXPECT_TRUE_IF_AVAILABLE(
+ // IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioMp4));
+ EXPECT_TRUE_IF_AVAILABLE(
+ IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoMp4));
+
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioWebM));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, kVideoWebM));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, "unknown"));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, "video/avi"));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kWidevineKeySystem, "audio/mp3"));
+}
+
+// Invalid key systems are NOT supported regardless of whether MediaDrm is
+// available.
+TEST(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
+ EXPECT_FALSE(IsKeySystemSupported(kInvalidKeySystem));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kAudioMp4));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kVideoMp4));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kAudioWebM));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kVideoWebM));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, "unknown"));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, "video/avi"));
+ EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, "audio/mp3"));
+}
+
+} // namespace media
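
A side note on the EXPECT_TRUE_IF_AVAILABLE macro above: the do { ... } while (0)
wrapper makes the multi-statement body expand to exactly one statement, so the
macro nests safely under unbraced control flow. A hypothetical test, added only
to illustrate the point:

TEST(MediaDrmBridgeTest, MacroNestsAsOneStatement) {
  // Without the do/while wrapper, the macro's braces plus the trailing
  // semicolon would terminate the if early and orphan the else branch.
  if (MediaDrmBridge::IsAvailable())
    EXPECT_TRUE_IF_AVAILABLE(IsKeySystemSupported(kWidevineKeySystem));
  else
    VLOG(0) << "MediaDrm not available; nothing to check.";
}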
diff --git a/chromium/media/base/android/media_player_android.cc b/chromium/media/base/android/media_player_android.cc
index c2e00947af6..dc6e0752b3e 100644
--- a/chromium/media/base/android/media_player_android.cc
+++ b/chromium/media/base/android/media_player_android.cc
@@ -12,17 +12,19 @@ namespace media {
MediaPlayerAndroid::MediaPlayerAndroid(
int player_id,
- MediaPlayerManager* manager)
- : player_id_(player_id),
- manager_(manager) {
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
+ const GURL& frame_url)
+ : request_media_resources_cb_(request_media_resources_cb),
+ release_media_resources_cb_(release_media_resources_cb),
+ player_id_(player_id),
+ manager_(manager),
+ frame_url_(frame_url) {
}
MediaPlayerAndroid::~MediaPlayerAndroid() {}
-bool MediaPlayerAndroid::IsRemote() const {
- return false;
-}
-
GURL MediaPlayerAndroid::GetUrl() {
return GURL();
}
@@ -31,13 +33,9 @@ GURL MediaPlayerAndroid::GetFirstPartyForCookies() {
return GURL();
}
-void MediaPlayerAndroid::SetDrmBridge(MediaDrmBridge* drm_bridge) {
- // Not all players support DrmBridge. Do nothing by default.
- return;
-}
-
-void MediaPlayerAndroid::OnKeyAdded() {
- // Not all players care about the decryption key. Do nothing by default.
+void MediaPlayerAndroid::SetCdm(BrowserCdm* /* cdm */) {
+ // Players that support EME should override this.
+ NOTREACHED() << "EME not supported on base MediaPlayerAndroid class.";
return;
}
diff --git a/chromium/media/base/android/media_player_android.h b/chromium/media/base/android/media_player_android.h
index 27a6432d9e2..879ba756e2c 100644
--- a/chromium/media/base/android/media_player_android.h
+++ b/chromium/media/base/android/media_player_android.h
@@ -16,7 +16,7 @@
namespace media {
-class MediaDrmBridge;
+class BrowserCdm;
class MediaPlayerManager;
// This class serves as the base class for different media player
@@ -34,6 +34,12 @@ class MEDIA_EXPORT MediaPlayerAndroid {
MEDIA_ERROR_INVALID_CODE,
};
+ // Callback when the player needs decoding resources.
+ typedef base::Callback<void(int player_id)> RequestMediaResourcesCB;
+
+ // Callback when the player releases decoding resources.
+ typedef base::Callback<void(int player_id)> ReleaseMediaResourcesCB;
+
// Passing an external java surface object to the player.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) = 0;
@@ -46,7 +52,7 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Seek to a particular position, based on renderer signaling actual seek
// with MediaPlayerHostMsg_Seek. If eventual success, OnSeekComplete() will be
// called.
- virtual void SeekTo(const base::TimeDelta& timestamp) = 0;
+ virtual void SeekTo(base::TimeDelta timestamp) = 0;
// Release the player resources.
virtual void Release() = 0;
@@ -55,7 +61,6 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual void SetVolume(double volume) = 0;
// Get the media information from the player.
- virtual bool IsRemote() const;
virtual int GetVideoWidth() = 0;
virtual int GetVideoHeight() = 0;
virtual base::TimeDelta GetDuration() = 0;
@@ -68,21 +73,29 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual GURL GetUrl();
virtual GURL GetFirstPartyForCookies();
- // Pass a drm bridge to a player.
- virtual void SetDrmBridge(MediaDrmBridge* drm_bridge);
+ // Associates the |cdm| with this player.
+ virtual void SetCdm(BrowserCdm* cdm);
- // Notifies the player that a decryption key has been added. The player
- // may want to start/resume playback if it is waiting for a key.
- virtual void OnKeyAdded();
+ // Check whether the player still uses the current surface.
+ virtual bool IsSurfaceInUse() const = 0;
int player_id() { return player_id_; }
+ GURL frame_url() { return frame_url_; }
+
protected:
MediaPlayerAndroid(int player_id,
- MediaPlayerManager* manager);
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
+ const GURL& frame_url);
MediaPlayerManager* manager() { return manager_; }
+ RequestMediaResourcesCB request_media_resources_cb_;
+
+ ReleaseMediaResourcesCB release_media_resources_cb_;
+
private:
// Player ID assigned to this player.
int player_id_;
@@ -90,6 +103,9 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Resource manager for all the media players.
MediaPlayerManager* manager_;
+ // URL for the frame that contains this player.
+ GURL frame_url_;
+
DISALLOW_COPY_AND_ASSIGN(MediaPlayerAndroid);
};
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
index 0f79c13ad93..214b52b486d 100644
--- a/chromium/media/base/android/media_player_bridge.cc
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -18,7 +18,10 @@ using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
// Time update happens every 250ms.
-static const int kTimeUpdateInterval = 250;
+const int kTimeUpdateInterval = 250;
+
+// Blob URL scheme.
+const char kBlobScheme[] = "blob";
namespace media {
@@ -26,23 +29,33 @@ MediaPlayerBridge::MediaPlayerBridge(
int player_id,
const GURL& url,
const GURL& first_party_for_cookies,
+ const std::string& user_agent,
bool hide_url_log,
- MediaPlayerManager* manager)
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
+ const GURL& frame_url)
: MediaPlayerAndroid(player_id,
- manager),
+ manager,
+ request_media_resources_cb,
+ release_media_resources_cb,
+ frame_url),
prepared_(false),
pending_play_(false),
url_(url),
first_party_for_cookies_(first_party_for_cookies),
+ user_agent_(user_agent),
hide_url_log_(hide_url_log),
width_(0),
height_(0),
can_pause_(true),
can_seek_forward_(true),
can_seek_backward_(true),
- weak_this_(this),
- listener_(base::MessageLoopProxy::current(),
- weak_this_.GetWeakPtr()) {
+ is_surface_in_use_(false),
+ volume_(-1.0),
+ weak_factory_(this) {
+ listener_.reset(new MediaPlayerListener(base::MessageLoopProxy::current(),
+ weak_factory_.GetWeakPtr()));
}
MediaPlayerBridge::~MediaPlayerBridge() {
@@ -55,23 +68,26 @@ MediaPlayerBridge::~MediaPlayerBridge() {
}
void MediaPlayerBridge::Initialize() {
+ cookies_.clear();
if (url_.SchemeIsFile()) {
- cookies_.clear();
ExtractMediaMetadata(url_.spec());
return;
}
media::MediaResourceGetter* resource_getter =
manager()->GetMediaResourceGetter();
- if (url_.SchemeIsFileSystem()) {
- cookies_.clear();
- resource_getter->GetPlatformPathFromFileSystemURL(url_, base::Bind(
- &MediaPlayerBridge::ExtractMediaMetadata, weak_this_.GetWeakPtr()));
+ if (url_.SchemeIsFileSystem() || url_.SchemeIs(kBlobScheme)) {
+ resource_getter->GetPlatformPathFromURL(
+ url_,
+ base::Bind(&MediaPlayerBridge::ExtractMediaMetadata,
+ weak_factory_.GetWeakPtr()));
return;
}
- resource_getter->GetCookies(url_, first_party_for_cookies_, base::Bind(
- &MediaPlayerBridge::OnCookiesRetrieved, weak_this_.GetWeakPtr()));
+ resource_getter->GetCookies(url_,
+ first_party_for_cookies_,
+ base::Bind(&MediaPlayerBridge::OnCookiesRetrieved,
+ weak_factory_.GetWeakPtr()));
}
void MediaPlayerBridge::CreateJavaMediaPlayerBridge() {
@@ -81,6 +97,9 @@ void MediaPlayerBridge::CreateJavaMediaPlayerBridge() {
j_media_player_bridge_.Reset(Java_MediaPlayerBridge_create(
env, reinterpret_cast<intptr_t>(this)));
+ if (volume_ >= 0)
+ SetVolume(volume_);
+
SetMediaPlayerListener();
}
@@ -103,7 +122,7 @@ void MediaPlayerBridge::SetMediaPlayerListener() {
jobject j_context = base::android::GetApplicationContext();
DCHECK(j_context);
- listener_.CreateMediaPlayerListener(j_context, j_media_player_bridge_.obj());
+ listener_->CreateMediaPlayerListener(j_context, j_media_player_bridge_.obj());
}
void MediaPlayerBridge::SetDuration(base::TimeDelta duration) {
@@ -119,7 +138,7 @@ void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
-
+ is_surface_in_use_ = true;
Java_MediaPlayerBridge_setSurface(
env, j_media_player_bridge_.obj(), surface.j_surface().obj());
}
@@ -127,13 +146,15 @@ void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
void MediaPlayerBridge::Prepare() {
DCHECK(j_media_player_bridge_.is_null());
CreateJavaMediaPlayerBridge();
- if (url_.SchemeIsFileSystem()) {
- manager()->GetMediaResourceGetter()->GetPlatformPathFromFileSystemURL(
- url_, base::Bind(&MediaPlayerBridge::SetDataSource,
- weak_this_.GetWeakPtr()));
- } else {
- SetDataSource(url_.spec());
+ if (url_.SchemeIsFileSystem() || url_.SchemeIs(kBlobScheme)) {
+ manager()->GetMediaResourceGetter()->GetPlatformPathFromURL(
+ url_,
+ base::Bind(&MediaPlayerBridge::SetDataSource,
+ weak_factory_.GetWeakPtr()));
+ return;
}
+
+ SetDataSource(url_.spec());
}
void MediaPlayerBridge::SetDataSource(const std::string& url) {
@@ -147,6 +168,8 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
ScopedJavaLocalRef<jstring> j_url_string = ConvertUTF8ToJavaString(env, url);
ScopedJavaLocalRef<jstring> j_cookies = ConvertUTF8ToJavaString(
env, cookies_);
+ ScopedJavaLocalRef<jstring> j_user_agent = ConvertUTF8ToJavaString(
+ env, user_agent_);
jobject j_context = base::android::GetApplicationContext();
DCHECK(j_context);
@@ -162,12 +185,12 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
if (!Java_MediaPlayerBridge_setDataSource(
env, j_media_player_bridge_.obj(), j_context, j_url_string.obj(),
- j_cookies.obj(), hide_url_log_)) {
+ j_cookies.obj(), j_user_agent.obj(), hide_url_log_)) {
OnMediaError(MEDIA_ERROR_FORMAT);
return;
}
- manager()->RequestMediaResources(player_id());
+ request_media_resources_cb_.Run(player_id());
if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
OnMediaError(MEDIA_ERROR_FORMAT);
}
@@ -179,7 +202,7 @@ void MediaPlayerBridge::OnDidSetDataUriDataSource(JNIEnv* env, jobject obj,
return;
}
- manager()->RequestMediaResources(player_id());
+ request_media_resources_cb_.Run(player_id());
if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
OnMediaError(MEDIA_ERROR_FORMAT);
}
@@ -191,8 +214,11 @@ void MediaPlayerBridge::OnCookiesRetrieved(const std::string& cookies) {
void MediaPlayerBridge::ExtractMediaMetadata(const std::string& url) {
manager()->GetMediaResourceGetter()->ExtractMediaMetadata(
- url, cookies_, base::Bind(&MediaPlayerBridge::OnMediaMetadataExtracted,
- weak_this_.GetWeakPtr()));
+ url,
+ cookies_,
+ user_agent_,
+ base::Bind(&MediaPlayerBridge::OnMediaMetadataExtracted,
+ weak_factory_.GetWeakPtr()));
}
void MediaPlayerBridge::OnMediaMetadataExtracted(
@@ -256,7 +282,7 @@ int MediaPlayerBridge::GetVideoHeight() {
env, j_media_player_bridge_.obj());
}
-void MediaPlayerBridge::SeekTo(const base::TimeDelta& timestamp) {
+void MediaPlayerBridge::SeekTo(base::TimeDelta timestamp) {
// Record the time to seek when OnMediaPrepared() is called.
pending_seek_ = timestamp;
@@ -293,18 +319,20 @@ void MediaPlayerBridge::Release() {
pending_seek_ = GetCurrentTime();
prepared_ = false;
pending_play_ = false;
+ is_surface_in_use_ = false;
SetVideoSurface(gfx::ScopedJavaSurface());
-
JNIEnv* env = base::android::AttachCurrentThread();
Java_MediaPlayerBridge_release(env, j_media_player_bridge_.obj());
j_media_player_bridge_.Reset();
- manager()->ReleaseMediaResources(player_id());
- listener_.ReleaseMediaPlayerListenerResources();
+ release_media_resources_cb_.Run(player_id());
+ listener_->ReleaseMediaPlayerListenerResources();
}
void MediaPlayerBridge::SetVolume(double volume) {
- if (j_media_player_bridge_.is_null())
+ if (j_media_player_bridge_.is_null()) {
+ volume_ = volume;
return;
+ }
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
@@ -455,4 +483,8 @@ GURL MediaPlayerBridge::GetFirstPartyForCookies() {
return first_party_for_cookies_;
}
+bool MediaPlayerBridge::IsSurfaceInUse() const {
+ return is_surface_in_use_;
+}
+
} // namespace media
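
The volume handling above is a small deferred-initialization pattern:
SetVolume() may be called before the Java player exists, so the value is cached
(with -1.0 from the constructor as the "unset" sentinel) and replayed once
CreateJavaMediaPlayerBridge() runs. A minimal sketch of the same pattern;
LazyPlayer is an illustrative class, not part of this patch.

class LazyPlayer {
 public:
  LazyPlayer() : created_(false), volume_(-1.0) {}

  void SetVolume(double volume) {
    if (!created_) {
      volume_ = volume;  // Cache until the underlying player exists.
      return;
    }
    ApplyVolume(volume);
  }

  void Create() {
    created_ = true;
    if (volume_ >= 0)  // Replay the cached value, as the bridge does.
      ApplyVolume(volume_);
  }

 private:
  void ApplyVolume(double /* volume */) { /* forward to the real player */ }

  bool created_;
  double volume_;
};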
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
index 402cb49858b..f63d6268851 100644
--- a/chromium/media/base/android/media_player_bridge.h
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -45,8 +45,12 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
MediaPlayerBridge(int player_id,
const GURL& url,
const GURL& first_party_for_cookies,
+ const std::string& user_agent,
bool hide_url_log,
- MediaPlayerManager* manager);
+ MediaPlayerManager* manager,
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
+ const GURL& frame_url);
virtual ~MediaPlayerBridge();
// Initialize this object and extract the metadata from the media.
@@ -56,7 +60,7 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Pause(bool is_media_related_action ALLOW_UNUSED) OVERRIDE;
- virtual void SeekTo(const base::TimeDelta& timestamp) OVERRIDE;
+ virtual void SeekTo(base::TimeDelta timestamp) OVERRIDE;
virtual void Release() OVERRIDE;
virtual void SetVolume(double volume) OVERRIDE;
virtual int GetVideoWidth() OVERRIDE;
@@ -70,6 +74,7 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual bool IsPlayerReady() OVERRIDE;
virtual GURL GetUrl() OVERRIDE;
virtual GURL GetFirstPartyForCookies() OVERRIDE;
+ virtual bool IsSurfaceInUse() const OVERRIDE;
// MediaPlayerListener callbacks.
void OnVideoSizeChanged(int width, int height);
@@ -101,6 +106,8 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual base::android::ScopedJavaLocalRef<jobject> GetAllowedOperations();
private:
+ friend class MediaPlayerListener;
+
// Set the data source for the media player.
void SetDataSource(const std::string& url);
@@ -140,6 +147,9 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
// First party url for cookies.
GURL first_party_for_cookies_;
+ // User agent string to be used for the media player.
+ const std::string user_agent_;
+
// Hide url log from media player.
bool hide_url_log_;
@@ -161,13 +171,19 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
base::RepeatingTimer<MediaPlayerBridge> time_update_timer_;
- // Weak pointer passed to |listener_| for callbacks.
- base::WeakPtrFactory<MediaPlayerBridge> weak_this_;
-
// Listener object that listens to all the media player events.
- MediaPlayerListener listener_;
+ scoped_ptr<MediaPlayerListener> listener_;
+
+ // Whether player is currently using a surface.
+ bool is_surface_in_use_;
+
+ // Volume of playback.
+ double volume_;
+
+ // Weak pointer passed to |listener_| for callbacks.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MediaPlayerBridge> weak_factory_;
- friend class MediaPlayerListener;
DISALLOW_COPY_AND_ASSIGN(MediaPlayerBridge);
};
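
The "weak pointers must be invalidated before all other member variables" note
above follows from C++ destruction order: members are destroyed in reverse
declaration order, so a WeakPtrFactory declared last is destroyed first, and
its destructor invalidates outstanding weak pointers before the remaining
members go away. A sketch with a hypothetical class:

#include "base/memory/weak_ptr.h"

class Widget {
 public:
  Widget() : state_(0), weak_factory_(this) {}

 private:
  int state_;  // Still alive whenever a weak pointer is dereferenced, because
               // the factory below, declared last, dies first and invalidates
               // all outstanding base::WeakPtr<Widget> instances.
  base::WeakPtrFactory<Widget> weak_factory_;
};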
diff --git a/chromium/media/base/android/media_player_listener.cc b/chromium/media/base/android/media_player_listener.cc
index c26984034ed..6a222984686 100644
--- a/chromium/media/base/android/media_player_listener.cc
+++ b/chromium/media/base/android/media_player_listener.cc
@@ -7,7 +7,7 @@
#include "base/android/jni_android.h"
#include "base/bind.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/base/android/media_player_bridge.h"
// Auto generated jni class from MediaPlayerListener.java.
@@ -21,11 +21,11 @@ using base::android::ScopedJavaLocalRef;
namespace media {
MediaPlayerListener::MediaPlayerListener(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
base::WeakPtr<MediaPlayerBridge> media_player)
- : message_loop_(message_loop),
+ : task_runner_(task_runner),
media_player_(media_player) {
- DCHECK(message_loop_.get());
+ DCHECK(task_runner_.get());
DCHECK(media_player_);
}
@@ -53,44 +53,44 @@ void MediaPlayerListener::ReleaseMediaPlayerListenerResources() {
void MediaPlayerListener::OnMediaError(
JNIEnv* /* env */, jobject /* obj */, jint error_type) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnMediaError, media_player_, error_type));
}
void MediaPlayerListener::OnVideoSizeChanged(
JNIEnv* /* env */, jobject /* obj */, jint width, jint height) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnVideoSizeChanged, media_player_,
width, height));
}
void MediaPlayerListener::OnBufferingUpdate(
JNIEnv* /* env */, jobject /* obj */, jint percent) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnBufferingUpdate, media_player_, percent));
}
void MediaPlayerListener::OnPlaybackComplete(
JNIEnv* /* env */, jobject /* obj */) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnPlaybackComplete, media_player_));
}
void MediaPlayerListener::OnSeekComplete(
JNIEnv* /* env */, jobject /* obj */) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnSeekComplete, media_player_));
}
void MediaPlayerListener::OnMediaPrepared(
JNIEnv* /* env */, jobject /* obj */) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnMediaPrepared, media_player_));
}
void MediaPlayerListener::OnMediaInterrupted(
JNIEnv* /* env */, jobject /* obj */) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaPlayerBridge::OnMediaInterrupted, media_player_));
}
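
Every listener method above follows the same trampoline: the JNI callback arrives on an arbitrary thread, and the handler immediately re-posts the real work to |task_runner_|, bound to a weak pointer so that an already-destroyed MediaPlayerBridge simply drops the call. A single-threaded sketch of that pattern, with a queue of std::function standing in for base::SingleThreadTaskRunner and std::weak_ptr for base::WeakPtr (all names illustrative):

#include <functional>
#include <iostream>
#include <memory>
#include <queue>

// Stand-in for the task runner: tasks run later, in order, on the owner's
// thread rather than on the thread that posted them.
struct TaskRunner {
  std::queue<std::function<void()>> tasks;
  void PostTask(std::function<void()> task) { tasks.push(std::move(task)); }
  void RunPendingTasks() {
    while (!tasks.empty()) {
      tasks.front()();
      tasks.pop();
    }
  }
};

struct Bridge {
  void OnMediaError(int error_type) {
    std::cout << "error " << error_type << " handled on the player thread\n";
  }
};

// Mirrors MediaPlayerListener::OnMediaError(): hop threads first, then check
// the weak pointer on the destination thread before dereferencing.
void OnMediaErrorFromJni(TaskRunner* runner, std::weak_ptr<Bridge> bridge,
                         int error_type) {
  runner->PostTask([bridge, error_type] {
    if (auto locked = bridge.lock())
      locked->OnMediaError(error_type);
  });
}

int main() {
  TaskRunner runner;
  auto bridge = std::make_shared<Bridge>();
  OnMediaErrorFromJni(&runner, bridge, 42);  // 42 is an arbitrary error code
  runner.RunPendingTasks();
  return 0;
}
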
diff --git a/chromium/media/base/android/media_player_listener.h b/chromium/media/base/android/media_player_listener.h
index 698493b0f33..46e56db84d0 100644
--- a/chromium/media/base/android/media_player_listener.h
+++ b/chromium/media/base/android/media_player_listener.h
@@ -12,7 +12,7 @@
#include "base/memory/weak_ptr.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -25,9 +25,9 @@ class MediaPlayerListener {
public:
  // Construct a native MediaPlayerListener object. Callbacks from the Java
  // side object will be forwarded to |media_player| by posting a task on the
- // |message_loop|.
+ // |task_runner|.
MediaPlayerListener(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
base::WeakPtr<MediaPlayerBridge> media_player);
virtual ~MediaPlayerListener();
@@ -51,7 +51,7 @@ class MediaPlayerListener {
private:
  // The task runner on which |media_player_| lives.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  // The MediaPlayerBridge object to which all the callbacks should be sent.
base::WeakPtr<MediaPlayerBridge> media_player_;
diff --git a/chromium/media/base/android/media_player_manager.h b/chromium/media/base/android/media_player_manager.h
index c215df59f4b..0b79f187d65 100644
--- a/chromium/media/base/android/media_player_manager.h
+++ b/chromium/media/base/android/media_player_manager.h
@@ -5,18 +5,13 @@
#ifndef MEDIA_BASE_ANDROID_MEDIA_PLAYER_MANAGER_H_
#define MEDIA_BASE_ANDROID_MEDIA_PLAYER_MANAGER_H_
-#include <string>
-#include <vector>
-
#include "base/basictypes.h"
#include "base/time/time.h"
#include "media/base/android/demuxer_stream_player_params.h"
#include "media/base/media_export.h"
-#include "media/base/media_keys.h"
namespace media {
-class MediaDrmBridge;
class MediaPlayerAndroid;
class MediaResourceGetter;
@@ -25,16 +20,6 @@ class MEDIA_EXPORT MediaPlayerManager {
public:
virtual ~MediaPlayerManager() {}
- // Called by a MediaPlayerAndroid object when it is going to decode
- // media streams. This helps the manager object maintain an array
- // of active MediaPlayerAndroid objects and release the resources
- // when needed.
- virtual void RequestMediaResources(int player_id) = 0;
-
- // Called when a MediaPlayerAndroid object releases all its decoding
- // resources.
- virtual void ReleaseMediaResources(int player_id) = 0;
-
// Return a pointer to the MediaResourceGetter object.
virtual MediaResourceGetter* GetMediaResourceGetter() = 0;
@@ -78,42 +63,14 @@ class MEDIA_EXPORT MediaPlayerManager {
// Returns the player with the specified id.
virtual MediaPlayerAndroid* GetPlayer(int player_id) = 0;
- // Release all the players managed by this object.
- virtual void DestroyAllMediaPlayers() = 0;
-
- // Get the MediaDrmBridge object for the given media key Id.
- virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) = 0;
-
// Called by the player to get a hardware protected surface.
- virtual void OnProtectedSurfaceRequested(int player_id) = 0;
-
- // The following five methods are related to EME.
- // TODO(xhwang): These methods needs to be decoupled from MediaPlayerManager
- // to support the W3C Working Draft version of the EME spec.
- // http://crbug.com/315312
-
- // Called when MediaDrmBridge determines a SessionId.
- virtual void OnSessionCreated(int media_keys_id,
- uint32 session_id,
- const std::string& web_session_id) = 0;
-
- // Called when MediaDrmBridge wants to send a Message event.
- virtual void OnSessionMessage(int media_keys_id,
- uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) = 0;
-
- // Called when MediaDrmBridge wants to send a Ready event.
- virtual void OnSessionReady(int media_keys_id, uint32 session_id) = 0;
-
- // Called when MediaDrmBridge wants to send a Closed event.
- virtual void OnSessionClosed(int media_keys_id, uint32 session_id) = 0;
-
- // Called when MediaDrmBridge wants to send an Error event.
- virtual void OnSessionError(int media_keys_id,
- uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) = 0;
+ virtual void RequestFullScreen(int player_id) = 0;
+
+#if defined(VIDEO_HOLE)
+ // Returns true if a media player should use video-overlay for the embedded
+ // encrypted video.
+ virtual bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo() = 0;
+#endif // defined(VIDEO_HOLE)
};
} // namespace media
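
With RequestMediaResources()/ReleaseMediaResources() removed from this interface, players now receive the equivalent operations as callbacks bound at construction time (the RequestMediaResourcesCB/ReleaseMediaResourcesCB parameters added to the constructors above), so MediaPlayerManager no longer needs to expose resource accounting at all. A sketch of that decoupling, with std::function standing in for base::Callback and a lambda for base::Bind (types and names illustrative):

#include <functional>
#include <iostream>

using RequestMediaResourcesCB = std::function<void(int)>;

class Manager {
 public:
  void OnMediaResourcesRequested(int player_id) {
    std::cout << "player " << player_id << " requested resources\n";
  }
};

// The player depends only on the one operation it was handed, not on the
// full manager interface.
class Player {
 public:
  Player(int player_id, RequestMediaResourcesCB request_cb)
      : player_id_(player_id), request_cb_(std::move(request_cb)) {}
  void Start() { request_cb_(player_id_); }

 private:
  int player_id_;
  RequestMediaResourcesCB request_cb_;
};

int main() {
  Manager manager;
  Player player(7, [&manager](int id) {
    manager.OnMediaResourcesRequested(id);
  });
  player.Start();
  return 0;
}
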
diff --git a/chromium/media/base/android/media_resource_getter.h b/chromium/media/base/android/media_resource_getter.h
index ea4eccdf5c5..075f9d01fec 100644
--- a/chromium/media/base/android/media_resource_getter.h
+++ b/chromium/media/base/android/media_resource_getter.h
@@ -30,8 +30,8 @@ class MEDIA_EXPORT MediaResourceGetter {
const GURL& first_party_for_cookies,
const GetCookieCB& callback) = 0;
- // Method for getting the platform path from a file system URL.
- virtual void GetPlatformPathFromFileSystemURL(
+ // Method for getting the platform path from a file system or blob URL.
+ virtual void GetPlatformPathFromURL(
const GURL& url,
const GetPlatformPathCB& callback) = 0;
@@ -40,6 +40,7 @@ class MEDIA_EXPORT MediaResourceGetter {
virtual void ExtractMediaMetadata(
const std::string& url,
const std::string& cookies,
+ const std::string& user_agent,
const ExtractMediaMetadataCB& callback) = 0;
};
diff --git a/chromium/media/base/android/media_source_player.cc b/chromium/media/base/android/media_source_player.cc
index ee84528a87c..f065a5c9347 100644
--- a/chromium/media/base/android/media_source_player.cc
+++ b/chromium/media/base/android/media_source_player.cc
@@ -19,131 +19,82 @@
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/video_decoder_job.h"
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
-namespace {
-
-// Use 16bit PCM for audio output. Keep this value in sync with the output
-// format we passed to AudioTrack in MediaCodecBridge.
-const int kBytesPerAudioOutputSample = 2;
-}
namespace media {
-// static
-bool MediaSourcePlayer::IsTypeSupported(
- const std::vector<uint8>& scheme_uuid,
- const std::string& security_level,
- const std::string& container,
- const std::vector<std::string>& codecs) {
- if (!MediaDrmBridge::IsCryptoSchemeSupported(scheme_uuid, container)) {
- DVLOG(1) << "UUID and container '" << container << "' not supported.";
- return false;
- }
-
- if (!MediaDrmBridge::IsSecurityLevelSupported(scheme_uuid, security_level)) {
- DVLOG(1) << "UUID and security level '" << security_level
- << "' not supported.";
- return false;
- }
-
- bool is_secure = MediaDrmBridge::IsSecureDecoderRequired(security_level);
- for (size_t i = 0; i < codecs.size(); ++i) {
- if (!MediaCodecBridge::CanDecode(codecs[i], is_secure)) {
- DVLOG(1) << "Codec '" << codecs[i] << "' "
- << (is_secure ? "in secure mode " : "") << "not supported.";
- return false;
- }
- }
-
- return true;
-}
-
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
MediaPlayerManager* manager,
- scoped_ptr<DemuxerAndroid> demuxer)
- : MediaPlayerAndroid(player_id, manager),
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
+ scoped_ptr<DemuxerAndroid> demuxer,
+ const GURL& frame_url)
+ : MediaPlayerAndroid(player_id,
+ manager,
+ request_media_resources_cb,
+ release_media_resources_cb,
+ frame_url),
demuxer_(demuxer.Pass()),
pending_event_(NO_EVENT_PENDING),
- width_(0),
- height_(0),
- audio_codec_(kUnknownAudioCodec),
- video_codec_(kUnknownVideoCodec),
- num_channels_(0),
- sampling_rate_(0),
- reached_audio_eos_(false),
- reached_video_eos_(false),
playing_(false),
- is_audio_encrypted_(false),
- is_video_encrypted_(false),
- volume_(-1.0),
clock_(&default_tick_clock_),
- next_video_data_is_iframe_(true),
doing_browser_seek_(false),
pending_seek_(false),
- reconfig_audio_decoder_(false),
- reconfig_video_decoder_(false),
- weak_this_(this),
drm_bridge_(NULL),
- is_waiting_for_key_(false) {
+ cdm_registration_id_(0),
+ is_waiting_for_key_(false),
+ is_waiting_for_audio_decoder_(false),
+ is_waiting_for_video_decoder_(false),
+ weak_factory_(this) {
+ audio_decoder_job_.reset(new AudioDecoderJob(
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_.get()),
+ DemuxerStream::AUDIO),
+ base::Bind(&MediaSourcePlayer::OnDemuxerConfigsChanged,
+ weak_factory_.GetWeakPtr())));
+ video_decoder_job_.reset(new VideoDecoderJob(
+ base::Bind(&DemuxerAndroid::RequestDemuxerData,
+ base::Unretained(demuxer_.get()),
+ DemuxerStream::VIDEO),
+ base::Bind(request_media_resources_cb_, player_id),
+ base::Bind(release_media_resources_cb_, player_id),
+ base::Bind(&MediaSourcePlayer::OnDemuxerConfigsChanged,
+ weak_factory_.GetWeakPtr())));
demuxer_->Initialize(this);
clock_.SetMaxTime(base::TimeDelta());
+ weak_this_ = weak_factory_.GetWeakPtr();
}
MediaSourcePlayer::~MediaSourcePlayer() {
Release();
+ DCHECK_EQ(!drm_bridge_, !cdm_registration_id_);
+ if (drm_bridge_) {
+ drm_bridge_->UnregisterPlayer(cdm_registration_id_);
+ cdm_registration_id_ = 0;
+ }
}
void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
- // For an empty surface, always pass it to the decoder job so that it
- // can detach from the current one. Otherwise, don't pass an unprotected
- // surface if the video content requires a protected one.
- if (!surface.IsEmpty() &&
- IsProtectedSurfaceRequired() && !surface.is_protected()) {
- return;
- }
-
- surface_ = surface.Pass();
-
- // If there is a pending surface change event, just wait for it to be
- // processed.
- if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
- return;
-
- // Eventual processing of surface change will take care of feeding the new
- // video decoder initially with I-frame. See b/8950387.
- SetPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
-
- // If seek is already pending, processing of the pending surface change
- // event will occur in OnDemuxerSeekDone().
- if (IsEventPending(SEEK_EVENT_PENDING))
- return;
-
- // If video config change is already pending, processing of the pending
- // surface change event will occur in OnDemuxerConfigsAvailable().
- if (reconfig_video_decoder_ && IsEventPending(CONFIG_CHANGE_EVENT_PENDING))
+ DVLOG(1) << __FUNCTION__;
+ if (!video_decoder_job_->SetVideoSurface(surface.Pass()))
return;
-
- // Otherwise we need to trigger pending event processing now.
- ProcessPendingEvents();
+ // Retry video decoder creation.
+ RetryDecoderCreation(false, true);
}
void MediaSourcePlayer::ScheduleSeekEventAndStopDecoding(
- const base::TimeDelta& seek_time) {
+ base::TimeDelta seek_time) {
DVLOG(1) << __FUNCTION__ << "(" << seek_time.InSecondsF() << ")";
DCHECK(!IsEventPending(SEEK_EVENT_PENDING));
pending_seek_ = false;
clock_.SetTime(seek_time, seek_time);
- if (audio_timestamp_helper_)
- audio_timestamp_helper_->SetBaseTimestamp(seek_time);
- if (audio_decoder_job_ && audio_decoder_job_->is_decoding())
+ if (audio_decoder_job_->is_decoding())
audio_decoder_job_->StopDecode();
- if (video_decoder_job_ && video_decoder_job_->is_decoding())
+ if (video_decoder_job_->is_decoding())
video_decoder_job_->StopDecode();
SetPendingEvent(SEEK_EVENT_PENDING);
@@ -172,8 +123,14 @@ void MediaSourcePlayer::Start() {
playing_ = true;
- if (IsProtectedSurfaceRequired())
- manager()->OnProtectedSurfaceRequested(player_id());
+ bool request_fullscreen = IsProtectedSurfaceRequired();
+#if defined(VIDEO_HOLE)
+ // Skip requesting fullscreen when hole-punching is used.
+ request_fullscreen = request_fullscreen &&
+ !manager()->ShouldUseVideoOverlayForEmbeddedEncryptedVideo();
+#endif // defined(VIDEO_HOLE)
+ if (request_fullscreen)
+ manager()->RequestFullScreen(player_id());
StartInternal();
}
@@ -195,14 +152,14 @@ bool MediaSourcePlayer::IsPlaying() {
}
int MediaSourcePlayer::GetVideoWidth() {
- return width_;
+ return video_decoder_job_->width();
}
int MediaSourcePlayer::GetVideoHeight() {
- return height_;
+ return video_decoder_job_->height();
}
-void MediaSourcePlayer::SeekTo(const base::TimeDelta& timestamp) {
+void MediaSourcePlayer::SeekTo(base::TimeDelta timestamp) {
DVLOG(1) << __FUNCTION__ << "(" << timestamp.InSecondsF() << ")";
if (IsEventPending(SEEK_EVENT_PENDING)) {
@@ -232,53 +189,22 @@ base::TimeDelta MediaSourcePlayer::GetDuration() {
void MediaSourcePlayer::Release() {
DVLOG(1) << __FUNCTION__;
- // Allow pending seeks and config changes to survive this Release().
- // If previously pending a prefetch done event, or a job was still decoding,
- // then at end of Release() we need to ProcessPendingEvents() to process any
- // seek or config change that was blocked by the prefetch or decode.
- // TODO(qinmin/wolenetz): Maintain channel state to not double-request data
- // or drop data received across Release()+Start(). See http://crbug.com/306314
- // and http://crbug.com/304234.
- bool process_pending_events = false;
- process_pending_events = IsEventPending(PREFETCH_DONE_EVENT_PENDING) ||
- (audio_decoder_job_ && audio_decoder_job_->is_decoding()) ||
- (video_decoder_job_ && video_decoder_job_->is_decoding());
-
- // Clear all the pending events except seeks and config changes.
- pending_event_ &= (SEEK_EVENT_PENDING | CONFIG_CHANGE_EVENT_PENDING);
-
- audio_decoder_job_.reset();
- ResetVideoDecoderJob();
-
- // Prevent job re-creation attempts in OnDemuxerConfigsAvailable()
- reconfig_audio_decoder_ = false;
- reconfig_video_decoder_ = false;
+ is_surface_in_use_ = false;
+ audio_decoder_job_->ReleaseDecoderResources();
+ video_decoder_job_->ReleaseDecoderResources();
// Prevent player restart, including job re-creation attempts.
playing_ = false;
decoder_starvation_callback_.Cancel();
- surface_ = gfx::ScopedJavaSurface();
- manager()->ReleaseMediaResources(player_id());
- if (process_pending_events) {
- DVLOG(1) << __FUNCTION__ << " : Resuming seek or config change processing";
- ProcessPendingEvents();
- }
}
void MediaSourcePlayer::SetVolume(double volume) {
- volume_ = volume;
- SetVolumeInternal();
+ audio_decoder_job_->SetVolume(volume);
}
-void MediaSourcePlayer::OnKeyAdded() {
- DVLOG(1) << __FUNCTION__;
- if (!is_waiting_for_key_)
- return;
-
- is_waiting_for_key_ = false;
- if (playing_)
- StartInternal();
+bool MediaSourcePlayer::IsSurfaceInUse() const {
+ return is_surface_in_use_;
}
bool MediaSourcePlayer::CanPause() {
@@ -308,16 +234,6 @@ void MediaSourcePlayer::StartInternal() {
// |is_waiting_for_key_| condition may not be true anymore.
is_waiting_for_key_ = false;
- // Create decoder jobs if they are not created
- ConfigureAudioDecoderJob();
- ConfigureVideoDecoderJob();
-
- // If one of the decoder job is not ready, do nothing.
- if ((HasAudio() && !audio_decoder_job_) ||
- (HasVideo() && !video_decoder_job_)) {
- return;
- }
-
SetPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -325,55 +241,24 @@ void MediaSourcePlayer::StartInternal() {
void MediaSourcePlayer::OnDemuxerConfigsAvailable(
const DemuxerConfigs& configs) {
DVLOG(1) << __FUNCTION__;
- duration_ = base::TimeDelta::FromMilliseconds(configs.duration_ms);
+ DCHECK(!HasAudio() && !HasVideo());
+ duration_ = configs.duration;
clock_.SetDuration(duration_);
- audio_codec_ = configs.audio_codec;
- num_channels_ = configs.audio_channels;
- sampling_rate_ = configs.audio_sampling_rate;
- is_audio_encrypted_ = configs.is_audio_encrypted;
- audio_extra_data_ = configs.audio_extra_data;
- if (HasAudio()) {
- DCHECK_GT(num_channels_, 0);
- audio_timestamp_helper_.reset(new AudioTimestampHelper(sampling_rate_));
- audio_timestamp_helper_->SetBaseTimestamp(GetCurrentTime());
- } else {
- audio_timestamp_helper_.reset();
- }
-
- video_codec_ = configs.video_codec;
- width_ = configs.video_size.width();
- height_ = configs.video_size.height();
- is_video_encrypted_ = configs.is_video_encrypted;
-
- manager()->OnMediaMetadataChanged(
- player_id(), duration_, width_, height_, true);
-
- if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
- if (reconfig_audio_decoder_)
- ConfigureAudioDecoderJob();
-
- if (reconfig_video_decoder_)
- ConfigureVideoDecoderJob();
-
- ClearPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
-
- // Resume decoding after the config change if we are still playing.
- if (playing_)
- StartInternal();
- }
+ audio_decoder_job_->SetDemuxerConfigs(configs);
+ video_decoder_job_->SetDemuxerConfigs(configs);
+ OnDemuxerConfigsChanged();
}
void MediaSourcePlayer::OnDemuxerDataAvailable(const DemuxerData& data) {
DVLOG(1) << __FUNCTION__ << "(" << data.type << ")";
DCHECK_LT(0u, data.access_units.size());
- if (data.type == DemuxerStream::AUDIO && audio_decoder_job_) {
+ CHECK_GE(1u, data.demuxer_configs.size());
+
+ if (data.type == DemuxerStream::AUDIO)
audio_decoder_job_->OnDataReceived(data);
- } else if (data.type == DemuxerStream::VIDEO) {
- next_video_data_is_iframe_ = false;
- if (video_decoder_job_)
- video_decoder_job_->OnDataReceived(data);
- }
+ else if (data.type == DemuxerStream::VIDEO)
+ video_decoder_job_->OnDataReceived(data);
}
void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
@@ -381,22 +266,15 @@ void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
clock_.SetDuration(duration_);
}
-base::android::ScopedJavaLocalRef<jobject> MediaSourcePlayer::GetMediaCrypto() {
- base::android::ScopedJavaLocalRef<jobject> media_crypto;
- if (drm_bridge_)
- media_crypto = drm_bridge_->GetMediaCrypto();
- return media_crypto;
-}
-
void MediaSourcePlayer::OnMediaCryptoReady() {
DCHECK(!drm_bridge_->GetMediaCrypto().is_null());
drm_bridge_->SetMediaCryptoReadyCB(base::Closure());
- if (playing_)
- StartInternal();
+ // Retry decoder creation if the decoders are waiting for MediaCrypto.
+ RetryDecoderCreation(true, true);
}
-void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
+void MediaSourcePlayer::SetCdm(BrowserCdm* cdm) {
// Currently we don't support switching DRM in the middle of playback, even
// if the player is paused.
// TODO(qinmin): support DRM change after playback has started.
@@ -406,28 +284,39 @@ void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
<< "This is not well supported!";
}
- drm_bridge_ = drm_bridge;
+ if (drm_bridge_) {
+ NOTREACHED() << "Currently we do not support resetting CDM.";
+ return;
+ }
+
+ // Only MediaDrmBridge will be set on MediaSourcePlayer.
+ drm_bridge_ = static_cast<MediaDrmBridge*>(cdm);
+
+ cdm_registration_id_ = drm_bridge_->RegisterPlayer(
+ base::Bind(&MediaSourcePlayer::OnKeyAdded, weak_this_),
+ base::Bind(&MediaSourcePlayer::OnCdmUnset, weak_this_));
+
+ audio_decoder_job_->SetDrmBridge(drm_bridge_);
+ video_decoder_job_->SetDrmBridge(drm_bridge_);
if (drm_bridge_->GetMediaCrypto().is_null()) {
- drm_bridge_->SetMediaCryptoReadyCB(base::Bind(
- &MediaSourcePlayer::OnMediaCryptoReady, weak_this_.GetWeakPtr()));
+ drm_bridge_->SetMediaCryptoReadyCB(
+ base::Bind(&MediaSourcePlayer::OnMediaCryptoReady, weak_this_));
return;
}
- if (playing_)
- StartInternal();
+ // If the player was previously waiting for the CDM, retry decoder creation.
+ RetryDecoderCreation(true, true);
}
void MediaSourcePlayer::OnDemuxerSeekDone(
- const base::TimeDelta& actual_browser_seek_time) {
+ base::TimeDelta actual_browser_seek_time) {
DVLOG(1) << __FUNCTION__;
ClearPendingEvent(SEEK_EVENT_PENDING);
if (IsEventPending(PREFETCH_REQUEST_EVENT_PENDING))
ClearPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
- next_video_data_is_iframe_ = true;
-
if (pending_seek_) {
DVLOG(1) << __FUNCTION__ << "processing pending seek";
DCHECK(doing_browser_seek_);
@@ -441,28 +330,27 @@ void MediaSourcePlayer::OnDemuxerSeekDone(
// player clock to the actual seek target.
if (doing_browser_seek_) {
DCHECK(actual_browser_seek_time != kNoTimestamp());
+ base::TimeDelta seek_time = actual_browser_seek_time;
// A browser seek must not jump into the past. Ideally, it seeks to the
// requested time, but it might jump into the future.
- DCHECK(actual_browser_seek_time >= GetCurrentTime());
+ DCHECK(seek_time >= GetCurrentTime());
DVLOG(1) << __FUNCTION__ << " : setting clock to actual browser seek time: "
- << actual_browser_seek_time.InSecondsF();
- clock_.SetTime(actual_browser_seek_time, actual_browser_seek_time);
- if (audio_timestamp_helper_)
- audio_timestamp_helper_->SetBaseTimestamp(actual_browser_seek_time);
+ << seek_time.InSecondsF();
+ clock_.SetTime(seek_time, seek_time);
+ audio_decoder_job_->SetBaseTimestamp(seek_time);
+ } else {
+ DCHECK(actual_browser_seek_time == kNoTimestamp());
}
- reached_audio_eos_ = false;
- reached_video_eos_ = false;
-
base::TimeDelta current_time = GetCurrentTime();
// TODO(qinmin): Simplify the logic by using |start_presentation_timestamp_|
// to preroll media decoder jobs. Currently |start_presentation_timestamp_|
// is calculated from decoder output, while preroll relies on the access
// unit's timestamp. There are some differences between the two.
preroll_timestamp_ = current_time;
- if (audio_decoder_job_)
+ if (HasAudio())
audio_decoder_job_->BeginPrerolling(preroll_timestamp_);
- if (video_decoder_job_)
+ if (HasVideo())
video_decoder_job_->BeginPrerolling(preroll_timestamp_);
if (!doing_browser_seek_)
@@ -472,28 +360,21 @@ void MediaSourcePlayer::OnDemuxerSeekDone(
}
void MediaSourcePlayer::UpdateTimestamps(
- const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
- base::TimeDelta new_max_time = presentation_timestamp;
-
- if (audio_output_bytes > 0) {
- audio_timestamp_helper_->AddFrames(
- audio_output_bytes / (kBytesPerAudioOutputSample * num_channels_));
- new_max_time = audio_timestamp_helper_->GetTimestamp();
- }
-
- clock_.SetMaxTime(new_max_time);
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp) {
+ clock_.SetTime(current_presentation_timestamp, max_presentation_timestamp);
manager()->OnTimeUpdate(player_id(), GetCurrentTime());
}
void MediaSourcePlayer::ProcessPendingEvents() {
DVLOG(1) << __FUNCTION__ << " : 0x" << std::hex << pending_event_;
// Wait for all the decoding jobs to finish before processing pending tasks.
- if (video_decoder_job_ && video_decoder_job_->is_decoding()) {
+ if (video_decoder_job_->is_decoding()) {
DVLOG(1) << __FUNCTION__ << " : A video job is still decoding.";
return;
}
- if (audio_decoder_job_ && audio_decoder_job_->is_decoding()) {
+ if (audio_decoder_job_->is_decoding()) {
DVLOG(1) << __FUNCTION__ << " : An audio job is still decoding.";
return;
}
@@ -506,36 +387,20 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (IsEventPending(SEEK_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling SEEK_EVENT";
ClearDecodingData();
+ audio_decoder_job_->SetBaseTimestamp(GetCurrentTime());
demuxer_->RequestDemuxerSeek(GetCurrentTime(), doing_browser_seek_);
return;
}
- start_time_ticks_ = base::TimeTicks();
- if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
- DVLOG(1) << __FUNCTION__ << " : Handling CONFIG_CHANGE_EVENT.";
- DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
- demuxer_->RequestDemuxerConfigs();
- return;
- }
-
- if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING)) {
- DVLOG(1) << __FUNCTION__ << " : Handling SURFACE_CHANGE_EVENT.";
- // Setting a new surface will require a new MediaCodec to be created.
- ResetVideoDecoderJob();
- ConfigureVideoDecoderJob();
-
- // Return early if we can't successfully configure a new video decoder job
- // yet, except continue processing other pending events if |surface_| is
- // empty.
- if (HasVideo() && !video_decoder_job_ && !surface_.IsEmpty())
+ if (IsEventPending(DECODER_CREATION_EVENT_PENDING)) {
+ // Don't continue if one of the decoders has not been created.
+ if (is_waiting_for_audio_decoder_ || is_waiting_for_video_decoder_)
return;
+ ClearPendingEvent(DECODER_CREATION_EVENT_PENDING);
}
if (IsEventPending(PREFETCH_REQUEST_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling PREFETCH_REQUEST_EVENT.";
- DCHECK(audio_decoder_job_ || AudioFinished());
- DCHECK(video_decoder_job_ || VideoFinished());
-
int count = (AudioFinished() ? 0 : 1) + (VideoFinished() ? 0 : 1);
// It is possible that all streams have finished decode, yet starvation
@@ -546,8 +411,8 @@ void MediaSourcePlayer::ProcessPendingEvents() {
return;
SetPendingEvent(PREFETCH_DONE_EVENT_PENDING);
- base::Closure barrier = BarrierClosure(count, base::Bind(
- &MediaSourcePlayer::OnPrefetchDone, weak_this_.GetWeakPtr()));
+ base::Closure barrier = BarrierClosure(
+ count, base::Bind(&MediaSourcePlayer::OnPrefetchDone, weak_this_));
if (!AudioFinished())
audio_decoder_job_->Prefetch(barrier);
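
The prefetch hunk above counts the streams that still need data and gates OnPrefetchDone() behind a BarrierClosure, so the completion callback fires exactly once, after the |count|-th prefetch finishes. A minimal single-threaded stand-in for that barrier (a sketch only; base::BarrierClosure is the real implementation):

#include <functional>
#include <iostream>
#include <memory>

// Returns a callable that invokes |done| on its count-th invocation,
// mirroring how the barrier gates OnPrefetchDone() above.
std::function<void()> MakeBarrier(int count, std::function<void()> done) {
  auto remaining = std::make_shared<int>(count);
  return [remaining, done] {
    if (--*remaining == 0)
      done();
  };
}

int main() {
  // Audio and video prefetch both outstanding: count == 2.
  auto barrier = MakeBarrier(2, [] { std::cout << "OnPrefetchDone\n"; });
  barrier();  // audio prefetch finished; still waiting on video
  barrier();  // video prefetch finished; the done callback runs exactly once
  return 0;
}
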
@@ -568,7 +433,8 @@ void MediaSourcePlayer::ProcessPendingEvents() {
void MediaSourcePlayer::MediaDecoderCallback(
bool is_audio, MediaCodecStatus status,
- const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp) {
DVLOG(1) << __FUNCTION__ << ": " << is_audio << ", " << status;
// TODO(xhwang): Drop IntToString() when http://crbug.com/303899 is fixed.
@@ -612,6 +478,12 @@ void MediaSourcePlayer::MediaDecoderCallback(
return;
}
+ if ((status == MEDIA_CODEC_OK || status == MEDIA_CODEC_INPUT_END_OF_STREAM) &&
+ is_clock_manager && current_presentation_timestamp != kNoTimestamp()) {
+ UpdateTimestamps(current_presentation_timestamp,
+ max_presentation_timestamp);
+ }
+
if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM)
PlaybackCompleted(is_audio);
@@ -623,11 +495,6 @@ void MediaSourcePlayer::MediaDecoderCallback(
if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM)
return;
- if (status == MEDIA_CODEC_OK && is_clock_manager &&
- presentation_timestamp != kNoTimestamp()) {
- UpdateTimestamps(presentation_timestamp, audio_output_bytes);
- }
-
if (!playing_) {
if (is_clock_manager)
clock_.Pause();
@@ -649,18 +516,17 @@ void MediaSourcePlayer::MediaDecoderCallback(
// If we have a valid timestamp, start the starvation callback. Otherwise,
// reset the |start_time_ticks_| so that the next frame will not suffer
// from the decoding delay caused by the current frame.
- if (presentation_timestamp != kNoTimestamp())
- StartStarvationCallback(presentation_timestamp);
+ if (current_presentation_timestamp != kNoTimestamp())
+ StartStarvationCallback(current_presentation_timestamp,
+ max_presentation_timestamp);
else
start_time_ticks_ = base::TimeTicks::Now();
}
- if (is_audio) {
+ if (is_audio)
DecodeMoreAudio();
- return;
- }
-
- DecodeMoreVideo();
+ else
+ DecodeMoreVideo();
}
void MediaSourcePlayer::DecodeMoreAudio() {
@@ -669,28 +535,17 @@ void MediaSourcePlayer::DecodeMoreAudio() {
DCHECK(!AudioFinished());
if (audio_decoder_job_->Decode(
- start_time_ticks_, start_presentation_timestamp_, base::Bind(
- &MediaSourcePlayer::MediaDecoderCallback,
- weak_this_.GetWeakPtr(), true))) {
+ start_time_ticks_,
+ start_presentation_timestamp_,
+ base::Bind(&MediaSourcePlayer::MediaDecoderCallback, weak_this_, true))) {
TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreAudio",
audio_decoder_job_.get());
return;
}
- // Failed to start the next decode.
- // Wait for demuxer ready message.
- DCHECK(!reconfig_audio_decoder_);
- reconfig_audio_decoder_ = true;
-
- // Config change may have just been detected on the other stream. If so,
- // don't send a duplicate demuxer config request.
- if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
- DCHECK(reconfig_video_decoder_);
- return;
- }
-
- SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
- ProcessPendingEvents();
+ is_waiting_for_audio_decoder_ = true;
+ if (!IsEventPending(DECODER_CREATION_EVENT_PENDING))
+ SetPendingEvent(DECODER_CREATION_EVENT_PENDING);
}
void MediaSourcePlayer::DecodeMoreVideo() {
@@ -699,41 +554,28 @@ void MediaSourcePlayer::DecodeMoreVideo() {
DCHECK(!VideoFinished());
if (video_decoder_job_->Decode(
- start_time_ticks_, start_presentation_timestamp_, base::Bind(
- &MediaSourcePlayer::MediaDecoderCallback,
- weak_this_.GetWeakPtr(), false))) {
+ start_time_ticks_,
+ start_presentation_timestamp_,
+ base::Bind(&MediaSourcePlayer::MediaDecoderCallback, weak_this_,
+ false))) {
TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreVideo",
video_decoder_job_.get());
return;
}
- // Failed to start the next decode.
- // Wait for demuxer ready message.
-
- // After this detection of video config change, next video data received
- // will begin with I-frame.
- next_video_data_is_iframe_ = true;
-
- DCHECK(!reconfig_video_decoder_);
- reconfig_video_decoder_ = true;
-
- // Config change may have just been detected on the other stream. If so,
- // don't send a duplicate demuxer config request.
- if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
- DCHECK(reconfig_audio_decoder_);
+ // If the decoder is waiting for an I-frame, trigger a browser seek.
+ if (!video_decoder_job_->next_video_data_is_iframe()) {
+ BrowserSeekToCurrentTime();
return;
}
- SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
- ProcessPendingEvents();
+ is_waiting_for_video_decoder_ = true;
+ if (!IsEventPending(DECODER_CREATION_EVENT_PENDING))
+ SetPendingEvent(DECODER_CREATION_EVENT_PENDING);
}
void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
DVLOG(1) << __FUNCTION__ << "(" << is_audio << ")";
- if (is_audio)
- reached_audio_eos_ = true;
- else
- reached_video_eos_ = true;
if (AudioFinished() && VideoFinished()) {
playing_ = false;
@@ -745,132 +587,25 @@ void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
void MediaSourcePlayer::ClearDecodingData() {
DVLOG(1) << __FUNCTION__;
- if (audio_decoder_job_)
- audio_decoder_job_->Flush();
- if (video_decoder_job_)
- video_decoder_job_->Flush();
+ audio_decoder_job_->Flush();
+ video_decoder_job_->Flush();
start_time_ticks_ = base::TimeTicks();
}
bool MediaSourcePlayer::HasVideo() {
- return kUnknownVideoCodec != video_codec_;
+ return video_decoder_job_->HasStream();
}
bool MediaSourcePlayer::HasAudio() {
- return kUnknownAudioCodec != audio_codec_;
+ return audio_decoder_job_->HasStream();
}
bool MediaSourcePlayer::AudioFinished() {
- return reached_audio_eos_ || !HasAudio();
+ return audio_decoder_job_->OutputEOSReached() || !HasAudio();
}
bool MediaSourcePlayer::VideoFinished() {
- return reached_video_eos_ || !HasVideo();
-}
-
-void MediaSourcePlayer::ConfigureAudioDecoderJob() {
- if (!HasAudio()) {
- audio_decoder_job_.reset();
- return;
- }
-
- // Create audio decoder job only if config changes.
- if (audio_decoder_job_ && !reconfig_audio_decoder_)
- return;
-
- base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
- if (is_audio_encrypted_ && media_crypto.is_null())
- return;
-
- DCHECK(!audio_decoder_job_ || !audio_decoder_job_->is_decoding());
-
- DVLOG(1) << __FUNCTION__ << " : creating new audio decoder job";
-
- audio_decoder_job_.reset(AudioDecoderJob::Create(
- audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
- audio_extra_data_.size(), media_crypto.obj(),
- base::Bind(&DemuxerAndroid::RequestDemuxerData,
- base::Unretained(demuxer_.get()), DemuxerStream::AUDIO)));
-
- if (audio_decoder_job_) {
- SetVolumeInternal();
- audio_decoder_job_->BeginPrerolling(preroll_timestamp_);
- reconfig_audio_decoder_ = false;
- }
-}
-
-void MediaSourcePlayer::ResetVideoDecoderJob() {
- video_decoder_job_.reset();
-
- // Any eventual video decoder job re-creation will use the current |surface_|.
- if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
- ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
-}
-
-void MediaSourcePlayer::ConfigureVideoDecoderJob() {
- if (!HasVideo() || surface_.IsEmpty()) {
- ResetVideoDecoderJob();
- return;
- }
-
- // Create video decoder job only if config changes or we don't have a job.
- if (video_decoder_job_ && !reconfig_video_decoder_) {
- DCHECK(!IsEventPending(SURFACE_CHANGE_EVENT_PENDING));
- return;
- }
-
- DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
-
- if (reconfig_video_decoder_) {
- // No hack browser seek should be required. I-Frame must be next.
- DCHECK(next_video_data_is_iframe_) << "Received video data between "
- << "detecting video config change and reconfiguring video decoder";
- }
-
- // If uncertain that video I-frame data is next and there is no seek already
- // in process, request browser demuxer seek so the new decoder will decode
- // an I-frame first. Otherwise, the new MediaCodec might crash. See b/8950387.
- // Eventual OnDemuxerSeekDone() will trigger ProcessPendingEvents() and
- // continue from here.
- // TODO(wolenetz): Instead of doing hack browser seek, replay cached data
- // since last keyframe. See http://crbug.com/304234.
- if (!next_video_data_is_iframe_ && !IsEventPending(SEEK_EVENT_PENDING)) {
- BrowserSeekToCurrentTime();
- return;
- }
-
- // Release the old VideoDecoderJob first so the surface can get released.
- // Android does not allow 2 MediaCodec instances use the same surface.
- ResetVideoDecoderJob();
-
- base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
- if (is_video_encrypted_ && media_crypto.is_null())
- return;
-
- DVLOG(1) << __FUNCTION__ << " : creating new video decoder job";
-
- // Create the new VideoDecoderJob.
- bool is_secure = IsProtectedSurfaceRequired();
- video_decoder_job_.reset(
- VideoDecoderJob::Create(video_codec_,
- is_secure,
- gfx::Size(width_, height_),
- surface_.j_surface().obj(),
- media_crypto.obj(),
- base::Bind(&DemuxerAndroid::RequestDemuxerData,
- base::Unretained(demuxer_.get()),
- DemuxerStream::VIDEO)));
- if (!video_decoder_job_)
- return;
-
- video_decoder_job_->BeginPrerolling(preroll_timestamp_);
- reconfig_video_decoder_ = false;
-
- // Inform the fullscreen view the player is ready.
- // TODO(qinmin): refactor MediaPlayerBridge so that we have a better way
- // to inform ContentVideoView.
- manager()->OnMediaMetadataChanged(
- player_id(), duration_, width_, height_, true);
+ return video_decoder_job_->OutputEOSReached() || !HasVideo();
}
void MediaSourcePlayer::OnDecoderStarved() {
@@ -880,7 +615,8 @@ void MediaSourcePlayer::OnDecoderStarved() {
}
void MediaSourcePlayer::StartStarvationCallback(
- const base::TimeDelta& presentation_timestamp) {
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp) {
// 20ms was chosen because it is the typical duration of a compressed audio
// frame.
// Anything smaller than this would likely cause unnecessary cycling in and
// out of the prefetch state.
@@ -890,41 +626,35 @@ void MediaSourcePlayer::StartStarvationCallback(
base::TimeDelta current_timestamp = GetCurrentTime();
base::TimeDelta timeout;
if (HasAudio()) {
- timeout = audio_timestamp_helper_->GetTimestamp() - current_timestamp;
+ timeout = max_presentation_timestamp - current_timestamp;
} else {
- DCHECK(current_timestamp <= presentation_timestamp);
+ DCHECK(current_timestamp <= current_presentation_timestamp);
// For video only streams, fps can be estimated from the difference
// between the previous and current presentation timestamps. The
// previous presentation timestamp is equal to current_timestamp.
// TODO(qinmin): determine whether 2 is a good coefficient for estimating
// video frame timeout.
- timeout = 2 * (presentation_timestamp - current_timestamp);
+ timeout = 2 * (current_presentation_timestamp - current_timestamp);
}
timeout = std::max(timeout, kMinStarvationTimeout);
decoder_starvation_callback_.Reset(
- base::Bind(&MediaSourcePlayer::OnDecoderStarved,
- weak_this_.GetWeakPtr()));
+ base::Bind(&MediaSourcePlayer::OnDecoderStarved, weak_this_));
base::MessageLoop::current()->PostDelayedTask(
FROM_HERE, decoder_starvation_callback_.callback(), timeout);
}
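
The timeout arithmetic above can be read in isolation: with audio present, the starvation budget is the span between the rendered position and the furthest decoded audio timestamp; for video-only streams it is estimated as twice the most recent frame interval; either way it is floored at kMinStarvationTimeout. A worked sketch of the computation (values and the 20ms floor are illustrative):

#include <algorithm>
#include <chrono>
#include <iostream>

using Ms = std::chrono::milliseconds;

Ms StarvationTimeout(bool has_audio, Ms current_time, Ms current_pts,
                     Ms max_pts, Ms min_timeout) {
  // With audio: how much decoded-but-unrendered audio remains.
  // Video only: roughly two frame intervals, using the previously rendered
  // frame's position as the reference.
  Ms timeout = has_audio ? max_pts - current_time
                         : 2 * (current_pts - current_time);
  return std::max(timeout, min_timeout);
}

int main() {
  // Audio rendered to 1.000s, decoded through 1.250s -> 250ms budget.
  std::cout << StarvationTimeout(true, Ms(1000), Ms(1000), Ms(1250), Ms(20))
                   .count() << "ms\n";
  // Video only, 33ms since the previous frame -> 66ms budget.
  std::cout << StarvationTimeout(false, Ms(1000), Ms(1033), Ms(1033), Ms(20))
                   .count() << "ms\n";
  return 0;
}
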
-void MediaSourcePlayer::SetVolumeInternal() {
- if (audio_decoder_job_ && volume_ >= 0)
- audio_decoder_job_->SetVolume(volume_);
-}
-
bool MediaSourcePlayer::IsProtectedSurfaceRequired() {
- return is_video_encrypted_ &&
+ return video_decoder_job_->is_content_encrypted() &&
drm_bridge_ && drm_bridge_->IsProtectedSurfaceRequired();
}
void MediaSourcePlayer::OnPrefetchDone() {
DVLOG(1) << __FUNCTION__;
- DCHECK(!audio_decoder_job_ || !audio_decoder_job_->is_decoding());
- DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
+ DCHECK(!audio_decoder_job_->is_decoding());
+ DCHECK(!video_decoder_job_->is_decoding());
// A previously posted OnPrefetchDone() could race against a Release(). If
// Release() won the race, we should no longer have decoder jobs.
@@ -933,7 +663,6 @@ void MediaSourcePlayer::OnPrefetchDone() {
// and http://crbug.com/304234.
if (!IsEventPending(PREFETCH_DONE_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : aborting";
- DCHECK(!audio_decoder_job_ && !video_decoder_job_);
return;
}
@@ -944,6 +673,9 @@ void MediaSourcePlayer::OnPrefetchDone() {
return;
}
+ if (!playing_)
+ return;
+
start_time_ticks_ = base::TimeTicks::Now();
start_presentation_timestamp_ = GetCurrentTime();
if (!clock_.IsPlaying())
@@ -956,13 +688,18 @@ void MediaSourcePlayer::OnPrefetchDone() {
DecodeMoreVideo();
}
+void MediaSourcePlayer::OnDemuxerConfigsChanged() {
+ manager()->OnMediaMetadataChanged(
+ player_id(), duration_, GetVideoWidth(), GetVideoHeight(), true);
+}
+
const char* MediaSourcePlayer::GetEventName(PendingEventFlags event) {
+ // Please keep this in sync with PendingEventFlags.
static const char* kPendingEventNames[] = {
+ "PREFETCH_DONE",
"SEEK",
- "SURFACE_CHANGE",
- "CONFIG_CHANGE",
+ "DECODER_CREATION",
"PREFETCH_REQUEST",
- "PREFETCH_DONE",
};
int mask = 1;
@@ -994,4 +731,36 @@ void MediaSourcePlayer::ClearPendingEvent(PendingEventFlags event) {
pending_event_ &= ~event;
}
+void MediaSourcePlayer::RetryDecoderCreation(bool audio, bool video) {
+ if (audio)
+ is_waiting_for_audio_decoder_ = false;
+ if (video)
+ is_waiting_for_video_decoder_ = false;
+ if (IsEventPending(DECODER_CREATION_EVENT_PENDING))
+ ProcessPendingEvents();
+}
+
+void MediaSourcePlayer::OnKeyAdded() {
+ DVLOG(1) << __FUNCTION__;
+ if (!is_waiting_for_key_)
+ return;
+
+ is_waiting_for_key_ = false;
+ if (playing_)
+ StartInternal();
+}
+
+void MediaSourcePlayer::OnCdmUnset() {
+ DVLOG(1) << __FUNCTION__;
+ // TODO(xhwang): Support detachment of CDM. This will be needed when we start
+ // to support setMediaKeys(0) (see http://crbug.com/330324), or when we
+ // release MediaDrm when the video is paused, or when the device goes to
+ // sleep (see http://crbug.com/272421).
+ NOTREACHED() << "CDM detachment not supported.";
+ DCHECK(drm_bridge_);
+ audio_decoder_job_->SetDrmBridge(NULL);
+ video_decoder_job_->SetDrmBridge(NULL);
+ drm_bridge_ = NULL;
+}
+
} // namespace media
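
SetCdm() and the destructor above form a registration pair: the player registers its key-added and CDM-unset handlers with the MediaDrmBridge, keeps the returned |cdm_registration_id_|, and must unregister with that same id on teardown. A sketch of such a registry, simplified to a single callback per player (the real RegisterPlayer() takes both a new-key and a CDM-unset callback; everything here is illustrative):

#include <functional>
#include <iostream>
#include <map>

class DrmBridge {
 public:
  // Hands back an id that the player must pass to UnregisterPlayer(),
  // mirroring |cdm_registration_id_| above.
  int RegisterPlayer(std::function<void()> on_key_added) {
    listeners_[next_registration_id_] = std::move(on_key_added);
    return next_registration_id_++;
  }
  void UnregisterPlayer(int registration_id) {
    listeners_.erase(registration_id);
  }
  void NotifyKeyAdded() {
    for (auto& entry : listeners_)
      entry.second();
  }

 private:
  int next_registration_id_ = 1;
  std::map<int, std::function<void()>> listeners_;
};

int main() {
  DrmBridge bridge;
  int id = bridge.RegisterPlayer([] { std::cout << "OnKeyAdded\n"; });
  bridge.NotifyKeyAdded();       // a player waiting for a key may resume
  bridge.UnregisterPlayer(id);   // done in ~MediaSourcePlayer() above
  bridge.NotifyKeyAdded();       // no listeners remain; nothing happens
  return 0;
}
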
diff --git a/chromium/media/base/android/media_source_player.h b/chromium/media/base/android/media_source_player.h
index ef822d49b28..689c41eb542 100644
--- a/chromium/media/base/android/media_source_player.h
+++ b/chromium/media/base/android/media_source_player.h
@@ -21,6 +21,7 @@
#include "media/base/android/demuxer_android.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/media_decoder_job.h"
+#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_android.h"
#include "media/base/clock.h"
#include "media/base/media_export.h"
@@ -28,7 +29,6 @@
namespace media {
class AudioDecoderJob;
-class AudioTimestampHelper;
class VideoDecoderJob;
// This class handles media source extensions on Android. It uses Android
@@ -40,19 +40,17 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// the lifetime of this object.
MediaSourcePlayer(int player_id,
MediaPlayerManager* manager,
- scoped_ptr<DemuxerAndroid> demuxer);
+ const RequestMediaResourcesCB& request_media_resources_cb,
+ const ReleaseMediaResourcesCB& release_media_resources_cb,
+ scoped_ptr<DemuxerAndroid> demuxer,
+ const GURL& frame_url);
virtual ~MediaSourcePlayer();
- static bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
- const std::string& security_level,
- const std::string& container,
- const std::vector<std::string>& codecs);
-
// MediaPlayerAndroid implementation.
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Pause(bool is_media_related_action ALLOW_UNUSED) OVERRIDE;
- virtual void SeekTo(const base::TimeDelta& timestamp) OVERRIDE;
+ virtual void SeekTo(base::TimeDelta timestamp) OVERRIDE;
virtual void Release() OVERRIDE;
virtual void SetVolume(double volume) OVERRIDE;
virtual int GetVideoWidth() OVERRIDE;
@@ -64,20 +62,22 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
virtual bool CanSeekForward() OVERRIDE;
virtual bool CanSeekBackward() OVERRIDE;
virtual bool IsPlayerReady() OVERRIDE;
- virtual void SetDrmBridge(MediaDrmBridge* drm_bridge) OVERRIDE;
- virtual void OnKeyAdded() OVERRIDE;
+ virtual void SetCdm(BrowserCdm* cdm) OVERRIDE;
+ virtual bool IsSurfaceInUse() const OVERRIDE;
// DemuxerAndroidClient implementation.
virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) OVERRIDE;
virtual void OnDemuxerDataAvailable(const DemuxerData& params) OVERRIDE;
virtual void OnDemuxerSeekDone(
- const base::TimeDelta& actual_browser_seek_time) OVERRIDE;
+ base::TimeDelta actual_browser_seek_time) OVERRIDE;
virtual void OnDemuxerDurationChanged(base::TimeDelta duration) OVERRIDE;
private:
+ friend class MediaSourcePlayerTest;
+
// Update the current timestamp.
- void UpdateTimestamps(const base::TimeDelta& presentation_timestamp,
- size_t audio_output_bytes);
+ void UpdateTimestamps(base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp);
// Helper function for starting media playback.
void StartInternal();
@@ -88,8 +88,8 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Called when the decoder finishes its task.
void MediaDecoderCallback(
bool is_audio, MediaCodecStatus status,
- const base::TimeDelta& presentation_timestamp,
- size_t audio_output_bytes);
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp);
// Gets MediaCrypto object from |drm_bridge_|.
base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
@@ -100,14 +100,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Handle pending events if all the decoder jobs are not currently decoding.
void ProcessPendingEvents();
- // Helper method to clear any pending |SURFACE_CHANGE_EVENT_PENDING|
- // and reset |video_decoder_job_| to null.
- void ResetVideoDecoderJob();
-
- // Helper methods to configure the decoder jobs.
- void ConfigureVideoDecoderJob();
- void ConfigureAudioDecoderJob();
-
// Flush the decoders and clean up all the data that needs to be decoded.
void ClearDecodingData();
@@ -131,16 +123,20 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
void OnDecoderStarved();
// Starts the |decoder_starvation_callback_| task with the timeout value.
- // |presentation_timestamp| - The presentation timestamp used for starvation
- // timeout computations. It represents the timestamp of the last piece of
- // decoded data.
- void StartStarvationCallback(const base::TimeDelta& presentation_timestamp);
+ // |current_presentation_timestamp| - The presentation timestamp used for
+ // starvation timeout computations. It represents the current timestamp of
+ // rendered data.
+ // |max_presentation_timestamp| - The presentation timestamp that would be
+ //  reached once all currently decoded data has been rendered.
+ void StartStarvationCallback(
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp);
// Schedules a seek event in |pending_events_| and calls StopDecode() on all
// the MediaDecoderJobs. Sets clock to |seek_time|, and resets
// |pending_seek_|. There must not already be a seek event in
// |pending_events_|.
- void ScheduleSeekEventAndStopDecoding(const base::TimeDelta& seek_time);
+ void ScheduleSeekEventAndStopDecoding(base::TimeDelta seek_time);
// Schedules a browser seek event. We must not currently be processing any
// seek. Note that there is possibility that browser seek of renderer demuxer
@@ -150,9 +146,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// since last keyframe. See http://crbug.com/304234.
void BrowserSeekToCurrentTime();
- // Helper function to set the volume.
- void SetVolumeInternal();
-
// Helper function to determine whether a protected surface is needed for
// video playback.
bool IsProtectedSurfaceRequired();
@@ -163,6 +156,15 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// resync with audio and starts decoding.
void OnPrefetchDone();
+ // Called when the demuxer config changes.
+ void OnDemuxerConfigsChanged();
+
+ // Called when a new decryption key becomes available.
+ void OnKeyAdded();
+
+ // Called when the CDM is detached.
+ void OnCdmUnset();
+
// Test-only method to set up a hook for the completion of the next decode
// cycle.
// This callback state is cleared when it is next run.
// Prevent usage creep by only calling this from the
@@ -171,16 +173,13 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
decode_callback_for_testing_ = test_decode_cb;
}
- // TODO(qinmin/wolenetz): Reorder these based on their priority from
- // ProcessPendingEvents(). Release() and other routines are dependent upon
- // priority consistency.
+ // Please keep this in sync with |kPendingEventNames| in GetEventName().
enum PendingEventFlags {
NO_EVENT_PENDING = 0,
- SEEK_EVENT_PENDING = 1 << 0,
- SURFACE_CHANGE_EVENT_PENDING = 1 << 1,
- CONFIG_CHANGE_EVENT_PENDING = 1 << 2,
+ PREFETCH_DONE_EVENT_PENDING = 1 << 0,
+ SEEK_EVENT_PENDING = 1 << 1,
+ DECODER_CREATION_EVENT_PENDING = 1 << 2,
PREFETCH_REQUEST_EVENT_PENDING = 1 << 3,
- PREFETCH_DONE_EVENT_PENDING = 1 << 4,
};
static const char* GetEventName(PendingEventFlags event);
@@ -188,6 +187,10 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
void SetPendingEvent(PendingEventFlags event);
void ClearPendingEvent(PendingEventFlags event);
+ // If the player was previously waiting for an audio or video decoder job,
+ // retry creating the decoders identified by |audio| and |video|.
+ void RetryDecoderCreation(bool audio, bool video);
+
scoped_ptr<DemuxerAndroid> demuxer_;
// Pending event that the player needs to do.
@@ -195,20 +198,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Stats about the media.
base::TimeDelta duration_;
- int width_;
- int height_;
- AudioCodec audio_codec_;
- VideoCodec video_codec_;
- int num_channels_;
- int sampling_rate_;
- // TODO(xhwang/qinmin): Add |video_extra_data_|.
- std::vector<uint8> audio_extra_data_;
- bool reached_audio_eos_;
- bool reached_video_eos_;
bool playing_;
- bool is_audio_encrypted_;
- bool is_video_encrypted_;
- double volume_;
// base::TickClock used by |clock_|.
base::DefaultTickClock default_tick_clock_;
@@ -225,16 +215,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
base::TimeTicks start_time_ticks_;
base::TimeDelta start_presentation_timestamp_;
- // The surface object currently owned by the player.
- gfx::ScopedJavaSurface surface_;
-
- // Track whether or not the player has received any video data since the most
- // recent of player construction, end of last seek, or receiving and
- // detecting a |kConfigChanged| access unit from the demuxer.
- // If no such video data has been received, the next video data begins with
- // an I-frame. Otherwise, we have no such guarantee.
- bool next_video_data_is_iframe_;
-
// Flag that is true if doing a hack browser seek or false if doing a
// regular seek. Only valid when |SEEK_EVENT_PENDING| is pending.
// TODO(wolenetz): Instead of doing hack browser seek, replay cached data
@@ -252,9 +232,6 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
scoped_ptr<AudioDecoderJob, MediaDecoderJob::Deleter> audio_decoder_job_;
scoped_ptr<VideoDecoderJob, MediaDecoderJob::Deleter> video_decoder_job_;
- bool reconfig_audio_decoder_;
- bool reconfig_video_decoder_;
-
// Track the most recent preroll target. Decoder re-creation needs this to
// resume any in-progress preroll.
base::TimeDelta preroll_timestamp_;
@@ -264,23 +241,31 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// elapses.
base::CancelableClosure decoder_starvation_callback_;
- // Object to calculate the current audio timestamp for A/V sync.
- scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
-
- // Weak pointer passed to media decoder jobs for callbacks.
- base::WeakPtrFactory<MediaSourcePlayer> weak_this_;
-
MediaDrmBridge* drm_bridge_;
+ int cdm_registration_id_;
// No decryption key available to decrypt the encrypted buffer. In this case,
// the player should pause. When a new key is added (OnKeyAdded()), we should
// try to start playback again.
bool is_waiting_for_key_;
+ // Indicates whether the player is waiting for an audio or video decoder to
+ // be created. This can happen if the video surface is not available or the
+ // decryption key has not been added yet.
+ bool is_waiting_for_audio_decoder_;
+ bool is_waiting_for_video_decoder_;
+
// Test-only callback for hooking the completion of the next decode cycle.
base::Closure decode_callback_for_testing_;
- friend class MediaSourcePlayerTest;
+ // Whether the video surface is currently in use by the player.
+ bool is_surface_in_use_;
+
+ // Weak pointer passed to media decoder jobs for callbacks.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MediaSourcePlayer> weak_factory_;
+ base::WeakPtr<MediaSourcePlayer> weak_this_;
+
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
};
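
The pending-event machinery above is a plain bitmask: each PendingEventFlags value occupies one bit, SetPendingEvent() ORs it in, ClearPendingEvent() masks it out, and IsEventPending() tests it, which is also why kPendingEventNames in GetEventName() must stay ordered to match the enum. A compact sketch of the same mechanics, reusing the flag values from the header above (the wrapper struct is illustrative):

#include <cassert>

enum PendingEventFlags {
  NO_EVENT_PENDING = 0,
  PREFETCH_DONE_EVENT_PENDING = 1 << 0,
  SEEK_EVENT_PENDING = 1 << 1,
  DECODER_CREATION_EVENT_PENDING = 1 << 2,
  PREFETCH_REQUEST_EVENT_PENDING = 1 << 3,
};

struct EventState {
  unsigned pending = NO_EVENT_PENDING;
  bool IsPending(PendingEventFlags e) const { return (pending & e) != 0; }
  void Set(PendingEventFlags e) { pending |= e; }
  void Clear(PendingEventFlags e) { pending &= ~e; }
};

int main() {
  EventState state;
  state.Set(SEEK_EVENT_PENDING);
  state.Set(PREFETCH_REQUEST_EVENT_PENDING);
  assert(state.IsPending(SEEK_EVENT_PENDING));
  state.Clear(SEEK_EVENT_PENDING);  // seek handled
  assert(!state.IsPending(SEEK_EVENT_PENDING));
  assert(state.IsPending(PREFETCH_REQUEST_EVENT_PENDING));
  return 0;
}
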
diff --git a/chromium/media/base/android/media_source_player_unittest.cc b/chromium/media/base/android/media_source_player_unittest.cc
index 7970acc0050..604c195da34 100644
--- a/chromium/media/base/android/media_source_player_unittest.cc
+++ b/chromium/media/base/android/media_source_player_unittest.cc
@@ -5,13 +5,16 @@
#include <string>
#include "base/basictypes.h"
+#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
+#include "media/base/android/audio_decoder_job.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_source_player.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/android/video_decoder_job.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_data_util.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -28,32 +31,31 @@ namespace media {
} \
} while (0)
-static const int kDefaultDurationInMs = 10000;
-
-static const char kAudioMp4[] = "audio/mp4";
-static const char kVideoMp4[] = "video/mp4";
-static const char kAudioWebM[] = "audio/webm";
-static const char kVideoWebM[] = "video/webm";
+const base::TimeDelta kDefaultDuration =
+ base::TimeDelta::FromMilliseconds(10000);
// TODO(wolenetz/qinmin): Simplify tests with more effective mock usage, and
// fix flaky pointer-based MDJ inequality testing. See http://crbug.com/327839.
-// Mock of MediaPlayerManager for testing purpose
+// Mock of MediaPlayerManager for testing purposes.
class MockMediaPlayerManager : public MediaPlayerManager {
public:
explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
: message_loop_(message_loop),
- playback_completed_(false) {}
+ playback_completed_(false),
+ num_resources_requested_(0),
+ num_resources_released_(0),
+ timestamp_updated_(false) {}
virtual ~MockMediaPlayerManager() {}
// MediaPlayerManager implementation.
- virtual void RequestMediaResources(int player_id) OVERRIDE {}
- virtual void ReleaseMediaResources(int player_id) OVERRIDE {}
virtual MediaResourceGetter* GetMediaResourceGetter() OVERRIDE {
return NULL;
}
virtual void OnTimeUpdate(int player_id,
- base::TimeDelta current_time) OVERRIDE {}
+ base::TimeDelta current_time) OVERRIDE {
+ timestamp_updated_ = true;
+ }
virtual void OnMediaMetadataChanged(
int player_id, base::TimeDelta duration, int width, int height,
bool success) OVERRIDE {}
@@ -71,32 +73,50 @@ class MockMediaPlayerManager : public MediaPlayerManager {
int height) OVERRIDE {}
virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
- virtual void DestroyAllMediaPlayers() OVERRIDE {}
- virtual MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
- return NULL;
+ virtual void RequestFullScreen(int player_id) OVERRIDE {}
+#if defined(VIDEO_HOLE)
+ virtual bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo() OVERRIDE {
+ return false;
}
- virtual void OnProtectedSurfaceRequested(int player_id) OVERRIDE {}
- virtual void OnSessionCreated(int media_keys_id,
- uint32 session_id,
- const std::string& web_session_id) OVERRIDE {}
- virtual void OnSessionMessage(int media_keys_id,
- uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) OVERRIDE {}
- virtual void OnSessionReady(int media_keys_id, uint32 session_id) OVERRIDE {}
- virtual void OnSessionClosed(int media_keys_id, uint32 session_id) OVERRIDE {}
- virtual void OnSessionError(int media_keys_id,
- uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) OVERRIDE {}
+#endif // defined(VIDEO_HOLE)
bool playback_completed() const {
return playback_completed_;
}
+ int num_resources_requested() const {
+ return num_resources_requested_;
+ }
+
+ int num_resources_released() const {
+ return num_resources_released_;
+ }
+
+ void OnMediaResourcesRequested(int player_id) {
+ num_resources_requested_++;
+ }
+
+ void OnMediaResourcesReleased(int player_id) {
+ num_resources_released_++;
+ }
+
+ bool timestamp_updated() const {
+ return timestamp_updated_;
+ }
+
+ void ResetTimestampUpdated() {
+ timestamp_updated_ = false;
+ }
+
private:
base::MessageLoop* message_loop_;
bool playback_completed_;
+ // The number of resource requests this object has seen.
+ int num_resources_requested_;
+ // The number of released resources.
+ int num_resources_released_;
+ // Whether the playback timestamp was updated.
+ bool timestamp_updated_;
DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
};
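
The reworked mock above replaces the old no-op virtuals with plain counters driven by the callbacks the player under test is constructed with; the test then asserts on the counts. A reduced sketch of that test pattern, with std::function standing in for base::Callback (all names illustrative):

#include <cassert>
#include <functional>

// The mock exposes counting hooks that are handed to the object under test
// as callbacks, then asserted on afterwards.
struct MockManager {
  int num_resources_requested = 0;
  void OnMediaResourcesRequested(int /* player_id */) {
    ++num_resources_requested;
  }
};

struct PlayerUnderTest {
  std::function<void(int)> request_media_resources_cb;
  void Start() { request_media_resources_cb(0); }
};

int main() {
  MockManager manager;
  PlayerUnderTest player{[&manager](int player_id) {
    manager.OnMediaResourcesRequested(player_id);
  }};
  player.Start();
  assert(manager.num_resources_requested == 1);
  return 0;
}
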
@@ -107,14 +127,10 @@ class MockDemuxerAndroid : public DemuxerAndroid {
: message_loop_(message_loop),
num_data_requests_(0),
num_seek_requests_(0),
- num_browser_seek_requests_(0),
- num_config_requests_(0) {}
+ num_browser_seek_requests_(0) {}
virtual ~MockDemuxerAndroid() {}
virtual void Initialize(DemuxerAndroidClient* client) OVERRIDE {}
- virtual void RequestDemuxerConfigs() OVERRIDE {
- num_config_requests_++;
- }
virtual void RequestDemuxerData(DemuxerStream::Type type) OVERRIDE {
num_data_requests_++;
if (message_loop_->is_running())
@@ -130,7 +146,6 @@ class MockDemuxerAndroid : public DemuxerAndroid {
int num_data_requests() const { return num_data_requests_; }
int num_seek_requests() const { return num_seek_requests_; }
int num_browser_seek_requests() const { return num_browser_seek_requests_; }
- int num_config_requests() const { return num_config_requests_; }
private:
base::MessageLoop* message_loop_;
@@ -144,9 +159,6 @@ class MockDemuxerAndroid : public DemuxerAndroid {
// The number of browser seek requests this object has seen.
int num_browser_seek_requests_;
- // The number of demuxer config requests this object has seen.
- int num_config_requests_;
-
DISALLOW_COPY_AND_ASSIGN(MockDemuxerAndroid);
};
@@ -155,13 +167,20 @@ class MediaSourcePlayerTest : public testing::Test {
MediaSourcePlayerTest()
: manager_(&message_loop_),
demuxer_(new MockDemuxerAndroid(&message_loop_)),
- player_(0, &manager_, scoped_ptr<DemuxerAndroid>(demuxer_)),
+ player_(0, &manager_,
+ base::Bind(&MockMediaPlayerManager::OnMediaResourcesRequested,
+ base::Unretained(&manager_)),
+ base::Bind(&MockMediaPlayerManager::OnMediaResourcesReleased,
+ base::Unretained(&manager_)),
+ scoped_ptr<DemuxerAndroid>(demuxer_),
+ GURL()),
decoder_callback_hook_executed_(false),
surface_texture_a_is_next_(true) {}
virtual ~MediaSourcePlayerTest() {}
protected:
- // Get the decoder job from the MediaSourcePlayer.
+ // Get the decoder job from the MediaSourcePlayer. The return value must not
+ // be NULL.
MediaDecoderJob* GetMediaDecoderJob(bool is_audio) {
if (is_audio) {
return reinterpret_cast<MediaDecoderJob*>(
@@ -171,10 +190,18 @@ class MediaSourcePlayerTest : public testing::Test {
player_.video_decoder_job_.get());
}
+ // Get the MediaCodecBridge from the decoder job. The return value could be
+ // NULL if the decoder is not yet created.
+ MediaCodecBridge* GetMediaCodecBridge(bool is_audio) {
+ if (is_audio)
+ return player_.audio_decoder_job_->media_codec_bridge_.get();
+ return player_.video_decoder_job_->media_codec_bridge_.get();
+ }
+
// Get the per-job prerolling status from the MediaSourcePlayer's job matching
// |is_audio|. Caller must guard against NPE if the player's job is NULL.
bool IsPrerolling(bool is_audio) {
- return GetMediaDecoderJob(is_audio)->prerolling();
+ return GetMediaDecoderJob(is_audio)->prerolling_;
}
// Get the preroll timestamp from the MediaSourcePlayer.
@@ -193,8 +220,6 @@ class MediaSourcePlayerTest : public testing::Test {
EXPECT_TRUE(player_.IsPlaying());
player_.Release();
EXPECT_FALSE(player_.IsPlaying());
- EXPECT_FALSE(GetMediaDecoderJob(true));
- EXPECT_FALSE(GetMediaDecoderJob(false));
}
// Upon the next successful decode callback, post a task to call Release()
@@ -203,39 +228,35 @@ class MediaSourcePlayerTest : public testing::Test {
// Prevent usage creep of MSP::set_decode_callback_for_testing() by
// only using it for the ReleaseWithOnPrefetchDoneAlreadyPosted test.
void OnNextTestDecodeCallbackPostTaskToReleasePlayer() {
- player_.set_decode_callback_for_testing(media::BindToLoop(
- message_loop_.message_loop_proxy(),
+ DCHECK_EQ(&message_loop_, base::MessageLoop::current());
+ player_.set_decode_callback_for_testing(media::BindToCurrentLoop(
base::Bind(
&MediaSourcePlayerTest::ReleaseWithPendingPrefetchDoneVerification,
base::Unretained(this))));
}
// Asynch test callback posted upon decode completion to verify that a pending
- // prefetch done event is cleared across |player_|'s Release(). This helps
+ // prefetch done event is not cleared across |player_|'s Release(). This helps
// ensure the ReleaseWithOnPrefetchDoneAlreadyPosted test scenario is met.
void ReleaseWithPendingPrefetchDoneVerification() {
EXPECT_TRUE(player_.IsEventPending(player_.PREFETCH_DONE_EVENT_PENDING));
ReleasePlayer();
- EXPECT_FALSE(player_.IsEventPending(player_.PREFETCH_DONE_EVENT_PENDING));
+ EXPECT_TRUE(player_.IsEventPending(player_.PREFETCH_DONE_EVENT_PENDING));
EXPECT_FALSE(decoder_callback_hook_executed_);
+ EXPECT_FALSE(GetMediaCodecBridge(true));
decoder_callback_hook_executed_ = true;
}
- // Inspect internal pending_event_ state of |player_|. This is for infrequent
- // use by tests, only where required.
- bool IsPendingSurfaceChange() {
- return player_.IsEventPending(player_.SURFACE_CHANGE_EVENT_PENDING);
- }
-
- DemuxerConfigs CreateAudioDemuxerConfigs(AudioCodec audio_codec) {
+ DemuxerConfigs CreateAudioDemuxerConfigs(AudioCodec audio_codec,
+ bool use_low_sample_rate) {
DemuxerConfigs configs;
configs.audio_codec = audio_codec;
configs.audio_channels = 2;
configs.is_audio_encrypted = false;
- configs.duration_ms = kDefaultDurationInMs;
+ configs.duration = kDefaultDuration;
if (audio_codec == kCodecVorbis) {
- configs.audio_sampling_rate = 44100;
+ configs.audio_sampling_rate = use_low_sample_rate ? 11025 : 44100;
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(
"vorbis-extradata");
configs.audio_extra_data = std::vector<uint8>(
@@ -255,17 +276,18 @@ class MediaSourcePlayerTest : public testing::Test {
return configs;
}
- DemuxerConfigs CreateVideoDemuxerConfigs() {
+ DemuxerConfigs CreateVideoDemuxerConfigs(bool use_larger_size) {
DemuxerConfigs configs;
configs.video_codec = kCodecVP8;
- configs.video_size = gfx::Size(320, 240);
+ configs.video_size =
+ use_larger_size ? gfx::Size(640, 480) : gfx::Size(320, 240);
configs.is_video_encrypted = false;
- configs.duration_ms = kDefaultDurationInMs;
+ configs.duration = kDefaultDuration;
return configs;
}
DemuxerConfigs CreateAudioVideoDemuxerConfigs() {
- DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis, false);
configs.video_codec = kCodecVP8;
configs.video_size = gfx::Size(320, 240);
configs.is_video_encrypted = false;
@@ -276,48 +298,77 @@ class MediaSourcePlayerTest : public testing::Test {
DCHECK(have_audio || have_video);
if (have_audio && !have_video)
- return CreateAudioDemuxerConfigs(kCodecVorbis);
+ return CreateAudioDemuxerConfigs(kCodecVorbis, false);
if (have_video && !have_audio)
- return CreateVideoDemuxerConfigs();
+ return CreateVideoDemuxerConfigs(false);
return CreateAudioVideoDemuxerConfigs();
}
- // Starts an audio decoder job. Verifies player behavior relative to
- // |expect_player_requests_data|.
- void StartAudioDecoderJob(bool expect_player_requests_data) {
- Start(CreateAudioDemuxerConfigs(kCodecVorbis), expect_player_requests_data);
+ // Starts an audio decoder job.
+ void StartAudioDecoderJob() {
+ Start(CreateAudioDemuxerConfigs(kCodecVorbis, false));
}
- // Starts a video decoder job. Verifies player behavior relative to
- // |expect_player_requests_data|.
- void StartVideoDecoderJob(bool expect_player_requests_data) {
- Start(CreateVideoDemuxerConfigs(), expect_player_requests_data);
+ // Starts a video decoder job.
+ void StartVideoDecoderJob() {
+ Start(CreateVideoDemuxerConfigs(false));
}
- // Starts decoding the data. Verifies player behavior relative to
- // |expect_player_requests_data|.
- void Start(const DemuxerConfigs& configs, bool expect_player_requests_data) {
- bool has_audio = configs.audio_codec != kUnknownAudioCodec;
- bool has_video = configs.video_codec != kUnknownVideoCodec;
+ // Starts decoding the data.
+ void Start(const DemuxerConfigs& configs) {
+ EXPECT_EQ(demuxer_->num_data_requests(), 0);
+ player_.OnDemuxerConfigsAvailable(configs);
+ player_.Start();
+
+ EXPECT_TRUE(player_.IsPlaying());
+ int expected_num_requests = (player_.HasAudio() ? 1 : 0) +
+ (player_.HasVideo() ? 1 : 0);
+ EXPECT_EQ(expected_num_requests, demuxer_->num_data_requests());
+ }
+
+ // Resumes decoding the data. Verifies player behavior relative to
+ // |expect_player_requests_audio_data| and
+ // |expect_player_requests_video_data|.
+ void Resume(bool expect_player_requests_audio_data,
+ bool expect_player_requests_video_data) {
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_TRUE(player_.HasVideo() || player_.HasAudio());
int original_num_data_requests = demuxer_->num_data_requests();
- int expected_request_delta = expect_player_requests_data ?
- ((has_audio ? 1 : 0) + (has_video ? 1 : 0)) : 0;
+ int expected_request_delta =
+ (expect_player_requests_audio_data ? 1 : 0) +
+ (expect_player_requests_video_data ? 1 : 0);
- player_.OnDemuxerConfigsAvailable(configs);
player_.Start();
EXPECT_TRUE(player_.IsPlaying());
EXPECT_EQ(original_num_data_requests + expected_request_delta,
demuxer_->num_data_requests());
+ }
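Start() is now reserved for the initial configs-plus-play sequence, while Resume() restarts a released player and asserts the per-stream data-request deltas explicitly. A hedged usage sketch, following the pattern the release/resume tests below use (no new request is expected while a pre-release request is still in flight):

    ReleasePlayer();       // Player stops; pending data requests stay in flight.
    Resume(false, false);  // Restart; neither stream should re-request data yet.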
- // Verify player has decoder job iff the config included the media type for
- // the job and the player is expected to request data due to Start(), above.
- EXPECT_EQ(expect_player_requests_data && has_audio,
- GetMediaDecoderJob(true) != NULL);
- EXPECT_EQ(expect_player_requests_data && has_video,
- GetMediaDecoderJob(false) != NULL);
+ // Keeps decoding audio data until the decoder starts to output samples.
+ // Gives up if no audio output after decoding 10 frames.
+ void DecodeAudioDataUntilOutputBecomesAvailable() {
+ EXPECT_TRUE(player_.IsPlaying());
+ base::TimeDelta current_time = player_.GetCurrentTime();
+ base::TimeDelta start_timestamp = current_time;
+ for (int i = 0; i < 10; ++i) {
+ manager_.ResetTimestampUpdated();
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckForAudio(i > 3 ? 3 : i));
+ WaitForAudioDecodeDone();
+ base::TimeDelta new_current_time = player_.GetCurrentTime();
+ EXPECT_LE(current_time.InMilliseconds(),
+ new_current_time.InMilliseconds());
+ current_time = new_current_time;
+ if (manager_.timestamp_updated()) {
+ EXPECT_LT(start_timestamp.InMillisecondsF(),
+ new_current_time.InMillisecondsF());
+ return;
+ }
+ }
+ EXPECT_TRUE(false);
}
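The terminal EXPECT_TRUE(false) above marks the give-up path after 10 frames. gtest's ADD_FAILURE() macro would express the same unconditional failure with an explicit message; a one-line sketch of the equivalent (not part of this patch):

    ADD_FAILURE() << "No audio output after decoding 10 frames.";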
AccessUnit CreateAccessUnitWithData(bool is_audio, int audio_packet_id) {
@@ -381,25 +432,27 @@ class MediaSourcePlayerTest : public testing::Test {
// decoding.
void StartAudioDecoderJobAndSeekToWhileDecoding(
const base::TimeDelta& seek_time) {
- EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(GetMediaCodecBridge(true));
EXPECT_FALSE(player_.IsPlaying());
EXPECT_EQ(0, demuxer_->num_data_requests());
EXPECT_EQ(0.0, GetPrerollTimestamp().InMillisecondsF());
EXPECT_EQ(player_.GetCurrentTime(), GetPrerollTimestamp());
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ EXPECT_EQ(2, demuxer_->num_data_requests());
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
player_.SeekTo(seek_time);
EXPECT_EQ(0.0, GetPrerollTimestamp().InMillisecondsF());
EXPECT_EQ(0, demuxer_->num_seek_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
}
// Seek, including simulated receipt of |kAborted| read between SeekTo() and
// OnDemuxerSeekDone(). Use this helper method only when the player already
- // has created the decoder job. Exactly one request for more data is expected
- // following the seek, so use this helper for players with only audio or only
- // video.
+ // has created the media codec bridge. Exactly one request for more data is
+ // expected following the seek, so use this helper for players with only audio
+ // or only video.
void SeekPlayerWithAbort(bool is_audio, const base::TimeDelta& seek_time) {
int original_num_seeks = demuxer_->num_seek_requests();
int original_num_data_requests = demuxer_->num_data_requests();
@@ -416,6 +469,9 @@ class MediaSourcePlayerTest : public testing::Test {
// caused by the seek.
player_.OnDemuxerDataAvailable(CreateAbortedAck(is_audio));
+ // Wait for the decode job to finish so we can process the seek request.
+ WaitForDecodeDone(is_audio, !is_audio);
+
// Verify that the seek is requested.
EXPECT_EQ(original_num_seeks + 1, demuxer_->num_seek_requests());
@@ -429,8 +485,42 @@ class MediaSourcePlayerTest : public testing::Test {
EXPECT_EQ(original_num_seeks + 1, demuxer_->num_seek_requests());
}
- DemuxerData CreateReadFromDemuxerAckWithConfigChanged(bool is_audio,
- int config_unit_index) {
+ // Preroll the decoder job to |target_timestamp|. The first access unit
+ // to decode will have a timestamp equal to |start_timestamp|.
+ // TODO(qinmin): Add additional test cases for out-of-order decodes.
+ // See http://crbug.com/331421.
+ void PrerollDecoderToTime(bool is_audio,
+ const base::TimeDelta& start_timestamp,
+ const base::TimeDelta& target_timestamp) {
+ EXPECT_EQ(target_timestamp, player_.GetCurrentTime());
+ // |start_timestamp| must not be later than |target_timestamp|.
+ EXPECT_LE(start_timestamp, target_timestamp);
+ DemuxerData data = is_audio ? CreateReadFromDemuxerAckForAudio(1) :
+ CreateReadFromDemuxerAckForVideo();
+ int current_timestamp = start_timestamp.InMilliseconds();
+
+ // Send some data with access unit timestamps before the |target_timestamp|,
+ // and continue sending the data until preroll finishes.
+ // This simulates the common condition that AUs received after browser
+ // seek begin with timestamps before the seek target, and don't
+ // immediately complete preroll.
+ while (IsPrerolling(is_audio)) {
+ data.access_units[0].timestamp =
+ base::TimeDelta::FromMilliseconds(current_timestamp);
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_TRUE(GetMediaDecoderJob(is_audio)->is_decoding());
+ EXPECT_TRUE(GetMediaCodecBridge(is_audio));
+ EXPECT_EQ(target_timestamp, player_.GetCurrentTime());
+ current_timestamp += 30;
+ WaitForDecodeDone(is_audio, !is_audio);
+ }
+ EXPECT_LE(target_timestamp, player_.GetCurrentTime());
+ }
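Worked through for the common case in the preroll tests below: seeking to 100 ms and prerolling from 0 ms feeds access units stamped 0, 30, 60, 90 ms, and the loop exits once the decoder's output clock reaches or passes the target. A sketch of that call pattern (matching PrerollAudioAfterSeek):

    SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
    PrerollDecoderToTime(
        true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100));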
+
+ DemuxerData CreateReadFromDemuxerAckWithConfigChanged(
+ bool is_audio,
+ int config_unit_index,
+ const DemuxerConfigs& configs) {
DemuxerData data;
data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
data.access_units.resize(config_unit_index + 1);
@@ -439,6 +529,8 @@ class MediaSourcePlayerTest : public testing::Test {
data.access_units[i] = CreateAccessUnitWithData(is_audio, i);
data.access_units[config_unit_index].status = DemuxerStream::kConfigChanged;
+ data.demuxer_configs.resize(1);
+ data.demuxer_configs[0] = configs;
return data;
}
@@ -450,26 +542,34 @@ class MediaSourcePlayerTest : public testing::Test {
// browser seek results once decode completes and surface change processing
// begins.
void BrowserSeekPlayer(bool trigger_with_release_start) {
- int expected_num_data_requests = demuxer_->num_data_requests() + 1;
+ int expected_num_data_requests = demuxer_->num_data_requests() + 2;
int expected_num_seek_requests = demuxer_->num_seek_requests();
int expected_num_browser_seek_requests =
demuxer_->num_browser_seek_requests();
- EXPECT_FALSE(GetMediaDecoderJob(false));
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
-
+ StartVideoDecoderJob();
if (trigger_with_release_start) {
+ // Consume the first frame, so that the next VideoDecoderJob will not
+ // inherit the I-frame from the previous decoder.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
ReleasePlayer();
+ WaitForVideoDecodeDone();
- // Simulate demuxer's response to the video data request.
+ // Simulate demuxer's response to the video data request. The data will be
+ // passed to the next MediaCodecBridge.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
- EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
EXPECT_FALSE(player_.IsPlaying());
EXPECT_EQ(expected_num_seek_requests, demuxer_->num_seek_requests());
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(false);
+ Resume(false, false);
+ EXPECT_FALSE(GetMediaCodecBridge(false));
+
+ // Run the message loop so that prefetch will complete.
+ while (expected_num_seek_requests == demuxer_->num_seek_requests())
+ message_loop_.RunUntilIdle();
} else {
// Simulate demuxer's response to the video data request.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
@@ -482,13 +582,13 @@ class MediaSourcePlayerTest : public testing::Test {
CreateNextTextureAndSetVideoSurface();
// Browser seek should not begin until decoding has completed.
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_TRUE(GetMediaCodecBridge(false));
EXPECT_EQ(expected_num_seek_requests, demuxer_->num_seek_requests());
- // Wait for the decoder job to finish decoding and be reset pending the
- // browser seek.
- while (GetMediaDecoderJob(false))
- message_loop_.RunUntilIdle();
+ // Wait for the media codec bridge to finish decoding and be reset pending
+ // the browser seek.
+ WaitForVideoDecodeDone();
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
}
// Only one browser seek should have been initiated, and no further data
@@ -501,63 +601,99 @@ class MediaSourcePlayerTest : public testing::Test {
EXPECT_EQ(expected_num_data_requests, demuxer_->num_data_requests());
}
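Both trigger modes end with a pending browser seek that the caller must complete. A short sketch of the Release()+Start() path, as exercised by BrowserSeek_MidStreamReleaseAndStart below:

    BrowserSeekPlayer(true);                       // Release()+Start() trigger.
    player_.OnDemuxerSeekDone(base::TimeDelta());  // Complete the browser seek.
    EXPECT_EQ(1, demuxer_->num_browser_seek_requests());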
- // Creates a new decoder job and feeds it data ending with a |kConfigChanged|
- // access unit. If |config_unit_in_prefetch| is true, sends feeds the config
- // change AU in response to the job's first read request (prefetch). If
- // false, regular data is fed and decoded prior to feeding the config change
- // AU in response to the second data request (after prefetch completed).
- // |config_unit_index| controls which access unit is |kConfigChanged|.
- void StartConfigChange(bool is_audio,
- bool config_unit_in_prefetch,
- int config_unit_index) {
- int expected_num_config_requests = demuxer_->num_config_requests();
-
- EXPECT_FALSE(GetMediaDecoderJob(is_audio));
+ // Creates a new media codec bridge and feeds it data ending with a
+ // |kConfigChanged| access unit. If |config_unit_in_prefetch| is true, feeds
+ // the config change AU in response to the job's first read request
+ // (prefetch). If false, regular data is fed and decoded prior to feeding the
+ // config change AU in response to the second data request (after prefetch
+ // completed). |config_unit_index| controls which access unit is
+ // |kConfigChanged|. If |enable_adaptive_playback| is true, config change will
+ // not cause the decoder to recreate the media codec bridge. Otherwise, the
+ // decoder has to drain all its data before creating the new codec.
+ void SendConfigChangeToDecoder(bool is_audio,
+ bool config_unit_in_prefetch,
+ int config_unit_index,
+ bool enable_adaptive_playback) {
+ EXPECT_FALSE(GetMediaCodecBridge(is_audio));
if (is_audio) {
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
} else {
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
}
int expected_num_data_requests = demuxer_->num_data_requests();
-
// Feed and decode a standalone access unit so the player exits prefetch.
if (!config_unit_in_prefetch) {
- if (is_audio)
+ if (is_audio) {
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
- else
+ } else {
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EnableAdaptiveVideoPlayback(enable_adaptive_playback);
+ }
- message_loop_.Run();
+ WaitForDecodeDone(is_audio, !is_audio);
// We should have completed the prefetch phase at this point.
expected_num_data_requests++;
EXPECT_EQ(expected_num_data_requests, demuxer_->num_data_requests());
}
- EXPECT_EQ(expected_num_config_requests, demuxer_->num_config_requests());
-
+ DemuxerConfigs configs = is_audio ?
+ CreateAudioDemuxerConfigs(kCodecAAC, false) :
+ CreateVideoDemuxerConfigs(true);
// Feed and decode access units with data for any units prior to
// |config_unit_index|, and a |kConfigChanged| unit at that index.
// Player should prepare to reconfigure the decoder job, and should request
// new demuxer configs.
- player_.OnDemuxerDataAvailable(
- CreateReadFromDemuxerAckWithConfigChanged(is_audio, config_unit_index));
- WaitForDecodeDone(is_audio, !is_audio);
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckWithConfigChanged(
+ is_audio, config_unit_index, configs));
- expected_num_config_requests++;
+ expected_num_data_requests++;
EXPECT_EQ(expected_num_data_requests, demuxer_->num_data_requests());
- EXPECT_EQ(expected_num_config_requests, demuxer_->num_config_requests());
+ if (is_audio)
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ else
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // If the adaptive playback setting was not passed to the MediaCodecBridge
+ // earlier, do it here.
+ if (config_unit_in_prefetch && !is_audio)
+ EnableAdaptiveVideoPlayback(enable_adaptive_playback);
+ }
+
+ // Send a config change to the decoder job and drain the decoder so that the
+ // config change is processed.
+ void StartConfigChange(bool is_audio,
+ bool config_unit_in_prefetch,
+ int config_unit_index,
+ bool enable_adaptive_playback) {
+ SendConfigChangeToDecoder(is_audio, config_unit_in_prefetch,
+ config_unit_index, enable_adaptive_playback);
+
+ EXPECT_EQ(!config_unit_in_prefetch && !enable_adaptive_playback &&
+ config_unit_index == 0, IsDrainingDecoder(is_audio));
+ int expected_num_data_requests = demuxer_->num_data_requests();
+ // Run until decoder starts to request new data.
+ while (demuxer_->num_data_requests() == expected_num_data_requests)
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(IsDrainingDecoder(is_audio));
+ }
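The EXPECT_EQ at the top of StartConfigChange() encodes when a drain is required: only when the config change is the very first unit handled outside prefetch and adaptive playback is off. A hypothetical helper spelling out that predicate (the name ExpectDrainingDecoder is illustrative, not in the patch):

    static bool ExpectDrainingDecoder(bool config_unit_in_prefetch,
                                      bool enable_adaptive_playback,
                                      int config_unit_index) {
      // Drain only if the first AU decoded after prefetch is the config change
      // and the codec cannot adapt in place.
      return !config_unit_in_prefetch && !enable_adaptive_playback &&
             config_unit_index == 0;
    }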
+
+ void EnableAdaptiveVideoPlayback(bool enable) {
+ EXPECT_TRUE(GetMediaCodecBridge(false));
+ static_cast<VideoCodecBridge*>(GetMediaCodecBridge(false))->
+ set_adaptive_playback_supported_for_testing(
+ enable ? 1 : 0);
}
void CreateNextTextureAndSetVideoSurface() {
gfx::SurfaceTexture* surface_texture;
if (surface_texture_a_is_next_) {
- surface_texture_a_ = new gfx::SurfaceTexture(next_texture_id_++);
+ surface_texture_a_ = gfx::SurfaceTexture::Create(next_texture_id_++);
surface_texture = surface_texture_a_.get();
} else {
- surface_texture_b_ = new gfx::SurfaceTexture(next_texture_id_++);
+ surface_texture_b_ = gfx::SurfaceTexture::Create(next_texture_id_++);
surface_texture = surface_texture_b_.get();
}
@@ -566,14 +702,15 @@ class MediaSourcePlayerTest : public testing::Test {
player_.SetVideoSurface(surface.Pass());
}
- // Wait for one or both of the jobs to complete decoding. Decoder jobs are
- // assumed to exist for any stream whose decode completion is awaited.
+ // Wait for one or both of the jobs to complete decoding. Media codec bridges
+ // are assumed to exist for any stream whose decode completion is awaited.
void WaitForDecodeDone(bool wait_for_audio, bool wait_for_video) {
DCHECK(wait_for_audio || wait_for_video);
-
- while ((wait_for_audio && GetMediaDecoderJob(true) &&
+ while ((wait_for_audio && GetMediaCodecBridge(true) &&
+ GetMediaDecoderJob(true)->HasData() &&
GetMediaDecoderJob(true)->is_decoding()) ||
- (wait_for_video && GetMediaDecoderJob(false) &&
+ (wait_for_video && GetMediaCodecBridge(false) &&
+ GetMediaDecoderJob(false)->HasData() &&
GetMediaDecoderJob(false)->is_decoding())) {
message_loop_.RunUntilIdle();
}
@@ -615,7 +752,7 @@ class MediaSourcePlayerTest : public testing::Test {
player_.SeekTo(base::TimeDelta());
player_.OnDemuxerSeekDone(kNoTimestamp());
- Start(CreateDemuxerConfigs(have_audio, have_video), true);
+ Resume(have_audio, have_video);
}
// Starts the appropriate decoder jobs according to |have_audio| and
@@ -632,7 +769,7 @@ class MediaSourcePlayerTest : public testing::Test {
if (have_video)
CreateNextTextureAndSetVideoSurface();
- Start(CreateDemuxerConfigs(have_audio, have_video), true);
+ Start(CreateDemuxerConfigs(have_audio, have_video));
if (have_audio)
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
@@ -644,8 +781,7 @@ class MediaSourcePlayerTest : public testing::Test {
// media types configured. Since prefetching may be in progress, we cannot
// reliably expect Run() to complete until we have sent demuxer data for all
// configured media types, above.
- for (int i = 0; i < (have_audio ? 1 : 0) + (have_video ? 1 : 0); i++)
- message_loop_.Run();
+ WaitForDecodeDone(have_audio, have_video);
// Simulate seek while decoding EOS or non-EOS for the appropriate
// stream(s).
@@ -676,12 +812,12 @@ class MediaSourcePlayerTest : public testing::Test {
return player_.start_time_ticks_;
}
- bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
- const std::string& security_level,
- const std::string& container,
- const std::vector<std::string>& codecs) {
- return MediaSourcePlayer::IsTypeSupported(
- scheme_uuid, security_level, container, codecs);
+ bool IsRequestingDemuxerData(bool is_audio) {
+ return GetMediaDecoderJob(is_audio)->is_requesting_demuxer_data_;
+ }
+
+ bool IsDrainingDecoder(bool is_audio) {
+ return GetMediaDecoderJob(is_audio)->drain_decoder_;
}
base::MessageLoop message_loop_;
@@ -689,7 +825,7 @@ class MediaSourcePlayerTest : public testing::Test {
MockDemuxerAndroid* demuxer_; // Owned by |player_|.
MediaSourcePlayer player_;
- // Track whether a possibly asynch decoder callback test hook has run.
+ // Track whether a possibly async decoder callback test hook has run.
bool decoder_callback_hook_executed_;
// We need to keep the surface texture while the decoder is actively decoding.
@@ -710,60 +846,61 @@ class MediaSourcePlayerTest : public testing::Test {
TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithValidConfig) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test audio decoder job will be created when codec is successfully started.
- StartAudioDecoderJob(true);
+ // Test audio codec will be created when valid configs and data are passed to
+ // the audio decoder job.
+ StartAudioDecoderJob();
EXPECT_EQ(0, demuxer_->num_seek_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ EXPECT_TRUE(GetMediaCodecBridge(true));
}
TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test audio decoder job will not be created when the codec fails to start.
- DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis, false);
// Replace with invalid |audio_extra_data|
configs.audio_extra_data.clear();
uint8 invalid_codec_data[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
configs.audio_extra_data.insert(configs.audio_extra_data.begin(),
invalid_codec_data, invalid_codec_data + 4);
- Start(configs, false);
- EXPECT_EQ(0, demuxer_->num_seek_requests());
+ Start(configs);
+
+ // Decoder is not created after data is received.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ EXPECT_FALSE(GetMediaCodecBridge(true));
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test video decoder job will be created when surface is valid.
- // Video decoder job will not be created until surface is available.
- StartVideoDecoderJob(false);
+ // Test video codec will not be created until data is received.
+ StartVideoDecoderJob();
// Set both an initial and a later video surface without receiving any
// demuxed data yet.
CreateNextTextureAndSetVideoSurface();
- MediaDecoderJob* first_job = GetMediaDecoderJob(false);
- EXPECT_TRUE(first_job);
+ EXPECT_FALSE(GetMediaCodecBridge(false));
CreateNextTextureAndSetVideoSurface();
-
- // Setting another surface will not create a new job until any pending
- // read is satisfied (and job is no longer decoding).
- EXPECT_EQ(first_job, GetMediaDecoderJob(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
// No seeks, even on setting surface, should have occurred. (Browser seeks can
// occur on setting surface, but only after previously receiving video data.)
EXPECT_EQ(0, demuxer_->num_seek_requests());
- // Note, the decoder job for the second surface set, above, will be created
- // only after the pending read is satisfied and decoded, and the resulting
- // browser seek is done. See BrowserSeek_* tests for this coverage.
+ // Send the first input chunk and verify that decoder will be created.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(false));
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test video decoder job will not be created when surface is invalid.
+ // Test video codec will not be created when surface is invalid.
scoped_refptr<gfx::SurfaceTexture> surface_texture(
- new gfx::SurfaceTexture(0));
+ gfx::SurfaceTexture::Create(0));
gfx::ScopedJavaSurface surface(surface_texture.get());
- StartVideoDecoderJob(false);
+ StartVideoDecoderJob();
// Release the surface texture.
surface_texture = NULL;
@@ -771,16 +908,17 @@ TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
// Player should not seek the demuxer on setting initial surface.
EXPECT_EQ(0, demuxer_->num_seek_requests());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
- EXPECT_FALSE(GetMediaDecoderJob(false));
- EXPECT_EQ(0, demuxer_->num_data_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_FALSE(GetMediaCodecBridge(false));
}
TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test decoder job will resend a ReadFromDemuxer request after seek.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
SeekPlayerWithAbort(true, base::TimeDelta());
}
@@ -789,26 +927,24 @@ TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
// Test SetVideoSurface() will not cause an extra seek while the player is
// waiting for demuxer to indicate seek is done.
- // Player is still waiting for SetVideoSurface(), so no request is sent.
- StartVideoDecoderJob(false); // Verifies no data requested.
+ player_.OnDemuxerConfigsAvailable(
+ CreateVideoDemuxerConfigs(false));
// Initiate a seek. Skip requesting element seek of renderer.
// Instead behave as if the renderer has asked us to seek.
- EXPECT_EQ(0, demuxer_->num_seek_requests());
player_.SeekTo(base::TimeDelta());
EXPECT_EQ(1, demuxer_->num_seek_requests());
CreateNextTextureAndSetVideoSurface();
- EXPECT_FALSE(GetMediaDecoderJob(false));
EXPECT_EQ(1, demuxer_->num_seek_requests());
-
- // Reconfirm player has not yet requested data.
- EXPECT_EQ(0, demuxer_->num_data_requests());
+ player_.Start();
// Send the seek done notification. The player should start requesting data.
player_.OnDemuxerSeekDone(kNoTimestamp());
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
EXPECT_EQ(1, demuxer_->num_data_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(false));
// Reconfirm exactly 1 seek request has been made of demuxer, and that it
// was not a browser seek request.
@@ -821,7 +957,7 @@ TEST_F(MediaSourcePlayerTest, ChangeMultipleSurfaceWhileDecoding) {
// Test MediaSourcePlayer can switch multiple surfaces during decoding.
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
EXPECT_EQ(0, demuxer_->num_seek_requests());
// Send the first input chunk.
@@ -834,32 +970,94 @@ TEST_F(MediaSourcePlayerTest, ChangeMultipleSurfaceWhileDecoding) {
// Next, pass a new non-empty surface.
CreateNextTextureAndSetVideoSurface();
- // Wait for the decoder job to finish decoding and be reset pending a browser
- // seek.
- while (GetMediaDecoderJob(false))
- message_loop_.RunUntilIdle();
+ // Wait for the media codec bridge to finish decoding and be reset pending a
+ // browser seek.
+ WaitForVideoDecodeDone();
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
// Only one browser seek should have been initiated. No further data request
// should have been processed on |message_loop_| before surface change event
// became pending, above.
EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
- EXPECT_EQ(1, demuxer_->num_data_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
// Simulate browser seek is done and confirm player requests more data for new
- // video decoder job.
+ // video codec.
player_.OnDemuxerSeekDone(player_.GetCurrentTime());
- EXPECT_TRUE(GetMediaDecoderJob(false));
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_FALSE(GetMediaCodecBridge(false));
+ EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(false));
+}
+
+TEST_F(MediaSourcePlayerTest, SetEmptySurfaceAndStarveWhileDecoding) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test player pauses if an empty surface is passed.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob();
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Send the first input chunk.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // While the decoder is decoding, pass an empty surface.
+ gfx::ScopedJavaSurface empty_surface;
+ player_.SetVideoSurface(empty_surface.Pass());
+ // Let the player starve. However, it should not issue any new data request in
+ // this case.
+ TriggerPlayerStarvation();
+ // Wait for the media codec bridge to finish decoding and be reset.
+ while (GetMediaDecoderJob(false)->is_decoding())
+ message_loop_.RunUntilIdle();
+
+ // No further seek or data requests should have been received since the
+ // surface is empty.
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // Playback resumes once a non-empty surface is passed.
+ CreateNextTextureAndSetVideoSurface();
+ EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ReleaseVideoDecoderResourcesWhileDecoding) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that if the video decoder is released while decoding, its resources
+ // will not be released immediately.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob();
+ // No resource is requested since there is no data to decode.
+ EXPECT_EQ(0, manager_.num_resources_requested());
+ ReleasePlayer();
+ EXPECT_EQ(0, manager_.num_resources_released());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // Recreate the video decoder.
+ CreateNextTextureAndSetVideoSurface();
+ player_.Start();
+ while (!GetMediaDecoderJob(false)->is_decoding())
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(1, manager_.num_resources_requested());
+ ReleasePlayer();
+ // The resource is still held by the video decoder until it finishes decoding.
+ EXPECT_EQ(0, manager_.num_resources_released());
+ // Wait for the media codec bridge to finish decoding and be reset.
+ while (manager_.num_resources_released() != 1)
+ message_loop_.RunUntilIdle();
}
TEST_F(MediaSourcePlayerTest, AudioOnlyStartAfterSeekFinish) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test audio decoder job will not start until pending seek event is handled.
- DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis, false);
player_.OnDemuxerConfigsAvailable(configs);
- EXPECT_FALSE(GetMediaDecoderJob(true));
// Initiate a seek. Skip requesting element seek of renderer.
// Instead behave as if the renderer has asked us to seek.
@@ -867,16 +1065,19 @@ TEST_F(MediaSourcePlayerTest, AudioOnlyStartAfterSeekFinish) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
player_.Start();
- EXPECT_FALSE(GetMediaDecoderJob(true));
EXPECT_EQ(0, demuxer_->num_data_requests());
// Sending back the seek done notification.
player_.OnDemuxerSeekDone(kNoTimestamp());
- EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(GetMediaCodecBridge(true));
EXPECT_EQ(1, demuxer_->num_data_requests());
// Reconfirm exactly 1 seek request has been made of demuxer.
EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ // Decoder is created after data is received.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ EXPECT_TRUE(GetMediaCodecBridge(true));
}
TEST_F(MediaSourcePlayerTest, VideoOnlyStartAfterSeekFinish) {
@@ -884,9 +1085,8 @@ TEST_F(MediaSourcePlayerTest, VideoOnlyStartAfterSeekFinish) {
// Test video decoder job will not start until pending seek event is handled.
CreateNextTextureAndSetVideoSurface();
- DemuxerConfigs configs = CreateVideoDemuxerConfigs();
+ DemuxerConfigs configs = CreateVideoDemuxerConfigs(false);
player_.OnDemuxerConfigsAvailable(configs);
- EXPECT_FALSE(GetMediaDecoderJob(false));
// Initiate a seek. Skip requesting element seek of renderer.
// Instead behave as if the renderer has asked us to seek.
@@ -894,16 +1094,19 @@ TEST_F(MediaSourcePlayerTest, VideoOnlyStartAfterSeekFinish) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
player_.Start();
- EXPECT_FALSE(GetMediaDecoderJob(false));
EXPECT_EQ(0, demuxer_->num_data_requests());
// Sending back the seek done notification.
player_.OnDemuxerSeekDone(kNoTimestamp());
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
EXPECT_EQ(1, demuxer_->num_data_requests());
// Reconfirm exactly 1 seek request has been made of demuxer.
EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ // Decoder is created after data is received.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(false));
}
TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
@@ -911,7 +1114,7 @@ TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
// Test that if the decoding job is not fully stopped after Pause(),
// calling Start() will be a noop.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
@@ -919,6 +1122,7 @@ TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
// Sending data to player.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
// Decoder job will not immediately stop after Pause() since it is
// running on another thread.
@@ -927,23 +1131,23 @@ TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
// Nothing happens when calling Start() again.
player_.Start();
- // Verify that Start() will not destroy and recreate the decoder job.
+ // Verify that Start() will not destroy and recreate the media codec bridge.
EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_->num_data_requests());
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
- // The decoder job should finish and a new request will be sent.
+
+ while (GetMediaDecoderJob(true)->is_decoding())
+ message_loop_.RunUntilIdle();
+ // The decoder job should finish and wait for data.
EXPECT_EQ(2, demuxer_->num_data_requests());
- EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+ EXPECT_TRUE(IsRequestingDemuxerData(true));
}
TEST_F(MediaSourcePlayerTest, DecoderJobsCannotStartWithoutAudio) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test that when Start() is called, video decoder jobs will wait for audio
+ // Test that when Start() is called, the video decoder job will wait for the
// audio decoder job before it starts decoding the data.
CreateNextTextureAndSetVideoSurface();
- Start(CreateAudioVideoDemuxerConfigs(), true);
+ Start(CreateAudioVideoDemuxerConfigs());
MediaDecoderJob* audio_decoder_job = GetMediaDecoderJob(true);
MediaDecoderJob* video_decoder_job = GetMediaDecoderJob(false);
@@ -967,45 +1171,26 @@ TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test start time ticks will reset after decoder job underruns.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
- // For the first couple chunks, the decoder job may return
- // DECODE_FORMAT_CHANGED status instead of DECODE_SUCCEEDED status. Decode
- // more frames to guarantee that DECODE_SUCCEEDED will be returned.
- for (int i = 0; i < 4; ++i) {
- player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
- }
+ DecodeAudioDataUntilOutputBecomesAvailable();
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(5, demuxer_->num_data_requests());
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
base::TimeTicks previous = StartTimeTicks();
-
- // Let the decoder timeout and execute the OnDecoderStarved() callback.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
-
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- EXPECT_TRUE(StartTimeTicks() != base::TimeTicks());
- message_loop_.RunUntilIdle();
-
- // Send new data to the decoder so it can finish the currently
- // pending decode.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
- WaitForAudioDecodeDone();
- // Verify the start time ticks is cleared at this point because the
- // player is prefetching.
- EXPECT_TRUE(StartTimeTicks() == base::TimeTicks());
+ // Let the decoder starve.
+ TriggerPlayerStarvation();
+ WaitForAudioDecodeDone();
+ EXPECT_TRUE(StartTimeTicks() == previous);
// Send new data to the decoder so it can finish prefetching. This should
// reset the start time ticks.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
- EXPECT_TRUE(StartTimeTicks() != base::TimeTicks());
+ EXPECT_TRUE(StartTimeTicks() != previous);
base::TimeTicks current = StartTimeTicks();
- EXPECT_LE(100.0, (current - previous).InMillisecondsF());
+ EXPECT_LE(0, (current - previous).InMillisecondsF());
}
TEST_F(MediaSourcePlayerTest, V_SecondAccessUnitIsEOSAndResumePlayAfterSeek) {
@@ -1013,11 +1198,11 @@ TEST_F(MediaSourcePlayerTest, V_SecondAccessUnitIsEOSAndResumePlayAfterSeek) {
// Test MediaSourcePlayer can replay video after input EOS is reached.
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
// Send the first input chunk.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
- message_loop_.Run();
+ WaitForVideoDecodeDone();
VerifyPlaybackCompletesOnEOSDecode(true, false);
VerifyCompletedPlaybackResumesOnSeekPlusStart(false, true);
@@ -1030,7 +1215,7 @@ TEST_F(MediaSourcePlayerTest, A_FirstAccessUnitIsEOSAndResumePlayAfterSeek) {
// http://b/11696552.
// Also tests that seeking+Start() after completing audio playback resumes
// playback.
- Start(CreateAudioDemuxerConfigs(kCodecAAC), true);
+ Start(CreateAudioDemuxerConfigs(kCodecAAC, false));
VerifyPlaybackCompletesOnEOSDecode(true, true);
VerifyCompletedPlaybackResumesOnSeekPlusStart(true, false);
}
@@ -1042,7 +1227,7 @@ TEST_F(MediaSourcePlayerTest, V_FirstAccessUnitAfterSeekIsEOS) {
// decode (other than the simulated |kAborted| resulting from the seek
// process.)
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
SeekPlayerWithAbort(false, base::TimeDelta());
VerifyPlaybackCompletesOnEOSDecode(true, false);
}
@@ -1053,7 +1238,7 @@ TEST_F(MediaSourcePlayerTest, A_FirstAccessUnitAfterSeekIsEOS) {
// Test decode of audio EOS buffer, just after seeking, without any prior
// decode (other than the simulated |kAborted| resulting from the seek
// process.) See also http://b/11696552.
- Start(CreateAudioDemuxerConfigs(kCodecAAC), true);
+ Start(CreateAudioDemuxerConfigs(kCodecAAC, false));
SeekPlayerWithAbort(true, base::TimeDelta());
VerifyPlaybackCompletesOnEOSDecode(true, true);
}
@@ -1066,18 +1251,15 @@ TEST_F(MediaSourcePlayerTest, AV_PlaybackCompletionAcrossConfigChange) {
// A/V playback.
// Also tests that seeking+Start() after completing playback resumes playback.
CreateNextTextureAndSetVideoSurface();
- Start(CreateAudioVideoDemuxerConfigs(), true);
+ Start(CreateAudioVideoDemuxerConfigs());
player_.OnDemuxerDataAvailable(CreateEOSAck(true)); // Audio EOS
- EXPECT_EQ(0, demuxer_->num_config_requests());
+ DemuxerConfigs configs = CreateVideoDemuxerConfigs(true);
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckWithConfigChanged(
- false, 0)); // Video |kConfigChanged| as first unit.
+ false, 0, configs)); // Video |kConfigChanged| as first unit.
WaitForAudioVideoDecodeDone();
- EXPECT_EQ(1, demuxer_->num_config_requests());
- EXPECT_EQ(2, demuxer_->num_data_requests());
- player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
EXPECT_EQ(3, demuxer_->num_data_requests());
// At no time after completing audio EOS decode, above, should the
@@ -1094,20 +1276,15 @@ TEST_F(MediaSourcePlayerTest, VA_PlaybackCompletionAcrossConfigChange) {
// A/V playback.
// Also tests that seeking+Start() after completing playback resumes playback.
CreateNextTextureAndSetVideoSurface();
- Start(CreateAudioVideoDemuxerConfigs(), true);
+ Start(CreateAudioVideoDemuxerConfigs());
player_.OnDemuxerDataAvailable(CreateEOSAck(false)); // Video EOS
- EXPECT_EQ(0, demuxer_->num_config_requests());
+ // Audio |kConfigChanged| as first unit.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckWithConfigChanged(
- true, 0)); // Audio |kConfigChanged| as first unit.
+ true, 0, CreateAudioDemuxerConfigs(kCodecVorbis, false)));
WaitForAudioVideoDecodeDone();
- // TODO(wolenetz/qinmin): Prevent redundant demuxer config request and change
- // expectation to 1 here. See http://crbug.com/325528.
- EXPECT_EQ(2, demuxer_->num_config_requests());
- EXPECT_EQ(2, demuxer_->num_data_requests());
- player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
EXPECT_EQ(3, demuxer_->num_data_requests());
// At no time after completing video EOS decode, above, should the
@@ -1124,16 +1301,14 @@ TEST_F(MediaSourcePlayerTest, AV_NoPrefetchForFinishedVideoOnAudioStarvation) {
// and responding to that prefetch with EOS completes A/V playback, even if
// another starvation occurs during the latter EOS's decode.
CreateNextTextureAndSetVideoSurface();
- Start(CreateAudioVideoDemuxerConfigs(), true);
+ Start(CreateAudioVideoDemuxerConfigs());
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
player_.OnDemuxerDataAvailable(CreateEOSAck(false)); // Video EOS
// Wait until video EOS is processed and more data (assumed to be audio) is
// requested.
- while (demuxer_->num_data_requests() < 3)
- message_loop_.RunUntilIdle();
- WaitForVideoDecodeDone();
+ WaitForAudioVideoDecodeDone();
EXPECT_EQ(3, demuxer_->num_data_requests());
// Simulate decoder underrun to trigger prefetch while still decoding audio.
@@ -1146,7 +1321,6 @@ TEST_F(MediaSourcePlayerTest, AV_NoPrefetchForFinishedVideoOnAudioStarvation) {
// starvation was triggered.
WaitForAudioDecodeDone();
EXPECT_EQ(4, demuxer_->num_data_requests());
-
player_.OnDemuxerDataAvailable(CreateEOSAck(true)); // Audio EOS
EXPECT_FALSE(GetMediaDecoderJob(false)->is_decoding());
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
@@ -1162,9 +1336,9 @@ TEST_F(MediaSourcePlayerTest, V_StarvationDuringEOSDecode) {
// Test that video-only playback completes without further data requested when
// starvation occurs during EOS decode.
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
- message_loop_.Run();
+ WaitForVideoDecodeDone();
// Simulate decoder underrun to trigger prefetch while decoding EOS.
player_.OnDemuxerDataAvailable(CreateEOSAck(false)); // Video EOS
@@ -1178,9 +1352,9 @@ TEST_F(MediaSourcePlayerTest, A_StarvationDuringEOSDecode) {
// Test that audio-only playback completes without further data requested when
// starvation occurs during EOS decode.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
- message_loop_.Run();
+ WaitForAudioDecodeDone();
// Simulate decoder underrun to trigger prefetch while decoding EOS.
player_.OnDemuxerDataAvailable(CreateEOSAck(true)); // Audio EOS
@@ -1236,7 +1410,7 @@ TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterAbort) {
// Test that the decoder will not request new data after receiving an aborted
// access unit.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
// Send an aborted access unit.
player_.OnDemuxerDataAvailable(CreateAbortedAck(true));
@@ -1255,12 +1429,12 @@ TEST_F(MediaSourcePlayerTest, DemuxerDataArrivesAfterRelease) {
// Test that the decoder should not crash if demuxer data arrives after
// Release().
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
ReleasePlayer();
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
- // The decoder job should have been released.
+ // The media codec bridge should have been released.
EXPECT_FALSE(player_.IsPlaying());
// No further data should have been requested.
@@ -1279,34 +1453,33 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_RegularSeekPendsBrowserSeekDone) {
// Simulate renderer requesting a regular seek while browser seek in progress.
player_.SeekTo(base::TimeDelta());
- EXPECT_FALSE(GetMediaDecoderJob(false));
// Simulate browser seek is done. Confirm player requests the regular seek,
- // still has no video decoder job configured, and has not requested any
+ // still has no video codec configured, and has not requested any
// further data since the surface change event became pending in
// BrowserSeekPlayer().
EXPECT_EQ(1, demuxer_->num_seek_requests());
player_.OnDemuxerSeekDone(base::TimeDelta());
- EXPECT_FALSE(GetMediaDecoderJob(false));
EXPECT_EQ(2, demuxer_->num_seek_requests());
EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
- EXPECT_EQ(1, demuxer_->num_data_requests());
// Simulate regular seek is done and confirm player requests more data for
- // new video decoder job.
+ // new video codec.
player_.OnDemuxerSeekDone(kNoTimestamp());
- EXPECT_TRUE(GetMediaDecoderJob(false));
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_FALSE(GetMediaCodecBridge(false));
+ EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(2, demuxer_->num_seek_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(false));
}
-TEST_F(MediaSourcePlayerTest, NoSeekForInitialReleaseAndStart) {
+TEST_F(MediaSourcePlayerTest, BrowserSeek_InitialReleaseAndStart) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test that no seek is requested if player Release() + Start() occurs prior
- // to receiving any data.
+ // Test that no browser seek is requested if player Release() + Start() occurs
+ // prior to receiving any data.
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
ReleasePlayer();
// Pass a new non-empty surface.
@@ -1314,12 +1487,14 @@ TEST_F(MediaSourcePlayerTest, NoSeekForInitialReleaseAndStart) {
player_.Start();
- // TODO(wolenetz/qinmin): Multiple in-flight data requests for same stream
- // should be prevented. See http://crbug.com/306314.
- EXPECT_EQ(2, demuxer_->num_data_requests());
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ // No data request is issued since there is still one pending.
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ EXPECT_FALSE(GetMediaCodecBridge(false));
- EXPECT_EQ(0, demuxer_->num_seek_requests());
+ // No browser seek is needed.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
}
TEST_F(MediaSourcePlayerTest, BrowserSeek_MidStreamReleaseAndStart) {
@@ -1328,12 +1503,10 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_MidStreamReleaseAndStart) {
// Test that one browser seek is requested if player Release() + Start(), with
// video data received between Release() and Start().
BrowserSeekPlayer(true);
- EXPECT_EQ(1, demuxer_->num_data_requests());
// Simulate browser seek is done and confirm player requests more data.
player_.OnDemuxerSeekDone(base::TimeDelta());
- EXPECT_TRUE(GetMediaDecoderJob(false));
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
@@ -1341,28 +1514,12 @@ TEST_F(MediaSourcePlayerTest, PrerollAudioAfterSeek) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test decoder job will preroll the media to the seek position.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
EXPECT_TRUE(IsPrerolling(true));
- EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
-
- // Send some data before the seek position.
- for (int i = 1; i < 4; ++i) {
- player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
- }
- EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
- EXPECT_TRUE(IsPrerolling(true));
-
- // Send data after the seek position.
- DemuxerData data = CreateReadFromDemuxerAckForAudio(3);
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
- player_.OnDemuxerDataAvailable(data);
- message_loop_.Run();
- EXPECT_LT(100.0, player_.GetCurrentTime().InMillisecondsF());
- EXPECT_FALSE(IsPrerolling(true));
+ PrerollDecoderToTime(
+ true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100));
}
TEST_F(MediaSourcePlayerTest, PrerollVideoAfterSeek) {
@@ -1370,36 +1527,12 @@ TEST_F(MediaSourcePlayerTest, PrerollVideoAfterSeek) {
// Test decoder job will preroll the media to the seek position.
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
SeekPlayerWithAbort(false, base::TimeDelta::FromMilliseconds(100));
EXPECT_TRUE(IsPrerolling(false));
- EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
-
- // Send some data before the seek position.
- DemuxerData data;
- for (int i = 1; i < 4; ++i) {
- data = CreateReadFromDemuxerAckForVideo();
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(i * 30);
- player_.OnDemuxerDataAvailable(data);
- EXPECT_TRUE(GetMediaDecoderJob(false)->is_decoding());
- message_loop_.Run();
- }
- EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
- EXPECT_TRUE(IsPrerolling(false));
-
- // Send data at the seek position.
- data = CreateReadFromDemuxerAckForVideo();
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
- player_.OnDemuxerDataAvailable(data);
- message_loop_.Run();
-
- // TODO(wolenetz/qinmin): Player's maintenance of current time for video-only
- // streams depends on decoder output, which may be initially inaccurate, and
- // encoded video test data may also need updating. Verify at least that AU
- // timestamp-based preroll logic has determined video preroll has completed.
- // See http://crbug.com/310823 and http://b/11356652.
- EXPECT_FALSE(IsPrerolling(false));
+ PrerollDecoderToTime(
+ false, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100));
}
TEST_F(MediaSourcePlayerTest, SeekingAfterCompletingPrerollRestartsPreroll) {
@@ -1407,17 +1540,12 @@ TEST_F(MediaSourcePlayerTest, SeekingAfterCompletingPrerollRestartsPreroll) {
// Test decoder job will begin prerolling upon seek, when it was not
// prerolling prior to the seek.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
EXPECT_TRUE(IsPrerolling(true));
// Complete the initial preroll by feeding data to the decoder.
- for (int i = 0; i < 4; ++i) {
- player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
- }
- EXPECT_LT(0.0, player_.GetCurrentTime().InMillisecondsF());
+ DecodeAudioDataUntilOutputBecomesAvailable();
EXPECT_FALSE(IsPrerolling(true));
SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(500));
@@ -1433,13 +1561,13 @@ TEST_F(MediaSourcePlayerTest, SeekingAfterCompletingPrerollRestartsPreroll) {
500 + 30 * (i - 1));
player_.OnDemuxerDataAvailable(data);
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
+ WaitForAudioDecodeDone();
}
EXPECT_LT(500.0, player_.GetCurrentTime().InMillisecondsF());
EXPECT_FALSE(IsPrerolling(true));
- // Throughout this test, we should have not re-created the decoder job, so
- // IsPrerolling() transition from false to true was not due to constructor
+ // Throughout this test, we should have not re-created the media codec bridge,
+ // so IsPrerolling() transition from false to true was not due to constructor
// initialization. It was due to BeginPrerolling().
EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
}
@@ -1449,9 +1577,10 @@ TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossReleaseAndStart) {
// Test decoder job will resume media prerolling if interrupted by Release()
// and Start().
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
- SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ base::TimeDelta target_timestamp = base::TimeDelta::FromMilliseconds(100);
+ SeekPlayerWithAbort(true, target_timestamp);
EXPECT_TRUE(IsPrerolling(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
@@ -1466,23 +1595,14 @@ TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossReleaseAndStart) {
data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(i * 10);
if (i == 1) {
// While still prerolling, Release() and Start() the player.
- // TODO(qinmin): Simulation of multiple in-flight data requests (one from
- // before Release(), one from after Start()) is not included here, and
- // neither is any data enqueued for later decode if it arrives after
- // Release() and before Start(). See http://crbug.com/306314. Assumption
- // for this test, to prevent flakiness until the bug is fixed, is the
- // first request's data arrives before Start(). Though that data is not
- // seen by decoder, this assumption allows preroll continuation
- // verification and prevents multiple in-flight data requests.
ReleasePlayer();
- player_.OnDemuxerDataAvailable(data);
- message_loop_.RunUntilIdle();
- EXPECT_FALSE(GetMediaDecoderJob(true));
- StartAudioDecoderJob(true);
+ // The decoder is still decoding and will not be immediately released.
+ EXPECT_TRUE(GetMediaCodecBridge(true));
+ Resume(false, false);
} else {
player_.OnDemuxerDataAvailable(data);
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
+ WaitForAudioDecodeDone();
}
EXPECT_TRUE(IsPrerolling(true));
}
@@ -1490,12 +1610,7 @@ TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossReleaseAndStart) {
EXPECT_TRUE(IsPrerolling(true));
// Send data after the seek position.
- data = CreateReadFromDemuxerAckForAudio(3);
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
- player_.OnDemuxerDataAvailable(data);
- message_loop_.Run();
- EXPECT_LT(100.0, player_.GetCurrentTime().InMillisecondsF());
- EXPECT_FALSE(IsPrerolling(true));
+ PrerollDecoderToTime(true, target_timestamp, target_timestamp);
}
TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossConfigChange) {
@@ -1503,39 +1618,43 @@ TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossConfigChange) {
// Test decoder job will resume media prerolling if interrupted by
// |kConfigChanged| and OnDemuxerConfigsAvailable().
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
EXPECT_TRUE(IsPrerolling(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis, true);
+
// In response to data request, simulate that demuxer signals config change by
- // sending an AU with |kConfigChanged|. Player should prepare to reconfigure
- // the audio decoder job, and should request new demuxer configs.
- DemuxerData data = CreateReadFromDemuxerAckWithConfigChanged(true, 0);
- EXPECT_EQ(0, demuxer_->num_config_requests());
+ // sending an AU with |kConfigChanged|.
+ DemuxerData data = CreateReadFromDemuxerAckWithConfigChanged(
+ true, 0, configs);
player_.OnDemuxerDataAvailable(data);
- EXPECT_EQ(1, demuxer_->num_config_requests());
+ PrerollDecoderToTime(
+ true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100));
+}
- // Simulate arrival of new configs.
- player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
+TEST_F(MediaSourcePlayerTest, PrerollContinuesAfterUnchangedConfigs) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Send some data before the seek position.
- for (int i = 1; i < 4; ++i) {
- player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
- EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- message_loop_.Run();
- }
- EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ // Test decoder job will resume media prerolling if interrupted by a config
+ // change access unit with unchanged configs.
+ StartAudioDecoderJob();
+
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
- // Send data after the seek position.
- data = CreateReadFromDemuxerAckForAudio(3);
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis, false);
+
+ // In response to data request, simulate that demuxer signals config change by
+ // sending an AU with |kConfigChanged|.
+ DemuxerData data = CreateReadFromDemuxerAckWithConfigChanged(
+ true, 0, configs);
player_.OnDemuxerDataAvailable(data);
- message_loop_.Run();
- EXPECT_LT(100.0, player_.GetCurrentTime().InMillisecondsF());
- EXPECT_FALSE(IsPrerolling(true));
+ PrerollDecoderToTime(
+ true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100));
}
TEST_F(MediaSourcePlayerTest, SimultaneousAudioVideoConfigChange) {
@@ -1545,35 +1664,63 @@ TEST_F(MediaSourcePlayerTest, SimultaneousAudioVideoConfigChange) {
// such as might occur during OnPrefetchDone() if next access unit for both
// audio and video jobs is |kConfigChanged|.
CreateNextTextureAndSetVideoSurface();
- Start(CreateAudioVideoDemuxerConfigs(), true);
- MediaDecoderJob* first_audio_job = GetMediaDecoderJob(true);
- MediaDecoderJob* first_video_job = GetMediaDecoderJob(false);
+ Start(CreateAudioVideoDemuxerConfigs());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(true));
+ EXPECT_TRUE(GetMediaCodecBridge(false));
+ EnableAdaptiveVideoPlayback(false);
+ WaitForAudioVideoDecodeDone();
// Simulate audio |kConfigChanged| prefetched as standalone access unit.
+ DemuxerConfigs audio_configs = CreateAudioDemuxerConfigs(kCodecVorbis, true);
player_.OnDemuxerDataAvailable(
- CreateReadFromDemuxerAckWithConfigChanged(true, 0));
- EXPECT_EQ(0, demuxer_->num_config_requests()); // No OnPrefetchDone() yet.
+ CreateReadFromDemuxerAckWithConfigChanged(true, 0, audio_configs));
// Simulate video |kConfigChanged| prefetched as standalone access unit.
player_.OnDemuxerDataAvailable(
- CreateReadFromDemuxerAckWithConfigChanged(false, 0));
- EXPECT_EQ(1, demuxer_->num_config_requests()); // OnPrefetchDone() occurred.
- EXPECT_EQ(2, demuxer_->num_data_requests()); // No more data requested yet.
+ CreateReadFromDemuxerAckWithConfigChanged(
+ false, 0, CreateVideoDemuxerConfigs(true)));
+ EXPECT_EQ(6, demuxer_->num_data_requests());
+ EXPECT_TRUE(IsDrainingDecoder(true));
+ EXPECT_TRUE(IsDrainingDecoder(false));
+
+ // Wait for both decoders to finish draining.
+ while (IsDrainingDecoder(true) || IsDrainingDecoder(false))
+ message_loop_.RunUntilIdle();
+}
- // No job re-creation should occur until the requested configs arrive.
- EXPECT_EQ(first_audio_job, GetMediaDecoderJob(true));
- EXPECT_EQ(first_video_job, GetMediaDecoderJob(false));
+TEST_F(MediaSourcePlayerTest,
+ SimultaneousAudioVideoConfigChangeWithAdaptivePlayback) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
- EXPECT_EQ(4, demuxer_->num_data_requests());
- MediaDecoderJob* second_audio_job = GetMediaDecoderJob(true);
- MediaDecoderJob* second_video_job = GetMediaDecoderJob(false);
- EXPECT_NE(first_audio_job, second_audio_job);
- EXPECT_NE(first_video_job, second_video_job);
- EXPECT_TRUE(second_audio_job && second_video_job);
-
- // Confirm no further demuxer configs requested.
- EXPECT_EQ(1, demuxer_->num_config_requests());
+ // Test that the player allows simultaneous audio and video config change with
+ // adaptive video playback enabled.
+ CreateNextTextureAndSetVideoSurface();
+ Start(CreateAudioVideoDemuxerConfigs());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_TRUE(GetMediaCodecBridge(true));
+ EXPECT_TRUE(GetMediaCodecBridge(false));
+ EnableAdaptiveVideoPlayback(true);
+ WaitForAudioVideoDecodeDone();
+
+ // Simulate audio |kConfigChanged| prefetched as standalone access unit.
+ DemuxerConfigs audio_configs = CreateAudioDemuxerConfigs(kCodecVorbis, true);
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(true, 0, audio_configs));
+
+ // Simulate video |kConfigChanged| prefetched as standalone access unit.
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(
+ false, 0, CreateVideoDemuxerConfigs(true)));
+ EXPECT_EQ(6, demuxer_->num_data_requests());
+ EXPECT_TRUE(IsDrainingDecoder(true));
+ EXPECT_FALSE(IsDrainingDecoder(false));
+
+ // Wait for the audio decoder to finish draining.
+ while (IsDrainingDecoder(true))
+ message_loop_.RunUntilIdle();
}
TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit0) {
@@ -1583,7 +1730,7 @@ TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit0) {
// the |kConfigChanged| unit is the very first unit in the set of units
// received in OnDemuxerDataAvailable() ostensibly while
// |PREFETCH_DONE_EVENT_PENDING|.
- StartConfigChange(true, true, 0);
+ StartConfigChange(true, true, 0, false);
}
TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit1) {
@@ -1593,7 +1740,7 @@ TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit1) {
// the |kConfigChanged| unit is not the first unit in the set of units
// received in OnDemuxerDataAvailable() ostensibly while
// |PREFETCH_DONE_EVENT_PENDING|.
- StartConfigChange(true, true, 1);
+ StartConfigChange(true, true, 1, false);
}
TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInUnit0AfterPrefetch) {
@@ -1603,7 +1750,7 @@ TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInUnit0AfterPrefetch) {
// the |kConfigChanged| unit is the very first unit in the set of units
// received in OnDemuxerDataAvailable() from data requested ostensibly while
// not prefetching.
- StartConfigChange(true, false, 0);
+ StartConfigChange(true, false, 0, false);
}
TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInUnit1AfterPrefetch) {
@@ -1613,7 +1760,7 @@ TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInUnit1AfterPrefetch) {
// the |kConfigChanged| unit is not the first unit in the set of units
// received in OnDemuxerDataAvailable() from data requested ostensibly while
// not prefetching.
- StartConfigChange(true, false, 1);
+ StartConfigChange(true, false, 1, false);
}
TEST_F(MediaSourcePlayerTest, BrowserSeek_PrerollAfterBrowserSeek) {
@@ -1626,126 +1773,109 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_PrerollAfterBrowserSeek) {
// Simulate browser seek is done, but to a later time than was requested.
EXPECT_LT(player_.GetCurrentTime().InMillisecondsF(), 100);
player_.OnDemuxerSeekDone(base::TimeDelta::FromMilliseconds(100));
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ // Because the next AU is not an I-frame, the MediaCodecBridge will not be
+ // recreated.
+ EXPECT_FALSE(GetMediaCodecBridge(false));
EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
- EXPECT_EQ(2, demuxer_->num_data_requests());
-
- // Send some data with access unit timestamps before the actual browser seek
- // position. This is a bit unrealistic in this case where the browser seek
- // jumped forward and next data from demuxer would normally begin at this
- // browser seek position, immediately completing preroll. For simplicity and
- // coverage, this test simulates the more common condition that AUs received
- // after browser seek begin with timestamps before the seek target, and don't
- // immediately complete preroll.
- DemuxerData data;
- for (int i = 1; i < 4; ++i) {
- data = CreateReadFromDemuxerAckForVideo();
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(i * 30);
- player_.OnDemuxerDataAvailable(data);
- EXPECT_TRUE(GetMediaDecoderJob(false)->is_decoding());
- message_loop_.Run();
- EXPECT_TRUE(IsPrerolling(false));
- }
-
- EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
- // Send data after the browser seek position.
- data = CreateReadFromDemuxerAckForVideo();
- data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(120);
- player_.OnDemuxerDataAvailable(data);
- message_loop_.Run();
- EXPECT_FALSE(IsPrerolling(false));
+ PrerollDecoderToTime(
+ false, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100));
}
TEST_F(MediaSourcePlayerTest, VideoDemuxerConfigChange) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test that video config change notification results in request for demuxer
- // configuration, and that a video decoder job results without any browser
- // seek necessary once the new demuxer config arrives.
- StartConfigChange(false, true, 1);
- MediaDecoderJob* first_job = GetMediaDecoderJob(false);
- EXPECT_TRUE(first_job);
- EXPECT_EQ(1, demuxer_->num_data_requests());
- EXPECT_EQ(1, demuxer_->num_config_requests());
-
- // Simulate arrival of new configs.
- player_.OnDemuxerConfigsAvailable(CreateVideoDemuxerConfigs());
+ // Test that video config change notification results in creating a new
+ // video codec without any browser seek.
+ StartConfigChange(false, true, 1, false);
- // New video decoder job should have been created and configured, without any
+ // New video codec should have been created and configured, without any
// browser seek.
- MediaDecoderJob* second_job = GetMediaDecoderJob(false);
- EXPECT_TRUE(second_job);
- EXPECT_NE(first_job, second_job);
- EXPECT_EQ(2, demuxer_->num_data_requests());
- EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_TRUE(GetMediaCodecBridge(false));
+ EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(0, demuxer_->num_seek_requests());
+
+ // 2 codecs should have been created, one before the config change, and one
+ // after it.
+ EXPECT_EQ(2, manager_.num_resources_requested());
+ EXPECT_EQ(1, manager_.num_resources_released());
}
-TEST_F(MediaSourcePlayerTest, VideoConfigChangeContinuesAcrossSeek) {
+TEST_F(MediaSourcePlayerTest, VideoDemuxerConfigChangeWithAdaptivePlayback) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test if a demuxer config request is pending (due to previously receiving
- // |kConfigChanged|), and a seek request arrives prior to demuxer configs,
- // then seek is processed first, followed by the decoder config change.
- // This assumes the demuxer sends |kConfigChanged| read response prior to
- // canceling any reads pending seek; no |kAborted| is involved in this test.
- StartConfigChange(false, false, 1);
- MediaDecoderJob* first_job = GetMediaDecoderJob(false);
- EXPECT_TRUE(first_job);
- EXPECT_EQ(1, demuxer_->num_config_requests());
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ // Test that if the codec supports adaptive playback, no new codec is
+ // created beyond the one used to decode the prefetched media data prior
+ // to the |kConfigChanged| access unit.
+ StartConfigChange(false, true, 1, true);
+
+ // No browser seek should be needed.
+ EXPECT_TRUE(GetMediaCodecBridge(false));
+ EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(0, demuxer_->num_seek_requests());
+ // Only 1 codec should have been created so far.
+ EXPECT_EQ(1, manager_.num_resources_requested());
+}
+
+TEST_F(MediaSourcePlayerTest, DecoderDrainInterruptedBySeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that if a seek request arrives while the decoder is being drained,
+ // the draining is canceled.
+ SendConfigChangeToDecoder(true, false, 0, false);
+ EXPECT_TRUE(IsDrainingDecoder(true));
+
player_.SeekTo(base::TimeDelta::FromMilliseconds(100));
+ WaitForAudioDecodeDone();
+ EXPECT_FALSE(IsDrainingDecoder(true));
+ player_.OnDemuxerSeekDone(kNoTimestamp());
- // Verify that the seek is requested immediately.
EXPECT_EQ(1, demuxer_->num_seek_requests());
+ EXPECT_EQ(4, demuxer_->num_data_requests());
+}
- // Simulate unlikely delayed arrival of the demuxer configs, completing the
- // config change.
- // TODO(wolenetz): Is it even possible for requested demuxer configs to be
- // delayed until after a SeekTo request arrives?
- player_.OnDemuxerConfigsAvailable(CreateVideoDemuxerConfigs());
+TEST_F(MediaSourcePlayerTest, DecoderDrainInterruptedByRelease) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- MediaDecoderJob* second_job = GetMediaDecoderJob(false);
- EXPECT_NE(first_job, second_job);
- EXPECT_TRUE(second_job);
+ // Test that if a release request arrives while the decoder is being
+ // drained, the draining is canceled.
+ SendConfigChangeToDecoder(true, false, 0, false);
+ EXPECT_TRUE(IsDrainingDecoder(true));
- // Send back the seek done notification. This should finish the seek and
- // trigger the player to request more data.
- EXPECT_EQ(2, demuxer_->num_data_requests());
- player_.OnDemuxerSeekDone(kNoTimestamp());
+ ReleasePlayer();
+ WaitForAudioDecodeDone();
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+ EXPECT_FALSE(IsDrainingDecoder(true));
+
+ EXPECT_FALSE(GetMediaCodecBridge(true));
+ EXPECT_FALSE(player_.IsPlaying());
+
+ player_.Start();
+ EXPECT_TRUE(player_.IsPlaying());
EXPECT_EQ(3, demuxer_->num_data_requests());
}
-TEST_F(MediaSourcePlayerTest, NewSurfaceWhileChangingConfigs) {
+TEST_F(MediaSourcePlayerTest, DecoderDrainInterruptedBySurfaceChange) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test that no seek or duplicated demuxer config request results from a
- // SetVideoSurface() that occurs while the player is expecting new demuxer
- // configs. This test may be good to keep beyond browser seek hack.
- StartConfigChange(false, false, 1);
- MediaDecoderJob* first_job = GetMediaDecoderJob(false);
- EXPECT_TRUE(first_job);
- EXPECT_EQ(1, demuxer_->num_config_requests());
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ // Test that if the surface changes while the video decoder is being
+ // drained, the draining is canceled.
+ SendConfigChangeToDecoder(false, false, 0, false);
+ EXPECT_TRUE(IsDrainingDecoder(false));
CreateNextTextureAndSetVideoSurface();
+ WaitForVideoDecodeDone();
- // Surface change processing (including decoder job re-creation) should
- // not occur until the pending video config change is completed.
- EXPECT_EQ(first_job, GetMediaDecoderJob(false));
-
- player_.OnDemuxerConfigsAvailable(CreateVideoDemuxerConfigs());
- MediaDecoderJob* second_job = GetMediaDecoderJob(false);
- EXPECT_NE(first_job, second_job);
- EXPECT_TRUE(second_job);
-
+ EXPECT_FALSE(IsDrainingDecoder(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
+ EXPECT_TRUE(player_.IsPlaying());
EXPECT_EQ(3, demuxer_->num_data_requests());
- EXPECT_EQ(1, demuxer_->num_config_requests());
- EXPECT_EQ(0, demuxer_->num_seek_requests());
+
+ // Finish the browser seek introduced by surface change.
+ player_.OnDemuxerSeekDone(base::TimeDelta());
+ EXPECT_EQ(4, demuxer_->num_data_requests());
}
TEST_F(MediaSourcePlayerTest,
@@ -1755,7 +1885,7 @@ TEST_F(MediaSourcePlayerTest,
// Test video decoder starvation while handling a pending surface change
// should not cause any crashes.
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ StartVideoDecoderJob();
DemuxerData data = CreateReadFromDemuxerAckForVideo();
player_.OnDemuxerDataAvailable(data);
@@ -1763,14 +1893,18 @@ TEST_F(MediaSourcePlayerTest,
CreateNextTextureAndSetVideoSurface();
TriggerPlayerStarvation();
WaitForVideoDecodeDone();
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
// Surface change should trigger a seek.
+ player_.OnDemuxerDataAvailable(data);
EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
player_.OnDemuxerSeekDone(base::TimeDelta());
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ // After the seek is done, prefetch is handled first, so the
+ // MediaCodecBridge is not created at this point.
+ EXPECT_FALSE(GetMediaCodecBridge(false));
// A new data request should be sent.
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
}
TEST_F(MediaSourcePlayerTest, ReleaseWithOnPrefetchDoneAlreadyPosted) {
@@ -1782,17 +1916,18 @@ TEST_F(MediaSourcePlayerTest, ReleaseWithOnPrefetchDoneAlreadyPosted) {
// is posted to run |prefetch_cb| if the job already HasData().
// TODO(wolenetz): Remove MSP::set_decode_callback_for_testing() if this test
// becomes obsolete. See http://crbug.com/304234.
- StartAudioDecoderJob(true);
+ StartAudioDecoderJob();
// Escape the original prefetch by decoding a single access unit.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
- message_loop_.Run();
+ WaitForAudioDecodeDone();
// Prime the job with a few more access units, so that a later prefetch,
// triggered by starvation to simulate decoder underrun, can trivially
// post task to run OnPrefetchDone().
player_.OnDemuxerDataAvailable(
- CreateReadFromDemuxerAckWithConfigChanged(true, 4));
+ CreateReadFromDemuxerAckWithConfigChanged(
+ true, 4, CreateAudioDemuxerConfigs(kCodecVorbis, false)));
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
// Simulate decoder underrun, so trivial prefetch starts while still decoding.
@@ -1805,13 +1940,14 @@ TEST_F(MediaSourcePlayerTest, ReleaseWithOnPrefetchDoneAlreadyPosted) {
// occurs and should execute after the Release().
OnNextTestDecodeCallbackPostTaskToReleasePlayer();
- while (GetMediaDecoderJob(true))
- message_loop_.RunUntilIdle();
+ WaitForAudioDecodeDone();
EXPECT_TRUE(decoder_callback_hook_executed_);
- EXPECT_EQ(2, demuxer_->num_data_requests());
- // Player should have no decoder job until after Start().
- StartAudioDecoderJob(true);
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+
+ // Player should not request any new data since the access units haven't
+ // been fully decoded yet.
+ Resume(false, false);
}
TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekAndDone) {
@@ -1824,16 +1960,18 @@ TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekAndDone) {
StartAudioDecoderJobAndSeekToWhileDecoding(
base::TimeDelta::FromMilliseconds(100));
ReleasePlayer();
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+ WaitForAudioDecodeDone();
EXPECT_EQ(1, demuxer_->num_seek_requests());
player_.OnDemuxerSeekDone(kNoTimestamp());
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
- EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(GetMediaCodecBridge(true));
EXPECT_FALSE(player_.IsPlaying());
// Player should begin prefetch and resume preroll upon Start().
- EXPECT_EQ(1, demuxer_->num_data_requests());
- StartAudioDecoderJob(true);
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ Resume(true, false);
EXPECT_TRUE(IsPrerolling(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
@@ -1852,18 +1990,20 @@ TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekThenStart) {
StartAudioDecoderJobAndSeekToWhileDecoding(
base::TimeDelta::FromMilliseconds(100));
ReleasePlayer();
- EXPECT_EQ(1, demuxer_->num_seek_requests());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
- // Player should not prefetch upon Start() nor create the decoder job, due to
- // awaiting DemuxerSeekDone.
- EXPECT_EQ(1, demuxer_->num_data_requests());
- StartAudioDecoderJob(false);
+ // Player should not prefetch upon Start() nor create the media codec bridge,
+ // due to awaiting DemuxerSeekDone.
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ Resume(false, false);
+ WaitForAudioDecodeDone();
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
player_.OnDemuxerSeekDone(kNoTimestamp());
EXPECT_TRUE(GetMediaDecoderJob(true));
EXPECT_TRUE(IsPrerolling(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
// No further seek should have been requested since Release(), above.
EXPECT_EQ(1, demuxer_->num_seek_requests());
@@ -1883,12 +2023,12 @@ TEST_F(MediaSourcePlayerTest, SeekToThenDemuxerSeekThenReleaseThenSeekDone) {
ReleasePlayer();
player_.OnDemuxerSeekDone(kNoTimestamp());
EXPECT_FALSE(player_.IsPlaying());
- EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(GetMediaCodecBridge(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
// Player should begin prefetch and resume preroll upon Start().
- EXPECT_EQ(1, demuxer_->num_data_requests());
- StartAudioDecoderJob(true);
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ Resume(true, false);
EXPECT_TRUE(IsPrerolling(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
@@ -1909,64 +2049,42 @@ TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenStart) {
EXPECT_EQ(1, demuxer_->num_seek_requests());
ReleasePlayer();
- EXPECT_EQ(1, demuxer_->num_data_requests());
- StartAudioDecoderJob(false);
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ Resume(false, false);
player_.OnDemuxerSeekDone(kNoTimestamp());
- EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(GetMediaCodecBridge(true));
EXPECT_TRUE(IsPrerolling(true));
EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
// No further seek should have been requested since before Release(), above.
EXPECT_EQ(1, demuxer_->num_seek_requests());
}
-TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenConfigsAvailable) {
+TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenStart) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test if Release() occurs after |kConfigChanged| detected, new configs
- // requested of demuxer, and the requested configs arrive before the next
- // Start(), then the player completes the pending config change processing on
- // their receipt.
- StartConfigChange(true, true, 0);
+ // Test that if Release() occurs after |kConfigChanged| is processed, new
+ // data is requested of the demuxer, and the requested data arrives before
+ // the next Start(), then the player starts decoding the new data without
+ // any seek.
+ StartConfigChange(true, true, 0, false);
ReleasePlayer();
- player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
- EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_TRUE(GetMediaCodecBridge(true));
EXPECT_FALSE(player_.IsPlaying());
- EXPECT_EQ(1, demuxer_->num_data_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(
+ true, 4, CreateAudioDemuxerConfigs(kCodecVorbis, false)));
+ WaitForAudioDecodeDone();
+ EXPECT_FALSE(GetMediaCodecBridge(true));
// Player should resume upon Start(), even without further configs supplied.
player_.Start();
- EXPECT_TRUE(GetMediaDecoderJob(true));
- EXPECT_TRUE(player_.IsPlaying());
- EXPECT_EQ(2, demuxer_->num_data_requests());
-
- // No further config request should have occurred since StartConfigChange().
- EXPECT_EQ(1, demuxer_->num_config_requests());
-}
-
-TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenStart) {
- SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
-
- // Test if Release() occurs after |kConfigChanged| detected, new configs
- // requested of demuxer, and the requested configs arrive after the next
- // Start(), then the player pends job creation until the new configs arrive.
- StartConfigChange(true, true, 0);
- ReleasePlayer();
-
- player_.Start();
EXPECT_TRUE(player_.IsPlaying());
- EXPECT_FALSE(GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_->num_data_requests());
-
- player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
- EXPECT_TRUE(GetMediaDecoderJob(true));
- EXPECT_EQ(2, demuxer_->num_data_requests());
-
- // No further config request should have occurred since StartConfigChange().
- EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
}
TEST_F(MediaSourcePlayerTest, BrowserSeek_ThenReleaseThenDemuxerSeekDone) {
@@ -1982,13 +2100,13 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_ThenReleaseThenDemuxerSeekDone) {
player_.OnDemuxerSeekDone(expected_preroll_timestamp);
EXPECT_FALSE(player_.IsPlaying());
- EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
EXPECT_EQ(expected_preroll_timestamp, GetPrerollTimestamp());
// Player should begin prefetch and resume preroll upon Start().
- EXPECT_EQ(1, demuxer_->num_data_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(true);
+ Resume(false, true);
EXPECT_TRUE(IsPrerolling(false));
EXPECT_EQ(expected_preroll_timestamp, GetPrerollTimestamp());
EXPECT_EQ(expected_preroll_timestamp, player_.GetCurrentTime());
@@ -2009,19 +2127,25 @@ TEST_F(MediaSourcePlayerTest, BrowserSeek_ThenReleaseThenStart) {
base::TimeDelta expected_preroll_timestamp = player_.GetCurrentTime();
ReleasePlayer();
- EXPECT_EQ(1, demuxer_->num_data_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
CreateNextTextureAndSetVideoSurface();
- StartVideoDecoderJob(false);
+ Resume(false, false);
player_.OnDemuxerSeekDone(expected_preroll_timestamp);
- EXPECT_TRUE(GetMediaDecoderJob(false));
+ // Prefetch takes place first, and the decoder is not created yet.
+ EXPECT_FALSE(GetMediaCodecBridge(false));
EXPECT_TRUE(IsPrerolling(false));
EXPECT_EQ(expected_preroll_timestamp, GetPrerollTimestamp());
EXPECT_EQ(expected_preroll_timestamp, player_.GetCurrentTime());
- EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
// No further seek should have been requested since BrowserSeekPlayer().
EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ // The decoder will be created once data is received.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ while (!GetMediaCodecBridge(false))
+ message_loop_.RunUntilIdle();
}
// TODO(xhwang): Once we add tests to cover DrmBridge, update this test to
@@ -2035,86 +2159,46 @@ TEST_F(MediaSourcePlayerTest, SurfaceChangeClearedEvenIfMediaCryptoAbsent) {
// Test that |SURFACE_CHANGE_EVENT_PENDING| is not pending after
// SetVideoSurface() for a player configured for encrypted video, when the
// player has not yet received media crypto.
- DemuxerConfigs configs = CreateVideoDemuxerConfigs();
+ DemuxerConfigs configs = CreateVideoDemuxerConfigs(false);
configs.is_video_encrypted = true;
player_.OnDemuxerConfigsAvailable(configs);
CreateNextTextureAndSetVideoSurface();
- EXPECT_FALSE(IsPendingSurfaceChange());
- EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_FALSE(GetMediaCodecBridge(false));
}
-// TODO(xhwang): Enable this test when the test devices are updated.
-TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
- if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- VLOG(0) << "Could not run test - not supported on device.";
- return;
- }
-
- uint8 kWidevineUUID[] = { 0xED, 0xEF, 0x8B, 0xA9, 0x79, 0xD6, 0x4A, 0xCE,
- 0xA3, 0xC8, 0x27, 0xDC, 0xD5, 0x1D, 0x21, 0xED };
-
- std::vector<uint8> widevine_uuid(kWidevineUUID,
- kWidevineUUID + arraysize(kWidevineUUID));
-
- // We test "L3" fully. But for "L1" we don't check the result as it depend on
- // whether the test device supports "L1" decoding.
-
- std::vector<std::string> codec_avc(1, "avc1");
- std::vector<std::string> codec_aac(1, "mp4a");
- std::vector<std::string> codec_avc_aac(1, "avc1");
- codec_avc_aac.push_back("mp4a");
-
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoMp4, codec_avc));
- IsTypeSupported(widevine_uuid, "L1", kVideoMp4, codec_avc);
-
- // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
- // Clean this up after we have a solution to specifying decoding mode.
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kAudioMp4, codec_aac));
- IsTypeSupported(widevine_uuid, "L1", kAudioMp4, codec_aac);
-
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoMp4, codec_avc_aac));
- IsTypeSupported(widevine_uuid, "L1", kVideoMp4, codec_avc_aac);
-
- std::vector<std::string> codec_vp8(1, "vp8");
- std::vector<std::string> codec_vorbis(1, "vorbis");
- std::vector<std::string> codec_vp8_vorbis(1, "vp8");
- codec_vp8_vorbis.push_back("vorbis");
+TEST_F(MediaSourcePlayerTest, CurrentTimeUpdatedWhileDecoderStarved) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // TODO(xhwang): WebM is actually not supported but currently
- // MediaDrmBridge.isCryptoSchemeSupported() doesn't check the container type.
- // Fix isCryptoSchemeSupported() and update this test as necessary.
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kVideoWebM, codec_vp8));
- IsTypeSupported(widevine_uuid, "L1", kVideoWebM, codec_vp8);
+ // Test that the current time is updated while the decoder is starved.
+ StartAudioDecoderJob();
+ DecodeAudioDataUntilOutputBecomesAvailable();
- // TODO(xhwang): L1/L3 doesn't apply to audio, so the result is messy.
- // Clean this up after we have a solution to specifying decoding mode.
- EXPECT_TRUE(IsTypeSupported(widevine_uuid, "L3", kAudioWebM, codec_vorbis));
- IsTypeSupported(widevine_uuid, "L1", kAudioWebM, codec_vorbis);
+ // Trigger starvation while the decoder is decoding.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
+ manager_.ResetTimestampUpdated();
+ TriggerPlayerStarvation();
+ WaitForAudioDecodeDone();
- EXPECT_TRUE(
- IsTypeSupported(widevine_uuid, "L3", kVideoWebM, codec_vp8_vorbis));
- IsTypeSupported(widevine_uuid, "L1", kVideoWebM, codec_vp8_vorbis);
+ // Current time should be updated.
+ EXPECT_TRUE(manager_.timestamp_updated());
}
-TEST_F(MediaSourcePlayerTest, IsTypeSupported_InvalidUUID) {
- if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- VLOG(0) << "Could not run test - not supported on device.";
- return;
- }
+TEST_F(MediaSourcePlayerTest, CurrentTimeKeepsIncreasingAfterConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- uint8 kInvalidUUID[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
- 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
+ // Test that the current time keeps increasing after an audio config change.
+ StartAudioDecoderJob();
- std::vector<uint8> invalid_uuid(kInvalidUUID,
- kInvalidUUID + arraysize(kInvalidUUID));
+ DecodeAudioDataUntilOutputBecomesAvailable();
- std::vector<std::string> codec_avc(1, "avc1");
- EXPECT_FALSE(IsTypeSupported(invalid_uuid, "L3", kVideoMp4, codec_avc));
- EXPECT_FALSE(IsTypeSupported(invalid_uuid, "L1", kVideoMp4, codec_avc));
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis, true);
+ DemuxerData data = CreateReadFromDemuxerAckWithConfigChanged(
+ true, 0, configs);
+ player_.OnDemuxerDataAvailable(data);
+ WaitForAudioDecodeDone();
+ DecodeAudioDataUntilOutputBecomesAvailable();
}
-// TODO(xhwang): Are these IsTypeSupported tests device specific?
-// TODO(xhwang): Add more IsTypeSupported tests.
-
} // namespace media
diff --git a/chromium/media/base/android/video_decoder_job.cc b/chromium/media/base/android/video_decoder_job.cc
index 75124e7d0d7..d4e5f1e2987 100644
--- a/chromium/media/base/android/video_decoder_job.cc
+++ b/chromium/media/base/android/video_decoder_job.cc
@@ -8,6 +8,7 @@
#include "base/lazy_instance.h"
#include "base/threading/thread.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_drm_bridge.h"
namespace media {
@@ -24,43 +25,137 @@ class VideoDecoderThread : public base::Thread {
base::LazyInstance<VideoDecoderThread>::Leaky
g_video_decoder_thread = LAZY_INSTANCE_INITIALIZER;
-VideoDecoderJob* VideoDecoderJob::Create(const VideoCodec video_codec,
- bool is_secure,
- const gfx::Size& size,
- jobject surface,
- jobject media_crypto,
- const base::Closure& request_data_cb) {
- scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::CreateDecoder(
- video_codec, is_secure, size, surface, media_crypto));
- if (codec)
- return new VideoDecoderJob(codec.Pass(), request_data_cb);
-
- LOG(ERROR) << "Failed to create VideoDecoderJob.";
- return NULL;
-}
-
VideoDecoderJob::VideoDecoderJob(
- scoped_ptr<VideoCodecBridge> video_codec_bridge,
- const base::Closure& request_data_cb)
+ const base::Closure& request_data_cb,
+ const base::Closure& request_resources_cb,
+ const base::Closure& release_resources_cb,
+ const base::Closure& on_demuxer_config_changed_cb)
: MediaDecoderJob(g_video_decoder_thread.Pointer()->message_loop_proxy(),
- video_codec_bridge.get(), request_data_cb),
- video_codec_bridge_(video_codec_bridge.Pass()) {
+ request_data_cb,
+ on_demuxer_config_changed_cb),
+ video_codec_(kUnknownVideoCodec),
+ width_(0),
+ height_(0),
+ request_resources_cb_(request_resources_cb),
+ release_resources_cb_(release_resources_cb),
+ next_video_data_is_iframe_(true) {
+}
+
+VideoDecoderJob::~VideoDecoderJob() {}
+
+bool VideoDecoderJob::SetVideoSurface(gfx::ScopedJavaSurface surface) {
+ // For an empty surface, always pass it to the |media_codec_bridge_| so
+ // that it can detach from the current one. Otherwise, don't pass an
+ // unprotected surface if the video content requires a protected one.
+ if (!surface.IsEmpty() && IsProtectedSurfaceRequired() &&
+ !surface.is_protected()) {
+ return false;
+ }
+
+ surface_ = surface.Pass();
+ need_to_reconfig_decoder_job_ = true;
+ return true;
+}
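// Hypothetical caller sketch (the names below are illustrative, not the
// player's actual code): a false return means protected content was handed
// an unprotected surface, which the caller should treat as an error:
//
//   if (!video_decoder_job_->SetVideoSurface(surface.Pass()))
//     OnSurfaceRejected();  // e.g. report a media error to the client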
+
+bool VideoDecoderJob::HasStream() const {
+ return video_codec_ != kUnknownVideoCodec;
}
-VideoDecoderJob::~VideoDecoderJob() {
+void VideoDecoderJob::Flush() {
+ MediaDecoderJob::Flush();
+ next_video_data_is_iframe_ = true;
+}
+
+void VideoDecoderJob::ReleaseDecoderResources() {
+ MediaDecoderJob::ReleaseDecoderResources();
+ surface_ = gfx::ScopedJavaSurface();
}
void VideoDecoderJob::ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
bool render_output,
+ base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) {
- video_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, render_output);
- callback.Run(0u);
+ media_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, render_output);
+ callback.Run(current_presentation_timestamp, current_presentation_timestamp);
}
bool VideoDecoderJob::ComputeTimeToRender() const {
return true;
}
+void VideoDecoderJob::UpdateDemuxerConfigs(const DemuxerConfigs& configs) {
+ video_codec_ = configs.video_codec;
+ width_ = configs.video_size.width();
+ height_ = configs.video_size.height();
+ set_is_content_encrypted(configs.is_video_encrypted);
+}
+
+bool VideoDecoderJob::IsCodecReconfigureNeeded(
+ const DemuxerConfigs& configs) const {
+ if (!media_codec_bridge_)
+ return true;
+
+ if (!AreDemuxerConfigsChanged(configs))
+ return false;
+
+ bool only_size_changed = false;
+ if (video_codec_ == configs.video_codec &&
+ is_content_encrypted() == configs.is_video_encrypted) {
+ only_size_changed = true;
+ }
+
+ return !only_size_changed ||
+ !static_cast<VideoCodecBridge*>(media_codec_bridge_.get())->
+ IsAdaptivePlaybackSupported(configs.video_size.width(),
+ configs.video_size.height());
+}
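// Decision sketch for the logic above (illustrative values): for an H.264
// stream whose resolution changes from 640x360 to 1280x720 with the
// encryption state unchanged, |only_size_changed| is true, so the existing
// codec is kept if and only if IsAdaptivePlaybackSupported(1280, 720)
// returns true; any codec or encryption change always forces a reconfigure.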
+
+bool VideoDecoderJob::AreDemuxerConfigsChanged(
+ const DemuxerConfigs& configs) const {
+ return video_codec_ != configs.video_codec ||
+ is_content_encrypted() != configs.is_video_encrypted ||
+ width_ != configs.video_size.width() ||
+ height_ != configs.video_size.height();
+}
+
+bool VideoDecoderJob::CreateMediaCodecBridgeInternal() {
+ if (surface_.IsEmpty()) {
+ ReleaseMediaCodecBridge();
+ return false;
+ }
+
+ // If the next data is not an I-frame, return false so that the player will
+ // perform a browser seek.
+ if (!next_video_data_is_iframe_)
+ return false;
+
+ bool is_secure = is_content_encrypted() && drm_bridge() &&
+ drm_bridge()->IsProtectedSurfaceRequired();
+
+ media_codec_bridge_.reset(VideoCodecBridge::CreateDecoder(
+ video_codec_, is_secure, gfx::Size(width_, height_),
+ surface_.j_surface().obj(), GetMediaCrypto().obj()));
+
+ if (!media_codec_bridge_)
+ return false;
+
+ request_resources_cb_.Run();
+ return true;
+}
+
+void VideoDecoderJob::CurrentDataConsumed(bool is_config_change) {
+ next_video_data_is_iframe_ = is_config_change;
+}
+
+void VideoDecoderJob::OnMediaCodecBridgeReleased() {
+ release_resources_cb_.Run();
+}
+
+bool VideoDecoderJob::IsProtectedSurfaceRequired() {
+ return is_content_encrypted() && drm_bridge() &&
+ drm_bridge()->IsProtectedSurfaceRequired();
+}
+
} // namespace media
diff --git a/chromium/media/base/android/video_decoder_job.h b/chromium/media/base/android/video_decoder_job.h
index 41c15edc39e..aef2cc53c80 100644
--- a/chromium/media/base/android/video_decoder_job.h
+++ b/chromium/media/base/android/video_decoder_job.h
@@ -16,37 +16,74 @@ class VideoCodecBridge;
// Class for managing video decoding jobs.
class VideoDecoderJob : public MediaDecoderJob {
public:
- virtual ~VideoDecoderJob();
-
// Create a new VideoDecoderJob instance.
- // |video_codec| - The video format the object needs to decode.
- // |is_secure| - Whether secure decoding is required.
- // |size| - The natural size of the output frames.
- // |surface| - The surface to render the frames to.
- // |media_crypto| - Handle to a Java object responsible for decrypting the
- // video data.
// |request_data_cb| - Callback used to request more data for the decoder.
- static VideoDecoderJob* Create(const VideoCodec video_codec,
- bool is_secure,
- const gfx::Size& size,
- jobject surface,
- jobject media_crypto,
- const base::Closure& request_data_cb);
+ // |request_resources_cb| - Callback used to request resources.
+ // |release_resources_cb| - Callback used to release resources.
+ // |on_demuxer_config_changed_cb| - Callback used to inform the caller that
+ // demuxer config has changed.
+ VideoDecoderJob(
+ const base::Closure& request_data_cb,
+ const base::Closure& request_resources_cb,
+ const base::Closure& release_resources_cb,
+ const base::Closure& on_demuxer_config_changed_cb);
+ virtual ~VideoDecoderJob();
- private:
- VideoDecoderJob(scoped_ptr<VideoCodecBridge> video_codec_bridge,
- const base::Closure& request_data_cb);
+ // Passes a Java surface object to the codec. Returns true if the surface
+ // can be used by the decoder, or false otherwise.
+ bool SetVideoSurface(gfx::ScopedJavaSurface surface);
// MediaDecoderJob implementation.
+ virtual bool HasStream() const OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual void ReleaseDecoderResources() OVERRIDE;
+
+ bool next_video_data_is_iframe() {
+ return next_video_data_is_iframe_;
+ }
+
+ int width() const { return width_; }
+ int height() const { return height_; }
+
+ private:
+ // MediaDecoderJob implementation.
virtual void ReleaseOutputBuffer(
int output_buffer_index,
size_t size,
bool render_output,
+ base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) OVERRIDE;
-
virtual bool ComputeTimeToRender() const OVERRIDE;
+ virtual void UpdateDemuxerConfigs(const DemuxerConfigs& configs) OVERRIDE;
+ virtual bool IsCodecReconfigureNeeded(
+ const DemuxerConfigs& configs) const OVERRIDE;
+ virtual bool AreDemuxerConfigsChanged(
+ const DemuxerConfigs& configs) const OVERRIDE;
+ virtual bool CreateMediaCodecBridgeInternal() OVERRIDE;
+ virtual void CurrentDataConsumed(bool is_config_change) OVERRIDE;
+ virtual void OnMediaCodecBridgeReleased() OVERRIDE;
+
+ // Returns true if a protected surface is required for video playback.
+ bool IsProtectedSurfaceRequired();
+
+ // Video configs from the demuxer.
+ VideoCodec video_codec_;
+ int width_;
+ int height_;
+
+ // The surface object currently owned by the player.
+ gfx::ScopedJavaSurface surface_;
+
+ // Callbacks to inform the caller about decoder resources change.
+ base::Closure request_resources_cb_;
+ base::Closure release_resources_cb_;
+
+ // Track whether the next access unit is an I-frame. The first access
+ // unit after Flush() and CurrentDataConsumed(true) is guaranteed to be an
+ // I-frame.
+ bool next_video_data_is_iframe_;
- scoped_ptr<VideoCodecBridge> video_codec_bridge_;
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoderJob);
};
} // namespace media
diff --git a/chromium/media/base/audio_buffer.cc b/chromium/media/base/audio_buffer.cc
index 0bf37209b2b..33d4ecbb1ab 100644
--- a/chromium/media/base/audio_buffer.cc
+++ b/chromium/media/base/audio_buffer.cc
@@ -11,23 +11,37 @@
namespace media {
+static base::TimeDelta CalculateDuration(int frames, double sample_rate) {
+ DCHECK_GT(sample_rate, 0);
+ return base::TimeDelta::FromMicroseconds(
+ frames * base::Time::kMicrosecondsPerSecond / sample_rate);
+}
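// Worked example of the duration math above (illustrative values): 441
// frames at a 44100 Hz sample rate yield
//   441 * 1000000 / 44100 = 10000 microseconds, i.e. a 10 ms duration.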
+
AudioBuffer::AudioBuffer(SampleFormat sample_format,
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count,
bool create_buffer,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration)
+ const base::TimeDelta timestamp)
: sample_format_(sample_format),
+ channel_layout_(channel_layout),
channel_count_(channel_count),
+ sample_rate_(sample_rate),
adjusted_frame_count_(frame_count),
trim_start_(0),
end_of_stream_(!create_buffer && data == NULL && frame_count == 0),
timestamp_(timestamp),
- duration_(duration) {
- CHECK_GE(channel_count, 0);
- CHECK_LE(channel_count, limits::kMaxChannels);
+ duration_(end_of_stream_
+ ? base::TimeDelta()
+ : CalculateDuration(adjusted_frame_count_, sample_rate_)) {
+ CHECK_GE(channel_count_, 0);
+ CHECK_LE(channel_count_, limits::kMaxChannels);
CHECK_GE(frame_count, 0);
+ DCHECK(channel_layout == CHANNEL_LAYOUT_DISCRETE ||
+ ChannelLayoutToChannelCount(channel_layout) == channel_count);
+
int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
DCHECK_LE(bytes_per_channel, kChannelAlignment);
int data_size = frame_count * bytes_per_channel;
@@ -46,11 +60,11 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
// Allocate a contiguous buffer for all the channel data.
data_.reset(static_cast<uint8*>(base::AlignedAlloc(
- channel_count * block_size_per_channel, kChannelAlignment)));
- channel_data_.reserve(channel_count);
+ channel_count_ * block_size_per_channel, kChannelAlignment)));
+ channel_data_.reserve(channel_count_);
// Copy each channel's data into the appropriate spot.
- for (int i = 0; i < channel_count; ++i) {
+ for (int i = 0; i < channel_count_; ++i) {
channel_data_.push_back(data_.get() + i * block_size_per_channel);
if (data)
memcpy(channel_data_[i], data[i], data_size);
@@ -65,7 +79,7 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
sample_format_ == kSampleFormatF32) << sample_format_;
// Allocate our own buffer and copy the supplied data into it. Buffer must
// contain the data for all channels.
- data_size *= channel_count;
+ data_size *= channel_count_;
data_.reset(
static_cast<uint8*>(base::AlignedAlloc(data_size, kChannelAlignment)));
channel_data_.reserve(1);
@@ -79,58 +93,72 @@ AudioBuffer::~AudioBuffer() {}
// static
scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
SampleFormat sample_format,
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration) {
+ const base::TimeDelta timestamp) {
// If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
CHECK(data[0]);
return make_scoped_refptr(new AudioBuffer(sample_format,
+ channel_layout,
channel_count,
+ sample_rate,
frame_count,
true,
data,
- timestamp,
- duration));
+ timestamp));
}
// static
-scoped_refptr<AudioBuffer> AudioBuffer::CreateBuffer(SampleFormat sample_format,
- int channel_count,
- int frame_count) {
+scoped_refptr<AudioBuffer> AudioBuffer::CreateBuffer(
+ SampleFormat sample_format,
+ ChannelLayout channel_layout,
+ int channel_count,
+ int sample_rate,
+ int frame_count) {
CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
return make_scoped_refptr(new AudioBuffer(sample_format,
+ channel_layout,
channel_count,
+ sample_rate,
frame_count,
true,
NULL,
- kNoTimestamp(),
kNoTimestamp()));
}
// static
scoped_refptr<AudioBuffer> AudioBuffer::CreateEmptyBuffer(
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration) {
+ const base::TimeDelta timestamp) {
CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
// Since data == NULL, format doesn't matter.
return make_scoped_refptr(new AudioBuffer(kSampleFormatF32,
+ channel_layout,
channel_count,
+ sample_rate,
frame_count,
false,
NULL,
- timestamp,
- duration));
+ timestamp));
}
// static
scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
- return make_scoped_refptr(new AudioBuffer(
- kUnknownSampleFormat, 1, 0, false, NULL, kNoTimestamp(), kNoTimestamp()));
+ return make_scoped_refptr(new AudioBuffer(kUnknownSampleFormat,
+ CHANNEL_LAYOUT_NONE,
+ 0,
+ 0,
+ 0,
+ false,
+ NULL,
+ kNoTimestamp()));
}
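// Caller sketch for the updated factory signatures (illustrative values;
// |left| and |right| are hypothetical channel pointers). The duration is
// now derived from the frame count and sample rate rather than passed in:
//
//   const uint8* channels[] = { left, right };
//   scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
//       kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 2, 48000, 480,
//       channels, base::TimeDelta());
//   // 480 frames at 48000 Hz -> a 10 ms duration.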
// Convert int16 values in the range [kint16min, kint16max] to [-1.0, 1.0].
@@ -219,33 +247,66 @@ void AudioBuffer::TrimStart(int frames_to_trim) {
CHECK_GE(frames_to_trim, 0);
CHECK_LE(frames_to_trim, adjusted_frame_count_);
- // Adjust timestamp_ and duration_ to reflect the smaller number of frames.
- double offset = static_cast<double>(duration_.InMicroseconds()) *
- frames_to_trim / adjusted_frame_count_;
- base::TimeDelta offset_as_time =
- base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
- timestamp_ += offset_as_time;
- duration_ -= offset_as_time;
-
- // Finally adjust the number of frames in this buffer and where the start
- // really is.
+ // Adjust the number of frames in this buffer and where the start really is.
adjusted_frame_count_ -= frames_to_trim;
trim_start_ += frames_to_trim;
+
+ // Adjust timestamp_ and duration_ to reflect the smaller number of frames.
+ const base::TimeDelta old_duration = duration_;
+ duration_ = CalculateDuration(adjusted_frame_count_, sample_rate_);
+ timestamp_ += old_duration - duration_;
}
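// Illustrative check of the new trim math (assumed values): a 44100 Hz
// buffer holding 4410 frames spans 100 ms. TrimStart(441) leaves 3969
// frames (90 ms), so timestamp_ advances by old_duration - duration_ =
// 100 ms - 90 ms = 10 ms, exactly the span of the removed frames.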
void AudioBuffer::TrimEnd(int frames_to_trim) {
CHECK_GE(frames_to_trim, 0);
CHECK_LE(frames_to_trim, adjusted_frame_count_);
- // Adjust duration_ only to reflect the smaller number of frames.
- double offset = static_cast<double>(duration_.InMicroseconds()) *
- frames_to_trim / adjusted_frame_count_;
- base::TimeDelta offset_as_time =
- base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
- duration_ -= offset_as_time;
-
- // Finally adjust the number of frames in this buffer.
+ // Adjust the number of frames and duration for this buffer.
adjusted_frame_count_ -= frames_to_trim;
+ duration_ = CalculateDuration(adjusted_frame_count_, sample_rate_);
+}
+
+void AudioBuffer::TrimRange(int start, int end) {
+ CHECK_GE(start, 0);
+ CHECK_LE(end, adjusted_frame_count_);
+
+ const int frames_to_trim = end - start;
+ CHECK_GE(frames_to_trim, 0);
+ CHECK_LE(frames_to_trim, adjusted_frame_count_);
+
+ const int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format_);
+ const int frames_to_copy = adjusted_frame_count_ - end;
+ if (frames_to_copy > 0) {
+ switch (sample_format_) {
+ case kSampleFormatPlanarS16:
+ case kSampleFormatPlanarF32:
+ // Planar data must be shifted per channel.
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ memmove(channel_data_[ch] + (trim_start_ + start) * bytes_per_channel,
+ channel_data_[ch] + (trim_start_ + end) * bytes_per_channel,
+ bytes_per_channel * frames_to_copy);
+ }
+ break;
+ case kSampleFormatU8:
+ case kSampleFormatS16:
+ case kSampleFormatS32:
+ case kSampleFormatF32: {
+ // Interleaved data can be shifted all at once.
+ const int frame_size = channel_count_ * bytes_per_channel;
+ memmove(channel_data_[0] + (trim_start_ + start) * frame_size,
+ channel_data_[0] + (trim_start_ + end) * frame_size,
+ frame_size * frames_to_copy);
+ break;
+ }
+ case kUnknownSampleFormat:
+ NOTREACHED() << "Invalid sample format!";
+ }
+ } else {
+ CHECK_EQ(frames_to_copy, 0);
+ }
+
+ // Trim the leftover data off the end of the buffer and update duration.
+ TrimEnd(frames_to_trim);
}
} // namespace media
diff --git a/chromium/media/base/audio_buffer.h b/chromium/media/base/audio_buffer.h
index c3bcf4dab9c..a07985c9fd8 100644
--- a/chromium/media/base/audio_buffer.h
+++ b/chromium/media/base/audio_buffer.h
@@ -11,6 +11,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
+#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
#include "media/base/sample_format.h"
@@ -33,27 +34,29 @@ class MEDIA_EXPORT AudioBuffer
// number of buffers must be equal to |channel_count|. |frame_count| is the
// number of frames in each buffer. |data| must not be null and |frame_count|
// must be >= 0.
- //
- // TODO(jrummell): Compute duration rather than pass it in.
static scoped_refptr<AudioBuffer> CopyFrom(SampleFormat sample_format,
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration);
+ const base::TimeDelta timestamp);
// Create an AudioBuffer with |frame_count| frames. Buffer is allocated, but
// not initialized. Timestamp and duration are set to kNoTimestamp().
static scoped_refptr<AudioBuffer> CreateBuffer(SampleFormat sample_format,
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count);
// Create an empty AudioBuffer with |frame_count| frames.
static scoped_refptr<AudioBuffer> CreateEmptyBuffer(
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration);
+ const base::TimeDelta timestamp);
// Create a AudioBuffer indicating we've reached end of stream.
// Calling any method other than end_of_stream() on the resulting buffer
@@ -80,21 +83,25 @@ class MEDIA_EXPORT AudioBuffer
// Duration is adjusted to reflect the fewer frames.
void TrimEnd(int frames_to_trim);
+ // Trim an AudioBuffer by removing |end - start| frames from [|start|, |end|).
+ // Even if |start| is zero, timestamp() is not adjusted, only duration().
+ void TrimRange(int start, int end);
+
// Return the number of channels.
int channel_count() const { return channel_count_; }
// Return the number of frames held.
int frame_count() const { return adjusted_frame_count_; }
- // Access to constructor parameters.
+ // Return the sample rate.
+ int sample_rate() const { return sample_rate_; }
+
+ // Return the channel layout.
+ ChannelLayout channel_layout() const { return channel_layout_; }
+
base::TimeDelta timestamp() const { return timestamp_; }
base::TimeDelta duration() const { return duration_; }
-
- // TODO(jrummell): Remove set_timestamp() and set_duration() once
- // DecryptingAudioDecoder::EnqueueFrames() is changed to set them when
- // creating the buffer. See http://crbug.com/255261.
void set_timestamp(base::TimeDelta timestamp) { timestamp_ = timestamp; }
- void set_duration(base::TimeDelta duration) { duration_ = duration; }
// If there's no data in this buffer, it represents end of stream.
bool end_of_stream() const { return end_of_stream_; }
@@ -112,17 +119,20 @@ class MEDIA_EXPORT AudioBuffer
// data is copied. If |create_buffer| is false, no data buffer is created (or
// copied to).
AudioBuffer(SampleFormat sample_format,
+ ChannelLayout channel_layout,
int channel_count,
+ int sample_rate,
int frame_count,
bool create_buffer,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration);
+ const base::TimeDelta timestamp);
virtual ~AudioBuffer();
const SampleFormat sample_format_;
+ const ChannelLayout channel_layout_;
const int channel_count_;
+ const int sample_rate_;
int adjusted_frame_count_;
int trim_start_;
const bool end_of_stream_;
@@ -130,7 +140,7 @@ class MEDIA_EXPORT AudioBuffer
base::TimeDelta duration_;
// Contiguous block of channel data.
- scoped_ptr_malloc<uint8, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<uint8, base::AlignedFreeDeleter> data_;
// For planar data, points to each channels data.
std::vector<uint8*> channel_data_;
diff --git a/chromium/media/base/audio_buffer_converter.cc b/chromium/media/base/audio_buffer_converter.cc
new file mode 100644
index 00000000000..59c66811d13
--- /dev/null
+++ b/chromium/media/base/audio_buffer_converter.cc
@@ -0,0 +1,249 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_buffer_converter.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+#include "media/base/sinc_resampler.h"
+#include "media/base/vector_math.h"
+
+namespace media {
+
+// Is the config presented by |buffer| a config change from |params|?
+static bool IsConfigChange(const AudioParameters& params,
+ const scoped_refptr<AudioBuffer>& buffer) {
+ return buffer->sample_rate() != params.sample_rate() ||
+ buffer->channel_count() != params.channels() ||
+ buffer->channel_layout() != params.channel_layout();
+}
+
+AudioBufferConverter::AudioBufferConverter(const AudioParameters& output_params)
+ : output_params_(output_params),
+ input_params_(output_params),
+ last_input_buffer_offset_(0),
+ input_frames_(0),
+ buffered_input_frames_(0.0),
+ io_sample_rate_ratio_(1.0),
+ timestamp_helper_(output_params_.sample_rate()),
+ is_flushing_(false) {}
+
+AudioBufferConverter::~AudioBufferConverter() {}
+
+void AudioBufferConverter::AddInput(const scoped_refptr<AudioBuffer>& buffer) {
+ // On EOS, flush any remaining buffered data.
+ if (buffer->end_of_stream()) {
+ Flush();
+ queued_outputs_.push_back(buffer);
+ return;
+ }
+
+ // We'll need a new |audio_converter_| if there was a config change.
+ if (IsConfigChange(input_params_, buffer))
+ ResetConverter(buffer);
+
+ // Pass straight through if there's no work to be done.
+ if (!audio_converter_) {
+ queued_outputs_.push_back(buffer);
+ return;
+ }
+
+ if (timestamp_helper_.base_timestamp() == kNoTimestamp())
+ timestamp_helper_.SetBaseTimestamp(buffer->timestamp());
+
+ queued_inputs_.push_back(buffer);
+ input_frames_ += buffer->frame_count();
+
+ ConvertIfPossible();
+}
+
+bool AudioBufferConverter::HasNextBuffer() { return !queued_outputs_.empty(); }
+
+scoped_refptr<AudioBuffer> AudioBufferConverter::GetNextBuffer() {
+ DCHECK(!queued_outputs_.empty());
+ scoped_refptr<AudioBuffer> out = queued_outputs_.front();
+ queued_outputs_.pop_front();
+ return out;
+}
+
+void AudioBufferConverter::Reset() {
+ audio_converter_.reset();
+ queued_inputs_.clear();
+ queued_outputs_.clear();
+ timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+ input_params_ = output_params_;
+ input_frames_ = 0;
+ buffered_input_frames_ = 0.0;
+ last_input_buffer_offset_ = 0;
+}
+
+void AudioBufferConverter::ResetTimestampState() {
+ Flush();
+ timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+}
+
+double AudioBufferConverter::ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) {
+ DCHECK(is_flushing_ || input_frames_ >= audio_bus->frames());
+
+ int requested_frames_left = audio_bus->frames();
+ int dest_index = 0;
+
+ while (requested_frames_left > 0 && !queued_inputs_.empty()) {
+ scoped_refptr<AudioBuffer> input_buffer = queued_inputs_.front();
+
+ int frames_to_read =
+ std::min(requested_frames_left,
+ input_buffer->frame_count() - last_input_buffer_offset_);
+ input_buffer->ReadFrames(
+ frames_to_read, last_input_buffer_offset_, dest_index, audio_bus);
+ last_input_buffer_offset_ += frames_to_read;
+
+ if (last_input_buffer_offset_ == input_buffer->frame_count()) {
+ // We've consumed all the frames in |input_buffer|.
+ queued_inputs_.pop_front();
+ last_input_buffer_offset_ = 0;
+ }
+
+ requested_frames_left -= frames_to_read;
+ dest_index += frames_to_read;
+ }
+
+ // If we're flushing, zero any extra space; otherwise we should always have
+ // enough data to completely fulfill the request.
+ if (is_flushing_ && requested_frames_left > 0) {
+ audio_bus->ZeroFramesPartial(audio_bus->frames() - requested_frames_left,
+ requested_frames_left);
+ } else {
+ DCHECK_EQ(requested_frames_left, 0);
+ }
+
+ input_frames_ -= audio_bus->frames() - requested_frames_left;
+ DCHECK_GE(input_frames_, 0);
+
+ buffered_input_frames_ += audio_bus->frames() - requested_frames_left;
+
+ // Full volume.
+ return 1.0;
+}
+
+void AudioBufferConverter::ResetConverter(
+ const scoped_refptr<AudioBuffer>& buffer) {
+ Flush();
+ audio_converter_.reset();
+ input_params_.Reset(
+ input_params_.format(),
+ buffer->channel_layout(),
+ buffer->channel_count(),
+ 0,
+ buffer->sample_rate(),
+ input_params_.bits_per_sample(),
+ // If resampling is needed and the FIFO disabled, the AudioConverter will
+ // always request SincResampler::kDefaultRequestSize frames. Otherwise it
+ // will use the output frame size.
+ buffer->sample_rate() == output_params_.sample_rate()
+ ? output_params_.frames_per_buffer()
+ : SincResampler::kDefaultRequestSize);
+
+ io_sample_rate_ratio_ = static_cast<double>(input_params_.sample_rate()) /
+ output_params_.sample_rate();
+
+ // If |buffer| matches |output_params_| we don't need an AudioConverter at
+ // all, and can early-out here.
+ if (!IsConfigChange(output_params_, buffer))
+ return;
+
+ // Note: The FIFO is disabled to avoid extraneous memcpy().
+ audio_converter_.reset(
+ new AudioConverter(input_params_, output_params_, true));
+ audio_converter_->AddInput(this);
+}
+
+void AudioBufferConverter::ConvertIfPossible() {
+ DCHECK(audio_converter_);
+
+ int request_frames = 0;
+
+ if (is_flushing_) {
+ // If we're flushing we want to convert *everything* even if this means
+ // we'll have to pad some silence in ProvideInput().
+ request_frames =
+ ceil((buffered_input_frames_ + input_frames_) / io_sample_rate_ratio_);
+ } else {
+ // How many calls to ProvideInput() we can satisfy completely.
+ int chunks = input_frames_ / input_params_.frames_per_buffer();
+
+ // How many output frames that corresponds to:
+ request_frames = chunks * audio_converter_->ChunkSize();
+ }
+
+ if (!request_frames)
+ return;
+
+ scoped_refptr<AudioBuffer> output_buffer =
+ AudioBuffer::CreateBuffer(kSampleFormatPlanarF32,
+ output_params_.channel_layout(),
+ output_params_.channels(),
+ output_params_.sample_rate(),
+ request_frames);
+ scoped_ptr<AudioBus> output_bus =
+ AudioBus::CreateWrapper(output_buffer->channel_count());
+
+ int frames_remaining = request_frames;
+
+ // The AudioConverter wants requests of a fixed size, so we'll slide an
+ // AudioBus of that size across the |output_buffer|.
+ while (frames_remaining != 0) {
+ // It's important that the request size be a multiple of
+ // AudioBus::kChannelAlignment in all requests except the last; otherwise
+ // downstream SIMD optimizations will crash on unaligned data.
+ const int frames_this_iteration = std::min(
+ static_cast<int>(SincResampler::kDefaultRequestSize), frames_remaining);
+ const int offset_into_buffer =
+ output_buffer->frame_count() - frames_remaining;
+
+ // Wrap the portion of the AudioBuffer in an AudioBus so the AudioConverter
+ // can fill it.
+ output_bus->set_frames(frames_this_iteration);
+ for (int ch = 0; ch < output_buffer->channel_count(); ++ch) {
+ output_bus->SetChannelData(
+ ch,
+ reinterpret_cast<float*>(output_buffer->channel_data()[ch]) +
+ offset_into_buffer);
+ }
+
+ // Do the actual conversion.
+ audio_converter_->Convert(output_bus.get());
+ frames_remaining -= frames_this_iteration;
+ buffered_input_frames_ -= frames_this_iteration * io_sample_rate_ratio_;
+ }
+
+ // Compute the timestamp.
+ output_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
+ timestamp_helper_.AddFrames(request_frames);
+
+ queued_outputs_.push_back(output_buffer);
+}
+
+void AudioBufferConverter::Flush() {
+ if (!audio_converter_)
+ return;
+ is_flushing_ = true;
+ ConvertIfPossible();
+ is_flushing_ = false;
+ audio_converter_->Reset();
+ DCHECK_EQ(input_frames_, 0);
+ DCHECK_EQ(last_input_buffer_offset_, 0);
+ DCHECK_LT(buffered_input_frames_, 1.0);
+ DCHECK(queued_inputs_.empty());
+ buffered_input_frames_ = 0.0;
+}
+
+} // namespace media
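
For orientation, a minimal usage sketch of the new class; it is not part of
the patch, and decoded_buffer and Play() are hypothetical stand-ins for a
decoder's output and an audio sink:

    // Convert arbitrary decoder output to the hardware output format.
    AudioParameters output_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                  CHANNEL_LAYOUT_STEREO,
                                  44100,  // sample rate
                                  16,     // bits per sample
                                  440);   // frames per buffer
    AudioBufferConverter converter(output_params);
    converter.AddInput(decoded_buffer);                  // any rate/layout
    converter.AddInput(AudioBuffer::CreateEOSBuffer());  // flushes the tail
    while (converter.HasNextBuffer()) {
      scoped_refptr<AudioBuffer> out = converter.GetNextBuffer();
      if (!out->end_of_stream())
        Play(out);  // |out| now matches output_params
    }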
diff --git a/chromium/media/base/audio_buffer_converter.h b/chromium/media/base/audio_buffer_converter.h
new file mode 100644
index 00000000000..9efbd16a233
--- /dev/null
+++ b/chromium/media/base/audio_buffer_converter.h
@@ -0,0 +1,108 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUFFER_CONVERTER_H_
+#define MEDIA_BASE_AUDIO_BUFFER_CONVERTER_H_
+
+#include <deque>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_converter.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBuffer;
+class AudioBus;
+
+// Takes AudioBuffers in any format and uses an AudioConverter to convert them
+// to a common format (usually the hardware output format).
+class MEDIA_EXPORT AudioBufferConverter : public AudioConverter::InputCallback {
+ public:
+ explicit AudioBufferConverter(const AudioParameters& output_params);
+ virtual ~AudioBufferConverter();
+
+ void AddInput(const scoped_refptr<AudioBuffer>& buffer);
+
+ // Is an output buffer available via GetNextBuffer()?
+ bool HasNextBuffer();
+
+ // This should only be called if HasNextBuffer() returns true.
+ scoped_refptr<AudioBuffer> GetNextBuffer();
+
+ // Reset internal state.
+ void Reset();
+
+ // Reset internal timestamp state. Upon the next AddInput() call, our base
+ // timestamp will be set to match the input buffer.
+ void ResetTimestampState();
+
+ int input_buffer_size_for_testing() const {
+ return input_params_.frames_per_buffer();
+ }
+ int input_frames_left_for_testing() const {
+ return input_frames_;
+ }
+
+ private:
+ // Callback to provide data to the AudioConverter.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
+ // Reset the converter in response to a configuration change.
+ void ResetConverter(const scoped_refptr<AudioBuffer>& input_buffer);
+
+ // Perform conversion if we have enough data.
+ void ConvertIfPossible();
+
+ // Flush remaining output.
+ void Flush();
+
+ // The output parameters.
+ AudioParameters output_params_;
+
+ // The current input parameters (we cache these to detect configuration
+ // changes, so we know when to reset the AudioConverter).
+ AudioParameters input_params_;
+
+ typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
+
+ // Queued input buffers. Little data ever accumulates here: as soon as there
+ // is enough to produce an output buffer, we do so.
+ BufferQueue queued_inputs_;
+
+ // Offset into the front element of |queued_inputs_|. A ProvideInput() call
+ // doesn't necessarily consume an entire buffer.
+ int last_input_buffer_offset_;
+
+ // Buffer of output frames, to be returned by GetNextBuffer().
+ BufferQueue queued_outputs_;
+
+ // How many frames of input we have in |queued_inputs_|.
+ int input_frames_;
+
+ // Input frames in the AudioConverter's internal buffers.
+ double buffered_input_frames_;
+
+ // Ratio of sample rates, in/out.
+ double io_sample_rate_ratio_;
+
+ // Computes timestamps in terms of the output sample rate.
+ AudioTimestampHelper timestamp_helper_;
+
+ // Are we flushing everything, even if ProvideInput() can no longer fill a
+ // complete AudioBus for the AudioConverter?
+ bool is_flushing_;
+
+ // The AudioConverter which does the real work here.
+ scoped_ptr<AudioConverter> audio_converter_;
+};
+
+} // namespace media
+
+#endif  // MEDIA_BASE_AUDIO_BUFFER_CONVERTER_H_
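
The two reset paths above differ in what they preserve. A hedged sketch of the
intended call sites (hypothetical; the patch itself only defines the methods):

    // Seek: everything queued is stale, so drop it.
    converter->Reset();
    // Timestamp discontinuity: keep converting the buffered data, but rebase
    // output timestamps on the next AddInput().
    converter->ResetTimestampState();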
diff --git a/chromium/media/base/audio_buffer_converter_unittest.cc b/chromium/media/base/audio_buffer_converter_unittest.cc
new file mode 100644
index 00000000000..34459963e77
--- /dev/null
+++ b/chromium/media/base/audio_buffer_converter_unittest.cc
@@ -0,0 +1,255 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_converter.h"
+#include "media/base/sinc_resampler.h"
+#include "media/base/test_helpers.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Important: Use an odd buffer size here so SIMD issues are caught.
+const int kOutFrameSize = 441;
+const int kOutSampleRate = 44100;
+const ChannelLayout kOutChannelLayout = CHANNEL_LAYOUT_STEREO;
+const int kOutChannelCount = 2;
+
+static scoped_refptr<AudioBuffer> MakeTestBuffer(int sample_rate,
+ ChannelLayout channel_layout,
+ int channel_count,
+ int frames) {
+ return MakeAudioBuffer<uint8>(kSampleFormatU8,
+ channel_layout,
+ channel_count,
+ sample_rate,
+ 0,
+ 1,
+ frames,
+ base::TimeDelta::FromSeconds(0));
+}
+
+class AudioBufferConverterTest : public ::testing::Test {
+ public:
+ AudioBufferConverterTest()
+ : input_frames_(0),
+ expected_output_frames_(0.0),
+ output_frames_(0),
+ output_params_(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutChannelLayout,
+ kOutSampleRate,
+ 16,
+ kOutFrameSize) {
+ audio_buffer_converter_.reset(new AudioBufferConverter(output_params_));
+ }
+
+ void Reset() {
+ audio_buffer_converter_->Reset();
+ output_frames_ = expected_output_frames_ = input_frames_ = 0;
+ }
+
+ void AddInput(const scoped_refptr<AudioBuffer>& in) {
+ if (!in->end_of_stream()) {
+ input_frames_ += in->frame_count();
+ expected_output_frames_ +=
+ in->frame_count() *
+ (static_cast<double>(output_params_.sample_rate()) /
+ in->sample_rate());
+ }
+ audio_buffer_converter_->AddInput(in);
+ }
+
+ void ConsumeOutput() {
+ ASSERT_TRUE(audio_buffer_converter_->HasNextBuffer());
+ scoped_refptr<AudioBuffer> out = audio_buffer_converter_->GetNextBuffer();
+ if (!out->end_of_stream()) {
+ output_frames_ += out->frame_count();
+ EXPECT_EQ(out->sample_rate(), output_params_.sample_rate());
+ EXPECT_EQ(out->channel_layout(), output_params_.channel_layout());
+ EXPECT_EQ(out->channel_count(), output_params_.channels());
+ } else {
+ EXPECT_FALSE(audio_buffer_converter_->HasNextBuffer());
+ }
+ }
+
+ void ConsumeAllOutput() {
+ AddInput(AudioBuffer::CreateEOSBuffer());
+ while (audio_buffer_converter_->HasNextBuffer())
+ ConsumeOutput();
+ EXPECT_EQ(output_frames_, ceil(expected_output_frames_));
+ }
+
+ protected:
+ scoped_ptr<AudioBufferConverter> audio_buffer_converter_;
+
+ int input_frames_;
+ double expected_output_frames_;
+ int output_frames_;
+ AudioParameters output_params_;
+};
+
+TEST_F(AudioBufferConverterTest, PassThrough) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(kOutSampleRate, kOutChannelLayout, kOutChannelCount, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Downsample) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(48000, kOutChannelLayout, kOutChannelCount, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Upsample) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(8000, kOutChannelLayout, kOutChannelCount, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+// Test resampling a buffer smaller than the SincResampler's kernel size.
+TEST_F(AudioBufferConverterTest, Resample_TinyBuffer) {
+ AddInput(MakeTestBuffer(
+ 48000, CHANNEL_LAYOUT_STEREO, 2, SincResampler::kKernelSize - 1));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Resample_DifferingBufferSizes) {
+ const int input_sample_rate = 48000;
+ AddInput(MakeTestBuffer(
+ input_sample_rate, kOutChannelLayout, kOutChannelCount, 100));
+ AddInput(MakeTestBuffer(
+ input_sample_rate, kOutChannelLayout, kOutChannelCount, 200));
+ AddInput(MakeTestBuffer(
+ input_sample_rate, kOutChannelLayout, kOutChannelCount, 300));
+ AddInput(MakeTestBuffer(
+ input_sample_rate, kOutChannelLayout, kOutChannelCount, 400));
+ AddInput(MakeTestBuffer(
+ input_sample_rate, kOutChannelLayout, kOutChannelCount, 500));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ChannelDownmix) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_MONO, 1, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ChannelUpmix) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_5_1, 6, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ResampleAndRemix) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(48000, CHANNEL_LAYOUT_5_1, 6, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_SampleRate) {
+ AddInput(MakeTestBuffer(48000, kOutChannelLayout, kOutChannelCount, 512));
+ AddInput(MakeTestBuffer(44100, kOutChannelLayout, kOutChannelCount, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_ChannelLayout) {
+ AddInput(MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_STEREO, 2, 512));
+ AddInput(MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_MONO, 1, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_SampleRateAndChannelLayout) {
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_STEREO, 2, 512));
+ AddInput(MakeTestBuffer(48000, CHANNEL_LAYOUT_MONO, 1, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_Multiple) {
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_STEREO, 2, 512));
+ AddInput(MakeTestBuffer(48000, CHANNEL_LAYOUT_MONO, 1, 512));
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_5_1, 6, 512));
+ AddInput(MakeTestBuffer(22050, CHANNEL_LAYOUT_STEREO, 2, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Reset) {
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_STEREO, 2, 512));
+ Reset();
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ResampleThenReset) {
+ // Resampling is likely to leave some data buffered in AudioConverter's
+ // FIFO or resampler, so make sure Reset() cleans that all up.
+ AddInput(MakeTestBuffer(48000, CHANNEL_LAYOUT_STEREO, 2, 512));
+ Reset();
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ResetThenConvert) {
+ AddInput(
+ MakeTestBuffer(kOutSampleRate, kOutChannelLayout, kOutChannelCount, 512));
+ Reset();
+ // Make sure we can keep using the AudioBufferConverter after we've Reset().
+ AddInput(
+ MakeTestBuffer(kOutSampleRate, kOutChannelLayout, kOutChannelCount, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, DiscreteChannelLayout) {
+ output_params_ = AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_DISCRETE,
+ 2,
+ 0,
+ kOutSampleRate,
+ 16,
+ 512,
+ 0);
+ audio_buffer_converter_.reset(new AudioBufferConverter(output_params_));
+ AddInput(MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_STEREO, 2, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, LargeBuffersResampling) {
+ output_params_ = AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutChannelLayout,
+ kOutSampleRate,
+ 16,
+ 2048);
+
+ audio_buffer_converter_.reset(new AudioBufferConverter(output_params_));
+ const int kInputSampleRate = 48000;
+ const int kInputFrameSize = 8192;
+ ASSERT_NE(kInputSampleRate, kOutSampleRate);
+
+ const int kInputBuffers = 3;
+ for (int i = 0; i < kInputBuffers; ++i) {
+ AddInput(MakeTestBuffer(kInputSampleRate,
+ kOutChannelLayout,
+ kOutChannelCount,
+ kInputFrameSize));
+ }
+
+ // Do not add an EOS packet here, as it will invoke flushing.
+ while (audio_buffer_converter_->HasNextBuffer())
+ ConsumeOutput();
+
+ // Since the input buffer size is a multiple of the input request size,
+ // there should never be any frames remaining at this point.
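+ // (With differing sample rates the input request size is
+ // SincResampler::kDefaultRequestSize; the ASSERT below checks the multiple
+ // rather than assuming a particular value for that constant.)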
+ ASSERT_EQ(kInputFrameSize %
+ audio_buffer_converter_->input_buffer_size_for_testing(),
+ 0);
+ EXPECT_EQ(0, audio_buffer_converter_->input_frames_left_for_testing());
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_buffer_queue_unittest.cc b/chromium/media/base/audio_buffer_queue_unittest.cc
index b95bdca1454..dfb2098e1aa 100644
--- a/chromium/media/base/audio_buffer_queue_unittest.cc
+++ b/chromium/media/base/audio_buffer_queue_unittest.cc
@@ -5,7 +5,6 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_buffer_queue.h"
@@ -16,113 +15,125 @@
namespace media {
-static void VerifyResult(float* channel_data,
- int frames,
- float start,
- float increment) {
- for (int i = 0; i < frames; ++i) {
- SCOPED_TRACE(base::StringPrintf(
- "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
- ASSERT_EQ(start, channel_data[i]);
- start += increment;
+const int kSampleRate = 44100;
+
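+// Worked example of the layout VerifyBus assumes (MakeAudioBuffer writes a
+// ramp per channel): a stereo source buffer of 6 frames with start=1 and
+// increment=1 holds 1..6 on ch0 and 7..12 on ch1, so |buffer_size| (the
+// source buffer's frame count) determines the per-channel offset.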
+static void VerifyBus(AudioBus* bus,
+ int offset,
+ int frames,
+ int buffer_size,
+ float start,
+ float increment) {
+ for (int ch = 0; ch < bus->channels(); ++ch) {
+ const float v = start + ch * buffer_size * increment;
+ for (int i = offset; i < offset + frames; ++i) {
+ ASSERT_FLOAT_EQ(v + (i - offset) * increment, bus->channel(ch)[i])
+ << "i=" << i << ", ch=" << ch;
+ }
}
}
+template <typename T>
+static scoped_refptr<AudioBuffer> MakeTestBuffer(SampleFormat format,
+ ChannelLayout channel_layout,
+ T start,
+ T step,
+ int frames) {
+ return MakeAudioBuffer<T>(format,
+ channel_layout,
+ ChannelLayoutToChannelCount(channel_layout),
+ kSampleRate,
+ start,
+ step,
+ frames,
+ kNoTimestamp());
+}
+
TEST(AudioBufferQueueTest, AppendAndClear) {
- const int channels = 1;
- const int frames = 8;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
AudioBufferQueue buffer;
EXPECT_EQ(0, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
- EXPECT_EQ(frames, buffer.frames());
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 10, 1, 8));
+ EXPECT_EQ(8, buffer.frames());
buffer.Clear();
EXPECT_EQ(0, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 20, 1, frames, kNoTime, kNoTime));
- EXPECT_EQ(frames, buffer.frames());
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 20, 1, 8));
+ EXPECT_EQ(8, buffer.frames());
}
TEST(AudioBufferQueueTest, MultipleAppend) {
- const int channels = 1;
- const int frames = 8;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
AudioBufferQueue buffer;
-
- // Append 40 frames in 5 buffers.
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 10, 1, 8));
EXPECT_EQ(8, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 10, 1, 8));
EXPECT_EQ(16, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 10, 1, 8));
EXPECT_EQ(24, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 10, 1, 8));
EXPECT_EQ(32, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 10, 1, 8));
EXPECT_EQ(40, buffer.frames());
}
TEST(AudioBufferQueueTest, IteratorCheck) {
- const int channels = 1;
- const int frames = 8;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
// Append 40 frames in 5 buffers. Intersperse ReadFrames() to make sure the
// iterator is pointing to the correct position.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 10.0f, 1.0f, frames, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 10.0f, 1.0f, 8));
EXPECT_EQ(8, buffer.frames());
EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(4, buffer.frames());
- VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 10, 1);
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 20.0f, 1.0f, frames, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 20.0f, 1.0f, 8));
EXPECT_EQ(12, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 30.0f, 1.0f, frames, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 30.0f, 1.0f, 8));
EXPECT_EQ(20, buffer.frames());
buffer.SeekFrames(16);
EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(0, buffer.frames());
- VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 34, 1);
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 40.0f, 1.0f, frames, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 40.0f, 1.0f, 8));
EXPECT_EQ(8, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 50.0f, 1.0f, frames, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 50.0f, 1.0f, 8));
EXPECT_EQ(16, buffer.frames());
EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
- VerifyResult(bus->channel(0), 4, 40.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 40, 1);
// Read off the end of the buffer.
EXPECT_EQ(12, buffer.frames());
buffer.SeekFrames(8);
EXPECT_EQ(4, buffer.ReadFrames(100, 0, bus.get()));
- VerifyResult(bus->channel(0), 4, 54.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 54, 1);
}
TEST(AudioBufferQueueTest, Seek) {
- const int channels = 2;
- const int frames = 6;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
AudioBufferQueue buffer;
// Add 6 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 1.0f, 1.0f, frames, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 1.0f, 1.0f, 6));
EXPECT_EQ(6, buffer.frames());
// Seek past 2 frames.
@@ -138,32 +149,30 @@ TEST(AudioBufferQueueTest, Seek) {
}
TEST(AudioBufferQueueTest, ReadF32) {
- const int channels = 2;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 76 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 1.0f, 1.0f, 6, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 13.0f, 1.0f, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 33.0f, 1.0f, 60, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<float>(kSampleFormatF32, channel_layout, 1.0f, 1.0f, 6));
+ buffer.Append(
+ MakeTestBuffer<float>(kSampleFormatF32, channel_layout, 13.0f, 1.0f, 10));
+ buffer.Append(
+ MakeTestBuffer<float>(kSampleFormatF32, channel_layout, 33.0f, 1.0f, 60));
EXPECT_EQ(76, buffer.frames());
- // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
- // 1, 3, 5, and ch[1] should be 2, 4, 6.
+ // Read 3 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(3, buffer.ReadFrames(3, 0, bus.get()));
EXPECT_EQ(73, buffer.frames());
- VerifyResult(bus->channel(0), 3, 1.0f, 2.0f);
- VerifyResult(bus->channel(1), 3, 2.0f, 2.0f);
+ VerifyBus(bus.get(), 0, 3, 6, 1, 1);
// Now read 5 frames, which will span buffers. Append the data into AudioBus.
EXPECT_EQ(5, buffer.ReadFrames(5, 3, bus.get()));
EXPECT_EQ(68, buffer.frames());
- VerifyResult(bus->channel(0), 8, 1.0f, 2.0f);
- VerifyResult(bus->channel(1), 8, 2.0f, 2.0f);
+ VerifyBus(bus.get(), 0, 6, 6, 1, 1);
+ VerifyBus(bus.get(), 6, 2, 10, 13, 1);
// Now skip into the third buffer.
buffer.SeekFrames(20);
@@ -171,269 +180,261 @@ TEST(AudioBufferQueueTest, ReadF32) {
// Now read 2 frames, which are in the third buffer.
EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
- VerifyResult(bus->channel(0), 2, 57.0f, 2.0f);
- VerifyResult(bus->channel(1), 2, 58.0f, 2.0f);
+ VerifyBus(bus.get(), 0, 2, 60, 45, 1);
}
TEST(AudioBufferQueueTest, ReadU8) {
- const int channels = 4;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 4;
- const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 4 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 128, 1, frames, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 128, 1, frames));
- // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
- // 128, 132, 136, 140, other channels similar. However, values are converted
- // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
- // value should be 0.0, then 1/127, 2/127, etc.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
+ // Read all 4 frames from the buffer.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ EXPECT_EQ(frames, buffer.ReadFrames(frames, 0, bus.get()));
EXPECT_EQ(0, buffer.frames());
- VerifyResult(bus->channel(0), 4, 0.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(1), 4, 1.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(2), 4, 2.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(3), 4, 3.0f / 127.0f, 4.0f / 127.0f);
+ VerifyBus(bus.get(), 0, frames, bus->frames(), 0, 1.0f / 127.0f);
}
TEST(AudioBufferQueueTest, ReadS16) {
- const int channels = 2;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 9, 1, 20, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<int16>(kSampleFormatS16, channel_layout, 1, 1, 4));
+ buffer.Append(
+ MakeTestBuffer<int16>(kSampleFormatS16, channel_layout, 9, 1, 20));
EXPECT_EQ(24, buffer.frames());
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
- // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
- // Data is converted to float from -1.0 to 1.0 based on int16 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
+ // Read 6 frames from the buffer.
+ const int frames = 6;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, buffer.frames());
+ EXPECT_EQ(frames, buffer.ReadFrames(frames, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
- VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
- VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+ VerifyBus(bus.get(), 0, 4, 4, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyBus(bus.get(), 4, 2, 20, 9.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferQueueTest, ReadS32) {
- const int channels = 2;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<int32>(
- kSampleFormatS32, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int32>(
- kSampleFormatS32, channels, 9, 1, 20, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<int32>(kSampleFormatS32, channel_layout, 1, 1, 4));
+ buffer.Append(
+ MakeTestBuffer<int32>(kSampleFormatS32, channel_layout, 9, 1, 20));
EXPECT_EQ(24, buffer.frames());
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
- // 1, 3, 5, 7, 100, 106, and ch[1] should be 2, 4, 6, 8, 103, 109.
- // Data is converted to float from -1.0 to 1.0 based on int32 range.
+ // Read 6 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
- VerifyResult(bus->channel(0), 6, 1.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), 6, 2.0f / kint32max, 2.0f / kint32max);
+ VerifyBus(bus.get(), 0, 4, 4, 1.0f / kint32max, 1.0f / kint32max);
+ VerifyBus(bus.get(), 4, 2, 20, 9.0f / kint32max, 1.0f / kint32max);
// Read the next 2 frames.
EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
EXPECT_EQ(16, buffer.frames());
- VerifyResult(bus->channel(0), 2, 13.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), 2, 14.0f / kint32max, 2.0f / kint32max);
+ VerifyBus(bus.get(), 0, 2, 20, 11.0f / kint32max, 1.0f / kint32max);
}
TEST(AudioBufferQueueTest, ReadF32Planar) {
- const int channels = 2;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 14 frames of data.
- buffer.Append(MakePlanarAudioBuffer<float>(
- kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, kNoTime, kNoTime));
- buffer.Append(MakePlanarAudioBuffer<float>(
- kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, kNoTime, kNoTime));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatPlanarF32, channel_layout, 1.0f, 1.0f, 4));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatPlanarF32, channel_layout, 50.0f, 1.0f, 10));
EXPECT_EQ(14, buffer.frames());
- // Read 6 frames from the buffer. F32 is planar, so ch[0] should be
- // 1, 2, 3, 4, 50, 51, and ch[1] should be 5, 6, 7, 8, 60, 61.
+ // Read 6 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(8, buffer.frames());
- VerifyResult(bus->channel(0), 4, 1.0f, 1.0f);
- VerifyResult(bus->channel(0) + 4, 2, 50.0f, 1.0f);
- VerifyResult(bus->channel(1), 4, 5.0f, 1.0f);
- VerifyResult(bus->channel(1) + 4, 2, 60.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, 4, 1, 1);
+ VerifyBus(bus.get(), 4, 2, 10, 50, 1);
}
TEST(AudioBufferQueueTest, ReadS16Planar) {
- const int channels = 2;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakePlanarAudioBuffer<int16>(
- kSampleFormatPlanarS16, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakePlanarAudioBuffer<int16>(
- kSampleFormatPlanarS16, channels, 100, 5, 20, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<int16>(kSampleFormatPlanarS16, channel_layout, 1, 1, 4));
+ buffer.Append(
+ MakeTestBuffer<int16>(kSampleFormatPlanarS16, channel_layout, 5, 1, 20));
EXPECT_EQ(24, buffer.frames());
- // Read 6 frames from the buffer. Data is planar, so ch[0] should be
- // 1, 2, 3, 4, 100, 105, and ch[1] should be 5, 6, 7, 8, 200, 205.
- // Data is converted to float from -1.0 to 1.0 based on int16 range.
+ // Read 6 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
- VerifyResult(bus->channel(0), 4, 1.0f / kint16max, 1.0f / kint16max);
- VerifyResult(bus->channel(0) + 4, 2, 100.0f / kint16max, 5.0f / kint16max);
- VerifyResult(bus->channel(1), 4, 5.0f / kint16max, 1.0f / kint16max);
- VerifyResult(bus->channel(1) + 4, 2, 200.0f / kint16max, 5.0f / kint16max);
+ VerifyBus(bus.get(), 0, 4, 4, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyBus(bus.get(), 4, 2, 20, 5.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferQueueTest, ReadManyChannels) {
- const int channels = 16;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_OCTAGONAL;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 76 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 0.0f, 1.0f, 6, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(kSampleFormatF32,
- channels,
- 16.0f * channels,
- 1.0f,
- 60,
- kNoTime,
- kNoTime));
+ buffer.Append(
+ MakeTestBuffer<float>(kSampleFormatF32, channel_layout, 0.0f, 1.0f, 6));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 6.0f * channels, 1.0f, 10));
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 16.0f * channels, 1.0f, 60));
EXPECT_EQ(76, buffer.frames());
- // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
- // 1, 17, 33, and ch[1] should be 2, 18, 34. Just check a few channels.
+ // Read 30 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus.get()));
EXPECT_EQ(46, buffer.frames());
- for (int i = 0; i < channels; ++i) {
- VerifyResult(bus->channel(i), 30, static_cast<float>(i), 16.0f);
- }
+ VerifyBus(bus.get(), 0, 6, 6, 0, 1);
+ VerifyBus(bus.get(), 6, 10, 10, 6 * channels, 1);
+ VerifyBus(bus.get(), 16, 14, 60, 16 * channels, 1);
}
TEST(AudioBufferQueueTest, Peek) {
- const int channels = 4;
- const base::TimeDelta kNoTime = kNoTimestamp();
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
AudioBufferQueue buffer;
// Add 60 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 0.0f, 1.0f, 60, kNoTime, kNoTime));
- EXPECT_EQ(60, buffer.frames());
+ const int frames = 60;
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 0.0f, 1.0f, frames));
+ EXPECT_EQ(frames, buffer.frames());
// Peek at all 60 frames, then at just the first 30.
- scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, 100);
- EXPECT_EQ(60, buffer.frames());
- EXPECT_EQ(60, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, frames);
+ EXPECT_EQ(frames, buffer.frames());
+ EXPECT_EQ(frames, buffer.PeekFrames(60, 0, 0, bus1.get()));
EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
- EXPECT_EQ(60, buffer.frames());
+ EXPECT_EQ(frames, buffer.frames());
+ VerifyBus(bus1.get(), 0, 30, bus1->frames(), 0, 1);
// Now read the next 30 frames (which should be the same as those peeked at).
- scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, 100);
+ scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, frames);
EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus2.get()));
- for (int i = 0; i < channels; ++i) {
- VerifyResult(bus1->channel(i),
- 30,
- static_cast<float>(i),
- static_cast<float>(channels));
- VerifyResult(bus2->channel(i),
- 30,
- static_cast<float>(i),
- static_cast<float>(channels));
- }
+ VerifyBus(bus2.get(), 0, 30, bus2->frames(), 0, 1);
// Peek 5 frames, starting 10 frames past the current position.
+ bus1->Zero();
EXPECT_EQ(5, buffer.PeekFrames(5, 10, 0, bus1.get()));
- for (int i = 0; i < channels; ++i) {
- VerifyResult(bus1->channel(i),
- 5,
- static_cast<float>(i + 40 * channels),
- static_cast<float>(channels));
- }
+ VerifyBus(bus1.get(), 0, 5, bus1->frames(), 40, 1);
// Peek to the end of the buffer.
EXPECT_EQ(30, buffer.frames());
- EXPECT_EQ(30, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(60, 0, 0, bus1.get()));
EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
}
TEST(AudioBufferQueueTest, Time) {
- const int channels = 2;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const base::TimeDelta start_time1;
const base::TimeDelta start_time2 = base::TimeDelta::FromSeconds(30);
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_refptr<AudioBuffer> audio_buffer =
+ MakeAudioBuffer<int16>(kSampleFormatS16,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1,
+ 1,
+ 10,
+ start_time1);
+
// Add two buffers (second one added later):
// first: start=0s, 10 frames
// second: start=30s, 10 frames
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, start_time1, duration));
+ buffer.Append(audio_buffer);
EXPECT_EQ(10, buffer.frames());
// Check starting time.
EXPECT_EQ(start_time1, buffer.current_time());
// Read 2 frames; time should advance by two frames' worth of duration.
- EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
- EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(2),
- buffer.current_time());
+ int frames_read = 2;
+ EXPECT_EQ(frames_read, buffer.ReadFrames(frames_read, 0, bus.get()));
+ EXPECT_EQ(
+ start_time1 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count(),
+ buffer.current_time());
// Skip 2 frames.
buffer.SeekFrames(2);
- EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(4),
- buffer.current_time());
+ frames_read += 2;
+ EXPECT_EQ(
+ start_time1 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count(),
+ buffer.current_time());
// Add second buffer for more data.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, start_time2, duration));
+ buffer.Append(MakeAudioBuffer<int16>(kSampleFormatS16,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1,
+ 1,
+ 10,
+ start_time2));
EXPECT_EQ(16, buffer.frames());
// Read until almost the end of buffer1.
+ frames_read += 5;
EXPECT_EQ(5, buffer.ReadFrames(5, 0, bus.get()));
- EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(9),
- buffer.current_time());
+ EXPECT_EQ(
+ start_time1 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count(),
+ buffer.current_time());
// Read 1 value, so time moved to buffer2.
EXPECT_EQ(1, buffer.ReadFrames(1, 0, bus.get()));
EXPECT_EQ(start_time2, buffer.current_time());
// Read all 10 frames in buffer2; the timestamp should be the end of buffer2.
+ frames_read = 10;
EXPECT_EQ(10, buffer.ReadFrames(10, 0, bus.get()));
- EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
- buffer.current_time());
+ const base::TimeDelta expected_current_time =
+ start_time2 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count();
+ EXPECT_EQ(expected_current_time, buffer.current_time());
// Try to read more frames (which don't exist); the timestamp should not change.
EXPECT_EQ(0, buffer.ReadFrames(5, 0, bus.get()));
- EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
- buffer.current_time());
+ EXPECT_EQ(expected_current_time, buffer.current_time());
}
TEST(AudioBufferQueueTest, NoTime) {
- const int channels = 2;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
// Add two buffers with no timestamps. Time should always be unknown.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
+ buffer.Append(
+ MakeTestBuffer<int16>(kSampleFormatS16, channel_layout, 1, 1, 10));
+ buffer.Append(
+ MakeTestBuffer<int16>(kSampleFormatS16, channel_layout, 1, 1, 10));
EXPECT_EQ(20, buffer.frames());
// Check starting time.
diff --git a/chromium/media/base/audio_buffer_unittest.cc b/chromium/media/base/audio_buffer_unittest.cc
index 473778a6b53..039dc0af3cd 100644
--- a/chromium/media/base/audio_buffer_unittest.cc
+++ b/chromium/media/base/audio_buffer_unittest.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/test_helpers.h"
@@ -11,29 +9,175 @@
namespace media {
-static void VerifyResult(float* channel_data,
- int frames,
- float start,
- float increment) {
- for (int i = 0; i < frames; ++i) {
- SCOPED_TRACE(base::StringPrintf(
- "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
- ASSERT_EQ(channel_data[i], start);
- start += increment;
+static const int kSampleRate = 48000;
+
+static void VerifyBusWithOffset(AudioBus* bus,
+ int offset,
+ int frames,
+ float start,
+ float start_offset,
+ float increment) {
+ for (int ch = 0; ch < bus->channels(); ++ch) {
+ const float v = start_offset + start + ch * bus->frames() * increment;
+ for (int i = offset; i < offset + frames; ++i) {
+ ASSERT_FLOAT_EQ(v + i * increment, bus->channel(ch)[i]) << "i=" << i
+ << ", ch=" << ch;
+ }
}
}
-TEST(AudioBufferTest, CopyFrom) {
- const int channels = 1;
- const int frames = 8;
- const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 1, 1, frames, start_time, duration);
+static void VerifyBus(AudioBus* bus, int frames, float start, float increment) {
+ VerifyBusWithOffset(bus, 0, frames, start, 0, increment);
+}
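+
+// Worked example of the layout these helpers expect: with 2 channels,
+// 10 frames, start=1, and increment=1, MakeAudioBuffer fills ch0 with 1..10
+// and ch1 with 11..20; the per-channel offset is ch * bus->frames() *
+// increment.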
+
+static void TrimRangeTest(SampleFormat sample_format) {
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
+ const int frames = kSampleRate / 10;
+ const base::TimeDelta timestamp = base::TimeDelta();
+ const base::TimeDelta duration = base::TimeDelta::FromMilliseconds(100);
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(sample_format,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 0,
+ 1,
+ frames,
+ timestamp);
EXPECT_EQ(frames, buffer->frame_count());
- EXPECT_EQ(buffer->timestamp(), start_time);
- EXPECT_EQ(buffer->duration().InSeconds(), frames);
- EXPECT_FALSE(buffer->end_of_stream());
+ EXPECT_EQ(timestamp, buffer->timestamp());
+ EXPECT_EQ(duration, buffer->duration());
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+
+ // Verify all frames before trimming.
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyBus(bus.get(), frames, 0, 1);
+
+ // Trim 10ms of frames from the middle of the buffer.
+ int trim_start = frames / 2;
+ const int trim_length = kSampleRate / 100;
+ const base::TimeDelta trim_duration = base::TimeDelta::FromMilliseconds(10);
+ buffer->TrimRange(trim_start, trim_start + trim_length);
+ EXPECT_EQ(frames - trim_length, buffer->frame_count());
+ EXPECT_EQ(timestamp, buffer->timestamp());
+ EXPECT_EQ(duration - trim_duration, buffer->duration());
+ bus->Zero();
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), trim_start, 0, 1);
+ VerifyBusWithOffset(bus.get(),
+ trim_start,
+ buffer->frame_count() - trim_start,
+ 0,
+ trim_length,
+ 1);
+
+ // Trim 10ms of frames from the start, which just adjusts the buffer's
+ // internal start offset.
+ buffer->TrimStart(trim_length);
+ trim_start -= trim_length;
+ EXPECT_EQ(frames - 2 * trim_length, buffer->frame_count());
+ EXPECT_EQ(timestamp + trim_duration, buffer->timestamp());
+ EXPECT_EQ(duration - 2 * trim_duration, buffer->duration());
+ bus->Zero();
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), trim_start, trim_length, 1);
+ VerifyBusWithOffset(bus.get(),
+ trim_start,
+ buffer->frame_count() - trim_start,
+ trim_length,
+ trim_length,
+ 1);
+
+ // Trim 10ms of frames from the end, which just adjusts the buffer's frame
+ // count.
+ buffer->TrimEnd(trim_length);
+ EXPECT_EQ(frames - 3 * trim_length, buffer->frame_count());
+ EXPECT_EQ(timestamp + trim_duration, buffer->timestamp());
+ EXPECT_EQ(duration - 3 * trim_duration, buffer->duration());
+ bus->Zero();
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), trim_start, trim_length, 1);
+ VerifyBusWithOffset(bus.get(),
+ trim_start,
+ buffer->frame_count() - trim_start,
+ trim_length,
+ trim_length,
+ 1);
+
+ // Trim another 10ms from the inner portion of the buffer.
+ buffer->TrimRange(trim_start, trim_start + trim_length);
+ EXPECT_EQ(frames - 4 * trim_length, buffer->frame_count());
+ EXPECT_EQ(timestamp + trim_duration, buffer->timestamp());
+ EXPECT_EQ(duration - 4 * trim_duration, buffer->duration());
+ bus->Zero();
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), trim_start, trim_length, 1);
+ VerifyBusWithOffset(bus.get(),
+ trim_start,
+ buffer->frame_count() - trim_start,
+ trim_length,
+ trim_length * 2,
+ 1);
+
+ // Trim off the end using TrimRange() to ensure end index is exclusive.
+ buffer->TrimRange(buffer->frame_count() - trim_length, buffer->frame_count());
+ EXPECT_EQ(frames - 5 * trim_length, buffer->frame_count());
+ EXPECT_EQ(timestamp + trim_duration, buffer->timestamp());
+ EXPECT_EQ(duration - 5 * trim_duration, buffer->duration());
+ bus->Zero();
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), trim_start, trim_length, 1);
+ VerifyBusWithOffset(bus.get(),
+ trim_start,
+ buffer->frame_count() - trim_start,
+ trim_length,
+ trim_length * 2,
+ 1);
+
+ // Trim off the start using TrimRange() to ensure start index is inclusive.
+ buffer->TrimRange(0, trim_length);
+ trim_start -= trim_length;
+ EXPECT_EQ(frames - 6 * trim_length, buffer->frame_count());
+ EXPECT_EQ(timestamp + trim_duration, buffer->timestamp());
+ EXPECT_EQ(duration - 6 * trim_duration, buffer->duration());
+ bus->Zero();
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), trim_start, 2 * trim_length, 1);
+ VerifyBusWithOffset(bus.get(),
+ trim_start,
+ buffer->frame_count() - trim_start,
+ trim_length * 2,
+ trim_length * 2,
+ 1);
+}
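+
+// Net accounting for TrimRangeTest: the buffer starts at kSampleRate / 10 =
+// 4800 frames (100ms), and each of the six trims removes 480 frames (10ms),
+// leaving 4800 - 6 * 480 = 1920 frames (40ms).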
+
+TEST(AudioBufferTest, CopyFrom) {
+ const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_MONO;
+ scoped_refptr<AudioBuffer> original_buffer =
+ MakeAudioBuffer<uint8>(kSampleFormatU8,
+ kChannelLayout,
+ ChannelLayoutToChannelCount(kChannelLayout),
+ kSampleRate,
+ 1,
+ 1,
+ kSampleRate / 100,
+ base::TimeDelta());
+ scoped_refptr<AudioBuffer> new_buffer =
+ AudioBuffer::CopyFrom(kSampleFormatU8,
+ original_buffer->channel_layout(),
+ original_buffer->channel_count(),
+ original_buffer->sample_rate(),
+ original_buffer->frame_count(),
+ &original_buffer->channel_data()[0],
+ original_buffer->timestamp());
+ EXPECT_EQ(original_buffer->frame_count(), new_buffer->frame_count());
+ EXPECT_EQ(original_buffer->timestamp(), new_buffer->timestamp());
+ EXPECT_EQ(original_buffer->duration(), new_buffer->duration());
+ EXPECT_EQ(original_buffer->sample_rate(), new_buffer->sample_rate());
+ EXPECT_EQ(original_buffer->channel_count(), new_buffer->channel_count());
+ EXPECT_EQ(original_buffer->channel_layout(), new_buffer->channel_layout());
+ EXPECT_FALSE(original_buffer->end_of_stream());
}
TEST(AudioBufferTest, CreateEOSBuffer) {
@@ -45,246 +189,266 @@ TEST(AudioBufferTest, FrameSize) {
const uint8 kTestData[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31 };
- const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
- const base::TimeDelta kTimestampB = base::TimeDelta::FromMicroseconds(1234);
+ const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
const uint8* const data[] = { kTestData };
- scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
- kSampleFormatU8, 2, 16, data, kTimestampA, kTimestampB);
+ scoped_refptr<AudioBuffer> buffer =
+ AudioBuffer::CopyFrom(kSampleFormatU8,
+ CHANNEL_LAYOUT_STEREO,
+ 2,
+ kSampleRate,
+ 16,
+ data,
+ kTimestamp);
EXPECT_EQ(16, buffer->frame_count()); // 2 channels of 8-bit data
- buffer = AudioBuffer::CopyFrom(
- kSampleFormatF32, 4, 2, data, kTimestampA, kTimestampB);
+ buffer = AudioBuffer::CopyFrom(kSampleFormatF32,
+ CHANNEL_LAYOUT_4_0,
+ 4,
+ kSampleRate,
+ 2,
+ data,
+ kTimestamp);
EXPECT_EQ(2, buffer->frame_count()); // now 4 channels of 32-bit data
}
TEST(AudioBufferTest, ReadU8) {
- const int channels = 4;
- const int frames = 4;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
+ const int frames = 10;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 128, 1, frames, start_time, duration);
-
- // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
- // 128, 132, 136, 140, other channels similar. However, values are converted
- // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
- // value should be 0.0, then 1/127, 2/127, etc.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<uint8>(kSampleFormatU8,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 128,
+ 1,
+ frames,
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 0.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(1), frames, 1.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(2), frames, 2.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(3), frames, 3.0f / 127.0f, 4.0f / 127.0f);
+ VerifyBus(bus.get(), frames, 0, 1.0f / 127.0f);
+
+ // Now read the same data one frame at a time.
+ bus->Zero();
+ for (int i = 0; i < frames; ++i)
+ buffer->ReadFrames(1, i, i, bus.get());
+ VerifyBus(bus.get(), frames, 0, 1.0f / 127.0f);
}
TEST(AudioBufferTest, ReadS16) {
- const int channels = 2;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 10;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, frames, start_time, duration);
-
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
- // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
- // to float from -1.0 to 1.0 based on int16 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- buffer->ReadFrames(6, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
- VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int16>(kSampleFormatS16,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1,
+ 1,
+ frames,
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
// Now read the same data one frame at a time.
- bus = AudioBus::Create(channels, 100);
- for (int i = 0; i < frames; ++i) {
+ bus->Zero();
+ for (int i = 0; i < frames; ++i)
buffer->ReadFrames(1, i, i, bus.get());
- }
- VerifyResult(bus->channel(0), frames, 1.0f / kint16max, 2.0f / kint16max);
- VerifyResult(bus->channel(1), frames, 2.0f / kint16max, 2.0f / kint16max);
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferTest, ReadS32) {
- const int channels = 2;
- const int frames = 6;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
+ const int frames = 20;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
- kSampleFormatS32, channels, 1, 1, frames, start_time, duration);
-
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
- // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
- // to float from -1.0 to 1.0 based on int32 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int32>(kSampleFormatS32,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1,
+ 1,
+ frames,
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 1.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), frames, 2.0f / kint32max, 2.0f / kint32max);
-
- // Now read 2 frames starting at frame offset 3. ch[0] should be 7, 9, and
- // ch[1] should be 8, 10.
- buffer->ReadFrames(2, 3, 0, bus.get());
- VerifyResult(bus->channel(0), 2, 7.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), 2, 8.0f / kint32max, 2.0f / kint32max);
+ VerifyBus(bus.get(), frames, 1.0f / kint32max, 1.0f / kint32max);
+
+ // Read second 10 frames.
+ bus->Zero();
+ buffer->ReadFrames(10, 10, 0, bus.get());
+ VerifyBus(bus.get(), 10, 11.0f / kint32max, 1.0f / kint32max);
}
TEST(AudioBufferTest, ReadF32) {
- const int channels = 2;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 20;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time, duration);
-
- // Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
- // be 1, 3, 5, ... and ch[1] should be 2, 4, 6, ...
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(kSampleFormatF32,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(10, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 1.0f, 2.0f);
- VerifyResult(bus->channel(1), 10, 2.0f, 2.0f);
+ VerifyBus(bus.get(), 10, 1, 1);
// Read second 10 frames.
- bus = AudioBus::Create(channels, 100);
+ bus->Zero();
buffer->ReadFrames(10, 10, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 21.0f, 2.0f);
- VerifyResult(bus->channel(1), 10, 22.0f, 2.0f);
+ VerifyBus(bus.get(), 10, 11, 1);
}
TEST(AudioBufferTest, ReadS16Planar) {
- const int channels = 2;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 20;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
- kSampleFormatPlanarS16, channels, 1, 1, frames, start_time, duration);
-
- // Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
- // 4, 5, 6, and ch[1] should be 21, 22, 23, 24, 25, 26. Data is converted to
- // float from -1.0 to 1.0 based on int16 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- buffer->ReadFrames(6, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 1.0f / kint16max);
- VerifyResult(bus->channel(1), 6, 21.0f / kint16max, 1.0f / kint16max);
+ scoped_refptr<AudioBuffer> buffer =
+ MakeAudioBuffer<int16>(kSampleFormatPlanarS16,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1,
+ 1,
+ frames,
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ buffer->ReadFrames(10, 0, 0, bus.get());
+ VerifyBus(bus.get(), 10, 1.0f / kint16max, 1.0f / kint16max);
  // Read all the frames backwards, one by one; each frame is written to its
  // original index, so the bus holds all frames in forward order.
- bus = AudioBus::Create(channels, 100);
- for (int i = 0; i < frames; ++i) {
- buffer->ReadFrames(1, frames - i - 1, i, bus.get());
- }
- VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
- VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+ bus->Zero();
+ for (int i = frames - 1; i >= 0; --i)
+ buffer->ReadFrames(1, i, i, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
// Read 0 frames with different offsets. Existing data in AudioBus should be
// unchanged.
buffer->ReadFrames(0, 0, 0, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
buffer->ReadFrames(0, 0, 10, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
buffer->ReadFrames(0, 10, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
- VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferTest, ReadF32Planar) {
- const int channels = 4;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 100;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
- channels,
- 1.0f,
- 1.0f,
- frames,
- start_time,
- duration);
+ MakeAudioBuffer<float>(kSampleFormatPlanarF32,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
// 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
// channels.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 1.0f, 1.0f);
- VerifyResult(bus->channel(1), frames, 101.0f, 1.0f);
- VerifyResult(bus->channel(2), frames, 201.0f, 1.0f);
- VerifyResult(bus->channel(3), frames, 301.0f, 1.0f);
+ VerifyBus(bus.get(), frames, 1, 1);
// Now read 20 frames from the middle of the buffer.
- bus = AudioBus::Create(channels, 100);
+ bus->Zero();
buffer->ReadFrames(20, 50, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 51.0f, 1.0f);
- VerifyResult(bus->channel(1), 20, 151.0f, 1.0f);
- VerifyResult(bus->channel(2), 20, 251.0f, 1.0f);
- VerifyResult(bus->channel(3), 20, 351.0f, 1.0f);
+ VerifyBus(bus.get(), 20, 51, 1);
}
TEST(AudioBufferTest, EmptyBuffer) {
- const int channels = 4;
- const int frames = 100;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
+ const int frames = kSampleRate / 100;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateEmptyBuffer(
- channels, frames, start_time, duration);
+ channel_layout, channels, kSampleRate, frames, start_time);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(start_time, buffer->timestamp());
- EXPECT_EQ(frames, buffer->duration().InSeconds());
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(10), buffer->duration());
EXPECT_FALSE(buffer->end_of_stream());
  // Read all the frames from the buffer. All data should be 0.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 0.0f, 0.0f);
- VerifyResult(bus->channel(1), frames, 0.0f, 0.0f);
- VerifyResult(bus->channel(2), frames, 0.0f, 0.0f);
- VerifyResult(bus->channel(3), frames, 0.0f, 0.0f);
+ VerifyBus(bus.get(), frames, 0, 0);
}
TEST(AudioBufferTest, Trim) {
- const int channels = 4;
- const int frames = 100;
+ const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
+ const int frames = kSampleRate / 10;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ const base::TimeDelta duration = base::TimeDelta::FromMilliseconds(100);
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
- channels,
- 1.0f,
- 1.0f,
- frames,
- start_time,
- duration);
+ MakeAudioBuffer<float>(kSampleFormatPlanarF32,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 0.0f,
+ 1.0f,
+ frames,
+ start_time);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(start_time, buffer->timestamp());
- EXPECT_EQ(frames, buffer->duration().InSeconds());
+ EXPECT_EQ(duration, buffer->duration());
+
+ const int ten_ms_of_frames = kSampleRate / 100;
+ const base::TimeDelta ten_ms = base::TimeDelta::FromMilliseconds(10);
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), 0.0f, 1.0f);
+
+ // Trim off 10ms of frames from the start.
+ buffer->TrimStart(ten_ms_of_frames);
+ EXPECT_EQ(start_time + ten_ms, buffer->timestamp());
+ EXPECT_EQ(frames - ten_ms_of_frames, buffer->frame_count());
+ EXPECT_EQ(duration - ten_ms, buffer->duration());
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), ten_ms_of_frames, 1.0f);
+
+ // Trim off 10ms of frames from the end.
+ buffer->TrimEnd(ten_ms_of_frames);
+ EXPECT_EQ(start_time + ten_ms, buffer->timestamp());
+ EXPECT_EQ(frames - 2 * ten_ms_of_frames, buffer->frame_count());
+ EXPECT_EQ(duration - 2 * ten_ms, buffer->duration());
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), ten_ms_of_frames, 1.0f);
+
+ // Trim off 40ms more from the start.
+ buffer->TrimStart(4 * ten_ms_of_frames);
+ EXPECT_EQ(start_time + 5 * ten_ms, buffer->timestamp());
+ EXPECT_EQ(frames - 6 * ten_ms_of_frames, buffer->frame_count());
+ EXPECT_EQ(duration - 6 * ten_ms, buffer->duration());
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), 5 * ten_ms_of_frames, 1.0f);
+
+ // Trim off the final 40ms from the end.
+ buffer->TrimEnd(4 * ten_ms_of_frames);
+ EXPECT_EQ(0, buffer->frame_count());
+ EXPECT_EQ(start_time + 5 * ten_ms, buffer->timestamp());
+ EXPECT_EQ(base::TimeDelta(), buffer->duration());
+}
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- buffer->ReadFrames(20, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 1.0f, 1.0f);
-
- // Trim off 10 frames from the start.
- buffer->TrimStart(10);
- EXPECT_EQ(buffer->frame_count(), frames - 10);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(90));
- buffer->ReadFrames(20, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
-
- // Trim off 10 frames from the end.
- buffer->TrimEnd(10);
- EXPECT_EQ(buffer->frame_count(), frames - 20);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(80));
- buffer->ReadFrames(20, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
-
- // Trim off 50 more from the start.
- buffer->TrimStart(50);
- EXPECT_EQ(buffer->frame_count(), frames - 70);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(30));
- buffer->ReadFrames(10, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 61.0f, 1.0f);
+TEST(AudioBufferTest, TrimRangePlanar) {
+ TrimRangeTest(kSampleFormatPlanarF32);
+}
- // Trim off the last 30 frames.
- buffer->TrimEnd(30);
- EXPECT_EQ(buffer->frame_count(), 0);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(0));
+TEST(AudioBufferTest, TrimRangeInterleaved) {
+ TrimRangeTest(kSampleFormatF32);
}
} // namespace media
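
The updated tests size buffers as fractions of kSampleRate and check real durations instead of the old FromSeconds(frames) placeholder. A minimal standalone sketch of that frames/duration bookkeeping (the sample rate below is an assumed illustrative value, not the fixture's constant):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSampleRate = 44100;  // assumption for illustration only
      const int ten_ms_of_frames = kSampleRate / 100;
      // frames -> duration: duration_us = frames * 1,000,000 / sample_rate.
      const int64_t duration_us =
          ten_ms_of_frames * INT64_C(1000000) / kSampleRate;
      assert(duration_us == 10000);  // exactly 10 ms for rates divisible by 100
      return 0;
    }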
diff --git a/chromium/media/base/audio_bus.cc b/chromium/media/base/audio_bus.cc
index c1123471abc..e34c7489399 100644
--- a/chromium/media/base/audio_bus.cc
+++ b/chromium/media/base/audio_bus.cc
@@ -5,7 +5,7 @@
#include "media/base/audio_bus.h"
#include "base/logging.h"
-#include "base/safe_numerics.h"
+#include "base/numerics/safe_conversions.h"
#include "media/audio/audio_parameters.h"
#include "media/base/limits.h"
#include "media/base/vector_math.h"
@@ -130,7 +130,7 @@ AudioBus::AudioBus(int frames, const std::vector<float*>& channel_data)
frames_(frames),
can_set_channel_data_(false) {
ValidateConfig(
- base::checked_numeric_cast<int>(channel_data_.size()), frames_);
+ base::checked_cast<int>(channel_data_.size()), frames_);
// Sanity check wrapped vector for alignment and channel count.
for (size_t i = 0; i < channel_data_.size(); ++i)
@@ -333,4 +333,11 @@ void AudioBus::Scale(float volume) {
}
}
+void AudioBus::SwapChannels(int a, int b) {
+ DCHECK(a < channels() && a >= 0);
+ DCHECK(b < channels() && b >= 0);
+ DCHECK_NE(a, b);
+ std::swap(channel_data_[a], channel_data_[b]);
+}
+
} // namespace media
diff --git a/chromium/media/base/audio_bus.h b/chromium/media/base/audio_bus.h
index d1106f558ef..c5b161f0236 100644
--- a/chromium/media/base/audio_bus.h
+++ b/chromium/media/base/audio_bus.h
@@ -104,10 +104,13 @@ class MEDIA_EXPORT AudioBus {
// is provided, no adjustment is done.
void Scale(float volume);
- private:
- friend struct base::DefaultDeleter<AudioBus>;
- ~AudioBus();
+ // Swaps channels identified by |a| and |b|. The caller needs to make sure
+ // the channels are valid.
+ void SwapChannels(int a, int b);
+
+ virtual ~AudioBus();
+ private:
AudioBus(int channels, int frames);
AudioBus(int channels, int frames, float* data);
AudioBus(int frames, const std::vector<float*>& channel_data);
@@ -118,7 +121,7 @@ class MEDIA_EXPORT AudioBus {
void BuildChannelData(int channels, int aligned_frame, float* data);
// Contiguous block of channel memory.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<float, base::AlignedFreeDeleter> data_;
std::vector<float*> channel_data_;
int frames_;
diff --git a/chromium/media/base/audio_bus_unittest.cc b/chromium/media/base/audio_bus_unittest.cc
index e8c78a36b41..2f1e02c0ce2 100644
--- a/chromium/media/base/audio_bus_unittest.cc
+++ b/chromium/media/base/audio_bus_unittest.cc
@@ -138,7 +138,7 @@ TEST_F(AudioBusTest, WrapMemory) {
AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
kFrameCount);
int data_size = AudioBus::CalculateMemorySize(params);
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
+ scoped_ptr<float, base::AlignedFreeDeleter> data(static_cast<float*>(
base::AlignedAlloc(data_size, AudioBus::kChannelAlignment)));
// Fill the memory with a test value we can check for after wrapping.
@@ -188,7 +188,7 @@ TEST_F(AudioBusTest, CopyTo) {
{
SCOPED_TRACE("Wrapped Memory");
// Try a copy to an AudioBus wrapping a memory block.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(
+ scoped_ptr<float, base::AlignedFreeDeleter> data(
static_cast<float*>(base::AlignedAlloc(
AudioBus::CalculateMemorySize(params),
AudioBus::kChannelAlignment)));
diff --git a/chromium/media/base/audio_capturer_source.h b/chromium/media/base/audio_capturer_source.h
index b584f8a48db..621c3921ab0 100644
--- a/chromium/media/base/audio_capturer_source.h
+++ b/chromium/media/base/audio_capturer_source.h
@@ -24,7 +24,7 @@ class AudioCapturerSource
class CaptureCallback {
public:
// Callback to deliver the captured data from the OS.
- virtual void Capture(AudioBus* audio_source,
+ virtual void Capture(const AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) = 0;
diff --git a/chromium/media/base/audio_converter.cc b/chromium/media/base/audio_converter.cc
index d0c45136dad..aa0be4f0470 100644
--- a/chromium/media/base/audio_converter.cc
+++ b/chromium/media/base/audio_converter.cc
@@ -25,7 +25,8 @@ namespace media {
AudioConverter::AudioConverter(const AudioParameters& input_params,
const AudioParameters& output_params,
bool disable_fifo)
- : downmix_early_(false),
+ : chunk_size_(input_params.frames_per_buffer()),
+ downmix_early_(false),
resampler_frame_delay_(0),
input_channel_count_(input_params.channels()) {
CHECK(input_params.IsValid());
@@ -41,31 +42,22 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
// Pare off data as early as we can for efficiency.
downmix_early_ = input_params.channels() > output_params.channels();
- if (downmix_early_) {
- DVLOG(1) << "Remixing channel layout prior to resampling.";
- // |unmixed_audio_| will be allocated on the fly.
- } else {
- // Instead, if we're not downmixing early we need a temporary AudioBus
- // which matches the input channel count but uses the output frame size
- // since we'll mix into the AudioBus from the output stream.
- unmixed_audio_ = AudioBus::Create(
- input_params.channels(), output_params.frames_per_buffer());
- }
}
// Only resample if necessary since it's expensive.
if (input_params.sample_rate() != output_params.sample_rate()) {
DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
<< output_params.sample_rate();
- const double io_sample_rate_ratio = input_params.sample_rate() /
- static_cast<double>(output_params.sample_rate());
const int request_size = disable_fifo ? SincResampler::kDefaultRequestSize :
input_params.frames_per_buffer();
+ const double io_sample_rate_ratio =
+ input_params.sample_rate() /
+ static_cast<double>(output_params.sample_rate());
resampler_.reset(new MultiChannelResampler(
- downmix_early_ ? output_params.channels() :
- input_params.channels(),
- io_sample_rate_ratio, request_size, base::Bind(
- &AudioConverter::ProvideInput, base::Unretained(this))));
+ downmix_early_ ? output_params.channels() : input_params.channels(),
+ io_sample_rate_ratio,
+ request_size,
+ base::Bind(&AudioConverter::ProvideInput, base::Unretained(this))));
}
input_frame_duration_ = base::TimeDelta::FromMicroseconds(
@@ -86,12 +78,11 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
if (input_params.frames_per_buffer() != output_params.frames_per_buffer()) {
DVLOG(1) << "Rebuffering from " << input_params.frames_per_buffer()
<< " to " << output_params.frames_per_buffer();
+ chunk_size_ = input_params.frames_per_buffer();
audio_fifo_.reset(new AudioPullFifo(
- downmix_early_ ? output_params.channels() :
- input_params.channels(),
- input_params.frames_per_buffer(), base::Bind(
- &AudioConverter::SourceCallback,
- base::Unretained(this))));
+ downmix_early_ ? output_params.channels() : input_params.channels(),
+ chunk_size_,
+ base::Bind(&AudioConverter::SourceCallback, base::Unretained(this))));
}
}
@@ -119,6 +110,12 @@ void AudioConverter::Reset() {
resampler_->Flush();
}
+int AudioConverter::ChunkSize() const {
+ if (!resampler_)
+ return chunk_size_;
+ return resampler_->ChunkSize();
+}
+
void AudioConverter::ConvertWithDelay(const base::TimeDelta& initial_delay,
AudioBus* dest) {
initial_delay_ = initial_delay;
@@ -133,6 +130,10 @@ void AudioConverter::ConvertWithDelay(const base::TimeDelta& initial_delay,
// resampling we can save a lot of processing time. Vice versa, we don't want
// to increase the channel count prior to resampling for the same reason.
bool needs_mixing = channel_mixer_ && !downmix_early_;
+
+ if (needs_mixing)
+ CreateUnmixedAudioIfNecessary(dest->frames());
+
AudioBus* temp_dest = needs_mixing ? unmixed_audio_.get() : dest;
DCHECK(temp_dest);
@@ -160,7 +161,7 @@ void AudioConverter::Convert(AudioBus* dest) {
}
void AudioConverter::SourceCallback(int fifo_frame_delay, AudioBus* dest) {
- bool needs_downmix = channel_mixer_ && downmix_early_;
+ const bool needs_downmix = channel_mixer_ && downmix_early_;
if (!mixer_input_audio_bus_ ||
mixer_input_audio_bus_->frames() != dest->frames()) {
@@ -168,15 +169,13 @@ void AudioConverter::SourceCallback(int fifo_frame_delay, AudioBus* dest) {
AudioBus::Create(input_channel_count_, dest->frames());
}
- if (needs_downmix &&
- (!unmixed_audio_ || unmixed_audio_->frames() != dest->frames())) {
- // If we're downmixing early we need a temporary AudioBus which matches
- // the the input channel count and input frame size since we're passing
- // |unmixed_audio_| directly to the |source_callback_|.
- unmixed_audio_ = AudioBus::Create(input_channel_count_, dest->frames());
- }
+ // If we're downmixing early we need a temporary AudioBus which matches
+  // the input channel count and input frame size since we're passing
+ // |unmixed_audio_| directly to the |source_callback_|.
+ if (needs_downmix)
+ CreateUnmixedAudioIfNecessary(dest->frames());
- AudioBus* temp_dest = needs_downmix ? unmixed_audio_.get() : dest;
+ AudioBus* const temp_dest = needs_downmix ? unmixed_audio_.get() : dest;
// Sanity check our inputs.
DCHECK_EQ(temp_dest->frames(), mixer_input_audio_bus_->frames());
@@ -193,23 +192,27 @@ void AudioConverter::SourceCallback(int fifo_frame_delay, AudioBus* dest) {
fifo_frame_delay * input_frame_duration_.InMicroseconds());
}
+ // If we only have a single input, avoid an extra copy.
+ AudioBus* const provide_input_dest =
+ transform_inputs_.size() == 1 ? temp_dest : mixer_input_audio_bus_.get();
+
// Have each mixer render its data into an output buffer then mix the result.
for (InputCallbackSet::iterator it = transform_inputs_.begin();
it != transform_inputs_.end(); ++it) {
InputCallback* input = *it;
- float volume = input->ProvideInput(
- mixer_input_audio_bus_.get(), buffer_delay);
+ const float volume = input->ProvideInput(provide_input_dest, buffer_delay);
// Optimize the most common single input, full volume case.
if (it == transform_inputs_.begin()) {
if (volume == 1.0f) {
- mixer_input_audio_bus_->CopyTo(temp_dest);
+ if (temp_dest != provide_input_dest)
+ provide_input_dest->CopyTo(temp_dest);
} else if (volume > 0) {
- for (int i = 0; i < mixer_input_audio_bus_->channels(); ++i) {
+ for (int i = 0; i < provide_input_dest->channels(); ++i) {
vector_math::FMUL(
- mixer_input_audio_bus_->channel(i), volume,
- mixer_input_audio_bus_->frames(), temp_dest->channel(i));
+ provide_input_dest->channel(i), volume,
+ provide_input_dest->frames(), temp_dest->channel(i));
}
} else {
// Zero |temp_dest| otherwise, so we're mixing into a clean buffer.
@@ -243,4 +246,9 @@ void AudioConverter::ProvideInput(int resampler_frame_delay, AudioBus* dest) {
SourceCallback(0, dest);
}
+void AudioConverter::CreateUnmixedAudioIfNecessary(int frames) {
+ if (!unmixed_audio_ || unmixed_audio_->frames() != frames)
+ unmixed_audio_ = AudioBus::Create(input_channel_count_, frames);
+}
+
} // namespace media
diff --git a/chromium/media/base/audio_converter.h b/chromium/media/base/audio_converter.h
index 2e43ec880b5..b12dcb95e7d 100644
--- a/chromium/media/base/audio_converter.h
+++ b/chromium/media/base/audio_converter.h
@@ -73,9 +73,8 @@ class MEDIA_EXPORT AudioConverter {
bool disable_fifo);
~AudioConverter();
- // Converts audio from all inputs into the |dest|. |dest| must be sized for
- // data matching the output AudioParameters provided during construction. If
- // an |initial_delay| is specified, it will be propagated to each input.
+ // Converts audio from all inputs into the |dest|. If an |initial_delay| is
+ // specified, it will be propagated to each input.
void Convert(AudioBus* dest);
void ConvertWithDelay(const base::TimeDelta& initial_delay, AudioBus* dest);
@@ -87,6 +86,12 @@ class MEDIA_EXPORT AudioConverter {
// Flushes all buffered data.
void Reset();
+ // The maximum size in frames that guarantees we will only make a single call
+ // to each input's ProvideInput for more data.
+ int ChunkSize() const;
+
+ bool empty() const { return transform_inputs_.empty(); }
+
private:
// Provides input to the MultiChannelResampler. Called by the resampler when
// more data is necessary.
@@ -96,6 +101,9 @@ class MEDIA_EXPORT AudioConverter {
// necessary.
void SourceCallback(int fifo_frame_delay, AudioBus* audio_bus);
+ // (Re)creates the temporary |unmixed_audio_| buffer if necessary.
+ void CreateUnmixedAudioIfNecessary(int frames);
+
// Set of inputs for Convert().
typedef std::list<InputCallback*> InputCallbackSet;
InputCallbackSet transform_inputs_;
@@ -103,6 +111,7 @@ class MEDIA_EXPORT AudioConverter {
// Used to buffer data between the client and the output device in cases where
// the client buffer size is not the same as the output device buffer size.
scoped_ptr<AudioPullFifo> audio_fifo_;
+ int chunk_size_;
// Handles resampling.
scoped_ptr<MultiChannelResampler> resampler_;
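
How a caller might use the new ChunkSize() accessor: requesting no more than ChunkSize() frames per Convert() guarantees at most one ProvideInput() call to each input. A sketch under that assumption (parameter setup elided; AudioConverter and AudioBus as declared above, |frames_wanted| is illustrative):

    // Pull output in chunks no larger than the converter's chunk size so
    // each input's ProvideInput() is invoked at most once per Convert().
    const int request = std::min(frames_wanted, converter.ChunkSize());
    scoped_ptr<AudioBus> output =
        AudioBus::Create(output_params.channels(), request);
    converter.Convert(output.get());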
diff --git a/chromium/media/base/audio_converter_unittest.cc b/chromium/media/base/audio_converter_unittest.cc
index aeb021c3114..b1564db4233 100644
--- a/chromium/media/base/audio_converter_unittest.cc
+++ b/chromium/media/base/audio_converter_unittest.cc
@@ -227,6 +227,13 @@ TEST(AudioConverterTest, AudioDelay) {
callback.last_audio_delay_milliseconds());
}
+TEST_P(AudioConverterTest, ArbitraryOutputRequestSize) {
+ // Resize output bus to be half of |output_parameters_|'s frames_per_buffer().
+ audio_bus_ = AudioBus::Create(output_parameters_.channels(),
+ output_parameters_.frames_per_buffer() / 2);
+ RunTest(1);
+}
+
TEST_P(AudioConverterTest, NoInputs) {
FillAudioData(1.0f);
EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
diff --git a/chromium/media/base/audio_decoder.cc b/chromium/media/base/audio_decoder.cc
index 939066078c4..5212794d983 100644
--- a/chromium/media/base/audio_decoder.cc
+++ b/chromium/media/base/audio_decoder.cc
@@ -4,6 +4,8 @@
#include "media/base/audio_decoder.h"
+#include "media/base/audio_buffer.h"
+
namespace media {
AudioDecoder::AudioDecoder() {}
diff --git a/chromium/media/base/audio_decoder.h b/chromium/media/base/audio_decoder.h
index aa2eeb80ea8..0118b5e4fdd 100644
--- a/chromium/media/base/audio_decoder.h
+++ b/chromium/media/base/audio_decoder.h
@@ -7,9 +7,11 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "media/base/audio_decoder_config.h"
#include "media/base/channel_layout.h"
-#include "media/base/pipeline_status.h"
+#include "media/base/decoder_buffer.h"
#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
namespace media {
@@ -18,44 +20,61 @@ class DemuxerStream;
class MEDIA_EXPORT AudioDecoder {
public:
- // Status codes for read operations.
+ // Status codes for decode operations.
+ // TODO(rileya): Now that both AudioDecoder and VideoDecoder Status enums
+ // match, break them into a decoder_status.h.
enum Status {
- kOk,
- kAborted,
- kDecodeError,
+ kOk, // We're all good.
+ kAborted, // We aborted as a result of Stop() or Reset().
+ kDecodeError, // A decoding error occurred.
+    kDecryptError // A decryption error occurred.
};
+ // Callback for AudioDecoder to return a decoded frame whenever it becomes
+ // available. Only non-EOS frames should be returned via this callback.
+ typedef base::Callback<void(const scoped_refptr<AudioBuffer>&)> OutputCB;
+
+ // Callback for Decode(). Called after the decoder has completed decoding
+ // corresponding DecoderBuffer, indicating that it's ready to accept another
+ // buffer to decode.
+ typedef base::Callback<void(Status)> DecodeCB;
+
AudioDecoder();
virtual ~AudioDecoder();
- // Initialize an AudioDecoder with the given DemuxerStream, executing the
+ // Initializes an AudioDecoder with the given DemuxerStream, executing the
// callback upon completion.
- // statistics_cb is used to update global pipeline statistics.
- virtual void Initialize(DemuxerStream* stream,
+ // |statistics_cb| is used to update global pipeline statistics.
+ // |output_cb| is called for decoded audio buffers (see Decode()).
+ virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) = 0;
+ const OutputCB& output_cb) = 0;
- // Request samples to be decoded and returned via the provided callback.
- // Only one read may be in flight at any given time.
+ // Requests samples to be decoded. Only one decode may be in flight at any
+ // given time. Once the buffer is decoded the decoder calls |decode_cb|.
+ // |output_cb| specified in Initialize() is called for each decoded buffer,
+ // before or after |decode_cb|.
//
- // Implementations guarantee that the callback will not be called from within
+ // Implementations guarantee that the callbacks will not be called from within
// this method.
//
- // Non-NULL sample buffer pointers will contain decoded audio data or may
- // indicate the end of the stream. A NULL buffer pointer indicates an aborted
- // Read(). This can happen if the DemuxerStream gets flushed and doesn't have
- // any more data to return.
- typedef base::Callback<void(Status, const scoped_refptr<AudioBuffer>&)>
- ReadCB;
- virtual void Read(const ReadCB& read_cb) = 0;
+ // If |buffer| is an EOS buffer then the decoder must be flushed, i.e.
+ // |output_cb| must be called for each frame pending in the queue and
+ // |decode_cb| must be called after that.
+ virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) = 0;
- // Reset decoder state, dropping any queued encoded data.
+ // Resets decoder state. All pending Decode() requests will be finished or
+ // aborted before |closure| is called.
virtual void Reset(const base::Closure& closure) = 0;
- // Returns various information about the decoded audio format.
- virtual int bits_per_channel() = 0;
- virtual ChannelLayout channel_layout() = 0;
- virtual int samples_per_second() = 0;
+  // Stops the decoder, fires any pending callbacks and sets the decoder to an
+ // uninitialized state. An AudioDecoder cannot be re-initialized after it has
+ // been stopped. DecodeCB and OutputCB may still be called for older buffers
+ // if they were scheduled before this method is called.
+ // Note that if Initialize() is pending or has finished successfully, Stop()
+ // must be called before destructing the decoder.
+ virtual void Stop() = 0;
private:
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
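
A rough sketch of the new calling convention from a hypothetical client; the free functions and |next_buffer| are illustrative stand-ins, while the Initialize()/Decode() signatures follow the interface above:

    // Decoded, non-EOS frames arrive through the OutputCB given at
    // Initialize(); Decode() completion is signalled separately.
    void OnDecoderOutput(const scoped_refptr<AudioBuffer>& buffer) {
      // Forward |buffer| downstream.
    }

    void OnDecodeDone(AudioDecoder::Status status) {
      // On kOk the decoder is ready for the next DecoderBuffer; an EOS
      // buffer's DecodeCB fires only after all pending output is flushed.
    }

    decoder->Initialize(
        config, base::Bind(&OnInitDone), base::Bind(&OnDecoderOutput));
    decoder->Decode(next_buffer, base::Bind(&OnDecodeDone));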
diff --git a/chromium/media/base/audio_decoder_config.cc b/chromium/media/base/audio_decoder_config.cc
index dfaf94a2682..06a1643f0a1 100644
--- a/chromium/media/base/audio_decoder_config.cc
+++ b/chromium/media/base/audio_decoder_config.cc
@@ -20,7 +20,8 @@ AudioDecoderConfig::AudioDecoderConfig()
channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED),
samples_per_second_(0),
bytes_per_frame_(0),
- is_encrypted_(false) {
+ is_encrypted_(false),
+ codec_delay_(0) {
}
AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
@@ -32,7 +33,7 @@ AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
bool is_encrypted) {
Initialize(codec, sample_format, channel_layout, samples_per_second,
extra_data, extra_data_size, is_encrypted, true,
- base::TimeDelta(), base::TimeDelta());
+ base::TimeDelta(), 0);
}
void AudioDecoderConfig::Initialize(AudioCodec codec,
@@ -44,19 +45,19 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
bool is_encrypted,
bool record_stats,
base::TimeDelta seek_preroll,
- base::TimeDelta codec_delay) {
+ int codec_delay) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
if (record_stats) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax + 1);
UMA_HISTOGRAM_ENUMERATION("Media.AudioSampleFormat", sample_format,
- kSampleFormatMax);
+ kSampleFormatMax + 1);
UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout", channel_layout,
- CHANNEL_LAYOUT_MAX);
- AudioSampleRate asr = media::AsAudioSampleRate(samples_per_second);
- if (asr != kUnexpectedAudioSampleRate) {
+ CHANNEL_LAYOUT_MAX + 1);
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(samples_per_second, &asr)) {
UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
- kUnexpectedAudioSampleRate);
+ kAudioSampleRateMax + 1);
} else {
UMA_HISTOGRAM_COUNTS(
"Media.AudioSamplesPerSecondUnexpected", samples_per_second);
@@ -88,7 +89,7 @@ bool AudioDecoderConfig::IsValidConfig() const {
samples_per_second_ <= limits::kMaxSampleRate &&
sample_format_ != kUnknownSampleFormat &&
seek_preroll_ >= base::TimeDelta() &&
- codec_delay_ >= base::TimeDelta();
+ codec_delay_ >= 0;
}
bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
@@ -105,4 +106,19 @@ bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
(codec_delay() == config.codec_delay()));
}
+std::string AudioDecoderConfig::AsHumanReadableString() const {
+ std::ostringstream s;
+ s << "codec: " << codec()
+ << " bytes_per_channel: " << bytes_per_channel()
+ << " channel_layout: " << channel_layout()
+ << " samples_per_second: " << samples_per_second()
+ << " sample_format: " << sample_format()
+ << " bytes_per_frame: " << bytes_per_frame()
+ << " seek_preroll: " << seek_preroll().InMilliseconds() << "ms"
+ << " codec_delay: " << codec_delay()
+ << " has extra data? " << (extra_data() ? "true" : "false")
+ << " encrypted? " << (is_encrypted() ? "true" : "false");
+ return s.str();
+}
+
} // namespace media
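
With |codec_delay| now a frame count, a call site that previously carried the delay as a base::TimeDelta needs a conversion on the way in. A sketch of that conversion (the function name is illustrative; the round-to-nearest convention is an assumption, though it matches AudioDiscardHelper::TimeDeltaToFrames below):

    // Convert a codec-reported delay from time to frames at the stream's
    // sample rate, rounding to the nearest whole frame.
    static int CodecDelayInFrames(base::TimeDelta delay, int sample_rate) {
      return static_cast<int>(delay.InSecondsF() * sample_rate + 0.5);
    }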
diff --git a/chromium/media/base/audio_decoder_config.h b/chromium/media/base/audio_decoder_config.h
index 53705ccda7b..c8c7b47d23d 100644
--- a/chromium/media/base/audio_decoder_config.h
+++ b/chromium/media/base/audio_decoder_config.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
#define MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
+#include <string>
#include <vector>
#include "base/basictypes.h"
@@ -18,29 +19,30 @@ namespace media {
enum AudioCodec {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a codec replace it with a dummy value; when adding a
- // codec, do so at the bottom before kAudioCodecMax.
+ // codec, do so at the bottom before kAudioCodecMax, and update the value of
+ // kAudioCodecMax to equal the new codec.
kUnknownAudioCodec = 0,
- kCodecAAC,
- kCodecMP3,
- kCodecPCM,
- kCodecVorbis,
- kCodecFLAC,
- kCodecAMR_NB,
- kCodecAMR_WB,
- kCodecPCM_MULAW,
- kCodecGSM_MS,
- kCodecPCM_S16BE,
- kCodecPCM_S24BE,
- kCodecOpus,
- kCodecEAC3,
- kCodecPCM_ALAW,
+ kCodecAAC = 1,
+ kCodecMP3 = 2,
+ kCodecPCM = 3,
+ kCodecVorbis = 4,
+ kCodecFLAC = 5,
+ kCodecAMR_NB = 6,
+ kCodecAMR_WB = 7,
+ kCodecPCM_MULAW = 8,
+ kCodecGSM_MS = 9,
+ kCodecPCM_S16BE = 10,
+ kCodecPCM_S24BE = 11,
+ kCodecOpus = 12,
+ // kCodecEAC3 = 13,
+ kCodecPCM_ALAW = 14,
// DO NOT ADD RANDOM AUDIO CODECS!
//
// The only acceptable time to add a new codec is if there is production code
// that uses said codec in the same CL.
- // Must always be last!
- kAudioCodecMax
+ // Must always be equal to the largest entry ever logged.
+ kAudioCodecMax = kCodecPCM_ALAW,
};
// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
@@ -61,13 +63,13 @@ class MEDIA_EXPORT AudioDecoderConfig {
~AudioDecoderConfig();
- // Resets the internal state of this object.
+ // Resets the internal state of this object. |codec_delay| is in frames.
void Initialize(AudioCodec codec, SampleFormat sample_format,
ChannelLayout channel_layout, int samples_per_second,
const uint8* extra_data, size_t extra_data_size,
bool is_encrypted, bool record_stats,
base::TimeDelta seek_preroll,
- base::TimeDelta codec_delay);
+ int codec_delay);
// Returns true if this object has appropriate configuration values, false
// otherwise.
@@ -77,6 +79,10 @@ class MEDIA_EXPORT AudioDecoderConfig {
// Note: The contents of |extra_data_| are compared not the raw pointers.
bool Matches(const AudioDecoderConfig& config) const;
+ // Returns a human-readable string describing |*this|. For debugging & test
+ // output only.
+ std::string AsHumanReadableString() const;
+
AudioCodec codec() const { return codec_; }
int bits_per_channel() const { return bytes_per_channel_ * 8; }
int bytes_per_channel() const { return bytes_per_channel_; }
@@ -85,7 +91,7 @@ class MEDIA_EXPORT AudioDecoderConfig {
SampleFormat sample_format() const { return sample_format_; }
int bytes_per_frame() const { return bytes_per_frame_; }
base::TimeDelta seek_preroll() const { return seek_preroll_; }
- base::TimeDelta codec_delay() const { return codec_delay_; }
+ int codec_delay() const { return codec_delay_; }
// Optional byte data required to initialize audio decoders such as Vorbis
// codebooks.
@@ -113,10 +119,10 @@ class MEDIA_EXPORT AudioDecoderConfig {
// before the decoded data is valid.
base::TimeDelta seek_preroll_;
- // |codec_delay_| is the overall delay overhead added by the codec while
- // encoding. This value should be subtracted from each block's timestamp to
- // get the actual timestamp.
- base::TimeDelta codec_delay_;
+ // |codec_delay_| is the number of frames the decoder should discard before
+ // returning decoded data. This value can include both decoder delay as well
+ // as padding added during encoding.
+ int codec_delay_;
// Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the compiler
// generated copy constructor and assignment operator. Since the extra data is
diff --git a/chromium/media/base/audio_discard_helper.cc b/chromium/media/base/audio_discard_helper.cc
new file mode 100644
index 00000000000..303ee79f0cc
--- /dev/null
+++ b/chromium/media/base/audio_discard_helper.cc
@@ -0,0 +1,199 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_discard_helper.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+static void WarnOnNonMonotonicTimestamps(base::TimeDelta last_timestamp,
+ base::TimeDelta current_timestamp) {
+ if (last_timestamp == kNoTimestamp() || last_timestamp < current_timestamp)
+ return;
+
+ const base::TimeDelta diff = current_timestamp - last_timestamp;
+ DLOG(WARNING) << "Input timestamps are not monotonically increasing! "
+ << " ts " << current_timestamp.InMicroseconds() << " us"
+ << " diff " << diff.InMicroseconds() << " us";
+}
+
+AudioDiscardHelper::AudioDiscardHelper(int sample_rate, size_t decoder_delay)
+ : sample_rate_(sample_rate),
+ decoder_delay_(decoder_delay),
+ timestamp_helper_(sample_rate_),
+ discard_frames_(0),
+ last_input_timestamp_(kNoTimestamp()),
+ delayed_discard_(false) {
+ DCHECK_GT(sample_rate_, 0);
+}
+
+AudioDiscardHelper::~AudioDiscardHelper() {
+}
+
+size_t AudioDiscardHelper::TimeDeltaToFrames(base::TimeDelta duration) const {
+ DCHECK(duration >= base::TimeDelta());
+ return duration.InSecondsF() * sample_rate_ + 0.5;
+}
+
+void AudioDiscardHelper::Reset(size_t initial_discard) {
+ discard_frames_ = initial_discard;
+ last_input_timestamp_ = kNoTimestamp();
+ timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+ delayed_discard_ = false;
+ delayed_discard_padding_ = DecoderBuffer::DiscardPadding();
+}
+
+bool AudioDiscardHelper::ProcessBuffers(
+ const scoped_refptr<DecoderBuffer>& encoded_buffer,
+ const scoped_refptr<AudioBuffer>& decoded_buffer) {
+ DCHECK(!encoded_buffer->end_of_stream());
+ DCHECK(encoded_buffer->timestamp() != kNoTimestamp());
+
+ // Issue a debug warning when we see non-monotonic timestamps. Only a warning
+ // to allow chained OGG playback.
+ WarnOnNonMonotonicTimestamps(last_input_timestamp_,
+ encoded_buffer->timestamp());
+ last_input_timestamp_ = encoded_buffer->timestamp();
+
+ // If this is the first buffer seen, setup the timestamp helper.
+ const bool first_buffer = !initialized();
+ if (first_buffer) {
+ // Clamp the base timestamp to zero.
+ timestamp_helper_.SetBaseTimestamp(
+ std::max(base::TimeDelta(), encoded_buffer->timestamp()));
+ }
+ DCHECK(initialized());
+
+ if (!decoded_buffer) {
+ // If there's a one buffer delay for decoding, we need to save it so it can
+ // be processed with the next decoder buffer.
+ if (first_buffer) {
+ delayed_discard_ = true;
+ delayed_discard_padding_ = encoded_buffer->discard_padding();
+ }
+ return false;
+ }
+
+ const size_t original_frame_count = decoded_buffer->frame_count();
+
+ // If there's a one buffer delay for decoding, pick up the last encoded
+ // buffer's discard padding for processing with the current decoded buffer.
+ DecoderBuffer::DiscardPadding current_discard_padding =
+ encoded_buffer->discard_padding();
+ if (delayed_discard_) {
+ // For simplicity disallow cases where decoder delay is present with delayed
+ // discard (no codecs at present). Doing so allows us to avoid complexity
+ // around endpoint tracking when handling complete buffer discards.
+ DCHECK_EQ(decoder_delay_, 0u);
+ std::swap(current_discard_padding, delayed_discard_padding_);
+ }
+
+ if (discard_frames_ > 0) {
+ const size_t decoded_frames = decoded_buffer->frame_count();
+ const size_t frames_to_discard = std::min(discard_frames_, decoded_frames);
+ discard_frames_ -= frames_to_discard;
+
+ // If everything would be discarded, indicate a new buffer is required.
+ if (frames_to_discard == decoded_frames) {
+ // For simplicity disallow cases where a buffer with discard padding is
+ // present. Doing so allows us to avoid complexity around tracking
+ // discards across buffers.
+ DCHECK(current_discard_padding.first == base::TimeDelta());
+ DCHECK(current_discard_padding.second == base::TimeDelta());
+ return false;
+ }
+
+ decoded_buffer->TrimStart(frames_to_discard);
+ }
+
+ // Handle front discard padding.
+ if (current_discard_padding.first > base::TimeDelta()) {
+ const size_t decoded_frames = decoded_buffer->frame_count();
+
+ // If a complete buffer discard is requested and there's no decoder delay,
+ // just discard all remaining frames from this buffer. With decoder delay
+ // we have to estimate the correct number of frames to discard based on the
+ // duration of the encoded buffer.
+ const size_t start_frames_to_discard =
+ current_discard_padding.first == kInfiniteDuration()
+ ? (decoder_delay_ > 0
+ ? TimeDeltaToFrames(encoded_buffer->duration())
+ : decoded_frames)
+ : TimeDeltaToFrames(current_discard_padding.first);
+
+ // Regardless of the timestamp on the encoded buffer, the corresponding
+ // decoded output will appear |decoder_delay_| frames later.
+ size_t discard_start = decoder_delay_;
+ if (decoder_delay_ > 0) {
+ // If we have a |decoder_delay_| and have already discarded frames from
+ // this buffer, the |discard_start| must be adjusted by the number of
+ // frames already discarded.
+ const size_t frames_discarded_so_far =
+ original_frame_count - decoded_buffer->frame_count();
+ CHECK_LE(frames_discarded_so_far, decoder_delay_);
+ discard_start -= frames_discarded_so_far;
+ }
+
+ // For simplicity require the start of the discard to be within the current
+    // buffer. Doing so allows us to avoid complexity around tracking discards
+ // across buffers.
+ CHECK_LT(discard_start, decoded_frames);
+
+ const size_t frames_to_discard =
+ std::min(start_frames_to_discard, decoded_frames - discard_start);
+
+ // Carry over any frames which need to be discarded from the front of the
+ // next buffer.
+ DCHECK(!discard_frames_);
+ discard_frames_ = start_frames_to_discard - frames_to_discard;
+
+ // If everything would be discarded, indicate a new buffer is required.
+ if (frames_to_discard == decoded_frames) {
+ // The buffer should not have been marked with end discard if the front
+ // discard removes everything.
+ DCHECK(current_discard_padding.second == base::TimeDelta());
+ return false;
+ }
+
+ decoded_buffer->TrimRange(discard_start, discard_start + frames_to_discard);
+ } else {
+ DCHECK(current_discard_padding.first == base::TimeDelta());
+ }
+
+ // Handle end discard padding.
+ if (current_discard_padding.second > base::TimeDelta()) {
+ // Limit end discarding to when there is no |decoder_delay_|, otherwise it's
+ // non-trivial determining where to start discarding end frames.
+ CHECK(!decoder_delay_);
+
+ const size_t decoded_frames = decoded_buffer->frame_count();
+ const size_t end_frames_to_discard =
+ TimeDeltaToFrames(current_discard_padding.second);
+
+ if (end_frames_to_discard > decoded_frames) {
+ DLOG(ERROR) << "Encountered invalid discard padding value.";
+ return false;
+ }
+
+ // If everything would be discarded, indicate a new buffer is required.
+ if (end_frames_to_discard == decoded_frames)
+ return false;
+
+ decoded_buffer->TrimEnd(end_frames_to_discard);
+ } else {
+ DCHECK(current_discard_padding.second == base::TimeDelta());
+ }
+
+ // Assign timestamp to the buffer.
+ decoded_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
+ timestamp_helper_.AddFrames(decoded_buffer->frame_count());
+ return true;
+}
+
+} // namespace media
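
To make the |discard_start| adjustment above concrete, a worked example under assumed numbers: with decoder_delay_ == 10 and 4 frames already trimmed from the front of this decoded buffer by the |discard_frames_| block, frames_discarded_so_far = original_frame_count - frame_count() = 4, so discard_start = 10 - 4 = 6. The front padding is therefore trimmed starting at frame 6 of the already-shortened buffer, which corresponds to frame 10 of the decoder's raw output.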
diff --git a/chromium/media/base/audio_discard_helper.h b/chromium/media/base/audio_discard_helper.h
new file mode 100644
index 00000000000..deeb45f37ea
--- /dev/null
+++ b/chromium/media/base/audio_discard_helper.h
@@ -0,0 +1,81 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_DISCARD_HELPER_H_
+#define MEDIA_BASE_AUDIO_DISCARD_HELPER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBuffer;
+
+// Helper class for managing timestamps and discard events around decoding.
+class MEDIA_EXPORT AudioDiscardHelper {
+ public:
+ // |sample_rate| is the sample rate of decoded data which will be handed into
+ // the ProcessBuffers() call.
+ //
+ // |decoder_delay| is the number of frames a decoder will output before data
+ // corresponding to the first encoded buffer is output. Callers only need to
+ // specify this if the decoder inserts frames which have no corresponding
+ // encoded buffer.
+ //
+ // For example, most MP3 decoders will output 529 junk frames before the data
+ // corresponding to the first encoded buffer is output. These frames are not
+ // represented in the encoded data stream and instead are an artifact of how
+ // most MP3 decoders work. See http://lame.sourceforge.net/tech-FAQ.txt
+ //
+ // NOTE: End discard is only supported when there is no |decoder_delay|.
+ AudioDiscardHelper(int sample_rate, size_t decoder_delay);
+ ~AudioDiscardHelper();
+
+ // Converts a TimeDelta to a frame count based on the constructed sample rate.
+ // |duration| must be positive.
+ size_t TimeDeltaToFrames(base::TimeDelta duration) const;
+
+ // Resets internal state and indicates that |initial_discard| of upcoming
+ // frames should be discarded.
+ void Reset(size_t initial_discard);
+
+ // Applies discard padding from the encoded buffer along with any initial
+  // discards. |decoded_buffer| may be NULL; if not, its timestamp and duration
+ // will be set after discards are applied. Returns true if |decoded_buffer|
+ // exists after processing discard events. Returns false if |decoded_buffer|
+ // was NULL, is completely discarded, or a processing error occurs.
+ //
+ // If AudioDiscardHelper is not initialized() the timestamp of the first
+ // |encoded_buffer| will be used as the basis for all future timestamps set on
+ // |decoded_buffer|s. If the first buffer has a negative timestamp it will be
+ // clamped to zero.
+ bool ProcessBuffers(const scoped_refptr<DecoderBuffer>& encoded_buffer,
+ const scoped_refptr<AudioBuffer>& decoded_buffer);
+
+ // Whether any buffers have been processed.
+ bool initialized() const {
+ return timestamp_helper_.base_timestamp() != kNoTimestamp();
+ }
+
+ private:
+ const int sample_rate_;
+ const size_t decoder_delay_;
+ AudioTimestampHelper timestamp_helper_;
+
+ size_t discard_frames_;
+ base::TimeDelta last_input_timestamp_;
+
+ bool delayed_discard_;
+ DecoderBuffer::DiscardPadding delayed_discard_padding_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AudioDiscardHelper);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_DISCARD_HELPER_H_
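
A condensed sketch of the intended call pattern inside a decoder; the surrounding names (|decoder_delay_in_frames|, |output_cb|) are illustrative, while the helper calls follow the interface above:

    // Per-stream setup: one helper per decoder instance.
    AudioDiscardHelper discard_helper(config.samples_per_second(),
                                      decoder_delay_in_frames);
    discard_helper.Reset(config.codec_delay());  // initial frames to drop

    // Per buffer: pair each encoded input with the decoder's output, which
    // may be NULL while the decoder buffers internally. ProcessBuffers()
    // applies all discards and stamps timestamp/duration; forward the
    // decoded buffer only when it returns true.
    if (discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer))
      output_cb.Run(decoded_buffer);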
diff --git a/chromium/media/base/audio_discard_helper_unittest.cc b/chromium/media/base/audio_discard_helper_unittest.cc
new file mode 100644
index 00000000000..1ea0cc6f816
--- /dev/null
+++ b/chromium/media/base/audio_discard_helper_unittest.cc
@@ -0,0 +1,481 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_discard_helper.h"
+#include "media/base/buffers.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/test_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const float kDataStep = 0.01f;
+static const size_t kSampleRate = 48000;
+
+static scoped_refptr<DecoderBuffer> CreateEncodedBuffer(
+ base::TimeDelta timestamp,
+ base::TimeDelta duration) {
+ scoped_refptr<DecoderBuffer> result(new DecoderBuffer(1));
+ result->set_timestamp(timestamp);
+ result->set_duration(duration);
+ return result;
+}
+
+static scoped_refptr<AudioBuffer> CreateDecodedBuffer(int frames) {
+ return MakeAudioBuffer(kSampleFormatPlanarF32,
+ CHANNEL_LAYOUT_MONO,
+ 1,
+ kSampleRate,
+ 0.0f,
+ kDataStep,
+ frames,
+ kNoTimestamp());
+}
+
+static float ExtractDecodedData(const scoped_refptr<AudioBuffer>& buffer,
+ int index) {
+ // This is really inefficient, but we can't access the raw AudioBuffer if any
+ // start trimming has been applied.
+ scoped_ptr<AudioBus> temp_bus = AudioBus::Create(buffer->channel_count(), 1);
+ buffer->ReadFrames(1, index, 0, temp_bus.get());
+ return temp_bus->channel(0)[0];
+}
+
+TEST(AudioDiscardHelperTest, TimeDeltaToFrames) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+
+ EXPECT_EQ(0u, discard_helper.TimeDeltaToFrames(base::TimeDelta()));
+ EXPECT_EQ(
+ kSampleRate / 100,
+ discard_helper.TimeDeltaToFrames(base::TimeDelta::FromMilliseconds(10)));
+
+ // Ensure partial frames are rounded down correctly. The equation below
+ // calculates a frame count with a fractional part < 0.5.
+ const int small_remainder =
+ base::Time::kMicrosecondsPerSecond * (kSampleRate - 0.9) / kSampleRate;
+ EXPECT_EQ(kSampleRate - 1,
+ discard_helper.TimeDeltaToFrames(
+ base::TimeDelta::FromMicroseconds(small_remainder)));
+
+ // Ditto, but rounded up using a fractional part > 0.5.
+ const int large_remainder =
+ base::Time::kMicrosecondsPerSecond * (kSampleRate - 0.4) / kSampleRate;
+ EXPECT_EQ(kSampleRate,
+ discard_helper.TimeDeltaToFrames(
+ base::TimeDelta::FromMicroseconds(large_remainder)));
+}
+
+TEST(AudioDiscardHelperTest, BasicProcessBuffers) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+
+ // Use an estimated duration which doesn't match the number of decoded frames
+ // to ensure the helper is correctly setting durations based on output frames.
+ const base::TimeDelta kEstimatedDuration =
+ base::TimeDelta::FromMilliseconds(9);
+ const base::TimeDelta kActualDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kActualDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kEstimatedDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Verify the basic case where nothing is discarded.
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kActualDuration, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames, decoded_buffer->frame_count());
+
+ // Verify a Reset() takes us back to an uninitialized state.
+ discard_helper.Reset(0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ // Verify a NULL output buffer returns false.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, NULL));
+}
+
+TEST(AudioDiscardHelperTest, NegativeTimestampClampsToZero) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = -base::TimeDelta::FromSeconds(1);
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Verify the basic case where nothing is discarded.
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(base::TimeDelta(), decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames, decoded_buffer->frame_count());
+}
+
+TEST(AudioDiscardHelperTest, ProcessBuffersWithInitialDiscard) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ // Tell the helper we want to discard half of the initial frames.
+ const int kDiscardFrames = kTestFrames / 2;
+ discard_helper.Reset(kDiscardFrames);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Verify half the frames end up discarded.
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kDiscardFrames, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(kDiscardFrames * kDataStep,
+ ExtractDecodedData(decoded_buffer, 0));
+}
+
+TEST(AudioDiscardHelperTest, ProcessBuffersWithLargeInitialDiscard) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ // Tell the helper we want to discard 1.5 buffers worth of frames.
+ discard_helper.Reset(kTestFrames * 1.5);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // The first call should fail since no output buffer remains.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+
+ // Generate another set of buffers and expect half the output frames.
+ encoded_buffer = CreateEncodedBuffer(kTimestamp + kDuration, kDuration);
+ decoded_buffer = CreateDecodedBuffer(kTestFrames);
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+
+ // The timestamp should match that of the initial buffer.
+ const int kDiscardFrames = kTestFrames / 2;
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kDiscardFrames, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(kDiscardFrames * kDataStep,
+ ExtractDecodedData(decoded_buffer, 0));
+}
+
+TEST(AudioDiscardHelperTest, AllowNonMonotonicTimestamps) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames, decoded_buffer->frame_count());
+
+ // Process the same input buffer again to ensure input timestamps which go
+ // backwards in time are not errors.
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp + kDuration, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames, decoded_buffer->frame_count());
+}
+
+TEST(AudioDiscardHelperTest, DiscardEndPadding) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Set a discard padding equivalent to half the buffer.
+ encoded_buffer->set_discard_padding(
+ std::make_pair(base::TimeDelta(), kDuration / 2));
+
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
+}
+
+TEST(AudioDiscardHelperTest, BadDiscardEndPadding) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Set a discard padding equivalent to double the buffer size.
+ encoded_buffer->set_discard_padding(
+ std::make_pair(base::TimeDelta(), kDuration * 2));
+
+ // Verify the end discard padding is rejected.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+}
+
+TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardEndPadding) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Set a discard padding equivalent to a quarter of the buffer.
+ encoded_buffer->set_discard_padding(
+ std::make_pair(base::TimeDelta(), kDuration / 4));
+
+ // Set an initial discard of a quarter of the buffer.
+ const int kDiscardFrames = kTestFrames / 4;
+ discard_helper.Reset(kDiscardFrames);
+
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(kDiscardFrames * kDataStep,
+ ExtractDecodedData(decoded_buffer, 0));
+}
+
+TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPadding) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Set all the discard values to be different to ensure each is properly used.
+ const int kDiscardFrames = kTestFrames / 4;
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kDuration / 8, kDuration / 16));
+ discard_helper.Reset(kDiscardFrames);
+
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration - kDuration / 4 - kDuration / 8 - kDuration / 16,
+ decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames - kTestFrames / 4 - kTestFrames / 8 - kTestFrames / 16,
+ decoded_buffer->frame_count());
+}
+
+TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
+ // Use a decoder delay of 5ms.
+ const int kDecoderDelay = kSampleRate / 100 / 2;
+ AudioDiscardHelper discard_helper(kSampleRate, kDecoderDelay);
+ ASSERT_FALSE(discard_helper.initialized());
+ discard_helper.Reset(kDecoderDelay);
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Set a discard padding equivalent to half of the buffer.
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kDuration / 2, base::TimeDelta()));
+
+ // All of the first buffer should be discarded.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+
+ // Processing another buffer (with the same discard padding) should discard
+ // the back half of the buffer since kDecoderDelay is half a buffer.
+ encoded_buffer->set_timestamp(kTimestamp + kDuration);
+ decoded_buffer = CreateDecodedBuffer(kTestFrames);
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_NEAR(kDecoderDelay * kDataStep,
+ ExtractDecodedData(decoded_buffer, kDecoderDelay),
+ kDataStep * 1000);
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
+
+ // Verify it was actually the latter half of the buffer that was removed.
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+}
+
+TEST(AudioDiscardHelperTest, DelayedDiscardInitialDiscardAndDiscardPadding) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+
+ // Set all the discard values to be different to ensure each is properly used.
+ const int kDiscardFrames = kTestFrames / 4;
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kDuration / 8, kDuration / 16));
+ discard_helper.Reset(kDiscardFrames);
+
+ // Verify nothing is output for the first buffer, yet initialized() is true.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, NULL));
+ ASSERT_TRUE(discard_helper.initialized());
+
+ // Create an encoded buffer with no discard padding.
+ encoded_buffer = CreateEncodedBuffer(kTimestamp + kDuration, kDuration);
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Verify that when the decoded buffer is consumed, the discards from the
+ // previous encoded buffer are applied.
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration - kDuration / 4 - kDuration / 8 - kDuration / 16,
+ decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames - kTestFrames / 4 - kTestFrames / 8 - kTestFrames / 16,
+ decoded_buffer->frame_count());
+}
+
+TEST(AudioDiscardHelperTest, CompleteDiscard) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+ discard_helper.Reset(0);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kInfiniteDuration(), base::TimeDelta()));
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Verify all of the first buffer is discarded.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ encoded_buffer->set_timestamp(kTimestamp + kDuration);
+ encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
+
+ // Verify a second buffer goes through untouched.
+ decoded_buffer = CreateDecodedBuffer(kTestFrames / 2);
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+}
+
+TEST(AudioDiscardHelperTest, CompleteDiscardWithDelayedDiscard) {
+ AudioDiscardHelper discard_helper(kSampleRate, 0);
+ ASSERT_FALSE(discard_helper.initialized());
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+ discard_helper.Reset(0);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kInfiniteDuration(), base::TimeDelta()));
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Set up a delayed discard.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, NULL));
+ ASSERT_TRUE(discard_helper.initialized());
+
+ // Verify the first output buffer is dropped.
+ encoded_buffer->set_timestamp(kTimestamp + kDuration);
+ encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+
+ // Verify the second buffer goes through untouched.
+ encoded_buffer->set_timestamp(kTimestamp + 2 * kDuration);
+ decoded_buffer = CreateDecodedBuffer(kTestFrames / 2);
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+}
+
+TEST(AudioDiscardHelperTest, CompleteDiscardWithInitialDiscardDecoderDelay) {
+ // Use a decoder delay of 5ms.
+ const int kDecoderDelay = kSampleRate / 100 / 2;
+ AudioDiscardHelper discard_helper(kSampleRate, kDecoderDelay);
+ ASSERT_FALSE(discard_helper.initialized());
+ discard_helper.Reset(kDecoderDelay);
+
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(10);
+ const int kTestFrames = discard_helper.TimeDeltaToFrames(kDuration);
+
+ scoped_refptr<DecoderBuffer> encoded_buffer =
+ CreateEncodedBuffer(kTimestamp, kDuration);
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kInfiniteDuration(), base::TimeDelta()));
+ scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
+
+ // Verify all of the first buffer is discarded.
+ ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(discard_helper.initialized());
+ encoded_buffer->set_timestamp(kTimestamp + kDuration);
+ encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
+
+ // Verify 5ms off the front of the second buffer is discarded.
+ decoded_buffer = CreateDecodedBuffer(kTestFrames * 2);
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration * 2 - kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames * 2 - kDecoderDelay, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(kDecoderDelay * kDataStep,
+ ExtractDecodedData(decoded_buffer, 0));
+}
+
+} // namespace media
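// The tests above combine three discard sources: an initial discard requested
// via Reset(), per-buffer front/back discard padding, and a fixed decoder
// delay. A minimal standalone sketch of the resulting frame arithmetic
// (DiscardModel and its members are hypothetical, not part of the
// AudioDiscardHelper API):
#include <algorithm>
#include <cassert>

struct DiscardModel {
  int pending_initial_discard;  // Frames still owed from Reset().

  // Returns how many frames of a |frames|-sized decoded buffer survive the
  // initial discard plus this buffer's front and back padding. Zero output
  // corresponds to ProcessBuffers() returning false.
  int OutputFrames(int frames, int front_padding, int back_padding) {
    const int initial = std::min(pending_initial_discard, frames);
    pending_initial_discard -= initial;
    return std::max(frames - initial - front_padding - back_padding, 0);
  }
};

int main() {
  // Mirrors InitialDiscardAndDiscardPadding above with a 100 frame buffer:
  // 1/4 initial discard, 1/8 front padding, 1/16 back padding.
  DiscardModel model = {100 / 4};
  assert(model.OutputFrames(100, 100 / 8, 100 / 16) ==
         100 - 100 / 4 - 100 / 8 - 100 / 16);
  return 0;
}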
diff --git a/chromium/media/base/audio_fifo.cc b/chromium/media/base/audio_fifo.cc
index b6e8f806e05..bdc7ddf78d0 100644
--- a/chromium/media/base/audio_fifo.cc
+++ b/chromium/media/base/audio_fifo.cc
@@ -6,9 +6,6 @@
#include "base/logging.h"
-using base::subtle::Atomic32;
-using base::subtle::NoBarrier_Store;
-
namespace media {
// Given current position in the FIFO, the maximum number of elements in the
@@ -52,7 +49,6 @@ AudioFifo::~AudioFifo() {}
int AudioFifo::frames() const {
int delta = frames_pushed_ - frames_consumed_;
- base::subtle::MemoryBarrier();
return delta;
}
@@ -83,12 +79,7 @@ void AudioFifo::Push(const AudioBus* source) {
}
}
- // Ensure the data is *really* written before updating |frames_pushed_|.
- base::subtle::MemoryBarrier();
-
- Atomic32 new_frames_pushed = frames_pushed_ + source_size;
- NoBarrier_Store(&frames_pushed_, new_frames_pushed);
-
+ frames_pushed_ += source_size;
DCHECK_LE(frames(), max_frames());
write_pos_ = UpdatePos(write_pos_, source_size, max_frames());
}
@@ -128,9 +119,7 @@ void AudioFifo::Consume(AudioBus* destination,
}
}
- Atomic32 new_frames_consumed = frames_consumed_ + frames_to_consume;
- NoBarrier_Store(&frames_consumed_, new_frames_consumed);
-
+ frames_consumed_ += frames_to_consume;
read_pos_ = UpdatePos(read_pos_, frames_to_consume, max_frames());
}
diff --git a/chromium/media/base/audio_fifo.h b/chromium/media/base/audio_fifo.h
index e978ace05ba..c00dd40fef9 100644
--- a/chromium/media/base/audio_fifo.h
+++ b/chromium/media/base/audio_fifo.h
@@ -5,7 +5,6 @@
#ifndef MEDIA_BASE_AUDIO_FIFO_H_
#define MEDIA_BASE_AUDIO_FIFO_H_
-#include "base/atomicops.h"
#include "media/base/audio_bus.h"
#include "media/base/media_export.h"
@@ -15,8 +14,7 @@ namespace media {
// The maximum number of audio frames in the FIFO is set at construction and
// can not be extended dynamically. The allocated memory is utilized as a
// ring buffer.
-// This class is thread-safe in the limited sense that one thread may call
-// Push(), while a second thread calls Consume().
+// This class is thread-unsafe.
class MEDIA_EXPORT AudioFifo {
public:
// Creates a new AudioFifo and allocates |channels| of length |frames|.
@@ -51,8 +49,8 @@ class MEDIA_EXPORT AudioFifo {
const int max_frames_;
// Number of actual elements in the FIFO.
- volatile base::subtle::Atomic32 frames_pushed_;
- volatile base::subtle::Atomic32 frames_consumed_;
+ int frames_pushed_;
+ int frames_consumed_;
// Current read position.
int read_pos_;
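// With the atomics gone, AudioFifo no longer supports one thread calling
// Push() while another calls Consume(); callers needing that pattern must now
// provide their own synchronization. A minimal sketch under that assumption
// (LockedAudioFifo is illustrative and compiles only inside the Chromium
// tree):
#include "base/synchronization/lock.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"

class LockedAudioFifo {
 public:
  LockedAudioFifo(int channels, int frames) : fifo_(channels, frames) {}

  void Push(const media::AudioBus* source) {
    base::AutoLock auto_lock(lock_);  // Serialize against Consume().
    fifo_.Push(source);
  }

  void Consume(media::AudioBus* dest, int start_frame, int frames) {
    base::AutoLock auto_lock(lock_);
    if (fifo_.frames() >= frames)
      fifo_.Consume(dest, start_frame, frames);
    else
      dest->Zero();  // Underrun: emit silence instead of stale data.
  }

 private:
  base::Lock lock_;
  media::AudioFifo fifo_;
};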
diff --git a/chromium/media/base/audio_hardware_config.cc b/chromium/media/base/audio_hardware_config.cc
index d72fce7b4e2..d00e03f6b08 100644
--- a/chromium/media/base/audio_hardware_config.cc
+++ b/chromium/media/base/audio_hardware_config.cc
@@ -4,17 +4,37 @@
#include "media/base/audio_hardware_config.h"
+#include <algorithm>
+#include <cmath>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
using base::AutoLock;
using media::AudioParameters;
namespace media {
+#if !defined(OS_WIN)
+// Taken from "Bit Twiddling Hacks"
+// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+static uint32_t RoundUpToPowerOfTwo(uint32_t v) {
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
+#endif
+
AudioHardwareConfig::AudioHardwareConfig(
const AudioParameters& input_params,
const AudioParameters& output_params)
: input_params_(input_params),
- output_params_(output_params) {
-}
+ output_params_(output_params) {}
AudioHardwareConfig::~AudioHardwareConfig() {}
@@ -77,4 +97,36 @@ void AudioHardwareConfig::UpdateOutputConfig(
output_params_ = output_params;
}
+int AudioHardwareConfig::GetHighLatencyBufferSize() const {
+ AutoLock auto_lock(config_lock_);
+
+ // Empirically, we consider 20ms of samples to be high latency.
+ const double twenty_ms_size = 2.0 * output_params_.sample_rate() / 100;
+
+#if defined(OS_WIN)
+ // Windows doesn't use power of two buffer sizes, so we should always round up
+ // to the nearest multiple of the output buffer size.
+ const int high_latency_buffer_size =
+ std::ceil(twenty_ms_size / output_params_.frames_per_buffer()) *
+ output_params_.frames_per_buffer();
+#else
+ // On other platforms use the nearest higher power of two buffer size. For a
+ // given sample rate, this works out to:
+ //
+ // <= 3200 : 64
+ // <= 6400 : 128
+ // <= 12800 : 256
+ // <= 25600 : 512
+ // <= 51200 : 1024
+ // <= 102400 : 2048
+ // <= 204800 : 4096
+ //
+ // On Linux, the minimum hardware buffer size is 512, so the lower calculated
+ // values are unused. OSX may have a value as low as 128.
+ const int high_latency_buffer_size = RoundUpToPowerOfTwo(twenty_ms_size);
+#endif // defined(OS_WIN)
+
+ return std::max(output_params_.frames_per_buffer(), high_latency_buffer_size);
+}
+
} // namespace media
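// A standalone sketch of the GetHighLatencyBufferSize() arithmetic above with
// illustrative values (44100 Hz, 256 frame hardware buffer): 20 ms works out
// to 882 frames, which rounds up to 1024 on both paths.
#include <stdint.h>
#include <algorithm>
#include <cmath>
#include <cstdio>

static uint32_t RoundUpToPowerOfTwo(uint32_t v) {
  v--;
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  return ++v;
}

int main() {
  const int sample_rate = 44100;
  const int hw_buffer_size = 256;
  const double twenty_ms_size = 2.0 * sample_rate / 100;  // 882 frames.

  // Windows path: round up to a multiple of the hardware buffer size.
  const int windows_size =
      static_cast<int>(std::ceil(twenty_ms_size / hw_buffer_size)) *
      hw_buffer_size;

  // Other platforms: round up to the next power of two, never dropping below
  // the hardware buffer size itself.
  const int posix_size =
      std::max(hw_buffer_size,
               static_cast<int>(
                   RoundUpToPowerOfTwo(static_cast<uint32_t>(twenty_ms_size))));

  printf("windows=%d posix=%d\n", windows_size, posix_size);  // 1024 1024
  return 0;
}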
diff --git a/chromium/media/base/audio_hardware_config.h b/chromium/media/base/audio_hardware_config.h
index d1621b98224..a4baaac0979 100644
--- a/chromium/media/base/audio_hardware_config.h
+++ b/chromium/media/base/audio_hardware_config.h
@@ -41,6 +41,10 @@ class MEDIA_EXPORT AudioHardwareConfig {
void UpdateInputConfig(const media::AudioParameters& input_params);
void UpdateOutputConfig(const media::AudioParameters& output_params);
+ // For clients which don't need low latency, a larger buffer size should be
+ // used to save power and CPU resources.
+ int GetHighLatencyBufferSize() const;
+
private:
// Cached values; access is protected by |config_lock_|.
mutable base::Lock config_lock_;
diff --git a/chromium/media/base/audio_hardware_config_unittest.cc b/chromium/media/base/audio_hardware_config_unittest.cc
index 4a742bf51c8..2cb16fc96a3 100644
--- a/chromium/media/base/audio_hardware_config_unittest.cc
+++ b/chromium/media/base/audio_hardware_config_unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "build/build_config.h"
#include "media/base/audio_hardware_config.h"
#include "media/audio/audio_parameters.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -87,4 +88,42 @@ TEST(AudioHardwareConfig, Setters) {
EXPECT_EQ(kNewInputChannelLayout, fake_config.GetInputChannelLayout());
}
+TEST(AudioHardwareConfig, HighLatencyBufferSizes) {
+ AudioParameters input_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kInputChannelLayout,
+ kInputSampleRate,
+ 16,
+ kOutputBufferSize);
+ AudioParameters output_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutputChannelLayout,
+ 3200,
+ 16,
+ 32);
+ AudioHardwareConfig fake_config(input_params, output_params);
+
+#if defined(OS_WIN)
+ for (int i = 6400; i <= 204800; i *= 2) {
+ fake_config.UpdateOutputConfig(
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutputChannelLayout,
+ i,
+ 16,
+ i / 100));
+ EXPECT_EQ(2 * (i / 100), fake_config.GetHighLatencyBufferSize());
+ }
+#else
+ EXPECT_EQ(64, fake_config.GetHighLatencyBufferSize());
+
+ for (int i = 6400; i <= 204800; i *= 2) {
+ fake_config.UpdateOutputConfig(
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutputChannelLayout,
+ i,
+ 16,
+ 32));
+ EXPECT_EQ(2 * (i / 100), fake_config.GetHighLatencyBufferSize());
+ }
+#endif // defined(OS_WIN)
+}
+
} // namespace media
diff --git a/chromium/media/base/audio_renderer.h b/chromium/media/base/audio_renderer.h
index bcc06b1c4e8..2d7d3e18a2b 100644
--- a/chromium/media/base/audio_renderer.h
+++ b/chromium/media/base/audio_renderer.h
@@ -38,10 +38,6 @@ class MEDIA_EXPORT AudioRenderer {
//
// |ended_cb| is executed when audio rendering has reached the end of stream.
//
- // |disabled_cb| is executed when audio rendering has been disabled due to
- // external factors (i.e., device was removed). |time_cb| will no longer be
- // executed. TODO(scherkus): this might not be needed http://crbug.com/234708
- //
// |error_cb| is executed if an error was encountered.
virtual void Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
@@ -49,16 +45,15 @@ class MEDIA_EXPORT AudioRenderer {
const base::Closure& underflow_cb,
const TimeCB& time_cb,
const base::Closure& ended_cb,
- const base::Closure& disabled_cb,
const PipelineStatusCB& error_cb) = 0;
- // Start audio decoding and rendering at the current playback rate, executing
- // |callback| when playback is underway.
- virtual void Play(const base::Closure& callback) = 0;
+ // Signal audio playback to start at the current rate. It is expected that
+ // |time_cb| will eventually start being run with time updates.
+ virtual void StartRendering() = 0;
- // Temporarily suspend decoding and rendering audio, executing |callback| when
- // playback has been suspended.
- virtual void Pause(const base::Closure& callback) = 0;
+ // Signal audio playback to stop until further notice. It is expected that
+ // |time_cb| will no longer be run.
+ virtual void StopRendering() = 0;
// Discard any audio data, executing |callback| when completed.
virtual void Flush(const base::Closure& callback) = 0;
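// A minimal caller sketch of the new interface (|renderer| is an assumed
// concrete implementation). Unlike the old Play(cb)/Pause(cb) pair, these
// calls return immediately with no completion callback:
#include "media/base/audio_renderer.h"

void OnPipelinePlay(media::AudioRenderer* renderer) {
  renderer->StartRendering();  // |time_cb| will begin delivering updates.
}

void OnPipelinePause(media::AudioRenderer* renderer) {
  renderer->StopRendering();  // |time_cb| stops being run.
}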
diff --git a/chromium/media/base/audio_renderer_mixer.cc b/chromium/media/base/audio_renderer_mixer.cc
index 11b12110260..26956519b96 100644
--- a/chromium/media/base/audio_renderer_mixer.cc
+++ b/chromium/media/base/audio_renderer_mixer.cc
@@ -29,44 +29,57 @@ AudioRendererMixer::~AudioRendererMixer() {
// AudioRendererSinks must be stopped before being destructed.
audio_sink_->Stop();
- // Ensures that all mixer inputs have stopped themselves prior to destruction
- // and have called RemoveMixerInput().
- DCHECK_EQ(mixer_inputs_.size(), 0U);
+ // Ensure that all mixer inputs have removed themselves prior to destruction.
+ DCHECK(audio_converter_.empty());
+ DCHECK_EQ(error_callbacks_.size(), 0U);
}
-void AudioRendererMixer::AddMixerInput(AudioConverter::InputCallback* input,
- const base::Closure& error_cb) {
- base::AutoLock auto_lock(mixer_inputs_lock_);
-
+void AudioRendererMixer::AddMixerInput(AudioConverter::InputCallback* input) {
+ base::AutoLock auto_lock(lock_);
if (!playing_) {
playing_ = true;
last_play_time_ = base::TimeTicks::Now();
audio_sink_->Play();
}
- DCHECK(mixer_inputs_.find(input) == mixer_inputs_.end());
- mixer_inputs_[input] = error_cb;
audio_converter_.AddInput(input);
}
void AudioRendererMixer::RemoveMixerInput(
AudioConverter::InputCallback* input) {
- base::AutoLock auto_lock(mixer_inputs_lock_);
+ base::AutoLock auto_lock(lock_);
audio_converter_.RemoveInput(input);
+}
- DCHECK(mixer_inputs_.find(input) != mixer_inputs_.end());
- mixer_inputs_.erase(input);
+void AudioRendererMixer::AddErrorCallback(const base::Closure& error_cb) {
+ base::AutoLock auto_lock(lock_);
+ error_callbacks_.push_back(error_cb);
+}
+
+void AudioRendererMixer::RemoveErrorCallback(const base::Closure& error_cb) {
+ base::AutoLock auto_lock(lock_);
+ for (ErrorCallbackList::iterator it = error_callbacks_.begin();
+ it != error_callbacks_.end();
+ ++it) {
+ if (it->Equals(error_cb)) {
+ error_callbacks_.erase(it);
+ return;
+ }
+ }
+
+ // A matching error callback should always exist when this is called.
+ NOTREACHED();
}
int AudioRendererMixer::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
- base::AutoLock auto_lock(mixer_inputs_lock_);
+ base::AutoLock auto_lock(lock_);
// If there are no mixer inputs and we haven't seen one for a while, pause the
// sink to avoid wasting resources when media elements are present but remain
// in the pause state.
const base::TimeTicks now = base::TimeTicks::Now();
- if (!mixer_inputs_.empty()) {
+ if (!audio_converter_.empty()) {
last_play_time_ = now;
} else if (now - last_play_time_ >= pause_delay_ && playing_) {
audio_sink_->Pause();
@@ -79,12 +92,12 @@ int AudioRendererMixer::Render(AudioBus* audio_bus,
}
void AudioRendererMixer::OnRenderError() {
- base::AutoLock auto_lock(mixer_inputs_lock_);
-
// Call each mixer input and signal an error.
- for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin();
- it != mixer_inputs_.end(); ++it) {
- it->second.Run();
+ base::AutoLock auto_lock(lock_);
+ for (ErrorCallbackList::const_iterator it = error_callbacks_.begin();
+ it != error_callbacks_.end();
+ ++it) {
+ it->Run();
}
}
diff --git a/chromium/media/base/audio_renderer_mixer.h b/chromium/media/base/audio_renderer_mixer.h
index 942c61fe849..4b0af9b4e3f 100644
--- a/chromium/media/base/audio_renderer_mixer.h
+++ b/chromium/media/base/audio_renderer_mixer.h
@@ -26,10 +26,15 @@ class MEDIA_EXPORT AudioRendererMixer
virtual ~AudioRendererMixer();
// Add or remove a mixer input from mixing; called by AudioRendererMixerInput.
- void AddMixerInput(AudioConverter::InputCallback* input,
- const base::Closure& error_cb);
+ void AddMixerInput(AudioConverter::InputCallback* input);
void RemoveMixerInput(AudioConverter::InputCallback* input);
+ // Since errors may occur even when no inputs are playing, an error callback
+ // must be registered separately from adding a mixer input. The same callback
+ // must be given to both functions.
+ void AddErrorCallback(const base::Closure& error_cb);
+ void RemoveErrorCallback(const base::Closure& error_cb);
+
void set_pause_delay_for_testing(base::TimeDelta delay) {
pause_delay_ = delay;
}
@@ -43,12 +48,12 @@ class MEDIA_EXPORT AudioRendererMixer
// Output sink for this mixer.
scoped_refptr<AudioRendererSink> audio_sink_;
- // Set of mixer inputs to be mixed by this mixer. Access is thread-safe
- // through |mixer_inputs_lock_|.
- typedef std::map<AudioConverter::InputCallback*, base::Closure>
- AudioRendererMixerInputSet;
- AudioRendererMixerInputSet mixer_inputs_;
- base::Lock mixer_inputs_lock_;
+ // ---------------[ All variables below protected by |lock_| ]---------------
+ base::Lock lock_;
+
+ // List of error callbacks used by this mixer.
+ typedef std::list<base::Closure> ErrorCallbackList;
+ ErrorCallbackList error_callbacks_;
// Handles mixing and resampling between input and output parameters.
AudioConverter audio_converter_;
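// A minimal sketch of the new error registration contract (MixerClient is a
// hypothetical caller). RemoveErrorCallback() matches entries via
// base::Closure::Equals(), so the identical closure must be passed to both
// calls:
#include "base/bind.h"
#include "media/base/audio_renderer_mixer.h"

class MixerClient {
 public:
  explicit MixerClient(media::AudioRendererMixer* mixer)
      : mixer_(mixer),
        error_cb_(base::Bind(&MixerClient::OnError, base::Unretained(this))) {
    // Registered up front, even while no input is playing.
    mixer_->AddErrorCallback(error_cb_);
  }

  ~MixerClient() {
    // Must pass the same closure instance so Equals() can find it.
    mixer_->RemoveErrorCallback(error_cb_);
  }

 private:
  void OnError() { /* Surface the render error to the owner. */ }

  media::AudioRendererMixer* const mixer_;
  const base::Closure error_cb_;
};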
diff --git a/chromium/media/base/audio_renderer_mixer_input.cc b/chromium/media/base/audio_renderer_mixer_input.cc
index ffdcfa875f7..ab9f0a7ecab 100644
--- a/chromium/media/base/audio_renderer_mixer_input.cc
+++ b/chromium/media/base/audio_renderer_mixer_input.cc
@@ -24,17 +24,17 @@ AudioRendererMixerInput::AudioRendererMixerInput(
}
AudioRendererMixerInput::~AudioRendererMixerInput() {
- // Mixer is no longer safe to use after |remove_mixer_cb_| has been called.
- if (initialized_)
- remove_mixer_cb_.Run(params_);
+ DCHECK(!playing_);
+ DCHECK(!mixer_);
}
void AudioRendererMixerInput::Initialize(
const AudioParameters& params,
AudioRendererSink::RenderCallback* callback) {
+ DCHECK(callback);
DCHECK(!initialized_);
+
params_ = params;
- mixer_ = get_mixer_cb_.Run(params_);
callback_ = callback;
initialized_ = true;
}
@@ -42,30 +42,45 @@ void AudioRendererMixerInput::Initialize(
void AudioRendererMixerInput::Start() {
DCHECK(initialized_);
DCHECK(!playing_);
+ DCHECK(!mixer_);
+ mixer_ = get_mixer_cb_.Run(params_);
+
+ // Note: OnRenderError() may be called immediately after this call returns.
+ mixer_->AddErrorCallback(error_cb_);
}
void AudioRendererMixerInput::Stop() {
// Stop() may be called at any time; if Pause() hasn't been called we need to
// remove our mixer input before shutdown.
- if (!playing_)
- return;
+ if (playing_) {
+ mixer_->RemoveMixerInput(this);
+ playing_ = false;
+ }
- mixer_->RemoveMixerInput(this);
- playing_ = false;
+ if (mixer_) {
+ // TODO(dalecurtis): This is required so that |callback_| isn't called after
+ // Stop() by an error event since it may outlive this ref-counted object. We
+ // should instead have sane ownership semantics: http://crbug.com/151051
+ mixer_->RemoveErrorCallback(error_cb_);
+ remove_mixer_cb_.Run(params_);
+ mixer_ = NULL;
+ }
}
void AudioRendererMixerInput::Play() {
DCHECK(initialized_);
+ DCHECK(mixer_);
if (playing_)
return;
- mixer_->AddMixerInput(this, error_cb_);
+ mixer_->AddMixerInput(this);
playing_ = true;
}
void AudioRendererMixerInput::Pause() {
DCHECK(initialized_);
+ DCHECK(mixer_);
if (!playing_)
return;
diff --git a/chromium/media/base/audio_renderer_mixer_input.h b/chromium/media/base/audio_renderer_mixer_input.h
index 6b026cf9c29..c7e24c6fbb5 100644
--- a/chromium/media/base/audio_renderer_mixer_input.h
+++ b/chromium/media/base/audio_renderer_mixer_input.h
@@ -68,7 +68,7 @@ class MEDIA_EXPORT AudioRendererMixerInput
AudioRendererSink::RenderCallback* callback_;
// Error callback for handing to AudioRendererMixer.
- base::Closure error_cb_;
+ const base::Closure error_cb_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInput);
};
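// With mixer acquisition moved from Initialize() to Start(), the expected
// lifecycle is Initialize() -> Start() -> Play()/Pause() -> Stop(). A minimal
// caller sketch (|input|, |params| and |render_callback| are assumed to exist):
#include "media/audio/audio_parameters.h"
#include "media/base/audio_renderer_mixer_input.h"
#include "media/base/audio_renderer_sink.h"

void UseMixerInput(media::AudioRendererMixerInput* input,
                   const media::AudioParameters& params,
                   media::AudioRendererSink::RenderCallback* render_callback) {
  input->Initialize(params, render_callback);  // No mixer acquired yet.
  input->Start();  // Acquires a mixer and registers |error_cb_|.
  input->Play();   // Attaches this input to the mixer; rendering begins.
  input->Pause();  // Detaches, but keeps the mixer and error callback.
  input->Stop();   // Removes the error callback and releases the mixer.
}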
diff --git a/chromium/media/base/audio_renderer_mixer_input_unittest.cc b/chromium/media/base/audio_renderer_mixer_input_unittest.cc
index 9a019db5717..be03867027f 100644
--- a/chromium/media/base/audio_renderer_mixer_input_unittest.cc
+++ b/chromium/media/base/audio_renderer_mixer_input_unittest.cc
@@ -28,7 +28,6 @@ class AudioRendererMixerInputTest : public testing::Test {
CreateMixerInput();
fake_callback_.reset(new FakeAudioRenderCallback(0));
mixer_input_->Initialize(audio_parameters_, fake_callback_.get());
- EXPECT_CALL(*this, RemoveMixer(testing::_));
audio_bus_ = AudioBus::Create(audio_parameters_);
}
@@ -49,6 +48,7 @@ class AudioRendererMixerInputTest : public testing::Test {
mixer_.reset(new AudioRendererMixer(
audio_parameters_, audio_parameters_, sink));
}
+ EXPECT_CALL(*this, RemoveMixer(testing::_));
return mixer_.get();
}
@@ -109,4 +109,12 @@ TEST_F(AudioRendererMixerInputTest, StopBeforeInitializeOrStart) {
mixer_input_->Stop();
}
+// Test that Start() can be called after Stop().
+// TODO(dalecurtis): We shouldn't allow this. See http://crbug.com/151051
+TEST_F(AudioRendererMixerInputTest, StartAfterStop) {
+ mixer_input_->Stop();
+ mixer_input_->Start();
+ mixer_input_->Stop();
+}
+
} // namespace media
diff --git a/chromium/media/base/audio_renderer_mixer_unittest.cc b/chromium/media/base/audio_renderer_mixer_unittest.cc
index 589358357b5..cb58a038558 100644
--- a/chromium/media/base/audio_renderer_mixer_unittest.cc
+++ b/chromium/media/base/audio_renderer_mixer_unittest.cc
@@ -51,8 +51,8 @@ class AudioRendererMixerTest
std::tr1::get<1>(GetParam()), 16, kLowLatencyBufferSize);
sink_ = new MockAudioRendererSink();
- EXPECT_CALL(*sink_.get(), Start());
- EXPECT_CALL(*sink_.get(), Stop());
+ EXPECT_CALL(*sink_, Start());
+ EXPECT_CALL(*sink_, Stop());
mixer_.reset(new AudioRendererMixer(
input_parameters_, output_parameters_, sink_));
@@ -393,6 +393,22 @@ TEST_P(AudioRendererMixerBehavioralTest, OnRenderError) {
mixer_inputs_[i]->Stop();
}
+TEST_P(AudioRendererMixerBehavioralTest, OnRenderErrorPausedInput) {
+ InitializeInputs(kMixerInputs);
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ EXPECT_CALL(*fake_callbacks_[i], OnRenderError()).Times(1);
+ }
+
+ // Fire the error before attaching any inputs. Ensure an error is received
+ // even if the input is not connected.
+ mixer_callback_->OnRenderError();
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+}
+
// Ensure constructing an AudioRendererMixerInput, but not initializing it does
// not call RemoveMixer().
TEST_P(AudioRendererMixerBehavioralTest, NoInitialize) {
diff --git a/chromium/media/base/audio_renderer_sink.h b/chromium/media/base/audio_renderer_sink.h
index b2f4ba0a902..fa1ee84c250 100644
--- a/chromium/media/base/audio_renderer_sink.h
+++ b/chromium/media/base/audio_renderer_sink.h
@@ -28,11 +28,6 @@ class AudioRendererSink
// number of frames filled.
virtual int Render(AudioBus* dest, int audio_delay_milliseconds) = 0;
- // Synchronized audio I/O - see InitializeIO() below.
- virtual void RenderIO(AudioBus* source,
- AudioBus* dest,
- int audio_delay_milliseconds) {}
-
// Signals an error has occurred.
virtual void OnRenderError() = 0;
diff --git a/chromium/media/base/audio_splicer.cc b/chromium/media/base/audio_splicer.cc
index 14b4199e0e3..7fafc8bbbac 100644
--- a/chromium/media/base/audio_splicer.cc
+++ b/chromium/media/base/audio_splicer.cc
@@ -5,37 +5,113 @@
#include "media/base/audio_splicer.h"
#include <cstdlib>
+#include <deque>
#include "base/logging.h"
#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
+#include "media/base/vector_math.h"
namespace media {
-// Largest gap or overlap allowed by this class. Anything
-// larger than this will trigger an error.
-// This is an arbitrary value, but the initial selection of 50ms
-// roughly represents the duration of 2 compressed AAC or MP3 frames.
-static const int kMaxTimeDeltaInMilliseconds = 50;
+// Minimum gap size needed before the sanitizer will take action to
+// fill a gap. This avoids periodically inserting and then dropping samples
+// when the buffer timestamps are slightly off because of timestamp rounding
+// in the source content. Unit is frames.
+static const int kMinGapSize = 2;
-AudioSplicer::AudioSplicer(int samples_per_second)
- : output_timestamp_helper_(samples_per_second),
- min_gap_size_(2),
- received_end_of_stream_(false) {
+// AudioBuffer::TrimStart() is not as accurate as the timestamp helper, so
+// manually adjust the timestamp after trimming.
+static void AccurateTrimStart(int frames_to_trim,
+ const scoped_refptr<AudioBuffer> buffer,
+ const AudioTimestampHelper& timestamp_helper) {
+ buffer->TrimStart(frames_to_trim);
+ buffer->set_timestamp(timestamp_helper.GetTimestamp());
}
-AudioSplicer::~AudioSplicer() {
+// Returns an AudioBus whose frame buffer is backed by the provided AudioBuffer.
+static scoped_ptr<AudioBus> CreateAudioBufferWrapper(
+ const scoped_refptr<AudioBuffer>& buffer) {
+ scoped_ptr<AudioBus> wrapper =
+ AudioBus::CreateWrapper(buffer->channel_count());
+ wrapper->set_frames(buffer->frame_count());
+ for (int ch = 0; ch < buffer->channel_count(); ++ch) {
+ wrapper->SetChannelData(
+ ch, reinterpret_cast<float*>(buffer->channel_data()[ch]));
+ }
+ return wrapper.Pass();
}
-void AudioSplicer::Reset() {
- output_timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+class AudioStreamSanitizer {
+ public:
+ explicit AudioStreamSanitizer(int samples_per_second);
+ ~AudioStreamSanitizer();
+
+ // Resets the sanitizer state by clearing the output buffers queue, and
+ // resetting the timestamp helper.
+ void Reset();
+
+ // Similar to Reset(), but initializes the timestamp helper with the given
+ // parameters.
+ void ResetTimestampState(int64 frame_count, base::TimeDelta base_timestamp);
+
+ // Adds a new buffer full of samples or an end of stream buffer to the
+ // sanitizer. Returns true if the buffer was accepted. False is returned if
+ // an error occurred.
+ bool AddInput(const scoped_refptr<AudioBuffer>& input);
+
+ // Returns true if the sanitizer has a buffer to return.
+ bool HasNextBuffer() const;
+
+ // Removes the next buffer from the output buffer queue and returns it; should
+ // only be called if HasNextBuffer() returns true.
+ scoped_refptr<AudioBuffer> GetNextBuffer();
+
+ // Returns the total frame count of all buffers available for output.
+ int GetFrameCount() const;
+
+ const AudioTimestampHelper& timestamp_helper() {
+ return output_timestamp_helper_;
+ }
+
+ // Transfer all buffers into |output|. Returns false if AddInput() on the
+ // |output| sanitizer fails for any buffer removed from |this|.
+ bool DrainInto(AudioStreamSanitizer* output);
+
+ private:
+ void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
+
+ AudioTimestampHelper output_timestamp_helper_;
+ bool received_end_of_stream_;
+
+ typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
+ BufferQueue output_buffers_;
+
+ DISALLOW_ASSIGN(AudioStreamSanitizer);
+};
+
+AudioStreamSanitizer::AudioStreamSanitizer(int samples_per_second)
+ : output_timestamp_helper_(samples_per_second),
+ received_end_of_stream_(false) {}
+
+AudioStreamSanitizer::~AudioStreamSanitizer() {}
+
+void AudioStreamSanitizer::Reset() {
+ ResetTimestampState(0, kNoTimestamp());
+}
+
+void AudioStreamSanitizer::ResetTimestampState(int64 frame_count,
+ base::TimeDelta base_timestamp) {
output_buffers_.clear();
received_end_of_stream_ = false;
+ output_timestamp_helper_.SetBaseTimestamp(base_timestamp);
+ if (frame_count > 0)
+ output_timestamp_helper_.AddFrames(frame_count);
}
-bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
+bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
DCHECK(!received_end_of_stream_ || input->end_of_stream());
if (input->end_of_stream()) {
@@ -56,11 +132,13 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
return false;
}
- base::TimeDelta timestamp = input->timestamp();
- base::TimeDelta expected_timestamp = output_timestamp_helper_.GetTimestamp();
- base::TimeDelta delta = timestamp - expected_timestamp;
+ const base::TimeDelta timestamp = input->timestamp();
+ const base::TimeDelta expected_timestamp =
+ output_timestamp_helper_.GetTimestamp();
+ const base::TimeDelta delta = timestamp - expected_timestamp;
- if (std::abs(delta.InMilliseconds()) > kMaxTimeDeltaInMilliseconds) {
+ if (std::abs(delta.InMilliseconds()) >
+ AudioSplicer::kMaxTimeDeltaInMilliseconds) {
DVLOG(1) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
return false;
}
@@ -69,7 +147,7 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
if (delta != base::TimeDelta())
frames_to_fill = output_timestamp_helper_.GetFramesToTarget(timestamp);
- if (frames_to_fill == 0 || std::abs(frames_to_fill) < min_gap_size_) {
+ if (frames_to_fill == 0 || std::abs(frames_to_fill) < kMinGapSize) {
AddOutputBuffer(input);
return true;
}
@@ -80,11 +158,12 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
// Create a buffer with enough silence samples to fill the gap and
// add it to the output buffer.
- scoped_refptr<AudioBuffer> gap = AudioBuffer::CreateEmptyBuffer(
- input->channel_count(),
- frames_to_fill,
- expected_timestamp,
- output_timestamp_helper_.GetFrameDuration(frames_to_fill));
+ scoped_refptr<AudioBuffer> gap =
+ AudioBuffer::CreateEmptyBuffer(input->channel_layout(),
+ input->channel_count(),
+ input->sample_rate(),
+ frames_to_fill,
+ expected_timestamp);
AddOutputBuffer(gap);
// Add the input buffer now that the gap has been filled.
@@ -92,39 +171,337 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
return true;
}
- int frames_to_skip = -frames_to_fill;
-
+ // Overlapping buffers marked as splice frames are handled by AudioSplicer,
+ // but decoder and demuxer quirks may sometimes produce overlapping samples
+ // which need to be sanitized.
+ //
+ // A crossfade can't be done here because only the current buffer is available
+ // at this point, not previous buffers.
DVLOG(1) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
- << " us: " << -delta.InMicroseconds() << " us";
+ << " us: " << -delta.InMicroseconds() << " us";
+ const int frames_to_skip = -frames_to_fill;
if (input->frame_count() <= frames_to_skip) {
DVLOG(1) << "Dropping whole buffer";
return true;
}
// Copy the trailing samples that do not overlap samples already output
- // into a new buffer. Add this new buffer to the output queue.
+ // into a new buffer. Add this new buffer to the output queue.
//
// TODO(acolwell): Implement a cross-fade here so the transition is less
// jarring.
- input->TrimStart(frames_to_skip);
+ AccurateTrimStart(frames_to_skip, input, output_timestamp_helper_);
AddOutputBuffer(input);
return true;
}
-bool AudioSplicer::HasNextBuffer() const {
+bool AudioStreamSanitizer::HasNextBuffer() const {
return !output_buffers_.empty();
}
-scoped_refptr<AudioBuffer> AudioSplicer::GetNextBuffer() {
+scoped_refptr<AudioBuffer> AudioStreamSanitizer::GetNextBuffer() {
scoped_refptr<AudioBuffer> ret = output_buffers_.front();
output_buffers_.pop_front();
return ret;
}
-void AudioSplicer::AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer) {
+void AudioStreamSanitizer::AddOutputBuffer(
+ const scoped_refptr<AudioBuffer>& buffer) {
output_timestamp_helper_.AddFrames(buffer->frame_count());
output_buffers_.push_back(buffer);
}
+int AudioStreamSanitizer::GetFrameCount() const {
+ int frame_count = 0;
+ for (BufferQueue::const_iterator it = output_buffers_.begin();
+ it != output_buffers_.end(); ++it) {
+ frame_count += (*it)->frame_count();
+ }
+ return frame_count;
+}
+
+bool AudioStreamSanitizer::DrainInto(AudioStreamSanitizer* output) {
+ while (HasNextBuffer()) {
+ if (!output->AddInput(GetNextBuffer()))
+ return false;
+ }
+ return true;
+}
+
+AudioSplicer::AudioSplicer(int samples_per_second)
+ : max_crossfade_duration_(
+ base::TimeDelta::FromMilliseconds(kCrossfadeDurationInMilliseconds)),
+ splice_timestamp_(kNoTimestamp()),
+ max_splice_end_timestamp_(kNoTimestamp()),
+ output_sanitizer_(new AudioStreamSanitizer(samples_per_second)),
+ pre_splice_sanitizer_(new AudioStreamSanitizer(samples_per_second)),
+ post_splice_sanitizer_(new AudioStreamSanitizer(samples_per_second)),
+ have_all_pre_splice_buffers_(false) {}
+
+AudioSplicer::~AudioSplicer() {}
+
+void AudioSplicer::Reset() {
+ output_sanitizer_->Reset();
+ pre_splice_sanitizer_->Reset();
+ post_splice_sanitizer_->Reset();
+ have_all_pre_splice_buffers_ = false;
+ reset_splice_timestamps();
+}
+
+bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
+ // If we're not processing a splice, add the input to the output queue.
+ if (splice_timestamp_ == kNoTimestamp()) {
+ DCHECK(!pre_splice_sanitizer_->HasNextBuffer());
+ DCHECK(!post_splice_sanitizer_->HasNextBuffer());
+ return output_sanitizer_->AddInput(input);
+ }
+
+ const AudioTimestampHelper& output_ts_helper =
+ output_sanitizer_->timestamp_helper();
+
+ if (!have_all_pre_splice_buffers_) {
+ DCHECK(!input->end_of_stream());
+
+ // If the provided buffer is entirely before the splice point it can also be
+ // added to the output queue.
+ if (input->timestamp() + input->duration() < splice_timestamp_) {
+ DCHECK(!pre_splice_sanitizer_->HasNextBuffer());
+ return output_sanitizer_->AddInput(input);
+ }
+
+ // If we've encountered the first pre splice buffer, reset the pre splice
+ // sanitizer based on |output_sanitizer_|. This is done so that gaps and
+ // overlaps between buffers across the sanitizers are accounted for prior
+ // to calculating crossfade.
+ if (!pre_splice_sanitizer_->HasNextBuffer()) {
+ pre_splice_sanitizer_->ResetTimestampState(
+ output_ts_helper.frame_count(), output_ts_helper.base_timestamp());
+ }
+
+ return pre_splice_sanitizer_->AddInput(input);
+ }
+
+ // The first post splice buffer is expected to match |splice_timestamp_|.
+ if (!post_splice_sanitizer_->HasNextBuffer())
+ CHECK(splice_timestamp_ == input->timestamp());
+
+ // At this point we have all the fade out preroll buffers from the decoder.
+ // We now need to wait until we have enough data to perform the crossfade (or
+ // we receive an end of stream).
+ if (!post_splice_sanitizer_->AddInput(input))
+ return false;
+
+ // Ensure |output_sanitizer_| has a valid base timestamp so we can use it for
+ // timestamp calculations.
+ if (output_ts_helper.base_timestamp() == kNoTimestamp()) {
+ output_sanitizer_->ResetTimestampState(
+ 0, pre_splice_sanitizer_->timestamp_helper().base_timestamp());
+ }
+
+ // If a splice frame was incorrectly marked due to poor demuxed timestamps, we
+ // may not actually have a splice. Here we check whether all of the pre
+ // splice frames end before the splice point; if so, there is nothing to
+ // crossfade, so just transfer all data to the output sanitizer.
+ if (pre_splice_sanitizer_->GetFrameCount() <=
+ output_ts_helper.GetFramesToTarget(splice_timestamp_)) {
+ CHECK(pre_splice_sanitizer_->DrainInto(output_sanitizer_.get()));
+
+ // If the file contains incorrectly muxed timestamps, there may be huge gaps
+ // between the demuxed and decoded timestamps.
+ if (!post_splice_sanitizer_->DrainInto(output_sanitizer_.get()))
+ return false;
+
+ reset_splice_timestamps();
+ return true;
+ }
+
+ // Wait until we have enough data to crossfade or end of stream.
+ if (!input->end_of_stream() &&
+ input->timestamp() + input->duration() < max_splice_end_timestamp_) {
+ return true;
+ }
+
+ scoped_refptr<AudioBuffer> crossfade_buffer;
+ scoped_ptr<AudioBus> pre_splice =
+ ExtractCrossfadeFromPreSplice(&crossfade_buffer);
+
+ // Crossfade the pre splice and post splice sections and transfer all relevant
+ // buffers into |output_sanitizer_|.
+ CrossfadePostSplice(pre_splice.Pass(), crossfade_buffer);
+
+ // Clear the splice timestamp so new splices can be accepted.
+ reset_splice_timestamps();
+ return true;
+}
+
+bool AudioSplicer::HasNextBuffer() const {
+ return output_sanitizer_->HasNextBuffer();
+}
+
+scoped_refptr<AudioBuffer> AudioSplicer::GetNextBuffer() {
+ return output_sanitizer_->GetNextBuffer();
+}
+
+void AudioSplicer::SetSpliceTimestamp(base::TimeDelta splice_timestamp) {
+ if (splice_timestamp == kNoTimestamp()) {
+ DCHECK(splice_timestamp_ != kNoTimestamp());
+ DCHECK(!have_all_pre_splice_buffers_);
+ have_all_pre_splice_buffers_ = true;
+ return;
+ }
+
+ if (splice_timestamp_ == splice_timestamp)
+ return;
+
+ // TODO(dalecurtis): We may need the concept of a future_splice_timestamp_ to
+ // handle cases where another splice comes in before we've received 5ms of
+ // data from the last one. Leave this as a CHECK for now to figure out if
+ // this case is possible.
+ CHECK(splice_timestamp_ == kNoTimestamp());
+ splice_timestamp_ = splice_timestamp;
+ max_splice_end_timestamp_ = splice_timestamp_ + max_crossfade_duration_;
+ pre_splice_sanitizer_->Reset();
+ post_splice_sanitizer_->Reset();
+ have_all_pre_splice_buffers_ = false;
+}
+
+scoped_ptr<AudioBus> AudioSplicer::ExtractCrossfadeFromPreSplice(
+ scoped_refptr<AudioBuffer>* crossfade_buffer) {
+ DCHECK(crossfade_buffer);
+ const AudioTimestampHelper& output_ts_helper =
+ output_sanitizer_->timestamp_helper();
+
+ int frames_before_splice =
+ output_ts_helper.GetFramesToTarget(splice_timestamp_);
+
+ // Determine crossfade frame count based on available frames in each splicer
+ // and capping to the maximum crossfade duration.
+ const int max_crossfade_frame_count =
+ output_ts_helper.GetFramesToTarget(max_splice_end_timestamp_) -
+ frames_before_splice;
+ const int frames_to_crossfade = std::min(
+ max_crossfade_frame_count,
+ std::min(pre_splice_sanitizer_->GetFrameCount() - frames_before_splice,
+ post_splice_sanitizer_->GetFrameCount()));
+ // There must always be frames to crossfade, otherwise the splice should not
+ // have been generated.
+ DCHECK_GT(frames_to_crossfade, 0);
+
+ int frames_read = 0;
+ scoped_ptr<AudioBus> output_bus;
+ while (pre_splice_sanitizer_->HasNextBuffer() &&
+ frames_read < frames_to_crossfade) {
+ scoped_refptr<AudioBuffer> preroll = pre_splice_sanitizer_->GetNextBuffer();
+
+ // We don't know the channel count until we see the first buffer, so wait
+ // until then to allocate the output AudioBus.
+ if (!output_bus) {
+ output_bus =
+ AudioBus::Create(preroll->channel_count(), frames_to_crossfade);
+ // Allocate output buffer for crossfade.
+ *crossfade_buffer = AudioBuffer::CreateBuffer(kSampleFormatPlanarF32,
+ preroll->channel_layout(),
+ preroll->channel_count(),
+ preroll->sample_rate(),
+ frames_to_crossfade);
+ }
+
+ // There may be enough of a gap introduced during decoding such that an
+ // entire buffer exists before the splice point.
+ if (frames_before_splice >= preroll->frame_count()) {
+ // Adjust the number of frames remaining before the splice. NOTE: This is
+ // safe since |pre_splice_sanitizer_| is a continuation of the timeline in
+ // |output_sanitizer_|. As such we're guaranteed there are no gaps or
+ // overlaps in the timeline between the two sanitizers.
+ frames_before_splice -= preroll->frame_count();
+ CHECK(output_sanitizer_->AddInput(preroll));
+ continue;
+ }
+
+ const int frames_to_read =
+ std::min(preroll->frame_count() - frames_before_splice,
+ output_bus->frames() - frames_read);
+ preroll->ReadFrames(
+ frames_to_read, frames_before_splice, frames_read, output_bus.get());
+ frames_read += frames_to_read;
+
+ // If only part of the buffer was consumed, trim it appropriately and stick
+ // it into the output queue.
+ if (frames_before_splice) {
+ preroll->TrimEnd(preroll->frame_count() - frames_before_splice);
+ CHECK(output_sanitizer_->AddInput(preroll));
+ frames_before_splice = 0;
+ }
+ }
+
+ // Ensure outputs were properly allocated. The method should not have been
+ // called if there is not enough data to crossfade.
+ // TODO(dalecurtis): Convert to DCHECK() once http://crbug.com/356073 fixed.
+ CHECK(output_bus);
+ CHECK(*crossfade_buffer);
+
+ // All necessary buffers have been processed, it's safe to reset.
+ pre_splice_sanitizer_->Reset();
+ DCHECK_EQ(output_bus->frames(), frames_read);
+ DCHECK_EQ(output_ts_helper.GetFramesToTarget(splice_timestamp_), 0);
+ return output_bus.Pass();
+}
+
+void AudioSplicer::CrossfadePostSplice(
+ scoped_ptr<AudioBus> pre_splice_bus,
+ scoped_refptr<AudioBuffer> crossfade_buffer) {
+ // Use the calculated timestamp and duration to ensure there are no extra gaps
+ // or overlaps to process when adding the buffer to |output_sanitizer_|.
+ const AudioTimestampHelper& output_ts_helper =
+ output_sanitizer_->timestamp_helper();
+ crossfade_buffer->set_timestamp(output_ts_helper.GetTimestamp());
+
+ // AudioBuffer::ReadFrames() only allows output into an AudioBus, so wrap
+ // our AudioBuffer in one so we can avoid extra data copies.
+ scoped_ptr<AudioBus> output_bus = CreateAudioBufferWrapper(crossfade_buffer);
+
+ // Extract crossfade section from the |post_splice_sanitizer_|.
+ int frames_read = 0, frames_to_trim = 0;
+ scoped_refptr<AudioBuffer> remainder;
+ while (post_splice_sanitizer_->HasNextBuffer() &&
+ frames_read < output_bus->frames()) {
+ scoped_refptr<AudioBuffer> postroll =
+ post_splice_sanitizer_->GetNextBuffer();
+ const int frames_to_read =
+ std::min(postroll->frame_count(), output_bus->frames() - frames_read);
+ postroll->ReadFrames(frames_to_read, 0, frames_read, output_bus.get());
+ frames_read += frames_to_read;
+
+ // If only part of the buffer was consumed, save it for after we've added
+ // the crossfade buffer.
+ if (frames_to_read < postroll->frame_count()) {
+ DCHECK(!remainder);
+ remainder.swap(postroll);
+ frames_to_trim = frames_to_read;
+ }
+ }
+
+ DCHECK_EQ(output_bus->frames(), frames_read);
+
+ // Crossfade the audio into |crossfade_buffer|.
+ for (int ch = 0; ch < output_bus->channels(); ++ch) {
+ vector_math::Crossfade(pre_splice_bus->channel(ch),
+ pre_splice_bus->frames(),
+ output_bus->channel(ch));
+ }
+
+ CHECK(output_sanitizer_->AddInput(crossfade_buffer));
+ DCHECK_EQ(crossfade_buffer->frame_count(), output_bus->frames());
+
+ if (remainder) {
+ // Trim off consumed frames.
+ AccurateTrimStart(frames_to_trim, remainder, output_ts_helper);
+ CHECK(output_sanitizer_->AddInput(remainder));
+ }
+
+ // Transfer all remaining buffers out and reset once empty.
+ CHECK(post_splice_sanitizer_->DrainInto(output_sanitizer_.get()));
+ post_splice_sanitizer_->Reset();
+}
+
} // namespace media
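// The per-channel loop above delegates the fade to vector_math::Crossfade(),
// which linearly fades |src| out while fading the existing contents of |dest|
// in. A standalone sketch of that kernel under the assumed semantics:
#include <cstdio>

static void Crossfade(const float* src, int len, float* dest) {
  float ratio = 0.0f;
  const float increment = 1.0f / len;
  for (int i = 0; i < len; ++i, ratio += increment)
    dest[i] = (1.0f - ratio) * src[i] + ratio * dest[i];
}

int main() {
  // Pre-splice (fading out) is all 1.0; post-splice (fading in) is all 0.5.
  float pre[4] = {1.0f, 1.0f, 1.0f, 1.0f};
  float post[4] = {0.5f, 0.5f, 0.5f, 0.5f};
  Crossfade(pre, 4, post);
  for (int i = 0; i < 4; ++i)
    printf("%.3f\n", post[i]);  // 1.000, 0.875, 0.750, 0.625
  return 0;
}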
diff --git a/chromium/media/base/audio_splicer.h b/chromium/media/base/audio_splicer.h
index 50445b2d54c..0db5d08dd70 100644
--- a/chromium/media/base/audio_splicer.h
+++ b/chromium/media/base/audio_splicer.h
@@ -5,52 +5,116 @@
#ifndef MEDIA_BASE_AUDIO_SPLICER_H_
#define MEDIA_BASE_AUDIO_SPLICER_H_
-#include <deque>
-
#include "base/memory/ref_counted.h"
-#include "media/base/audio_timestamp_helper.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/buffers.h"
#include "media/base/media_export.h"
namespace media {
class AudioBuffer;
-class AudioDecoderConfig;
+class AudioBus;
+class AudioStreamSanitizer;
// Helper class that handles filling gaps and resolving overlaps.
class MEDIA_EXPORT AudioSplicer {
public:
- AudioSplicer(int samples_per_second);
+ explicit AudioSplicer(int samples_per_second);
~AudioSplicer();
- // Resets the splicer state by clearing the output buffers queue,
- // and resetting the timestamp helper.
+ enum {
+ // The number of ms to crossfade before trimming when buffers overlap.
+ kCrossfadeDurationInMilliseconds = 5,
+
+ // Largest gap or overlap allowed between buffers. Anything larger than
+ // this will trigger an error. This is an arbitrary value, but the initial
+ // selection of 50ms roughly represents the duration of 2 compressed AAC or
+ // MP3 frames.
+ kMaxTimeDeltaInMilliseconds = 50,
+ };
+
+ // Resets the splicer state by clearing the output buffers queue and resetting
+ // the timestamp helper.
void Reset();
// Adds a new buffer full of samples or end of stream buffer to the splicer.
- // Returns true if the buffer was accepted. False is returned if an error
+ // Returns true if the buffer was accepted. False is returned if an error
// occurred.
bool AddInput(const scoped_refptr<AudioBuffer>& input);
// Returns true if the splicer has a buffer to return.
bool HasNextBuffer() const;
- // Removes the next buffer from the output buffer queue and returns it.
- // This should only be called if HasNextBuffer() returns true.
+ // Removes the next buffer from the output buffer queue and returns it; this
+ // should only be called if HasNextBuffer() returns true.
scoped_refptr<AudioBuffer> GetNextBuffer();
- private:
- void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
+ // Indicates an upcoming splice point. All buffers overlapping or after the
+ // |splice_timestamp| will be considered as "before the splice." Clients must
+ // then call SetSpliceTimestamp(kNoTimestamp()) to signal that future buffers
+ // should be considered as "after the splice."
+ //
+ // Once |kCrossfadeDurationInMilliseconds| of buffers "after the splice" or
+ // end of stream has been received, the "after" buffers will be crossfaded
+ // with all "before" buffers which overlap them. "before" buffers outside
+ // of the overlap range will be discarded.
+ void SetSpliceTimestamp(base::TimeDelta splice_timestamp);
- AudioTimestampHelper output_timestamp_helper_;
-
- // Minimum gap size needed before the splicer will take action to
- // fill a gap. This avoids periodically inserting and then dropping samples
- // when the buffer timestamps are slightly off because of timestamp rounding
- // in the source content. Unit is frames.
- int min_gap_size_;
-
- std::deque<scoped_refptr<AudioBuffer> > output_buffers_;
- bool received_end_of_stream_;
+ private:
+ friend class AudioSplicerTest;
+
+ // Extracts frames to be crossfaded from |pre_splice_sanitizer_|. Transfers
+ // all frames before |splice_timestamp_| into |output_sanitizer_| and drops
+ // frames outside of the crossfade duration.
+ //
+ // The size of the returned AudioBus is the crossfade duration in frames.
+ // Crossfade duration is calculated based on the number of frames available
+ // after |splice_timestamp_| in each sanitizer and capped by
+ // |max_crossfade_duration_|.
+ //
+ // |pre_splice_sanitizer_| will be empty after this operation.
+ scoped_ptr<AudioBus> ExtractCrossfadeFromPreSplice(
+ scoped_refptr<AudioBuffer>* crossfade_buffer);
+
+ // Crossfades |pre_splice_bus->frames()| frames from |post_splice_sanitizer_|
+ // with those from |pre_splice_bus|. Adds the crossfaded buffer to
+ // |output_sanitizer_| along with all buffers in |post_splice_sanitizer_|.
+ //
+ // |post_splice_sanitizer_| will be empty after this operation.
+ void CrossfadePostSplice(scoped_ptr<AudioBus> pre_splice_bus,
+ scoped_refptr<AudioBuffer> crossfade_buffer);
+
+ // Reset the splice and splice end timestamps.
+ void reset_splice_timestamps() {
+ splice_timestamp_ = max_splice_end_timestamp_ = kNoTimestamp();
+ }
+
+ const base::TimeDelta max_crossfade_duration_;
+ base::TimeDelta splice_timestamp_;
+ base::TimeDelta max_splice_end_timestamp_;
+
+ // The various sanitizers for each stage of the crossfade process. Buffers in
+ // |output_sanitizer_| are immediately available for consumption by external
+ // callers.
+ //
+ // Overlapped buffers go into the |pre_splice_sanitizer_| while overlapping
+ // buffers go into the |post_splice_sanitizer_|. Once enough buffers for
+ // crossfading are received, the pre and post sanitizers are drained into
+ // |output_sanitizer_| by ExtractCrossfadeFromPreSplice() and
+ // CrossfadePostSplice() above.
+ //
+ // |pre_splice_sanitizer_| is not constructed until the first splice frame is
+ // encountered, at which point it is constructed based on the timestamp state
+ // of |output_sanitizer_|. It is destructed once the splice is finished.
+ scoped_ptr<AudioStreamSanitizer> output_sanitizer_;
+ scoped_ptr<AudioStreamSanitizer> pre_splice_sanitizer_;
+ scoped_ptr<AudioStreamSanitizer> post_splice_sanitizer_;
+
+ // Whether all buffers which should go into |pre_splice_sanitizer_| have been
+ // received. If true, buffers should now be put in |post_splice_sanitizer_|.
+ bool have_all_pre_splice_buffers_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSplicer);
};
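As a rough illustration of the client-side flow the comments above describe, here is a minimal sketch assuming a constructed AudioSplicer and a stream of decoded AudioBuffers; the buffer names and Consume() are hypothetical:

    // Mark the splice point: buffers added from here on, which overlap or
    // follow |splice_timestamp|, are treated as "before the splice."
    splicer.SetSpliceTimestamp(splice_timestamp);
    splicer.AddInput(old_stream_tail);

    // Signal that subsequent buffers belong to the new stream ("after the
    // splice"). The crossfade is emitted once enough post-splice data (or an
    // end of stream buffer) has arrived.
    splicer.SetSpliceTimestamp(kNoTimestamp());
    splicer.AddInput(new_stream_head);

    while (splicer.HasNextBuffer())
      Consume(splicer.GetNextBuffer());  // Hypothetical sink.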
diff --git a/chromium/media/base/audio_splicer_unittest.cc b/chromium/media/base/audio_splicer_unittest.cc
index 43902687fae..e6de2c62ed3 100644
--- a/chromium/media/base/audio_splicer_unittest.cc
+++ b/chromium/media/base/audio_splicer_unittest.cc
@@ -13,8 +13,12 @@
namespace media {
+// Do not change this format. AddInput() and GetValue() only work with float.
static const SampleFormat kSampleFormat = kSampleFormatF32;
+COMPILE_ASSERT(kSampleFormat == kSampleFormatF32, invalid_splice_format);
+
static const int kChannels = 1;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_MONO;
static const int kDefaultSampleRate = 44100;
static const int kDefaultBufferSize = 100;
@@ -31,29 +35,117 @@ class AudioSplicerTest : public ::testing::Test {
}
scoped_refptr<AudioBuffer> GetNextInputBuffer(float value, int frame_size) {
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
- kSampleFormat,
- kChannels,
- value,
- 0.0f,
- frame_size,
- input_timestamp_helper_.GetTimestamp(),
- input_timestamp_helper_.GetFrameDuration(frame_size));
+ scoped_refptr<AudioBuffer> buffer =
+ MakeAudioBuffer<float>(kSampleFormat,
+ kChannelLayout,
+ kChannels,
+ kDefaultSampleRate,
+ value,
+ 0.0f,
+ frame_size,
+ input_timestamp_helper_.GetTimestamp());
input_timestamp_helper_.AddFrames(frame_size);
return buffer;
}
- bool VerifyData(scoped_refptr<AudioBuffer> buffer, float value) {
+ float GetValue(const scoped_refptr<AudioBuffer>& buffer) {
+ return reinterpret_cast<const float*>(buffer->channel_data()[0])[0];
+ }
+
+ bool VerifyData(const scoped_refptr<AudioBuffer>& buffer, float value) {
int frames = buffer->frame_count();
scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- for (int i = 0; i < frames; ++i) {
- if (bus->channel(0)[i] != value)
- return false;
+ for (int ch = 0; ch < buffer->channel_count(); ++ch) {
+ for (int i = 0; i < frames; ++i) {
+ if (bus->channel(ch)[i] != value)
+ return false;
+ }
}
return true;
}
+ void VerifyNextBuffer(const scoped_refptr<AudioBuffer>& input) {
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output = splicer_.GetNextBuffer();
+ EXPECT_EQ(input->timestamp(), output->timestamp());
+ EXPECT_EQ(input->duration(), output->duration());
+ EXPECT_EQ(input->frame_count(), output->frame_count());
+ EXPECT_TRUE(VerifyData(output, GetValue(input)));
+ }
+
+ void VerifyPreSpliceOutput(
+ const scoped_refptr<AudioBuffer>& overlapped_buffer,
+ const scoped_refptr<AudioBuffer>& overlapping_buffer,
+ int expected_pre_splice_size,
+ base::TimeDelta expected_pre_splice_duration) {
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> pre_splice_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(overlapped_buffer->timestamp(), pre_splice_output->timestamp());
+ EXPECT_EQ(expected_pre_splice_size, pre_splice_output->frame_count());
+ EXPECT_EQ(expected_pre_splice_duration, pre_splice_output->duration());
+ EXPECT_TRUE(VerifyData(pre_splice_output, GetValue(overlapped_buffer)));
+ }
+
+ void VerifyCrossfadeOutput(
+ const scoped_refptr<AudioBuffer>& overlapped_buffer_1,
+ const scoped_refptr<AudioBuffer>& overlapped_buffer_2,
+ const scoped_refptr<AudioBuffer>& overlapping_buffer,
+ int second_overlap_index,
+ int expected_crossfade_size,
+ base::TimeDelta expected_crossfade_duration) {
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+
+ scoped_refptr<AudioBuffer> crossfade_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(expected_crossfade_size, crossfade_output->frame_count());
+ EXPECT_EQ(expected_crossfade_duration, crossfade_output->duration());
+
+ // The splice timestamp may be adjusted by a microsecond.
+ EXPECT_NEAR(overlapping_buffer->timestamp().InMicroseconds(),
+ crossfade_output->timestamp().InMicroseconds(),
+ 1);
+
+ // Verify the actual crossfade.
+ const int frames = crossfade_output->frame_count();
+ float overlapped_value = GetValue(overlapped_buffer_1);
+ const float overlapping_value = GetValue(overlapping_buffer);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, frames);
+ crossfade_output->ReadFrames(frames, 0, 0, bus.get());
+ for (int ch = 0; ch < crossfade_output->channel_count(); ++ch) {
+ float cf_ratio = 0;
+ const float cf_increment = 1.0f / frames;
+ for (int i = 0; i < frames; ++i, cf_ratio += cf_increment) {
+ if (overlapped_buffer_2 && i >= second_overlap_index)
+ overlapped_value = GetValue(overlapped_buffer_2);
+ const float actual = bus->channel(ch)[i];
+ const float expected =
+ (1.0f - cf_ratio) * overlapped_value + cf_ratio * overlapping_value;
+ ASSERT_FLOAT_EQ(expected, actual) << "i=" << i;
+ }
+ }
+ }
+
+ bool AddInput(const scoped_refptr<AudioBuffer>& input) {
+ // Since the splicer doesn't make copies, it works directly on the input
+ // buffers. We must make a copy before adding to ensure the original buffer
+ // is not modified in unexpected ways.
+ scoped_refptr<AudioBuffer> buffer_copy =
+ input->end_of_stream()
+ ? AudioBuffer::CreateEOSBuffer()
+ : AudioBuffer::CopyFrom(kSampleFormat,
+ input->channel_layout(),
+ input->channel_count(),
+ input->sample_rate(),
+ input->frame_count(),
+ &input->channel_data()[0],
+ input->timestamp());
+ return splicer_.AddInput(buffer_copy);
+ }
+
+ base::TimeDelta max_crossfade_duration() {
+ return splicer_.max_crossfade_duration_;
+ }
+
protected:
AudioSplicer splicer_;
AudioTimestampHelper input_timestamp_helper_;
@@ -66,40 +158,24 @@ TEST_F(AudioSplicerTest, PassThru) {
// Test single buffer pass-thru behavior.
scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_1));
+ VerifyNextBuffer(input_1);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Test that multiple buffers can be queued in the splicer.
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_TRUE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_2->duration(), output_2->duration());
- EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
-
- scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_3));
+ VerifyNextBuffer(input_2);
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
- EXPECT_EQ(input_3->duration(), output_3->duration());
- EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
}
TEST_F(AudioSplicerTest, Reset) {
scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.HasNextBuffer());
+ EXPECT_TRUE(AddInput(input_1));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
splicer_.Reset();
EXPECT_FALSE(splicer_.HasNextBuffer());
@@ -112,14 +188,9 @@ TEST_F(AudioSplicerTest, Reset) {
// Verify that a new input buffer passes through as expected.
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_2));
+ VerifyNextBuffer(input_2);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_2->duration(), output_2->duration());
- EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
}
TEST_F(AudioSplicerTest, EndOfStream) {
@@ -128,30 +199,22 @@ TEST_F(AudioSplicerTest, EndOfStream) {
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(input_2->end_of_stream());
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
+
+ VerifyNextBuffer(input_1);
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
-
EXPECT_TRUE(output_2->end_of_stream());
// Verify that buffers can be added again after Reset().
splicer_.Reset();
- EXPECT_TRUE(splicer_.AddInput(input_3));
- scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_3));
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
- EXPECT_EQ(input_3->duration(), output_3->duration());
- EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
}
-
// Test the gap insertion code.
// +--------------+ +--------------+
// |11111111111111| |22222222222222|
@@ -170,40 +233,29 @@ TEST_F(AudioSplicerTest, GapInsertion) {
input_timestamp_helper_.AddFrames(kGapSize);
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
-
- // Verify that a gap buffer is generated.
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
- scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
// Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
+ VerifyNextBuffer(input_1);
// Verify the contents of the gap buffer.
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
base::TimeDelta gap_timestamp =
input_1->timestamp() + input_1->duration();
base::TimeDelta gap_duration = input_2->timestamp() - gap_timestamp;
EXPECT_GT(gap_duration, base::TimeDelta());
EXPECT_EQ(gap_timestamp, output_2->timestamp());
- EXPECT_EQ(gap_duration, output_2->duration());
+ EXPECT_NEAR(
+ gap_duration.InMicroseconds(), output_2->duration().InMicroseconds(), 1);
EXPECT_EQ(kGapSize, output_2->frame_count());
EXPECT_TRUE(VerifyData(output_2, 0.0f));
// Verify that the second input buffer passed through unmodified.
- EXPECT_EQ(input_2->timestamp(), output_3->timestamp());
- EXPECT_EQ(input_2->duration(), output_3->duration());
- EXPECT_EQ(input_2->frame_count(), output_3->frame_count());
- EXPECT_TRUE(VerifyData(output_3, 0.2f));
+ VerifyNextBuffer(input_2);
+ EXPECT_FALSE(splicer_.HasNextBuffer());
}
-
// Test that an error is signalled when the gap between input buffers is
// too large.
TEST_F(AudioSplicerTest, GapTooLarge) {
@@ -215,17 +267,10 @@ TEST_F(AudioSplicerTest, GapTooLarge) {
input_timestamp_helper_.AddFrames(kGapSize);
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_FALSE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_FALSE(AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
-
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
+ VerifyNextBuffer(input_1);
// Verify that the second buffer is not available.
EXPECT_FALSE(splicer_.HasNextBuffer());
@@ -237,17 +282,11 @@ TEST_F(AudioSplicerTest, GapTooLarge) {
// Verify that valid buffers are still accepted.
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
- EXPECT_TRUE(splicer_.AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(AddInput(input_3));
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_3->duration(), output_2->duration());
- EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
- EXPECT_TRUE(VerifyData(output_2, 0.3f));
}
-
// Verifies that an error is signalled if AddInput() is called
// with a timestamp that is earlier than the first buffer added.
TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
@@ -261,11 +300,10 @@ TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.1f);
EXPECT_GT(input_1->timestamp(), input_2->timestamp());
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_FALSE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_FALSE(AddInput(input_2));
}
-
// Test when one buffer partially overlaps another.
// +--------------+
// |11111111111111|
@@ -288,33 +326,27 @@ TEST_F(AudioSplicerTest, PartialOverlap) {
scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ // Verify that the first input buffer passed through unmodified.
+ VerifyNextBuffer(input_1);
+
+ ASSERT_TRUE(splicer_.HasNextBuffer());
scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
-
// Verify that the second input buffer was truncated to only contain
- // the samples that are after the end of |input_1|. Note that data is not
- // copied, so |input_2|'s values are modified.
+ // the samples that are after the end of |input_1|.
base::TimeDelta expected_timestamp =
input_1->timestamp() + input_1->duration();
base::TimeDelta expected_duration =
(input_2->timestamp() + input_2->duration()) - expected_timestamp;
EXPECT_EQ(expected_timestamp, output_2->timestamp());
EXPECT_EQ(expected_duration, output_2->duration());
- EXPECT_TRUE(VerifyData(output_2, 0.2f));
+ EXPECT_TRUE(VerifyData(output_2, GetValue(input_2)));
}
-
// Test that an input buffer that is completely overlapped by a buffer
// that was already added is dropped.
// +--------------+
@@ -348,27 +380,344 @@ TEST_F(AudioSplicerTest, DropBuffer) {
input_timestamp_helper_.AddFrames(input_1->frame_count());
scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.AddInput(input_3));
+ EXPECT_TRUE(AddInput(input_1));
+ EXPECT_TRUE(AddInput(input_2));
+ EXPECT_TRUE(AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ VerifyNextBuffer(input_1);
+ VerifyNextBuffer(input_3);
EXPECT_FALSE(splicer_.HasNextBuffer());
+}
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
- EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
- EXPECT_TRUE(VerifyData(output_1, 0.1f));
-
- // Verify that the second output buffer only contains
- // the samples that are in |input_3|.
- EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
- EXPECT_EQ(input_3->duration(), output_2->duration());
- EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
- EXPECT_TRUE(VerifyData(output_2, 0.3f));
+// Test crossfade when one buffer partially overlaps another.
+// +--------------+
+// |11111111111111|
+// +--------------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +----------+----+----------+
+// |1111111111|xxxx|2222222222|
+// +----------+----+----------+
+// Where "xxxx" represents the crossfaded portion of the signal.
+TEST_F(AudioSplicerTest, PartialOverlapCrossfade) {
+ const int kCrossfadeSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration());
+ const int kBufferSize = kCrossfadeSize * 2;
+
+ scoped_refptr<AudioBuffer> extra_pre_splice_buffer =
+ GetNextInputBuffer(0.2f, kBufferSize);
+ scoped_refptr<AudioBuffer> overlapped_buffer =
+ GetNextInputBuffer(1.0f, kBufferSize);
+
+ // Reset timestamp helper so that the next buffer will have a timestamp that
+ // starts in the middle of |overlapped_buffer|.
+ input_timestamp_helper_.SetBaseTimestamp(overlapped_buffer->timestamp());
+ input_timestamp_helper_.AddFrames(overlapped_buffer->frame_count() -
+ kCrossfadeSize);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> overlapping_buffer =
+ GetNextInputBuffer(0.0f, kBufferSize);
+
+ // |extra_pre_splice_buffer| is entirely before the splice and should be ready
+ // for output.
+ EXPECT_TRUE(AddInput(extra_pre_splice_buffer));
+ VerifyNextBuffer(extra_pre_splice_buffer);
+
+ // The splicer should be internally queuing input since |overlapped_buffer| is
+ // part of the splice.
+ EXPECT_TRUE(AddInput(overlapped_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |overlapping_buffer| completes the splice.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_TRUE(AddInput(overlapping_buffer));
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+
+ // Add one more buffer to make sure it's passed through untouched.
+ scoped_refptr<AudioBuffer> extra_post_splice_buffer =
+ GetNextInputBuffer(0.5f, kBufferSize);
+ EXPECT_TRUE(AddInput(extra_post_splice_buffer));
+
+ VerifyPreSpliceOutput(overlapped_buffer,
+ overlapping_buffer,
+ 221,
+ base::TimeDelta::FromMicroseconds(5011));
+
+ // Due to rounding the crossfade size may vary by up to a frame.
+ const int kExpectedCrossfadeSize = 220;
+ EXPECT_NEAR(kExpectedCrossfadeSize, kCrossfadeSize, 1);
+
+ VerifyCrossfadeOutput(overlapped_buffer,
+ NULL,
+ overlapping_buffer,
+ 0,
+ kExpectedCrossfadeSize,
+ base::TimeDelta::FromMicroseconds(4988));
+
+ // Retrieve the remaining portion after crossfade.
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> post_splice_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(20022),
+ post_splice_output->timestamp());
+ EXPECT_EQ(overlapping_buffer->frame_count() - kExpectedCrossfadeSize,
+ post_splice_output->frame_count());
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(5034),
+ post_splice_output->duration());
+
+ EXPECT_TRUE(VerifyData(post_splice_output, GetValue(overlapping_buffer)));
+
+ VerifyNextBuffer(extra_post_splice_buffer);
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
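The expected values checked above follow the linear ramp that VerifyCrossfadeOutput() computes; as a standalone sketch of that math (not part of the patch):

    // Each output frame blends the "before" and "after" signals with a ratio
    // that ramps linearly from 0 to 1 across the crossfade region.
    void LinearCrossfade(const float* before, const float* after,
                         float* out, int frames) {
      const float step = 1.0f / frames;
      float ratio = 0.0f;
      for (int i = 0; i < frames; ++i, ratio += step)
        out[i] = (1.0f - ratio) * before[i] + ratio * after[i];
    }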
+
+// Test crossfade when one buffer partially overlaps another, but an end of
+// stream buffer is received before the crossfade duration is reached.
+// +--------------+
+// |11111111111111|
+// +--------------+
+// +---------++---+
+// |222222222||EOS|
+// +---------++---+
+// Results in:
+// +----------+----+----++---+
+// |1111111111|xxxx|2222||EOS|
+// +----------+----+----++---+
+// Where "x" represents the crossfaded portion of the signal.
+TEST_F(AudioSplicerTest, PartialOverlapCrossfadeEndOfStream) {
+ const int kCrossfadeSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration());
+
+ scoped_refptr<AudioBuffer> overlapped_buffer =
+ GetNextInputBuffer(1.0f, kCrossfadeSize * 2);
+
+ // Reset timestamp helper so that the next buffer will have a timestamp that
+ // starts 3/4 of the way into |overlapped_buffer|.
+ input_timestamp_helper_.SetBaseTimestamp(overlapped_buffer->timestamp());
+ input_timestamp_helper_.AddFrames(3 * overlapped_buffer->frame_count() / 4);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> overlapping_buffer =
+ GetNextInputBuffer(0.0f, kCrossfadeSize / 3);
+
+ // The splicer should be internally queuing input since |overlapped_buffer| is
+ // part of the splice.
+ EXPECT_TRUE(AddInput(overlapped_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |overlapping_buffer| should not have enough data to complete the splice, so
+ // ensure output is not available.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_TRUE(AddInput(overlapping_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Now add an EOS buffer which should complete the splice.
+ EXPECT_TRUE(AddInput(AudioBuffer::CreateEOSBuffer()));
+
+ VerifyPreSpliceOutput(overlapped_buffer,
+ overlapping_buffer,
+ 331,
+ base::TimeDelta::FromMicroseconds(7505));
+ VerifyCrossfadeOutput(overlapped_buffer,
+ NULL,
+ overlapping_buffer,
+ 0,
+ overlapping_buffer->frame_count(),
+ overlapping_buffer->duration());
+
+ // Ensure the last buffer is an EOS buffer.
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> post_splice_output = splicer_.GetNextBuffer();
+ EXPECT_TRUE(post_splice_output->end_of_stream());
+
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
+
+// Test crossfade when one buffer partially overlaps another, but the amount of
+// overlapped data is less than the crossfade duration.
+// +------------+
+// |111111111111|
+// +------------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +----------+-+------------+
+// |1111111111|x|222222222222|
+// +----------+-+------------+
+// Where "x" represents the crossfaded portion of the signal.
+TEST_F(AudioSplicerTest, PartialOverlapCrossfadeShortPreSplice) {
+ const int kCrossfadeSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration());
+
+ scoped_refptr<AudioBuffer> overlapped_buffer =
+ GetNextInputBuffer(1.0f, kCrossfadeSize / 2);
+
+ // Reset timestamp helper so that the next buffer will have a timestamp that
+ // starts in the middle of |overlapped_buffer|.
+ input_timestamp_helper_.SetBaseTimestamp(overlapped_buffer->timestamp());
+ input_timestamp_helper_.AddFrames(overlapped_buffer->frame_count() / 2);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> overlapping_buffer =
+ GetNextInputBuffer(0.0f, kCrossfadeSize * 2);
+
+ // The splicer should be internally queuing input since |overlapped_buffer| is
+ // part of the splice.
+ EXPECT_TRUE(AddInput(overlapped_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |overlapping_buffer| completes the splice.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_TRUE(AddInput(overlapping_buffer));
+
+ const int kExpectedPreSpliceSize = 55;
+ const base::TimeDelta kExpectedPreSpliceDuration =
+ base::TimeDelta::FromMicroseconds(1247);
+ VerifyPreSpliceOutput(overlapped_buffer,
+ overlapping_buffer,
+ kExpectedPreSpliceSize,
+ kExpectedPreSpliceDuration);
+ VerifyCrossfadeOutput(overlapped_buffer,
+ NULL,
+ overlapping_buffer,
+ 0,
+ kExpectedPreSpliceSize,
+ kExpectedPreSpliceDuration);
+
+ // Retrieve the remaining portion after crossfade.
+ ASSERT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> post_splice_output = splicer_.GetNextBuffer();
+ EXPECT_EQ(overlapping_buffer->timestamp() + kExpectedPreSpliceDuration,
+ post_splice_output->timestamp());
+ EXPECT_EQ(overlapping_buffer->frame_count() - kExpectedPreSpliceSize,
+ post_splice_output->frame_count());
+ EXPECT_EQ(overlapping_buffer->duration() - kExpectedPreSpliceDuration,
+ post_splice_output->duration());
+
+ EXPECT_TRUE(VerifyData(post_splice_output, GetValue(overlapping_buffer)));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
+
+// Test behavior when a splice frame is incorrectly marked and does not actually
+// overlap.
+// +----------+
+// |1111111111|
+// +----------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +----------+--------------+
+// |1111111111|22222222222222|
+// +----------+--------------+
+TEST_F(AudioSplicerTest, IncorrectlyMarkedSplice) {
+ const int kBufferSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration()) * 2;
+
+ scoped_refptr<AudioBuffer> first_buffer =
+ GetNextInputBuffer(1.0f, kBufferSize);
+ // Fuzz the duration slightly so that the buffer overlaps the splice timestamp
+ // by a microsecond, which is not enough to crossfade.
+ const base::TimeDelta kSpliceTimestamp =
+ input_timestamp_helper_.GetTimestamp() -
+ base::TimeDelta::FromMicroseconds(1);
+ splicer_.SetSpliceTimestamp(kSpliceTimestamp);
+ scoped_refptr<AudioBuffer> second_buffer =
+ GetNextInputBuffer(0.0f, kBufferSize);
+ second_buffer->set_timestamp(kSpliceTimestamp);
+
+ // The splicer should be internally queuing input since |first_buffer| is part
+ // of the supposed splice.
+ EXPECT_TRUE(AddInput(first_buffer));
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // |second_buffer| should complete the supposed splice, so ensure output is
+ // now available.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_TRUE(AddInput(second_buffer));
+
+ VerifyNextBuffer(first_buffer);
+ VerifyNextBuffer(second_buffer);
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
+
+// Test behavior when a splice frame is incorrectly marked and there is a gap
+// between what's in the pre splice and post splice.
+// +--------+
+// |11111111|
+// +--------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +--------+-+--------------+
+// |11111111|0|22222222222222|
+// +--------+-+--------------+
+TEST_F(AudioSplicerTest, IncorrectlyMarkedSpliceWithGap) {
+ const int kBufferSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration()) * 2;
+ const int kGapSize = 2;
+
+ scoped_refptr<AudioBuffer> first_buffer =
+ GetNextInputBuffer(1.0f, kBufferSize - kGapSize);
+ scoped_refptr<AudioBuffer> gap_buffer =
+ GetNextInputBuffer(0.0f, kGapSize);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> second_buffer =
+ GetNextInputBuffer(0.0f, kBufferSize);
+
+ // The splicer should pass through the first buffer since it's not part of the
+ // splice.
+ EXPECT_TRUE(AddInput(first_buffer));
+ VerifyNextBuffer(first_buffer);
+
+ // Do not add |gap_buffer|.
+
+ // |second_buffer| will complete the supposed splice.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_TRUE(AddInput(second_buffer));
+
+ VerifyNextBuffer(gap_buffer);
+ VerifyNextBuffer(second_buffer);
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+}
+
+// Test behavior when a splice frame is incorrectly marked and there is a gap
+// between what's in the pre splice and post splice that is too large to recover
+// from.
+// +--------+
+// |11111111|
+// +--------+
+// +------+
+// |222222|
+// +------+
+// Results in an error and not a crash.
+TEST_F(AudioSplicerTest, IncorrectlyMarkedSpliceWithBadGap) {
+ const int kBufferSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration()) * 2;
+ const int kGapSize = kBufferSize +
+ input_timestamp_helper_.GetFramesToTarget(
+ base::TimeDelta::FromMilliseconds(
+ AudioSplicer::kMaxTimeDeltaInMilliseconds + 1));
+
+ scoped_refptr<AudioBuffer> first_buffer =
+ GetNextInputBuffer(1.0f, kBufferSize);
+ scoped_refptr<AudioBuffer> gap_buffer =
+ GetNextInputBuffer(0.0f, kGapSize);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> second_buffer =
+ GetNextInputBuffer(0.0f, kBufferSize);
+
+ // The splicer should pass through the first buffer since it's not part of the
+ // splice.
+ EXPECT_TRUE(AddInput(first_buffer));
+ VerifyNextBuffer(first_buffer);
+
+ // Do not add |gap_buffer|.
+
+ // |second_buffer| will complete the supposed splice.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_FALSE(AddInput(second_buffer));
}
} // namespace media
diff --git a/chromium/media/base/audio_timestamp_helper.h b/chromium/media/base/audio_timestamp_helper.h
index 8b5d50e66f6..1da8b4a7cda 100644
--- a/chromium/media/base/audio_timestamp_helper.h
+++ b/chromium/media/base/audio_timestamp_helper.h
@@ -27,12 +27,13 @@ namespace media {
// accumulated frames to reach a target timestamp.
class MEDIA_EXPORT AudioTimestampHelper {
public:
- AudioTimestampHelper(int samples_per_second);
+ explicit AudioTimestampHelper(int samples_per_second);
// Sets the base timestamp to |base_timestamp| and sets the frame count to 0.
void SetBaseTimestamp(base::TimeDelta base_timestamp);
base::TimeDelta base_timestamp() const;
+ int64 frame_count() const { return frame_count_; }
// Adds |frame_count| to the frame counter.
// Note: SetBaseTimestamp() must be called with a value other than
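As an aside, the conversion AudioTimestampHelper encapsulates is straightforward; a sketch of the underlying arithmetic (the real class additionally tracks a running frame count so rounding errors do not accumulate):

    // Duration represented by |frames| at |samples_per_second|. At 44100 Hz,
    // 441 frames correspond to exactly 10 milliseconds.
    base::TimeDelta FramesToDuration(int64 frames, int samples_per_second) {
      return base::TimeDelta::FromMicroseconds(
          frames * base::Time::kMicrosecondsPerSecond / samples_per_second);
    }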
diff --git a/chromium/media/base/audio_video_metadata_extractor.cc b/chromium/media/base/audio_video_metadata_extractor.cc
new file mode 100644
index 00000000000..7a8cf766abd
--- /dev/null
+++ b/chromium/media/base/audio_video_metadata_extractor.cc
@@ -0,0 +1,260 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_video_metadata_extractor.h"
+
+#include "base/bind.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/blocking_url_protocol.h"
+#include "media/filters/ffmpeg_glue.h"
+
+namespace media {
+
+namespace {
+
+void OnError(bool* succeeded) {
+ *succeeded = false;
+}
+
+// Returns true if |tag| matches |expected_key|. Copies the tag value into
+// |*destination| only if it is still empty.
+bool ExtractString(AVDictionaryEntry* tag, const char* expected_key,
+ std::string* destination) {
+ if (!LowerCaseEqualsASCII(std::string(tag->key), expected_key))
+ return false;
+
+ if (destination->empty())
+ *destination = tag->value;
+
+ return true;
+}
+
+// Returns true if |tag| matches |expected_key|. Parses the tag value into
+// |*destination| only if it has not been set (is negative).
+bool ExtractInt(AVDictionaryEntry* tag, const char* expected_key,
+ int* destination) {
+ if (!LowerCaseEqualsASCII(std::string(tag->key), expected_key))
+ return false;
+
+ int temporary = -1;
+ if (*destination < 0 && base::StringToInt(tag->value, &temporary) &&
+ temporary >= 0) {
+ *destination = temporary;
+ }
+
+ return true;
+}
+
+// Set attached image size limit to 4MB. Chosen arbitrarily.
+const int kAttachedImageSizeLimit = 4 * 1024 * 1024;
+
+} // namespace
+
+AudioVideoMetadataExtractor::StreamInfo::StreamInfo() {}
+
+AudioVideoMetadataExtractor::StreamInfo::~StreamInfo() {}
+
+AudioVideoMetadataExtractor::AudioVideoMetadataExtractor()
+ : extracted_(false),
+ duration_(-1),
+ width_(-1),
+ height_(-1),
+ disc_(-1),
+ rotation_(-1),
+ track_(-1) {
+}
+
+AudioVideoMetadataExtractor::~AudioVideoMetadataExtractor() {
+}
+
+bool AudioVideoMetadataExtractor::Extract(DataSource* source,
+ bool extract_attached_images) {
+ DCHECK(!extracted_);
+
+ bool read_ok = true;
+ media::BlockingUrlProtocol protocol(source, base::Bind(&OnError, &read_ok));
+ media::FFmpegGlue glue(&protocol);
+ AVFormatContext* format_context = glue.format_context();
+
+ if (!glue.OpenContext())
+ return false;
+
+ if (!read_ok)
+ return false;
+
+ if (!format_context->iformat)
+ return false;
+
+ if (avformat_find_stream_info(format_context, NULL) < 0)
+ return false;
+
+ if (format_context->duration != AV_NOPTS_VALUE)
+ duration_ = static_cast<double>(format_context->duration) / AV_TIME_BASE;
+
+ stream_infos_.push_back(StreamInfo());
+ StreamInfo& container_info = stream_infos_.back();
+ container_info.type = format_context->iformat->name;
+ ExtractDictionary(format_context->metadata, &container_info.tags);
+
+ for (unsigned int i = 0; i < format_context->nb_streams; ++i) {
+ stream_infos_.push_back(StreamInfo());
+ StreamInfo& info = stream_infos_.back();
+
+ AVStream* stream = format_context->streams[i];
+ if (!stream)
+ continue;
+
+ // Also extract the dictionary from each stream. Needed for containers that
+ // attach metadata to contained streams instead of the container itself,
+ // like OGG.
+ ExtractDictionary(stream->metadata, &info.tags);
+
+ if (!stream->codec)
+ continue;
+
+ info.type = avcodec_get_name(stream->codec->codec_id);
+
+ // Extract dimensions of largest stream that's not an attached image.
+ if (stream->codec->width > 0 && stream->codec->width > width_ &&
+ stream->codec->height > 0 && stream->codec->height > height_) {
+ width_ = stream->codec->width;
+ height_ = stream->codec->height;
+ }
+
+ // Extract attached image if requested.
+ if (extract_attached_images &&
+ stream->disposition == AV_DISPOSITION_ATTACHED_PIC &&
+ stream->attached_pic.size > 0 &&
+ stream->attached_pic.size <= kAttachedImageSizeLimit &&
+ stream->attached_pic.data != NULL) {
+ attached_images_bytes_.push_back(std::string());
+ attached_images_bytes_.back().assign(
+ reinterpret_cast<const char*>(stream->attached_pic.data),
+ stream->attached_pic.size);
+ }
+ }
+
+ extracted_ = true;
+ return true;
+}
+
+double AudioVideoMetadataExtractor::duration() const {
+ DCHECK(extracted_);
+ return duration_;
+}
+
+int AudioVideoMetadataExtractor::width() const {
+ DCHECK(extracted_);
+ return width_;
+}
+
+int AudioVideoMetadataExtractor::height() const {
+ DCHECK(extracted_);
+ return height_;
+}
+
+int AudioVideoMetadataExtractor::rotation() const {
+ DCHECK(extracted_);
+ return rotation_;
+}
+
+const std::string& AudioVideoMetadataExtractor::album() const {
+ DCHECK(extracted_);
+ return album_;
+}
+
+const std::string& AudioVideoMetadataExtractor::artist() const {
+ DCHECK(extracted_);
+ return artist_;
+}
+
+const std::string& AudioVideoMetadataExtractor::comment() const {
+ DCHECK(extracted_);
+ return comment_;
+}
+
+const std::string& AudioVideoMetadataExtractor::copyright() const {
+ DCHECK(extracted_);
+ return copyright_;
+}
+
+const std::string& AudioVideoMetadataExtractor::date() const {
+ DCHECK(extracted_);
+ return date_;
+}
+
+int AudioVideoMetadataExtractor::disc() const {
+ DCHECK(extracted_);
+ return disc_;
+}
+
+const std::string& AudioVideoMetadataExtractor::encoder() const {
+ DCHECK(extracted_);
+ return encoder_;
+}
+
+const std::string& AudioVideoMetadataExtractor::encoded_by() const {
+ DCHECK(extracted_);
+ return encoded_by_;
+}
+
+const std::string& AudioVideoMetadataExtractor::genre() const {
+ DCHECK(extracted_);
+ return genre_;
+}
+
+const std::string& AudioVideoMetadataExtractor::language() const {
+ DCHECK(extracted_);
+ return language_;
+}
+
+const std::string& AudioVideoMetadataExtractor::title() const {
+ DCHECK(extracted_);
+ return title_;
+}
+
+int AudioVideoMetadataExtractor::track() const {
+ DCHECK(extracted_);
+ return track_;
+}
+
+const std::vector<AudioVideoMetadataExtractor::StreamInfo>&
+AudioVideoMetadataExtractor::stream_infos() const {
+ DCHECK(extracted_);
+ return stream_infos_;
+}
+
+const std::vector<std::string>&
+AudioVideoMetadataExtractor::attached_images_bytes() const {
+ DCHECK(extracted_);
+ return attached_images_bytes_;
+}
+
+void AudioVideoMetadataExtractor::ExtractDictionary(
+ AVDictionary* metadata, TagDictionary* raw_tags) {
+ if (!metadata)
+ return;
+
+ AVDictionaryEntry* tag = NULL;
+ while ((tag = av_dict_get(metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
+ if (raw_tags->find(tag->key) == raw_tags->end())
+ (*raw_tags)[tag->key] = tag->value;
+
+ if (ExtractInt(tag, "rotate", &rotation_)) continue;
+ if (ExtractString(tag, "album", &album_)) continue;
+ if (ExtractString(tag, "artist", &artist_)) continue;
+ if (ExtractString(tag, "comment", &comment_)) continue;
+ if (ExtractString(tag, "copyright", &copyright_)) continue;
+ if (ExtractString(tag, "date", &date_)) continue;
+ if (ExtractInt(tag, "disc", &disc_)) continue;
+ if (ExtractString(tag, "encoder", &encoder_)) continue;
+ if (ExtractString(tag, "encoded_by", &encoded_by_)) continue;
+ if (ExtractString(tag, "genre", &genre_)) continue;
+ if (ExtractString(tag, "language", &language_)) continue;
+ if (ExtractString(tag, "title", &title_)) continue;
+ if (ExtractInt(tag, "track", &track_)) continue;
+ }
+}
+
+} // namespace media
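Note the first-value-wins rule in ExtractString() and ExtractInt() above: a tag is recorded only while its destination is still unset, so the container's tags (extracted first) take precedence over later stream tags. A standalone sketch of that rule, with example values borrowed from the test data below:

    // Only an empty destination is overwritten; later values are ignored.
    void SetIfEmpty(const std::string& value, std::string* destination) {
      if (destination->empty())
        *destination = value;
    }
    // SetIfEmpty("Airbag", &title);  // |title| becomes "Airbag".
    // SetIfEmpty("Other", &title);   // Ignored; |title| is already set.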
diff --git a/chromium/media/base/audio_video_metadata_extractor.h b/chromium/media/base/audio_video_metadata_extractor.h
new file mode 100644
index 00000000000..953ece0b972
--- /dev/null
+++ b/chromium/media/base/audio_video_metadata_extractor.h
@@ -0,0 +1,106 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_VIDEO_METADATA_EXTRACTOR_H_
+#define MEDIA_BASE_AUDIO_VIDEO_METADATA_EXTRACTOR_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+struct AVDictionary;
+
+namespace media {
+
+class DataSource;
+
+// This class extracts a string dictionary of metadata tags for audio and video
+// files. It also provides the format name.
+class MEDIA_EXPORT AudioVideoMetadataExtractor {
+ public:
+ typedef std::map<std::string, std::string> TagDictionary;
+
+ struct StreamInfo {
+ StreamInfo();
+ ~StreamInfo();
+ std::string type;
+ TagDictionary tags;
+ };
+
+ typedef std::vector<StreamInfo> StreamInfoVector;
+
+ AudioVideoMetadataExtractor();
+ ~AudioVideoMetadataExtractor();
+
+ // Returns whether or not the fields were successfully extracted. Should only
+ // be called once.
+ bool Extract(DataSource* source, bool extract_attached_pics);
+
+ // Returns -1 if we cannot extract the duration. In seconds.
+ double duration() const;
+
+ // Returns -1 for containers without video.
+ int width() const;
+ int height() const;
+
+ // Returns -1 if undefined.
+ int rotation() const;
+
+ // Returns -1 or an empty string if the value is undefined.
+ const std::string& album() const;
+ const std::string& artist() const;
+ const std::string& comment() const;
+ const std::string& copyright() const;
+ const std::string& date() const;
+ int disc() const;
+ const std::string& encoder() const;
+ const std::string& encoded_by() const;
+ const std::string& genre() const;
+ const std::string& language() const;
+ const std::string& title() const;
+ int track() const;
+
+ // First element is the container. Subsequent elements are the child streams.
+ const StreamInfoVector& stream_infos() const;
+
+ // Empty if the Extract() call did not request attached images, or if no
+ // attached images were found.
+ const std::vector<std::string>& attached_images_bytes() const;
+
+ private:
+ void ExtractDictionary(AVDictionary* metadata, TagDictionary* raw_tags);
+
+ bool extracted_;
+
+ double duration_;
+ int width_;
+ int height_;
+
+ std::string album_;
+ std::string artist_;
+ std::string comment_;
+ std::string copyright_;
+ std::string date_;
+ int disc_;
+ std::string encoder_;
+ std::string encoded_by_;
+ std::string genre_;
+ std::string language_;
+ int rotation_;
+ std::string title_;
+ int track_;
+
+ StreamInfoVector stream_infos_;
+
+ std::vector<std::string> attached_images_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioVideoMetadataExtractor);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_VIDEO_METADATA_EXTRACTOR_H_
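A minimal usage sketch of this API; "song.mp3" is a placeholder, and the unit tests below follow the same pattern with FileDataSource:

    media::FileDataSource source;
    if (source.Initialize(media::GetTestDataFilePath("song.mp3"))) {
      media::AudioVideoMetadataExtractor extractor;
      if (extractor.Extract(&source, false /* extract_attached_images */)) {
        // Accessors may only be called after a successful Extract().
        VLOG(1) << extractor.title() << " by " << extractor.artist()
                << ", " << extractor.duration() << "s";
      }
    }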
diff --git a/chromium/media/base/audio_video_metadata_extractor_unittest.cc b/chromium/media/base/audio_video_metadata_extractor_unittest.cc
new file mode 100644
index 00000000000..0af6e165e7b
--- /dev/null
+++ b/chromium/media/base/audio_video_metadata_extractor_unittest.cc
@@ -0,0 +1,208 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/sha1.h"
+#include "build/build_config.h"
+#include "media/base/audio_video_metadata_extractor.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/file_data_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+scoped_ptr<AudioVideoMetadataExtractor> GetExtractor(
+ const std::string& filename,
+ bool extract_attached_images,
+ bool expected_result,
+ double expected_duration,
+ int expected_width,
+ int expected_height) {
+ FileDataSource source;
+ EXPECT_TRUE(source.Initialize(GetTestDataFilePath(filename)));
+
+ scoped_ptr<AudioVideoMetadataExtractor> extractor(
+ new AudioVideoMetadataExtractor);
+ bool extracted = extractor->Extract(&source, extract_attached_images);
+ EXPECT_EQ(expected_result, extracted);
+
+ if (!extracted)
+ return extractor.Pass();
+
+ EXPECT_EQ(expected_duration, extractor->duration());
+
+ EXPECT_EQ(expected_width, extractor->width());
+ EXPECT_EQ(expected_height, extractor->height());
+
+ return extractor.Pass();
+}
+
+TEST(AudioVideoMetadataExtractorTest, InvalidFile) {
+ GetExtractor("ten_byte_file", true, false, 0, -1, -1);
+}
+
+TEST(AudioVideoMetadataExtractorTest, AudioOGG) {
+ scoped_ptr<AudioVideoMetadataExtractor> extractor =
+ GetExtractor("9ch.ogg", true, true, 0, -1, -1);
+ EXPECT_EQ("Processed by SoX", extractor->comment());
+
+ EXPECT_EQ("ogg", extractor->stream_infos()[0].type);
+ EXPECT_EQ(2u, extractor->stream_infos().size());
+
+ EXPECT_EQ(0u, extractor->stream_infos()[0].tags.size());
+
+ EXPECT_EQ(1u, extractor->stream_infos()[1].tags.size());
+ EXPECT_EQ("vorbis", extractor->stream_infos()[1].type);
+ EXPECT_EQ("Processed by SoX",
+ extractor->stream_infos()[1].tags.find("COMMENT")->second);
+
+ EXPECT_EQ(0u, extractor->attached_images_bytes().size());
+}
+
+TEST(AudioVideoMetadataExtractorTest, AudioWAV) {
+ scoped_ptr<AudioVideoMetadataExtractor> extractor =
+ GetExtractor("sfx_u8.wav", true, true, 0, -1, -1);
+ EXPECT_EQ("Lavf54.37.100", extractor->encoder());
+ EXPECT_EQ("Amadeus Pro", extractor->encoded_by());
+
+ EXPECT_EQ("wav", extractor->stream_infos()[0].type);
+ EXPECT_EQ(2u, extractor->stream_infos().size());
+
+ EXPECT_EQ(2u, extractor->stream_infos()[0].tags.size());
+ EXPECT_EQ("Lavf54.37.100",
+ extractor->stream_infos()[0].tags.find("encoder")->second);
+ EXPECT_EQ("Amadeus Pro",
+ extractor->stream_infos()[0].tags.find("encoded_by")->second);
+
+ EXPECT_EQ("pcm_u8", extractor->stream_infos()[1].type);
+ EXPECT_EQ(0u, extractor->stream_infos()[1].tags.size());
+
+ EXPECT_EQ(0u, extractor->attached_images_bytes().size());
+}
+
+TEST(AudioVideoMetadataExtractorTest, VideoWebM) {
+ scoped_ptr<AudioVideoMetadataExtractor> extractor =
+ GetExtractor("bear-320x240-multitrack.webm", true, true, 2, 320, 240);
+ EXPECT_EQ("Lavf53.9.0", extractor->encoder());
+
+ EXPECT_EQ(6u, extractor->stream_infos().size());
+
+ EXPECT_EQ("matroska,webm", extractor->stream_infos()[0].type);
+ EXPECT_EQ(1u, extractor->stream_infos()[0].tags.size());
+ EXPECT_EQ("Lavf53.9.0",
+ extractor->stream_infos()[0].tags.find("ENCODER")->second);
+
+ EXPECT_EQ("vp8", extractor->stream_infos()[1].type);
+ EXPECT_EQ(0u, extractor->stream_infos()[1].tags.size());
+
+ EXPECT_EQ("vorbis", extractor->stream_infos()[2].type);
+ EXPECT_EQ(0u, extractor->stream_infos()[2].tags.size());
+
+ EXPECT_EQ("subrip", extractor->stream_infos()[3].type);
+ EXPECT_EQ(0u, extractor->stream_infos()[3].tags.size());
+
+ EXPECT_EQ("theora", extractor->stream_infos()[4].type);
+ EXPECT_EQ(0u, extractor->stream_infos()[4].tags.size());
+
+ EXPECT_EQ("pcm_s16le", extractor->stream_infos()[5].type);
+ EXPECT_EQ(1u, extractor->stream_infos()[5].tags.size());
+ EXPECT_EQ("Lavc52.32.0",
+ extractor->stream_infos()[5].tags.find("ENCODER")->second);
+
+ EXPECT_EQ(0u, extractor->attached_images_bytes().size());
+}
+
+#if defined(USE_PROPRIETARY_CODECS)
+TEST(AudioVideoMetadataExtractorTest, AndroidRotatedMP4Video) {
+ scoped_ptr<AudioVideoMetadataExtractor> extractor =
+ GetExtractor("90rotation.mp4", true, true, 0, 1920, 1080);
+
+ EXPECT_EQ(90, extractor->rotation());
+
+ EXPECT_EQ(3u, extractor->stream_infos().size());
+
+ EXPECT_EQ("mov,mp4,m4a,3gp,3g2,mj2", extractor->stream_infos()[0].type);
+ EXPECT_EQ(4u, extractor->stream_infos()[0].tags.size());
+ EXPECT_EQ(
+ "isom3gp4",
+ extractor->stream_infos()[0].tags.find("compatible_brands")->second);
+ EXPECT_EQ(
+ "2014-02-11 00:39:25",
+ extractor->stream_infos()[0].tags.find("creation_time")->second);
+ EXPECT_EQ("isom",
+ extractor->stream_infos()[0].tags.find("major_brand")->second);
+ EXPECT_EQ("0",
+ extractor->stream_infos()[0].tags.find("minor_version")->second);
+
+ EXPECT_EQ("h264", extractor->stream_infos()[1].type);
+ EXPECT_EQ(5u, extractor->stream_infos()[1].tags.size());
+ EXPECT_EQ("2014-02-11 00:39:25",
+ extractor->stream_infos()[1].tags.find("creation_time")->second);
+ EXPECT_EQ("VideoHandle",
+ extractor->stream_infos()[1].tags.find("handler_name")->second);
+ EXPECT_EQ("eng", extractor->stream_infos()[1].tags.find("language")->second);
+ EXPECT_EQ("90", extractor->stream_infos()[1].tags.find("rotate")->second);
+
+ EXPECT_EQ("aac", extractor->stream_infos()[2].type);
+ EXPECT_EQ(3u, extractor->stream_infos()[2].tags.size());
+ EXPECT_EQ("2014-02-11 00:39:25",
+ extractor->stream_infos()[2].tags.find("creation_time")->second);
+ EXPECT_EQ("SoundHandle",
+ extractor->stream_infos()[2].tags.find("handler_name")->second);
+ EXPECT_EQ("eng", extractor->stream_infos()[2].tags.find("language")->second);
+
+ EXPECT_EQ(0u, extractor->attached_images_bytes().size());
+}
+
+TEST(AudioVideoMetadataExtractorTest, AudioMP3) {
+ scoped_ptr<AudioVideoMetadataExtractor> extractor =
+ GetExtractor("id3_png_test.mp3", true, true, 1, -1, -1);
+
+ EXPECT_EQ("Airbag", extractor->title());
+ EXPECT_EQ("Radiohead", extractor->artist());
+ EXPECT_EQ("OK Computer", extractor->album());
+ EXPECT_EQ(1, extractor->track());
+ EXPECT_EQ("Alternative", extractor->genre());
+ EXPECT_EQ("1997", extractor->date());
+ EXPECT_EQ("Lavf54.4.100", extractor->encoder());
+
+ EXPECT_EQ(3u, extractor->stream_infos().size());
+
+ EXPECT_EQ("mp3", extractor->stream_infos()[0].type);
+ EXPECT_EQ(7u, extractor->stream_infos()[0].tags.size());
+ EXPECT_EQ("OK Computer",
+ extractor->stream_infos()[0].tags.find("album")->second);
+ EXPECT_EQ("Radiohead",
+ extractor->stream_infos()[0].tags.find("artist")->second);
+ EXPECT_EQ("1997", extractor->stream_infos()[0].tags.find("date")->second);
+ EXPECT_EQ("Lavf54.4.100",
+ extractor->stream_infos()[0].tags.find("encoder")->second);
+ EXPECT_EQ("Alternative",
+ extractor->stream_infos()[0].tags.find("genre")->second);
+ EXPECT_EQ("Airbag", extractor->stream_infos()[0].tags.find("title")->second);
+ EXPECT_EQ("1", extractor->stream_infos()[0].tags.find("track")->second);
+
+ EXPECT_EQ("mp3", extractor->stream_infos()[1].type);
+ EXPECT_EQ(0u, extractor->stream_infos()[1].tags.size());
+
+ EXPECT_EQ("png", extractor->stream_infos()[2].type);
+ EXPECT_EQ(2u, extractor->stream_infos()[2].tags.size());
+ EXPECT_EQ("Other", extractor->stream_infos()[2].tags.find("comment")->second);
+ EXPECT_EQ("", extractor->stream_infos()[2].tags.find("title")->second);
+
+ EXPECT_EQ(1u, extractor->attached_images_bytes().size());
+ EXPECT_EQ(155752u, extractor->attached_images_bytes()[0].size());
+
+ EXPECT_EQ("\x89PNG\r\n\x1a\n",
+ extractor->attached_images_bytes()[0].substr(0, 8));
+ EXPECT_EQ("IEND\xae\x42\x60\x82",
+ extractor->attached_images_bytes()[0].substr(
+ extractor->attached_images_bytes()[0].size() - 8, 8));
+ EXPECT_EQ("\xF3\xED\x8F\xC7\xC7\x98\xB9V|p\xC0u!\xB5\x82\xCF\x95\xF0\xCD\xCE",
+ base::SHA1HashString(extractor->attached_images_bytes()[0]));
+}
+#endif
+
+} // namespace media
diff --git a/chromium/media/base/bind_to_current_loop.h b/chromium/media/base/bind_to_current_loop.h
new file mode 100644
index 00000000000..6461b1c9af9
--- /dev/null
+++ b/chromium/media/base/bind_to_current_loop.h
@@ -0,0 +1,162 @@
+// This file was GENERATED by command:
+// pump.py bind_to_current_loop.h.pump
+// DO NOT EDIT BY HAND!!!
+
+
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
+#define MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+
+// This is a helper utility for base::Bind()ing callbacks to the current
+// MessageLoop. The typical use is when |a| (of class |A|) wants to hand a
+// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
+// when |b| executes the callback, it does so on |a|'s current MessageLoop.
+//
+// Typical usage: request to be called back on the current thread:
+// other->StartAsyncProcessAndCallMeBack(
+// media::BindToCurrentLoop(base::Bind(&MyClass::MyMethod, this)));
+//
+// Note that like base::Bind(), BindToCurrentLoop() can't bind non-constant
+// references, and that *unlike* base::Bind(), BindToCurrentLoop() makes copies
+// of its arguments, and thus can't be used with arrays.
+
+namespace media {
+
+// Mimic base::internal::CallbackForward, replacing p.Pass() with
+// base::Passed(&p) to account for the extra layer of indirection.
+namespace internal {
+template <typename T>
+T& TrampolineForward(T& t) { return t; }
+
+template <typename T, typename R>
+base::internal::PassedWrapper<scoped_ptr<T, R> > TrampolineForward(
+ scoped_ptr<T, R>& p) { return base::Passed(&p); }
+
+template <typename T>
+base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
+ ScopedVector<T>& p) { return base::Passed(&p); }
+
+template <typename T> struct TrampolineHelper;
+
+template <>
+struct TrampolineHelper<void()> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void()>& cb) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb));
+ }
+};
+
+
+template <typename A1>
+struct TrampolineHelper<void(A1)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1)>& cb, A1 a1) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1)));
+ }
+};
+
+
+template <typename A1, typename A2>
+struct TrampolineHelper<void(A1, A2)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1, A2)>& cb, A1 a1, A2 a2) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1), internal::TrampolineForward(a2)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3>
+struct TrampolineHelper<void(A1, A2, A3)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1, A2, A3)>& cb, A1 a1, A2 a2, A3 a3) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1), internal::TrampolineForward(a2),
+ internal::TrampolineForward(a3)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4>
+struct TrampolineHelper<void(A1, A2, A3, A4)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1, A2, A3, A4)>& cb, A1 a1, A2 a2, A3 a3,
+ A4 a4) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1), internal::TrampolineForward(a2),
+ internal::TrampolineForward(a3), internal::TrampolineForward(a4)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5>
+struct TrampolineHelper<void(A1, A2, A3, A4, A5)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1, A2, A3, A4, A5)>& cb, A1 a1, A2 a2, A3 a3,
+ A4 a4, A5 a5) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1), internal::TrampolineForward(a2),
+ internal::TrampolineForward(a3), internal::TrampolineForward(a4),
+ internal::TrampolineForward(a5)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6>
+struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1, A2, A3, A4, A5, A6)>& cb, A1 a1, A2 a2,
+ A3 a3, A4 a4, A5 a5, A6 a6) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1), internal::TrampolineForward(a2),
+ internal::TrampolineForward(a3), internal::TrampolineForward(a4),
+ internal::TrampolineForward(a5), internal::TrampolineForward(a6)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7>
+struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6, A7)> {
+ static void Run(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const base::Callback<void(A1, A2, A3, A4, A5, A6, A7)>& cb, A1 a1, A2 a2,
+ A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
+ task_runner->PostTask(FROM_HERE, base::Bind(cb,
+ internal::TrampolineForward(a1), internal::TrampolineForward(a2),
+ internal::TrampolineForward(a3), internal::TrampolineForward(a4),
+ internal::TrampolineForward(a5), internal::TrampolineForward(a6),
+ internal::TrampolineForward(a7)));
+ }
+};
+
+
+} // namespace internal
+
+template<typename T>
+static base::Callback<T> BindToCurrentLoop(
+ const base::Callback<T>& cb) {
+ return base::Bind(&internal::TrampolineHelper<T>::Run,
+ base::MessageLoopProxy::current(), cb);
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
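To make the trampoline above concrete, a hedged sketch of typical use; Worker, DecodeAsync() and MyClass are hypothetical, and only BindToCurrentLoop() is real here:

    void StartDecode(Worker* worker, MyClass* self) {
      // |cb| may be Run() on any thread; MyClass::OnDecodeDone() will still
      // execute on the MessageLoop that was current at this point, once that
      // loop pumps its pending tasks.
      base::Callback<void(bool)> cb = media::BindToCurrentLoop(
          base::Bind(&MyClass::OnDecodeDone, base::Unretained(self)));
      worker->DecodeAsync(cb);
    }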
diff --git a/chromium/media/base/bind_to_loop.h.pump b/chromium/media/base/bind_to_current_loop.h.pump
index 8490413eb1c..4db40f1c1f9 100644
--- a/chromium/media/base/bind_to_loop.h.pump
+++ b/chromium/media/base/bind_to_current_loop.h.pump
@@ -12,26 +12,26 @@ $var MAX_ARITY = 7
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_BASE_BIND_TO_LOOP_H_
-#define MEDIA_BASE_BIND_TO_LOOP_H_
+#ifndef MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
+#define MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
#include "base/bind.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
-// This is a helper utility for base::Bind()ing callbacks on to particular
-// MessageLoops. A typical use is when |a| (of class |A|) wants to hand a
+// This is a helper utility for base::Bind()ing callbacks to the current
+// MessageLoop. The typical use is when |a| (of class |A|) wants to hand a
// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
-// when |b| executes the callback, it does so on a particular MessageLoop.
+// when |b| executes the callback, it does so on |a|'s current MessageLoop.
//
// Typical usage: request to be called back on the current thread:
// other->StartAsyncProcessAndCallMeBack(
-// media::BindToLoop(MessageLoopProxy::current(),
-// base::Bind(&MyClass::MyMethod, this)));
+// media::BindToCurrentLoop(base::Bind(&MyClass::MyMethod, this)));
//
-// Note that like base::Bind(), BindToLoop() can't bind non-constant references,
-// and that *unlike* base::Bind(), BindToLoop() makes copies of its arguments,
-// and thus can't be used with arrays.
+// Note that like base::Bind(), BindToCurrentLoop() can't bind non-constant
+// references, and that *unlike* base::Bind(), BindToCurrentLoop() makes copies
+// of its arguments, and thus can't be used with arrays.
namespace media {
@@ -41,13 +41,9 @@ namespace internal {
template <typename T>
T& TrampolineForward(T& t) { return t; }
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
- scoped_ptr<T>& p) { return base::Passed(&p); }
-
template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
- scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
+base::internal::PassedWrapper<scoped_ptr<T, R> > TrampolineForward(
+ scoped_ptr<T, R>& p) { return base::Passed(&p); }
template <typename T>
base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
@@ -62,12 +58,12 @@ $range ARG 1..ARITY
template <$for ARG , [[typename A$(ARG)]]>
struct TrampolineHelper<void($for ARG , [[A$(ARG)]])> {
static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const base::Callback<void($for ARG , [[A$(ARG)]])>& cb
$if ARITY != 0 [[, ]]
$for ARG , [[A$(ARG) a$(ARG)]]
) {
- loop->PostTask(FROM_HERE, base::Bind(cb
+ task_runner->PostTask(FROM_HERE, base::Bind(cb
$if ARITY != 0 [[, ]]
$for ARG , [[internal::TrampolineForward(a$(ARG))]]));
}
@@ -79,18 +75,12 @@ $for ARG , [[internal::TrampolineForward(a$(ARG))]]));
} // namespace internal
template<typename T>
-static base::Callback<T> BindToLoop(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<T>& cb) {
- return base::Bind(&internal::TrampolineHelper<T>::Run, loop, cb);
-}
-
-template<typename T>
static base::Callback<T> BindToCurrentLoop(
const base::Callback<T>& cb) {
- return BindToLoop(base::MessageLoopProxy::current(), cb);
+ return base::Bind(&internal::TrampolineHelper<T>::Run,
+ base::MessageLoopProxy::current(), cb);
}
} // namespace media
-#endif // MEDIA_BASE_BIND_TO_LOOP_H_
+#endif // MEDIA_BASE_BIND_TO_CURRENT_LOOP_H_
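
Aside: a minimal end-to-end sketch of what the trampoline above buys you. This is illustrative only and not part of the patch; Demo() and SetFlag() are invented names, and the snippet assumes the in-tree base::MessageLoop and base::Bind APIs that appear elsewhere in this diff. Run() may be invoked from any thread, but the wrapped callback only executes once the loop that was current at bind time pumps its tasks:

    // Sketch only; mirrors the unit tests below, not new API.
    #include "base/bind.h"
    #include "base/message_loop/message_loop.h"
    #include "media/base/bind_to_current_loop.h"

    static void SetFlag(bool* flag) { *flag = true; }

    static void Demo() {
      base::MessageLoop loop;  // Makes MessageLoopProxy::current() valid.
      bool flag = false;
      base::Closure cb =
          media::BindToCurrentLoop(base::Bind(&SetFlag, &flag));
      cb.Run();             // Only posts a task; |flag| is still false.
      loop.RunUntilIdle();  // The trampolined callback runs here.
      // |flag| is now true.
    }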
diff --git a/chromium/media/base/bind_to_loop_unittest.cc b/chromium/media/base/bind_to_current_loop_unittest.cc
index 0c7a3ddd194..23030856675 100644
--- a/chromium/media/base/bind_to_loop_unittest.cc
+++ b/chromium/media/base/bind_to_current_loop_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/waitable_event.h"
@@ -18,7 +18,9 @@ void BoundBoolSetFromScopedPtr(bool* var, scoped_ptr<bool> val) {
*var = *val;
}
-void BoundBoolSetFromScopedPtrMalloc(bool* var, scoped_ptr_malloc<bool> val) {
+void BoundBoolSetFromScopedPtrFreeDeleter(
+ bool* var,
+ scoped_ptr<bool, base::FreeDeleter> val) {
*var = val;
}
@@ -37,19 +39,15 @@ void BoundIntegersSet(int* a_var, int* b_var, int a_val, int b_val) {
// Various tests that check that the bound function is only actually executed
// on the message loop, not during the original Run.
-class BindToLoopTest : public ::testing::Test {
- public:
- BindToLoopTest() : proxy_(loop_.message_loop_proxy()) {}
-
+class BindToCurrentLoopTest : public ::testing::Test {
protected:
base::MessageLoop loop_;
- scoped_refptr<base::MessageLoopProxy> proxy_;
};
-TEST_F(BindToLoopTest, Closure) {
+TEST_F(BindToCurrentLoopTest, Closure) {
// Test the closure is run inside the loop, not outside it.
base::WaitableEvent waiter(false, false);
- base::Closure cb = BindToLoop(proxy_, base::Bind(
+ base::Closure cb = BindToCurrentLoop(base::Bind(
&base::WaitableEvent::Signal, base::Unretained(&waiter)));
cb.Run();
EXPECT_FALSE(waiter.IsSignaled());
@@ -57,9 +55,9 @@ TEST_F(BindToLoopTest, Closure) {
EXPECT_TRUE(waiter.IsSignaled());
}
-TEST_F(BindToLoopTest, Bool) {
+TEST_F(BindToCurrentLoopTest, Bool) {
bool bool_var = false;
- base::Callback<void(bool)> cb = BindToLoop(proxy_, base::Bind(
+ base::Callback<void(bool)> cb = BindToCurrentLoop(base::Bind(
&BoundBoolSet, &bool_var));
cb.Run(true);
EXPECT_FALSE(bool_var);
@@ -67,10 +65,10 @@ TEST_F(BindToLoopTest, Bool) {
EXPECT_TRUE(bool_var);
}
-TEST_F(BindToLoopTest, BoundScopedPtrBool) {
+TEST_F(BindToCurrentLoopTest, BoundScopedPtrBool) {
bool bool_val = false;
scoped_ptr<bool> scoped_ptr_bool(new bool(true));
- base::Closure cb = BindToLoop(proxy_, base::Bind(
+ base::Closure cb = BindToCurrentLoop(base::Bind(
&BoundBoolSetFromScopedPtr, &bool_val, base::Passed(&scoped_ptr_bool)));
cb.Run();
EXPECT_FALSE(bool_val);
@@ -78,10 +76,10 @@ TEST_F(BindToLoopTest, BoundScopedPtrBool) {
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToLoopTest, PassedScopedPtrBool) {
+TEST_F(BindToCurrentLoopTest, PassedScopedPtrBool) {
bool bool_val = false;
scoped_ptr<bool> scoped_ptr_bool(new bool(true));
- base::Callback<void(scoped_ptr<bool>)> cb = BindToLoop(proxy_, base::Bind(
+ base::Callback<void(scoped_ptr<bool>)> cb = BindToCurrentLoop(base::Bind(
&BoundBoolSetFromScopedPtr, &bool_val));
cb.Run(scoped_ptr_bool.Pass());
EXPECT_FALSE(bool_val);
@@ -89,11 +87,11 @@ TEST_F(BindToLoopTest, PassedScopedPtrBool) {
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToLoopTest, BoundScopedArrayBool) {
+TEST_F(BindToCurrentLoopTest, BoundScopedArrayBool) {
bool bool_val = false;
scoped_ptr<bool[]> scoped_array_bool(new bool[1]);
scoped_array_bool[0] = true;
- base::Closure cb = BindToLoop(proxy_, base::Bind(
+ base::Closure cb = BindToCurrentLoop(base::Bind(
&BoundBoolSetFromScopedArray, &bool_val,
base::Passed(&scoped_array_bool)));
cb.Run();
@@ -102,11 +100,11 @@ TEST_F(BindToLoopTest, BoundScopedArrayBool) {
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToLoopTest, PassedScopedArrayBool) {
+TEST_F(BindToCurrentLoopTest, PassedScopedArrayBool) {
bool bool_val = false;
scoped_ptr<bool[]> scoped_array_bool(new bool[1]);
scoped_array_bool[0] = true;
- base::Callback<void(scoped_ptr<bool[]>)> cb = BindToLoop(proxy_, base::Bind(
+ base::Callback<void(scoped_ptr<bool[]>)> cb = BindToCurrentLoop(base::Bind(
&BoundBoolSetFromScopedArray, &bool_val));
cb.Run(scoped_array_bool.Pass());
EXPECT_FALSE(bool_val);
@@ -114,38 +112,39 @@ TEST_F(BindToLoopTest, PassedScopedArrayBool) {
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToLoopTest, BoundScopedPtrMallocBool) {
+TEST_F(BindToCurrentLoopTest, BoundScopedPtrFreeDeleterBool) {
bool bool_val = false;
- scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
+ scoped_ptr<bool, base::FreeDeleter> scoped_ptr_free_deleter_bool(
static_cast<bool*>(malloc(sizeof(bool))));
- *scoped_ptr_malloc_bool = true;
- base::Closure cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromScopedPtrMalloc, &bool_val,
- base::Passed(&scoped_ptr_malloc_bool)));
+ *scoped_ptr_free_deleter_bool = true;
+ base::Closure cb = BindToCurrentLoop(base::Bind(
+ &BoundBoolSetFromScopedPtrFreeDeleter, &bool_val,
+ base::Passed(&scoped_ptr_free_deleter_bool)));
cb.Run();
EXPECT_FALSE(bool_val);
loop_.RunUntilIdle();
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToLoopTest, PassedScopedPtrMallocBool) {
+TEST_F(BindToCurrentLoopTest, PassedScopedPtrFreeDeleterBool) {
bool bool_val = false;
- scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
+ scoped_ptr<bool, base::FreeDeleter> scoped_ptr_free_deleter_bool(
static_cast<bool*>(malloc(sizeof(bool))));
- *scoped_ptr_malloc_bool = true;
- base::Callback<void(scoped_ptr_malloc<bool>)> cb = BindToLoop(
- proxy_, base::Bind(&BoundBoolSetFromScopedPtrMalloc, &bool_val));
- cb.Run(scoped_ptr_malloc_bool.Pass());
+ *scoped_ptr_free_deleter_bool = true;
+ base::Callback<void(scoped_ptr<bool, base::FreeDeleter>)> cb =
+ BindToCurrentLoop(base::Bind(&BoundBoolSetFromScopedPtrFreeDeleter,
+ &bool_val));
+ cb.Run(scoped_ptr_free_deleter_bool.Pass());
EXPECT_FALSE(bool_val);
loop_.RunUntilIdle();
EXPECT_TRUE(bool_val);
}
-TEST_F(BindToLoopTest, BoolConstRef) {
+TEST_F(BindToCurrentLoopTest, BoolConstRef) {
bool bool_var = false;
bool true_var = true;
const bool& true_ref = true_var;
- base::Closure cb = BindToLoop(proxy_, base::Bind(
+ base::Closure cb = BindToCurrentLoop(base::Bind(
&BoundBoolSetFromConstRef, &bool_var, true_ref));
cb.Run();
EXPECT_FALSE(bool_var);
@@ -153,10 +152,10 @@ TEST_F(BindToLoopTest, BoolConstRef) {
EXPECT_TRUE(bool_var);
}
-TEST_F(BindToLoopTest, Integers) {
+TEST_F(BindToCurrentLoopTest, Integers) {
int a = 0;
int b = 0;
- base::Callback<void(int, int)> cb = BindToLoop(proxy_, base::Bind(
+ base::Callback<void(int, int)> cb = BindToCurrentLoop(base::Bind(
&BoundIntegersSet, &a, &b));
cb.Run(1, -1);
EXPECT_EQ(a, 0);
diff --git a/chromium/media/base/bind_to_loop.h b/chromium/media/base/bind_to_loop.h
deleted file mode 100644
index 92d358c7be9..00000000000
--- a/chromium/media/base/bind_to_loop.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// This file was GENERATED by command:
-// pump.py bind_to_loop.h.pump
-// DO NOT EDIT BY HAND!!!
-
-
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BIND_TO_LOOP_H_
-#define MEDIA_BASE_BIND_TO_LOOP_H_
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
-
-// This is a helper utility for base::Bind()ing callbacks on to particular
-// MessageLoops. A typical use is when |a| (of class |A|) wants to hand a
-// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
-// when |b| executes the callback, it does so on a particular MessageLoop.
-//
-// Typical usage: request to be called back on the current thread:
-// other->StartAsyncProcessAndCallMeBack(
-// media::BindToLoop(MessageLoopProxy::current(),
-// base::Bind(&MyClass::MyMethod, this)));
-//
-// Note that like base::Bind(), BindToLoop() can't bind non-constant references,
-// and that *unlike* base::Bind(), BindToLoop() makes copies of its arguments,
-// and thus can't be used with arrays.
-
-namespace media {
-
-// Mimic base::internal::CallbackForward, replacing p.Pass() with
-// base::Passed(&p) to account for the extra layer of indirection.
-namespace internal {
-template <typename T>
-T& TrampolineForward(T& t) { return t; }
-
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
- scoped_ptr<T>& p) { return base::Passed(&p); }
-
-template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
- scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
-
-template <typename T>
-base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
- ScopedVector<T>& p) { return base::Passed(&p); }
-
-template <typename T> struct TrampolineHelper;
-
-template <>
-struct TrampolineHelper<void()> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void()>& cb) {
- loop->PostTask(FROM_HERE, base::Bind(cb));
- }
-};
-
-
-template <typename A1>
-struct TrampolineHelper<void(A1)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1)>& cb, A1 a1) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1)));
- }
-};
-
-
-template <typename A1, typename A2>
-struct TrampolineHelper<void(A1, A2)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2)>& cb, A1 a1, A2 a2) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3>
-struct TrampolineHelper<void(A1, A2, A3)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3)>& cb, A1 a1, A2 a2, A3 a3) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4>
-struct TrampolineHelper<void(A1, A2, A3, A4)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4)>& cb, A1 a1, A2 a2, A3 a3,
- A4 a4) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4, A5)>& cb, A1 a1, A2 a2, A3 a3,
- A4 a4, A5 a5) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4), internal::TrampolineForward(a5)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5,
- typename A6>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4, A5, A6)>& cb, A1 a1, A2 a2,
- A3 a3, A4 a4, A5 a5, A6 a6) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4), internal::TrampolineForward(a5),
- internal::TrampolineForward(a6)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5,
- typename A6, typename A7>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6, A7)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4, A5, A6, A7)>& cb, A1 a1, A2 a2,
- A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4), internal::TrampolineForward(a5),
- internal::TrampolineForward(a6), internal::TrampolineForward(a7)));
- }
-};
-
-
-} // namespace internal
-
-template<typename T>
-static base::Callback<T> BindToLoop(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<T>& cb) {
- return base::Bind(&internal::TrampolineHelper<T>::Run, loop, cb);
-}
-
-template<typename T>
-static base::Callback<T> BindToCurrentLoop(
- const base::Callback<T>& cb) {
- return BindToLoop(base::MessageLoopProxy::current(), cb);
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_BIND_TO_LOOP_H_
diff --git a/chromium/media/base/bit_reader.cc b/chromium/media/base/bit_reader.cc
index e4d83af7410..c0d30d66fdd 100644
--- a/chromium/media/base/bit_reader.cc
+++ b/chromium/media/base/bit_reader.cc
@@ -4,80 +4,31 @@
#include "media/base/bit_reader.h"
-#include <algorithm>
-
namespace media {
-BitReader::BitReader(const uint8* data, off_t size)
- : data_(data), bytes_left_(size), num_remaining_bits_in_curr_byte_(0) {
- DCHECK(data_ != NULL && bytes_left_ > 0);
-
- UpdateCurrByte();
+BitReader::BitReader(const uint8* data, int size)
+ : initial_size_(size),
+ data_(data),
+ bytes_left_(size),
+ bit_reader_core_(this) {
+ DCHECK(data != NULL);
+ DCHECK_GE(size, 0);
}
BitReader::~BitReader() {}
-bool BitReader::SkipBits(int num_bits) {
- DCHECK_GE(num_bits, 0);
- DVLOG_IF(0, num_bits > 100)
- << "BitReader::SkipBits inefficient for large skips";
-
- // Skip any bits in the current byte waiting to be processed, then
- // process full bytes until less than 8 bits remaining.
- while (num_bits > 0 && num_bits > num_remaining_bits_in_curr_byte_) {
- num_bits -= num_remaining_bits_in_curr_byte_;
- num_remaining_bits_in_curr_byte_ = 0;
- UpdateCurrByte();
-
- // If there is no more data remaining, only return true if we
- // skipped all that were requested.
- if (num_remaining_bits_in_curr_byte_ == 0)
- return (num_bits == 0);
- }
-
- // Less than 8 bits remaining to skip. Use ReadBitsInternal to verify
- // that the remaining bits we need exist, and adjust them as necessary
- // for subsequent operations.
- uint64 not_needed;
- return ReadBitsInternal(num_bits, &not_needed);
-}
-
-int BitReader::bits_available() const {
- return 8 * bytes_left_ + num_remaining_bits_in_curr_byte_;
-}
-
-bool BitReader::ReadBitsInternal(int num_bits, uint64* out) {
- DCHECK_LE(num_bits, 64);
-
- *out = 0;
-
- while (num_remaining_bits_in_curr_byte_ != 0 && num_bits != 0) {
- int bits_to_take = std::min(num_remaining_bits_in_curr_byte_, num_bits);
-
- *out <<= bits_to_take;
- *out += curr_byte_ >> (num_remaining_bits_in_curr_byte_ - bits_to_take);
- num_bits -= bits_to_take;
- num_remaining_bits_in_curr_byte_ -= bits_to_take;
- curr_byte_ &= (1 << num_remaining_bits_in_curr_byte_) - 1;
-
- if (num_remaining_bits_in_curr_byte_ == 0)
- UpdateCurrByte();
- }
-
- return num_bits == 0;
-}
-
-void BitReader::UpdateCurrByte() {
- DCHECK_EQ(num_remaining_bits_in_curr_byte_, 0);
+int BitReader::GetBytes(int max_nbytes, const uint8** out) {
+ DCHECK_GE(max_nbytes, 0);
+ DCHECK(out);
- if (bytes_left_ == 0)
- return;
+ int nbytes = max_nbytes;
+ if (nbytes > bytes_left_)
+ nbytes = bytes_left_;
- // Load a new byte and advance pointers.
- curr_byte_ = *data_;
- ++data_;
- --bytes_left_;
- num_remaining_bits_in_curr_byte_ = 8;
+ *out = data_;
+ data_ += nbytes;
+ bytes_left_ -= nbytes;
+ return nbytes;
}
} // namespace media
diff --git a/chromium/media/base/bit_reader.h b/chromium/media/base/bit_reader.h
index 8c15891c915..b70e97c4f2d 100644
--- a/chromium/media/base/bit_reader.h
+++ b/chromium/media/base/bit_reader.h
@@ -5,70 +5,57 @@
#ifndef MEDIA_BASE_BIT_READER_H_
#define MEDIA_BASE_BIT_READER_H_
-#include <sys/types.h>
-
#include "base/basictypes.h"
-#include "base/logging.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/bit_reader_core.h"
#include "media/base/media_export.h"
namespace media {
-// A class to read bit streams.
-class MEDIA_EXPORT BitReader {
+class MEDIA_EXPORT BitReader
+ : NON_EXPORTED_BASE(private BitReaderCore::ByteStreamProvider) {
public:
// Initialize the reader to start reading at |data|, |size| being size
// of |data| in bytes.
- BitReader(const uint8* data, off_t size);
- ~BitReader();
-
- // Read |num_bits| next bits from stream and return in |*out|, first bit
- // from the stream starting at |num_bits| position in |*out|.
- // |num_bits| cannot be larger than the bits the type can hold.
- // Return false if the given number of bits cannot be read (not enough
- // bits in the stream), true otherwise. When return false, the stream will
- // enter a state where further ReadBits/SkipBits operations will always
- // return false unless |num_bits| is 0. The type |T| has to be a primitive
- // integer type.
- template<typename T> bool ReadBits(int num_bits, T *out) {
- DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8));
- uint64 temp;
- bool ret = ReadBitsInternal(num_bits, &temp);
- *out = static_cast<T>(temp);
- return ret;
+ BitReader(const uint8* data, int size);
+ virtual ~BitReader();
+
+ template<typename T> bool ReadBits(int num_bits, T* out) {
+ return bit_reader_core_.ReadBits(num_bits, out);
+ }
+
+ bool ReadFlag(bool* flag) {
+ return bit_reader_core_.ReadFlag(flag);
+ }
+
+ bool SkipBits(int num_bits) {
+ return bit_reader_core_.SkipBits(num_bits);
}
- // Skip |num_bits| next bits from stream. Return false if the given number of
- // bits cannot be skipped (not enough bits in the stream), true otherwise.
- // When return false, the stream will enter a state where further ReadBits/
- // SkipBits operations will always return false unless |num_bits| is 0.
- bool SkipBits(int num_bits);
+ int bits_available() const {
+ return initial_size_ * 8 - bits_read();
+ }
- // Returns the number of bits available for reading.
- int bits_available() const;
+ int bits_read() const {
+ return bit_reader_core_.bits_read();
+ }
private:
- // Help function used by ReadBits to avoid inlining the bit reading logic.
- bool ReadBitsInternal(int num_bits, uint64* out);
+ // BitReaderCore::ByteStreamProvider implementation.
+ virtual int GetBytes(int max_n, const uint8** out) OVERRIDE;
- // Advance to the next byte, loading it into curr_byte_.
- // If the num_remaining_bits_in_curr_byte_ is 0 after this function returns,
- // the stream has reached the end.
- void UpdateCurrByte();
+ // Total number of bytes that was initially passed to BitReader.
+ const int initial_size_;
- // Pointer to the next unread (not in curr_byte_) byte in the stream.
+ // Pointer to the next unread byte in the stream.
const uint8* data_;
- // Bytes left in the stream (without the curr_byte_).
- off_t bytes_left_;
+ // Bytes left in the stream.
+ int bytes_left_;
- // Contents of the current byte; first unread bit starting at position
- // 8 - num_remaining_bits_in_curr_byte_ from MSB.
- uint8 curr_byte_;
+ BitReaderCore bit_reader_core_;
- // Number of bits remaining in curr_byte_
- int num_remaining_bits_in_curr_byte_;
-
- private:
DISALLOW_COPY_AND_ASSIGN(BitReader);
};
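
Aside: an illustrative usage sketch of the refactored BitReader (not part of the patch; ParseExample() is an invented name, and only methods declared above are used). Note the invariant the new accessors establish: bits_read() + bits_available() always equals 8 * size:

    #include "media/base/bit_reader.h"

    static bool ParseExample(const uint8* data, int size) {
      media::BitReader reader(data, size);
      int version;
      bool keyframe;
      if (!reader.ReadBits(4, &version))  // Top 4 bits of the first byte.
        return false;
      if (!reader.ReadFlag(&keyframe))    // Single-bit convenience form.
        return false;
      if (!reader.SkipBits(3))            // Rest of the first byte.
        return false;
      // Here bits_read() == 8 and bits_available() == 8 * size - 8.
      return true;
    }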
diff --git a/chromium/media/base/bit_reader_core.cc b/chromium/media/base/bit_reader_core.cc
new file mode 100644
index 00000000000..2f168ffd67c
--- /dev/null
+++ b/chromium/media/base/bit_reader_core.cc
@@ -0,0 +1,159 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/bit_reader_core.h"
+
+#include <base/port.h>
+#include <base/sys_byteorder.h>
+
+namespace {
+const int kRegWidthInBits = sizeof(uint64) * 8;
+}
+
+namespace media {
+
+BitReaderCore::ByteStreamProvider::ByteStreamProvider() {
+}
+
+BitReaderCore::ByteStreamProvider::~ByteStreamProvider() {
+}
+
+BitReaderCore::BitReaderCore(ByteStreamProvider* byte_stream_provider)
+ : byte_stream_provider_(byte_stream_provider),
+ bits_read_(0),
+ nbits_(0),
+ reg_(0),
+ nbits_next_(0),
+ reg_next_(0) {
+}
+
+BitReaderCore::~BitReaderCore() {
+}
+
+bool BitReaderCore::ReadFlag(bool* flag) {
+ if (nbits_ == 0 && !Refill(1))
+ return false;
+
+ *flag = (reg_ & (GG_UINT64_C(1) << (kRegWidthInBits - 1))) != 0;
+ reg_ <<= 1;
+ nbits_--;
+ bits_read_++;
+ return true;
+}
+
+int BitReaderCore::PeekBitsMsbAligned(int num_bits, uint64* out) {
+ // Try to have at least |num_bits| in the bit register.
+ if (nbits_ < num_bits)
+ Refill(num_bits);
+
+ *out = reg_;
+ return nbits_;
+}
+
+bool BitReaderCore::SkipBits(int num_bits) {
+ DCHECK_GE(num_bits, 0);
+ DVLOG_IF(0, num_bits > 100)
+ << "BitReader::SkipBits inefficient for large skips";
+
+ uint64 dummy;
+ while (num_bits >= kRegWidthInBits) {
+ if (!ReadBitsInternal(kRegWidthInBits, &dummy))
+ return false;
+ num_bits -= kRegWidthInBits;
+ }
+ return ReadBitsInternal(num_bits, &dummy);
+}
+
+int BitReaderCore::bits_read() const {
+ return bits_read_;
+}
+
+bool BitReaderCore::ReadBitsInternal(int num_bits, uint64* out) {
+ DCHECK_GE(num_bits, 0);
+
+ if (num_bits == 0) {
+ *out = 0;
+ return true;
+ }
+
+ if (num_bits > nbits_ && !Refill(num_bits)) {
+ // Any subsequent ReadBits should fail:
+ // empty the current bit register for that purpose.
+ nbits_ = 0;
+ reg_ = 0;
+ return false;
+ }
+
+ bits_read_ += num_bits;
+
+ if (num_bits == kRegWidthInBits) {
+ // Special case needed since for example for a 64 bit integer "a"
+ // "a << 64" is not defined by the C/C++ standard.
+ *out = reg_;
+ reg_ = 0;
+ nbits_ = 0;
+ return true;
+ }
+
+ *out = reg_ >> (kRegWidthInBits - num_bits);
+ reg_ <<= num_bits;
+ nbits_ -= num_bits;
+ return true;
+}
+
+bool BitReaderCore::Refill(int min_nbits) {
+ DCHECK_LE(min_nbits, kRegWidthInBits);
+
+ // Transfer from the next to the current register.
+ RefillCurrentRegister();
+ if (min_nbits <= nbits_)
+ return true;
+ DCHECK_EQ(nbits_next_, 0);
+ DCHECK_EQ(reg_next_, 0u);
+
+ // Max number of bytes to refill.
+ int max_nbytes = sizeof(reg_next_);
+
+ // Refill.
+ const uint8* byte_stream_window;
+ int window_size =
+ byte_stream_provider_->GetBytes(max_nbytes, &byte_stream_window);
+ DCHECK_GE(window_size, 0);
+ DCHECK_LE(window_size, max_nbytes);
+ if (window_size == 0)
+ return false;
+
+ reg_next_ = 0;
+ memcpy(&reg_next_, byte_stream_window, window_size);
+ reg_next_ = base::NetToHost64(reg_next_);
+ nbits_next_ = window_size * 8;
+
+ // Transfer from the next to the current register.
+ RefillCurrentRegister();
+
+ return (nbits_ >= min_nbits);
+}
+
+void BitReaderCore::RefillCurrentRegister() {
+ // No refill possible if the destination register is full
+ // or the source register is empty.
+ if (nbits_ == kRegWidthInBits || nbits_next_ == 0)
+ return;
+
+ reg_ |= (reg_next_ >> nbits_);
+
+ int free_nbits = kRegWidthInBits - nbits_;
+ if (free_nbits >= nbits_next_) {
+ nbits_ += nbits_next_;
+ reg_next_ = 0;
+ nbits_next_ = 0;
+ return;
+ }
+
+ nbits_ += free_nbits;
+ reg_next_ <<= free_nbits;
+ nbits_next_ -= free_nbits;
+}
+
+} // namespace media
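
Aside: the heart of ReadBitsInternal()/Refill() above is keeping unread bits MSB-aligned in a uint64, so a read is one shift to extract and one shift to advance. A stripped-down, standalone illustration of that arithmetic (plain C++, invented names, no Chromium dependencies):

    #include <assert.h>
    #include <stdint.h>

    // Reads |n| bits (0 < n < 64) from an MSB-aligned register, mimicking
    // BitReaderCore::ReadBitsInternal in the no-refill, non-64-bit case.
    static uint64_t ReadFromRegister(uint64_t* reg, int* nbits, int n) {
      uint64_t out = *reg >> (64 - n);  // Wanted bits land in the low end.
      *reg <<= n;                       // Next unread bit back at the MSB.
      *nbits -= n;
      return out;
    }

    int main() {
      // Bytes 0xAB 0xCD loaded big-endian into the top of the register,
      // as Refill() does via memcpy + base::NetToHost64.
      uint64_t reg = 0xABCDull << 48;
      int nbits = 16;
      assert(ReadFromRegister(&reg, &nbits, 4) == 0xA);
      assert(ReadFromRegister(&reg, &nbits, 8) == 0xBC);
      assert(nbits == 4);  // Nibble 0xD is still unread.
      return 0;
    }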
diff --git a/chromium/media/base/bit_reader_core.h b/chromium/media/base/bit_reader_core.h
new file mode 100644
index 00000000000..6f92d175643
--- /dev/null
+++ b/chromium/media/base/bit_reader_core.h
@@ -0,0 +1,118 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BIT_READER_CORE_H_
+#define MEDIA_BASE_BIT_READER_CORE_H_
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT BitReaderCore {
+ public:
+ class ByteStreamProvider {
+ public:
+ ByteStreamProvider();
+ virtual ~ByteStreamProvider();
+
+    // Consume at most |max_n| bytes of the stream
+ // and return the number n of bytes actually consumed.
+ // Set |*array| to point to a memory buffer containing those n bytes.
+ // Note: |*array| must be valid until the next call to GetBytes
+ // but there is no guarantee it is valid after.
+ virtual int GetBytes(int max_n, const uint8** array) = 0;
+ };
+
+ // Lifetime of |byte_stream_provider| must be longer than BitReaderCore.
+ explicit BitReaderCore(ByteStreamProvider* byte_stream_provider);
+ ~BitReaderCore();
+
+ // Read one bit from the stream and return it as a boolean in |*out|.
+ // Remark: we do not use the template version for reading a bool
+ // since it generates some optimization warnings during compilation
+ // on Windows platforms.
+ bool ReadBits(int num_bits, bool* out) {
+ DCHECK_EQ(num_bits, 1);
+ return ReadFlag(out);
+ }
+
+ // Read |num_bits| next bits from stream and return in |*out|, first bit
+ // from the stream starting at |num_bits| position in |*out|,
+ // bits of |*out| whose position is strictly greater than |num_bits|
+ // are all set to zero.
+ // Notes:
+ // - |num_bits| cannot be larger than the bits the type can hold.
+ // - From the above description, passing a signed type in |T| does not
+ // mean the first bit read from the stream gives the sign of the value.
+ // Return false if the given number of bits cannot be read (not enough
+  // bits in the stream), true otherwise. When it returns false, the stream
+  // will enter a state where further ReadBits/SkipBits operations will always
+ // return false unless |num_bits| is 0. The type |T| has to be a primitive
+ // integer type.
+ template<typename T> bool ReadBits(int num_bits, T* out) {
+ DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8));
+ uint64 temp;
+ bool ret = ReadBitsInternal(num_bits, &temp);
+ *out = static_cast<T>(temp);
+ return ret;
+ }
+
+ // Read one bit from the stream and return it as a boolean in |*flag|.
+ bool ReadFlag(bool* flag);
+
+ // Retrieve some bits without actually consuming them.
+ // Bits returned in |*out| are shifted so the most significant bit contains
+ // the next bit that can be read from the stream.
+ // Return the number of bits actually written in |out|.
+ // Note: |num_bits| is just a suggestion of how many bits the caller
+  // wishes to get in |*out| and must be less than 64:
+ // - The number of bits returned can be more than |num_bits|.
+ // - However, it will be strictly less than |num_bits|
+ // if and only if there are not enough bits left in the stream.
+ int PeekBitsMsbAligned(int num_bits, uint64* out);
+
+ // Skip |num_bits| next bits from stream. Return false if the given number of
+ // bits cannot be skipped (not enough bits in the stream), true otherwise.
+  // When it returns false, the stream will enter a state where further
+  // ReadBits/ReadFlag/SkipBits operations will always return false unless
+  // |num_bits| is 0.
+ bool SkipBits(int num_bits);
+
+ // Returns the number of bits read so far.
+ int bits_read() const;
+
+ private:
+ // Help function used by ReadBits to avoid inlining the bit reading logic.
+ bool ReadBitsInternal(int num_bits, uint64* out);
+
+ // Refill bit registers to have at least |min_nbits| bits available.
+  // Return true if the minimum bit count condition is met after the refill.
+ bool Refill(int min_nbits);
+
+ // Refill the current bit register from the next bit register.
+ void RefillCurrentRegister();
+
+ ByteStreamProvider* const byte_stream_provider_;
+
+ // Number of bits read so far.
+ int bits_read_;
+
+ // Number of bits in |reg_| that have not been consumed yet.
+ // Note: bits are consumed from MSB to LSB.
+ int nbits_;
+ uint64 reg_;
+
+ // Number of bits in |reg_next_| that have not been consumed yet.
+ // Note: bits are consumed from MSB to LSB.
+ int nbits_next_;
+ uint64 reg_next_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitReaderCore);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BIT_READER_CORE_H_
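
Aside: BitReader earlier in this diff is the canonical ByteStreamProvider, but the contract above is small enough to satisfy directly. A hypothetical in-memory provider, for illustration only (MemoryByteStream and Demo are invented names):

    #include "base/compiler_specific.h"
    #include "media/base/bit_reader_core.h"

    // Hypothetical provider that serves a fixed buffer one window at a
    // time. Per the contract above, |*array| only has to stay valid until
    // the next GetBytes() call, which a plain advancing pointer satisfies.
    class MemoryByteStream : public media::BitReaderCore::ByteStreamProvider {
     public:
      MemoryByteStream(const uint8* data, int size)
          : data_(data), bytes_left_(size) {}

      virtual int GetBytes(int max_n, const uint8** array) OVERRIDE {
        int n = max_n < bytes_left_ ? max_n : bytes_left_;
        *array = data_;
        data_ += n;
        bytes_left_ -= n;
        return n;
      }

     private:
      const uint8* data_;
      int bytes_left_;
    };

    static void Demo(const uint8* data, int size) {
      MemoryByteStream stream(data, size);   // Must outlive |reader|.
      media::BitReaderCore reader(&stream);
      uint64 nine_bits;
      reader.ReadBits(9, &nine_bits);        // Triggers a Refill() internally.
    }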
diff --git a/chromium/media/base/bit_reader_unittest.cc b/chromium/media/base/bit_reader_unittest.cc
index 3dca9c632da..b6edd491a63 100644
--- a/chromium/media/base/bit_reader_unittest.cc
+++ b/chromium/media/base/bit_reader_unittest.cc
@@ -64,4 +64,25 @@ TEST(BitReaderTest, SkipBitsTest) {
EXPECT_FALSE(reader1.SkipBits(1));
}
+TEST(BitReaderTest, BitsReadTest) {
+ int value;
+ bool flag;
+ uint8 buffer[] = { 0x0a, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+ BitReader reader1(buffer, sizeof(buffer));
+ EXPECT_EQ(reader1.bits_available(), 120);
+
+ EXPECT_TRUE(reader1.SkipBits(2));
+ EXPECT_EQ(reader1.bits_read(), 2);
+ EXPECT_EQ(reader1.bits_available(), 118);
+ EXPECT_TRUE(reader1.ReadBits(3, &value));
+ EXPECT_EQ(reader1.bits_read(), 5);
+ EXPECT_EQ(reader1.bits_available(), 115);
+ EXPECT_TRUE(reader1.ReadFlag(&flag));
+ EXPECT_EQ(reader1.bits_read(), 6);
+ EXPECT_EQ(reader1.bits_available(), 114);
+ EXPECT_TRUE(reader1.SkipBits(76));
+ EXPECT_EQ(reader1.bits_read(), 82);
+ EXPECT_EQ(reader1.bits_available(), 38);
+}
+
} // namespace media
diff --git a/chromium/media/base/browser_cdm.cc b/chromium/media/base/browser_cdm.cc
new file mode 100644
index 00000000000..2c44e1652ad
--- /dev/null
+++ b/chromium/media/base/browser_cdm.cc
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/browser_cdm.h"
+
+namespace media {
+
+BrowserCdm::BrowserCdm() {
+}
+
+BrowserCdm::~BrowserCdm() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/browser_cdm.h b/chromium/media/base/browser_cdm.h
new file mode 100644
index 00000000000..f009779324d
--- /dev/null
+++ b/chromium/media/base/browser_cdm.h
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BROWSER_CDM_H_
+#define MEDIA_BASE_BROWSER_CDM_H_
+
+#include "media/base/media_export.h"
+#include "media/base/media_keys.h"
+#include "media/base/player_tracker.h"
+
+namespace media {
+
+// Interface for browser side CDMs.
+class MEDIA_EXPORT BrowserCdm : public PlayerTracker {
+ public:
+ // TODO(jrummell): Update this to actually derive from MediaKeys
+ // (Use web_session_id rather than session_id).
+ typedef base::Callback<
+ void(uint32 session_id, const std::string& web_session_id)>
+ SessionCreatedCB;
+
+ typedef base::Callback<void(uint32 session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url)> SessionMessageCB;
+
+ typedef base::Callback<void(uint32 session_id)> SessionReadyCB;
+
+ typedef base::Callback<void(uint32 session_id)> SessionClosedCB;
+
+ typedef base::Callback<void(uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ uint32 system_code)> SessionErrorCB;
+
+ virtual ~BrowserCdm();
+
+ // MediaKeys-like implementation.
+ virtual bool CreateSession(uint32 session_id,
+ const std::string& content_type,
+ const uint8* init_data,
+ int init_data_length) = 0;
+ virtual void LoadSession(uint32 session_id,
+ const std::string& web_session_id) = 0;
+ virtual void UpdateSession(uint32 session_id,
+ const uint8* response,
+ int response_length) = 0;
+ virtual void ReleaseSession(uint32 session_id) = 0;
+
+ // PlayerTracker implementation.
+ virtual int RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) = 0;
+ virtual void UnregisterPlayer(int registration_id) = 0;
+
+ protected:
+ BrowserCdm();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BrowserCdm);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BROWSER_CDM_H_
diff --git a/chromium/media/base/browser_cdm_factory.h b/chromium/media/base/browser_cdm_factory.h
new file mode 100644
index 00000000000..e6fa47bcaf4
--- /dev/null
+++ b/chromium/media/base/browser_cdm_factory.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BROWSER_CDM_FACTORY_H_
+#define MEDIA_BASE_BROWSER_CDM_FACTORY_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/browser_cdm.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Creates a BrowserCdm for |key_system|. Returns NULL if the CDM cannot be
+// created.
+// TODO(xhwang): Add ifdef for IPC based CDM.
+scoped_ptr<BrowserCdm> MEDIA_EXPORT
+ CreateBrowserCdm(const std::string& key_system,
+ const BrowserCdm::SessionCreatedCB& session_created_cb,
+ const BrowserCdm::SessionMessageCB& session_message_cb,
+ const BrowserCdm::SessionReadyCB& session_ready_cb,
+ const BrowserCdm::SessionClosedCB& session_closed_cb,
+ const BrowserCdm::SessionErrorCB& session_error_cb);
+
+} // namespace media
+
+#endif // MEDIA_BASE_BROWSER_CDM_FACTORY_H_
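
Aside: a hypothetical call site for the factory above, illustrating how the BrowserCdm callback typedefs line up. Handler names are invented for this sketch; the signatures follow the typedefs in browser_cdm.h earlier in this diff:

    #include <string>
    #include <vector>

    #include "base/bind.h"
    #include "media/base/browser_cdm_factory.h"
    #include "url/gurl.h"

    static void OnSessionCreated(uint32 session_id,
                                 const std::string& web_session_id) {}
    static void OnSessionMessage(uint32 session_id,
                                 const std::vector<uint8>& message,
                                 const GURL& destination_url) {}
    static void OnSessionReady(uint32 session_id) {}
    static void OnSessionClosed(uint32 session_id) {}
    static void OnSessionError(uint32 session_id,
                               media::MediaKeys::KeyError error_code,
                               uint32 system_code) {}

    static void Demo(const std::string& key_system) {
      scoped_ptr<media::BrowserCdm> cdm = media::CreateBrowserCdm(
          key_system,
          base::Bind(&OnSessionCreated),
          base::Bind(&OnSessionMessage),
          base::Bind(&OnSessionReady),
          base::Bind(&OnSessionClosed),
          base::Bind(&OnSessionError));
      // |cdm| is NULL if no CDM could be created for |key_system|.
    }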
diff --git a/chromium/media/base/buffering_state.h b/chromium/media/base/buffering_state.h
new file mode 100644
index 00000000000..3140505847e
--- /dev/null
+++ b/chromium/media/base/buffering_state.h
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BUFFERING_STATE_H_
+#define MEDIA_BASE_BUFFERING_STATE_H_
+
+#include "base/callback_forward.h"
+
+namespace media {
+
+enum BufferingState {
+ // Indicates that there is no data buffered.
+ //
+ // Typical reason is data underflow and hence playback should be paused.
+ BUFFERING_HAVE_NOTHING,
+
+ // Indicates that enough data has been buffered.
+ //
+ // Typical reason is enough data has been prerolled to start playback.
+ BUFFERING_HAVE_ENOUGH,
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BUFFERING_STATE_H_
diff --git a/chromium/media/base/buffers.h b/chromium/media/base/buffers.h
index 6a6c7303d1d..5c5c47b68e1 100644
--- a/chromium/media/base/buffers.h
+++ b/chromium/media/base/buffers.h
@@ -37,7 +37,7 @@ MEDIA_EXPORT extern inline base::TimeDelta kNoTimestamp() {
// Represents an infinite stream duration.
MEDIA_EXPORT extern inline base::TimeDelta kInfiniteDuration() {
- return base::TimeDelta::FromMicroseconds(kint64max);
+ return base::TimeDelta::Max();
}
} // namespace media
diff --git a/chromium/media/base/callback_holder.h b/chromium/media/base/callback_holder.h
index 2ea5edbeb19..f69e03de4c3 100644
--- a/chromium/media/base/callback_holder.h
+++ b/chromium/media/base/callback_holder.h
@@ -8,7 +8,7 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
namespace media {
diff --git a/chromium/media/base/cdm_promise.cc b/chromium/media/base/cdm_promise.cc
new file mode 100644
index 00000000000..ec5e913dbb8
--- /dev/null
+++ b/chromium/media/base/cdm_promise.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/cdm_promise.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+namespace media {
+
+CdmPromise::CdmPromise() : is_pending_(true) {
+}
+
+CdmPromise::CdmPromise(PromiseRejectedCB reject_cb)
+ : reject_cb_(reject_cb), is_pending_(true) {
+ DCHECK(!reject_cb_.is_null());
+}
+
+CdmPromise::~CdmPromise() {
+ DCHECK(!is_pending_);
+}
+
+void CdmPromise::reject(MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ DCHECK(is_pending_);
+ is_pending_ = false;
+ reject_cb_.Run(exception_code, system_code, error_message);
+}
+
+template <typename T>
+CdmPromiseTemplate<T>::CdmPromiseTemplate(
+ base::Callback<void(const T&)> resolve_cb,
+ PromiseRejectedCB reject_cb)
+ : CdmPromise(reject_cb), resolve_cb_(resolve_cb) {
+ DCHECK(!resolve_cb_.is_null());
+}
+
+template <typename T>
+CdmPromiseTemplate<T>::~CdmPromiseTemplate() {
+ DCHECK(!is_pending_);
+}
+
+template <typename T>
+void CdmPromiseTemplate<T>::resolve(const T& result) {
+ DCHECK(is_pending_);
+ is_pending_ = false;
+ resolve_cb_.Run(result);
+}
+
+CdmPromiseTemplate<void>::CdmPromiseTemplate(base::Callback<void()> resolve_cb,
+ PromiseRejectedCB reject_cb)
+ : CdmPromise(reject_cb), resolve_cb_(resolve_cb) {
+ DCHECK(!resolve_cb_.is_null());
+}
+
+CdmPromiseTemplate<void>::CdmPromiseTemplate() {
+}
+
+CdmPromiseTemplate<void>::~CdmPromiseTemplate() {
+ DCHECK(!is_pending_);
+}
+
+void CdmPromiseTemplate<void>::resolve() {
+ DCHECK(is_pending_);
+ is_pending_ = false;
+ resolve_cb_.Run();
+}
+
+// Explicit template instantiation for the Promises needed.
+template class MEDIA_EXPORT CdmPromiseTemplate<std::string>;
+
+} // namespace media
diff --git a/chromium/media/base/cdm_promise.h b/chromium/media/base/cdm_promise.h
new file mode 100644
index 00000000000..ad1d196ad6e
--- /dev/null
+++ b/chromium/media/base/cdm_promise.h
@@ -0,0 +1,87 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CDM_PROMISE_H_
+#define MEDIA_BASE_CDM_PROMISE_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "media/base/media_export.h"
+#include "media/base/media_keys.h"
+
+namespace media {
+
+// Interface for promises being resolved/rejected in response to various
+// session actions. These may be called synchronously or asynchronously.
+// The promise must be resolved or rejected exactly once. It is expected that
+// the caller free the promise once it is resolved/rejected.
+//
+// This is only the base class, as parameter to resolve() varies.
+class MEDIA_EXPORT CdmPromise {
+ public:
+ typedef base::Callback<void(MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message)>
+ PromiseRejectedCB;
+
+ virtual ~CdmPromise();
+
+ // Used to indicate that the operation failed. |exception_code| must be
+ // specified. |system_code| is a Key System-specific value for the error
+ // that occurred, or 0 if there is no associated status code or such status
+ // codes are not supported by the Key System. |error_message| is optional.
+ virtual void reject(MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
+
+ protected:
+ CdmPromise();
+ CdmPromise(PromiseRejectedCB reject_cb);
+
+ PromiseRejectedCB reject_cb_;
+
+ // Keep track of whether the promise hasn't been resolved or rejected yet.
+ bool is_pending_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmPromise);
+};
+
+template <typename T>
+class MEDIA_EXPORT CdmPromiseTemplate : public CdmPromise {
+ public:
+ CdmPromiseTemplate(base::Callback<void(const T&)> resolve_cb,
+ PromiseRejectedCB rejected_cb);
+ virtual ~CdmPromiseTemplate();
+ virtual void resolve(const T& result);
+
+ private:
+ base::Callback<void(const T&)> resolve_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmPromiseTemplate);
+};
+
+// Specialization for no parameter to resolve().
+template <>
+class MEDIA_EXPORT CdmPromiseTemplate<void> : public CdmPromise {
+ public:
+ CdmPromiseTemplate(base::Callback<void(void)> resolve_cb,
+ PromiseRejectedCB rejected_cb);
+ virtual ~CdmPromiseTemplate();
+ virtual void resolve();
+
+ protected:
+ // Allow subclasses to completely override the implementation.
+ CdmPromiseTemplate();
+
+ private:
+ base::Callback<void(void)> resolve_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmPromiseTemplate);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CDM_PROMISE_H_
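
Aside: an illustrative sketch of the promise contract above (OnResolved and OnRejected are invented names). CdmPromiseTemplate<std::string> is the specialization explicitly instantiated in cdm_promise.cc, and the destructors DCHECK that every promise is settled exactly once:

    #include <string>

    #include "base/bind.h"
    #include "media/base/cdm_promise.h"

    static void OnResolved(const std::string& web_session_id) {}
    static void OnRejected(media::MediaKeys::Exception exception_code,
                           uint32 system_code,
                           const std::string& error_message) {}

    static void Demo() {
      // Settle the promise exactly once before it goes out of scope.
      media::CdmPromiseTemplate<std::string> promise(
          base::Bind(&OnResolved), base::Bind(&OnRejected));
      promise.resolve("web-session-id");  // Or promise.reject(...).
    }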
diff --git a/chromium/media/base/channel_layout.cc b/chromium/media/base/channel_layout.cc
index 958430ac4e7..d0b02a90fab 100644
--- a/chromium/media/base/channel_layout.cc
+++ b/chromium/media/base/channel_layout.cc
@@ -40,15 +40,16 @@ static const int kLayoutToChannels[] = {
8, // CHANNEL_LAYOUT_7_1_WIDE_BACK
8, // CHANNEL_LAYOUT_OCTAGONAL
0, // CHANNEL_LAYOUT_DISCRETE
+ 3, // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
};
-// The channel orderings for each layout as specified by FFmpeg. Each value
+// The channel orderings for each layout as specified by FFmpeg. Each value
// represents the index of each channel in each layout. Values of -1 mean the
-// channel at that index is not used for that layout.For example, the left side
+// channel at that index is not used for that layout. For example, the left side
// surround sound channel in FFmpeg's 5.1 layout is in the 5th position (because
// the order is L, R, C, LFE, LS, RS), so
// kChannelOrderings[CHANNEL_LAYOUT_5POINT1][SIDE_LEFT] = 4;
-static const int kChannelOrderings[CHANNEL_LAYOUT_MAX][CHANNELS_MAX] = {
+static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = {
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_NONE
@@ -145,6 +146,9 @@ static const int kChannelOrderings[CHANNEL_LAYOUT_MAX][CHANNELS_MAX] = {
// CHANNEL_LAYOUT_DISCRETE
{ -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+ // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
+ { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
};
@@ -246,8 +250,8 @@ const char* ChannelLayoutToString(ChannelLayout layout) {
return "OCTAGONAL";
case CHANNEL_LAYOUT_DISCRETE:
return "DISCRETE";
- case CHANNEL_LAYOUT_MAX:
- break;
+ case CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
+ return "STEREO_AND_KEYBOARD_MIC";
}
NOTREACHED() << "Invalid channel layout provided: " << layout;
return "";
diff --git a/chromium/media/base/channel_layout.h b/chromium/media/base/channel_layout.h
index 9354eee850d..12319ecb3ac 100644
--- a/chromium/media/base/channel_layout.h
+++ b/chromium/media/base/channel_layout.h
@@ -99,8 +99,14 @@ enum ChannelLayout {
// Channels are not explicitly mapped to speakers.
CHANNEL_LAYOUT_DISCRETE = 29,
- // Total number of layouts.
- CHANNEL_LAYOUT_MAX // Must always be last!
+ // Front L, Front R, Front C. Front C contains the keyboard mic audio. This
+ // layout is only intended for input for WebRTC. The Front C channel
+ // is stripped away in the WebRTC audio input pipeline and never seen outside
+ // of that.
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC = 30,
+
+ // Max value, must always equal the largest entry ever logged.
+ CHANNEL_LAYOUT_MAX = CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
};
enum Channels {
@@ -115,7 +121,7 @@ enum Channels {
BACK_CENTER,
SIDE_LEFT,
SIDE_RIGHT,
- CHANNELS_MAX
+ CHANNELS_MAX = SIDE_RIGHT, // Must always equal the largest value ever logged.
};
// Returns the expected channel position in an interleaved stream. Values of -1
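
Aside: this hunk changes the meaning of CHANNEL_LAYOUT_MAX from a count (one past the last layout) to the largest valid value, which is why the loops in channel_mixer.cc and its unit test below move from < to <=. An illustrative iteration under the new convention (ForEachLayout is an invented name):

    #include "media/base/channel_layout.h"

    static void ForEachLayout() {
      for (media::ChannelLayout layout = media::CHANNEL_LAYOUT_MONO;
           layout <= media::CHANNEL_LAYOUT_MAX;  // Now an inclusive bound.
           layout = static_cast<media::ChannelLayout>(layout + 1)) {
        // E.g. media::ChannelLayoutToString(layout).
      }
    }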
diff --git a/chromium/media/base/channel_mixer.cc b/chromium/media/base/channel_mixer.cc
index 3de63fe8bf1..4c5179b40a8 100644
--- a/chromium/media/base/channel_mixer.cc
+++ b/chromium/media/base/channel_mixer.cc
@@ -23,9 +23,10 @@ static const float kEqualPowerScale = static_cast<float>(M_SQRT1_2);
static void ValidateLayout(ChannelLayout layout) {
CHECK_NE(layout, CHANNEL_LAYOUT_NONE);
- CHECK_NE(layout, CHANNEL_LAYOUT_MAX);
+ CHECK_LE(layout, CHANNEL_LAYOUT_MAX);
CHECK_NE(layout, CHANNEL_LAYOUT_UNSUPPORTED);
CHECK_NE(layout, CHANNEL_LAYOUT_DISCRETE);
+ CHECK_NE(layout, CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC);
// Verify there's at least one channel. Should always be true here by virtue
// of not being one of the invalid layouts, but lets double check to be sure.
@@ -170,7 +171,7 @@ bool MatrixBuilder::CreateTransformationMatrix(
}
// Route matching channels and figure out which ones aren't accounted for.
- for (Channels ch = LEFT; ch < CHANNELS_MAX;
+ for (Channels ch = LEFT; ch < CHANNELS_MAX + 1;
ch = static_cast<Channels>(ch + 1)) {
int input_ch_index = ChannelOrder(input_layout_, ch);
if (input_ch_index < 0)
diff --git a/chromium/media/base/channel_mixer_unittest.cc b/chromium/media/base/channel_mixer_unittest.cc
index e048f8d9fc5..911866ac806 100644
--- a/chromium/media/base/channel_mixer_unittest.cc
+++ b/chromium/media/base/channel_mixer_unittest.cc
@@ -21,15 +21,19 @@ enum { kFrames = 16 };
// Test all possible layout conversions can be constructed and mixed.
TEST(ChannelMixerTest, ConstructAllPossibleLayouts) {
for (ChannelLayout input_layout = CHANNEL_LAYOUT_MONO;
- input_layout < CHANNEL_LAYOUT_MAX;
+ input_layout <= CHANNEL_LAYOUT_MAX;
input_layout = static_cast<ChannelLayout>(input_layout + 1)) {
for (ChannelLayout output_layout = CHANNEL_LAYOUT_MONO;
output_layout < CHANNEL_LAYOUT_STEREO_DOWNMIX;
output_layout = static_cast<ChannelLayout>(output_layout + 1)) {
// DISCRETE can't be tested here based on the current approach.
+ // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC is not mixable.
if (input_layout == CHANNEL_LAYOUT_DISCRETE ||
- output_layout == CHANNEL_LAYOUT_DISCRETE)
+ input_layout == CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC ||
+ output_layout == CHANNEL_LAYOUT_DISCRETE ||
+ output_layout == CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
continue;
+ }
SCOPED_TRACE(base::StringPrintf(
"Input Layout: %d, Output Layout: %d", input_layout, output_layout));
diff --git a/chromium/media/base/clock.cc b/chromium/media/base/clock.cc
index ea954834e94..3dc49e9e79c 100644
--- a/chromium/media/base/clock.cc
+++ b/chromium/media/base/clock.cc
@@ -12,9 +12,14 @@
namespace media {
-Clock::Clock(base::TickClock* clock) : clock_(clock) {
+Clock::Clock(base::TickClock* clock)
+ : clock_(clock),
+ playing_(false),
+ underflow_(false),
+ playback_rate_(1.0f),
+ max_time_(kNoTimestamp()),
+ duration_(kNoTimestamp()) {
DCHECK(clock_);
- Reset();
}
Clock::~Clock() {}
@@ -103,11 +108,6 @@ base::TimeDelta Clock::ClampToValidTimeRange(base::TimeDelta time) const {
return std::max(std::min(time, duration_), base::TimeDelta());
}
-void Clock::EndOfStream() {
- Pause();
- SetTime(Duration(), Duration());
-}
-
base::TimeDelta Clock::Duration() const {
if (duration_ == kNoTimestamp())
return base::TimeDelta();
@@ -127,14 +127,4 @@ base::TimeDelta Clock::EstimatedElapsedTime() {
return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->NowTicks()));
}
-void Clock::Reset() {
- playing_ = false;
- playback_rate_ = 1.0f;
- max_time_ = kNoTimestamp();
- duration_ = kNoTimestamp();
- media_time_ = base::TimeDelta();
- reference_ = base::TimeTicks();
- underflow_ = false;
-}
-
} // namespace media
diff --git a/chromium/media/base/clock.h b/chromium/media/base/clock.h
index 01449be6250..fbd7ca125d7 100644
--- a/chromium/media/base/clock.h
+++ b/chromium/media/base/clock.h
@@ -70,13 +70,6 @@ class MEDIA_EXPORT Clock {
// exactly once.
void SetDuration(base::TimeDelta duration);
- // Resets clock to an uninitialized state.
- void Reset();
-
- // Notifies the clock that the end of stream has been reached. The clock state
- // is updated accordingly.
- void EndOfStream();
-
// Returns the duration of the clock, or 0 if not set.
base::TimeDelta Duration() const;
diff --git a/chromium/media/base/container_names.h b/chromium/media/base/container_names.h
index 7b7b099a00a..af1214fe9d1 100644
--- a/chromium/media/base/container_names.h
+++ b/chromium/media/base/container_names.h
@@ -56,6 +56,8 @@ enum MediaContainerName {
CONTAINER_WAV, // WAV / WAVE (Waveform Audio)
CONTAINER_WEBM, // Matroska / WebM
CONTAINER_WTV, // WTV (Windows Television)
+ CONTAINER_DASH, // DASH (MPEG-DASH)
+ CONTAINER_SMOOTHSTREAM, // SmoothStreaming
CONTAINER_MAX // Must be last
};
diff --git a/chromium/media/base/data_source.cc b/chromium/media/base/data_source.cc
index 91f52608609..c8ab4461b5d 100644
--- a/chromium/media/base/data_source.cc
+++ b/chromium/media/base/data_source.cc
@@ -11,18 +11,8 @@ namespace media {
// static
const int DataSource::kReadError = -1;
-DataSourceHost::~DataSourceHost() {}
-
-DataSource::DataSource() : host_(NULL) {}
+DataSource::DataSource() {}
DataSource::~DataSource() {}
-void DataSource::set_host(DataSourceHost* host) {
- DCHECK(host);
- DCHECK(!host_);
- host_ = host;
-}
-
-DataSourceHost* DataSource::host() { return host_; }
-
} // namespace media
diff --git a/chromium/media/base/data_source.h b/chromium/media/base/data_source.h
index 9176c8e845c..dca1dd30dad 100644
--- a/chromium/media/base/data_source.h
+++ b/chromium/media/base/data_source.h
@@ -11,24 +11,6 @@
namespace media {
-class MEDIA_EXPORT DataSourceHost {
- public:
- // Set the total size of the media file.
- virtual void SetTotalBytes(int64 total_bytes) = 0;
-
- // Notify the host that byte range [start,end] has been buffered.
- // TODO(fischman): remove this method when demuxing is push-based instead of
- // pull-based. http://crbug.com/131444
- virtual void AddBufferedByteRange(int64 start, int64 end) = 0;
-
- // Notify the host that time range [start,end] has been buffered.
- virtual void AddBufferedTimeRange(base::TimeDelta start,
- base::TimeDelta end) = 0;
-
- protected:
- virtual ~DataSourceHost();
-};
-
class MEDIA_EXPORT DataSource {
public:
typedef base::Callback<void(int64, int64)> StatusCallback;
@@ -38,8 +20,6 @@ class MEDIA_EXPORT DataSource {
DataSource();
virtual ~DataSource();
- virtual void set_host(DataSourceHost* host);
-
// Reads |size| bytes from |position| into |data|. And when the read is done
// or failed, |read_cb| is called with the number of bytes read or
// kReadError in case of error.
@@ -62,12 +42,7 @@ class MEDIA_EXPORT DataSource {
// Values of |bitrate| <= 0 are invalid and should be ignored.
virtual void SetBitrate(int bitrate) = 0;
- protected:
- DataSourceHost* host();
-
private:
- DataSourceHost* host_;
-
DISALLOW_COPY_AND_ASSIGN(DataSource);
};
diff --git a/chromium/media/base/decoder_buffer.cc b/chromium/media/base/decoder_buffer.cc
index d4e75410abe..673610b6874 100644
--- a/chromium/media/base/decoder_buffer.cc
+++ b/chromium/media/base/decoder_buffer.cc
@@ -5,6 +5,7 @@
#include "media/base/decoder_buffer.h"
#include "base/logging.h"
+#include "media/base/buffers.h"
#include "media/base/decrypt_config.h"
namespace media {
@@ -43,6 +44,7 @@ void DecoderBuffer::Initialize() {
base::AlignedAlloc(side_data_size_ + kPaddingSize, kAlignmentSize)));
memset(side_data_.get() + side_data_size_, 0, kPaddingSize);
}
+ splice_timestamp_ = kNoTimestamp();
}
// static
@@ -81,8 +83,14 @@ std::string DecoderBuffer::AsHumanReadableString() {
<< " size: " << size_
<< " side_data_size: " << side_data_size_
<< " encrypted: " << (decrypt_config_ != NULL)
- << " discard_padding (ms): " << discard_padding_.InMilliseconds();
+ << " discard_padding (ms): (" << discard_padding_.first.InMilliseconds()
+ << ", " << discard_padding_.second.InMilliseconds() << ")";
return s.str();
}
+void DecoderBuffer::set_timestamp(base::TimeDelta timestamp) {
+ DCHECK(!end_of_stream());
+ timestamp_ = timestamp;
+}
+
} // namespace media
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index 393e586d06b..27de88f4692 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -6,6 +6,7 @@
#define MEDIA_BASE_DECODER_BUFFER_H_
#include <string>
+#include <utility>
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
@@ -65,17 +66,16 @@ class MEDIA_EXPORT DecoderBuffer
return timestamp_;
}
- void set_timestamp(const base::TimeDelta& timestamp) {
- DCHECK(!end_of_stream());
- timestamp_ = timestamp;
- }
+ // TODO(dalecurtis): This should be renamed at some point, but to avoid a yak
+ // shave keep as a virtual with hacker_style() for now.
+ virtual void set_timestamp(base::TimeDelta timestamp);
base::TimeDelta duration() const {
DCHECK(!end_of_stream());
return duration_;
}
- void set_duration(const base::TimeDelta& duration) {
+ void set_duration(base::TimeDelta duration) {
DCHECK(!end_of_stream());
duration_ = duration;
}
@@ -105,12 +105,18 @@ class MEDIA_EXPORT DecoderBuffer
return side_data_size_;
}
- base::TimeDelta discard_padding() const {
+  // A discard window indicates the amount of data which should be discarded
+  // from this buffer after decoding. The first value is the amount off the
+  // front and the second the amount off the back. A value of
+  // kInfiniteDuration() for the first value indicates the entire buffer
+  // should be discarded; the second value must be base::TimeDelta() in this
+  // case.
+ typedef std::pair<base::TimeDelta, base::TimeDelta> DiscardPadding;
+ const DiscardPadding& discard_padding() const {
DCHECK(!end_of_stream());
return discard_padding_;
}
- void set_discard_padding(const base::TimeDelta discard_padding) {
+ void set_discard_padding(const DiscardPadding& discard_padding) {
DCHECK(!end_of_stream());
discard_padding_ = discard_padding;
}
@@ -130,6 +136,20 @@ class MEDIA_EXPORT DecoderBuffer
return data_ == NULL;
}
+ // Indicates this buffer is part of a splice around |splice_timestamp_|.
+ // Returns kNoTimestamp() if the buffer is not part of a splice.
+ base::TimeDelta splice_timestamp() const {
+ DCHECK(!end_of_stream());
+ return splice_timestamp_;
+ }
+
+ // When set to anything but kNoTimestamp() indicates this buffer is part of a
+ // splice around |splice_timestamp|.
+ void set_splice_timestamp(base::TimeDelta splice_timestamp) {
+ DCHECK(!end_of_stream());
+ splice_timestamp_ = splice_timestamp;
+ }
+
// Returns a human-readable string describing |*this|.
std::string AsHumanReadableString();
@@ -148,11 +168,12 @@ class MEDIA_EXPORT DecoderBuffer
base::TimeDelta duration_;
int size_;
- scoped_ptr<uint8, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<uint8, base::AlignedFreeDeleter> data_;
int side_data_size_;
- scoped_ptr<uint8, base::ScopedPtrAlignedFree> side_data_;
+ scoped_ptr<uint8, base::AlignedFreeDeleter> side_data_;
scoped_ptr<DecryptConfig> decrypt_config_;
- base::TimeDelta discard_padding_;
+ DiscardPadding discard_padding_;
+ base::TimeDelta splice_timestamp_;
// Constructor helper method for memory allocations.
void Initialize();
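
Aside: an illustrative sketch of the new pair-valued discard padding (Demo is an invented name; only setters declared above are used):

    #include <utility>

    #include "media/base/buffers.h"
    #include "media/base/decoder_buffer.h"

    static void Demo(const scoped_refptr<media::DecoderBuffer>& buffer) {
      // Trim 2 ms off the front and 1 ms off the back after decoding.
      buffer->set_discard_padding(std::make_pair(
          base::TimeDelta::FromMilliseconds(2),
          base::TimeDelta::FromMilliseconds(1)));

      // Discard the entire decoded buffer: kInfiniteDuration() in the
      // first slot requires base::TimeDelta() in the second.
      buffer->set_discard_padding(std::make_pair(
          media::kInfiniteDuration(), base::TimeDelta()));
    }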
diff --git a/chromium/media/base/decoder_buffer_queue.cc b/chromium/media/base/decoder_buffer_queue.cc
index d0486cbf939..26ba9f4e69b 100644
--- a/chromium/media/base/decoder_buffer_queue.cc
+++ b/chromium/media/base/decoder_buffer_queue.cc
@@ -5,13 +5,15 @@
#include "media/base/decoder_buffer_queue.h"
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
namespace media {
DecoderBufferQueue::DecoderBufferQueue()
- : earliest_valid_timestamp_(kNoTimestamp()) {
+ : earliest_valid_timestamp_(kNoTimestamp()),
+ data_size_(0) {
}
DecoderBufferQueue::~DecoderBufferQueue() {}
@@ -21,6 +23,10 @@ void DecoderBufferQueue::Push(const scoped_refptr<DecoderBuffer>& buffer) {
queue_.push_back(buffer);
+ // TODO(damienv): Remove the cast here and in every place in this file
+ // when DecoderBuffer::data_size is modified to return a size_t.
+ data_size_ += base::checked_cast<size_t, int>(buffer->data_size());
+
// TODO(scherkus): FFmpeg returns some packets with no timestamp after
// seeking. Fix and turn this into CHECK(). See http://crbug.com/162192
if (buffer->timestamp() == kNoTimestamp()) {
@@ -49,6 +55,11 @@ scoped_refptr<DecoderBuffer> DecoderBufferQueue::Pop() {
scoped_refptr<DecoderBuffer> buffer = queue_.front();
queue_.pop_front();
+ size_t buffer_data_size =
+ base::checked_cast<size_t, int>(buffer->data_size());
+ DCHECK_LE(buffer_data_size, data_size_);
+ data_size_ -= buffer_data_size;
+
if (!in_order_queue_.empty() &&
in_order_queue_.front().get() == buffer.get()) {
in_order_queue_.pop_front();
@@ -59,6 +70,7 @@ scoped_refptr<DecoderBuffer> DecoderBufferQueue::Pop() {
void DecoderBufferQueue::Clear() {
queue_.clear();
+ data_size_ = 0;
in_order_queue_.clear();
earliest_valid_timestamp_ = kNoTimestamp();
}
diff --git a/chromium/media/base/decoder_buffer_queue.h b/chromium/media/base/decoder_buffer_queue.h
index 938db63123e..9c2c2dc7b19 100644
--- a/chromium/media/base/decoder_buffer_queue.h
+++ b/chromium/media/base/decoder_buffer_queue.h
@@ -51,6 +51,9 @@ class MEDIA_EXPORT DecoderBufferQueue {
// Returns zero if the queue is empty.
base::TimeDelta Duration();
+ // Returns the total size of buffers inside the queue.
+ size_t data_size() const { return data_size_; }
+
private:
typedef std::deque<scoped_refptr<DecoderBuffer> > Queue;
Queue queue_;
@@ -62,6 +65,9 @@ class MEDIA_EXPORT DecoderBufferQueue {
base::TimeDelta earliest_valid_timestamp_;
+ // Total size in bytes of buffers in the queue.
+ size_t data_size_;
+
DISALLOW_COPY_AND_ASSIGN(DecoderBufferQueue);
};
diff --git a/chromium/media/base/decoder_buffer_queue_unittest.cc b/chromium/media/base/decoder_buffer_queue_unittest.cc
index 32e62db06b1..5eb06d2152d 100644
--- a/chromium/media/base/decoder_buffer_queue_unittest.cc
+++ b/chromium/media/base/decoder_buffer_queue_unittest.cc
@@ -25,6 +25,13 @@ static scoped_refptr<DecoderBuffer> CreateBuffer(int timestamp) {
return buffer;
}
+static scoped_refptr<DecoderBuffer> CreateBuffer(int timestamp, int size) {
+ scoped_refptr<DecoderBuffer> buffer = new DecoderBuffer(size);
+ buffer->set_timestamp(ToTimeDelta(timestamp));
+ buffer->set_duration(ToTimeDelta(0));
+ return buffer;
+}
+
TEST(DecoderBufferQueueTest, IsEmpty) {
DecoderBufferQueue queue;
EXPECT_TRUE(queue.IsEmpty());
@@ -135,4 +142,28 @@ TEST(DecoderBufferQueueTest, Duration_NoTimestamp) {
EXPECT_EQ(0, queue.Duration().InSeconds());
}
+TEST(DecoderBufferQueueTest, DataSize) {
+ DecoderBufferQueue queue;
+ EXPECT_EQ(queue.data_size(), 0u);
+
+ queue.Push(CreateBuffer(0, 1200u));
+ EXPECT_EQ(queue.data_size(), 1200u);
+
+ queue.Push(CreateBuffer(1, 1000u));
+ EXPECT_EQ(queue.data_size(), 2200u);
+
+ queue.Pop();
+ EXPECT_EQ(queue.data_size(), 1000u);
+
+ queue.Push(CreateBuffer(2, 999u));
+ queue.Push(CreateBuffer(3, 999u));
+ EXPECT_EQ(queue.data_size(), 2998u);
+
+ queue.Clear();
+ EXPECT_EQ(queue.data_size(), 0u);
+
+ queue.Push(CreateBuffer(4, 1400u));
+ EXPECT_EQ(queue.data_size(), 1400u);
+}
+
} // namespace media
diff --git a/chromium/media/base/decrypt_config.cc b/chromium/media/base/decrypt_config.cc
index 53e20143e1b..a47806504a7 100644
--- a/chromium/media/base/decrypt_config.cc
+++ b/chromium/media/base/decrypt_config.cc
@@ -10,16 +10,13 @@ namespace media {
DecryptConfig::DecryptConfig(const std::string& key_id,
const std::string& iv,
- const int data_offset,
const std::vector<SubsampleEntry>& subsamples)
: key_id_(key_id),
iv_(iv),
- data_offset_(data_offset),
subsamples_(subsamples) {
CHECK_GT(key_id.size(), 0u);
CHECK(iv.size() == static_cast<size_t>(DecryptConfig::kDecryptionKeySize) ||
iv.empty());
- CHECK_GE(data_offset, 0);
}
DecryptConfig::~DecryptConfig() {}
diff --git a/chromium/media/base/decrypt_config.h b/chromium/media/base/decrypt_config.h
index be0bb4d61b1..86480aa9eed 100644
--- a/chromium/media/base/decrypt_config.h
+++ b/chromium/media/base/decrypt_config.h
@@ -38,23 +38,16 @@ class MEDIA_EXPORT DecryptConfig {
// |iv| is the initialization vector defined by the encrypted format.
// Currently |iv| must be 16 bytes as defined by WebM and ISO. Or must be
// empty which signals an unencrypted frame.
- // |data_offset| is the amount of data that should be discarded from the
- // head of the sample buffer before applying subsample information. A
- // decrypted buffer will be shorter than an encrypted buffer by this amount.
// |subsamples| defines the clear and encrypted portions of the sample as
// described above. A decrypted buffer will be equal in size to the sum
// of the subsample sizes.
- //
- // |data_offset| is applied before |subsamples|.
DecryptConfig(const std::string& key_id,
const std::string& iv,
- const int data_offset,
const std::vector<SubsampleEntry>& subsamples);
~DecryptConfig();
const std::string& key_id() const { return key_id_; }
const std::string& iv() const { return iv_; }
- int data_offset() const { return data_offset_; }
const std::vector<SubsampleEntry>& subsamples() const { return subsamples_; }
private:
@@ -63,11 +56,6 @@ class MEDIA_EXPORT DecryptConfig {
// Initialization vector.
const std::string iv_;
- // TODO(fgalligan): Remove |data_offset_| if there is no plan to use it in
- // the future.
- // Amount of data to be discarded before applying subsample information.
- const int data_offset_;
-
// Subsample information. May be empty for some formats, meaning the entire
// frame is encrypted.
const std::vector<SubsampleEntry> subsamples_;
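With |data_offset| gone, a decrypted buffer is simply as large as the sum of its subsample sizes. A small sketch of that invariant, assuming a SubsampleEntry shaped as clear bytes followed by encrypted bytes (the field names are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustrative subsample record: |clear_bytes| unencrypted bytes followed
    // by |cypher_bytes| encrypted bytes (field names assumed).
    struct SubsampleEntry {
      size_t clear_bytes;
      size_t cypher_bytes;
    };

    // A decrypted buffer is equal in size to the sum of the subsample sizes.
    size_t ExpectedDecryptedSize(const std::vector<SubsampleEntry>& subsamples) {
      size_t total = 0;
      for (const SubsampleEntry& s : subsamples)
        total += s.clear_bytes + s.cypher_bytes;
      return total;
    }

    int main() {
      std::vector<SubsampleEntry> subsamples = {{16, 240}, {8, 120}};
      assert(ExpectedDecryptedSize(subsamples) == 384u);
    }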
diff --git a/chromium/media/base/demuxer.h b/chromium/media/base/demuxer.h
index 9b671f007cb..c9c851c1bd9 100644
--- a/chromium/media/base/demuxer.h
+++ b/chromium/media/base/demuxer.h
@@ -17,8 +17,12 @@ namespace media {
class TextTrackConfig;
-class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
+class MEDIA_EXPORT DemuxerHost {
public:
+ // Notify the host that time range [start,end] has been buffered.
+ virtual void AddBufferedTimeRange(base::TimeDelta start,
+ base::TimeDelta end) = 0;
+
// Sets the duration of the media in microseconds.
// Duration may be kInfiniteDuration() if the duration is not known.
virtual void SetDuration(base::TimeDelta duration) = 0;
@@ -40,6 +44,12 @@ class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
class MEDIA_EXPORT Demuxer {
public:
+ enum Liveness {
+ LIVENESS_UNKNOWN,
+ LIVENESS_RECORDED,
+ LIVENESS_LIVE,
+ };
+
// A new potentially encrypted stream has been parsed.
// First parameter - The type of initialization data.
// Second parameter - The initialization data associated with the stream.
@@ -68,14 +78,6 @@ class MEDIA_EXPORT Demuxer {
// call any method (including Stop()) after a demuxer has stopped.
virtual void Stop(const base::Closure& callback) = 0;
- // This method is called from the pipeline when the audio renderer
- // is disabled. Demuxers can ignore the notification if they do not
- // need to react to this event.
- //
- // TODO(acolwell): Change to generic DisableStream(DemuxerStream::Type).
- // TODO(scherkus): this might not be needed http://crbug.com/234708
- virtual void OnAudioRendererDisabled() = 0;
-
// Returns the first stream of the given stream type (which is not allowed
// to be DemuxerStream::TEXT), or NULL if that type of stream is not present.
virtual DemuxerStream* GetStream(DemuxerStream::Type type) = 0;
@@ -83,6 +85,14 @@ class MEDIA_EXPORT Demuxer {
// Returns the starting time for the media file.
virtual base::TimeDelta GetStartTime() const = 0;
+ // Returns the Time represented by presentation timestamp 0.
+ // If the timestamps are not associated with a Time, then
+ // a null Time is returned.
+ virtual base::Time GetTimelineOffset() const = 0;
+
+ // Returns liveness of the stream, i.e. whether it is recorded or live.
+ virtual Liveness GetLiveness() const = 0;
+
private:
DISALLOW_COPY_AND_ASSIGN(Demuxer);
};
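GetLiveness() lets clients branch on whether content is recorded or live. The policy below is only an illustration of how a caller might consume the new enum, not what Chromium does:

    #include <cassert>

    enum Liveness {
      LIVENESS_UNKNOWN,
      LIVENESS_RECORDED,
      LIVENESS_LIVE,
    };

    // Illustrative policy only: a player might disable seeking for live
    // streams and treat unknown liveness conservatively.
    bool SeekingAllowed(Liveness liveness) {
      switch (liveness) {
        case LIVENESS_RECORDED:
          return true;
        case LIVENESS_LIVE:
          return false;
        case LIVENESS_UNKNOWN:
          return true;  // assume seekable until told otherwise
      }
      return true;
    }

    int main() {
      assert(SeekingAllowed(LIVENESS_RECORDED));
      assert(!SeekingAllowed(LIVENESS_LIVE));
    }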
diff --git a/chromium/media/base/demuxer_perftest.cc b/chromium/media/base/demuxer_perftest.cc
index f63e6e4b3e5..73a051b39de 100644
--- a/chromium/media/base/demuxer_perftest.cc
+++ b/chromium/media/base/demuxer_perftest.cc
@@ -24,13 +24,9 @@ class DemuxerHostImpl : public media::DemuxerHost {
DemuxerHostImpl() {}
virtual ~DemuxerHostImpl() {}
- // DataSourceHost implementation.
- virtual void SetTotalBytes(int64 total_bytes) OVERRIDE {}
- virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE {}
+ // DemuxerHost implementation.
virtual void AddBufferedTimeRange(base::TimeDelta start,
base::TimeDelta end) OVERRIDE {}
-
- // DemuxerHost implementation.
virtual void SetDuration(base::TimeDelta duration) OVERRIDE {}
virtual void OnDemuxerError(media::PipelineStatus error) OVERRIDE {}
virtual void AddTextStream(media::DemuxerStream* text_stream,
diff --git a/chromium/media/base/demuxer_stream.h b/chromium/media/base/demuxer_stream.h
index 4e07c66d8fe..87f53e7307f 100644
--- a/chromium/media/base/demuxer_stream.h
+++ b/chromium/media/base/demuxer_stream.h
@@ -40,6 +40,8 @@ class MEDIA_EXPORT DemuxerStream {
// new configuration to properly decode the buffers read
// from this point forward. The second parameter MUST be NULL
// when this status is returned.
+ // This will only be returned if SupportsConfigChanges()
+ // returns 'true' for this DemuxerStream.
enum Status {
kOk,
kAborted,
@@ -68,6 +70,16 @@ class MEDIA_EXPORT DemuxerStream {
virtual void EnableBitstreamConverter() = 0;
+ // Whether or not this DemuxerStream allows midstream configuration changes.
+ //
+ // A DemuxerStream that returns 'true' to this may return the 'kConfigChange'
+ // status from a Read() call. In this case the client is expected to be
+ // capable of taking appropriate action to handle config changes. Otherwise
+ // audio_decoder_config() and video_decoder_config()'s return values are
+ // guaranteed to remain constant, and the client may make optimizations based
+ // on this.
+ virtual bool SupportsConfigChanges() = 0;
+
protected:
// Only allow concrete implementations to get deleted.
virtual ~DemuxerStream();
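The contract above means a client needs a kConfigChange path only when the stream opts in via SupportsConfigChanges(); otherwise the decoder config is guaranteed constant. A toy sketch of a read loop honoring that contract (FakeStream is invented for the example):

    #include <cassert>

    enum Status { kOk, kAborted, kConfigChange };

    struct FakeStream {
      bool supports_config_changes;
      int reads_until_change;
      Status Read() {
        if (supports_config_changes && reads_until_change-- == 0)
          return kConfigChange;
        return kOk;
      }
    };

    // Clients only need a config-change path when the stream opts in.
    int CountConfigChanges(FakeStream& s, int reads) {
      int changes = 0;
      for (int i = 0; i < reads; ++i) {
        if (s.Read() == kConfigChange)
          ++changes;  // re-query the decoder config here in a real client
      }
      return changes;
    }

    int main() {
      FakeStream fixed{false, 2};
      assert(CountConfigChanges(fixed, 5) == 0);
      FakeStream adaptive{true, 2};
      assert(CountConfigChanges(adaptive, 5) == 1);
    }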
diff --git a/chromium/media/base/fake_text_track_stream.cc b/chromium/media/base/fake_text_track_stream.cc
index 3136c475a78..2e9a1e17c9d 100644
--- a/chromium/media/base/fake_text_track_stream.cc
+++ b/chromium/media/base/fake_text_track_stream.cc
@@ -12,7 +12,7 @@
namespace media {
FakeTextTrackStream::FakeTextTrackStream()
- : message_loop_(base::MessageLoopProxy::current()),
+ : task_runner_(base::MessageLoopProxy::current()),
stopping_(false) {
}
@@ -27,7 +27,7 @@ void FakeTextTrackStream::Read(const ReadCB& read_cb) {
read_cb_ = read_cb;
if (stopping_) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&FakeTextTrackStream::AbortPendingRead, base::Unretained(this)));
}
}
@@ -36,6 +36,8 @@ DemuxerStream::Type FakeTextTrackStream::type() {
return DemuxerStream::TEXT;
}
+bool FakeTextTrackStream::SupportsConfigChanges() { return false; }
+
void FakeTextTrackStream::SatisfyPendingRead(
const base::TimeDelta& start,
const base::TimeDelta& duration,
diff --git a/chromium/media/base/fake_text_track_stream.h b/chromium/media/base/fake_text_track_stream.h
index 33c74ef4f30..db7a3e10df6 100644
--- a/chromium/media/base/fake_text_track_stream.h
+++ b/chromium/media/base/fake_text_track_stream.h
@@ -23,6 +23,7 @@ class FakeTextTrackStream : public DemuxerStream {
MOCK_METHOD0(video_decoder_config, VideoDecoderConfig());
virtual Type type() OVERRIDE;
MOCK_METHOD0(EnableBitstreamConverter, void());
+ virtual bool SupportsConfigChanges();
void SatisfyPendingRead(const base::TimeDelta& start,
const base::TimeDelta& duration,
@@ -37,7 +38,7 @@ class FakeTextTrackStream : public DemuxerStream {
MOCK_METHOD0(OnRead, void());
private:
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
ReadCB read_cb_;
bool stopping_;
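The rename from message_loop_ to task_runner_ is mechanical, but the posting pattern it serves is worth spelling out: the abort callback is queued rather than invoked re-entrantly from inside Read(). A minimal stand-in for that deferred execution (TaskRunner here is invented, not base::SingleThreadTaskRunner):

    #include <cassert>
    #include <deque>
    #include <functional>

    // Minimal stand-in for posting to a SingleThreadTaskRunner: tasks are
    // queued and run later on the owning thread, never inline.
    class TaskRunner {
     public:
      void PostTask(std::function<void()> task) {
        queue_.push_back(std::move(task));
      }
      void RunPendingTasks() {
        while (!queue_.empty()) {
          auto task = std::move(queue_.front());
          queue_.pop_front();
          task();
        }
      }

     private:
      std::deque<std::function<void()>> queue_;
    };

    int main() {
      TaskRunner runner;
      bool aborted = false;
      // Mirrors the Read() path above: defer the abort instead of invoking
      // the callback re-entrantly from inside Read().
      runner.PostTask([&] { aborted = true; });
      assert(!aborted);  // not run inline
      runner.RunPendingTasks();
      assert(aborted);
    }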
diff --git a/chromium/media/base/media.cc b/chromium/media/base/media.cc
index 75625fe5f36..37fc02ae457 100644
--- a/chromium/media/base/media.cc
+++ b/chromium/media/base/media.cc
@@ -9,8 +9,6 @@
#include "base/path_service.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
-#include "media/base/sinc_resampler.h"
-#include "media/base/vector_math.h"
#include "media/base/yuv_convert.h"
namespace media {
@@ -44,9 +42,6 @@ class MediaInitializer {
: initialized_(false),
tried_initialize_(false) {
// Perform initialization of libraries which require runtime CPU detection.
- // TODO(dalecurtis): Add initialization of YUV, SincResampler.
- vector_math::Initialize();
- SincResampler::InitializeCPUSpecificFeatures();
InitializeCPUSpecificYUVConversions();
}
diff --git a/chromium/media/base/media_file_checker.cc b/chromium/media/base/media_file_checker.cc
index 494657d209f..4a49ac7c6a2 100644
--- a/chromium/media/base/media_file_checker.cc
+++ b/chromium/media/base/media_file_checker.cc
@@ -21,20 +21,17 @@ static void OnError(bool* called) {
*called = false;
}
-MediaFileChecker::MediaFileChecker(const base::PlatformFile& file)
- : file_(file),
- file_closer_(&file_) {
+MediaFileChecker::MediaFileChecker(base::File file) : file_(file.Pass()) {
}
MediaFileChecker::~MediaFileChecker() {
}
bool MediaFileChecker::Start(base::TimeDelta check_time) {
- media::FileDataSource source;
+ media::FileDataSource source(file_.Pass());
bool read_ok = true;
media::BlockingUrlProtocol protocol(&source, base::Bind(&OnError, &read_ok));
media::FFmpegGlue glue(&protocol);
- source.InitializeFromPlatformFile(file_);
AVFormatContext* format_context = glue.format_context();
if (!glue.OpenContext())
@@ -59,11 +56,10 @@ bool MediaFileChecker::Start(base::TimeDelta check_time) {
return false;
AVPacket packet;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> frame(
- av_frame_alloc());
+ scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> frame(av_frame_alloc());
int result = 0;
- base::Time deadline = base::Time::Now() +
+ const base::TimeTicks deadline = base::TimeTicks::Now() +
std::min(check_time,
base::TimeDelta::FromSeconds(kMaxCheckTimeInSeconds));
do {
@@ -88,21 +84,23 @@ bool MediaFileChecker::Start(base::TimeDelta check_time) {
// decoded; otherwise av_free_packet() will corrupt memory.
AVPacket temp_packet = packet;
do {
- avcodec_get_frame_defaults(frame.get());
result = avcodec_decode_audio4(av_context, frame.get(), &frame_decoded,
&temp_packet);
if (result < 0)
break;
+ av_frame_unref(frame.get());
temp_packet.size -= result;
temp_packet.data += result;
+ frame_decoded = 0;
} while (temp_packet.size > 0);
} else if (av_context->codec_type == AVMEDIA_TYPE_VIDEO) {
- avcodec_get_frame_defaults(frame.get());
result = avcodec_decode_video2(av_context, frame.get(), &frame_decoded,
&packet);
+ if (result >= 0 && frame_decoded)
+ av_frame_unref(frame.get());
}
av_free_packet(&packet);
- } while (base::Time::Now() < deadline && read_ok && result >= 0);
+ } while (base::TimeTicks::Now() < deadline && read_ok && result >= 0);
return read_ok && (result == AVERROR_EOF || result >= 0);
}
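The deadline also moves from base::Time to base::TimeTicks, i.e. from wall-clock to monotonic time, so NTP adjustments can no longer stretch or shorten the decode budget. A minimal sketch of the same idea using std::chrono::steady_clock, the standard C++ analogue:

    #include <chrono>
    #include <iostream>

    int main() {
      using Clock = std::chrono::steady_clock;  // monotonic, like TimeTicks
      const auto deadline = Clock::now() + std::chrono::milliseconds(100);
      long long iterations = 0;
      while (Clock::now() < deadline)
        ++iterations;  // stand-in for "demux and decode one more packet"
      std::cout << iterations << " iterations before the deadline\n";
    }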
diff --git a/chromium/media/base/media_file_checker.h b/chromium/media/base/media_file_checker.h
index 6e8fc9f285f..8ed191a7c8e 100644
--- a/chromium/media/base/media_file_checker.h
+++ b/chromium/media/base/media_file_checker.h
@@ -6,8 +6,7 @@
#define MEDIA_BASE_MEDIA_FILE_CHECKER_H_
#include "base/basictypes.h"
-#include "base/files/scoped_platform_file_closer.h"
-#include "base/platform_file.h"
+#include "base/files/file.h"
#include "media/base/media_export.h"
namespace base {
@@ -21,7 +20,7 @@ namespace media {
// file safe to use in the browser process.
class MEDIA_EXPORT MediaFileChecker {
public:
- explicit MediaFileChecker(const base::PlatformFile& file);
+ explicit MediaFileChecker(base::File file);
~MediaFileChecker();
// After opening |file|, up to |check_time| amount of wall-clock time is spent
@@ -30,8 +29,7 @@ class MEDIA_EXPORT MediaFileChecker {
bool Start(base::TimeDelta check_time);
private:
- base::PlatformFile file_;
- base::ScopedPlatformFileCloser file_closer_;
+ base::File file_;
DISALLOW_COPY_AND_ASSIGN(MediaFileChecker);
};
diff --git a/chromium/media/base/media_file_checker_unittest.cc b/chromium/media/base/media_file_checker_unittest.cc
index ec61edf3e64..001fe04694c 100644
--- a/chromium/media/base/media_file_checker_unittest.cc
+++ b/chromium/media/base/media_file_checker_unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/files/file.h"
#include "base/logging.h"
#include "build/build_config.h"
#include "media/base/media_file_checker.h"
@@ -11,20 +12,14 @@
namespace media {
static void RunMediaFileChecker(const std::string& filename, bool expectation) {
- base::PlatformFileError error;
- base::PlatformFile file = base::CreatePlatformFile(
- GetTestDataFilePath(filename),
- base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ,
- NULL,
- &error);
- ASSERT_EQ(base::PLATFORM_FILE_OK, error);
-
- MediaFileChecker checker(file);
+ base::File file(GetTestDataFilePath(filename),
+ base::File::FLAG_OPEN | base::File::FLAG_READ);
+ ASSERT_TRUE(file.IsValid());
+
+ MediaFileChecker checker(file.Pass());
const base::TimeDelta check_time = base::TimeDelta::FromMilliseconds(100);
bool result = checker.Start(check_time);
EXPECT_EQ(expectation, result);
-
- base::ClosePlatformFile(file);
}
TEST(MediaFileCheckerTest, InvalidFile) {
diff --git a/chromium/media/base/media_keys.h b/chromium/media/base/media_keys.h
index 0d86948564e..d581ae4e8bd 100644
--- a/chromium/media/base/media_keys.h
+++ b/chromium/media/base/media_keys.h
@@ -12,11 +12,18 @@
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
+#include "url/gurl.h"
namespace media {
class Decryptor;
+template <typename T>
+class CdmPromiseTemplate;
+
+typedef CdmPromiseTemplate<std::string> NewSessionCdmPromise;
+typedef CdmPromiseTemplate<void> SimpleCdmPromise;
+
// Performs media key operations.
//
// All key operations are called on the renderer thread. Therefore, these calls
@@ -26,6 +33,8 @@ class MEDIA_EXPORT MediaKeys {
// Reported to UMA, so never reuse a value!
// Must be kept in sync with blink::WebMediaPlayerClient::MediaKeyErrorCode
// (enforced in webmediaplayer_impl.cc).
+ // TODO(jrummell): Can this be moved to proxy_decryptor as it should only be
+ // used by the prefixed EME code?
enum KeyError {
kUnknownError = 1,
kClientError,
@@ -37,28 +46,55 @@ class MEDIA_EXPORT MediaKeys {
kMaxKeyError // Must be last and greater than any legit value.
};
+ // Must be a superset of cdm::MediaKeyException.
+ enum Exception {
+ NOT_SUPPORTED_ERROR,
+ INVALID_STATE_ERROR,
+ INVALID_ACCESS_ERROR,
+ QUOTA_EXCEEDED_ERROR,
+ UNKNOWN_ERROR,
+ CLIENT_ERROR,
+ OUTPUT_ERROR
+ };
+
+ // Type of license required when creating/loading a session.
+ // Must be consistent with the values specified in the spec:
+ // https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#extensions
+ enum SessionType {
+ TEMPORARY_SESSION,
+ PERSISTENT_SESSION
+ };
+
const static uint32 kInvalidSessionId = 0;
MediaKeys();
virtual ~MediaKeys();
- // Generates a key request with the |type| and |init_data| provided.
- // Returns true if generating key request succeeded, false otherwise.
+ // Creates a session with the |init_data_type|, |init_data| and |session_type|
+ // provided.
// Note: UpdateSession() and ReleaseSession() should only be called after
- // CreateSession() returns true.
- // TODO(jrummell): Remove return value when prefixed API is removed.
- virtual bool CreateSession(uint32 session_id,
- const std::string& type,
+ // |promise| is resolved.
+ virtual void CreateSession(const std::string& init_data_type,
const uint8* init_data,
- int init_data_length) = 0;
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<NewSessionCdmPromise> promise) = 0;
+
+ // Loads a session with the |web_session_id| provided.
+ // Note: UpdateSession() and ReleaseSession() should only be called after
+ // |promise| is resolved.
+ virtual void LoadSession(const std::string& web_session_id,
+ scoped_ptr<NewSessionCdmPromise> promise) = 0;
- // Updates a session specified by |session_id| with |response|.
- virtual void UpdateSession(uint32 session_id,
+ // Updates a session specified by |web_session_id| with |response|.
+ virtual void UpdateSession(const std::string& web_session_id,
const uint8* response,
- int response_length) = 0;
+ int response_length,
+ scoped_ptr<SimpleCdmPromise> promise) = 0;
- // Releases the session specified by |session_id|.
- virtual void ReleaseSession(uint32 session_id) = 0;
+ // Releases the session specified by |web_session_id|.
+ virtual void ReleaseSession(const std::string& web_session_id,
+ scoped_ptr<SimpleCdmPromise> promise) = 0;
// Gets the Decryptor object associated with the MediaKeys. Returns NULL if
// no Decryptor object is associated. The returned object is only guaranteed
@@ -71,22 +107,18 @@ class MEDIA_EXPORT MediaKeys {
// Key event callbacks. See the spec for details:
// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#event-summary
-typedef base::Callback<
- void(uint32 session_id, const std::string& web_session_id)>
- SessionCreatedCB;
-
-typedef base::Callback<void(uint32 session_id,
+typedef base::Callback<void(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& destination_url)>
- SessionMessageCB;
+ const GURL& destination_url)> SessionMessageCB;
-typedef base::Callback<void(uint32 session_id)> SessionReadyCB;
+typedef base::Callback<void(const std::string& web_session_id)> SessionReadyCB;
-typedef base::Callback<void(uint32 session_id)> SessionClosedCB;
+typedef base::Callback<void(const std::string& web_session_id)> SessionClosedCB;
-typedef base::Callback<void(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code)> SessionErrorCB;
+typedef base::Callback<void(const std::string& web_session_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message)> SessionErrorCB;
} // namespace media
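Each session call now carries a promise that is resolved on success or rejected with an Exception code. A minimal standalone sketch of that resolve/reject shape; the real CdmPromiseTemplate interface may differ in detail:

    #include <cassert>
    #include <functional>
    #include <string>
    #include <utility>

    // Minimal stand-in for CdmPromiseTemplate<T>: resolved with a T on
    // success, rejected with an exception code and message on failure.
    template <typename T>
    class Promise {
     public:
      using ResolveCB = std::function<void(const T&)>;
      using RejectCB = std::function<void(int code, const std::string& msg)>;
      Promise(ResolveCB resolve, RejectCB reject)
          : resolve_(std::move(resolve)), reject_(std::move(reject)) {}
      void Resolve(const T& value) { resolve_(value); }
      void Reject(int code, const std::string& msg) { reject_(code, msg); }

     private:
      ResolveCB resolve_;
      RejectCB reject_;
    };

    int main() {
      std::string session_id;
      Promise<std::string> promise(
          [&](const std::string& id) { session_id = id; },
          [](int, const std::string&) { assert(false); });
      // A CreateSession() implementation would resolve once the CDM replies.
      promise.Resolve("web-session-1");
      assert(session_id == "web-session-1");
    }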
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
index e791b441f4f..5e1ed767fb1 100644
--- a/chromium/media/base/media_log.cc
+++ b/chromium/media/base/media_log.cc
@@ -52,8 +52,6 @@ const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
return "VIDEO_ENDED";
case MediaLogEvent::TEXT_ENDED:
return "TEXT_ENDED";
- case MediaLogEvent::AUDIO_RENDERER_DISABLED:
- return "AUDIO_RENDERER_DISABLED";
case MediaLogEvent::BUFFERED_EXTENTS_CHANGED:
return "BUFFERED_EXTENTS_CHANGED";
case MediaLogEvent::MEDIA_SOURCE_ERROR:
@@ -97,8 +95,6 @@ const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
return "demuxer: no supported streams";
case DECODER_ERROR_NOT_SUPPORTED:
return "decoder: not supported";
- case PIPELINE_STATUS_MAX:
- NOTREACHED();
}
NOTREACHED();
return NULL;
@@ -143,7 +139,10 @@ scoped_ptr<MediaLogEvent> MediaLog::CreateStringEvent(
scoped_ptr<MediaLogEvent> MediaLog::CreateTimeEvent(
MediaLogEvent::Type type, const char* property, base::TimeDelta value) {
scoped_ptr<MediaLogEvent> event(CreateEvent(type));
- event->params.SetDouble(property, value.InSecondsF());
+ if (value.is_max())
+ event->params.SetString(property, "unknown");
+ else
+ event->params.SetDouble(property, value.InSecondsF());
return event.Pass();
}
@@ -230,4 +229,14 @@ void MediaLog::SetBooleanProperty(
AddEvent(event.Pass());
}
+void MediaLog::SetTimeProperty(
+ const char* key, base::TimeDelta value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
+ if (value.is_max())
+ event->params.SetString(key, "unknown");
+ else
+ event->params.SetDouble(key, value.InSecondsF());
+ AddEvent(event.Pass());
+}
+
} // namespace media
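Both CreateTimeEvent() and the new SetTimeProperty() special-case an infinite TimeDelta, logging the string "unknown" instead of a meaningless double. A tiny sketch of that guard, with a plain double standing in for TimeDelta::InSecondsF():

    #include <cassert>
    #include <limits>
    #include <string>

    // Mirrors the is_max() guard above: an infinite duration serializes as
    // the string "unknown" rather than a nonsensical number.
    std::string FormatSeconds(double seconds) {
      if (seconds == std::numeric_limits<double>::infinity())
        return "unknown";
      return std::to_string(seconds);
    }

    int main() {
      assert(FormatSeconds(std::numeric_limits<double>::infinity()) ==
             "unknown");
      assert(FormatSeconds(1.5) == "1.500000");
    }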
diff --git a/chromium/media/base/media_log.h b/chromium/media/base/media_log.h
index 1d25c0973a5..f342ee84fc4 100644
--- a/chromium/media/base/media_log.h
+++ b/chromium/media/base/media_log.h
@@ -73,6 +73,7 @@ class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
void SetIntegerProperty(const char* key, int value);
void SetDoubleProperty(const char* key, double value);
void SetBooleanProperty(const char* key, bool value);
+ void SetTimeProperty(const char* key, base::TimeDelta value);
protected:
friend class base::RefCountedThreadSafe<MediaLog>;
diff --git a/chromium/media/base/media_log_event.h b/chromium/media/base/media_log_event.h
index 3052d415c12..fe3da9781f3 100644
--- a/chromium/media/base/media_log_event.h
+++ b/chromium/media/base/media_log_event.h
@@ -75,10 +75,6 @@ struct MediaLogEvent {
VIDEO_ENDED,
TEXT_ENDED,
- // The audio renderer has been disabled.
- // params: none.
- AUDIO_RENDERER_DISABLED,
-
// The extents of the sliding buffer have changed.
// params: "buffer_start": <first buffered byte>.
// "buffer_current": <current offset>.
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index 3a8fb33bde9..1bb5e980e25 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -9,20 +9,13 @@ namespace switches {
// Allow users to specify a custom buffer size for debugging purposes.
const char kAudioBufferSize[] = "audio-buffer-size";
-// Enable EAC3 playback in MSE.
-const char kEnableEac3Playback[] = "enable-eac3-playback";
-
-// Disables Opus playback in media elements.
-const char kDisableOpusPlayback[] = "disable-opus-playback";
-
-// Disables VP8 Alpha playback in media elements.
-const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
-
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
-// Enables MP3 stream parser for Media Source Extensions.
-const char kEnableMP3StreamParser[] = "enable-mp3-stream-parser";
+// Bypass autodetection of the upper limit on resolution of streams that can
+// be hardware decoded.
+const char kIgnoreResolutionLimitsForAcceleratedVideoDecode[] =
+ "ignore-resolution-limits-for-accelerated-video-decode";
#if defined(OS_ANDROID)
// Disables the infobar popup for accessing protected media identifier.
@@ -34,15 +27,6 @@ const char kDisableInfobarForProtectedMediaIdentifier[] =
const char kMediaDrmEnableNonCompositing[] = "mediadrm-enable-non-compositing";
#endif
-#if defined(GOOGLE_TV)
-// Use external video surface for video with more than or equal pixels to
-// specified value. For example, value of 0 will enable external video surface
-// for all videos, and value of 921600 (=1280*720) will enable external video
-// surface for 720p video and larger.
-const char kUseExternalVideoSurfaceThresholdInPixels[] =
- "use-external-video-surface-threshold-in-pixels";
-#endif
-
#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
// The Alsa device to use when opening an audio input stream.
const char kAlsaInputDevice[] = "alsa-input-device";
@@ -51,18 +35,19 @@ const char kAlsaOutputDevice[] = "alsa-output-device";
#endif
#if defined(OS_MACOSX)
-// Unlike other platforms, OSX requires CoreAudio calls to happen on the main
-// thread of the process. Provide a way to disable this until support is well
-// tested. See http://crbug.com/158170.
-// TODO(dalecurtis): Remove this once we're sure nothing has exploded.
-const char kDisableMainThreadAudio[] = "disable-main-thread-audio";
// AVFoundation is available in versions 10.7 and onwards, and is to be used
// http://crbug.com/288562 for both audio and video device monitoring and for
// video capture. Being a dynamically loaded NSBundle and library, it hits the
// Chrome startup time (http://crbug.com/311325 and http://crbug.com/311437);
-// until development is finished and the library load time issue is solved, the
-// usage of this library is hidden behind this flag.
+// for experimentation purposes, in particular around the library load time
+// issue, usage of this library can be enabled with this flag.
const char kEnableAVFoundation[] = "enable-avfoundation";
+
+// QTKit is the media capture API predecessor to AVFoundation, available up to
+// and including Mac OS X 10.9 (despite being deprecated in that release). This
+// flag is used for troubleshooting and testing, and forces QTKit in builds and
+// configurations where AVFoundation would otherwise be used.
+const char kForceQTKit[] = "force-qtkit";
#endif
#if defined(OS_WIN)
@@ -79,6 +64,11 @@ const char kEnableExclusiveAudio[] = "enable-exclusive-audio";
// See bug: http://crbug.com/268412
const char kForceDirectShowVideoCapture[] = "force-directshow";
+// Force the use of MediaFoundation for video capture. This is only supported
+// on Windows 7 and above. Used, like |kForceDirectShowVideoCapture|, to
+// troubleshoot problems on Windows platforms.
+const char kForceMediaFoundationVideoCapture[] = "force-mediafoundation";
+
// Use Windows WaveOut/In audio API even if Core Audio is supported.
const char kForceWaveAudio[] = "force-wave-audio";
@@ -98,8 +88,8 @@ const char kWaveOutBuffers[] = "waveout-buffers";
const char kUseCras[] = "use-cras";
#endif
-// Disables system sounds manager.
-const char kDisableSystemSoundsManager[] = "disable-system-sounds-manager";
+// Use fake device for Media Stream to replace actual camera and microphone.
+const char kUseFakeDeviceForMediaStream[] = "use-fake-device-for-media-stream";
// Use a raw video file as fake video capture device.
const char kUseFileForFakeVideoCapture[] = "use-file-for-fake-video-capture";
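For context on how these switches are consumed: Chromium reads them through base::CommandLine::ForCurrentProcess()->HasSwitch(). A standalone sketch of the equivalent check over argv, using the new fake-device switch as the example:

    #include <cstring>
    #include <iostream>

    // Illustrative only: a plain argv scan standing in for
    // base::CommandLine::ForCurrentProcess()->HasSwitch(...).
    bool HasSwitch(int argc, char** argv, const char* name) {
      for (int i = 1; i < argc; ++i) {
        if (std::strcmp(argv[i], name) == 0)
          return true;
      }
      return false;
    }

    int main(int argc, char** argv) {
      if (HasSwitch(argc, argv, "--use-fake-device-for-media-stream"))
        std::cout << "Using fake capture device\n";
      else
        std::cout << "Using real camera/microphone\n";
    }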
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index 0c7fa245c84..df621a08008 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -14,38 +14,30 @@ namespace switches {
MEDIA_EXPORT extern const char kAudioBufferSize[];
-MEDIA_EXPORT extern const char kEnableEac3Playback[];
-
-MEDIA_EXPORT extern const char kDisableOpusPlayback[];
-
-MEDIA_EXPORT extern const char kDisableVp8AlphaPlayback[];
-
MEDIA_EXPORT extern const char kVideoThreads[];
-MEDIA_EXPORT extern const char kEnableMP3StreamParser[];
+MEDIA_EXPORT extern const char
+ kIgnoreResolutionLimitsForAcceleratedVideoDecode[];
#if defined(OS_ANDROID)
MEDIA_EXPORT extern const char kDisableInfobarForProtectedMediaIdentifier[];
MEDIA_EXPORT extern const char kMediaDrmEnableNonCompositing[];
#endif
-#if defined(GOOGLE_TV)
-MEDIA_EXPORT extern const char kUseExternalVideoSurfaceThresholdInPixels[];
-#endif
-
#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
MEDIA_EXPORT extern const char kAlsaInputDevice[];
MEDIA_EXPORT extern const char kAlsaOutputDevice[];
#endif
#if defined(OS_MACOSX)
-MEDIA_EXPORT extern const char kDisableMainThreadAudio[];
MEDIA_EXPORT extern const char kEnableAVFoundation[];
+MEDIA_EXPORT extern const char kForceQTKit[];
#endif
#if defined(OS_WIN)
MEDIA_EXPORT extern const char kEnableExclusiveAudio[];
MEDIA_EXPORT extern const char kForceDirectShowVideoCapture[];
+MEDIA_EXPORT extern const char kForceMediaFoundationVideoCapture[];
MEDIA_EXPORT extern const char kForceWaveAudio[];
MEDIA_EXPORT extern const char kTrySupportedChannelLayouts[];
MEDIA_EXPORT extern const char kWaveOutBuffers[];
@@ -55,8 +47,7 @@ MEDIA_EXPORT extern const char kWaveOutBuffers[];
MEDIA_EXPORT extern const char kUseCras[];
#endif
-MEDIA_EXPORT extern const char kDisableSystemSoundsManager[];
-
+MEDIA_EXPORT extern const char kUseFakeDeviceForMediaStream[];
MEDIA_EXPORT extern const char kUseFileForFakeVideoCapture[];
} // namespace switches
diff --git a/chromium/media/base/mock_data_source_host.cc b/chromium/media/base/mock_data_source_host.cc
deleted file mode 100644
index eff0b78f163..00000000000
--- a/chromium/media/base/mock_data_source_host.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/mock_data_source_host.h"
-
-namespace media {
-
-MockDataSourceHost::MockDataSourceHost() {}
-
-MockDataSourceHost::~MockDataSourceHost() {}
-
-} // namespace media
diff --git a/chromium/media/base/mock_data_source_host.h b/chromium/media/base/mock_data_source_host.h
deleted file mode 100644
index 914d0556136..00000000000
--- a/chromium/media/base/mock_data_source_host.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-#ifndef MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
-#define MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
-
-#include <string>
-
-#include "media/base/data_source.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-class MockDataSourceHost : public DataSourceHost {
- public:
- MockDataSourceHost();
- virtual ~MockDataSourceHost();
-
- // DataSourceHost implementation.
- MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
- MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
- MOCK_METHOD2(AddBufferedTimeRange, void(base::TimeDelta start,
- base::TimeDelta end));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDataSourceHost);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
diff --git a/chromium/media/base/mock_demuxer_host.h b/chromium/media/base/mock_demuxer_host.h
index 61761a84b95..f9e8e4398ad 100644
--- a/chromium/media/base/mock_demuxer_host.h
+++ b/chromium/media/base/mock_demuxer_host.h
@@ -16,15 +16,10 @@ class MockDemuxerHost : public DemuxerHost {
MockDemuxerHost();
virtual ~MockDemuxerHost();
- // DataSourceHost implementation.
- MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
- MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
MOCK_METHOD2(AddBufferedTimeRange, void(base::TimeDelta start,
base::TimeDelta end));
-
- // DemuxerHost implementation.
- MOCK_METHOD1(OnDemuxerError, void(PipelineStatus error));
MOCK_METHOD1(SetDuration, void(base::TimeDelta duration));
+ MOCK_METHOD1(OnDemuxerError, void(PipelineStatus error));
MOCK_METHOD2(AddTextStream, void(DemuxerStream*,
const TextTrackConfig&));
MOCK_METHOD1(RemoveTextStream, void(DemuxerStream*));
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
index c71590da1d1..e592d52e8b5 100644
--- a/chromium/media/base/mock_filters.h
+++ b/chromium/media/base/mock_filters.h
@@ -39,6 +39,8 @@ class MockDemuxer : public Demuxer {
MOCK_METHOD0(OnAudioRendererDisabled, void());
MOCK_METHOD1(GetStream, DemuxerStream*(DemuxerStream::Type));
MOCK_CONST_METHOD0(GetStartTime, base::TimeDelta());
+ MOCK_CONST_METHOD0(GetTimelineOffset, base::Time());
+ MOCK_CONST_METHOD0(GetLiveness, Liveness());
private:
DISALLOW_COPY_AND_ASSIGN(MockDemuxer);
@@ -55,6 +57,7 @@ class MockDemuxerStream : public DemuxerStream {
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
MOCK_METHOD0(EnableBitstreamConverter, void());
+ MOCK_METHOD0(SupportsConfigChanges, bool());
void set_audio_decoder_config(const AudioDecoderConfig& config);
void set_video_decoder_config(const VideoDecoderConfig& config);
@@ -73,12 +76,14 @@ class MockVideoDecoder : public VideoDecoder {
virtual ~MockVideoDecoder();
// VideoDecoder implementation.
- MOCK_METHOD2(Initialize, void(const VideoDecoderConfig& config,
- const PipelineStatusCB&));
+ MOCK_METHOD4(Initialize, void(const VideoDecoderConfig& config,
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb));
MOCK_METHOD2(Decode, void(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
- MOCK_METHOD1(Stop, void(const base::Closure&));
+ MOCK_METHOD0(Stop, void());
MOCK_CONST_METHOD0(HasAlpha, bool());
private:
@@ -91,14 +96,15 @@ class MockAudioDecoder : public AudioDecoder {
virtual ~MockAudioDecoder();
// AudioDecoder implementation.
- MOCK_METHOD3(Initialize, void(DemuxerStream*,
- const PipelineStatusCB&,
- const StatisticsCB&));
- MOCK_METHOD1(Read, void(const ReadCB&));
- MOCK_METHOD0(bits_per_channel, int(void));
- MOCK_METHOD0(channel_layout, ChannelLayout(void));
- MOCK_METHOD0(samples_per_second, int(void));
+ MOCK_METHOD3(Initialize,
+ void(const AudioDecoderConfig& config,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb));
+ MOCK_METHOD2(Decode,
+ void(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
+ MOCK_METHOD0(Stop, void());
private:
DISALLOW_COPY_AND_ASSIGN(MockAudioDecoder);
@@ -111,16 +117,15 @@ class MockVideoRenderer : public VideoRenderer {
// VideoRenderer implementation.
MOCK_METHOD9(Initialize, void(DemuxerStream* stream,
+ bool low_delay,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& time_cb,
- const NaturalSizeChangedCB& size_changed_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
const TimeDeltaCB& get_duration_cb));
MOCK_METHOD1(Play, void(const base::Closure& callback));
- MOCK_METHOD1(Pause, void(const base::Closure& callback));
MOCK_METHOD1(Flush, void(const base::Closure& callback));
MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
MOCK_METHOD1(Stop, void(const base::Closure& callback));
@@ -136,16 +141,15 @@ class MockAudioRenderer : public AudioRenderer {
virtual ~MockAudioRenderer();
// AudioRenderer implementation.
- MOCK_METHOD8(Initialize, void(DemuxerStream* stream,
+ MOCK_METHOD7(Initialize, void(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const base::Closure& underflow_cb,
const TimeCB& time_cb,
const base::Closure& ended_cb,
- const base::Closure& disabled_cb,
const PipelineStatusCB& error_cb));
- MOCK_METHOD1(Play, void(const base::Closure& callback));
- MOCK_METHOD1(Pause, void(const base::Closure& callback));
+ MOCK_METHOD0(StartRendering, void());
+ MOCK_METHOD0(StopRendering, void());
MOCK_METHOD1(Flush, void(const base::Closure& callback));
MOCK_METHOD1(Stop, void(const base::Closure& callback));
MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
diff --git a/chromium/media/base/multi_channel_resampler.cc b/chromium/media/base/multi_channel_resampler.cc
index 801e5344cf2..b6bde7bbfab 100644
--- a/chromium/media/base/multi_channel_resampler.cc
+++ b/chromium/media/base/multi_channel_resampler.cc
@@ -108,4 +108,9 @@ void MultiChannelResampler::SetRatio(double io_sample_rate_ratio) {
resamplers_[i]->SetRatio(io_sample_rate_ratio);
}
+int MultiChannelResampler::ChunkSize() const {
+ DCHECK(!resamplers_.empty());
+ return resamplers_[0]->ChunkSize();
+}
+
} // namespace media
diff --git a/chromium/media/base/multi_channel_resampler.h b/chromium/media/base/multi_channel_resampler.h
index 148cb9da78a..ee3222a9175 100644
--- a/chromium/media/base/multi_channel_resampler.h
+++ b/chromium/media/base/multi_channel_resampler.h
@@ -47,6 +47,10 @@ class MEDIA_EXPORT MultiChannelResampler {
// Resample() is in progress.
void SetRatio(double io_sample_rate_ratio);
+ // The maximum size in frames that guarantees Resample() will only make a
+ // single call to |read_cb_| for more data.
+ int ChunkSize() const;
+
private:
// SincResampler::ReadCB implementation. ProvideInput() will be called for
// each channel (in channel order) as SincResampler needs more data.
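ChunkSize() can delegate to resamplers_[0] because every channel runs at the same I/O ratio; its value is the largest request that still costs at most one read callback per channel. A toy model of that guarantee (ChannelResampler is invented for the example):

    #include <cassert>
    #include <vector>

    // Stand-in for one SincResampler: chunk_size is how many output frames
    // can be produced per single input request at the current ratio.
    struct ChannelResampler {
      int chunk_size;
      int reads_issued = 0;
      void Resample(int frames) {
        // One read callback per chunk_size's worth of output.
        reads_issued += (frames + chunk_size - 1) / chunk_size;
      }
    };

    int main() {
      // All channels share the same ratio, so the first channel's chunk size
      // is representative of them all.
      std::vector<ChannelResampler> resamplers{{512}, {512}};
      int chunk = resamplers[0].chunk_size;
      for (auto& r : resamplers)
        r.Resample(chunk);
      for (const auto& r : resamplers)
        assert(r.reads_issued == 1);  // single callback per channel
    }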
diff --git a/chromium/media/base/pipeline.cc b/chromium/media/base/pipeline.cc
index 5799dc3f410..46be2b86e68 100644
--- a/chromium/media/base/pipeline.cc
+++ b/chromium/media/base/pipeline.cc
@@ -10,8 +10,9 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
+#include "base/location.h"
#include "base/metrics/histogram.h"
+#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
@@ -31,26 +32,24 @@ using base::TimeDelta;
namespace media {
-Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
- MediaLog* media_log)
- : message_loop_(message_loop),
+Pipeline::Pipeline(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ MediaLog* media_log)
+ : task_runner_(task_runner),
media_log_(media_log),
running_(false),
did_loading_progress_(false),
- total_bytes_(0),
- natural_size_(0, 0),
volume_(1.0f),
playback_rate_(0.0f),
clock_(new Clock(&default_tick_clock_)),
- waiting_for_clock_update_(false),
+ clock_state_(CLOCK_PAUSED),
status_(PIPELINE_OK),
- has_audio_(false),
- has_video_(false),
state_(kCreated),
audio_ended_(false),
video_ended_(false),
text_ended_(false),
- audio_disabled_(false),
+ audio_buffering_state_(BUFFERING_HAVE_NOTHING),
+ video_buffering_state_(BUFFERING_HAVE_NOTHING),
demuxer_(NULL),
creation_time_(default_tick_clock_.NowTicks()) {
media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(kCreated));
@@ -73,21 +72,34 @@ void Pipeline::Start(scoped_ptr<FilterCollection> collection,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
+ const PipelineMetadataCB& metadata_cb,
+ const base::Closure& preroll_completed_cb,
const base::Closure& duration_change_cb) {
+ DCHECK(!ended_cb.is_null());
+ DCHECK(!error_cb.is_null());
+ DCHECK(!seek_cb.is_null());
+ DCHECK(!metadata_cb.is_null());
+ DCHECK(!preroll_completed_cb.is_null());
+
base::AutoLock auto_lock(lock_);
CHECK(!running_) << "Media pipeline is already running";
- DCHECK(!buffering_state_cb.is_null());
-
running_ = true;
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &Pipeline::StartTask, base::Unretained(this), base::Passed(&collection),
- ended_cb, error_cb, seek_cb, buffering_state_cb, duration_change_cb));
+
+ filter_collection_ = collection.Pass();
+ ended_cb_ = ended_cb;
+ error_cb_ = error_cb;
+ seek_cb_ = seek_cb;
+ metadata_cb_ = metadata_cb;
+ preroll_completed_cb_ = preroll_completed_cb;
+ duration_change_cb_ = duration_change_cb;
+
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Pipeline::StartTask, base::Unretained(this)));
}
void Pipeline::Stop(const base::Closure& stop_cb) {
base::AutoLock auto_lock(lock_);
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::StopTask, base::Unretained(this), stop_cb));
}
@@ -98,7 +110,7 @@ void Pipeline::Seek(TimeDelta time, const PipelineStatusCB& seek_cb) {
return;
}
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::SeekTask, base::Unretained(this), time, seek_cb));
}
@@ -107,16 +119,6 @@ bool Pipeline::IsRunning() const {
return running_;
}
-bool Pipeline::HasAudio() const {
- base::AutoLock auto_lock(lock_);
- return has_audio_;
-}
-
-bool Pipeline::HasVideo() const {
- base::AutoLock auto_lock(lock_);
- return has_video_;
-}
-
float Pipeline::GetPlaybackRate() const {
base::AutoLock auto_lock(lock_);
return playback_rate_;
@@ -129,7 +131,7 @@ void Pipeline::SetPlaybackRate(float playback_rate) {
base::AutoLock auto_lock(lock_);
playback_rate_ = playback_rate;
if (running_) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::PlaybackRateChangedTask, base::Unretained(this),
playback_rate));
}
@@ -147,7 +149,7 @@ void Pipeline::SetVolume(float volume) {
base::AutoLock auto_lock(lock_);
volume_ = volume;
if (running_) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::VolumeChangedTask, base::Unretained(this), volume));
}
}
@@ -157,24 +159,9 @@ TimeDelta Pipeline::GetMediaTime() const {
return clock_->Elapsed();
}
-Ranges<TimeDelta> Pipeline::GetBufferedTimeRanges() {
+Ranges<TimeDelta> Pipeline::GetBufferedTimeRanges() const {
base::AutoLock auto_lock(lock_);
- Ranges<TimeDelta> time_ranges;
- for (size_t i = 0; i < buffered_time_ranges_.size(); ++i) {
- time_ranges.Add(buffered_time_ranges_.start(i),
- buffered_time_ranges_.end(i));
- }
- if (clock_->Duration() == TimeDelta() || total_bytes_ == 0)
- return time_ranges;
- for (size_t i = 0; i < buffered_byte_ranges_.size(); ++i) {
- TimeDelta start = TimeForByteOffset_Locked(buffered_byte_ranges_.start(i));
- TimeDelta end = TimeForByteOffset_Locked(buffered_byte_ranges_.end(i));
- // Cap approximated buffered time at the length of the video.
- end = std::min(end, clock_->Duration());
- time_ranges.Add(start, end);
- }
-
- return time_ranges;
+ return buffered_time_ranges_;
}
TimeDelta Pipeline::GetMediaDuration() const {
@@ -182,18 +169,7 @@ TimeDelta Pipeline::GetMediaDuration() const {
return clock_->Duration();
}
-int64 Pipeline::GetTotalBytes() const {
- base::AutoLock auto_lock(lock_);
- return total_bytes_;
-}
-
-void Pipeline::GetNaturalVideoSize(gfx::Size* out_size) const {
- CHECK(out_size);
- base::AutoLock auto_lock(lock_);
- *out_size = natural_size_;
-}
-
-bool Pipeline::DidLoadingProgress() const {
+bool Pipeline::DidLoadingProgress() {
base::AutoLock auto_lock(lock_);
bool ret = did_loading_progress_;
did_loading_progress_ = false;
@@ -214,14 +190,14 @@ void Pipeline::SetErrorForTesting(PipelineStatus status) {
}
void Pipeline::SetState(State next_state) {
- if (state_ != kStarted && next_state == kStarted &&
+ if (state_ != kPlaying && next_state == kPlaying &&
!creation_time_.is_null()) {
UMA_HISTOGRAM_TIMES("Media.TimeToPipelineStarted",
default_tick_clock_.NowTicks() - creation_time_);
creation_time_ = base::TimeTicks();
}
- DVLOG(2) << GetStateString(state_) << " -> " << GetStateString(next_state);
+ DVLOG(1) << GetStateString(state_) << " -> " << GetStateString(next_state);
state_ = next_state;
media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(next_state));
@@ -237,8 +213,7 @@ const char* Pipeline::GetStateString(State state) {
RETURN_STRING(kInitVideoRenderer);
RETURN_STRING(kInitPrerolling);
RETURN_STRING(kSeeking);
- RETURN_STRING(kStarting);
- RETURN_STRING(kStarted);
+ RETURN_STRING(kPlaying);
RETURN_STRING(kStopping);
RETURN_STRING(kStopped);
}
@@ -249,7 +224,7 @@ const char* Pipeline::GetStateString(State state) {
#undef RETURN_STRING
Pipeline::State Pipeline::GetNextState() const {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(stop_cb_.is_null())
<< "State transitions don't happen when stopping";
DCHECK_EQ(status_, PIPELINE_OK)
@@ -275,15 +250,12 @@ Pipeline::State Pipeline::GetNextState() const {
return kInitPrerolling;
case kInitPrerolling:
- return kStarting;
+ return kPlaying;
case kSeeking:
- return kStarting;
-
- case kStarting:
- return kStarted;
+ return kPlaying;
- case kStarted:
+ case kPlaying:
case kStopping:
case kStopped:
break;
@@ -298,13 +270,13 @@ void Pipeline::OnDemuxerError(PipelineStatus error) {
void Pipeline::AddTextStream(DemuxerStream* text_stream,
const TextTrackConfig& config) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::AddTextStreamTask, base::Unretained(this),
text_stream, config));
}
void Pipeline::RemoveTextStream(DemuxerStream* text_stream) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::RemoveTextStreamTask, base::Unretained(this),
text_stream));
}
@@ -314,32 +286,22 @@ void Pipeline::SetError(PipelineStatus error) {
DCHECK_NE(PIPELINE_OK, error);
VLOG(1) << "Media pipeline error: " << error;
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::ErrorChangedTask, base::Unretained(this), error));
media_log_->AddEvent(media_log_->CreatePipelineErrorEvent(error));
}
-void Pipeline::OnAudioDisabled() {
- DCHECK(IsRunning());
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &Pipeline::AudioDisabledTask, base::Unretained(this)));
- media_log_->AddEvent(
- media_log_->CreateEvent(MediaLogEvent::AUDIO_RENDERER_DISABLED));
-}
-
void Pipeline::OnAudioTimeUpdate(TimeDelta time, TimeDelta max_time) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_LE(time.InMicroseconds(), max_time.InMicroseconds());
- DCHECK(IsRunning());
base::AutoLock auto_lock(lock_);
- if (!has_audio_)
- return;
- if (waiting_for_clock_update_ && time < clock_->Elapsed())
+ if (clock_state_ == CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE &&
+ time < clock_->Elapsed()) {
return;
+ }
- // TODO(scherkus): |state_| should only be accessed on pipeline thread, see
- // http://crbug.com/137973
if (state_ == kSeeking)
return;
@@ -348,18 +310,16 @@ void Pipeline::OnAudioTimeUpdate(TimeDelta time, TimeDelta max_time) {
}
void Pipeline::OnVideoTimeUpdate(TimeDelta max_time) {
- DCHECK(IsRunning());
- base::AutoLock auto_lock(lock_);
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (has_audio_)
+ if (audio_renderer_)
return;
- // TODO(scherkus): |state_| should only be accessed on pipeline thread, see
- // http://crbug.com/137973
if (state_ == kSeeking)
return;
- DCHECK(!waiting_for_clock_update_);
+ base::AutoLock auto_lock(lock_);
+ DCHECK_NE(clock_state_, CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE);
clock_->SetMaxTime(max_time);
}
@@ -376,47 +336,14 @@ void Pipeline::SetDuration(TimeDelta duration) {
duration_change_cb_.Run();
}
-void Pipeline::SetTotalBytes(int64 total_bytes) {
- DCHECK(IsRunning());
- media_log_->AddEvent(
- media_log_->CreateStringEvent(
- MediaLogEvent::TOTAL_BYTES_SET, "total_bytes",
- base::Int64ToString(total_bytes)));
- int64 total_mbytes = total_bytes >> 20;
- if (total_mbytes > kint32max)
- total_mbytes = kint32max;
- UMA_HISTOGRAM_CUSTOM_COUNTS(
- "Media.TotalMBytes", static_cast<int32>(total_mbytes), 1, kint32max, 50);
-
- base::AutoLock auto_lock(lock_);
- total_bytes_ = total_bytes;
-}
-
-TimeDelta Pipeline::TimeForByteOffset_Locked(int64 byte_offset) const {
- lock_.AssertAcquired();
- // Use floating point to avoid potential overflow when using 64 bit integers.
- double time_offset_in_ms = clock_->Duration().InMilliseconds() *
- (static_cast<double>(byte_offset) / total_bytes_);
- TimeDelta time_offset(TimeDelta::FromMilliseconds(
- static_cast<int64>(time_offset_in_ms)));
- // Since the byte->time calculation is approximate, fudge the beginning &
- // ending areas to look better.
- TimeDelta epsilon = clock_->Duration() / 100;
- if (time_offset < epsilon)
- return TimeDelta();
- if (time_offset + epsilon > clock_->Duration())
- return clock_->Duration();
- return time_offset;
-}
-
void Pipeline::OnStateTransition(PipelineStatus status) {
// Force post to process state transitions after current execution frame.
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::StateTransitionTask, base::Unretained(this), status));
}
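For reference, the deleted TimeForByteOffset_Locked() mapped a byte offset to media time by linear interpolation, snapping the first and last 1% of the duration to the endpoints because the estimate is approximate. A standalone restatement of that removed logic, in milliseconds for simplicity:

    #include <cassert>
    #include <cstdint>

    // Restates the deleted TimeForByteOffset_Locked(): linear byte->time
    // interpolation with the first/last 1% snapped to the endpoints.
    int64_t TimeForByteOffsetMs(int64_t byte_offset, int64_t total_bytes,
                                int64_t duration_ms) {
      // Floating point avoids overflow in byte_offset * duration_ms.
      double time_ms =
          duration_ms * (static_cast<double>(byte_offset) / total_bytes);
      int64_t t = static_cast<int64_t>(time_ms);
      int64_t epsilon = duration_ms / 100;
      if (t < epsilon)
        return 0;
      if (t + epsilon > duration_ms)
        return duration_ms;
      return t;
    }

    int main() {
      assert(TimeForByteOffsetMs(0, 1000, 60000) == 0);
      assert(TimeForByteOffsetMs(500, 1000, 60000) == 30000);
      assert(TimeForByteOffsetMs(999, 1000, 60000) == 60000);  // snapped
    }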
void Pipeline::StateTransitionTask(PipelineStatus status) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// No-op any state transitions if we're stopping.
if (state_ == kStopping || state_ == kStopped)
@@ -433,11 +360,9 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
// Guard against accidentally clearing |pending_callbacks_| for states that
// use it as well as states that should not be using it.
- //
- // TODO(scherkus): Make every state transition use |pending_callbacks_|.
DCHECK_EQ(pending_callbacks_.get() != NULL,
- (state_ == kInitPrerolling || state_ == kStarting ||
- state_ == kSeeking));
+ (state_ == kInitPrerolling || state_ == kSeeking));
+
pending_callbacks_.reset();
PipelineStatusCB done_cb = base::Bind(
@@ -462,46 +387,43 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
// We do not want to start the clock running. We only want to set the
// base media time so our timestamp calculations will be correct.
clock_->SetTime(demuxer_->GetStartTime(), demuxer_->GetStartTime());
-
- // TODO(scherkus): |has_audio_| should be true no matter what --
- // otherwise people with muted/disabled sound cards will make our
- // default controls look as if every video doesn't contain an audio
- // track.
- has_audio_ = audio_renderer_ != NULL && !audio_disabled_;
- has_video_ = video_renderer_ != NULL;
}
if (!audio_renderer_ && !video_renderer_) {
done_cb.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
return;
}
- buffering_state_cb_.Run(kHaveMetadata);
-
- return DoInitialPreroll(done_cb);
-
- case kStarting:
- return DoPlay(done_cb);
-
- case kStarted:
{
- base::AutoLock l(lock_);
- // We use audio stream to update the clock. So if there is such a
- // stream, we pause the clock until we receive a valid timestamp.
- waiting_for_clock_update_ = true;
- if (!has_audio_) {
- clock_->SetMaxTime(clock_->Duration());
- StartClockIfWaitingForTimeUpdate_Locked();
- }
+ PipelineMetadata metadata;
+ metadata.has_audio = audio_renderer_;
+ metadata.has_video = video_renderer_;
+ metadata.timeline_offset = demuxer_->GetTimelineOffset();
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ if (stream)
+ metadata.natural_size = stream->video_decoder_config().natural_size();
+ metadata_cb_.Run(metadata);
}
- DCHECK(!seek_cb_.is_null());
- DCHECK_EQ(status_, PIPELINE_OK);
+ return DoInitialPreroll(done_cb);
- // Fire canplaythrough immediately after playback begins because of
- // crbug.com/106480.
- // TODO(vrk): set ready state to HaveFutureData when bug above is fixed.
- buffering_state_cb_.Run(kPrerollCompleted);
- return base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+ case kPlaying:
+ PlaybackRateChangedTask(GetPlaybackRate());
+ VolumeChangedTask(GetVolume());
+
+ // We enter this state from either kInitPrerolling or kSeeking. As of now
+ // both those states call Preroll(), which means that by the time we enter
+ // this state we've already buffered enough data. Forcefully update the
+ // buffering state, which starts the clock and renderers and transitions
+ // into the kPlaying state.
+ //
+ // TODO(scherkus): Remove after renderers are taught to fire buffering
+ // state callbacks http://crbug.com/144683
+ DCHECK(WaitingForEnoughData());
+ if (audio_renderer_)
+ BufferingStateChanged(&audio_buffering_state_, BUFFERING_HAVE_ENOUGH);
+ if (video_renderer_)
+ BufferingStateChanged(&video_buffering_state_, BUFFERING_HAVE_ENOUGH);
+ return;
case kStopping:
case kStopped:
@@ -519,7 +441,7 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
// That being said, deleting the renderers while keeping |pending_callbacks_|
// running on the media thread would result in crashes.
void Pipeline::DoInitialPreroll(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!pending_callbacks_.get());
SerialRunner::Queue bound_fns;
@@ -536,6 +458,16 @@ void Pipeline::DoInitialPreroll(const PipelineStatusCB& done_cb) {
bound_fns.Push(base::Bind(
&VideoRenderer::Preroll, base::Unretained(video_renderer_.get()),
seek_timestamp));
+
+ // TODO(scherkus): Remove after VideoRenderer is taught to fire buffering
+ // state callbacks http://crbug.com/144683
+ bound_fns.Push(base::Bind(&VideoRenderer::Play,
+ base::Unretained(video_renderer_.get())));
+ }
+
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Play, base::Unretained(text_renderer_.get())));
}
pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
@@ -544,19 +476,11 @@ void Pipeline::DoInitialPreroll(const PipelineStatusCB& done_cb) {
void Pipeline::DoSeek(
base::TimeDelta seek_timestamp,
const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!pending_callbacks_.get());
SerialRunner::Queue bound_fns;
// Pause.
- if (audio_renderer_) {
- bound_fns.Push(base::Bind(
- &AudioRenderer::Pause, base::Unretained(audio_renderer_.get())));
- }
- if (video_renderer_) {
- bound_fns.Push(base::Bind(
- &VideoRenderer::Pause, base::Unretained(video_renderer_.get())));
- }
if (text_renderer_) {
bound_fns.Push(base::Bind(
&TextRenderer::Pause, base::Unretained(text_renderer_.get())));
@@ -566,10 +490,24 @@ void Pipeline::DoSeek(
if (audio_renderer_) {
bound_fns.Push(base::Bind(
&AudioRenderer::Flush, base::Unretained(audio_renderer_.get())));
+
+ // TODO(scherkus): Remove after AudioRenderer is taught to fire buffering
+ // state callbacks http://crbug.com/144683
+ bound_fns.Push(base::Bind(&Pipeline::BufferingStateChanged,
+ base::Unretained(this),
+ &audio_buffering_state_,
+ BUFFERING_HAVE_NOTHING));
}
if (video_renderer_) {
bound_fns.Push(base::Bind(
&VideoRenderer::Flush, base::Unretained(video_renderer_.get())));
+
+ // TODO(scherkus): Remove after VideoRenderer is taught to fire buffering
+ // state callbacks http://crbug.com/144683
+ bound_fns.Push(base::Bind(&Pipeline::BufferingStateChanged,
+ base::Unretained(this),
+ &video_buffering_state_,
+ BUFFERING_HAVE_NOTHING));
}
if (text_renderer_) {
bound_fns.Push(base::Bind(
@@ -591,27 +529,11 @@ void Pipeline::DoSeek(
bound_fns.Push(base::Bind(
&VideoRenderer::Preroll, base::Unretained(video_renderer_.get()),
seek_timestamp));
- }
-
- pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
-}
-
-void Pipeline::DoPlay(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!pending_callbacks_.get());
- SerialRunner::Queue bound_fns;
- PlaybackRateChangedTask(GetPlaybackRate());
- VolumeChangedTask(GetVolume());
-
- if (audio_renderer_) {
- bound_fns.Push(base::Bind(
- &AudioRenderer::Play, base::Unretained(audio_renderer_.get())));
- }
-
- if (video_renderer_) {
- bound_fns.Push(base::Bind(
- &VideoRenderer::Play, base::Unretained(video_renderer_.get())));
+ // TODO(scherkus): Remove after renderers are taught to fire buffering
+ // state callbacks http://crbug.com/144683
+ bound_fns.Push(base::Bind(&VideoRenderer::Play,
+ base::Unretained(video_renderer_.get())));
}
if (text_renderer_) {
@@ -623,7 +545,7 @@ void Pipeline::DoPlay(const PipelineStatusCB& done_cb) {
}
void Pipeline::DoStop(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!pending_callbacks_.get());
SerialRunner::Queue bound_fns;
@@ -651,7 +573,7 @@ void Pipeline::DoStop(const PipelineStatusCB& done_cb) {
}
void Pipeline::OnStopCompleted(PipelineStatus status) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kStopping);
{
base::AutoLock l(lock_);
@@ -686,13 +608,6 @@ void Pipeline::OnStopCompleted(PipelineStatus status) {
}
}
-void Pipeline::AddBufferedByteRange(int64 start, int64 end) {
- DCHECK(IsRunning());
- base::AutoLock auto_lock(lock_);
- buffered_byte_ranges_.Add(start, end);
- did_loading_progress_ = true;
-}
-
void Pipeline::AddBufferedTimeRange(base::TimeDelta start,
base::TimeDelta end) {
DCHECK(IsRunning());
@@ -701,32 +616,23 @@ void Pipeline::AddBufferedTimeRange(base::TimeDelta start,
did_loading_progress_ = true;
}
-void Pipeline::OnNaturalVideoSizeChanged(const gfx::Size& size) {
- DCHECK(IsRunning());
- media_log_->AddEvent(media_log_->CreateVideoSizeSetEvent(
- size.width(), size.height()));
-
- base::AutoLock auto_lock(lock_);
- natural_size_ = size;
-}
-
void Pipeline::OnAudioRendererEnded() {
- // Force post to process ended messages after current execution frame.
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ // Force post to process ended tasks after current execution frame.
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::DoAudioRendererEnded, base::Unretained(this)));
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::AUDIO_ENDED));
}
void Pipeline::OnVideoRendererEnded() {
- // Force post to process ended messages after current execution frame.
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ // Force post to process ended tasks after current execution frame.
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::DoVideoRendererEnded, base::Unretained(this)));
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::VIDEO_ENDED));
}
void Pipeline::OnTextRendererEnded() {
// Force post to process ended messages after current execution frame.
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::DoTextRendererEnded, base::Unretained(this)));
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::TEXT_ENDED));
}
@@ -740,23 +646,11 @@ void Pipeline::OnUpdateStatistics(const PipelineStatistics& stats) {
statistics_.video_frames_dropped += stats.video_frames_dropped;
}
-void Pipeline::StartTask(scoped_ptr<FilterCollection> filter_collection,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void Pipeline::StartTask() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
CHECK_EQ(kCreated, state_)
<< "Media pipeline cannot be started more than once";
- filter_collection_ = filter_collection.Pass();
- ended_cb_ = ended_cb;
- error_cb_ = error_cb;
- seek_cb_ = seek_cb;
- buffering_state_cb_ = buffering_state_cb;
- duration_change_cb_ = duration_change_cb;
-
text_renderer_ = filter_collection_->GetTextRenderer();
if (text_renderer_) {
@@ -768,7 +662,7 @@ void Pipeline::StartTask(scoped_ptr<FilterCollection> filter_collection,
}
void Pipeline::StopTask(const base::Closure& stop_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(stop_cb_.is_null());
if (state_ == kStopped) {
@@ -788,7 +682,7 @@ void Pipeline::StopTask(const base::Closure& stop_cb) {
}
void Pipeline::ErrorChangedTask(PipelineStatus error) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_NE(PIPELINE_OK, error) << "PIPELINE_OK isn't an error!";
if (state_ == kStopping || state_ == kStopped)
@@ -802,10 +696,10 @@ void Pipeline::ErrorChangedTask(PipelineStatus error) {
}
void Pipeline::PlaybackRateChangedTask(float playback_rate) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Playback rate changes are only carried out while playing.
- if (state_ != kStarting && state_ != kStarted)
+ if (state_ != kPlaying)
return;
{
@@ -820,10 +714,10 @@ void Pipeline::PlaybackRateChangedTask(float playback_rate) {
}
void Pipeline::VolumeChangedTask(float volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Volume changes are only carried out while playing.
- if (state_ != kStarting && state_ != kStarted)
+ if (state_ != kPlaying)
return;
if (audio_renderer_)
@@ -831,11 +725,11 @@ void Pipeline::VolumeChangedTask(float volume) {
}
void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(stop_cb_.is_null());
// Suppress seeking if we're not fully started.
- if (state_ != kStarted) {
+ if (state_ != kPlaying) {
DCHECK(state_ == kStopping || state_ == kStopped)
<< "Receive extra seek in unexpected state: " << state_;
@@ -858,8 +752,7 @@ void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
// Kick off seeking!
{
base::AutoLock auto_lock(lock_);
- if (clock_->IsPlaying())
- clock_->Pause();
+ PauseClockAndStopRendering_Locked();
clock_->SetTime(seek_timestamp, seek_timestamp);
}
DoSeek(seek_timestamp, base::Bind(
@@ -867,16 +760,16 @@ void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
}
void Pipeline::DoAudioRendererEnded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (state_ != kStarted)
+ if (state_ != kPlaying)
return;
DCHECK(!audio_ended_);
audio_ended_ = true;
// Start clock since there is no more audio to trigger clock updates.
- if (!audio_disabled_) {
+ {
base::AutoLock auto_lock(lock_);
clock_->SetMaxTime(clock_->Duration());
StartClockIfWaitingForTimeUpdate_Locked();
@@ -886,9 +779,9 @@ void Pipeline::DoAudioRendererEnded() {
}
void Pipeline::DoVideoRendererEnded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (state_ != kStarted)
+ if (state_ != kPlaying)
return;
DCHECK(!video_ended_);
@@ -898,9 +791,9 @@ void Pipeline::DoVideoRendererEnded() {
}
void Pipeline::DoTextRendererEnded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (state_ != kStarted)
+ if (state_ != kPlaying)
return;
DCHECK(!text_ended_);
@@ -910,9 +803,9 @@ void Pipeline::DoTextRendererEnded() {
}
void Pipeline::RunEndedCallbackIfNeeded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (audio_renderer_ && !audio_ended_ && !audio_disabled_)
+ if (audio_renderer_ && !audio_ended_)
return;
if (video_renderer_ && !video_ended_)
@@ -923,50 +816,36 @@ void Pipeline::RunEndedCallbackIfNeeded() {
{
base::AutoLock auto_lock(lock_);
- clock_->EndOfStream();
+ PauseClockAndStopRendering_Locked();
+ clock_->SetTime(clock_->Duration(), clock_->Duration());
}
DCHECK_EQ(status_, PIPELINE_OK);
ended_cb_.Run();
}
-void Pipeline::AudioDisabledTask() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- base::AutoLock auto_lock(lock_);
- has_audio_ = false;
- audio_disabled_ = true;
-
- // Notify our demuxer that we're no longer rendering audio.
- demuxer_->OnAudioRendererDisabled();
-
- // Start clock since there is no more audio to trigger clock updates.
- clock_->SetMaxTime(clock_->Duration());
- StartClockIfWaitingForTimeUpdate_Locked();
-}
-
void Pipeline::AddTextStreamTask(DemuxerStream* text_stream,
const TextTrackConfig& config) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// TODO(matthewjheaney): fix up text_ended_ when text stream
// is added (http://crbug.com/321446).
text_renderer_->AddTextStream(text_stream, config);
}
void Pipeline::RemoveTextStreamTask(DemuxerStream* text_stream) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
text_renderer_->RemoveTextStream(text_stream);
}
void Pipeline::InitializeDemuxer(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
demuxer_ = filter_collection_->GetDemuxer();
demuxer_->Initialize(this, done_cb, text_renderer_);
}
void Pipeline::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
audio_renderer_ = filter_collection_->GetAudioRenderer();
audio_renderer_->Initialize(
@@ -976,29 +855,19 @@ void Pipeline::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
base::Bind(&Pipeline::OnAudioUnderflow, base::Unretained(this)),
base::Bind(&Pipeline::OnAudioTimeUpdate, base::Unretained(this)),
base::Bind(&Pipeline::OnAudioRendererEnded, base::Unretained(this)),
- base::Bind(&Pipeline::OnAudioDisabled, base::Unretained(this)),
base::Bind(&Pipeline::SetError, base::Unretained(this)));
}
void Pipeline::InitializeVideoRenderer(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
-
- {
- // Get an initial natural size so we have something when we signal
- // the kHaveMetadata buffering state.
- base::AutoLock l(lock_);
- natural_size_ = stream->video_decoder_config().natural_size();
- }
+ DCHECK(task_runner_->BelongsToCurrentThread());
video_renderer_ = filter_collection_->GetVideoRenderer();
video_renderer_->Initialize(
- stream,
+ demuxer_->GetStream(DemuxerStream::VIDEO),
+ demuxer_->GetLiveness() == Demuxer::LIVENESS_LIVE,
done_cb,
base::Bind(&Pipeline::OnUpdateStatistics, base::Unretained(this)),
base::Bind(&Pipeline::OnVideoTimeUpdate, base::Unretained(this)),
- base::Bind(&Pipeline::OnNaturalVideoSizeChanged, base::Unretained(this)),
base::Bind(&Pipeline::OnVideoRendererEnded, base::Unretained(this)),
base::Bind(&Pipeline::SetError, base::Unretained(this)),
base::Bind(&Pipeline::GetMediaTime, base::Unretained(this)),
@@ -1006,25 +875,111 @@ void Pipeline::InitializeVideoRenderer(const PipelineStatusCB& done_cb) {
}
void Pipeline::OnAudioUnderflow() {
- if (!message_loop_->BelongsToCurrentThread()) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ if (!task_runner_->BelongsToCurrentThread()) {
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&Pipeline::OnAudioUnderflow, base::Unretained(this)));
return;
}
- if (state_ != kStarted)
+ if (state_ != kPlaying)
return;
if (audio_renderer_)
audio_renderer_->ResumeAfterUnderflow();
}
+void Pipeline::BufferingStateChanged(BufferingState* buffering_state,
+ BufferingState new_buffering_state) {
+  DVLOG(1) << __FUNCTION__ << "(" << *buffering_state << ", "
+           << new_buffering_state << ") "
+           << (buffering_state == &audio_buffering_state_ ? "audio" : "video");
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ bool was_waiting_for_enough_data = WaitingForEnoughData();
+ *buffering_state = new_buffering_state;
+
+ // Renderer underflowed.
+ if (!was_waiting_for_enough_data && WaitingForEnoughData()) {
+ StartWaitingForEnoughData();
+ return;
+ }
+
+ // Renderer prerolled.
+ if (was_waiting_for_enough_data && !WaitingForEnoughData()) {
+ StartPlayback();
+ return;
+ }
+}
+
+bool Pipeline::WaitingForEnoughData() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (state_ != kPlaying)
+ return false;
+ if (audio_renderer_ && audio_buffering_state_ != BUFFERING_HAVE_ENOUGH)
+ return true;
+ if (video_renderer_ && video_buffering_state_ != BUFFERING_HAVE_ENOUGH)
+ return true;
+ return false;
+}
+
+void Pipeline::StartWaitingForEnoughData() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, kPlaying);
+ DCHECK(WaitingForEnoughData());
+
+ base::AutoLock auto_lock(lock_);
+ PauseClockAndStopRendering_Locked();
+}
+
+void Pipeline::StartPlayback() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, kPlaying);
+ DCHECK_EQ(clock_state_, CLOCK_PAUSED);
+ DCHECK(!WaitingForEnoughData());
+
+ if (audio_renderer_) {
+    // We use the audio stream to update the clock, so if there is such a
+    // stream, we pause the clock until we receive a valid audio timestamp.
+ base::AutoLock auto_lock(lock_);
+ clock_state_ = CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE;
+ audio_renderer_->StartRendering();
+ } else {
+ base::AutoLock auto_lock(lock_);
+ clock_state_ = CLOCK_PLAYING;
+ clock_->SetMaxTime(clock_->Duration());
+ clock_->Play();
+ }
+
+ preroll_completed_cb_.Run();
+ if (!seek_cb_.is_null())
+ base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+}
+
+void Pipeline::PauseClockAndStopRendering_Locked() {
+ lock_.AssertAcquired();
+ switch (clock_state_) {
+ case CLOCK_PAUSED:
+ return;
+
+ case CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE:
+ audio_renderer_->StopRendering();
+ break;
+
+ case CLOCK_PLAYING:
+ if (audio_renderer_)
+ audio_renderer_->StopRendering();
+ clock_->Pause();
+ break;
+ }
+
+ clock_state_ = CLOCK_PAUSED;
+}
+
void Pipeline::StartClockIfWaitingForTimeUpdate_Locked() {
lock_.AssertAcquired();
- if (!waiting_for_clock_update_)
+ if (clock_state_ != CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE)
return;
- waiting_for_clock_update_ = false;
+ clock_state_ = CLOCK_PLAYING;
clock_->Play();
}
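
Taken together, the hunks above collapse the old Play/Pause renderer round-trips into a single rule: playback starts when every active renderer reports BUFFERING_HAVE_ENOUGH, and pauses again when any renderer drops back to BUFFERING_HAVE_NOTHING. The following minimal sketch illustrates just that transition logic; it is an illustration only, not the Chromium class, and it omits the lock_, the task-runner checks, and the audio clock handling shown in the diff.

#include <iostream>

enum BufferingState { BUFFERING_HAVE_NOTHING, BUFFERING_HAVE_ENOUGH };

struct BufferingSketch {
  BufferingSketch(bool audio, bool video)
      : has_audio(audio),
        has_video(video),
        audio_state(BUFFERING_HAVE_NOTHING),
        video_state(BUFFERING_HAVE_NOTHING) {}

  // Mirrors Pipeline::WaitingForEnoughData(): any active renderer that is
  // short of data keeps the whole pipeline waiting.
  bool WaitingForEnoughData() const {
    if (has_audio && audio_state != BUFFERING_HAVE_ENOUGH) return true;
    if (has_video && video_state != BUFFERING_HAVE_ENOUGH) return true;
    return false;
  }

  // Mirrors Pipeline::BufferingStateChanged(): compare the waiting state
  // before and after the update to detect preroll completion or underflow.
  void BufferingStateChanged(BufferingState* state, BufferingState new_state) {
    bool was_waiting = WaitingForEnoughData();
    *state = new_state;
    if (!was_waiting && WaitingForEnoughData())
      std::cout << "underflow: pause clock, stop rendering\n";
    else if (was_waiting && !WaitingForEnoughData())
      std::cout << "preroll complete: start playback\n";
  }

  bool has_audio, has_video;
  BufferingState audio_state, video_state;
};

int main() {
  BufferingSketch p(true, true);
  p.BufferingStateChanged(&p.audio_state, BUFFERING_HAVE_ENOUGH);  // Still waiting on video.
  p.BufferingStateChanged(&p.video_state, BUFFERING_HAVE_ENOUGH);  // Prints "preroll complete".
  p.BufferingStateChanged(&p.video_state, BUFFERING_HAVE_NOTHING); // Prints "underflow".
  return 0;
}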
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
index 222091fcdbf..b40cd3c98c7 100644
--- a/chromium/media/base/pipeline.h
+++ b/chromium/media/base/pipeline.h
@@ -13,6 +13,7 @@
#include "base/threading/thread_checker.h"
#include "base/time/default_tick_clock.h"
#include "media/base/audio_renderer.h"
+#include "media/base/buffering_state.h"
#include "media/base/demuxer.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
@@ -21,7 +22,7 @@
#include "ui/gfx/size.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
class TimeDelta;
}
@@ -34,8 +35,20 @@ class TextRenderer;
class TextTrackConfig;
class VideoRenderer;
+// Metadata describing a pipeline once it has been initialized.
+struct PipelineMetadata {
+ PipelineMetadata() : has_audio(false), has_video(false) {}
+
+ bool has_audio;
+ bool has_video;
+ gfx::Size natural_size;
+ base::Time timeline_offset;
+};
+
+typedef base::Callback<void(PipelineMetadata)> PipelineMetadataCB;
+
// Pipeline runs the media pipeline. Filters are created and called on the
-// message loop injected into this object. Pipeline works like a state
+// task runner injected into this object. Pipeline works like a state
// machine to perform asynchronous initialization, pausing, seeking and playing.
//
// Here's a state diagram that describes the lifetime of this object.
@@ -46,14 +59,13 @@ class VideoRenderer;
// [ InitXXX (for each filter) ] [ Stopping ]
// | |
// V V
-// [ InitPreroll ] [ Stopped ]
+// [ InitPrerolling ] [ Stopped ]
// |
// V
-// [ Starting ] <-- [ Seeking ]
+// [ Playing ] <-- [ Seeking ]
// | ^
-// V |
-// [ Started ] ----------'
-// Seek()
+// `---------------'
+// Seek()
//
// Initialization is a series of state transitions from "Created" through each
// filter initialization state. When all filter initialization states have
@@ -67,23 +79,8 @@ class VideoRenderer;
// "Stopped" state.
class MEDIA_EXPORT Pipeline : public DemuxerHost {
public:
- // Buffering states the pipeline transitions between during playback.
- // kHaveMetadata:
- // Indicates that the following things are known:
- // content duration, natural size, start time, and whether the content has
- // audio and/or video in supported formats.
- // kPrerollCompleted:
- // All renderers have buffered enough data to satisfy preroll and are ready
- // to start playback.
- enum BufferingState {
- kHaveMetadata,
- kPrerollCompleted,
- };
-
- typedef base::Callback<void(BufferingState)> BufferingStateCB;
-
- // Constructs a media pipeline that will execute on |message_loop|.
- Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ // Constructs a media pipeline that will execute on |task_runner|.
+ Pipeline(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
MediaLog* media_log);
virtual ~Pipeline();
@@ -97,18 +94,23 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// The following permanent callbacks will be executed as follows up until
// Stop() has completed:
// |ended_cb| will be executed whenever the media reaches the end.
- // |error_cb| will be executed whenever an error occurs but hasn't
- // been reported already through another callback.
- // |buffering_state_cb| Optional callback that will be executed whenever the
- // pipeline's buffering state changes.
- // |duration_change_cb| Optional callback that will be executed whenever the
+ // |error_cb| will be executed whenever an error occurs but hasn't been
+ // reported already through another callback.
+ // |metadata_cb| will be executed when the content duration, container video
+ // size, start time, and whether the content has audio and/or
+ // video in supported formats are known.
+ // |preroll_completed_cb| will be executed when all renderers have buffered
+ // enough data to satisfy preroll and are ready to
+ // start playback.
+ // |duration_change_cb| optional callback that will be executed whenever the
// presentation duration changes.
// It is an error to call this method after the pipeline has already started.
void Start(scoped_ptr<FilterCollection> filter_collection,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
+ const PipelineMetadataCB& metadata_cb,
+ const base::Closure& preroll_completed_cb,
const base::Closure& duration_change_cb);
// Asynchronously stops the pipeline, executing |stop_cb| when the pipeline
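
For callers of Start(), the single BufferingStateCB becomes two callbacks with sharper contracts: |metadata_cb| fires once, when duration, natural size, and stream presence are known, while |preroll_completed_cb| fires whenever all renderers have buffered enough, both after Start() and after each Seek(). A hedged usage sketch follows; PlayerSketch and its handler methods are hypothetical stand-ins for a real client and are not part of this diff.

void PlayerSketch::StartPipeline(scoped_ptr<FilterCollection> collection) {
  pipeline_->Start(
      collection.Pass(),
      base::Bind(&PlayerSketch::OnEnded, base::Unretained(this)),
      base::Bind(&PlayerSketch::OnError, base::Unretained(this)),
      base::Bind(&PlayerSketch::OnSeekDone, base::Unretained(this)),
      base::Bind(&PlayerSketch::OnMetadata, base::Unretained(this)),
      base::Bind(&PlayerSketch::OnPrerollCompleted, base::Unretained(this)),
      base::Bind(&PlayerSketch::OnDurationChange, base::Unretained(this)));
}

void PlayerSketch::OnMetadata(PipelineMetadata metadata) {
  // These fields replace the removed HasAudio()/HasVideo() and
  // GetNaturalVideoSize() accessors (see the pipeline.h hunks below).
  has_video_ = metadata.has_video;
  natural_size_ = metadata.natural_size;
}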
@@ -134,12 +136,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// the pipeline.
bool IsRunning() const;
- // Returns true if the media has audio.
- bool HasAudio() const;
-
- // Returns true if the media has video.
- bool HasVideo() const;
-
// Gets the current playback rate of the pipeline. When the pipeline is
// started, the playback rate will be 0.0f. A rate of 1.0f indicates
// that the pipeline is rendering the media at the standard rate. Valid
@@ -168,24 +164,15 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
base::TimeDelta GetMediaTime() const;
// Get approximate time ranges of buffered media.
- Ranges<base::TimeDelta> GetBufferedTimeRanges();
+ Ranges<base::TimeDelta> GetBufferedTimeRanges() const;
// Get the duration of the media in microseconds. If the duration has not
// been determined yet, then returns 0.
base::TimeDelta GetMediaDuration() const;
- // Get the total size of the media file. If the size has not yet been
- // determined or can not be determined, this value is 0.
- int64 GetTotalBytes() const;
-
- // Gets the natural size of the video output in pixel units. If there is no
- // video or the video has not been rendered yet, the width and height will
- // be 0.
- void GetNaturalVideoSize(gfx::Size* out_size) const;
-
// Return true if loading progress has been made since the last time this
// method was called.
- bool DidLoadingProgress() const;
+ bool DidLoadingProgress();
// Gets the current pipeline statistics.
PipelineStatistics GetStatistics() const;
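
The dropped const qualifier here is deliberate: DidLoadingProgress() is a consuming getter whose read resets the progress flag, which is also why a later hunk removes the mutable qualifier from did_loading_progress_. A minimal sketch of the contract (locking omitted):

#include <cassert>

class LoadingProgressSketch {
 public:
  LoadingProgressSketch() : did_loading_progress_(false) {}

  // Called whenever the demuxer reports newly buffered data.
  void AddBufferedTimeRange() { did_loading_progress_ = true; }

  // Returns true at most once per burst of progress: reading resets the flag.
  bool DidLoadingProgress() {
    bool progressed = did_loading_progress_;
    did_loading_progress_ = false;
    return progressed;
  }

 private:
  bool did_loading_progress_;
};

int main() {
  LoadingProgressSketch p;
  p.AddBufferedTimeRange();
  assert(p.DidLoadingProgress());   // First read observes the progress...
  assert(!p.DidLoadingProgress());  // ...and consumes it.
  return 0;
}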
@@ -195,8 +182,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
private:
FRIEND_TEST_ALL_PREFIXES(PipelineTest, GetBufferedTimeRanges);
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, DisableAudioRenderer);
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, DisableAudioRendererDuringInit);
FRIEND_TEST_ALL_PREFIXES(PipelineTest, EndedCallback);
FRIEND_TEST_ALL_PREFIXES(PipelineTest, AudioStreamShorterThanVideo);
friend class MediaLog;
@@ -209,8 +194,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
kInitVideoRenderer,
kInitPrerolling,
kSeeking,
- kStarting,
- kStarted,
+ kPlaying,
kStopping,
kStopped,
};
@@ -225,13 +209,9 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// and |seek_pending_|.
void FinishSeek();
- // DataSourceHost (by way of DemuxerHost) implementation.
- virtual void SetTotalBytes(int64 total_bytes) OVERRIDE;
- virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE;
+  // DemuxerHost implementation.
virtual void AddBufferedTimeRange(base::TimeDelta start,
base::TimeDelta end) OVERRIDE;
-
- // DemuxerHost implementaion.
virtual void SetDuration(base::TimeDelta duration) OVERRIDE;
virtual void OnDemuxerError(PipelineStatus error) OVERRIDE;
virtual void AddTextStream(DemuxerStream* text_stream,
@@ -243,9 +223,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Safe to call from any thread.
void SetError(PipelineStatus error);
- // Callback executed when the natural size of the video has changed.
- void OnNaturalVideoSizeChanged(const gfx::Size& size);
-
// Callbacks executed when a renderer has ended.
void OnAudioRendererEnded();
void OnVideoRendererEnded();
@@ -254,9 +231,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Callback executed by filters to update statistics.
void OnUpdateStatistics(const PipelineStatistics& stats);
- // Callback executed by audio renderer when it has been disabled.
- void OnAudioDisabled();
-
// Callback executed by audio renderer to update clock time.
void OnAudioTimeUpdate(base::TimeDelta time, base::TimeDelta max_time);
@@ -264,14 +238,9 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
void OnVideoTimeUpdate(base::TimeDelta max_time);
// The following "task" methods correspond to the public methods, but these
- // methods are run as the result of posting a task to the PipelineInternal's
- // message loop.
- void StartTask(scoped_ptr<FilterCollection> filter_collection,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb);
+ // methods are run as the result of posting a task to the Pipeline's
+ // task runner.
+ void StartTask();
// Stops and destroys all filters, placing the pipeline in the kStopped state.
void StopTask(const base::Closure& stop_cb);
@@ -295,9 +264,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
void DoTextRendererEnded();
void RunEndedCallbackIfNeeded();
- // Carries out disabling the audio renderer.
- void AudioDisabledTask();
-
// Carries out adding a new text stream to the text renderer.
void AddTextStreamTask(DemuxerStream* text_stream,
const TextTrackConfig& config);
@@ -338,10 +304,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
  // independent from seeking.
void DoSeek(base::TimeDelta seek_timestamp, const PipelineStatusCB& done_cb);
- // Updates playback rate and volume and initiates an asynchronous play call
- // sequence executing |done_cb| with the final status when completed.
- void DoPlay(const PipelineStatusCB& done_cb);
-
// Initiates an asynchronous pause-flush-stop call sequence executing
// |done_cb| when completed.
void DoStop(const PipelineStatusCB& done_cb);
@@ -349,10 +311,26 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
void OnAudioUnderflow();
+  // Collection of callback methods and helpers for tracking changes in
+  // buffering state and transitions between the paused/underflow states
+  // and the playing state.
+ //
+ // While in the kPlaying state:
+ // - A waiting to non-waiting transition indicates preroll has completed
+ // and StartPlayback() should be called
+ // - A non-waiting to waiting transition indicates underflow has occurred
+ // and StartWaitingForEnoughData() should be called
+ void BufferingStateChanged(BufferingState* buffering_state,
+ BufferingState new_buffering_state);
+ bool WaitingForEnoughData() const;
+ void StartWaitingForEnoughData();
+ void StartPlayback();
+
+ void PauseClockAndStopRendering_Locked();
void StartClockIfWaitingForTimeUpdate_Locked();
- // Message loop used to execute pipeline tasks.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ // Task runner used to execute pipeline tasks.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// MediaLog to which to log events.
scoped_refptr<MediaLog> media_log_;
@@ -363,27 +341,20 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Whether or not the pipeline is running.
bool running_;
- // Amount of available buffered data. Set by filters.
- Ranges<int64> buffered_byte_ranges_;
+ // Amount of available buffered data as reported by |demuxer_|.
Ranges<base::TimeDelta> buffered_time_ranges_;
- // True when AddBufferedByteRange() has been called more recently than
+ // True when AddBufferedTimeRange() has been called more recently than
// DidLoadingProgress().
- mutable bool did_loading_progress_;
-
- // Total size of the media. Set by filters.
- int64 total_bytes_;
-
- // Video's natural width and height. Set by filters.
- gfx::Size natural_size_;
+ bool did_loading_progress_;
// Current volume level (from 0.0f to 1.0f). This value is set immediately
- // via SetVolume() and a task is dispatched on the message loop to notify the
+ // via SetVolume() and a task is dispatched on the task runner to notify the
// filters.
float volume_;
// Current playback rate (>= 0.0f). This value is set immediately via
- // SetPlaybackRate() and a task is dispatched on the message loop to notify
+ // SetPlaybackRate() and a task is dispatched on the task runner to notify
// the filters.
float playback_rate_;
@@ -395,10 +366,18 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// by filters.
scoped_ptr<Clock> clock_;
- // If this value is set to true, then |clock_| is paused and we are waiting
- // for an update of the clock greater than or equal to the elapsed time to
- // start the clock.
- bool waiting_for_clock_update_;
+ enum ClockState {
+ // Audio (if present) is not rendering. Clock isn't playing.
+ CLOCK_PAUSED,
+
+ // Audio (if present) is rendering. Clock isn't playing.
+ CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE,
+
+ // Audio (if present) is rendering. Clock is playing.
+ CLOCK_PLAYING,
+ };
+
+ ClockState clock_state_;
// Status of the pipeline. Initialized to PIPELINE_OK which indicates that
// the pipeline is operating correctly. Any other value indicates that the
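
The ClockState enum above replaces the single waiting_for_clock_update_ flag with an explicit three-state machine. A small self-contained sketch of the legal transitions follows; function names such as OnAudioTimeUpdate are illustrative stand-ins for the callbacks shown in the pipeline.cc hunks, and the real code performs each transition under lock_.

#include <cassert>

enum ClockState {
  CLOCK_PAUSED,                         // Nothing rendering, clock stopped.
  CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE,  // Audio rendering, clock stopped.
  CLOCK_PLAYING,                        // Clock advancing.
};

struct ClockSketch {
  ClockSketch() : state(CLOCK_PAUSED) {}

  // Mirrors StartPlayback(): with audio, wait for the first audio timestamp
  // before letting the clock run; without audio, start the clock immediately.
  void StartPlayback(bool has_audio) {
    assert(state == CLOCK_PAUSED);
    state = has_audio ? CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE : CLOCK_PLAYING;
  }

  // Mirrors StartClockIfWaitingForTimeUpdate_Locked(): the first valid audio
  // timestamp releases the clock.
  void OnAudioTimeUpdate() {
    if (state == CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE)
      state = CLOCK_PLAYING;
  }

  // Mirrors PauseClockAndStopRendering_Locked(): legal from any state.
  void Pause() { state = CLOCK_PAUSED; }

  ClockState state;
};

int main() {
  ClockSketch c;
  c.StartPlayback(true);  // -> CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE
  c.OnAudioTimeUpdate();  // -> CLOCK_PLAYING
  c.Pause();              // -> CLOCK_PAUSED
  return 0;
}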
@@ -406,15 +385,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// reset the pipeline state, and restore this to PIPELINE_OK.
PipelineStatus status_;
- // Whether the media contains rendered audio or video streams.
- // TODO(fischman,scherkus): replace these with checks for
- // {audio,video}_decoder_ once extraction of {Audio,Video}Decoder from the
- // Filter heirarchy is done.
- bool has_audio_;
- bool has_video_;
-
// The following data members are only accessed by tasks posted to
- // |message_loop_|.
+ // |task_runner_|.
// Member that tracks the current state.
State state_;
@@ -424,8 +396,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
bool video_ended_;
bool text_ended_;
- // Set to true in DisableAudioRendererTask().
- bool audio_disabled_;
+ BufferingState audio_buffering_state_;
+ BufferingState video_buffering_state_;
// Temporary callback used for Start() and Seek().
PipelineStatusCB seek_cb_;
@@ -436,7 +408,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Permanent callbacks passed in via Start().
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
- BufferingStateCB buffering_state_cb_;
+ PipelineMetadataCB metadata_cb_;
+ base::Closure preroll_completed_cb_;
base::Closure duration_change_cb_;
// Contains the demuxer and renderers to use when initializing.
diff --git a/chromium/media/base/pipeline_status.h b/chromium/media/base/pipeline_status.h
index a9f8585f573..15e5c9dd76c 100644
--- a/chromium/media/base/pipeline_status.h
+++ b/chromium/media/base/pipeline_status.h
@@ -32,7 +32,8 @@ enum PipelineStatus {
DEMUXER_ERROR_NO_SUPPORTED_STREAMS = 14,
// Decoder related errors.
DECODER_ERROR_NOT_SUPPORTED = 15,
- PIPELINE_STATUS_MAX, // Must be greater than all other values logged.
+ // Must be equal to the largest value ever logged.
+ PIPELINE_STATUS_MAX = DECODER_ERROR_NOT_SUPPORTED,
};
typedef base::Callback<void(PipelineStatus)> PipelineStatusCB;
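
The sentinel redefinition matters because PipelineStatus values feed UMA enumeration histograms, whose upper boundary is exclusive. A hedged sketch of the reporting pattern follows; the function and histogram name are assumptions, since the actual call site is not part of this diff.

#include "base/metrics/histogram.h"
#include "media/base/pipeline_status.h"

void ReportPipelineError(media::PipelineStatus status) {
  // UMA_HISTOGRAM_ENUMERATION takes an exclusive boundary, so a sentinel
  // defined as "largest value ever logged" is passed as MAX + 1.
  UMA_HISTOGRAM_ENUMERATION("Media.PipelineStatus", status,
                            media::PIPELINE_STATUS_MAX + 1);
}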
diff --git a/chromium/media/base/pipeline_unittest.cc b/chromium/media/base/pipeline_unittest.cc
index a7a8cae316c..45cc73ba36f 100644
--- a/chromium/media/base/pipeline_unittest.cc
+++ b/chromium/media/base/pipeline_unittest.cc
@@ -23,6 +23,7 @@
#include "ui/gfx/size.h"
using ::testing::_;
+using ::testing::AnyNumber;
using ::testing::DeleteArg;
using ::testing::DoAll;
// TODO(scherkus): Remove InSequence after refactoring Pipeline.
@@ -38,11 +39,7 @@ using ::testing::WithArg;
namespace media {
-// Demuxer properties.
-const int kTotalBytes = 1024;
-
ACTION_P(SetDemuxerProperties, duration) {
- arg0->SetTotalBytes(kTotalBytes);
arg0->SetDuration(duration);
}
@@ -66,7 +63,8 @@ class CallbackHelper {
MOCK_METHOD0(OnStop, void());
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD1(OnError, void(PipelineStatus));
- MOCK_METHOD1(OnBufferingState, void(Pipeline::BufferingState));
+ MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
+ MOCK_METHOD0(OnPrerollCompleted, void());
MOCK_METHOD0(OnDurationChange, void());
private:
@@ -111,6 +109,12 @@ class PipelineTest : public ::testing::Test {
EXPECT_CALL(*demuxer_, GetStartTime())
.WillRepeatedly(Return(base::TimeDelta()));
+
+ EXPECT_CALL(*demuxer_, GetTimelineOffset())
+ .WillRepeatedly(Return(base::Time()));
+
+ EXPECT_CALL(*demuxer_, GetLiveness())
+ .WillRepeatedly(Return(Demuxer::LIVENESS_UNKNOWN));
}
virtual ~PipelineTest() {
@@ -166,7 +170,7 @@ class PipelineTest : public ::testing::Test {
// Sets up expectations to allow the video renderer to initialize.
void InitializeVideoRenderer(DemuxerStream* stream) {
EXPECT_CALL(*video_renderer_, Initialize(stream, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
// Startup sequence.
@@ -177,17 +181,10 @@ class PipelineTest : public ::testing::Test {
}
// Sets up expectations to allow the audio renderer to initialize.
- void InitializeAudioRenderer(DemuxerStream* stream,
- bool disable_after_init_cb) {
- if (disable_after_init_cb) {
- EXPECT_CALL(*audio_renderer_, Initialize(stream, _, _, _, _, _, _, _))
- .WillOnce(DoAll(RunCallback<1>(PIPELINE_OK),
- WithArg<6>(RunClosure<0>()))); // |disabled_cb|.
- } else {
- EXPECT_CALL(*audio_renderer_, Initialize(stream, _, _, _, _, _, _, _))
- .WillOnce(DoAll(SaveArg<4>(&audio_time_cb_),
- RunCallback<1>(PIPELINE_OK)));
- }
+ void InitializeAudioRenderer(DemuxerStream* stream) {
+ EXPECT_CALL(*audio_renderer_, Initialize(stream, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<4>(&audio_time_cb_),
+ RunCallback<1>(PIPELINE_OK)));
}
void AddTextStream() {
@@ -203,7 +200,7 @@ class PipelineTest : public ::testing::Test {
EXPECT_CALL(callbacks_, OnStart(start_status));
if (start_status == PIPELINE_OK) {
- EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kHaveMetadata));
+ EXPECT_CALL(callbacks_, OnMetadata(_)).WillOnce(SaveArg<0>(&metadata_));
if (audio_stream_) {
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
@@ -212,10 +209,9 @@ class PipelineTest : public ::testing::Test {
// Startup sequence.
EXPECT_CALL(*audio_renderer_, Preroll(base::TimeDelta(), _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*audio_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StartRendering());
}
- EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+ EXPECT_CALL(callbacks_, OnPrerollCompleted());
}
pipeline_->Start(
@@ -223,7 +219,8 @@ class PipelineTest : public ::testing::Test {
base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingState,
+ base::Bind(&CallbackHelper::OnMetadata, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnPrerollCompleted,
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)));
@@ -240,7 +237,8 @@ class PipelineTest : public ::testing::Test {
}
void CreateTextStream() {
- scoped_ptr<FakeTextTrackStream> text_stream(new FakeTextTrackStream);
+ scoped_ptr<FakeTextTrackStream> text_stream(new FakeTextTrackStream());
+ EXPECT_CALL(*text_stream, OnRead()).Times(AnyNumber());
text_stream_ = text_stream.Pass();
}
@@ -262,21 +260,17 @@ class PipelineTest : public ::testing::Test {
.WillOnce(RunCallback<1>(PIPELINE_OK));
if (audio_stream_) {
- EXPECT_CALL(*audio_renderer_, Pause(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
.WillOnce(RunClosure<0>());
EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
EXPECT_CALL(*audio_renderer_, SetVolume(_));
- EXPECT_CALL(*audio_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StartRendering());
}
if (video_stream_) {
- EXPECT_CALL(*video_renderer_, Pause(_))
- .WillOnce(RunClosure<0>());
EXPECT_CALL(*video_renderer_, Flush(_))
.WillOnce(RunClosure<0>());
EXPECT_CALL(*video_renderer_, Preroll(seek_time, _))
@@ -286,7 +280,7 @@ class PipelineTest : public ::testing::Test {
.WillOnce(RunClosure<0>());
}
- EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+ EXPECT_CALL(callbacks_, OnPrerollCompleted());
// We expect a successful seek callback.
EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
@@ -340,6 +334,7 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<FakeTextTrackStream> text_stream_;
AudioRenderer::TimeCB audio_time_cb_;
VideoDecoderConfig video_decoder_config_;
+ PipelineMetadata metadata_;
private:
DISALLOW_COPY_AND_ASSIGN(PipelineTest);
@@ -351,8 +346,6 @@ TEST_F(PipelineTest, NotStarted) {
const base::TimeDelta kZero;
EXPECT_FALSE(pipeline_->IsRunning());
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_FALSE(pipeline_->HasVideo());
// Setting should still work.
EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
@@ -371,14 +364,6 @@ TEST_F(PipelineTest, NotStarted) {
EXPECT_TRUE(kZero == pipeline_->GetMediaTime());
EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
EXPECT_TRUE(kZero == pipeline_->GetMediaDuration());
-
- EXPECT_EQ(0, pipeline_->GetTotalBytes());
-
- // Should always get set to zero.
- gfx::Size size(1, 1);
- pipeline_->GetNaturalVideoSize(&size);
- EXPECT_EQ(0, size.width());
- EXPECT_EQ(0, size.height());
}
TEST_F(PipelineTest, NeverInitializes) {
@@ -393,7 +378,8 @@ TEST_F(PipelineTest, NeverInitializes) {
base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingState,
+ base::Bind(&CallbackHelper::OnMetadata, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnPrerollCompleted,
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)));
@@ -431,11 +417,11 @@ TEST_F(PipelineTest, AudioStream) {
streams.push_back(audio_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_FALSE(pipeline_->HasVideo());
+ EXPECT_TRUE(metadata_.has_audio);
+ EXPECT_FALSE(metadata_.has_video);
}
TEST_F(PipelineTest, VideoStream) {
@@ -447,8 +433,8 @@ TEST_F(PipelineTest, VideoStream) {
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
+ EXPECT_FALSE(metadata_.has_audio);
+ EXPECT_TRUE(metadata_.has_video);
}
TEST_F(PipelineTest, AudioVideoStream) {
@@ -459,12 +445,12 @@ TEST_F(PipelineTest, AudioVideoStream) {
streams.push_back(video_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
+ EXPECT_TRUE(metadata_.has_audio);
+ EXPECT_TRUE(metadata_.has_video);
}
TEST_F(PipelineTest, VideoTextStream) {
@@ -477,8 +463,8 @@ TEST_F(PipelineTest, VideoTextStream) {
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
+ EXPECT_FALSE(metadata_.has_audio);
+ EXPECT_TRUE(metadata_.has_video);
AddTextStream();
message_loop_.RunUntilIdle();
@@ -494,11 +480,11 @@ TEST_F(PipelineTest, VideoAudioTextStream) {
InitializeDemuxer(&streams);
InitializeVideoRenderer(video_stream());
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
+ EXPECT_TRUE(metadata_.has_audio);
+ EXPECT_TRUE(metadata_.has_video);
AddTextStream();
message_loop_.RunUntilIdle();
@@ -513,7 +499,7 @@ TEST_F(PipelineTest, Seek) {
streams.push_back(video_stream());
InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializeVideoRenderer(video_stream());
// Initialize then seek!
@@ -534,7 +520,7 @@ TEST_F(PipelineTest, SetVolume) {
streams.push_back(audio_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
// The audio renderer should receive a call to SetVolume().
float expected = 0.5f;
@@ -557,7 +543,6 @@ TEST_F(PipelineTest, Properties) {
InitializePipeline(PIPELINE_OK);
EXPECT_EQ(kDuration.ToInternalValue(),
pipeline_->GetMediaDuration().ToInternalValue());
- EXPECT_EQ(kTotalBytes, pipeline_->GetTotalBytes());
EXPECT_FALSE(pipeline_->DidLoadingProgress());
}
@@ -575,86 +560,18 @@ TEST_F(PipelineTest, GetBufferedTimeRanges) {
EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
EXPECT_FALSE(pipeline_->DidLoadingProgress());
- pipeline_->AddBufferedByteRange(0, kTotalBytes / 8);
+ pipeline_->AddBufferedTimeRange(base::TimeDelta(), kDuration / 8);
EXPECT_TRUE(pipeline_->DidLoadingProgress());
EXPECT_FALSE(pipeline_->DidLoadingProgress());
EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
- pipeline_->AddBufferedTimeRange(base::TimeDelta(), kDuration / 8);
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
base::TimeDelta kSeekTime = kDuration / 2;
ExpectSeek(kSeekTime);
DoSeek(kSeekTime);
- EXPECT_TRUE(pipeline_->DidLoadingProgress());
EXPECT_FALSE(pipeline_->DidLoadingProgress());
- pipeline_->AddBufferedByteRange(kTotalBytes / 2,
- kTotalBytes / 2 + kTotalBytes / 8);
- EXPECT_TRUE(pipeline_->DidLoadingProgress());
- EXPECT_FALSE(pipeline_->DidLoadingProgress());
- EXPECT_EQ(2u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
- EXPECT_EQ(kDuration / 2, pipeline_->GetBufferedTimeRanges().start(1));
- EXPECT_EQ(kDuration / 2 + kDuration / 8,
- pipeline_->GetBufferedTimeRanges().end(1));
-
- pipeline_->AddBufferedTimeRange(kDuration / 4, 3 * kDuration / 8);
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
- EXPECT_EQ(kDuration / 4, pipeline_->GetBufferedTimeRanges().start(1));
- EXPECT_EQ(3* kDuration / 8, pipeline_->GetBufferedTimeRanges().end(1));
- EXPECT_EQ(kDuration / 2, pipeline_->GetBufferedTimeRanges().start(2));
- EXPECT_EQ(kDuration / 2 + kDuration / 8,
- pipeline_->GetBufferedTimeRanges().end(2));
-}
-
-TEST_F(PipelineTest, DisableAudioRenderer) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-
- EXPECT_CALL(*demuxer_, OnAudioRendererDisabled());
- pipeline_->OnAudioDisabled();
-
- // Verify that ended event is fired when video ends.
- EXPECT_CALL(callbacks_, OnEnded());
- pipeline_->OnVideoRendererEnded();
-}
-
-TEST_F(PipelineTest, DisableAudioRendererDuringInit) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), true);
- InitializeVideoRenderer(video_stream());
-
- EXPECT_CALL(*demuxer_, OnAudioRendererDisabled());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-
- // Verify that ended event is fired when video ends.
- EXPECT_CALL(callbacks_, OnEnded());
- pipeline_->OnVideoRendererEnded();
}
TEST_F(PipelineTest, EndedCallback) {
@@ -666,7 +583,7 @@ TEST_F(PipelineTest, EndedCallback) {
streams.push_back(video_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
@@ -679,6 +596,7 @@ TEST_F(PipelineTest, EndedCallback) {
pipeline_->OnVideoRendererEnded();
message_loop_.RunUntilIdle();
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(callbacks_, OnEnded());
text_stream()->SendEosNotification();
message_loop_.RunUntilIdle();
@@ -693,12 +611,12 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
streams.push_back(audio_stream());
streams.push_back(video_stream());
- // Replace the clock so we can simulate wallclock time advancing w/o using
+ // Replace the clock so we can simulate wall clock time advancing w/o using
// Sleep().
pipeline_->SetClockForTesting(new Clock(&test_tick_clock_));
InitializeDemuxer(&streams, duration);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
@@ -728,6 +646,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
EXPECT_GT(pipeline_->GetMediaTime().ToInternalValue(), start_time);
// Signal end of video stream and make sure OnEnded() callback occurs.
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(callbacks_, OnEnded());
pipeline_->OnVideoRendererEnded();
}
@@ -738,7 +657,7 @@ TEST_F(PipelineTest, ErrorDuringSeek) {
streams.push_back(audio_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializePipeline(PIPELINE_OK);
float playback_rate = 1.0f;
@@ -749,8 +668,7 @@ TEST_F(PipelineTest, ErrorDuringSeek) {
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
// Preroll() isn't called as the demuxer errors out first.
- EXPECT_CALL(*audio_renderer_, Pause(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
.WillOnce(RunClosure<0>());
EXPECT_CALL(*audio_renderer_, Stop(_))
@@ -792,7 +710,7 @@ TEST_F(PipelineTest, NoMessageDuringTearDownFromError) {
streams.push_back(audio_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializePipeline(PIPELINE_OK);
// Trigger additional requests on the pipeline during tear down from error.
@@ -804,8 +722,7 @@ TEST_F(PipelineTest, NoMessageDuringTearDownFromError) {
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
// Seek() isn't called as the demuxer errors out first.
- EXPECT_CALL(*audio_renderer_, Pause(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
.WillOnce(RunClosure<0>());
EXPECT_CALL(*audio_renderer_, Stop(_))
@@ -832,8 +749,8 @@ TEST_F(PipelineTest, StartTimeIsZero) {
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
+ EXPECT_FALSE(metadata_.has_audio);
+ EXPECT_TRUE(metadata_.has_video);
EXPECT_EQ(base::TimeDelta(), pipeline_->GetMediaTime());
}
@@ -853,8 +770,8 @@ TEST_F(PipelineTest, StartTimeIsNonZero) {
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
+ EXPECT_FALSE(metadata_.has_audio);
+ EXPECT_TRUE(metadata_.has_video);
EXPECT_EQ(kStartTime, pipeline_->GetMediaTime());
}
@@ -872,7 +789,7 @@ TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
streams.push_back(audio_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializePipeline(PIPELINE_OK);
float playback_rate = 1.0f;
@@ -895,18 +812,16 @@ TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
.WillOnce(DoAll(InvokeWithoutArgs(&closure, &base::Closure::Run),
RunCallback<1>(PIPELINE_OK)));
- EXPECT_CALL(*audio_renderer_, Pause(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
.WillOnce(RunClosure<0>());
EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
EXPECT_CALL(*audio_renderer_, SetVolume(_));
- EXPECT_CALL(*audio_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StartRendering());
- EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+ EXPECT_CALL(callbacks_, OnPrerollCompleted());
EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
DoSeek(seek_time);
@@ -929,7 +844,7 @@ TEST_F(PipelineTest, DeleteAfterStop) {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
+ InitializeAudioRenderer(audio_stream());
InitializePipeline(PIPELINE_OK);
ExpectStop();
@@ -945,11 +860,9 @@ class PipelineTeardownTest : public PipelineTest {
kInitDemuxer,
kInitAudioRenderer,
kInitVideoRenderer,
- kPausing,
kFlushing,
kSeeking,
kPrerolling,
- kStarting,
kPlaying,
};
@@ -970,11 +883,9 @@ class PipelineTeardownTest : public PipelineTest {
DoInitialize(state, stop_or_error);
break;
- case kPausing:
case kFlushing:
case kSeeking:
case kPrerolling:
- case kStarting:
DoInitialize(state, stop_or_error);
DoSeek(state, stop_or_error);
break;
@@ -1000,7 +911,8 @@ class PipelineTeardownTest : public PipelineTest {
base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingState,
+ base::Bind(&CallbackHelper::OnMetadata, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnPrerollCompleted,
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)));
@@ -1038,13 +950,13 @@ class PipelineTeardownTest : public PipelineTest {
if (state == kInitAudioRenderer) {
if (stop_or_error == kStop) {
- EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _, _))
+ EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
RunCallback<1>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnStop());
} else {
status = PIPELINE_ERROR_INITIALIZATION_FAILED;
- EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _, _))
+ EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _))
.WillOnce(RunCallback<1>(status));
}
@@ -1053,19 +965,19 @@ class PipelineTeardownTest : public PipelineTest {
return status;
}
- EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _, _))
+ EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
if (state == kInitVideoRenderer) {
if (stop_or_error == kStop) {
EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
- RunCallback<1>(PIPELINE_OK)));
+ RunCallback<2>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnStop());
} else {
status = PIPELINE_ERROR_INITIALIZATION_FAILED;
EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<1>(status));
+ .WillOnce(RunCallback<2>(status));
}
EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
@@ -1075,9 +987,9 @@ class PipelineTeardownTest : public PipelineTest {
}
EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
- EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kHaveMetadata));
+ EXPECT_CALL(callbacks_, OnMetadata(_));
// If we get here it's a successful initialization.
EXPECT_CALL(*audio_renderer_, Preroll(base::TimeDelta(), _))
@@ -1089,13 +1001,12 @@ class PipelineTeardownTest : public PipelineTest {
EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
- EXPECT_CALL(*audio_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StartRendering());
EXPECT_CALL(*video_renderer_, Play(_))
.WillOnce(RunClosure<0>());
if (status == PIPELINE_OK)
- EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+ EXPECT_CALL(callbacks_, OnPrerollCompleted());
return status;
}
@@ -1124,21 +1035,7 @@ class PipelineTeardownTest : public PipelineTest {
base::Closure stop_cb = base::Bind(
&CallbackHelper::OnStop, base::Unretained(&callbacks_));
- if (state == kPausing) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*audio_renderer_, Pause(_))
- .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb), RunClosure<0>()));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*audio_renderer_, Pause(_)).WillOnce(
- DoAll(SetError(pipeline_.get(), status), RunClosure<0>()));
- }
-
- return status;
- }
-
- EXPECT_CALL(*audio_renderer_, Pause(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*video_renderer_, Pause(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, StopRendering());
if (state == kFlushing) {
if (stop_or_error == kStop) {
@@ -1197,18 +1094,6 @@ class PipelineTeardownTest : public PipelineTest {
EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
- if (state == kStarting) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*audio_renderer_, Play(_))
- .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb), RunClosure<0>()));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*audio_renderer_, Play(_)).WillOnce(
- DoAll(SetError(pipeline_.get(), status), RunClosure<0>()));
- }
- return status;
- }
-
NOTREACHED() << "State not supported: " << state;
return status;
}
@@ -1254,21 +1139,17 @@ class PipelineTeardownTest : public PipelineTest {
INSTANTIATE_TEARDOWN_TEST(Stop, InitDemuxer);
INSTANTIATE_TEARDOWN_TEST(Stop, InitAudioRenderer);
INSTANTIATE_TEARDOWN_TEST(Stop, InitVideoRenderer);
-INSTANTIATE_TEARDOWN_TEST(Stop, Pausing);
INSTANTIATE_TEARDOWN_TEST(Stop, Flushing);
INSTANTIATE_TEARDOWN_TEST(Stop, Seeking);
INSTANTIATE_TEARDOWN_TEST(Stop, Prerolling);
-INSTANTIATE_TEARDOWN_TEST(Stop, Starting);
INSTANTIATE_TEARDOWN_TEST(Stop, Playing);
INSTANTIATE_TEARDOWN_TEST(Error, InitDemuxer);
INSTANTIATE_TEARDOWN_TEST(Error, InitAudioRenderer);
INSTANTIATE_TEARDOWN_TEST(Error, InitVideoRenderer);
-INSTANTIATE_TEARDOWN_TEST(Error, Pausing);
INSTANTIATE_TEARDOWN_TEST(Error, Flushing);
INSTANTIATE_TEARDOWN_TEST(Error, Seeking);
INSTANTIATE_TEARDOWN_TEST(Error, Prerolling);
-INSTANTIATE_TEARDOWN_TEST(Error, Starting);
INSTANTIATE_TEARDOWN_TEST(Error, Playing);
INSTANTIATE_TEARDOWN_TEST(ErrorAndStop, Playing);
diff --git a/chromium/media/base/player_tracker.cc b/chromium/media/base/player_tracker.cc
new file mode 100644
index 00000000000..909aaecfe45
--- /dev/null
+++ b/chromium/media/base/player_tracker.cc
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/player_tracker.h"
+
+namespace media {
+
+PlayerTracker::PlayerTracker() {
+}
+
+PlayerTracker::~PlayerTracker() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/player_tracker.h b/chromium/media/base/player_tracker.h
new file mode 100644
index 00000000000..0ed1b3f8001
--- /dev/null
+++ b/chromium/media/base/player_tracker.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_PLAYER_TRACKER_H_
+#define MEDIA_BASE_PLAYER_TRACKER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// An interface for players to register to be notified when a new decryption key
+// becomes available or when the CDM is unset.
+class MEDIA_EXPORT PlayerTracker {
+ public:
+ virtual ~PlayerTracker();
+
+ // Registers player callbacks with the CDM.
+ // - |new_key_cb| is fired when a new decryption key becomes available.
+ // - |cdm_unset_cb| is fired when the CDM is detached from the player.
+ // Returns a registration ID which can be used to unregister a player.
+ virtual int RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) = 0;
+
+ // Unregisters a previously registered player. This should be called when
+ // the CDM is detached from the player (e.g. setMediaKeys(0)), or when the
+ // player is destroyed.
+ virtual void UnregisterPlayer(int registration_id) = 0;
+
+ protected:
+ PlayerTracker();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PlayerTracker);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_PLAYER_TRACKER_H_
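
The new header only declares the registration contract; no implementation ships with it. A hedged sketch of one way a CDM-side class could satisfy the interface follows; CdmPlayerTrackerSketch and NotifyNewKey are hypothetical and written in the codebase's C++03-era style (OVERRIDE, base::Closure).

#include <map>

#include "base/callback.h"
#include "media/base/player_tracker.h"

class CdmPlayerTrackerSketch : public media::PlayerTracker {
 public:
  CdmPlayerTrackerSketch() : next_registration_id_(1) {}

  // Hand out monotonically increasing IDs and keep the callbacks in maps
  // until the player unregisters.
  virtual int RegisterPlayer(const base::Closure& new_key_cb,
                             const base::Closure& cdm_unset_cb) OVERRIDE {
    int registration_id = next_registration_id_++;
    new_key_cbs_[registration_id] = new_key_cb;
    cdm_unset_cbs_[registration_id] = cdm_unset_cb;
    return registration_id;
  }

  virtual void UnregisterPlayer(int registration_id) OVERRIDE {
    new_key_cbs_.erase(registration_id);
    cdm_unset_cbs_.erase(registration_id);
  }

  // Invoked by the CDM when a new decryption key becomes usable; notifies
  // every registered player so it can retry pending decrypts.
  void NotifyNewKey() {
    for (std::map<int, base::Closure>::const_iterator it =
             new_key_cbs_.begin();
         it != new_key_cbs_.end(); ++it) {
      it->second.Run();
    }
  }

 private:
  int next_registration_id_;
  std::map<int, base::Closure> new_key_cbs_;
  std::map<int, base::Closure> cdm_unset_cbs_;
};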
diff --git a/chromium/media/base/run_all_perftests.cc b/chromium/media/base/run_all_perftests.cc
new file mode 100644
index 00000000000..811bb9be574
--- /dev/null
+++ b/chromium/media/base/run_all_perftests.cc
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "build/build_config.h"
+#include "media/base/media.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/jni_android.h"
+#include "media/base/android/media_jni_registrar.h"
+#include "ui/gl/android/gl_jni_registrar.h"
+#endif
+
+class TestSuiteNoAtExit : public base::TestSuite {
+ public:
+ TestSuiteNoAtExit(int argc, char** argv) : TestSuite(argc, argv) {}
+ virtual ~TestSuiteNoAtExit() {}
+ protected:
+ virtual void Initialize() OVERRIDE;
+};
+
+void TestSuiteNoAtExit::Initialize() {
+ // Run TestSuite::Initialize first so that logging is initialized.
+ base::TestSuite::Initialize();
+
+#if defined(OS_ANDROID)
+ // Register JNI bindings for android.
+ JNIEnv* env = base::android::AttachCurrentThread();
+ // Needed for surface texture support.
+ ui::gl::android::RegisterJni(env);
+ media::RegisterJni(env);
+#endif
+
+ // Run this here instead of main() to ensure an AtExitManager is already
+ // present.
+ media::InitializeMediaLibraryForTesting();
+}
+
+int main(int argc, char** argv) {
+ TestSuiteNoAtExit test_suite(argc, argv);
+
+ // Always run the perf tests serially, to avoid distorting
+ // perf measurements with randomness resulting from running
+ // in parallel.
+ return base::LaunchUnitTestsSerially(
+ argc, argv, base::Bind(&TestSuiteNoAtExit::Run,
+ base::Unretained(&test_suite)));
+}
diff --git a/chromium/media/base/run_all_unittests.cc b/chromium/media/base/run_all_unittests.cc
index f1a0092814c..985a46e62b0 100644
--- a/chromium/media/base/run_all_unittests.cc
+++ b/chromium/media/base/run_all_unittests.cc
@@ -3,12 +3,10 @@
// found in the LICENSE file.
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/test_suite.h"
#include "build/build_config.h"
#include "media/base/media.h"
-#include "media/base/media_switches.h"
#if defined(OS_ANDROID)
#include "base/android/jni_android.h"
@@ -39,8 +37,6 @@ void TestSuiteNoAtExit::Initialize() {
// Run this here instead of main() to ensure an AtExitManager is already
// present.
media::InitializeMediaLibraryForTesting();
- CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- cmd_line->AppendSwitch(switches::kEnableMP3StreamParser);
}
int main(int argc, char** argv) {
diff --git a/chromium/media/base/sample_format.cc b/chromium/media/base/sample_format.cc
index a4791cd6861..cf8f20f5632 100644
--- a/chromium/media/base/sample_format.cc
+++ b/chromium/media/base/sample_format.cc
@@ -21,8 +21,6 @@ int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
case kSampleFormatF32:
case kSampleFormatPlanarF32:
return 4;
- case kSampleFormatMax:
- break;
}
NOTREACHED() << "Invalid sample format provided: " << sample_format;
@@ -45,8 +43,6 @@ const char* SampleFormatToString(SampleFormat sample_format) {
return "Signed 16-bit planar";
case kSampleFormatPlanarF32:
return "Float 32-bit planar";
- case kSampleFormatMax:
- break;
}
NOTREACHED() << "Invalid sample format provided: " << sample_format;
return "";
diff --git a/chromium/media/base/sample_format.h b/chromium/media/base/sample_format.h
index 3d2799fa128..7c3df702157 100644
--- a/chromium/media/base/sample_format.h
+++ b/chromium/media/base/sample_format.h
@@ -12,7 +12,8 @@ namespace media {
enum SampleFormat {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a sample format replace it with a dummy value; when
- // adding a sample format, do so at the bottom before kSampleFormatMax.
+ // adding a sample format, do so at the bottom before kSampleFormatMax, and
+ // update the value of kSampleFormatMax.
kUnknownSampleFormat = 0,
kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
kSampleFormatS16, // Signed 16-bit.
@@ -21,8 +22,8 @@ enum SampleFormat {
kSampleFormatPlanarS16, // Signed 16-bit planar.
kSampleFormatPlanarF32, // Float 32-bit planar.
- // Must always be last!
- kSampleFormatMax
+ // Must always be equal to the largest value ever logged.
+ kSampleFormatMax = kSampleFormatPlanarF32,
};
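The reason kSampleFormatMax must track the largest logged value rather than act as a count: UMA enumeration macros take an exclusive upper bound. A hedged sketch of such a logging site (the actual call site is not part of this hunk; the histogram name is illustrative):

    // kSampleFormatMax + 1 is the exclusive histogram bound; keeping a
    // trailing sentinel would silently widen the histogram whenever a new
    // format is appended.
    UMA_HISTOGRAM_ENUMERATION("Media.AudioSampleFormat", sample_format,
                              kSampleFormatMax + 1);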
// Returns the number of bytes used per channel for the specified
diff --git a/chromium/media/base/seekable_buffer_unittest.cc b/chromium/media/base/seekable_buffer_unittest.cc
index c5e3fb635ed..898ea45e8c7 100644
--- a/chromium/media/base/seekable_buffer_unittest.cc
+++ b/chromium/media/base/seekable_buffer_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <cstdlib>
+
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
@@ -22,18 +24,19 @@ class SeekableBufferTest : public testing::Test {
static const int kWriteSize = 512;
virtual void SetUp() {
- // Setup seed.
- int seed = static_cast<int32>(base::Time::Now().ToInternalValue());
- srand(seed);
- VLOG(1) << "Random seed: " << seed;
+ // Note: We use srand() and rand() rather than base::RandXXX() to improve
+ // unit test performance. We don't need good random numbers, just
+ // something that generates "mixed data."
+ const unsigned int kKnownSeed = 0x98765432;
+ srand(kKnownSeed);
- // Creates a test data.
+ // Create random test data samples.
for (int i = 0; i < kDataSize; i++)
data_[i] = static_cast<char>(rand());
}
int GetRandomInt(int maximum) {
- return rand() % maximum + 1;
+ return rand() % (maximum + 1);
}
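The change above also shifts the generated range, which is easy to miss; a quick self-contained check:

    #include <cassert>
    #include <cstdlib>
    int main() {
      for (int i = 0; i < 1000; ++i) {
        const int old_style = rand() % 5 + 1;    // Values in [1, 5]; never 0.
        const int new_style = rand() % (5 + 1);  // Values in [0, 5], inclusive.
        assert(old_style >= 1 && old_style <= 5);
        assert(new_style >= 0 && new_style <= 5);
      }
      return 0;
    }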
SeekableBuffer buffer_;
diff --git a/chromium/media/base/serial_runner.cc b/chromium/media/base/serial_runner.cc
index dfc4a0b9fc3..779566c7941 100644
--- a/chromium/media/base/serial_runner.cc
+++ b/chromium/media/base/serial_runner.cc
@@ -6,11 +6,20 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/message_loop/message_loop.h"
+#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
namespace media {
+// Converts a Closure into a bound function accepting a PipelineStatusCB.
+static void RunClosure(
+ const base::Closure& closure,
+ const PipelineStatusCB& status_cb) {
+ closure.Run();
+ status_cb.Run(PIPELINE_OK);
+}
+
// Converts a bound function accepting a Closure into a bound function
// accepting a PipelineStatusCB. Since closures have no way of reporting a
// status |status_cb| is executed with PIPELINE_OK.
@@ -20,19 +29,23 @@ static void RunBoundClosure(
bound_closure.Run(base::Bind(status_cb, PIPELINE_OK));
}
-// Runs |status_cb| with |last_status| on |message_loop|.
-static void RunOnMessageLoop(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+// Runs |status_cb| with |last_status| on |task_runner|.
+static void RunOnTaskRunner(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const PipelineStatusCB& status_cb,
PipelineStatus last_status) {
// Force post to permit cancellation of a series in the scenario where all
// bound functions run on the same thread.
- message_loop->PostTask(FROM_HERE, base::Bind(status_cb, last_status));
+ task_runner->PostTask(FROM_HERE, base::Bind(status_cb, last_status));
}
SerialRunner::Queue::Queue() {}
SerialRunner::Queue::~Queue() {}
+void SerialRunner::Queue::Push(const base::Closure& closure) {
+ bound_fns_.push(base::Bind(&RunClosure, closure));
+}
+
void SerialRunner::Queue::Push(
const BoundClosure& bound_closure) {
bound_fns_.push(base::Bind(&RunBoundClosure, bound_closure));
@@ -53,17 +66,19 @@ bool SerialRunner::Queue::empty() {
return bound_fns_.empty();
}
-SerialRunner::SerialRunner(
- const Queue& bound_fns, const PipelineStatusCB& done_cb)
- : weak_this_(this),
- message_loop_(base::MessageLoopProxy::current()),
+SerialRunner::SerialRunner(const Queue& bound_fns,
+ const PipelineStatusCB& done_cb)
+ : task_runner_(base::MessageLoopProxy::current()),
bound_fns_(bound_fns),
- done_cb_(done_cb) {
+ done_cb_(done_cb),
+ weak_factory_(this) {
// Respect both cancellation and calling stack guarantees for |done_cb|
// when empty.
if (bound_fns_.empty()) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr(), PIPELINE_OK));
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&SerialRunner::RunNextInSeries,
+ weak_factory_.GetWeakPtr(),
+ PIPELINE_OK));
return;
}
@@ -80,7 +95,7 @@ scoped_ptr<SerialRunner> SerialRunner::Run(
}
void SerialRunner::RunNextInSeries(PipelineStatus last_status) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!done_cb_.is_null());
if (bound_fns_.empty() || last_status != PIPELINE_OK) {
@@ -89,8 +104,10 @@ void SerialRunner::RunNextInSeries(PipelineStatus last_status) {
}
BoundPipelineStatusCB bound_fn = bound_fns_.Pop();
- bound_fn.Run(base::Bind(&RunOnMessageLoop, message_loop_, base::Bind(
- &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr())));
+ bound_fn.Run(base::Bind(
+ &RunOnTaskRunner,
+ task_runner_,
+ base::Bind(&SerialRunner::RunNextInSeries, weak_factory_.GetWeakPtr())));
}
} // namespace media
diff --git a/chromium/media/base/serial_runner.h b/chromium/media/base/serial_runner.h
index eaae625cd43..9750e212031 100644
--- a/chromium/media/base/serial_runner.h
+++ b/chromium/media/base/serial_runner.h
@@ -15,7 +15,7 @@
#include "media/base/pipeline_status.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -34,6 +34,7 @@ class MEDIA_EXPORT SerialRunner {
Queue();
~Queue();
+ void Push(const base::Closure& closure);
void Push(const BoundClosure& bound_fn);
void Push(const BoundPipelineStatusCB& bound_fn);
@@ -71,11 +72,13 @@ class MEDIA_EXPORT SerialRunner {
void RunNextInSeries(PipelineStatus last_status);
- base::WeakPtrFactory<SerialRunner> weak_this_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
Queue bound_fns_;
PipelineStatusCB done_cb_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<SerialRunner> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(SerialRunner);
};
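Putting the new overload together with the existing ones, a hedged usage sketch (DoAsyncStep, DoSyncStep, and OnDone are illustrative free functions, not media/ code):

    // DoAsyncStep(const base::Closure& done_cb) finishes by running done_cb;
    // DoSyncStep() returns when its work is complete.
    media::SerialRunner::Queue steps;
    steps.Push(base::Bind(&DoAsyncStep));  // BoundClosure overload.
    steps.Push(base::Bind(&DoSyncStep));   // New plain-Closure overload:
                                           // reports PIPELINE_OK implicitly.
    scoped_ptr<media::SerialRunner> runner = media::SerialRunner::Run(
        steps, base::Bind(&OnDone));  // OnDone(PipelineStatus) fires last.
    // Deleting |runner| before completion cancels the remaining steps.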
diff --git a/chromium/media/base/serial_runner_unittest.cc b/chromium/media/base/serial_runner_unittest.cc
index 6d21968c0a4..f18fef88e9c 100644
--- a/chromium/media/base/serial_runner_unittest.cc
+++ b/chromium/media/base/serial_runner_unittest.cc
@@ -35,6 +35,20 @@ class SerialRunnerTest : public ::testing::Test {
called_.push_back(false);
}
+ void PushBoundClosure() {
+ bound_fns_.Push(base::Bind(&SerialRunnerTest::RunBoundClosure,
+ base::Unretained(this),
+ called_.size()));
+ called_.push_back(false);
+ }
+
+ void PushClosure() {
+ bound_fns_.Push(base::Bind(&SerialRunnerTest::RunClosure,
+ base::Unretained(this),
+ called_.size()));
+ called_.push_back(false);
+ }
+
// Push a bound function to the queue that will delete the SerialRunner,
// which should cancel all remaining queued work.
void PushCancellation() {
@@ -61,6 +75,26 @@ class SerialRunnerTest : public ::testing::Test {
status_cb.Run(status);
}
+ void RunBoundClosure(size_t index,
+ const base::Closure& done_cb) {
+ EXPECT_EQ(index == 0u, inside_start_)
+ << "First bound function should run on same stack as "
+ << "SerialRunner::Run() while all others should not\n"
+ << base::debug::StackTrace().ToString();
+
+ called_[index] = true;
+ done_cb.Run();
+ }
+
+ void RunClosure(size_t index) {
+ EXPECT_EQ(index == 0u, inside_start_)
+ << "First bound function should run on same stack as "
+ << "SerialRunner::Run() while all others should not\n"
+ << base::debug::StackTrace().ToString();
+
+ called_[index] = true;
+ }
+
void StartRunnerInternal(const SerialRunner::Queue& bound_fns) {
inside_start_ = true;
runner_ = SerialRunner::Run(bound_fns_, base::Bind(
@@ -173,4 +207,22 @@ TEST_F(SerialRunnerTest, Multiple_Cancel) {
EXPECT_FALSE(done_called());
}
+TEST_F(SerialRunnerTest, BoundClosure) {
+ PushBoundClosure();
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_OK, done_status());
+}
+
+TEST_F(SerialRunnerTest, Closure) {
+ PushClosure();
+ RunSerialRunner();
+
+ EXPECT_TRUE(called(0));
+ EXPECT_TRUE(done_called());
+ EXPECT_EQ(PIPELINE_OK, done_status());
+}
+
} // namespace media
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc b/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc
index d8f8b9caadb..fc887957d61 100644
--- a/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc
@@ -30,12 +30,19 @@ int ConvertRGBToV(const uint8* rgb, int size) {
} // namespace
+// Assembly code confuses MemorySanitizer. Do not run it in MSan builds.
+#if defined(MEMORY_SANITIZER)
+#define MAYBE_SideBySideRGB DISABLED_SideBySideRGB
+#else
+#define MAYBE_SideBySideRGB SideBySideRGB
+#endif
+
// A side-by-side test that verifies our ASM functions that convert RGB pixels
// to YUV pixels can output the expected results. This test converts RGB pixels
// to YUV pixels with our ASM functions (which use SSE, SSE2, SSE3, and SSSE3)
// and compares the output YUV pixels with the ones calculated by our reference
// functions implemented in C++.
-TEST(YUVConvertTest, SideBySideRGB) {
+TEST(YUVConvertTest, MAYBE_SideBySideRGB) {
// We skip this test on PCs that do not support SSE3 because this test
// needs it.
base::CPU cpu;
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb.h b/chromium/media/base/simd/convert_yuv_to_rgb.h
index 2991d562af0..6c0a9661135 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb.h
+++ b/chromium/media/base/simd/convert_yuv_to_rgb.h
@@ -28,7 +28,8 @@ MEDIA_EXPORT void ConvertYUVToRGB32Row_C(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
uint8* rgbframe,
- ptrdiff_t width);
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]);
MEDIA_EXPORT void ConvertYUVAToARGB_C(const uint8* yplane,
const uint8* uplane,
@@ -48,7 +49,8 @@ MEDIA_EXPORT void ConvertYUVAToARGBRow_C(const uint8* yplane,
const uint8* vplane,
const uint8* aplane,
uint8* rgbframe,
- ptrdiff_t width);
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]);
MEDIA_EXPORT void ConvertYUVToRGB32_SSE(const uint8* yplane,
const uint8* uplane,
@@ -90,22 +92,27 @@ MEDIA_EXPORT void ScaleYUVToRGB32Row_C(const uint8* y_buf,
const uint8* v_buf,
uint8* rgb_buf,
ptrdiff_t width,
- ptrdiff_t source_dx);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int dest_width,
- int source_x,
- int source_dx);
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_C(
+ const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32RowWithRange_C(
+ const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int dest_width,
+ int source_x,
+ int source_dx,
+ const int16 convert_table[1024][4]);
} // namespace media
@@ -123,62 +130,75 @@ MEDIA_EXPORT void ConvertYUVToRGB32Row_MMX(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
uint8* rgbframe,
- ptrdiff_t width);
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]);
MEDIA_EXPORT void ConvertYUVAToARGBRow_MMX(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
const uint8* aplane,
uint8* rgbframe,
- ptrdiff_t width);
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]);
MEDIA_EXPORT void ConvertYUVToRGB32Row_SSE(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
uint8* rgbframe,
- ptrdiff_t width);
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]);
MEDIA_EXPORT void ScaleYUVToRGB32Row_MMX(const uint8* y_buf,
const uint8* u_buf,
const uint8* v_buf,
uint8* rgb_buf,
ptrdiff_t width,
- ptrdiff_t source_dx);
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
const uint8* u_buf,
const uint8* v_buf,
uint8* rgb_buf,
ptrdiff_t width,
- ptrdiff_t source_dx);
-
-MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE2_X64(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_SSE(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx);
-
-MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX_X64(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- ptrdiff_t width,
- ptrdiff_t source_dx);
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
+
+MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE2_X64(
+ const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX(
+ const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_SSE(
+ const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX_X64(
+ const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]);
} // extern "C"
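Throughout these declarations the [1024][4] shape mirrors kCoefficientsRgbY's layout. Reading it from the C reference code's convert_table[y], convert_table[256 + u], and convert_table[512 + v] lookups and the 6144-byte alpha offset in the assembly (each row is 4 x int16 = 8 bytes), a simplified scalar sketch of one lookup would be:

    // Rows 0-255 hold Y terms, 256-511 U terms, 512-767 V terms, and
    // 768-1023 alpha multipliers (asm byte offsets 0, 2048, 4096, 6144).
    // Simplified: the real code uses saturating adds (paddsw) plus a clamp.
    inline int BlueContribution(const int16 table[1024][4],
                                uint8 y, uint8 u, uint8 v) {
      return (table[y][0] + table[256 + u][0] + table[512 + v][0]) >> 6;
    }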
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
index 0466112918d..9d6476b07d5 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -38,21 +38,22 @@ namespace media {
static inline void ConvertYUVToRGB32_C(uint8 y,
uint8 u,
uint8 v,
- uint8* rgb_buf) {
- int b = kCoefficientsRgbY[256+u][B_INDEX];
- int g = kCoefficientsRgbY[256+u][G_INDEX];
- int r = kCoefficientsRgbY[256+u][R_INDEX];
- int a = kCoefficientsRgbY[256+u][A_INDEX];
+ uint8* rgb_buf,
+ const int16 convert_table[1024][4]) {
+ int b = convert_table[256+u][B_INDEX];
+ int g = convert_table[256+u][G_INDEX];
+ int r = convert_table[256+u][R_INDEX];
+ int a = convert_table[256+u][A_INDEX];
- b = paddsw(b, kCoefficientsRgbY[512+v][B_INDEX]);
- g = paddsw(g, kCoefficientsRgbY[512+v][G_INDEX]);
- r = paddsw(r, kCoefficientsRgbY[512+v][R_INDEX]);
- a = paddsw(a, kCoefficientsRgbY[512+v][A_INDEX]);
+ b = paddsw(b, convert_table[512+v][B_INDEX]);
+ g = paddsw(g, convert_table[512+v][G_INDEX]);
+ r = paddsw(r, convert_table[512+v][R_INDEX]);
+ a = paddsw(a, convert_table[512+v][A_INDEX]);
- b = paddsw(b, kCoefficientsRgbY[y][B_INDEX]);
- g = paddsw(g, kCoefficientsRgbY[y][G_INDEX]);
- r = paddsw(r, kCoefficientsRgbY[y][R_INDEX]);
- a = paddsw(a, kCoefficientsRgbY[y][A_INDEX]);
+ b = paddsw(b, convert_table[y][B_INDEX]);
+ g = paddsw(g, convert_table[y][G_INDEX]);
+ r = paddsw(r, convert_table[y][R_INDEX]);
+ a = paddsw(a, convert_table[y][A_INDEX]);
b >>= 6;
g >>= 6;
@@ -69,18 +70,19 @@ static inline void ConvertYUVAToARGB_C(uint8 y,
uint8 u,
uint8 v,
uint8 a,
- uint8* rgb_buf) {
- int b = kCoefficientsRgbY[256+u][0];
- int g = kCoefficientsRgbY[256+u][1];
- int r = kCoefficientsRgbY[256+u][2];
+ uint8* rgb_buf,
+ const int16 convert_table[1024][4]) {
+ int b = convert_table[256+u][0];
+ int g = convert_table[256+u][1];
+ int r = convert_table[256+u][2];
- b = paddsw(b, kCoefficientsRgbY[512+v][0]);
- g = paddsw(g, kCoefficientsRgbY[512+v][1]);
- r = paddsw(r, kCoefficientsRgbY[512+v][2]);
+ b = paddsw(b, convert_table[512+v][0]);
+ g = paddsw(g, convert_table[512+v][1]);
+ r = paddsw(r, convert_table[512+v][2]);
- b = paddsw(b, kCoefficientsRgbY[y][0]);
- g = paddsw(g, kCoefficientsRgbY[y][1]);
- r = paddsw(r, kCoefficientsRgbY[y][2]);
+ b = paddsw(b, convert_table[y][0]);
+ g = paddsw(g, convert_table[y][1]);
+ r = paddsw(r, convert_table[y][2]);
b >>= 6;
g >>= 6;
@@ -100,15 +102,16 @@ void ConvertYUVToRGB32Row_C(const uint8* y_buf,
const uint8* u_buf,
const uint8* v_buf,
uint8* rgb_buf,
- ptrdiff_t width) {
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]) {
for (int x = 0; x < width; x += 2) {
uint8 u = u_buf[x >> 1];
uint8 v = v_buf[x >> 1];
uint8 y0 = y_buf[x];
- ConvertYUVToRGB32_C(y0, u, v, rgb_buf);
+ ConvertYUVToRGB32_C(y0, u, v, rgb_buf, convert_table);
if ((x + 1) < width) {
uint8 y1 = y_buf[x + 1];
- ConvertYUVToRGB32_C(y1, u, v, rgb_buf + 4);
+ ConvertYUVToRGB32_C(y1, u, v, rgb_buf + 4, convert_table);
}
rgb_buf += 8; // Advance 2 pixels.
}
@@ -119,17 +122,18 @@ void ConvertYUVAToARGBRow_C(const uint8* y_buf,
const uint8* v_buf,
const uint8* a_buf,
uint8* rgba_buf,
- ptrdiff_t width) {
+ ptrdiff_t width,
+ const int16 convert_table[1024][4]) {
for (int x = 0; x < width; x += 2) {
uint8 u = u_buf[x >> 1];
uint8 v = v_buf[x >> 1];
uint8 y0 = y_buf[x];
uint8 a0 = a_buf[x];
- ConvertYUVAToARGB_C(y0, u, v, a0, rgba_buf);
+ ConvertYUVAToARGB_C(y0, u, v, a0, rgba_buf, convert_table);
if ((x + 1) < width) {
uint8 y1 = y_buf[x + 1];
uint8 a1 = a_buf[x + 1];
- ConvertYUVAToARGB_C(y1, u, v, a1, rgba_buf + 4);
+ ConvertYUVAToARGB_C(y1, u, v, a1, rgba_buf + 4, convert_table);
}
rgba_buf += 8; // Advance 2 pixels.
}
@@ -144,17 +148,18 @@ void ScaleYUVToRGB32Row_C(const uint8* y_buf,
const uint8* v_buf,
uint8* rgb_buf,
ptrdiff_t width,
- ptrdiff_t source_dx) {
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]) {
int x = 0;
for (int i = 0; i < width; i += 2) {
int y = y_buf[x >> 16];
int u = u_buf[(x >> 17)];
int v = v_buf[(x >> 17)];
- ConvertYUVToRGB32_C(y, u, v, rgb_buf);
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf, convert_table);
x += source_dx;
if ((i + 1) < width) {
y = y_buf[x >> 16];
- ConvertYUVToRGB32_C(y, u, v, rgb_buf+4);
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf+4, convert_table);
x += source_dx;
}
rgb_buf += 8;
@@ -166,13 +171,14 @@ void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
const uint8* v_buf,
uint8* rgb_buf,
ptrdiff_t width,
- ptrdiff_t source_dx) {
+ ptrdiff_t source_dx,
+ const int16 convert_table[1024][4]) {
// Avoid point-sampling for down-scaling by > 2:1.
int source_x = 0;
if (source_dx >= 0x20000)
source_x += 0x8000;
LinearScaleYUVToRGB32RowWithRange_C(y_buf, u_buf, v_buf, rgb_buf, width,
- source_x, source_dx);
+ source_x, source_dx, convert_table);
}
void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
@@ -181,7 +187,8 @@ void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
uint8* rgb_buf,
int dest_width,
int x,
- int source_dx) {
+ int source_dx,
+ const int16 convert_table[1024][4]) {
for (int i = 0; i < dest_width; i += 2) {
int y0 = y_buf[x >> 16];
int y1 = y_buf[(x >> 16) + 1];
@@ -194,14 +201,14 @@ void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
int y = (y_frac * y1 + (y_frac ^ 65535) * y0) >> 16;
int u = (uv_frac * u1 + (uv_frac ^ 65535) * u0) >> 16;
int v = (uv_frac * v1 + (uv_frac ^ 65535) * v0) >> 16;
- ConvertYUVToRGB32_C(y, u, v, rgb_buf);
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf, convert_table);
x += source_dx;
if ((i + 1) < dest_width) {
y0 = y_buf[x >> 16];
y1 = y_buf[(x >> 16) + 1];
y_frac = (x & 65535);
y = (y_frac * y1 + (y_frac ^ 65535) * y0) >> 16;
- ConvertYUVToRGB32_C(y, u, v, rgb_buf+4);
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf+4, convert_table);
x += source_dx;
}
rgb_buf += 8;
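The `^ 65535` in these blends is a branch-free complement: for a 16-bit fraction f, f ^ 65535 == 65535 - f, so each line is a 16.16 fixed-point lerp. A self-contained check, including the slight bias the 65535 (rather than 65536) weight introduces:

    #include <cassert>
    int Lerp16(int a, int b, int frac) {  // frac in [0, 65535].
      return (frac * b + (frac ^ 65535) * a) >> 16;
    }
    int main() {
      assert(Lerp16(100, 200, 0) == 99);       // Weight is 65535/65536, not 1.
      assert(Lerp16(100, 200, 32768) == 149);  // Roughly the midpoint.
      assert(Lerp16(100, 200, 65535) == 199);  // Just shy of b, same reason.
      return 0;
    }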
@@ -218,7 +225,7 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
int uvstride,
int rgbstride,
YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
+ unsigned int y_shift = GetVerticalShift(yuv_type);
for (int y = 0; y < height; ++y) {
uint8* rgb_row = rgbframe + y * rgbstride;
const uint8* y_ptr = yplane + y * ystride;
@@ -229,7 +236,8 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
u_ptr,
v_ptr,
rgb_row,
- width);
+ width,
+ GetLookupTable(yuv_type));
}
}
@@ -258,7 +266,8 @@ void ConvertYUVAToARGB_C(const uint8* yplane,
v_ptr,
a_ptr,
rgba_row,
- width);
+ width,
+ GetLookupTable(yuv_type));
}
}
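GetVerticalShift() and GetLookupTable() are defined elsewhere in this patch (in yuv_convert); hedged from the replaced `y_shift = yuv_type` line and the JPEG table added below, the shift helper plausibly reduces to something like:

    // Sketch only, assuming the historical YUVType values YV16 == 0 and
    // YV12 == 1 that the old "y_shift = yuv_type" code relied on. The point
    // of the indirection is that new full-range YUVType values no longer
    // have to double as shift amounts; YV12 chroma is subsampled vertically,
    // YV16's is not. GetLookupTable() likewise selects between
    // kCoefficientsRgbY and the new kCoefficientsRgbY_JPEG per YUVType.
    unsigned int GetVerticalShift(YUVType yuv_type) {
      return yuv_type == YV12 ? 1u : 0u;
    }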
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm
index 39a4f75697d..7395419d442 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm
@@ -17,6 +17,7 @@
; const uint8* u_buf,
; const uint8* v_buf,
; uint8* rgb_buf,
-; ptrdiff_t width);
+; ptrdiff_t width,
+; const int16 convert_table[1024][4]);
%define SYMBOL ConvertYUVToRGB32Row_MMX
%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc
index e38794a814c..4b69d1b13cf 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc
@@ -7,66 +7,9 @@
EXPORT SYMBOL
align function_align
-; Non-PIC code is the fastest so use this if possible.
-%ifndef PIC
mangle(SYMBOL):
%assign stack_offset 0
- PROLOGUE 5, 7, 3, Y, U, V, ARGB, WIDTH, TEMPU, TEMPV
- extern mangle(kCoefficientsRgbY)
- jmp .convertend
-
-.convertloop:
- movzx TEMPUd, BYTE [Uq]
- add Uq, 1
- movzx TEMPVd, BYTE [Vq]
- add Vq, 1
- movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPUq]
- movzx TEMPUd, BYTE [Yq]
- paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPVq]
- movzx TEMPVd, BYTE [Yq + 1]
- movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPUq]
- add Yq, 2
- movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPVq]
- paddsw mm1, mm0
- paddsw mm2, mm0
- psraw mm1, 6
- psraw mm2, 6
- packuswb mm1, mm2
- MOVQ [ARGBq], mm1
- add ARGBq, 8
-
-.convertend:
- sub WIDTHq, 2
- jns .convertloop
-
- ; If number of pixels is odd then compute it.
- and WIDTHq, 1
- jz .convertdone
-
- movzx TEMPUd, BYTE [Uq]
- movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPUq]
- movzx TEMPVd, BYTE [Vq]
- paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPVq]
- movzx TEMPUd, BYTE [Yq]
- movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPUq]
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
- movd [ARGBq], mm1
-
-.convertdone:
- RET
-%endif
-
-; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
-; This code is slower than the above version.
-%ifdef PIC
-mangle(SYMBOL):
- %assign stack_offset 0
- PROLOGUE 5, 7, 3, Y, U, V, ARGB, WIDTH, TEMP, TABLE
-
- extern mangle(kCoefficientsRgbY)
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+ PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, TABLE, TEMP
jmp .convertend
@@ -118,4 +61,3 @@ mangle(SYMBOL):
.convertdone:
RET
-%endif
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm b/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
index 8b3ee582ad2..44b123fcdd1 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
@@ -19,5 +19,6 @@
; const uint8* v_buf,
; uint8* rgb_buf,
-; ptrdiff_t width);
+; ptrdiff_t width,
+; const int16 convert_table[1024][4]);
%define SYMBOL ConvertYUVToRGB32Row_SSE
%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc b/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
index d1d6e16beb7..969890dbbef 100644
--- a/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
@@ -9,6 +9,7 @@
#endif
#include "media/base/simd/convert_yuv_to_rgb.h"
+#include "media/base/simd/yuv_to_rgb_table.h"
#include "media/base/yuv_convert.h"
namespace media {
@@ -23,7 +24,7 @@ void ConvertYUVToRGB32_MMX(const uint8* yplane,
int uvstride,
int rgbstride,
YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
+ unsigned int y_shift = GetVerticalShift(yuv_type);
for (int y = 0; y < height; ++y) {
uint8* rgb_row = rgbframe + y * rgbstride;
const uint8* y_ptr = yplane + y * ystride;
@@ -34,7 +35,8 @@ void ConvertYUVToRGB32_MMX(const uint8* yplane,
u_ptr,
v_ptr,
rgb_row,
- width);
+ width,
+ GetLookupTable(yuv_type));
}
EmptyRegisterState();
@@ -52,7 +54,7 @@ void ConvertYUVAToARGB_MMX(const uint8* yplane,
int astride,
int rgbstride,
YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
+ unsigned int y_shift = GetVerticalShift(yuv_type);
for (int y = 0; y < height; ++y) {
uint8* rgb_row = rgbframe + y * rgbstride;
const uint8* y_ptr = yplane + y * ystride;
@@ -65,7 +67,8 @@ void ConvertYUVAToARGB_MMX(const uint8* yplane,
v_ptr,
a_ptr,
rgb_row,
- width);
+ width,
+ GetLookupTable(yuv_type));
}
EmptyRegisterState();
@@ -81,7 +84,7 @@ void ConvertYUVToRGB32_SSE(const uint8* yplane,
int uvstride,
int rgbstride,
YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
+ unsigned int y_shift = GetVerticalShift(yuv_type);
for (int y = 0; y < height; ++y) {
uint8* rgb_row = rgbframe + y * rgbstride;
const uint8* y_ptr = yplane + y * ystride;
@@ -92,7 +95,8 @@ void ConvertYUVToRGB32_SSE(const uint8* yplane,
u_ptr,
v_ptr,
rgb_row,
- width);
+ width,
+ GetLookupTable(yuv_type));
}
EmptyRegisterState();
diff --git a/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm b/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
index b39315dc461..395f326abbf 100644
--- a/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
+++ b/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
@@ -19,5 +19,6 @@
; const uint8* a_buf,
; uint8* rgb_buf,
-; ptrdiff_t width);
+; ptrdiff_t width,
+; const int16 convert_table[1024][4]);
%define SYMBOL ConvertYUVAToARGBRow_MMX
%include "convert_yuva_to_argb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc b/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc
index bcafb3807ba..d4933836ca8 100644
--- a/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc
+++ b/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc
@@ -7,94 +7,12 @@
EXPORT SYMBOL
align function_align
-; Non-PIC code is the fastest so use this if possible.
-%ifndef PIC
mangle(SYMBOL):
%assign stack_offset 0
- PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
- extern mangle(kCoefficientsRgbY)
- jmp .convertend
-
-.convertloop:
- movzx TEMPd, BYTE [Uq]
- movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
- add Uq, 1
- movzx TEMPd, BYTE [Vq]
- paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
- add Vq, 1
- movzx TEMPd, BYTE [Yq]
- movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
- movzx TEMPd, BYTE [Yq + 1]
- movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
- add Yq, 2
- paddsw mm1, mm0
- paddsw mm2, mm0
- psraw mm1, 6
- psraw mm2, 6
- packuswb mm1, mm2
-
- ; Multiply ARGB by alpha value.
- movq mm0, mm1
- pxor mm2, mm2
- punpcklbw mm0, mm2
- punpckhbw mm1, mm2
- movzx TEMPd, BYTE [Aq]
- movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
- pmullw mm0, mm2
- psrlw mm0, 8
- movzx TEMPd, BYTE [Aq + 1]
- movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
- add Aq, 2
- pmullw mm1, mm2
- psrlw mm1, 8
- packuswb mm0, mm1
-
- MOVQ [ARGBq], mm0
- add ARGBq, 8
-
-.convertend:
- sub WIDTHq, 2
- jns .convertloop
-
- ; If number of pixels is odd then compute it.
- and WIDTHq, 1
- jz .convertdone
-
- movzx TEMPd, BYTE [Uq]
- movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
- movzx TEMPd, BYTE [Vq]
- paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
- movzx TEMPd, BYTE [Yq]
- movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
-
- ; Multiply ARGB by alpha value.
- pxor mm0, mm0
- punpcklbw mm1, mm0
- movzx TEMPd, BYTE [Aq]
- movq mm0, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
- pmullw mm1, mm0
- psrlw mm1, 8
- packuswb mm1, mm1
-
- movd [ARGBq], mm1
-
-.convertdone:
- RET
-%endif
-
-; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
-; This code is slower than the above version.
-%ifdef PIC
-mangle(SYMBOL):
- %assign stack_offset 0
- PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
- extern mangle(kCoefficientsRgbY)
+ PROLOGUE 7, 7, 3, Y, U, V, A, ARGB, WIDTH, TABLE
PUSH WIDTHq
DEFINE_ARGS Y, U, V, A, ARGB, TABLE, TEMP
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+ mov TABLEq, TEMPq
jmp .convertend
.convertloop:
@@ -122,11 +40,25 @@ mangle(SYMBOL):
psraw mm2, 6
packuswb mm1, mm2
- ; Unpack and multiply by alpha value, then repack high bytes of words.
+ ; Unpack
movq mm0, mm1
pxor mm2, mm2
punpcklbw mm0, mm2
punpckhbw mm1, mm2
+
+ ; Add one to our alpha values. This is a somewhat unfortunate hack: while
+ ; the pack/unpack above saturate any negative numbers to 0, they also
+ ; truncate the alpha value to 255. The math ahead wants to produce the
+ ; same ARGB alpha value as the source pixel in YUVA, but this depends on
+ ; the alpha value in |mm0| and |mm1| being 256 (let A be the source image
+ ; alpha: 256 * A >> 8 == A, whereas 255 * A >> 8 is off by one except at 0).
+ mov TEMPq, 0x00010000
+ movd mm2, TEMPd
+ psllq mm2, 32
+ paddsw mm0, mm2
+ paddsw mm1, mm2
+
+ ; Multiply by alpha value, then repack high bytes of words.
movzx TEMPd, BYTE [Aq]
movq mm2, [TABLEq + 6144 + 8 * TEMPq]
pmullw mm0, mm2
@@ -162,6 +94,13 @@ mangle(SYMBOL):
; Multiply ARGB by alpha value.
pxor mm0, mm0
punpcklbw mm1, mm0
+
+ ; See above note about this hack.
+ mov TEMPq, 0x00010000
+ movd mm0, TEMPd
+ psllq mm0, 32
+ paddsw mm1, mm0
+
movzx TEMPd, BYTE [Aq]
movq mm0, [TABLEq + 6144 + 8 * TEMPq]
pmullw mm1, mm0
@@ -173,4 +112,3 @@ mangle(SYMBOL):
.convertdone:
POP TABLEq
RET
-%endif
\ No newline at end of file
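The arithmetic behind the 0x00010000 bias above, as a self-contained check; it mirrors the comment's claim that 255 * A >> 8 is off by one except at 0:

    #include <cassert>
    int main() {
      for (int a = 0; a <= 255; ++a) {
        assert((256 * a) >> 8 == a);                  // Biased word: exact.
        assert((((255 * a) >> 8) == a) == (a == 0));  // Unbiased: off by one.
      }
      return 0;
    }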
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
index 40418340e35..bf2f7080520 100644
--- a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
@@ -19,5 +19,6 @@
; uint8* rgb_buf,
; ptrdiff_t width,
-; ptrdiff_t source_dx);
+; ptrdiff_t source_dx,
+; const int16 convert_table[1024][4]);
%define SYMBOL LinearScaleYUVToRGB32Row_MMX
%include "linear_scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
index 493e9b3694d..48f62acf25a 100644
--- a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
@@ -10,8 +10,6 @@
mangle(SYMBOL):
%assign stack_offset 0
- extern mangle(kCoefficientsRgbY)
-
; Parameters are in the following order:
; 1. Y plane
; 2. U plane
@@ -19,8 +17,9 @@ mangle(SYMBOL):
; 4. ARGB frame
; 5. Width
; 6. Source dx
+; 7. Conversion lookup table
-PROLOGUE 6, 7, 3, Y, R0, R1, ARGB, R2, R3, TEMP
+PROLOGUE 7, 7, 3, Y, R0, R1, ARGB, R2, TEMP, R3
%if gprsize == 8
%define WORD_SIZE QWORD
@@ -34,60 +33,30 @@ PROLOGUE 6, 7, 3, Y, R0, R1, ARGB, R2, R3, TEMP
%define COMPLd R2d ; Component A value
%define U_ARG_REGq R0q ; U plane address argument
%define V_ARG_REGq R1q ; V plane address argument
-%define SOURCE_DX_ARG_REGq R3q ; Source dx argument
+%define SOURCE_DX_ARG_REGq TEMPq ; Source dx argument
%define WIDTH_ARG_REGq R2q ; Width argument
-%ifdef PIC
-; PIC code shared COMPR, U and V with the same register. Need to be careful in the
-; code they don't mix up. This allows R3q to be used for YUV table.
%define COMPRq R0q ; Component B value
%define COMPRd R0d ; Component B value
%define Uq R0q ; U plane address
%define Vq R0q ; V plane address
%define U_PLANE WORD_SIZE [rsp + 3 * gprsize]
%define TABLE R3q ; Address of the table
-%else
-; Non-PIC code defines.
-%define COMPRq R3q ; Component B value
-%define COMPRd R3d ; Component B value
-%define Uq R0q ; U plane address
-%define Vq R3q ; V plane address
-%define TABLE mangle(kCoefficientsRgbY)
-%endif
-; Defines for stack variables. These are used in both PIC and non-PIC code.
+; Defines for stack variables.
%define V_PLANE WORD_SIZE [rsp + 2 * gprsize]
%define SOURCE_DX WORD_SIZE [rsp + gprsize]
%define SOURCE_WIDTH WORD_SIZE [rsp]
-; Handle stack variables differently for PIC and non-PIC code.
-
-%ifdef PIC
-; Define stack usage for PIC code. PIC code push U plane onto stack.
+; Define stack usage.
PUSH U_ARG_REGq
PUSH V_ARG_REGq
PUSH SOURCE_DX_ARG_REGq
imul WIDTH_ARG_REGq, SOURCE_DX_ARG_REGq ; source_width = width * source_dx
PUSH WIDTH_ARG_REGq
-; Load the address of kCoefficientsRgbY into TABLE
- mov TEMPq, SOURCE_DX_ARG_REGq ; Need to save source_dx first
- LOAD_SYM TABLE, mangle(kCoefficientsRgbY)
-%define SOURCE_DX_ARG_REGq TEMPq ; Overwrite SOURCE_DX_ARG_REGq to TEMPq
-%else
-; Define stack usage. Non-PIC code just push 3 registers to stack.
- PUSH V_ARG_REGq
- PUSH SOURCE_DX_ARG_REGq
- imul WIDTH_ARG_REGq, SOURCE_DX_ARG_REGq ; source_width = width * source_dx
- PUSH WIDTH_ARG_REGq
-%endif
-
%macro EPILOGUE 0
-%ifdef PIC
ADD rsp, 4 * gprsize
-%else
- ADD rsp, 3 * gprsize
-%endif
%endmacro
xor Xq, Xq ; x = 0
@@ -97,9 +66,7 @@ PROLOGUE 6, 7, 3, Y, R0, R1, ARGB, R2, R3, TEMP
jmp .lscaleend
.lscaleloop:
-%ifdef PIC
- mov Uq, U_PLANE ; PIC code saves U_PLANE on stack.
-%endif
+ mov Uq, U_PLANE
; Define macros for scaling YUV components since they are reused.
%macro SCALEUV 1
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
index f7e1d908549..89e4e2a6825 100644
--- a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
@@ -32,8 +32,9 @@ mangle(SYMBOL):
; 4. ARGB frame
; 5. Width
; 6. Source dx
+; 7. Conversion lookup table
-PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMPL
+PROLOGUE 7, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, R1
%define TABLEq r10
%define Xq r11
@@ -41,6 +42,9 @@ PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMPL
%define COMPRd r13d
%define COMPRq r13
%define FRACTIONq r14
+%define COMPL R1
+%define COMPLq R1q
+%define COMPLd R1d
PUSH TABLEq
PUSH Xq
@@ -56,7 +60,7 @@ PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMPL
POP TABLEq
%endmacro
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+ mov TABLEq, R1q
imul WIDTHq, SOURCE_DXq ; source_width = width * source_dx
xor Xq, Xq ; x = 0
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
index 583b7cbb0e6..122365149fc 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
@@ -19,5 +19,6 @@
; uint8* rgb_buf,
; ptrdiff_t width,
-; ptrdiff_t source_dx);
+; ptrdiff_t source_dx,
+; const int16 convert_table[1024][4]);
%define SYMBOL ScaleYUVToRGB32Row_MMX
%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
index 2026390ed00..60351db557d 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
@@ -19,8 +19,9 @@ mangle(SYMBOL):
; 4. ARGB frame
; 5. Width
; 6. Source dx
+; 7. Lookup table address
-PROLOGUE 6, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
+PROLOGUE 7, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
%ifdef ARCH_X86_64
%define WORD_SIZE QWORD
@@ -28,34 +29,22 @@ PROLOGUE 6, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
%define WORD_SIZE DWORD
%endif
-%ifdef PIC
PUSH R1q ; Width
-%endif
PUSH R2q ; Source dx
%define SOURCE_DX WORD_SIZE [rsp]
-; PIC code.
-%ifdef PIC
- LOAD_SYM R1q, mangle(kCoefficientsRgbY)
+ mov R1q, TEMPq
+
%define WIDTH WORD_SIZE [rsp + gprsize]
%define TABLE R1q
-%define Xq R2q
-
-; Non-PIC code.
-%else
-%define WIDTH R1q
-%define TABLE mangle(kCoefficientsRgbY)
-%define Xq R2q
-%endif
+%define Xq R2q
; Set Xq index to 0.
xor Xq, Xq
jmp .scaleend
.scaleloop:
- ; TABLE can either be a register or a symbol depending on this is
- ; PIC or not.
mov TEMPq, Xq
sar TEMPq, 17
movzx TEMPd, BYTE [Uq + TEMPq]
@@ -83,8 +72,6 @@ PROLOGUE 6, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
add ARGBq, 8
.scaleend:
- ; WIDTH can either be a register or memory depending on this is
- ; PIC or not.
sub WIDTH, 2
jns .scaleloop
@@ -109,9 +96,5 @@ PROLOGUE 6, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
movd DWORD [ARGBq], mm1
.scaledone:
-%ifdef PIC
ADD rsp, 2 * gprsize
-%else
- ADD rsp, gprsize
-%endif
RET
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm b/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
index 536ed18db07..fc98bbe6bda 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
@@ -19,5 +19,6 @@
; uint8* rgb_buf,
; ptrdiff_t width,
-; ptrdiff_t source_dx);
+; ptrdiff_t source_dx,
+; const int16 convert_table[1024][4]);
%define SYMBOL ScaleYUVToRGB32Row_SSE
%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm b/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
index d6786875a31..cf0d140dbfd 100644
--- a/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
@@ -32,17 +32,21 @@ mangle(SYMBOL):
; 4. ARGB frame
; 5. Width
; 6. Source dx
+; 7. Convert table
-PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMP
+PROLOGUE 7, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, R1
%define TABLEq r10
%define Xq r11
%define INDEXq r12
+%define COMPq R1q
+%define COMPd R1d
+
PUSH r10
PUSH r11
PUSH r12
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+ mov TABLEq, R1q
; Set Xq index to 0.
xor Xq, Xq
diff --git a/chromium/media/base/simd/sinc_resampler_sse.cc b/chromium/media/base/simd/sinc_resampler_sse.cc
deleted file mode 100644
index f0aec1ce673..00000000000
--- a/chromium/media/base/simd/sinc_resampler_sse.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/sinc_resampler.h"
-
-#include <xmmintrin.h>
-
-namespace media {
-
-float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor) {
- __m128 m_input;
- __m128 m_sums1 = _mm_setzero_ps();
- __m128 m_sums2 = _mm_setzero_ps();
-
- // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
- // these loops hurt performance in local testing.
- if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
- for (int i = 0; i < kKernelSize; i += 4) {
- m_input = _mm_loadu_ps(input_ptr + i);
- m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
- m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
- }
- } else {
- for (int i = 0; i < kKernelSize; i += 4) {
- m_input = _mm_load_ps(input_ptr + i);
- m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
- m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
- }
- }
-
- // Linearly interpolate the two "convolutions".
- m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor));
- m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor));
- m_sums1 = _mm_add_ps(m_sums1, m_sums2);
-
- // Sum components together.
- float result;
- m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
- _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
- m_sums2, m_sums2, 1)));
-
- return result;
-}
-
-} // namespace media
diff --git a/chromium/media/base/simd/vector_math_sse.cc b/chromium/media/base/simd/vector_math_sse.cc
deleted file mode 100644
index c2121225cd6..00000000000
--- a/chromium/media/base/simd/vector_math_sse.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/vector_math_testing.h"
-
-#include <algorithm>
-
-#include <xmmintrin.h> // NOLINT
-
-namespace media {
-namespace vector_math {
-
-void FMUL_SSE(const float src[], float scale, int len, float dest[]) {
- const int rem = len % 4;
- const int last_index = len - rem;
- __m128 m_scale = _mm_set_ps1(scale);
- for (int i = 0; i < last_index; i += 4)
- _mm_store_ps(dest + i, _mm_mul_ps(_mm_load_ps(src + i), m_scale));
-
- // Handle any remaining values that wouldn't fit in an SSE pass.
- for (int i = last_index; i < len; ++i)
- dest[i] = src[i] * scale;
-}
-
-void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
- const int rem = len % 4;
- const int last_index = len - rem;
- __m128 m_scale = _mm_set_ps1(scale);
- for (int i = 0; i < last_index; i += 4) {
- _mm_store_ps(dest + i, _mm_add_ps(_mm_load_ps(dest + i),
- _mm_mul_ps(_mm_load_ps(src + i), m_scale)));
- }
-
- // Handle any remaining values that wouldn't fit in an SSE pass.
- for (int i = last_index; i < len; ++i)
- dest[i] += src[i] * scale;
-}
-
-// Convenience macro to extract float 0 through 3 from the vector |a|. This is
-// needed because compilers other than clang don't support access via
-// operator[]().
-#define EXTRACT_FLOAT(a, i) \
- (i == 0 ? \
- _mm_cvtss_f32(a) : \
- _mm_cvtss_f32(_mm_shuffle_ps(a, a, i)))
-
-std::pair<float, float> EWMAAndMaxPower_SSE(
- float initial_value, const float src[], int len, float smoothing_factor) {
- // When the recurrence is unrolled, we see that we can split it into 4
- // separate lanes of evaluation:
- //
- // y[n] = a(S[n]^2) + (1-a)(y[n-1])
- // = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
- // = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
- //
- // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
- //
- // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
- // each of the 4 lanes, and then combine them to give y[n].
-
- const int rem = len % 4;
- const int last_index = len - rem;
-
- const __m128 smoothing_factor_x4 = _mm_set_ps1(smoothing_factor);
- const float weight_prev = 1.0f - smoothing_factor;
- const __m128 weight_prev_x4 = _mm_set_ps1(weight_prev);
- const __m128 weight_prev_squared_x4 =
- _mm_mul_ps(weight_prev_x4, weight_prev_x4);
- const __m128 weight_prev_4th_x4 =
- _mm_mul_ps(weight_prev_squared_x4, weight_prev_squared_x4);
-
- // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
- // 0, respectively.
- __m128 max_x4 = _mm_setzero_ps();
- __m128 ewma_x4 = _mm_setr_ps(0.0f, 0.0f, 0.0f, initial_value);
- int i;
- for (i = 0; i < last_index; i += 4) {
- ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_4th_x4);
- const __m128 sample_x4 = _mm_load_ps(src + i);
- const __m128 sample_squared_x4 = _mm_mul_ps(sample_x4, sample_x4);
- max_x4 = _mm_max_ps(max_x4, sample_squared_x4);
- // Note: The compiler optimizes this to a single multiply-and-accumulate
- // instruction:
- ewma_x4 = _mm_add_ps(ewma_x4,
- _mm_mul_ps(sample_squared_x4, smoothing_factor_x4));
- }
-
- // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
- float ewma = EXTRACT_FLOAT(ewma_x4, 3);
- ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
- ewma += EXTRACT_FLOAT(ewma_x4, 2);
- ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
- ewma += EXTRACT_FLOAT(ewma_x4, 1);
- ewma_x4 = _mm_mul_ss(ewma_x4, weight_prev_x4);
- ewma += EXTRACT_FLOAT(ewma_x4, 0);
-
- // Fold the maximums together to get the overall maximum.
- max_x4 = _mm_max_ps(max_x4,
- _mm_shuffle_ps(max_x4, max_x4, _MM_SHUFFLE(3, 3, 1, 1)));
- max_x4 = _mm_max_ss(max_x4, _mm_shuffle_ps(max_x4, max_x4, 2));
-
- std::pair<float, float> result(ewma, EXTRACT_FLOAT(max_x4, 0));
-
- // Handle remaining values at the end of |src|.
- for (; i < len; ++i) {
- result.first *= weight_prev;
- const float sample = src[i];
- const float sample_squared = sample * sample;
- result.first += sample_squared * smoothing_factor;
- result.second = std::max(result.second, sample_squared);
- }
-
- return result;
-}
-
-} // namespace vector_math
-} // namespace media
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.cc b/chromium/media/base/simd/yuv_to_rgb_table.cc
index 253280da951..5bc35aff89a 100644
--- a/chromium/media/base/simd/yuv_to_rgb_table.cc
+++ b/chromium/media/base/simd/yuv_to_rgb_table.cc
@@ -335,4 +335,335 @@ SIMD_ALIGNED(const int16 kCoefficientsRgbY[256 * 4][4]) = {
#undef RGBV
#undef ALPHA
+// JPEG color range version:
+
+// Defines the R,G,B,A contributions from Y.
+#define RGBY(i) { \
+ static_cast<int16>(64 * i + 0.5), \
+ static_cast<int16>(64 * i + 0.5), \
+ static_cast<int16>(64 * i + 0.5), \
+ 0 \
+}
+
+// Defines the R,G,B,A contributions from U.
+// The contribution to A is the same for any value of U,
+// causing the final A value to be 255 in every conversion.
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+#define RGBU(i) { \
+ 0, \
+ static_cast<int16>(-0.34414 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(1.772 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(256 * 64 - 1) \
+}
+#else
+#define RGBU(i) { \
+ static_cast<int16>(1.772 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(-0.34414 * 64 * (i - 128) + 0.5), \
+ 0, \
+ static_cast<int16>(256 * 64 - 1) \
+}
+#endif
+
+// Defines the R,G,B,A contributions from V.
+// Android's pixel layout is RGBA, while other platforms
+// are BGRA.
+#if defined(OS_ANDROID)
+#define RGBV(i) { \
+ static_cast<int16>(1.402 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(-0.71414 * 64 * (i - 128) + 0.5), \
+ 0, \
+ 0 \
+}
+#else
+#define RGBV(i) { \
+ 0, \
+ static_cast<int16>(-0.71414 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(1.402 * 64 * (i - 128) + 0.5), \
+ 0 \
+}
+#endif
+
+// Used to define a set of multiplier words for each alpha level.
+#define ALPHA(i) { \
+ i, i, i, i \
+}
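In floating point, the coefficients in RGBY/RGBU/RGBV above encode the JPEG (full-range) conversion; each macro pre-scales by 64 and rounds (+ 0.5), which is why the C path shifts the summed terms right by 6. A reference sketch of the math the fixed-point tables approximate:

    #include <algorithm>
    #include <cstdint>
    // Full-range (JPEG) YUV -> RGB; channel order (BGRA vs RGBA) is handled
    // by the platform-specific macro variants above, not here.
    void JpegYuvToRgb(uint8_t y, uint8_t u, uint8_t v,
                      uint8_t* r, uint8_t* g, uint8_t* b) {
      const double du = u - 128.0, dv = v - 128.0;
      auto clamp = [](double x) {
        return static_cast<uint8_t>(std::min(255.0, std::max(0.0, x + 0.5)));
      };
      *r = clamp(y + 1.402 * dv);
      *g = clamp(y - 0.34414 * du - 0.71414 * dv);
      *b = clamp(y + 1.772 * du);
    }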
+
+// The following table defines the RGBA contributions
+// for each component of YUVA. The Y table comes first, followed
+// by the U and V tables. The alpha multiplier table follows.
+// These tables are aligned and kept adjacent to optimize for
+// SIMD and caching.
+
+SIMD_ALIGNED(const int16 kCoefficientsRgbY_JPEG[256 * 4][4]) = {
+ RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
+ RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
+ RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
+ RGBY(0x0C), RGBY(0x0D), RGBY(0x0E), RGBY(0x0F),
+ RGBY(0x10), RGBY(0x11), RGBY(0x12), RGBY(0x13),
+ RGBY(0x14), RGBY(0x15), RGBY(0x16), RGBY(0x17),
+ RGBY(0x18), RGBY(0x19), RGBY(0x1A), RGBY(0x1B),
+ RGBY(0x1C), RGBY(0x1D), RGBY(0x1E), RGBY(0x1F),
+ RGBY(0x20), RGBY(0x21), RGBY(0x22), RGBY(0x23),
+ RGBY(0x24), RGBY(0x25), RGBY(0x26), RGBY(0x27),
+ RGBY(0x28), RGBY(0x29), RGBY(0x2A), RGBY(0x2B),
+ RGBY(0x2C), RGBY(0x2D), RGBY(0x2E), RGBY(0x2F),
+ RGBY(0x30), RGBY(0x31), RGBY(0x32), RGBY(0x33),
+ RGBY(0x34), RGBY(0x35), RGBY(0x36), RGBY(0x37),
+ RGBY(0x38), RGBY(0x39), RGBY(0x3A), RGBY(0x3B),
+ RGBY(0x3C), RGBY(0x3D), RGBY(0x3E), RGBY(0x3F),
+ RGBY(0x40), RGBY(0x41), RGBY(0x42), RGBY(0x43),
+ RGBY(0x44), RGBY(0x45), RGBY(0x46), RGBY(0x47),
+ RGBY(0x48), RGBY(0x49), RGBY(0x4A), RGBY(0x4B),
+ RGBY(0x4C), RGBY(0x4D), RGBY(0x4E), RGBY(0x4F),
+ RGBY(0x50), RGBY(0x51), RGBY(0x52), RGBY(0x53),
+ RGBY(0x54), RGBY(0x55), RGBY(0x56), RGBY(0x57),
+ RGBY(0x58), RGBY(0x59), RGBY(0x5A), RGBY(0x5B),
+ RGBY(0x5C), RGBY(0x5D), RGBY(0x5E), RGBY(0x5F),
+ RGBY(0x60), RGBY(0x61), RGBY(0x62), RGBY(0x63),
+ RGBY(0x64), RGBY(0x65), RGBY(0x66), RGBY(0x67),
+ RGBY(0x68), RGBY(0x69), RGBY(0x6A), RGBY(0x6B),
+ RGBY(0x6C), RGBY(0x6D), RGBY(0x6E), RGBY(0x6F),
+ RGBY(0x70), RGBY(0x71), RGBY(0x72), RGBY(0x73),
+ RGBY(0x74), RGBY(0x75), RGBY(0x76), RGBY(0x77),
+ RGBY(0x78), RGBY(0x79), RGBY(0x7A), RGBY(0x7B),
+ RGBY(0x7C), RGBY(0x7D), RGBY(0x7E), RGBY(0x7F),
+ RGBY(0x80), RGBY(0x81), RGBY(0x82), RGBY(0x83),
+ RGBY(0x84), RGBY(0x85), RGBY(0x86), RGBY(0x87),
+ RGBY(0x88), RGBY(0x89), RGBY(0x8A), RGBY(0x8B),
+ RGBY(0x8C), RGBY(0x8D), RGBY(0x8E), RGBY(0x8F),
+ RGBY(0x90), RGBY(0x91), RGBY(0x92), RGBY(0x93),
+ RGBY(0x94), RGBY(0x95), RGBY(0x96), RGBY(0x97),
+ RGBY(0x98), RGBY(0x99), RGBY(0x9A), RGBY(0x9B),
+ RGBY(0x9C), RGBY(0x9D), RGBY(0x9E), RGBY(0x9F),
+ RGBY(0xA0), RGBY(0xA1), RGBY(0xA2), RGBY(0xA3),
+ RGBY(0xA4), RGBY(0xA5), RGBY(0xA6), RGBY(0xA7),
+ RGBY(0xA8), RGBY(0xA9), RGBY(0xAA), RGBY(0xAB),
+ RGBY(0xAC), RGBY(0xAD), RGBY(0xAE), RGBY(0xAF),
+ RGBY(0xB0), RGBY(0xB1), RGBY(0xB2), RGBY(0xB3),
+ RGBY(0xB4), RGBY(0xB5), RGBY(0xB6), RGBY(0xB7),
+ RGBY(0xB8), RGBY(0xB9), RGBY(0xBA), RGBY(0xBB),
+ RGBY(0xBC), RGBY(0xBD), RGBY(0xBE), RGBY(0xBF),
+ RGBY(0xC0), RGBY(0xC1), RGBY(0xC2), RGBY(0xC3),
+ RGBY(0xC4), RGBY(0xC5), RGBY(0xC6), RGBY(0xC7),
+ RGBY(0xC8), RGBY(0xC9), RGBY(0xCA), RGBY(0xCB),
+ RGBY(0xCC), RGBY(0xCD), RGBY(0xCE), RGBY(0xCF),
+ RGBY(0xD0), RGBY(0xD1), RGBY(0xD2), RGBY(0xD3),
+ RGBY(0xD4), RGBY(0xD5), RGBY(0xD6), RGBY(0xD7),
+ RGBY(0xD8), RGBY(0xD9), RGBY(0xDA), RGBY(0xDB),
+ RGBY(0xDC), RGBY(0xDD), RGBY(0xDE), RGBY(0xDF),
+ RGBY(0xE0), RGBY(0xE1), RGBY(0xE2), RGBY(0xE3),
+ RGBY(0xE4), RGBY(0xE5), RGBY(0xE6), RGBY(0xE7),
+ RGBY(0xE8), RGBY(0xE9), RGBY(0xEA), RGBY(0xEB),
+ RGBY(0xEC), RGBY(0xED), RGBY(0xEE), RGBY(0xEF),
+ RGBY(0xF0), RGBY(0xF1), RGBY(0xF2), RGBY(0xF3),
+ RGBY(0xF4), RGBY(0xF5), RGBY(0xF6), RGBY(0xF7),
+ RGBY(0xF8), RGBY(0xF9), RGBY(0xFA), RGBY(0xFB),
+ RGBY(0xFC), RGBY(0xFD), RGBY(0xFE), RGBY(0xFF),
+
+ // Chroma U table.
+ RGBU(0x00), RGBU(0x01), RGBU(0x02), RGBU(0x03),
+ RGBU(0x04), RGBU(0x05), RGBU(0x06), RGBU(0x07),
+ RGBU(0x08), RGBU(0x09), RGBU(0x0A), RGBU(0x0B),
+ RGBU(0x0C), RGBU(0x0D), RGBU(0x0E), RGBU(0x0F),
+ RGBU(0x10), RGBU(0x11), RGBU(0x12), RGBU(0x13),
+ RGBU(0x14), RGBU(0x15), RGBU(0x16), RGBU(0x17),
+ RGBU(0x18), RGBU(0x19), RGBU(0x1A), RGBU(0x1B),
+ RGBU(0x1C), RGBU(0x1D), RGBU(0x1E), RGBU(0x1F),
+ RGBU(0x20), RGBU(0x21), RGBU(0x22), RGBU(0x23),
+ RGBU(0x24), RGBU(0x25), RGBU(0x26), RGBU(0x27),
+ RGBU(0x28), RGBU(0x29), RGBU(0x2A), RGBU(0x2B),
+ RGBU(0x2C), RGBU(0x2D), RGBU(0x2E), RGBU(0x2F),
+ RGBU(0x30), RGBU(0x31), RGBU(0x32), RGBU(0x33),
+ RGBU(0x34), RGBU(0x35), RGBU(0x36), RGBU(0x37),
+ RGBU(0x38), RGBU(0x39), RGBU(0x3A), RGBU(0x3B),
+ RGBU(0x3C), RGBU(0x3D), RGBU(0x3E), RGBU(0x3F),
+ RGBU(0x40), RGBU(0x41), RGBU(0x42), RGBU(0x43),
+ RGBU(0x44), RGBU(0x45), RGBU(0x46), RGBU(0x47),
+ RGBU(0x48), RGBU(0x49), RGBU(0x4A), RGBU(0x4B),
+ RGBU(0x4C), RGBU(0x4D), RGBU(0x4E), RGBU(0x4F),
+ RGBU(0x50), RGBU(0x51), RGBU(0x52), RGBU(0x53),
+ RGBU(0x54), RGBU(0x55), RGBU(0x56), RGBU(0x57),
+ RGBU(0x58), RGBU(0x59), RGBU(0x5A), RGBU(0x5B),
+ RGBU(0x5C), RGBU(0x5D), RGBU(0x5E), RGBU(0x5F),
+ RGBU(0x60), RGBU(0x61), RGBU(0x62), RGBU(0x63),
+ RGBU(0x64), RGBU(0x65), RGBU(0x66), RGBU(0x67),
+ RGBU(0x68), RGBU(0x69), RGBU(0x6A), RGBU(0x6B),
+ RGBU(0x6C), RGBU(0x6D), RGBU(0x6E), RGBU(0x6F),
+ RGBU(0x70), RGBU(0x71), RGBU(0x72), RGBU(0x73),
+ RGBU(0x74), RGBU(0x75), RGBU(0x76), RGBU(0x77),
+ RGBU(0x78), RGBU(0x79), RGBU(0x7A), RGBU(0x7B),
+ RGBU(0x7C), RGBU(0x7D), RGBU(0x7E), RGBU(0x7F),
+ RGBU(0x80), RGBU(0x81), RGBU(0x82), RGBU(0x83),
+ RGBU(0x84), RGBU(0x85), RGBU(0x86), RGBU(0x87),
+ RGBU(0x88), RGBU(0x89), RGBU(0x8A), RGBU(0x8B),
+ RGBU(0x8C), RGBU(0x8D), RGBU(0x8E), RGBU(0x8F),
+ RGBU(0x90), RGBU(0x91), RGBU(0x92), RGBU(0x93),
+ RGBU(0x94), RGBU(0x95), RGBU(0x96), RGBU(0x97),
+ RGBU(0x98), RGBU(0x99), RGBU(0x9A), RGBU(0x9B),
+ RGBU(0x9C), RGBU(0x9D), RGBU(0x9E), RGBU(0x9F),
+ RGBU(0xA0), RGBU(0xA1), RGBU(0xA2), RGBU(0xA3),
+ RGBU(0xA4), RGBU(0xA5), RGBU(0xA6), RGBU(0xA7),
+ RGBU(0xA8), RGBU(0xA9), RGBU(0xAA), RGBU(0xAB),
+ RGBU(0xAC), RGBU(0xAD), RGBU(0xAE), RGBU(0xAF),
+ RGBU(0xB0), RGBU(0xB1), RGBU(0xB2), RGBU(0xB3),
+ RGBU(0xB4), RGBU(0xB5), RGBU(0xB6), RGBU(0xB7),
+ RGBU(0xB8), RGBU(0xB9), RGBU(0xBA), RGBU(0xBB),
+ RGBU(0xBC), RGBU(0xBD), RGBU(0xBE), RGBU(0xBF),
+ RGBU(0xC0), RGBU(0xC1), RGBU(0xC2), RGBU(0xC3),
+ RGBU(0xC4), RGBU(0xC5), RGBU(0xC6), RGBU(0xC7),
+ RGBU(0xC8), RGBU(0xC9), RGBU(0xCA), RGBU(0xCB),
+ RGBU(0xCC), RGBU(0xCD), RGBU(0xCE), RGBU(0xCF),
+ RGBU(0xD0), RGBU(0xD1), RGBU(0xD2), RGBU(0xD3),
+ RGBU(0xD4), RGBU(0xD5), RGBU(0xD6), RGBU(0xD7),
+ RGBU(0xD8), RGBU(0xD9), RGBU(0xDA), RGBU(0xDB),
+ RGBU(0xDC), RGBU(0xDD), RGBU(0xDE), RGBU(0xDF),
+ RGBU(0xE0), RGBU(0xE1), RGBU(0xE2), RGBU(0xE3),
+ RGBU(0xE4), RGBU(0xE5), RGBU(0xE6), RGBU(0xE7),
+ RGBU(0xE8), RGBU(0xE9), RGBU(0xEA), RGBU(0xEB),
+ RGBU(0xEC), RGBU(0xED), RGBU(0xEE), RGBU(0xEF),
+ RGBU(0xF0), RGBU(0xF1), RGBU(0xF2), RGBU(0xF3),
+ RGBU(0xF4), RGBU(0xF5), RGBU(0xF6), RGBU(0xF7),
+ RGBU(0xF8), RGBU(0xF9), RGBU(0xFA), RGBU(0xFB),
+ RGBU(0xFC), RGBU(0xFD), RGBU(0xFE), RGBU(0xFF),
+
+ // Chroma V table.
+ RGBV(0x00), RGBV(0x01), RGBV(0x02), RGBV(0x03),
+ RGBV(0x04), RGBV(0x05), RGBV(0x06), RGBV(0x07),
+ RGBV(0x08), RGBV(0x09), RGBV(0x0A), RGBV(0x0B),
+ RGBV(0x0C), RGBV(0x0D), RGBV(0x0E), RGBV(0x0F),
+ RGBV(0x10), RGBV(0x11), RGBV(0x12), RGBV(0x13),
+ RGBV(0x14), RGBV(0x15), RGBV(0x16), RGBV(0x17),
+ RGBV(0x18), RGBV(0x19), RGBV(0x1A), RGBV(0x1B),
+ RGBV(0x1C), RGBV(0x1D), RGBV(0x1E), RGBV(0x1F),
+ RGBV(0x20), RGBV(0x21), RGBV(0x22), RGBV(0x23),
+ RGBV(0x24), RGBV(0x25), RGBV(0x26), RGBV(0x27),
+ RGBV(0x28), RGBV(0x29), RGBV(0x2A), RGBV(0x2B),
+ RGBV(0x2C), RGBV(0x2D), RGBV(0x2E), RGBV(0x2F),
+ RGBV(0x30), RGBV(0x31), RGBV(0x32), RGBV(0x33),
+ RGBV(0x34), RGBV(0x35), RGBV(0x36), RGBV(0x37),
+ RGBV(0x38), RGBV(0x39), RGBV(0x3A), RGBV(0x3B),
+ RGBV(0x3C), RGBV(0x3D), RGBV(0x3E), RGBV(0x3F),
+ RGBV(0x40), RGBV(0x41), RGBV(0x42), RGBV(0x43),
+ RGBV(0x44), RGBV(0x45), RGBV(0x46), RGBV(0x47),
+ RGBV(0x48), RGBV(0x49), RGBV(0x4A), RGBV(0x4B),
+ RGBV(0x4C), RGBV(0x4D), RGBV(0x4E), RGBV(0x4F),
+ RGBV(0x50), RGBV(0x51), RGBV(0x52), RGBV(0x53),
+ RGBV(0x54), RGBV(0x55), RGBV(0x56), RGBV(0x57),
+ RGBV(0x58), RGBV(0x59), RGBV(0x5A), RGBV(0x5B),
+ RGBV(0x5C), RGBV(0x5D), RGBV(0x5E), RGBV(0x5F),
+ RGBV(0x60), RGBV(0x61), RGBV(0x62), RGBV(0x63),
+ RGBV(0x64), RGBV(0x65), RGBV(0x66), RGBV(0x67),
+ RGBV(0x68), RGBV(0x69), RGBV(0x6A), RGBV(0x6B),
+ RGBV(0x6C), RGBV(0x6D), RGBV(0x6E), RGBV(0x6F),
+ RGBV(0x70), RGBV(0x71), RGBV(0x72), RGBV(0x73),
+ RGBV(0x74), RGBV(0x75), RGBV(0x76), RGBV(0x77),
+ RGBV(0x78), RGBV(0x79), RGBV(0x7A), RGBV(0x7B),
+ RGBV(0x7C), RGBV(0x7D), RGBV(0x7E), RGBV(0x7F),
+ RGBV(0x80), RGBV(0x81), RGBV(0x82), RGBV(0x83),
+ RGBV(0x84), RGBV(0x85), RGBV(0x86), RGBV(0x87),
+ RGBV(0x88), RGBV(0x89), RGBV(0x8A), RGBV(0x8B),
+ RGBV(0x8C), RGBV(0x8D), RGBV(0x8E), RGBV(0x8F),
+ RGBV(0x90), RGBV(0x91), RGBV(0x92), RGBV(0x93),
+ RGBV(0x94), RGBV(0x95), RGBV(0x96), RGBV(0x97),
+ RGBV(0x98), RGBV(0x99), RGBV(0x9A), RGBV(0x9B),
+ RGBV(0x9C), RGBV(0x9D), RGBV(0x9E), RGBV(0x9F),
+ RGBV(0xA0), RGBV(0xA1), RGBV(0xA2), RGBV(0xA3),
+ RGBV(0xA4), RGBV(0xA5), RGBV(0xA6), RGBV(0xA7),
+ RGBV(0xA8), RGBV(0xA9), RGBV(0xAA), RGBV(0xAB),
+ RGBV(0xAC), RGBV(0xAD), RGBV(0xAE), RGBV(0xAF),
+ RGBV(0xB0), RGBV(0xB1), RGBV(0xB2), RGBV(0xB3),
+ RGBV(0xB4), RGBV(0xB5), RGBV(0xB6), RGBV(0xB7),
+ RGBV(0xB8), RGBV(0xB9), RGBV(0xBA), RGBV(0xBB),
+ RGBV(0xBC), RGBV(0xBD), RGBV(0xBE), RGBV(0xBF),
+ RGBV(0xC0), RGBV(0xC1), RGBV(0xC2), RGBV(0xC3),
+ RGBV(0xC4), RGBV(0xC5), RGBV(0xC6), RGBV(0xC7),
+ RGBV(0xC8), RGBV(0xC9), RGBV(0xCA), RGBV(0xCB),
+ RGBV(0xCC), RGBV(0xCD), RGBV(0xCE), RGBV(0xCF),
+ RGBV(0xD0), RGBV(0xD1), RGBV(0xD2), RGBV(0xD3),
+ RGBV(0xD4), RGBV(0xD5), RGBV(0xD6), RGBV(0xD7),
+ RGBV(0xD8), RGBV(0xD9), RGBV(0xDA), RGBV(0xDB),
+ RGBV(0xDC), RGBV(0xDD), RGBV(0xDE), RGBV(0xDF),
+ RGBV(0xE0), RGBV(0xE1), RGBV(0xE2), RGBV(0xE3),
+ RGBV(0xE4), RGBV(0xE5), RGBV(0xE6), RGBV(0xE7),
+ RGBV(0xE8), RGBV(0xE9), RGBV(0xEA), RGBV(0xEB),
+ RGBV(0xEC), RGBV(0xED), RGBV(0xEE), RGBV(0xEF),
+ RGBV(0xF0), RGBV(0xF1), RGBV(0xF2), RGBV(0xF3),
+ RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
+ RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
+ RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
+
+ // Alpha multipliers for each alpha level.
+ ALPHA(0x00), ALPHA(0x01), ALPHA(0x02), ALPHA(0x03),
+ ALPHA(0x04), ALPHA(0x05), ALPHA(0x06), ALPHA(0x07),
+ ALPHA(0x08), ALPHA(0x09), ALPHA(0x0A), ALPHA(0x0B),
+ ALPHA(0x0C), ALPHA(0x0D), ALPHA(0x0E), ALPHA(0x0F),
+ ALPHA(0x10), ALPHA(0x11), ALPHA(0x12), ALPHA(0x13),
+ ALPHA(0x14), ALPHA(0x15), ALPHA(0x16), ALPHA(0x17),
+ ALPHA(0x18), ALPHA(0x19), ALPHA(0x1A), ALPHA(0x1B),
+ ALPHA(0x1C), ALPHA(0x1D), ALPHA(0x1E), ALPHA(0x1F),
+ ALPHA(0x20), ALPHA(0x21), ALPHA(0x22), ALPHA(0x23),
+ ALPHA(0x24), ALPHA(0x25), ALPHA(0x26), ALPHA(0x27),
+ ALPHA(0x28), ALPHA(0x29), ALPHA(0x2A), ALPHA(0x2B),
+ ALPHA(0x2C), ALPHA(0x2D), ALPHA(0x2E), ALPHA(0x2F),
+ ALPHA(0x30), ALPHA(0x31), ALPHA(0x32), ALPHA(0x33),
+ ALPHA(0x34), ALPHA(0x35), ALPHA(0x36), ALPHA(0x37),
+ ALPHA(0x38), ALPHA(0x39), ALPHA(0x3A), ALPHA(0x3B),
+ ALPHA(0x3C), ALPHA(0x3D), ALPHA(0x3E), ALPHA(0x3F),
+ ALPHA(0x40), ALPHA(0x41), ALPHA(0x42), ALPHA(0x43),
+ ALPHA(0x44), ALPHA(0x45), ALPHA(0x46), ALPHA(0x47),
+ ALPHA(0x48), ALPHA(0x49), ALPHA(0x4A), ALPHA(0x4B),
+ ALPHA(0x4C), ALPHA(0x4D), ALPHA(0x4E), ALPHA(0x4F),
+ ALPHA(0x50), ALPHA(0x51), ALPHA(0x52), ALPHA(0x53),
+ ALPHA(0x54), ALPHA(0x55), ALPHA(0x56), ALPHA(0x57),
+ ALPHA(0x58), ALPHA(0x59), ALPHA(0x5A), ALPHA(0x5B),
+ ALPHA(0x5C), ALPHA(0x5D), ALPHA(0x5E), ALPHA(0x5F),
+ ALPHA(0x60), ALPHA(0x61), ALPHA(0x62), ALPHA(0x63),
+ ALPHA(0x64), ALPHA(0x65), ALPHA(0x66), ALPHA(0x67),
+ ALPHA(0x68), ALPHA(0x69), ALPHA(0x6A), ALPHA(0x6B),
+ ALPHA(0x6C), ALPHA(0x6D), ALPHA(0x6E), ALPHA(0x6F),
+ ALPHA(0x70), ALPHA(0x71), ALPHA(0x72), ALPHA(0x73),
+ ALPHA(0x74), ALPHA(0x75), ALPHA(0x76), ALPHA(0x77),
+ ALPHA(0x78), ALPHA(0x79), ALPHA(0x7A), ALPHA(0x7B),
+ ALPHA(0x7C), ALPHA(0x7D), ALPHA(0x7E), ALPHA(0x7F),
+ ALPHA(0x80), ALPHA(0x81), ALPHA(0x82), ALPHA(0x83),
+ ALPHA(0x84), ALPHA(0x85), ALPHA(0x86), ALPHA(0x87),
+ ALPHA(0x88), ALPHA(0x89), ALPHA(0x8A), ALPHA(0x8B),
+ ALPHA(0x8C), ALPHA(0x8D), ALPHA(0x8E), ALPHA(0x8F),
+ ALPHA(0x90), ALPHA(0x91), ALPHA(0x92), ALPHA(0x93),
+ ALPHA(0x94), ALPHA(0x95), ALPHA(0x96), ALPHA(0x97),
+ ALPHA(0x98), ALPHA(0x99), ALPHA(0x9A), ALPHA(0x9B),
+ ALPHA(0x9C), ALPHA(0x9D), ALPHA(0x9E), ALPHA(0x9F),
+ ALPHA(0xA0), ALPHA(0xA1), ALPHA(0xA2), ALPHA(0xA3),
+ ALPHA(0xA4), ALPHA(0xA5), ALPHA(0xA6), ALPHA(0xA7),
+ ALPHA(0xA8), ALPHA(0xA9), ALPHA(0xAA), ALPHA(0xAB),
+ ALPHA(0xAC), ALPHA(0xAD), ALPHA(0xAE), ALPHA(0xAF),
+ ALPHA(0xB0), ALPHA(0xB1), ALPHA(0xB2), ALPHA(0xB3),
+ ALPHA(0xB4), ALPHA(0xB5), ALPHA(0xB6), ALPHA(0xB7),
+ ALPHA(0xB8), ALPHA(0xB9), ALPHA(0xBA), ALPHA(0xBB),
+ ALPHA(0xBC), ALPHA(0xBD), ALPHA(0xBE), ALPHA(0xBF),
+ ALPHA(0xC0), ALPHA(0xC1), ALPHA(0xC2), ALPHA(0xC3),
+ ALPHA(0xC4), ALPHA(0xC5), ALPHA(0xC6), ALPHA(0xC7),
+ ALPHA(0xC8), ALPHA(0xC9), ALPHA(0xCA), ALPHA(0xCB),
+ ALPHA(0xCC), ALPHA(0xCD), ALPHA(0xCE), ALPHA(0xCF),
+ ALPHA(0xD0), ALPHA(0xD1), ALPHA(0xD2), ALPHA(0xD3),
+ ALPHA(0xD4), ALPHA(0xD5), ALPHA(0xD6), ALPHA(0xD7),
+ ALPHA(0xD8), ALPHA(0xD9), ALPHA(0xDA), ALPHA(0xDB),
+ ALPHA(0xDC), ALPHA(0xDD), ALPHA(0xDE), ALPHA(0xDF),
+ ALPHA(0xE0), ALPHA(0xE1), ALPHA(0xE2), ALPHA(0xE3),
+ ALPHA(0xE4), ALPHA(0xE5), ALPHA(0xE6), ALPHA(0xE7),
+ ALPHA(0xE8), ALPHA(0xE9), ALPHA(0xEA), ALPHA(0xEB),
+ ALPHA(0xEC), ALPHA(0xED), ALPHA(0xEE), ALPHA(0xEF),
+ ALPHA(0xF0), ALPHA(0xF1), ALPHA(0xF2), ALPHA(0xF3),
+ ALPHA(0xF4), ALPHA(0xF5), ALPHA(0xF6), ALPHA(0xF7),
+ ALPHA(0xF8), ALPHA(0xF9), ALPHA(0xFA), ALPHA(0xFB),
+ ALPHA(0xFC), ALPHA(0xFD), ALPHA(0xFE), ALPHA(0xFF),
+};
+
+#undef RGBY
+#undef RGBU
+#undef RGBV
+#undef ALPHA
+
} // extern "C"
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.h b/chromium/media/base/simd/yuv_to_rgb_table.h
index aebf1b20d19..1ed6fd86cae 100644
--- a/chromium/media/base/simd/yuv_to_rgb_table.h
+++ b/chromium/media/base/simd/yuv_to_rgb_table.h
@@ -20,6 +20,7 @@ extern "C" {
// Align the table to 16-bytes to allow faster reading.
extern SIMD_ALIGNED(const int16 kCoefficientsRgbY[256 * 4][4]);
+extern SIMD_ALIGNED(const int16 kCoefficientsRgbY_JPEG[256 * 4][4]);
} // extern "C"
diff --git a/chromium/media/base/sinc_resampler.cc b/chromium/media/base/sinc_resampler.cc
index 82168dbc64a..900648e6760 100644
--- a/chromium/media/base/sinc_resampler.cc
+++ b/chromium/media/base/sinc_resampler.cc
@@ -81,11 +81,16 @@
#include <cmath>
#include <limits>
-#include "base/cpu.h"
#include "base/logging.h"
-#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#if defined(ARCH_CPU_X86_FAMILY)
+#include <xmmintrin.h>
+#define CONVOLVE_FUNC Convolve_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#include <arm_neon.h>
+#define CONVOLVE_FUNC Convolve_NEON
+#else
+#define CONVOLVE_FUNC Convolve_C
#endif
namespace media {
@@ -106,36 +111,6 @@ static double SincScaleFactor(double io_ratio) {
return sinc_scale_factor;
}
-// If we know the minimum architecture at compile time, avoid CPU detection.
-// Force NaCl code to use C routines since (at present) nothing there uses these
-// methods and plumbing the -msse built library is non-trivial.
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
-#if defined(__SSE__)
-#define CONVOLVE_FUNC Convolve_SSE
-void SincResampler::InitializeCPUSpecificFeatures() {}
-#else
-// X86 CPU detection required. Functions will be set by
-// InitializeCPUSpecificFeatures().
-// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
-#define CONVOLVE_FUNC g_convolve_proc_
-
-typedef float (*ConvolveProc)(const float*, const float*, const float*, double);
-static ConvolveProc g_convolve_proc_ = NULL;
-
-void SincResampler::InitializeCPUSpecificFeatures() {
- CHECK(!g_convolve_proc_);
- g_convolve_proc_ = base::CPU().has_sse() ? Convolve_SSE : Convolve_C;
-}
-#endif
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#define CONVOLVE_FUNC Convolve_NEON
-void SincResampler::InitializeCPUSpecificFeatures() {}
-#else
-// Unknown architecture.
-#define CONVOLVE_FUNC Convolve_C
-void SincResampler::InitializeCPUSpecificFeatures() {}
-#endif
-
SincResampler::SincResampler(double io_sample_rate_ratio,
int request_frames,
const ReadCB& read_cb)
@@ -153,8 +128,7 @@ SincResampler::SincResampler(double io_sample_rate_ratio,
input_buffer_(static_cast<float*>(
base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))),
r1_(input_buffer_.get()),
- r2_(input_buffer_.get() + kKernelSize / 2),
- currently_resampling_(0) {
+ r2_(input_buffer_.get() + kKernelSize / 2) {
CHECK_GT(request_frames_, 0);
Flush();
CHECK_GT(block_size_, kKernelSize)
@@ -170,10 +144,7 @@ SincResampler::SincResampler(double io_sample_rate_ratio,
InitializeKernel();
}
-SincResampler::~SincResampler() {
- // TODO(dalecurtis): Remove debugging for http://crbug.com/295278
- CHECK(base::AtomicRefCountIsZero(&currently_resampling_));
-}
+SincResampler::~SincResampler() {}
void SincResampler::UpdateRegions(bool second_load) {
// Setup various region pointers in the buffer (see diagram above). If we're
@@ -212,8 +183,8 @@ void SincResampler::InitializeKernel() {
// Compute Blackman window, matching the offset of the sinc().
const float x = (i - subsample_offset) / kKernelSize;
- const float window = kA0 - kA1 * cos(2.0 * M_PI * x) + kA2
- * cos(4.0 * M_PI * x);
+ const float window =
+ kA0 - kA1 * cos(2.0 * M_PI * x) + kA2 * cos(4.0 * M_PI * x);
kernel_window_storage_[idx] = window;
// Compute the sinc with offset, then window the sinc() function and store
@@ -256,8 +227,6 @@ void SincResampler::SetRatio(double io_sample_rate_ratio) {
}
void SincResampler::Resample(int frames, float* destination) {
- base::AtomicRefCountInc(&currently_resampling_);
-
int remaining_frames = frames;
// Step (1) -- Prime the input buffer at the start of the input stream.
@@ -271,18 +240,12 @@ void SincResampler::Resample(int frames, float* destination) {
const double current_io_ratio = io_sample_rate_ratio_;
const float* const kernel_ptr = kernel_storage_.get();
while (remaining_frames) {
- // |i| may be negative if the last Resample() call ended on an iteration
- // that put |virtual_source_idx_| over the limit.
- //
// Note: The loop construct here can severely impact performance on ARM
// or when built with clang. See https://codereview.chromium.org/18566009/
- for (int i = ceil((block_size_ - virtual_source_idx_) / current_io_ratio);
- i > 0; --i) {
- DCHECK_LT(virtual_source_idx_, block_size_);
-
+ int source_idx = virtual_source_idx_;
+ while (source_idx < block_size_) {
// |virtual_source_idx_| lies in between two kernel offsets so figure out
// what they are.
- const int source_idx = virtual_source_idx_;
const double subsample_remainder = virtual_source_idx_ - source_idx;
const double virtual_offset_idx =
@@ -310,14 +273,14 @@ void SincResampler::Resample(int frames, float* destination) {
// Advance the virtual index.
virtual_source_idx_ += current_io_ratio;
+ source_idx = virtual_source_idx_;
- if (!--remaining_frames) {
- CHECK(!base::AtomicRefCountDec(&currently_resampling_));
+ if (!--remaining_frames)
return;
- }
}
// Wrap back around to the start.
+ DCHECK_GE(virtual_source_idx_, block_size_);
virtual_source_idx_ -= block_size_;
// Step (3) -- Copy r3_, r4_ to r1_, r2_.
@@ -331,18 +294,13 @@ void SincResampler::Resample(int frames, float* destination) {
// Step (5) -- Refresh the buffer with more input.
read_cb_.Run(request_frames_, r0_);
}
-
- CHECK(!base::AtomicRefCountDec(&currently_resampling_));
}
-#undef CONVOLVE_FUNC
-
int SincResampler::ChunkSize() const {
return block_size_ / io_sample_rate_ratio_;
}
void SincResampler::Flush() {
- CHECK(base::AtomicRefCountIsZero(&currently_resampling_));
virtual_source_idx_ = 0;
buffer_primed_ = false;
memset(input_buffer_.get(), 0,
@@ -369,7 +327,44 @@ float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
+ kernel_interpolation_factor * sum2;
}
-#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#if defined(ARCH_CPU_X86_FAMILY)
+float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
+ const float* k2,
+ double kernel_interpolation_factor) {
+ __m128 m_input;
+ __m128 m_sums1 = _mm_setzero_ps();
+ __m128 m_sums2 = _mm_setzero_ps();
+
+ // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
+ // these loops hurts performance in local testing.
+ if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
+ for (int i = 0; i < kKernelSize; i += 4) {
+ m_input = _mm_loadu_ps(input_ptr + i);
+ m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+ m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+ }
+ } else {
+ for (int i = 0; i < kKernelSize; i += 4) {
+ m_input = _mm_load_ps(input_ptr + i);
+ m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+ m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+ }
+ }
+
+ // Linearly interpolate the two "convolutions".
+ m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor));
+ m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor));
+ m_sums1 = _mm_add_ps(m_sums1, m_sums2);
+
+ // Sum components together.
+ float result;
+ m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
+ _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
+ m_sums2, m_sums2, 1)));
+
+ return result;
+}
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
const float* k2,
double kernel_interpolation_factor) {
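Both the new Convolve_SSE and the existing Convolve_NEON implement the same contract as Convolve_C: two dot products over kKernelSize input samples, linearly interpolated by kernel_interpolation_factor. A scalar restatement of that contract (a sketch; the function name is illustrative):

  // Scalar equivalent of the SIMD convolve paths above (sketch only).
  float ConvolveScalarSketch(const float* input_ptr, const float* k1,
                             const float* k2,
                             double kernel_interpolation_factor) {
    float sum1 = 0.0f;
    float sum2 = 0.0f;
    for (int i = 0; i < kKernelSize; ++i) {
      sum1 += input_ptr[i] * k1[i];  // Convolution against kernel offset 1.
      sum2 += input_ptr[i] * k2[i];  // Convolution against kernel offset 2.
    }
    // Linearly interpolate the two "convolutions", as the SSE code does with
    // _mm_set_ps1() and _mm_add_ps() before its horizontal sum.
    return (1.0 - kernel_interpolation_factor) * sum1 +
           kernel_interpolation_factor * sum2;
  }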
diff --git a/chromium/media/base/sinc_resampler.h b/chromium/media/base/sinc_resampler.h
index 217077830cc..79db85390fb 100644
--- a/chromium/media/base/sinc_resampler.h
+++ b/chromium/media/base/sinc_resampler.h
@@ -5,7 +5,6 @@
#ifndef MEDIA_BASE_SINC_RESAMPLER_H_
#define MEDIA_BASE_SINC_RESAMPLER_H_
-#include "base/atomic_ref_count.h"
#include "base/callback.h"
#include "base/gtest_prod_util.h"
#include "base/memory/aligned_memory.h"
@@ -35,10 +34,6 @@ class MEDIA_EXPORT SincResampler {
kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1),
};
- // Selects runtime specific CPU features like SSE. Must be called before
- // using SincResampler.
- static void InitializeCPUSpecificFeatures();
-
// Callback type for providing more data into the resampler. Expects |frames|
// of data to be rendered into |destination|; zero padded if not enough frames
// are available to satisfy the request.
@@ -121,12 +116,12 @@ class MEDIA_EXPORT SincResampler {
// Contains kKernelOffsetCount kernels back-to-back, each of size kKernelSize.
// The kernel offsets are sub-sample shifts of a windowed sinc shifted from
// 0.0 to 1.0 sample.
- scoped_ptr<float[], base::ScopedPtrAlignedFree> kernel_storage_;
- scoped_ptr<float[], base::ScopedPtrAlignedFree> kernel_pre_sinc_storage_;
- scoped_ptr<float[], base::ScopedPtrAlignedFree> kernel_window_storage_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> kernel_storage_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> kernel_pre_sinc_storage_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> kernel_window_storage_;
// Data from the source is copied into this buffer for each processing pass.
- scoped_ptr<float[], base::ScopedPtrAlignedFree> input_buffer_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> input_buffer_;
// Pointers to the various regions inside |input_buffer_|. See the diagram at
// the top of the .cc file for more information.
@@ -136,11 +131,6 @@ class MEDIA_EXPORT SincResampler {
float* r3_;
float* r4_;
- // Atomic ref count indicating when when we're in the middle of resampling.
- // Will be CHECK'd to find crashes...
- // TODO(dalecurtis): Remove debug helpers for http://crbug.com/295278
- base::AtomicRefCount currently_resampling_;
-
DISALLOW_COPY_AND_ASSIGN(SincResampler);
};
diff --git a/chromium/media/base/sinc_resampler_perftest.cc b/chromium/media/base/sinc_resampler_perftest.cc
index 21c6ec325c9..b54056af80d 100644
--- a/chromium/media/base/sinc_resampler_perftest.cc
+++ b/chromium/media/base/sinc_resampler_perftest.cc
@@ -4,7 +4,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/cpu.h"
#include "base/time/time.h"
#include "media/base/sinc_resampler.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -61,9 +60,6 @@ TEST(SincResamplerPerfTest, Convolve) {
&resampler, SincResampler::Convolve_C, true, "unoptimized_aligned");
#if defined(CONVOLVE_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
RunConvolveBenchmark(
&resampler, SincResampler::CONVOLVE_FUNC, true, "optimized_aligned");
RunConvolveBenchmark(
diff --git a/chromium/media/base/sinc_resampler_unittest.cc b/chromium/media/base/sinc_resampler_unittest.cc
index 3b460a39c39..c0f9d98f8ba 100644
--- a/chromium/media/base/sinc_resampler_unittest.cc
+++ b/chromium/media/base/sinc_resampler_unittest.cc
@@ -9,7 +9,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/cpu.h"
#include "base/strings/string_number_conversions.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -121,10 +120,6 @@ TEST(SincResamplerTest, DISABLED_SetRatioBench) {
static const double kKernelInterpolationFactor = 0.5;
TEST(SincResamplerTest, Convolve) {
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
-
// Initialize a dummy resampler.
MockSource mock_source;
SincResampler resampler(
diff --git a/chromium/media/base/stream_parser.cc b/chromium/media/base/stream_parser.cc
index 12409194fed..59c3ed60796 100644
--- a/chromium/media/base/stream_parser.cc
+++ b/chromium/media/base/stream_parser.cc
@@ -4,10 +4,134 @@
#include "media/base/stream_parser.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+
namespace media {
+StreamParser::InitParameters::InitParameters(base::TimeDelta duration)
+ : duration(duration),
+ auto_update_timestamp_offset(false),
+ liveness(Demuxer::LIVENESS_UNKNOWN) {
+}
+
StreamParser::StreamParser() {}
StreamParser::~StreamParser() {}
+static bool MergeBufferQueuesInternal(
+ const std::vector<const StreamParser::BufferQueue*>& buffer_queues,
+ StreamParser::BufferQueue* merged_buffers) {
+ // Instead of using std::merge, this method implements a custom merge because:
+ // 1) |buffer_queues| may contain N queues,
+ // 2) we must detect and return false if any of the queues in |buffer_queues|
+ // is unsorted, and
+ // 3) we must detect and return false if any of the buffers in |buffer_queues|
+ // has a decode timestamp prior to the last, if any, buffer in
+ // |merged_buffers|.
+ // TODO(wolenetz/acolwell): Refactor stream parsers to eliminate need for
+ // this large grain merge. See http://crbug.com/338484.
+
+ // Done if no inputs to merge.
+ if (buffer_queues.empty())
+ return true;
+
+ // Build a vector of iterators, one for each input, to traverse inputs.
+ // The union of these iterators points to the set of candidate buffers
+ // for being appended to |merged_buffers|.
+ size_t num_itrs = buffer_queues.size();
+ std::vector<StreamParser::BufferQueue::const_iterator> itrs(num_itrs);
+ for (size_t i = 0; i < num_itrs; ++i)
+ itrs[i] = buffer_queues[i]->begin();
+
+ // |last_decode_timestamp| tracks the lower bound, if any, that all candidate
+ // buffers must not be less than. If |merged_buffers| already has buffers,
+ // initialize |last_decode_timestamp| to the decode timestamp of the last
+ // buffer in it.
+ base::TimeDelta last_decode_timestamp = kNoTimestamp();
+ if (!merged_buffers->empty())
+ last_decode_timestamp = merged_buffers->back()->GetDecodeTimestamp();
+
+ // Repeatedly select and append the next buffer from the candidate buffers
+ // until either:
+ // 1) returning false when a candidate buffer has a decode timestamp below
+ //    |last_decode_timestamp|, which means either an input queue wasn't
+ //    sorted correctly or contained a buffer with a decode timestamp below
+ //    the last buffer, if any, in |merged_buffers|, or
+ // 2) returning true when all buffers have been merged successfully;
+ // equivalently, when all of the iterators in |itrs| have reached the end
+ // of their respective queue from |buffer_queues|.
+ // TODO(wolenetz/acolwell): Ideally, we would use a heap to store the head of
+ // all queues and pop the head with lowest decode timestamp in log(N) time.
+ // However, N will typically be small and usage of this implementation is
+ // meant to be short-term. See http://crbug.com/338484.
+ while (true) {
+ // Tracks which queue's iterator is pointing to the candidate buffer to
+ // append next, or -1 if no candidate buffer is found. This indexes |itrs|.
+ int index_of_queue_with_next_decode_timestamp = -1;
+ base::TimeDelta next_decode_timestamp = kNoTimestamp();
+
+ // Scan each of the iterators for |buffer_queues| to find the candidate
+ // buffer, if any, that has the lowest decode timestamp.
+ for (size_t i = 0; i < num_itrs; ++i) {
+ if (itrs[i] == buffer_queues[i]->end())
+ continue;
+
+ // Extract the candidate buffer's decode timestamp.
+ base::TimeDelta ts = (*itrs[i])->GetDecodeTimestamp();
+
+ if (last_decode_timestamp != kNoTimestamp() &&
+ ts < last_decode_timestamp)
+ return false;
+
+ if (ts < next_decode_timestamp ||
+ next_decode_timestamp == kNoTimestamp()) {
+ // Remember the decode timestamp and queue iterator index for this
+ // potentially winning candidate buffer.
+ next_decode_timestamp = ts;
+ index_of_queue_with_next_decode_timestamp = i;
+ }
+ }
+
+ // All done if no further candidate buffers exist.
+ if (index_of_queue_with_next_decode_timestamp == -1)
+ return true;
+
+ // Otherwise, append the winning candidate buffer to |merged_buffers|,
+ // remember its decode timestamp as |last_decode_timestamp| now that it is
+ // the last buffer in |merged_buffers|, advance the corresponding
+ // input BufferQueue iterator, and continue.
+ scoped_refptr<StreamParserBuffer> buffer =
+ *itrs[index_of_queue_with_next_decode_timestamp];
+ last_decode_timestamp = buffer->GetDecodeTimestamp();
+ merged_buffers->push_back(buffer);
+ ++itrs[index_of_queue_with_next_decode_timestamp];
+ }
+}
+
+bool MergeBufferQueues(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_buffers,
+ StreamParser::BufferQueue* merged_buffers) {
+ DCHECK(merged_buffers);
+
+ // Prepare vector containing pointers to any provided non-empty buffer queues.
+ std::vector<const StreamParser::BufferQueue*> buffer_queues;
+ if (!audio_buffers.empty())
+ buffer_queues.push_back(&audio_buffers);
+ if (!video_buffers.empty())
+ buffer_queues.push_back(&video_buffers);
+ for (StreamParser::TextBufferQueueMap::const_iterator map_itr =
+ text_buffers.begin();
+ map_itr != text_buffers.end();
+ map_itr++) {
+ if (!map_itr->second.empty())
+ buffer_queues.push_back(&(map_itr->second));
+ }
+
+ // Do the merge.
+ return MergeBufferQueuesInternal(buffer_queues, merged_buffers);
+}
+
} // namespace media
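The TODO in MergeBufferQueuesInternal() notes that a heap would give log(N) selection of the next buffer instead of the linear scan over |itrs|. A sketch of that variant under the same sortedness preconditions; the helper name is hypothetical, not part of this patch, and it requires <queue> and <functional> beyond the file's existing includes:

  // Hypothetical log(N) merge using a min-heap of (DTS, queue index) pairs.
  static bool HeapMergeSketch(
      const std::vector<const StreamParser::BufferQueue*>& queues,
      StreamParser::BufferQueue* out) {
    typedef std::pair<base::TimeDelta, size_t> Head;
    std::priority_queue<Head, std::vector<Head>, std::greater<Head> > heap;
    std::vector<StreamParser::BufferQueue::const_iterator> itrs(queues.size());
    for (size_t i = 0; i < queues.size(); ++i) {
      itrs[i] = queues[i]->begin();
      if (itrs[i] != queues[i]->end())
        heap.push(Head((*itrs[i])->GetDecodeTimestamp(), i));
    }
    base::TimeDelta last_dts = kNoTimestamp();
    while (!heap.empty()) {
      const size_t i = heap.top().second;
      heap.pop();
      // Reject unsorted input, matching MergeBufferQueuesInternal().
      if (last_dts != kNoTimestamp() &&
          (*itrs[i])->GetDecodeTimestamp() < last_dts)
        return false;
      last_dts = (*itrs[i])->GetDecodeTimestamp();
      out->push_back(*itrs[i]);
      if (++itrs[i] != queues[i]->end())
        heap.push(Head((*itrs[i])->GetDecodeTimestamp(), i));
    }
    return true;
  }

Tie-breaking between queues with equal timestamps may differ from the linear-scan version, which the interface leaves undefined anyway.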
diff --git a/chromium/media/base/stream_parser.h b/chromium/media/base/stream_parser.h
index 101ce4eee0e..398e8e0a1af 100644
--- a/chromium/media/base/stream_parser.h
+++ b/chromium/media/base/stream_parser.h
@@ -8,6 +8,7 @@
#include <deque>
#include <map>
#include <string>
+#include <vector>
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
@@ -27,18 +28,48 @@ class VideoDecoderConfig;
class MEDIA_EXPORT StreamParser {
public:
typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
- typedef std::map<int, TextTrackConfig> TextTrackConfigMap;
- StreamParser();
- virtual ~StreamParser();
+ // The range of |TrackId| depends on the stream parser. It is currently
+ // the key for the buffer's text track config in the applicable
+ // TextTrackConfigMap (which is passed in StreamParser::NewConfigCB), or
+ // 0 for other media types that currently allow at most one track.
+ // WebMTracksParser uses -1 as an invalid text track number.
+ // TODO(wolenetz/acolwell): Change to size_type while fixing stream parsers to
+ // emit validated track configuration and buffer vectors rather than max 1
+ // audio, max 1 video, and N text tracks in a map keyed by
+ // bytestream-specific-ranged track numbers. See http://crbug.com/341581.
+ typedef int TrackId;
+
+ // Map of text track ID to the track configuration.
+ typedef std::map<TrackId, TextTrackConfig> TextTrackConfigMap;
+
+ // Map of text track ID to decode-timestamp-ordered buffers for the track.
+ typedef std::map<TrackId, const BufferQueue> TextBufferQueueMap;
+
+ // Stream parameters passed in InitCB.
+ struct InitParameters {
+ InitParameters(base::TimeDelta duration);
+
+ // Stream duration.
+ base::TimeDelta duration;
+
+ // Indicates the source time associated with presentation timestamp 0. The
+ // value is null if no mapping to Time exists.
+ base::Time timeline_offset;
+
+ // Indicates that timestampOffset should be updated based on the earliest
+ // end timestamp (audio or video) provided during each NewBuffersCB.
+ bool auto_update_timestamp_offset;
+
+ // Indicates the liveness of the stream.
+ Demuxer::Liveness liveness;
+ };
// Indicates completion of parser initialization.
- // First parameter - Indicates initialization success. Set to true if
- // initialization was successful. False if an error
- // occurred.
- // Second parameter - Indicates the stream duration. Only contains a valid
- // value if the first parameter is true.
- typedef base::Callback<void(bool, base::TimeDelta)> InitCB;
+ // success - True if initialization was successful.
+ // params - Stream parameters, in case of successful initialization.
+ typedef base::Callback<void(bool success,
+ const InitParameters& params)> InitCB;
// Indicates when new stream configurations have been parsed.
// First parameter - The new audio configuration. If the config is not valid
@@ -57,20 +88,16 @@ class MEDIA_EXPORT StreamParser {
// New stream buffers have been parsed.
// First parameter - A queue of newly parsed audio buffers.
// Second parameter - A queue of newly parsed video buffers.
+ // Third parameter - A map of text track ids to queues of newly parsed inband
+ // text buffers. If the map is not empty, it must contain
+ // at least one track with a non-empty queue of text
+ // buffers.
// Return value - True indicates that the buffers are accepted.
// False if something was wrong with the buffers and a parsing
// error should be signalled.
typedef base::Callback<bool(const BufferQueue&,
- const BufferQueue&)> NewBuffersCB;
-
- // New stream buffers of inband text have been parsed.
- // First parameter - The id of the text track to which these cues will
- // be added.
- // Second parameter - A queue of newly parsed buffers.
- // Return value - True indicates that the buffers are accepted.
- // False if something was wrong with the buffers and a parsing
- // error should be signalled.
- typedef base::Callback<bool(int, const BufferQueue&)> NewTextBuffersCB;
+ const BufferQueue&,
+ const TextBufferQueueMap&)> NewBuffersCB;
// Signals the beginning of a new media segment.
typedef base::Callback<void()> NewMediaSegmentCB;
@@ -82,14 +109,18 @@ class MEDIA_EXPORT StreamParser {
typedef base::Callback<void(const std::string&,
const std::vector<uint8>&)> NeedKeyCB;
- // Initialize the parser with necessary callbacks. Must be called before any
+ StreamParser();
+ virtual ~StreamParser();
+
+ // Initializes the parser with necessary callbacks. Must be called before any
// data is passed to Parse(). |init_cb| will be called once enough data has
// been parsed to determine the initial stream configurations, presentation
- // start time, and duration.
+ // start time, and duration. If |ignore_text_track| is true, then no text
+ // buffers should be passed later by the parser to |new_buffers_cb|.
virtual void Init(const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ bool ignore_text_track,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -109,6 +140,23 @@ class MEDIA_EXPORT StreamParser {
DISALLOW_COPY_AND_ASSIGN(StreamParser);
};
+// Appends to |merged_buffers| the provided buffers in decode-timestamp order.
+// Any previous contents of |merged_buffers| are assumed to have lower
+// decode timestamps than the provided buffers. All provided buffer queues
+// are assumed to already be in decode-timestamp order.
+// Returns false if any of the provided audio/video/text buffers are found
+// to not be in decode timestamp order, or have a decode timestamp less than
+// the last buffer, if any, in |merged_buffers|. Partial results may exist
+// in |merged_buffers| in this case. Returns true on success.
+// No validation of media type within the various buffer queues is done here.
+// TODO(wolenetz/acolwell): Merge incrementally in parsers to eliminate
+// subtle issues with tie-breaking. See http://crbug.com/338484.
+MEDIA_EXPORT bool MergeBufferQueues(
+ const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_buffers,
+ StreamParser::BufferQueue* merged_buffers);
+
} // namespace media
#endif // MEDIA_BASE_STREAM_PARSER_H_
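For reference, a typical call under the documented preconditions (queue and map names here are illustrative):

  StreamParser::BufferQueue merged;
  if (!MergeBufferQueues(audio_buffers, video_buffers, text_buffer_map,
                         &merged)) {
    // Some input queue was unsorted, or regressed below the last buffer
    // already in |merged|; partial results may already have been appended.
  }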
diff --git a/chromium/media/base/stream_parser_buffer.cc b/chromium/media/base/stream_parser_buffer.cc
index bb46ef516ec..e9d64272c4d 100644
--- a/chromium/media/base/stream_parser_buffer.cc
+++ b/chromium/media/base/stream_parser_buffer.cc
@@ -9,22 +9,56 @@
namespace media {
+static scoped_refptr<StreamParserBuffer> CopyBuffer(
+ const StreamParserBuffer& buffer) {
+ if (buffer.end_of_stream())
+ return StreamParserBuffer::CreateEOSBuffer();
+
+ scoped_refptr<StreamParserBuffer> copied_buffer =
+ StreamParserBuffer::CopyFrom(buffer.data(),
+ buffer.data_size(),
+ buffer.side_data(),
+ buffer.side_data_size(),
+ buffer.IsKeyframe(),
+ buffer.type(),
+ buffer.track_id());
+ copied_buffer->SetDecodeTimestamp(buffer.GetDecodeTimestamp());
+ copied_buffer->SetConfigId(buffer.GetConfigId());
+ copied_buffer->set_timestamp(buffer.timestamp());
+ copied_buffer->set_duration(buffer.duration());
+ copied_buffer->set_discard_padding(buffer.discard_padding());
+ copied_buffer->set_splice_timestamp(buffer.splice_timestamp());
+ const DecryptConfig* decrypt_config = buffer.decrypt_config();
+ if (decrypt_config) {
+ copied_buffer->set_decrypt_config(
+ make_scoped_ptr(new DecryptConfig(decrypt_config->key_id(),
+ decrypt_config->iv(),
+ decrypt_config->subsamples())));
+ }
+
+ return copied_buffer;
+}
+
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CreateEOSBuffer() {
- return make_scoped_refptr(new StreamParserBuffer(NULL, 0, NULL, 0, false));
+ return make_scoped_refptr(new StreamParserBuffer(NULL, 0, NULL, 0, false,
+ DemuxerStream::UNKNOWN, 0));
}
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
- const uint8* data, int data_size, bool is_keyframe) {
+ const uint8* data, int data_size, bool is_keyframe, Type type,
+ TrackId track_id) {
return make_scoped_refptr(
- new StreamParserBuffer(data, data_size, NULL, 0, is_keyframe));
+ new StreamParserBuffer(data, data_size, NULL, 0, is_keyframe, type,
+ track_id));
}
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
const uint8* data, int data_size,
- const uint8* side_data, int side_data_size, bool is_keyframe) {
+ const uint8* side_data, int side_data_size,
+ bool is_keyframe, Type type, TrackId track_id) {
return make_scoped_refptr(
new StreamParserBuffer(data, data_size, side_data, side_data_size,
- is_keyframe));
+ is_keyframe, type, track_id));
}
base::TimeDelta StreamParserBuffer::GetDecodeTimestamp() const {
@@ -33,17 +67,22 @@ base::TimeDelta StreamParserBuffer::GetDecodeTimestamp() const {
return decode_timestamp_;
}
-void StreamParserBuffer::SetDecodeTimestamp(const base::TimeDelta& timestamp) {
+void StreamParserBuffer::SetDecodeTimestamp(base::TimeDelta timestamp) {
decode_timestamp_ = timestamp;
+ if (preroll_buffer_)
+ preroll_buffer_->SetDecodeTimestamp(timestamp);
}
StreamParserBuffer::StreamParserBuffer(const uint8* data, int data_size,
const uint8* side_data,
- int side_data_size, bool is_keyframe)
+ int side_data_size, bool is_keyframe,
+ Type type, TrackId track_id)
: DecoderBuffer(data, data_size, side_data, side_data_size),
is_keyframe_(is_keyframe),
decode_timestamp_(kNoTimestamp()),
- config_id_(kInvalidConfigId) {
+ config_id_(kInvalidConfigId),
+ type_(type),
+ track_id_(track_id) {
// TODO(scherkus): Should DataBuffer constructor accept a timestamp and
// duration to force clients to set them? Today they end up being zero which
// is both a common and valid value and could lead to bugs.
@@ -52,8 +91,7 @@ StreamParserBuffer::StreamParserBuffer(const uint8* data, int data_size,
}
}
-StreamParserBuffer::~StreamParserBuffer() {
-}
+StreamParserBuffer::~StreamParserBuffer() {}
int StreamParserBuffer::GetConfigId() const {
return config_id_;
@@ -61,6 +99,93 @@ int StreamParserBuffer::GetConfigId() const {
void StreamParserBuffer::SetConfigId(int config_id) {
config_id_ = config_id;
+ if (preroll_buffer_)
+ preroll_buffer_->SetConfigId(config_id);
+}
+
+void StreamParserBuffer::ConvertToSpliceBuffer(
+ const BufferQueue& pre_splice_buffers) {
+ DCHECK(splice_buffers_.empty());
+ DCHECK(!end_of_stream());
+
+ // Make a copy of this first, before making any changes.
+ scoped_refptr<StreamParserBuffer> overlapping_buffer = CopyBuffer(*this);
+ overlapping_buffer->set_splice_timestamp(kNoTimestamp());
+
+ const scoped_refptr<StreamParserBuffer>& first_splice_buffer =
+ pre_splice_buffers.front();
+
+ // Ensure the given buffers are actually before the splice point.
+ DCHECK(first_splice_buffer->timestamp() <= overlapping_buffer->timestamp());
+
+ // TODO(dalecurtis): We should also clear |data| and |side_data|, but since
+ // that implies EOS, care must be taken to ensure there are no clients relying
+ // on that behavior.
+
+ // Move over any preroll from this buffer.
+ if (preroll_buffer_) {
+ DCHECK(!overlapping_buffer->preroll_buffer_);
+ overlapping_buffer->preroll_buffer_.swap(preroll_buffer_);
+ }
+
+ // Rewrite |this| buffer as a splice buffer.
+ SetDecodeTimestamp(first_splice_buffer->GetDecodeTimestamp());
+ SetConfigId(first_splice_buffer->GetConfigId());
+ set_timestamp(first_splice_buffer->timestamp());
+ is_keyframe_ = first_splice_buffer->IsKeyframe();
+ type_ = first_splice_buffer->type();
+ track_id_ = first_splice_buffer->track_id();
+ set_splice_timestamp(overlapping_buffer->timestamp());
+
+ // The splice duration is the duration of all buffers before the splice plus
+ // the highest ending timestamp after the splice point.
+ set_duration(
+ std::max(overlapping_buffer->timestamp() + overlapping_buffer->duration(),
+ pre_splice_buffers.back()->timestamp() +
+ pre_splice_buffers.back()->duration()) -
+ first_splice_buffer->timestamp());
+
+ // Copy all pre splice buffers into our wrapper buffer.
+ for (BufferQueue::const_iterator it = pre_splice_buffers.begin();
+ it != pre_splice_buffers.end();
+ ++it) {
+ const scoped_refptr<StreamParserBuffer>& buffer = *it;
+ DCHECK(!buffer->end_of_stream());
+ DCHECK(!buffer->preroll_buffer());
+ DCHECK(buffer->splice_buffers().empty());
+ splice_buffers_.push_back(CopyBuffer(*buffer));
+ splice_buffers_.back()->set_splice_timestamp(splice_timestamp());
+ }
+
+ splice_buffers_.push_back(overlapping_buffer);
+}
+
+void StreamParserBuffer::SetPrerollBuffer(
+ const scoped_refptr<StreamParserBuffer>& preroll_buffer) {
+ DCHECK(!preroll_buffer_);
+ DCHECK(!end_of_stream());
+ DCHECK(!preroll_buffer->end_of_stream());
+ DCHECK(!preroll_buffer->preroll_buffer_);
+ DCHECK(preroll_buffer->splice_timestamp() == kNoTimestamp());
+ DCHECK(preroll_buffer->splice_buffers().empty());
+ DCHECK(preroll_buffer->timestamp() <= timestamp());
+ DCHECK(preroll_buffer->discard_padding() == DecoderBuffer::DiscardPadding());
+ DCHECK_EQ(preroll_buffer->type(), type());
+ DCHECK_EQ(preroll_buffer->track_id(), track_id());
+
+ preroll_buffer_ = preroll_buffer;
+ preroll_buffer_->set_timestamp(timestamp());
+ preroll_buffer_->SetDecodeTimestamp(GetDecodeTimestamp());
+
+ // Mark the entire buffer for discard.
+ preroll_buffer_->set_discard_padding(
+ std::make_pair(kInfiniteDuration(), base::TimeDelta()));
+}
+
+void StreamParserBuffer::set_timestamp(base::TimeDelta timestamp) {
+ DecoderBuffer::set_timestamp(timestamp);
+ if (preroll_buffer_)
+ preroll_buffer_->set_timestamp(timestamp);
}
} // namespace media
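A sketch of how a parser might use the new preroll plumbing; the data pointers and track constants below are illustrative, not part of the patch:

  // Hypothetical audio parser snippet.
  scoped_refptr<StreamParserBuffer> buffer = StreamParserBuffer::CopyFrom(
      data, data_size, true, DemuxerStream::AUDIO, 0);
  scoped_refptr<StreamParserBuffer> preroll = StreamParserBuffer::CopyFrom(
      preroll_data, preroll_size, true, DemuxerStream::AUDIO, 0);
  buffer->SetPrerollBuffer(preroll);
  // |preroll| now carries an infinite discard_padding, and any later changes
  // to |buffer|'s timestamp, decode timestamp, config id, or track id
  // propagate to it, per SetPrerollBuffer() and the setters above.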
diff --git a/chromium/media/base/stream_parser_buffer.h b/chromium/media/base/stream_parser_buffer.h
index 8899f11216d..24abe1a9cd8 100644
--- a/chromium/media/base/stream_parser_buffer.h
+++ b/chromium/media/base/stream_parser_buffer.h
@@ -5,8 +5,12 @@
#ifndef MEDIA_BASE_STREAM_PARSER_BUFFER_H_
#define MEDIA_BASE_STREAM_PARSER_BUFFER_H_
+#include <deque>
+
#include "media/base/decoder_buffer.h"
+#include "media/base/demuxer_stream.h"
#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
namespace media {
@@ -15,33 +19,83 @@ class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
// Value used to signal an invalid decoder config ID.
enum { kInvalidConfigId = -1 };
+ typedef DemuxerStream::Type Type;
+ typedef StreamParser::TrackId TrackId;
+
static scoped_refptr<StreamParserBuffer> CreateEOSBuffer();
+
static scoped_refptr<StreamParserBuffer> CopyFrom(
- const uint8* data, int data_size, bool is_keyframe);
+ const uint8* data, int data_size, bool is_keyframe, Type type,
+ TrackId track_id);
static scoped_refptr<StreamParserBuffer> CopyFrom(
const uint8* data, int data_size,
- const uint8* side_data, int side_data_size, bool is_keyframe);
+ const uint8* side_data, int side_data_size, bool is_keyframe, Type type,
+ TrackId track_id);
bool IsKeyframe() const { return is_keyframe_; }
// Decode timestamp. If not explicitly set, or set to kNoTimestamp(), the
// value will be taken from the normal timestamp.
base::TimeDelta GetDecodeTimestamp() const;
- void SetDecodeTimestamp(const base::TimeDelta& timestamp);
+ void SetDecodeTimestamp(base::TimeDelta timestamp);
- // Gets/sets the ID of the decoder config associated with this
- // buffer.
+ // Gets/sets the ID of the decoder config associated with this buffer.
int GetConfigId() const;
void SetConfigId(int config_id);
+ // Gets the parser's media type associated with this buffer. Value is
+ // meaningless for EOS buffers.
+ Type type() const { return type_; }
+
+ // Gets the parser's track ID associated with this buffer. Value is
+ // meaningless for EOS buffers.
+ TrackId track_id() const { return track_id_; }
+
+ // Converts this buffer to a splice buffer. |pre_splice_buffers| must not
+ // contain any EOS buffers, any splice buffers, or any buffers with preroll.
+ //
+ // |pre_splice_buffers| will be deep copied and each copy's splice_timestamp()
+ // will be set to this buffer's splice_timestamp(). A copy of |this|, with a
+ // splice_timestamp() of kNoTimestamp(), will be added to the end of
+ // |splice_buffers_|.
+ //
+ // See the Audio Splice Frame Algorithm in the MSE specification for details.
+ typedef StreamParser::BufferQueue BufferQueue;
+ void ConvertToSpliceBuffer(const BufferQueue& pre_splice_buffers);
+ const BufferQueue& splice_buffers() const { return splice_buffers_; }
+
+ // Specifies a buffer which must be decoded prior to this one to ensure this
+ // buffer can be accurately decoded. The given buffer must be of the same
+ // type, must not be a splice buffer, must not have any discard padding, and
+ // must not be an end of stream buffer. |preroll| is not copied.
+ //
+ // It's expected that this preroll buffer will be discarded entirely post
+ // decoding. As such, its discard_padding() will be set to kInfiniteDuration.
+ //
+ // All future timestamp, decode timestamp, config id, or track id changes to
+ // this buffer will be applied to the preroll buffer as well.
+ void SetPrerollBuffer(const scoped_refptr<StreamParserBuffer>& preroll);
+ const scoped_refptr<StreamParserBuffer>& preroll_buffer() {
+ return preroll_buffer_;
+ }
+
+ virtual void set_timestamp(base::TimeDelta timestamp) OVERRIDE;
+
private:
StreamParserBuffer(const uint8* data, int data_size,
const uint8* side_data, int side_data_size,
- bool is_keyframe);
+ bool is_keyframe, Type type,
+ TrackId track_id);
virtual ~StreamParserBuffer();
bool is_keyframe_;
base::TimeDelta decode_timestamp_;
int config_id_;
+ Type type_;
+ TrackId track_id_;
+ BufferQueue splice_buffers_;
+ scoped_refptr<StreamParserBuffer> preroll_buffer_;
+
DISALLOW_COPY_AND_ASSIGN(StreamParserBuffer);
};
diff --git a/chromium/media/base/stream_parser_unittest.cc b/chromium/media/base/stream_parser_unittest.cc
new file mode 100644
index 00000000000..793260ca722
--- /dev/null
+++ b/chromium/media/base/stream_parser_unittest.cc
@@ -0,0 +1,382 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <sstream>
+
+#include "base/basictypes.h"
+#include "media/base/stream_parser.h"
+#include "media/base/stream_parser_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+typedef StreamParser::TrackId TrackId;
+typedef StreamParser::BufferQueue BufferQueue;
+typedef StreamParser::TextBufferQueueMap TextBufferQueueMap;
+
+const int kEnd = -1;
+const uint8 kFakeData[] = { 0xFF };
+const TrackId kAudioTrackId = 0;
+const TrackId kVideoTrackId = 1;
+const TrackId kTextTrackIdA = 2;
+const TrackId kTextTrackIdB = 3;
+
+static bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) {
+ return buffer->type() == DemuxerStream::AUDIO;
+}
+
+static bool IsVideo(scoped_refptr<StreamParserBuffer> buffer) {
+ return buffer->type() == DemuxerStream::VIDEO;
+}
+
+static bool IsText(scoped_refptr<StreamParserBuffer> buffer) {
+ return buffer->type() == DemuxerStream::TEXT;
+}
+
+// Creates and appends a sequence of StreamParserBuffers to the provided
+// |queue|. |decode_timestamps| determines the number of appended buffers and
+// their sequence of decode timestamps; a |kEnd| timestamp indicates the
+// end of the sequence and no buffer is appended for it. Each new buffer's
+// type will be |type| with track ID set to |track_id|.
+static void GenerateBuffers(const int* decode_timestamps,
+ StreamParserBuffer::Type type,
+ TrackId track_id,
+ BufferQueue* queue) {
+ DCHECK(decode_timestamps);
+ DCHECK(queue);
+ DCHECK_NE(type, DemuxerStream::UNKNOWN);
+ DCHECK_LT(type, DemuxerStream::NUM_TYPES);
+ for (int i = 0; decode_timestamps[i] != kEnd; ++i) {
+ scoped_refptr<StreamParserBuffer> buffer =
+ StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData),
+ true, type, track_id);
+ buffer->SetDecodeTimestamp(
+ base::TimeDelta::FromMicroseconds(decode_timestamps[i]));
+ queue->push_back(buffer);
+ }
+}
+
+class StreamParserTest : public testing::Test {
+ protected:
+ StreamParserTest() {}
+
+ // Returns the number of buffers in |merged_buffers_| for which |predicate|
+ // returns true.
+ size_t CountMatchingMergedBuffers(
+ bool (*predicate)(scoped_refptr<StreamParserBuffer> buffer)) {
+ return static_cast<size_t>(std::count_if(merged_buffers_.begin(),
+ merged_buffers_.end(),
+ predicate));
+ }
+
+ // Appends test audio buffers in the sequence described by |decode_timestamps|
+ // to |audio_buffers_|. See GenerateBuffers() for |decode_timestamps| format.
+ void GenerateAudioBuffers(const int* decode_timestamps) {
+ GenerateBuffers(decode_timestamps, DemuxerStream::AUDIO, kAudioTrackId,
+ &audio_buffers_);
+ }
+
+ // Appends test video buffers in the sequence described by |decode_timestamps|
+ // to |video_buffers_|. See GenerateBuffers() for |decode_timestamps| format.
+ void GenerateVideoBuffers(const int* decode_timestamps) {
+ GenerateBuffers(decode_timestamps, DemuxerStream::VIDEO, kVideoTrackId,
+ &video_buffers_);
+ }
+
+ // Current tests only need up to two distinct text BufferQueues. This helper
+ // conditionally appends buffers to the underlying |text_buffers_a_| and
+ // |text_buffers_b_| and conditionally inserts these BufferQueues into
+ // |text_map_| keyed by the respective track ID. If |decode_timestamps_{a,b}|
+ // is NULL, then the corresponding BufferQueue is neither appended to nor
+ // inserted into |text_map_| (though it may previously have been inserted).
+ // Note that key collision on map insertion does not replace the previous
+ // value.
+ void GenerateTextBuffers(const int* decode_timestamps_a,
+ const int* decode_timestamps_b) {
+ if (decode_timestamps_a) {
+ GenerateBuffers(decode_timestamps_a, DemuxerStream::TEXT, kTextTrackIdA,
+ &text_buffers_a_);
+ text_map_.insert(std::make_pair(kTextTrackIdA, text_buffers_a_));
+ }
+
+ if (decode_timestamps_b) {
+ GenerateBuffers(decode_timestamps_b, DemuxerStream::TEXT, kTextTrackIdB,
+ &text_buffers_b_);
+ text_map_.insert(std::make_pair(kTextTrackIdB, text_buffers_b_));
+ }
+ }
+
+ // Returns a string that describes the sequence of buffers in
+ // |merged_buffers_|. The string is a concatenation of space-delimited buffer
+ // descriptors in the same sequence as |merged_buffers_|. Each descriptor is
+ // the concatenation of
+ // 1) a single character that describes the buffer's type(), e.g. A, V, or T
+ // for audio, video, or text, respectively
+ // 2) the buffer's track_id()
+ // 3) ":"
+ // 4) the buffer's decode timestamp.
+ // If |include_type_and_text_track| is false, then items 1, 2, and 3 are
+ // not included in descriptors. This is useful when buffers with different
+ // media types but the same decode timestamp are expected, and the exact
+ // sequence of media types for the tied timestamps is not subject to
+ // verification.
+ std::string MergedBufferQueueString(bool include_type_and_text_track) {
+ std::stringstream results_stream;
+ for (BufferQueue::const_iterator itr = merged_buffers_.begin();
+ itr != merged_buffers_.end();
+ ++itr) {
+ if (itr != merged_buffers_.begin())
+ results_stream << " ";
+ const StreamParserBuffer& buffer = *(*itr);
+ if (include_type_and_text_track) {
+ switch (buffer.type()) {
+ case DemuxerStream::AUDIO:
+ results_stream << "A";
+ break;
+ case DemuxerStream::VIDEO:
+ results_stream << "V";
+ break;
+ case DemuxerStream::TEXT:
+ results_stream << "T";
+
+ break;
+ default:
+ NOTREACHED();
+ }
+ results_stream << buffer.track_id() << ":";
+ }
+ results_stream << buffer.GetDecodeTimestamp().InMicroseconds();
+ }
+
+ return results_stream.str();
+ }
+
+ // Verifies that MergeBufferQueues() of the current |audio_buffers_|,
+ // |video_buffers_|, |text_map_|, and |merged_buffers_| returns true and
+ // results in an updated |merged_buffers_| that matches expectation. The
+ // expectation, specified in |expected|, is compared to the string resulting
+ // from MergedBufferQueueString() (see comments for that method) with
+ // |verify_type_and_text_track_sequence| passed. |merged_buffers_| is appended
+ // to by the merge, and may be setup by the caller to have some pre-existing
+ // buffers; it is both an input and output of this method.
+ // Regardless of |verify_type_and_text_track_sequence|, the marginal number
+ // of buffers of each type (audio, video, text) resulting from the merge is
+ // also verified to match the number of buffers in |audio_buffers_|,
+ // |video_buffers_|, and |text_map_|, respectively.
+ void VerifyMergeSuccess(const std::string& expected,
+ bool verify_type_and_text_track_sequence) {
+ // |merged_buffers_| may already have some buffers. Count them by type for
+ // later inclusion in verification.
+ size_t original_audio_in_merged = CountMatchingMergedBuffers(IsAudio);
+ size_t original_video_in_merged = CountMatchingMergedBuffers(IsVideo);
+ size_t original_text_in_merged = CountMatchingMergedBuffers(IsText);
+
+ EXPECT_TRUE(MergeBufferQueues(audio_buffers_, video_buffers_, text_map_,
+ &merged_buffers_));
+
+ // Verify the resulting contents of |merged_buffers_| match |expected|.
+ EXPECT_EQ(expected,
+ MergedBufferQueueString(verify_type_and_text_track_sequence));
+
+ // Verify that the correct number of each type of buffer is in the merge
+ // result.
+ size_t audio_in_merged = CountMatchingMergedBuffers(IsAudio);
+ size_t video_in_merged = CountMatchingMergedBuffers(IsVideo);
+ size_t text_in_merged = CountMatchingMergedBuffers(IsText);
+
+ EXPECT_GE(audio_in_merged, original_audio_in_merged);
+ EXPECT_GE(video_in_merged, original_video_in_merged);
+ EXPECT_GE(text_in_merged, original_text_in_merged);
+
+ EXPECT_EQ(audio_buffers_.size(),
+ audio_in_merged - original_audio_in_merged);
+ EXPECT_EQ(video_buffers_.size(),
+ video_in_merged - original_video_in_merged);
+
+ size_t expected_text_buffer_count = 0;
+ for (TextBufferQueueMap::const_iterator itr = text_map_.begin();
+ itr != text_map_.end();
+ ++itr) {
+ expected_text_buffer_count += itr->second.size();
+ }
+ EXPECT_EQ(expected_text_buffer_count,
+ text_in_merged - original_text_in_merged);
+ }
+
+ // Verifies that MergeBufferQueues() of the current |audio_buffers_|,
+ // |video_buffers_|, |text_map_|, and |merged_buffers_| returns false.
+ void VerifyMergeFailure() {
+ EXPECT_FALSE(MergeBufferQueues(audio_buffers_, video_buffers_, text_map_,
+ &merged_buffers_));
+ }
+
+ // Helper to allow tests to clear all the input BufferQueues (except
+ // |merged_buffers_|) and the TextBufferQueueMap that are used in
+ // VerifyMerge{Success/Failure}().
+ void ClearQueuesAndTextMapButKeepAnyMergedBuffers() {
+ audio_buffers_.clear();
+ video_buffers_.clear();
+ text_buffers_a_.clear();
+ text_buffers_b_.clear();
+ text_map_.clear();
+ }
+
+ private:
+ BufferQueue audio_buffers_;
+ BufferQueue video_buffers_;
+ BufferQueue text_buffers_a_;
+ BufferQueue text_buffers_b_;
+ BufferQueue merged_buffers_;
+ TextBufferQueueMap text_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(StreamParserTest);
+};
+
+TEST_F(StreamParserTest, MergeBufferQueues_AllEmpty) {
+ std::string expected = "";
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_SingleAudioBuffer) {
+ std::string expected = "A0:100";
+ int audio_timestamps[] = { 100, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_SingleVideoBuffer) {
+ std::string expected = "V1:100";
+ int video_timestamps[] = { 100, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_SingleTextBuffer) {
+ std::string expected = "T2:100";
+ int text_timestamps[] = { 100, kEnd };
+ GenerateTextBuffers(text_timestamps, NULL);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideo) {
+ std::string expected = "A0:100 V1:101 V1:102 A0:103 A0:104 V1:105";
+ int audio_timestamps[] = { 100, 103, 104, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 101, 102, 105, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_OverlappingMultipleText) {
+ std::string expected = "T2:100 T2:101 T3:103 T2:104 T3:105 T3:106";
+ int text_timestamps_a[] = { 100, 101, 104, kEnd };
+ int text_timestamps_b[] = { 103, 105, 106, kEnd };
+ GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideoText) {
+ std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
+ "T2:107";
+ int audio_timestamps[] = { 100, 105, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 101, 103, 106, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ int text_timestamps_a[] = { 102, 107, kEnd };
+ int text_timestamps_b[] = { 104, kEnd };
+ GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_NonDecreasingNoCrossMediaDuplicate) {
+ std::string expected = "A0:100 A0:100 A0:100 V1:101 V1:101 V1:101 A0:102 "
+ "V1:103 V1:103";
+ int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 101, 101, 101, 103, 103, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_CrossStreamDuplicates) {
+ // The interface leaves undefined which stream's buffer wins the selection
+ // when timestamps are tied. Verify at least that the right number of each
+ // kind of buffer results, and that buffers are in nondecreasing order.
+ std::string expected = "100 100 100 100 100 100 102 102 102 102 102 102 102";
+ int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 100, 100, 102, 102, 102, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ int text_timestamps[] = { 100, 102, 102, 102, kEnd };
+ GenerateTextBuffers(text_timestamps, NULL);
+ VerifyMergeSuccess(expected, false);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingSingleStream) {
+ int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ VerifyMergeFailure();
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingMultipleStreams) {
+ int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 104, 100, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ VerifyMergeFailure();
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_ValidAppendToExistingMerge) {
+ std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
+ "T2:107";
+ int audio_timestamps[] = { 100, 105, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 101, 103, 106, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ int text_timestamps_a[] = { 102, 107, kEnd };
+ int text_timestamps_b[] = { 104, kEnd };
+ GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
+ VerifyMergeSuccess(expected, true);
+
+ ClearQueuesAndTextMapButKeepAnyMergedBuffers();
+
+ expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 T2:107 "
+ "A0:107 V1:111 T2:112 V1:113 T3:114 A0:115 V1:116 T2:117";
+ int more_audio_timestamps[] = { 107, 115, kEnd };
+ GenerateAudioBuffers(more_audio_timestamps);
+ int more_video_timestamps[] = { 111, 113, 116, kEnd };
+ GenerateVideoBuffers(more_video_timestamps);
+ int more_text_timestamps_a[] = { 112, 117, kEnd };
+ int more_text_timestamps_b[] = { 114, kEnd };
+ GenerateTextBuffers(more_text_timestamps_a, more_text_timestamps_b);
+ VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_InvalidAppendToExistingMerge) {
+ std::string expected = "A0:100 V1:101 T2:102 V1:103 T3:104 A0:105 V1:106 "
+ "T2:107";
+ int audio_timestamps[] = { 100, 105, kEnd };
+ GenerateAudioBuffers(audio_timestamps);
+ int video_timestamps[] = { 101, 103, 106, kEnd };
+ GenerateVideoBuffers(video_timestamps);
+ int text_timestamps_a[] = { 102, 107, kEnd };
+ int text_timestamps_b[] = { 104, kEnd };
+ GenerateTextBuffers(text_timestamps_a, text_timestamps_b);
+ VerifyMergeSuccess(expected, true);
+
+ // Appending empty buffers to a pre-existing merge result should succeed and
+ // not change the existing result.
+ ClearQueuesAndTextMapButKeepAnyMergedBuffers();
+ VerifyMergeSuccess(expected, true);
+
+ // But appending something with a lower timestamp than the last timestamp
+ // in the pre-existing merge result should fail.
+ int more_audio_timestamps[] = { 106, kEnd };
+ GenerateAudioBuffers(more_audio_timestamps);
+ VerifyMergeFailure();
+}
+
+} // namespace media
+
diff --git a/chromium/media/base/test_data_util.cc b/chromium/media/base/test_data_util.cc
index 386617e006b..a83fa840b41 100644
--- a/chromium/media/base/test_data_util.cc
+++ b/chromium/media/base/test_data_util.cc
@@ -6,6 +6,7 @@
#include "base/file_util.h"
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "base/path_service.h"
#include "media/base/decoder_buffer.h"
@@ -15,25 +16,20 @@ base::FilePath GetTestDataFilePath(const std::string& name) {
base::FilePath file_path;
CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
- file_path = file_path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test")).Append(FILE_PATH_LITERAL("data"))
+ return file_path.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
.AppendASCII(name);
- return file_path;
}
scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name) {
- base::FilePath file_path;
- CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
-
- file_path = file_path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test")).Append(FILE_PATH_LITERAL("data"))
- .AppendASCII(name);
+ base::FilePath file_path = GetTestDataFilePath(name);
int64 tmp = 0;
CHECK(base::GetFileSize(file_path, &tmp))
<< "Failed to get file size for '" << name << "'";
- int file_size = static_cast<int>(tmp);
+ int file_size = base::checked_cast<int>(tmp);
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(file_size));
CHECK_EQ(file_size,
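base::checked_cast<>() (from the newly included base/numerics/safe_conversions.h) CHECK-fails when the value does not fit the destination type, rather than silently truncating as the previous static_cast did. A small illustration with made-up values:

  int64 small_file = 42;
  int64 huge_file = static_cast<int64>(1) << 40;   // ~1 TiB, exceeds INT_MAX.
  int ok = base::checked_cast<int>(small_file);    // Returns 42.
  int bad = base::checked_cast<int>(huge_file);    // CHECK failure at runtime.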
diff --git a/chromium/media/base/test_helpers.cc b/chromium/media/base/test_helpers.cc
index 43c5cfac748..929b2f3c725 100644
--- a/chromium/media/base/test_helpers.cc
+++ b/chromium/media/base/test_helpers.cc
@@ -12,7 +12,7 @@
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "media/base/audio_buffer.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "ui/gfx/rect.h"
@@ -62,14 +62,14 @@ WaitableMessageLoopEvent::~WaitableMessageLoopEvent() {}
base::Closure WaitableMessageLoopEvent::GetClosure() {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
- return BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
+ return BindToCurrentLoop(base::Bind(
&WaitableMessageLoopEvent::OnCallback, base::Unretained(this),
PIPELINE_OK));
}
PipelineStatusCB WaitableMessageLoopEvent::GetPipelineStatusCB() {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
- return BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
+ return BindToCurrentLoop(base::Bind(
&WaitableMessageLoopEvent::OnCallback, base::Unretained(this)));
}
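The switch from BindToLoop to BindToCurrentLoop drops the explicit message loop proxy argument: the returned callback captures the loop that is current at bind time and, when Run() from any thread, posts the wrapped callback back to that loop (assumed semantics of media::BindToCurrentLoop; a sketch):

  // Must be created on the loop that should run OnCallback().
  base::Closure cb = BindToCurrentLoop(base::Bind(
      &WaitableMessageLoopEvent::OnCallback, base::Unretained(this),
      PIPELINE_OK));
  // Safe to hand to another thread; Run() re-posts to the original loop.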
@@ -149,100 +149,62 @@ gfx::Size TestVideoConfig::LargeCodedSize() {
}
template <class T>
-scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration) {
- DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
- format == kSampleFormatS32 || format == kSampleFormatF32);
-
- // Create a block of memory with values:
- // start
- // start + increment
- // start + 2 * increment, ...
- // Since this is interleaved data, channel 0 data will be:
- // start
- // start + channels * increment
- // start + 2 * channels * increment, ...
- int buffer_size = frames * channels * sizeof(T);
- scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
- uint8* data[] = { memory.get() };
- T* buffer = reinterpret_cast<T*>(memory.get());
- for (int i = 0; i < frames * channels; ++i) {
- buffer[i] = start;
- start += increment;
- }
- return AudioBuffer::CopyFrom(
- format, channels, frames, data, start_time, duration);
-}
+scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
+ ChannelLayout channel_layout,
+ size_t channel_count,
+ int sample_rate,
+ T start,
+ T increment,
+ size_t frames,
+ base::TimeDelta timestamp) {
+ const size_t channels = ChannelLayoutToChannelCount(channel_layout);
+ scoped_refptr<AudioBuffer> output =
+ AudioBuffer::CreateBuffer(format,
+ channel_layout,
+ static_cast<int>(channel_count),
+ sample_rate,
+ static_cast<int>(frames));
+ output->set_timestamp(timestamp);
+
+ const bool is_planar =
+ format == kSampleFormatPlanarS16 || format == kSampleFormatPlanarF32;
-template <class T>
-scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration) {
- DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
-
- // Create multiple blocks of data, one for each channel.
// Values in channel 0 will be:
// start
// start + increment
// start + 2 * increment, ...
- // Values in channel 1 will be:
+ // Meanwhile, values in channel 1 will be:
// start + frames * increment
// start + (frames + 1) * increment
// start + (frames + 2) * increment, ...
- int buffer_size = frames * sizeof(T);
- scoped_ptr<uint8*[]> data(new uint8*[channels]);
- scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
- for (int i = 0; i < channels; ++i) {
- data.get()[i] = memory.get() + i * buffer_size;
- T* buffer = reinterpret_cast<T*>(data.get()[i]);
- for (int j = 0; j < frames; ++j) {
- buffer[j] = start;
- start += increment;
+ for (size_t ch = 0; ch < channels; ++ch) {
+ T* buffer =
+ reinterpret_cast<T*>(output->channel_data()[is_planar ? ch : 0]);
+ const T v = static_cast<T>(start + ch * frames * increment);
+ for (size_t i = 0; i < frames; ++i) {
+ buffer[is_planar ? i : ch + i * channels] =
+ static_cast<T>(v + i * increment);
}
}
- return AudioBuffer::CopyFrom(
- format, channels, frames, data.get(), start_time, duration);
-}
-
-// Instantiate all the types of MakeInterleavedAudioBuffer() and
-// MakePlanarAudioBuffer() needed.
-
-#define DEFINE_INTERLEAVED_INSTANCE(type) \
- template scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer<type>( \
- SampleFormat format, \
- int channels, \
- type start, \
- type increment, \
- int frames, \
- base::TimeDelta start_time, \
- base::TimeDelta duration)
-DEFINE_INTERLEAVED_INSTANCE(uint8);
-DEFINE_INTERLEAVED_INSTANCE(int16);
-DEFINE_INTERLEAVED_INSTANCE(int32);
-DEFINE_INTERLEAVED_INSTANCE(float);
-
-#define DEFINE_PLANAR_INSTANCE(type) \
- template scoped_refptr<AudioBuffer> MakePlanarAudioBuffer<type>( \
- SampleFormat format, \
- int channels, \
- type start, \
- type increment, \
- int frames, \
- base::TimeDelta start_time, \
- base::TimeDelta duration);
-DEFINE_PLANAR_INSTANCE(int16);
-DEFINE_PLANAR_INSTANCE(float);
+ return output;
+}
+
+// Instantiate all the sample types of MakeAudioBuffer() needed.
+#define DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakeAudioBuffer<type>( \
+ SampleFormat format, \
+ ChannelLayout channel_layout, \
+ size_t channel_count, \
+ int sample_rate, \
+ type start, \
+ type increment, \
+ size_t frames, \
+ base::TimeDelta timestamp)
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(uint8);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int16);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int32);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(float);
static const char kFakeVideoBufferHeader[] = "FakeVideoBufferForTest";
diff --git a/chromium/media/base/test_helpers.h b/chromium/media/base/test_helpers.h
index 872d08d6f8d..8dc38958f5d 100644
--- a/chromium/media/base/test_helpers.h
+++ b/chromium/media/base/test_helpers.h
@@ -7,6 +7,7 @@
#include "base/basictypes.h"
#include "base/callback.h"
+#include "media/base/channel_layout.h"
#include "media/base/pipeline_status.h"
#include "media/base/sample_format.h"
#include "media/base/video_decoder_config.h"
@@ -85,51 +86,32 @@ class TestVideoConfig {
};
// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T. Each frame will have the data from |channels| channels
-// interleaved. |start| and |increment| are used to specify the values for the
-// samples. Since this is interleaved data, channel 0 data will be:
-// |start|
-// |start| + |channels| * |increment|
-// |start| + 2 * |channels| * |increment|, and so on.
-// Data for subsequent channels is similar. No check is done that |format|
-// requires data to be of type T, but it is verified that |format| is an
-// interleaved format.
+// is of type T. |start| and |increment| are used to specify the values for the
+// samples, which are created in channel order. The value for each frame and
+// channel is determined by:
//
-// |start_time| will be used as the start time for the samples. |duration| is
-// the duration.
-template <class T>
-scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration);
-
-// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T. Since this is planar data, there will be a block for each of
-// |channel| channels. |start| and |increment| are used to specify the values
-// for the samples, which are created in channel order. Since this is planar
-// data, channel 0 data will be:
-// |start|
-// |start| + |increment|
-// |start| + 2 * |increment|, and so on.
-// Data for channel 1 will follow where channel 0 ends. Subsequent channels are
-// similar. No check is done that |format| requires data to be of type T, but it
-// is verified that |format| is a planar format.
+// |start| + |channel| * |frames| * |increment| + index * |increment|
//
-// |start_time| will be used as the start time for the samples. |duration| is
-// the duration.
+// E.g., for a stereo buffer the values in channel 0 will be:
+// start
+// start + increment
+// start + 2 * increment, ...
+//
+// Meanwhile, values in channel 1 will be:
+// start + frames * increment
+// start + (frames + 1) * increment
+// start + (frames + 2) * increment, ...
+//
+// |start_time| will be used as the start time for the samples.
template <class T>
-scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration);
+scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
+ ChannelLayout channel_layout,
+ size_t channel_count,
+ int sample_rate,
+ T start,
+ T increment,
+ size_t frames,
+ base::TimeDelta timestamp);
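
As a worked illustration of the fill formula above (a sketch only; the values
follow directly from |start| + |channel| * |frames| * |increment| +
index * |increment|):

    // Hypothetical call: stereo planar float buffer, 4 frames.
    scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
        kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 2, 44100,
        1.0f /* start */, 1.0f /* increment */, 4 /* frames */,
        base::TimeDelta());
    // Channel 0 holds {1, 2, 3, 4}; channel 1 holds {5, 6, 7, 8}.
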
// Create a fake video DecoderBuffer for testing purpose. The buffer contains
// part of video decoder config info embedded so that the testing code can do
diff --git a/chromium/media/base/text_ranges.cc b/chromium/media/base/text_ranges.cc
new file mode 100644
index 00000000000..41bc7d0905d
--- /dev/null
+++ b/chromium/media/base/text_ranges.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_ranges.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+TextRanges::TextRanges() {
+ Reset();
+}
+
+TextRanges::~TextRanges() {
+}
+
+void TextRanges::Reset() {
+ curr_range_itr_ = range_map_.end();
+}
+
+bool TextRanges::AddCue(base::TimeDelta start_time) {
+ typedef RangeMap::iterator Itr;
+
+ if (curr_range_itr_ == range_map_.end()) {
+ // There is no active time range, so this is the first AddCue()
+ // attempt that follows a Reset().
+
+ if (range_map_.empty()) {
+ NewRange(start_time);
+ return true;
+ }
+
+ if (start_time < range_map_.begin()->first) {
+ NewRange(start_time);
+ return true;
+ }
+
+ const Itr itr = --Itr(range_map_.upper_bound(start_time));
+ DCHECK(start_time >= itr->first);
+
+ Range& range = itr->second;
+
+ if (start_time > range.last_time()) {
+ NewRange(start_time);
+ return true;
+ }
+
+ range.ResetCount(start_time);
+ curr_range_itr_ = itr;
+ return false;
+ }
+
+ DCHECK(start_time >= curr_range_itr_->first);
+
+ Range& curr_range = curr_range_itr_->second;
+
+ if (start_time <= curr_range.last_time())
+ return curr_range.AddCue(start_time);
+
+ const Itr next_range_itr = ++Itr(curr_range_itr_);
+
+ if (next_range_itr != range_map_.end()) {
+ DCHECK(next_range_itr->first > curr_range.last_time());
+ DCHECK(start_time <= next_range_itr->first);
+
+ if (start_time == next_range_itr->first) {
+ // We have walked off the current range, and onto the next one.
+ // There is now no ambiguity about where the current time range
+ // ends, and so we coalesce the current and next ranges.
+
+ Merge(curr_range, next_range_itr);
+ return false;
+ }
+ }
+
+ // Either |curr_range| is the last range in the map, or there is a
+ // next range beyond |curr_range|, but its start time is ahead of
+ // this cue's start time. In either case, this cue becomes the new
+ // last_time for |curr_range|. Eventually we will see a cue whose
+ // time matches the start time of the next range, in which case we
+ // coalesce the current and next ranges.
+
+ curr_range.SetLastTime(start_time);
+ return true;
+}
+
+size_t TextRanges::RangeCountForTesting() const {
+ return range_map_.size();
+}
+
+void TextRanges::NewRange(base::TimeDelta start_time) {
+ Range range;
+ range.SetLastTime(start_time);
+
+ std::pair<RangeMap::iterator, bool> result =
+ range_map_.insert(std::make_pair(start_time, range));
+ DCHECK(result.second);
+
+ curr_range_itr_ = result.first;
+}
+
+void TextRanges::Merge(
+ Range& curr_range,
+ const RangeMap::iterator& next_range_itr) {
+ curr_range = next_range_itr->second;
+ curr_range.ResetCount(next_range_itr->first);
+ range_map_.erase(next_range_itr);
+}
+
+void TextRanges::Range::ResetCount(base::TimeDelta start_time) {
+ count_ = (start_time < last_time_) ? 0 : 1;
+}
+
+void TextRanges::Range::SetLastTime(base::TimeDelta last_time) {
+ last_time_ = last_time;
+ count_ = 1;
+ max_count_ = 1;
+}
+
+bool TextRanges::Range::AddCue(base::TimeDelta start_time) {
+ if (start_time < last_time_) {
+ DCHECK_EQ(count_, 0);
+ return false;
+ }
+
+ DCHECK(start_time == last_time_);
+
+ ++count_;
+ if (count_ <= max_count_)
+ return false;
+
+ ++max_count_;
+ return true;
+}
+
+base::TimeDelta TextRanges::Range::last_time() const {
+ return last_time_;
+}
+
+} // namespace media
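
The coalescing behavior described in the comments above can be traced with a
short call sequence (a sketch; the return values follow the implementation
above):

    TextRanges ranges;
    ranges.AddCue(base::TimeDelta::FromSeconds(0));  // true: range [0, 0].
    ranges.AddCue(base::TimeDelta::FromSeconds(2));  // true: range [0, 2].
    ranges.Reset();
    ranges.AddCue(base::TimeDelta::FromSeconds(4));  // true: new range [4, 4].
    ranges.Reset();
    ranges.AddCue(base::TimeDelta::FromSeconds(0));  // false: inside [0, 2].
    ranges.AddCue(base::TimeDelta::FromSeconds(2));  // false: reaches last_time.
    ranges.AddCue(base::TimeDelta::FromSeconds(3));  // true: extends to [0, 3].
    ranges.AddCue(base::TimeDelta::FromSeconds(4));  // false: merges with
                                                     // [4, 4], leaving [0, 4].
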
diff --git a/chromium/media/base/text_ranges.h b/chromium/media/base/text_ranges.h
new file mode 100644
index 00000000000..adb1750817d
--- /dev/null
+++ b/chromium/media/base/text_ranges.h
@@ -0,0 +1,95 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_RANGES_H_
+#define MEDIA_BASE_TEXT_RANGES_H_
+
+#include <map>
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Helper class used by the TextRenderer to filter out text cues that
+// have already been passed downstream.
+class MEDIA_EXPORT TextRanges {
+ public:
+ TextRanges();
+ ~TextRanges();
+
+ // Reset the current range pointer, such that we bind to a new range
+ // (either one that exists already, or one that is freshly-created)
+ // during the next AddCue().
+ void Reset();
+
+ // Given a cue with starting timestamp |start_time|, add its start
+ // time to the time ranges. (Note that following a Reset(), cue
+ // times are assumed to be monotonically increasing.) If this time
+ // has already been added to the time ranges, then AddCue() returns
+ // false and clients should not push the cue downstream. Otherwise,
+ // the time is added to the time ranges and AddCue() returns true,
+ // meaning that the cue should be pushed downstream.
+ bool AddCue(base::TimeDelta start_time);
+
+ // Returns a count of the number of time ranges, intended for use by
+ // the unit test module to vet proper time range merge behavior.
+ size_t RangeCountForTesting() const;
+
+ private:
+ // Describes a range of times for cues that have already been
+ // pushed downstream.
+ class Range {
+ public:
+ // Reset the cue count for |start_time|: 0 if it precedes last_time,
+ // 1 if it matches.
+ void ResetCount(base::TimeDelta start_time);
+
+ // Set last_time and associated counts.
+ void SetLastTime(base::TimeDelta last_time);
+
+ // Adjust time range state to mark the cue as having been seen,
+ // returning true if we have not seen |start_time| already and
+ // false otherwise.
+ bool AddCue(base::TimeDelta start_time);
+
+ // Returns the value of the last time in the range.
+ base::TimeDelta last_time() const;
+
+ private:
+ // The last timestamp of this range.
+ base::TimeDelta last_time_;
+
+ // The number of cues we have detected so far, for this range,
+ // whose timestamp matches last_time.
+ int max_count_;
+
+ // The number of cues we have seen since the most recent Reset(),
+ // whose timestamp matches last_time.
+ int count_;
+ };
+
+ typedef std::map<base::TimeDelta, Range> RangeMap;
+
+ // NewRange() is used to create a new time range when AddCue() is
+ // called immediately following a Reset(), and no existing time
+ // range contains the indicated |start_time| of the cue.
+ void NewRange(base::TimeDelta start_time);
+
+ // Coalesce curr_range with the range that immediately follows.
+ void Merge(Range& curr_range, const RangeMap::iterator& next_range_itr);
+
+ // The collection of time ranges, each of which is bounded
+ // (inclusive) by the key and Range::last_time.
+ RangeMap range_map_;
+
+ // The time range to which we bind following a Reset().
+ RangeMap::iterator curr_range_itr_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextRanges);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_RANGES_H_
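
A minimal sketch of the AddCue() contract documented above (timestamps
illustrative):

    TextRanges ranges;
    ranges.AddCue(base::TimeDelta::FromSeconds(1));  // true: push downstream.
    ranges.Reset();                                  // e.g. after a seek.
    ranges.AddCue(base::TimeDelta::FromSeconds(1));  // false: already pushed.
    ranges.AddCue(base::TimeDelta::FromSeconds(2));  // true: a genuinely new cue.
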
diff --git a/chromium/media/base/text_ranges_unittest.cc b/chromium/media/base/text_ranges_unittest.cc
new file mode 100644
index 00000000000..7de051452f4
--- /dev/null
+++ b/chromium/media/base/text_ranges_unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_ranges.h"
+
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class TextRangesTest : public ::testing::Test {
+ protected:
+ bool AddCue(int seconds) {
+ return ranges_.AddCue(base::TimeDelta::FromSeconds(seconds));
+ }
+
+ void Reset() {
+ ranges_.Reset();
+ }
+
+ size_t RangeCount() {
+ return ranges_.RangeCountForTesting();
+ }
+
+ TextRanges ranges_;
+};
+
+TEST_F(TextRangesTest, TestEmptyRanges) {
+ // Create a new active range, with t=5.
+ EXPECT_TRUE(AddCue(5));
+
+ // Create a new active range, with t=2.
+ Reset();
+ EXPECT_TRUE(AddCue(2));
+
+ // Create a new active range, with t=8.
+ Reset();
+ EXPECT_TRUE(AddCue(8));
+
+ Reset();
+
+ // Make range [2, 2] active.
+ EXPECT_FALSE(AddCue(2));
+ EXPECT_EQ(RangeCount(), 3U);
+
+ // Coalesce first two ranges: [2, 5].
+ EXPECT_FALSE(AddCue(5));
+ EXPECT_EQ(RangeCount(), 2U);
+
+ // Coalesce first two ranges: [2, 8].
+ EXPECT_FALSE(AddCue(8));
+ EXPECT_EQ(RangeCount(), 1U);
+
+ // Add new cue to end of (only) range.
+ EXPECT_TRUE(AddCue(9));
+ EXPECT_EQ(RangeCount(), 1U);
+}
+
+TEST_F(TextRangesTest, TestOneRange) {
+ // Create a new active range, with t=0.
+ EXPECT_TRUE(AddCue(0));
+
+ // Add cues to end of existing range.
+ EXPECT_TRUE(AddCue(1));
+ EXPECT_TRUE(AddCue(4));
+
+ Reset();
+ EXPECT_FALSE(AddCue(2));
+ EXPECT_FALSE(AddCue(3));
+ EXPECT_FALSE(AddCue(4));
+}
+
+TEST_F(TextRangesTest, TestDuplicateLast) {
+ // Create a new active range, with t=0.
+ EXPECT_TRUE(AddCue(0));
+ EXPECT_TRUE(AddCue(1));
+
+ Reset();
+ EXPECT_FALSE(AddCue(1));
+ EXPECT_TRUE(AddCue(1));
+}
+
+TEST_F(TextRangesTest, TestTwoRanges) {
+ // Create a new active range, with t=0.
+ EXPECT_TRUE(AddCue(0));
+
+ // Add cue to end of existing range.
+ EXPECT_TRUE(AddCue(2));
+
+ Reset();
+
+ // Create a new active range, with t=4.
+ EXPECT_TRUE(AddCue(4));
+
+ // Add a new cue to end of last (active) range.
+ EXPECT_TRUE(AddCue(5));
+
+ Reset();
+
+ // Make first range active.
+ EXPECT_FALSE(AddCue(0));
+ EXPECT_FALSE(AddCue(2));
+
+ // Expand first range.
+ EXPECT_TRUE(AddCue(3));
+
+ // Coalesce first and second ranges.
+ EXPECT_FALSE(AddCue(4));
+ EXPECT_EQ(RangeCount(), 1U);
+}
+
+TEST_F(TextRangesTest, TestThreeRanges) {
+ // Create a new active range, with t=0.
+ EXPECT_TRUE(AddCue(0));
+
+ // Add cue to end of existing range.
+ EXPECT_TRUE(AddCue(2));
+
+ Reset();
+
+ // Create a new active range, with t=4.
+ EXPECT_TRUE(AddCue(4));
+
+ // Add a new cue to end of last (active) range.
+ EXPECT_TRUE(AddCue(5));
+
+ Reset();
+
+ // Create a new active range, in between the other two.
+ EXPECT_TRUE(AddCue(3));
+
+ // Coalesce middle and last ranges.
+ EXPECT_FALSE(AddCue(4));
+
+ Reset();
+
+ // Make first range active.
+ EXPECT_FALSE(AddCue(0));
+ EXPECT_FALSE(AddCue(2));
+
+ // Coalesce first and last ranges.
+ EXPECT_FALSE(AddCue(3));
+ EXPECT_EQ(RangeCount(), 1U);
+}
+
+} // namespace media
diff --git a/chromium/media/base/text_renderer.cc b/chromium/media/base/text_renderer.cc
index 91f9a33618d..6f88ef71a69 100644
--- a/chromium/media/base/text_renderer.cc
+++ b/chromium/media/base/text_renderer.cc
@@ -7,9 +7,9 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer.h"
#include "media/base/demuxer_stream.h"
@@ -18,14 +18,13 @@
namespace media {
TextRenderer::TextRenderer(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const AddTextTrackCB& add_text_track_cb)
- : message_loop_(message_loop),
- weak_factory_(this),
+ : task_runner_(task_runner),
add_text_track_cb_(add_text_track_cb),
state_(kUninitialized),
- pending_read_count_(0) {
-}
+ pending_read_count_(0),
+ weak_factory_(this) {}
TextRenderer::~TextRenderer() {
DCHECK(state_ == kUninitialized ||
@@ -35,7 +34,7 @@ TextRenderer::~TextRenderer() {
}
void TextRenderer::Initialize(const base::Closure& ended_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!ended_cb.is_null());
DCHECK_EQ(kUninitialized, state_) << "state_ " << state_;
DCHECK(text_track_state_map_.empty());
@@ -43,13 +42,12 @@ void TextRenderer::Initialize(const base::Closure& ended_cb) {
DCHECK(pending_eos_set_.empty());
DCHECK(ended_cb_.is_null());
- weak_this_ = weak_factory_.GetWeakPtr();
ended_cb_ = ended_cb;
state_ = kPaused;
}
void TextRenderer::Play(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPaused) << "state_ " << state_;
for (TextTrackStateMap::iterator itr = text_track_state_map_.begin();
@@ -68,7 +66,7 @@ void TextRenderer::Play(const base::Closure& callback) {
}
void TextRenderer::Pause(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kPlaying || state_ == kEnded) << "state_ " << state_;
DCHECK_GE(pending_read_count_, 0);
pause_cb_ = callback;
@@ -83,20 +81,21 @@ void TextRenderer::Pause(const base::Closure& callback) {
}
void TextRenderer::Flush(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(pending_read_count_, 0);
DCHECK(state_ == kPaused) << "state_ " << state_;
for (TextTrackStateMap::iterator itr = text_track_state_map_.begin();
itr != text_track_state_map_.end(); ++itr) {
pending_eos_set_.insert(itr->first);
+ itr->second->text_ranges_.Reset();
}
DCHECK_EQ(pending_eos_set_.size(), text_track_state_map_.size());
callback.Run();
}
void TextRenderer::Stop(const base::Closure& cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!cb.is_null());
DCHECK(state_ == kPlaying ||
state_ == kPausePending ||
@@ -117,7 +116,7 @@ void TextRenderer::Stop(const base::Closure& cb) {
void TextRenderer::AddTextStream(DemuxerStream* text_stream,
const TextTrackConfig& config) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized) << "state_ " << state_;
DCHECK_NE(state_, kStopPending);
DCHECK_NE(state_, kStopped);
@@ -126,17 +125,16 @@ void TextRenderer::AddTextStream(DemuxerStream* text_stream,
DCHECK(pending_eos_set_.find(text_stream) ==
pending_eos_set_.end());
- media::AddTextTrackDoneCB done_cb =
- media::BindToLoop(message_loop_,
- base::Bind(&TextRenderer::OnAddTextTrackDone,
- weak_this_,
+ AddTextTrackDoneCB done_cb =
+ BindToCurrentLoop(base::Bind(&TextRenderer::OnAddTextTrackDone,
+ weak_factory_.GetWeakPtr(),
text_stream));
add_text_track_cb_.Run(config, done_cb);
}
void TextRenderer::RemoveTextStream(DemuxerStream* text_stream) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
TextTrackStateMap::iterator itr = text_track_state_map_.find(text_stream);
DCHECK(itr != text_track_state_map_.end());
@@ -150,7 +148,7 @@ void TextRenderer::RemoveTextStream(DemuxerStream* text_stream) {
}
bool TextRenderer::HasTracks() const {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return !text_track_state_map_.empty();
}
@@ -158,7 +156,7 @@ void TextRenderer::BufferReady(
DemuxerStream* stream,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& input) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_NE(status, DemuxerStream::kConfigChanged);
if (status == DemuxerStream::kAborted) {
@@ -241,7 +239,7 @@ void TextRenderer::BufferReady(
void TextRenderer::CueReady(
DemuxerStream* text_stream,
const scoped_refptr<TextCue>& text_cue) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized &&
state_ != kStopped) << "state_ " << state_;
DCHECK_GT(pending_read_count_, 0);
@@ -309,12 +307,15 @@ void TextRenderer::CueReady(
}
base::TimeDelta start = text_cue->timestamp();
- base::TimeDelta end = start + text_cue->duration();
- state->text_track->addWebVTTCue(start, end,
- text_cue->id(),
- text_cue->text(),
- text_cue->settings());
+ if (state->text_ranges_.AddCue(start)) {
+ base::TimeDelta end = start + text_cue->duration();
+
+ state->text_track->addWebVTTCue(start, end,
+ text_cue->id(),
+ text_cue->text(),
+ text_cue->settings());
+ }
if (state_ == kPlaying) {
Read(state, text_stream);
@@ -330,7 +331,7 @@ void TextRenderer::CueReady(
void TextRenderer::OnAddTextTrackDone(DemuxerStream* text_stream,
scoped_ptr<TextTrack> text_track) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized &&
state_ != kStopped &&
state_ != kStopPending) << "state_ " << state_;
@@ -353,9 +354,8 @@ void TextRenderer::Read(
state->read_state = TextTrackState::kReadPending;
++pending_read_count_;
- text_stream->Read(base::Bind(&TextRenderer::BufferReady,
- weak_this_,
- text_stream));
+ text_stream->Read(base::Bind(
+ &TextRenderer::BufferReady, weak_factory_.GetWeakPtr(), text_stream));
}
TextRenderer::TextTrackState::TextTrackState(scoped_ptr<TextTrack> tt)
diff --git a/chromium/media/base/text_renderer.h b/chromium/media/base/text_renderer.h
index 532a1fa0376..ce109125565 100644
--- a/chromium/media/base/text_renderer.h
+++ b/chromium/media/base/text_renderer.h
@@ -14,10 +14,11 @@
#include "media/base/demuxer_stream.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
+#include "media/base/text_ranges.h"
#include "media/base/text_track.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -30,12 +31,13 @@ class TextTrackConfig;
// demuxer text stream.
class MEDIA_EXPORT TextRenderer {
public:
- // |message_loop| is the thread on which TextRenderer will execute.
+ // |task_runner| is the thread on which TextRenderer will execute.
//
// |add_text_track_cb| is called when the demuxer requests (via its host)
// that a new text track be created.
- TextRenderer(const scoped_refptr<base::MessageLoopProxy>& message_loop,
- const AddTextTrackCB& add_text_track_cb);
+ TextRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const AddTextTrackCB& add_text_track_cb);
~TextRenderer();
// |ended_cb| is executed when all of the text tracks have reached
@@ -81,6 +83,7 @@ class MEDIA_EXPORT TextRenderer {
ReadState read_state;
scoped_ptr<TextTrack> text_track;
+ TextRanges text_ranges_;
};
// Callback delivered by the demuxer |text_stream| when
@@ -101,9 +104,7 @@ class MEDIA_EXPORT TextRenderer {
// Utility function to post a read request on |text_stream|.
void Read(TextTrackState* state, DemuxerStream* text_stream);
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<TextRenderer> weak_factory_;
- base::WeakPtr<TextRenderer> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
const AddTextTrackCB add_text_track_cb_;
// Callbacks provided during Initialize().
@@ -137,6 +138,9 @@ class MEDIA_EXPORT TextRenderer {
typedef std::set<DemuxerStream*> PendingEosSet;
PendingEosSet pending_eos_set_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<TextRenderer> weak_factory_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TextRenderer);
};
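
The member-ordering NOTE above relies on C++ destroying members in reverse
declaration order, so the factory declared last is destroyed first,
invalidating outstanding WeakPtrs before any member they could dereference
(a sketch with a hypothetical class):

    class Sketch {
     private:
      std::string state_;                        // Destroyed second.
      // Declared last so it is destroyed first, invalidating WeakPtrs
      // before |state_| goes away.
      base::WeakPtrFactory<Sketch> weak_factory_;
    };
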
diff --git a/chromium/media/base/user_input_monitor_linux.cc b/chromium/media/base/user_input_monitor_linux.cc
index 70090eae4bb..b92cdda0325 100644
--- a/chromium/media/base/user_input_monitor_linux.cc
+++ b/chromium/media/base/user_input_monitor_linux.cc
@@ -15,7 +15,6 @@
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_pump_libevent.h"
#include "base/single_thread_task_runner.h"
@@ -23,6 +22,7 @@
#include "media/base/keyboard_event_counter.h"
#include "third_party/skia/include/core/SkPoint.h"
#include "ui/events/keycodes/keyboard_code_conversion_x.h"
+#include "ui/gfx/x/x11_types.h"
// These includes need to be later than dictated by the style guide due to
// Xlib header pollution, specifically the min, max, and Status macros.
@@ -148,10 +148,10 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
// them with something like the following:
// XOpenDisplay(DisplayString(display));
if (!x_control_display_)
- x_control_display_ = XOpenDisplay(NULL);
+ x_control_display_ = gfx::OpenNewXDisplay();
if (!x_record_display_)
- x_record_display_ = XOpenDisplay(NULL);
+ x_record_display_ = gfx::OpenNewXDisplay();
if (!x_control_display_ || !x_record_display_) {
LOG(ERROR) << "Couldn't open X display";
diff --git a/chromium/media/base/user_input_monitor_unittest.cc b/chromium/media/base/user_input_monitor_unittest.cc
index 4874a10eb82..55e6cca8076 100644
--- a/chromium/media/base/user_input_monitor_unittest.cc
+++ b/chromium/media/base/user_input_monitor_unittest.cc
@@ -47,9 +47,9 @@ TEST(UserInputMonitorTest, KeyPressCounter) {
TEST(UserInputMonitorTest, CreatePlatformSpecific) {
#if defined(OS_LINUX)
- base::MessageLoop message_loop(base::MessageLoop::TYPE_IO);
+ base::MessageLoopForIO message_loop;
#else
- base::MessageLoop message_loop(base::MessageLoop::TYPE_UI);
+ base::MessageLoopForUI message_loop;
#endif // defined(OS_LINUX)
base::RunLoop run_loop;
diff --git a/chromium/media/base/user_input_monitor_win.cc b/chromium/media/base/user_input_monitor_win.cc
index 29cedc8b631..6efdc5283d2 100644
--- a/chromium/media/base/user_input_monitor_win.cc
+++ b/chromium/media/base/user_input_monitor_win.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
@@ -129,7 +128,7 @@ void UserInputMonitorWinCore::StartMonitor(EventBitMask type) {
window_.reset(new base::win::MessageWindow());
if (!window_->Create(base::Bind(&UserInputMonitorWinCore::HandleMessage,
base::Unretained(this)))) {
- LOG_GETLASTERROR(ERROR) << "Failed to create the raw input window";
+ PLOG(ERROR) << "Failed to create the raw input window";
window_.reset();
return;
}
@@ -138,8 +137,7 @@ void UserInputMonitorWinCore::StartMonitor(EventBitMask type) {
// Register to receive raw mouse and/or keyboard input.
scoped_ptr<RAWINPUTDEVICE> device(GetRawInputDevices(type, RIDEV_INPUTSINK));
if (!RegisterRawInputDevices(device.get(), 1, sizeof(*device))) {
- LOG_GETLASTERROR(ERROR)
- << "RegisterRawInputDevices() failed for RIDEV_INPUTSINK";
+ PLOG(ERROR) << "RegisterRawInputDevices() failed for RIDEV_INPUTSINK";
window_.reset();
return;
}
@@ -163,8 +161,7 @@ void UserInputMonitorWinCore::StopMonitor(EventBitMask type) {
scoped_ptr<RAWINPUTDEVICE> device(GetRawInputDevices(type, RIDEV_REMOVE));
if (!RegisterRawInputDevices(device.get(), 1, sizeof(*device))) {
- LOG_GETLASTERROR(INFO)
- << "RegisterRawInputDevices() failed for RIDEV_REMOVE";
+ PLOG(INFO) << "RegisterRawInputDevices() failed for RIDEV_REMOVE";
}
events_monitored_ &= ~type;
@@ -184,7 +181,7 @@ LRESULT UserInputMonitorWinCore::OnInput(HRAWINPUT input_handle) {
UINT result = GetRawInputData(
input_handle, RID_INPUT, NULL, &size, sizeof(RAWINPUTHEADER));
if (result == -1) {
- LOG_GETLASTERROR(ERROR) << "GetRawInputData() failed";
+ PLOG(ERROR) << "GetRawInputData() failed";
return 0;
}
DCHECK_EQ(0u, result);
@@ -195,7 +192,7 @@ LRESULT UserInputMonitorWinCore::OnInput(HRAWINPUT input_handle) {
result = GetRawInputData(
input_handle, RID_INPUT, buffer.get(), &size, sizeof(RAWINPUTHEADER));
if (result == -1) {
- LOG_GETLASTERROR(ERROR) << "GetRawInputData() failed";
+ PLOG(ERROR) << "GetRawInputData() failed";
return 0;
}
DCHECK_EQ(size, result);
diff --git a/chromium/media/base/vector_math.cc b/chromium/media/base/vector_math.cc
index 32584f5cf64..71721b69858 100644
--- a/chromium/media/base/vector_math.cc
+++ b/chromium/media/base/vector_math.cc
@@ -7,63 +7,29 @@
#include <algorithm>
-#include "base/cpu.h"
#include "base/logging.h"
#include "build/build_config.h"
-#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#include <arm_neon.h>
-#endif
-
-namespace media {
-namespace vector_math {
-
-// If we know the minimum architecture at compile time, avoid CPU detection.
-// Force NaCl code to use C routines since (at present) nothing there uses these
-// methods and plumbing the -msse built library is non-trivial.
+// NaCl does not allow intrinsics.
#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
-#if defined(__SSE__)
+#include <xmmintrin.h>
#define FMAC_FUNC FMAC_SSE
#define FMUL_FUNC FMUL_SSE
#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
-void Initialize() {}
-#else
-// X86 CPU detection required. Functions will be set by Initialize().
-// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
-#define FMAC_FUNC g_fmac_proc_
-#define FMUL_FUNC g_fmul_proc_
-#define EWMAAndMaxPower_FUNC g_ewma_power_proc_
-
-typedef void (*MathProc)(const float src[], float scale, int len, float dest[]);
-static MathProc g_fmac_proc_ = NULL;
-static MathProc g_fmul_proc_ = NULL;
-typedef std::pair<float, float> (*EWMAAndMaxPowerProc)(
- float initial_value, const float src[], int len, float smoothing_factor);
-static EWMAAndMaxPowerProc g_ewma_power_proc_ = NULL;
-
-void Initialize() {
- CHECK(!g_fmac_proc_);
- CHECK(!g_fmul_proc_);
- CHECK(!g_ewma_power_proc_);
- const bool kUseSSE = base::CPU().has_sse();
- g_fmac_proc_ = kUseSSE ? FMAC_SSE : FMAC_C;
- g_fmul_proc_ = kUseSSE ? FMUL_SSE : FMUL_C;
- g_ewma_power_proc_ = kUseSSE ? EWMAAndMaxPower_SSE : EWMAAndMaxPower_C;
-}
-#endif
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#include <arm_neon.h>
#define FMAC_FUNC FMAC_NEON
#define FMUL_FUNC FMUL_NEON
#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
-void Initialize() {}
#else
-// Unknown architecture.
#define FMAC_FUNC FMAC_C
#define FMUL_FUNC FMUL_C
#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_C
-void Initialize() {}
#endif
+namespace media {
+namespace vector_math {
+
void FMAC(const float src[], float scale, int len, float dest[]) {
// Ensure |src| and |dest| are 16-byte aligned.
DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
@@ -88,6 +54,13 @@ void FMUL_C(const float src[], float scale, int len, float dest[]) {
dest[i] = src[i] * scale;
}
+void Crossfade(const float src[], int len, float dest[]) {
+ float cf_ratio = 0;
+ const float cf_increment = 1.0f / len;
+ for (int i = 0; i < len; ++i, cf_ratio += cf_increment)
+ dest[i] = (1.0f - cf_ratio) * src[i] + cf_ratio * dest[i];
+}
+
std::pair<float, float> EWMAAndMaxPower(
float initial_value, const float src[], int len, float smoothing_factor) {
// Ensure |src| is 16-byte aligned.
@@ -109,6 +82,111 @@ std::pair<float, float> EWMAAndMaxPower_C(
return result;
}
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
+void FMUL_SSE(const float src[], float scale, int len, float dest[]) {
+ const int rem = len % 4;
+ const int last_index = len - rem;
+ __m128 m_scale = _mm_set_ps1(scale);
+ for (int i = 0; i < last_index; i += 4)
+ _mm_store_ps(dest + i, _mm_mul_ps(_mm_load_ps(src + i), m_scale));
+
+ // Handle any remaining values that wouldn't fit in an SSE pass.
+ for (int i = last_index; i < len; ++i)
+ dest[i] = src[i] * scale;
+}
+
+void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
+ const int rem = len % 4;
+ const int last_index = len - rem;
+ __m128 m_scale = _mm_set_ps1(scale);
+ for (int i = 0; i < last_index; i += 4) {
+ _mm_store_ps(dest + i, _mm_add_ps(_mm_load_ps(dest + i),
+ _mm_mul_ps(_mm_load_ps(src + i), m_scale)));
+ }
+
+ // Handle any remaining values that wouldn't fit in an SSE pass.
+ for (int i = last_index; i < len; ++i)
+ dest[i] += src[i] * scale;
+}
+
+// Convenience macro to extract float 0 through 3 from the vector |a|. This is
+// needed because compilers other than clang don't support access via
+// operator[]().
+#define EXTRACT_FLOAT(a, i) \
+ (i == 0 ? \
+ _mm_cvtss_f32(a) : \
+ _mm_cvtss_f32(_mm_shuffle_ps(a, a, i)))
+
+std::pair<float, float> EWMAAndMaxPower_SSE(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // When the recurrence is unrolled, we see that we can split it into 4
+ // separate lanes of evaluation:
+ //
+ // y[n] = a(S[n]^2) + (1-a)(y[n-1])
+ // = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
+ // = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ //
+ // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
+ //
+ // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
+ // each of the 4 lanes, and then combine them to give y[n].
+
+ const int rem = len % 4;
+ const int last_index = len - rem;
+
+ const __m128 smoothing_factor_x4 = _mm_set_ps1(smoothing_factor);
+ const float weight_prev = 1.0f - smoothing_factor;
+ const __m128 weight_prev_x4 = _mm_set_ps1(weight_prev);
+ const __m128 weight_prev_squared_x4 =
+ _mm_mul_ps(weight_prev_x4, weight_prev_x4);
+ const __m128 weight_prev_4th_x4 =
+ _mm_mul_ps(weight_prev_squared_x4, weight_prev_squared_x4);
+
+ // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
+ // 0, respectively.
+ __m128 max_x4 = _mm_setzero_ps();
+ __m128 ewma_x4 = _mm_setr_ps(0.0f, 0.0f, 0.0f, initial_value);
+ int i;
+ for (i = 0; i < last_index; i += 4) {
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_4th_x4);
+ const __m128 sample_x4 = _mm_load_ps(src + i);
+ const __m128 sample_squared_x4 = _mm_mul_ps(sample_x4, sample_x4);
+ max_x4 = _mm_max_ps(max_x4, sample_squared_x4);
+ // Note: The compiler optimizes this to a single multiply-and-accumulate
+ // instruction:
+ ewma_x4 = _mm_add_ps(ewma_x4,
+ _mm_mul_ps(sample_squared_x4, smoothing_factor_x4));
+ }
+
+ // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ float ewma = EXTRACT_FLOAT(ewma_x4, 3);
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 2);
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 1);
+ ewma_x4 = _mm_mul_ss(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 0);
+
+ // Fold the maximums together to get the overall maximum.
+ max_x4 = _mm_max_ps(max_x4,
+ _mm_shuffle_ps(max_x4, max_x4, _MM_SHUFFLE(3, 3, 1, 1)));
+ max_x4 = _mm_max_ss(max_x4, _mm_shuffle_ps(max_x4, max_x4, 2));
+
+ std::pair<float, float> result(ewma, EXTRACT_FLOAT(max_x4, 0));
+
+ // Handle remaining values at the end of |src|.
+ for (; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+
+ return result;
+}
+#endif
+
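
In scalar form, the four-lane decomposition that EWMAAndMaxPower_SSE()
implements looks like the following (a sketch under the same definitions,
a = smoothing_factor and w = 1 - a; not part of the patch):

    float w = 1.0f - smoothing_factor;
    float w4 = w * w * w * w;
    // Lane j accumulates z over samples j, j+4, j+8, ...; lane 3 is seeded
    // with |initial_value|, matching _mm_setr_ps(0, 0, 0, initial_value).
    float lane[4] = {0.0f, 0.0f, 0.0f, initial_value};
    for (int i = 0; i + 4 <= len; i += 4) {
      for (int j = 0; j < 4; ++j)
        lane[j] = lane[j] * w4 + smoothing_factor * src[i + j] * src[i + j];
    }
    // Recombination step: y[n] = z[n] + w*z[n-1] + w^2*z[n-2] + w^3*z[n-3].
    float ewma = lane[3] + w * lane[2] + w * w * lane[1] + w * w * w * lane[0];
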
#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
void FMAC_NEON(const float src[], float scale, int len, float dest[]) {
const int rem = len % 4;
diff --git a/chromium/media/base/vector_math.h b/chromium/media/base/vector_math.h
index a4dea372898..a148ca050f1 100644
--- a/chromium/media/base/vector_math.h
+++ b/chromium/media/base/vector_math.h
@@ -15,11 +15,6 @@ namespace vector_math {
// Required alignment for inputs and outputs to all vector math functions
enum { kRequiredAlignment = 16 };
-// Selects runtime specific optimizations such as SSE. Must be called prior to
-// calling FMAC() or FMUL(). Called during media library initialization; most
-// users should never have to call this.
-MEDIA_EXPORT void Initialize();
-
// Multiply each element of |src| (up to |len|) by |scale| and add to |dest|.
// |src| and |dest| must be aligned by kRequiredAlignment.
MEDIA_EXPORT void FMAC(const float src[], float scale, int len, float dest[]);
@@ -38,6 +33,8 @@ MEDIA_EXPORT void FMUL(const float src[], float scale, int len, float dest[]);
MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower(
float initial_value, const float src[], int len, float smoothing_factor);
+MEDIA_EXPORT void Crossfade(const float src[], int len, float dest[]);
+
} // namespace vector_math
} // namespace media
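
The new Crossfade() performs a linear fade from |src| into the existing
contents of |dest|; a small worked example (values illustrative):

    float from[4] = {1.0f, 1.0f, 1.0f, 1.0f};
    float to[4] = {0.0f, 0.0f, 0.0f, 0.0f};
    media::vector_math::Crossfade(from, 4, to);
    // to == {1.0, 0.75, 0.5, 0.25}: the weight of |from| starts at 1 and
    // falls by 1/len per sample, while the weight of |dest| rises.
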
diff --git a/chromium/media/base/vector_math_perftest.cc b/chromium/media/base/vector_math_perftest.cc
index 9742f2e9534..2cf4691be4a 100644
--- a/chromium/media/base/vector_math_perftest.cc
+++ b/chromium/media/base/vector_math_perftest.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/cpu.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
@@ -74,17 +73,21 @@ class VectorMathPerfTest : public testing::Test {
}
protected:
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector_;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector_;
+ scoped_ptr<float, base::AlignedFreeDeleter> input_vector_;
+ scoped_ptr<float, base::AlignedFreeDeleter> output_vector_;
DISALLOW_COPY_AND_ASSIGN(VectorMathPerfTest);
};
-// Define platform independent function name for FMAC* perf tests.
+// Define platform-dependent function names for SIMD-optimized methods.
#if defined(ARCH_CPU_X86_FAMILY)
#define FMAC_FUNC FMAC_SSE
+#define FMUL_FUNC FMUL_SSE
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#define FMAC_FUNC FMAC_NEON
+#define FMUL_FUNC FMUL_NEON
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
#endif
// Benchmark for each optimized vector_math::FMAC() method.
@@ -93,9 +96,6 @@ TEST_F(VectorMathPerfTest, FMAC) {
RunBenchmark(
vector_math::FMAC_C, true, "vector_math_fmac", "unoptimized");
#if defined(FMAC_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
// Benchmark FMAC_FUNC() with unaligned size.
ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
sizeof(float)), 0U);
@@ -109,24 +109,12 @@ TEST_F(VectorMathPerfTest, FMAC) {
#endif
}
-#undef FMAC_FUNC
-
-// Define platform independent function name for FMULBenchmark* tests.
-#if defined(ARCH_CPU_X86_FAMILY)
-#define FMUL_FUNC FMUL_SSE
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#define FMUL_FUNC FMUL_NEON
-#endif
-
// Benchmark for each optimized vector_math::FMUL() method.
TEST_F(VectorMathPerfTest, FMUL) {
// Benchmark FMUL_C().
RunBenchmark(
vector_math::FMUL_C, true, "vector_math_fmul", "unoptimized");
#if defined(FMUL_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
// Benchmark FMUL_FUNC() with unaligned size.
ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
sizeof(float)), 0U);
@@ -140,14 +128,6 @@ TEST_F(VectorMathPerfTest, FMUL) {
#endif
}
-#undef FMUL_FUNC
-
-#if defined(ARCH_CPU_X86_FAMILY)
-#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
-#endif
-
// Benchmark for each optimized vector_math::EWMAAndMaxPower() method.
TEST_F(VectorMathPerfTest, EWMAAndMaxPower) {
// Benchmark EWMAAndMaxPower_C().
@@ -156,9 +136,6 @@ TEST_F(VectorMathPerfTest, EWMAAndMaxPower) {
"vector_math_ewma_and_max_power",
"unoptimized");
#if defined(EWMAAndMaxPower_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
// Benchmark EWMAAndMaxPower_FUNC() with unaligned size.
ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
sizeof(float)), 0U);
@@ -176,6 +153,4 @@ TEST_F(VectorMathPerfTest, EWMAAndMaxPower) {
#endif
}
-#undef EWMAAndMaxPower_FUNC
-
} // namespace media
diff --git a/chromium/media/base/vector_math_testing.h b/chromium/media/base/vector_math_testing.h
index b0b304409dd..9240fbf54f4 100644
--- a/chromium/media/base/vector_math_testing.h
+++ b/chromium/media/base/vector_math_testing.h
@@ -19,7 +19,7 @@ MEDIA_EXPORT void FMUL_C(const float src[], float scale, int len, float dest[]);
MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_C(
float initial_value, const float src[], int len, float smoothing_factor);
-#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
MEDIA_EXPORT void FMAC_SSE(const float src[], float scale, int len,
float dest[]);
MEDIA_EXPORT void FMUL_SSE(const float src[], float scale, int len,
diff --git a/chromium/media/base/vector_math_unittest.cc b/chromium/media/base/vector_math_unittest.cc
index f8278ce1b5d..a9369231987 100644
--- a/chromium/media/base/vector_math_unittest.cc
+++ b/chromium/media/base/vector_math_unittest.cc
@@ -6,7 +6,6 @@
#define _USE_MATH_DEFINES
#include <cmath>
-#include "base/cpu.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
@@ -44,12 +43,12 @@ class VectorMathTest : public testing::Test {
void VerifyOutput(float value) {
for (int i = 0; i < kVectorSize; ++i)
- ASSERT_FLOAT_EQ(output_vector_.get()[i], value);
+ ASSERT_FLOAT_EQ(output_vector_[i], value);
}
protected:
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector_;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> input_vector_;
+ scoped_ptr<float[], base::AlignedFreeDeleter> output_vector_;
DISALLOW_COPY_AND_ASSIGN(VectorMathTest);
};
@@ -76,7 +75,6 @@ TEST_F(VectorMathTest, FMAC) {
#if defined(ARCH_CPU_X86_FAMILY)
{
- ASSERT_TRUE(base::CPU().has_sse());
SCOPED_TRACE("FMAC_SSE");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMAC_SSE(
@@ -118,7 +116,6 @@ TEST_F(VectorMathTest, FMUL) {
#if defined(ARCH_CPU_X86_FAMILY)
{
- ASSERT_TRUE(base::CPU().has_sse());
SCOPED_TRACE("FMUL_SSE");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMUL_SSE(
@@ -138,7 +135,15 @@ TEST_F(VectorMathTest, FMUL) {
#endif
}
-namespace {
+TEST_F(VectorMathTest, Crossfade) {
+ FillTestVectors(0, 1);
+ vector_math::Crossfade(
+ input_vector_.get(), kVectorSize, output_vector_.get());
+ for (int i = 0; i < kVectorSize; ++i) {
+ ASSERT_FLOAT_EQ(i / static_cast<float>(kVectorSize), output_vector_[i])
+ << "i=" << i;
+ }
+}
class EWMATestScenario {
public:
@@ -219,7 +224,6 @@ class EWMATestScenario {
#if defined(ARCH_CPU_X86_FAMILY)
{
- ASSERT_TRUE(base::CPU().has_sse());
SCOPED_TRACE("EWMAAndMaxPower_SSE");
const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_SSE(
initial_value_, data_.get(), data_len_, smoothing_factor_);
@@ -241,15 +245,13 @@ class EWMATestScenario {
private:
float initial_value_;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<float, base::AlignedFreeDeleter> data_;
int data_len_;
float smoothing_factor_;
float expected_final_avg_;
float expected_max_;
};
-} // namespace
-
typedef testing::TestWithParam<EWMATestScenario> VectorMathEWMAAndMaxPowerTest;
TEST_P(VectorMathEWMAAndMaxPowerTest, Correctness) {
diff --git a/chromium/media/base/video_decoder.cc b/chromium/media/base/video_decoder.cc
index 81397b7b676..9a6de2fdd35 100644
--- a/chromium/media/base/video_decoder.cc
+++ b/chromium/media/base/video_decoder.cc
@@ -4,16 +4,14 @@
#include "media/base/video_decoder.h"
+#include "media/base/video_frame.h"
+
namespace media {
VideoDecoder::VideoDecoder() {}
VideoDecoder::~VideoDecoder() {}
-bool VideoDecoder::HasAlpha() const {
- return false;
-}
-
bool VideoDecoder::NeedsBitstreamConversion() const {
return false;
}
@@ -22,4 +20,8 @@ bool VideoDecoder::CanReadWithoutStalling() const {
return true;
}
+int VideoDecoder::GetMaxDecodeRequests() const {
+ return 1;
+}
+
} // namespace media
diff --git a/chromium/media/base/video_decoder.h b/chromium/media/base/video_decoder.h
index 63f63e5d7d6..e7d7ad62878 100644
--- a/chromium/media/base/video_decoder.h
+++ b/chromium/media/base/video_decoder.h
@@ -20,18 +20,30 @@ class VideoFrame;
class MEDIA_EXPORT VideoDecoder {
public:
// Status codes for decode operations on VideoDecoder.
+ // TODO(rileya): Now that both AudioDecoder and VideoDecoder Status enums
+ // match, break them into a decoder_status.h.
enum Status {
kOk, // Everything went as planned.
- kNotEnoughData, // Not enough data to produce a video frame.
+ kAborted, // Decode was aborted as a result of Reset() being called.
kDecodeError, // Decoding error happened.
kDecryptError // Decrypting error happened.
};
+ // Callback for VideoDecoder to return a decoded frame whenever it becomes
+ // available. Only non-EOS frames should be returned via this callback.
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> OutputCB;
+
+ // Callback type for Decode(). Called after the decoder has completed decoding
+ // corresponding DecoderBuffer, indicating that it's ready to accept another
+ // buffer to decode.
+ typedef base::Callback<void(Status status)> DecodeCB;
+
VideoDecoder();
virtual ~VideoDecoder();
// Initializes a VideoDecoder with the given |config|, executing the
- // |status_cb| upon completion.
+ // |status_cb| upon completion. |output_cb| is called for each output frame
+ // decoded by Decode().
//
// Note:
// 1) The VideoDecoder will be reinitialized if it was initialized before.
@@ -40,43 +52,43 @@ class MEDIA_EXPORT VideoDecoder {
// 3) No VideoDecoder calls except for Stop() should be made before
// |status_cb| is executed.
virtual void Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) = 0;
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) = 0;
// Requests a |buffer| to be decoded. The status of the decoder and decoded
- // frame are returned via the provided callback. Only one decode may be in
- // flight at any given time.
+ // frame are returned via the provided callback. Some decoders may allow
+ // decoding multiple buffers in parallel. Callers should call
+ // GetMaxDecodeRequests() to get the number of buffers that may be decoded
+ // in parallel. The decoder must call |decode_cb| in the same order in which
+ // Decode() is called.
//
// Implementations guarantee that the callback will not be called from within
- // this method.
+ // this method and that |decode_cb| will not be blocked on the following
+ // Decode() calls (i.e. |decode_cb| will be called even if Decode() is never
+ // called again).
+ //
+ // After decoding is finished the decoder calls |output_cb| specified in
+ // Initialize() for each decoded frame. |output_cb| may be called before or
+ // after |decode_cb|.
//
- // If the returned status is kOk:
- // - Non-EOS (end of stream) frame contains decoded video data.
- // - EOS frame indicates the end of the stream.
- // - NULL frame indicates an aborted decode. This can happen if Reset() or
- // Stop() is called during the decoding process.
- // Otherwise the returned frame must be NULL.
- typedef base::Callback<void(Status,
- const scoped_refptr<VideoFrame>&)> DecodeCB;
+ // If |buffer| is an EOS buffer then the decoder must be flushed, i.e.
+ // |output_cb| must be called for each frame pending in the queue and
+ // |decode_cb| must be called after that.
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) = 0;
- // Resets decoder state, fulfilling all pending DecodeCB and dropping extra
- // queued decoded data. After this call, the decoder is back to an initialized
- // clean state.
+ // Resets decoder state. All pending Decode() requests will be finished or
+ // aborted before |closure| is called.
// Note: No VideoDecoder calls should be made before |closure| is executed.
virtual void Reset(const base::Closure& closure) = 0;
// Stops decoder, fires any pending callbacks and sets the decoder to an
// uninitialized state. A VideoDecoder cannot be re-initialized after it has
// been stopped.
- // Note that if Initialize() has been called, Stop() must be called and
- // complete before deleting the decoder.
- virtual void Stop(const base::Closure& closure) = 0;
-
- // Returns true if the output format has an alpha channel. Most formats do not
- // have alpha so the default is false. Override and return true for decoders
- // that return formats with an alpha channel.
- virtual bool HasAlpha() const;
+ // Note that if Initialize() is pending or has finished successfully, Stop()
+ // must be called before destroying the decoder.
+ virtual void Stop() = 0;
// Returns true if the decoder needs bitstream conversion before decoding.
virtual bool NeedsBitstreamConversion() const;
@@ -87,6 +99,9 @@ class MEDIA_EXPORT VideoDecoder {
// use a fixed set of VideoFrames for decoding.
virtual bool CanReadWithoutStalling() const;
+ // Returns maximum number of parallel decode requests.
+ virtual int GetMaxDecodeRequests() const;
+
private:
DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
};
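
A sketch of a client driving the revised interface (OnVideoFrame,
OnDecodeDone, NextBuffer, and in_flight are hypothetical names; the EOS flush
behavior is as specified in the comments above):

    // Initialize once; |output_cb| receives every decoded, non-EOS frame.
    decoder->Initialize(config, false /* low_delay */, status_cb,
                        base::Bind(&OnVideoFrame));
    // Keep up to GetMaxDecodeRequests() buffers in flight.
    while (in_flight < decoder->GetMaxDecodeRequests())
      decoder->Decode(NextBuffer(), base::Bind(&OnDecodeDone));
    // Flushing: an EOS buffer drains all pending frames through |output_cb|
    // before its own |decode_cb| runs.
    decoder->Decode(DecoderBuffer::CreateEOSBuffer(),
                    base::Bind(&OnDecodeDone));
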
diff --git a/chromium/media/base/video_decoder_config.cc b/chromium/media/base/video_decoder_config.cc
index 82d607526bb..d2b6e410103 100644
--- a/chromium/media/base/video_decoder_config.cc
+++ b/chromium/media/base/video_decoder_config.cc
@@ -77,7 +77,7 @@ void VideoDecoderConfig::Initialize(VideoCodec codec,
UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
UMA_HISTOGRAM_ENUMERATION(
- "Media.VideoPixelFormat", format, VideoFrame::HISTOGRAM_MAX);
+ "Media.VideoPixelFormat", format, VideoFrame::FORMAT_MAX + 1);
}
codec_ = codec;
diff --git a/chromium/media/base/video_decoder_config.h b/chromium/media/base/video_decoder_config.h
index 3f7db4e6ca8..3cc33d92715 100644
--- a/chromium/media/base/video_decoder_config.h
+++ b/chromium/media/base/video_decoder_config.h
@@ -43,6 +43,7 @@ enum VideoCodecProfile {
// for example), and keep the values for a particular format grouped
// together for clarity.
VIDEO_CODEC_PROFILE_UNKNOWN = -1,
+ VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
H264PROFILE_MIN = 0,
H264PROFILE_BASELINE = H264PROFILE_MIN,
H264PROFILE_MAIN = 1,
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index 8a4eb3cce2b..272d41dc4c6 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -11,12 +11,19 @@
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/strings/string_piece.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/limits.h"
#include "media/base/video_util.h"
#include "third_party/skia/include/core/SkBitmap.h"
namespace media {
+static inline size_t RoundUp(size_t value, size_t alignment) {
+ // Check that |alignment| is a power of 2.
+ DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
+ return ((value + (alignment - 1)) & ~(alignment - 1));
+}
+
// static
scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
VideoFrame::Format format,
@@ -24,20 +31,41 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- DCHECK(IsValidConfig(format, coded_size, visible_rect, natural_size));
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp, false));
+ // Since we're creating a new YUV frame (and allocating memory for it
+ // ourselves), we can pad the requested |coded_size| if necessary if the
+ // request does not line up on sample boundaries.
+ gfx::Size new_coded_size(coded_size);
switch (format) {
+ case VideoFrame::YV24:
+ break;
case VideoFrame::YV12:
case VideoFrame::YV12A:
- case VideoFrame::YV16:
case VideoFrame::I420:
case VideoFrame::YV12J:
- frame->AllocateYUV();
+ new_coded_size.set_height((new_coded_size.height() + 1) / 2 * 2);
+ // Fallthrough.
+ case VideoFrame::YV16:
+ new_coded_size.set_width((new_coded_size.width() + 1) / 2 * 2);
break;
- default:
- LOG(FATAL) << "Unsupported frame format: " << format;
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::NV12:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case VideoFrame::NATIVE_TEXTURE:
+ LOG(FATAL) << "Only YUV formats supported: " << format;
+ return NULL;
}
+ DCHECK(IsValidConfig(format, new_coded_size, visible_rect, natural_size));
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(format,
+ new_coded_size,
+ visible_rect,
+ natural_size,
+ scoped_ptr<gpu::MailboxHolder>(),
+ timestamp,
+ false));
+ frame->AllocateYUV();
return frame;
}
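
A quick worked example of the padding above (sizes illustrative): requesting
a YV12 frame with an odd coded size gets rounded up so the 2x2-subsampled
chroma planes line up.

    gfx::Size coded(5, 3);                           // Requested size.
    coded.set_height((coded.height() + 1) / 2 * 2);  // YV12: height -> 4.
    coded.set_width((coded.width() + 1) / 2 * 2);    // Width -> 6.
    // The frame is allocated at 6x4; visible_rect may still be (0,0) 5x3.
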
@@ -62,8 +90,10 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
return "YV12A";
case VideoFrame::YV12J:
return "YV12J";
- case VideoFrame::HISTOGRAM_MAX:
- return "HISTOGRAM_MAX";
+ case VideoFrame::NV12:
+ return "NV12";
+ case VideoFrame::YV24:
+ return "YV24";
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
return "";
@@ -74,41 +104,72 @@ bool VideoFrame::IsValidConfig(VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size) {
- return (format != VideoFrame::UNKNOWN &&
- !coded_size.IsEmpty() &&
- coded_size.GetArea() <= limits::kMaxCanvas &&
- coded_size.width() <= limits::kMaxDimension &&
- coded_size.height() <= limits::kMaxDimension &&
- !visible_rect.IsEmpty() &&
- visible_rect.x() >= 0 && visible_rect.y() >= 0 &&
- visible_rect.right() <= coded_size.width() &&
- visible_rect.bottom() <= coded_size.height() &&
- !natural_size.IsEmpty() &&
- natural_size.GetArea() <= limits::kMaxCanvas &&
- natural_size.width() <= limits::kMaxDimension &&
- natural_size.height() <= limits::kMaxDimension);
+ // Check maximum limits for all formats.
+ if (coded_size.GetArea() > limits::kMaxCanvas ||
+ coded_size.width() > limits::kMaxDimension ||
+ coded_size.height() > limits::kMaxDimension ||
+ visible_rect.x() < 0 || visible_rect.y() < 0 ||
+ visible_rect.right() > coded_size.width() ||
+ visible_rect.bottom() > coded_size.height() ||
+ natural_size.GetArea() > limits::kMaxCanvas ||
+ natural_size.width() > limits::kMaxDimension ||
+ natural_size.height() > limits::kMaxDimension)
+ return false;
+
+ // Check format-specific width/height requirements.
+ switch (format) {
+ case VideoFrame::UNKNOWN:
+ return (coded_size.IsEmpty() && visible_rect.IsEmpty() &&
+ natural_size.IsEmpty());
+ case VideoFrame::YV24:
+ break;
+ case VideoFrame::YV12:
+ case VideoFrame::YV12J:
+ case VideoFrame::I420:
+ case VideoFrame::YV12A:
+ case VideoFrame::NV12:
+ // Subsampled YUV formats have width/height requirements.
+ if (static_cast<size_t>(coded_size.height()) <
+ RoundUp(visible_rect.bottom(), 2))
+ return false;
+ // Fallthrough.
+ case VideoFrame::YV16:
+ if (static_cast<size_t>(coded_size.width()) <
+ RoundUp(visible_rect.right(), 2))
+ return false;
+ break;
+ case VideoFrame::NATIVE_TEXTURE:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ // NATIVE_TEXTURE and HOLE have no software-allocated buffers and are
+ // allowed to skip the below check and be empty.
+ return true;
+ }
+
+ // Check that software-allocated buffer formats are not empty.
+ return (!coded_size.IsEmpty() && !visible_rect.IsEmpty() &&
+ !natural_size.IsEmpty());
}
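Because the factory functions now return NULL on a bad configuration rather than DCHECK-ing, callers can validate a geometry up front. A hypothetical caller-side helper, with the name and wrapping assumed rather than taken from this patch:

#include "media/base/video_frame.h"
#include "ui/gfx/rect.h"

// Hypothetical helper: would a YV12 allocation of this geometry be accepted?
bool CanAllocateYV12(const gfx::Size& coded_size) {
  return media::VideoFrame::IsValidConfig(
      media::VideoFrame::YV12, coded_size, gfx::Rect(coded_size), coded_size);
}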
// static
scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
- scoped_ptr<MailboxHolder> mailbox_holder,
- uint32 texture_target,
+ scoped_ptr<gpu::MailboxHolder> mailbox_holder,
+ const ReleaseMailboxCB& mailbox_holder_release_cb,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp,
- const ReadPixelsCB& read_pixels_cb,
- const base::Closure& no_longer_needed_cb) {
+ const ReadPixelsCB& read_pixels_cb) {
scoped_refptr<VideoFrame> frame(new VideoFrame(NATIVE_TEXTURE,
coded_size,
visible_rect,
natural_size,
+ mailbox_holder.Pass(),
timestamp,
false));
- frame->texture_mailbox_holder_ = mailbox_holder.Pass();
- frame->texture_target_ = texture_target;
+ frame->mailbox_holder_release_cb_ = mailbox_holder_release_cb;
frame->read_pixels_cb_ = read_pixels_cb;
- frame->no_longer_needed_cb_ = no_longer_needed_cb;
return frame;
}
@@ -130,13 +191,21 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
base::SharedMemoryHandle handle,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb) {
+ if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
+ return NULL;
if (data_size < AllocationSize(format, coded_size))
return NULL;
switch (format) {
- case I420: {
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp, false));
+ case VideoFrame::I420: {
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(format,
+ coded_size,
+ visible_rect,
+ natural_size,
+ scoped_ptr<gpu::MailboxHolder>(),
+ timestamp,
+ false));
frame->shared_memory_handle_ = handle;
frame->strides_[kYPlane] = coded_size.width();
frame->strides_[kUPlane] = coded_size.width() / 2;
@@ -153,6 +222,53 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
}
}
+#if defined(OS_POSIX)
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalDmabufs(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ const std::vector<int> dmabuf_fds,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb) {
+ if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
+ return NULL;
+
+ if (dmabuf_fds.size() != NumPlanes(format)) {
+ LOG(FATAL) << "Not enough dmabuf fds provided!";
+ return NULL;
+ }
+
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(format,
+ coded_size,
+ visible_rect,
+ natural_size,
+ scoped_ptr<gpu::MailboxHolder>(),
+ timestamp,
+ false));
+
+ for (size_t i = 0; i < dmabuf_fds.size(); ++i) {
+ int duped_fd = HANDLE_EINTR(dup(dmabuf_fds[i]));
+ if (duped_fd == -1) {
+ // Any fds duped in previous iterations will be closed when the
+ // partially-created frame drops out of scope here.
+ DLOG(ERROR) << "Failed duplicating a dmabuf fd";
+ return NULL;
+ }
+
+ frame->dmabuf_fds_[i].reset(duped_fd);
+ // Data is accessible only via fds.
+ frame->data_[i] = NULL;
+ frame->strides_[i] = 0;
+ }
+
+ frame->no_longer_needed_cb_ = no_longer_needed_cb;
+ return frame;
+}
+#endif
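The per-plane dup() into a base::ScopedFD is what gives the frame a reference independent of the caller's; closing either copy leaves the other valid. A reduced sketch of just that ownership step, with the helper name and header paths assumed:

#include <unistd.h>
#include "base/files/scoped_file.h"
#include "base/posix/eintr_wrapper.h"

// Hypothetical reduction of the loop body above: dup() the caller's fd so
// both sides hold independent references; ScopedFD close()s ours on reset
// or destruction.
bool TakeDmabufReference(int caller_fd, base::ScopedFD* out) {
  int duped_fd = HANDLE_EINTR(dup(caller_fd));
  if (duped_fd == -1)
    return false;  // The caller's fd is untouched; taking a reference failed.
  out->reset(duped_fd);
  return true;
}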
+
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
Format format,
@@ -167,9 +283,17 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
uint8* v_data,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb) {
- DCHECK(format == YV12 || format == YV16 || format == I420) << format;
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp, false));
+ if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
+ return NULL;
+
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(format,
+ coded_size,
+ visible_rect,
+ natural_size,
+ scoped_ptr<gpu::MailboxHolder>(),
+ timestamp,
+ false));
frame->strides_[kYPlane] = y_stride;
frame->strides_[kUPlane] = u_stride;
frame->strides_[kVPlane] = v_stride;
@@ -183,10 +307,22 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
// static
scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
const scoped_refptr<VideoFrame>& frame,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
const base::Closure& no_longer_needed_cb) {
- scoped_refptr<VideoFrame> wrapped_frame(new VideoFrame(
- frame->format(), frame->coded_size(), frame->visible_rect(),
- frame->natural_size(), frame->GetTimestamp(), frame->end_of_stream()));
+ // NATIVE_TEXTURE frames need mailbox info propagated, and there's no support
+ // for that here yet, see http://crbug.com/362521.
+ CHECK(frame->format() != NATIVE_TEXTURE);
+
+ DCHECK(frame->visible_rect().Contains(visible_rect));
+ scoped_refptr<VideoFrame> wrapped_frame(
+ new VideoFrame(frame->format(),
+ frame->coded_size(),
+ visible_rect,
+ natural_size,
+ scoped_ptr<gpu::MailboxHolder>(),
+ frame->timestamp(),
+ frame->end_of_stream()));
for (size_t i = 0; i < NumPlanes(frame->format()); ++i) {
wrapped_frame->strides_[i] = frame->stride(i);
@@ -203,6 +339,7 @@ scoped_refptr<VideoFrame> VideoFrame::CreateEOSFrame() {
gfx::Size(),
gfx::Rect(),
gfx::Size(),
+ scoped_ptr<gpu::MailboxHolder>(),
kNoTimestamp(),
true);
}
@@ -212,7 +349,6 @@ scoped_refptr<VideoFrame> VideoFrame::CreateColorFrame(
const gfx::Size& size,
uint8 y, uint8 u, uint8 v,
base::TimeDelta timestamp) {
- DCHECK(IsValidConfig(VideoFrame::YV12, size, gfx::Rect(size), size));
scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
VideoFrame::YV12, size, gfx::Rect(size), size, timestamp);
FillYUV(frame.get(), y, u, v);
@@ -239,8 +375,14 @@ scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(const gfx::Size& size) {
scoped_refptr<VideoFrame> VideoFrame::CreateHoleFrame(
const gfx::Size& size) {
DCHECK(IsValidConfig(VideoFrame::HOLE, size, gfx::Rect(size), size));
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- VideoFrame::HOLE, size, gfx::Rect(size), size, base::TimeDelta(), false));
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(VideoFrame::HOLE,
+ size,
+ gfx::Rect(size),
+ size,
+ scoped_ptr<gpu::MailboxHolder>(),
+ base::TimeDelta(),
+ false));
return frame;
}
#endif // defined(VIDEO_HOLE)
@@ -253,26 +395,23 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
return 0;
+ case VideoFrame::NV12:
+ return 2;
case VideoFrame::YV12:
case VideoFrame::YV16:
case VideoFrame::I420:
case VideoFrame::YV12J:
+ case VideoFrame::YV24:
return 3;
case VideoFrame::YV12A:
return 4;
case VideoFrame::UNKNOWN:
- case VideoFrame::HISTOGRAM_MAX:
break;
}
NOTREACHED() << "Unsupported video frame format: " << format;
return 0;
}
-static inline size_t RoundUp(size_t value, size_t alignment) {
- // Check that |alignment| is a power of 2.
- DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
- return ((value + (alignment - 1)) & ~(alignment-1));
-}
// static
size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
@@ -283,51 +422,73 @@ size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
}
// static
-size_t VideoFrame::PlaneAllocationSize(Format format,
- size_t plane,
- const gfx::Size& coded_size) {
- const size_t area =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
+gfx::Size VideoFrame::PlaneSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size) {
+ // Align to multiple-of-two size overall. This ensures that non-subsampled
+ // planes can be addressed by pixel with the same scaling as the subsampled
+ // planes.
+ const int width = RoundUp(coded_size.width(), 2);
+ const int height = RoundUp(coded_size.height(), 2);
switch (format) {
+ case VideoFrame::YV24:
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return gfx::Size(width, height);
+ default:
+ break;
+ }
+ break;
case VideoFrame::YV12:
case VideoFrame::YV12J:
- case VideoFrame::I420: {
+ case VideoFrame::I420:
switch (plane) {
case VideoFrame::kYPlane:
- return area;
+ return gfx::Size(width, height);
case VideoFrame::kUPlane:
case VideoFrame::kVPlane:
- return area / 4;
+ return gfx::Size(width / 2, height / 2);
default:
break;
}
- }
- case VideoFrame::YV12A: {
+ break;
+ case VideoFrame::YV12A:
switch (plane) {
case VideoFrame::kYPlane:
case VideoFrame::kAPlane:
- return area;
+ return gfx::Size(width, height);
case VideoFrame::kUPlane:
case VideoFrame::kVPlane:
- return area / 4;
+ return gfx::Size(width / 2, height / 2);
default:
break;
}
- }
- case VideoFrame::YV16: {
+ break;
+ case VideoFrame::YV16:
switch (plane) {
case VideoFrame::kYPlane:
- return area;
+ return gfx::Size(width, height);
case VideoFrame::kUPlane:
case VideoFrame::kVPlane:
- return area / 2;
+ return gfx::Size(width / 2, height);
default:
break;
}
- }
+ break;
+ case VideoFrame::NV12:
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ return gfx::Size(width, height);
+ case VideoFrame::kUVPlane:
+ return gfx::Size(width, height / 2);
+ default:
+ break;
+ }
+ break;
case VideoFrame::UNKNOWN:
case VideoFrame::NATIVE_TEXTURE:
- case VideoFrame::HISTOGRAM_MAX:
#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
@@ -335,6 +496,74 @@ size_t VideoFrame::PlaneAllocationSize(Format format,
}
NOTREACHED() << "Unsupported video frame format/plane: "
<< format << "/" << plane;
+ return gfx::Size();
+}
+
+size_t VideoFrame::PlaneAllocationSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size) {
+ // VideoFrame formats are (so far) all YUV and 1 byte per sample.
+ return PlaneSize(format, plane, coded_size).GetArea();
+}
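To see the two functions together, consider a hypothetical 13x11 YV12 request: both dimensions round up to 14x12, the Y plane is 14x12 = 168 bytes, and each 2x2-subsampled chroma plane is 7x6 = 42 bytes. In gtest style (an illustrative test, not one this patch adds):

#include "media/base/video_frame.h"
#include "testing/gtest/include/gtest/gtest.h"

// Illustrative only; assumes AllocationSize sums the tightly packed planes.
TEST(VideoFramePlaneMathExample, YV12OddSize) {
  const gfx::Size coded(13, 11);  // Rounded internally to 14x12.
  EXPECT_EQ(gfx::Size(14, 12),
            media::VideoFrame::PlaneSize(
                media::VideoFrame::YV12, media::VideoFrame::kYPlane, coded));
  EXPECT_EQ(gfx::Size(7, 6),
            media::VideoFrame::PlaneSize(
                media::VideoFrame::YV12, media::VideoFrame::kUPlane, coded));
  // 168 (Y) + 42 (U) + 42 (V) bytes.
  EXPECT_EQ(252u, media::VideoFrame::AllocationSize(media::VideoFrame::YV12,
                                                    coded));
}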
+
+// static
+int VideoFrame::PlaneHorizontalBitsPerPixel(Format format, size_t plane) {
+ switch (format) {
+ case VideoFrame::YV24:
+ switch (plane) {
+ case kYPlane:
+ case kUPlane:
+ case kVPlane:
+ return 8;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ case VideoFrame::YV12J:
+ switch (plane) {
+ case kYPlane:
+ return 8;
+ case kUPlane:
+ case kVPlane:
+ return 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::YV12A:
+ switch (plane) {
+ case kYPlane:
+ case kAPlane:
+ return 8;
+ case kUPlane:
+ case kVPlane:
+ return 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::NV12:
+ switch (plane) {
+ case kYPlane:
+ return 8;
+ case kUVPlane:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::UNKNOWN:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case VideoFrame::NATIVE_TEXTURE:
+ break;
+ }
+ NOTREACHED() << "Unsupported video frame format/plane: "
+ << format << "/" << plane;
return 0;
}
@@ -347,7 +576,7 @@ static void ReleaseData(uint8* data) {
void VideoFrame::AllocateYUV() {
DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
format_ == VideoFrame::YV12A || format_ == VideoFrame::I420 ||
- format_ == VideoFrame::YV12J);
+ format_ == VideoFrame::YV12J || format_ == VideoFrame::YV24);
// Align Y rows at least at 16 byte boundaries. The stride for both
// YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
// U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
@@ -358,11 +587,11 @@ void VideoFrame::AllocateYUV() {
// the Y values of the final row, but assumes that the last row of U & V
// applies to a full two rows of Y. YV12A is the same as YV12, but with an
// additional alpha plane that has the same size and alignment as the Y plane.
-
size_t y_stride = RoundUp(row_bytes(VideoFrame::kYPlane),
kFrameSizeAlignment);
size_t uv_stride = RoundUp(row_bytes(VideoFrame::kUPlane),
kFrameSizeAlignment);
+
// The *2 here is because some formats (e.g. h264) allow interlaced coding,
// and then the size needs to be a multiple of two macroblocks (vertically).
// See libavcodec/utils.c:avcodec_align_dimensions2().
@@ -402,21 +631,32 @@ VideoFrame::VideoFrame(VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
+ scoped_ptr<gpu::MailboxHolder> mailbox_holder,
base::TimeDelta timestamp,
bool end_of_stream)
: format_(format),
coded_size_(coded_size),
visible_rect_(visible_rect),
natural_size_(natural_size),
- texture_target_(0),
+ mailbox_holder_(mailbox_holder.Pass()),
shared_memory_handle_(base::SharedMemory::NULLHandle()),
timestamp_(timestamp),
end_of_stream_(end_of_stream) {
+ DCHECK(IsValidConfig(format_, coded_size_, visible_rect_, natural_size_));
+
memset(&strides_, 0, sizeof(strides_));
memset(&data_, 0, sizeof(data_));
}
VideoFrame::~VideoFrame() {
+ if (!mailbox_holder_release_cb_.is_null()) {
+ std::vector<uint32> release_sync_points;
+ {
+ base::AutoLock locker(release_sync_point_lock_);
+ release_sync_points_.swap(release_sync_points);
+ }
+ base::ResetAndReturn(&mailbox_holder_release_cb_).Run(release_sync_points);
+ }
if (!no_longer_needed_cb_.is_null())
base::ResetAndReturn(&no_longer_needed_cb_).Run();
}
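The destructor drains the accumulated sync points inside the lock but runs the release callback after dropping it, so the callback can never deadlock against a client still calling AppendReleaseSyncPoint. A reduced sketch with free-standing names assumed:

#include <vector>
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/synchronization/lock.h"

typedef base::Callback<void(const std::vector<uint32>&)> ReleaseCB;

// Hypothetical reduction of the destructor logic above.
void DrainAndRelease(base::Lock* lock,
                     std::vector<uint32>* pending,
                     ReleaseCB* release_cb) {
  std::vector<uint32> points;
  {
    base::AutoLock locker(*lock);
    points.swap(*pending);  // O(1) hand-off; |pending| is left empty.
  }
  if (!release_cb->is_null())
    base::ResetAndReturn(release_cb).Run(points);  // Runs outside the lock.
}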
@@ -434,25 +674,60 @@ int VideoFrame::row_bytes(size_t plane) const {
DCHECK(IsValidPlane(plane));
int width = coded_size_.width();
switch (format_) {
- // Planar, 8bpp.
- case YV12A:
- if (plane == kAPlane)
- return width;
- // Fallthrough.
- case YV12:
- case YV16:
- case I420:
- case YV12J:
- if (plane == kYPlane)
- return width;
- return RoundUp(width, 2) / 2;
-
- default:
+ case VideoFrame::YV24:
+ switch (plane) {
+ case kYPlane:
+ case kUPlane:
+ case kVPlane:
+ return width;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ case VideoFrame::YV12J:
+ switch (plane) {
+ case kYPlane:
+ return width;
+ case kUPlane:
+ case kVPlane:
+ return RoundUp(width, 2) / 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::YV12A:
+ switch (plane) {
+ case kYPlane:
+ case kAPlane:
+ return width;
+ case kUPlane:
+ case kVPlane:
+ return RoundUp(width, 2) / 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::NV12:
+ switch (plane) {
+ case kYPlane:
+ case kUVPlane:
+ return width;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::UNKNOWN:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case VideoFrame::NATIVE_TEXTURE:
break;
}
-
- // Intentionally leave out non-production formats.
- NOTREACHED() << "Unsupported video frame format: " << format_;
+ NOTREACHED() << "Unsupported video frame format/plane: "
+ << format_ << "/" << plane;
return 0;
}
@@ -460,25 +735,61 @@ int VideoFrame::rows(size_t plane) const {
DCHECK(IsValidPlane(plane));
int height = coded_size_.height();
switch (format_) {
- case YV16:
- return height;
-
- case YV12A:
- if (plane == kAPlane)
- return height;
- // Fallthrough.
- case YV12:
- case I420:
- if (plane == kYPlane)
- return height;
- return RoundUp(height, 2) / 2;
-
- default:
+ case VideoFrame::YV24:
+ case VideoFrame::YV16:
+ switch (plane) {
+ case kYPlane:
+ case kUPlane:
+ case kVPlane:
+ return height;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::YV12:
+ case VideoFrame::YV12J:
+ case VideoFrame::I420:
+ switch (plane) {
+ case kYPlane:
+ return height;
+ case kUPlane:
+ case kVPlane:
+ return RoundUp(height, 2) / 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::YV12A:
+ switch (plane) {
+ case kYPlane:
+ case kAPlane:
+ return height;
+ case kUPlane:
+ case kVPlane:
+ return RoundUp(height, 2) / 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::NV12:
+ switch (plane) {
+ case kYPlane:
+ return height;
+ case kUVPlane:
+ return RoundUp(height, 2) / 2;
+ default:
+ break;
+ }
+ break;
+ case VideoFrame::UNKNOWN:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ case VideoFrame::NATIVE_TEXTURE:
break;
}
-
- // Intentionally leave out non-production formats.
- NOTREACHED() << "Unsupported video frame format: " << format_;
+ NOTREACHED() << "Unsupported video frame format/plane: "
+ << format_ << "/" << plane;
return 0;
}
@@ -487,19 +798,28 @@ uint8* VideoFrame::data(size_t plane) const {
return data_[plane];
}
-VideoFrame::MailboxHolder* VideoFrame::texture_mailbox() const {
+const gpu::MailboxHolder* VideoFrame::mailbox_holder() const {
DCHECK_EQ(format_, NATIVE_TEXTURE);
- return texture_mailbox_holder_.get();
+ return mailbox_holder_.get();
}
-uint32 VideoFrame::texture_target() const {
+base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
+ return shared_memory_handle_;
+}
+
+void VideoFrame::AppendReleaseSyncPoint(uint32 sync_point) {
DCHECK_EQ(format_, NATIVE_TEXTURE);
- return texture_target_;
+ if (!sync_point)
+ return;
+ base::AutoLock locker(release_sync_point_lock_);
+ release_sync_points_.push_back(sync_point);
}
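On the client side this is typically fed from a GL context once the consumer is done sampling the texture. A hypothetical helper; the GL entry point wiring is an assumption, not something this patch adds:

#include "gpu/command_buffer/client/gles2_interface.h"
#include "media/base/video_frame.h"

// Hypothetical client helper: record a sync point the producer must wait on
// before reusing the mailbox that backs |frame|.
void SignalDoneWithFrame(gpu::gles2::GLES2Interface* gl,
                         const scoped_refptr<media::VideoFrame>& frame) {
  frame->AppendReleaseSyncPoint(gl->InsertSyncPointCHROMIUM());
}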
-base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
- return shared_memory_handle_;
+#if defined(OS_POSIX)
+int VideoFrame::dmabuf_fd(size_t plane) const {
+ return dmabuf_fds_[plane].get();
}
+#endif
void VideoFrame::HashFrameForTesting(base::MD5Context* context) {
for (int plane = 0; plane < kMaxPlanes; ++plane) {
@@ -513,17 +833,4 @@ void VideoFrame::HashFrameForTesting(base::MD5Context* context) {
}
}
-VideoFrame::MailboxHolder::MailboxHolder(
- const gpu::Mailbox& mailbox,
- unsigned sync_point,
- const TextureNoLongerNeededCallback& release_callback)
- : mailbox_(mailbox),
- sync_point_(sync_point),
- release_callback_(release_callback) {}
-
-VideoFrame::MailboxHolder::~MailboxHolder() {
- if (!release_callback_.is_null())
- release_callback_.Run(sync_point_);
-}
-
} // namespace media
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index b51bfe96d2d..0696a554c8f 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -5,16 +5,22 @@
#ifndef MEDIA_BASE_VIDEO_FRAME_H_
#define MEDIA_BASE_VIDEO_FRAME_H_
+#include <vector>
+
#include "base/callback.h"
#include "base/md5.h"
#include "base/memory/shared_memory.h"
-#include "gpu/command_buffer/common/mailbox.h"
+#include "base/synchronization/lock.h"
#include "media/base/buffers.h"
#include "ui/gfx/rect.h"
#include "ui/gfx/size.h"
class SkBitmap;
+namespace gpu {
+struct MailboxHolder;
+} // namespace gpu
+
namespace media {
class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
@@ -30,6 +36,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
kYPlane = 0,
kUPlane = 1,
+ kUVPlane = kUPlane,
kVPlane = 2,
kAPlane = 3,
};
@@ -49,47 +56,16 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
#endif // defined(VIDEO_HOLE)
NATIVE_TEXTURE = 6, // Native texture. Pixel-format agnostic.
YV12J = 7, // JPEG color range version of YV12
- HISTOGRAM_MAX, // Must always be greatest.
+ NV12 = 8, // 12bpp 1x1 Y plane followed by an interleaved 2x2 UV plane.
+ YV24 = 9, // 24bpp YUV planar, no subsampling.
+ FORMAT_MAX = YV24, // Must always be equal to largest entry logged.
};
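FORMAT_MAX keeps the contract HISTOGRAM_MAX used to provide: UMA enumeration histograms need a boundary one past the largest logged value. A hypothetical call site (the macro exists in base; the histogram name is made up):

#include "base/metrics/histogram.h"
#include "media/base/video_frame.h"

// Hypothetical logging site; adding enum entries keeps working as long as
// FORMAT_MAX is bumped to the largest entry.
void LogFrameFormat(const scoped_refptr<media::VideoFrame>& frame) {
  UMA_HISTOGRAM_ENUMERATION("Media.VideoFrame.Format", frame->format(),
                            media::VideoFrame::FORMAT_MAX + 1);
}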
// Returns the name of a Format as a string.
static std::string FormatToString(Format format);
- // This class calls the TextureNoLongerNeededCallback when this class is
- // destroyed. Users can query the current sync point associated with this
- // mailbox with sync_point(), and should call Resync() with a new sync point
- // to ensure the mailbox remains valid for the issued commands.
- // valid for the issued commands.
- class MEDIA_EXPORT MailboxHolder {
- public:
- typedef base::Callback<void(uint32 sync_point)>
- TextureNoLongerNeededCallback;
-
- MailboxHolder(const gpu::Mailbox& mailbox,
- unsigned sync_point,
- const TextureNoLongerNeededCallback& release_callback);
- ~MailboxHolder();
-
- const gpu::Mailbox& mailbox() const { return mailbox_; }
- unsigned sync_point() const { return sync_point_; }
-
- void Resync(unsigned sync_point) { sync_point_ = sync_point; }
-
- private:
-
- gpu::Mailbox mailbox_;
- unsigned sync_point_;
- TextureNoLongerNeededCallback release_callback_;
- };
-
-
// Creates a new frame in system memory with given parameters. Buffers for
// the frame are allocated but not initialized.
- // |coded_size| is the width and height of the frame data in pixels.
- // |visible_rect| is the visible portion of |coded_size|, after cropping (if
- // any) is applied.
- // |natural_size| is the width and height of the frame when the frame's aspect
- // ratio is applied to |visible_rect|.
static scoped_refptr<VideoFrame> CreateFrame(
Format format,
const gfx::Size& coded_size,
@@ -108,29 +84,28 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// |const SkBitmap&| parameter.
typedef base::Callback<void(const SkBitmap&)> ReadPixelsCB;
- // Wraps a native texture of the given parameters with a VideoFrame. When the
- // frame is destroyed |no_longer_needed_cb.Run()| will be called.
- // |coded_size| is the width and height of the frame data in pixels.
- // |visible_rect| is the visible portion of |coded_size|, after cropping (if
- // any) is applied.
- // |natural_size| is the width and height of the frame when the frame's aspect
- // ratio is applied to |visible_rect|.
+ // CB to be called on the mailbox backing this frame when the frame is
+ // destroyed.
+ typedef base::Callback<void(const std::vector<uint32>&)> ReleaseMailboxCB;
+ // Wraps a native texture of the given parameters with a VideoFrame. The
+ // backing of the VideoFrame is the mailbox carried by |mailbox_holder|, and
+ // |mailbox_holder_release_cb| will be called with the accumulated release
+ // sync points as its argument when the VideoFrame is destroyed.
// |read_pixels_cb| may be used to do (slow!) readbacks from the
// texture to main memory.
static scoped_refptr<VideoFrame> WrapNativeTexture(
- scoped_ptr<MailboxHolder> mailbox_holder,
- uint32 texture_target,
+ scoped_ptr<gpu::MailboxHolder> mailbox_holder,
+ const ReleaseMailboxCB& mailbox_holder_release_cb,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp,
- const ReadPixelsCB& read_pixels_cb,
- const base::Closure& no_longer_needed_cb);
+ const ReadPixelsCB& read_pixels_cb);
// Read pixels from the native texture backing |*this| and write
// them to |pixels| as BGRA. |pixels| must point to a buffer at
- // least as large as 4*visible_rect().width()*visible_rect().height().
+ // least as large as 4 * visible_rect().size().GetArea().
void ReadPixelsFromNativeTexture(const SkBitmap& pixels);
// Wraps packed image data residing in a memory buffer with a VideoFrame.
@@ -150,6 +125,27 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb);
+#if defined(OS_POSIX)
+ // Wraps provided dmabufs
+ // (https://www.kernel.org/doc/Documentation/dma-buf-sharing.txt) with a
+ // VideoFrame. The dmabuf fds are dup()ed on creation, so that the VideoFrame
+ // retains a reference to them, and are automatically close()d on destruction,
+ // dropping the reference. The caller may safely close() its reference after
+ // calling WrapExternalDmabufs().
+ // The image data is only accessible via dmabuf fds, which are usually passed
+ // directly to a hardware device and/or to another process, or can also be
+ // mapped via mmap() for CPU access.
+ // When the frame is destroyed, |no_longer_needed_cb.Run()| will be called.
+ static scoped_refptr<VideoFrame> WrapExternalDmabufs(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ const std::vector<int> dmabuf_fds,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb);
+#endif
+
// Wraps external YUV data of the given parameters with a VideoFrame.
// The returned VideoFrame does not own the data passed in. When the frame
// is destroyed |no_longer_needed_cb.Run()| will be called.
@@ -170,9 +166,12 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const base::Closure& no_longer_needed_cb);
// Wraps |frame| and calls |no_longer_needed_cb| when the wrapper VideoFrame
- // gets destroyed.
+ // gets destroyed. |visible_rect| must be a sub rect within
+ // frame->visible_rect().
static scoped_refptr<VideoFrame> WrapVideoFrame(
const scoped_refptr<VideoFrame>& frame,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
const base::Closure& no_longer_needed_cb);
// Creates a frame which indicates end-of-stream.
@@ -199,12 +198,20 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// given coded size and format.
static size_t AllocationSize(Format format, const gfx::Size& coded_size);
+ // Returns the plane size for a plane of the given coded size and format.
+ static gfx::Size PlaneSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size);
+
// Returns the required allocation size for a (tightly packed) plane of the
// given coded size and format.
static size_t PlaneAllocationSize(Format format,
size_t plane,
const gfx::Size& coded_size);
+ // Returns horizontal bits per pixel for given |plane| and |format|.
+ static int PlaneHorizontalBitsPerPixel(Format format, size_t plane);
+
Format format() const { return format_; }
const gfx::Size& coded_size() const { return coded_size_; }
@@ -224,27 +231,36 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// VideoFrame object and must not be freed by the caller.
uint8* data(size_t plane) const;
- // Returns the mailbox of the native texture wrapped by this frame. Only
- // valid to call if this is a NATIVE_TEXTURE frame. Before using the
+ // Returns the mailbox holder of the native texture wrapped by this frame.
+ // Only valid to call if this is a NATIVE_TEXTURE frame. Before using the
// mailbox, the caller must wait for the included sync point.
- MailboxHolder* texture_mailbox() const;
-
- // Returns the texture target. Only valid for NATIVE_TEXTURE frames.
- uint32 texture_target() const;
+ const gpu::MailboxHolder* mailbox_holder() const;
// Returns the shared-memory handle, if present
base::SharedMemoryHandle shared_memory_handle() const;
+#if defined(OS_POSIX)
+ // Returns backing dmabuf file descriptor for given |plane|, if present.
+ int dmabuf_fd(size_t plane) const;
+#endif
+
// Returns true if this VideoFrame represents the end of the stream.
bool end_of_stream() const { return end_of_stream_; }
- base::TimeDelta GetTimestamp() const {
+ base::TimeDelta timestamp() const {
return timestamp_;
}
- void SetTimestamp(const base::TimeDelta& timestamp) {
+ void set_timestamp(const base::TimeDelta& timestamp) {
timestamp_ = timestamp;
}
+ // Append |sync_point| into |release_sync_points_| which will be passed to
+ // the video decoder when |mailbox_holder_release_cb_| is called so that
+ // the video decoder waits for the sync points before reusing the mailbox.
+ // Multiple clients can append multiple sync points to one frame.
+ // This method is thread safe. Both blink and compositor threads can call it.
+ void AppendReleaseSyncPoint(uint32 sync_point);
+
// Used to keep a running hash of seen frames. Expects an initialized MD5
// context. Calls MD5Update with the context and the contents of the frame.
void HashFrameForTesting(base::MD5Context* context);
@@ -256,6 +272,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
+ scoped_ptr<gpu::MailboxHolder> mailbox_holder,
base::TimeDelta timestamp,
bool end_of_stream);
virtual ~VideoFrame();
@@ -266,17 +283,23 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
bool IsValidPlane(size_t plane) const;
// Frame format.
- Format format_;
+ const Format format_;
- // Width and height of the video frame.
- gfx::Size coded_size_;
+ // Width and height of the video frame, in pixels. This must include pixel
+ // data for the whole image; i.e. for YUV formats with subsampled chroma
+ // planes, in the case that the visible portion of the image does not line up
+ // on a sample boundary, |coded_size_| must be rounded up appropriately and
+ // the pixel data provided for the odd pixels.
+ const gfx::Size coded_size_;
- // Width, height, and offsets of the visible portion of the video frame.
- gfx::Rect visible_rect_;
+ // Width, height, and offsets of the visible portion of the video frame. Must
+ // be a subrect of |coded_size_|. Can be odd with respect to the sample
+ // boundaries, e.g. for formats with subsampled chroma.
+ const gfx::Rect visible_rect_;
- // Width and height of the visible portion of the video frame with aspect
- // ratio taken into account.
- gfx::Size natural_size_;
+ // Width and height of the visible portion of the video frame
+ // (|visible_rect_.size()|) with aspect ratio taken into account.
+ const gfx::Size natural_size_;
// Array of strides for each plane, typically greater or equal to the width
// of the surface divided by the horizontal sampling period. Note that
@@ -287,17 +310,26 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
uint8* data_[kMaxPlanes];
// Native texture mailbox, if this is a NATIVE_TEXTURE frame.
- scoped_ptr<MailboxHolder> texture_mailbox_holder_;
- uint32 texture_target_;
+ const scoped_ptr<gpu::MailboxHolder> mailbox_holder_;
+ ReleaseMailboxCB mailbox_holder_release_cb_;
ReadPixelsCB read_pixels_cb_;
// Shared memory handle, if this frame was allocated from shared memory.
base::SharedMemoryHandle shared_memory_handle_;
+#if defined(OS_POSIX)
+ // Dmabufs for each plane, if this frame is wrapping memory
+ // acquired via dmabuf.
+ base::ScopedFD dmabuf_fds_[kMaxPlanes];
+#endif
+
base::Closure no_longer_needed_cb_;
base::TimeDelta timestamp_;
+ base::Lock release_sync_point_lock_;
+ std::vector<uint32> release_sync_points_;
+
const bool end_of_stream_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
diff --git a/chromium/media/base/video_frame_pool.cc b/chromium/media/base/video_frame_pool.cc
index 4c5a5e31e22..800d0470f3e 100644
--- a/chromium/media/base/video_frame_pool.cc
+++ b/chromium/media/base/video_frame_pool.cc
@@ -75,18 +75,19 @@ scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
pool_frame->visible_rect() == visible_rect &&
pool_frame->natural_size() == natural_size) {
frame = pool_frame;
- frame->SetTimestamp(kNoTimestamp());
+ frame->set_timestamp(timestamp);
break;
}
}
if (!frame) {
frame = VideoFrame::CreateFrame(
- format, coded_size, visible_rect, natural_size, kNoTimestamp());
+ format, coded_size, visible_rect, natural_size, timestamp);
}
return VideoFrame::WrapVideoFrame(
- frame, base::Bind(&VideoFramePool::PoolImpl::FrameReleased, this, frame));
+ frame, frame->visible_rect(), frame->natural_size(),
+ base::Bind(&VideoFramePool::PoolImpl::FrameReleased, this, frame));
}
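The wrapper returned here is the recycling mechanism: when it is destroyed, FrameReleased() puts the backing frame back in the pool, and the next same-geometry request reuses its memory with a fresh timestamp. A hypothetical usage sketch:

#include "base/logging.h"
#include "media/base/video_frame_pool.h"

// Hypothetical sketch: two same-geometry requests share one allocation.
void ReuseExample(media::VideoFramePool* pool) {
  const gfx::Size size(320, 240);
  scoped_refptr<media::VideoFrame> a = pool->CreateFrame(
      media::VideoFrame::YV12, size, gfx::Rect(size), size,
      base::TimeDelta::FromMilliseconds(10));
  const uint8* y = a->data(media::VideoFrame::kYPlane);
  a = NULL;  // Wrapper dies; FrameReleased() returns the frame to the pool.
  scoped_refptr<media::VideoFrame> b = pool->CreateFrame(
      media::VideoFrame::YV12, size, gfx::Rect(size), size,
      base::TimeDelta::FromMilliseconds(20));
  DCHECK_EQ(y, b->data(media::VideoFrame::kYPlane));  // Same backing memory.
}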
void VideoFramePool::PoolImpl::Shutdown() {
diff --git a/chromium/media/base/video_frame_pool_unittest.cc b/chromium/media/base/video_frame_pool_unittest.cc
index 707279f8b9b..7f3694b9f6d 100644
--- a/chromium/media/base/video_frame_pool_unittest.cc
+++ b/chromium/media/base/video_frame_pool_unittest.cc
@@ -16,9 +16,19 @@ class VideoFramePoolTest : public ::testing::Test {
gfx::Size coded_size(320,240);
gfx::Rect visible_rect(coded_size);
gfx::Size natural_size(coded_size);
- return pool_->CreateFrame(
- format, coded_size, visible_rect, natural_size,
- base::TimeDelta::FromMilliseconds(timestamp_ms));
+
+ scoped_refptr<VideoFrame> frame =
+ pool_->CreateFrame(
+ format, coded_size, visible_rect, natural_size,
+ base::TimeDelta::FromMilliseconds(timestamp_ms));
+ EXPECT_EQ(format, frame->format());
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(timestamp_ms),
+ frame->timestamp());
+ EXPECT_EQ(coded_size, frame->coded_size());
+ EXPECT_EQ(visible_rect, frame->visible_rect());
+ EXPECT_EQ(natural_size, frame->natural_size());
+
+ return frame;
}
void CheckPoolSize(size_t size) const {
@@ -37,7 +47,7 @@ TEST_F(VideoFramePoolTest, SimpleFrameReuse) {
frame = NULL;
// Verify that the next frame from the pool uses the same memory.
- scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12, 10);
+ scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12, 20);
EXPECT_EQ(old_y_data, new_frame->data(VideoFrame::kYPlane));
}
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index 9c7eab05995..618d68f458f 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -10,6 +10,7 @@
#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/buffers.h"
#include "media/base/yuv_convert.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -87,8 +88,7 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
// Fill each plane to its reported extents and verify accessors report non
// zero values. Additionally, for the first plane verify the rows and
// row_bytes values are correct.
-void ExpectFrameExtents(VideoFrame::Format format, int planes,
- int bytes_per_pixel, const char* expected_hash) {
+void ExpectFrameExtents(VideoFrame::Format format, const char* expected_hash) {
const unsigned char kFillByte = 0x80;
const int kWidth = 61;
const int kHeight = 31;
@@ -99,18 +99,14 @@ void ExpectFrameExtents(VideoFrame::Format format, int planes,
format, size, gfx::Rect(size), size, kTimestamp);
ASSERT_TRUE(frame.get());
- for(int plane = 0; plane < planes; plane++) {
+ int planes = VideoFrame::NumPlanes(format);
+ for (int plane = 0; plane < planes; plane++) {
SCOPED_TRACE(base::StringPrintf("Checking plane %d", plane));
EXPECT_TRUE(frame->data(plane));
EXPECT_TRUE(frame->stride(plane));
EXPECT_TRUE(frame->rows(plane));
EXPECT_TRUE(frame->row_bytes(plane));
- if (plane == 0) {
- EXPECT_EQ(frame->rows(plane), kHeight);
- EXPECT_EQ(frame->row_bytes(plane), kWidth * bytes_per_pixel);
- }
-
memset(frame->data(plane), kFillByte,
frame->stride(plane) * frame->rows(plane));
}
@@ -174,7 +170,7 @@ TEST(VideoFrame, CreateBlackFrame) {
ASSERT_TRUE(frame.get());
// Test basic properties.
- EXPECT_EQ(0, frame->GetTimestamp().InMicroseconds());
+ EXPECT_EQ(0, frame->timestamp().InMicroseconds());
EXPECT_FALSE(frame->end_of_stream());
// Test |frame| properties.
@@ -199,82 +195,117 @@ TEST(VideoFrame, CreateBlackFrame) {
}
}
+static void FrameNoLongerNeededCallback(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool* triggered) {
+ *triggered = true;
+}
+
+TEST(VideoFrame, WrapVideoFrame) {
+ const int kWidth = 4;
+ const int kHeight = 4;
+ scoped_refptr<media::VideoFrame> frame;
+ bool no_longer_needed_triggered = false;
+ {
+ scoped_refptr<media::VideoFrame> wrapped_frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight));
+ ASSERT_TRUE(wrapped_frame.get());
+
+ gfx::Rect visible_rect(1, 1, 1, 1);
+ gfx::Size natural_size = visible_rect.size();
+ frame = media::VideoFrame::WrapVideoFrame(
+ wrapped_frame, visible_rect, natural_size,
+ base::Bind(&FrameNoLongerNeededCallback, wrapped_frame,
+ &no_longer_needed_triggered));
+ EXPECT_EQ(wrapped_frame->coded_size(), frame->coded_size());
+ EXPECT_EQ(wrapped_frame->data(media::VideoFrame::kYPlane),
+ frame->data(media::VideoFrame::kYPlane));
+ EXPECT_NE(wrapped_frame->visible_rect(), frame->visible_rect());
+ EXPECT_EQ(visible_rect, frame->visible_rect());
+ EXPECT_NE(wrapped_frame->natural_size(), frame->natural_size());
+ EXPECT_EQ(natural_size, frame->natural_size());
+ }
+
+ EXPECT_FALSE(no_longer_needed_triggered);
+ frame = NULL;
+ EXPECT_TRUE(no_longer_needed_triggered);
+}
+
// Ensure each frame is properly sized and allocated. Will trigger OOB reads
// and writes as well as incorrect frame hashes otherwise.
TEST(VideoFrame, CheckFrameExtents) {
- // Each call consists of a VideoFrame::Format, # of planes, bytes per pixel,
- // and the expected hash of all planes if filled with kFillByte (defined in
- // ExpectFrameExtents).
- ExpectFrameExtents(
- VideoFrame::YV12, 3, 1, "71113bdfd4c0de6cf62f48fb74f7a0b1");
- ExpectFrameExtents(
- VideoFrame::YV16, 3, 1, "9bb99ac3ff350644ebff4d28dc01b461");
+ // Each call consists of a VideoFrame::Format and the expected hash of all
+ // planes if filled with kFillByte (defined in ExpectFrameExtents).
+ ExpectFrameExtents(VideoFrame::YV12, "8e5d54cb23cd0edca111dd35ffb6ff05");
+ ExpectFrameExtents(VideoFrame::YV16, "cce408a044b212db42a10dfec304b3ef");
}
-static void TextureCallback(uint32* called_sync_point, uint32 sync_point) {
- *called_sync_point = sync_point;
+static void TextureCallback(std::vector<uint32>* called_sync_point,
+ const std::vector<uint32>& release_sync_points) {
+ called_sync_point->assign(release_sync_points.begin(),
+ release_sync_points.end());
}
-// Verify the TextureNoLongerNeededCallback is called when VideoFrame is
-// destroyed with the original sync point.
+// Verify the gpu::MailboxHolder::ReleaseCallback is called when VideoFrame is
+// destroyed with the default release sync points.
TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
- uint32 sync_point = 7;
- uint32 called_sync_point = 0;
+ std::vector<uint32> called_sync_points;
+ called_sync_points.push_back(1);
{
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new VideoFrame::MailboxHolder(
- gpu::Mailbox(),
- sync_point,
- base::Bind(&TextureCallback, &called_sync_point))),
- 5, // texture_target
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
-
- EXPECT_EQ(0u, called_sync_point);
+ make_scoped_ptr(
+ new gpu::MailboxHolder(gpu::Mailbox(), 5, 0 /* sync_point */)),
+ base::Bind(&TextureCallback, &called_sync_points),
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ VideoFrame::ReadPixelsCB()); // read_pixels_cb
+
+ EXPECT_EQ(1u, called_sync_points.size());
}
- EXPECT_EQ(sync_point, called_sync_point);
+ EXPECT_TRUE(called_sync_points.empty());
}
-// Verify the TextureNoLongerNeededCallback is called when VideoFrame is
-// destroyed with the new sync point, when the mailbox is accessed by a caller.
+// Verify the gpu::MailboxHolder::ReleaseCallback is called when VideoFrame is
+// destroyed with the release sync points, which was updated by clients.
+// (i.e. the compositor, webgl).
TEST(VideoFrame, TextureNoLongerNeededCallbackAfterTakingAndReleasingMailbox) {
- uint32 called_sync_point = 0;
+ std::vector<uint32> called_sync_points;
gpu::Mailbox mailbox;
mailbox.name[0] = 50;
uint32 sync_point = 7;
uint32 target = 9;
+ std::vector<uint32> release_sync_points;
+ release_sync_points.push_back(1);
+ release_sync_points.push_back(2);
+ release_sync_points.push_back(3);
{
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new VideoFrame::MailboxHolder(
- mailbox,
- sync_point,
- base::Bind(&TextureCallback, &called_sync_point))),
- target,
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
-
- VideoFrame::MailboxHolder* mailbox_holder = frame->texture_mailbox();
-
- EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
- EXPECT_EQ(sync_point, mailbox_holder->sync_point());
- EXPECT_EQ(target, frame->texture_target());
-
- // Finish using the mailbox_holder and drop our reference.
- sync_point = 10;
- mailbox_holder->Resync(sync_point);
+ make_scoped_ptr(new gpu::MailboxHolder(mailbox, target, sync_point)),
+ base::Bind(&TextureCallback, &called_sync_points),
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ VideoFrame::ReadPixelsCB()); // read_pixels_cb
+ EXPECT_TRUE(called_sync_points.empty());
+
+ const gpu::MailboxHolder* mailbox_holder = frame->mailbox_holder();
+
+ EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox.name[0]);
+ EXPECT_EQ(target, mailbox_holder->texture_target);
+ EXPECT_EQ(sync_point, mailbox_holder->sync_point);
+
+ frame->AppendReleaseSyncPoint(release_sync_points[0]);
+ frame->AppendReleaseSyncPoint(release_sync_points[1]);
+ frame->AppendReleaseSyncPoint(release_sync_points[2]);
+ EXPECT_EQ(sync_point, mailbox_holder->sync_point);
}
- EXPECT_EQ(sync_point, called_sync_point);
+ EXPECT_EQ(release_sync_points, called_sync_points);
}
} // namespace media
diff --git a/chromium/media/base/video_renderer.h b/chromium/media/base/video_renderer.h
index 2650221e3d8..b4154a0410b 100644
--- a/chromium/media/base/video_renderer.h
+++ b/chromium/media/base/video_renderer.h
@@ -11,10 +11,6 @@
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
-namespace gfx {
-class Size;
-}
-
namespace media {
class DemuxerStream;
@@ -26,9 +22,6 @@ class MEDIA_EXPORT VideoRenderer {
// the clock should not exceed.
typedef base::Callback<void(base::TimeDelta)> TimeCB;
- // Executed when the natural size of the video has changed.
- typedef base::Callback<void(const gfx::Size& size)> NaturalSizeChangedCB;
-
// Used to query the current time or duration of the media.
typedef base::Callback<base::TimeDelta()> TimeDeltaCB;
@@ -43,9 +36,6 @@ class MEDIA_EXPORT VideoRenderer {
//
// |time_cb| is executed whenever time has advanced by way of video rendering.
//
- // |size_changed_cb| is executed whenever the dimensions of the video has
- // changed.
- //
// |ended_cb| is executed when video rendering has reached the end of stream.
//
// |error_cb| is executed if an error was encountered.
@@ -54,10 +44,10 @@ class MEDIA_EXPORT VideoRenderer {
//
// |get_duration_cb| is used to query the media duration.
virtual void Initialize(DemuxerStream* stream,
+ bool low_delay,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& time_cb,
- const NaturalSizeChangedCB& size_changed_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
@@ -67,11 +57,8 @@ class MEDIA_EXPORT VideoRenderer {
// |callback| when playback is underway.
virtual void Play(const base::Closure& callback) = 0;
- // Temporarily suspend decoding and rendering video, executing |callback| when
- // playback has been suspended.
- virtual void Pause(const base::Closure& callback) = 0;
-
- // Discard any video data, executing |callback| when completed.
+ // Discard any video data and stop reading from |stream|, executing |callback|
+ // when completed.
virtual void Flush(const base::Closure& callback) = 0;
// Start prerolling video data. If |time| equals kNoTimestamp() then all
diff --git a/chromium/media/base/yuv_convert.cc b/chromium/media/base/yuv_convert.cc
index 893b53df147..5ad8f3079fb 100644
--- a/chromium/media/base/yuv_convert.cc
+++ b/chromium/media/base/yuv_convert.cc
@@ -20,10 +20,12 @@
#include "base/cpu.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "build/build_config.h"
#include "media/base/simd/convert_rgb_to_yuv.h"
#include "media/base/simd/convert_yuv_to_rgb.h"
#include "media/base/simd/filter_yuv.h"
+#include "media/base/simd/yuv_to_rgb_table.h"
#if defined(ARCH_CPU_X86_FAMILY)
#if defined(COMPILER_MSVC)
@@ -78,21 +80,24 @@ typedef void (*ConvertYUVToRGB32RowProc)(const uint8*,
const uint8*,
const uint8*,
uint8*,
- ptrdiff_t);
+ ptrdiff_t,
+ const int16[1024][4]);
typedef void (*ConvertYUVAToARGBRowProc)(const uint8*,
const uint8*,
const uint8*,
const uint8*,
uint8*,
- ptrdiff_t);
+ ptrdiff_t,
+ const int16[1024][4]);
typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
const uint8*,
const uint8*,
uint8*,
ptrdiff_t,
- ptrdiff_t);
+ ptrdiff_t,
+ const int16[1024][4]);
static FilterYUVRowsProc g_filter_yuv_rows_proc_ = NULL;
static ConvertYUVToRGB32RowProc g_convert_yuv_to_rgb32_row_proc_ = NULL;
@@ -111,6 +116,31 @@ void EmptyRegisterStateIntrinsic() { _mm_empty(); }
typedef void (*EmptyRegisterStateProc)();
static EmptyRegisterStateProc g_empty_register_state_proc_ = NULL;
+// Get the appropriate value to bitshift by for vertical indices.
+int GetVerticalShift(YUVType type) {
+ switch (type) {
+ case YV16:
+ return 0;
+ case YV12:
+ case YV12J:
+ return 1;
+ }
+ NOTREACHED();
+ return 0;
+}
+
+const int16 (&GetLookupTable(YUVType type))[1024][4] {
+ switch (type) {
+ case YV12:
+ case YV16:
+ return kCoefficientsRgbY;
+ case YV12J:
+ return kCoefficientsRgbY_JPEG;
+ }
+ NOTREACHED();
+ return kCoefficientsRgbY;
+}
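GetVerticalShift encodes how many luma rows share a chroma row: ScaleYUVToRGB32 below computes the chroma row as source_y >> y_shift, so YV12 (shift 1) maps luma rows 6 and 7 to chroma row 3 while YV16 (shift 0) maps rows one-to-one. A hypothetical helper spelling out that addressing:

#include "media/base/yuv_convert.h"

// Hypothetical helper mirroring the chroma addressing used by the scalers.
const uint8* ChromaRowStart(const uint8* chroma_plane,
                            int luma_row,
                            int chroma_pitch,
                            media::YUVType yuv_type) {
  return chroma_plane +
         (luma_row >> media::GetVerticalShift(yuv_type)) * chroma_pitch;
}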
+
void InitializeCPUSpecificYUVConversions() {
CHECK(!g_filter_yuv_rows_proc_);
CHECK(!g_convert_yuv_to_rgb32_row_proc_);
@@ -132,7 +162,8 @@ void InitializeCPUSpecificYUVConversions() {
g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_C;
g_empty_register_state_proc_ = EmptyRegisterStateStub;
-#if defined(ARCH_CPU_X86_FAMILY)
+ // Assembly code confuses MemorySanitizer.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(MEMORY_SANITIZER)
base::CPU cpu;
if (cpu.has_mmx()) {
g_convert_yuv_to_rgb32_row_proc_ = ConvertYUVToRGB32Row_MMX;
@@ -220,7 +251,7 @@ void ScaleYUVToRGB32(const uint8* y_buf,
if (source_width > kFilterBufferSize || view_rotate)
filter = FILTER_NONE;
- unsigned int y_shift = yuv_type;
+ unsigned int y_shift = GetVerticalShift(yuv_type);
// Diagram showing origin and direction of source sampling.
// ->0 4<-
// 7 3
@@ -352,14 +383,25 @@ void ScaleYUVToRGB32(const uint8* y_buf,
v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
}
if (source_dx == kFractionMax) { // Not scaled
- g_convert_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel, width);
+ g_convert_yuv_to_rgb32_row_proc_(
+ y_ptr, u_ptr, v_ptr, dest_pixel, width, kCoefficientsRgbY);
} else {
if (filter & FILTER_BILINEAR_H) {
- g_linear_scale_yuv_to_rgb32_row_proc_(
- y_ptr, u_ptr, v_ptr, dest_pixel, width, source_dx);
+ g_linear_scale_yuv_to_rgb32_row_proc_(y_ptr,
+ u_ptr,
+ v_ptr,
+ dest_pixel,
+ width,
+ source_dx,
+ kCoefficientsRgbY);
} else {
- g_scale_yuv_to_rgb32_row_proc_(
- y_ptr, u_ptr, v_ptr, dest_pixel, width, source_dx);
+ g_scale_yuv_to_rgb32_row_proc_(y_ptr,
+ u_ptr,
+ v_ptr,
+ dest_pixel,
+ width,
+ source_dx,
+ kCoefficientsRgbY);
}
}
}
@@ -443,6 +485,9 @@ void ScaleYUVToRGB32WithRect(const uint8* y_buf,
const int kFilterBufferSize = 4096;
const bool kAvoidUsingOptimizedFilter = source_width > kFilterBufferSize;
uint8 yuv_temp[16 + kFilterBufferSize * 3 + 16];
+ // memset() yuv_temp to 0 to avoid bogus warnings when running on Valgrind.
+ if (RunningOnValgrind())
+ memset(yuv_temp, 0, sizeof(yuv_temp));
uint8* y_temp = reinterpret_cast<uint8*>(
reinterpret_cast<uintptr_t>(yuv_temp + 15) & ~15);
uint8* u_temp = y_temp + kFilterBufferSize;
@@ -500,7 +545,8 @@ void ScaleYUVToRGB32WithRect(const uint8* y_buf,
rgb_buf,
dest_rect_width,
source_left,
- x_step);
+ x_step,
+ kCoefficientsRgbY);
} else {
// If the frame is too large then we linear scale a single row.
LinearScaleYUVToRGB32RowWithRange_C(y0_ptr,
@@ -509,7 +555,8 @@ void ScaleYUVToRGB32WithRect(const uint8* y_buf,
rgb_buf,
dest_rect_width,
source_left,
- x_step);
+ x_step,
+ kCoefficientsRgbY);
}
// Advance vertically in the source and destination image.
diff --git a/chromium/media/base/yuv_convert.h b/chromium/media/base/yuv_convert.h
index 8f64c79689a..cf13edb17ab 100644
--- a/chromium/media/base/yuv_convert.h
+++ b/chromium/media/base/yuv_convert.h
@@ -7,6 +7,7 @@
#include "base/basictypes.h"
#include "media/base/media_export.h"
+#include "media/base/simd/yuv_to_rgb_table.h"
// Visual Studio 2010 does not support MMX intrinsics on x64.
// Some win64 yuv_convert code paths use SSE+MMX yasm, so without rewriting
@@ -22,12 +23,18 @@
namespace media {
// Type of YUV surface.
-// The value of these enums matter as they are used to shift vertical indices.
enum YUVType {
- YV16 = 0, // YV16 is half width and full height chroma channels.
- YV12 = 1, // YV12 is half width and half height chroma channels.
+ YV16 = 0, // YV16 is half width and full height chroma channels.
+ YV12 = 1, // YV12 is half width and half height chroma channels.
+ YV12J = 2, // YV12J is the same as YV12, but in JPEG color range.
};
+// Get the appropriate value to bitshift by for vertical indices.
+MEDIA_EXPORT int GetVerticalShift(YUVType type);
+
+// Get the appropriate lookup table for a given YUV format.
+MEDIA_EXPORT const int16 (&GetLookupTable(YUVType type))[1024][4];
+
// Mirror means flip the image horizontally, as in looking in a mirror.
// Rotate happens after mirroring.
enum Rotate {
diff --git a/chromium/media/base/yuv_convert_perftest.cc b/chromium/media/base/yuv_convert_perftest.cc
new file mode 100644
index 00000000000..0f30bebec8b
--- /dev/null
+++ b/chromium/media/base/yuv_convert_perftest.cc
@@ -0,0 +1,226 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_paths.h"
+#include "base/cpu.h"
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/time/time.h"
+#include "media/base/simd/convert_yuv_to_rgb.h"
+#include "media/base/yuv_convert.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace media {
+#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+// Size of raw image.
+static const int kSourceWidth = 640;
+static const int kSourceHeight = 360;
+static const int kSourceYSize = kSourceWidth * kSourceHeight;
+static const int kSourceUOffset = kSourceYSize;
+static const int kSourceVOffset = kSourceYSize * 5 / 4;
+static const int kBpp = 4;
+
+// Width of the row to convert. Odd so that we exercise the ending
+// one-pixel-leftover case.
+static const int kWidth = 639;
+
+// Surface sizes for various test files.
+static const int kYUV12Size = kSourceYSize * 12 / 8;
+static const int kRGBSize = kSourceYSize * kBpp;
+
+static const int kPerfTestIterations = 2000;
+
+class YUVConvertPerfTest : public testing::Test {
+ public:
+ YUVConvertPerfTest()
+ : yuv_bytes_(new uint8[kYUV12Size]),
+ rgb_bytes_converted_(new uint8[kRGBSize]) {
+ base::FilePath path;
+ CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &path));
+ path = path.Append(FILE_PATH_LITERAL("media"))
+ .Append(FILE_PATH_LITERAL("test"))
+ .Append(FILE_PATH_LITERAL("data"))
+ .Append(FILE_PATH_LITERAL("bali_640x360_P420.yuv"));
+
+ // Verify file size is correct.
+ int64 actual_size = 0;
+ base::GetFileSize(path, &actual_size);
+ CHECK_EQ(actual_size, kYUV12Size);
+
+ // Verify bytes read are correct.
+ int bytes_read = base::ReadFile(
+ path, reinterpret_cast<char*>(yuv_bytes_.get()), kYUV12Size);
+
+ CHECK_EQ(bytes_read, kYUV12Size);
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes_;
+ scoped_ptr<uint8[]> rgb_bytes_converted_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(YUVConvertPerfTest);
+};
+
+TEST_F(YUVConvertPerfTest, ConvertYUVToRGB32Row_MMX) {
+ ASSERT_TRUE(base::CPU().has_mmx());
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kPerfTestIterations; ++i) {
+ for (int row = 0; row < kSourceHeight; ++row) {
+ int chroma_row = row / 2;
+ ConvertYUVToRGB32Row_MMX(
+ yuv_bytes_.get() + row * kSourceWidth,
+ yuv_bytes_.get() + kSourceUOffset + (chroma_row * kSourceWidth / 2),
+ yuv_bytes_.get() + kSourceVOffset + (chroma_row * kSourceWidth / 2),
+ rgb_bytes_converted_.get(),
+ kWidth,
+ GetLookupTable(YV12));
+ }
+ }
+ double total_time_seconds =
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "yuv_convert_perftest", "", "ConvertYUVToRGB32Row_MMX",
+ kPerfTestIterations / total_time_seconds, "runs/s", true);
+
+ media::EmptyRegisterState();
+}
+
+TEST_F(YUVConvertPerfTest, ConvertYUVToRGB32Row_SSE) {
+ ASSERT_TRUE(base::CPU().has_sse());
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kPerfTestIterations; ++i) {
+ for (int row = 0; row < kSourceHeight; ++row) {
+ int chroma_row = row / 2;
+ ConvertYUVToRGB32Row_SSE(
+ yuv_bytes_.get() + row * kSourceWidth,
+ yuv_bytes_.get() + kSourceUOffset + (chroma_row * kSourceWidth / 2),
+ yuv_bytes_.get() + kSourceVOffset + (chroma_row * kSourceWidth / 2),
+ rgb_bytes_converted_.get(),
+ kWidth,
+ GetLookupTable(YV12));
+ }
+ }
+ double total_time_seconds =
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "yuv_convert_perftest", "", "ConvertYUVToRGB32Row_SSE",
+ kPerfTestIterations / total_time_seconds, "runs/s", true);
+ media::EmptyRegisterState();
+}
+
+TEST_F(YUVConvertPerfTest, ScaleYUVToRGB32Row_MMX) {
+ ASSERT_TRUE(base::CPU().has_mmx());
+
+ const int kSourceDx = 80000; // This value means a scale down.
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kPerfTestIterations; ++i) {
+ for (int row = 0; row < kSourceHeight; ++row) {
+ int chroma_row = row / 2;
+ ScaleYUVToRGB32Row_MMX(
+ yuv_bytes_.get() + row * kSourceWidth,
+ yuv_bytes_.get() + kSourceUOffset + (chroma_row * kSourceWidth / 2),
+ yuv_bytes_.get() + kSourceVOffset + (chroma_row * kSourceWidth / 2),
+ rgb_bytes_converted_.get(),
+ kWidth,
+ kSourceDx,
+ GetLookupTable(YV12));
+ }
+ }
+ double total_time_seconds =
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "yuv_convert_perftest", "", "ScaleYUVToRGB32Row_MMX",
+ kPerfTestIterations / total_time_seconds, "runs/s", true);
+ media::EmptyRegisterState();
+}
+
+TEST_F(YUVConvertPerfTest, ScaleYUVToRGB32Row_SSE) {
+ ASSERT_TRUE(base::CPU().has_sse());
+
+ const int kSourceDx = 80000; // This value means a scale down.
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kPerfTestIterations; ++i) {
+ for (int row = 0; row < kSourceHeight; ++row) {
+ int chroma_row = row / 2;
+ ScaleYUVToRGB32Row_SSE(
+ yuv_bytes_.get() + row * kSourceWidth,
+ yuv_bytes_.get() + kSourceUOffset + (chroma_row * kSourceWidth / 2),
+ yuv_bytes_.get() + kSourceVOffset + (chroma_row * kSourceWidth / 2),
+ rgb_bytes_converted_.get(),
+ kWidth,
+ kSourceDx,
+ GetLookupTable(YV12));
+ }
+ }
+ double total_time_seconds =
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "yuv_convert_perftest", "", "ScaleYUVToRGB32Row_SSE",
+ kPerfTestIterations / total_time_seconds, "runs/s", true);
+ media::EmptyRegisterState();
+}
+
+TEST_F(YUVConvertPerfTest, LinearScaleYUVToRGB32Row_MMX) {
+ ASSERT_TRUE(base::CPU().has_mmx());
+
+ const int kSourceDx = 80000; // This value means a scale down.
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kPerfTestIterations; ++i) {
+ for (int row = 0; row < kSourceHeight; ++row) {
+ int chroma_row = row / 2;
+ LinearScaleYUVToRGB32Row_MMX(
+ yuv_bytes_.get() + row * kSourceWidth,
+ yuv_bytes_.get() + kSourceUOffset + (chroma_row * kSourceWidth / 2),
+ yuv_bytes_.get() + kSourceVOffset + (chroma_row * kSourceWidth / 2),
+ rgb_bytes_converted_.get(),
+ kWidth,
+ kSourceDx,
+ GetLookupTable(YV12));
+ }
+ }
+ double total_time_seconds =
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "yuv_convert_perftest", "", "LinearScaleYUVToRGB32Row_MMX",
+ kPerfTestIterations / total_time_seconds, "runs/s", true);
+ media::EmptyRegisterState();
+}
+
+TEST_F(YUVConvertPerfTest, LinearScaleYUVToRGB32Row_SSE) {
+ ASSERT_TRUE(base::CPU().has_sse());
+
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 means a scale-down.
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kPerfTestIterations; ++i) {
+ for (int row = 0; row < kSourceHeight; ++row) {
+ int chroma_row = row / 2;
+ LinearScaleYUVToRGB32Row_SSE(
+ yuv_bytes_.get() + row * kSourceWidth,
+ yuv_bytes_.get() + kSourceUOffset + (chroma_row * kSourceWidth / 2),
+ yuv_bytes_.get() + kSourceVOffset + (chroma_row * kSourceWidth / 2),
+ rgb_bytes_converted_.get(),
+ kWidth,
+ kSourceDx,
+ GetLookupTable(YV12));
+ }
+ }
+ double total_time_seconds =
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "yuv_convert_perftest", "", "LinearScaleYUVToRGB32Row_SSE",
+ kPerfTestIterations / total_time_seconds, "runs/s", true);
+ media::EmptyRegisterState();
+}
+
+#endif // !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+
+} // namespace media
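Each perf test above follows the same measurement pattern: run the row converter over kPerfTestIterations full frames, then report iterations divided by elapsed wall-clock time as runs/s. A minimal sketch of that pattern, assuming a hypothetical DoWork() in place of the real converters and std::chrono in place of Chromium's base::TimeTicks::HighResNow():

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for one full-frame conversion pass.
static void DoWork() {}

int main() {
  const int kIterations = 600;  // Plays the role of kPerfTestIterations.
  const auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < kIterations; ++i)
    DoWork();
  const std::chrono::duration<double> elapsed =
      std::chrono::steady_clock::now() - start;
  // Throughput in runs per second, as perf_test::PrintResult() reports above.
  std::printf("%f runs/s\n", kIterations / elapsed.count());
  return 0;
}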
diff --git a/chromium/media/base/yuv_convert_unittest.cc b/chromium/media/base/yuv_convert_unittest.cc
index 7c964f3771a..85d3693ec86 100644
--- a/chromium/media/base/yuv_convert_unittest.cc
+++ b/chromium/media/base/yuv_convert_unittest.cc
@@ -11,6 +11,7 @@
#include "media/base/simd/convert_rgb_to_yuv.h"
#include "media/base/simd/convert_yuv_to_rgb.h"
#include "media/base/simd/filter_yuv.h"
+#include "media/base/simd/yuv_to_rgb_table.h"
#include "media/base/yuv_convert.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/rect.h"
@@ -36,6 +37,11 @@ static const int kRGBSizeScaled = kScaledWidth * kScaledHeight * kBpp;
static const int kRGB24Size = kSourceYSize * 3;
static const int kRGBSizeConverted = kSourceYSize * kBpp;
+#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+static const int kSourceAOffset = kSourceYSize * 12 / 8;
+static const int kYUVA12Size = kSourceYSize * 20 / 8;
+#endif
+
// Helper for reading test data into a scoped_ptr<uint8[]>.
static void ReadData(const base::FilePath::CharType* filename,
int expected_size,
@@ -68,6 +74,12 @@ static void ReadYV16Data(scoped_ptr<uint8[]>* data) {
ReadData(FILE_PATH_LITERAL("bali_640x360_P422.yuv"), kYUV16Size, data);
}
+#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+static void ReadYV12AData(scoped_ptr<uint8[]>* data) {
+ ReadData(FILE_PATH_LITERAL("bali_640x360_P420_alpha.yuv"), kYUVA12Size, data);
+}
+#endif
+
static void ReadRGB24Data(scoped_ptr<uint8[]>* data) {
ReadData(FILE_PATH_LITERAL("bali_640x360_RGB24.rgb"), kRGB24Size, data);
}
@@ -162,6 +174,7 @@ class YUVScaleTest : public ::testing::TestWithParam<YUVScaleTestData> {
YUVScaleTest() {
switch (GetParam().yuv_type) {
case media::YV12:
+ case media::YV12J:
ReadYV12Data(&yuv_bytes_);
break;
case media::YV16:
@@ -178,6 +191,7 @@ class YUVScaleTest : public ::testing::TestWithParam<YUVScaleTestData> {
uint8* v_plane() {
switch (GetParam().yuv_type) {
case media::YV12:
+ case media::YV12J:
return yuv_bytes_.get() + kSourceVOffset;
case media::YV16:
return yuv_bytes_.get() + kSourceYSize * 3 / 2;
@@ -514,6 +528,48 @@ TEST(YUVConvertTest, DownScaleYUVToRGB32WithRect) {
}
#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+TEST(YUVConvertTest, YUVAtoARGB_MMX_MatchReference) {
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> yuv_bytes;
+ scoped_ptr<uint8[]> rgb_bytes(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_converted_bytes(new uint8[kRGBSizeConverted]);
+ scoped_ptr<uint8[]> rgb_converted_bytes_ref(new uint8[kRGBSizeConverted]);
+
+ // Read YUV reference data from file.
+ ReadYV12AData(&yuv_bytes);
+
+ // Convert a frame of YUVA to 32-bit ARGB using both the C and MMX versions.
+ media::ConvertYUVAToARGB_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ yuv_bytes.get() + kSourceAOffset,
+ rgb_converted_bytes_ref.get(),
+ kSourceWidth,
+ kSourceHeight,
+ kSourceWidth,
+ kSourceWidth / 2,
+ kSourceWidth,
+ kSourceWidth * kBpp,
+ media::YV12);
+ media::ConvertYUVAToARGB_MMX(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ yuv_bytes.get() + kSourceAOffset,
+ rgb_converted_bytes.get(),
+ kSourceWidth,
+ kSourceHeight,
+ kSourceWidth,
+ kSourceWidth / 2,
+ kSourceWidth,
+ kSourceWidth * kBpp,
+ media::YV12);
+
+ EXPECT_EQ(0,
+ memcmp(rgb_converted_bytes.get(),
+ rgb_converted_bytes_ref.get(),
+ kRGBSizeConverted));
+}
+
TEST(YUVConvertTest, RGB32ToYUV_SSE2_MatchReference) {
base::CPU cpu;
if (!cpu.has_sse2()) {
@@ -618,12 +674,14 @@ TEST(YUVConvertTest, ConvertYUVToRGB32Row_MMX) {
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
- kWidth);
+ kWidth,
+ GetLookupTable(YV12));
ConvertYUVToRGB32Row_MMX(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
- kWidth);
+ kWidth,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -647,12 +705,14 @@ TEST(YUVConvertTest, ConvertYUVToRGB32Row_SSE) {
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
- kWidth);
+ kWidth,
+ GetLookupTable(YV12));
ConvertYUVToRGB32Row_SSE(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
- kWidth);
+ kWidth,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -678,13 +738,15 @@ TEST(YUVConvertTest, ScaleYUVToRGB32Row_MMX) {
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
ScaleYUVToRGB32Row_MMX(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -710,13 +772,15 @@ TEST(YUVConvertTest, ScaleYUVToRGB32Row_SSE) {
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
ScaleYUVToRGB32Row_SSE(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -742,13 +806,15 @@ TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_MMX) {
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
LinearScaleYUVToRGB32Row_MMX(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -774,13 +840,15 @@ TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_SSE) {
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
LinearScaleYUVToRGB32Row_SSE(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -924,13 +992,15 @@ TEST(YUVConvertTest, ScaleYUVToRGB32Row_SSE2_X64) {
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
ScaleYUVToRGB32Row_SSE2_X64(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
@@ -950,13 +1020,15 @@ TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_MMX_X64) {
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_reference.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
LinearScaleYUVToRGB32Row_MMX_X64(yuv_bytes.get(),
yuv_bytes.get() + kSourceUOffset,
yuv_bytes.get() + kSourceVOffset,
rgb_bytes_converted.get(),
kWidth,
- kSourceDx);
+ kSourceDx,
+ GetLookupTable(YV12));
media::EmptyRegisterState();
EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
rgb_bytes_converted.get(),
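The unittest hunks above all follow one validation pattern: run the portable _C row function and the SIMD variant over identical inputs (now passing the shared lookup table to both), then require a byte-exact match via memcmp after EmptyRegisterState(). A self-contained sketch of that harness, with hypothetical ConvertRow_C/ConvertRow_Optimized standing in for the real converters:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical stand-ins for a reference (C) row converter and its
// optimized counterpart, e.g. ConvertYUVToRGB32Row_C vs. _SSE.
static void ConvertRow_C(const uint8_t* src, uint8_t* dst, int width) {
  for (int i = 0; i < width; ++i)
    dst[i] = static_cast<uint8_t>(src[i] * 2);  // Placeholder transform.
}
static void ConvertRow_Optimized(const uint8_t* src, uint8_t* dst, int width) {
  ConvertRow_C(src, dst, width);  // Pretend this is the SIMD path.
}

int main() {
  const int kWidth = 640;
  std::vector<uint8_t> src(kWidth, 0x12), ref(kWidth), opt(kWidth);
  ConvertRow_C(src.data(), ref.data(), kWidth);
  ConvertRow_Optimized(src.data(), opt.data(), kWidth);
  // Byte-exact match, mirroring the EXPECT_EQ(0, memcmp(...)) checks above.
  std::printf("%s\n", std::memcmp(ref.data(), opt.data(), kWidth) == 0
                          ? "match"
                          : "MISMATCH");
  return 0;
}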
diff --git a/chromium/media/cast/DEPS b/chromium/media/cast/DEPS
index f84b3fbbf5e..abee2864b31 100644
--- a/chromium/media/cast/DEPS
+++ b/chromium/media/cast/DEPS
@@ -2,7 +2,7 @@ include_rules = [
"+crypto",
"+media",
"+net",
- "+third_party/webrtc",
"+third_party/libyuv",
+ "+third_party/zlib",
"+ui/gfx",
]
diff --git a/chromium/media/cast/OWNERS b/chromium/media/cast/OWNERS
index 49f41be49c0..f8e61c33ccc 100644
--- a/chromium/media/cast/OWNERS
+++ b/chromium/media/cast/OWNERS
@@ -1,4 +1,5 @@
hclam@chromium.org
hubbe@chromium.org
mikhal@chromium.org
+miu@chromium.org
pwestin@google.com
diff --git a/chromium/media/cast/README b/chromium/media/cast/README
index eca4cf6a1f9..0930c1e012d 100644
--- a/chromium/media/cast/README
+++ b/chromium/media/cast/README
@@ -16,7 +16,7 @@ cast/audio_sender/
cast/congestion_control/
Bandwidth estimation and network congestion handling.
-cast/net/pacing/
+cast/transport/pacing/
Module for rate limiting data outflow.
cast/rtcp/
@@ -28,7 +28,7 @@ cast/rtp_common/
cast/rtp_receiver/
Module for receiving RTP messages.
-cast/net/rtp_sender/
+cast/transport/rtp_sender/
Module for sending RTP messages.
cast/test/
@@ -56,9 +56,6 @@ third_party/libvpx
third_party/opus
Provides audio encoder.
-third_party/webrtc
- Provides audio signal processing.
-
OWNERS
See OWNERS for ownership.
diff --git a/chromium/media/cast/audio_receiver/audio_decoder.cc b/chromium/media/cast/audio_receiver/audio_decoder.cc
deleted file mode 100644
index a761a5a84de..00000000000
--- a/chromium/media/cast/audio_receiver/audio_decoder.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "media/cast/audio_receiver/audio_decoder.h"
-
-#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
-#include "third_party/webrtc/modules/interface/module_common_types.h"
-
-namespace media {
-namespace cast {
-
-AudioDecoder::AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- RtpPayloadFeedback* incoming_payload_feedback)
- : cast_environment_(cast_environment),
- audio_decoder_(webrtc::AudioCodingModule::Create(0)),
- cast_message_builder_(cast_environment->Clock(),
- incoming_payload_feedback, &frame_id_map_, audio_config.incoming_ssrc,
- true, 0),
- have_received_packets_(false),
- last_played_out_timestamp_(0) {
- audio_decoder_->InitializeReceiver();
-
- webrtc::CodecInst receive_codec;
- switch (audio_config.codec) {
- case kPcm16:
- receive_codec.pltype = audio_config.rtp_payload_type;
- strncpy(receive_codec.plname, "L16", 4);
- receive_codec.plfreq = audio_config.frequency;
- receive_codec.pacsize = -1;
- receive_codec.channels = audio_config.channels;
- receive_codec.rate = -1;
- break;
- case kOpus:
- receive_codec.pltype = audio_config.rtp_payload_type;
- strncpy(receive_codec.plname, "opus", 5);
- receive_codec.plfreq = audio_config.frequency;
- receive_codec.pacsize = -1;
- receive_codec.channels = audio_config.channels;
- receive_codec.rate = -1;
- break;
- case kExternalAudio:
- NOTREACHED() << "Codec must be specified for audio decoder";
- break;
- }
- if (audio_decoder_->RegisterReceiveCodec(receive_codec) != 0) {
- NOTREACHED() << "Failed to register receive codec";
- }
-
- audio_decoder_->SetMaximumPlayoutDelay(audio_config.rtp_max_delay_ms);
- audio_decoder_->SetPlayoutMode(webrtc::streaming);
-}
-
-AudioDecoder::~AudioDecoder() {}
-
-bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- PcmAudioFrame* audio_frame,
- uint32* rtp_timestamp) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER));
- // We don't care about the race condition where a packet arrives at the same
- // time as this function is called. The data will be there the next time this
- // function is called.
- lock_.Acquire();
- // Get a local copy under lock.
- bool have_received_packets = have_received_packets_;
- lock_.Release();
-
- if (!have_received_packets) return false;
-
- audio_frame->samples.clear();
-
- for (int i = 0; i < number_of_10ms_blocks; ++i) {
- webrtc::AudioFrame webrtc_audio_frame;
- if (0 != audio_decoder_->PlayoutData10Ms(desired_frequency,
- &webrtc_audio_frame)) {
- return false;
- }
- if (webrtc_audio_frame.speech_type_ == webrtc::AudioFrame::kPLCCNG ||
- webrtc_audio_frame.speech_type_ == webrtc::AudioFrame::kUndefined) {
- // We are only interested in real decoded audio.
- return false;
- }
- audio_frame->frequency = webrtc_audio_frame.sample_rate_hz_;
- audio_frame->channels = webrtc_audio_frame.num_channels_;
-
- if (i == 0) {
- // Use the timestamp from the first 10ms block.
- if (0 != audio_decoder_->PlayoutTimestamp(rtp_timestamp)) {
- return false;
- }
- lock_.Acquire();
- last_played_out_timestamp_ = *rtp_timestamp;
- lock_.Release();
- }
- int samples_per_10ms = webrtc_audio_frame.samples_per_channel_;
-
- audio_frame->samples.insert(
- audio_frame->samples.end(),
- &webrtc_audio_frame.data_[0],
- &webrtc_audio_frame.data_[samples_per_10ms * audio_frame->channels]);
- }
- return true;
-}
-
-void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK_LE(payload_size, kIpPacketSize);
- audio_decoder_->IncomingPacket(payload_data, static_cast<int32>(payload_size),
- rtp_header.webrtc);
- lock_.Acquire();
- have_received_packets_ = true;
- uint32 last_played_out_timestamp = last_played_out_timestamp_;
- lock_.Release();
-
- bool complete = false;
- if (!frame_id_map_.InsertPacket(rtp_header, &complete)) return;
- if (!complete) return;
-
- cast_message_builder_.CompleteFrameReceived(rtp_header.frame_id,
- rtp_header.is_key_frame);
-
- frame_id_rtp_timestamp_map_[rtp_header.frame_id] =
- rtp_header.webrtc.header.timestamp;
-
- if (last_played_out_timestamp == 0) return; // Nothing is played out yet.
-
- uint32 latest_frame_id_to_remove = 0;
- bool frame_to_remove = false;
-
- FrameIdRtpTimestampMap::iterator it = frame_id_rtp_timestamp_map_.begin();
- while (it != frame_id_rtp_timestamp_map_.end()) {
- if (IsNewerRtpTimestamp(it->second, last_played_out_timestamp)) {
- break;
- }
- frame_to_remove = true;
- latest_frame_id_to_remove = it->first;
- frame_id_rtp_timestamp_map_.erase(it);
- it = frame_id_rtp_timestamp_map_.begin();
- }
- if (!frame_to_remove) return;
-
- frame_id_map_.RemoveOldFrames(latest_frame_id_to_remove);
-}
-
-bool AudioDecoder::TimeToSendNextCastMessage(base::TimeTicks* time_to_send) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- return cast_message_builder_.TimeToSendNextCastMessage(time_to_send);
-}
-
-void AudioDecoder::SendCastMessage() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- cast_message_builder_.UpdateCastMessage();
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/audio_receiver/audio_decoder.h b/chromium/media/cast/audio_receiver/audio_decoder.h
deleted file mode 100644
index 8a77d79d070..00000000000
--- a/chromium/media/cast/audio_receiver/audio_decoder.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
-#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
-
-#include "base/callback.h"
-#include "base/synchronization/lock.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/framer/cast_message_builder.h"
-#include "media/cast/framer/frame_id_map.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-
-namespace webrtc {
-class AudioCodingModule;
-}
-
-namespace media {
-namespace cast {
-
-typedef std::map<uint32, uint32> FrameIdRtpTimestampMap;
-
-// Thread-safe class.
-class AudioDecoder {
- public:
- AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- RtpPayloadFeedback* incoming_payload_feedback);
- virtual ~AudioDecoder();
-
- // Extract a raw audio frame from the decoder.
- // Set the number of desired 10ms blocks and frequency.
- // Should be called from the cast audio decoder thread; however, that is
- // not required.
- bool GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- PcmAudioFrame* audio_frame,
- uint32* rtp_timestamp);
-
- // Insert an RTP packet to the decoder.
- // Should be called from the main cast thread; however, that is not required.
- void IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header);
-
- bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
- void SendCastMessage();
-
- private:
- scoped_refptr<CastEnvironment> cast_environment_;
-
- // The webrtc AudioCodingModule is threadsafe.
- scoped_ptr<webrtc::AudioCodingModule> audio_decoder_;
-
- FrameIdMap frame_id_map_;
- CastMessageBuilder cast_message_builder_;
-
- base::Lock lock_;
- bool have_received_packets_;
- FrameIdRtpTimestampMap frame_id_rtp_timestamp_map_;
- uint32 last_played_out_timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
diff --git a/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc b/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
deleted file mode 100644
index 04df4728bd9..00000000000
--- a/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/audio_receiver/audio_decoder.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-namespace {
-class TestRtpPayloadFeedback : public RtpPayloadFeedback {
- public:
- TestRtpPayloadFeedback() {}
- virtual ~TestRtpPayloadFeedback() {}
-
- virtual void CastFeedback(const RtcpCastMessage& cast_feedback) OVERRIDE {
- EXPECT_EQ(1u, cast_feedback.ack_frame_id_);
- EXPECT_EQ(0u, cast_feedback.missing_frames_and_packets_.size());
- }
-};
-} // namespace.
-
-class AudioDecoderTest : public ::testing::Test {
- protected:
- AudioDecoderTest() {
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1234));
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
- }
- virtual ~AudioDecoderTest() {}
-
- void Configure(const AudioReceiverConfig& audio_config) {
- audio_decoder_.reset(
- new AudioDecoder(cast_environment_, audio_config, &cast_feedback_));
- }
-
- TestRtpPayloadFeedback cast_feedback_;
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<AudioDecoder> audio_decoder_;
-};
-
-TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
- AudioReceiverConfig audio_config;
- audio_config.rtp_payload_type = 127;
- audio_config.frequency = 16000;
- audio_config.channels = 1;
- audio_config.codec = kPcm16;
- audio_config.use_external_decoder = false;
- Configure(audio_config);
-
- RtpCastHeader rtp_header;
- rtp_header.webrtc.header.payloadType = 127;
- rtp_header.webrtc.header.sequenceNumber = 1234;
- rtp_header.webrtc.header.timestamp = 0x87654321;
- rtp_header.webrtc.header.ssrc = 0x12345678;
- rtp_header.webrtc.header.paddingLength = 0;
- rtp_header.webrtc.header.headerLength = 12;
- rtp_header.webrtc.type.Audio.channel = 1;
- rtp_header.webrtc.type.Audio.isCNG = false;
-
- std::vector<int16> payload(640, 0x1234);
- int number_of_10ms_blocks = 4;
- int desired_frequency = 16000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
- EXPECT_FALSE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- &audio_frame,
- &rtp_timestamp));
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- size_t payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(payload_data,
- payload_size, rtp_header);
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- &audio_frame,
- &rtp_timestamp));
- EXPECT_EQ(1, audio_frame.channels);
- EXPECT_EQ(16000, audio_frame.frequency);
- EXPECT_EQ(640ul, audio_frame.samples.size());
- // First 10 samples per channel are 0 from NetEq.
- for (size_t i = 10; i < audio_frame.samples.size(); ++i) {
- EXPECT_EQ(0x3412, audio_frame.samples[i]);
- }
-}
-
-TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
- AudioReceiverConfig audio_config;
- audio_config.rtp_payload_type = 127;
- audio_config.frequency = 16000;
- audio_config.channels = 2;
- audio_config.codec = kPcm16;
- audio_config.use_external_decoder = false;
- Configure(audio_config);
-
- RtpCastHeader rtp_header;
- rtp_header.frame_id = 0;
- rtp_header.webrtc.header.payloadType = 127;
- rtp_header.webrtc.header.sequenceNumber = 1234;
- rtp_header.webrtc.header.timestamp = 0x87654321;
- rtp_header.webrtc.header.ssrc = 0x12345678;
- rtp_header.webrtc.header.paddingLength = 0;
- rtp_header.webrtc.header.headerLength = 12;
-
- rtp_header.webrtc.type.Audio.isCNG = false;
- rtp_header.webrtc.type.Audio.channel = 2;
-
- std::vector<int16> payload(640, 0x1234);
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- size_t payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(payload_data,
- payload_size, rtp_header);
-
- int number_of_10ms_blocks = 2;
- int desired_frequency = 16000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- &audio_frame,
- &rtp_timestamp));
- EXPECT_EQ(2, audio_frame.channels);
- EXPECT_EQ(16000, audio_frame.frequency);
- EXPECT_EQ(640ul, audio_frame.samples.size());
- // First 10 samples per channel are 0 from NetEq.
- for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
- ++i) {
- EXPECT_EQ(0x3412, audio_frame.samples[i]);
- }
-
- rtp_header.frame_id++;
- rtp_header.webrtc.header.sequenceNumber++;
- rtp_header.webrtc.header.timestamp += (audio_config.frequency / 100) * 2 * 2;
-
- audio_decoder_->IncomingParsedRtpPacket(payload_data,
- payload_size, rtp_header);
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- &audio_frame,
- &rtp_timestamp));
- EXPECT_EQ(2, audio_frame.channels);
- EXPECT_EQ(16000, audio_frame.frequency);
- EXPECT_EQ(640ul, audio_frame.samples.size());
- for (size_t i = 0; i < audio_frame.samples.size(); ++i) {
- EXPECT_NEAR(0x3412, audio_frame.samples[i], 1000);
- }
- // Test cast callback.
- audio_decoder_->SendCastMessage();
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(33));
- audio_decoder_->SendCastMessage();
-}
-
-TEST_F(AudioDecoderTest, Pcm16Resample) {
- AudioReceiverConfig audio_config;
- audio_config.rtp_payload_type = 127;
- audio_config.frequency = 16000;
- audio_config.channels = 2;
- audio_config.codec = kPcm16;
- audio_config.use_external_decoder = false;
- Configure(audio_config);
-
- RtpCastHeader rtp_header;
- rtp_header.webrtc.header.payloadType = 127;
- rtp_header.webrtc.header.sequenceNumber = 1234;
- rtp_header.webrtc.header.timestamp = 0x87654321;
- rtp_header.webrtc.header.ssrc = 0x12345678;
- rtp_header.webrtc.header.paddingLength = 0;
- rtp_header.webrtc.header.headerLength = 12;
-
- rtp_header.webrtc.type.Audio.isCNG = false;
- rtp_header.webrtc.type.Audio.channel = 2;
-
- std::vector<int16> payload(640, 0x1234);
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- size_t payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(payload_data,
- payload_size, rtp_header);
-
- int number_of_10ms_blocks = 2;
- int desired_frequency = 48000;
- PcmAudioFrame audio_frame;
- uint32 rtp_timestamp;
-
- EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- &audio_frame,
- &rtp_timestamp));
-
- EXPECT_EQ(2, audio_frame.channels);
- EXPECT_EQ(48000, audio_frame.frequency);
- EXPECT_EQ(1920ul, audio_frame.samples.size()); // Upsampled to 48 kHz.
- int count = 0;
- // Resampling makes the variance worse.
- for (size_t i = 100 * audio_config.channels; i < audio_frame.samples.size();
- ++i) {
- EXPECT_NEAR(0x3412, audio_frame.samples[i], 400);
- if (0x3412 == audio_frame.samples[i]) count++;
- }
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.cc b/chromium/media/cast/audio_receiver/audio_receiver.cc
deleted file mode 100644
index 5aad22f628c..00000000000
--- a/chromium/media/cast/audio_receiver/audio_receiver.cc
+++ /dev/null
@@ -1,490 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/audio_receiver/audio_receiver.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "crypto/encryptor.h"
-#include "crypto/symmetric_key.h"
-#include "media/cast/audio_receiver/audio_decoder.h"
-#include "media/cast/framer/framer.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver.h"
-
-// Max time we wait before releasing an audio frame that is due for playout.
-static const int64 kMaxAudioFrameWaitMs = 20;
-static const int64 kMinSchedulingDelayMs = 1;
-
-namespace media {
-namespace cast {
-
-DecodedAudioCallbackData::DecodedAudioCallbackData()
- : number_of_10ms_blocks(0),
- desired_frequency(0),
- callback() {}
-
-DecodedAudioCallbackData::~DecodedAudioCallbackData() {}
-
-// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
-// Used to pass payload data into the audio receiver.
-class LocalRtpAudioData : public RtpData {
- public:
- explicit LocalRtpAudioData(AudioReceiver* audio_receiver)
- : audio_receiver_(audio_receiver) {}
-
- virtual void OnReceivedPayloadData(
- const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader* rtp_header) OVERRIDE {
- audio_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
- *rtp_header);
- }
-
- private:
- AudioReceiver* audio_receiver_;
-};
-
-// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
-// Used to convey cast-specific feedback from receiver to sender.
-class LocalRtpAudioFeedback : public RtpPayloadFeedback {
- public:
- explicit LocalRtpAudioFeedback(AudioReceiver* audio_receiver)
- : audio_receiver_(audio_receiver) {
- }
-
- virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
- audio_receiver_->CastFeedback(cast_message);
- }
-
- private:
- AudioReceiver* audio_receiver_;
-};
-
-class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
- public:
- explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver)
- : rtp_receiver_(rtp_receiver) {
- }
-
- virtual void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter) OVERRIDE {
- rtp_receiver_->GetStatistics(fraction_lost,
- cumulative_lost,
- extended_high_sequence_number,
- jitter);
- }
-
- private:
- RtpReceiver* rtp_receiver_;
-};
-
-AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- PacedPacketSender* const packet_sender)
- : cast_environment_(cast_environment),
- codec_(audio_config.codec),
- frequency_(audio_config.frequency),
- audio_buffer_(),
- audio_decoder_(),
- time_offset_(),
- weak_factory_(this) {
- target_delay_delta_ =
- base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms);
- incoming_payload_callback_.reset(new LocalRtpAudioData(this));
- incoming_payload_feedback_.reset(new LocalRtpAudioFeedback(this));
- if (audio_config.use_external_decoder) {
- audio_buffer_.reset(new Framer(cast_environment->Clock(),
- incoming_payload_feedback_.get(),
- audio_config.incoming_ssrc,
- true,
- 0));
- } else {
- audio_decoder_.reset(new AudioDecoder(cast_environment,
- audio_config,
- incoming_payload_feedback_.get()));
- }
- if (audio_config.aes_iv_mask.size() == kAesKeySize &&
- audio_config.aes_key.size() == kAesKeySize) {
- iv_mask_ = audio_config.aes_iv_mask;
- crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
- crypto::SymmetricKey::AES, audio_config.aes_key);
- decryptor_.reset(new crypto::Encryptor());
- decryptor_->Init(key, crypto::Encryptor::CTR, std::string());
- } else if (audio_config.aes_iv_mask.size() != 0 ||
- audio_config.aes_key.size() != 0) {
- DCHECK(false) << "Invalid crypto configuration";
- }
-
- rtp_receiver_.reset(new RtpReceiver(cast_environment->Clock(),
- &audio_config,
- NULL,
- incoming_payload_callback_.get()));
- rtp_audio_receiver_statistics_.reset(
- new LocalRtpReceiverStatistics(rtp_receiver_.get()));
- base::TimeDelta rtcp_interval_delta =
- base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval);
- rtcp_.reset(new Rtcp(cast_environment,
- NULL,
- packet_sender,
- NULL,
- rtp_audio_receiver_statistics_.get(),
- audio_config.rtcp_mode,
- rtcp_interval_delta,
- audio_config.feedback_ssrc,
- audio_config.incoming_ssrc,
- audio_config.rtcp_c_name));
-}
-
-AudioReceiver::~AudioReceiver() {}
-
-void AudioReceiver::InitializeTimers() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- ScheduleNextRtcpReport();
- ScheduleNextCastMessage();
-}
-
-void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
- rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
- rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
-
- // TODO(pwestin): update this as video to refresh over time.
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (time_first_incoming_packet_.is_null()) {
- InitializeTimers();
- first_incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
- time_first_incoming_packet_ = cast_environment_->Clock()->NowTicks();
- }
-
- if (audio_decoder_) {
- DCHECK(!audio_buffer_) << "Invalid internal state";
- std::string plaintext(reinterpret_cast<const char*>(payload_data),
- payload_size);
- if (decryptor_) {
- plaintext.clear();
- if (!decryptor_->SetCounter(GetAesNonce(rtp_header.frame_id, iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
- return;
- }
- if (!decryptor_->Decrypt(base::StringPiece(reinterpret_cast<const char*>(
- payload_data), payload_size), &plaintext)) {
- VLOG(0) << "Decryption error";
- return;
- }
- }
- audio_decoder_->IncomingParsedRtpPacket(
- reinterpret_cast<const uint8*>(plaintext.data()), plaintext.size(),
- rtp_header);
- if (!queued_decoded_callbacks_.empty()) {
- DecodedAudioCallbackData decoded_data = queued_decoded_callbacks_.front();
- queued_decoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
- base::Bind(&AudioReceiver::DecodeAudioFrameThread,
- base::Unretained(this),
- decoded_data.number_of_10ms_blocks,
- decoded_data.desired_frequency,
- decoded_data.callback));
- }
- return;
- }
-
- DCHECK(audio_buffer_) << "Invalid internal state";
- DCHECK(!audio_decoder_) << "Invalid internal state";
-
- bool complete = audio_buffer_->InsertPacket(payload_data, payload_size,
- rtp_header);
- if (!complete) return; // Audio frame not complete; wait for more packets.
- if (queued_encoded_callbacks_.empty()) return;
- AudioFrameEncodedCallback callback = queued_encoded_callbacks_.front();
- queued_encoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::GetEncodedAudioFrame,
- weak_factory_.GetWeakPtr(), callback));
-}
-
-void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency, const AudioFrameDecodedCallback& callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_decoder_) << "Invalid function call in this configuration";
- // TODO(pwestin): we can skip this function by posting direct to the decoder.
- cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
- base::Bind(&AudioReceiver::DecodeAudioFrameThread,
- base::Unretained(this),
- number_of_10ms_blocks,
- desired_frequency,
- callback));
-}
-
-void AudioReceiver::DecodeAudioFrameThread(
- int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER));
- // TODO(mikhal): Allow the application to allocate this memory.
- scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame());
-
- uint32 rtp_timestamp = 0;
- if (!audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- audio_frame.get(),
- &rtp_timestamp)) {
- DecodedAudioCallbackData callback_data;
- callback_data.number_of_10ms_blocks = number_of_10ms_blocks;
- callback_data.desired_frequency = desired_frequency;
- callback_data.callback = callback;
- queued_decoded_callbacks_.push_back(callback_data);
- return;
- }
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
-
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::ReturnDecodedFrameWithPlayoutDelay,
- base::Unretained(this), base::Passed(&audio_frame), rtp_timestamp,
- callback));
-}
-
-void AudioReceiver::ReturnDecodedFrameWithPlayoutDelay(
- scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp,
- const AudioFrameDecodedCallback callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- cast_environment_->Logging()->InsertFrameEvent(kAudioFrameDecoded,
- rtp_timestamp, kFrameIdUnknown);
-
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
-
- cast_environment_->Logging()->InsertFrameEventWithDelay(kAudioPlayoutDelay,
- rtp_timestamp, kFrameIdUnknown, playout_time - now);
-
- // Frame is ready - Send back to the caller.
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback, base::Passed(&audio_frame), playout_time));
-}
-
-void AudioReceiver::PlayoutTimeout() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_buffer_) << "Invalid function call in this configuration";
- if (queued_encoded_callbacks_.empty()) {
- // Already released by incoming packet.
- return;
- }
- uint32 rtp_timestamp = 0;
- bool next_frame = false;
- scoped_ptr<EncodedAudioFrame> encoded_frame(new EncodedAudioFrame());
-
- if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(),
- &rtp_timestamp, &next_frame)) {
- // We have no audio frames. Wait for new packet(s).
- // Since the application can post multiple AudioFrameEncodedCallbacks and
- // we only check the next frame to play out, we might have multiple timeout
- // events firing after each other; however, this should be a rare event.
- VLOG(1) << "Failed to retrieve a complete frame at this point in time";
- return;
- }
-
- if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
- // Logging already done.
- return;
- }
-
- if (PostEncodedAudioFrame(queued_encoded_callbacks_.front(), rtp_timestamp,
- next_frame, &encoded_frame)) {
- // Call succeeded; remove the callback from the list.
- queued_encoded_callbacks_.pop_front();
- }
-}
-
-void AudioReceiver::GetEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_buffer_) << "Invalid function call in this configuration";
-
- uint32 rtp_timestamp = 0;
- bool next_frame = false;
- scoped_ptr<EncodedAudioFrame> encoded_frame(new EncodedAudioFrame());
-
- if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(),
- &rtp_timestamp, &next_frame)) {
- // We have no audio frames. Wait for new packet(s).
- VLOG(1) << "Wait for more audio packets in frame";
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
- if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
- // Logging already done.
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
- if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame,
- &encoded_frame)) {
- // We have an audio frame; however, we are missing packets and we have time
- // to wait for new packet(s).
- queued_encoded_callbacks_.push_back(callback);
- }
-}
-
-bool AudioReceiver::PostEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback,
- uint32 rtp_timestamp,
- bool next_frame,
- scoped_ptr<EncodedAudioFrame>* encoded_frame) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_buffer_) << "Invalid function call in this configuration";
-
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
- base::TimeDelta time_until_playout = playout_time - now;
- base::TimeDelta min_wait_delta =
- base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs);
-
- if (!next_frame && (time_until_playout > min_wait_delta)) {
- base::TimeDelta time_until_release = time_until_playout - min_wait_delta;
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
- time_until_release);
- VLOG(1) << "Wait until time to playout:"
- << time_until_release.InMilliseconds();
- return false;
- }
- (*encoded_frame)->codec = codec_;
- audio_buffer_->ReleaseFrame((*encoded_frame)->frame_id);
-
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback, base::Passed(encoded_frame), playout_time));
- return true;
-}
-
-void AudioReceiver::IncomingPacket(const uint8* packet, size_t length,
- const base::Closure callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- bool rtcp_packet = Rtcp::IsRtcpPacket(packet, length);
- if (!rtcp_packet) {
- rtp_receiver_->ReceivedPacket(packet, length);
- } else {
- rtcp_->IncomingRtcpPacket(packet, length);
- }
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
-}
-
-void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- // TODO(pwestin): add logging.
- rtcp_->SendRtcpFromRtpReceiver(&cast_message, NULL);
-}
-
-base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
- uint32 rtp_timestamp) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // Sender's time in ms when this frame was recorded.
- // Note: the sender's clock and our local clock might not be synced.
- base::TimeTicks rtp_timestamp_in_ticks;
- if (time_offset_ == base::TimeDelta()) {
- if (rtcp_->RtpTimestampInSenderTime(frequency_,
- first_incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
- time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks;
- } else {
- // We have not received any RTCP to sync the stream; play it out as soon
- // as possible.
- uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_;
-
- int frequency_khz = frequency_ / 1000;
- base::TimeDelta rtp_time_diff_delta =
- base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz);
- base::TimeDelta time_diff_delta = now - time_first_incoming_packet_;
-
- return now + std::max(rtp_time_diff_delta - time_diff_delta,
- base::TimeDelta());
- }
- }
- // This can fail if we have not received any RTCP packets in a long time.
- return rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
- &rtp_timestamp_in_ticks) ?
- rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ :
- now;
-}
-
-bool AudioReceiver::DecryptAudioFrame(
- scoped_ptr<EncodedAudioFrame>* audio_frame) {
- DCHECK(decryptor_) << "Invalid state";
-
- if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id,
- iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
- return false;
- }
- std::string decrypted_audio_data;
- if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) {
- VLOG(0) << "Decryption error";
- // Give up on this frame, release it from jitter buffer.
- audio_buffer_->ReleaseFrame((*audio_frame)->frame_id);
- return false;
- }
- (*audio_frame)->data.swap(decrypted_audio_data);
- return true;
-}
-
-void AudioReceiver::ScheduleNextRtcpReport() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() -
- cast_environment_->Clock()->NowTicks();
-
- time_to_send = std::max(time_to_send,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
-
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::SendNextRtcpReport,
- weak_factory_.GetWeakPtr()), time_to_send);
-}
-
-void AudioReceiver::SendNextRtcpReport() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // TODO(pwestin): add logging.
- rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
- ScheduleNextRtcpReport();
-}
-
-// Cast messages should be sent within a maximum interval. Schedule a call
-// if not triggered elsewhere, e.g. by the cast message_builder.
-void AudioReceiver::ScheduleNextCastMessage() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks send_time;
- if (audio_buffer_) {
- audio_buffer_->TimeToSendNextCastMessage(&send_time);
- } else if (audio_decoder_) {
- audio_decoder_->TimeToSendNextCastMessage(&send_time);
- } else {
- NOTREACHED();
- }
- base::TimeDelta time_to_send = send_time -
- cast_environment_->Clock()->NowTicks();
- time_to_send = std::max(time_to_send,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::SendNextCastMessage,
- weak_factory_.GetWeakPtr()), time_to_send);
-}
-
-void AudioReceiver::SendNextCastMessage() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- if (audio_buffer_) {
- // Will only send a message if it is time.
- audio_buffer_->SendCastMessage();
- }
- if (audio_decoder_) {
- // Will only send a message if it is time.
- audio_decoder_->SendCastMessage();
- }
- ScheduleNextCastMessage();
-}
-
-} // namespace cast
-} // namespace media
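The deleted GetPlayoutTime() above is worth unpacking: before any RTCP sender report arrives, the playout time is derived purely from the RTP timestamp delta since the first packet, converted to milliseconds via the RTP clock rate and clamped so frames are never scheduled in the past. A hedged, standalone sketch of just that fallback arithmetic:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Milliseconds until an RTP timestamp is due, relative to the first packet.
// |frequency| is the RTP clock rate in samples per second (e.g. 16000).
// Assumes no RTCP-based clock sync, as in the fallback branch above.
int64_t MsUntilPlayout(uint32_t rtp_timestamp,
                       uint32_t first_rtp_timestamp,
                       int64_t ms_since_first_packet,
                       int frequency) {
  // Unsigned subtraction handles RTP timestamp wraparound.
  const uint32_t rtp_diff = rtp_timestamp - first_rtp_timestamp;
  const int64_t rtp_diff_ms = rtp_diff / (frequency / 1000);
  // Never schedule in the past: clamp at "play out now".
  return std::max<int64_t>(rtp_diff_ms - ms_since_first_packet, 0);
}

int main() {
  // 960 RTP ticks at 16 kHz is 60 ms; 20 ms already elapsed -> 40 ms left.
  std::printf("%lld ms\n",
              static_cast<long long>(MsUntilPlayout(960, 0, 20, 16000)));
  return 0;
}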
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.gypi b/chromium/media/cast/audio_receiver/audio_receiver.gypi
deleted file mode 100644
index a851612f721..00000000000
--- a/chromium/media/cast/audio_receiver/audio_receiver.gypi
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_audio_receiver',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc/',
- ],
- 'sources': [
- 'audio_decoder.h',
- 'audio_decoder.cc',
- 'audio_receiver.h',
- 'audio_receiver.cc',
- ], # source
- 'dependencies': [
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- '<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
- '<(DEPTH)/media/cast/rtp_receiver/rtp_receiver.gyp:*',
- '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.h b/chromium/media/cast/audio_receiver/audio_receiver.h
deleted file mode 100644
index c49e1c15c25..00000000000
--- a/chromium/media/cast/audio_receiver/audio_receiver.h
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_
-#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h" // RtpCastHeader
-
-namespace crypto {
- class Encryptor;
-}
-
-namespace media {
-namespace cast {
-
-class AudioDecoder;
-class Framer;
-class LocalRtpAudioData;
-class LocalRtpAudioFeedback;
-class PacedPacketSender;
-class RtpReceiver;
-class RtpReceiverStatistics;
-
-struct DecodedAudioCallbackData {
- DecodedAudioCallbackData();
- ~DecodedAudioCallbackData();
- int number_of_10ms_blocks;
- int desired_frequency;
- AudioFrameDecodedCallback callback;
-};
-
-// This class is not thread-safe. It should only be called from the main
-// cast thread.
-class AudioReceiver : public base::NonThreadSafe,
- public base::SupportsWeakPtr<AudioReceiver> {
- public:
- AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- PacedPacketSender* const packet_sender);
-
- virtual ~AudioReceiver();
-
- // Extract a raw audio frame from the cast receiver.
- // Actual decoding will be performed on a designated audio_decoder thread.
- void GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback& callback);
-
- // Extract an encoded audio frame from the cast receiver.
- void GetEncodedAudioFrame(const AudioFrameEncodedCallback& callback);
-
- // Should only be called from the main cast thread.
- void IncomingPacket(const uint8* packet, size_t length,
- const base::Closure callback);
-
- protected:
- void IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header);
- private:
- friend class LocalRtpAudioData;
- friend class LocalRtpAudioFeedback;
-
- void CastFeedback(const RtcpCastMessage& cast_message);
-
- // Time to pull out the audio even though we are missing data.
- void PlayoutTimeout();
-
- bool PostEncodedAudioFrame(const AudioFrameEncodedCallback& callback,
- uint32 rtp_timestamp,
- bool next_frame,
- scoped_ptr<EncodedAudioFrame>* encoded_frame);
-
- // Actual decoding implementation - should be called under the audio decoder
- // thread.
- void DecodeAudioFrameThread(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback callback);
- void ReturnDecodedFrameWithPlayoutDelay(
- scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp,
- const AudioFrameDecodedCallback callback);
-
- // Returns the playout time based on the current time and RTP timestamp.
- base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
-
- void InitializeTimers();
-
- // Decrypts the data within the |audio_frame| and replaces the data with the
- // decrypted string.
- bool DecryptAudioFrame(scoped_ptr<EncodedAudioFrame>* audio_frame);
-
- // Schedule the next RTCP report.
- void ScheduleNextRtcpReport();
-
- // Actually send the next RTCP report.
- void SendNextRtcpReport();
-
- // Schedule timing for the next cast message.
- void ScheduleNextCastMessage();
-
- // Actually send the next cast message.
- void SendNextCastMessage();
-
- scoped_refptr<CastEnvironment> cast_environment_;
- base::WeakPtrFactory<AudioReceiver> weak_factory_;
-
- const AudioCodec codec_;
- const int frequency_;
- base::TimeDelta target_delay_delta_;
- scoped_ptr<Framer> audio_buffer_;
- scoped_ptr<AudioDecoder> audio_decoder_;
- scoped_ptr<LocalRtpAudioData> incoming_payload_callback_;
- scoped_ptr<LocalRtpAudioFeedback> incoming_payload_feedback_;
- scoped_ptr<RtpReceiver> rtp_receiver_;
- scoped_ptr<Rtcp> rtcp_;
- scoped_ptr<RtpReceiverStatistics> rtp_audio_receiver_statistics_;
- base::TimeDelta time_offset_;
- base::TimeTicks time_first_incoming_packet_;
- uint32 first_incoming_rtp_timestamp_;
- scoped_ptr<crypto::Encryptor> decryptor_;
- std::string iv_mask_;
-
- std::list<AudioFrameEncodedCallback> queued_encoded_callbacks_;
- std::list<DecodedAudioCallbackData> queued_decoded_callbacks_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_
diff --git a/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc b/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
deleted file mode 100644
index a10af679925..00000000000
--- a/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/audio_receiver/audio_receiver.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-
-namespace {
-class TestAudioEncoderCallback :
- public base::RefCountedThreadSafe<TestAudioEncoderCallback> {
- public:
- TestAudioEncoderCallback()
- : num_called_(0) {}
-
- void SetExpectedResult(uint8 expected_frame_id,
- const base::TimeTicks& expected_playout_time) {
- expected_frame_id_ = expected_frame_id;
- expected_playout_time_ = expected_playout_time;
- }
-
- void DeliverEncodedAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
- EXPECT_EQ(expected_frame_id_, audio_frame->frame_id);
- EXPECT_EQ(kPcm16, audio_frame->codec);
- EXPECT_EQ(expected_playout_time_, playout_time);
- num_called_++;
- }
-
- int number_times_called() const { return num_called_;}
-
- protected:
- virtual ~TestAudioEncoderCallback() {}
-
- private:
- friend class base::RefCountedThreadSafe<TestAudioEncoderCallback>;
-
- int num_called_;
- uint8 expected_frame_id_;
- base::TimeTicks expected_playout_time_;
-};
-} // namespace
-
-class PeerAudioReceiver : public AudioReceiver {
- public:
- PeerAudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- PacedPacketSender* const packet_sender)
- : AudioReceiver(cast_environment, audio_config, packet_sender) {}
-
- using AudioReceiver::IncomingParsedRtpPacket;
-};
-
-class AudioReceiverTest : public ::testing::Test {
- protected:
- AudioReceiverTest() {
- // Configure the audio receiver to use PCM16.
- audio_config_.rtp_payload_type = 127;
- audio_config_.frequency = 16000;
- audio_config_.channels = 1;
- audio_config_.codec = kPcm16;
- audio_config_.use_external_decoder = false;
- audio_config_.feedback_ssrc = 1234;
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
- test_audio_encoder_callback_ = new TestAudioEncoderCallback();
- }
-
- void Configure(bool use_external_decoder) {
- audio_config_.use_external_decoder = use_external_decoder;
- receiver_.reset(new PeerAudioReceiver(cast_environment_, audio_config_,
- &mock_transport_));
- }
-
- virtual ~AudioReceiverTest() {}
-
- static void DummyDeletePacket(const uint8* packet) {}
-
- virtual void SetUp() {
- payload_.assign(kIpPacketSize, 0);
- rtp_header_.is_key_frame = true;
- rtp_header_.frame_id = 0;
- rtp_header_.packet_id = 0;
- rtp_header_.max_packet_id = 0;
- rtp_header_.is_reference = false;
- rtp_header_.reference_frame_id = 0;
- rtp_header_.webrtc.header.timestamp = 0;
- }
-
- AudioReceiverConfig audio_config_;
- std::vector<uint8> payload_;
- RtpCastHeader rtp_header_;
- base::SimpleTestTickClock testing_clock_;
- MockPacedPacketSender mock_transport_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_ptr<PeerAudioReceiver> receiver_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<TestAudioEncoderCallback> test_audio_encoder_callback_;
-};
-
-TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
- Configure(true);
- EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).Times(1);
-
- receiver_->IncomingParsedRtpPacket(payload_.data(),
- payload_.size(), rtp_header_);
- EncodedAudioFrame audio_frame;
- base::TimeTicks playout_time;
- test_audio_encoder_callback_->SetExpectedResult(0, testing_clock_.NowTicks());
-
- AudioFrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
- test_audio_encoder_callback_.get());
-
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
- task_runner_->RunTasks();
- EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
-}
-
-TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
- Configure(true);
- EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).WillRepeatedly(
- testing::Return(true));
-
- AudioFrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
- test_audio_encoder_callback_.get());
-
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
-
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
-
- EncodedAudioFrame audio_frame;
- base::TimeTicks playout_time;
- test_audio_encoder_callback_->SetExpectedResult(0, testing_clock_.NowTicks());
-
- task_runner_->RunTasks();
- EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
-
- TestRtcpPacketBuilder rtcp_packet;
-
- uint32 ntp_high;
- uint32 ntp_low;
- ConvertTimeTicksToNtp(testing_clock_.NowTicks(), &ntp_high, &ntp_low);
- rtcp_packet.AddSrWithNtp(audio_config_.feedback_ssrc, ntp_high, ntp_low,
- rtp_header_.webrtc.header.timestamp);
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(20));
-
- receiver_->IncomingPacket(rtcp_packet.Packet(), rtcp_packet.Length(),
- base::Bind(AudioReceiverTest::DummyDeletePacket, rtcp_packet.Packet()));
-
- // Make sure that the frames are not contiguous and that the RTP timestamp
- // represents a time in the future.
- rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 2;
- rtp_header_.is_reference = true;
- rtp_header_.reference_frame_id = 0;
- rtp_header_.webrtc.header.timestamp = 960;
- test_audio_encoder_callback_->SetExpectedResult(2,
- testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(100));
-
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
- task_runner_->RunTasks();
-
- // Frame 2 should not come out at this point in time.
- EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
-
- // Queue up one more pending callback.
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
-
- task_runner_->RunTasks();
- EXPECT_EQ(2, test_audio_encoder_callback_->number_times_called());
-
- test_audio_encoder_callback_->SetExpectedResult(3, testing_clock_.NowTicks());
-
- // Feed in one more pending audio frame.
- rtp_header_.frame_id = 3;
- rtp_header_.is_reference = false;
- rtp_header_.reference_frame_id = 0;
- rtp_header_.webrtc.header.timestamp = 1280;
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
-
- receiver_->GetEncodedAudioFrame(frame_encoded_callback);
- task_runner_->RunTasks();
- EXPECT_EQ(3, test_audio_encoder_callback_->number_times_called());
-}
-
-// TODO(mikhal): Add encoded frames.
-TEST_F(AudioReceiverTest, GetRawFrame) {
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_encoder.cc b/chromium/media/cast/audio_sender/audio_encoder.cc
index a82d1de39a5..8860c7dd2d8 100644
--- a/chromium/media/cast/audio_sender/audio_encoder.cc
+++ b/chromium/media/cast/audio_sender/audio_encoder.cc
@@ -8,8 +8,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/location.h"
+#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "base/time/time.h"
#include "media/base/audio_bus.h"
@@ -20,121 +20,198 @@
namespace media {
namespace cast {
-void LogAudioEncodedEvent(CastEnvironment* const cast_environment,
- const base::TimeTicks& recorded_time) {
- // TODO(mikhal): Resolve timestamp calculation for audio.
- cast_environment->Logging()->InsertFrameEvent(kAudioFrameEncoded,
- GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
-}
+namespace {
+
+// The fixed number of audio frames per second and, inversely, the duration of
+// one frame's worth of samples.
+const int kFramesPerSecond = 100;
+const int kFrameDurationMillis = 1000 / kFramesPerSecond; // No remainder!
+
+// Threshold used to decide whether audio being delivered to the encoder is
+// coming in too slow with respect to the capture timestamps.
+const int kUnderrunThresholdMillis = 3 * kFrameDurationMillis;
+
+} // namespace
+
// Base class that handles the common problem of feeding one or more AudioBus'
-// data into a 10 ms buffer and then, once the buffer is full, encoding the
-// signal and emitting an EncodedAudioFrame via the FrameEncodedCallback.
+// data into a buffer and then, once the buffer is full, encoding the signal and
+// emitting an EncodedFrame via the FrameEncodedCallback.
//
// Subclasses complete the implementation by handling the actual encoding
// details.
-class AudioEncoder::ImplBase {
+class AudioEncoder::ImplBase
+ : public base::RefCountedThreadSafe<AudioEncoder::ImplBase> {
public:
- ImplBase(CastEnvironment* cast_environment,
- AudioCodec codec, int num_channels, int sampling_rate,
+ ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
+ transport::AudioCodec codec,
+ int num_channels,
+ int sampling_rate,
const FrameEncodedCallback& callback)
: cast_environment_(cast_environment),
- codec_(codec), num_channels_(num_channels),
- samples_per_10ms_(sampling_rate / 100),
+ codec_(codec),
+ num_channels_(num_channels),
+ samples_per_frame_(sampling_rate / kFramesPerSecond),
callback_(callback),
+ cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
buffer_fill_end_(0),
- frame_id_(0) {
- CHECK_GT(num_channels_, 0);
- CHECK_GT(samples_per_10ms_, 0);
- CHECK_EQ(sampling_rate % 100, 0);
- CHECK_LE(samples_per_10ms_ * num_channels_,
- EncodedAudioFrame::kMaxNumberOfSamples);
+ frame_id_(0),
+ frame_rtp_timestamp_(0) {
+ // Support for a max sampling rate of 48 kHz, 2 channels, 100 ms duration.
+ const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
+ if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
+ sampling_rate % kFramesPerSecond != 0 ||
+ samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) {
+ cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
+ }
}
- virtual ~ImplBase() {}
+ CastInitializationStatus InitializationResult() const {
+ return cast_initialization_status_;
+ }
+
+ void EncodeAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time) {
+ DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
+ DCHECK(!recorded_time.is_null());
+
+ // Determine whether |recorded_time| is consistent with the amount of audio
+ // data having been processed in the past. Resolve the underrun problem by
+ // dropping data from the internal buffer and skipping ahead the next
+ // frame's RTP timestamp by the estimated number of frames missed. On the
+ // other hand, don't attempt to resolve overruns: A receiver should
+ // gracefully deal with an excess of audio data.
+ const base::TimeDelta frame_duration =
+ base::TimeDelta::FromMilliseconds(kFrameDurationMillis);
+ base::TimeDelta buffer_fill_duration =
+ buffer_fill_end_ * frame_duration / samples_per_frame_;
+ if (!frame_capture_time_.is_null()) {
+ const base::TimeDelta amount_ahead_by =
+ recorded_time - (frame_capture_time_ + buffer_fill_duration);
+ if (amount_ahead_by >
+ base::TimeDelta::FromMilliseconds(kUnderrunThresholdMillis)) {
+ buffer_fill_end_ = 0;
+ buffer_fill_duration = base::TimeDelta();
+ const int64 num_frames_missed = amount_ahead_by /
+ base::TimeDelta::FromMilliseconds(kFrameDurationMillis);
+ frame_rtp_timestamp_ +=
+ static_cast<uint32>(num_frames_missed * samples_per_frame_);
+ DVLOG(1) << "Skipping RTP timestamp ahead to account for "
+ << num_frames_missed * samples_per_frame_
+ << " samples' worth of underrun.";
+ }
+ }
+ frame_capture_time_ = recorded_time - buffer_fill_duration;
- void EncodeAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback) {
+ // Encode all audio in |audio_bus| into zero or more frames.
int src_pos = 0;
while (src_pos < audio_bus->frames()) {
- const int num_samples_to_xfer =
- std::min(samples_per_10ms_ - buffer_fill_end_,
- audio_bus->frames() - src_pos);
+ const int num_samples_to_xfer = std::min(
+ samples_per_frame_ - buffer_fill_end_, audio_bus->frames() - src_pos);
DCHECK_EQ(audio_bus->channels(), num_channels_);
TransferSamplesIntoBuffer(
- audio_bus, src_pos, buffer_fill_end_, num_samples_to_xfer);
+ audio_bus.get(), src_pos, buffer_fill_end_, num_samples_to_xfer);
src_pos += num_samples_to_xfer;
buffer_fill_end_ += num_samples_to_xfer;
- if (src_pos == audio_bus->frames()) {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- done_callback);
- // Note: |audio_bus| is now invalid..
+ if (buffer_fill_end_ < samples_per_frame_)
+ break;
+
+ scoped_ptr<transport::EncodedFrame> audio_frame(
+ new transport::EncodedFrame());
+ audio_frame->dependency = transport::EncodedFrame::KEY;
+ audio_frame->frame_id = frame_id_;
+ audio_frame->referenced_frame_id = frame_id_;
+ audio_frame->rtp_timestamp = frame_rtp_timestamp_;
+ audio_frame->reference_time = frame_capture_time_;
+
+ if (EncodeFromFilledBuffer(&audio_frame->data)) {
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(callback_, base::Passed(&audio_frame)));
}
- if (buffer_fill_end_ == samples_per_10ms_) {
- scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
- audio_frame->codec = codec_;
- audio_frame->frame_id = frame_id_++;
- audio_frame->samples = samples_per_10ms_;
- if (EncodeFromFilledBuffer(&audio_frame->data)) {
- // Compute an offset to determine the recorded time for the first
- // audio sample in the buffer.
- const base::TimeDelta buffer_time_offset =
- (buffer_fill_end_ - src_pos) *
- base::TimeDelta::FromMilliseconds(10) / samples_per_10ms_;
- // TODO(miu): Consider batching EncodedAudioFrames so we only post a
- // at most one task for each call to this method.
- cast_environment_->PostTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback_, base::Passed(&audio_frame),
- recorded_time - buffer_time_offset));
- }
- buffer_fill_end_ = 0;
- }
+ // Reset the internal buffer, frame ID, and timestamps for the next frame.
+ buffer_fill_end_ = 0;
+ ++frame_id_;
+ frame_rtp_timestamp_ += samples_per_frame_;
+ frame_capture_time_ += frame_duration;
}
}
protected:
+ friend class base::RefCountedThreadSafe<ImplBase>;
+ virtual ~ImplBase() {}
+
virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
int source_offset,
int buffer_fill_offset,
int num_samples) = 0;
virtual bool EncodeFromFilledBuffer(std::string* out) = 0;
- CastEnvironment* const cast_environment_;
- const AudioCodec codec_;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ const transport::AudioCodec codec_;
const int num_channels_;
- const int samples_per_10ms_;
+ const int samples_per_frame_;
const FrameEncodedCallback callback_;
+ // The subclass's ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
+ CastInitializationStatus cast_initialization_status_;
+
private:
// In the case where a call to EncodeAudio() cannot completely fill the
// buffer, this points to the position at which to populate data in a later
// call.
int buffer_fill_end_;
- // A counter used to label EncodedAudioFrames.
+ // A counter used to label EncodedFrames.
uint32 frame_id_;
- private:
+ // The RTP timestamp for the next frame of encoded audio. This is defined as
+ // the number of audio samples encoded so far, plus the estimated number of
+ // samples that were missed due to data underruns. A receiver uses this value
+ // to detect gaps in the audio signal data being provided. Per the spec, RTP
+ // timestamp values are allowed to overflow and roll around past zero.
+ uint32 frame_rtp_timestamp_;
+
+ // The local system time associated with the start of the next frame of
+ // encoded audio. This value is passed on to a receiver as a reference clock
+ // timestamp for the purposes of synchronizing audio and video. Its
+ // progression is expected to drift relative to the elapsed time implied by
+ // the RTP timestamps.
+ base::TimeTicks frame_capture_time_;
+
DISALLOW_COPY_AND_ASSIGN(ImplBase);
};
class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
public:
- OpusImpl(CastEnvironment* cast_environment,
- int num_channels, int sampling_rate, int bitrate,
+ OpusImpl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate,
+ int bitrate,
const FrameEncodedCallback& callback)
- : ImplBase(cast_environment, kOpus, num_channels, sampling_rate,
+ : ImplBase(cast_environment,
+ transport::kOpus,
+ num_channels,
+ sampling_rate,
callback),
encoder_memory_(new uint8[opus_encoder_get_size(num_channels)]),
opus_encoder_(reinterpret_cast<OpusEncoder*>(encoder_memory_.get())),
- buffer_(new float[num_channels * samples_per_10ms_]) {
- CHECK_EQ(opus_encoder_init(opus_encoder_, sampling_rate, num_channels,
- OPUS_APPLICATION_AUDIO),
- OPUS_OK);
+ buffer_(new float[num_channels * samples_per_frame_]) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ return;
+ if (opus_encoder_init(opus_encoder_,
+ sampling_rate,
+ num_channels,
+ OPUS_APPLICATION_AUDIO) != OPUS_OK) {
+ ImplBase::cast_initialization_status_ =
+ STATUS_INVALID_AUDIO_CONFIGURATION;
+ return;
+ }
+ ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+
if (bitrate <= 0) {
// Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a
// variable bitrate up to 102kbps for 2-channel, 48 kHz audio and a 10 ms
@@ -146,9 +223,9 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
OPUS_OK);
}
+ private:
virtual ~OpusImpl() {}
- private:
virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
int source_offset,
int buffer_fill_offset,
@@ -165,9 +242,12 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
out->resize(kOpusMaxPayloadSize);
- const opus_int32 result = opus_encode_float(
- opus_encoder_, buffer_.get(), samples_per_10ms_,
- reinterpret_cast<uint8*>(&out->at(0)), kOpusMaxPayloadSize);
+ const opus_int32 result =
+ opus_encode_float(opus_encoder_,
+ buffer_.get(),
+ samples_per_frame_,
+ reinterpret_cast<uint8*>(string_as_array(out)),
+ kOpusMaxPayloadSize);
if (result > 1) {
out->resize(result);
return true;
@@ -198,30 +278,40 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
public:
- Pcm16Impl(CastEnvironment* cast_environment,
- int num_channels, int sampling_rate,
+ Pcm16Impl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate,
const FrameEncodedCallback& callback)
- : ImplBase(cast_environment, kPcm16, num_channels, sampling_rate,
+ : ImplBase(cast_environment,
+ transport::kPcm16,
+ num_channels,
+ sampling_rate,
callback),
- buffer_(new int16[num_channels * samples_per_10ms_]) {}
+ buffer_(new int16[num_channels * samples_per_frame_]) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ return;
+ cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ }
+ private:
virtual ~Pcm16Impl() {}
- private:
virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
int source_offset,
int buffer_fill_offset,
int num_samples) OVERRIDE {
audio_bus->ToInterleavedPartial(
- source_offset, num_samples, sizeof(int16),
+ source_offset,
+ num_samples,
+ sizeof(int16),
buffer_.get() + buffer_fill_offset * num_channels_);
}
virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
// Output 16-bit PCM integers in big-endian byte order.
- out->resize(num_channels_ * samples_per_10ms_ * sizeof(int16));
+ out->resize(num_channels_ * samples_per_frame_ * sizeof(int16));
const int16* src = buffer_.get();
- const int16* const src_end = src + num_channels_ * samples_per_10ms_;
+ const int16* const src_end = src + num_channels_ * samples_per_frame_;
uint16* dest = reinterpret_cast<uint16*>(&out->at(0));
for (; src < src_end; ++src, ++dest)
*dest = base::HostToNet16(*src);
@@ -242,17 +332,19 @@ AudioEncoder::AudioEncoder(
// Note: It doesn't matter which thread constructs AudioEncoder, just so long
// as all calls to InsertAudio() are by the same thread.
insert_thread_checker_.DetachFromThread();
-
switch (audio_config.codec) {
- case kOpus:
- impl_.reset(new OpusImpl(
- cast_environment, audio_config.channels, audio_config.frequency,
- audio_config.bitrate, frame_encoded_callback));
+ case transport::kOpus:
+ impl_ = new OpusImpl(cast_environment,
+ audio_config.channels,
+ audio_config.frequency,
+ audio_config.bitrate,
+ frame_encoded_callback);
break;
- case kPcm16:
- impl_.reset(new Pcm16Impl(
- cast_environment, audio_config.channels, audio_config.frequency,
- frame_encoded_callback));
+ case transport::kPcm16:
+ impl_ = new Pcm16Impl(cast_environment,
+ audio_config.channels,
+ audio_config.frequency,
+ frame_encoded_callback);
break;
default:
NOTREACHED() << "Unsupported or unspecified codec for audio encoder";
@@ -262,30 +354,28 @@ AudioEncoder::AudioEncoder(
AudioEncoder::~AudioEncoder() {}
-void AudioEncoder::InsertAudio(
- const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback) {
+CastInitializationStatus AudioEncoder::InitializationResult() const {
+ DCHECK(insert_thread_checker_.CalledOnValidThread());
+ if (impl_) {
+ return impl_->InitializationResult();
+ }
+ return STATUS_UNSUPPORTED_AUDIO_CODEC;
+}
+
+void AudioEncoder::InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time) {
DCHECK(insert_thread_checker_.CalledOnValidThread());
+ DCHECK(audio_bus.get());
if (!impl_) {
NOTREACHED();
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- done_callback);
return;
}
- cast_environment_->PostTask(CastEnvironment::AUDIO_ENCODER, FROM_HERE,
- base::Bind(&AudioEncoder::EncodeAudio, this, audio_bus, recorded_time,
- done_callback));
-}
-
-void AudioEncoder::EncodeAudio(
- const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_ENCODER));
- impl_->EncodeAudio(audio_bus, recorded_time, done_callback);
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(LogAudioEncodedEvent, cast_environment_, recorded_time));
+ cast_environment_->PostTask(CastEnvironment::AUDIO,
+ FROM_HERE,
+ base::Bind(&AudioEncoder::ImplBase::EncodeAudio,
+ impl_,
+ base::Passed(&audio_bus),
+ recorded_time));
}
} // namespace cast
diff --git a/chromium/media/cast/audio_sender/audio_encoder.h b/chromium/media/cast/audio_sender/audio_encoder.h
index 4a22d1983bd..2297672b74b 100644
--- a/chromium/media/cast/audio_sender/audio_encoder.h
+++ b/chromium/media/cast/audio_sender/audio_encoder.h
@@ -8,6 +8,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread_checker.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
@@ -16,47 +17,30 @@ class TimeTicks;
}
namespace media {
-class AudioBus;
-}
-
-namespace media {
namespace cast {
-class AudioEncoder : public base::RefCountedThreadSafe<AudioEncoder> {
+class AudioEncoder {
public:
- typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
const AudioSenderConfig& audio_config,
const FrameEncodedCallback& frame_encoded_callback);
+ virtual ~AudioEncoder();
- // The |audio_bus| must be valid until the |done_callback| is called.
- // The callback is called from the main cast thread as soon as the encoder is
- // done with |audio_bus|; it does not mean that the encoded data has been
- // sent out.
- void InsertAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback);
+ CastInitializationStatus InitializationResult() const;
- protected:
- virtual ~AudioEncoder();
+ void InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time);
private:
- friend class base::RefCountedThreadSafe<AudioEncoder>;
-
class ImplBase;
class OpusImpl;
class Pcm16Impl;
- // Invokes |impl_|'s encode method on the AUDIO_ENCODER thread while holding
- // a ref-count on AudioEncoder.
- void EncodeAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback);
-
const scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<ImplBase> impl_;
+ scoped_refptr<ImplBase> impl_;
// Used to ensure only one thread invokes InsertAudio().
base::ThreadChecker insert_thread_checker_;
diff --git a/chromium/media/cast/audio_sender/audio_encoder_unittest.cc b/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
index d721f71ef29..b521099243b 100644
--- a/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include <sstream>
#include <string>
@@ -13,53 +15,56 @@
#include "media/cast/audio_sender/audio_encoder.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/test/audio_utility.h"
-#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/audio_utility.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
namespace {
class TestEncodedAudioFrameReceiver {
public:
- explicit TestEncodedAudioFrameReceiver(AudioCodec codec) :
- codec_(codec), frames_received_(0) {}
+ explicit TestEncodedAudioFrameReceiver(transport::AudioCodec codec)
+ : codec_(codec), frames_received_(0), rtp_lower_bound_(0) {}
virtual ~TestEncodedAudioFrameReceiver() {}
- int frames_received() const {
- return frames_received_;
- }
-
- void SetRecordedTimeLowerBound(const base::TimeTicks& t) {
- lower_bound_ = t;
- }
+ int frames_received() const { return frames_received_; }
- void SetRecordedTimeUpperBound(const base::TimeTicks& t) {
- upper_bound_ = t;
+ void SetCaptureTimeBounds(const base::TimeTicks& lower_bound,
+ const base::TimeTicks& upper_bound) {
+ lower_bound_ = lower_bound;
+ upper_bound_ = upper_bound;
}
- void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& recorded_time) {
- EXPECT_EQ(codec_, encoded_frame->codec);
+ void FrameEncoded(scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ EXPECT_EQ(encoded_frame->dependency, transport::EncodedFrame::KEY);
EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
encoded_frame->frame_id);
- EXPECT_LT(0, encoded_frame->samples);
+ EXPECT_EQ(encoded_frame->frame_id, encoded_frame->referenced_frame_id);
+ // RTP timestamps should be monotonically increasing and integer multiples
+ // of the fixed frame size.
+ EXPECT_LE(rtp_lower_bound_, encoded_frame->rtp_timestamp);
+ rtp_lower_bound_ = encoded_frame->rtp_timestamp;
+ // Note: In audio_encoder.cc, 100 is the fixed audio frame rate.
+ const int kSamplesPerFrame = kDefaultAudioSamplingRate / 100;
+ EXPECT_EQ(0u, encoded_frame->rtp_timestamp % kSamplesPerFrame);
EXPECT_TRUE(!encoded_frame->data.empty());
- EXPECT_LE(lower_bound_, recorded_time);
- lower_bound_ = recorded_time;
- EXPECT_GT(upper_bound_, recorded_time);
+ EXPECT_LE(lower_bound_, encoded_frame->reference_time);
+ lower_bound_ = encoded_frame->reference_time;
+ EXPECT_GT(upper_bound_, encoded_frame->reference_time);
++frames_received_;
}
private:
- const AudioCodec codec_;
+ const transport::AudioCodec codec_;
int frames_received_;
+ uint32 rtp_lower_bound_;
base::TimeTicks lower_bound_;
base::TimeTicks upper_bound_;
@@ -90,46 +95,48 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
public:
AudioEncoderTest() {
InitializeMediaLibraryForTesting();
- testing_clock_.Advance(
+ testing_clock_ = new base::SimpleTestTickClock();
+ testing_clock_->Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
+ task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
+ cast_environment_ =
+ new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_);
}
virtual ~AudioEncoderTest() {}
- void RunTestForCodec(AudioCodec codec) {
+ void RunTestForCodec(transport::AudioCodec codec) {
const TestScenario& scenario = GetParam();
- SCOPED_TRACE(::testing::Message()
- << "Durations: " << scenario.ToString());
+ SCOPED_TRACE(::testing::Message() << "Durations: " << scenario.ToString());
CreateObjectsForCodec(codec);
- receiver_->SetRecordedTimeLowerBound(testing_clock_.NowTicks());
+ // Note: In audio_encoder.cc, 10 ms is the fixed frame duration.
+ const base::TimeDelta frame_duration =
+ base::TimeDelta::FromMilliseconds(10);
+
for (size_t i = 0; i < scenario.num_durations; ++i) {
- const base::TimeDelta duration =
- base::TimeDelta::FromMilliseconds(scenario.durations_in_ms[i]);
- receiver_->SetRecordedTimeUpperBound(
- testing_clock_.NowTicks() + duration);
-
- const scoped_ptr<AudioBus> bus(
- audio_bus_factory_->NextAudioBus(duration));
-
- const int last_count = release_callback_count_;
- audio_encoder_->InsertAudio(
- bus.get(), testing_clock_.NowTicks(),
- base::Bind(&AudioEncoderTest::IncrementReleaseCallbackCounter,
- base::Unretained(this)));
- task_runner_->RunTasks();
- EXPECT_EQ(1, release_callback_count_ - last_count)
- << "Release callback was not invoked once.";
-
- testing_clock_.Advance(duration);
+ const bool simulate_missing_data = scenario.durations_in_ms[i] < 0;
+ const base::TimeDelta duration = base::TimeDelta::FromMilliseconds(
+ std::abs(scenario.durations_in_ms[i]));
+ receiver_->SetCaptureTimeBounds(
+ testing_clock_->NowTicks() - frame_duration,
+ testing_clock_->NowTicks() + duration);
+ if (simulate_missing_data) {
+ task_runner_->RunTasks();
+ testing_clock_->Advance(duration);
+ } else {
+ audio_encoder_->InsertAudio(audio_bus_factory_->NextAudioBus(duration),
+ testing_clock_->NowTicks());
+ task_runner_->RunTasks();
+ testing_clock_->Advance(duration);
+ }
}
DVLOG(1) << "Received " << receiver_->frames_received()
@@ -137,98 +144,101 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
}
private:
- void CreateObjectsForCodec(AudioCodec codec) {
+ void CreateObjectsForCodec(transport::AudioCodec codec) {
AudioSenderConfig audio_config;
audio_config.codec = codec;
audio_config.use_external_encoder = false;
audio_config.frequency = kDefaultAudioSamplingRate;
audio_config.channels = 2;
audio_config.bitrate = kDefaultAudioEncoderBitrate;
- audio_config.rtp_payload_type = 127;
+ audio_config.rtp_config.payload_type = 127;
- audio_bus_factory_.reset(new TestAudioBusFactory(
- audio_config.channels, audio_config.frequency,
- TestAudioBusFactory::kMiddleANoteFreq, 0.5f));
+ audio_bus_factory_.reset(
+ new TestAudioBusFactory(audio_config.channels,
+ audio_config.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq,
+ 0.5f));
receiver_.reset(new TestEncodedAudioFrameReceiver(codec));
- audio_encoder_ = new AudioEncoder(
- cast_environment_, audio_config,
+ audio_encoder_.reset(new AudioEncoder(
+ cast_environment_,
+ audio_config,
base::Bind(&TestEncodedAudioFrameReceiver::FrameEncoded,
- base::Unretained(receiver_.get())));
- release_callback_count_ = 0;
- }
-
- void IncrementReleaseCallbackCounter() {
- ++release_callback_count_;
+ base::Unretained(receiver_.get()))));
}
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
scoped_ptr<TestEncodedAudioFrameReceiver> receiver_;
- scoped_refptr<AudioEncoder> audio_encoder_;
+ scoped_ptr<AudioEncoder> audio_encoder_;
scoped_refptr<CastEnvironment> cast_environment_;
- int release_callback_count_;
DISALLOW_COPY_AND_ASSIGN(AudioEncoderTest);
};
-TEST_P(AudioEncoderTest, EncodeOpus) {
- RunTestForCodec(kOpus);
-}
-
-TEST_P(AudioEncoderTest, EncodePcm16) {
- RunTestForCodec(kPcm16);
-}
-
-static const int64 kOneCall_3Millis[] = { 3 };
-static const int64 kOneCall_10Millis[] = { 10 };
-static const int64 kOneCall_13Millis[] = { 13 };
-static const int64 kOneCall_20Millis[] = { 20 };
-
-static const int64 kTwoCalls_3Millis[] = { 3, 3 };
-static const int64 kTwoCalls_10Millis[] = { 10, 10 };
-static const int64 kTwoCalls_Mixed1[] = { 3, 10 };
-static const int64 kTwoCalls_Mixed2[] = { 10, 3 };
-static const int64 kTwoCalls_Mixed3[] = { 3, 17 };
-static const int64 kTwoCalls_Mixed4[] = { 17, 3 };
-
-static const int64 kManyCalls_3Millis[] =
- { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
-static const int64 kManyCalls_10Millis[] =
- { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 };
-static const int64 kManyCalls_Mixed1[] =
- { 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10 };
-static const int64 kManyCalls_Mixed2[] =
- { 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3 };
-static const int64 kManyCalls_Mixed3[] =
- { 3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4 };
-static const int64 kManyCalls_Mixed4[] =
- { 31, 4, 15, 9, 26, 53, 5, 8, 9, 7, 9, 32, 38, 4, 62, 64, 3 };
-static const int64 kManyCalls_Mixed5[] =
- { 3, 14, 15, 9, 26, 53, 58, 9, 7, 9, 3, 23, 8, 4, 6, 2, 6, 43 };
+TEST_P(AudioEncoderTest, EncodeOpus) { RunTestForCodec(transport::kOpus); }
+
+TEST_P(AudioEncoderTest, EncodePcm16) { RunTestForCodec(transport::kPcm16); }
+
+static const int64 kOneCall_3Millis[] = {3};
+static const int64 kOneCall_10Millis[] = {10};
+static const int64 kOneCall_13Millis[] = {13};
+static const int64 kOneCall_20Millis[] = {20};
+
+static const int64 kTwoCalls_3Millis[] = {3, 3};
+static const int64 kTwoCalls_10Millis[] = {10, 10};
+static const int64 kTwoCalls_Mixed1[] = {3, 10};
+static const int64 kTwoCalls_Mixed2[] = {10, 3};
+static const int64 kTwoCalls_Mixed3[] = {3, 17};
+static const int64 kTwoCalls_Mixed4[] = {17, 3};
+
+static const int64 kManyCalls_3Millis[] = {3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3};
+static const int64 kManyCalls_10Millis[] = {10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10};
+static const int64 kManyCalls_Mixed1[] = {3, 10, 3, 10, 3, 10, 3, 10, 3,
+ 10, 3, 10, 3, 10, 3, 10, 3, 10};
+static const int64 kManyCalls_Mixed2[] = {10, 3, 10, 3, 10, 3, 10, 3, 10, 3,
+ 10, 3, 10, 3, 10, 3, 10, 3, 10, 3};
+static const int64 kManyCalls_Mixed3[] = {3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8,
+ 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4};
+static const int64 kManyCalls_Mixed4[] = {31, 4, 15, 9, 26, 53, 5, 8, 9,
+ 7, 9, 32, 38, 4, 62, 64, 3};
+static const int64 kManyCalls_Mixed5[] = {3, 14, 15, 9, 26, 53, 58, 9, 7,
+ 9, 3, 23, 8, 4, 6, 2, 6, 43};
+
+static const int64 kOneBigUnderrun[] = {10, 10, 10, 10, -1000, 10, 10, 10};
+static const int64 kTwoBigUnderruns[] = {10, 10, 10, 10, -712, 10, 10, 10,
+ -1311, 10, 10, 10};
+static const int64 kMixedUnderruns[] = {31, -64, 4, 15, 9, 26, -53, 5, 8, -9,
+ 7, 9, 32, 38, -4, 62, -64, 3};
INSTANTIATE_TEST_CASE_P(
- AudioEncoderTestScenarios, AudioEncoderTest,
+ AudioEncoderTestScenarios,
+ AudioEncoderTest,
::testing::Values(
- TestScenario(kOneCall_3Millis, arraysize(kOneCall_3Millis)),
- TestScenario(kOneCall_10Millis, arraysize(kOneCall_10Millis)),
- TestScenario(kOneCall_13Millis, arraysize(kOneCall_13Millis)),
- TestScenario(kOneCall_20Millis, arraysize(kOneCall_20Millis)),
- TestScenario(kTwoCalls_3Millis, arraysize(kTwoCalls_3Millis)),
- TestScenario(kTwoCalls_10Millis, arraysize(kTwoCalls_10Millis)),
- TestScenario(kTwoCalls_Mixed1, arraysize(kTwoCalls_Mixed1)),
- TestScenario(kTwoCalls_Mixed2, arraysize(kTwoCalls_Mixed2)),
- TestScenario(kTwoCalls_Mixed3, arraysize(kTwoCalls_Mixed3)),
- TestScenario(kTwoCalls_Mixed4, arraysize(kTwoCalls_Mixed4)),
- TestScenario(kManyCalls_3Millis, arraysize(kManyCalls_3Millis)),
- TestScenario(kManyCalls_10Millis, arraysize(kManyCalls_10Millis)),
- TestScenario(kManyCalls_Mixed1, arraysize(kManyCalls_Mixed1)),
- TestScenario(kManyCalls_Mixed2, arraysize(kManyCalls_Mixed2)),
- TestScenario(kManyCalls_Mixed3, arraysize(kManyCalls_Mixed3)),
- TestScenario(kManyCalls_Mixed4, arraysize(kManyCalls_Mixed4)),
- TestScenario(kManyCalls_Mixed5, arraysize(kManyCalls_Mixed5))));
+ TestScenario(kOneCall_3Millis, arraysize(kOneCall_3Millis)),
+ TestScenario(kOneCall_10Millis, arraysize(kOneCall_10Millis)),
+ TestScenario(kOneCall_13Millis, arraysize(kOneCall_13Millis)),
+ TestScenario(kOneCall_20Millis, arraysize(kOneCall_20Millis)),
+ TestScenario(kTwoCalls_3Millis, arraysize(kTwoCalls_3Millis)),
+ TestScenario(kTwoCalls_10Millis, arraysize(kTwoCalls_10Millis)),
+ TestScenario(kTwoCalls_Mixed1, arraysize(kTwoCalls_Mixed1)),
+ TestScenario(kTwoCalls_Mixed2, arraysize(kTwoCalls_Mixed2)),
+ TestScenario(kTwoCalls_Mixed3, arraysize(kTwoCalls_Mixed3)),
+ TestScenario(kTwoCalls_Mixed4, arraysize(kTwoCalls_Mixed4)),
+ TestScenario(kManyCalls_3Millis, arraysize(kManyCalls_3Millis)),
+ TestScenario(kManyCalls_10Millis, arraysize(kManyCalls_10Millis)),
+ TestScenario(kManyCalls_Mixed1, arraysize(kManyCalls_Mixed1)),
+ TestScenario(kManyCalls_Mixed2, arraysize(kManyCalls_Mixed2)),
+ TestScenario(kManyCalls_Mixed3, arraysize(kManyCalls_Mixed3)),
+ TestScenario(kManyCalls_Mixed4, arraysize(kManyCalls_Mixed4)),
+ TestScenario(kManyCalls_Mixed5, arraysize(kManyCalls_Mixed5)),
+ TestScenario(kOneBigUnderrun, arraysize(kOneBigUnderrun)),
+ TestScenario(kTwoBigUnderruns, arraysize(kTwoBigUnderruns)),
+ TestScenario(kMixedUnderruns, arraysize(kMixedUnderruns))));
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_sender.cc b/chromium/media/cast/audio_sender/audio_sender.cc
index b1b177d3ec3..e56d634782c 100644
--- a/chromium/media/cast/audio_sender/audio_sender.cc
+++ b/chromium/media/cast/audio_sender/audio_sender.cc
@@ -7,203 +7,338 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "crypto/encryptor.h"
-#include "crypto/symmetric_key.h"
#include "media/cast/audio_sender/audio_encoder.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/net/rtp_sender/rtp_sender.h"
-#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/transport/cast_transport_config.h"
namespace media {
namespace cast {
+namespace {
-const int64 kMinSchedulingDelayMs = 1;
+const int kNumAggressiveReportsSentAtStart = 100;
+const int kMinSchedulingDelayMs = 1;
-class LocalRtcpAudioSenderFeedback : public RtcpSenderFeedback {
- public:
- explicit LocalRtcpAudioSenderFeedback(AudioSender* audio_sender)
- : audio_sender_(audio_sender) {
- }
+// TODO(miu): This should be specified in AudioSenderConfig, but currently it is
+// fixed to 100 FPS (i.e., 10 ms per frame), and AudioEncoder assumes this as
+// well.
+const int kAudioFrameRate = 100;
- virtual void OnReceivedCastFeedback(
- const RtcpCastMessage& cast_feedback) OVERRIDE {
- if (!cast_feedback.missing_frames_and_packets_.empty()) {
- audio_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
- }
- VLOG(1) << "Received audio ACK "
- << static_cast<int>(cast_feedback.ack_frame_id_);
- }
+// Helper function to compute the maximum number of unacked audio frames sent.
+int GetMaxUnackedFrames(base::TimeDelta target_delay) {
+ // As long as it doesn't go over |kMaxUnackedFrames|, it is okay to send more
+ // audio data than the target delay would suggest. Audio packets are tiny and
+ // the receiver has the ability to drop any one of the packets.
+ // We send up to three times the target delay's worth of audio frames.
+ int frames =
+ 1 + 3 * target_delay * kAudioFrameRate / base::TimeDelta::FromSeconds(1);
+ return std::min(kMaxUnackedFrames, frames);
+}
+} // namespace
- private:
- AudioSender* audio_sender_;
-};
+AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
+ const AudioSenderConfig& audio_config,
+ transport::CastTransportSender* const transport_sender)
+ : cast_environment_(cast_environment),
+ target_playout_delay_(base::TimeDelta::FromMilliseconds(
+ audio_config.rtp_config.max_delay_ms)),
+ transport_sender_(transport_sender),
+ max_unacked_frames_(GetMaxUnackedFrames(target_playout_delay_)),
+ configured_encoder_bitrate_(audio_config.bitrate),
+ rtcp_(cast_environment,
+ this,
+ transport_sender_,
+ NULL, // paced sender.
+ NULL,
+ audio_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
+ audio_config.rtp_config.ssrc,
+ audio_config.incoming_feedback_ssrc,
+ audio_config.rtcp_c_name,
+ AUDIO_EVENT),
+ rtp_timestamp_helper_(audio_config.frequency),
+ num_aggressive_rtcp_reports_sent_(0),
+ last_sent_frame_id_(0),
+ latest_acked_frame_id_(0),
+ duplicate_ack_counter_(0),
+ cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
+ weak_factory_(this) {
+ VLOG(1) << "max_unacked_frames " << max_unacked_frames_;
+ DCHECK_GT(max_unacked_frames_, 0);
-class LocalRtpSenderStatistics : public RtpSenderStatistics {
- public:
- explicit LocalRtpSenderStatistics(RtpSender* rtp_sender)
- : rtp_sender_(rtp_sender) {
+ if (!audio_config.use_external_encoder) {
+ audio_encoder_.reset(
+ new AudioEncoder(cast_environment,
+ audio_config,
+ base::Bind(&AudioSender::SendEncodedAudioFrame,
+ weak_factory_.GetWeakPtr())));
+ cast_initialization_status_ = audio_encoder_->InitializationResult();
+ } else {
+ NOTREACHED(); // No support for external audio encoding.
+ cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
}
- virtual void GetStatistics(const base::TimeTicks& now,
- RtcpSenderInfo* sender_info) OVERRIDE {
- rtp_sender_->RtpStatistics(now, sender_info);
- }
+ media::cast::transport::CastTransportAudioConfig transport_config;
+ transport_config.codec = audio_config.codec;
+ transport_config.rtp.config = audio_config.rtp_config;
+ transport_config.frequency = audio_config.frequency;
+ transport_config.channels = audio_config.channels;
+ transport_config.rtp.max_outstanding_frames = max_unacked_frames_;
+ transport_sender_->InitializeAudio(transport_config);
- private:
- RtpSender* rtp_sender_;
-};
+ rtcp_.SetCastReceiverEventHistorySize(kReceiverRtcpEventHistorySize);
-AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config,
- PacedPacketSender* const paced_packet_sender)
- : cast_environment_(cast_environment),
- rtp_sender_(cast_environment, &audio_config, NULL,
- paced_packet_sender),
- rtcp_feedback_(new LocalRtcpAudioSenderFeedback(this)),
- rtp_audio_sender_statistics_(
- new LocalRtpSenderStatistics(&rtp_sender_)),
- rtcp_(cast_environment,
- rtcp_feedback_.get(),
- paced_packet_sender,
- rtp_audio_sender_statistics_.get(),
- NULL,
- audio_config.rtcp_mode,
- base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
- audio_config.sender_ssrc,
- audio_config.incoming_feedback_ssrc,
- audio_config.rtcp_c_name),
- initialized_(false),
- weak_factory_(this) {
- if (audio_config.aes_iv_mask.size() == kAesKeySize &&
- audio_config.aes_key.size() == kAesKeySize) {
- iv_mask_ = audio_config.aes_iv_mask;
- crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
- crypto::SymmetricKey::AES, audio_config.aes_key);
- encryptor_.reset(new crypto::Encryptor());
- encryptor_->Init(key, crypto::Encryptor::CTR, std::string());
- } else if (audio_config.aes_iv_mask.size() != 0 ||
- audio_config.aes_key.size() != 0) {
- DCHECK(false) << "Invalid crypto configuration";
- }
- if (!audio_config.use_external_encoder) {
- audio_encoder_ = new AudioEncoder(
- cast_environment, audio_config,
- base::Bind(&AudioSender::SendEncodedAudioFrame,
- weak_factory_.GetWeakPtr()));
- }
+ memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
AudioSender::~AudioSender() {}
-void AudioSender::InitializeTimers() {
+void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (!initialized_) {
- initialized_ = true;
- ScheduleNextRtcpReport();
+ if (cast_initialization_status_ != STATUS_AUDIO_INITIALIZED) {
+ NOTREACHED();
+ return;
+ }
+ DCHECK(audio_encoder_.get()) << "Invalid internal state";
+
+ if (AreTooManyFramesInFlight()) {
+ VLOG(1) << "Dropping frame due to too many frames currently in-flight.";
+ return;
}
+
+ audio_encoder_->InsertAudio(audio_bus.Pass(), recorded_time);
}
-void AudioSender::InsertAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback) {
+void AudioSender::SendEncodedAudioFrame(
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_encoder_.get()) << "Invalid internal state";
- // TODO(mikhal): Resolve calculation of the audio rtp_timestamp for logging.
- // This is a tmp solution to allow the code to build.
- cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
- GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
- audio_encoder_->InsertAudio(audio_bus, recorded_time, done_callback);
+
+ const uint32 frame_id = encoded_frame->frame_id;
+
+ const bool is_first_frame_to_be_sent = last_send_time_.is_null();
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
+ last_sent_frame_id_ = frame_id;
+ // If this is the first frame about to be sent, fake the value of
+ // |latest_acked_frame_id_| to indicate the receiver starts out all caught up.
+ // Also, schedule the periodic frame re-send checks.
+ if (is_first_frame_to_be_sent) {
+ latest_acked_frame_id_ = frame_id - 1;
+ ScheduleNextResendCheck();
+ }
+
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ last_send_time_, FRAME_ENCODED, AUDIO_EVENT, encoded_frame->rtp_timestamp,
+ frame_id, static_cast<int>(encoded_frame->data.size()),
+ encoded_frame->dependency == transport::EncodedFrame::KEY,
+ configured_encoder_bitrate_);
+ // Only use lowest 8 bits as key.
+ frame_id_to_rtp_timestamp_[frame_id & 0xff] = encoded_frame->rtp_timestamp;
+
+ DCHECK(!encoded_frame->reference_time.is_null());
+ rtp_timestamp_helper_.StoreLatestTime(encoded_frame->reference_time,
+ encoded_frame->rtp_timestamp);
+
+ // At the start of the session, it's important to send reports before each
+ // frame so that the receiver can properly compute playout times. More than
+ // one report is sent because transmission is not guaranteed, only best
+ // effort, so we send enough that one should almost certainly get through.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ // SendRtcpReport() will schedule future reports to be made if this is the
+ // last "aggressive report."
+ ++num_aggressive_rtcp_reports_sent_;
+ const bool is_last_aggressive_report =
+ (num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
+ VLOG_IF(1, is_last_aggressive_report) << "Sending last aggressive report.";
+ SendRtcpReport(is_last_aggressive_report);
+ }
+
+ transport_sender_->InsertCodedAudioFrame(*encoded_frame);
+}
+
+void AudioSender::IncomingRtcpPacket(scoped_ptr<Packet> packet) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
}
-void AudioSender::InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) {
+void AudioSender::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(audio_encoder_.get() == NULL) << "Invalid internal state";
+ base::TimeDelta time_to_next =
+ rtcp_.TimeToSendNextRtcpReport() - cast_environment_->Clock()->NowTicks();
- cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
- GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+ time_to_next = std::max(
+ time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- if (encryptor_) {
- EncodedAudioFrame encrypted_frame;
- if (!EncryptAudioFrame(*audio_frame, &encrypted_frame)) {
- // Logging already done.
- return;
- }
- rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioSender::SendRtcpReport,
+ weak_factory_.GetWeakPtr(),
+ true),
+ time_to_next);
+}
+
+void AudioSender::SendRtcpReport(bool schedule_future_reports) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ uint32 now_as_rtp_timestamp = 0;
+ if (rtp_timestamp_helper_.GetCurrentTimeAsRtpTimestamp(
+ now, &now_as_rtp_timestamp)) {
+ rtcp_.SendRtcpFromRtpSender(now, now_as_rtp_timestamp);
} else {
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ // |rtp_timestamp_helper_| should have stored a mapping by this point.
+ NOTREACHED();
}
- callback.Run();
+ if (schedule_future_reports)
+ ScheduleNextRtcpReport();
}
-void AudioSender::SendEncodedAudioFrame(
- scoped_ptr<EncodedAudioFrame> audio_frame,
- const base::TimeTicks& recorded_time) {
+void AudioSender::ScheduleNextResendCheck() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- InitializeTimers();
- if (encryptor_) {
- EncodedAudioFrame encrypted_frame;
- if (!EncryptAudioFrame(*audio_frame.get(), &encrypted_frame)) {
- // Logging already done.
- return;
+ DCHECK(!last_send_time_.is_null());
+ base::TimeDelta time_to_next =
+ last_send_time_ - cast_environment_->Clock()->NowTicks() +
+ target_playout_delay_;
+ time_to_next = std::max(
+ time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioSender::ResendCheck, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void AudioSender::ResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!last_send_time_.is_null());
+ const base::TimeDelta time_since_last_send =
+ cast_environment_->Clock()->NowTicks() - last_send_time_;
+ if (time_since_last_send > target_playout_delay_) {
+ if (latest_acked_frame_id_ == last_sent_frame_id_) {
+ // Last frame acked; no point in doing anything.
+ } else {
+ VLOG(1) << "ACK timeout; last acked frame: " << latest_acked_frame_id_;
+ ResendForKickstart();
}
- rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
- } else {
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
}
+ ScheduleNextResendCheck();
}
-bool AudioSender::EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
- EncodedAudioFrame* encrypted_frame) {
- DCHECK(encryptor_) << "Invalid state";
+void AudioSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (!encryptor_->SetCounter(GetAesNonce(audio_frame.frame_id, iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
- return false;
+ if (rtcp_.is_rtt_available()) {
+ // Having the RTT values implies the receiver sent back a receiver report
+ // based on it having received a report from here. Therefore, ensure this
+ // sender stops aggressively sending reports.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ VLOG(1) << "No longer a need to send reports aggressively (sent "
+ << num_aggressive_rtcp_reports_sent_ << ").";
+ num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
+ ScheduleNextRtcpReport();
+ }
}
- if (!encryptor_->Encrypt(audio_frame.data, &encrypted_frame->data)) {
- NOTREACHED() << "Encrypt error";
- return false;
+
+ if (last_send_time_.is_null())
+ return; // Cannot get an ACK without having first sent a frame.
+
+ if (cast_feedback.missing_frames_and_packets_.empty()) {
+ // We only count duplicate ACKs when we have sent newer frames.
+ if (latest_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+ latest_acked_frame_id_ != last_sent_frame_id_) {
+ duplicate_ack_counter_++;
+ } else {
+ duplicate_ack_counter_ = 0;
+ }
+ // TODO(miu): The values "2" and "3" should be derived from configuration.
+ if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
+ VLOG(1) << "Received duplicate ACK for frame " << latest_acked_frame_id_;
+ ResendForKickstart();
+ }
+ } else {
+ // Only count duplicate ACKs if there is no NACK request in between.
+ // This is to avoid aggressive resends.
+ duplicate_ack_counter_ = 0;
+
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ rtcp_.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt);
+
+ // A NACK is also used to cancel pending re-transmissions.
+ transport_sender_->ResendPackets(
+ true, cast_feedback.missing_frames_and_packets_, false, min_rtt);
}
- encrypted_frame->codec = audio_frame.codec;
- encrypted_frame->frame_id = audio_frame.frame_id;
- encrypted_frame->samples = audio_frame.samples;
- return true;
-}
-void AudioSender::ResendPackets(
- const MissingFramesAndPacketsMap& missing_frames_and_packets) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtp_sender_.ResendPackets(missing_frames_and_packets);
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+
+ const RtpTimestamp rtp_timestamp =
+ frame_id_to_rtp_timestamp_[cast_feedback.ack_frame_id_ & 0xff];
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_ACK_RECEIVED,
+ AUDIO_EVENT,
+ rtp_timestamp,
+ cast_feedback.ack_frame_id_);
+
+ const bool is_acked_out_of_order =
+ static_cast<int32>(cast_feedback.ack_frame_id_ -
+ latest_acked_frame_id_) < 0;
+ VLOG(2) << "Received ACK" << (is_acked_out_of_order ? " out-of-order" : "")
+ << " for frame " << cast_feedback.ack_frame_id_;
+ if (!is_acked_out_of_order) {
+ // Cancel resends of acked frames.
+ MissingFramesAndPacketsMap missing_frames_and_packets;
+ PacketIdSet missing;
+ while (latest_acked_frame_id_ != cast_feedback.ack_frame_id_) {
+ latest_acked_frame_id_++;
+ missing_frames_and_packets[latest_acked_frame_id_] = missing;
+ }
+ transport_sender_->ResendPackets(
+ true, missing_frames_and_packets, true, base::TimeDelta());
+ latest_acked_frame_id_ = cast_feedback.ack_frame_id_;
+ }
}
-void AudioSender::IncomingRtcpPacket(const uint8* packet, size_t length,
- const base::Closure callback) {
+bool AudioSender::AreTooManyFramesInFlight() const {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtcp_.IncomingRtcpPacket(packet, length);
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+ int frames_in_flight = 0;
+ if (!last_send_time_.is_null()) {
+ frames_in_flight +=
+ static_cast<int32>(last_sent_frame_id_ - latest_acked_frame_id_);
+ }
+ VLOG(2) << frames_in_flight
+ << " frames in flight; last sent: " << last_sent_frame_id_
+ << " latest acked: " << latest_acked_frame_id_;
+ return frames_in_flight >= max_unacked_frames_;
}
-void AudioSender::ScheduleNextRtcpReport() {
+void AudioSender::ResendForKickstart() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next =
- rtcp_.TimeToSendNextRtcpReport() - cast_environment_->Clock()->NowTicks();
-
- time_to_next = std::max(time_to_next,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ DCHECK(!last_send_time_.is_null());
+ VLOG(1) << "Resending last packet of frame " << last_sent_frame_id_
+ << " to kick-start.";
+ // Send the first packet of the last encoded frame to kick-start
+ // retransmission. This tells the receiver which packets and frames are
+ // missing.
+ MissingFramesAndPacketsMap missing_frames_and_packets;
+ PacketIdSet missing;
+ missing.insert(kRtcpCastLastPacket);
+ missing_frames_and_packets.insert(
+ std::make_pair(last_sent_frame_id_, missing));
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
- time_to_next);
-}
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ rtcp_.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt);
-void AudioSender::SendRtcpReport() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // We don't send audio logging messages since all captured audio frames will
- // be sent.
- rtcp_.SendRtcpFromRtpSender(NULL);
- ScheduleNextRtcpReport();
+ // Sending this extra packet is to kick-start the session. There is
+ // no need to optimize re-transmission for this case.
+ transport_sender_->ResendPackets(
+ true, missing_frames_and_packets, false, min_rtt);
}
} // namespace cast
diff --git a/chromium/media/cast/audio_sender/audio_sender.gypi b/chromium/media/cast/audio_sender/audio_sender.gypi
deleted file mode 100644
index 9d84b79af8d..00000000000
--- a/chromium/media/cast/audio_sender/audio_sender.gypi
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'audio_sender',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- ],
- 'sources': [
- 'audio_encoder.h',
- 'audio_encoder.cc',
- 'audio_sender.h',
- 'audio_sender.cc',
- ], # source
- 'dependencies': [
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- '<(DEPTH)/media/media.gyp:media',
- '<(DEPTH)/media/media.gyp:shared_memory_support',
- '<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
- '<(DEPTH)/media/cast/net/rtp_sender/rtp_sender.gyp:*',
- '<(DEPTH)/third_party/opus/opus.gyp:opus',
- ],
- },
- ],
-}
-
-
diff --git a/chromium/media/cast/audio_sender/audio_sender.h b/chromium/media/cast/audio_sender/audio_sender.h
index 68f9e7a4172..80cf8a4e9e9 100644
--- a/chromium/media/cast/audio_sender/audio_sender.h
+++ b/chromium/media/cast/audio_sender/audio_sender.h
@@ -12,89 +12,146 @@
#include "base/threading/non_thread_safe.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/net/rtp_sender/rtp_sender.h"
+#include "media/cast/logging/logging_defines.h"
#include "media/cast/rtcp/rtcp.h"
-
-namespace crypto {
- class Encryptor;
-}
-
-namespace media {
-class AudioBus;
-}
+#include "media/cast/rtp_timestamp_helper.h"
namespace media {
namespace cast {
class AudioEncoder;
-class LocalRtcpAudioSenderFeedback;
-class LocalRtpSenderStatistics;
-class PacedPacketSender;
-// This class is not thread safe.
-// It's only called from the main cast thread.
-class AudioSender : public base::NonThreadSafe,
+// Not thread safe. Only called from the main cast thread.
+// This class owns all objects related to sending audio: the objects that
+// create RTP packets, congestion control, the audio encoder, and the parsing
+// and sending of RTCP packets.
+// Additionally, it posts a number of delayed tasks to the main thread for
+// various timeouts.
+class AudioSender : public RtcpSenderFeedback,
+ public base::NonThreadSafe,
public base::SupportsWeakPtr<AudioSender> {
public:
AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
- PacedPacketSender* const paced_packet_sender);
+ transport::CastTransportSender* const transport_sender);
virtual ~AudioSender();
- // The |audio_bus| must be valid until the |done_callback| is called.
- // The callback is called from the main cast thread as soon as the encoder is
- // done with |audio_bus|; it does not mean that the encoded data has been
- // sent out.
- void InsertAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback);
-
- // The audio_frame must be valid until the closure callback is called.
- // The closure callback is called from the main cast thread as soon as
- // the cast sender is done with the frame; it does not mean that the encoded
- // frame has been sent out.
- void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback);
+ CastInitializationStatus InitializationResult() const {
+ return cast_initialization_status_;
+ }
+
+ // Note: It is not guaranteed that |audio_bus| will actually be encoded and
+ // sent, if AudioSender detects too many frames in flight. Therefore, clients
+ // should be careful about the rate at which this method is called.
+ //
+ // Note: It is invalid to call this method if InitializationResult() returns
+ // anything but STATUS_AUDIO_INITIALIZED.
+ void InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time);
// Only called from the main cast thread.
- void IncomingRtcpPacket(const uint8* packet, size_t length,
- const base::Closure callback);
+ void IncomingRtcpPacket(scoped_ptr<Packet> packet);
protected:
- void SendEncodedAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
- const base::TimeTicks& recorded_time);
+ // Protected for testability.
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
+ OVERRIDE;
private:
- friend class LocalRtcpAudioSenderFeedback;
+ // Schedule and execute periodic sending of RTCP report.
+ void ScheduleNextRtcpReport();
+ void SendRtcpReport(bool schedule_future_reports);
+
+ // Schedule and execute periodic checks for re-sending packets. If no
+ // acknowledgements have been received for "too long," AudioSender will
+ // speculatively re-send certain packets of an unacked frame to kick-start
+ // re-transmission. This is a last resort tactic to prevent the session from
+ // getting stuck after a long outage.
+ void ScheduleNextResendCheck();
+ void ResendCheck();
+ void ResendForKickstart();
+
+ // Returns true if there are too many frames in flight, as defined by the
+ // configured target playout delay plus simple logic. When this is true,
+ // InsertAudio() will silently drop frames instead of sending them to the audio
+ // encoder.
+ bool AreTooManyFramesInFlight() const;
+
+ // Called by the |audio_encoder_| with the next EncodedFrame to send.
+ void SendEncodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ const base::TimeDelta target_playout_delay_;
+
+ // Sends encoded frames over the configured transport (e.g., UDP). In
+ // Chromium, this could be a proxy that first sends the frames from a renderer
+ // process to the browser process over IPC, with the browser process being
+ // responsible for "packetizing" the frames and pushing packets into the
+ // network layer.
+ transport::CastTransportSender* const transport_sender_;
+
+ // Maximum number of outstanding frames before the encoding and sending of
+ // new frames shall halt.
+ const int max_unacked_frames_;
+
+ // Encodes AudioBuses into EncodedFrames.
+ scoped_ptr<AudioEncoder> audio_encoder_;
+ const int configured_encoder_bitrate_;
+
+ // Manages sending/receiving of RTCP packets, including sender/receiver
+ // reports.
+ Rtcp rtcp_;
- void ResendPackets(
- const MissingFramesAndPacketsMap& missing_frames_and_packets);
+ // Records lip-sync (i.e., mapping of RTP <--> NTP timestamps), and
+ // extrapolates this mapping to any other point in time.
+ RtpTimestampHelper rtp_timestamp_helper_;
- // Caller must allocate the destination |encrypted_frame|. The data member
- // will be resized to hold the encrypted size.
- bool EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
- EncodedAudioFrame* encrypted_frame);
+ // Counts how many RTCP reports are being "aggressively" sent (i.e., one per
+ // frame) at the start of the session. Once a threshold is reached, RTCP
+ // reports are instead sent at the configured interval + random drift.
+ int num_aggressive_rtcp_reports_sent_;
- void ScheduleNextRtcpReport();
- void SendRtcpReport();
+ // This is "null" until the first frame is sent. Thereafter, this tracks the
+ // last time any frame was sent or re-sent.
+ base::TimeTicks last_send_time_;
- void InitializeTimers();
+ // The ID of the last frame sent. Logic throughout AudioSender assumes this
+  // can safely wrap around. This member is invalid until
+ // |!last_send_time_.is_null()|.
+ uint32 last_sent_frame_id_;
- base::WeakPtrFactory<AudioSender> weak_factory_;
+ // The ID of the latest (not necessarily the last) frame that has been
+  // acknowledged. Logic throughout AudioSender assumes this can safely
+  // wrap around. This member is invalid until |!last_send_time_.is_null()|.
+ uint32 latest_acked_frame_id_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<AudioEncoder> audio_encoder_;
- RtpSender rtp_sender_;
- scoped_ptr<LocalRtpSenderStatistics> rtp_audio_sender_statistics_;
- scoped_ptr<LocalRtcpAudioSenderFeedback> rtcp_feedback_;
- Rtcp rtcp_;
- bool initialized_;
- scoped_ptr<crypto::Encryptor> encryptor_;
- std::string iv_mask_;
+  // Counts the number of duplicate ACKs that are being received. When this
+ // number reaches a threshold, the sender will take this as a sign that the
+ // receiver hasn't yet received the first packet of the next frame. In this
+ // case, AudioSender will trigger a re-send of the next frame.
+ int duplicate_ack_counter_;
+
+ // If this sender is ready for use, this is STATUS_AUDIO_INITIALIZED.
+ CastInitializationStatus cast_initialization_status_;
+
+ // This is a "good enough" mapping for finding the RTP timestamp associated
+ // with a video frame. The key is the lowest 8 bits of frame id (which is
+ // what is sent via RTCP). This map is used for logging purposes.
+ RtpTimestamp frame_id_to_rtp_timestamp_[256];
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<AudioSender> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(AudioSender);
};
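The in-flight accounting described in the new header reduces to wrap-around
arithmetic on 32-bit frame IDs. A minimal sketch of the idea, using the member
names from the header (the function below is illustrative only and not part of
the patch):

    // Sketch: too-many-frames-in-flight check via wrap-around-safe
    // subtraction of frame IDs. |max_unacked_frames| plays the role of the
    // max_unacked_frames_ member above.
    bool TooManyFramesInFlightSketch(uint32 last_sent_frame_id,
                                     uint32 latest_acked_frame_id,
                                     int max_unacked_frames) {
      // Unsigned subtraction yields the correct distance even across a
      // 0xffffffff -> 0x00000000 wrap.
      const uint32 frames_in_flight =
          last_sent_frame_id - latest_acked_frame_id;
      return static_cast<int>(frames_in_flight) > max_unacked_frames;
    }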
diff --git a/chromium/media/cast/audio_sender/audio_sender_unittest.cc b/chromium/media/cast/audio_sender/audio_sender_unittest.cc
index 65c2e622d8f..51edd496028 100644
--- a/chromium/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/chromium/media/cast/audio_sender/audio_sender_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
@@ -10,92 +12,129 @@
#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
-#include "media/cast/test/audio_utility.h"
-#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_sender_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+class TestPacketSender : public transport::PacketSender {
+ public:
+ TestPacketSender() : number_of_rtp_packets_(0), number_of_rtcp_packets_(0) {}
+
+ virtual bool SendPacket(transport::PacketRef packet,
+ const base::Closure& cb) OVERRIDE {
+ if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
+ ++number_of_rtcp_packets_;
+ } else {
+ // Check that at least one RTCP packet was sent before the first RTP
+ // packet. This confirms that the receiver will have the necessary lip
+ // sync info before it has to calculate the playout time of the first
+ // frame.
+ if (number_of_rtp_packets_ == 0)
+ EXPECT_LE(1, number_of_rtcp_packets_);
+ ++number_of_rtp_packets_;
+ }
+ return true;
+ }
+
+ int number_of_rtp_packets() const { return number_of_rtp_packets_; }
+
+ int number_of_rtcp_packets() const { return number_of_rtcp_packets_; }
-using testing::_;
-using testing::AtLeast;
+ private:
+ int number_of_rtp_packets_;
+ int number_of_rtcp_packets_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestPacketSender);
+};
class AudioSenderTest : public ::testing::Test {
protected:
AudioSenderTest() {
InitializeMediaLibraryForTesting();
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- }
-
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
- audio_config_.codec = kOpus;
+ testing_clock_ = new base::SimpleTestTickClock();
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
+ cast_environment_ =
+ new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_);
+ audio_config_.codec = transport::kOpus;
audio_config_.use_external_encoder = false;
audio_config_.frequency = kDefaultAudioSamplingRate;
audio_config_.channels = 2;
audio_config_.bitrate = kDefaultAudioEncoderBitrate;
- audio_config_.rtp_payload_type = 127;
-
- audio_sender_.reset(
- new AudioSender(cast_environment_, audio_config_, &mock_transport_));
+ audio_config_.rtp_config.payload_type = 127;
+
+ net::IPEndPoint dummy_endpoint;
+
+ transport_sender_.reset(new transport::CastTransportSenderImpl(
+ NULL,
+ testing_clock_,
+ dummy_endpoint,
+ base::Bind(&UpdateCastTransportStatus),
+ transport::BulkRawEventsCallback(),
+ base::TimeDelta(),
+ task_runner_,
+ &transport_));
+ audio_sender_.reset(new AudioSender(
+ cast_environment_, audio_config_, transport_sender_.get()));
+ task_runner_->RunTasks();
}
virtual ~AudioSenderTest() {}
- base::SimpleTestTickClock testing_clock_;
- MockPacedPacketSender mock_transport_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
+ static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ EXPECT_EQ(transport::TRANSPORT_AUDIO_INITIALIZED, status);
+ }
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ TestPacketSender transport_;
+ scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_ptr<AudioSender> audio_sender_;
scoped_refptr<CastEnvironment> cast_environment_;
AudioSenderConfig audio_config_;
};
TEST_F(AudioSenderTest, Encode20ms) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
-
const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(20);
- scoped_ptr<AudioBus> bus(TestAudioBusFactory(
- audio_config_.channels, audio_config_.frequency,
- TestAudioBusFactory::kMiddleANoteFreq, 0.5f).NextAudioBus(kDuration));
-
- base::TimeTicks recorded_time = base::TimeTicks::Now();
- audio_sender_->InsertAudio(
- bus.get(), recorded_time,
- base::Bind(base::IgnoreResult(&scoped_ptr<AudioBus>::release),
- base::Unretained(&bus)));
- task_runner_->RunTasks();
+ scoped_ptr<AudioBus> bus(
+ TestAudioBusFactory(audio_config_.channels,
+ audio_config_.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq,
+ 0.5f).NextAudioBus(kDuration));
- EXPECT_TRUE(!bus) << "AudioBus wasn't released after use.";
+ audio_sender_->InsertAudio(bus.Pass(), testing_clock_->NowTicks());
+ task_runner_->RunTasks();
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
TEST_F(AudioSenderTest, RtcpTimer) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
-
const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(20);
- scoped_ptr<AudioBus> bus(TestAudioBusFactory(
- audio_config_.channels, audio_config_.frequency,
- TestAudioBusFactory::kMiddleANoteFreq, 0.5f).NextAudioBus(kDuration));
-
- base::TimeTicks recorded_time = base::TimeTicks::Now();
- audio_sender_->InsertAudio(
- bus.get(), recorded_time,
- base::Bind(base::IgnoreResult(&scoped_ptr<AudioBus>::release),
- base::Unretained(&bus)));
+ scoped_ptr<AudioBus> bus(
+ TestAudioBusFactory(audio_config_.channels,
+ audio_config_.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq,
+ 0.5f).NextAudioBus(kDuration));
+
+ audio_sender_->InsertAudio(bus.Pass(), testing_clock_->NowTicks());
task_runner_->RunTasks();
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
- testing_clock_.Advance(max_rtcp_timeout);
+ testing_clock_->Advance(max_rtcp_timeout);
task_runner_->RunTasks();
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
} // namespace cast
diff --git a/chromium/media/cast/base/clock_drift_smoother.cc b/chromium/media/cast/base/clock_drift_smoother.cc
new file mode 100644
index 00000000000..ca0380533ee
--- /dev/null
+++ b/chromium/media/cast/base/clock_drift_smoother.cc
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/base/clock_drift_smoother.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+ClockDriftSmoother::ClockDriftSmoother(base::TimeDelta time_constant)
+ : time_constant_(time_constant),
+ estimate_us_(0.0) {
+ DCHECK(time_constant_ > base::TimeDelta());
+}
+
+ClockDriftSmoother::~ClockDriftSmoother() {}
+
+base::TimeDelta ClockDriftSmoother::Current() const {
+ DCHECK(!last_update_time_.is_null());
+ return base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(estimate_us_ + 0.5)); // Round to nearest microsecond.
+}
+
+void ClockDriftSmoother::Reset(base::TimeTicks now,
+ base::TimeDelta measured_offset) {
+ DCHECK(!now.is_null());
+ last_update_time_ = now;
+ estimate_us_ = measured_offset.InMicroseconds();
+}
+
+void ClockDriftSmoother::Update(base::TimeTicks now,
+ base::TimeDelta measured_offset) {
+ DCHECK(!now.is_null());
+ if (last_update_time_.is_null()) {
+ Reset(now, measured_offset);
+ } else if (now < last_update_time_) {
+ // |now| is not monotonically non-decreasing.
+ NOTREACHED();
+ } else {
+ const double elapsed_us = (now - last_update_time_).InMicroseconds();
+ last_update_time_ = now;
+ const double weight =
+ elapsed_us / (elapsed_us + time_constant_.InMicroseconds());
+ estimate_us_ = weight * measured_offset.InMicroseconds() +
+ (1.0 - weight) * estimate_us_;
+ }
+}
+
+// static
+base::TimeDelta ClockDriftSmoother::GetDefaultTimeConstant() {
+ static const int kDefaultTimeConstantInSeconds = 30;
+ return base::TimeDelta::FromSeconds(kDefaultTimeConstantInSeconds);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/base/clock_drift_smoother.h b/chromium/media/cast/base/clock_drift_smoother.h
new file mode 100644
index 00000000000..67de4cb51a8
--- /dev/null
+++ b/chromium/media/cast/base/clock_drift_smoother.h
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
+#define MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
+
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// Tracks the jitter and drift between clocks, providing a smoothed offset.
+// Internally, a simple IIR filter is used to maintain a running average that
+// moves at a rate based on the passage of time.
+class ClockDriftSmoother {
+ public:
+ // |time_constant| is the amount of time an impulse signal takes to decay by
+  // ~63.2% (i.e., by 1 - 1/e). Interpretation: If the value passed to several
+  // Update() calls is held constant for T seconds, then the running average
+  // will have moved towards the value by ~63.2% from where it started.
+ explicit ClockDriftSmoother(base::TimeDelta time_constant);
+ ~ClockDriftSmoother();
+
+ // Returns the current offset.
+ base::TimeDelta Current() const;
+
+  // Discard all history and reset to exactly |offset|, measured at |now|.
+ void Reset(base::TimeTicks now, base::TimeDelta offset);
+
+  // Update the current offset, which was measured at |now|. The weighting that
+ // |measured_offset| will have on the running average is influenced by how
+ // much time has passed since the last call to this method (or Reset()).
+ // |now| should be monotonically non-decreasing over successive calls of this
+ // method.
+ void Update(base::TimeTicks now, base::TimeDelta measured_offset);
+
+ // Returns a time constant suitable for most use cases, where the clocks
+ // are expected to drift very little with respect to each other, and the
+ // jitter caused by clock imprecision is effectively canceled out.
+ static base::TimeDelta GetDefaultTimeConstant();
+
+ private:
+ const base::TimeDelta time_constant_;
+ base::TimeTicks last_update_time_;
+ double estimate_us_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
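The smoother is a standard exponentially-weighted moving average whose weight
grows with the time elapsed between updates. A short usage sketch with made-up
offset values (the 30 s gap equals the default time constant, so the weight
works out to 30 / (30 + 30) = 0.5):

    // Sketch: feeding measured clock offsets into ClockDriftSmoother.
    media::cast::ClockDriftSmoother smoother(
        media::cast::ClockDriftSmoother::GetDefaultTimeConstant());
    base::TimeTicks now = base::TimeTicks::Now();
    // The first Update() resets the estimate to exactly 100 ms.
    smoother.Update(now, base::TimeDelta::FromMilliseconds(100));
    now += base::TimeDelta::FromSeconds(30);
    smoother.Update(now, base::TimeDelta::FromMilliseconds(200));
    // estimate = 0.5 * 200 ms + 0.5 * 100 ms, so Current() is ~150 ms.
    base::TimeDelta smoothed = smoother.Current();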
diff --git a/chromium/media/cast/cast.gyp b/chromium/media/cast/cast.gyp
index 702272fb289..5de8796079a 100644
--- a/chromium/media/cast/cast.gyp
+++ b/chromium/media/cast/cast.gyp
@@ -7,15 +7,25 @@
'include_tests%': 1,
'chromium_code': 1,
},
+ 'conditions': [
+ ['include_tests==1', {
+ 'includes': [ 'cast_testing.gypi' ]
+ }],
+ ],
'targets': [
{
- 'target_name': 'cast_config',
+ 'target_name': 'cast_base',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
],
'dependencies': [
+ 'cast_logging_proto',
'<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/net/net.gyp:net',
+ ],
+ 'export_dependent_settings': [
+ 'cast_logging_proto',
],
'sources': [
'cast_config.cc',
@@ -23,136 +33,193 @@
'cast_defines.h',
'cast_environment.cc',
'cast_environment.h',
+ 'base/clock_drift_smoother.cc',
+ 'base/clock_drift_smoother.h',
+ 'logging/encoding_event_subscriber.cc',
+ 'logging/encoding_event_subscriber.h',
+ 'logging/log_deserializer.cc',
+ 'logging/log_deserializer.h',
+ 'logging/log_serializer.cc',
+ 'logging/log_serializer.h',
'logging/logging_defines.cc',
'logging/logging_defines.h',
'logging/logging_impl.cc',
'logging/logging_impl.h',
'logging/logging_raw.cc',
'logging/logging_raw.h',
- 'logging/logging_stats.cc',
- 'logging/logging_stats.h',
+ 'logging/raw_event_subscriber.h',
+ 'logging/raw_event_subscriber_bundle.cc',
+ 'logging/raw_event_subscriber_bundle.h',
+ 'logging/receiver_time_offset_estimator.h',
+ 'logging/receiver_time_offset_estimator_impl.cc',
+ 'logging/receiver_time_offset_estimator_impl.h',
+ 'logging/simple_event_subscriber.cc',
+ 'logging/simple_event_subscriber.h',
+ 'logging/stats_event_subscriber.cc',
+ 'logging/stats_event_subscriber.h',
+ 'rtp_timestamp_helper.cc',
+ 'rtp_timestamp_helper.h',
+ 'transport/cast_transport_config.cc',
+ 'transport/cast_transport_config.h',
+ 'transport/cast_transport_defines.h',
+ 'transport/cast_transport_sender.h',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_logging_proto',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'logging/proto/proto_utils.cc',
+ 'logging/proto/raw_events.proto',
+ ],
+ 'variables': {
+ 'proto_in_dir': 'logging/proto',
+ 'proto_out_dir': 'media/cast/logging/proto',
+ },
+ 'includes': ['../../build/protoc.gypi'],
+ },
+ {
+ 'target_name': 'cast_receiver',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_rtcp',
+ 'cast_transport',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
+ ],
+ 'sources': [
+ 'cast_receiver.h',
+ 'framer/cast_message_builder.cc',
+ 'framer/cast_message_builder.h',
+ 'framer/frame_buffer.cc',
+ 'framer/frame_buffer.h',
+ 'framer/frame_id_map.cc',
+ 'framer/frame_id_map.h',
+ 'framer/framer.cc',
+ 'framer/framer.h',
+ 'receiver/audio_decoder.cc',
+ 'receiver/audio_decoder.h',
+ 'receiver/cast_receiver_impl.cc',
+ 'receiver/cast_receiver_impl.h',
+ 'receiver/frame_receiver.cc',
+ 'receiver/frame_receiver.h',
+ 'receiver/video_decoder.cc',
+ 'receiver/video_decoder.h',
+ 'rtp_receiver/receiver_stats.cc',
+ 'rtp_receiver/receiver_stats.h',
+ 'rtp_receiver/rtp_receiver_defines.cc',
+ 'rtp_receiver/rtp_receiver_defines.h',
+ 'rtp_receiver/rtp_parser/rtp_parser.cc',
+ 'rtp_receiver/rtp_parser/rtp_parser.h',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_rtcp',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_transport',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/net/net.gyp:net',
+ ],
+ 'sources': [
+ 'rtcp/rtcp_defines.cc',
+ 'rtcp/rtcp_defines.h',
+ 'rtcp/rtcp.h',
+ 'rtcp/rtcp.cc',
+ 'rtcp/rtcp_receiver.cc',
+ 'rtcp/rtcp_receiver.h',
+ 'rtcp/rtcp_sender.cc',
+ 'rtcp/rtcp_sender.h',
+ 'rtcp/rtcp_utility.cc',
+ 'rtcp/rtcp_utility.h',
+ 'rtcp/receiver_rtcp_event_subscriber.cc',
+        'rtcp/receiver_rtcp_event_subscriber.h',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_sender',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_rtcp',
+ 'cast_transport',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ ], # dependencies
+ 'sources': [
+ 'audio_sender/audio_encoder.h',
+ 'audio_sender/audio_encoder.cc',
+ 'audio_sender/audio_sender.h',
+ 'audio_sender/audio_sender.cc',
+ 'cast_sender.h',
+ 'cast_sender_impl.cc',
+ 'cast_sender_impl.h',
+ 'congestion_control/congestion_control.h',
+ 'congestion_control/congestion_control.cc',
+ 'video_sender/codecs/vp8/vp8_encoder.cc',
+ 'video_sender/codecs/vp8/vp8_encoder.h',
+ 'video_sender/external_video_encoder.h',
+ 'video_sender/external_video_encoder.cc',
+ 'video_sender/fake_software_video_encoder.h',
+ 'video_sender/fake_software_video_encoder.cc',
+ 'video_sender/software_video_encoder.h',
+ 'video_sender/video_encoder.h',
+ 'video_sender/video_encoder_impl.h',
+ 'video_sender/video_encoder_impl.cc',
+ 'video_sender/video_sender.h',
+ 'video_sender/video_sender.cc',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_transport',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
+ '<(DEPTH)/net/net.gyp:net',
+ ],
+ 'sources': [
+ 'transport/cast_transport_sender_impl.cc',
+ 'transport/cast_transport_sender_impl.h',
+ 'transport/pacing/paced_sender.cc',
+ 'transport/pacing/paced_sender.h',
+ 'transport/rtcp/rtcp_builder.cc',
+ 'transport/rtcp/rtcp_builder.h',
+ 'transport/rtp_sender/packet_storage/packet_storage.cc',
+ 'transport/rtp_sender/packet_storage/packet_storage.h',
+ 'transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc',
+ 'transport/rtp_sender/rtp_packetizer/rtp_packetizer.h',
+ 'transport/rtp_sender/rtp_sender.cc',
+ 'transport/rtp_sender/rtp_sender.h',
+ 'transport/transport/udp_transport.cc',
+ 'transport/transport/udp_transport.h',
+ 'transport/utility/transport_encryption_handler.cc',
+ 'transport/utility/transport_encryption_handler.h',
], # source
},
- ], # targets,
- 'conditions': [
- ['include_tests==1', {
- 'targets': [
- {
- 'target_name': 'cast_unittests',
- 'type': '<(gtest_target_type)',
- 'dependencies': [
- 'cast_config',
- 'cast_receiver.gyp:cast_receiver',
- 'cast_sender.gyp:cast_sender',
- 'test/utility/utility.gyp:cast_test_utility',
- '<(DEPTH)/base/base.gyp:run_all_unittests',
- '<(DEPTH)/base/base.gyp:test_support_base',
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- '<(DEPTH)/net/net.gyp:net',
- '<(DEPTH)/testing/gmock.gyp:gmock',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- ],
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc/',
- ],
- 'sources': [
- 'audio_receiver/audio_decoder_unittest.cc',
- 'audio_receiver/audio_receiver_unittest.cc',
- 'audio_sender/audio_encoder_unittest.cc',
- 'audio_sender/audio_sender_unittest.cc',
- 'congestion_control/congestion_control_unittest.cc',
- 'framer/cast_message_builder_unittest.cc',
- 'framer/frame_buffer_unittest.cc',
- 'framer/framer_unittest.cc',
- 'net/pacing/mock_paced_packet_sender.cc',
- 'net/pacing/mock_paced_packet_sender.h',
- 'net/pacing/paced_sender_unittest.cc',
- 'rtcp/mock_rtcp_receiver_feedback.cc',
- 'rtcp/mock_rtcp_receiver_feedback.h',
- 'rtcp/mock_rtcp_sender_feedback.cc',
- 'rtcp/mock_rtcp_sender_feedback.h',
- 'rtcp/rtcp_receiver_unittest.cc',
- 'rtcp/rtcp_sender_unittest.cc',
- 'rtcp/rtcp_unittest.cc',
- 'rtp_receiver/rtp_receiver_defines.h',
- 'rtp_receiver/mock_rtp_payload_feedback.cc',
- 'rtp_receiver/mock_rtp_payload_feedback.h',
- 'rtp_receiver/receiver_stats_unittest.cc',
- 'rtp_receiver/rtp_parser/test/rtp_packet_builder.cc',
- 'rtp_receiver/rtp_parser/rtp_parser_unittest.cc',
- 'net/rtp_sender/packet_storage/packet_storage_unittest.cc',
- 'net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
- 'net/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
- 'net/rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
- 'test/crypto_utility.cc',
- 'test/crypto_utility.h',
- 'test/encode_decode_test.cc',
- 'test/end2end_unittest.cc',
- 'video_receiver/video_decoder_unittest.cc',
- 'video_receiver/video_receiver_unittest.cc',
- 'video_sender/mock_video_encoder_controller.cc',
- 'video_sender/mock_video_encoder_controller.h',
- 'video_sender/video_encoder_unittest.cc',
- 'video_sender/video_sender_unittest.cc',
- ], # source
- },
- {
- 'target_name': 'cast_sender_app',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_config',
- '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
- '<(DEPTH)/net/net.gyp:net_test_support',
- '<(DEPTH)/media/cast/cast_sender.gyp:*',
- '<(DEPTH)/media/media.gyp:media',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/third_party/opus/opus.gyp:opus',
- '<(DEPTH)/media/cast/test/transport/transport.gyp:cast_transport',
- '<(DEPTH)/media/cast/test/utility/utility.gyp:cast_test_utility',
- ],
- 'sources': [
- '<(DEPTH)/media/cast/test/sender.cc',
- ],
- },
- {
- 'target_name': 'cast_receiver_app',
- 'type': 'executable',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_config',
- '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
- '<(DEPTH)/net/net.gyp:net_test_support',
- '<(DEPTH)/media/cast/cast_receiver.gyp:*',
- '<(DEPTH)/media/media.gyp:media',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/media/cast/test/transport/transport.gyp:cast_transport',
- '<(DEPTH)/media/cast/test/utility/utility.gyp:cast_test_utility',
- '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
- ],
- 'sources': [
- '<(DEPTH)/media/cast/test/receiver.cc',
- ],
- 'conditions': [
- ['OS == "linux"', {
- 'sources': [
- '<(DEPTH)/media/cast/test/linux_output_window.cc',
- '<(DEPTH)/media/cast/test/linux_output_window.h',
- ],
- 'libraries': [
- '-lXext',
- '-lX11',
- ],
- }],
- ],
- },
- ], # targets
- }], # include_tests
],
}
diff --git a/chromium/media/cast/cast_config.cc b/chromium/media/cast/cast_config.cc
index 6c324bd8759..0e7953af01e 100644
--- a/chromium/media/cast/cast_config.cc
+++ b/chromium/media/cast/cast_config.cc
@@ -7,48 +7,59 @@
namespace media {
namespace cast {
+// TODO(miu): Revisit code factoring of these structs. There are a number of
+// common elements between them all, so it might be reasonable to only have one
+// or two structs; or, at least a common base class.
+
+// TODO(miu): Make sure all POD members are initialized by ctors. Policy
+// decision: Reasonable defaults or use invalid placeholder values to expose
+// unset members?
+
+// TODO(miu): Provide IsValidConfig() functions?
+
+// TODO(miu): Throughout the code, there is a lot of copy-and-paste of the same
+// calculations based on these config values. So, why don't we add methods to
+// these classes to centralize the logic?
+
VideoSenderConfig::VideoSenderConfig()
- : rtcp_interval(kDefaultRtcpIntervalMs),
+ : incoming_feedback_ssrc(0),
+ rtcp_interval(kDefaultRtcpIntervalMs),
rtcp_mode(kRtcpReducedSize),
- rtp_history_ms(kDefaultRtpHistoryMs),
- rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
+ use_external_encoder(false),
+ width(0),
+ height(0),
congestion_control_back_off(kDefaultCongestionControlBackOff),
+ max_bitrate(5000000),
+ min_bitrate(1000000),
+ start_bitrate(5000000),
max_qp(kDefaultMaxQp),
min_qp(kDefaultMinQp),
max_frame_rate(kDefaultMaxFrameRate),
- max_number_of_video_buffers_used(kDefaultNumberOfVideoBuffers) {}
+ max_number_of_video_buffers_used(kDefaultNumberOfVideoBuffers),
+ codec(transport::kVp8),
+ number_of_encode_threads(1) {}
AudioSenderConfig::AudioSenderConfig()
- : rtcp_interval(kDefaultRtcpIntervalMs),
+ : incoming_feedback_ssrc(0),
+ rtcp_interval(kDefaultRtcpIntervalMs),
rtcp_mode(kRtcpReducedSize),
- rtp_history_ms(kDefaultRtpHistoryMs),
- rtp_max_delay_ms(kDefaultRtpMaxDelayMs) {}
+ use_external_encoder(false),
+ frequency(0),
+ channels(0),
+ bitrate(0) {}
-AudioReceiverConfig::AudioReceiverConfig()
- : rtcp_interval(kDefaultRtcpIntervalMs),
- rtcp_mode(kRtcpReducedSize),
- rtp_max_delay_ms(kDefaultRtpMaxDelayMs) {}
-
-VideoReceiverConfig::VideoReceiverConfig()
- : rtcp_interval(kDefaultRtcpIntervalMs),
+FrameReceiverConfig::FrameReceiverConfig()
+ : feedback_ssrc(0),
+ incoming_ssrc(0),
+ rtcp_interval(kDefaultRtcpIntervalMs),
rtcp_mode(kRtcpReducedSize),
rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
- max_frame_rate(kDefaultMaxFrameRate),
- decoder_faster_than_max_frame_rate(true) {}
-
-EncodedVideoFrame::EncodedVideoFrame() {}
-EncodedVideoFrame::~EncodedVideoFrame() {}
-
-EncodedAudioFrame::EncodedAudioFrame() {}
-EncodedAudioFrame::~EncodedAudioFrame() {}
-
-PcmAudioFrame::PcmAudioFrame() {}
-PcmAudioFrame::~PcmAudioFrame() {}
+ rtp_payload_type(0),
+ frequency(0),
+ channels(0),
+ max_frame_rate(0) {}
-// static
-void PacketReceiver::DeletePacket(const uint8* packet) {
- delete [] packet;
-}
+FrameReceiverConfig::~FrameReceiverConfig() {}
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/cast_config.h b/chromium/media/cast/cast_config.h
index 27cc67e5dae..ea25d6b6cf7 100644
--- a/chromium/media/cast/cast_config.h
+++ b/chromium/media/cast/cast_config.h
@@ -12,65 +12,53 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/single_thread_task_runner.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/transport/cast_transport_config.h"
namespace media {
+class VideoEncodeAccelerator;
+
namespace cast {
enum RtcpMode {
- kRtcpCompound, // Compound RTCP mode is described by RFC 4585.
+ kRtcpCompound, // Compound RTCP mode is described by RFC 4585.
kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
};
-enum VideoCodec {
- kVp8,
- kH264,
- kExternalVideo,
-};
-
-enum AudioCodec {
- kOpus,
- kPcm16,
- kExternalAudio,
-};
-
+// TODO(miu): Merge AudioSenderConfig and VideoSenderConfig and make their
+// naming/documentation consistent with FrameReceiverConfig.
struct AudioSenderConfig {
AudioSenderConfig();
- uint32 sender_ssrc;
+ // The sender ssrc is in rtp_config.ssrc.
uint32 incoming_feedback_ssrc;
int rtcp_interval;
std::string rtcp_c_name;
RtcpMode rtcp_mode;
- int rtp_history_ms; // The time RTP packets are stored for retransmissions.
- int rtp_max_delay_ms;
- int rtp_payload_type;
+ transport::RtpConfig rtp_config;
bool use_external_encoder;
int frequency;
int channels;
int bitrate; // Set to <= 0 for "auto variable bitrate" (libopus knows best).
- AudioCodec codec;
-
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesKeySize.
+ transport::AudioCodec codec;
};
struct VideoSenderConfig {
VideoSenderConfig();
- uint32 sender_ssrc;
+ // The sender ssrc is in rtp_config.ssrc.
uint32 incoming_feedback_ssrc;
int rtcp_interval;
std::string rtcp_c_name;
RtcpMode rtcp_mode;
- int rtp_history_ms; // The time RTP packets are stored for retransmissions.
- int rtp_max_delay_ms;
- int rtp_payload_type;
+ transport::RtpConfig rtp_config;
bool use_external_encoder;
int width; // Incoming frames will be scaled to this size.
@@ -84,148 +72,92 @@ struct VideoSenderConfig {
int min_qp;
int max_frame_rate;
int max_number_of_video_buffers_used; // Max value depend on codec.
- VideoCodec codec;
- int number_of_cores;
-
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesKeySize.
+ transport::VideoCodec codec;
+ int number_of_encode_threads;
};
-struct AudioReceiverConfig {
- AudioReceiverConfig();
-
- uint32 feedback_ssrc;
- uint32 incoming_ssrc;
-
- int rtcp_interval;
- std::string rtcp_c_name;
- RtcpMode rtcp_mode;
-
- // The time the receiver is prepared to wait for retransmissions.
- int rtp_max_delay_ms;
- int rtp_payload_type;
-
- bool use_external_decoder;
- int frequency;
- int channels;
- AudioCodec codec;
-
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesKeySize.
-};
+// TODO(miu): Naming and minor type changes are badly needed in a later CL.
+struct FrameReceiverConfig {
+ FrameReceiverConfig();
+ ~FrameReceiverConfig();
-struct VideoReceiverConfig {
- VideoReceiverConfig();
+ // The receiver's SSRC identifier.
+ uint32 feedback_ssrc; // TODO(miu): Rename to receiver_ssrc for clarity.
- uint32 feedback_ssrc;
- uint32 incoming_ssrc;
+ // The sender's SSRC identifier.
+ uint32 incoming_ssrc; // TODO(miu): Rename to sender_ssrc for clarity.
+ // Mean interval (in milliseconds) between RTCP reports.
+  // TODO(miu): Remove this since it's always kDefaultRtcpIntervalMs.
int rtcp_interval;
- std::string rtcp_c_name;
- RtcpMode rtcp_mode;
-
- // The time the receiver is prepared to wait for retransmissions.
- int rtp_max_delay_ms;
- int rtp_payload_type;
-
- bool use_external_decoder;
- int max_frame_rate;
-
- // Some HW decoders can not run faster than the frame rate, preventing it
- // from catching up after a glitch.
- bool decoder_faster_than_max_frame_rate;
- VideoCodec codec;
-
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesKeySize.
-};
-
-struct EncodedVideoFrame {
- EncodedVideoFrame();
- ~EncodedVideoFrame();
-
- VideoCodec codec;
- bool key_frame;
- uint32 frame_id;
- uint32 last_referenced_frame_id;
- std::string data;
-};
-
-// DEPRECATED: Do not use in new code. Please migrate existing code to use
-// media::AudioBus.
-struct PcmAudioFrame {
- PcmAudioFrame();
- ~PcmAudioFrame();
-
- int channels; // Samples in interleaved stereo format. L0, R0, L1 ,R1 ,...
- int frequency;
- std::vector<int16> samples;
-};
-
-struct EncodedAudioFrame {
- EncodedAudioFrame();
- ~EncodedAudioFrame();
-
- AudioCodec codec;
- uint32 frame_id; // Needed to release the frame.
- int samples; // Needed send side to advance the RTP timestamp.
- // Not used receive side.
- // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
- static const int kMaxNumberOfSamples = 48 * 2 * 100;
- std::string data;
-};
-typedef std::vector<uint8> Packet;
-typedef std::vector<Packet> PacketList;
-
-class PacketSender {
- public:
- // All packets to be sent to the network will be delivered via these
- // functions.
- virtual bool SendPackets(const PacketList& packets) = 0;
+ // CNAME representing this receiver.
+ // TODO(miu): Remove this since it should be derived elsewhere (probably in
+ // the transport layer).
+ std::string rtcp_c_name;
- virtual bool SendPacket(const Packet& packet) = 0;
+ // Determines amount of detail in RTCP reports.
+ // TODO(miu): Remove this since it's never anything but kRtcpReducedSize.
+ RtcpMode rtcp_mode;
- virtual ~PacketSender() {}
-};
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+  // transmit/retransmit, receive, decode, and render, given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ int rtp_max_delay_ms; // TODO(miu): Change to TimeDelta target_playout_delay.
-class PacketReceiver : public base::RefCountedThreadSafe<PacketReceiver> {
- public:
- // All packets received from the network should be delivered via this
- // function.
- virtual void ReceivedPacket(const uint8* packet, size_t length,
- const base::Closure callback) = 0;
+ // RTP payload type enum: Specifies the type/encoding of frame data.
+ int rtp_payload_type;
- static void DeletePacket(const uint8* packet);
+ // RTP timebase: The number of RTP units advanced per one second. For audio,
+ // this is the sampling rate. For video, by convention, this is 90 kHz.
+ int frequency; // TODO(miu): Rename to rtp_timebase for clarity.
- protected:
- virtual ~PacketReceiver() {}
+ // Number of channels. For audio, this is normally 2. For video, this must
+ // be 1 as Cast does not have support for stereoscopic video.
+ int channels;
- private:
- friend class base::RefCountedThreadSafe<PacketReceiver>;
+ // The target frame rate. For audio, this is normally 100 (i.e., frames have
+ // a duration of 10ms each). For video, this is normally 30, but any frame
+ // rate is supported.
+ int max_frame_rate; // TODO(miu): Rename to target_frame_rate.
+
+ // Codec used for the compression of signal data.
+ // TODO(miu): Merge the AudioCodec and VideoCodec enums into one so this union
+ // is not necessary.
+ union MergedCodecPlaceholder {
+ transport::AudioCodec audio;
+ transport::VideoCodec video;
+ MergedCodecPlaceholder() : audio(transport::kUnknownAudioCodec) {}
+ } codec;
+
+ // The AES crypto key and initialization vector. Each of these strings
+ // contains the data in binary form, of size kAesKeySize. If they are empty
+ // strings, crypto is not being used.
+ std::string aes_key;
+ std::string aes_iv_mask;
};
-class VideoEncoderController {
- public:
- // Inform the encoder about the new target bit rate.
- virtual void SetBitRate(int new_bit_rate) = 0;
+// Imported from media::cast::transport.
+typedef transport::Packet Packet;
+typedef transport::PacketList PacketList;
- // Inform the encoder to not encode the next frame.
- // Note: this setting is sticky and should last until called with false.
- virtual void SkipNextFrame(bool skip_next_frame) = 0;
+typedef base::Callback<void(CastInitializationStatus)>
+ CastInitializationCallback;
- // Inform the encoder to encode the next frame as a key frame.
- virtual void GenerateKeyFrame() = 0;
+typedef base::Callback<void(scoped_refptr<base::SingleThreadTaskRunner>,
+ scoped_ptr<media::VideoEncodeAccelerator>)>
+ ReceiveVideoEncodeAcceleratorCallback;
+typedef base::Callback<void(const ReceiveVideoEncodeAcceleratorCallback&)>
+ CreateVideoEncodeAcceleratorCallback;
- // Inform the encoder to only reference frames older or equal to frame_id;
- virtual void LatestFrameIdToReference(uint32 frame_id) = 0;
-
- // Query the codec about how many frames it has skipped due to slow ACK.
- virtual int NumberOfSkippedFrames() const = 0;
-
- protected:
- virtual ~VideoEncoderController() {}
-};
+typedef base::Callback<void(scoped_ptr<base::SharedMemory>)>
+ ReceiveVideoEncodeMemoryCallback;
+typedef base::Callback<void(size_t size,
+ const ReceiveVideoEncodeMemoryCallback&)>
+ CreateVideoEncodeMemoryCallback;
} // namespace cast
} // namespace media
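Putting the new FrameReceiverConfig to use for an audio stream looks roughly
as follows; the SSRC values here are illustrative, and the payload type simply
matches the one used in the unit tests above, not a required value:

    // Sketch: configuring a receiver for a 48 kHz stereo Opus stream.
    media::cast::FrameReceiverConfig audio_config;
    audio_config.feedback_ssrc = 2;     // This receiver's SSRC.
    audio_config.incoming_ssrc = 1;     // The sender's SSRC.
    audio_config.rtp_payload_type = 127;
    audio_config.frequency = 48000;     // RTP timebase == sampling rate.
    audio_config.channels = 2;
    audio_config.max_frame_rate = 100;  // 10 ms audio frames.
    audio_config.codec.audio = media::cast::transport::kOpus;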
diff --git a/chromium/media/cast/cast_defines.h b/chromium/media/cast/cast_defines.h
index aad7ae2b1d8..64b20c96da6 100644
--- a/chromium/media/cast/cast_defines.h
+++ b/chromium/media/cast/cast_defines.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_CAST_CAST_DEFINES_H_
#define MEDIA_CAST_CAST_DEFINES_H_
+#include <stdint.h>
+
#include <map>
#include <set>
@@ -12,6 +14,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/time/time.h"
+#include "media/cast/transport/cast_transport_config.h"
namespace media {
namespace cast {
@@ -19,16 +22,33 @@ namespace cast {
const int64 kDontShowTimeoutMs = 33;
const float kDefaultCongestionControlBackOff = 0.875f;
const uint32 kVideoFrequency = 90000;
-const int64 kSkippedFramesCheckPeriodkMs = 10000;
-const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
+const uint32 kStartFrameId = UINT32_C(0xffffffff);
+
+// This is an important system-wide constant. This limits how much history the
+// implementation must retain in order to process the acknowledgements of past
+// frames.
+const int kMaxUnackedFrames = 255;
-// Number of skipped frames threshold in fps (as configured) per period above.
-const int kSkippedFramesThreshold = 3;
-const size_t kIpPacketSize = 1500;
+const size_t kMaxIpPacketSize = 1500;
const int kStartRttMs = 20;
const int64 kCastMessageUpdateIntervalMs = 33;
const int64 kNackRepeatIntervalMs = 30;
+enum CastInitializationStatus {
+ STATUS_AUDIO_UNINITIALIZED,
+ STATUS_VIDEO_UNINITIALIZED,
+ STATUS_AUDIO_INITIALIZED,
+ STATUS_VIDEO_INITIALIZED,
+ STATUS_INVALID_CAST_ENVIRONMENT,
+ STATUS_INVALID_CRYPTO_CONFIGURATION,
+ STATUS_UNSUPPORTED_AUDIO_CODEC,
+ STATUS_UNSUPPORTED_VIDEO_CODEC,
+ STATUS_INVALID_AUDIO_CONFIGURATION,
+ STATUS_INVALID_VIDEO_CONFIGURATION,
+ STATUS_GPU_ACCELERATION_NOT_SUPPORTED,
+ STATUS_GPU_ACCELERATION_ERROR,
+};
+
enum DefaultSettings {
kDefaultAudioEncoderBitrate = 0, // This means "auto," and may mean VBR.
kDefaultAudioSamplingRate = 48000,
@@ -41,17 +61,29 @@ enum DefaultSettings {
kDefaultRtpMaxDelayMs = 100,
};
+enum PacketType {
+ kNewPacket,
+ kNewPacketCompletingFrame,
+ kDuplicatePacket,
+ kTooOldPacket,
+};
+
+// kRtcpCastAllPacketsLost is used in PacketIdSet and on the wire to mean that
+// ALL packets for a particular frame are lost.
const uint16 kRtcpCastAllPacketsLost = 0xffff;
+// kRtcpCastLastPacket is used in PacketIdSet to ask for the last packet of a
+// frame to be retransmitted.
+const uint16 kRtcpCastLastPacket = 0xfffe;
+
const size_t kMinLengthOfRtcp = 8;
// Basic RTP header + cast header.
const size_t kMinLengthOfRtp = 12 + 6;
-const size_t kAesBlockSize = 16;
-const size_t kAesKeySize = 16;
-
// Each uint16 represents one packet id within a cast frame.
+// Can also contain kRtcpCastAllPacketsLost and kRtcpCastLastPacket.
typedef std::set<uint16> PacketIdSet;
// Each uint8 represents one cast frame.
typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
@@ -62,20 +94,26 @@ typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
// January 1970, in NTP seconds.
// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
// 1 January 1900.
-static const int64 kUnixEpochInNtpSeconds = GG_INT64_C(2208988800);
+static const int64 kUnixEpochInNtpSeconds = INT64_C(2208988800);
// Magic fractional unit. Used to convert time (in microseconds) to/from
// fractional NTP seconds.
static const double kMagicFractionalUnit = 4.294967296E3;
+// The maximum number of Cast receiver events to keep in history for the
+// purpose of sending the events through RTCP.
+// The number chosen should be more than the number of events that can be
+// stored in an RTCP packet.
+static const size_t kReceiverRtcpEventHistorySize = 512;
+
inline bool IsNewerFrameId(uint32 frame_id, uint32 prev_frame_id) {
return (frame_id != prev_frame_id) &&
- static_cast<uint32>(frame_id - prev_frame_id) < 0x80000000;
+ static_cast<uint32>(frame_id - prev_frame_id) < 0x80000000;
}
inline bool IsNewerRtpTimestamp(uint32 timestamp, uint32 prev_timestamp) {
return (timestamp != prev_timestamp) &&
- static_cast<uint32>(timestamp - prev_timestamp) < 0x80000000;
+ static_cast<uint32>(timestamp - prev_timestamp) < 0x80000000;
}
inline bool IsOlderFrameId(uint32 frame_id, uint32 prev_frame_id) {
@@ -84,7 +122,7 @@ inline bool IsOlderFrameId(uint32 frame_id, uint32 prev_frame_id) {
inline bool IsNewerPacketId(uint16 packet_id, uint16 prev_packet_id) {
return (packet_id != prev_packet_id) &&
- static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
+ static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
}
inline bool IsNewerSequenceNumber(uint16 sequence_number,
@@ -107,13 +145,22 @@ inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
return base::TimeDelta::FromMilliseconds(delay_ms);
}
-inline void ConvertTimeToFractions(int64 time_us,
+inline void ConvertTimeToFractions(int64 ntp_time_us,
uint32* seconds,
uint32* fractions) {
- DCHECK_GE(time_us, 0) << "Time must NOT be negative";
- *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
+ DCHECK_GE(ntp_time_us, 0) << "Time must NOT be negative";
+ const int64 seconds_component =
+ ntp_time_us / base::Time::kMicrosecondsPerSecond;
+ // NTP time will overflow in the year 2036. Also, make sure unit tests don't
+ // regress and use an origin past the year 2036. If this overflows here, the
+ // inverse calculation fails to compute the correct TimeTicks value, throwing
+ // off the entire system.
+ DCHECK_LT(seconds_component, INT64_C(4263431296))
+ << "One year left to fix the NTP year 2036 wrap-around issue!";
+ *seconds = static_cast<uint32>(seconds_component);
*fractions = static_cast<uint32>(
- (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
+ (ntp_time_us % base::Time::kMicrosecondsPerSecond) *
+ kMagicFractionalUnit);
}
inline void ConvertTimeTicksToNtp(const base::TimeTicks& time,
@@ -122,7 +169,8 @@ inline void ConvertTimeTicksToNtp(const base::TimeTicks& time,
base::TimeDelta elapsed_since_unix_epoch =
time - base::TimeTicks::UnixEpoch();
- int64 ntp_time_us = elapsed_since_unix_epoch.InMicroseconds() +
+ int64 ntp_time_us =
+ elapsed_since_unix_epoch.InMicroseconds() +
(kUnixEpochInNtpSeconds * base::Time::kMicrosecondsPerSecond);
ConvertTimeToFractions(ntp_time_us, ntp_seconds, ntp_fractions);
@@ -130,30 +178,19 @@ inline void ConvertTimeTicksToNtp(const base::TimeTicks& time,
inline base::TimeTicks ConvertNtpToTimeTicks(uint32 ntp_seconds,
uint32 ntp_fractions) {
- int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
- base::Time::kMicrosecondsPerSecond +
- static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
+ int64 ntp_time_us =
+ static_cast<int64>(ntp_seconds) * base::Time::kMicrosecondsPerSecond +
+ static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
- base::TimeDelta elapsed_since_unix_epoch =
- base::TimeDelta::FromMicroseconds(ntp_time_us -
- (kUnixEpochInNtpSeconds * base::Time::kMicrosecondsPerSecond));
+ base::TimeDelta elapsed_since_unix_epoch = base::TimeDelta::FromMicroseconds(
+ ntp_time_us -
+ (kUnixEpochInNtpSeconds * base::Time::kMicrosecondsPerSecond));
return base::TimeTicks::UnixEpoch() + elapsed_since_unix_epoch;
}
-inline std::string GetAesNonce(uint32 frame_id, const std::string& iv_mask) {
- std::string aes_nonce(kAesBlockSize, 0);
-
- // Serializing frame_id in big-endian order (aes_nonce[8] is the most
- // significant byte of frame_id).
- aes_nonce[11] = frame_id & 0xff;
- aes_nonce[10] = (frame_id >> 8) & 0xff;
- aes_nonce[9] = (frame_id >> 16) & 0xff;
- aes_nonce[8] = (frame_id >> 24) & 0xff;
-
- for (size_t i = 0; i < kAesBlockSize; ++i) {
- aes_nonce[i] ^= iv_mask[i];
- }
- return aes_nonce;
+inline base::TimeDelta RtpDeltaToTimeDelta(int64 rtp_delta, int rtp_timebase) {
+ DCHECK_GT(rtp_timebase, 0);
+ return rtp_delta * base::TimeDelta::FromSeconds(1) / rtp_timebase;
}
inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
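The wrap-around-safe comparisons and the new RtpDeltaToTimeDelta() helper are
easiest to see with concrete values (a sketch, not part of the patch):

    // IsNewerFrameId: 0x00000001 is "newer" than kStartFrameId (0xffffffff)
    // because the unsigned difference (2) is below the 0x80000000 midpoint.
    DCHECK(media::cast::IsNewerFrameId(0x00000001u, 0xffffffffu));
    DCHECK(!media::cast::IsNewerFrameId(0xffffffffu, 0x00000001u));

    // RtpDeltaToTimeDelta: 4500 RTP ticks at the 90 kHz video timebase is
    // 4500 / 90000 s == 50 ms.
    base::TimeDelta delta =
        media::cast::RtpDeltaToTimeDelta(4500, media::cast::kVideoFrequency);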
diff --git a/chromium/media/cast/cast_environment.cc b/chromium/media/cast/cast_environment.cc
index be636bb253d..93eb8c72522 100644
--- a/chromium/media/cast/cast_environment.cc
+++ b/chromium/media/cast/cast_environment.cc
@@ -4,65 +4,66 @@
#include "media/cast/cast_environment.h"
+#include "base/bind.h"
+#include "base/location.h"
#include "base/logging.h"
-using base::TaskRunner;
+using base::SingleThreadTaskRunner;
+
+namespace {
+
+void DeleteLoggingOnMainThread(scoped_ptr<media::cast::LoggingImpl> logging) {
+ logging.reset();
+}
+
+} // namespace
namespace media {
namespace cast {
CastEnvironment::CastEnvironment(
- base::TickClock* clock,
- scoped_refptr<TaskRunner> main_thread_proxy,
- scoped_refptr<TaskRunner> audio_encode_thread_proxy,
- scoped_refptr<TaskRunner> audio_decode_thread_proxy,
- scoped_refptr<TaskRunner> video_encode_thread_proxy,
- scoped_refptr<TaskRunner> video_decode_thread_proxy,
- const CastLoggingConfig& config)
- : clock_(clock),
- main_thread_proxy_(main_thread_proxy),
- audio_encode_thread_proxy_(audio_encode_thread_proxy),
- audio_decode_thread_proxy_(audio_decode_thread_proxy),
- video_encode_thread_proxy_(video_encode_thread_proxy),
- video_decode_thread_proxy_(video_decode_thread_proxy),
- logging_(new LoggingImpl(clock, main_thread_proxy, config)) {
- DCHECK(main_thread_proxy) << "Main thread required";
-}
+ scoped_ptr<base::TickClock> clock,
+ scoped_refptr<SingleThreadTaskRunner> main_thread_proxy,
+ scoped_refptr<SingleThreadTaskRunner> audio_thread_proxy,
+ scoped_refptr<SingleThreadTaskRunner> video_thread_proxy)
+ : main_thread_proxy_(main_thread_proxy),
+ audio_thread_proxy_(audio_thread_proxy),
+ video_thread_proxy_(video_thread_proxy),
+ clock_(clock.Pass()),
+ logging_(new LoggingImpl) {}
-CastEnvironment::~CastEnvironment() {}
+CastEnvironment::~CastEnvironment() {
+ // Logging must be deleted on the main thread.
+ if (main_thread_proxy_ && !main_thread_proxy_->RunsTasksOnCurrentThread()) {
+ main_thread_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&DeleteLoggingOnMainThread, base::Passed(&logging_)));
+ }
+}
bool CastEnvironment::PostTask(ThreadId identifier,
- const tracked_objects::Location& from_here,
- const base::Closure& task) {
- scoped_refptr<TaskRunner> task_runner =
- GetMessageTaskRunnerForThread(identifier);
-
- return task_runner->PostTask(from_here, task);
+ const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ return GetTaskRunner(identifier)->PostTask(from_here, task);
}
-bool CastEnvironment::PostDelayedTask(ThreadId identifier,
- const tracked_objects::Location& from_here,
- const base::Closure& task,
- base::TimeDelta delay) {
- scoped_refptr<TaskRunner> task_runner =
- GetMessageTaskRunnerForThread(identifier);
-
- return task_runner->PostDelayedTask(from_here, task, delay);
+bool CastEnvironment::PostDelayedTask(
+ ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ return GetTaskRunner(identifier)->PostDelayedTask(from_here, task, delay);
}
-scoped_refptr<TaskRunner> CastEnvironment::GetMessageTaskRunnerForThread(
- ThreadId identifier) {
+scoped_refptr<SingleThreadTaskRunner> CastEnvironment::GetTaskRunner(
+ ThreadId identifier) const {
switch (identifier) {
case CastEnvironment::MAIN:
return main_thread_proxy_;
- case CastEnvironment::AUDIO_ENCODER:
- return audio_encode_thread_proxy_;
- case CastEnvironment::AUDIO_DECODER:
- return audio_decode_thread_proxy_;
- case CastEnvironment::VIDEO_ENCODER:
- return video_encode_thread_proxy_;
- case CastEnvironment::VIDEO_DECODER:
- return video_decode_thread_proxy_;
+ case CastEnvironment::AUDIO:
+ return audio_thread_proxy_;
+ case CastEnvironment::VIDEO:
+ return video_thread_proxy_;
default:
NOTREACHED() << "Invalid Thread identifier";
return NULL;
@@ -72,30 +73,19 @@ scoped_refptr<TaskRunner> CastEnvironment::GetMessageTaskRunnerForThread(
bool CastEnvironment::CurrentlyOn(ThreadId identifier) {
switch (identifier) {
case CastEnvironment::MAIN:
- return main_thread_proxy_->RunsTasksOnCurrentThread();
- case CastEnvironment::AUDIO_ENCODER:
- return audio_encode_thread_proxy_->RunsTasksOnCurrentThread();
- case CastEnvironment::AUDIO_DECODER:
- return audio_decode_thread_proxy_->RunsTasksOnCurrentThread();
- case CastEnvironment::VIDEO_ENCODER:
- return video_encode_thread_proxy_->RunsTasksOnCurrentThread();
- case CastEnvironment::VIDEO_DECODER:
- return video_decode_thread_proxy_->RunsTasksOnCurrentThread();
+ return main_thread_proxy_ &&
+ main_thread_proxy_->RunsTasksOnCurrentThread();
+ case CastEnvironment::AUDIO:
+ return audio_thread_proxy_ &&
+ audio_thread_proxy_->RunsTasksOnCurrentThread();
+ case CastEnvironment::VIDEO:
+ return video_thread_proxy_ &&
+ video_thread_proxy_->RunsTasksOnCurrentThread();
default:
NOTREACHED() << "Invalid thread identifier";
return false;
}
}
-base::TickClock* CastEnvironment::Clock() const {
- return clock_;
-}
-
-LoggingImpl* CastEnvironment::Logging() {
- DCHECK(CurrentlyOn(CastEnvironment::MAIN)) <<
- "Must be called from main thread";
- return logging_.get();
-}
-
} // namespace cast
} // namespace media
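With the thread enum trimmed to MAIN/AUDIO/VIDEO, hopping between threads is a
single PostTask() call. A sketch, assuming |env| is a configured
scoped_refptr<CastEnvironment> and EncodeNextChunk is a hypothetical free
function:

    // Sketch: run work on the AUDIO thread from anywhere.
    env->PostTask(media::cast::CastEnvironment::AUDIO,
                  FROM_HERE,
                  base::Bind(&EncodeNextChunk));
    // Thread assertions use CurrentlyOn(), e.g. from code that must stay on
    // the main thread:
    DCHECK(env->CurrentlyOn(media::cast::CastEnvironment::MAIN));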
diff --git a/chromium/media/cast/cast_environment.h b/chromium/media/cast/cast_environment.h
index 8a135733c04..1549747ee22 100644
--- a/chromium/media/cast/cast_environment.h
+++ b/chromium/media/cast/cast_environment.h
@@ -8,7 +8,7 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "base/task_runner.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/logging/logging_defines.h"
@@ -24,25 +24,18 @@ class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
// The main thread is where the cast system is configured and where timers
// and network IO is performed.
MAIN,
- // The audio encoder thread is where all send side audio processing is done,
- // primarily encoding but also re-sampling.
- AUDIO_ENCODER,
- // The audio decoder thread is where all receive side audio processing is
- // done, primarily decoding but also error concealment and re-sampling.
- AUDIO_DECODER,
- // The video encoder thread is where the video encode processing is done.
- VIDEO_ENCODER,
- // The video decoder thread is where the video decode processing is done.
- VIDEO_DECODER,
+    // The audio thread is where all audio processing is done, primarily
+    // encoding / decoding but also re-sampling.
+ AUDIO,
+    // The video thread is where all video processing is done.
+ VIDEO,
};
- CastEnvironment(base::TickClock* clock,
- scoped_refptr<base::TaskRunner> main_thread_proxy,
- scoped_refptr<base::TaskRunner> audio_encode_thread_proxy,
- scoped_refptr<base::TaskRunner> audio_decode_thread_proxy,
- scoped_refptr<base::TaskRunner> video_encode_thread_proxy,
- scoped_refptr<base::TaskRunner> video_decode_thread_proxy,
- const CastLoggingConfig& config);
+ CastEnvironment(
+ scoped_ptr<base::TickClock> clock,
+ scoped_refptr<base::SingleThreadTaskRunner> main_thread_proxy,
+ scoped_refptr<base::SingleThreadTaskRunner> audio_thread_proxy,
+ scoped_refptr<base::SingleThreadTaskRunner> video_thread_proxy);
// These are the same methods in message_loop.h, but are guaranteed to either
// get posted to the MessageLoop if it's still alive, or be deleted otherwise.
@@ -60,29 +53,38 @@ class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
bool CurrentlyOn(ThreadId identifier);
- base::TickClock* Clock() const;
+ // All of the media::cast implementation must use this TickClock.
+ base::TickClock* Clock() const { return clock_.get(); }
- // Logging is not thread safe. Should always be called from the main thread.
- LoggingImpl* Logging();
+ // Logging is not thread safe. Its methods should always be called from the
+ // main thread.
+ // TODO(hubbe): Logging should be a thread-safe interface.
+ LoggingImpl* Logging() const { return logging_.get(); }
- protected:
- virtual ~CastEnvironment();
+ scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner(
+ ThreadId identifier) const;
- private:
- friend class base::RefCountedThreadSafe<CastEnvironment>;
+ bool HasAudioThread() {
+ return audio_thread_proxy_ ? true : false;
+ }
- scoped_refptr<base::TaskRunner> GetMessageTaskRunnerForThread(
- ThreadId identifier);
+ bool HasVideoThread() {
+ return video_thread_proxy_ ? true : false;
+ }
- base::TickClock* const clock_; // Not owned by this class.
- scoped_refptr<base::TaskRunner> main_thread_proxy_;
- scoped_refptr<base::TaskRunner> audio_encode_thread_proxy_;
- scoped_refptr<base::TaskRunner> audio_decode_thread_proxy_;
- scoped_refptr<base::TaskRunner> video_encode_thread_proxy_;
- scoped_refptr<base::TaskRunner> video_decode_thread_proxy_;
+ protected:
+ virtual ~CastEnvironment();
+ // Subclasses may override these.
+ scoped_refptr<base::SingleThreadTaskRunner> main_thread_proxy_;
+ scoped_refptr<base::SingleThreadTaskRunner> audio_thread_proxy_;
+ scoped_refptr<base::SingleThreadTaskRunner> video_thread_proxy_;
+ scoped_ptr<base::TickClock> clock_;
scoped_ptr<LoggingImpl> logging_;
+ private:
+ friend class base::RefCountedThreadSafe<CastEnvironment>;
+
DISALLOW_COPY_AND_ASSIGN(CastEnvironment);
};
diff --git a/chromium/media/cast/cast_receiver.gyp b/chromium/media/cast/cast_receiver.gyp
deleted file mode 100644
index 031aec7e16a..00000000000
--- a/chromium/media/cast/cast_receiver.gyp
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'includes': [
- 'audio_receiver/audio_receiver.gypi',
- 'video_receiver/video_receiver.gypi',
- ],
- 'targets': [
- {
- 'target_name': 'cast_receiver',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc/',
- ],
- 'sources': [
- 'cast_receiver.h',
- 'cast_receiver_impl.cc',
- 'cast_receiver_impl.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- 'cast_audio_receiver',
- 'cast_video_receiver',
- 'net/pacing/paced_sender.gyp:cast_paced_sender',
- 'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/cast_receiver.h b/chromium/media/cast/cast_receiver.h
index 75e6f68d3bb..a9d3edeb78e 100644
--- a/chromium/media/cast/cast_receiver.h
+++ b/chromium/media/cast/cast_receiver.h
@@ -13,71 +13,71 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
namespace media {
class VideoFrame;
-}
-namespace media {
namespace cast {
-// Callback in which the raw audio frame and play-out time will be returned
-// once decoding is complete.
-typedef base::Callback<void(scoped_ptr<PcmAudioFrame>, const base::TimeTicks&)>
- AudioFrameDecodedCallback;
-// Callback in which the encoded audio frame and play-out time will be returned.
-typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
- const base::TimeTicks&)> AudioFrameEncodedCallback;
+namespace transport {
+class PacketSender;
+}
-// Callback in which the raw frame and render time will be returned once
-// decoding is complete.
+// The following callbacks are used to deliver decoded audio/video frame data,
+// the frame's corresponding play-out time, and a continuity flag.
+// |is_continuous| will be false to indicate the loss of data due to a loss of
+// frames (or decoding errors). This allows the client to take steps to smooth
+// discontinuities for playback. Note: A NULL pointer can be returned when data
+// is not available (e.g., bad/missing packet).
+typedef base::Callback<void(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& playout_time,
+ bool is_continuous)> AudioFrameDecodedCallback;
+// TODO(miu): |video_frame| includes a timestamp, so use that instead.
typedef base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks&)>
- VideoFrameDecodedCallback;
-
-// Callback in which the encoded video frame and render time will be returned.
-typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
- const base::TimeTicks&)> VideoFrameEncodedCallback;
-
-// This Class is thread safe.
-class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver> {
- public:
- virtual void GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback& callback) = 0;
-
- virtual void GetCodedAudioFrame(
- const AudioFrameEncodedCallback& callback) = 0;
+ const base::TimeTicks& playout_time,
+ bool is_continuous)> VideoFrameDecodedCallback;
- virtual void GetRawVideoFrame(const VideoFrameDecodedCallback& callback) = 0;
+// The following callback delivers encoded frame data and metadata. The client
+// should examine the |frame_id| field to determine whether any frames have been
+// dropped (i.e., frame_id should be incrementing by one each time). Note: A
+// NULL pointer can be returned on error.
+typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ ReceiveEncodedFrameCallback;
- virtual void GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) = 0;
-
- protected:
- virtual ~FrameReceiver() {}
-
- private:
- friend class base::RefCountedThreadSafe<FrameReceiver>;
-};
-
-// This Class is thread safe.
class CastReceiver {
public:
- static CastReceiver* CreateCastReceiver(
+ static scoped_ptr<CastReceiver> Create(
scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
- PacketSender* const packet_sender);
-
- // All received RTP and RTCP packets for the call should be inserted to this
- // PacketReceiver.
- virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
-
- // Polling interface to get audio and video frames from the CastReceiver.
- virtual scoped_refptr<FrameReceiver> frame_receiver() = 0;
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
+ transport::PacketSender* const packet_sender);
+
+ // All received RTP and RTCP packets for the call should be sent to this
+ // PacketReceiver. Can be called from any thread.
+ // TODO(hubbe): Replace with:
+ // virtual void ReceivePacket(scoped_ptr<Packet> packet) = 0;
+ virtual transport::PacketReceiverCallback packet_receiver() = 0;
+
+ // Polling interface to get audio and video frames from the CastReceiver.
+ // The RequestDecodedXXXXXFrame() methods utilize internal software-based
+ // decoding, while the RequestEncodedXXXXXFrame() methods provide
+ // still-encoded frames for use with external/hardware decoders.
+ //
+ // In all cases, the given |callback| is guaranteed to be run at some point in
+ // the future, except for those requests still enqueued at destruction time.
+ //
+ // These methods should all be called on the CastEnvironment's MAIN thread.
+ virtual void RequestDecodedAudioFrame(
+ const AudioFrameDecodedCallback& callback) = 0;
+ virtual void RequestEncodedAudioFrame(
+ const ReceiveEncodedFrameCallback& callback) = 0;
+ virtual void RequestDecodedVideoFrame(
+ const VideoFrameDecodedCallback& callback) = 0;
+ virtual void RequestEncodedVideoFrame(
+ const ReceiveEncodedFrameCallback& callback) = 0;
virtual ~CastReceiver() {}
};
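To make the reworked polling interface concrete, here is a minimal, hypothetical client sketch (not part of the patch; it assumes a fully constructed |cast_receiver| and matches the AudioFrameDecodedCallback typedef above):

// Runs once a decoded frame (or a NULL bus, for bad/missing packets) is ready.
void OnDecodedAudioFrame(scoped_ptr<AudioBus> audio_bus,
                         const base::TimeTicks& playout_time,
                         bool is_continuous) {
  if (!audio_bus)
    return;  // No data available for this request.
  if (!is_continuous) {
    // Frames were dropped or failed to decode; smooth the gap for playback.
  }
  // Hand |audio_bus| to the output pipeline, scheduled for |playout_time|.
}

// Must be called on the CastEnvironment's MAIN thread; the callback is
// guaranteed to run eventually unless still enqueued at destruction.
cast_receiver->RequestDecodedAudioFrame(base::Bind(&OnDecodedAudioFrame));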
diff --git a/chromium/media/cast/cast_receiver_impl.cc b/chromium/media/cast/cast_receiver_impl.cc
deleted file mode 100644
index e2c004fe963..00000000000
--- a/chromium/media/cast/cast_receiver_impl.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/cast_receiver_impl.h"
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-
-namespace media {
-namespace cast {
-
-// The video and audio receivers should only be called from the main thread.
-// LocalFrameReciever posts tasks to the main thread, making the cast interface
-// thread safe.
-class LocalFrameReceiver : public FrameReceiver {
- public:
- LocalFrameReceiver(scoped_refptr<CastEnvironment> cast_environment,
- AudioReceiver* audio_receiver,
- VideoReceiver* video_receiver)
- : cast_environment_(cast_environment),
- audio_receiver_(audio_receiver),
- video_receiver_(video_receiver) {}
-
- virtual void GetRawVideoFrame(
- const VideoFrameDecodedCallback& callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetRawVideoFrame,
- video_receiver_->AsWeakPtr(), callback));
- }
-
- virtual void GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetEncodedVideoFrame,
- video_receiver_->AsWeakPtr(), callback));
- }
-
- virtual void GetRawAudioFrame(
- int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback& callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
- &AudioReceiver::GetRawAudioFrame, audio_receiver_->AsWeakPtr(),
- number_of_10ms_blocks, desired_frequency, callback));
- }
-
- virtual void GetCodedAudioFrame(
- const AudioFrameEncodedCallback& callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::GetEncodedAudioFrame,
- audio_receiver_->AsWeakPtr(), callback));
- }
-
- protected:
- virtual ~LocalFrameReceiver() {}
-
- private:
- friend class base::RefCountedThreadSafe<LocalFrameReceiver>;
-
- scoped_refptr<CastEnvironment> cast_environment_;
- AudioReceiver* audio_receiver_;
- VideoReceiver* video_receiver_;
-};
-
-// The video and audio receivers should only be called from the main thread.
-class LocalPacketReceiver : public PacketReceiver {
- public:
- LocalPacketReceiver(scoped_refptr<CastEnvironment> cast_environment,
- AudioReceiver* audio_receiver,
- VideoReceiver* video_receiver,
- uint32 ssrc_of_audio_sender,
- uint32 ssrc_of_video_sender)
- : cast_environment_(cast_environment),
- audio_receiver_(audio_receiver),
- video_receiver_(video_receiver),
- ssrc_of_audio_sender_(ssrc_of_audio_sender),
- ssrc_of_video_sender_(ssrc_of_video_sender) {}
-
- virtual void ReceivedPacket(const uint8* packet,
- size_t length,
- const base::Closure callback) OVERRIDE {
- if (length < kMinLengthOfRtcp) {
- // No action; just log and call the callback informing that we are done
- // with the packet.
- VLOG(1) << "Received a packet which is too short " << length;
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
- return;
- }
- uint32 ssrc_of_sender;
- if (!Rtcp::IsRtcpPacket(packet, length)) {
- if (length < kMinLengthOfRtp) {
- // No action; just log and call the callback informing that we are done
- // with the packet.
- VLOG(1) << "Received a RTP packet which is too short " << length;
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
- return;
- }
- ssrc_of_sender = RtpReceiver::GetSsrcOfSender(packet, length);
- } else {
- ssrc_of_sender = Rtcp::GetSsrcOfSender(packet, length);
- }
- if (ssrc_of_sender == ssrc_of_audio_sender_) {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::IncomingPacket,
- audio_receiver_->AsWeakPtr(), packet, length, callback));
- } else if (ssrc_of_sender == ssrc_of_video_sender_) {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::IncomingPacket,
- video_receiver_->AsWeakPtr(), packet, length, callback));
- } else {
- // No action; just log and call the callback informing that we are done
- // with the packet.
- VLOG(1) << "Received a packet with a non matching sender SSRC "
- << ssrc_of_sender;
-
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
- }
- }
-
- protected:
- virtual ~LocalPacketReceiver() {}
-
- private:
- friend class base::RefCountedThreadSafe<LocalPacketReceiver>;
-
- scoped_refptr<CastEnvironment> cast_environment_;
- AudioReceiver* audio_receiver_;
- VideoReceiver* video_receiver_;
- const uint32 ssrc_of_audio_sender_;
- const uint32 ssrc_of_video_sender_;
-};
-
-CastReceiver* CastReceiver::CreateCastReceiver(
- scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
- PacketSender* const packet_sender) {
- return new CastReceiverImpl(cast_environment,
- audio_config,
- video_config,
- packet_sender);
-}
-
-CastReceiverImpl::CastReceiverImpl(
- scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
- PacketSender* const packet_sender)
- : pacer_(cast_environment, packet_sender),
- audio_receiver_(cast_environment, audio_config, &pacer_),
- video_receiver_(cast_environment, video_config, &pacer_),
- frame_receiver_(new LocalFrameReceiver(cast_environment,
- &audio_receiver_,
- &video_receiver_)),
- packet_receiver_(new LocalPacketReceiver(cast_environment,
- &audio_receiver_,
- &video_receiver_,
- audio_config.incoming_ssrc,
- video_config.incoming_ssrc)) {}
-
-CastReceiverImpl::~CastReceiverImpl() {}
-
-scoped_refptr<PacketReceiver> CastReceiverImpl::packet_receiver() {
- return packet_receiver_;
-}
-
-scoped_refptr<FrameReceiver> CastReceiverImpl::frame_receiver() {
- return frame_receiver_;
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/cast_receiver_impl.h b/chromium/media/cast/cast_receiver_impl.h
deleted file mode 100644
index d34a3de6514..00000000000
--- a/chromium/media/cast/cast_receiver_impl.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_CAST_RECEIVER_IMPL_H_
-#define MEDIA_CAST_CAST_RECEIVER_IMPL_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/cast/audio_receiver/audio_receiver.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/video_receiver/video_receiver.h"
-
-namespace media {
-namespace cast {
-
-// This calls is a pure owner class that group all required receive objects
-// together such as pacer, packet receiver, frame receiver, audio and video
-// receivers.
-class CastReceiverImpl : public CastReceiver {
- public:
- CastReceiverImpl(scoped_refptr<CastEnvironment> cast_environment,
- const AudioReceiverConfig& audio_config,
- const VideoReceiverConfig& video_config,
- PacketSender* const packet_sender);
-
- virtual ~CastReceiverImpl();
-
- // All received RTP and RTCP packets for the call should be inserted to this
- // PacketReceiver.
- virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE;
-
- // Interface to get audio and video frames from the CastReceiver.
- virtual scoped_refptr<FrameReceiver> frame_receiver() OVERRIDE;
-
- private:
- PacedSender pacer_;
- AudioReceiver audio_receiver_;
- VideoReceiver video_receiver_;
- scoped_refptr<FrameReceiver> frame_receiver_;
- scoped_refptr<PacketReceiver> packet_receiver_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_CAST_RECEIVER_IMPL_
diff --git a/chromium/media/cast/cast_sender.gyp b/chromium/media/cast/cast_sender.gyp
deleted file mode 100644
index 1f9b07e4a42..00000000000
--- a/chromium/media/cast/cast_sender.gyp
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'includes': [
- 'audio_sender/audio_sender.gypi',
- 'congestion_control/congestion_control.gypi',
- 'video_sender/video_sender.gypi',
- ],
- 'targets': [
- {
- 'target_name': 'cast_sender',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc/',
- ],
- 'sources': [
- 'cast_sender.h',
- 'cast_sender_impl.cc',
- 'cast_sender_impl.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- 'audio_sender',
- 'congestion_control',
- 'net/pacing/paced_sender.gyp:cast_paced_sender',
- 'net/rtp_sender/rtp_sender.gyp:cast_rtp_sender',
- 'rtcp/rtcp.gyp:cast_rtcp',
- 'video_sender',
- ], # dependencies
- },
- ],
-}
diff --git a/chromium/media/cast/cast_sender.h b/chromium/media/cast/cast_sender.h
index abe22f56345..eb3327ff3df 100644
--- a/chromium/media/cast/cast_sender.h
+++ b/chromium/media/cast/cast_sender.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// This is the main interface for the cast sender. All configuration are done
-// at creation.
+// This is the main interface for the cast sender.
//
-// The FrameInput and PacketReciever interfaces should normally be accessed from
-// the IO thread. However they are allowed to be called from any thread.
+// The AudioFrameInput, VideoFrameInput and PacketReceiver interfaces should
+// be accessed from the main thread.
#ifndef MEDIA_CAST_CAST_SENDER_H_
#define MEDIA_CAST_CAST_SENDER_H_
@@ -14,83 +13,83 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/transport/cast_transport_sender.h"
namespace media {
-class AudioBus;
class VideoFrame;
-}
-namespace media {
namespace cast {
+class AudioSender;
+class VideoSender;
-// This Class is thread safe.
-class FrameInput : public base::RefCountedThreadSafe<FrameInput> {
+class VideoFrameInput : public base::RefCountedThreadSafe<VideoFrameInput> {
public:
- // The video_frame must be valid until the callback is called.
- // The callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
+ // Insert video frames into the Cast sender. Frames will be encoded,
+ // packetized and sent to the network.
virtual void InsertRawVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time) = 0;
- // The video_frame must be valid until the callback is called.
- // The callback is called from the main cast thread as soon as
- // the cast sender is done with the frame; it does not mean that the encoded
- // frame has been sent out.
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) = 0;
-
- // The |audio_bus| must be valid until the |done_callback| is called.
- // The callback is called from the main cast thread as soon as the encoder is
- // done with |audio_bus|; it does not mean that the encoded data has been
- // sent out.
- virtual void InsertAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback) = 0;
-
- // The audio_frame must be valid until the callback is called.
- // The callback is called from the main cast thread as soon as
- // the cast sender is done with the frame; it does not mean that the encoded
- // frame has been sent out.
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) = 0;
+ protected:
+ virtual ~VideoFrameInput() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<VideoFrameInput>;
+};
+
+class AudioFrameInput : public base::RefCountedThreadSafe<AudioFrameInput> {
+ public:
+ // Insert audio frames into the Cast sender. Frames will be encoded,
+ // packetized and sent to the network.
+ virtual void InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time) = 0;
protected:
- virtual ~FrameInput() {}
+ virtual ~AudioFrameInput() {}
private:
- friend class base::RefCountedThreadSafe<FrameInput>;
+ friend class base::RefCountedThreadSafe<AudioFrameInput>;
};
-// This Class is thread safe.
-// The provided PacketSender object will always be called form the main cast
-// thread.
+// All methods of CastSender must be called on the main thread.
+// The provided CastTransportSender will also be called on the main thread.
class CastSender {
public:
- static CastSender* CreateCastSender(
+ static scoped_ptr<CastSender> Create(
scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config,
- const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacketSender* const packet_sender);
+ transport::CastTransportSender* const transport_sender);
virtual ~CastSender() {}
- // All audio and video frames for the session should be inserted to this
- // object.
- // Can be called from any thread.
- virtual scoped_refptr<FrameInput> frame_input() = 0;
+ // All video frames for the session should be inserted into this object.
+ virtual scoped_refptr<VideoFrameInput> video_frame_input() = 0;
+
+ // All audio frames for the session should be inserted into this object.
+ virtual scoped_refptr<AudioFrameInput> audio_frame_input() = 0;
// All RTCP packets for the session should be inserted to this object.
- // Can be called from any thread.
- virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
+ // This function and the callback must be called on the main thread.
+ virtual transport::PacketReceiverCallback packet_receiver() = 0;
+
+ // Initialize the audio stack. Must be called in order to send audio frames.
+ // Status of the initialization will be returned on cast_initialization_cb.
+ virtual void InitializeAudio(
+ const AudioSenderConfig& audio_config,
+ const CastInitializationCallback& cast_initialization_cb) = 0;
+
+ // Initialize the video stack. Must be called in order to send video frames.
+ // Status of the initialization will be returned on cast_initialization_cb.
+ virtual void InitializeVideo(
+ const VideoSenderConfig& video_config,
+ const CastInitializationCallback& cast_initialization_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb) = 0;
};
} // namespace cast
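The interface now splits construction from per-stream setup. A hedged sketch of the intended call sequence follows, with all locals (|cast_environment|, |transport_sender|, the configs, and the two create_* callbacks) assumed to exist, and everything running on the MAIN thread:

void OnInitialized(CastInitializationStatus status) {
  // Expect STATUS_AUDIO_INITIALIZED / STATUS_VIDEO_INITIALIZED on success.
}

scoped_ptr<CastSender> sender =
    CastSender::Create(cast_environment, transport_sender);
sender->InitializeAudio(audio_config, base::Bind(&OnInitialized));
sender->InitializeVideo(video_config, base::Bind(&OnInitialized),
                        create_vea_cb, create_video_encode_mem_cb);

// After successful initialization, frames go in through the frame inputs:
sender->audio_frame_input()->InsertAudio(audio_bus.Pass(), recorded_time);
sender->video_frame_input()->InsertRawVideoFrame(video_frame, capture_time);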
diff --git a/chromium/media/cast/cast_sender_impl.cc b/chromium/media/cast/cast_sender_impl.cc
index 69ebd53c6bd..361e4d8dc12 100644
--- a/chromium/media/cast/cast_sender_impl.cc
+++ b/chromium/media/cast/cast_sender_impl.cc
@@ -12,62 +12,136 @@
namespace media {
namespace cast {
-// The LocalFrameInput class posts all incoming frames; audio and video to the
-// main cast thread for processing.
-// This make the cast sender interface thread safe.
-class LocalFrameInput : public FrameInput {
+// The LocalVideoFrameInput class posts all incoming video frames to the main
+// cast thread for processing.
+class LocalVideoFrameInput : public VideoFrameInput {
public:
- LocalFrameInput(scoped_refptr<CastEnvironment> cast_environment,
- base::WeakPtr<AudioSender> audio_sender,
- base::WeakPtr<VideoSender> video_sender)
- : cast_environment_(cast_environment),
- audio_sender_(audio_sender),
- video_sender_(video_sender) {}
+ LocalVideoFrameInput(scoped_refptr<CastEnvironment> cast_environment,
+ base::WeakPtr<VideoSender> video_sender)
+ : cast_environment_(cast_environment), video_sender_(video_sender) {}
virtual void InsertRawVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoSender::InsertRawVideoFrame, video_sender_,
- video_frame, capture_time));
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&VideoSender::InsertRawVideoFrame,
+ video_sender_,
+ video_frame,
+ capture_time));
}
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoSender::InsertCodedVideoFrame, video_sender_,
- video_frame, capture_time, callback));
- }
+ protected:
+ virtual ~LocalVideoFrameInput() {}
- virtual void InsertAudio(const AudioBus* audio_bus,
- const base::TimeTicks& recorded_time,
- const base::Closure& done_callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioSender::InsertAudio, audio_sender_,
- audio_bus, recorded_time, done_callback));
- }
+ private:
+ friend class base::RefCountedThreadSafe<LocalVideoFrameInput>;
+
+ scoped_refptr<CastEnvironment> cast_environment_;
+ base::WeakPtr<VideoSender> video_sender_;
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) OVERRIDE {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioSender::InsertCodedAudioFrame, audio_sender_,
- audio_frame, recorded_time, callback));
+ DISALLOW_COPY_AND_ASSIGN(LocalVideoFrameInput);
+};
+
+// The LocalAudioFrameInput class posts all incoming audio frames to the main
+// cast thread for processing. Therefore frames can be inserted from any thread.
+class LocalAudioFrameInput : public AudioFrameInput {
+ public:
+ LocalAudioFrameInput(scoped_refptr<CastEnvironment> cast_environment,
+ base::WeakPtr<AudioSender> audio_sender)
+ : cast_environment_(cast_environment), audio_sender_(audio_sender) {}
+
+ virtual void InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioSender::InsertAudio,
+ audio_sender_,
+ base::Passed(&audio_bus),
+ recorded_time));
}
protected:
- virtual ~LocalFrameInput() {}
+ virtual ~LocalAudioFrameInput() {}
private:
- friend class base::RefCountedThreadSafe<LocalFrameInput>;
+ friend class base::RefCountedThreadSafe<LocalAudioFrameInput>;
scoped_refptr<CastEnvironment> cast_environment_;
base::WeakPtr<AudioSender> audio_sender_;
- base::WeakPtr<VideoSender> video_sender_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocalAudioFrameInput);
};
-// LocalCastSenderPacketReceiver handle the incoming packets to the cast sender
+scoped_ptr<CastSender> CastSender::Create(
+ scoped_refptr<CastEnvironment> cast_environment,
+ transport::CastTransportSender* const transport_sender) {
+ CHECK(cast_environment);
+ return scoped_ptr<CastSender>(
+ new CastSenderImpl(cast_environment, transport_sender));
+}
+
+CastSenderImpl::CastSenderImpl(
+ scoped_refptr<CastEnvironment> cast_environment,
+ transport::CastTransportSender* const transport_sender)
+ : cast_environment_(cast_environment),
+ transport_sender_(transport_sender),
+ weak_factory_(this) {
+ CHECK(cast_environment);
+}
+
+void CastSenderImpl::InitializeAudio(
+ const AudioSenderConfig& audio_config,
+ const CastInitializationCallback& cast_initialization_cb) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ CHECK(audio_config.use_external_encoder ||
+ cast_environment_->HasAudioThread());
+
+ VLOG(1) << "CastSenderImpl@" << this << "::InitializeAudio()";
+
+ audio_sender_.reset(
+ new AudioSender(cast_environment_, audio_config, transport_sender_));
+
+ const CastInitializationStatus status = audio_sender_->InitializationResult();
+ if (status == STATUS_AUDIO_INITIALIZED) {
+ ssrc_of_audio_sender_ = audio_config.incoming_feedback_ssrc;
+ audio_frame_input_ =
+ new LocalAudioFrameInput(cast_environment_, audio_sender_->AsWeakPtr());
+ }
+ cast_initialization_cb.Run(status);
+}
+
+void CastSenderImpl::InitializeVideo(
+ const VideoSenderConfig& video_config,
+ const CastInitializationCallback& cast_initialization_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ CHECK(video_config.use_external_encoder ||
+ cast_environment_->HasVideoThread());
+
+ VLOG(1) << "CastSenderImpl@" << this << "::InitializeVideo()";
+
+ video_sender_.reset(new VideoSender(cast_environment_,
+ video_config,
+ create_vea_cb,
+ create_video_encode_mem_cb,
+ transport_sender_));
+
+ const CastInitializationStatus status = video_sender_->InitializationResult();
+ if (status == STATUS_VIDEO_INITIALIZED) {
+ ssrc_of_video_sender_ = video_config.incoming_feedback_ssrc;
+ video_frame_input_ =
+ new LocalVideoFrameInput(cast_environment_, video_sender_->AsWeakPtr());
+ }
+ cast_initialization_cb.Run(status);
+}
+
+CastSenderImpl::~CastSenderImpl() {
+ VLOG(1) << "CastSenderImpl@" << this << "::~CastSenderImpl()";
+}
+
+// ReceivedPacket handles the incoming packets to the cast sender. It is only
// expected to receive RTCP feedback packets from the remote cast receiver. The
// class verifies that it is an RTCP packet and, based on the SSRC of the
// incoming packet, routes it to the correct sender; audio or
@@ -92,102 +166,54 @@ class LocalFrameInput : public FrameInput {
// generates multiple streams in one RTP session, for example from
// separate video cameras, each MUST be identified as a different
// SSRC.
-
-class LocalCastSenderPacketReceiver : public PacketReceiver {
- public:
- LocalCastSenderPacketReceiver(scoped_refptr<CastEnvironment> cast_environment,
- base::WeakPtr<AudioSender> audio_sender,
- base::WeakPtr<VideoSender> video_sender,
- uint32 ssrc_of_audio_sender,
- uint32 ssrc_of_video_sender)
- : cast_environment_(cast_environment),
- audio_sender_(audio_sender),
- video_sender_(video_sender),
- ssrc_of_audio_sender_(ssrc_of_audio_sender),
- ssrc_of_video_sender_(ssrc_of_video_sender) {}
-
- virtual void ReceivedPacket(const uint8* packet,
- size_t length,
- const base::Closure callback) OVERRIDE {
- if (!Rtcp::IsRtcpPacket(packet, length)) {
- // We should have no incoming RTP packets.
- // No action; just log and call the callback informing that we are done
- // with the packet.
- VLOG(1) << "Unexpectedly received a RTP packet in the cast sender";
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+void CastSenderImpl::ReceivedPacket(scoped_ptr<Packet> packet) {
+ DCHECK(cast_environment_);
+ size_t length = packet->size();
+ const uint8_t* data = &packet->front();
+ if (!Rtcp::IsRtcpPacket(data, length)) {
+ VLOG(1) << "CastSenderImpl@" << this << "::ReceivedPacket() -- "
+ << "Received an invalid (non-RTCP?) packet in the cast sender.";
+ return;
+ }
+ uint32 ssrc_of_sender = Rtcp::GetSsrcOfSender(data, length);
+ if (ssrc_of_sender == ssrc_of_audio_sender_) {
+ if (!audio_sender_) {
+ NOTREACHED();
return;
}
- uint32 ssrc_of_sender = Rtcp::GetSsrcOfSender(packet, length);
- if (ssrc_of_sender == ssrc_of_audio_sender_) {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioSender::IncomingRtcpPacket, audio_sender_,
- packet, length, callback));
- } else if (ssrc_of_sender == ssrc_of_video_sender_) {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoSender::IncomingRtcpPacket, video_sender_,
- packet, length, callback));
- } else {
- // No action; just log and call the callback informing that we are done
- // with the packet.
- VLOG(1) << "Received a RTCP packet with a non matching sender SSRC "
- << ssrc_of_sender;
-
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioSender::IncomingRtcpPacket,
+ audio_sender_->AsWeakPtr(),
+ base::Passed(&packet)));
+ } else if (ssrc_of_sender == ssrc_of_video_sender_) {
+ if (!video_sender_) {
+ NOTREACHED();
+ return;
}
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&VideoSender::IncomingRtcpPacket,
+ video_sender_->AsWeakPtr(),
+ base::Passed(&packet)));
+ } else {
+ VLOG(1) << "CastSenderImpl@" << this << "::ReceivedPacket() -- "
+ << "Received a RTCP packet with a non matching sender SSRC "
+ << ssrc_of_sender;
}
+}
- protected:
- virtual ~LocalCastSenderPacketReceiver() {}
-
- private:
- friend class base::RefCountedThreadSafe<LocalCastSenderPacketReceiver>;
-
- scoped_refptr<CastEnvironment> cast_environment_;
- base::WeakPtr<AudioSender> audio_sender_;
- base::WeakPtr<VideoSender> video_sender_;
- const uint32 ssrc_of_audio_sender_;
- const uint32 ssrc_of_video_sender_;
-};
-
-CastSender* CastSender::CreateCastSender(
- scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config,
- const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacketSender* const packet_sender) {
- return new CastSenderImpl(cast_environment,
- audio_config,
- video_config,
- video_encoder_controller,
- packet_sender);
+scoped_refptr<AudioFrameInput> CastSenderImpl::audio_frame_input() {
+ return audio_frame_input_;
}
-CastSenderImpl::CastSenderImpl(
- scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config,
- const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacketSender* const packet_sender)
- : pacer_(cast_environment, packet_sender),
- audio_sender_(cast_environment, audio_config, &pacer_),
- video_sender_(cast_environment, video_config, video_encoder_controller,
- &pacer_),
- frame_input_(new LocalFrameInput(cast_environment,
- audio_sender_.AsWeakPtr(),
- video_sender_.AsWeakPtr())),
- packet_receiver_(new LocalCastSenderPacketReceiver(cast_environment,
- audio_sender_.AsWeakPtr(), video_sender_.AsWeakPtr(),
- audio_config.incoming_feedback_ssrc,
- video_config.incoming_feedback_ssrc)) {}
-
-CastSenderImpl::~CastSenderImpl() {}
-
-scoped_refptr<FrameInput> CastSenderImpl::frame_input() {
- return frame_input_;
+scoped_refptr<VideoFrameInput> CastSenderImpl::video_frame_input() {
+ return video_frame_input_;
}
-scoped_refptr<PacketReceiver> CastSenderImpl::packet_receiver() {
- return packet_receiver_;
+transport::PacketReceiverCallback CastSenderImpl::packet_receiver() {
+ return base::Bind(&CastSenderImpl::ReceivedPacket,
+ weak_factory_.GetWeakPtr());
}
} // namespace cast
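Note how packet_receiver() now hands back a weakly-bound callback instead of a ref-counted receiver object, so a destroyed sender simply drops late packets. A hedged wiring sketch (|sender|, |buffer|, |length|, and the transport read loop are hypothetical; Packet is assumed to be the byte-vector type dereferenced via size()/front() above):

transport::PacketReceiverCallback receive_cb = sender->packet_receiver();

// In the transport's read loop, on the MAIN thread:
scoped_ptr<Packet> packet(new Packet(buffer, buffer + length));
receive_cb.Run(packet.Pass());  // Routed by sender SSRC to audio or video.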
diff --git a/chromium/media/cast/cast_sender_impl.h b/chromium/media/cast/cast_sender_impl.h
index 2c5dd222e1a..d09a869712c 100644
--- a/chromium/media/cast/cast_sender_impl.h
+++ b/chromium/media/cast/cast_sender_impl.h
@@ -8,47 +8,64 @@
#include "base/memory/scoped_ptr.h"
#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_sender.h"
-#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/video_sender/video_sender.h"
namespace media {
- class VideoFrame;
-}
+class VideoFrame;
-namespace media {
namespace cast {
-
class AudioSender;
-class PacedSender;
class VideoSender;
-// This calls is a pure owner class that group all required sending objects
-// together such as pacer, packet receiver, frame input, audio and video sender.
+// This class combines all required sending objects such as the audio and video
+// senders, pacer, packet receiver and frame input.
class CastSenderImpl : public CastSender {
public:
CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig& audio_config,
- const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacketSender* const packet_sender);
+ transport::CastTransportSender* const transport_sender);
+
+ virtual void InitializeAudio(
+ const AudioSenderConfig& audio_config,
+ const CastInitializationCallback& cast_initialization_cb) OVERRIDE;
+ virtual void InitializeVideo(
+ const VideoSenderConfig& video_config,
+ const CastInitializationCallback& cast_initialization_cb,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb)
+ OVERRIDE;
virtual ~CastSenderImpl();
- virtual scoped_refptr<FrameInput> frame_input() OVERRIDE;
- virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE;
+ virtual scoped_refptr<AudioFrameInput> audio_frame_input() OVERRIDE;
+ virtual scoped_refptr<VideoFrameInput> video_frame_input() OVERRIDE;
+
+ virtual transport::PacketReceiverCallback packet_receiver() OVERRIDE;
private:
- PacedSender pacer_;
- AudioSender audio_sender_;
- VideoSender video_sender_;
- scoped_refptr<FrameInput> frame_input_;
- scoped_refptr<PacketReceiver> packet_receiver_;
+ void ReceivedPacket(scoped_ptr<Packet> packet);
+
+ CastInitializationCallback initialization_callback_;
+ scoped_ptr<AudioSender> audio_sender_;
+ scoped_ptr<VideoSender> video_sender_;
+ scoped_refptr<AudioFrameInput> audio_frame_input_;
+ scoped_refptr<VideoFrameInput> video_frame_input_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ // The transport sender is owned by the owner of the CastSender, and should be
+ // valid throughout the lifetime of the CastSender.
+ transport::CastTransportSender* const transport_sender_;
+ uint32 ssrc_of_audio_sender_;
+ uint32 ssrc_of_video_sender_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<CastSenderImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastSenderImpl);
};
} // namespace cast
} // namespace media
#endif // MEDIA_CAST_CAST_SENDER_IMPL_H_
-
diff --git a/chromium/media/cast/cast_testing.gypi b/chromium/media/cast/cast_testing.gypi
new file mode 100644
index 00000000000..aef0fbd8c3e
--- /dev/null
+++ b/chromium/media/cast/cast_testing.gypi
@@ -0,0 +1,276 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_test_utility',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_receiver',
+ 'cast_transport',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
+ ],
+ 'sources': [
+ 'test/fake_single_thread_task_runner.cc',
+ 'test/fake_single_thread_task_runner.h',
+ 'test/skewed_single_thread_task_runner.cc',
+ 'test/skewed_single_thread_task_runner.h',
+ 'test/skewed_tick_clock.cc',
+ 'test/skewed_tick_clock.h',
+ 'test/utility/audio_utility.cc',
+ 'test/utility/audio_utility.h',
+ 'test/utility/barcode.cc',
+ 'test/utility/barcode.h',
+ 'test/utility/default_config.cc',
+ 'test/utility/default_config.h',
+ 'test/utility/in_process_receiver.cc',
+ 'test/utility/in_process_receiver.h',
+ 'test/utility/input_builder.cc',
+ 'test/utility/input_builder.h',
+ 'test/utility/net_utility.cc',
+ 'test/utility/net_utility.h',
+ 'test/utility/standalone_cast_environment.cc',
+ 'test/utility/standalone_cast_environment.h',
+ 'test/utility/video_utility.cc',
+ 'test/utility/video_utility.h',
+ 'test/utility/udp_proxy.cc',
+ 'test/utility/udp_proxy.h',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_unittests',
+ 'type': '<(gtest_target_type)',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_receiver',
+ 'cast_rtcp',
+ 'cast_sender',
+ 'cast_test_utility',
+ # Not a true dependency. This is here to make sure the CQ can verify
+ # the tools compile correctly.
+ 'cast_tools',
+ 'cast_transport',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ '<(DEPTH)/net/net.gyp:net',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/base/run_all_unittests.cc',
+ 'audio_sender/audio_encoder_unittest.cc',
+ 'audio_sender/audio_sender_unittest.cc',
+ 'congestion_control/congestion_control_unittest.cc',
+ 'framer/cast_message_builder_unittest.cc',
+ 'framer/frame_buffer_unittest.cc',
+ 'framer/framer_unittest.cc',
+ 'logging/encoding_event_subscriber_unittest.cc',
+ 'logging/serialize_deserialize_test.cc',
+ 'logging/logging_impl_unittest.cc',
+ 'logging/logging_raw_unittest.cc',
+ 'logging/receiver_time_offset_estimator_impl_unittest.cc',
+ 'logging/simple_event_subscriber_unittest.cc',
+ 'logging/stats_event_subscriber_unittest.cc',
+ 'receiver/audio_decoder_unittest.cc',
+ 'receiver/frame_receiver_unittest.cc',
+ 'receiver/video_decoder_unittest.cc',
+ 'rtcp/mock_rtcp_receiver_feedback.cc',
+ 'rtcp/mock_rtcp_receiver_feedback.h',
+ 'rtcp/mock_rtcp_sender_feedback.cc',
+ 'rtcp/mock_rtcp_sender_feedback.h',
+ 'rtcp/rtcp_receiver_unittest.cc',
+ 'rtcp/rtcp_sender_unittest.cc',
+ 'rtcp/rtcp_unittest.cc',
+ 'rtcp/receiver_rtcp_event_subscriber_unittest.cc',
+# TODO(miu): The following two are test utility modules. Rename/move the files.
+ 'rtcp/test_rtcp_packet_builder.cc',
+ 'rtcp/test_rtcp_packet_builder.h',
+ 'rtp_receiver/rtp_receiver_defines.h',
+ 'rtp_receiver/mock_rtp_payload_feedback.cc',
+ 'rtp_receiver/mock_rtp_payload_feedback.h',
+ 'rtp_receiver/receiver_stats_unittest.cc',
+ 'rtp_receiver/rtp_parser/test/rtp_packet_builder.cc',
+ 'rtp_receiver/rtp_parser/rtp_parser_unittest.cc',
+ 'test/end2end_unittest.cc',
+ 'test/fake_receiver_time_offset_estimator.cc',
+ 'test/fake_receiver_time_offset_estimator.h',
+ 'test/fake_single_thread_task_runner.cc',
+ 'test/fake_single_thread_task_runner.h',
+ 'test/fake_video_encode_accelerator.cc',
+ 'test/fake_video_encode_accelerator.h',
+ 'test/utility/audio_utility_unittest.cc',
+ 'test/utility/barcode_unittest.cc',
+ 'transport/cast_transport_sender_impl_unittest.cc',
+ 'transport/pacing/mock_paced_packet_sender.cc',
+ 'transport/pacing/mock_paced_packet_sender.h',
+ 'transport/pacing/paced_sender_unittest.cc',
+ 'transport/rtp_sender/packet_storage/packet_storage_unittest.cc',
+ 'transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
+ 'transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
+ 'transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
+ 'transport/transport/udp_transport_unittest.cc',
+ 'video_sender/external_video_encoder_unittest.cc',
+ 'video_sender/video_encoder_impl_unittest.cc',
+ 'video_sender/video_sender_unittest.cc',
+ ], # source
+ },
+ {
+ 'target_name': 'cast_benchmarks',
+ 'type': '<(gtest_target_type)',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_receiver',
+ 'cast_rtcp',
+ 'cast_sender',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ '<(DEPTH)/net/net.gyp:net',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'test/cast_benchmarks.cc',
+ 'test/fake_single_thread_task_runner.cc',
+ 'test/fake_single_thread_task_runner.h',
+ 'test/fake_video_encode_accelerator.cc',
+ 'test/fake_video_encode_accelerator.h',
+ 'test/utility/test_util.cc',
+ 'test/utility/test_util.h',
+ ], # source
+ 'conditions': [
+ ['os_posix==1 and OS!="mac" and OS!="ios" and use_allocator!="none"',
+ {
+ 'dependencies': [
+ '<(DEPTH)/base/allocator/allocator.gyp:allocator',
+ ],
+ }
+ ],
+ ],
+ },
+ {
+ # This is a target for the collection of cast development tools.
+ # They are built on bots but not shipped.
+ 'target_name': 'cast_tools',
+ 'type': 'none',
+ 'dependencies': [
+ 'cast_receiver_app',
+ 'cast_sender_app',
+ 'udp_proxy',
+ ],
+ },
+ {
+ 'target_name': 'cast_receiver_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_receiver',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/receiver.cc',
+ ],
+ 'conditions': [
+ ['OS == "linux" and use_x11==1', {
+ 'dependencies': [
+ '<(DEPTH)/build/linux/system.gyp:x11',
+ '<(DEPTH)/build/linux/system.gyp:xext',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/linux_output_window.cc',
+ '<(DEPTH)/media/cast/test/linux_output_window.h',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'cast_sender_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_sender',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/sender.cc',
+ ],
+ },
+ {
+ 'target_name': 'generate_barcode_video',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_test_utility',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ ],
+ 'sources': [
+ 'test/utility/generate_barcode_video.cc',
+ ],
+ },
+ {
+ 'target_name': 'generate_timecode_audio',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_test_utility',
+ 'cast_transport',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ ],
+ 'sources': [
+ 'test/utility/generate_timecode_audio.cc',
+ ],
+ },
+ {
+ 'target_name': 'udp_proxy',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_test_utility',
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/media/media.gyp:media',
+ ],
+ 'sources': [
+ 'test/utility/udp_proxy_main.cc',
+ ],
+ }
+ ], # targets
+}
diff --git a/chromium/media/cast/congestion_control/congestion_control.cc b/chromium/media/cast/congestion_control/congestion_control.cc
index 35687e7477a..d24e0ac3d0f 100644
--- a/chromium/media/cast/congestion_control/congestion_control.cc
+++ b/chromium/media/cast/congestion_control/congestion_control.cc
@@ -2,6 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// The purpose of this file is to determine what bitrate to use for mirroring.
+// Ideally this should be as high as possible, without causing any frames to
+// arrive late.
+
+// The current algorithm is to measure how much bandwidth we've been using
+// recently. We also keep track of how much data has been queued up for sending
+// in a virtual "buffer" (this virtual buffer represents all the buffers between
+// the sender and the receiver, including retransmissions and so forth.)
+// If we estimate that our virtual buffer is mostly empty, we try to use
+// more bandwidth than our recent usage, otherwise we use less.
+
#include "media/cast/congestion_control/congestion_control.h"
#include "base/logging.h"
@@ -11,104 +22,176 @@
namespace media {
namespace cast {
-static const int64 kCongestionControlMinChangeIntervalMs = 10;
-static const int64 kCongestionControlMaxChangeIntervalMs = 100;
+// This means that we *try* to keep our buffer 90% empty.
+// If it is less full, we increase the bandwidth; if it is more full,
+// we decrease the bandwidth. Making this smaller makes the
+// congestion control more aggressive.
+static const double kTargetEmptyBufferFraction = 0.9;
-// At 10 ms RTT TCP Reno would ramp 1500 * 8 * 100 = 1200 Kbit/s.
-// NACK is sent after a maximum of 10 ms.
-static const int kCongestionControlMaxBitrateIncreasePerMillisecond = 1200;
+// This is the size of our history in frames. Larger values make the
+// congestion control adapt more slowly.
+static const size_t kHistorySize = 100;
-static const int64 kMaxElapsedTimeMs = kCongestionControlMaxChangeIntervalMs;
+CongestionControl::FrameStats::FrameStats() : frame_size(0) {
+}
CongestionControl::CongestionControl(base::TickClock* clock,
- float congestion_control_back_off,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
- uint32 start_bitrate)
+ size_t max_unacked_frames)
: clock_(clock),
- congestion_control_back_off_(congestion_control_back_off),
max_bitrate_configured_(max_bitrate_configured),
min_bitrate_configured_(min_bitrate_configured),
- bitrate_(start_bitrate) {
- DCHECK_GT(congestion_control_back_off, 0.0f) << "Invalid config";
- DCHECK_LT(congestion_control_back_off, 1.0f) << "Invalid config";
+ last_frame_stats_(static_cast<uint32>(-1)),
+ last_acked_frame_(static_cast<uint32>(-1)),
+ last_encoded_frame_(static_cast<uint32>(-1)),
+ history_size_(max_unacked_frames + kHistorySize),
+ acked_bits_in_history_(0) {
DCHECK_GE(max_bitrate_configured, min_bitrate_configured) << "Invalid config";
- DCHECK_GE(max_bitrate_configured, start_bitrate) << "Invalid config";
- DCHECK_GE(start_bitrate, min_bitrate_configured) << "Invalid config";
+ frame_stats_.resize(2);
+ base::TimeTicks now = clock->NowTicks();
+ frame_stats_[0].ack_time = now;
+ frame_stats_[0].sent_time = now;
+ frame_stats_[1].ack_time = now;
+ DCHECK(!frame_stats_[0].ack_time.is_null());
}
-CongestionControl::~CongestionControl() {
+CongestionControl::~CongestionControl() {}
+
+void CongestionControl::UpdateRtt(base::TimeDelta rtt) {
+ rtt_ = base::TimeDelta::FromSecondsD(
+ (rtt_.InSecondsF() * 7 + rtt.InSecondsF()) / 8);
}
-bool CongestionControl::OnAck(base::TimeDelta rtt, uint32* new_bitrate) {
- base::TimeTicks now = clock_->NowTicks();
+// Calculate how much "dead air" there is between two frames.
+base::TimeDelta CongestionControl::DeadTime(const FrameStats& a,
+ const FrameStats& b) {
+ if (b.sent_time > a.ack_time) {
+ return b.sent_time - a.ack_time;
+ } else {
+ return base::TimeDelta();
+ }
+}
+
+double CongestionControl::CalculateSafeBitrate() {
+ double transmit_time =
+ (GetFrameStats(last_acked_frame_)->ack_time -
+ frame_stats_.front().sent_time - dead_time_in_history_).InSecondsF();
- // First feedback?
- if (time_last_increase_.is_null()) {
- time_last_increase_ = now;
- time_last_decrease_ = now;
- return false;
+ if (acked_bits_in_history_ == 0 || transmit_time <= 0.0) {
+ return min_bitrate_configured_;
}
- // Are we at the max bitrate?
- if (max_bitrate_configured_ == bitrate_) return false;
-
- // Make sure RTT is never less than 1 ms.
- rtt = std::max(rtt, base::TimeDelta::FromMilliseconds(1));
-
- base::TimeDelta elapsed_time = std::min(now - time_last_increase_,
- base::TimeDelta::FromMilliseconds(kMaxElapsedTimeMs));
- base::TimeDelta change_interval = std::max(rtt,
- base::TimeDelta::FromMilliseconds(kCongestionControlMinChangeIntervalMs));
- change_interval = std::min(change_interval,
- base::TimeDelta::FromMilliseconds(kCongestionControlMaxChangeIntervalMs));
-
- // Have enough time have passed?
- if (elapsed_time < change_interval) return false;
-
- time_last_increase_ = now;
-
- // One packet per RTT multiplied by the elapsed time fraction.
- // 1500 * 8 * (1000 / rtt_ms) * (elapsed_time_ms / 1000) =>
- // 1500 * 8 * elapsed_time_ms / rtt_ms.
- uint32 bitrate_increase = (1500 * 8 * elapsed_time.InMilliseconds()) /
- rtt.InMilliseconds();
- uint32 max_bitrate_increase =
- kCongestionControlMaxBitrateIncreasePerMillisecond *
- elapsed_time.InMilliseconds();
- bitrate_increase = std::min(max_bitrate_increase, bitrate_increase);
- *new_bitrate = std::min(bitrate_increase + bitrate_, max_bitrate_configured_);
- bitrate_ = *new_bitrate;
- return true;
+ return acked_bits_in_history_ / std::max(transmit_time, 1E-3);
}
-bool CongestionControl::OnNack(base::TimeDelta rtt, uint32* new_bitrate) {
- base::TimeTicks now = clock_->NowTicks();
+CongestionControl::FrameStats* CongestionControl::GetFrameStats(
+ uint32 frame_id) {
+ int32 offset = static_cast<int32>(frame_id - last_frame_stats_);
+ DCHECK_LT(offset, static_cast<int32>(kHistorySize));
+ if (offset > 0) {
+ frame_stats_.resize(frame_stats_.size() + offset);
+ last_frame_stats_ += offset;
+ offset = 0;
+ }
+ while (frame_stats_.size() > history_size_) {
+ DCHECK_GT(frame_stats_.size(), 1UL);
+ DCHECK(!frame_stats_[0].ack_time.is_null());
+ acked_bits_in_history_ -= frame_stats_[0].frame_size;
+ dead_time_in_history_ -= DeadTime(frame_stats_[0], frame_stats_[1]);
+ DCHECK_GE(acked_bits_in_history_, 0UL);
+ VLOG(2) << "DT: " << dead_time_in_history_.InSecondsF();
+ DCHECK_GE(dead_time_in_history_.InSecondsF(), 0.0);
+ frame_stats_.pop_front();
+ }
+ offset += frame_stats_.size() - 1;
+ if (offset < 0 || offset >= static_cast<int32>(frame_stats_.size())) {
+ return NULL;
+ }
+ return &frame_stats_[offset];
+}
- // First feedback?
- if (time_last_decrease_.is_null()) {
- time_last_increase_ = now;
- time_last_decrease_ = now;
- return false;
+void CongestionControl::AckFrame(uint32 frame_id, base::TimeTicks when) {
+ FrameStats* frame_stats = GetFrameStats(last_acked_frame_);
+ while (IsNewerFrameId(frame_id, last_acked_frame_)) {
+ FrameStats* last_frame_stats = frame_stats;
+ last_acked_frame_++;
+ frame_stats = GetFrameStats(last_acked_frame_);
+ DCHECK(frame_stats);
+ frame_stats->ack_time = when;
+ acked_bits_in_history_ += frame_stats->frame_size;
+ dead_time_in_history_ += DeadTime(*last_frame_stats, *frame_stats);
}
- base::TimeDelta elapsed_time = std::min(now - time_last_decrease_,
- base::TimeDelta::FromMilliseconds(kMaxElapsedTimeMs));
- base::TimeDelta change_interval = std::max(rtt,
- base::TimeDelta::FromMilliseconds(kCongestionControlMinChangeIntervalMs));
- change_interval = std::min(change_interval,
- base::TimeDelta::FromMilliseconds(kCongestionControlMaxChangeIntervalMs));
+}
- // Have enough time have passed?
- if (elapsed_time < change_interval) return false;
+void CongestionControl::SendFrameToTransport(uint32 frame_id,
+ size_t frame_size,
+ base::TimeTicks when) {
+ last_encoded_frame_ = frame_id;
+ FrameStats* frame_stats = GetFrameStats(frame_id);
+ DCHECK(frame_stats);
+ frame_stats->frame_size = frame_size;
+ frame_stats->sent_time = when;
+}
- time_last_decrease_ = now;
- time_last_increase_ = now;
+base::TimeTicks CongestionControl::EstimatedAckTime(uint32 frame_id,
+ double bitrate) {
+ FrameStats* frame_stats = GetFrameStats(frame_id);
+ DCHECK(frame_stats);
+ if (frame_stats->ack_time.is_null()) {
+ DCHECK(frame_stats->frame_size) << "frame_id: " << frame_id;
+ base::TimeTicks ret = EstimatedSendingTime(frame_id, bitrate);
+ ret += base::TimeDelta::FromSecondsD(frame_stats->frame_size / bitrate);
+ ret += rtt_;
+ base::TimeTicks now = clock_->NowTicks();
+ if (ret < now) {
+ // This is a little counter-intuitive, but it seems to work.
+ // Basically, when we estimate that the ACK should have already happened,
+ // we figure out how long ago it should have happened and guess that the
+ // ACK will happen half of that time in the future. This will cause some
+ // over-estimation when acks are late, which is actually what we want.
+ return now + (now - ret) / 2;
+ } else {
+ return ret;
+ }
+ } else {
+ return frame_stats->ack_time;
+ }
+}
- *new_bitrate = std::max(
- static_cast<uint32>(bitrate_ * congestion_control_back_off_),
- min_bitrate_configured_);
+base::TimeTicks CongestionControl::EstimatedSendingTime(uint32 frame_id,
+ double bitrate) {
+ FrameStats* frame_stats = GetFrameStats(frame_id);
+ DCHECK(frame_stats);
+ base::TimeTicks ret = EstimatedAckTime(frame_id - 1, bitrate) - rtt_;
+ if (frame_stats->sent_time.is_null()) {
+ // Not sent yet, but we can't start sending it in the past.
+ return std::max(ret, clock_->NowTicks());
+ } else {
+ return std::max(ret, frame_stats->sent_time);
+ }
+}
- bitrate_ = *new_bitrate;
- return true;
+uint32 CongestionControl::GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay) {
+ double safe_bitrate = CalculateSafeBitrate();
+ // Estimate when we might start sending the next frame.
+ base::TimeDelta time_to_catch_up =
+ playout_time -
+ EstimatedSendingTime(last_encoded_frame_ + 1, safe_bitrate);
+
+ double empty_buffer_fraction =
+ time_to_catch_up.InSecondsF() / playout_delay.InSecondsF();
+ empty_buffer_fraction = std::min(empty_buffer_fraction, 1.0);
+ empty_buffer_fraction = std::max(empty_buffer_fraction, 0.0);
+
+ uint32 bits_per_second = static_cast<uint32>(
+ safe_bitrate * empty_buffer_fraction / kTargetEmptyBufferFraction);
+ VLOG(3) << " FBR:" << (bits_per_second / 1E6)
+ << " EBF:" << empty_buffer_fraction
+ << " SBR:" << (safe_bitrate / 1E6);
+ bits_per_second = std::max(bits_per_second, min_bitrate_configured_);
+ bits_per_second = std::min(bits_per_second, max_bitrate_configured_);
+ return bits_per_second;
}
} // namespace cast
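Two things are worth calling out in the new algorithm. UpdateRtt() keeps a 7/8-weighted running average of the RTT, much like TCP's smoothed RTT, and GetBitrate() reduces to one expression: scale the measured safe bitrate by how empty the virtual buffer is, relative to the 90% target. A standalone sketch of just that final formula, with hypothetical inputs:

#include <algorithm>
#include <stdint.h>

// bits_per_second = safe_bitrate * empty_fraction / kTargetEmptyBufferFraction,
// clamped to the configured [min, max] range -- as in GetBitrate() above.
uint32_t BitrateForNextFrame(double safe_bitrate,
                             double empty_buffer_fraction,  // in [0, 1]
                             uint32_t min_bitrate,
                             uint32_t max_bitrate) {
  const double kTargetEmptyBufferFraction = 0.9;
  empty_buffer_fraction = std::min(std::max(empty_buffer_fraction, 0.0), 1.0);
  uint32_t bps = static_cast<uint32_t>(
      safe_bitrate * empty_buffer_fraction / kTargetEmptyBufferFraction);
  return std::min(std::max(bps, min_bitrate), max_bitrate);
}
// E.g., a 4 Mbit/s safe bitrate with a buffer that is 45% empty yields
// 4e6 * 0.45 / 0.9 = 2 Mbit/s before clamping.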
diff --git a/chromium/media/cast/congestion_control/congestion_control.gypi b/chromium/media/cast/congestion_control/congestion_control.gypi
deleted file mode 100644
index 20a57ca2a30..00000000000
--- a/chromium/media/cast/congestion_control/congestion_control.gypi
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'congestion_control',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'congestion_control.h',
- 'congestion_control.cc',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- ],
- },
- ],
-}
-
diff --git a/chromium/media/cast/congestion_control/congestion_control.h b/chromium/media/cast/congestion_control/congestion_control.h
index df88151eb8f..54622ab114d 100644
--- a/chromium/media/cast/congestion_control/congestion_control.h
+++ b/chromium/media/cast/congestion_control/congestion_control.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
#define MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
+#include <deque>
+
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
@@ -16,29 +18,65 @@ namespace cast {
class CongestionControl {
public:
CongestionControl(base::TickClock* clock,
- float congestion_control_back_off,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
- uint32 start_bitrate);
+ size_t max_unacked_frames);
virtual ~CongestionControl();
- // Don't call OnAck if the same message contain a NACK.
- // Returns true if the bitrate have changed.
- bool OnAck(base::TimeDelta rtt_ms, uint32* new_bitrate);
+ void UpdateRtt(base::TimeDelta rtt);
+
+ // Called when an encoded frame is sent to the transport.
+ void SendFrameToTransport(uint32 frame_id,
+ size_t frame_size,
+ base::TimeTicks when);
- // Returns true if the bitrate have changed.
- bool OnNack(base::TimeDelta rtt_ms, uint32* new_bitrate);
+ // Called when we receive an ACK for a frame.
+ void AckFrame(uint32 frame_id, base::TimeTicks when);
+ // Returns the bitrate we should use for the next frame.
+ uint32 GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay);
private:
+ struct FrameStats {
+ FrameStats();
+ // Time this frame was sent to the transport.
+ base::TimeTicks sent_time;
+ // Time this frame was acked.
+ base::TimeTicks ack_time;
+ // Size of encoded frame in bits.
+ size_t frame_size;
+ };
+
+ // Calculate how much "dead air" (idle time) there is between two frames.
+ static base::TimeDelta DeadTime(const FrameStats& a, const FrameStats& b);
+ // Get the FrameStats for a given |frame_id|.
+ // Note: Older FrameStats will be removed automatically.
+ FrameStats* GetFrameStats(uint32 frame_id);
+ // Calculate the safe bitrate. This is based on how much we've been
+ // sending in the past.
+ double CalculateSafeBitrate();
+
+ // For a given frame, calculate when it might be acked.
+ // (Or return the time it was acked, if it was.)
+ base::TimeTicks EstimatedAckTime(uint32 frame_id, double bitrate);
+ // Calculate when we start sending the data for a given frame.
+ // This is done by calculating when we were done sending the previous
+ // frame, but obviously can't be earlier than |sent_time| (if known).
+ base::TimeTicks EstimatedSendingTime(uint32 frame_id, double bitrate);
+
base::TickClock* const clock_; // Not owned by this class.
- const float congestion_control_back_off_;
const uint32 max_bitrate_configured_;
const uint32 min_bitrate_configured_;
- uint32 bitrate_;
- base::TimeTicks time_last_increase_;
- base::TimeTicks time_last_decrease_;
+ std::deque<FrameStats> frame_stats_;
+ uint32 last_frame_stats_;
+ uint32 last_acked_frame_;
+ uint32 last_encoded_frame_;
+ base::TimeDelta rtt_;
+ size_t history_size_;
+ size_t acked_bits_in_history_;
+ base::TimeDelta dead_time_in_history_;
DISALLOW_COPY_AND_ASSIGN(CongestionControl);
};
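
The rewritten header above replaces the additive-increase/back-off interface (OnAck/OnNack) with a history-based one: the sender reports RTT and per-frame send/ack times, and asks for a bitrate given a playout deadline. A minimal sketch of the intended call pattern, assuming a sender loop that owns the controller; everything except the CongestionControl methods themselves is invented for illustration:

    // Construction mirrors the new constructor signature in this diff.
    CongestionControl control(clock, kMaxBitrateConfigured,
                              kMinBitrateConfigured, max_unacked_frames);

    // Feed it measurements as they arrive (e.g. from RTCP reports).
    control.UpdateRtt(rtt_estimate);

    // Ask what bitrate the next frame should be encoded at, given when it
    // must play out.
    uint32 bitrate = control.GetBitrate(playout_time, playout_delay);

    // Record the frame leaving the sender, and later its ACK.
    control.SendFrameToTransport(frame_id, frame_size_in_bits,
                                 clock->NowTicks());
    control.AckFrame(frame_id, clock->NowTicks());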
diff --git a/chromium/media/cast/congestion_control/congestion_control_unittest.cc b/chromium/media/cast/congestion_control/congestion_control_unittest.cc
index 108d2b340b7..5745eab21df 100644
--- a/chromium/media/cast/congestion_control/congestion_control_unittest.cc
+++ b/chromium/media/cast/congestion_control/congestion_control_unittest.cc
@@ -2,9 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -12,169 +15,106 @@ namespace cast {
static const uint32 kMaxBitrateConfigured = 5000000;
static const uint32 kMinBitrateConfigured = 500000;
-static const uint32 kStartBitrate = 2000000;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-static const int64 kRttMs = 20;
-static const int64 kAckRateMs = 33;
+static const int64 kStartMillisecond = INT64_C(12345678900000);
+static const double kTargetEmptyBufferFraction = 0.9;
class CongestionControlTest : public ::testing::Test {
protected:
CongestionControlTest()
- : congestion_control_(&testing_clock_,
- kDefaultCongestionControlBackOff,
- kMaxBitrateConfigured,
- kMinBitrateConfigured,
- kStartBitrate) {
+ : task_runner_(new test::FakeSingleThreadTaskRunner(&testing_clock_)) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ congestion_control_.reset(new CongestionControl(
+ &testing_clock_, kMaxBitrateConfigured, kMinBitrateConfigured, 10));
}
- // Returns the last bitrate of the run.
- uint32 RunWithOneLossEventPerSecond(int fps, int rtt_ms,
- int runtime_in_seconds) {
- const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(rtt_ms);
- const base::TimeDelta ack_rate =
- base::TimeDelta::FromMilliseconds(GG_INT64_C(1000) / fps);
- uint32 new_bitrate = 0;
- EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
-
- for (int seconds = 0; seconds < runtime_in_seconds; ++seconds) {
- for (int i = 1; i < fps; ++i) {
- testing_clock_.Advance(ack_rate);
- congestion_control_.OnAck(rtt, &new_bitrate);
- }
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- }
- return new_bitrate;
- }
-
- base::SimpleTestTickClock testing_clock_;
- CongestionControl congestion_control_;
-};
-
-TEST_F(CongestionControlTest, Max) {
- uint32 new_bitrate = 0;
- const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- const base::TimeDelta ack_rate =
- base::TimeDelta::FromMilliseconds(kAckRateMs);
- EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
-
- uint32 expected_increase_bitrate = 0;
-
- // Expected time is 5 seconds. 500000 - 2000000 = 5 * 1500 * 8 * (1000 / 20).
- for (int i = 0; i < 151; ++i) {
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- expected_increase_bitrate += 1500 * 8 * kAckRateMs / kRttMs;
- EXPECT_EQ(kStartBitrate + expected_increase_bitrate, new_bitrate);
- }
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- EXPECT_EQ(kMaxBitrateConfigured, new_bitrate);
-}
-
-TEST_F(CongestionControlTest, Min) {
- uint32 new_bitrate = 0;
- const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- const base::TimeDelta ack_rate =
- base::TimeDelta::FromMilliseconds(kAckRateMs);
- EXPECT_FALSE(congestion_control_.OnNack(rtt, &new_bitrate));
-
- uint32 expected_decrease_bitrate = kStartBitrate;
-
- // Expected number is 10. 2000 * 0.875^10 <= 500.
- for (int i = 0; i < 10; ++i) {
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- expected_decrease_bitrate = static_cast<uint32>(
- expected_decrease_bitrate * kDefaultCongestionControlBackOff);
- EXPECT_EQ(expected_decrease_bitrate, new_bitrate);
+ void AckFrame(uint32 frame_id) {
+ congestion_control_->AckFrame(frame_id, testing_clock_.NowTicks());
}
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- EXPECT_EQ(kMinBitrateConfigured, new_bitrate);
-}
-TEST_F(CongestionControlTest, Timing) {
- const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- const base::TimeDelta ack_rate =
- base::TimeDelta::FromMilliseconds(kAckRateMs);
- uint32 new_bitrate = 0;
- uint32 expected_bitrate = kStartBitrate;
-
- EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
-
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- expected_bitrate += 1500 * 8 * kAckRateMs / kRttMs;
- EXPECT_EQ(expected_bitrate, new_bitrate);
-
- // We should back immediately.
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- expected_bitrate = static_cast<uint32>(
- expected_bitrate * kDefaultCongestionControlBackOff);
- EXPECT_EQ(expected_bitrate, new_bitrate);
-
- // Less than one RTT have passed don't back again.
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- EXPECT_FALSE(congestion_control_.OnNack(rtt, &new_bitrate));
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- expected_bitrate = static_cast<uint32>(
- expected_bitrate * kDefaultCongestionControlBackOff);
- EXPECT_EQ(expected_bitrate, new_bitrate);
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- expected_bitrate += 1500 * 8 * 20 / kRttMs;
- EXPECT_EQ(expected_bitrate, new_bitrate);
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- expected_bitrate += 1500 * 8 * 20 / kRttMs;
- EXPECT_EQ(expected_bitrate, new_bitrate);
-
- // Test long elapsed time (300 ms).
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(300));
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- expected_bitrate += 1500 * 8 * 100 / kRttMs;
- EXPECT_EQ(expected_bitrate, new_bitrate);
-
- // Test many short elapsed time (1 ms).
- for (int i = 0; i < 19; ++i) {
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1));
- EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+ void Run(uint32 frames,
+ size_t frame_size,
+ base::TimeDelta rtt,
+ base::TimeDelta frame_delay,
+ base::TimeDelta ack_time) {
+ for (frame_id_ = 0; frame_id_ < frames; frame_id_++) {
+ congestion_control_->UpdateRtt(rtt);
+ congestion_control_->SendFrameToTransport(
+ frame_id_, frame_size, testing_clock_.NowTicks());
+ task_runner_->PostDelayedTask(FROM_HERE,
+ base::Bind(&CongestionControlTest::AckFrame,
+ base::Unretained(this),
+ frame_id_),
+ ack_time);
+ task_runner_->Sleep(frame_delay);
+ }
}
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1));
- EXPECT_TRUE(congestion_control_.OnAck(rtt, &new_bitrate));
- expected_bitrate += 1500 * 8 * 20 / kRttMs;
- EXPECT_EQ(expected_bitrate, new_bitrate);
-}
-TEST_F(CongestionControlTest, Convergence24fps) {
- EXPECT_GE(RunWithOneLossEventPerSecond(24, kRttMs, 100),
- GG_UINT32_C(3000000));
-}
+ base::SimpleTestTickClock testing_clock_;
+ scoped_ptr<CongestionControl> congestion_control_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ uint32 frame_id_;
-TEST_F(CongestionControlTest, Convergence24fpsLongRtt) {
- EXPECT_GE(RunWithOneLossEventPerSecond(24, 100, 100),
- GG_UINT32_C(500000));
-}
+ DISALLOW_COPY_AND_ASSIGN(CongestionControlTest);
+};
-TEST_F(CongestionControlTest, Convergence60fps) {
- EXPECT_GE(RunWithOneLossEventPerSecond(60, kRttMs, 100),
- GG_UINT32_C(3500000));
+TEST_F(CongestionControlTest, SimpleRun) {
+ uint32 frame_delay = 33;
+ uint32 frame_size = 10000 * 8;
+ Run(500,
+ frame_size,
+ base::TimeDelta::FromMilliseconds(10),
+ base::TimeDelta::FromMilliseconds(frame_delay),
+ base::TimeDelta::FromMilliseconds(45));
+ // Empty the buffer.
+ task_runner_->Sleep(base::TimeDelta::FromMilliseconds(100));
+
+ uint32 safe_bitrate = frame_size * 1000 / frame_delay;
+ uint32 bitrate = congestion_control_->GetBitrate(
+ testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
+ base::TimeDelta::FromMilliseconds(300));
+ EXPECT_NEAR(
+ safe_bitrate / kTargetEmptyBufferFraction, bitrate, safe_bitrate * 0.05);
+
+ bitrate = congestion_control_->GetBitrate(
+ testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(200),
+ base::TimeDelta::FromMilliseconds(300));
+ EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 2 / 3,
+ bitrate,
+ safe_bitrate * 0.05);
+
+ bitrate = congestion_control_->GetBitrate(
+ testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(100),
+ base::TimeDelta::FromMilliseconds(300));
+ EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 1 / 3,
+ bitrate,
+ safe_bitrate * 0.05);
+
+ // Add a large (100ms) frame.
+ congestion_control_->SendFrameToTransport(
+ frame_id_++, safe_bitrate * 100 / 1000, testing_clock_.NowTicks());
+
+ // Results should show that we have ~200ms to send.
+ bitrate = congestion_control_->GetBitrate(
+ testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
+ base::TimeDelta::FromMilliseconds(300));
+ EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 2 / 3,
+ bitrate,
+ safe_bitrate * 0.05);
+
+ // Add another large (100ms) frame.
+ congestion_control_->SendFrameToTransport(
+ frame_id_++, safe_bitrate * 100 / 1000, testing_clock_.NowTicks());
+
+ // Results should show that we have ~100ms to send.
+ bitrate = congestion_control_->GetBitrate(
+ testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
+ base::TimeDelta::FromMilliseconds(300));
+ EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 1 / 3,
+ bitrate,
+ safe_bitrate * 0.05);
}
-TEST_F(CongestionControlTest, Convergence60fpsLongRtt) {
- EXPECT_GE(RunWithOneLossEventPerSecond(60, 100, 100),
- GG_UINT32_C(500000));
-}
} // namespace cast
} // namespace media
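
A back-of-envelope check of the SimpleRun expectations above (the arithmetic is mine, not from the source): with 10000-byte frames sent every 33 ms, the steady-state "safe" bitrate and the expected GetBitrate results work out roughly as:

    frame_size   = 10000 * 8         = 80000 bits
    safe_bitrate = 80000 * 1000 / 33 ≈ 2.42 Mbit/s
    // Full 300 ms window available:
    bitrate ≈ safe_bitrate / 0.9             ≈ 2.69 Mbit/s
    // Only 200 ms (2/3) or 100 ms (1/3) of the window left:
    bitrate ≈ (safe_bitrate / 0.9) * 2 / 3   ≈ 1.80 Mbit/s
    bitrate ≈ (safe_bitrate / 0.9) * 1 / 3   ≈ 0.90 Mbit/s

Each queued 100 ms frame consumes about a third of the 300 ms window, which is why the two SendFrameToTransport calls step the expected bitrate down by one third each.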
diff --git a/chromium/media/cast/framer/cast_message_builder.cc b/chromium/media/cast/framer/cast_message_builder.cc
index 7d89f744315..f3473f96902 100644
--- a/chromium/media/cast/framer/cast_message_builder.cc
+++ b/chromium/media/cast/framer/cast_message_builder.cc
@@ -23,7 +23,6 @@ CastMessageBuilder::CastMessageBuilder(
decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
max_unacked_frames_(max_unacked_frames),
cast_msg_(media_ssrc),
- waiting_for_key_frame_(true),
slowing_down_ack_(false),
acked_last_frame_(true),
last_acked_frame_id_(kStartFrameId) {
@@ -32,65 +31,61 @@ CastMessageBuilder::CastMessageBuilder(
CastMessageBuilder::~CastMessageBuilder() {}
-void CastMessageBuilder::CompleteFrameReceived(uint32 frame_id,
- bool is_key_frame) {
+void CastMessageBuilder::CompleteFrameReceived(uint32 frame_id) {
+ DCHECK_GE(static_cast<int32>(frame_id - last_acked_frame_id_), 0);
+ VLOG(2) << "CompleteFrameReceived: " << frame_id;
if (last_update_time_.is_null()) {
// Our first update.
last_update_time_ = clock_->NowTicks();
}
- if (waiting_for_key_frame_) {
- if (!is_key_frame) {
- // Ignore that we have received this complete frame since we are
- // waiting on a key frame.
- return;
- }
- waiting_for_key_frame_ = false;
- cast_msg_.missing_frames_and_packets_.clear();
- cast_msg_.ack_frame_id_ = frame_id;
- last_update_time_ = clock_->NowTicks();
- // We might have other complete frames waiting after we receive the last
- // packet in the key-frame.
- UpdateAckMessage();
- } else {
- if (!UpdateAckMessage()) return;
- BuildPacketList();
+ if (!UpdateAckMessage(frame_id)) {
+ return;
}
+ BuildPacketList();
+
// Send cast message.
- VLOG(1) << "Send cast message Ack:" << static_cast<int>(frame_id);
+ VLOG(2) << "Send cast message Ack:" << static_cast<int>(frame_id);
cast_feedback_->CastFeedback(cast_msg_);
}
-bool CastMessageBuilder::UpdateAckMessage() {
+bool CastMessageBuilder::UpdateAckMessage(uint32 frame_id) {
if (!decoder_faster_than_max_frame_rate_) {
int complete_frame_count = frame_id_map_->NumberOfCompleteFrames();
if (complete_frame_count > max_unacked_frames_) {
// We have too many frames pending in our framer; slow down ACK.
- slowing_down_ack_ = true;
+ if (!slowing_down_ack_) {
+ slowing_down_ack_ = true;
+ ack_queue_.push_back(last_acked_frame_id_);
+ }
} else if (complete_frame_count <= 1) {
// We are down to one or less frames in our framer; ACK normally.
slowing_down_ack_ = false;
+ ack_queue_.clear();
}
}
+
if (slowing_down_ack_) {
// We are slowing down acknowledgment by acknowledging every other frame.
- if (acked_last_frame_) {
- acked_last_frame_ = false;
- } else {
- acked_last_frame_ = true;
- last_acked_frame_id_++;
- // Note: frame skipping and slowdown ACK is not supported at the same
- // time; and it's not needed since we can skip frames to catch up.
+ // Note: frame skipping and slowdown ACK are not supported at the same
+ // time; it's not needed since we can skip frames to catch up.
+ if (!ack_queue_.empty() && ack_queue_.back() == frame_id) {
+ return false;
}
- } else {
- uint32 frame_id = frame_id_map_->LastContinuousFrame();
-
- // Is it a new frame?
- if (last_acked_frame_id_ == frame_id) return false;
+ ack_queue_.push_back(frame_id);
+ if (!acked_last_frame_) {
+ ack_queue_.pop_front();
+ }
+ frame_id = ack_queue_.front();
+ }
- last_acked_frame_id_ = frame_id;
- acked_last_frame_ = true;
+ acked_last_frame_ = false;
+ // Is it a new frame?
+ if (last_acked_frame_id_ == frame_id) {
+ return false;
}
+ acked_last_frame_ = true;
+ last_acked_frame_id_ = frame_id;
cast_msg_.ack_frame_id_ = last_acked_frame_id_;
cast_msg_.missing_frames_and_packets_.clear();
last_update_time_ = clock_->NowTicks();
@@ -100,23 +95,24 @@ bool CastMessageBuilder::UpdateAckMessage() {
bool CastMessageBuilder::TimeToSendNextCastMessage(
base::TimeTicks* time_to_send) {
// We haven't received any packets.
- if (last_update_time_.is_null() && frame_id_map_->Empty()) return false;
+ if (last_update_time_.is_null() && frame_id_map_->Empty())
+ return false;
- *time_to_send = last_update_time_ +
- base::TimeDelta::FromMilliseconds(kCastMessageUpdateIntervalMs);
+ *time_to_send = last_update_time_ + base::TimeDelta::FromMilliseconds(
+ kCastMessageUpdateIntervalMs);
return true;
}
void CastMessageBuilder::UpdateCastMessage() {
RtcpCastMessage message(media_ssrc_);
- if (!UpdateCastMessageInternal(&message)) return;
+ if (!UpdateCastMessageInternal(&message))
+ return;
// Send cast message.
cast_feedback_->CastFeedback(message);
}
void CastMessageBuilder::Reset() {
- waiting_for_key_frame_ = true;
cast_msg_.ack_frame_id_ = kStartFrameId;
cast_msg_.missing_frames_and_packets_.clear();
time_last_nacked_map_.clear();
@@ -138,9 +134,10 @@ bool CastMessageBuilder::UpdateCastMessageInternal(RtcpCastMessage* message) {
}
last_update_time_ = now;
- UpdateAckMessage(); // Needed to cover when a frame is skipped.
+ // Needed to cover when a frame is skipped.
+ UpdateAckMessage(last_acked_frame_id_);
BuildPacketList();
- *message = cast_msg_;
+ message->Copy(cast_msg_);
return true;
}
@@ -151,7 +148,8 @@ void CastMessageBuilder::BuildPacketList() {
cast_msg_.missing_frames_and_packets_.clear();
// Are we missing packets?
- if (frame_id_map_->Empty()) return;
+ if (frame_id_map_->Empty())
+ return;
uint32 newest_frame_id = frame_id_map_->NewestFrameId();
uint32 next_expected_frame_id = cast_msg_.ack_frame_id_ + 1;
@@ -173,8 +171,8 @@ void CastMessageBuilder::BuildPacketList() {
PacketIdSet missing;
if (frame_id_map_->FrameExists(next_expected_frame_id)) {
bool last_frame = (newest_frame_id == next_expected_frame_id);
- frame_id_map_->GetMissingPackets(next_expected_frame_id, last_frame,
- &missing);
+ frame_id_map_->GetMissingPackets(
+ next_expected_frame_id, last_frame, &missing);
if (!missing.empty()) {
time_last_nacked_map_[next_expected_frame_id] = now;
cast_msg_.missing_frames_and_packets_.insert(
diff --git a/chromium/media/cast/framer/cast_message_builder.h b/chromium/media/cast/framer/cast_message_builder.h
index b76a196111c..9db88d4a990 100644
--- a/chromium/media/cast/framer/cast_message_builder.h
+++ b/chromium/media/cast/framer/cast_message_builder.h
@@ -7,6 +7,7 @@
#ifndef MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
#define MEDIA_CAST_FRAMER_CAST_MESSAGE_BUILDER_H_
+#include <deque>
#include <map>
#include "media/cast/framer/frame_id_map.h"
@@ -30,13 +31,13 @@ class CastMessageBuilder {
int max_unacked_frames);
~CastMessageBuilder();
- void CompleteFrameReceived(uint32 frame_id, bool is_key_frame);
+ void CompleteFrameReceived(uint32 frame_id);
bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
void UpdateCastMessage();
void Reset();
private:
- bool UpdateAckMessage();
+ bool UpdateAckMessage(uint32 frame_id);
void BuildPacketList();
bool UpdateCastMessageInternal(RtcpCastMessage* message);
@@ -51,13 +52,13 @@ class CastMessageBuilder {
RtcpCastMessage cast_msg_;
base::TimeTicks last_update_time_;
- bool waiting_for_key_frame_;
TimeLastNackMap time_last_nacked_map_;
bool slowing_down_ack_;
bool acked_last_frame_;
uint32 last_acked_frame_id_;
+ std::deque<uint32> ack_queue_;
DISALLOW_COPY_AND_ASSIGN(CastMessageBuilder);
};
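
The ack_queue_ added above drives the every-other-frame slowdown. A hand trace of UpdateAckMessage as rewritten in this diff (my reading, starting with last_acked_frame_id_ == 2 and acked_last_frame_ == true when slowdown begins):

    // Slowdown begins: ack_queue_ = [2]
    // CompleteFrameReceived(3): queue [2, 3]; front is 2, already acked
    //                           -> no ack sent, acked_last_frame_ = false.
    // CompleteFrameReceived(4): queue [2, 3, 4], pop front -> [3, 4];
    //                           ack frame 3, acked_last_frame_ = true.
    // CompleteFrameReceived(5): queue [3, 4, 5]; front is 3, already acked
    //                           -> no ack sent.
    // CompleteFrameReceived(6): pop front -> [4, 5, 6]; ack frame 4.

So the ACKs trail the completed frames by a growing queue and fire on every other completion, matching the alternating triggered()/not-triggered expectations in the SlowDownAck test.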
diff --git a/chromium/media/cast/framer/cast_message_builder_unittest.cc b/chromium/media/cast/framer/cast_message_builder_unittest.cc
index f4b708c90ef..ef75162a086 100644
--- a/chromium/media/cast/framer/cast_message_builder_unittest.cc
+++ b/chromium/media/cast/framer/cast_message_builder_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/framer/cast_message_builder.h"
@@ -12,21 +14,18 @@
namespace media {
namespace cast {
+namespace {
static const uint32 kSsrc = 0x1234;
static const uint32 kShortTimeIncrementMs = 10;
static const uint32 kLongTimeIncrementMs = 40;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
-namespace {
typedef std::map<uint32, size_t> MissingPacketsMap;
class NackFeedbackVerification : public RtpPayloadFeedback {
public:
NackFeedbackVerification()
- : triggered_(false),
- missing_packets_(),
- last_frame_acked_(0) {}
-
+ : triggered_(false), missing_packets_(), last_frame_acked_(0) {}
virtual void CastFeedback(const RtcpCastMessage& cast_feedback) OVERRIDE {
EXPECT_EQ(kSsrc, cast_feedback.media_ssrc_);
@@ -43,10 +42,10 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
if ((frame_it->second.size() == 1) &&
(*frame_it->second.begin() == kRtcpCastAllPacketsLost)) {
missing_packets_.insert(
- std::make_pair(frame_it->first, kRtcpCastAllPacketsLost));
+ std::make_pair(frame_it->first, kRtcpCastAllPacketsLost));
} else {
- missing_packets_.insert(
- std::make_pair(frame_it->first, frame_it->second.size()));
+ missing_packets_.insert(
+ std::make_pair(frame_it->first, frame_it->second.size()));
}
++frame_it;
}
@@ -56,14 +55,15 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
size_t num_missing_packets(uint32 frame_id) {
MissingPacketsMap::iterator it;
it = missing_packets_.find(frame_id);
- if (it == missing_packets_.end()) return 0;
+ if (it == missing_packets_.end())
+ return 0;
return it->second;
}
// Holds value for one call.
bool triggered() {
- bool ret_val = triggered_;
+ bool ret_val = triggered_;
triggered_ = false;
return ret_val;
}
@@ -74,6 +74,8 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
bool triggered_;
MissingPacketsMap missing_packets_; // Missing packets per frame.
uint32 last_frame_acked_;
+
+ DISALLOW_COPY_AND_ASSIGN(NackFeedbackVerification);
};
} // namespace
@@ -86,7 +88,7 @@ class CastMessageBuilderTest : public ::testing::Test {
kSsrc,
true,
0)) {
- rtp_header_.webrtc.header.ssrc = kSsrc;
+ rtp_header_.sender_ssrc = kSsrc;
rtp_header_.is_key_frame = false;
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
@@ -94,33 +96,23 @@ class CastMessageBuilderTest : public ::testing::Test {
virtual ~CastMessageBuilderTest() {}
- void SetFrameId(uint32 frame_id) {
+ void SetFrameIds(uint32 frame_id, uint32 reference_frame_id) {
rtp_header_.frame_id = frame_id;
+ rtp_header_.reference_frame_id = reference_frame_id;
}
- void SetPacketId(uint16 packet_id) {
- rtp_header_.packet_id = packet_id;
- }
+ void SetPacketId(uint16 packet_id) { rtp_header_.packet_id = packet_id; }
void SetMaxPacketId(uint16 max_packet_id) {
rtp_header_.max_packet_id = max_packet_id;
}
- void SetKeyFrame(bool is_key) {
- rtp_header_.is_key_frame = is_key;
- }
-
- void SetReferenceFrameId(uint32 reference_frame_id) {
- rtp_header_.is_reference = true;
- rtp_header_.reference_frame_id = reference_frame_id;
- }
+ void SetKeyFrame(bool is_key) { rtp_header_.is_key_frame = is_key; }
void InsertPacket() {
- bool complete = false;
- frame_id_map_.InsertPacket(rtp_header_, &complete);
- if (complete) {
- cast_msg_builder_->CompleteFrameReceived(rtp_header_.frame_id,
- rtp_header_.is_key_frame);
+ PacketType packet_type = frame_id_map_.InsertPacket(rtp_header_);
+ if (packet_type == kNewPacketCompletingFrame) {
+ cast_msg_builder_->CompleteFrameReceived(rtp_header_.frame_id);
}
cast_msg_builder_->UpdateCastMessage();
}
@@ -139,30 +131,12 @@ class CastMessageBuilderTest : public ::testing::Test {
RtpCastHeader rtp_header_;
FrameIdMap frame_id_map_;
base::SimpleTestTickClock testing_clock_;
-};
-TEST_F(CastMessageBuilderTest, StartWithAKeyFrame) {
- SetFrameId(3);
- SetPacketId(0);
- SetMaxPacketId(0);
- InsertPacket();
- // Should not trigger ack.
- EXPECT_FALSE(feedback_.triggered());
- SetFrameId(5);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(true);
- InsertPacket();
- frame_id_map_.RemoveOldFrames(5); // Simulate 5 being pulled for rendering.
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- cast_msg_builder_->UpdateCastMessage();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5u, feedback_.last_frame_acked());
-}
+ DISALLOW_COPY_AND_ASSIGN(CastMessageBuilderTest);
+};
TEST_F(CastMessageBuilderTest, OneFrameNackList) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(4);
SetMaxPacketId(10);
InsertPacket();
@@ -178,13 +152,13 @@ TEST_F(CastMessageBuilderTest, OneFrameNackList) {
}
TEST_F(CastMessageBuilderTest, CompleteFrameMissing) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(2);
SetMaxPacketId(5);
InsertPacket();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(2);
+ SetFrameIds(2, 1);
SetPacketId(2);
SetMaxPacketId(5);
InsertPacket();
@@ -192,55 +166,30 @@ TEST_F(CastMessageBuilderTest, CompleteFrameMissing) {
EXPECT_EQ(kRtcpCastAllPacketsLost, feedback_.num_missing_packets(1));
}
-TEST_F(CastMessageBuilderTest, FastForwardAck) {
- SetFrameId(1);
- SetPacketId(0);
- SetMaxPacketId(0);
- InsertPacket();
- EXPECT_FALSE(feedback_.triggered());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(2);
- SetPacketId(0);
- SetMaxPacketId(0);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(0);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(true);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(2u, feedback_.last_frame_acked());
-}
-
TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
- SetFrameId(1);
+ SetFrameIds(1, 0);
SetPacketId(0);
SetMaxPacketId(1);
InsertPacket();
EXPECT_FALSE(feedback_.triggered());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(2);
+ SetFrameIds(2, 1);
SetPacketId(0);
SetMaxPacketId(0);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(3);
+ SetFrameIds(3, 2);
SetPacketId(0);
SetMaxPacketId(5);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
+ EXPECT_EQ(2u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(5);
+ SetFrameIds(5, 5);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(true);
@@ -253,7 +202,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
EXPECT_EQ(5u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
- SetFrameId(1);
+ SetFrameIds(1, 0);
SetPacketId(1);
SetMaxPacketId(1);
InsertPacket();
@@ -265,44 +214,8 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
EXPECT_EQ(5u, feedback_.last_frame_acked());
}
-TEST_F(CastMessageBuilderTest, WrapFastForward) {
- SetFrameId(254);
- SetPacketId(0);
- SetMaxPacketId(1);
- SetKeyFrame(true);
- InsertPacket();
- EXPECT_FALSE(feedback_.triggered());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(255);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(false);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253u, feedback_.last_frame_acked());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(256);
- SetPacketId(0);
- SetMaxPacketId(0);
- SetKeyFrame(false);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253u, feedback_.last_frame_acked());
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(254);
- SetPacketId(1);
- SetMaxPacketId(1);
- SetKeyFrame(true);
- InsertPacket();
- EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(256u, feedback_.last_frame_acked());
-}
-
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(20);
SetKeyFrame(true);
@@ -316,7 +229,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
}
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(20);
SetKeyFrame(true);
@@ -329,7 +242,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
EXPECT_EQ(4u, feedback_.num_missing_packets(0));
- SetFrameId(1);
+ SetFrameIds(1, 0);
SetMaxPacketId(2);
SetPacketId(0);
SetKeyFrame(false);
@@ -341,7 +254,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
}
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextKey) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(20);
SetKeyFrame(true);
@@ -354,7 +267,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextKey) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
EXPECT_EQ(4u, feedback_.num_missing_packets(0));
- SetFrameId(1);
+ SetFrameIds(1, 1);
SetMaxPacketId(0);
SetPacketId(0);
SetKeyFrame(true);
@@ -378,7 +291,7 @@ TEST_F(CastMessageBuilderTest, Reset) {
}
TEST_F(CastMessageBuilderTest, DeltaAfterReset) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(true);
@@ -388,7 +301,7 @@ TEST_F(CastMessageBuilderTest, DeltaAfterReset) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
cast_msg_builder_->Reset();
- SetFrameId(1);
+ SetFrameIds(1, 0);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(true);
@@ -396,7 +309,7 @@ TEST_F(CastMessageBuilderTest, DeltaAfterReset) {
}
TEST_F(CastMessageBuilderTest, BasicRps) {
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(true);
@@ -405,12 +318,11 @@ TEST_F(CastMessageBuilderTest, BasicRps) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
EXPECT_EQ(0u, feedback_.last_frame_acked());
- SetFrameId(3);
+ SetFrameIds(3, 0);
SetKeyFrame(false);
- SetReferenceFrameId(0);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0u, feedback_.last_frame_acked());
+ EXPECT_EQ(3u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
frame_id_map_.RemoveOldFrames(3); // Simulate 3 being pulled for rendering.
@@ -421,7 +333,7 @@ TEST_F(CastMessageBuilderTest, BasicRps) {
TEST_F(CastMessageBuilderTest, InOrderRps) {
// Create a pattern - skip to rps, and don't look back.
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(true);
@@ -430,7 +342,7 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
EXPECT_EQ(0u, feedback_.last_frame_acked());
- SetFrameId(1);
+ SetFrameIds(1, 0);
SetPacketId(0);
SetMaxPacketId(1);
SetKeyFrame(false);
@@ -438,11 +350,10 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
EXPECT_FALSE(feedback_.triggered());
- SetFrameId(3);
+ SetFrameIds(3, 0);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(false);
- SetReferenceFrameId(0);
InsertPacket();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
@@ -453,7 +364,7 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
EXPECT_TRUE(feedback_.triggered());
EXPECT_EQ(3u, feedback_.last_frame_acked());
// Make an old frame complete - should not trigger an ack.
- SetFrameId(1);
+ SetFrameIds(1, 0);
SetPacketId(1);
SetMaxPacketId(1);
SetKeyFrame(false);
@@ -466,7 +377,7 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
TEST_F(CastMessageBuilderTest, SlowDownAck) {
SetDecoderSlowerThanMaxFrameRate(3);
- SetFrameId(0);
+ SetFrameIds(0, 0);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(true);
@@ -479,30 +390,34 @@ TEST_F(CastMessageBuilderTest, SlowDownAck) {
for (frame_id = 1; frame_id < 3; ++frame_id) {
EXPECT_TRUE(feedback_.triggered());
EXPECT_EQ(frame_id - 1, feedback_.last_frame_acked());
- SetFrameId(frame_id);
+ SetFrameIds(frame_id, frame_id - 1);
InsertPacket();
testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
}
// We should now have entered the slowdown ACK state.
uint32 expected_frame_id = 1;
for (; frame_id < 10; ++frame_id) {
- if (frame_id % 2) ++expected_frame_id;
- EXPECT_TRUE(feedback_.triggered());
+ if (frame_id % 2) {
+ ++expected_frame_id;
+ EXPECT_TRUE(feedback_.triggered());
+ } else {
+ EXPECT_FALSE(feedback_.triggered());
+ }
EXPECT_EQ(expected_frame_id, feedback_.last_frame_acked());
- SetFrameId(frame_id);
+ SetFrameIds(frame_id, frame_id - 1);
InsertPacket();
testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
+ base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
}
- EXPECT_TRUE(feedback_.triggered());
+ EXPECT_FALSE(feedback_.triggered());
EXPECT_EQ(expected_frame_id, feedback_.last_frame_acked());
// Simulate frame_id being pulled for rendering.
frame_id_map_.RemoveOldFrames(frame_id);
// We should now leave the slowdown ACK state.
++frame_id;
- SetFrameId(frame_id);
+ SetFrameIds(frame_id, frame_id - 1);
InsertPacket();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
diff --git a/chromium/media/cast/framer/frame_buffer.cc b/chromium/media/cast/framer/frame_buffer.cc
index ca9f1dedd28..0b6fa8332cd 100644
--- a/chromium/media/cast/framer/frame_buffer.cc
+++ b/chromium/media/cast/framer/frame_buffer.cc
@@ -28,21 +28,17 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
frame_id_ = rtp_header.frame_id;
max_packet_id_ = rtp_header.max_packet_id;
is_key_frame_ = rtp_header.is_key_frame;
- if (rtp_header.is_reference) {
- last_referenced_frame_id_ = rtp_header.reference_frame_id;
- } else {
- last_referenced_frame_id_ = rtp_header.frame_id - 1;
- }
-
- rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
+ if (is_key_frame_)
+ DCHECK_EQ(rtp_header.frame_id, rtp_header.reference_frame_id);
+ last_referenced_frame_id_ = rtp_header.reference_frame_id;
+ rtp_timestamp_ = rtp_header.rtp_timestamp;
}
// Is this the correct frame?
- if (rtp_header.frame_id != frame_id_) return;
+ if (rtp_header.frame_id != frame_id_)
+ return;
// Insert every packet only once.
if (packets_.find(rtp_header.packet_id) != packets_.end()) {
- VLOG(3) << "Packet already received, ignored: frame "
- << frame_id_ << ", packet " << rtp_header.packet_id;
return;
}
@@ -52,8 +48,8 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
// Insert the packet.
retval.first->second.resize(payload_size);
- std::copy(payload_data, payload_data + payload_size,
- retval.first->second.begin());
+ std::copy(
+ payload_data, payload_data + payload_size, retval.first->second.begin());
++num_packets_received_;
total_data_size_ += payload_size;
@@ -63,45 +59,27 @@ bool FrameBuffer::Complete() const {
return num_packets_received_ - 1 == max_packet_id_;
}
-bool FrameBuffer::GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
- uint32* rtp_timestamp) const {
- if (!Complete()) return false;
-
- *rtp_timestamp = rtp_timestamp_;
-
- // Frame is complete -> construct.
- audio_frame->frame_id = frame_id_;
-
- // Build the data vector.
- audio_frame->data.clear();
- audio_frame->data.reserve(total_data_size_);
- PacketMap::const_iterator it;
- for (it = packets_.begin(); it != packets_.end(); ++it) {
- audio_frame->data.insert(audio_frame->data.end(),
- it->second.begin(), it->second.end());
- }
- return true;
-}
-
-bool FrameBuffer::GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- uint32* rtp_timestamp) const {
- if (!Complete()) return false;
-
- *rtp_timestamp = rtp_timestamp_;
+bool FrameBuffer::AssembleEncodedFrame(transport::EncodedFrame* frame) const {
+ if (!Complete())
+ return false;
// Frame is complete -> construct.
- video_frame->key_frame = is_key_frame_;
- video_frame->frame_id = frame_id_;
- video_frame->last_referenced_frame_id = last_referenced_frame_id_;
+ if (is_key_frame_)
+ frame->dependency = transport::EncodedFrame::KEY;
+ else if (frame_id_ == last_referenced_frame_id_)
+ frame->dependency = transport::EncodedFrame::INDEPENDENT;
+ else
+ frame->dependency = transport::EncodedFrame::DEPENDENT;
+ frame->frame_id = frame_id_;
+ frame->referenced_frame_id = last_referenced_frame_id_;
+ frame->rtp_timestamp = rtp_timestamp_;
// Build the data vector.
- video_frame->data.clear();
- video_frame->data.reserve(total_data_size_);
+ frame->data.clear();
+ frame->data.reserve(total_data_size_);
PacketMap::const_iterator it;
- for (it = packets_.begin(); it != packets_.end(); ++it) {
- video_frame->data.insert(video_frame->data.end(),
- it->second.begin(), it->second.end());
- }
+ for (it = packets_.begin(); it != packets_.end(); ++it)
+ frame->data.insert(frame->data.end(), it->second.begin(), it->second.end());
return true;
}
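
AssembleEncodedFrame above folds the old audio/video getters into one and derives the new dependency field instead of a key_frame bool. Restating the mapping from the code, in check order:

    // is_key_frame_                          -> transport::EncodedFrame::KEY
    // frame_id_ == last_referenced_frame_id_ -> EncodedFrame::INDEPENDENT
    //                                            (self-referencing delta)
    // otherwise                              -> EncodedFrame::DEPENDENT

The order matters: key frames also self-reference (see the DCHECK in InsertPacket above), so the KEY check must come first.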
diff --git a/chromium/media/cast/framer/frame_buffer.h b/chromium/media/cast/framer/frame_buffer.h
index b99f2b2582d..d4d5dedbbde 100644
--- a/chromium/media/cast/framer/frame_buffer.h
+++ b/chromium/media/cast/framer/frame_buffer.h
@@ -25,11 +25,11 @@ class FrameBuffer {
const RtpCastHeader& rtp_header);
bool Complete() const;
- bool GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
- uint32* rtp_timestamp) const;
-
- bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- uint32* rtp_timestamp) const;
+ // If a frame is complete, sets the frame IDs and RTP timestamp in |frame|,
+ // and also copies the data from all packets into the data field in |frame|.
+ // Returns true if the frame was complete; false if incomplete and |frame|
+ // remains unchanged.
+ bool AssembleEncodedFrame(transport::EncodedFrame* frame) const;
bool is_key_frame() const { return is_key_frame_; }
diff --git a/chromium/media/cast/framer/frame_buffer_unittest.cc b/chromium/media/cast/framer/frame_buffer_unittest.cc
index fb14da39f7f..d6844f3e952 100644
--- a/chromium/media/cast/framer/frame_buffer_unittest.cc
+++ b/chromium/media/cast/framer/frame_buffer_unittest.cc
@@ -10,42 +10,45 @@ namespace cast {
class FrameBufferTest : public ::testing::Test {
protected:
- FrameBufferTest() {}
+ FrameBufferTest() {
+ payload_.assign(kMaxIpPacketSize, 0);
+ }
virtual ~FrameBufferTest() {}
- virtual void SetUp() {
- payload_.assign(kIpPacketSize, 0);
-
- // Build a default one packet frame - populate webrtc header.
- rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0;
- rtp_header_.packet_id = 0;
- rtp_header_.max_packet_id = 0;
- rtp_header_.is_reference = false;
- rtp_header_.reference_frame_id = 0;
- }
-
FrameBuffer buffer_;
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameBufferTest);
};
+TEST_F(FrameBufferTest, OnePacketInsertSanity) {
+ rtp_header_.rtp_timestamp = 3000;
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = 5;
+ rtp_header_.reference_frame_id = 5;
+ buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(5u, frame.frame_id);
+ EXPECT_EQ(5u, frame.referenced_frame_id);
+ EXPECT_EQ(3000u, frame.rtp_timestamp);
+}
+
TEST_F(FrameBufferTest, EmptyBuffer) {
EXPECT_FALSE(buffer_.Complete());
- EXPECT_FALSE(buffer_.is_key_frame());
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
- EXPECT_FALSE(buffer_.GetEncodedVideoFrame(&frame, &rtp_timestamp));
+ transport::EncodedFrame frame;
+ EXPECT_FALSE(buffer_.AssembleEncodedFrame(&frame));
}
TEST_F(FrameBufferTest, DefaultOnePacketFrame) {
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(buffer_.Complete());
EXPECT_FALSE(buffer_.is_key_frame());
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame, &rtp_timestamp));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(payload_.size(), frame.data.size());
}
@@ -60,13 +63,12 @@ TEST_F(FrameBufferTest, MultiplePacketFrame) {
++rtp_header_.packet_id;
EXPECT_TRUE(buffer_.Complete());
EXPECT_TRUE(buffer_.is_key_frame());
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame, &rtp_timestamp));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(3 * payload_.size(), frame.data.size());
}
-TEST_F(FrameBufferTest, InCompleteFrame) {
+TEST_F(FrameBufferTest, IncompleteFrame) {
rtp_header_.max_packet_id = 4;
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
++rtp_header_.packet_id;
diff --git a/chromium/media/cast/framer/frame_id_map.cc b/chromium/media/cast/framer/frame_id_map.cc
index bd9b943371c..b4389fd5323 100644
--- a/chromium/media/cast/framer/frame_id_map.cc
+++ b/chromium/media/cast/framer/frame_id_map.cc
@@ -18,28 +18,27 @@ FrameInfo::FrameInfo(uint32 frame_id,
frame_id_(frame_id),
referenced_frame_id_(referenced_frame_id),
max_received_packet_id_(0) {
- if (max_packet_id > 0) {
- // Create the set with all packets missing.
- for (uint16 i = 0; i <= max_packet_id; i++) {
- missing_packets_.insert(i);
- }
+ // Create the set with all packets missing.
+ for (uint16 i = 0; i <= max_packet_id; i++) {
+ missing_packets_.insert(i);
}
}
FrameInfo::~FrameInfo() {}
-bool FrameInfo::InsertPacket(uint16 packet_id) {
+PacketType FrameInfo::InsertPacket(uint16 packet_id) {
+ if (missing_packets_.find(packet_id) == missing_packets_.end()) {
+ return kDuplicatePacket;
+ }
// Update the last received packet id.
if (IsNewerPacketId(packet_id, max_received_packet_id_)) {
max_received_packet_id_ = packet_id;
}
missing_packets_.erase(packet_id);
- return missing_packets_.empty();
+ return missing_packets_.empty() ? kNewPacketCompletingFrame : kNewPacket;
}
-bool FrameInfo::Complete() const {
- return missing_packets_.empty();
-}
+bool FrameInfo::Complete() const { return missing_packets_.empty(); }
void FrameInfo::GetMissingPackets(bool newest_frame,
PacketIdSet* missing_packets) const {
@@ -53,35 +52,29 @@ void FrameInfo::GetMissingPackets(bool newest_frame,
}
}
-
FrameIdMap::FrameIdMap()
: waiting_for_key_(true),
last_released_frame_(kStartFrameId),
- newest_frame_id_(kStartFrameId) {
-}
+ newest_frame_id_(kStartFrameId) {}
FrameIdMap::~FrameIdMap() {}
-bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
+PacketType FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header) {
uint32 frame_id = rtp_header.frame_id;
uint32 reference_frame_id;
- if (rtp_header.is_reference) {
- reference_frame_id = rtp_header.reference_frame_id;
- } else {
- reference_frame_id = static_cast<uint32>(frame_id - 1);
- }
+ reference_frame_id = rtp_header.reference_frame_id;
if (rtp_header.is_key_frame && waiting_for_key_) {
last_released_frame_ = static_cast<uint32>(frame_id - 1);
waiting_for_key_ = false;
}
- VLOG(1) << "InsertPacket frame:" << frame_id
+ VLOG(3) << "InsertPacket frame:" << frame_id
<< " packet:" << static_cast<int>(rtp_header.packet_id)
<< " max packet:" << static_cast<int>(rtp_header.max_packet_id);
if (IsOlderFrameId(frame_id, last_released_frame_) && !waiting_for_key_) {
- return false;
+ return kTooOldPacket;
}
// Update the last received frame id.
@@ -91,6 +84,7 @@ bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
// Does this packet belong to a new frame?
FrameMap::iterator it = frame_map_.find(frame_id);
+ PacketType packet_type;
if (it == frame_map_.end()) {
// New frame.
linked_ptr<FrameInfo> frame_info(new FrameInfo(frame_id,
@@ -100,12 +94,12 @@ bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
std::pair<FrameMap::iterator, bool> retval =
frame_map_.insert(std::make_pair(frame_id, frame_info));
- *complete = retval.first->second->InsertPacket(rtp_header.packet_id);
+ packet_type = retval.first->second->InsertPacket(rtp_header.packet_id);
} else {
// Insert packet to existing frame.
- *complete = it->second->InsertPacket(rtp_header.packet_id);
+ packet_type = it->second->InsertPacket(rtp_header.packet_id);
}
- return true;
+ return packet_type;
}
void FrameIdMap::RemoveOldFrames(uint32 frame_id) {
@@ -129,9 +123,7 @@ void FrameIdMap::Clear() {
newest_frame_id_ = kStartFrameId;
}
-uint32 FrameIdMap::NewestFrameId() const {
- return newest_frame_id_;
-}
+uint32 FrameIdMap::NewestFrameId() const { return newest_frame_id_; }
bool FrameIdMap::NextContinuousFrame(uint32* frame_id) const {
FrameMap::const_iterator it;
@@ -145,6 +137,22 @@ bool FrameIdMap::NextContinuousFrame(uint32* frame_id) const {
return false;
}
+bool FrameIdMap::HaveMultipleDecodableFrames() const {
+ // Find the oldest decodable frame.
+ FrameMap::const_iterator it;
+ bool found_one = false;
+ for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
+ if (it->second->Complete() && DecodableFrame(it->second.get())) {
+ if (found_one) {
+ return true;
+ } else {
+ found_one = true;
+ }
+ }
+ }
+ return false;
+}
+
uint32 FrameIdMap::LastContinuousFrame() const {
uint32 last_continuous_frame_id = last_released_frame_;
uint32 next_expected_frame = last_released_frame_;
@@ -154,8 +162,10 @@ uint32 FrameIdMap::LastContinuousFrame() const {
do {
next_expected_frame++;
it = frame_map_.find(next_expected_frame);
- if (it == frame_map_.end()) break;
- if (!it->second->Complete()) break;
+ if (it == frame_map_.end())
+ break;
+ if (!it->second->Complete())
+ break;
// We found the next continuous frame.
last_continuous_frame_id = it->first;
@@ -163,52 +173,26 @@ uint32 FrameIdMap::LastContinuousFrame() const {
return last_continuous_frame_id;
}
-bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint32* frame_id) const {
- // First check if we have continuous frames.
- if (NextContinuousFrame(frame_id)) return true;
-
- // Find the oldest frame.
- FrameMap::const_iterator it_best_match = frame_map_.end();
- FrameMap::const_iterator it;
-
- // Find first complete frame.
- for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
- if (it->second->Complete()) {
- it_best_match = it;
- break;
- }
- }
- if (it_best_match == frame_map_.end()) return false; // No complete frame.
-
- ++it;
- for (; it != frame_map_.end(); ++it) {
- if (it->second->Complete() &&
- IsOlderFrameId(it->first, it_best_match->first)) {
- it_best_match = it;
- }
- }
- *frame_id = it_best_match->first;
- return true;
-}
-
-bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const {
+bool FrameIdMap::NextFrameAllowingSkippingFrames(uint32* frame_id) const {
// Find the oldest decodable frame.
FrameMap::const_iterator it_best_match = frame_map_.end();
FrameMap::const_iterator it;
for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
- if (it->second->Complete() && DecodableVideoFrame(it->second.get())) {
- it_best_match = it;
+ if (it->second->Complete() && DecodableFrame(it->second.get())) {
+ if (it_best_match == frame_map_.end() ||
+ IsOlderFrameId(it->first, it_best_match->first)) {
+ it_best_match = it;
+ }
}
}
- if (it_best_match == frame_map_.end()) return false;
+ if (it_best_match == frame_map_.end())
+ return false;
*frame_id = it_best_match->first;
return true;
}
-bool FrameIdMap::Empty() const {
- return frame_map_.empty();
-}
+bool FrameIdMap::Empty() const { return frame_map_.empty(); }
int FrameIdMap::NumberOfCompleteFrames() const {
int count = 0;
@@ -229,20 +213,27 @@ void FrameIdMap::GetMissingPackets(uint32 frame_id,
bool last_frame,
PacketIdSet* missing_packets) const {
FrameMap::const_iterator it = frame_map_.find(frame_id);
- if (it == frame_map_.end()) return;
+ if (it == frame_map_.end())
+ return;
it->second->GetMissingPackets(last_frame, missing_packets);
}
bool FrameIdMap::ContinuousFrame(FrameInfo* frame) const {
DCHECK(frame);
- if (waiting_for_key_ && !frame->is_key_frame()) return false;
+ if (waiting_for_key_ && !frame->is_key_frame())
+ return false;
return static_cast<uint32>(last_released_frame_ + 1) == frame->frame_id();
}
-bool FrameIdMap::DecodableVideoFrame(FrameInfo* frame) const {
- if (frame->is_key_frame()) return true;
- if (waiting_for_key_ && !frame->is_key_frame()) return false;
+bool FrameIdMap::DecodableFrame(FrameInfo* frame) const {
+ if (frame->is_key_frame())
+ return true;
+ if (waiting_for_key_ && !frame->is_key_frame())
+ return false;
+ // Self-reference?
+ if (frame->referenced_frame_id() == frame->frame_id())
+ return true;
// Current frame is not necessarily referencing the last frame.
// Do we have the reference frame?
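
The boolean InsertPacket/complete-out-param pair is replaced here by a PacketType result. The enum itself is not shown in this diff (it presumably lives in cast_defines.h); inferred from the call sites above, a sketch of its values:

    enum PacketType {
      kNewPacket,                 // Accepted; the frame is still incomplete.
      kNewPacketCompletingFrame,  // Accepted; this packet completed the frame.
      kDuplicatePacket,           // Already received; caller should ignore it.
      kTooOldPacket,              // Belongs to a frame older than the last
                                  // released one (and we are not waiting for
                                  // a key frame).
    };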
diff --git a/chromium/media/cast/framer/frame_id_map.h b/chromium/media/cast/framer/frame_id_map.h
index 40b0a7f3399..66e306f6718 100644
--- a/chromium/media/cast/framer/frame_id_map.h
+++ b/chromium/media/cast/framer/frame_id_map.h
@@ -25,11 +25,9 @@ class FrameInfo {
bool key_frame);
~FrameInfo();
- // Returns true if packet is inserted.
- bool InsertPacket(uint16 packet_id);
+ PacketType InsertPacket(uint16 packet_id);
bool Complete() const;
- void GetMissingPackets(bool newest_frame,
- PacketIdSet* missing_packets) const;
+ void GetMissingPackets(bool newest_frame, PacketIdSet* missing_packets) const;
bool is_key_frame() const { return is_key_frame_; }
uint32 frame_id() const { return frame_id_; }
@@ -53,8 +51,7 @@ class FrameIdMap {
FrameIdMap();
~FrameIdMap();
- // Returns false if not a valid (old) packet, otherwise returns true.
- bool InsertPacket(const RtpCastHeader& rtp_header, bool* complete);
+ PacketType InsertPacket(const RtpCastHeader& rtp_header);
bool Empty() const;
bool FrameExists(uint32 frame_id) const;
@@ -67,8 +64,8 @@ class FrameIdMap {
bool NextContinuousFrame(uint32* frame_id) const;
uint32 LastContinuousFrame() const;
- bool NextAudioFrameAllowingMissingFrames(uint32* frame_id) const;
- bool NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const;
+ bool NextFrameAllowingSkippingFrames(uint32* frame_id) const;
+ bool HaveMultipleDecodableFrames() const;
int NumberOfCompleteFrames() const;
void GetMissingPackets(uint32 frame_id,
@@ -77,7 +74,7 @@ class FrameIdMap {
private:
bool ContinuousFrame(FrameInfo* frame) const;
- bool DecodableVideoFrame(FrameInfo* frame) const;
+ bool DecodableFrame(FrameInfo* frame) const;
FrameMap frame_map_;
bool waiting_for_key_;
diff --git a/chromium/media/cast/framer/framer.cc b/chromium/media/cast/framer/framer.cc
index b06e60fd035..de4451a3b4a 100644
--- a/chromium/media/cast/framer/framer.cc
+++ b/chromium/media/cast/framer/framer.cc
@@ -17,9 +17,13 @@ Framer::Framer(base::TickClock* clock,
bool decoder_faster_than_max_frame_rate,
int max_unacked_frames)
: decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
- cast_msg_builder_(new CastMessageBuilder(clock, incoming_payload_feedback,
- &frame_id_map_, ssrc, decoder_faster_than_max_frame_rate,
- max_unacked_frames)) {
+ cast_msg_builder_(
+ new CastMessageBuilder(clock,
+ incoming_payload_feedback,
+ &frame_id_map_,
+ ssrc,
+ decoder_faster_than_max_frame_rate,
+ max_unacked_frames)) {
DCHECK(incoming_payload_feedback) << "Invalid argument";
}
@@ -27,9 +31,20 @@ Framer::~Framer() {}
bool Framer::InsertPacket(const uint8* payload_data,
size_t payload_size,
- const RtpCastHeader& rtp_header) {
- bool complete = false;
- if (!frame_id_map_.InsertPacket(rtp_header, &complete)) return false;
+ const RtpCastHeader& rtp_header,
+ bool* duplicate) {
+ *duplicate = false;
+ PacketType packet_type = frame_id_map_.InsertPacket(rtp_header);
+ if (packet_type == kTooOldPacket) {
+ return false;
+ }
+ if (packet_type == kDuplicatePacket) {
+ VLOG(3) << "Packet already received, ignored: frame "
+ << static_cast<int>(rtp_header.frame_id) << ", packet "
+ << rtp_header.packet_id;
+ *duplicate = true;
+ return false;
+ }
// Does this packet belong to a new frame?
FrameList::iterator it = frames_.find(rtp_header.frame_id);
@@ -43,42 +58,15 @@ bool Framer::InsertPacket(const uint8* payload_data,
it->second->InsertPacket(payload_data, payload_size, rtp_header);
}
- if (complete) {
- // ACK as soon as possible.
- VLOG(1) << "Complete frame " << static_cast<int>(rtp_header.frame_id);
- cast_msg_builder_->CompleteFrameReceived(rtp_header.frame_id,
- rtp_header.is_key_frame);
- }
- return complete;
+ return packet_type == kNewPacketCompletingFrame;
}
// This does not release the frame.
-bool Framer::GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
- uint32* rtp_timestamp,
- bool* next_frame) {
- uint32 frame_id;
- // Find frame id.
- if (frame_id_map_.NextContinuousFrame(&frame_id)) {
- // We have our next frame.
- *next_frame = true;
- } else {
- if (!frame_id_map_.NextAudioFrameAllowingMissingFrames(&frame_id)) {
- return false;
- }
- *next_frame = false;
- }
-
- ConstFrameIterator it = frames_.find(frame_id);
- DCHECK(it != frames_.end());
- if (it == frames_.end()) return false;
-
- return it->second->GetEncodedAudioFrame(audio_frame, rtp_timestamp);
-}
+bool Framer::GetEncodedFrame(transport::EncodedFrame* frame,
+ bool* next_frame,
+ bool* have_multiple_decodable_frames) {
+ *have_multiple_decodable_frames = frame_id_map_.HaveMultipleDecodableFrames();
-// This does not release the frame.
-bool Framer::GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- uint32* rtp_timestamp,
- bool* next_frame) {
uint32 frame_id;
// Find frame id.
if (frame_id_map_.NextContinuousFrame(&frame_id)) {
@@ -86,19 +74,26 @@ bool Framer::GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
*next_frame = true;
} else {
// Check if we can skip frames when our decoder is too slow.
- if (!decoder_faster_than_max_frame_rate_) return false;
+ if (!decoder_faster_than_max_frame_rate_)
+ return false;
- if (!frame_id_map_.NextVideoFrameAllowingSkippingFrames(&frame_id)) {
+ if (!frame_id_map_.NextFrameAllowingSkippingFrames(&frame_id)) {
return false;
}
*next_frame = false;
}
+ if (*next_frame) {
+ VLOG(2) << "ACK frame " << frame_id;
+ cast_msg_builder_->CompleteFrameReceived(frame_id);
+ }
+
ConstFrameIterator it = frames_.find(frame_id);
DCHECK(it != frames_.end());
- if (it == frames_.end()) return false;
+ if (it == frames_.end())
+ return false;
- return it->second->GetEncodedVideoFrame(video_frame, rtp_timestamp);
+ return it->second->AssembleEncodedFrame(frame);
}
void Framer::Reset() {
@@ -114,7 +109,7 @@ void Framer::ReleaseFrame(uint32 frame_id) {
// We have a frame - remove all frames with lower frame id.
bool skipped_old_frame = false;
FrameList::iterator it;
- for (it = frames_.begin(); it != frames_.end(); ) {
+ for (it = frames_.begin(); it != frames_.end();) {
if (IsOlderFrameId(it->first, frame_id)) {
frames_.erase(it++);
skipped_old_frame = true;
@@ -131,9 +126,7 @@ bool Framer::TimeToSendNextCastMessage(base::TimeTicks* time_to_send) {
return cast_msg_builder_->TimeToSendNextCastMessage(time_to_send);
}
-void Framer::SendCastMessage() {
- cast_msg_builder_->UpdateCastMessage();
-}
+void Framer::SendCastMessage() { cast_msg_builder_->UpdateCastMessage(); }
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/framer/framer.gyp b/chromium/media/cast/framer/framer.gyp
deleted file mode 100644
index 7b124f0c5de..00000000000
--- a/chromium/media/cast/framer/framer.gyp
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_framer',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc',
- ],
- 'sources': [
- 'cast_message_builder.cc',
- 'cast_message_builder.h',
- 'frame_buffer.cc',
- 'frame_buffer.h',
- 'frame_id_map.cc',
- 'frame_id_map.h',
- 'framer.cc',
- 'framer.h',
- ],
- },
- ], # targets
-}
diff --git a/chromium/media/cast/framer/framer.h b/chromium/media/cast/framer/framer.h
index cf72da6c35d..0b7249eff34 100644
--- a/chromium/media/cast/framer/framer.h
+++ b/chromium/media/cast/framer/framer.h
@@ -33,22 +33,21 @@ class Framer {
~Framer();
// Return true when receiving the last packet in a frame, creating a
- // complete frame.
+ // complete frame. If a duplicate packet is received, the function
+ // returns false and sets |duplicate| to true.
bool InsertPacket(const uint8* payload_data,
size_t payload_size,
- const RtpCastHeader& rtp_header);
-
- // Extracts a complete encoded frame - will only return a complete continuous
- // frame.
- // Returns false if the frame does not exist or if the frame is not complete
- // within the given time frame.
- bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- uint32* rtp_timestamp,
- bool* next_frame);
-
- bool GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
- uint32* rtp_timestamp,
- bool* next_frame);
+ const RtpCastHeader& rtp_header,
+ bool* duplicate);
+
+ // Extracts a complete encoded frame - will only return a complete and
+ // decodable frame. Returns false if no such frames exist.
+ // |next_frame| will be set to true if the returned frame is the very
+ // next frame. |have_multiple_complete_frames| will be set to true
+ // if there are more decodable frames available.
+ bool GetEncodedFrame(transport::EncodedFrame* video_frame,
+ bool* next_frame,
+ bool* have_multiple_complete_frames);
void ReleaseFrame(uint32 frame_id);
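
A minimal receive-loop sketch for the reworked Framer interface above; the loop, the Decode() consumer, and the local variable names are invented for illustration:

    bool duplicate = false;
    bool complete = framer.InsertPacket(payload_data, payload_size,
                                        rtp_header, &duplicate);
    if (complete) {
      transport::EncodedFrame frame;
      bool next_frame = false;
      bool have_multiple = false;
      while (framer.GetEncodedFrame(&frame, &next_frame, &have_multiple)) {
        Decode(frame);  // Hypothetical consumer of the assembled frame.
        framer.ReleaseFrame(frame.frame_id);
        if (!have_multiple)
          break;
      }
    }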
diff --git a/chromium/media/cast/framer/framer_unittest.cc b/chromium/media/cast/framer/framer_unittest.cc
index 871f048af46..ad53ef06eee 100644
--- a/chromium/media/cast/framer/framer_unittest.cc
+++ b/chromium/media/cast/framer/framer_unittest.cc
@@ -15,317 +15,484 @@ class FramerTest : public ::testing::Test {
FramerTest()
: mock_rtp_payload_feedback_(),
framer_(&testing_clock_, &mock_rtp_payload_feedback_, 0, true, 0) {
+ payload_.assign(kMaxIpPacketSize, 0);
+
+ EXPECT_CALL(mock_rtp_payload_feedback_, CastFeedback(testing::_))
+ .WillRepeatedly(testing::Return());
}
virtual ~FramerTest() {}
- virtual void SetUp() OVERRIDE {
- // Build a default one packet frame - populate webrtc header.
- rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0;
- rtp_header_.packet_id = 0;
- rtp_header_.max_packet_id = 0;
- rtp_header_.is_reference = false;
- rtp_header_.reference_frame_id = 0;
- payload_.assign(kIpPacketSize, 0);
-
- EXPECT_CALL(mock_rtp_payload_feedback_,
- CastFeedback(testing::_)).WillRepeatedly(testing::Return());
- }
-
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
MockRtpPayloadFeedback mock_rtp_payload_feedback_;
Framer framer_;
base::SimpleTestTickClock testing_clock_;
-};
+ DISALLOW_COPY_AND_ASSIGN(FramerTest);
+};
TEST_F(FramerTest, EmptyState) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ bool multiple = false;
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, AlwaysStartWithKey) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool complete = false;
+ bool multiple = false;
+ bool duplicate = false;
// Insert non key first frame.
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
rtp_header_.frame_id = 1;
+ rtp_header_.reference_frame_id = 1;
rtp_header_.is_key_frame = true;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(1u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, CompleteFrame) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool complete = false;
+ bool multiple = false;
+ bool duplicate = false;
- // start with a complete key frame.
+ // Start with a complete key frame.
rtp_header_.is_key_frame = true;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Incomplete delta.
++rtp_header_.frame_id;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
rtp_header_.is_key_frame = false;
rtp_header_.max_packet_id = 2;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
// Complete delta - can't skip, as incomplete sequence.
++rtp_header_.frame_id;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
rtp_header_.max_packet_id = 0;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+}
+
+TEST_F(FramerTest, DuplicatePackets) {
+ transport::EncodedFrame frame;
+ bool next_frame = false;
+ bool complete = false;
+ bool multiple = false;
+ bool duplicate = false;
+
+ // Start with an incomplete key frame.
+ rtp_header_.is_key_frame = true;
+ rtp_header_.max_packet_id = 1;
+ duplicate = true;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_FALSE(duplicate);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+
+ // Add same packet again in incomplete key frame.
+ duplicate = false;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_TRUE(duplicate);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+
+ // Complete key frame.
+ rtp_header_.packet_id = 1;
+ duplicate = true;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_FALSE(duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+
+ // Add same packet again in complete key frame.
+ duplicate = false;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_TRUE(duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ framer_.ReleaseFrame(frame.frame_id);
+
+ // Incomplete delta frame.
+ ++rtp_header_.frame_id;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
+ rtp_header_.packet_id = 0;
+ rtp_header_.is_key_frame = false;
+ duplicate = true;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_FALSE(duplicate);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+
+ // Add same packet again in incomplete delta frame.
+ duplicate = false;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_TRUE(duplicate);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+
+ // Complete delta frame.
+ rtp_header_.packet_id = 1;
+ duplicate = true;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_FALSE(duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ EXPECT_FALSE(multiple);
+
+ // Add same packet again in complete delta frame.
+ duplicate = false;
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(complete);
+ EXPECT_TRUE(duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ EXPECT_FALSE(multiple);
}
TEST_F(FramerTest, ContinuousSequence) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool complete = false;
+ bool multiple = false;
+ bool duplicate = false;
- // start with a complete key frame.
+ // Start with a complete key frame.
rtp_header_.is_key_frame = true;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Complete - not continuous.
rtp_header_.frame_id = 2;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
rtp_header_.is_key_frame = false;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, Wrap) {
// Insert key frame, frame_id = 255 (will jump to that)
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = true;
+ bool duplicate = false;
// Start with a complete key frame.
rtp_header_.is_key_frame = true;
- rtp_header_.frame_id = 255u;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ rtp_header_.frame_id = 255;
+ rtp_header_.reference_frame_id = 255;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert wrapped delta frame - should be continuous.
rtp_header_.is_key_frame = false;
rtp_header_.frame_id = 256;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, Reset) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool complete = false;
+ bool multiple = true;
+ bool duplicate = false;
// Start with a complete key frame.
rtp_header_.is_key_frame = true;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ complete = framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(complete);
framer_.Reset();
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, RequireKeyAfterReset) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
+ bool duplicate = false;
+
framer_.Reset();
  // Insert a complete non-key frame - it must not be returned after a reset.
rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0u;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ rtp_header_.frame_id = 0;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
rtp_header_.frame_id = 1;
+ rtp_header_.reference_frame_id = 1;
rtp_header_.is_key_frame = true;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
}
TEST_F(FramerTest, BasicNonLastReferenceId) {
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
+ bool duplicate = false;
+
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 0;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_FALSE(multiple);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.is_key_frame = false;
- rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
- rtp_header_.frame_id = 5u;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.frame_id = 5;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
+ EXPECT_FALSE(multiple);
}
TEST_F(FramerTest, InOrderReferenceFrameSelection) {
// Create pattern: 0, 1, 4, 5.
- EncodedVideoFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
+ bool duplicate = false;
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 0;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
rtp_header_.is_key_frame = false;
rtp_header_.frame_id = 1;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
// Insert frame #2 partially.
rtp_header_.frame_id = 2;
rtp_header_.max_packet_id = 1;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
rtp_header_.frame_id = 4;
rtp_header_.max_packet_id = 0;
- rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
+ EXPECT_FALSE(multiple);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(4u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
  // Insert remaining packet of frame #2 - should not be continuous.
rtp_header_.frame_id = 2;
rtp_header_.packet_id = 1;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
- rtp_header_.is_reference = false;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
rtp_header_.frame_id = 5;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id - 1;
rtp_header_.packet_id = 0;
rtp_header_.max_packet_id = 0;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
- &next_frame));
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
+ EXPECT_EQ(4u, frame.referenced_frame_id);
}
TEST_F(FramerTest, AudioWrap) {
// All audio frames are marked as key frames.
- EncodedAudioFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = false;
+ bool duplicate = false;
+
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 254;
+ rtp_header_.reference_frame_id = 254;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
- &next_frame));
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(254u, frame.frame_id);
+ EXPECT_EQ(254u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.frame_id = 255;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.reference_frame_id = 255;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
// Insert wrapped frame - should be continuous.
rtp_header_.frame_id = 256;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.reference_frame_id = 256;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// All audio frames are marked as key frames.
- EncodedAudioFrame frame;
- uint32 rtp_timestamp;
+ transport::EncodedFrame frame;
bool next_frame = false;
+ bool multiple = true;
+ bool duplicate = false;
// Insert and get first packet.
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 253;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
- &next_frame));
+ rtp_header_.reference_frame_id = 253;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(253u, frame.frame_id);
+ EXPECT_EQ(253u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert third and fourth packets.
rtp_header_.frame_id = 255;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.reference_frame_id = 255;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
rtp_header_.frame_id = 256;
- framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
+ rtp_header_.reference_frame_id = 256;
+ framer_.InsertPacket(
+ payload_.data(), payload_.size(), rtp_header_, &duplicate);
// Get third and fourth packets.
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
+ EXPECT_TRUE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
- &next_frame));
+ EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
+ EXPECT_FALSE(multiple);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
diff --git a/chromium/media/cast/logging/encoding_event_subscriber.cc b/chromium/media/cast/logging/encoding_event_subscriber.cc
new file mode 100644
index 00000000000..48cc911ba80
--- /dev/null
+++ b/chromium/media/cast/logging/encoding_event_subscriber.cc
@@ -0,0 +1,286 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/encoding_event_subscriber.h"
+
+#include <cstring>
+#include <utility>
+
+#include "base/logging.h"
+#include "media/cast/logging/proto/proto_utils.h"
+
+using google::protobuf::RepeatedPtrField;
+using media::cast::proto::AggregatedFrameEvent;
+using media::cast::proto::AggregatedPacketEvent;
+using media::cast::proto::BasePacketEvent;
+using media::cast::proto::LogMetadata;
+
+namespace {
+
+// A size limit on maps to keep lookups fast.
+const size_t kMaxMapSize = 200;
+
+// The smallest (ordered by RTP timestamp) |kNumMapEntriesToTransfer| entries
+// will be moved when the map size reaches |kMaxMapSize|.
+// Must be smaller than |kMaxMapSize|.
+const size_t kNumMapEntriesToTransfer = 100;
+
+template <typename ProtoPtr>
+bool IsRtpTimestampLessThan(const ProtoPtr& lhs, const ProtoPtr& rhs) {
+ return lhs->relative_rtp_timestamp() < rhs->relative_rtp_timestamp();
+}
+
+BasePacketEvent* GetNewBasePacketEvent(AggregatedPacketEvent* event_proto,
+ int packet_id, int size) {
+ BasePacketEvent* base = event_proto->add_base_packet_event();
+ base->set_packet_id(packet_id);
+ base->set_size(size);
+ return base;
+}
+
+}  // namespace
+
+namespace media {
+namespace cast {
+
+EncodingEventSubscriber::EncodingEventSubscriber(
+ EventMediaType event_media_type,
+ size_t max_frames)
+ : event_media_type_(event_media_type),
+ max_frames_(max_frames),
+ frame_event_storage_index_(0),
+ packet_event_storage_index_(0),
+ seen_first_rtp_timestamp_(false),
+ first_rtp_timestamp_(0u) {}
+
+EncodingEventSubscriber::~EncodingEventSubscriber() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void EncodingEventSubscriber::OnReceiveFrameEvent(
+ const FrameEvent& frame_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (event_media_type_ != frame_event.media_type)
+ return;
+
+ RtpTimestamp relative_rtp_timestamp =
+ GetRelativeRtpTimestamp(frame_event.rtp_timestamp);
+ FrameEventMap::iterator it = frame_event_map_.find(relative_rtp_timestamp);
+ linked_ptr<AggregatedFrameEvent> event_proto;
+
+ // Look up existing entry. If not found, create a new entry and add to map.
+ if (it == frame_event_map_.end()) {
+ event_proto.reset(new AggregatedFrameEvent);
+ event_proto->set_relative_rtp_timestamp(relative_rtp_timestamp);
+ frame_event_map_.insert(
+ std::make_pair(relative_rtp_timestamp, event_proto));
+ } else {
+ event_proto = it->second;
+ if (event_proto->event_type_size() >= kMaxEventsPerProto) {
+ DVLOG(2) << "Too many events in frame " << frame_event.rtp_timestamp
+ << ". Using new frame event proto.";
+ AddFrameEventToStorage(event_proto);
+ event_proto.reset(new AggregatedFrameEvent);
+ event_proto->set_relative_rtp_timestamp(relative_rtp_timestamp);
+ it->second = event_proto;
+ }
+ }
+
+ event_proto->add_event_type(ToProtoEventType(frame_event.type));
+ event_proto->add_event_timestamp_ms(
+ (frame_event.timestamp - base::TimeTicks()).InMilliseconds());
+
+ if (frame_event.type == FRAME_ENCODED) {
+ event_proto->set_encoded_frame_size(frame_event.size);
+ if (frame_event.media_type == VIDEO_EVENT) {
+ event_proto->set_key_frame(frame_event.key_frame);
+ event_proto->set_target_bitrate(frame_event.target_bitrate);
+ }
+ } else if (frame_event.type == FRAME_PLAYOUT) {
+ event_proto->set_delay_millis(frame_event.delay_delta.InMilliseconds());
+ }
+
+ if (frame_event_map_.size() > kMaxMapSize)
+ TransferFrameEvents(kNumMapEntriesToTransfer);
+
+  DCHECK_LE(frame_event_map_.size(), kMaxMapSize);
+  DCHECK_LE(frame_event_storage_.size(), max_frames_);
+}
+
+void EncodingEventSubscriber::OnReceivePacketEvent(
+ const PacketEvent& packet_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (event_media_type_ != packet_event.media_type)
+ return;
+
+ RtpTimestamp relative_rtp_timestamp =
+ GetRelativeRtpTimestamp(packet_event.rtp_timestamp);
+ PacketEventMap::iterator it =
+ packet_event_map_.find(relative_rtp_timestamp);
+ linked_ptr<AggregatedPacketEvent> event_proto;
+ BasePacketEvent* base_packet_event_proto = NULL;
+
+ // Look up existing entry. If not found, create a new entry and add to map.
+ if (it == packet_event_map_.end()) {
+ event_proto.reset(new AggregatedPacketEvent);
+ event_proto->set_relative_rtp_timestamp(relative_rtp_timestamp);
+ packet_event_map_.insert(
+ std::make_pair(relative_rtp_timestamp, event_proto));
+ base_packet_event_proto = GetNewBasePacketEvent(
+ event_proto.get(), packet_event.packet_id, packet_event.size);
+ } else {
+ // Found existing entry, now look up existing BasePacketEvent using packet
+ // ID. If not found, create a new entry and add to proto.
+ event_proto = it->second;
+ RepeatedPtrField<BasePacketEvent>* field =
+ event_proto->mutable_base_packet_event();
+ for (RepeatedPtrField<BasePacketEvent>::pointer_iterator base_it =
+ field->pointer_begin();
+ base_it != field->pointer_end();
+ ++base_it) {
+ if ((*base_it)->packet_id() == packet_event.packet_id) {
+ base_packet_event_proto = *base_it;
+ break;
+ }
+ }
+ if (!base_packet_event_proto) {
+ if (event_proto->base_packet_event_size() >= kMaxPacketsPerFrame) {
+ DVLOG(3) << "Too many packets in AggregatedPacketEvent "
+ << packet_event.rtp_timestamp << ". "
+ << "Using new packet event proto.";
+ AddPacketEventToStorage(event_proto);
+ event_proto.reset(new AggregatedPacketEvent);
+ event_proto->set_relative_rtp_timestamp(relative_rtp_timestamp);
+ it->second = event_proto;
+ }
+
+ base_packet_event_proto = GetNewBasePacketEvent(
+ event_proto.get(), packet_event.packet_id, packet_event.size);
+ } else if (base_packet_event_proto->event_type_size() >=
+ kMaxEventsPerProto) {
+ DVLOG(3) << "Too many events in packet "
+ << packet_event.rtp_timestamp << ", "
+ << packet_event.packet_id << ". Using new packet event proto.";
+ AddPacketEventToStorage(event_proto);
+ event_proto.reset(new AggregatedPacketEvent);
+ event_proto->set_relative_rtp_timestamp(relative_rtp_timestamp);
+ it->second = event_proto;
+ base_packet_event_proto = GetNewBasePacketEvent(
+ event_proto.get(), packet_event.packet_id, packet_event.size);
+ }
+ }
+
+ base_packet_event_proto->add_event_type(
+ ToProtoEventType(packet_event.type));
+ base_packet_event_proto->add_event_timestamp_ms(
+ (packet_event.timestamp - base::TimeTicks()).InMilliseconds());
+
+  // |base_packet_event_proto| may have been created by a receiver event,
+  // which carries no packet size, so overwrite the size once a sender
+  // event (which does carry it) is seen.
+ if (packet_event.size > 0) {
+ base_packet_event_proto->set_size(packet_event.size);
+ }
+
+ if (packet_event_map_.size() > kMaxMapSize)
+ TransferPacketEvents(kNumMapEntriesToTransfer);
+
+  DCHECK_LE(packet_event_map_.size(), kMaxMapSize);
+  DCHECK_LE(packet_event_storage_.size(), max_frames_);
+}
+
+void EncodingEventSubscriber::GetEventsAndReset(LogMetadata* metadata,
+ FrameEventList* frame_events, PacketEventList* packet_events) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Flush all events.
+ TransferFrameEvents(frame_event_map_.size());
+ TransferPacketEvents(packet_event_map_.size());
+ std::sort(frame_event_storage_.begin(), frame_event_storage_.end(),
+ &IsRtpTimestampLessThan<linked_ptr<AggregatedFrameEvent> >);
+ std::sort(packet_event_storage_.begin(), packet_event_storage_.end(),
+ &IsRtpTimestampLessThan<linked_ptr<AggregatedPacketEvent> >);
+
+ metadata->set_is_audio(event_media_type_ == AUDIO_EVENT);
+ metadata->set_first_rtp_timestamp(first_rtp_timestamp_);
+ metadata->set_num_frame_events(frame_event_storage_.size());
+ metadata->set_num_packet_events(packet_event_storage_.size());
+ metadata->set_reference_timestamp_ms_at_unix_epoch(
+ (base::TimeTicks::UnixEpoch() - base::TimeTicks()).InMilliseconds());
+ frame_events->swap(frame_event_storage_);
+ packet_events->swap(packet_event_storage_);
+ Reset();
+}
+
+void EncodingEventSubscriber::TransferFrameEvents(size_t max_num_entries) {
+  DCHECK_GE(frame_event_map_.size(), max_num_entries);
+
+ FrameEventMap::iterator it = frame_event_map_.begin();
+ for (size_t i = 0;
+ i < max_num_entries && it != frame_event_map_.end();
+ i++, ++it) {
+ AddFrameEventToStorage(it->second);
+ }
+
+ frame_event_map_.erase(frame_event_map_.begin(), it);
+}
+
+void EncodingEventSubscriber::TransferPacketEvents(size_t max_num_entries) {
+ PacketEventMap::iterator it = packet_event_map_.begin();
+ for (size_t i = 0;
+ i < max_num_entries && it != packet_event_map_.end();
+ i++, ++it) {
+ AddPacketEventToStorage(it->second);
+ }
+
+ packet_event_map_.erase(packet_event_map_.begin(), it);
+}
+
+void EncodingEventSubscriber::AddFrameEventToStorage(
+ const linked_ptr<AggregatedFrameEvent>& frame_event_proto) {
+ if (frame_event_storage_.size() >= max_frames_) {
+ frame_event_storage_[frame_event_storage_index_] = frame_event_proto;
+ } else {
+ frame_event_storage_.push_back(frame_event_proto);
+ }
+
+ frame_event_storage_index_ = (frame_event_storage_index_ + 1) % max_frames_;
+}
+
+void EncodingEventSubscriber::AddPacketEventToStorage(
+ const linked_ptr<AggregatedPacketEvent>& packet_event_proto) {
+ if (packet_event_storage_.size() >= max_frames_)
+ packet_event_storage_[packet_event_storage_index_] = packet_event_proto;
+ else
+ packet_event_storage_.push_back(packet_event_proto);
+
+ packet_event_storage_index_ = (packet_event_storage_index_ + 1) % max_frames_;
+}
+
+RtpTimestamp EncodingEventSubscriber::GetRelativeRtpTimestamp(
+ RtpTimestamp rtp_timestamp) {
+ if (!seen_first_rtp_timestamp_) {
+ seen_first_rtp_timestamp_ = true;
+ first_rtp_timestamp_ = rtp_timestamp;
+ }
+
+ return rtp_timestamp - first_rtp_timestamp_;
+}
+
+void EncodingEventSubscriber::Reset() {
+ frame_event_map_.clear();
+ frame_event_storage_.clear();
+ frame_event_storage_index_ = 0;
+ packet_event_map_.clear();
+ packet_event_storage_.clear();
+ packet_event_storage_index_ = 0;
+ seen_first_rtp_timestamp_ = false;
+ first_rtp_timestamp_ = 0u;
+}
+
+} // namespace cast
+} // namespace media
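The Add*EventToStorage() methods above implement a fixed-capacity ring buffer: once |max_frames_| protos have accumulated, the write index wraps and the oldest slot is overwritten instead of the vector growing. A standalone sketch of the same pattern, assuming capacity > 0 (illustrative only, not part of the patch):

    // Mirrors AddFrameEventToStorage()/AddPacketEventToStorage().
    template <typename T>
    void RingAppend(std::vector<T>* storage, size_t* index, size_t capacity,
                    const T& value) {
      if (storage->size() >= capacity)
        (*storage)[*index] = value;  // Full: overwrite the oldest slot.
      else
        storage->push_back(value);   // Still filling to capacity.
      *index = (*index + 1) % capacity;
    }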
diff --git a/chromium/media/cast/logging/encoding_event_subscriber.h b/chromium/media/cast/logging/encoding_event_subscriber.h
new file mode 100644
index 00000000000..ca2cccb5f74
--- /dev/null
+++ b/chromium/media/cast/logging/encoding_event_subscriber.h
@@ -0,0 +1,122 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_ENCODING_EVENT_SUBSCRIBER_H_
+#define MEDIA_CAST_LOGGING_ENCODING_EVENT_SUBSCRIBER_H_
+
+#include <map>
+
+#include "base/memory/linked_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/proto/raw_events.pb.h"
+#include "media/cast/logging/raw_event_subscriber.h"
+
+namespace media {
+namespace cast {
+
+// Number of packets per frame recorded by the subscriber.
+// Once the max number of packets has been reached, a new aggregated proto
+// will be created.
+static const int kMaxPacketsPerFrame = 64;
+// Number of events per proto recorded by the subscriber.
+// Once the max number of events has been reached, a new aggregated proto
+// will be created.
+static const int kMaxEventsPerProto = 16;
+
+typedef std::vector<linked_ptr<media::cast::proto::AggregatedFrameEvent> >
+ FrameEventList;
+typedef std::vector<linked_ptr<media::cast::proto::AggregatedPacketEvent> >
+ PacketEventList;
+
+// A RawEventSubscriber implementation that subscribes to events,
+// encodes them in protocol buffer format, and aggregates them into a more
+// compact structure. Aggregation is per-frame, and uses a map with RTP
+// timestamp as key. Periodically, old entries in the map will be transferred
+// to a storage vector. This helps keep the size of the map small and
+// lookup times fast. The storage itself is a circular buffer that will
+// overwrite old entries once it has reached the size configured by user.
+class EncodingEventSubscriber : public RawEventSubscriber {
+ public:
+ // |event_media_type|: The subscriber will only process events that
+ // corresponds to this type.
+ // |max_frames|: How many events to keep in the frame / packet storage.
+ // This helps keep memory usage bounded.
+  //               Every time one of |OnReceive[Frame,Packet]Event()| is
+  //               called, it checks whether the respective storage has
+  //               reached |max_frames| entries; if so, the oldest
+  //               aggregated entry (ordered by RTP timestamp) is overwritten.
+ EncodingEventSubscriber(EventMediaType event_media_type, size_t max_frames);
+
+ virtual ~EncodingEventSubscriber();
+
+  // RawEventSubscriber implementations.
+ virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
+ virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+
+ // Assigns frame events and packet events received so far to |frame_events|
+ // and |packet_events| and resets the internal state.
+ // In addition, assign metadata associated with these events to |metadata|.
+  // The protos in |frame_events| and |packet_events| are sorted in
+ // ascending RTP timestamp order.
+ void GetEventsAndReset(media::cast::proto::LogMetadata* metadata,
+ FrameEventList* frame_events,
+ PacketEventList* packet_events);
+
+ private:
+ typedef std::map<RtpTimestamp,
+ linked_ptr<media::cast::proto::AggregatedFrameEvent> >
+ FrameEventMap;
+ typedef std::map<RtpTimestamp,
+ linked_ptr<media::cast::proto::AggregatedPacketEvent> >
+ PacketEventMap;
+
+ // Transfer up to |max_num_entries| smallest entries from |frame_event_map_|
+ // to |frame_event_storage_|. This helps keep size of |frame_event_map_| small
+ // and lookup speed fast.
+ void TransferFrameEvents(size_t max_num_entries);
+ // See above.
+ void TransferPacketEvents(size_t max_num_entries);
+
+ void AddFrameEventToStorage(
+ const linked_ptr<media::cast::proto::AggregatedFrameEvent>&
+ frame_event_proto);
+ void AddPacketEventToStorage(
+ const linked_ptr<media::cast::proto::AggregatedPacketEvent>&
+ packet_event_proto);
+
+ // Returns the difference between |rtp_timestamp| and |first_rtp_timestamp_|.
+ // Sets |first_rtp_timestamp_| if it is not already set.
+ RtpTimestamp GetRelativeRtpTimestamp(RtpTimestamp rtp_timestamp);
+
+ // Clears the maps and first RTP timestamp seen.
+ void Reset();
+
+ const EventMediaType event_media_type_;
+ const size_t max_frames_;
+
+ FrameEventMap frame_event_map_;
+ FrameEventList frame_event_storage_;
+ int frame_event_storage_index_;
+
+ PacketEventMap packet_event_map_;
+ PacketEventList packet_event_storage_;
+ int packet_event_storage_index_;
+
+ // All functions must be called on the main thread.
+ base::ThreadChecker thread_checker_;
+
+ // Set to true on first event encountered after a |Reset()|.
+ bool seen_first_rtp_timestamp_;
+
+ // Set to RTP timestamp of first event encountered after a |Reset()|.
+ RtpTimestamp first_rtp_timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(EncodingEventSubscriber);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_ENCODING_EVENT_SUBSCRIBER_H_
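Condensed from the unit tests that follow, a sketch of the intended wiring (assumes a live CastEnvironment named cast_environment; error handling omitted):

    EncodingEventSubscriber subscriber(media::cast::VIDEO_EVENT,
                                       /*max_frames=*/100);
    cast_environment->Logging()->AddRawEventSubscriber(&subscriber);
    // ... raw frame/packet events are inserted via Logging() elsewhere ...
    media::cast::proto::LogMetadata metadata;
    media::cast::FrameEventList frame_events;
    media::cast::PacketEventList packet_events;
    subscriber.GetEventsAndReset(&metadata, &frame_events, &packet_events);
    cast_environment->Logging()->RemoveRawEventSubscriber(&subscriber);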
diff --git a/chromium/media/cast/logging/encoding_event_subscriber_unittest.cc b/chromium/media/cast/logging/encoding_event_subscriber_unittest.cc
new file mode 100644
index 00000000000..3d77a621b78
--- /dev/null
+++ b/chromium/media/cast/logging/encoding_event_subscriber_unittest.cc
@@ -0,0 +1,668 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/encoding_event_subscriber.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using media::cast::proto::AggregatedFrameEvent;
+using media::cast::proto::AggregatedPacketEvent;
+using media::cast::proto::BasePacketEvent;
+using media::cast::proto::LogMetadata;
+
+namespace {
+
+int64 InMilliseconds(base::TimeTicks event_time) {
+ return (event_time - base::TimeTicks()).InMilliseconds();
+}
+
+}  // namespace
+
+namespace media {
+namespace cast {
+
+class EncodingEventSubscriberTest : public ::testing::Test {
+ protected:
+ EncodingEventSubscriberTest()
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
+ first_rtp_timestamp_(0) {}
+
+ void Init(EventMediaType event_media_type) {
+ DCHECK(!event_subscriber_);
+ event_subscriber_.reset(new EncodingEventSubscriber(event_media_type, 10));
+ cast_environment_->Logging()->AddRawEventSubscriber(
+ event_subscriber_.get());
+ }
+
+ virtual ~EncodingEventSubscriberTest() {
+ if (event_subscriber_) {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(
+ event_subscriber_.get());
+ }
+ }
+
+ void GetEventsAndReset() {
+ event_subscriber_->GetEventsAndReset(
+ &metadata_, &frame_events_, &packet_events_);
+ first_rtp_timestamp_ = metadata_.first_rtp_timestamp();
+ }
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<EncodingEventSubscriber> event_subscriber_;
+ FrameEventList frame_events_;
+ PacketEventList packet_events_;
+ LogMetadata metadata_;
+ RtpTimestamp first_rtp_timestamp_;
+};
+
+TEST_F(EncodingEventSubscriberTest, FrameEventTruncating) {
+ Init(VIDEO_EVENT);
+
+ base::TimeTicks now(testing_clock_->NowTicks());
+
+ // Entry with RTP timestamp 0 should get dropped.
+ for (int i = 0; i < 11; i++) {
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ i * 100,
+ /*frame_id*/ 0);
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_DECODED,
+ VIDEO_EVENT,
+ i * 100,
+ /*frame_id*/ 0);
+ }
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(10u, frame_events_.size());
+ EXPECT_EQ(100u, frame_events_.front()->relative_rtp_timestamp());
+ EXPECT_EQ(1000u, frame_events_.back()->relative_rtp_timestamp());
+}
+
+TEST_F(EncodingEventSubscriberTest, PacketEventTruncating) {
+ Init(AUDIO_EVENT);
+
+ base::TimeTicks now(testing_clock_->NowTicks());
+
+ // Entry with RTP timestamp 0 should get dropped.
+ for (int i = 0; i < 11; i++) {
+ cast_environment_->Logging()->InsertPacketEvent(now,
+ PACKET_RECEIVED,
+ AUDIO_EVENT,
+ /*rtp_timestamp*/ i * 100,
+ /*frame_id*/ 0,
+ /*packet_id*/ i,
+ /*max_packet_id*/ 10,
+ /*size*/ 123);
+ }
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(10u, packet_events_.size());
+ EXPECT_EQ(100u, packet_events_.front()->relative_rtp_timestamp());
+ EXPECT_EQ(1000u, packet_events_.back()->relative_rtp_timestamp());
+}
+
+TEST_F(EncodingEventSubscriberTest, EventFiltering) {
+ Init(VIDEO_EVENT);
+
+ base::TimeTicks now(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_DECODED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+
+ // This is an AUDIO_EVENT and shouldn't be processed by the subscriber.
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_DECODED,
+ AUDIO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, frame_events_.size());
+ FrameEventList::iterator it = frame_events_.begin();
+
+ linked_ptr<AggregatedFrameEvent> frame_event = *it;
+
+ ASSERT_EQ(1, frame_event->event_type_size());
+ EXPECT_EQ(media::cast::proto::FRAME_DECODED,
+ frame_event->event_type(0));
+
+ GetEventsAndReset();
+
+ EXPECT_TRUE(packet_events_.empty());
+}
+
+TEST_F(EncodingEventSubscriberTest, FrameEvent) {
+ Init(VIDEO_EVENT);
+ base::TimeTicks now(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ cast_environment_->Logging()->InsertFrameEvent(now, FRAME_DECODED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, frame_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp - first_rtp_timestamp_;
+ FrameEventList::iterator it = frame_events_.begin();
+
+ linked_ptr<AggregatedFrameEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->event_type_size());
+ EXPECT_EQ(media::cast::proto::FRAME_DECODED, event->event_type(0));
+ ASSERT_EQ(1, event->event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now), event->event_timestamp_ms(0));
+
+ EXPECT_EQ(0, event->encoded_frame_size());
+ EXPECT_EQ(0, event->delay_millis());
+
+ GetEventsAndReset();
+ EXPECT_TRUE(frame_events_.empty());
+}
+
+TEST_F(EncodingEventSubscriberTest, FrameEventDelay) {
+ Init(AUDIO_EVENT);
+ base::TimeTicks now(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ int delay_ms = 100;
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp,
+ /*frame_id*/ 0, base::TimeDelta::FromMilliseconds(delay_ms));
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, frame_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp - first_rtp_timestamp_;
+ FrameEventList::iterator it = frame_events_.begin();
+
+ linked_ptr<AggregatedFrameEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->event_type_size());
+ EXPECT_EQ(media::cast::proto::FRAME_PLAYOUT, event->event_type(0));
+ ASSERT_EQ(1, event->event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now), event->event_timestamp_ms(0));
+
+ EXPECT_EQ(0, event->encoded_frame_size());
+ EXPECT_EQ(100, event->delay_millis());
+ EXPECT_FALSE(event->has_key_frame());
+}
+
+TEST_F(EncodingEventSubscriberTest, FrameEventSize) {
+ Init(VIDEO_EVENT);
+ base::TimeTicks now(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ int size = 123;
+ bool key_frame = true;
+ int target_bitrate = 1024;
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ now, FRAME_ENCODED, VIDEO_EVENT, rtp_timestamp,
+ /*frame_id*/ 0, size, key_frame, target_bitrate);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, frame_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp - first_rtp_timestamp_;
+ FrameEventList::iterator it = frame_events_.begin();
+
+ linked_ptr<AggregatedFrameEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->event_type_size());
+ EXPECT_EQ(media::cast::proto::FRAME_ENCODED, event->event_type(0));
+ ASSERT_EQ(1, event->event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now), event->event_timestamp_ms(0));
+
+ EXPECT_EQ(size, event->encoded_frame_size());
+ EXPECT_EQ(0, event->delay_millis());
+ EXPECT_TRUE(event->has_key_frame());
+ EXPECT_EQ(key_frame, event->key_frame());
+ EXPECT_EQ(target_bitrate, event->target_bitrate());
+}
+
+TEST_F(EncodingEventSubscriberTest, MultipleFrameEvents) {
+ Init(AUDIO_EVENT);
+ RtpTimestamp rtp_timestamp1 = 100;
+ RtpTimestamp rtp_timestamp2 = 200;
+ base::TimeTicks now1(testing_clock_->NowTicks());
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ now1, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp1,
+ /*frame_id*/ 0, /*delay*/ base::TimeDelta::FromMilliseconds(100));
+
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
+ base::TimeTicks now2(testing_clock_->NowTicks());
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ now2, FRAME_ENCODED, AUDIO_EVENT, rtp_timestamp2,
+ /*frame_id*/ 0, /*size*/ 123, /* key_frame - unused */ false,
+ /*target_bitrate - unused*/ 0);
+
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
+ base::TimeTicks now3(testing_clock_->NowTicks());
+ cast_environment_->Logging()->InsertFrameEvent(
+ now3, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp1, /*frame_id*/ 0);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(2u, frame_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp1 - first_rtp_timestamp_;
+ FrameEventList::iterator it = frame_events_.begin();
+
+ linked_ptr<AggregatedFrameEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(2, event->event_type_size());
+ EXPECT_EQ(media::cast::proto::FRAME_PLAYOUT, event->event_type(0));
+ EXPECT_EQ(media::cast::proto::FRAME_DECODED, event->event_type(1));
+
+ ASSERT_EQ(2, event->event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now1), event->event_timestamp_ms(0));
+ EXPECT_EQ(InMilliseconds(now3), event->event_timestamp_ms(1));
+
+ EXPECT_FALSE(event->has_key_frame());
+
+ relative_rtp_timestamp = rtp_timestamp2 - first_rtp_timestamp_;
+ ++it;
+
+ event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->event_type_size());
+ EXPECT_EQ(media::cast::proto::FRAME_ENCODED, event->event_type(0));
+
+ ASSERT_EQ(1, event->event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now2), event->event_timestamp_ms(0));
+
+ EXPECT_FALSE(event->has_key_frame());
+}
+
+TEST_F(EncodingEventSubscriberTest, PacketEvent) {
+ Init(AUDIO_EVENT);
+ base::TimeTicks now(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ int packet_id = 2;
+ int size = 100;
+ cast_environment_->Logging()->InsertPacketEvent(
+ now, PACKET_RECEIVED, AUDIO_EVENT,
+ rtp_timestamp, /*frame_id*/ 0, packet_id,
+ /*max_packet_id*/ 10, size);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, packet_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp - first_rtp_timestamp_;
+ PacketEventList::iterator it = packet_events_.begin();
+
+ linked_ptr<AggregatedPacketEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->base_packet_event_size());
+ const BasePacketEvent& base_event = event->base_packet_event(0);
+ EXPECT_EQ(packet_id, base_event.packet_id());
+ ASSERT_EQ(1, base_event.event_type_size());
+ EXPECT_EQ(media::cast::proto::PACKET_RECEIVED,
+ base_event.event_type(0));
+ ASSERT_EQ(1, base_event.event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now), base_event.event_timestamp_ms(0));
+ EXPECT_EQ(size, base_event.size());
+
+ GetEventsAndReset();
+ EXPECT_TRUE(packet_events_.empty());
+}
+
+TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForPacket) {
+ Init(VIDEO_EVENT);
+ base::TimeTicks now1(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ int packet_id = 2;
+ int size = 100;
+ cast_environment_->Logging()->InsertPacketEvent(now1,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0,
+ packet_id,
+ /*max_packet_id*/ 10,
+ size);
+
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
+ base::TimeTicks now2(testing_clock_->NowTicks());
+ cast_environment_->Logging()->InsertPacketEvent(now2,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0,
+ packet_id,
+ /*max_packet_id*/ 10,
+ size);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, packet_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp - first_rtp_timestamp_;
+ PacketEventList::iterator it = packet_events_.begin();
+
+ linked_ptr<AggregatedPacketEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->base_packet_event_size());
+ const BasePacketEvent& base_event = event->base_packet_event(0);
+ EXPECT_EQ(packet_id, base_event.packet_id());
+ ASSERT_EQ(2, base_event.event_type_size());
+ EXPECT_EQ(media::cast::proto::PACKET_SENT_TO_NETWORK,
+ base_event.event_type(0));
+ EXPECT_EQ(media::cast::proto::PACKET_RETRANSMITTED,
+ base_event.event_type(1));
+ ASSERT_EQ(2, base_event.event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now1), base_event.event_timestamp_ms(0));
+ EXPECT_EQ(InMilliseconds(now2), base_event.event_timestamp_ms(1));
+}
+
+TEST_F(EncodingEventSubscriberTest, MultiplePacketEventsForFrame) {
+ Init(VIDEO_EVENT);
+ base::TimeTicks now1(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp = 100;
+ int packet_id_1 = 2;
+ int packet_id_2 = 3;
+ int size = 100;
+ cast_environment_->Logging()->InsertPacketEvent(now1,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0,
+ packet_id_1,
+ /*max_packet_id*/ 10,
+ size);
+
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
+ base::TimeTicks now2(testing_clock_->NowTicks());
+ cast_environment_->Logging()->InsertPacketEvent(now2,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0,
+ packet_id_2,
+ /*max_packet_id*/ 10,
+ size);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(1u, packet_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp - first_rtp_timestamp_;
+ PacketEventList::iterator it = packet_events_.begin();
+
+ linked_ptr<AggregatedPacketEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(2, event->base_packet_event_size());
+ const BasePacketEvent& base_event = event->base_packet_event(0);
+ EXPECT_EQ(packet_id_1, base_event.packet_id());
+ ASSERT_EQ(1, base_event.event_type_size());
+ EXPECT_EQ(media::cast::proto::PACKET_SENT_TO_NETWORK,
+ base_event.event_type(0));
+ ASSERT_EQ(1, base_event.event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now1), base_event.event_timestamp_ms(0));
+
+ const BasePacketEvent& base_event_2 = event->base_packet_event(1);
+ EXPECT_EQ(packet_id_2, base_event_2.packet_id());
+ ASSERT_EQ(1, base_event_2.event_type_size());
+ EXPECT_EQ(media::cast::proto::PACKET_RETRANSMITTED,
+ base_event_2.event_type(0));
+ ASSERT_EQ(1, base_event_2.event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now2), base_event_2.event_timestamp_ms(0));
+}
+
+TEST_F(EncodingEventSubscriberTest, MultiplePacketEvents) {
+ Init(VIDEO_EVENT);
+ base::TimeTicks now1(testing_clock_->NowTicks());
+ RtpTimestamp rtp_timestamp_1 = 100;
+ RtpTimestamp rtp_timestamp_2 = 200;
+ int packet_id_1 = 2;
+ int packet_id_2 = 3;
+ int size = 100;
+ cast_environment_->Logging()->InsertPacketEvent(now1,
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
+ rtp_timestamp_1,
+ /*frame_id*/ 0,
+ packet_id_1,
+ /*max_packet_id*/ 10,
+ size);
+
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
+ base::TimeTicks now2(testing_clock_->NowTicks());
+ cast_environment_->Logging()->InsertPacketEvent(now2,
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
+ rtp_timestamp_2,
+ /*frame_id*/ 0,
+ packet_id_2,
+ /*max_packet_id*/ 10,
+ size);
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(2u, packet_events_.size());
+
+ RtpTimestamp relative_rtp_timestamp = rtp_timestamp_1 - first_rtp_timestamp_;
+ PacketEventList::iterator it = packet_events_.begin();
+
+ linked_ptr<AggregatedPacketEvent> event = *it;
+
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->base_packet_event_size());
+ const BasePacketEvent& base_event = event->base_packet_event(0);
+ EXPECT_EQ(packet_id_1, base_event.packet_id());
+ ASSERT_EQ(1, base_event.event_type_size());
+ EXPECT_EQ(media::cast::proto::PACKET_SENT_TO_NETWORK,
+ base_event.event_type(0));
+ ASSERT_EQ(1, base_event.event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now1), base_event.event_timestamp_ms(0));
+
+ relative_rtp_timestamp = rtp_timestamp_2 - first_rtp_timestamp_;
+ ++it;
+ ASSERT_TRUE(it != packet_events_.end());
+
+ event = *it;
+ EXPECT_EQ(relative_rtp_timestamp, event->relative_rtp_timestamp());
+
+ ASSERT_EQ(1, event->base_packet_event_size());
+ const BasePacketEvent& base_event_2 = event->base_packet_event(0);
+ EXPECT_EQ(packet_id_2, base_event_2.packet_id());
+ ASSERT_EQ(1, base_event_2.event_type_size());
+ EXPECT_EQ(media::cast::proto::PACKET_RETRANSMITTED,
+ base_event_2.event_type(0));
+ ASSERT_EQ(1, base_event_2.event_timestamp_ms_size());
+ EXPECT_EQ(InMilliseconds(now2), base_event_2.event_timestamp_ms(0));
+}
+
+TEST_F(EncodingEventSubscriberTest, FirstRtpTimestamp) {
+ Init(VIDEO_EVENT);
+ RtpTimestamp rtp_timestamp = 12345;
+ base::TimeTicks now(testing_clock_->NowTicks());
+
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
+ rtp_timestamp + 30,
+ /*frame_id*/ 1);
+
+ GetEventsAndReset();
+
+ EXPECT_EQ(rtp_timestamp, first_rtp_timestamp_);
+ FrameEventList::iterator it = frame_events_.begin();
+ ASSERT_NE(frame_events_.end(), it);
+ EXPECT_EQ(0u, (*it)->relative_rtp_timestamp());
+
+ ++it;
+ ASSERT_NE(frame_events_.end(), it);
+ EXPECT_EQ(30u, (*it)->relative_rtp_timestamp());
+
+ rtp_timestamp = 67890;
+
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+ GetEventsAndReset();
+
+ EXPECT_EQ(rtp_timestamp, first_rtp_timestamp_);
+}
+
+TEST_F(EncodingEventSubscriberTest, RelativeRtpTimestampWrapAround) {
+ Init(VIDEO_EVENT);
+ RtpTimestamp rtp_timestamp = 0xffffffff - 20;
+ base::TimeTicks now(testing_clock_->NowTicks());
+
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+
+ // RtpTimestamp has now wrapped around.
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
+ rtp_timestamp + 30,
+ /*frame_id*/ 1);
+
+ GetEventsAndReset();
+
+ FrameEventList::iterator it = frame_events_.begin();
+ ASSERT_NE(frame_events_.end(), it);
+ EXPECT_EQ(0u, (*it)->relative_rtp_timestamp());
+
+ ++it;
+ ASSERT_NE(frame_events_.end(), it);
+ EXPECT_EQ(30u, (*it)->relative_rtp_timestamp());
+}
+
+TEST_F(EncodingEventSubscriberTest, MaxEventsPerProto) {
+ Init(VIDEO_EVENT);
+ RtpTimestamp rtp_timestamp = 100;
+ for (int i = 0; i < kMaxEventsPerProto + 1; i++) {
+ cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0);
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(30));
+ }
+
+ GetEventsAndReset();
+
+ ASSERT_EQ(2u, frame_events_.size());
+ FrameEventList::iterator frame_it = frame_events_.begin();
+ ASSERT_TRUE(frame_it != frame_events_.end());
+
+ linked_ptr<AggregatedFrameEvent> frame_event = *frame_it;
+
+ EXPECT_EQ(kMaxEventsPerProto, frame_event->event_type_size());
+
+ for (int i = 0; i < kMaxPacketsPerFrame + 1; i++) {
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(),
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0,
+ i,
+ kMaxPacketsPerFrame,
+ 123);
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(30));
+ }
+
+ GetEventsAndReset();
+
+ EXPECT_EQ(2u, packet_events_.size());
+
+ PacketEventList::iterator packet_it = packet_events_.begin();
+ ASSERT_TRUE(packet_it != packet_events_.end());
+
+ linked_ptr<AggregatedPacketEvent> packet_event = *packet_it;
+
+ EXPECT_EQ(kMaxPacketsPerFrame,
+ packet_event->base_packet_event_size());
+
+ ++packet_it;
+ packet_event = *packet_it;
+ EXPECT_EQ(1, packet_event->base_packet_event_size());
+
+ for (int j = 0; j < kMaxEventsPerProto + 1; j++) {
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(),
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ /*frame_id*/ 0,
+ 0,
+ 0,
+ 123);
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(30));
+ }
+
+ GetEventsAndReset();
+
+ EXPECT_EQ(2u, packet_events_.size());
+ packet_it = packet_events_.begin();
+ ASSERT_TRUE(packet_it != packet_events_.end());
+
+ packet_event = *packet_it;
+
+ EXPECT_EQ(kMaxEventsPerProto,
+ packet_event->base_packet_event(0).event_type_size());
+
+ ++packet_it;
+ packet_event = *packet_it;
+ EXPECT_EQ(1, packet_event->base_packet_event(0).event_type_size());
+}
+
+} // namespace cast
+} // namespace media
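
A note on the wrap-around test above: it relies on C++ unsigned arithmetic, where subtracting two uint32 RTP timestamps yields the forward distance modulo 2^32 even after the counter wraps. A minimal standalone sketch of that invariant (plain C++, no Chromium dependencies; the names are illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t first = 0xffffffffu - 20;  // Just below the 32-bit limit.
  const uint32_t second = first + 30;       // Wraps around to 9.
  // Modulo-2^32 subtraction still yields the forward distance, so the
  // timestamp relative to |first| is 30, exactly what the test expects.
  assert(static_cast<uint32_t>(second - first) == 30u);
  return 0;
}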
diff --git a/chromium/media/cast/logging/log_deserializer.cc b/chromium/media/cast/logging/log_deserializer.cc
new file mode 100644
index 00000000000..1c6dd572240
--- /dev/null
+++ b/chromium/media/cast/logging/log_deserializer.cc
@@ -0,0 +1,252 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/log_deserializer.h"
+
+#include <map>
+#include <utility>
+
+#include "base/big_endian.h"
+#include "base/memory/scoped_ptr.h"
+#include "third_party/zlib/zlib.h"
+
+using media::cast::FrameEventMap;
+using media::cast::PacketEventMap;
+using media::cast::RtpTimestamp;
+using media::cast::proto::AggregatedFrameEvent;
+using media::cast::proto::AggregatedPacketEvent;
+using media::cast::proto::BasePacketEvent;
+using media::cast::proto::LogMetadata;
+
+namespace {
+
+// Use 60MB of temp buffer to hold uncompressed data if |compressed| is true.
+// This is double the size of the temp buffer used during compression (30MB),
+// since there are two streams in the blob.
+// Keep in sync with media/cast/logging/log_serializer.cc.
+const int kMaxUncompressedBytes = 60 * 1000 * 1000;
+
+void MergePacketEvent(const AggregatedPacketEvent& from,
+ linked_ptr<AggregatedPacketEvent> to) {
+ for (int i = 0; i < from.base_packet_event_size(); i++) {
+ const BasePacketEvent& from_base_event = from.base_packet_event(i);
+ bool merged = false;
+ for (int j = 0; j < to->base_packet_event_size(); j++) {
+ BasePacketEvent* to_base_event = to->mutable_base_packet_event(j);
+ if (from_base_event.packet_id() == to_base_event->packet_id()) {
+ int packet_size = std::max(
+ from_base_event.size(), to_base_event->size());
+ // Need special merge logic here because we need to prevent a valid
+ // packet size (> 0) from being overwritten with an invalid one (= 0).
+ to_base_event->MergeFrom(from_base_event);
+ to_base_event->set_size(packet_size);
+ merged = true;
+ break;
+ }
+ }
+ if (!merged) {
+ BasePacketEvent* to_base_event = to->add_base_packet_event();
+ to_base_event->CopyFrom(from_base_event);
+ }
+ }
+}
+
+void MergeFrameEvent(const AggregatedFrameEvent& from,
+ linked_ptr<AggregatedFrameEvent> to) {
+ to->mutable_event_type()->MergeFrom(from.event_type());
+ to->mutable_event_timestamp_ms()->MergeFrom(from.event_timestamp_ms());
+ if (!to->has_encoded_frame_size() && from.has_encoded_frame_size())
+ to->set_encoded_frame_size(from.encoded_frame_size());
+ if (!to->has_delay_millis() && from.has_delay_millis())
+ to->set_delay_millis(from.delay_millis());
+ if (!to->has_key_frame() && from.has_key_frame())
+ to->set_key_frame(from.key_frame());
+ if (!to->has_target_bitrate() && from.has_target_bitrate())
+ to->set_target_bitrate(from.target_bitrate());
+}
+
+bool PopulateDeserializedLog(base::BigEndianReader* reader,
+ media::cast::DeserializedLog* log) {
+ FrameEventMap frame_event_map;
+ PacketEventMap packet_event_map;
+
+ int num_frame_events = log->metadata.num_frame_events();
+ RtpTimestamp relative_rtp_timestamp = 0;
+ uint16 proto_size = 0;
+ for (int i = 0; i < num_frame_events; i++) {
+ if (!reader->ReadU16(&proto_size))
+ return false;
+
+ linked_ptr<AggregatedFrameEvent> frame_event(new AggregatedFrameEvent);
+ if (!frame_event->ParseFromArray(reader->ptr(), proto_size))
+ return false;
+ if (!reader->Skip(proto_size))
+ return false;
+
+ // During serialization, the RTP timestamp in each proto is stored as a
+ // delta relative to the previous frame. Convert it back to a value
+ // relative to the first RTP timestamp.
+ frame_event->set_relative_rtp_timestamp(
+ frame_event->relative_rtp_timestamp() + relative_rtp_timestamp);
+ relative_rtp_timestamp = frame_event->relative_rtp_timestamp();
+
+ FrameEventMap::iterator it = frame_event_map.find(
+ frame_event->relative_rtp_timestamp());
+ if (it == frame_event_map.end()) {
+ frame_event_map.insert(
+ std::make_pair(frame_event->relative_rtp_timestamp(), frame_event));
+ } else {
+ // Events for the same frame might have been split into more than one
+ // proto. Merge them.
+ MergeFrameEvent(*frame_event, it->second);
+ }
+ }
+
+ log->frame_events.swap(frame_event_map);
+
+ int num_packet_events = log->metadata.num_packet_events();
+ relative_rtp_timestamp = 0;
+ for (int i = 0; i < num_packet_events; i++) {
+ if (!reader->ReadU16(&proto_size))
+ return false;
+
+ linked_ptr<AggregatedPacketEvent> packet_event(new AggregatedPacketEvent);
+ if (!packet_event->ParseFromArray(reader->ptr(), proto_size))
+ return false;
+ if (!reader->Skip(proto_size))
+ return false;
+
+ packet_event->set_relative_rtp_timestamp(
+ packet_event->relative_rtp_timestamp() + relative_rtp_timestamp);
+ relative_rtp_timestamp = packet_event->relative_rtp_timestamp();
+
+ PacketEventMap::iterator it = packet_event_map.find(
+ packet_event->relative_rtp_timestamp());
+ if (it == packet_event_map.end()) {
+ packet_event_map.insert(
+ std::make_pair(packet_event->relative_rtp_timestamp(), packet_event));
+ } else {
+ // Events for the same frame might have been split into more than one
+ // proto. Merge them.
+ MergePacketEvent(*packet_event, it->second);
+ }
+ }
+
+ log->packet_events.swap(packet_event_map);
+
+ return true;
+}
+
+bool DoDeserializeEvents(const char* data,
+ int data_bytes,
+ media::cast::DeserializedLog* audio_log,
+ media::cast::DeserializedLog* video_log) {
+ bool got_audio = false;
+ bool got_video = false;
+ base::BigEndianReader reader(data, data_bytes);
+
+ LogMetadata metadata;
+ uint16 proto_size = 0;
+ while (reader.remaining() > 0) {
+ if (!reader.ReadU16(&proto_size))
+ return false;
+ if (!metadata.ParseFromArray(reader.ptr(), proto_size))
+ return false;
+ if (!reader.Skip(proto_size))
+ return false;
+
+ if (metadata.is_audio()) {
+ if (got_audio) {
+ VLOG(1) << "Got audio data twice.";
+ return false;
+ }
+
+ got_audio = true;
+ audio_log->metadata = metadata;
+ if (!PopulateDeserializedLog(&reader, audio_log))
+ return false;
+ } else {
+ if (got_video) {
+ VLOG(1) << "Got duplicate video log.";
+ return false;
+ }
+
+ got_video = true;
+ video_log->metadata = metadata;
+ if (!PopulateDeserializedLog(&reader, video_log))
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Uncompress(const char* data,
+ int data_bytes,
+ int max_uncompressed_bytes,
+ char* uncompressed,
+ int* uncompressed_bytes) {
+ z_stream stream = {0};
+
+ stream.next_in = reinterpret_cast<uint8*>(const_cast<char*>(data));
+ stream.avail_in = data_bytes;
+ stream.next_out = reinterpret_cast<uint8*>(uncompressed);
+ stream.avail_out = max_uncompressed_bytes;
+
+ bool success = false;
+ while (stream.avail_in > 0 && stream.avail_out > 0) {
+ // Adding 16 to the window bits tells zlib to expect the gzip format.
+ int result = inflateInit2(&stream, MAX_WBITS + 16);
+ DCHECK_EQ(Z_OK, result);
+
+ result = inflate(&stream, Z_FINISH);
+ success = (result == Z_STREAM_END);
+ if (!success) {
+ DVLOG(2) << "inflate() failed. Result: " << result;
+ break;
+ }
+
+ result = inflateEnd(&stream);
+ DCHECK_EQ(Z_OK, result);
+ }
+
+ if (stream.avail_in == 0) {
+ success = true;
+ *uncompressed_bytes = max_uncompressed_bytes - stream.avail_out;
+ }
+ return success;
+}
+
+} // namespace
+
+namespace media {
+namespace cast {
+
+bool DeserializeEvents(const char* data,
+ int data_bytes,
+ bool compressed,
+ DeserializedLog* audio_log,
+ DeserializedLog* video_log) {
+ DCHECK_GT(data_bytes, 0);
+
+ if (compressed) {
+ scoped_ptr<char[]> uncompressed(new char[kMaxUncompressedBytes]);
+ int uncompressed_bytes = 0;
+ if (!Uncompress(data,
+ data_bytes,
+ kMaxUncompressedBytes,
+ uncompressed.get(),
+ &uncompressed_bytes))
+ return false;
+
+ return DoDeserializeEvents(
+ uncompressed.get(), uncompressed_bytes, audio_log, video_log);
+ } else {
+ return DoDeserializeEvents(data, data_bytes, audio_log, video_log);
+ }
+}
+
+DeserializedLog::DeserializedLog() {}
+DeserializedLog::~DeserializedLog() {}
+
+} // namespace cast
+} // namespace media
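
The std::max() guard in MergePacketEvent() above exists because a proto MergeFrom() overwrites scalar fields that are set on the source, so a later event with an unknown size (0) could clobber a size recorded earlier. A self-contained sketch of the same merge rule, using a hypothetical plain struct in place of BasePacketEvent:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for BasePacketEvent; |size| is 0 when unknown.
struct PacketRecord {
  uint16_t packet_id;
  int size;
};

// Merge |from| into |to| without letting a valid size (> 0) be replaced
// by an invalid one (== 0) -- the same rule as the std::max() call above.
void MergeRecord(const PacketRecord& from, PacketRecord* to) {
  to->size = std::max(from.size, to->size);
}

int main() {
  PacketRecord earlier = {3, 1400};  // Size was known when first logged.
  PacketRecord later = {3, 0};       // A later event carrying no size.
  MergeRecord(later, &earlier);
  assert(earlier.size == 1400);      // The known size survives the merge.
  return 0;
}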
diff --git a/chromium/media/cast/logging/log_deserializer.h b/chromium/media/cast/logging/log_deserializer.h
new file mode 100644
index 00000000000..01b6db7dd12
--- /dev/null
+++ b/chromium/media/cast/logging/log_deserializer.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_LOG_DESERIALIZER_H_
+#define MEDIA_CAST_LOGGING_LOG_DESERIALIZER_H_
+
+#include <map>
+#include <string>
+
+#include "base/memory/linked_ptr.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/proto/raw_events.pb.h"
+
+namespace media {
+namespace cast {
+
+typedef std::map<RtpTimestamp,
+ linked_ptr<media::cast::proto::AggregatedFrameEvent> >
+ FrameEventMap;
+typedef std::map<RtpTimestamp,
+ linked_ptr<media::cast::proto::AggregatedPacketEvent> >
+ PacketEventMap;
+
+// Represents deserialized raw event logs for a particular stream.
+struct DeserializedLog {
+ DeserializedLog();
+ ~DeserializedLog();
+ proto::LogMetadata metadata;
+ FrameEventMap frame_events;
+ PacketEventMap packet_events;
+};
+
+// This function takes the output of LogSerializer and deserializes it into
+// its original format. Returns true if deserialization is successful. All
+// output arguments are valid if this function returns true.
+// |data|: Serialized event logs with length |data_bytes|.
+// |compressed|: true if |data| is compressed in gzip format.
+// |audio_log|, |video_log|: These will be populated with the deserialized
+// metadata and log data for the audio and video streams, respectively.
+bool DeserializeEvents(const char* data,
+ int data_bytes,
+ bool compressed,
+ DeserializedLog* audio_log,
+ DeserializedLog* video_log);
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOG_DESERIALIZER_H_
diff --git a/chromium/media/cast/logging/log_serializer.cc b/chromium/media/cast/logging/log_serializer.cc
new file mode 100644
index 00000000000..afcf77013f3
--- /dev/null
+++ b/chromium/media/cast/logging/log_serializer.cc
@@ -0,0 +1,190 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// The serialization format is as follows:
+// 16-bit integer describing the following LogMetadata proto size in bytes.
+// The LogMetadata proto, which contains the number of frame events and
+// packet events that follow.
+// (The following, repeated for the number of frame events):
+// 16-bit integer describing the following AggregatedFrameEvent proto size
+// in bytes.
+// The AggregatedFrameEvent proto.
+// (The following, repeated for the number of packet events):
+// 16-bit integer describing the following AggregatedPacketEvent proto
+// size in bytes.
+// The AggregatedPacketEvent proto.
+
+#include "media/cast/logging/log_serializer.h"
+
+#include "base/big_endian.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "third_party/zlib/zlib.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+
+using media::cast::proto::AggregatedFrameEvent;
+using media::cast::proto::AggregatedPacketEvent;
+using media::cast::proto::LogMetadata;
+
+// Use 30MB of temp buffer to hold uncompressed data if |compress| is true.
+const int kMaxUncompressedBytes = 30 * 1000 * 1000;
+
+// The maximum allowed size per serialized proto.
+const int kMaxSerializedProtoBytes = (1 << 16) - 1;
+
+bool DoSerializeEvents(const LogMetadata& metadata,
+ const FrameEventList& frame_events,
+ const PacketEventList& packet_events,
+ const int max_output_bytes,
+ char* output,
+ int* output_bytes) {
+ base::BigEndianWriter writer(output, max_output_bytes);
+
+ int proto_size = metadata.ByteSize();
+ DCHECK(proto_size <= kMaxSerializedProtoBytes);
+ if (!writer.WriteU16(proto_size))
+ return false;
+ if (!metadata.SerializeToArray(writer.ptr(), writer.remaining()))
+ return false;
+ if (!writer.Skip(proto_size))
+ return false;
+
+ RtpTimestamp prev_rtp_timestamp = 0;
+ for (media::cast::FrameEventList::const_iterator it = frame_events.begin();
+ it != frame_events.end();
+ ++it) {
+ media::cast::proto::AggregatedFrameEvent frame_event(**it);
+
+ // Adjust the relative RTP timestamp so that it is relative to the previous
+ // frame rather than to the first RTP timestamp; smaller values encode
+ // more compactly.
+ RtpTimestamp old_relative_rtp_timestamp =
+ frame_event.relative_rtp_timestamp();
+ frame_event.set_relative_rtp_timestamp(
+ old_relative_rtp_timestamp - prev_rtp_timestamp);
+ prev_rtp_timestamp = old_relative_rtp_timestamp;
+
+ proto_size = frame_event.ByteSize();
+ DCHECK(proto_size <= kMaxSerializedProtoBytes);
+
+ // Write size of the proto, then write the proto.
+ if (!writer.WriteU16(proto_size))
+ return false;
+ if (!frame_event.SerializeToArray(writer.ptr(), writer.remaining()))
+ return false;
+ if (!writer.Skip(proto_size))
+ return false;
+ }
+
+ // Write packet events.
+ prev_rtp_timestamp = 0;
+ for (media::cast::PacketEventList::const_iterator it = packet_events.begin();
+ it != packet_events.end();
+ ++it) {
+ media::cast::proto::AggregatedPacketEvent packet_event(**it);
+ RtpTimestamp old_relative_rtp_timestamp =
+ packet_event.relative_rtp_timestamp();
+ packet_event.set_relative_rtp_timestamp(
+ old_relative_rtp_timestamp - prev_rtp_timestamp);
+ prev_rtp_timestamp = old_relative_rtp_timestamp;
+
+ proto_size = packet_event.ByteSize();
+ DCHECK(proto_size <= kMaxSerializedProtoBytes);
+
+ // Write size of the proto, then write the proto.
+ if (!writer.WriteU16(proto_size))
+ return false;
+ if (!packet_event.SerializeToArray(writer.ptr(), writer.remaining()))
+ return false;
+ if (!writer.Skip(proto_size))
+ return false;
+ }
+
+ *output_bytes = max_output_bytes - writer.remaining();
+ return true;
+}
+
+bool Compress(char* uncompressed_buffer,
+ int uncompressed_bytes,
+ int max_output_bytes,
+ char* output,
+ int* output_bytes) {
+ z_stream stream = {0};
+ int result = deflateInit2(&stream,
+ Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED,
+ // Adding 16 to the window bits makes zlib emit a gzip header and trailer.
+ MAX_WBITS + 16,
+ 8, // memLevel = 8 is default.
+ Z_DEFAULT_STRATEGY);
+ DCHECK_EQ(Z_OK, result);
+
+ stream.next_in = reinterpret_cast<uint8*>(uncompressed_buffer);
+ stream.avail_in = uncompressed_bytes;
+ stream.next_out = reinterpret_cast<uint8*>(output);
+ stream.avail_out = max_output_bytes;
+
+ // Do a one-shot compression. This will return Z_STREAM_END only if |output|
+ // is large enough to hold all compressed data.
+ result = deflate(&stream, Z_FINISH);
+ bool success = (result == Z_STREAM_END);
+
+ if (!success)
+ DVLOG(2) << "deflate() failed. Result: " << result;
+
+ result = deflateEnd(&stream);
+ DCHECK(result == Z_OK || result == Z_DATA_ERROR);
+
+ if (success)
+ *output_bytes = max_output_bytes - stream.avail_out;
+
+ return success;
+}
+
+} // namespace
+
+bool SerializeEvents(const LogMetadata& log_metadata,
+ const FrameEventList& frame_events,
+ const PacketEventList& packet_events,
+ bool compress,
+ int max_output_bytes,
+ char* output,
+ int* output_bytes) {
+ DCHECK_GT(max_output_bytes, 0);
+ DCHECK(output);
+ DCHECK(output_bytes);
+
+ if (compress) {
+ // Allocate a reasonably large temp buffer to hold uncompressed data.
+ scoped_ptr<char[]> uncompressed_buffer(new char[kMaxUncompressedBytes]);
+ int uncompressed_bytes;
+ bool success = DoSerializeEvents(log_metadata,
+ frame_events,
+ packet_events,
+ kMaxUncompressedBytes,
+ uncompressed_buffer.get(),
+ &uncompressed_bytes);
+ if (!success)
+ return false;
+ return Compress(uncompressed_buffer.get(),
+ uncompressed_bytes,
+ max_output_bytes,
+ output,
+ output_bytes);
+ } else {
+ return DoSerializeEvents(log_metadata,
+ frame_events,
+ packet_events,
+ max_output_bytes,
+ output,
+ output_bytes);
+ }
+}
+
+} // namespace cast
+} // namespace media
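
The writer above mirrors the reader in log_deserializer.cc: each proto is framed by a big-endian 16-bit length prefix, and RTP timestamps are delta-encoded against the previous event so the protos stay small. A self-contained sketch of that framing and delta step for an arbitrary payload (plain C++; AppendFramed and DeltaEncode are illustrative stand-ins, not Chromium APIs):

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Append a big-endian 16-bit length prefix followed by |payload|,
// matching the WriteU16()-then-proto layout used above.
void AppendFramed(const std::string& payload, std::string* out) {
  assert(payload.size() < (1u << 16));  // kMaxSerializedProtoBytes analogue.
  const uint16_t size = static_cast<uint16_t>(payload.size());
  out->push_back(static_cast<char>(size >> 8));
  out->push_back(static_cast<char>(size & 0xff));
  out->append(payload);
}

// Store each timestamp as a delta from the previous one; small deltas
// encode more compactly than large absolute values.
std::vector<uint32_t> DeltaEncode(const std::vector<uint32_t>& timestamps) {
  std::vector<uint32_t> deltas;
  uint32_t prev = 0;
  for (size_t i = 0; i < timestamps.size(); ++i) {
    deltas.push_back(timestamps[i] - prev);  // Modulo 2^32; wrap-safe.
    prev = timestamps[i];
  }
  return deltas;
}

int main() {
  std::string blob;
  AppendFramed("hello", &blob);
  assert(blob.size() == 2u + 5u);  // 2-byte prefix + payload.

  std::vector<uint32_t> ts;
  ts.push_back(100); ts.push_back(130); ts.push_back(190);
  std::vector<uint32_t> deltas = DeltaEncode(ts);
  assert(deltas[1] == 30u && deltas[2] == 60u);
  return 0;
}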
diff --git a/chromium/media/cast/logging/log_serializer.h b/chromium/media/cast/logging/log_serializer.h
new file mode 100644
index 00000000000..8aff54fc95d
--- /dev/null
+++ b/chromium/media/cast/logging/log_serializer.h
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_LOG_SERIALIZER_H_
+#define MEDIA_CAST_LOGGING_LOG_SERIALIZER_H_
+
+#include <string>
+
+#include "media/cast/logging/encoding_event_subscriber.h"
+
+namespace media {
+namespace cast {
+
+// Serialize |frame_events|, |packet_events|, |log_metadata|
+// returned from EncodingEventSubscriber.
+// Result is written to |output|, which can hold |max_output_bytes| of data.
+// If |compress| is true, |output| will be set with data compressed in
+// gzip format.
+// |output_bytes| will be set to number of bytes written.
+//
+// Returns |true| if serialization is successful. This function
+// returns |false| if the serialized data would exceed |max_output_bytes|.
+//
+// See .cc file for format specification.
+bool SerializeEvents(const media::cast::proto::LogMetadata& log_metadata,
+ const FrameEventList& frame_events,
+ const PacketEventList& packet_events,
+ bool compress,
+ int max_output_bytes,
+ char* output,
+ int* output_bytes);
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOG_SERIALIZER_H_
diff --git a/chromium/media/cast/logging/logging_defines.cc b/chromium/media/cast/logging/logging_defines.cc
index 85abe7c5d45..05ceeb9521f 100644
--- a/chromium/media/cast/logging/logging_defines.cc
+++ b/chromium/media/cast/logging/logging_defines.cc
@@ -6,96 +6,46 @@
#include "base/logging.h"
+#define ENUM_TO_STRING(enum) \
+ case enum: \
+ return #enum
+
namespace media {
namespace cast {
-CastLoggingConfig::CastLoggingConfig()
- : enable_data_collection(false),
- enable_uma_stats(false),
- enable_tracing(false) {}
-
-CastLoggingConfig::~CastLoggingConfig() {}
-
-CastLoggingConfig GetDefaultCastLoggingConfig() {
- CastLoggingConfig config;
- return config;
-}
-
-std::string CastLoggingToString(CastLoggingEvent event) {
+const char* CastLoggingToString(CastLoggingEvent event) {
switch (event) {
- case(kUnknown):
- // Can happen if the sender and receiver of RTCP log messages are not
- // aligned.
- return "Unknown";
- case(kRttMs):
- return "RttMs";
- case(kPacketLoss):
- return "PacketLoss";
- case(kJitterMs):
- return "JitterMs";
- case(kAckReceived):
- return "AckReceived";
- case(kRembBitrate):
- return "RembBitrate";
- case(kAckSent):
- return "AckSent";
- case(kLastEvent):
- return "LastEvent";
- case(kAudioFrameReceived):
- return "AudioFrameReceived";
- case(kAudioFrameCaptured):
- return "AudioFrameCaptured";
- case(kAudioFrameEncoded):
- return "AudioFrameEncoded";
- case(kAudioPlayoutDelay):
- return "AudioPlayoutDelay";
- case(kAudioFrameDecoded):
- return "AudioFrameDecoded";
- case(kVideoFrameCaptured):
- return "VideoFrameCaptured";
- case(kVideoFrameReceived):
- return "VideoFrameReceived";
- case(kVideoFrameSentToEncoder):
- return "VideoFrameSentToEncoder";
- case(kVideoFrameEncoded):
- return "VideoFrameEncoded";
- case(kVideoFrameDecoded):
- return "VideoFrameDecoded";
- case(kVideoRenderDelay):
- return "VideoRenderDelay";
- case(kPacketSentToPacer):
- return "PacketSentToPacer";
- case(kPacketSentToNetwork):
- return "PacketSentToNetwork";
- case(kPacketRetransmited):
- return "PacketRetransmited";
- case(kPacketReceived):
- return "PacketReceived";
- default:
- NOTREACHED();
- return "";
+ ENUM_TO_STRING(UNKNOWN);
+ ENUM_TO_STRING(FRAME_CAPTURE_BEGIN);
+ ENUM_TO_STRING(FRAME_CAPTURE_END);
+ ENUM_TO_STRING(FRAME_ENCODED);
+ ENUM_TO_STRING(FRAME_ACK_RECEIVED);
+ ENUM_TO_STRING(FRAME_ACK_SENT);
+ ENUM_TO_STRING(FRAME_DECODED);
+ ENUM_TO_STRING(FRAME_PLAYOUT);
+ ENUM_TO_STRING(PACKET_SENT_TO_NETWORK);
+ ENUM_TO_STRING(PACKET_RETRANSMITTED);
+ ENUM_TO_STRING(PACKET_RTX_REJECTED);
+ ENUM_TO_STRING(PACKET_RECEIVED);
}
+ NOTREACHED();
+ return "";
}
-FrameEvent::FrameEvent() {}
+FrameEvent::FrameEvent()
+ : rtp_timestamp(0u), frame_id(kFrameIdUnknown), size(0u), type(UNKNOWN),
+ media_type(UNKNOWN_EVENT), key_frame(false), target_bitrate(0) {}
FrameEvent::~FrameEvent() {}
-BasePacketInfo::BasePacketInfo() {}
-BasePacketInfo::~BasePacketInfo() {}
-
-PacketEvent::PacketEvent() {}
+PacketEvent::PacketEvent()
+ : rtp_timestamp(0),
+ frame_id(kFrameIdUnknown),
+ max_packet_id(0),
+ packet_id(0),
+ size(0),
+ type(UNKNOWN),
+ media_type(UNKNOWN_EVENT) {}
PacketEvent::~PacketEvent() {}
-GenericEvent::GenericEvent() {}
-GenericEvent::~GenericEvent() {}
-
-FrameLogStats::FrameLogStats()
- : framerate_fps(0),
- bitrate_kbps(0),
- max_delay_ms(0),
- min_delay_ms(0),
- avg_delay_ms(0) {}
-FrameLogStats::~FrameLogStats() {}
-
} // namespace cast
} // namespace media
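
The ENUM_TO_STRING macro introduced above relies on the preprocessor stringizing operator (#) so every case returns its own enumerator name, keeping the switch mechanically in sync with the enum. A minimal standalone illustration of the pattern (Color and ColorToString are invented for this example):

#include <stdio.h>

#define ENUM_TO_STRING(enum) \
  case enum:                 \
    return #enum

enum Color { RED, GREEN, BLUE };

const char* ColorToString(Color color) {
  switch (color) {
    ENUM_TO_STRING(RED);
    ENUM_TO_STRING(GREEN);
    ENUM_TO_STRING(BLUE);
  }
  return "";
}

int main() {
  printf("%s\n", ColorToString(GREEN));  // Prints "GREEN".
  return 0;
}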
diff --git a/chromium/media/cast/logging/logging_defines.h b/chromium/media/cast/logging/logging_defines.h
index 5a7bca1500f..021a3c99a7a 100644
--- a/chromium/media/cast/logging/logging_defines.h
+++ b/chromium/media/cast/logging/logging_defines.h
@@ -9,120 +9,88 @@
#include <string>
#include <vector>
-#include "base/memory/linked_ptr.h"
#include "base/time/time.h"
namespace media {
namespace cast {
-static const uint32 kFrameIdUnknown = 0xFFFF;
+static const uint32 kFrameIdUnknown = 0xFFFFFFFF;
-struct CastLoggingConfig {
- CastLoggingConfig();
- ~CastLoggingConfig();
+typedef uint32 RtpTimestamp;
- bool enable_data_collection;
- bool enable_uma_stats;
- bool enable_tracing;
+enum CastLoggingEvent {
+ UNKNOWN,
+ // Sender side frame events.
+ FRAME_CAPTURE_BEGIN,
+ FRAME_CAPTURE_END,
+ FRAME_ENCODED,
+ FRAME_ACK_RECEIVED,
+ // Receiver side frame events.
+ FRAME_ACK_SENT,
+ FRAME_DECODED,
+ FRAME_PLAYOUT,
+ // Sender side packet events.
+ PACKET_SENT_TO_NETWORK,
+ PACKET_RETRANSMITTED,
+ PACKET_RTX_REJECTED,
+ // Receiver side packet events.
+ PACKET_RECEIVED,
+ kNumOfLoggingEvents = PACKET_RECEIVED
};
-// By default, enable raw and stats data collection. Disable tracing and UMA.
-CastLoggingConfig GetDefaultCastLoggingConfig();
+const char* CastLoggingToString(CastLoggingEvent event);
-enum CastLoggingEvent {
- // Generic events.
- kUnknown,
- kRttMs,
- kPacketLoss,
- kJitterMs,
- kAckReceived,
- kRembBitrate,
- kAckSent,
- kLastEvent,
- // Audio sender.
- kAudioFrameReceived,
- kAudioFrameCaptured,
- kAudioFrameEncoded,
- // Audio receiver.
- kAudioPlayoutDelay,
- kAudioFrameDecoded,
- // Video sender.
- kVideoFrameCaptured,
- kVideoFrameReceived,
- kVideoFrameSentToEncoder,
- kVideoFrameEncoded,
- // Video receiver.
- kVideoFrameDecoded,
- kVideoRenderDelay,
- // Send-side packet events.
- kPacketSentToPacer,
- kPacketSentToNetwork,
- kPacketRetransmited,
- // Receive-side packet events.
- kPacketReceived,
-
- kNumOfLoggingEvents,
+// Each CastLoggingEvent is classified into one of the following three types.
+enum EventMediaType {
+ AUDIO_EVENT,
+ VIDEO_EVENT,
+ UNKNOWN_EVENT,
+ EVENT_MEDIA_TYPE_LAST = UNKNOWN_EVENT
};
-std::string CastLoggingToString(CastLoggingEvent event);
-
struct FrameEvent {
FrameEvent();
~FrameEvent();
+ RtpTimestamp rtp_timestamp;
uint32 frame_id;
- size_t size; // Encoded size only.
- std::vector<base::TimeTicks> timestamp;
- std::vector<CastLoggingEvent> type;
- base::TimeDelta delay_delta; // Render/playout delay.
-};
-
-// Internal map sorted by packet id.
-struct BasePacketInfo {
- BasePacketInfo();
- ~BasePacketInfo();
+ // Size of encoded frame. Only set for FRAME_ENCODED event.
size_t size;
- std::vector<base::TimeTicks> timestamp;
- std::vector<CastLoggingEvent> type;
-};
-typedef std::map<uint16, BasePacketInfo> BasePacketMap;
+ // Time the event was logged.
+ base::TimeTicks timestamp;
-struct PacketEvent {
- PacketEvent();
- ~PacketEvent();
- uint32 frame_id;
- int max_packet_id;
- BasePacketMap packet_map;
-};
+ CastLoggingEvent type;
-struct GenericEvent {
- GenericEvent();
- ~GenericEvent();
- std::vector<int> value;
- std::vector<base::TimeTicks> timestamp;
-};
+ EventMediaType media_type;
+
+ // Render / playout delay. Only set for FRAME_PLAYOUT events.
+ base::TimeDelta delay_delta;
-struct FrameLogStats {
- FrameLogStats();
- ~FrameLogStats();
+ // Whether the frame is a key frame. Only set for video FRAME_ENCODED event.
+ bool key_frame;
- double framerate_fps;
- double bitrate_kbps;
- int max_delay_ms;
- int min_delay_ms;
- int avg_delay_ms;
+ // The requested target bitrate of the encoder at the time the frame is
+ // encoded. Only set for video FRAME_ENCODED event.
+ int target_bitrate;
};
-// Store all log types in a map based on the event.
-typedef std::map<uint32, FrameEvent> FrameRawMap;
-typedef std::map<uint32, PacketEvent> PacketRawMap;
-typedef std::map<CastLoggingEvent, GenericEvent> GenericRawMap;
+struct PacketEvent {
+ PacketEvent();
+ ~PacketEvent();
+
+ RtpTimestamp rtp_timestamp;
+ uint32 frame_id;
+ uint16 max_packet_id;
+ uint16 packet_id;
+ size_t size;
-typedef std::map<CastLoggingEvent, linked_ptr<FrameLogStats > > FrameStatsMap;
-typedef std::map<CastLoggingEvent, double> PacketStatsMap;
-typedef std::map<CastLoggingEvent, double> GenericStatsMap;
+ // Time the event was logged.
+ base::TimeTicks timestamp;
+ CastLoggingEvent type;
+ EventMediaType media_type;
+};
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/logging/logging_impl.cc b/chromium/media/cast/logging/logging_impl.cc
index ea96b94b610..1143d1be217 100644
--- a/chromium/media/cast/logging/logging_impl.cc
+++ b/chromium/media/cast/logging/logging_impl.cc
@@ -2,223 +2,113 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/big_endian.h"
#include "base/debug/trace_event.h"
-#include "base/metrics/histogram.h"
#include "media/cast/logging/logging_impl.h"
-#include "net/base/big_endian.h"
namespace media {
namespace cast {
-LoggingImpl::LoggingImpl(base::TickClock* clock,
- scoped_refptr<base::TaskRunner> main_thread_proxy,
- const CastLoggingConfig& config)
- : main_thread_proxy_(main_thread_proxy),
- config_(config),
- raw_(clock),
- stats_(clock) {}
+// TODO(imcheng): Collapse LoggingRaw into LoggingImpl.
+LoggingImpl::LoggingImpl() {
+ // LoggingImpl can be constructed on any thread, but its methods should all be
+ // called on the same thread.
+ thread_checker_.DetachFromThread();
+}
LoggingImpl::~LoggingImpl() {}
-void LoggingImpl::InsertFrameEvent(CastLoggingEvent event,
+void LoggingImpl::InsertFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
uint32 frame_id) {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- if (config_.enable_data_collection) {
- raw_.InsertFrameEvent(event, rtp_timestamp, frame_id);
- stats_.InsertFrameEvent(event, rtp_timestamp, frame_id);
- }
- if (config_.enable_tracing) {
- std::string event_string = CastLoggingToString(event);
- TRACE_EVENT_INSTANT2(event_string.c_str(), "FE",
- TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "frame_id",
- frame_id);
- }
-}
-
-void LoggingImpl::InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- int frame_size) {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- if (config_.enable_data_collection) {
- raw_.InsertFrameEventWithSize(event, rtp_timestamp, frame_id, frame_size);
- stats_.InsertFrameEventWithSize(event, rtp_timestamp, frame_id, frame_size);
- }
- if (config_.enable_uma_stats) {
- UMA_HISTOGRAM_COUNTS(CastLoggingToString(event), frame_size);
- }
- if (config_.enable_tracing) {
- std::string event_string = CastLoggingToString(event);
- TRACE_EVENT_INSTANT2(event_string.c_str(), "FES",
- TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "frame_size",
- frame_size);
- }
+ DCHECK(thread_checker_.CalledOnValidThread());
+ raw_.InsertFrameEvent(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id);
}
-void LoggingImpl::InsertFrameEventWithDelay(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- base::TimeDelta delay) {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- if (config_.enable_data_collection) {
- raw_.InsertFrameEventWithDelay(event, rtp_timestamp, frame_id, delay);
- stats_.InsertFrameEventWithDelay(event, rtp_timestamp, frame_id, delay);
- }
- if (config_.enable_uma_stats) {
- UMA_HISTOGRAM_TIMES(CastLoggingToString(event), delay);
- }
- if (config_.enable_tracing) {
- std::string event_string = CastLoggingToString(event);
- TRACE_EVENT_INSTANT2(event_string.c_str(), "FED",
- TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "delay",
- delay.InMilliseconds());
- }
+void LoggingImpl::InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp,
+ uint32 frame_id, int frame_size,
+ bool key_frame,
+ int target_bitrate) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ raw_.InsertEncodedFrameEvent(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id, frame_size, key_frame, target_bitrate);
}
-void LoggingImpl::InsertPacketListEvent(CastLoggingEvent event,
- const PacketList& packets) {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- for (unsigned int i = 0; i < packets.size(); ++i) {
- const Packet& packet = packets[i];
- // Parse basic properties.
- uint32 rtp_timestamp;
- uint16 packet_id, max_packet_id;
- const uint8* packet_data = &packet[0];
- net::BigEndianReader big_endian_reader(packet_data + 4, 4);
- big_endian_reader.ReadU32(&rtp_timestamp);
- net::BigEndianReader cast_big_endian_reader(packet_data + 12 + 2, 4);
- cast_big_endian_reader.ReadU16(&packet_id);
- cast_big_endian_reader.ReadU16(&max_packet_id);
- // rtp_timestamp is enough - no need for frame_id as well.
- InsertPacketEvent(event, rtp_timestamp, kFrameIdUnknown, packet_id,
- max_packet_id, packet.size());
- }
+void LoggingImpl::InsertFrameEventWithDelay(
+ const base::TimeTicks& time_of_event, CastLoggingEvent event,
+ EventMediaType event_media_type, uint32 rtp_timestamp, uint32 frame_id,
+ base::TimeDelta delay) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ raw_.InsertFrameEventWithDelay(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id, delay);
}
-void LoggingImpl::InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- size_t size) {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- if (config_.enable_data_collection) {
- raw_.InsertPacketEvent(event, rtp_timestamp, frame_id, packet_id,
- max_packet_id, size);
- stats_.InsertPacketEvent(event, rtp_timestamp, frame_id, packet_id,
- max_packet_id, size);
- }
- if (config_.enable_tracing) {
- std::string event_string = CastLoggingToString(event);
- TRACE_EVENT_INSTANT2(event_string.c_str(), "PE",
- TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp,
- "packet_id", packet_id);
- }
+void LoggingImpl::InsertSinglePacketEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ const Packet& packet) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Parse basic properties.
+ uint32 rtp_timestamp;
+ uint16 packet_id, max_packet_id;
+ const uint8* packet_data = &packet[0];
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(packet_data + 4), 4);
+ big_endian_reader.ReadU32(&rtp_timestamp);
+ base::BigEndianReader cast_big_endian_reader(
+ reinterpret_cast<const char*>(packet_data + 12 + 2), 4);
+ cast_big_endian_reader.ReadU16(&packet_id);
+ cast_big_endian_reader.ReadU16(&max_packet_id);
+
+ // rtp_timestamp is enough - no need for frame_id as well.
+ InsertPacketEvent(time_of_event,
+ event,
+ event_media_type,
+ rtp_timestamp,
+ kFrameIdUnknown,
+ packet_id,
+ max_packet_id,
+ packet.size());
}
-void LoggingImpl::InsertGenericEvent(CastLoggingEvent event, int value) {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- if (config_.enable_data_collection) {
- raw_.InsertGenericEvent(event, value);
- stats_.InsertGenericEvent(event, value);
- }
- if (config_.enable_uma_stats) {
- UMA_HISTOGRAM_COUNTS(CastLoggingToString(event), value);
- }
- if (config_.enable_tracing) {
- std::string event_string = CastLoggingToString(event);
- TRACE_EVENT_INSTANT1(event_string.c_str(), "GE",
- TRACE_EVENT_SCOPE_THREAD, "value", value);
- }
-}
-
-// should just get the entire class, would be much easier.
-FrameRawMap LoggingImpl::GetFrameRawData() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- return raw_.GetFrameData();
-}
-
-PacketRawMap LoggingImpl::GetPacketRawData() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- return raw_.GetPacketData();
-}
-
-GenericRawMap LoggingImpl::GetGenericRawData() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- return raw_.GetGenericData();
-}
-
-const FrameStatsMap* LoggingImpl::GetFrameStatsData() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- // Get stats data.
- const FrameStatsMap* stats = stats_.GetFrameStatsData();
- if (config_.enable_uma_stats) {
- FrameStatsMap::const_iterator it;
- for (it = stats->begin(); it != stats->end(); ++it) {
- // Check for an active event.
- if (it->second->framerate_fps > 0) {
- std::string event_string = CastLoggingToString(it->first);
- UMA_HISTOGRAM_COUNTS(event_string.append("_framerate_fps"),
- it->second->framerate_fps);
- } else {
- // All active frame events trigger framerate computation.
- continue;
- }
- if (it->second->bitrate_kbps > 0) {
- std::string evnt_string = CastLoggingToString(it->first);
- UMA_HISTOGRAM_COUNTS(evnt_string.append("_bitrate_kbps"),
- it->second->framerate_fps);
- }
- if (it->second->avg_delay_ms > 0) {
- std::string event_string = CastLoggingToString(it->first);
- UMA_HISTOGRAM_COUNTS(event_string.append("_avg_delay_ms"),
- it->second->avg_delay_ms);
- UMA_HISTOGRAM_COUNTS(event_string.append("_min_delay_ms"),
- it->second->min_delay_ms);
- UMA_HISTOGRAM_COUNTS(event_string.append("_max_delay_ms"),
- it->second->max_delay_ms);
- }
- }
+void LoggingImpl::InsertPacketListEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ const PacketList& packets) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (PacketList::const_iterator it = packets.begin(); it != packets.end();
+ ++it) {
+ InsertSinglePacketEvent(time_of_event, event, event_media_type,
+ (*it)->data);
}
- return stats;
}
-const PacketStatsMap* LoggingImpl::GetPacketStatsData() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- // Get stats data.
- const PacketStatsMap* stats = stats_.GetPacketStatsData();
- if (config_.enable_uma_stats) {
- PacketStatsMap::const_iterator it;
- for (it = stats->begin(); it != stats->end(); ++it) {
- if (it->second > 0) {
- std::string event_string = CastLoggingToString(it->first);
- UMA_HISTOGRAM_COUNTS(event_string.append("_bitrate_kbps"), it->second);
- }
- }
- }
- return stats;
+void LoggingImpl::InsertPacketEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ uint16 packet_id, uint16 max_packet_id,
+ size_t size) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ raw_.InsertPacketEvent(time_of_event, event, event_media_type,
+ rtp_timestamp, frame_id, packet_id, max_packet_id, size);
}
-const GenericStatsMap* LoggingImpl::GetGenericStatsData() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- // Get stats data.
- const GenericStatsMap* stats = stats_.GetGenericStatsData();
- if (config_.enable_uma_stats) {
- GenericStatsMap::const_iterator it;
- for (it = stats->begin(); it != stats->end(); ++it) {
- if (it->second > 0) {
- UMA_HISTOGRAM_COUNTS(CastLoggingToString(it->first), it->second);
- }
- }
- }
- return stats;
+void LoggingImpl::AddRawEventSubscriber(RawEventSubscriber* subscriber) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ raw_.AddSubscriber(subscriber);
}
-void LoggingImpl::Reset() {
- DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
- raw_.Reset();
- stats_.Reset();
+void LoggingImpl::RemoveRawEventSubscriber(RawEventSubscriber* subscriber) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ raw_.RemoveSubscriber(subscriber);
}
} // namespace cast
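
For reference, InsertSinglePacketEvent() above pulls the RTP timestamp from byte offset 4 and the Cast packet_id/max_packet_id pair from offset 14 (12 + 2), all in network byte order. A self-contained sketch of those reads with explicit shifts (the offsets simply mirror the code above; the helpers are illustrative, not base:: APIs):

#include <cassert>
#include <cstdint>

uint32_t ReadU32BE(const uint8_t* p) {
  return (static_cast<uint32_t>(p[0]) << 24) |
         (static_cast<uint32_t>(p[1]) << 16) |
         (static_cast<uint32_t>(p[2]) << 8) |
         static_cast<uint32_t>(p[3]);
}

uint16_t ReadU16BE(const uint8_t* p) {
  return static_cast<uint16_t>((p[0] << 8) | p[1]);
}

int main() {
  uint8_t packet[18] = {0};  // Hypothetical packet; only parsed bytes set.
  packet[6] = 0x30; packet[7] = 0x39;  // rtp_timestamp = 12345 at offset 4.
  packet[15] = 0x02;                   // packet_id = 2 at offset 14.
  packet[17] = 0x09;                   // max_packet_id = 9 at offset 16.
  assert(ReadU32BE(packet + 4) == 12345u);
  assert(ReadU16BE(packet + 14) == 2u);
  assert(ReadU16BE(packet + 16) == 9u);
  return 0;
}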
diff --git a/chromium/media/cast/logging/logging_impl.h b/chromium/media/cast/logging/logging_impl.h
index 34021b7d03c..ba453c8c8ee 100644
--- a/chromium/media/cast/logging/logging_impl.h
+++ b/chromium/media/cast/logging/logging_impl.h
@@ -7,67 +7,66 @@
// Generic class that handles event logging for the cast library.
-// Logging has three possible optional forms:
+// Logging has two optional forms:
// 1. Raw data and stats accessible by the application.
-// 2. UMA stats.
-// 3. Tracing of raw events.
+// 2. Tracing of raw events.
#include "base/memory/ref_counted.h"
-#include "base/task_runner.h"
+#include "base/threading/thread_checker.h"
#include "media/cast/cast_config.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/logging/logging_raw.h"
-#include "media/cast/logging/logging_stats.h"
namespace media {
namespace cast {
-// Should only be called from the main thread.
-class LoggingImpl : public base::NonThreadSafe {
+class LoggingImpl {
public:
- LoggingImpl(base::TickClock* clock,
- scoped_refptr<base::TaskRunner> main_thread_proxy,
- const CastLoggingConfig& config);
-
+ LoggingImpl();
~LoggingImpl();
- // TODO(pwestin): Add argument to API to send in time of event instead of
- // grabbing now.
- void InsertFrameEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id);
- void InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- int frame_size);
- void InsertFrameEventWithDelay(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
+ // Note: All methods below should be called from the same thread.
+
+ void InsertFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event, EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id);
+
+ void InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ int frame_size, bool key_frame,
+ int target_bitrate);
+
+ void InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
base::TimeDelta delay);
- void InsertPacketListEvent(CastLoggingEvent event, const PacketList& packets);
-
- void InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- size_t size);
- void InsertGenericEvent(CastLoggingEvent event, int value);
-
- // Get raw data.
- FrameRawMap GetFrameRawData();
- PacketRawMap GetPacketRawData();
- GenericRawMap GetGenericRawData();
- // Get stats only (computed when called). Triggers UMA stats when enabled.
- const FrameStatsMap* GetFrameStatsData();
- const PacketStatsMap* GetPacketStatsData();
- const GenericStatsMap* GetGenericStatsData();
-
- void Reset();
+
+ void InsertSinglePacketEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ const Packet& packet);
+
+ void InsertPacketListEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ const PacketList& packets);
+
+ void InsertPacketEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type, uint32 rtp_timestamp,
+ uint32 frame_id, uint16 packet_id,
+ uint16 max_packet_id, size_t size);
+
+ // Delegates to |LoggingRaw::AddRawEventSubscriber()|.
+ void AddRawEventSubscriber(RawEventSubscriber* subscriber);
+
+ // Delegates to |LoggingRaw::RemoveRawEventSubscriber()|.
+ void RemoveRawEventSubscriber(RawEventSubscriber* subscriber);
private:
- scoped_refptr<base::TaskRunner> main_thread_proxy_;
- const CastLoggingConfig config_;
+ base::ThreadChecker thread_checker_;
LoggingRaw raw_;
- LoggingStats stats_;
DISALLOW_COPY_AND_ASSIGN(LoggingImpl);
};
diff --git a/chromium/media/cast/logging/logging_impl_unittest.cc b/chromium/media/cast/logging/logging_impl_unittest.cc
new file mode 100644
index 00000000000..712d76bae60
--- /dev/null
+++ b/chromium/media/cast/logging/logging_impl_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/rand_util.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/logging_impl.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+// Duration of frame insertion for each test: one second.
+const int64 kIntervalTime1S = 1;
+// Target test frame rate is 30fps, i.e. one frame every 33 ms.
+const int kFrameIntervalMs = 33;
+
+static const int64 kStartMillisecond = INT64_C(12345678900000);
+
+class LoggingImplTest : public ::testing::Test {
+ protected:
+ LoggingImplTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ logging_.AddRawEventSubscriber(&event_subscriber_);
+ }
+
+ virtual ~LoggingImplTest() {
+ logging_.RemoveRawEventSubscriber(&event_subscriber_);
+ }
+
+ LoggingImpl logging_;
+ base::SimpleTestTickClock testing_clock_;
+ SimpleEventSubscriber event_subscriber_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoggingImplTest);
+};
+
+TEST_F(LoggingImplTest, BasicFrameLogging) {
+ base::TimeTicks start_time = testing_clock_.NowTicks();
+ base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ base::TimeTicks now;
+ do {
+ now = testing_clock_.NowTicks();
+ logging_.InsertFrameEvent(
+ now, FRAME_CAPTURE_BEGIN, VIDEO_EVENT, rtp_timestamp, frame_id);
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+ rtp_timestamp += kFrameIntervalMs * 90;
+ ++frame_id;
+ time_interval = now - start_time;
+ } while (time_interval.InSeconds() < kIntervalTime1S);
+
+ // Get logging data.
+ std::vector<FrameEvent> frame_events;
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ // The size of the vector should equal the number of events logged,
+ // which in this case equals the number of frames.
+ EXPECT_EQ(frame_id, frame_events.size());
+}
+
+TEST_F(LoggingImplTest, FrameLoggingWithSize) {
+ // Average encoded frame size.
+ const int kBaseFrameSizeBytes = 25000;
+ const int kRandomSizeInterval = 100;
+ base::TimeTicks start_time = testing_clock_.NowTicks();
+ base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ size_t sum_size = 0;
+ int target_bitrate = 1234;
+ do {
+ int size = kBaseFrameSizeBytes +
+ base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
+ sum_size += static_cast<size_t>(size);
+ logging_.InsertEncodedFrameEvent(testing_clock_.NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT, rtp_timestamp,
+ frame_id, size, true, target_bitrate);
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+ rtp_timestamp += kFrameIntervalMs * 90;
+ ++frame_id;
+ time_interval = testing_clock_.NowTicks() - start_time;
+ } while (time_interval.InSeconds() < kIntervalTime1S);
+ // Get logging data.
+ std::vector<FrameEvent> frame_events;
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ // The size of the vector should equal the number of events logged, which
+ // in this case equals the number of frames.
+ EXPECT_EQ(frame_id, frame_events.size());
+}
+
+TEST_F(LoggingImplTest, FrameLoggingWithDelay) {
+ // Average playout delay.
+ const int kPlayoutDelayMs = 50;
+ const int kRandomSizeInterval = 20;
+ base::TimeTicks start_time = testing_clock_.NowTicks();
+ base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ do {
+ int delay = kPlayoutDelayMs +
+ base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
+ logging_.InsertFrameEventWithDelay(
+ testing_clock_.NowTicks(),
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ base::TimeDelta::FromMilliseconds(delay));
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+ rtp_timestamp += kFrameIntervalMs * 90;
+ ++frame_id;
+ time_interval = testing_clock_.NowTicks() - start_time;
+ } while (time_interval.InSeconds() < kIntervalTime1S);
+ // Get logging data.
+ std::vector<FrameEvent> frame_events;
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ // Size of vector should be equal to the number of frames logged.
+ EXPECT_EQ(frame_id, frame_events.size());
+}
+
+TEST_F(LoggingImplTest, MultipleEventFrameLogging) {
+ base::TimeTicks start_time = testing_clock_.NowTicks();
+ base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+ uint32 rtp_timestamp = 0u;
+ uint32 frame_id = 0u;
+ uint32 num_events = 0u;
+ do {
+ logging_.InsertFrameEvent(testing_clock_.NowTicks(),
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id);
+ ++num_events;
+ if (frame_id % 2) {
+ logging_.InsertEncodedFrameEvent(testing_clock_.NowTicks(),
+ FRAME_ENCODED, AUDIO_EVENT,
+ rtp_timestamp,
+ frame_id, 1500, true, 0);
+ } else if (frame_id % 3) {
+ logging_.InsertFrameEvent(testing_clock_.NowTicks(), FRAME_DECODED,
+ VIDEO_EVENT, rtp_timestamp, frame_id);
+ } else {
+ logging_.InsertFrameEventWithDelay(
+ testing_clock_.NowTicks(), FRAME_PLAYOUT, VIDEO_EVENT,
+ rtp_timestamp, frame_id, base::TimeDelta::FromMilliseconds(20));
+ }
+ ++num_events;
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+ rtp_timestamp += kFrameIntervalMs * 90;
+ ++frame_id;
+ time_interval = testing_clock_.NowTicks() - start_time;
+ } while (time_interval.InSeconds() < kIntervalTime1S);
+ // Get logging data.
+ std::vector<FrameEvent> frame_events;
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ // The size of the vector should equal the number of events logged; note
+ // that multiple events are captured per frame.
+ EXPECT_EQ(num_events, frame_events.size());
+}
+
+TEST_F(LoggingImplTest, PacketLogging) {
+ const int kNumPacketsPerFrame = 10;
+ const int kBaseSize = 2500;
+ const int kSizeInterval = 100;
+ base::TimeTicks start_time = testing_clock_.NowTicks();
+ base::TimeTicks latest_time;
+ base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+ RtpTimestamp rtp_timestamp = 0;
+ int frame_id = 0;
+ int num_packets = 0;
+ int sum_size = 0u;
+ do {
+ for (int i = 0; i < kNumPacketsPerFrame; ++i) {
+ int size = kBaseSize + base::RandInt(-kSizeInterval, kSizeInterval);
+ sum_size += size;
+ latest_time = testing_clock_.NowTicks();
+ ++num_packets;
+ logging_.InsertPacketEvent(latest_time,
+ PACKET_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ i,
+ kNumPacketsPerFrame,
+ size);
+ }
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+ rtp_timestamp += kFrameIntervalMs * 90;
+ ++frame_id;
+ time_interval = testing_clock_.NowTicks() - start_time;
+ } while (time_interval.InSeconds() < kIntervalTime1S);
+ // Get logging data.
+ std::vector<PacketEvent> packet_events;
+ event_subscriber_.GetPacketEventsAndReset(&packet_events);
+ // Size of vector should be equal to the number of packets logged.
+ EXPECT_EQ(num_packets, static_cast<int>(packet_events.size()));
+}
+
+TEST_F(LoggingImplTest, MultipleRawEventSubscribers) {
+ SimpleEventSubscriber event_subscriber_2;
+
+ // Now logging_ has two subscribers.
+ logging_.AddRawEventSubscriber(&event_subscriber_2);
+
+ logging_.InsertFrameEvent(testing_clock_.NowTicks(),
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ /*rtp_timestamp*/ 0u,
+ /*frame_id*/ 0u);
+
+ std::vector<FrameEvent> frame_events;
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ EXPECT_EQ(1u, frame_events.size());
+ frame_events.clear();
+ event_subscriber_2.GetFrameEventsAndReset(&frame_events);
+ EXPECT_EQ(1u, frame_events.size());
+
+ logging_.RemoveRawEventSubscriber(&event_subscriber_2);
+}
+
+} // namespace cast
+} // namespace media
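
One detail worth noting in the tests above: rtp_timestamp advances by kFrameIntervalMs * 90 because RTP video timestamps conventionally run on a 90 kHz clock, i.e. 90 ticks per millisecond. A standalone check of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const int kFrameIntervalMs = 33;     // ~30 fps, as in the tests above.
  const uint32_t kRtpTicksPerMs = 90;  // 90 kHz RTP clock for video.
  const uint32_t ticks_per_frame = kFrameIntervalMs * kRtpTicksPerMs;
  assert(ticks_per_frame == 2970u);    // Each frame advances ~2970 ticks.
  return 0;
}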
diff --git a/chromium/media/cast/logging/logging_internal.cc b/chromium/media/cast/logging/logging_internal.cc
deleted file mode 100644
index ce2249ee4e0..00000000000
--- a/chromium/media/cast/logging/logging_internal.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/logging/logging_internal.h"
-
-namespace media {
-namespace cast {
-
-FrameLogData::FrameLogData(base::TickClock* clock)
- : clock_(clock),
- frame_map_() {}
-
-FrameLogData::~FrameLogData() {}
-
-void FrameLogData::Insert(uint32 rtp_timestamp, uint32 frame_id) {
- FrameEvent info;
- InsertBase(rtp_timestamp, frame_id, info);
-}
-
-void FrameLogData::InsertWithSize(
- uint32 rtp_timestamp, uint32 frame_id, int size) {
- FrameEvent info;
- info.size = size;
- InsertBase(rtp_timestamp, frame_id, info);
-}
-
-void FrameLogData::InsertWithDelay(
- uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay) {
- FrameEvent info;
- info.delay_delta = delay;
- InsertBase(rtp_timestamp, frame_id, info);
-}
-
-void FrameLogData::InsertBase(
- uint32 rtp_timestamp, uint32 frame_id, FrameEvent info) {
- info.timestamp = clock_->NowTicks();
- info.frame_id = frame_id;
- frame_map_.insert(std::make_pair(rtp_timestamp, info));
-}
-
-PacketLogData::PacketLogData(base::TickClock* clock)
- : clock_(clock),
- packet_map_() {}
-
-PacketLogData::~PacketLogData() {}
-
-void PacketLogData::Insert(uint32 rtp_timestamp,
- uint32 frame_id, uint16 packet_id, uint16 max_packet_id, int size) {
- PacketEvent info;
- info.size = size;
- info.max_packet_id = max_packet_id;
- info.frame_id = frame_id;
- info.timestamp = clock_->NowTicks();
- // Is this a new frame?
- PacketMap::iterator it = packet_map_.find(rtp_timestamp);
- if (it == packet_map_.end()) {
- // New rtp_timestamp id - create base packet map.
- BasePacketMap base_map;
- base_map.insert(std::make_pair(packet_id, info));
- packet_map_.insert(std::make_pair(rtp_timestamp, base_map));
- } else {
- // Existing rtp_timestamp.
- it->second.insert(std::make_pair(packet_id, info));
- }
-}
-
-GenericLogData::GenericLogData(base::TickClock* clock)
- : clock_(clock) {}
-
-GenericLogData::~GenericLogData() {}
-
-void GenericLogData::Insert(int data) {
- data_.push_back(data);
- timestamp_.push_back(clock_->NowTicks());
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/logging/logging_internal.h b/chromium/media/cast/logging/logging_internal.h
deleted file mode 100644
index 6f028b925fe..00000000000
--- a/chromium/media/cast/logging/logging_internal.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_LOGGING_LOGGING_INTERNAL_H_
-#define MEDIA_CAST_LOGGING_LOGGING_INTERNAL_H_
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-
-namespace media {
-namespace cast {
-
-// TODO(mikhal): Consider storing only the delta time and not absolute time.
-struct FrameEvent {
- uint32 frame_id;
- int size;
- base::TimeTicks timestamp;
- base::TimeDelta delay_delta; // render/playout delay.
-};
-
-struct PacketEvent {
- uint32 frame_id;
- int max_packet_id;
- size_t size;
- base::TimeTicks timestamp;
-};
-
-// Frame and packet maps are sorted based on the rtp_timestamp.
-typedef std::map<uint32, FrameEvent> FrameMap;
-typedef std::map<uint16, PacketEvent> BasePacketMap;
-typedef std::map<uint32, BasePacketMap> PacketMap;
-
-class FrameLogData {
- public:
- explicit FrameLogData(base::TickClock* clock);
- ~FrameLogData();
- void Insert(uint32 rtp_timestamp, uint32 frame_id);
- // Include size for encoded images (compute bitrate),
- void InsertWithSize(uint32 rtp_timestamp, uint32 frame_id, int size);
- // Include playout/render delay info.
- void InsertWithDelay(
- uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay);
- void Reset();
-
- private:
- void InsertBase(uint32 rtp_timestamp, uint32 frame_id, FrameEvent info);
-
- base::TickClock* const clock_; // Not owned by this class.
- FrameMap frame_map_;
-
- DISALLOW_COPY_AND_ASSIGN(FrameLogData);
-};
-
-// TODO(mikhal): Should be able to handle packet bursts.
-class PacketLogData {
- public:
- explicit PacketLogData(base::TickClock* clock);
- ~PacketLogData();
- void Insert(uint32 rtp_timestamp, uint32 frame_id, uint16 packet_id,
- uint16 max_packet_id, int size);
- void Reset();
-
- private:
- base::TickClock* const clock_; // Not owned by this class.
- PacketMap packet_map_;
-
- DISALLOW_COPY_AND_ASSIGN(PacketLogData);
-};
-
-class GenericLogData {
- public:
- explicit GenericLogData(base::TickClock* clock);
- ~GenericLogData();
- void Insert(int value);
- void Reset();
-
- private:
- base::TickClock* const clock_; // Not owned by this class.
- std::vector<int> data_;
- std::vector<base::TimeTicks> timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(GenericLogData);
-};
-
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_LOGGING_LOGGING_INTERNAL_H_
diff --git a/chromium/media/cast/logging/logging_raw.cc b/chromium/media/cast/logging/logging_raw.cc
index 6a389617f62..229064d7b69 100644
--- a/chromium/media/cast/logging/logging_raw.cc
+++ b/chromium/media/cast/logging/logging_raw.cc
@@ -4,139 +4,110 @@
#include "media/cast/logging/logging_raw.h"
+#include <algorithm>
+
#include "base/logging.h"
-#include "base/metrics/histogram.h"
#include "base/time/time.h"
namespace media {
namespace cast {
-LoggingRaw::LoggingRaw(base::TickClock* clock)
- : clock_(clock),
- frame_map_(),
- packet_map_(),
- generic_map_(),
- weak_factory_(this) {}
+LoggingRaw::LoggingRaw() {}
LoggingRaw::~LoggingRaw() {}
-void LoggingRaw::InsertFrameEvent(CastLoggingEvent event,
+void LoggingRaw::InsertFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
uint32 frame_id) {
- InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+ InsertBaseFrameEvent(time_of_event, event, event_media_type, frame_id,
+ rtp_timestamp, base::TimeDelta(), 0, false, 0);
}
-void LoggingRaw::InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- int size) {
- InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
- // Now insert size.
- FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
- DCHECK(it != frame_map_.end());
- it->second.size = size;
+void LoggingRaw::InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ int size, bool key_frame,
+ int target_bitrate) {
+ InsertBaseFrameEvent(time_of_event, event, event_media_type,
+ frame_id, rtp_timestamp, base::TimeDelta(), size,
+ key_frame, target_bitrate);
}
-void LoggingRaw::InsertFrameEventWithDelay(CastLoggingEvent event,
+void LoggingRaw::InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
uint32 frame_id,
base::TimeDelta delay) {
- InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
- // Now insert delay.
- FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
- DCHECK(it != frame_map_.end());
- it->second.delay_delta = delay;
+ InsertBaseFrameEvent(time_of_event, event, event_media_type, frame_id,
+ rtp_timestamp, delay, 0, false, 0);
}
-void LoggingRaw::InsertBaseFrameEvent(CastLoggingEvent event,
+void LoggingRaw::InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 frame_id,
- uint32 rtp_timestamp) {
- // Is this a new event?
- FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
- if (it == frame_map_.end()) {
- // Create a new map entry.
- FrameEvent info;
- info.frame_id = frame_id;
- info.timestamp.push_back(clock_->NowTicks());
- info.type.push_back(event);
- frame_map_.insert(std::make_pair(rtp_timestamp, info));
- } else {
- // Insert to an existing entry.
- it->second.timestamp.push_back(clock_->NowTicks());
- it->second.type.push_back(event);
- // Do we have a valid frame_id?
- // Not all events have a valid frame id.
- if (it->second.frame_id == kFrameIdUnknown && frame_id != kFrameIdUnknown)
- it->second.frame_id = frame_id;
+ uint32 rtp_timestamp,
+ base::TimeDelta delay, int size,
+ bool key_frame, int target_bitrate) {
+ FrameEvent frame_event;
+ frame_event.rtp_timestamp = rtp_timestamp;
+ frame_event.frame_id = frame_id;
+ frame_event.size = size;
+ frame_event.timestamp = time_of_event;
+ frame_event.type = event;
+ frame_event.media_type = event_media_type;
+ frame_event.delay_delta = delay;
+ frame_event.key_frame = key_frame;
+ frame_event.target_bitrate = target_bitrate;
+ for (std::vector<RawEventSubscriber*>::const_iterator it =
+ subscribers_.begin();
+ it != subscribers_.end(); ++it) {
+ (*it)->OnReceiveFrameEvent(frame_event);
}
}
-void LoggingRaw::InsertPacketEvent(CastLoggingEvent event,
+void LoggingRaw::InsertPacketEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
- uint32 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- size_t size) {
- // Is this packet belonging to a new frame?
- PacketRawMap::iterator it = packet_map_.find(rtp_timestamp);
- if (it == packet_map_.end()) {
- // Create a new entry - start with base packet map.
- PacketEvent info;
- info.frame_id = frame_id;
- info.max_packet_id = max_packet_id;
- BasePacketInfo base_info;
- base_info.size = size;
- base_info.timestamp.push_back(clock_->NowTicks());
- base_info.type.push_back(event);
- packet_map_.insert(std::make_pair(rtp_timestamp, info));
- } else {
- // Is this a new packet?
- BasePacketMap::iterator packet_it = it->second.packet_map.find(packet_id);
- if (packet_it == it->second.packet_map.end()) {
- BasePacketInfo base_info;
- base_info.size = size;
- base_info.timestamp.push_back(clock_->NowTicks());
- base_info.type.push_back(event);
- it->second.packet_map.insert(std::make_pair(packet_id, base_info));
- } else {
- packet_it->second.timestamp.push_back(clock_->NowTicks());
- packet_it->second.type.push_back(event);
- }
- }
-}
-
-void LoggingRaw::InsertGenericEvent(CastLoggingEvent event, int value) {
- GenericEvent event_data;
- event_data.value.push_back(value);
- event_data.timestamp.push_back(clock_->NowTicks());
- // Is this a new event?
- GenericRawMap::iterator it = generic_map_.find(event);
- if (it == generic_map_.end()) {
- // Create new entry.
- generic_map_.insert(std::make_pair(event, event_data));
- } else {
- // Insert to existing entry.
- it->second.value.push_back(value);
- it->second.timestamp.push_back(clock_->NowTicks());
+ uint32 frame_id, uint16 packet_id,
+ uint16 max_packet_id, size_t size) {
+ PacketEvent packet_event;
+ packet_event.rtp_timestamp = rtp_timestamp;
+ packet_event.frame_id = frame_id;
+ packet_event.max_packet_id = max_packet_id;
+ packet_event.packet_id = packet_id;
+ packet_event.size = size;
+ packet_event.timestamp = time_of_event;
+ packet_event.type = event;
+ packet_event.media_type = event_media_type;
+ for (std::vector<RawEventSubscriber*>::const_iterator it =
+ subscribers_.begin();
+ it != subscribers_.end(); ++it) {
+ (*it)->OnReceivePacketEvent(packet_event);
}
}
-FrameRawMap LoggingRaw::GetFrameData() const {
- return frame_map_;
-}
+void LoggingRaw::AddSubscriber(RawEventSubscriber* subscriber) {
+ DCHECK(subscriber);
+ DCHECK(std::find(subscribers_.begin(), subscribers_.end(), subscriber) ==
+ subscribers_.end());
-PacketRawMap LoggingRaw::GetPacketData() const {
- return packet_map_;
+ subscribers_.push_back(subscriber);
}
-GenericRawMap LoggingRaw::GetGenericData() const {
- return generic_map_;
-}
+void LoggingRaw::RemoveSubscriber(RawEventSubscriber* subscriber) {
+ DCHECK(subscriber);
+ DCHECK(std::find(subscribers_.begin(), subscribers_.end(), subscriber) !=
+ subscribers_.end());
-void LoggingRaw::Reset() {
- frame_map_.clear();
- packet_map_.clear();
- generic_map_.clear();
+ subscribers_.erase(
+ std::remove(subscribers_.begin(), subscribers_.end(), subscriber),
+ subscribers_.end());
}
} // namespace cast
diff --git a/chromium/media/cast/logging/logging_raw.h b/chromium/media/cast/logging/logging_raw.h
index 4ac8d0fb7ad..8ed4a599602 100644
--- a/chromium/media/cast/logging/logging_raw.h
+++ b/chromium/media/cast/logging/logging_raw.h
@@ -5,75 +5,80 @@
#ifndef MEDIA_CAST_LOGGING_LOGGING_RAW_H_
#define MEDIA_CAST_LOGGING_LOGGING_RAW_H_
-#include <map>
-#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/memory/linked_ptr.h"
-#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
#include "base/time/tick_clock.h"
#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/raw_event_subscriber.h"
namespace media {
namespace cast {
// This class is not thread safe, and should only be called from the main
// thread.
-class LoggingRaw : public base::NonThreadSafe,
- public base::SupportsWeakPtr<LoggingRaw> {
+class LoggingRaw : public base::NonThreadSafe {
public:
- explicit LoggingRaw(base::TickClock* clock);
+ LoggingRaw();
~LoggingRaw();
- // Inform of new event: three types of events: frame, packets and generic.
+ // Informs of a new event. There are two types of events: frame and packet.
// Frame events can be inserted with different parameters.
- void InsertFrameEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id);
-
- // Size - Inserting the size implies that this is an encoded frame.
- void InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- int frame_size);
+ void InsertFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event, EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id);
+
+ // This function is only applicable to the FRAME_ENCODED event.
+ // |size| - Size of the encoded frame.
+ // |key_frame| - Whether the frame is a key frame. This field is only
+ // applicable to video events.
+ // |target_bitrate| - The target bitrate of the encoder at the time the
+ // frame was encoded. Only applicable to video events.
+ void InsertEncodedFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 rtp_timestamp, uint32 frame_id,
+ int size, bool key_frame,
+ int target_bitrate);
// Render/playout delay
- void InsertFrameEventWithDelay(CastLoggingEvent event,
+ // This function is only applicable for FRAME_PLAYOUT event.
+ void InsertFrameEventWithDelay(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
uint32 rtp_timestamp,
- uint32 frame_id,
- base::TimeDelta delay);
+ uint32 frame_id, base::TimeDelta delay);
// Insert a packet event.
- void InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- size_t size);
-
- void InsertGenericEvent(CastLoggingEvent event, int value);
-
- // Get raw log data.
- FrameRawMap GetFrameData() const;
- PacketRawMap GetPacketData() const;
- GenericRawMap GetGenericData() const;
-
-
- // Reset all log data.
- void Reset();
+ void InsertPacketEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type, uint32 rtp_timestamp,
+ uint32 frame_id, uint16 packet_id,
+ uint16 max_packet_id, size_t size);
+
+ // Adds |subscriber| so that it will start receiving events on the main
+ // thread. Note that this class does not own |subscriber|. Adding a
+ // subscriber that is already registered is a programming error, enforced
+ // by a DCHECK.
+ void AddSubscriber(RawEventSubscriber* subscriber);
+
+ // Removes |subscriber| so that it will stop receiving events.
+ // Note that this class does NOT own the subscribers. This function MUST be
+ // called before |subscriber| is destroyed if it was previously added.
+ // Removing a subscriber that was never added is likewise a programming
+ // error, enforced by a DCHECK.
+ void RemoveSubscriber(RawEventSubscriber* subscriber);
private:
- void InsertBaseFrameEvent(CastLoggingEvent event,
- uint32 frame_id,
- uint32 rtp_timestamp);
+ void InsertBaseFrameEvent(const base::TimeTicks& time_of_event,
+ CastLoggingEvent event,
+ EventMediaType event_media_type,
+ uint32 frame_id, uint32 rtp_timestamp,
+ base::TimeDelta delay, int size, bool key_frame,
+ int target_bitrate);
- base::TickClock* const clock_; // Not owned by this class.
- FrameRawMap frame_map_;
- PacketRawMap packet_map_;
- GenericRawMap generic_map_;
- base::WeakPtrFactory<LoggingRaw> weak_factory_;
+ // List of subscriber pointers. This class does not own the subscribers.
+ std::vector<RawEventSubscriber*> subscribers_;
DISALLOW_COPY_AND_ASSIGN(LoggingRaw);
};
@@ -82,4 +87,3 @@ class LoggingRaw : public base::NonThreadSafe,
} // namespace media
#endif // MEDIA_CAST_LOGGING_LOGGING_RAW_H_
-
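The net effect of the two changes above is that LoggingRaw no longer buffers anything; it synchronously pushes each event to its registered subscribers. A minimal usage sketch, assuming the SimpleEventSubscriber exercised by the unit test below and a caller-supplied event time:

    #include <vector>

    #include "base/time/time.h"
    #include "media/cast/logging/logging_raw.h"
    #include "media/cast/logging/simple_event_subscriber.h"

    void SketchLoggingRawUsage() {
      media::cast::LoggingRaw raw;
      media::cast::SimpleEventSubscriber subscriber;

      // Events inserted while |subscriber| is registered are forwarded to it
      // synchronously on the calling (main) thread.
      raw.AddSubscriber(&subscriber);
      raw.InsertFrameEvent(base::TimeTicks::Now(), media::cast::FRAME_DECODED,
                           media::cast::VIDEO_EVENT, 123u /* rtp_timestamp */,
                           1u /* frame_id */);

      std::vector<media::cast::FrameEvent> frame_events;
      subscriber.GetFrameEventsAndReset(&frame_events);  // One entry here.

      // Must happen before |subscriber| is destroyed.
      raw.RemoveSubscriber(&subscriber);
    }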
diff --git a/chromium/media/cast/logging/logging_raw_unittest.cc b/chromium/media/cast/logging/logging_raw_unittest.cc
new file mode 100644
index 00000000000..0b7c05aaac1
--- /dev/null
+++ b/chromium/media/cast/logging/logging_raw_unittest.cc
@@ -0,0 +1,196 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/logging_raw.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+class LoggingRawTest : public ::testing::Test {
+ protected:
+ LoggingRawTest() {
+ raw_.AddSubscriber(&event_subscriber_);
+ }
+
+ virtual ~LoggingRawTest() { raw_.RemoveSubscriber(&event_subscriber_); }
+
+ LoggingRaw raw_;
+ SimpleEventSubscriber event_subscriber_;
+ std::vector<FrameEvent> frame_events_;
+ std::vector<PacketEvent> packet_events_;
+};
+
+TEST_F(LoggingRawTest, FrameEvent) {
+ CastLoggingEvent event_type = FRAME_DECODED;
+ EventMediaType media_type = VIDEO_EVENT;
+ uint32 frame_id = 456u;
+ RtpTimestamp rtp_timestamp = 123u;
+ base::TimeTicks timestamp = base::TimeTicks();
+ raw_.InsertFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id);
+
+ event_subscriber_.GetPacketEventsAndReset(&packet_events_);
+ EXPECT_TRUE(packet_events_.empty());
+
+ event_subscriber_.GetFrameEventsAndReset(&frame_events_);
+ ASSERT_EQ(1u, frame_events_.size());
+ EXPECT_EQ(rtp_timestamp, frame_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, frame_events_[0].frame_id);
+ EXPECT_EQ(0u, frame_events_[0].size);
+ EXPECT_EQ(timestamp, frame_events_[0].timestamp);
+ EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
+ EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
+}
+
+TEST_F(LoggingRawTest, EncodedFrameEvent) {
+ CastLoggingEvent event_type = FRAME_ENCODED;
+ EventMediaType media_type = VIDEO_EVENT;
+ uint32 frame_id = 456u;
+ RtpTimestamp rtp_timestamp = 123u;
+ base::TimeTicks timestamp = base::TimeTicks();
+ int size = 1024;
+ bool key_frame = true;
+ int target_bitrate = 4096;
+ raw_.InsertEncodedFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id, size, key_frame, target_bitrate);
+
+ event_subscriber_.GetPacketEventsAndReset(&packet_events_);
+ EXPECT_TRUE(packet_events_.empty());
+
+ event_subscriber_.GetFrameEventsAndReset(&frame_events_);
+ ASSERT_EQ(1u, frame_events_.size());
+ EXPECT_EQ(rtp_timestamp, frame_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, frame_events_[0].frame_id);
+ EXPECT_EQ(size, static_cast<int>(frame_events_[0].size));
+ EXPECT_EQ(timestamp, frame_events_[0].timestamp);
+ EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
+ EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
+ EXPECT_EQ(key_frame, frame_events_[0].key_frame);
+ EXPECT_EQ(target_bitrate, frame_events_[0].target_bitrate);
+}
+
+TEST_F(LoggingRawTest, FrameEventWithDelay) {
+ CastLoggingEvent event_type = FRAME_PLAYOUT;
+ EventMediaType media_type = VIDEO_EVENT;
+ uint32 frame_id = 456u;
+ RtpTimestamp rtp_timestamp = 123u;
+ base::TimeTicks timestamp = base::TimeTicks();
+ base::TimeDelta delay = base::TimeDelta::FromMilliseconds(20);
+ raw_.InsertFrameEventWithDelay(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id, delay);
+
+ event_subscriber_.GetPacketEventsAndReset(&packet_events_);
+ EXPECT_TRUE(packet_events_.empty());
+
+ event_subscriber_.GetFrameEventsAndReset(&frame_events_);
+ ASSERT_EQ(1u, frame_events_.size());
+ EXPECT_EQ(rtp_timestamp, frame_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, frame_events_[0].frame_id);
+ EXPECT_EQ(0u, frame_events_[0].size);
+ EXPECT_EQ(timestamp, frame_events_[0].timestamp);
+ EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
+ EXPECT_EQ(delay, frame_events_[0].delay_delta);
+}
+
+TEST_F(LoggingRawTest, PacketEvent) {
+ CastLoggingEvent event_type = PACKET_RECEIVED;
+ EventMediaType media_type = VIDEO_EVENT;
+ uint32 frame_id = 456u;
+ uint16 packet_id = 1u;
+ uint16 max_packet_id = 10u;
+ RtpTimestamp rtp_timestamp = 123u;
+ base::TimeTicks timestamp = base::TimeTicks();
+ size_t size = 1024u;
+ raw_.InsertPacketEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id, packet_id, max_packet_id, size);
+
+ event_subscriber_.GetFrameEventsAndReset(&frame_events_);
+ EXPECT_TRUE(frame_events_.empty());
+
+ event_subscriber_.GetPacketEventsAndReset(&packet_events_);
+ ASSERT_EQ(1u, packet_events_.size());
+
+ EXPECT_EQ(rtp_timestamp, packet_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, packet_events_[0].frame_id);
+ EXPECT_EQ(max_packet_id, packet_events_[0].max_packet_id);
+ EXPECT_EQ(packet_id, packet_events_[0].packet_id);
+ EXPECT_EQ(size, packet_events_[0].size);
+ EXPECT_EQ(timestamp, packet_events_[0].timestamp);
+ EXPECT_EQ(event_type, packet_events_[0].type);
+ EXPECT_EQ(media_type, packet_events_[0].media_type);
+}
+
+TEST_F(LoggingRawTest, MultipleSubscribers) {
+ SimpleEventSubscriber event_subscriber_2;
+
+ // Now raw_ has two subscribers.
+ raw_.AddSubscriber(&event_subscriber_2);
+
+ CastLoggingEvent event_type = FRAME_DECODED;
+ EventMediaType media_type = VIDEO_EVENT;
+ uint32 frame_id = 456u;
+ RtpTimestamp rtp_timestamp = 123u;
+ base::TimeTicks timestamp = base::TimeTicks();
+ raw_.InsertFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id);
+
+ event_subscriber_.GetPacketEventsAndReset(&packet_events_);
+ EXPECT_TRUE(packet_events_.empty());
+
+ event_subscriber_.GetFrameEventsAndReset(&frame_events_);
+ ASSERT_EQ(1u, frame_events_.size());
+ EXPECT_EQ(rtp_timestamp, frame_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, frame_events_[0].frame_id);
+ EXPECT_EQ(0u, frame_events_[0].size);
+ EXPECT_EQ(timestamp, frame_events_[0].timestamp);
+ EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
+ EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
+
+ event_subscriber_2.GetPacketEventsAndReset(&packet_events_);
+ EXPECT_TRUE(packet_events_.empty());
+
+ event_subscriber_2.GetFrameEventsAndReset(&frame_events_);
+ ASSERT_EQ(1u, frame_events_.size());
+ EXPECT_EQ(rtp_timestamp, frame_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, frame_events_[0].frame_id);
+ EXPECT_EQ(0u, frame_events_[0].size);
+ EXPECT_EQ(timestamp, frame_events_[0].timestamp);
+ EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
+ EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
+
+ // Remove event_subscriber_2, so it shouldn't receive events after this.
+ raw_.RemoveSubscriber(&event_subscriber_2);
+
+ media_type = AUDIO_EVENT;
+ frame_id = 789;
+ rtp_timestamp = 456;
+ timestamp = base::TimeTicks();
+ raw_.InsertFrameEvent(timestamp, event_type, media_type,
+ rtp_timestamp, frame_id);
+
+ // |event_subscriber_| should still receive events.
+ event_subscriber_.GetFrameEventsAndReset(&frame_events_);
+ ASSERT_EQ(1u, frame_events_.size());
+ EXPECT_EQ(rtp_timestamp, frame_events_[0].rtp_timestamp);
+ EXPECT_EQ(frame_id, frame_events_[0].frame_id);
+ EXPECT_EQ(0u, frame_events_[0].size);
+ EXPECT_EQ(timestamp, frame_events_[0].timestamp);
+ EXPECT_EQ(event_type, frame_events_[0].type);
+ EXPECT_EQ(media_type, frame_events_[0].media_type);
+ EXPECT_EQ(base::TimeDelta(), frame_events_[0].delay_delta);
+
+ event_subscriber_2.GetFrameEventsAndReset(&frame_events_);
+ EXPECT_TRUE(frame_events_.empty());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/logging_stats.cc b/chromium/media/cast/logging/logging_stats.cc
deleted file mode 100644
index 84fdbf7a615..00000000000
--- a/chromium/media/cast/logging/logging_stats.cc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/linked_ptr.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/cast/logging/logging_stats.h"
-
-namespace media {
-namespace cast {
-
-LoggingStats::LoggingStats(base::TickClock* clock)
- : frame_stats_(),
- packet_stats_(),
- generic_stats_(),
- start_time_(),
- clock_(clock) {
- memset(counts_, 0, sizeof(counts_));
- memset(start_time_, 0, sizeof(start_time_));
-}
-
-LoggingStats::~LoggingStats() {}
-
-void LoggingStats::Reset() {
- frame_stats_.clear();
- packet_stats_.clear();
- generic_stats_.clear();
- memset(counts_, 0, sizeof(counts_));
-}
-
-void LoggingStats::InsertFrameEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id) {
- InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
-}
-
-void LoggingStats::InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- int frame_size) {
- InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
- // Update size.
- FrameStatsMap::iterator it = frame_stats_.find(event);
- DCHECK(it != frame_stats_.end());
- it->second->bitrate_kbps += frame_size;
-}
-
-void LoggingStats::InsertFrameEventWithDelay(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- base::TimeDelta delay) {
- InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
- // Update size.
- FrameStatsMap::iterator it = frame_stats_.find(event);
- DCHECK(it != frame_stats_.end());
- // Using the average delay as a counter, will divide by the counter when
- // triggered.
- it->second->avg_delay_ms += delay.InMilliseconds();
- if (delay.InMilliseconds() > it->second->max_delay_ms)
- it->second->max_delay_ms = delay.InMilliseconds();
- if ((delay.InMilliseconds() < it->second->min_delay_ms) ||
- (counts_[event] == 1) )
- it->second->min_delay_ms = delay.InMilliseconds();
-}
-
-void LoggingStats::InsertBaseFrameEvent(CastLoggingEvent event,
- uint32 frame_id,
- uint32 rtp_timestamp) {
- // Does this belong to an existing event?
- FrameStatsMap::iterator it = frame_stats_.find(event);
- if (it == frame_stats_.end()) {
- // New event.
- start_time_[event] = clock_->NowTicks();
- linked_ptr<FrameLogStats> stats(new FrameLogStats());
- frame_stats_.insert(std::make_pair(event, stats));
- }
-
- ++counts_[event];
-}
-
-void LoggingStats::InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- size_t size) {
- // Does this packet belong to an existing event?
- PacketStatsMap::iterator it = packet_stats_.find(event);
- if (it == packet_stats_.end()) {
- // New event.
- start_time_[event] = clock_->NowTicks();
- packet_stats_.insert(std::make_pair(event, size));
- } else {
- // Add to existing.
- it->second += size;
- }
- ++counts_[event];
-}
-
-void LoggingStats::InsertGenericEvent(CastLoggingEvent event, int value) {
- // Does this event belong to an existing event?
- GenericStatsMap::iterator it = generic_stats_.find(event);
- if (it == generic_stats_.end()) {
- // New event.
- start_time_[event] = clock_->NowTicks();
- generic_stats_.insert(std::make_pair(event, value));
- } else {
- // Add to existing (will be used to compute average).
- it->second += value;
- }
- ++counts_[event];
-}
-
-const FrameStatsMap* LoggingStats::GetFrameStatsData() {
- // Compute framerate and bitrate (when available).
- FrameStatsMap::iterator it;
- for (it = frame_stats_.begin(); it != frame_stats_.end(); ++it) {
- base::TimeDelta time_diff = clock_->NowTicks() - start_time_[it->first];
- it->second->framerate_fps = counts_[it->first] / time_diff.InSecondsF();
- if (it->second->bitrate_kbps > 0) {
- it->second->bitrate_kbps = (8 / 1000) *
- it->second->bitrate_kbps / time_diff.InSecondsF();
- }
- if (it->second->avg_delay_ms > 0)
- it->second->avg_delay_ms /= counts_[it->first];
- }
- return &frame_stats_;
-}
-
-const PacketStatsMap* LoggingStats::GetPacketStatsData() {
- PacketStatsMap::iterator it;
- for (it = packet_stats_.begin(); it != packet_stats_.end(); ++it) {
- if (counts_[it->first] == 0) continue;
- base::TimeDelta time_diff = clock_->NowTicks() - start_time_[it->first];
- it->second = (8 / 1000) * it->second / time_diff.InSecondsF();
- }
- return &packet_stats_;
-}
-
-const GenericStatsMap* LoggingStats::GetGenericStatsData() {
- // Compute averages.
- GenericStatsMap::iterator it;
- for (it = generic_stats_.begin(); it != generic_stats_.end(); ++it) {
- it->second /= counts_[ it->first];
- }
- return &generic_stats_;
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/logging/logging_stats.h b/chromium/media/cast/logging/logging_stats.h
deleted file mode 100644
index f08649cc777..00000000000
--- a/chromium/media/cast/logging/logging_stats.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_LOGGING_LOGGING_STATS_H_
-#define MEDIA_CAST_LOGGING_LOGGING_STATS_H_
-
-#include "base/basictypes.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/logging/logging_defines.h"
-
-namespace media {
-namespace cast {
-
-class LoggingStats {
- public:
- explicit LoggingStats(base::TickClock* clock);
-
- ~LoggingStats();
-
- void Reset();
-
- void InsertFrameEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id);
-
- void InsertFrameEventWithSize(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- int frame_size);
-
- void InsertFrameEventWithDelay(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- base::TimeDelta delay);
-
- void InsertPacketEvent(CastLoggingEvent event,
- uint32 rtp_timestamp,
- uint32 frame_id,
- uint16 packet_id,
- uint16 max_packet_id,
- size_t size);
-
- void InsertGenericEvent(CastLoggingEvent event, int value);
-
- // Get log stats: some of the values, such as frame rate and bit rates are
- // computed at the time of the call.
- const FrameStatsMap* GetFrameStatsData();
-
- const PacketStatsMap* GetPacketStatsData();
-
- const GenericStatsMap* GetGenericStatsData();
-
- private:
- void InsertBaseFrameEvent(CastLoggingEvent event,
- uint32 frame_id,
- uint32 rtp_timestamp);
- FrameStatsMap frame_stats_;
- PacketStatsMap packet_stats_;
- GenericStatsMap generic_stats_;
- // Every event has an individual start time
- base::TimeTicks start_time_[kNumOfLoggingEvents];
- // Keep track of event counts.
- int counts_[kNumOfLoggingEvents];
- base::TickClock* const clock_; // Not owned by this class.
-
- DISALLOW_COPY_AND_ASSIGN(LoggingStats);
- };
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_LOGGING_LOGGING_STATS_H_
-
diff --git a/chromium/media/cast/logging/logging_unittest.cc b/chromium/media/cast/logging/logging_unittest.cc
deleted file mode 100644
index 5ce760ec4c7..00000000000
--- a/chromium/media/cast/logging/logging_unittest.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <gtest/gtest.h>
-
-#include "base/rand_util.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/logging/logging_impl.h"
-
-
-namespace media {
-namespace cast {
-
- // Insert frame duration- one second.
-const int64 kIntervalTime1S = 1;
-// Test frame rate goal - 30fps.
-const int kFrameIntervalMs = 33;
-
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-
-class TestLogging : public ::testing::Test {
- protected:
- TestLogging()
- // Enable logging, disable tracing and uma.
- : logging_(&testing_clock_, true, false, false) {
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- }
-
- virtual ~TestLogging() {}
-
- LoggingImpl logging_;
- base::SimpleTestTickClock testing_clock_;
-};
-
-TEST_F(TestLogging, BasicFrameLogging) {
- base::TimeTicks start_time = testing_clock_.NowTicks();
- base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
- uint32 rtp_timestamp = 0;
- uint32 frame_id = 0;
- do {
- logging_.InsertFrameEvent(kAudioFrameCaptured, rtp_timestamp, frame_id);
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
- rtp_timestamp += kFrameIntervalMs * 90;
- ++frame_id;
- time_interval = testing_clock_.NowTicks() - start_time;
- } while (time_interval.InSeconds() < kIntervalTime1S);
- // Get logging data.
- FrameRawMap frame_map = logging_.GetFrameRawData();
- // Size of map should be equal to the number of frames logged.
- EXPECT_EQ(frame_id, frame_map.size());
- // Verify stats.
- const FrameStatsMap* frame_stats = logging_.GetFrameStatsData();
- // Size of stats equals the number of events.
- EXPECT_EQ(1u, frame_stats->size());
- FrameStatsMap::const_iterator it = frame_stats->find(kAudioFrameCaptured);
- EXPECT_TRUE(it != frame_stats->end());
- EXPECT_NEAR(30.3, it->second->framerate_fps, 0.1);
- EXPECT_EQ(0, it->second->bitrate_kbps);
- EXPECT_EQ(0, it->second->max_delay_ms);
- EXPECT_EQ(0, it->second->min_delay_ms);
- EXPECT_EQ(0, it->second->avg_delay_ms);
-}
-
-TEST_F(TestLogging, FrameLoggingWithSize) {
- // Average packet size.
- const int kBaseFrameSizeBytes = 25000;
- const int kRandomSizeInterval = 100;
- base::TimeTicks start_time = testing_clock_.NowTicks();
- base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
- uint32 rtp_timestamp = 0;
- uint32 frame_id = 0;
- do {
- int size = kBaseFrameSizeBytes +
- base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
- logging_.InsertFrameEventWithSize(
- kAudioFrameCaptured, rtp_timestamp, frame_id, size);
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
- rtp_timestamp += kFrameIntervalMs * 90;
- ++frame_id;
- time_interval = testing_clock_.NowTicks() - start_time;
- } while (time_interval.InSeconds() < kIntervalTime1S);
- // Get logging data.
- FrameRawMap frame_map = logging_.GetFrameRawData();
- // Size of map should be equal to the number of frames logged.
- EXPECT_EQ(frame_id, frame_map.size());
- // Verify stats.
- const FrameStatsMap* frame_stats = logging_.GetFrameStatsData();
- // Size of stats equals the number of events.
- EXPECT_EQ(1u, frame_stats->size());
- FrameStatsMap::const_iterator it = frame_stats->find(kAudioFrameCaptured);
- EXPECT_TRUE(it != frame_stats->end());
- EXPECT_NEAR(30.3, it->second->framerate_fps, 0.1);
- EXPECT_NEAR(8 * kBaseFrameSizeBytes / (kFrameIntervalMs * 1000),
- it->second->bitrate_kbps, kRandomSizeInterval);
- EXPECT_EQ(0, it->second->max_delay_ms);
- EXPECT_EQ(0, it->second->min_delay_ms);
- EXPECT_EQ(0, it->second->avg_delay_ms);
-}
-
-TEST_F(TestLogging, FrameLoggingWithDelay) {
- // Average packet size.
- const int kPlayoutDelayMs = 50;
- const int kRandomSizeInterval = 20;
- base::TimeTicks start_time = testing_clock_.NowTicks();
- base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
- uint32 rtp_timestamp = 0;
- uint32 frame_id = 0;
- do {
- int delay = kPlayoutDelayMs +
- base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
- logging_.InsertFrameEventWithDelay(
- kAudioFrameCaptured, rtp_timestamp, frame_id,
- base::TimeDelta::FromMilliseconds(delay));
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
- rtp_timestamp += kFrameIntervalMs * 90;
- ++frame_id;
- time_interval = testing_clock_.NowTicks() - start_time;
- } while (time_interval.InSeconds() < kIntervalTime1S);
- // Get logging data.
- FrameRawMap frame_map = logging_.GetFrameRawData();
- // Size of map should be equal to the number of frames logged.
- EXPECT_EQ(frame_id, frame_map.size());
- // Verify stats.
- const FrameStatsMap* frame_stats = logging_.GetFrameStatsData();
- // Size of stats equals the number of events.
- EXPECT_EQ(1u, frame_stats->size());
- FrameStatsMap::const_iterator it = frame_stats->find(kAudioFrameCaptured);
- EXPECT_TRUE(it != frame_stats->end());
- EXPECT_NEAR(30.3, it->second->framerate_fps, 0.1);
- EXPECT_EQ(0, it->second->bitrate_kbps);
- EXPECT_GE(kPlayoutDelayMs + kRandomSizeInterval, it->second->max_delay_ms);
- EXPECT_LE(kPlayoutDelayMs - kRandomSizeInterval, it->second->min_delay_ms);
- EXPECT_NEAR(kPlayoutDelayMs, it->second->avg_delay_ms,
- 0.2 * kRandomSizeInterval);
-}
-
-TEST_F(TestLogging, MultipleEventFrameLogging) {
- base::TimeTicks start_time = testing_clock_.NowTicks();
- base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
- uint32 rtp_timestamp = 0;
- uint32 frame_id = 0;
- do {
- logging_.InsertFrameEvent(kAudioFrameCaptured, rtp_timestamp, frame_id);
- if (frame_id % 2) {
- logging_.InsertFrameEventWithSize(
- kAudioFrameEncoded, rtp_timestamp, frame_id, 1500);
- } else if (frame_id % 3) {
- logging_.InsertFrameEvent(kVideoFrameDecoded, rtp_timestamp, frame_id);
- } else {
- logging_.InsertFrameEventWithDelay(
- kVideoRenderDelay, rtp_timestamp, frame_id,
- base::TimeDelta::FromMilliseconds(20));
- }
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
- rtp_timestamp += kFrameIntervalMs * 90;
- ++frame_id;
- time_interval = testing_clock_.NowTicks() - start_time;
- } while (time_interval.InSeconds() < kIntervalTime1S);
- // Get logging data.
- FrameRawMap frame_map = logging_.GetFrameRawData();
- // Size of map should be equal to the number of frames logged.
- EXPECT_EQ(frame_id, frame_map.size());
- // Multiple events captured per frame.
-}
-
-TEST_F(TestLogging, PacketLogging) {
- const int kNumPacketsPerFrame = 10;
- const int kBaseSize = 2500;
- const int kSizeInterval = 100;
- base::TimeTicks start_time = testing_clock_.NowTicks();
- base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
- uint32 rtp_timestamp = 0;
- uint32 frame_id = 0;
- do {
- for (int i = 0; i < kNumPacketsPerFrame; ++i) {
- int size = kBaseSize + base::RandInt(-kSizeInterval, kSizeInterval);
- logging_.InsertPacketEvent(kPacketSentToPacer, rtp_timestamp, frame_id,
- i, kNumPacketsPerFrame, size);
- }
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
- rtp_timestamp += kFrameIntervalMs * 90;
- ++frame_id;
- time_interval = testing_clock_.NowTicks() - start_time;
- } while (time_interval.InSeconds() < kIntervalTime1S);
- // Get logging data.
- PacketRawMap raw_map = logging_.GetPacketRawData();
- // Size of map should be equal to the number of frames logged.
- EXPECT_EQ(frame_id, raw_map.size());
- // Verify stats.
- const PacketStatsMap* stats_map = logging_.GetPacketStatsData();
- // Size of stats equals the number of events.
- EXPECT_EQ(1u, stats_map->size());
- PacketStatsMap::const_iterator it = stats_map->find(kPacketSentToPacer);
- EXPECT_TRUE(it != stats_map->end());
- // We only store the bitrate as a packet statistic.
- EXPECT_NEAR(8 * kNumPacketsPerFrame * kBaseSize / (kFrameIntervalMs * 1000),
- it->second, kSizeInterval);
-}
-
-TEST_F(TestLogging, GenericLogging) {
- // Insert multiple generic types.
- const int kNumRuns = 1000;
- const int kBaseValue = 20;
- for (int i = 0; i < kNumRuns; ++i) {
- int value = kBaseValue + base::RandInt(-5, 5);
- logging_.InsertGenericEvent(kRtt, value);
- if (i % 2) {
- logging_.InsertGenericEvent(kPacketLoss, value);
- }
- if (!(i % 4)) {
- logging_.InsertGenericEvent(kJitter, value);
- }
- }
- GenericRawMap raw_map = logging_.GetGenericRawData();
- const GenericStatsMap* stats_map = logging_.GetGenericStatsData();
- // Size of generic map = number of different events.
- EXPECT_EQ(3u, raw_map.size());
- EXPECT_EQ(3u, stats_map->size());
- // Raw events - size of internal map = number of calls.
- GenericRawMap::iterator rit = raw_map.find(kRtt);
- EXPECT_EQ(kNumRuns, rit->second.value.size());
- EXPECT_EQ(kNumRuns, rit->second.timestamp.size());
- rit = raw_map.find(kPacketLoss);
- EXPECT_EQ(kNumRuns / 2, rit->second.value.size());
- EXPECT_EQ(kNumRuns / 2, rit->second.timestamp.size());
- rit = raw_map.find(kJitter);
- EXPECT_EQ(kNumRuns / 4, rit->second.value.size());
- EXPECT_EQ(kNumRuns / 4, rit->second.timestamp.size());
- // Stats - one value per event.
- GenericStatsMap::const_iterator sit = stats_map->find(kRtt);
- EXPECT_NEAR(kBaseValue, sit->second, 2.5);
- sit = stats_map->find(kPacketLoss);
- EXPECT_NEAR(kBaseValue, sit->second, 2.5);
- sit = stats_map->find(kJitter);
- EXPECT_NEAR(kBaseValue, sit->second, 2.5);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/logging/proto/proto_utils.cc b/chromium/media/cast/logging/proto/proto_utils.cc
new file mode 100644
index 00000000000..03251e64c03
--- /dev/null
+++ b/chromium/media/cast/logging/proto/proto_utils.cc
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/proto/proto_utils.h"
+
+#include "base/logging.h"
+
+#define TO_PROTO_ENUM(enum) \
+ case enum: \
+ return proto::enum
+
+namespace media {
+namespace cast {
+
+proto::EventType ToProtoEventType(CastLoggingEvent event) {
+ switch (event) {
+ TO_PROTO_ENUM(UNKNOWN);
+ TO_PROTO_ENUM(FRAME_CAPTURE_BEGIN);
+ TO_PROTO_ENUM(FRAME_CAPTURE_END);
+ TO_PROTO_ENUM(FRAME_ENCODED);
+ TO_PROTO_ENUM(FRAME_ACK_RECEIVED);
+ TO_PROTO_ENUM(FRAME_ACK_SENT);
+ TO_PROTO_ENUM(FRAME_DECODED);
+ TO_PROTO_ENUM(FRAME_PLAYOUT);
+ TO_PROTO_ENUM(PACKET_SENT_TO_NETWORK);
+ TO_PROTO_ENUM(PACKET_RETRANSMITTED);
+ TO_PROTO_ENUM(PACKET_RTX_REJECTED);
+ TO_PROTO_ENUM(PACKET_RECEIVED);
+ }
+ NOTREACHED();
+ return proto::UNKNOWN;
+}
+
+} // namespace cast
+} // namespace media
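Because the cast-side and proto-side enums deliberately share enumerator names, the TO_PROTO_ENUM macro can stamp out one case per value; for illustration, a single case of the switch above expands to:

    case FRAME_ENCODED:
      return proto::FRAME_ENCODED;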
diff --git a/chromium/media/cast/logging/proto/proto_utils.h b/chromium/media/cast/logging/proto/proto_utils.h
new file mode 100644
index 00000000000..51232fdb964
--- /dev/null
+++ b/chromium/media/cast/logging/proto/proto_utils.h
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_PROTO_PROTO_UTILS_H_
+#define MEDIA_CAST_LOGGING_PROTO_PROTO_UTILS_H_
+
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/proto/raw_events.pb.h"
+
+// Utility functions for cast logging protos.
+namespace media {
+namespace cast {
+
+// Converts |event| to a corresponding value in |media::cast::proto::EventType|.
+media::cast::proto::EventType ToProtoEventType(CastLoggingEvent event);
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_PROTO_PROTO_UTILS_H_
diff --git a/chromium/media/cast/logging/proto/raw_events.proto b/chromium/media/cast/logging/proto/raw_events.proto
new file mode 100644
index 00000000000..1d2c537db8f
--- /dev/null
+++ b/chromium/media/cast/logging/proto/raw_events.proto
@@ -0,0 +1,149 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Protocol buffer definitions for serialized cast raw event logs.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+
+package media.cast.proto;
+
+// Keep in sync with media/cast/logging/logging_defines.h.
+// For compatibility reasons, existing values in this enum must not be changed.
+enum EventType {
+ UNKNOWN = 0;
+
+ // Note: 1-28 are deprecated in favor of unified event types. Do not use.
+ // Generic events. No longer used.
+ RTT_MS = 1;
+ PACKET_LOSS = 2;
+ JITTER_MS = 3;
+ VIDEO_ACK_RECEIVED = 4; // Sender side frame event.
+ REMB_BITRATE = 5; // Generic event. No longer used.
+ // Audio receiver.
+ AUDIO_ACK_SENT = 6;
+ // Video receiver.
+ VIDEO_ACK_SENT = 7;
+ // Audio sender.
+ AUDIO_FRAME_CAPTURE_END = 8;
+ AUDIO_FRAME_CAPTURE_BEGIN = 9;
+ AUDIO_FRAME_ENCODED = 10;
+ // Audio receiver.
+ AUDIO_PLAYOUT_DELAY = 11;
+ AUDIO_FRAME_DECODED = 12;
+ // Video sender.
+ VIDEO_FRAME_CAPTURE_BEGIN = 13;
+ VIDEO_FRAME_CAPTURE_END = 14;
+ VIDEO_FRAME_SENT_TO_ENCODER = 15; // Deprecated
+ VIDEO_FRAME_ENCODED = 16;
+ // Video receiver.
+ VIDEO_FRAME_DECODED = 17;
+ VIDEO_RENDER_DELAY = 18;
+ // Send-side packet events.
+ // AUDIO_PACKET_SENT_TO_PACER = 19; // Deprecated
+ // VIDEO_PACKET_SENT_TO_PACER = 20; // Deprecated
+ AUDIO_PACKET_SENT_TO_NETWORK = 21;
+ VIDEO_PACKET_SENT_TO_NETWORK = 22;
+ AUDIO_PACKET_RETRANSMITTED = 23;
+ VIDEO_PACKET_RETRANSMITTED = 24;
+ // Receiver-side packet events.
+ AUDIO_PACKET_RECEIVED = 25;
+ VIDEO_PACKET_RECEIVED = 26;
+ DUPLICATE_AUDIO_PACKET_RECEIVED = 27;
+ DUPLICATE_VIDEO_PACKET_RECEIVED = 28;
+
+
+ // New, unified event types.
+ FRAME_CAPTURE_BEGIN = 29;
+ FRAME_CAPTURE_END = 30;
+ FRAME_ENCODED = 31;
+ FRAME_ACK_RECEIVED = 32;
+ FRAME_ACK_SENT = 33;
+ FRAME_DECODED = 34;
+ FRAME_PLAYOUT = 35;
+ PACKET_SENT_TO_NETWORK = 36;
+ PACKET_RETRANSMITTED = 37;
+ PACKET_RECEIVED = 38;
+ PACKET_RTX_REJECTED = 39;
+}
+
+// Contains information independent of the stream that describes the system
+// setup, e.g. OS and hardware info.
+message GeneralDescription {
+ optional string product = 1;
+ optional string product_version = 2;
+ optional string os = 3;
+}
+
+// Each log will contain one |LogMetadata|.
+message LogMetadata {
+ // |true| if the events are related to audio. |false| if they are related to
+ // video.
+ optional bool is_audio = 1;
+
+ // Used as a reference for all event entries.
+ // i.e. the original RTP timestamp for each event will be
+ // |first_rtp_timestamp| + |relative_rtp_timestamp|.
+ optional uint32 first_rtp_timestamp = 2;
+
+ // Number of AggregatedFrameEvent's.
+ optional int32 num_frame_events = 3;
+
+ // Number of AggregatedPacketEvent's.
+ optional int32 num_packet_events = 4;
+
+ // The internal timestamp value in milliseconds that represents the time
+ // of the Unix epoch. This is used for relating the timestamps in the events
+ // to a real time and date.
+ optional int64 reference_timestamp_ms_at_unix_epoch = 5;
+
+ // Extra data to attach to the log, e.g. experiment tags,
+ // in key-value JSON string format. The data is supplied by the application.
+ optional string extra_data = 6;
+
+ optional GeneralDescription general_description = 7;
+}
+
+message AggregatedFrameEvent {
+ optional uint32 relative_rtp_timestamp = 1;
+
+ repeated EventType event_type = 2 [packed = true];
+
+ // The internal timestamp value in milliseconds. Use
+ // LogMetadata.reference_timestamp_ms_at_unix_epoch to relate to a real time
+ // and date.
+ repeated int64 event_timestamp_ms = 3 [packed = true];
+
+ // Only set if there is a frame encoded event.
+ optional int32 encoded_frame_size = 4;
+
+ // Only set if there is a frame playout event.
+ optional int32 delay_millis = 5;
+
+ // Only set if there is a video frame encoded event.
+ optional bool key_frame = 6;
+
+ // Only set if there is a video frame encoded event.
+ optional int32 target_bitrate = 7;
+};
+
+message BasePacketEvent {
+ optional int32 packet_id = 1;
+ repeated EventType event_type = 2 [packed = true];
+
+ // The internal timestamp value in milliseconds. Use
+ // LogMetadata.reference_timestamp_ms_at_unix_epoch to relate to a real time
+ // and date.
+ repeated int64 event_timestamp_ms = 3 [packed = true];
+
+ // Size of the packet.
+ optional int32 size = 4;
+}
+
+message AggregatedPacketEvent {
+ optional uint32 relative_rtp_timestamp = 1;
+ repeated BasePacketEvent base_packet_event = 2;
+};
+
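A reader-side sketch, assuming only the generated C++ lite accessors for the messages above, of how the relative fields map back to absolute values:

    #include "base/basictypes.h"
    #include "media/cast/logging/proto/raw_events.pb.h"

    // Original RTP timestamp = first_rtp_timestamp + relative_rtp_timestamp.
    uint32 AbsoluteRtpTimestamp(
        const media::cast::proto::LogMetadata& metadata,
        const media::cast::proto::AggregatedFrameEvent& event) {
      return metadata.first_rtp_timestamp() + event.relative_rtp_timestamp();
    }

    // The metadata records the internal clock value that coincides with the
    // Unix epoch, so subtracting it converts an event timestamp to Unix ms.
    int64 EventTimeAsUnixMs(
        const media::cast::proto::LogMetadata& metadata,
        const media::cast::proto::AggregatedFrameEvent& event,
        int index) {
      return event.event_timestamp_ms(index) -
             metadata.reference_timestamp_ms_at_unix_epoch();
    }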
diff --git a/chromium/media/cast/logging/raw_event_subscriber.h b/chromium/media/cast/logging/raw_event_subscriber.h
new file mode 100644
index 00000000000..b8ebe8c0cde
--- /dev/null
+++ b/chromium/media/cast/logging/raw_event_subscriber.h
@@ -0,0 +1,32 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_RAW_EVENT_SUBSCRIBER_H_
+#define MEDIA_CAST_LOGGING_RAW_EVENT_SUBSCRIBER_H_
+
+#include "media/cast/logging/logging_defines.h"
+
+namespace media {
+namespace cast {
+
+// An interface for receiving cast raw event logs.
+// Those who wish to receive raw event logs must implement this interface and
+// register the instance via LoggingImpl::AddRawEventSubscriber() to start
+// receiving them.
+class RawEventSubscriber {
+ public:
+ virtual ~RawEventSubscriber() {}
+
+ // Called on the main thread when a FrameEvent, given by |frame_event|, is
+ // logged.
+ virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) = 0;
+
+ // Called on the main thread when a PacketEvent, given by |packet_event|,
+ // is logged.
+ virtual void OnReceivePacketEvent(const PacketEvent& packet_event) = 0;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_RAW_EVENT_SUBSCRIBER_H_
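A minimal sketch of an implementation against the interface above (a hypothetical counting subscriber, not part of the source tree):

    #include "base/compiler_specific.h"
    #include "media/cast/logging/raw_event_subscriber.h"

    namespace media {
    namespace cast {

    // Counts events as they are logged; purely illustrative.
    class CountingEventSubscriber : public RawEventSubscriber {
     public:
      CountingEventSubscriber() : frame_events_(0), packet_events_(0) {}
      virtual ~CountingEventSubscriber() {}

      virtual void OnReceiveFrameEvent(const FrameEvent& frame_event)
          OVERRIDE {
        ++frame_events_;
      }
      virtual void OnReceivePacketEvent(const PacketEvent& packet_event)
          OVERRIDE {
        ++packet_events_;
      }

     private:
      int frame_events_;
      int packet_events_;
    };

    }  // namespace cast
    }  // namespace media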
diff --git a/chromium/media/cast/logging/raw_event_subscriber_bundle.cc b/chromium/media/cast/logging/raw_event_subscriber_bundle.cc
new file mode 100644
index 00000000000..1946b6ce82a
--- /dev/null
+++ b/chromium/media/cast/logging/raw_event_subscriber_bundle.cc
@@ -0,0 +1,99 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/raw_event_subscriber_bundle.h"
+
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/receiver_time_offset_estimator_impl.h"
+
+namespace media {
+namespace cast {
+
+RawEventSubscriberBundleForStream::RawEventSubscriberBundleForStream(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ bool is_audio,
+ ReceiverTimeOffsetEstimator* offset_estimator)
+ : cast_environment_(cast_environment),
+ event_subscriber_(
+ is_audio ? AUDIO_EVENT : VIDEO_EVENT,
+ is_audio ? kMaxAudioEventEntries : kMaxVideoEventEntries),
+ stats_subscriber_(
+ is_audio ? AUDIO_EVENT : VIDEO_EVENT,
+ cast_environment->Clock(), offset_estimator) {
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
+ cast_environment_->Logging()->AddRawEventSubscriber(&stats_subscriber_);
+}
+
+RawEventSubscriberBundleForStream::~RawEventSubscriberBundleForStream() {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&stats_subscriber_);
+}
+
+EncodingEventSubscriber*
+RawEventSubscriberBundleForStream::GetEncodingEventSubscriber() {
+ return &event_subscriber_;
+}
+
+StatsEventSubscriber*
+RawEventSubscriberBundleForStream::GetStatsEventSubscriber() {
+ return &stats_subscriber_;
+}
+
+RawEventSubscriberBundle::RawEventSubscriberBundle(
+ const scoped_refptr<CastEnvironment>& cast_environment)
+ : cast_environment_(cast_environment) {}
+
+RawEventSubscriberBundle::~RawEventSubscriberBundle() {
+ if (receiver_offset_estimator_.get()) {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(
+ receiver_offset_estimator_.get());
+ }
+}
+
+void RawEventSubscriberBundle::AddEventSubscribers(bool is_audio) {
+ if (!receiver_offset_estimator_.get()) {
+ receiver_offset_estimator_.reset(
+ new ReceiverTimeOffsetEstimatorImpl);
+ cast_environment_->Logging()->AddRawEventSubscriber(
+ receiver_offset_estimator_.get());
+ }
+ SubscribersMapByStream::iterator it = subscribers_.find(is_audio);
+ if (it != subscribers_.end())
+ return;
+
+ subscribers_.insert(std::make_pair(
+ is_audio,
+ make_linked_ptr(new RawEventSubscriberBundleForStream(
+ cast_environment_, is_audio, receiver_offset_estimator_.get()))));
+}
+
+void RawEventSubscriberBundle::RemoveEventSubscribers(bool is_audio) {
+ SubscribersMapByStream::iterator it = subscribers_.find(is_audio);
+ if (it == subscribers_.end())
+ return;
+
+ subscribers_.erase(it);
+ if (subscribers_.empty()) {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(
+ receiver_offset_estimator_.get());
+ receiver_offset_estimator_.reset();
+ }
+}
+
+EncodingEventSubscriber*
+RawEventSubscriberBundle::GetEncodingEventSubscriber(bool is_audio) {
+ SubscribersMapByStream::iterator it = subscribers_.find(is_audio);
+ return it == subscribers_.end() ?
+ NULL : it->second->GetEncodingEventSubscriber();
+}
+
+StatsEventSubscriber*
+RawEventSubscriberBundle::GetStatsEventSubscriber(bool is_audio) {
+ SubscribersMapByStream::iterator it = subscribers_.find(is_audio);
+ return it == subscribers_.end() ?
+ NULL : it->second->GetStatsEventSubscriber();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/raw_event_subscriber_bundle.h b/chromium/media/cast/logging/raw_event_subscriber_bundle.h
new file mode 100644
index 00000000000..58ab21e6e8a
--- /dev/null
+++ b/chromium/media/cast/logging/raw_event_subscriber_bundle.h
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_RAW_EVENT_SUBSCRIBER_BUNDLE_H_
+#define MEDIA_CAST_LOGGING_RAW_EVENT_SUBSCRIBER_BUNDLE_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "media/cast/logging/encoding_event_subscriber.h"
+#include "media/cast/logging/stats_event_subscriber.h"
+
+namespace media {
+namespace cast {
+
+class CastEnvironment;
+class ReceiverTimeOffsetEstimator;
+
+// Allow 9MB for serialized video / audio event logs.
+const int kMaxSerializedBytes = 9000000;
+
+// Assume serialized log data for each frame will take up to 150 bytes.
+const int kMaxVideoEventEntries = kMaxSerializedBytes / 150;
+
+// Assume serialized log data for each frame will take up to 75 bytes.
+const int kMaxAudioEventEntries = kMaxSerializedBytes / 75;
+
+// A bundle of raw event subscribers for a single stream.
+// It contains an EncodingEventSubscriber and a StatsEventSubscriber.
+class RawEventSubscriberBundleForStream {
+ public:
+ RawEventSubscriberBundleForStream(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ bool is_audio,
+ ReceiverTimeOffsetEstimator* offset_estimator);
+ ~RawEventSubscriberBundleForStream();
+
+ EncodingEventSubscriber* GetEncodingEventSubscriber();
+ StatsEventSubscriber* GetStatsEventSubscriber();
+
+ private:
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ EncodingEventSubscriber event_subscriber_;
+ StatsEventSubscriber stats_subscriber_;
+
+ DISALLOW_COPY_AND_ASSIGN(RawEventSubscriberBundleForStream);
+};
+
+// A bundle of subscribers for all streams. An instance of this object
+// is associated with a CastEnvironment.
+// This class can be used for managing event subscribers
+// in a session where there could be multiple streams (e.g. CastSessionDelegate).
+// It also contains a ReceiverTimeOffsetEstimator that is shared by subscribers
+// of different streams.
+class RawEventSubscriberBundle {
+ public:
+ explicit RawEventSubscriberBundle(
+ const scoped_refptr<CastEnvironment>& cast_environment);
+ ~RawEventSubscriberBundle();
+
+ void AddEventSubscribers(bool is_audio);
+ void RemoveEventSubscribers(bool is_audio);
+ EncodingEventSubscriber* GetEncodingEventSubscriber(
+ bool is_audio);
+ StatsEventSubscriber* GetStatsEventSubscriber(bool is_audio);
+
+ private:
+ // Map from (is_audio) -> RawEventSubscriberBundleForStream.
+ // TODO(imcheng): This works because we only have 1 audio and 1 video stream.
+ // This needs to scale better.
+ typedef std::map<bool, linked_ptr<RawEventSubscriberBundleForStream> >
+ SubscribersMapByStream;
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ SubscribersMapByStream subscribers_;
+ scoped_ptr<ReceiverTimeOffsetEstimator> receiver_offset_estimator_;
+
+ DISALLOW_COPY_AND_ASSIGN(RawEventSubscriberBundle);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_RAW_EVENT_SUBSCRIBER_BUNDLE_H_
+
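A usage sketch for the bundle, assuming a live CastEnvironment; the wiring mirrors what a session owner such as CastSessionDelegate would do:

    #include "base/logging.h"
    #include "media/cast/cast_environment.h"
    #include "media/cast/logging/raw_event_subscriber_bundle.h"

    void SketchBundleUsage(
        const scoped_refptr<media::cast::CastEnvironment>& cast_environment) {
      media::cast::RawEventSubscriberBundle bundle(cast_environment);

      // Registers an EncodingEventSubscriber/StatsEventSubscriber pair for
      // the audio stream; the shared offset estimator is created on first
      // use.
      bundle.AddEventSubscribers(true /* is_audio */);

      // Returns NULL for a stream whose subscribers were never added.
      media::cast::EncodingEventSubscriber* audio_events =
          bundle.GetEncodingEventSubscriber(true);
      DCHECK(audio_events);

      // Unsubscribes; once no streams remain, the offset estimator is
      // dropped as well.
      bundle.RemoveEventSubscribers(true);
    }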
diff --git a/chromium/media/cast/logging/receiver_time_offset_estimator.h b/chromium/media/cast/logging/receiver_time_offset_estimator.h
new file mode 100644
index 00000000000..5880a8d5ac3
--- /dev/null
+++ b/chromium/media/cast/logging/receiver_time_offset_estimator.h
@@ -0,0 +1,39 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_RECEIVER_TIME_OFFSET_ESTIMATOR_H_
+#define MEDIA_CAST_LOGGING_RECEIVER_TIME_OFFSET_ESTIMATOR_H_
+
+#include "base/time/time.h"
+#include "media/cast/logging/raw_event_subscriber.h"
+
+namespace media {
+namespace cast {
+
+// Estimates the receiver time offset based on received raw events.
+// In most cases, the sender and receiver run on different timelines.
+// In order to convert receiver time back to sender time (or vice versa)
+// a certain time offset has to be applied.
+// An implementation of this interface listens to raw events to figure out
+// the bounds for the offset value (assuming the true offset value is constant
+// over the lifetime of a cast session).
+// The offset values provided here should be used as follows:
+// - Convert from sender to receiver time: add offset value to sender timestamp.
+// - Convert from receiver to sender time: subtract offset value from receiver
+// timestamp.
+class ReceiverTimeOffsetEstimator : public RawEventSubscriber {
+ public:
+ virtual ~ReceiverTimeOffsetEstimator() {}
+
+ // If bounds are known, assigns |lower_bound| and |upper_bound| with the
+ // lower bound and upper bound for the offset value, respectively.
+ // Returns true if bounds are known.
+ virtual bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
+ base::TimeDelta* upper_bound) = 0;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_RECEIVER_TIME_OFFSET_ESTIMATOR_H_
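A sketch of applying that contract once an implementation reports bounds; the midpoint of the interval is one reasonable point estimate:

    #include "base/time/time.h"
    #include "media/cast/logging/receiver_time_offset_estimator.h"

    bool ReceiverToSenderTime(
        media::cast::ReceiverTimeOffsetEstimator* estimator,
        base::TimeTicks receiver_time,
        base::TimeTicks* sender_time) {
      base::TimeDelta lower_bound;
      base::TimeDelta upper_bound;
      if (!estimator->GetReceiverOffsetBounds(&lower_bound, &upper_bound))
        return false;  // Bounds are not known yet.
      // Estimate the offset as the midpoint of the bounds, then subtract it
      // from the receiver timestamp, per the comment above.
      base::TimeDelta offset = (lower_bound + upper_bound) / 2;
      *sender_time = receiver_time - offset;
      return true;
    }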
diff --git a/chromium/media/cast/logging/receiver_time_offset_estimator_impl.cc b/chromium/media/cast/logging/receiver_time_offset_estimator_impl.cc
new file mode 100644
index 00000000000..44d5eb0b3d7
--- /dev/null
+++ b/chromium/media/cast/logging/receiver_time_offset_estimator_impl.cc
@@ -0,0 +1,129 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <utility>
+
+#include "base/logging.h"
+#include "media/cast/logging/receiver_time_offset_estimator_impl.h"
+
+namespace media {
+namespace cast {
+
+// This should be large enough so that we can collect all 3 events before
+// the entry gets removed from the map.
+const size_t kMaxEventTimesMapSize = 100;
+
+ReceiverTimeOffsetEstimatorImpl::ReceiverTimeOffsetEstimatorImpl()
+ : bounded_(false) {}
+
+ReceiverTimeOffsetEstimatorImpl::~ReceiverTimeOffsetEstimatorImpl() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void ReceiverTimeOffsetEstimatorImpl::OnReceiveFrameEvent(
+ const FrameEvent& frame_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (frame_event.media_type != VIDEO_EVENT)
+ return;
+
+ CastLoggingEvent event = frame_event.type;
+ if (event != FRAME_ENCODED && event != FRAME_ACK_SENT &&
+ event != FRAME_ACK_RECEIVED)
+ return;
+
+ EventTimesMap::iterator it = event_times_map_.find(frame_event.rtp_timestamp);
+ if (it == event_times_map_.end()) {
+ EventTimes event_times;
+ it = event_times_map_.insert(std::make_pair(frame_event.rtp_timestamp,
+ event_times)).first;
+ }
+ switch (event) {
+ case FRAME_ENCODED:
+ // Encode is supposed to happen only once. If we see a duplicate event,
+ // throw away the entry.
+ if (it->second.event_a_time.is_null()) {
+ it->second.event_a_time = frame_event.timestamp;
+ } else {
+ event_times_map_.erase(it);
+ return;
+ }
+ break;
+ case FRAME_ACK_SENT:
+ if (it->second.event_b_time.is_null()) {
+ it->second.event_b_time = frame_event.timestamp;
+ } else if (it->second.event_b_time != frame_event.timestamp) {
+ // Duplicate ack sent events are normal due to RTCP redundancy,
+ // but they must have the same event timestamp.
+ event_times_map_.erase(it);
+ return;
+ }
+ break;
+ case FRAME_ACK_RECEIVED:
+ // If there are duplicate ack received events, pick the one with the
+ // smallest event timestamp so we can get a better bound.
+ if (it->second.event_c_time.is_null()) {
+ it->second.event_c_time = frame_event.timestamp;
+ } else {
+ it->second.event_c_time =
+ std::min(frame_event.timestamp, it->second.event_c_time);
+ }
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ if (!it->second.event_a_time.is_null() &&
+ !it->second.event_b_time.is_null() &&
+ !it->second.event_c_time.is_null()) {
+ UpdateOffsetBounds(it->second);
+ event_times_map_.erase(it);
+ }
+
+ // Keep the map size at most |kMaxEventTimesMapSize|.
+ if (event_times_map_.size() > kMaxEventTimesMapSize)
+ event_times_map_.erase(event_times_map_.begin());
+}
+
+bool ReceiverTimeOffsetEstimatorImpl::GetReceiverOffsetBounds(
+ base::TimeDelta* lower_bound,
+ base::TimeDelta* upper_bound) {
+ if (!bounded_)
+ return false;
+
+ *lower_bound = offset_lower_bound_;
+ *upper_bound = offset_upper_bound_;
+ return true;
+}
+
+void ReceiverTimeOffsetEstimatorImpl::OnReceivePacketEvent(
+ const PacketEvent& packet_event) {
+ // Not interested in packet events.
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void ReceiverTimeOffsetEstimatorImpl::UpdateOffsetBounds(
+ const EventTimes& event) {
+ base::TimeDelta lower_bound = event.event_b_time - event.event_c_time;
+ base::TimeDelta upper_bound = event.event_b_time - event.event_a_time;
+
+ if (bounded_) {
+ lower_bound = std::max(lower_bound, offset_lower_bound_);
+ upper_bound = std::min(upper_bound, offset_upper_bound_);
+ }
+
+ if (lower_bound > upper_bound) {
+ VLOG(2) << "Got bogus offset bound values [" << lower_bound.InMilliseconds()
+ << ", " << upper_bound.InMilliseconds() << "].";
+ return;
+ }
+
+ offset_lower_bound_ = lower_bound;
+ offset_upper_bound_ = upper_bound;
+ bounded_ = true;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h b/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h
new file mode 100644
index 00000000000..1d0f6c8357f
--- /dev/null
+++ b/chromium/media/cast/logging/receiver_time_offset_estimator_impl.h
@@ -0,0 +1,64 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_RECEIVER_TIME_OFFSET_ESTIMATOR_IMPL_H_
+#define MEDIA_CAST_LOGGING_RECEIVER_TIME_OFFSET_ESTIMATOR_IMPL_H_
+
+#include "base/time/time.h"
+#include "base/threading/thread_checker.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/receiver_time_offset_estimator.h"
+
+namespace media {
+namespace cast {
+
+// This implementation listens to three types of video events:
+// 1. FRAME_ENCODED (sender side)
+// 2. FRAME_ACK_SENT (receiver side)
+// 3. FRAME_ACK_RECEIVED (sender side)
+// There is a causal relationship between these events: they must happen in
+// the order listed. This class obtains the lower and upper bounds for the
+// offset by taking the timestamp differences (2) - (3) and (2) - (1),
+// respectively.
+// The bounds become tighter as the latency between the events decreases.
+class ReceiverTimeOffsetEstimatorImpl : public ReceiverTimeOffsetEstimator {
+ public:
+ ReceiverTimeOffsetEstimatorImpl();
+
+ virtual ~ReceiverTimeOffsetEstimatorImpl();
+
+ // RawEventSubscriber implementations.
+ virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
+ virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+
+ // ReceiverTimeOffsetEstimator implementation.
+ virtual bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
+ base::TimeDelta* upper_bound) OVERRIDE;
+
+ private:
+ struct EventTimes {
+ base::TimeTicks event_a_time;
+ base::TimeTicks event_b_time;
+ base::TimeTicks event_c_time;
+ };
+
+ typedef std::map<RtpTimestamp, EventTimes> EventTimesMap;
+
+ void UpdateOffsetBounds(const EventTimes& event);
+
+  // Fixed-size storage for the event times of recent frames.
+ EventTimesMap event_times_map_;
+
+ bool bounded_;
+ base::TimeDelta offset_lower_bound_;
+ base::TimeDelta offset_upper_bound_;
+
+ base::ThreadChecker thread_checker_;
+ DISALLOW_COPY_AND_ASSIGN(ReceiverTimeOffsetEstimatorImpl);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_RECEIVER_TIME_OFFSET_ESTIMATOR_IMPL_H_
diff --git a/chromium/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc b/chromium/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc
new file mode 100644
index 00000000000..1cdbecf5de6
--- /dev/null
+++ b/chromium/media/cast/logging/receiver_time_offset_estimator_impl_unittest.cc
@@ -0,0 +1,242 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/receiver_time_offset_estimator_impl.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+class ReceiverTimeOffsetEstimatorImplTest : public ::testing::Test {
+ protected:
+ ReceiverTimeOffsetEstimatorImplTest()
+ : sender_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(sender_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(sender_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)) {
+ cast_environment_->Logging()->AddRawEventSubscriber(&estimator_);
+ }
+
+ virtual ~ReceiverTimeOffsetEstimatorImplTest() {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&estimator_);
+ }
+
+ void AdvanceClocks(base::TimeDelta time) {
+ sender_clock_->Advance(time);
+ receiver_clock_.Advance(time);
+ }
+
+ base::SimpleTestTickClock* sender_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ base::SimpleTestTickClock receiver_clock_;
+ ReceiverTimeOffsetEstimatorImpl estimator_;
+};
+
+// Suppose the true offset is 100ms.
+// Event A occurred at sender time 20ms.
+// Event B occurred at receiver time 130ms. (sender time 30ms)
+// Event C occurred at sender time 60ms.
+// Then the bound after all 3 events have arrived is [130-60=70, 130-20=110].
+TEST_F(ReceiverTimeOffsetEstimatorImplTest, EstimateOffset) {
+ int64 true_offset_ms = 100;
+ receiver_clock_.Advance(base::TimeDelta::FromMilliseconds(true_offset_ms));
+
+ base::TimeDelta lower_bound;
+ base::TimeDelta upper_bound;
+
+ EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ RtpTimestamp rtp_timestamp = 0;
+ uint32 frame_id = 0;
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(20));
+
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ sender_clock_->NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ 1234,
+ true,
+ 5678);
+
+ EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(10));
+ cast_environment_->Logging()->InsertFrameEvent(
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp, frame_id);
+
+ EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(30));
+ cast_environment_->Logging()->InsertFrameEvent(
+ sender_clock_->NowTicks(), FRAME_ACK_RECEIVED, VIDEO_EVENT,
+ rtp_timestamp, frame_id);
+
+ EXPECT_TRUE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ int64 lower_bound_ms = lower_bound.InMilliseconds();
+ int64 upper_bound_ms = upper_bound.InMilliseconds();
+ EXPECT_EQ(70, lower_bound_ms);
+ EXPECT_EQ(110, upper_bound_ms);
+ EXPECT_GE(true_offset_ms, lower_bound_ms);
+ EXPECT_LE(true_offset_ms, upper_bound_ms);
+}
+
+// Same scenario as above, but event C's log entry arrives before event B's.
+// This doesn't mean event C occurred before event B.
+TEST_F(ReceiverTimeOffsetEstimatorImplTest, EventCArrivesBeforeEventB) {
+ int64 true_offset_ms = 100;
+ receiver_clock_.Advance(base::TimeDelta::FromMilliseconds(true_offset_ms));
+
+ base::TimeDelta lower_bound;
+ base::TimeDelta upper_bound;
+
+ EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ RtpTimestamp rtp_timestamp = 0;
+ uint32 frame_id = 0;
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(20));
+
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ sender_clock_->NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ 1234,
+ true,
+ 5678);
+
+ EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(10));
+ base::TimeTicks event_b_time = receiver_clock_.NowTicks();
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(30));
+ base::TimeTicks event_c_time = sender_clock_->NowTicks();
+
+ cast_environment_->Logging()->InsertFrameEvent(
+ event_c_time, FRAME_ACK_RECEIVED, VIDEO_EVENT, rtp_timestamp, frame_id);
+
+ EXPECT_FALSE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ cast_environment_->Logging()->InsertFrameEvent(
+ event_b_time, FRAME_ACK_SENT, VIDEO_EVENT, rtp_timestamp, frame_id);
+
+ EXPECT_TRUE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+
+ int64 lower_bound_ms = lower_bound.InMilliseconds();
+ int64 upper_bound_ms = upper_bound.InMilliseconds();
+ EXPECT_EQ(70, lower_bound_ms);
+ EXPECT_EQ(110, upper_bound_ms);
+ EXPECT_GE(true_offset_ms, lower_bound_ms);
+ EXPECT_LE(true_offset_ms, upper_bound_ms);
+}
+
+TEST_F(ReceiverTimeOffsetEstimatorImplTest, MultipleIterations) {
+ int64 true_offset_ms = 100;
+ receiver_clock_.Advance(base::TimeDelta::FromMilliseconds(true_offset_ms));
+
+ base::TimeDelta lower_bound;
+ base::TimeDelta upper_bound;
+
+ RtpTimestamp rtp_timestamp_a = 0;
+ int frame_id_a = 0;
+ RtpTimestamp rtp_timestamp_b = 90;
+ int frame_id_b = 1;
+ RtpTimestamp rtp_timestamp_c = 180;
+ int frame_id_c = 2;
+
+ // Frame 1 times: [20, 30+100, 60]
+ // Frame 2 times: [30, 50+100, 55]
+ // Frame 3 times: [77, 80+100, 110]
+  // Bounds should end up at [95, 103].
+  // Event times in chronological order: 20, 30 x2, 50, 55, 60, 77, 80, 110.
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(20));
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ sender_clock_->NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT,
+ rtp_timestamp_a,
+ frame_id_a,
+ 1234,
+ true,
+ 5678);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(10));
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ sender_clock_->NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT,
+ rtp_timestamp_b,
+ frame_id_b,
+ 1234,
+ true,
+ 5678);
+ cast_environment_->Logging()->InsertFrameEvent(
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp_a, frame_id_a);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(20));
+ cast_environment_->Logging()->InsertFrameEvent(
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp_b, frame_id_b);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(5));
+ cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp_b,
+ frame_id_b);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(5));
+ cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp_a,
+ frame_id_a);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(17));
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ sender_clock_->NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT,
+ rtp_timestamp_c,
+ frame_id_c,
+ 1234,
+ true,
+ 5678);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(3));
+ cast_environment_->Logging()->InsertFrameEvent(
+ receiver_clock_.NowTicks(), FRAME_ACK_SENT, VIDEO_EVENT,
+ rtp_timestamp_c, frame_id_c);
+
+ AdvanceClocks(base::TimeDelta::FromMilliseconds(30));
+ cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp_c,
+ frame_id_c);
+
+ EXPECT_TRUE(estimator_.GetReceiverOffsetBounds(&lower_bound, &upper_bound));
+ int64 lower_bound_ms = lower_bound.InMilliseconds();
+ int64 upper_bound_ms = upper_bound.InMilliseconds();
+ EXPECT_EQ(95, lower_bound_ms);
+ EXPECT_EQ(103, upper_bound_ms);
+ EXPECT_GE(true_offset_ms, lower_bound_ms);
+ EXPECT_LE(true_offset_ms, upper_bound_ms);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/serialize_deserialize_test.cc b/chromium/media/cast/logging/serialize_deserialize_test.cc
new file mode 100644
index 00000000000..7e5aa7d3b5e
--- /dev/null
+++ b/chromium/media/cast/logging/serialize_deserialize_test.cc
@@ -0,0 +1,214 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Joint LogSerializer and LogDeserializer testing to make sure they stay in
+// sync.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/logging/log_deserializer.h"
+#include "media/cast/logging/log_serializer.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/proto/proto_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using media::cast::proto::AggregatedFrameEvent;
+using media::cast::proto::AggregatedPacketEvent;
+using media::cast::proto::BasePacketEvent;
+using media::cast::proto::LogMetadata;
+
+namespace {
+
+const media::cast::CastLoggingEvent kVideoFrameEvents[] = {
+ media::cast::FRAME_CAPTURE_BEGIN, media::cast::FRAME_CAPTURE_END,
+ media::cast::FRAME_ENCODED, media::cast::FRAME_DECODED,
+ media::cast::FRAME_PLAYOUT };
+
+const media::cast::CastLoggingEvent kVideoPacketEvents[] = {
+ media::cast::PACKET_SENT_TO_NETWORK, media::cast::PACKET_RECEIVED};
+
+// The frame event fields cycle through these numbers.
+const int kEncodedFrameSize[] = {512, 425, 399, 400, 237};
+const int kDelayMillis[] = {15, 4, 8, 42, 23, 16};
+
+const int kMaxSerializedBytes = 10000;
+
+}  // namespace
+
+namespace media {
+namespace cast {
+
+class SerializeDeserializeTest : public ::testing::Test {
+ protected:
+ SerializeDeserializeTest()
+ : serialized_(new char[kMaxSerializedBytes]), output_bytes_(0) {}
+
+ virtual ~SerializeDeserializeTest() {}
+
+ void Init() {
+ metadata_.set_first_rtp_timestamp(12345678 * 90);
+ metadata_.set_is_audio(false);
+ metadata_.set_num_frame_events(10);
+ metadata_.set_num_packet_events(10);
+
+ int64 event_time_ms = 0;
+ // Insert frame and packet events with RTP timestamps 0, 90, 180, ...
+ for (int i = 0; i < metadata_.num_frame_events(); i++) {
+ linked_ptr<AggregatedFrameEvent> frame_event(new AggregatedFrameEvent);
+ frame_event->set_relative_rtp_timestamp(i * 90);
+ for (uint32 event_index = 0; event_index < arraysize(kVideoFrameEvents);
+ ++event_index) {
+ frame_event->add_event_type(
+ ToProtoEventType(kVideoFrameEvents[event_index]));
+ frame_event->add_event_timestamp_ms(event_time_ms);
+ event_time_ms += 1024;
+ }
+ frame_event->set_encoded_frame_size(
+ kEncodedFrameSize[i % arraysize(kEncodedFrameSize)]);
+ frame_event->set_delay_millis(kDelayMillis[i % arraysize(kDelayMillis)]);
+
+ frame_event_list_.push_back(frame_event);
+ }
+
+ event_time_ms = 0;
+ int packet_id = 0;
+ for (int i = 0; i < metadata_.num_packet_events(); i++) {
+ linked_ptr<AggregatedPacketEvent> packet_event(new AggregatedPacketEvent);
+ packet_event->set_relative_rtp_timestamp(i * 90);
+ for (int j = 0; j < 10; j++) {
+ BasePacketEvent* base_event = packet_event->add_base_packet_event();
+ base_event->set_packet_id(packet_id);
+ packet_id++;
+ for (uint32 event_index = 0;
+ event_index < arraysize(kVideoPacketEvents);
+ ++event_index) {
+ base_event->add_event_type(
+ ToProtoEventType(kVideoPacketEvents[event_index]));
+ base_event->add_event_timestamp_ms(event_time_ms);
+ event_time_ms += 256;
+ }
+ }
+ packet_event_list_.push_back(packet_event);
+ }
+ }
+
+ void Verify(const DeserializedLog& video_log) {
+ const LogMetadata& returned_metadata = video_log.metadata;
+ const FrameEventMap& returned_frame_events = video_log.frame_events;
+ const PacketEventMap& returned_packet_events = video_log.packet_events;
+
+ EXPECT_EQ(metadata_.SerializeAsString(),
+ returned_metadata.SerializeAsString());
+
+ // Check that the returned map is equal to the original map.
+ EXPECT_EQ(frame_event_list_.size(), returned_frame_events.size());
+ for (FrameEventMap::const_iterator frame_it = returned_frame_events.begin();
+ frame_it != returned_frame_events.end();
+ ++frame_it) {
+ FrameEventList::iterator original_it = frame_event_list_.begin();
+ ASSERT_NE(frame_event_list_.end(), original_it);
+ // Compare protos by serializing and checking the bytes.
+ EXPECT_EQ((*original_it)->SerializeAsString(),
+ frame_it->second->SerializeAsString());
+ frame_event_list_.erase(frame_event_list_.begin());
+ }
+ EXPECT_TRUE(frame_event_list_.empty());
+
+ EXPECT_EQ(packet_event_list_.size(), returned_packet_events.size());
+ for (PacketEventMap::const_iterator packet_it =
+ returned_packet_events.begin();
+ packet_it != returned_packet_events.end();
+ ++packet_it) {
+ PacketEventList::iterator original_it = packet_event_list_.begin();
+ ASSERT_NE(packet_event_list_.end(), original_it);
+ // Compare protos by serializing and checking the bytes.
+ EXPECT_EQ((*original_it)->SerializeAsString(),
+ packet_it->second->SerializeAsString());
+ packet_event_list_.erase(packet_event_list_.begin());
+ }
+ EXPECT_TRUE(packet_event_list_.empty());
+ }
+
+ LogMetadata metadata_;
+ FrameEventList frame_event_list_;
+ PacketEventList packet_event_list_;
+ scoped_ptr<char[]> serialized_;
+ int output_bytes_;
+};
+
+TEST_F(SerializeDeserializeTest, Uncompressed) {
+ bool compressed = false;
+ Init();
+
+ bool success = SerializeEvents(metadata_,
+ frame_event_list_,
+ packet_event_list_,
+ compressed,
+ kMaxSerializedBytes,
+ serialized_.get(),
+ &output_bytes_);
+ ASSERT_TRUE(success);
+ ASSERT_GT(output_bytes_, 0);
+
+ DeserializedLog audio_log;
+ DeserializedLog video_log;
+ success = DeserializeEvents(
+ serialized_.get(), output_bytes_, compressed, &audio_log, &video_log);
+ ASSERT_TRUE(success);
+
+ Verify(video_log);
+}
+
+TEST_F(SerializeDeserializeTest, UncompressedInsufficientSpace) {
+ bool compressed = false;
+ Init();
+ serialized_.reset(new char[100]);
+ bool success = SerializeEvents(metadata_,
+ frame_event_list_,
+ packet_event_list_,
+ compressed,
+ 100,
+ serialized_.get(),
+ &output_bytes_);
+ EXPECT_FALSE(success);
+ EXPECT_EQ(0, output_bytes_);
+}
+
+TEST_F(SerializeDeserializeTest, Compressed) {
+ bool compressed = true;
+ Init();
+ bool success = SerializeEvents(metadata_,
+ frame_event_list_,
+ packet_event_list_,
+ compressed,
+ kMaxSerializedBytes,
+ serialized_.get(),
+ &output_bytes_);
+ ASSERT_TRUE(success);
+ ASSERT_GT(output_bytes_, 0);
+
+ DeserializedLog audio_log;
+ DeserializedLog video_log;
+ success = DeserializeEvents(
+ serialized_.get(), output_bytes_, compressed, &audio_log, &video_log);
+ ASSERT_TRUE(success);
+ Verify(video_log);
+}
+
+TEST_F(SerializeDeserializeTest, CompressedInsufficientSpace) {
+ bool compressed = true;
+ Init();
+ serialized_.reset(new char[100]);
+ bool success = SerializeEvents(metadata_,
+ frame_event_list_,
+ packet_event_list_,
+ compressed,
+ 100,
+ serialized_.get(),
+ &output_bytes_);
+ EXPECT_FALSE(success);
+ EXPECT_EQ(0, output_bytes_);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/simple_event_subscriber.cc b/chromium/media/cast/logging/simple_event_subscriber.cc
new file mode 100644
index 00000000000..984d8f7d830
--- /dev/null
+++ b/chromium/media/cast/logging/simple_event_subscriber.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/simple_event_subscriber.h"
+
+#include <vector>
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+SimpleEventSubscriber::SimpleEventSubscriber() {}
+
+SimpleEventSubscriber::~SimpleEventSubscriber() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void SimpleEventSubscriber::OnReceiveFrameEvent(const FrameEvent& frame_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ frame_events_.push_back(frame_event);
+}
+
+void SimpleEventSubscriber::OnReceivePacketEvent(
+ const PacketEvent& packet_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ packet_events_.push_back(packet_event);
+}
+
+void SimpleEventSubscriber::GetFrameEventsAndReset(
+ std::vector<FrameEvent>* frame_events) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ frame_events->swap(frame_events_);
+ frame_events_.clear();
+}
+
+void SimpleEventSubscriber::GetPacketEventsAndReset(
+ std::vector<PacketEvent>* packet_events) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ packet_events->swap(packet_events_);
+ packet_events_.clear();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/simple_event_subscriber.h b/chromium/media/cast/logging/simple_event_subscriber.h
new file mode 100644
index 00000000000..adc4763f5f4
--- /dev/null
+++ b/chromium/media/cast/logging/simple_event_subscriber.h
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_SIMPLE_EVENT_SUBSCRIBER_H_
+#define MEDIA_CAST_LOGGING_SIMPLE_EVENT_SUBSCRIBER_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/threading/thread_checker.h"
+#include "media/cast/logging/raw_event_subscriber.h"
+
+namespace media {
+namespace cast {
+
+// RawEventSubscriber implementation that records all incoming raw events
+// in std::vectors.
+// The user of this class can call the GetXXXEventsAndReset functions to get
+// the list of events that have accumulated since the last invocation.
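+//
+// Typical usage (a sketch mirroring the unit tests; |logging| is assumed to
+// be the LoggingImpl owned by CastEnvironment):
+//   SimpleEventSubscriber subscriber;
+//   logging->AddRawEventSubscriber(&subscriber);
+//   ...  // Raw frame/packet events get logged.
+//   std::vector<FrameEvent> frame_events;
+//   subscriber.GetFrameEventsAndReset(&frame_events);
+//   logging->RemoveRawEventSubscriber(&subscriber);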
+class SimpleEventSubscriber : public RawEventSubscriber {
+ public:
+ SimpleEventSubscriber();
+
+ virtual ~SimpleEventSubscriber();
+
+ // RawEventSubscriber implementations.
+ virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
+ virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+
+ // Assigns frame events received so far to |frame_events| and clears them
+ // from this object.
+ void GetFrameEventsAndReset(std::vector<FrameEvent>* frame_events);
+
+ // Assigns packet events received so far to |packet_events| and clears them
+ // from this object.
+ void GetPacketEventsAndReset(std::vector<PacketEvent>* packet_events);
+
+ private:
+ std::vector<FrameEvent> frame_events_;
+ std::vector<PacketEvent> packet_events_;
+
+ // All functions must be called on the main thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleEventSubscriber);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_SIMPLE_EVENT_SUBSCRIBER_H_
diff --git a/chromium/media/cast/logging/simple_event_subscriber_unittest.cc b/chromium/media/cast/logging/simple_event_subscriber_unittest.cc
new file mode 100644
index 00000000000..311a2341951
--- /dev/null
+++ b/chromium/media/cast/logging/simple_event_subscriber_unittest.cc
@@ -0,0 +1,87 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+class SimpleEventSubscriberTest : public ::testing::Test {
+ protected:
+ SimpleEventSubscriberTest()
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)) {
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
+ }
+
+ virtual ~SimpleEventSubscriberTest() {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
+ }
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ SimpleEventSubscriber event_subscriber_;
+};
+
+TEST_F(SimpleEventSubscriberTest, GetAndResetEvents) {
+ // Log some frame events.
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ testing_clock_->NowTicks(), FRAME_ENCODED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 100u, /*frame_id*/ 0u, /*frame_size*/ 123,
+ /*key_frame*/ false, 0);
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ testing_clock_->NowTicks(), FRAME_PLAYOUT, AUDIO_EVENT,
+ /*rtp_timestamp*/ 100u,
+ /*frame_id*/ 0u, /*delay*/ base::TimeDelta::FromMilliseconds(100));
+ cast_environment_->Logging()->InsertFrameEvent(
+ testing_clock_->NowTicks(), FRAME_DECODED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 200u,
+ /*frame_id*/ 0u);
+
+ // Log some packet events.
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(), PACKET_RECEIVED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 200u,
+ /*frame_id*/ 0u, /*packet_id*/ 1u, /*max_packet_id*/ 5u, /*size*/ 100u);
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ 200u, /*frame_id*/ 0u, /*packet_id*/ 1u,
+ /*max_packet_id*/ 5u, /*size*/ 100u);
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ 300u, /*frame_id*/ 0u, /*packet_id*/ 1u,
+ /*max_packet_id*/ 5u, /*size*/ 100u);
+
+ std::vector<FrameEvent> frame_events;
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ EXPECT_EQ(3u, frame_events.size());
+
+ std::vector<PacketEvent> packet_events;
+ event_subscriber_.GetPacketEventsAndReset(&packet_events);
+ EXPECT_EQ(3u, packet_events.size());
+
+  // Calling these functions again should yield empty vectors because no
+  // events were logged since the last call.
+ event_subscriber_.GetFrameEventsAndReset(&frame_events);
+ event_subscriber_.GetPacketEventsAndReset(&packet_events);
+ EXPECT_TRUE(frame_events.empty());
+ EXPECT_TRUE(packet_events.empty());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/stats_event_subscriber.cc b/chromium/media/cast/logging/stats_event_subscriber.cc
new file mode 100644
index 00000000000..9e3226a2161
--- /dev/null
+++ b/chromium/media/cast/logging/stats_event_subscriber.cc
@@ -0,0 +1,400 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/stats_event_subscriber.h"
+
+#include "base/logging.h"
+#include "base/values.h"
+
+#define STAT_ENUM_TO_STRING(enum) \
+ case enum: \
+ return #enum
+
+namespace media {
+namespace cast {
+
+namespace {
+
+using media::cast::CastLoggingEvent;
+using media::cast::EventMediaType;
+
+const size_t kMaxFrameEventTimeMapSize = 100;
+const size_t kMaxPacketEventTimeMapSize = 1000;
+
+bool IsReceiverEvent(CastLoggingEvent event) {
+  return event == FRAME_DECODED || event == FRAME_PLAYOUT ||
+         event == FRAME_ACK_SENT || event == PACKET_RECEIVED;
+}
+
+} // namespace
+
+StatsEventSubscriber::StatsEventSubscriber(
+ EventMediaType event_media_type,
+ base::TickClock* clock,
+ ReceiverTimeOffsetEstimator* offset_estimator)
+ : event_media_type_(event_media_type),
+ clock_(clock),
+ offset_estimator_(offset_estimator),
+ network_latency_datapoints_(0),
+ e2e_latency_datapoints_(0) {
+ DCHECK(event_media_type == AUDIO_EVENT || event_media_type == VIDEO_EVENT);
+ base::TimeTicks now = clock_->NowTicks();
+ start_time_ = now;
+ last_response_received_time_ = base::TimeTicks();
+}
+
+StatsEventSubscriber::~StatsEventSubscriber() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void StatsEventSubscriber::OnReceiveFrameEvent(const FrameEvent& frame_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ CastLoggingEvent type = frame_event.type;
+ if (frame_event.media_type != event_media_type_)
+ return;
+
+ FrameStatsMap::iterator it = frame_stats_.find(type);
+ if (it == frame_stats_.end()) {
+ FrameLogStats stats;
+ stats.event_counter = 1;
+ stats.sum_size = frame_event.size;
+ stats.sum_delay = frame_event.delay_delta;
+ frame_stats_.insert(std::make_pair(type, stats));
+ } else {
+ ++(it->second.event_counter);
+ it->second.sum_size += frame_event.size;
+ it->second.sum_delay += frame_event.delay_delta;
+ }
+
+ if (type == FRAME_CAPTURE_BEGIN) {
+ RecordFrameCapturedTime(frame_event);
+ } else if (type == FRAME_PLAYOUT) {
+ RecordE2ELatency(frame_event);
+ }
+
+ if (IsReceiverEvent(type))
+ UpdateLastResponseTime(frame_event.timestamp);
+}
+
+void StatsEventSubscriber::OnReceivePacketEvent(
+ const PacketEvent& packet_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ CastLoggingEvent type = packet_event.type;
+ if (packet_event.media_type != event_media_type_)
+ return;
+
+ PacketStatsMap::iterator it = packet_stats_.find(type);
+ if (it == packet_stats_.end()) {
+ PacketLogStats stats;
+ stats.event_counter = 1;
+ stats.sum_size = packet_event.size;
+ packet_stats_.insert(std::make_pair(type, stats));
+ } else {
+ ++(it->second.event_counter);
+ it->second.sum_size += packet_event.size;
+ }
+
+ if (type == PACKET_SENT_TO_NETWORK ||
+ type == PACKET_RECEIVED) {
+ RecordNetworkLatency(packet_event);
+ } else if (type == PACKET_RETRANSMITTED) {
+    // We only measure network latency using packets that don't have to be
+    // retransmitted, as those have precisely one sent-received timestamp pair.
+ ErasePacketSentTime(packet_event);
+ }
+
+ if (IsReceiverEvent(type))
+ UpdateLastResponseTime(packet_event.timestamp);
+}
+
+scoped_ptr<base::DictionaryValue> StatsEventSubscriber::GetStats() const {
+ StatsMap stats_map;
+ GetStatsInternal(&stats_map);
+ scoped_ptr<base::DictionaryValue> ret(new base::DictionaryValue);
+
+ scoped_ptr<base::DictionaryValue> stats(new base::DictionaryValue);
+ for (StatsMap::const_iterator it = stats_map.begin(); it != stats_map.end();
+ ++it) {
+ stats->SetDouble(CastStatToString(it->first), it->second);
+ }
+
+ ret->Set(event_media_type_ == AUDIO_EVENT ? "audio" : "video",
+ stats.release());
+
+ return ret.Pass();
+}
+
+void StatsEventSubscriber::Reset() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ frame_stats_.clear();
+ packet_stats_.clear();
+ total_network_latency_ = base::TimeDelta();
+ network_latency_datapoints_ = 0;
+ total_e2e_latency_ = base::TimeDelta();
+ e2e_latency_datapoints_ = 0;
+ frame_captured_times_.clear();
+ packet_sent_times_.clear();
+ start_time_ = clock_->NowTicks();
+ last_response_received_time_ = base::TimeTicks();
+}
+
+// static
+const char* StatsEventSubscriber::CastStatToString(CastStat stat) {
+ switch (stat) {
+ STAT_ENUM_TO_STRING(CAPTURE_FPS);
+ STAT_ENUM_TO_STRING(ENCODE_FPS);
+ STAT_ENUM_TO_STRING(DECODE_FPS);
+ STAT_ENUM_TO_STRING(AVG_ENCODE_TIME_MS);
+ STAT_ENUM_TO_STRING(AVG_PLAYOUT_DELAY_MS);
+ STAT_ENUM_TO_STRING(AVG_NETWORK_LATENCY_MS);
+ STAT_ENUM_TO_STRING(AVG_E2E_LATENCY_MS);
+ STAT_ENUM_TO_STRING(ENCODE_KBPS);
+ STAT_ENUM_TO_STRING(TRANSMISSION_KBPS);
+ STAT_ENUM_TO_STRING(RETRANSMISSION_KBPS);
+ STAT_ENUM_TO_STRING(PACKET_LOSS_FRACTION);
+ STAT_ENUM_TO_STRING(MS_SINCE_LAST_RECEIVER_RESPONSE);
+ }
+ NOTREACHED();
+ return "";
+}
+
+void StatsEventSubscriber::GetStatsInternal(StatsMap* stats_map) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ stats_map->clear();
+
+ base::TimeTicks end_time = clock_->NowTicks();
+
+ PopulateFpsStat(
+ end_time, FRAME_CAPTURE_BEGIN, CAPTURE_FPS, stats_map);
+ PopulateFpsStat(
+ end_time, FRAME_ENCODED, ENCODE_FPS, stats_map);
+ PopulateFpsStat(
+ end_time, FRAME_DECODED, DECODE_FPS, stats_map);
+ PopulatePlayoutDelayStat(stats_map);
+ PopulateFrameBitrateStat(end_time, stats_map);
+ PopulatePacketBitrateStat(end_time,
+ PACKET_SENT_TO_NETWORK,
+ TRANSMISSION_KBPS,
+ stats_map);
+ PopulatePacketBitrateStat(end_time,
+ PACKET_RETRANSMITTED,
+ RETRANSMISSION_KBPS,
+ stats_map);
+ PopulatePacketLossPercentageStat(stats_map);
+
+ if (network_latency_datapoints_ > 0) {
+ double avg_network_latency_ms =
+ total_network_latency_.InMillisecondsF() /
+ network_latency_datapoints_;
+ stats_map->insert(
+ std::make_pair(AVG_NETWORK_LATENCY_MS, avg_network_latency_ms));
+ }
+
+ if (e2e_latency_datapoints_ > 0) {
+ double avg_e2e_latency_ms =
+ total_e2e_latency_.InMillisecondsF() / e2e_latency_datapoints_;
+ stats_map->insert(std::make_pair(AVG_E2E_LATENCY_MS, avg_e2e_latency_ms));
+ }
+
+ if (!last_response_received_time_.is_null()) {
+ stats_map->insert(
+ std::make_pair(MS_SINCE_LAST_RECEIVER_RESPONSE,
+ (end_time - last_response_received_time_).InMillisecondsF()));
+ }
+}
+
+bool StatsEventSubscriber::GetReceiverOffset(base::TimeDelta* offset) {
+ base::TimeDelta receiver_offset_lower_bound;
+ base::TimeDelta receiver_offset_upper_bound;
+ if (!offset_estimator_->GetReceiverOffsetBounds(
+ &receiver_offset_lower_bound, &receiver_offset_upper_bound)) {
+ return false;
+ }
+
+ *offset = (receiver_offset_lower_bound + receiver_offset_upper_bound) / 2;
+ return true;
+}
+
+void StatsEventSubscriber::RecordFrameCapturedTime(
+ const FrameEvent& frame_event) {
+ frame_captured_times_.insert(
+ std::make_pair(frame_event.rtp_timestamp, frame_event.timestamp));
+ if (frame_captured_times_.size() > kMaxFrameEventTimeMapSize)
+ frame_captured_times_.erase(frame_captured_times_.begin());
+}
+
+void StatsEventSubscriber::RecordE2ELatency(const FrameEvent& frame_event) {
+ base::TimeDelta receiver_offset;
+ if (!GetReceiverOffset(&receiver_offset))
+ return;
+
+ FrameEventTimeMap::iterator it =
+ frame_captured_times_.find(frame_event.rtp_timestamp);
+ if (it == frame_captured_times_.end())
+ return;
+
+  // Playout time (on the sender's clock) is the receiver-side event time
+  // plus the playout delay, minus the receiver offset.
+ base::TimeTicks playout_time =
+ frame_event.timestamp + frame_event.delay_delta - receiver_offset;
+ total_e2e_latency_ += playout_time - it->second;
+ e2e_latency_datapoints_++;
+}
+
+void StatsEventSubscriber::UpdateLastResponseTime(
+ base::TimeTicks receiver_time) {
+ base::TimeDelta receiver_offset;
+ if (!GetReceiverOffset(&receiver_offset))
+ return;
+ base::TimeTicks sender_time = receiver_time - receiver_offset;
+ last_response_received_time_ = sender_time;
+}
+
+void StatsEventSubscriber::ErasePacketSentTime(
+ const PacketEvent& packet_event) {
+ std::pair<RtpTimestamp, uint16> key(
+ std::make_pair(packet_event.rtp_timestamp, packet_event.packet_id));
+ packet_sent_times_.erase(key);
+}
+
+void StatsEventSubscriber::RecordNetworkLatency(
+ const PacketEvent& packet_event) {
+ base::TimeDelta receiver_offset;
+ if (!GetReceiverOffset(&receiver_offset))
+ return;
+
+ std::pair<RtpTimestamp, uint16> key(
+ std::make_pair(packet_event.rtp_timestamp, packet_event.packet_id));
+ PacketEventTimeMap::iterator it = packet_sent_times_.find(key);
+ if (it == packet_sent_times_.end()) {
+ std::pair<base::TimeTicks, CastLoggingEvent> value =
+ std::make_pair(packet_event.timestamp, packet_event.type);
+ packet_sent_times_.insert(std::make_pair(key, value));
+ if (packet_sent_times_.size() > kMaxPacketEventTimeMapSize)
+ packet_sent_times_.erase(packet_sent_times_.begin());
+ } else {
+ std::pair<base::TimeTicks, CastLoggingEvent> value = it->second;
+ CastLoggingEvent recorded_type = value.second;
+ bool match = false;
+ base::TimeTicks packet_sent_time;
+ base::TimeTicks packet_received_time;
+ if (recorded_type == PACKET_SENT_TO_NETWORK &&
+ packet_event.type == PACKET_RECEIVED) {
+ packet_sent_time = value.first;
+ packet_received_time = packet_event.timestamp;
+ match = true;
+ } else if (recorded_type == PACKET_RECEIVED &&
+ packet_event.type == PACKET_SENT_TO_NETWORK) {
+ packet_sent_time = packet_event.timestamp;
+ packet_received_time = value.first;
+ match = true;
+ }
+ if (match) {
+      // Convert the receive time to the sender's clock by subtracting the
+      // receiver offset.
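+      // Sketch with illustrative numbers: a packet sent at sender time 10 ms
+      // and received at receiver time 130 ms, with a receiver offset of
+      // 100 ms, yields a network latency of (130 - 100) - 10 = 20 ms.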
+ packet_received_time -= receiver_offset;
+
+ total_network_latency_ += packet_received_time - packet_sent_time;
+ network_latency_datapoints_++;
+ packet_sent_times_.erase(it);
+ }
+ }
+}
+
+void StatsEventSubscriber::PopulateFpsStat(base::TimeTicks end_time,
+ CastLoggingEvent event,
+ CastStat stat,
+ StatsMap* stats_map) const {
+ FrameStatsMap::const_iterator it = frame_stats_.find(event);
+ if (it != frame_stats_.end()) {
+ double fps = 0.0;
+ base::TimeDelta duration = (end_time - start_time_);
+ int count = it->second.event_counter;
+ if (duration > base::TimeDelta())
+ fps = count / duration.InSecondsF();
+ stats_map->insert(std::make_pair(stat, fps));
+ }
+}
+
+void StatsEventSubscriber::PopulatePlayoutDelayStat(StatsMap* stats_map) const {
+ FrameStatsMap::const_iterator it = frame_stats_.find(FRAME_PLAYOUT);
+ if (it != frame_stats_.end()) {
+ double avg_delay_ms = 0.0;
+ base::TimeDelta sum_delay = it->second.sum_delay;
+ int count = it->second.event_counter;
+ if (count != 0)
+ avg_delay_ms = sum_delay.InMillisecondsF() / count;
+ stats_map->insert(std::make_pair(AVG_PLAYOUT_DELAY_MS, avg_delay_ms));
+ }
+}
+
+void StatsEventSubscriber::PopulateFrameBitrateStat(base::TimeTicks end_time,
+ StatsMap* stats_map) const {
+ FrameStatsMap::const_iterator it = frame_stats_.find(FRAME_ENCODED);
+ if (it != frame_stats_.end()) {
+ double kbps = 0.0;
+ base::TimeDelta duration = end_time - start_time_;
+ if (duration > base::TimeDelta()) {
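+      // |sum_size| is in bytes and |duration| in milliseconds, so
+      // bytes / ms * 8 = kilobits / second; no further conversion is needed.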
+ kbps = it->second.sum_size / duration.InMillisecondsF() * 8;
+ }
+
+ stats_map->insert(std::make_pair(ENCODE_KBPS, kbps));
+ }
+}
+
+void StatsEventSubscriber::PopulatePacketBitrateStat(
+ base::TimeTicks end_time,
+ CastLoggingEvent event,
+ CastStat stat,
+ StatsMap* stats_map) const {
+ PacketStatsMap::const_iterator it = packet_stats_.find(event);
+ if (it != packet_stats_.end()) {
+ double kbps = 0;
+ base::TimeDelta duration = end_time - start_time_;
+ if (duration > base::TimeDelta()) {
+ kbps = it->second.sum_size / duration.InMillisecondsF() * 8;
+ }
+
+ stats_map->insert(std::make_pair(stat, kbps));
+ }
+}
+
+void StatsEventSubscriber::PopulatePacketLossPercentageStat(
+ StatsMap* stats_map) const {
+  // We assume that a retransmission means that the packet's previous
+  // (re)transmission was lost.
+  // The fraction of packets lost is therefore
+  // (# of retransmit events) / (# of transmit + retransmit events).
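+  // For example (illustrative numbers): 10 PACKET_SENT_TO_NETWORK events and
+  // 5 PACKET_RETRANSMITTED events give a loss fraction of 5 / 15 ~= 0.33.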
+ PacketStatsMap::const_iterator sent_it =
+ packet_stats_.find(PACKET_SENT_TO_NETWORK);
+ if (sent_it == packet_stats_.end())
+ return;
+ PacketStatsMap::const_iterator retransmitted_it =
+ packet_stats_.find(PACKET_RETRANSMITTED);
+ int sent_count = sent_it->second.event_counter;
+ int retransmitted_count = 0;
+ if (retransmitted_it != packet_stats_.end())
+ retransmitted_count = retransmitted_it->second.event_counter;
+ double packet_loss_fraction = static_cast<double>(retransmitted_count) /
+ (sent_count + retransmitted_count);
+ stats_map->insert(
+ std::make_pair(PACKET_LOSS_FRACTION, packet_loss_fraction));
+}
+
+StatsEventSubscriber::FrameLogStats::FrameLogStats()
+ : event_counter(0), sum_size(0) {}
+StatsEventSubscriber::FrameLogStats::~FrameLogStats() {}
+
+StatsEventSubscriber::PacketLogStats::PacketLogStats()
+ : event_counter(0), sum_size(0) {}
+StatsEventSubscriber::PacketLogStats::~PacketLogStats() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/stats_event_subscriber.h b/chromium/media/cast/logging/stats_event_subscriber.h
new file mode 100644
index 00000000000..173378ab0b2
--- /dev/null
+++ b/chromium/media/cast/logging/stats_event_subscriber.h
@@ -0,0 +1,176 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_STATS_EVENT_SUBSCRIBER_H_
+#define MEDIA_CAST_LOGGING_STATS_EVENT_SUBSCRIBER_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/raw_event_subscriber.h"
+#include "media/cast/logging/receiver_time_offset_estimator.h"
+
+namespace base {
+class DictionaryValue;
+}
+
+namespace media {
+namespace cast {
+
+class StatsEventSubscriberTest;
+
+// A RawEventSubscriber implementation that subscribes to raw events and
+// aggregates them into stats.
+class StatsEventSubscriber : public RawEventSubscriber {
+ public:
+ StatsEventSubscriber(EventMediaType event_media_type,
+ base::TickClock* clock,
+ ReceiverTimeOffsetEstimator* offset_estimator);
+
+ virtual ~StatsEventSubscriber();
+
+  // RawEventSubscriber implementations.
+ virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
+ virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+
+ // Returns stats as a DictionaryValue. The dictionary contains one entry -
+ // "audio" or "video" pointing to an inner dictionary.
+ // The inner dictionary consists of string - double entries, where the string
+ // describes the name of the stat, and the double describes
+ // the value of the stat. See CastStat and StatsMap below.
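+  // A sketch of the returned structure (stat names come from
+  // CastStatToString(); the numbers are illustrative only):
+  //   { "video": { "CAPTURE_FPS": 29.9, "ENCODE_KBPS": 1820.5, ... } }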
+ scoped_ptr<base::DictionaryValue> GetStats() const;
+
+ // Resets stats in this object.
+ void Reset();
+
+ private:
+ friend class StatsEventSubscriberTest;
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, EmptyStats);
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, Capture);
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, Encode);
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, Decode);
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, PlayoutDelay);
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, E2ELatency);
+ FRIEND_TEST_ALL_PREFIXES(StatsEventSubscriberTest, Packets);
+
+ // Generic statistics given the raw data. More specific data (e.g. frame rate
+ // and bit rate) can be computed given the basic metrics.
+ // Some of the metrics will only be set when applicable, e.g. delay and size.
+ struct FrameLogStats {
+ FrameLogStats();
+ ~FrameLogStats();
+ int event_counter;
+ size_t sum_size;
+ base::TimeDelta sum_delay;
+ };
+
+ struct PacketLogStats {
+ PacketLogStats();
+ ~PacketLogStats();
+ int event_counter;
+ size_t sum_size;
+ };
+
+ enum CastStat {
+ // Capture frame rate.
+ CAPTURE_FPS,
+ // Encode frame rate.
+ ENCODE_FPS,
+ // Decode frame rate.
+ DECODE_FPS,
+ // Average encode duration in milliseconds.
+    // TODO(imcheng): This stat is not populated yet because we do not have
+    // the time when encoding started. Record it in the FRAME_ENCODED event.
+ AVG_ENCODE_TIME_MS,
+ // Average playout delay in milliseconds, with target delay already
+ // accounted for. Ideally, every frame should have a playout delay of 0.
+ AVG_PLAYOUT_DELAY_MS,
+ // Duration from when a packet is transmitted to when it is received.
+ // This measures latency from sender to receiver.
+ AVG_NETWORK_LATENCY_MS,
+ // Duration from when a frame is captured to when it should be played out.
+ AVG_E2E_LATENCY_MS,
+ // Encode bitrate in kbps.
+ ENCODE_KBPS,
+ // Packet transmission bitrate in kbps.
+ TRANSMISSION_KBPS,
+ // Packet retransmission bitrate in kbps.
+ RETRANSMISSION_KBPS,
+ // Fraction of packet loss.
+ PACKET_LOSS_FRACTION,
+ // Duration in milliseconds since last receiver response.
+ MS_SINCE_LAST_RECEIVER_RESPONSE
+ };
+
+ typedef std::map<CastStat, double> StatsMap;
+ typedef std::map<RtpTimestamp, base::TimeTicks> FrameEventTimeMap;
+ typedef std::map<
+ std::pair<RtpTimestamp, uint16>,
+ std::pair<base::TimeTicks, CastLoggingEvent> >
+ PacketEventTimeMap;
+ typedef std::map<CastLoggingEvent, FrameLogStats> FrameStatsMap;
+ typedef std::map<CastLoggingEvent, PacketLogStats> PacketStatsMap;
+
+ static const char* CastStatToString(CastStat stat);
+
+  // Fills |stats_map| with stats data. Used for testing.
+ void GetStatsInternal(StatsMap* stats_map) const;
+
+ bool GetReceiverOffset(base::TimeDelta* offset);
+ void RecordFrameCapturedTime(const FrameEvent& frame_event);
+ void RecordE2ELatency(const FrameEvent& frame_event);
+ void RecordPacketSentTime(const PacketEvent& packet_event);
+ void ErasePacketSentTime(const PacketEvent& packet_event);
+ void RecordNetworkLatency(const PacketEvent& packet_event);
+ void UpdateLastResponseTime(base::TimeTicks receiver_time);
+
+ void PopulateFpsStat(base::TimeTicks now,
+ CastLoggingEvent event,
+ CastStat stat,
+ StatsMap* stats_map) const;
+ void PopulatePlayoutDelayStat(StatsMap* stats_map) const;
+ void PopulateFrameBitrateStat(base::TimeTicks now, StatsMap* stats_map) const;
+ void PopulatePacketBitrateStat(base::TimeTicks now,
+ CastLoggingEvent event,
+ CastStat stat,
+ StatsMap* stats_map) const;
+ void PopulatePacketLossPercentageStat(StatsMap* stats_map) const;
+
+ const EventMediaType event_media_type_;
+
+ // Not owned by this class.
+ base::TickClock* const clock_;
+
+ // Not owned by this class.
+ ReceiverTimeOffsetEstimator* const offset_estimator_;
+
+ FrameStatsMap frame_stats_;
+ PacketStatsMap packet_stats_;
+
+ base::TimeDelta total_network_latency_;
+ int network_latency_datapoints_;
+ base::TimeDelta total_e2e_latency_;
+ int e2e_latency_datapoints_;
+
+ base::TimeTicks last_response_received_time_;
+
+ // Fixed size map to record when recent frames were captured.
+ FrameEventTimeMap frame_captured_times_;
+
+ // Fixed size map to record when recent packets were sent.
+ PacketEventTimeMap packet_sent_times_;
+
+ // Sender time assigned on creation and |Reset()|.
+ base::TimeTicks start_time_;
+
+ base::ThreadChecker thread_checker_;
+ DISALLOW_COPY_AND_ASSIGN(StatsEventSubscriber);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_STATS_EVENT_SUBSCRIBER_H_
diff --git a/chromium/media/cast/logging/stats_event_subscriber_unittest.cc b/chromium/media/cast/logging/stats_event_subscriber_unittest.cc
new file mode 100644
index 00000000000..33faa020596
--- /dev/null
+++ b/chromium/media/cast/logging/stats_event_subscriber_unittest.cc
@@ -0,0 +1,401 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/rand_util.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/stats_event_subscriber.h"
+#include "media/cast/test/fake_receiver_time_offset_estimator.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+const int kReceiverOffsetSecs = 100;
+}  // namespace
+
+namespace media {
+namespace cast {
+
+class StatsEventSubscriberTest : public ::testing::Test {
+ protected:
+ StatsEventSubscriberTest()
+ : sender_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(sender_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(sender_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
+ fake_offset_estimator_(
+ base::TimeDelta::FromSeconds(kReceiverOffsetSecs)) {
+ receiver_clock_.Advance(base::TimeDelta::FromSeconds(kReceiverOffsetSecs));
+ cast_environment_->Logging()->AddRawEventSubscriber(
+ &fake_offset_estimator_);
+ }
+
+ virtual ~StatsEventSubscriberTest() {
+ if (subscriber_.get())
+ cast_environment_->Logging()->RemoveRawEventSubscriber(subscriber_.get());
+ cast_environment_->Logging()->RemoveRawEventSubscriber(
+ &fake_offset_estimator_);
+ }
+
+ void AdvanceClocks(base::TimeDelta delta) {
+ sender_clock_->Advance(delta);
+ receiver_clock_.Advance(delta);
+ }
+
+ void Init(EventMediaType event_media_type) {
+ DCHECK(!subscriber_.get());
+ subscriber_.reset(new StatsEventSubscriber(
+ event_media_type, cast_environment_->Clock(), &fake_offset_estimator_));
+ cast_environment_->Logging()->AddRawEventSubscriber(subscriber_.get());
+ }
+
+ base::SimpleTestTickClock* sender_clock_; // Owned by CastEnvironment.
+ base::SimpleTestTickClock receiver_clock_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ test::FakeReceiverTimeOffsetEstimator fake_offset_estimator_;
+ scoped_ptr<StatsEventSubscriber> subscriber_;
+};
+
+TEST_F(StatsEventSubscriberTest, Capture) {
+ Init(VIDEO_EVENT);
+
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ int num_frames = 10;
+ base::TimeTicks start_time = sender_clock_->NowTicks();
+ for (int i = 0; i < num_frames; i++) {
+ cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id);
+
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(34567));
+ rtp_timestamp += 90;
+ frame_id++;
+ }
+
+ base::TimeTicks end_time = sender_clock_->NowTicks();
+
+ StatsEventSubscriber::StatsMap stats_map;
+ subscriber_->GetStatsInternal(&stats_map);
+
+ StatsEventSubscriber::StatsMap::iterator it =
+ stats_map.find(StatsEventSubscriber::CAPTURE_FPS);
+ ASSERT_NE(it, stats_map.end());
+
+ base::TimeDelta duration = end_time - start_time;
+ EXPECT_DOUBLE_EQ(
+ it->second,
+ static_cast<double>(num_frames) / duration.InMillisecondsF() * 1000);
+}
+
+TEST_F(StatsEventSubscriberTest, Encode) {
+ Init(VIDEO_EVENT);
+
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ int num_frames = 10;
+ base::TimeTicks start_time = sender_clock_->NowTicks();
+ int total_size = 0;
+ for (int i = 0; i < num_frames; i++) {
+ int size = 1000 + base::RandInt(-100, 100);
+ total_size += size;
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ sender_clock_->NowTicks(),
+ FRAME_ENCODED, VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ size,
+ true,
+ 5678);
+
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(35678));
+ rtp_timestamp += 90;
+ frame_id++;
+ }
+
+ base::TimeTicks end_time = sender_clock_->NowTicks();
+
+ StatsEventSubscriber::StatsMap stats_map;
+ subscriber_->GetStatsInternal(&stats_map);
+
+ StatsEventSubscriber::StatsMap::iterator it =
+ stats_map.find(StatsEventSubscriber::ENCODE_FPS);
+ ASSERT_NE(it, stats_map.end());
+
+ base::TimeDelta duration = end_time - start_time;
+ EXPECT_DOUBLE_EQ(
+ it->second,
+ static_cast<double>(num_frames) / duration.InMillisecondsF() * 1000);
+
+ it = stats_map.find(StatsEventSubscriber::ENCODE_KBPS);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(it->second,
+ static_cast<double>(total_size) / duration.InMillisecondsF() * 8);
+}
+
+TEST_F(StatsEventSubscriberTest, Decode) {
+ Init(VIDEO_EVENT);
+
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ int num_frames = 10;
+ base::TimeTicks start_time = sender_clock_->NowTicks();
+ for (int i = 0; i < num_frames; i++) {
+ cast_environment_->Logging()->InsertFrameEvent(receiver_clock_.NowTicks(),
+ FRAME_DECODED, VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id);
+
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(36789));
+ rtp_timestamp += 90;
+ frame_id++;
+ }
+
+ base::TimeTicks end_time = sender_clock_->NowTicks();
+
+ StatsEventSubscriber::StatsMap stats_map;
+ subscriber_->GetStatsInternal(&stats_map);
+
+ StatsEventSubscriber::StatsMap::iterator it =
+ stats_map.find(StatsEventSubscriber::DECODE_FPS);
+ ASSERT_NE(it, stats_map.end());
+
+ base::TimeDelta duration = end_time - start_time;
+ EXPECT_DOUBLE_EQ(
+ it->second,
+ static_cast<double>(num_frames) / duration.InMillisecondsF() * 1000);
+}
+
+TEST_F(StatsEventSubscriberTest, PlayoutDelay) {
+ Init(VIDEO_EVENT);
+
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ int num_frames = 10;
+ int total_delay_ms = 0;
+ for (int i = 0; i < num_frames; i++) {
+ int delay_ms = base::RandInt(-50, 50);
+ base::TimeDelta delay = base::TimeDelta::FromMilliseconds(delay_ms);
+ total_delay_ms += delay_ms;
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ receiver_clock_.NowTicks(),
+ FRAME_PLAYOUT,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ delay);
+
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(37890));
+ rtp_timestamp += 90;
+ frame_id++;
+ }
+
+ StatsEventSubscriber::StatsMap stats_map;
+ subscriber_->GetStatsInternal(&stats_map);
+
+ StatsEventSubscriber::StatsMap::iterator it =
+ stats_map.find(StatsEventSubscriber::AVG_PLAYOUT_DELAY_MS);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(
+ it->second, static_cast<double>(total_delay_ms) / num_frames);
+}
+
+TEST_F(StatsEventSubscriberTest, E2ELatency) {
+ Init(VIDEO_EVENT);
+
+ uint32 rtp_timestamp = 0;
+ uint32 frame_id = 0;
+ int num_frames = 10;
+ base::TimeDelta total_latency;
+ for (int i = 0; i < num_frames; i++) {
+ cast_environment_->Logging()->InsertFrameEvent(sender_clock_->NowTicks(),
+ FRAME_CAPTURE_BEGIN,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id);
+
+ int latency_micros = 100000 + base::RandInt(-5000, 50000);
+ base::TimeDelta latency = base::TimeDelta::FromMicroseconds(latency_micros);
+ AdvanceClocks(latency);
+
+ int delay_micros = base::RandInt(-50000, 50000);
+    base::TimeDelta delay = base::TimeDelta::FromMicroseconds(delay_micros);
+ total_latency += latency + delay;
+
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ receiver_clock_.NowTicks(),
+ FRAME_PLAYOUT,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ frame_id,
+ delay);
+
+ rtp_timestamp += 90;
+ frame_id++;
+ }
+
+ StatsEventSubscriber::StatsMap stats_map;
+ subscriber_->GetStatsInternal(&stats_map);
+
+ StatsEventSubscriber::StatsMap::iterator it =
+ stats_map.find(StatsEventSubscriber::AVG_E2E_LATENCY_MS);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(
+ it->second, total_latency.InMillisecondsF() / num_frames);
+}
+
+TEST_F(StatsEventSubscriberTest, Packets) {
+ Init(VIDEO_EVENT);
+
+ uint32 rtp_timestamp = 0;
+ int num_packets = 10;
+ int num_latency_recorded_packets = 0;
+ base::TimeTicks start_time = sender_clock_->NowTicks();
+ int total_size = 0;
+ int retransmit_total_size = 0;
+ base::TimeDelta total_latency;
+ int num_packets_sent = 0;
+ int num_packets_retransmitted = 0;
+ // Every 2nd packet will be retransmitted once.
+ // Every 4th packet will be retransmitted twice.
+ // Every 8th packet will be retransmitted 3 times.
+ for (int i = 0; i < num_packets; i++) {
+ int size = 1000 + base::RandInt(-100, 100);
+ total_size += size;
+
+ cast_environment_->Logging()->InsertPacketEvent(sender_clock_->NowTicks(),
+ PACKET_SENT_TO_NETWORK,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ 0,
+ i,
+ num_packets - 1,
+ size);
+ num_packets_sent++;
+
+ int latency_micros = 20000 + base::RandInt(-10000, 10000);
+ base::TimeDelta latency = base::TimeDelta::FromMicroseconds(latency_micros);
+ // Latency is only recorded for packets that aren't retransmitted.
+ if (i % 2 != 0) {
+ total_latency += latency;
+ num_latency_recorded_packets++;
+ }
+
+ AdvanceClocks(latency);
+
+ base::TimeTicks received_time = receiver_clock_.NowTicks();
+
+ // Retransmission 1.
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(12345));
+ if (i % 2 == 0) {
+ cast_environment_->Logging()->InsertPacketEvent(
+ receiver_clock_.NowTicks(),
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ 0,
+ i,
+ num_packets - 1,
+ size);
+ retransmit_total_size += size;
+ num_packets_sent++;
+ num_packets_retransmitted++;
+ }
+
+ // Retransmission 2.
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(13456));
+ if (i % 4 == 0) {
+ cast_environment_->Logging()->InsertPacketEvent(
+ receiver_clock_.NowTicks(),
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ 0,
+ i,
+ num_packets - 1,
+ size);
+ retransmit_total_size += size;
+ num_packets_sent++;
+ num_packets_retransmitted++;
+ }
+
+ // Retransmission 3.
+ AdvanceClocks(base::TimeDelta::FromMicroseconds(14567));
+ if (i % 8 == 0) {
+ cast_environment_->Logging()->InsertPacketEvent(
+ receiver_clock_.NowTicks(),
+ PACKET_RETRANSMITTED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ 0,
+ i,
+ num_packets - 1,
+ size);
+ retransmit_total_size += size;
+ num_packets_sent++;
+ num_packets_retransmitted++;
+ }
+
+ cast_environment_->Logging()->InsertPacketEvent(received_time,
+ PACKET_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ 0,
+ i,
+ num_packets - 1,
+ size);
+ }
+
+ base::TimeTicks end_time = sender_clock_->NowTicks();
+ base::TimeDelta duration = end_time - start_time;
+
+ StatsEventSubscriber::StatsMap stats_map;
+ subscriber_->GetStatsInternal(&stats_map);
+
+ // Measure AVG_NETWORK_LATENCY_MS, TRANSMISSION_KBPS, RETRANSMISSION_KBPS,
+ // and PACKET_LOSS_FRACTION.
+ StatsEventSubscriber::StatsMap::iterator it =
+ stats_map.find(StatsEventSubscriber::AVG_NETWORK_LATENCY_MS);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(
+ it->second,
+ total_latency.InMillisecondsF() / num_latency_recorded_packets);
+
+ it = stats_map.find(StatsEventSubscriber::TRANSMISSION_KBPS);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(it->second,
+ static_cast<double>(total_size) / duration.InMillisecondsF() * 8);
+
+ it = stats_map.find(StatsEventSubscriber::RETRANSMISSION_KBPS);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(it->second,
+ static_cast<double>(retransmit_total_size) /
+ duration.InMillisecondsF() * 8);
+
+ it = stats_map.find(StatsEventSubscriber::PACKET_LOSS_FRACTION);
+ ASSERT_NE(it, stats_map.end());
+
+ EXPECT_DOUBLE_EQ(
+ it->second,
+ static_cast<double>(num_packets_retransmitted) / num_packets_sent);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/net/cast_net_defines.h b/chromium/media/cast/net/cast_net_defines.h
deleted file mode 100644
index a9f1629a91a..00000000000
--- a/chromium/media/cast/net/cast_net_defines.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_CAST_NET_DEFINES_H_
-#define MEDIA_CAST_NET_CAST_NET_DEFINES_H_
-
-#include "base/basictypes.h"
-
-namespace media {
-namespace cast {
-
-class FrameIdWrapHelper {
- public:
- FrameIdWrapHelper()
- : first_(true),
- frame_id_wrap_count_(0),
- range_(kLowRange) {}
-
- uint32 MapTo32bitsFrameId(const uint8 over_the_wire_frame_id) {
- if (first_) {
- first_ = false;
- if (over_the_wire_frame_id == 0xff) {
- // Special case for startup.
- return kStartFrameId;
- }
- }
-
- uint32 wrap_count = frame_id_wrap_count_;
- switch (range_) {
- case kLowRange:
- if (over_the_wire_frame_id > kLowRangeThreshold &&
- over_the_wire_frame_id < kHighRangeThreshold) {
- range_ = kMiddleRange;
- }
- if (over_the_wire_frame_id > kHighRangeThreshold) {
- // Wrap count was incremented in High->Low transition, but this frame
- // is 'old', actually from before the wrap count got incremented.
- --wrap_count;
- }
- break;
- case kMiddleRange:
- if (over_the_wire_frame_id > kHighRangeThreshold) {
- range_ = kHighRange;
- }
- break;
- case kHighRange:
- if (over_the_wire_frame_id < kLowRangeThreshold) {
- // Wrap-around detected.
- range_ = kLowRange;
- ++frame_id_wrap_count_;
-        // This frame triggered the wrap-around, so the wrap count should be
-        // incremented as well to match |frame_id_wrap_count_|.
- ++wrap_count;
- }
- break;
- }
- return (wrap_count << 8) + over_the_wire_frame_id;
- }
-
- private:
- enum Range {
- kLowRange,
- kMiddleRange,
- kHighRange,
- };
-
- static const uint8 kLowRangeThreshold = 0x0f;
- static const uint8 kHighRangeThreshold = 0xf0;
- static const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
-
- bool first_;
- uint32 frame_id_wrap_count_;
- Range range_;
-};
-
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_CAST_NET_DEFINES_H_
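The switch above tracks which third of the 8-bit id space the stream is in, so
a genuine High->Low wrap can be told apart from reordered stragglers. A short
trace of MapTo32bitsFrameId(), assuming the helper is already in kHighRange
with frame_id_wrap_count_ == 0 (input values hypothetical):

    // in: 0xfe -> out: (0 << 8) + 0xfe = 0x00fe
    // in: 0xff -> out: 0x00ff
    // in: 0x00 -> wrap detected: range -> kLowRange, wrap count 0 -> 1,
    //             out: (1 << 8) + 0x00 = 0x0100
    // in: 0x01 -> out: 0x0101
    // in: 0xfd (late, reordered) -> 0xfd > kHighRangeThreshold, so the local
    //             wrap_count is decremented back to 0: out = 0x00fd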
diff --git a/chromium/media/cast/net/pacing/mock_paced_packet_sender.h b/chromium/media/cast/net/pacing/mock_paced_packet_sender.h
deleted file mode 100644
index 9933516f14c..00000000000
--- a/chromium/media/cast/net/pacing/mock_paced_packet_sender.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
-#define MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
-
-#include "media/cast/net/pacing/paced_sender.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockPacedPacketSender : public PacedPacketSender {
- public:
- MockPacedPacketSender();
- virtual ~MockPacedPacketSender();
-
- MOCK_METHOD1(SendPackets, bool(const PacketList& packets));
- MOCK_METHOD1(ResendPackets, bool(const PacketList& packets));
- MOCK_METHOD1(SendRtcpPacket, bool(const Packet& packet));
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
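The mock above is consumed with the standard gmock expectation pattern. A
minimal usage sketch (the surrounding test fixture is hypothetical):

    #include "media/cast/net/pacing/mock_paced_packet_sender.h"
    #include "testing/gmock/include/gmock/gmock.h"

    using testing::_;
    using testing::Return;

    // Inside a test body:
    media::cast::MockPacedPacketSender mock_transport;
    EXPECT_CALL(mock_transport, SendPackets(_)).WillOnce(Return(true));
    // Pass &mock_transport to the code under test, e.g. an RtpPacketizer.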
diff --git a/chromium/media/cast/net/pacing/paced_sender.cc b/chromium/media/cast/net/pacing/paced_sender.cc
deleted file mode 100644
index 8a07380df0d..00000000000
--- a/chromium/media/cast/net/pacing/paced_sender.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/pacing/paced_sender.h"
-
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-
-namespace media {
-namespace cast {
-
-static const int64 kPacingIntervalMs = 10;
-// Each frame will be split into no more than kPacingMaxBurstsPerFrame
-// bursts of packets.
-static const size_t kPacingMaxBurstsPerFrame = 3;
-
-PacedSender::PacedSender(scoped_refptr<CastEnvironment> cast_environment,
- PacketSender* transport)
- : cast_environment_(cast_environment),
- burst_size_(1),
- packets_sent_in_burst_(0),
- transport_(transport),
- weak_factory_(this) {
- ScheduleNextSend();
-}
-
-PacedSender::~PacedSender() {}
-
-bool PacedSender::SendPackets(const PacketList& packets) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- cast_environment_->Logging()->InsertPacketListEvent(kPacketSentToPacer,
- packets);
- return SendPacketsToTransport(packets, &packet_list_);
-}
-
-bool PacedSender::ResendPackets(const PacketList& packets) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- cast_environment_->Logging()->InsertPacketListEvent(kPacketRetransmited,
- packets);
- return SendPacketsToTransport(packets, &resend_packet_list_);
-}
-
-bool PacedSender::SendPacketsToTransport(const PacketList& packets,
- PacketList* packets_not_sent) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- UpdateBurstSize(packets.size());
-
- if (!packets_not_sent->empty()) {
- packets_not_sent->insert(packets_not_sent->end(),
- packets.begin(), packets.end());
- return true;
- }
- PacketList packets_to_send;
- PacketList::const_iterator first_to_store_it = packets.begin();
-
- size_t max_packets_to_send_now = burst_size_ - packets_sent_in_burst_;
- if (max_packets_to_send_now > 0) {
- size_t packets_to_send_now = std::min(max_packets_to_send_now,
- packets.size());
-
- std::advance(first_to_store_it, packets_to_send_now);
- packets_to_send.insert(packets_to_send.begin(),
- packets.begin(), first_to_store_it);
- }
- packets_not_sent->insert(packets_not_sent->end(),
- first_to_store_it, packets.end());
- packets_sent_in_burst_ += packets_to_send.size();
- if (packets_to_send.empty()) return true;
-
-  cast_environment_->Logging()->InsertPacketListEvent(kPacketSentToNetwork,
-                                                      packets_to_send);
- return transport_->SendPackets(packets_to_send);
-}
-
-bool PacedSender::SendRtcpPacket(const Packet& packet) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // We pass the RTCP packets straight through.
- return transport_->SendPacket(packet);
-}
-
-void PacedSender::ScheduleNextSend() {
- base::TimeDelta time_to_next = time_last_process_ -
- cast_environment_->Clock()->NowTicks() +
- base::TimeDelta::FromMilliseconds(kPacingIntervalMs);
-
- time_to_next = std::max(time_to_next, base::TimeDelta());
-
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&PacedSender::SendNextPacketBurst, weak_factory_.GetWeakPtr()),
- time_to_next);
-}
-
-void PacedSender::SendNextPacketBurst() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- SendStoredPackets();
- time_last_process_ = cast_environment_->Clock()->NowTicks();
- ScheduleNextSend();
-}
-
-void PacedSender::SendStoredPackets() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (packet_list_.empty() && resend_packet_list_.empty()) return;
-
- size_t packets_to_send = burst_size_;
- PacketList packets_to_resend;
-
- // Send our re-send packets first.
- if (!resend_packet_list_.empty()) {
- PacketList::iterator it = resend_packet_list_.begin();
- size_t packets_to_send_now = std::min(packets_to_send,
- resend_packet_list_.size());
- std::advance(it, packets_to_send_now);
- packets_to_resend.insert(packets_to_resend.begin(),
- resend_packet_list_.begin(), it);
- resend_packet_list_.erase(resend_packet_list_.begin(), it);
- packets_to_send -= packets_to_resend.size();
- }
- if (!packet_list_.empty() && packets_to_send > 0) {
- PacketList::iterator it = packet_list_.begin();
- size_t packets_to_send_now = std::min(packets_to_send,
- packet_list_.size());
-
- std::advance(it, packets_to_send_now);
- packets_to_resend.insert(packets_to_resend.end(),
- packet_list_.begin(), it);
- packet_list_.erase(packet_list_.begin(), it);
-
- if (packet_list_.empty()) {
-      burst_size_ = 1;  // Reset burst size after sending the last stored packet.
- packets_sent_in_burst_ = 0;
- }
- }
- transport_->SendPackets(packets_to_resend);
-}
-
-void PacedSender::UpdateBurstSize(size_t packets_to_send) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- packets_to_send = std::max(packets_to_send,
- resend_packet_list_.size() + packet_list_.size());
-
- packets_to_send += (kPacingMaxBurstsPerFrame - 1); // Round up.
- burst_size_ = std::max(packets_to_send / kPacingMaxBurstsPerFrame,
- burst_size_);
-}
-
-} // namespace cast
-} // namespace media
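UpdateBurstSize() above is a ceiling division: a frame's packets are spread
over at most kPacingMaxBurstsPerFrame bursts, one burst per 10 ms pacing
interval. A worked example of the arithmetic, with hypothetical pending-packet
counts:

    // kPacingMaxBurstsPerFrame = 3, 9 packets pending, burst_size_ = 1:
    //   packets_to_send = 9 + (3 - 1) = 11     // round up
    //   burst_size_     = max(11 / 3, 1) = 3   // == ceil(9 / 3)
    // A 9-packet frame therefore drains in three 10 ms bursts of 3 packets,
    // matching the BasicPace expectations in the unit test below.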
diff --git a/chromium/media/cast/net/pacing/paced_sender.gyp b/chromium/media/cast/net/pacing/paced_sender.gyp
deleted file mode 100644
index 1947dd4ec40..00000000000
--- a/chromium/media/cast/net/pacing/paced_sender.gyp
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_paced_sender',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'paced_sender.h',
- 'paced_sender.cc',
- ],
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- ],
- },
- ], # targets
-}
diff --git a/chromium/media/cast/net/pacing/paced_sender.h b/chromium/media/cast/net/pacing/paced_sender.h
deleted file mode 100644
index 89283257134..00000000000
--- a/chromium/media/cast/net/pacing/paced_sender.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_PACING_PACED_SENDER_H_
-#define MEDIA_CAST_NET_PACING_PACED_SENDER_H_
-
-#include <list>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-
-namespace media {
-namespace cast {
-
-// We have this pure virtual class to enable mocking.
-class PacedPacketSender {
- public:
-  // Queue a frame's worth of packets for paced transmission.
- virtual bool SendPackets(const PacketList& packets) = 0;
-
- virtual bool ResendPackets(const PacketList& packets) = 0;
-
- virtual bool SendRtcpPacket(const Packet& packet) = 0;
-
- virtual ~PacedPacketSender() {}
-};
-
-class PacedSender : public PacedPacketSender,
- public base::NonThreadSafe,
- public base::SupportsWeakPtr<PacedSender> {
- public:
- PacedSender(scoped_refptr<CastEnvironment> cast_environment,
- PacketSender* transport);
- virtual ~PacedSender();
-
- virtual bool SendPackets(const PacketList& packets) OVERRIDE;
-
- virtual bool ResendPackets(const PacketList& packets) OVERRIDE;
-
- virtual bool SendRtcpPacket(const Packet& packet) OVERRIDE;
-
- protected:
- // Schedule a delayed task on the main cast thread when it's time to send the
- // next packet burst.
- void ScheduleNextSend();
-
- // Process any pending packets in the queue(s).
- void SendNextPacketBurst();
-
- private:
- bool SendPacketsToTransport(const PacketList& packets,
- PacketList* packets_not_sent);
- void SendStoredPackets();
- void UpdateBurstSize(size_t num_of_packets);
-
- scoped_refptr<CastEnvironment> cast_environment_;
- size_t burst_size_;
- size_t packets_sent_in_burst_;
- base::TimeTicks time_last_process_;
- // Note: We can't combine the |packet_list_| and the |resend_packet_list_|
- // since then we might get reordering of the retransmitted packets.
- PacketList packet_list_;
- PacketList resend_packet_list_;
- PacketSender* transport_;
-
- base::WeakPtrFactory<PacedSender> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(PacedSender);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_PACING_PACED_SENDER_H_
diff --git a/chromium/media/cast/net/pacing/paced_sender_unittest.cc b/chromium/media/cast/net/pacing/paced_sender_unittest.cc
deleted file mode 100644
index 15b81362f69..00000000000
--- a/chromium/media/cast/net/pacing/paced_sender_unittest.cc
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-static const uint8 kValue = 123;
-static const size_t kSize1 = 100;
-static const size_t kSize2 = 101;
-static const size_t kSize3 = 102;
-static const size_t kSize4 = 103;
-static const size_t kNackSize = 104;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-
-class TestPacketSender : public PacketSender {
- public:
- virtual bool SendPackets(const PacketList& packets) OVERRIDE {
- PacketList::const_iterator it = packets.begin();
- for (; it != packets.end(); ++it) {
- EXPECT_FALSE(expected_packet_size_.empty());
- size_t expected_packet_size = expected_packet_size_.front();
- expected_packet_size_.pop_front();
- EXPECT_EQ(expected_packet_size, it->size());
- }
- return true;
- }
-
- virtual bool SendPacket(const Packet& packet) OVERRIDE {
- return true;
- }
-
- void AddExpectedSize(int expected_packet_size, int repeat_count) {
- for (int i = 0; i < repeat_count; ++i) {
- expected_packet_size_.push_back(expected_packet_size);
- }
- }
-
- private:
- std::list<int> expected_packet_size_;
-};
-
-class PacedSenderTest : public ::testing::Test {
- protected:
- PacedSenderTest() {
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- }
-
- virtual ~PacedSenderTest() {}
-
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
- paced_sender_.reset(new PacedSender(cast_environment_, &mock_transport_));
- }
-
- PacketList CreatePacketList(size_t packet_size, int num_of_packets_in_frame) {
- PacketList packets;
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- packets.push_back(Packet(packet_size, kValue));
- }
- return packets;
- }
-
- base::SimpleTestTickClock testing_clock_;
- TestPacketSender mock_transport_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_ptr<PacedSender> paced_sender_;
- scoped_refptr<CastEnvironment> cast_environment_;
-};
-
-TEST_F(PacedSenderTest, PassThroughRtcp) {
- mock_transport_.AddExpectedSize(kSize1, 1);
- PacketList packets = CreatePacketList(kSize1, 1);
-
- EXPECT_TRUE(paced_sender_->SendPackets(packets));
- EXPECT_TRUE(paced_sender_->ResendPackets(packets));
-
- mock_transport_.AddExpectedSize(kSize2, 1);
- EXPECT_TRUE(paced_sender_->SendRtcpPacket(Packet(kSize2, kValue)));
-}
-
-TEST_F(PacedSenderTest, BasicPace) {
- int num_of_packets = 9;
- PacketList packets = CreatePacketList(kSize1, num_of_packets);
-
- mock_transport_.AddExpectedSize(kSize1, 3);
- EXPECT_TRUE(paced_sender_->SendPackets(packets));
-
- // Check that we get the next burst.
- mock_transport_.AddExpectedSize(kSize1, 3);
-
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
-  // If the pacing task runs too early, make sure we don't send any packets.
- timeout = base::TimeDelta::FromMilliseconds(5);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Check that we get the next burst.
- mock_transport_.AddExpectedSize(kSize1, 3);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Check that we don't get any more packets.
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-}
-
-TEST_F(PacedSenderTest, PaceWithNack) {
-  // Test what happens when we get multiple NACK requests for a fully lost
-  // frame just as we send the first packets of a frame.
- int num_of_packets_in_frame = 9;
- int num_of_packets_in_nack = 9;
-
- PacketList first_frame_packets =
- CreatePacketList(kSize1, num_of_packets_in_frame);
-
- PacketList second_frame_packets =
- CreatePacketList(kSize2, num_of_packets_in_frame);
-
- PacketList nack_packets =
- CreatePacketList(kNackSize, num_of_packets_in_nack);
-
-  // Check that the first burst of the frame goes out on the wire.
- mock_transport_.AddExpectedSize(kSize1, 3);
- EXPECT_TRUE(paced_sender_->SendPackets(first_frame_packets));
-
- // Add first NACK request.
- EXPECT_TRUE(paced_sender_->ResendPackets(nack_packets));
-
- // Check that we get the first NACK burst.
- mock_transport_.AddExpectedSize(kNackSize, 5);
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Add second NACK request.
- EXPECT_TRUE(paced_sender_->ResendPackets(nack_packets));
-
- // Check that we get the next NACK burst.
- mock_transport_.AddExpectedSize(kNackSize, 7);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // End of NACK plus a packet from the oldest frame.
- mock_transport_.AddExpectedSize(kNackSize, 6);
- mock_transport_.AddExpectedSize(kSize1, 1);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Add second frame.
- // Make sure we don't delay the second frame due to the previous packets.
- EXPECT_TRUE(paced_sender_->SendPackets(second_frame_packets));
-
- // Last packets of frame 1 and the first packets of frame 2.
- mock_transport_.AddExpectedSize(kSize1, 5);
- mock_transport_.AddExpectedSize(kSize2, 2);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Last packets of frame 2.
- mock_transport_.AddExpectedSize(kSize2, 7);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // No more packets.
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-}
-
-TEST_F(PacedSenderTest, PaceWith60fps) {
-  // Test pacing when frames are sent at 60 fps: each frame's packets should
-  // be spread over 10 ms bursts without delaying the frames that follow.
- int num_of_packets_in_frame = 9;
-
- PacketList first_frame_packets =
- CreatePacketList(kSize1, num_of_packets_in_frame);
-
- PacketList second_frame_packets =
- CreatePacketList(kSize2, num_of_packets_in_frame);
-
- PacketList third_frame_packets =
- CreatePacketList(kSize3, num_of_packets_in_frame);
-
- PacketList fourth_frame_packets =
- CreatePacketList(kSize4, num_of_packets_in_frame);
-
- base::TimeDelta timeout_10ms = base::TimeDelta::FromMilliseconds(10);
-
-  // Check that the first burst of the frame goes out on the wire.
- mock_transport_.AddExpectedSize(kSize1, 3);
- EXPECT_TRUE(paced_sender_->SendPackets(first_frame_packets));
-
- mock_transport_.AddExpectedSize(kSize1, 3);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
-
- // Add second frame, after 16 ms.
- EXPECT_TRUE(paced_sender_->SendPackets(second_frame_packets));
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(4));
-
- mock_transport_.AddExpectedSize(kSize1, 3);
- mock_transport_.AddExpectedSize(kSize2, 1);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- mock_transport_.AddExpectedSize(kSize2, 4);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(3));
-
- // Add third frame, after 33 ms.
- EXPECT_TRUE(paced_sender_->SendPackets(third_frame_packets));
- mock_transport_.AddExpectedSize(kSize2, 4);
- mock_transport_.AddExpectedSize(kSize3, 1);
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(7));
- task_runner_->RunTasks();
-
- // Add fourth frame, after 50 ms.
- EXPECT_TRUE(paced_sender_->SendPackets(fourth_frame_packets));
-
- mock_transport_.AddExpectedSize(kSize3, 6);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- mock_transport_.AddExpectedSize(kSize3, 2);
- mock_transport_.AddExpectedSize(kSize4, 4);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- mock_transport_.AddExpectedSize(kSize4, 5);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/rtp_sender/mock_rtp_sender.h b/chromium/media/cast/net/rtp_sender/mock_rtp_sender.h
deleted file mode 100644
index 2c3f19f2ae9..00000000000
--- a/chromium/media/cast/net/rtp_sender/mock_rtp_sender.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_RTP_SENDER_MOCK_RTP_SENDER_H_
-#define MEDIA_CAST_RTP_SENDER_MOCK_RTP_SENDER_H_
-
-#include <vector>
-
-#include "media/cast/net/rtp_sender/rtp_sender.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockRtpSender : public RtpSender {
- public:
- MOCK_METHOD2(IncomingEncodedVideoFrame,
- bool(const EncodedVideoFrame& frame, int64 capture_time));
-
- MOCK_METHOD2(IncomingEncodedAudioFrame,
- bool(const EncodedAudioFrame& frame, int64 recorded_time));
-
- MOCK_METHOD3(ResendPacket,
- bool(bool is_audio, uint32 frame_id, uint16 packet_id));
-
- MOCK_METHOD0(RtpStatistics, void());
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_RTP_SENDER_MOCK_RTP_SENDER_H_
-
diff --git a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc
deleted file mode 100644
index 3bd8f900665..00000000000
--- a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
-
-#include <string>
-
-#include "base/logging.h"
-#include "media/cast/cast_defines.h"
-
-namespace media {
-namespace cast {
-
-// Limit the max time stored to avoid 8-bit frame id wrap-around: 256 / 60 fps.
-const int kMaxAllowedTimeStoredMs = 4000;
-
-typedef PacketMap::iterator PacketMapIterator;
-typedef TimeToPacketMap::iterator TimeToPacketIterator;
-
-class StoredPacket {
- public:
- StoredPacket() {
- packet_.reserve(kIpPacketSize);
- }
-
- void Save(const Packet* packet) {
- DCHECK_LT(packet->size(), kIpPacketSize) << "Invalid argument";
- packet_.clear();
- packet_.insert(packet_.begin(), packet->begin(), packet->end());
- }
-
- void GetCopy(PacketList* packets) {
- packets->push_back(Packet(packet_.begin(), packet_.end()));
- }
-
- private:
- Packet packet_;
-};
-
-PacketStorage::PacketStorage(base::TickClock* clock,
- int max_time_stored_ms)
- : clock_(clock) {
- max_time_stored_ = base::TimeDelta::FromMilliseconds(max_time_stored_ms);
- DCHECK_LE(max_time_stored_ms, kMaxAllowedTimeStoredMs) << "Invalid argument";
-}
-
-PacketStorage::~PacketStorage() {
- time_to_packet_map_.clear();
-
- PacketMapIterator store_it = stored_packets_.begin();
- for (; store_it != stored_packets_.end();
- store_it = stored_packets_.begin()) {
- stored_packets_.erase(store_it);
- }
- while (!free_packets_.empty()) {
- free_packets_.pop_front();
- }
-}
-
-void PacketStorage::CleanupOldPackets(base::TimeTicks now) {
- TimeToPacketIterator time_it = time_to_packet_map_.begin();
-
- // Check max size.
- while (time_to_packet_map_.size() >= kMaxStoredPackets) {
- PacketMapIterator store_it = stored_packets_.find(time_it->second);
-
- // We should always find the packet.
- DCHECK(store_it != stored_packets_.end()) << "Invalid state";
- time_to_packet_map_.erase(time_it);
- // Save the pointer.
-    linked_ptr<StoredPacket> stored_packet = store_it->second;
-    stored_packets_.erase(store_it);
-    // Add this packet to the free list for later re-use.
-    free_packets_.push_back(stored_packet);
- time_it = time_to_packet_map_.begin();
- }
-
- // Time out old packets.
- while (time_it != time_to_packet_map_.end()) {
- if (now < time_it->first + max_time_stored_) {
- break;
- }
- // Packet too old.
- PacketMapIterator store_it = stored_packets_.find(time_it->second);
-
- // We should always find the packet.
- DCHECK(store_it != stored_packets_.end()) << "Invalid state";
- time_to_packet_map_.erase(time_it);
- // Save the pointer.
-    linked_ptr<StoredPacket> stored_packet = store_it->second;
-    stored_packets_.erase(store_it);
-    // Add this packet to the free list for later re-use.
-    free_packets_.push_back(stored_packet);
- time_it = time_to_packet_map_.begin();
- }
-}
-
-void PacketStorage::StorePacket(uint32 frame_id, uint16 packet_id,
- const Packet* packet) {
- base::TimeTicks now = clock_->NowTicks();
- CleanupOldPackets(now);
-
- // Internally we only use the 8 LSB of the frame id.
- uint32 index = ((0xff & frame_id) << 16) + packet_id;
- PacketMapIterator it = stored_packets_.find(index);
- if (it != stored_packets_.end()) {
- // We have already saved this.
- DCHECK(false) << "Invalid state";
- return;
- }
- linked_ptr<StoredPacket> stored_packet;
- if (free_packets_.empty()) {
-    // No previously allocated packets; allocate one.
-    stored_packet.reset(new StoredPacket());
-  } else {
-    // Re-use a previously allocated packet.
- stored_packet = free_packets_.front();
- free_packets_.pop_front();
- }
- stored_packet->Save(packet);
- stored_packets_[index] = stored_packet;
- time_to_packet_map_.insert(std::make_pair(now, index));
-}
-
-PacketList PacketStorage::GetPackets(
- const MissingFramesAndPacketsMap& missing_frames_and_packets) {
- PacketList packets_to_resend;
-
- // Iterate over all frames in the list.
- for (MissingFramesAndPacketsMap::const_iterator it =
- missing_frames_and_packets.begin();
- it != missing_frames_and_packets.end(); ++it) {
- uint8 frame_id = it->first;
- const PacketIdSet& packets_set = it->second;
- bool success = false;
-
- if (packets_set.empty()) {
- VLOG(1) << "Missing all packets in frame " << static_cast<int>(frame_id);
-
- uint16 packet_id = 0;
- do {
- // Get packet from storage.
- success = GetPacket(frame_id, packet_id, &packets_to_resend);
- ++packet_id;
- } while (success);
- } else {
- // Iterate over all of the packets in the frame.
- for (PacketIdSet::const_iterator set_it = packets_set.begin();
- set_it != packets_set.end(); ++set_it) {
- GetPacket(frame_id, *set_it, &packets_to_resend);
- }
- }
- }
- return packets_to_resend;
-}
-
-bool PacketStorage::GetPacket(uint8 frame_id,
- uint16 packet_id,
- PacketList* packets) {
- // Internally we only use the 8 LSB of the frame id.
- uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
- PacketMapIterator it = stored_packets_.find(index);
- if (it == stored_packets_.end()) {
- return false;
- }
- it->second->GetCopy(packets);
- VLOG(1) << "Resend " << static_cast<int>(frame_id)
- << ":" << packet_id;
- return true;
-}
-
-} // namespace cast
-} // namespace media
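StorePacket() above keys each packet by packing the low 8 bits of the frame id
with the 16-bit packet id into a single 32-bit map index. A small sketch of
the packing (helper name hypothetical):

    // Index layout: 0x00FFPPPP, FF = frame_id & 0xff, PPPP = packet_id.
    uint32 PackStorageIndex(uint32 frame_id, uint16 packet_id) {
      return ((0xff & frame_id) << 16) + packet_id;
    }
    // e.g. frame 0x1fe, packet 7 -> (0xfe << 16) + 7 = 0x00fe0007.
    // GetPacket() takes a uint8 frame id, so the same truncation happens
    // implicitly on lookup.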
diff --git a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp
deleted file mode 100644
index f691d9e9b69..00000000000
--- a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'packet_storage',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'packet_storage.h',
- 'packet_storage.cc',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- ],
- },
- ],
-}
-
diff --git a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h
deleted file mode 100644
index 34933ef5f6d..00000000000
--- a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
-#define MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
-
-#include <list>
-#include <map>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/linked_ptr.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/cast_config.h"
-
-namespace media {
-namespace cast {
-
-class StoredPacket;
-typedef std::map<uint32, linked_ptr<StoredPacket> > PacketMap;
-typedef std::multimap<base::TimeTicks, uint32> TimeToPacketMap;
-
-class PacketStorage {
- public:
- static const int kMaxStoredPackets = 1000;
-
- PacketStorage(base::TickClock* clock, int max_time_stored_ms);
- virtual ~PacketStorage();
-
- void StorePacket(uint32 frame_id, uint16 packet_id, const Packet* packet);
-
- // Copies all missing packets into the packet list.
- PacketList GetPackets(
- const MissingFramesAndPacketsMap& missing_frames_and_packets);
-
- // Copies packet into the packet list.
- bool GetPacket(uint8 frame_id, uint16 packet_id, PacketList* packets);
-
- private:
- void CleanupOldPackets(base::TimeTicks now);
-
- base::TickClock* const clock_; // Not owned by this class.
- base::TimeDelta max_time_stored_;
- PacketMap stored_packets_;
- TimeToPacketMap time_to_packet_map_;
- std::list<linked_ptr<StoredPacket> > free_packets_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
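A minimal usage sketch of the class above, following the pattern of the unit
test below (clock setup abbreviated, values hypothetical):

    base::SimpleTestTickClock clock;
    media::cast::PacketStorage storage(&clock, 500 /* max_time_stored_ms */);

    media::cast::Packet packet(100, 123);  // 100 bytes with the value 123.
    storage.StorePacket(1 /* frame_id */, 0 /* packet_id */, &packet);

    media::cast::PacketList out;
    bool found = storage.GetPacket(1, 0, &out);
    // found == true until the 500 ms window elapses, or until roughly
    // kMaxStoredPackets newer packets evict the entry.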
diff --git a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc
deleted file mode 100644
index 049d3ae29b6..00000000000
--- a/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
-
-#include <vector>
-
-#include "base/test/simple_test_tick_clock.h"
-#include "base/time/time.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-static const int kMaxDeltaStoredMs = 500;
-static const base::TimeDelta kDeltaBetweenFrames =
- base::TimeDelta::FromMilliseconds(33);
-
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-
-class PacketStorageTest : public ::testing::Test {
- protected:
- PacketStorageTest() : packet_storage_(&testing_clock_, kMaxDeltaStoredMs) {
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- }
-
- base::SimpleTestTickClock testing_clock_;
- PacketStorage packet_storage_;
-};
-
-TEST_F(PacketStorageTest, TimeOut) {
- Packet test_123(100, 123); // 100 insertions of the value 123.
- PacketList packets;
- for (uint32 frame_id = 0; frame_id < 30; ++frame_id) {
- for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- packet_storage_.StorePacket(frame_id, packet_id, &test_123);
- }
- testing_clock_.Advance(kDeltaBetweenFrames);
- }
-
-  // All packets belonging to the first 14 frames are expected to have expired.
- for (uint32 frame_id = 0; frame_id < 14; ++frame_id) {
- for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- Packet packet;
- EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
- }
- }
-  // All packets belonging to the remaining 16 frames are expected to be valid.
- for (uint32 frame_id = 14; frame_id < 30; ++frame_id) {
- for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
- EXPECT_TRUE(packets.front() == test_123);
- }
- }
-}
-
-TEST_F(PacketStorageTest, MaxNumberOfPackets) {
- Packet test_123(100, 123); // 100 insertions of the value 123.
- PacketList packets;
-
- uint32 frame_id = 0;
- for (uint16 packet_id = 0; packet_id <= PacketStorage::kMaxStoredPackets;
- ++packet_id) {
- packet_storage_.StorePacket(frame_id, packet_id, &test_123);
- }
- Packet packet;
- uint16 packet_id = 0;
- EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
-
- ++packet_id;
- for (; packet_id <= PacketStorage::kMaxStoredPackets; ++packet_id) {
- EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
- EXPECT_TRUE(packets.back() == test_123);
- }
-}
-
-TEST_F(PacketStorageTest, PacketContent) {
- Packet test_123(100, 123); // 100 insertions of the value 123.
- Packet test_234(200, 234); // 200 insertions of the value 234.
- PacketList packets;
-
- for (uint32 frame_id = 0; frame_id < 10; ++frame_id) {
- for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- // Every other packet.
- if (packet_id % 2 == 0) {
- packet_storage_.StorePacket(frame_id, packet_id, &test_123);
- } else {
- packet_storage_.StorePacket(frame_id, packet_id, &test_234);
- }
- }
- testing_clock_.Advance(kDeltaBetweenFrames);
- }
- for (uint32 frame_id = 0; frame_id < 10; ++frame_id) {
- for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
- // Every other packet.
- if (packet_id % 2 == 0) {
- EXPECT_TRUE(packets.back() == test_123);
- } else {
- EXPECT_TRUE(packets.back() == test_234);
- }
- }
- }
-}
-
-} // namespace cast
-} // namespace media
-
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc
deleted file mode 100644
index 8a50f8a8aad..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h"
-
-#include "base/logging.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "net/base/big_endian.h"
-
-namespace media {
-namespace cast {
-
-static const uint16 kCommonRtpHeaderLength = 12;
-static const uint16 kCastRtpHeaderLength = 7;
-static const uint8 kCastKeyFrameBitMask = 0x80;
-static const uint8 kCastReferenceFrameIdBitMask = 0x40;
-
-RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
- PacketStorage* packet_storage,
- RtpPacketizerConfig rtp_packetizer_config)
- : config_(rtp_packetizer_config),
- transport_(transport),
- packet_storage_(packet_storage),
- sequence_number_(config_.sequence_number),
- rtp_timestamp_(config_.rtp_timestamp),
- packet_id_(0),
- send_packets_count_(0),
- send_octet_count_(0) {
- DCHECK(transport) << "Invalid argument";
-}
-
-RtpPacketizer::~RtpPacketizer() {}
-
-void RtpPacketizer::IncomingEncodedVideoFrame(
- const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
- DCHECK(!config_.audio) << "Invalid state";
- if (config_.audio) return;
-
-  // Timestamp is in 90 kHz units for video.
- rtp_timestamp_ = GetVideoRtpTimestamp(capture_time);
- time_last_sent_rtp_timestamp_ = capture_time;
-
- Cast(video_frame->key_frame,
- video_frame->frame_id,
- video_frame->last_referenced_frame_id,
- rtp_timestamp_,
- video_frame->data);
-}
-
-void RtpPacketizer::IncomingEncodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
- DCHECK(config_.audio) << "Invalid state";
- if (!config_.audio) return;
-
- rtp_timestamp_ += audio_frame->samples; // Timestamp is in samples for audio.
- time_last_sent_rtp_timestamp_ = recorded_time;
- Cast(true, audio_frame->frame_id, 0, rtp_timestamp_, audio_frame->data);
-}
-
-uint16 RtpPacketizer::NextSequenceNumber() {
- ++sequence_number_;
- return sequence_number_ - 1;
-}
-
-bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
- uint32* rtp_timestamp) const {
- if (time_last_sent_rtp_timestamp_.is_null()) return false;
-
- *time_sent = time_last_sent_rtp_timestamp_;
- *rtp_timestamp = rtp_timestamp_;
- return true;
-}
-
-// TODO(mikhal): Switch to passing data by const reference.
-void RtpPacketizer::Cast(bool is_key,
- uint32 frame_id,
- uint32 reference_frame_id,
- uint32 timestamp,
- const std::string& data) {
- uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
- uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
-
- // Split the payload evenly (round number up).
- size_t num_packets = (data.size() + max_length) / max_length;
- size_t payload_length = (data.size() + num_packets) / num_packets;
- DCHECK_LE(payload_length, max_length) << "Invalid argument";
-
- PacketList packets;
-
- size_t remaining_size = data.size();
- std::string::const_iterator data_iter = data.begin();
- while (remaining_size > 0) {
- Packet packet;
-
- if (remaining_size < payload_length) {
- payload_length = remaining_size;
- }
- remaining_size -= payload_length;
- BuildCommonRTPheader(&packet, remaining_size == 0, timestamp);
-
- // Build Cast header.
- packet.push_back(
- (is_key ? kCastKeyFrameBitMask : 0) | kCastReferenceFrameIdBitMask);
- packet.push_back(frame_id);
- size_t start_size = packet.size();
- packet.resize(start_size + 4);
- net::BigEndianWriter big_endian_writer(&(packet[start_size]), 4);
- big_endian_writer.WriteU16(packet_id_);
- big_endian_writer.WriteU16(static_cast<uint16>(num_packets - 1));
- packet.push_back(static_cast<uint8>(reference_frame_id));
-
- // Copy payload data.
- packet.insert(packet.end(), data_iter, data_iter + payload_length);
-
- // Store packet.
- packet_storage_->StorePacket(frame_id, packet_id_, &packet);
- ++packet_id_;
- data_iter += payload_length;
-
- // Update stats.
- ++send_packets_count_;
- send_octet_count_ += payload_length;
- packets.push_back(packet);
- }
- DCHECK(packet_id_ == num_packets) << "Invalid state";
-
- // Send to network.
- transport_->SendPackets(packets);
-
- // Prepare for next frame.
- packet_id_ = 0;
-}
-
-void RtpPacketizer::BuildCommonRTPheader(
- Packet* packet, bool marker_bit, uint32 time_stamp) {
- packet->push_back(0x80);
- packet->push_back(static_cast<uint8>(config_.payload_type) |
- (marker_bit ? kRtpMarkerBitMask : 0));
- size_t start_size = packet->size();
- packet->resize(start_size + 10);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 10);
- big_endian_writer.WriteU16(sequence_number_);
- big_endian_writer.WriteU32(time_stamp);
- big_endian_writer.WriteU32(config_.ssrc);
- ++sequence_number_;
-}
-
-} // namespace cast
-} // namespace media
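Cast() above sizes packets with two integer divisions: num_packets rounds up,
then payload_length spreads the data nearly evenly. Working the arithmetic
with the constants from the unit test below (1500-byte max payload, 5000-byte
frame):

    // rtp_header_length = 12 (common RTP) + 7 (Cast) = 19
    // max_length        = 1500 - 19 - 1 = 1480
    // num_packets       = (5000 + 1480) / 1480 = 4
    // payload_length    = (5000 + 4) / 4 = 1251   (<= max_length, as DCHECKed)
    // Packets carry 1251, 1251, 1251 and 1247 bytes; the final iteration
    // clamps payload_length to the 1247 bytes remaining, and max_packet_id
    // on the wire is num_packets - 1 = 3.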
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp
deleted file mode 100644
index d75d8a66911..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_rtp_packetizer',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc/',
- ],
- 'sources': [
- 'rtp_packetizer.cc',
- 'rtp_packetizer.h',
- 'rtp_packetizer_config.cc',
- 'rtp_packetizer_config.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/net/net.gyp:net',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h
deleted file mode 100644
index 9f9be5fe163..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
-#define MEDIA_CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
-
-#include <cmath>
-#include <list>
-#include <map>
-
-#include "base/time/time.h"
-#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
-
-namespace media {
-namespace cast {
-
-class PacedPacketSender;
-
-// This object is only called from the main cast thread.
-// This class breaks encoded audio and video frames into packets and adds an
-// RTP header to each packet.
-class RtpPacketizer {
- public:
- RtpPacketizer(PacedPacketSender* transport,
- PacketStorage* packet_storage,
- RtpPacketizerConfig rtp_packetizer_config);
- ~RtpPacketizer();
-
-  // The video_frame object's ownership is handled by the main cast thread.
-  void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
-                                 const base::TimeTicks& capture_time);
-
-  // The audio_frame object's ownership is handled by the main cast thread.
- void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
-
- bool LastSentTimestamp(base::TimeTicks* time_sent,
- uint32* rtp_timestamp) const;
-
- // Return the next sequence number, and increment by one. Enables unique
- // incremental sequence numbers for every packet (including retransmissions).
- uint16 NextSequenceNumber();
-
- int send_packets_count() { return send_packets_count_; }
-
- size_t send_octet_count() { return send_octet_count_; }
-
- private:
- void Cast(bool is_key, uint32 frame_id, uint32 reference_frame_id,
- uint32 timestamp, const std::string& data);
-
- void BuildCommonRTPheader(std::vector<uint8>* packet, bool marker_bit,
- uint32 time_stamp);
-
- RtpPacketizerConfig config_;
- PacedPacketSender* transport_;
- PacketStorage* packet_storage_;
-
- base::TimeTicks time_last_sent_rtp_timestamp_;
- uint16 sequence_number_;
- uint32 rtp_timestamp_;
- uint16 packet_id_;
-
- int send_packets_count_;
- size_t send_octet_count_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
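For reference, the byte layout produced by the packetizer above, summarized
from BuildCommonRTPheader() and Cast() in the .cc file (offsets in bytes):

    // Common RTP header (12 bytes, big-endian):
    //   [0]  0x80                        V=2, no padding/extension/CSRCs
    //   [1]  marker bit | payload type   marker set on a frame's last packet
    //   [2]  sequence number             uint16
    //   [4]  RTP timestamp               uint32
    //   [8]  SSRC                        uint32
    // Cast header (7 bytes):
    //   [12] key-frame bit (0x80) | reference-frame-id bit (0x40)
    //   [13] frame id                    uint8
    //   [14] packet id                   uint16
    //   [16] max packet id               uint16 (num_packets - 1)
    //   [18] reference frame id          uint8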
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc
deleted file mode 100644
index 5fe3a92b61b..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
-
-namespace media {
-namespace cast {
-
-RtpPacketizerConfig::RtpPacketizerConfig()
- : ssrc(0),
-      max_payload_length(kIpPacketSize - 28),  // 28 = IPv4 (20) + UDP (8).
- audio(false),
- frequency(8000),
- payload_type(-1),
- sequence_number(0),
- rtp_timestamp(0) {
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
deleted file mode 100644
index 1a2549e66b2..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
-#define CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
-
-#include "media/cast/cast_config.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-
-namespace media {
-namespace cast {
-
-struct RtpPacketizerConfig {
- RtpPacketizerConfig();
-
- // General.
- bool audio;
- int payload_type;
- uint16 max_payload_length;
- uint16 sequence_number;
- uint32 rtp_timestamp;
- int frequency;
-
- // SSRC.
- unsigned int ssrc;
-
- // Video.
- VideoCodec video_codec;
-
- // Audio.
- uint8 channels;
- AudioCodec audio_codec;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
deleted file mode 100644
index defdecf7584..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h"
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/net/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-static const int kPayload = 127;
-static const uint32 kTimestampMs = 10;
-static const uint16 kSeqNum = 33;
-static const int kMaxPacketLength = 1500;
-static const int kSsrc = 0x12345;
-static const unsigned int kFrameSize = 5000;
-static const int kMaxPacketStorageTimeMs = 300;
-
-class TestRtpPacketTransport : public PacedPacketSender {
- public:
- explicit TestRtpPacketTransport(RtpPacketizerConfig config)
- : config_(config),
- sequence_number_(kSeqNum),
- packets_sent_(0),
- expected_number_of_packets_(0),
- expected_packet_id_(0),
- expected_frame_id_(0) {}
-
- void VerifyRtpHeader(const RtpCastTestHeader& rtp_header) {
- VerifyCommonRtpHeader(rtp_header);
- VerifyCastRtpHeader(rtp_header);
- }
-
- void VerifyCommonRtpHeader(const RtpCastTestHeader& rtp_header) {
- EXPECT_EQ(expected_number_of_packets_ == packets_sent_,
- rtp_header.marker);
- EXPECT_EQ(kPayload, rtp_header.payload_type);
- EXPECT_EQ(sequence_number_, rtp_header.sequence_number);
- EXPECT_EQ(kTimestampMs * 90, rtp_header.rtp_timestamp);
- EXPECT_EQ(config_.ssrc, rtp_header.ssrc);
- EXPECT_EQ(0, rtp_header.num_csrcs);
- }
-
- void VerifyCastRtpHeader(const RtpCastTestHeader& rtp_header) {
- EXPECT_FALSE(rtp_header.is_key_frame);
- EXPECT_EQ(expected_frame_id_, rtp_header.frame_id);
- EXPECT_EQ(expected_packet_id_, rtp_header.packet_id);
- EXPECT_EQ(expected_number_of_packets_ - 1, rtp_header.max_packet_id);
- EXPECT_TRUE(rtp_header.is_reference);
- EXPECT_EQ(expected_frame_id_ - 1u, rtp_header.reference_frame_id);
- }
-
- virtual bool SendPackets(const PacketList& packets) OVERRIDE {
- EXPECT_EQ(expected_number_of_packets_, static_cast<int>(packets.size()));
- PacketList::const_iterator it = packets.begin();
- for (; it != packets.end(); ++it) {
- ++packets_sent_;
- RtpHeaderParser parser(it->data(), it->size());
- RtpCastTestHeader rtp_header;
- parser.Parse(&rtp_header);
- VerifyRtpHeader(rtp_header);
- ++sequence_number_;
- ++expected_packet_id_;
- }
- return true;
- }
-
- virtual bool ResendPackets(const PacketList& packets) OVERRIDE {
- EXPECT_TRUE(false);
- return false;
- }
-
- virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
- EXPECT_TRUE(false);
- return false;
- }
-
- void SetExpectedNumberOfPackets(int num) {
- expected_number_of_packets_ = num;
- }
-
- RtpPacketizerConfig config_;
- uint32 sequence_number_;
- int packets_sent_;
- int expected_number_of_packets_;
- // Assuming packets arrive in sequence.
- int expected_packet_id_;
- uint32 expected_frame_id_;
-};
-
-class RtpPacketizerTest : public ::testing::Test {
- protected:
- RtpPacketizerTest()
-      : video_frame_(),
- packet_storage_(&testing_clock_, kMaxPacketStorageTimeMs) {
- config_.sequence_number = kSeqNum;
- config_.ssrc = kSsrc;
- config_.payload_type = kPayload;
- config_.max_payload_length = kMaxPacketLength;
- transport_.reset(new TestRtpPacketTransport(config_));
- rtp_packetizer_.reset(
- new RtpPacketizer(transport_.get(), &packet_storage_, config_));
- }
-
- virtual ~RtpPacketizerTest() {}
-
- virtual void SetUp() {
- video_frame_.key_frame = false;
- video_frame_.frame_id = 0;
- video_frame_.last_referenced_frame_id = kStartFrameId;
- video_frame_.data.assign(kFrameSize, 123);
- }
-
- base::SimpleTestTickClock testing_clock_;
- scoped_ptr<RtpPacketizer> rtp_packetizer_;
- RtpPacketizerConfig config_;
- scoped_ptr<TestRtpPacketTransport> transport_;
- EncodedVideoFrame video_frame_;
- PacketStorage packet_storage_;
-};
-
-TEST_F(RtpPacketizerTest, SendStandardPackets) {
- int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
- transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
-
- base::TimeTicks time;
- time += base::TimeDelta::FromMilliseconds(kTimestampMs);
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
-}
-
-TEST_F(RtpPacketizerTest, Stats) {
- EXPECT_FALSE(rtp_packetizer_->send_packets_count());
- EXPECT_FALSE(rtp_packetizer_->send_octet_count());
- // Insert packets at varying lengths.
- int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
- transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,
- testing_clock_.NowTicks());
- EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packets_count());
- EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/rtp_sender/rtp_sender.cc b/chromium/media/cast/net/rtp_sender/rtp_sender.cc
deleted file mode 100644
index 2b017bc1784..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_sender.cc
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp_sender/rtp_sender.h"
-
-#include "base/logging.h"
-#include "base/rand_util.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "net/base/big_endian.h"
-
-namespace media {
-namespace cast {
-
-RtpSender::RtpSender(scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig* audio_config,
- const VideoSenderConfig* video_config,
- PacedPacketSender* transport)
- : cast_environment_(cast_environment),
- config_(),
- transport_(transport) {
- // Store generic cast config and create packetizer config.
- DCHECK(audio_config || video_config) << "Invalid argument";
- if (audio_config) {
- storage_.reset(new PacketStorage(cast_environment->Clock(),
- audio_config->rtp_history_ms));
- config_.audio = true;
- config_.ssrc = audio_config->sender_ssrc;
- config_.payload_type = audio_config->rtp_payload_type;
- config_.frequency = audio_config->frequency;
- config_.audio_codec = audio_config->codec;
- } else {
- storage_.reset(new PacketStorage(cast_environment->Clock(),
- video_config->rtp_history_ms));
- config_.audio = false;
- config_.ssrc = video_config->sender_ssrc;
- config_.payload_type = video_config->rtp_payload_type;
- config_.frequency = kVideoFrequency;
- config_.video_codec = video_config->codec;
- }
- // Randomly set start values.
- config_.sequence_number = base::RandInt(0, 65535);
- config_.rtp_timestamp = base::RandInt(0, 65535);
- config_.rtp_timestamp += base::RandInt(0, 65535) << 16;
- packetizer_.reset(new RtpPacketizer(transport, storage_.get(), config_));
-}
-
-RtpSender::~RtpSender() {}
-
-void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
- packetizer_->IncomingEncodedVideoFrame(video_frame, capture_time);
-}
-
-void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
- packetizer_->IncomingEncodedAudioFrame(audio_frame, recorded_time);
-}
-
-void RtpSender::ResendPackets(
- const MissingFramesAndPacketsMap& missing_frames_and_packets) {
- // Iterate over all frames in the list.
- for (MissingFramesAndPacketsMap::const_iterator it =
- missing_frames_and_packets.begin();
- it != missing_frames_and_packets.end(); ++it) {
- PacketList packets_to_resend;
- uint8 frame_id = it->first;
- const PacketIdSet& packets_set = it->second;
- bool success = false;
-
- if (packets_set.empty()) {
- VLOG(1) << "Missing all packets in frame " << static_cast<int>(frame_id);
-
- uint16 packet_id = 0;
- do {
- // Get packet from storage.
- success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
-
- // Resend packet to the network.
- if (success) {
- VLOG(1) << "Resend " << static_cast<int>(frame_id)
- << ":" << packet_id;
- // Set a unique incremental sequence number for every packet.
- Packet& packet = packets_to_resend.back();
- UpdateSequenceNumber(&packet);
-          // Move on to the next packet in the frame.
- ++packet_id;
- }
- } while (success);
- } else {
- // Iterate over all of the packets in the frame.
- for (PacketIdSet::const_iterator set_it = packets_set.begin();
- set_it != packets_set.end(); ++set_it) {
- uint16 packet_id = *set_it;
- success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
-
- // Resend packet to the network.
- if (success) {
- VLOG(1) << "Resend " << static_cast<int>(frame_id)
- << ":" << packet_id;
- Packet& packet = packets_to_resend.back();
- UpdateSequenceNumber(&packet);
- }
- }
- }
- transport_->ResendPackets(packets_to_resend);
- }
-}
-
-void RtpSender::UpdateSequenceNumber(Packet* packet) {
- uint16 new_sequence_number = packetizer_->NextSequenceNumber();
- int index = 2;
-  // Write big-endian, matching BuildCommonRTPheader().
-  (*packet)[index] = static_cast<uint8>(new_sequence_number >> 8);
-  (*packet)[index + 1] = static_cast<uint8>(new_sequence_number);
-}
-
-void RtpSender::RtpStatistics(const base::TimeTicks& now,
- RtcpSenderInfo* sender_info) {
- // The timestamp of this Rtcp packet should be estimated as the timestamp of
- // the frame being captured at this moment. We are calculating that
- // timestamp as the last frame's timestamp + the time since the last frame
- // was captured.
- uint32 ntp_seconds = 0;
- uint32 ntp_fraction = 0;
- ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
- sender_info->ntp_seconds = ntp_seconds;
- sender_info->ntp_fraction = ntp_fraction;
-
- base::TimeTicks time_sent;
- uint32 rtp_timestamp;
- if (packetizer_->LastSentTimestamp(&time_sent, &rtp_timestamp)) {
- base::TimeDelta time_since_last_send = now - time_sent;
- sender_info->rtp_timestamp = rtp_timestamp +
- time_since_last_send.InMilliseconds() * (config_.frequency / 1000);
- } else {
- sender_info->rtp_timestamp = 0;
- }
- sender_info->send_packet_count = packetizer_->send_packets_count();
- sender_info->send_octet_count = packetizer_->send_octet_count();
-}
-
-} // namespace cast
-} // namespace media
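RtpStatistics() above extrapolates the reported RTP timestamp from the last
frame actually sent. A worked example of the formula (values hypothetical):

    // Video runs at config_.frequency = 90000 (the 90 kHz RTP clock).
    // Last sent rtp_timestamp = 1000, and 33 ms have elapsed since:
    //   rtp_timestamp = 1000 + 33 * (90000 / 1000)
    //                 = 1000 + 33 * 90 = 3970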
diff --git a/chromium/media/cast/net/rtp_sender/rtp_sender.gyp b/chromium/media/cast/net/rtp_sender/rtp_sender.gyp
deleted file mode 100644
index f689b99b149..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_sender.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_rtp_sender',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc/',
- ],
- 'sources': [
- 'rtp_sender.cc',
- 'rtp_sender.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- 'packet_storage/packet_storage.gyp:*',
- 'rtp_packetizer/rtp_packetizer.gyp:*',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/net/rtp_sender/rtp_sender.h b/chromium/media/cast/net/rtp_sender/rtp_sender.h
deleted file mode 100644
index 038165992db..00000000000
--- a/chromium/media/cast/net/rtp_sender/rtp_sender.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains the interface to the cast RTP sender.
-
-#ifndef MEDIA_CAST_NET_RTP_SENDER_RTP_SENDER_H_
-#define MEDIA_CAST_NET_RTP_SENDER_RTP_SENDER_H_
-
-#include <map>
-#include <set>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h"
-#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
-
-namespace media {
-namespace cast {
-
-class PacedPacketSender;
-struct RtcpSenderInfo;
-
-// This object is only called from the main cast thread.
-// This class handles splitting encoded audio and video frames into packets,
-// adding an RTP header to each packet. The sent packets are stored until they
-// are acknowledged by the remote peer or they time out.
-class RtpSender {
- public:
- RtpSender(scoped_refptr<CastEnvironment> cast_environment,
- const AudioSenderConfig* audio_config,
- const VideoSenderConfig* video_config,
- PacedPacketSender* transport);
-
- ~RtpSender();
-
-  // The video_frame object's ownership is handled by the main cast thread.
-  void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
-                                 const base::TimeTicks& capture_time);
-
-  // The audio_frame object's ownership is handled by the main cast thread.
- void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
-
- void ResendPackets(const MissingFramesAndPacketsMap& missing_packets);
-
- void RtpStatistics(const base::TimeTicks& now, RtcpSenderInfo* sender_info);
-
- private:
- void UpdateSequenceNumber(std::vector<uint8>* packet);
-
- scoped_refptr<CastEnvironment> cast_environment_;
- RtpPacketizerConfig config_;
- scoped_ptr<RtpPacketizer> packetizer_;
- scoped_ptr<PacketStorage> storage_;
- PacedPacketSender* transport_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTP_SENDER_RTP_SENDER_H_
diff --git a/chromium/media/cast/receiver/audio_decoder.cc b/chromium/media/cast/receiver/audio_decoder.cc
new file mode 100644
index 00000000000..a4d18968355
--- /dev/null
+++ b/chromium/media/cast/receiver/audio_decoder.cc
@@ -0,0 +1,246 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/receiver/audio_decoder.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/sys_byteorder.h"
+#include "media/cast/cast_defines.h"
+#include "third_party/opus/src/include/opus.h"
+
+namespace media {
+namespace cast {
+
+// Base class that handles the common problem of detecting dropped frames, and
+// then invoking the Decode() method implemented by the subclasses to convert
+// the encoded payload data into usable audio data.
+class AudioDecoder::ImplBase
+ : public base::RefCountedThreadSafe<AudioDecoder::ImplBase> {
+ public:
+ ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
+ transport::AudioCodec codec,
+ int num_channels,
+ int sampling_rate)
+ : cast_environment_(cast_environment),
+ codec_(codec),
+ num_channels_(num_channels),
+ cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
+ seen_first_frame_(false) {
+ if (num_channels_ <= 0 || sampling_rate <= 0 || sampling_rate % 100 != 0)
+ cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
+ }
+
+ CastInitializationStatus InitializationResult() const {
+ return cast_initialization_status_;
+ }
+
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ const DecodeFrameCallback& callback) {
+ DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
+
+ COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
+ size_of_frame_id_types_do_not_match);
+ bool is_continuous = true;
+ if (seen_first_frame_) {
+ const uint32 frames_ahead = encoded_frame->frame_id - last_frame_id_;
+ if (frames_ahead > 1) {
+ RecoverBecauseFramesWereDropped();
+ is_continuous = false;
+ }
+ } else {
+ seen_first_frame_ = true;
+ }
+ last_frame_id_ = encoded_frame->frame_id;
+
+ scoped_ptr<AudioBus> decoded_audio = Decode(
+ encoded_frame->mutable_bytes(),
+ static_cast<int>(encoded_frame->data.size()));
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(callback,
+ base::Passed(&decoded_audio),
+ is_continuous));
+ }
+
+ protected:
+ friend class base::RefCountedThreadSafe<ImplBase>;
+ virtual ~ImplBase() {}
+
+ virtual void RecoverBecauseFramesWereDropped() {}
+
+ // Note: Implementation of Decode() is allowed to mutate |data|.
+ virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) = 0;
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ const transport::AudioCodec codec_;
+ const int num_channels_;
+
+ // Subclass' ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
+ CastInitializationStatus cast_initialization_status_;
+
+ private:
+ bool seen_first_frame_;
+ uint32 last_frame_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImplBase);
+};
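
The dropped-frame check above leans on unsigned 32-bit wrap-around: subtracting the previous frame id from the new one yields the forward distance even across the 2^32 boundary. A minimal sketch of that arithmetic; IsContinuous() is an illustrative helper, not part of this patch.

#include <stdint.h>

// Wrap-around-safe continuity test, mirroring ImplBase::DecodeFrame().
bool IsContinuous(uint32_t last_frame_id, uint32_t new_frame_id) {
  // Unsigned subtraction yields the forward distance modulo 2^32, so a
  // frame id wrapping from 0xFFFFFFFF to 0x00000000 still gives 1.
  const uint32_t frames_ahead = new_frame_id - last_frame_id;
  return frames_ahead == 1;  // Anything greater means frames were dropped.
}

// IsContinuous(0xFFFFFFFFu, 0u) == true (wrap-around).
// IsContinuous(5u, 8u) == false (frames 6 and 7 were dropped).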
+
+class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
+ public:
+ OpusImpl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate)
+ : ImplBase(cast_environment,
+ transport::kOpus,
+ num_channels,
+ sampling_rate),
+ decoder_memory_(new uint8[opus_decoder_get_size(num_channels)]),
+ opus_decoder_(reinterpret_cast<OpusDecoder*>(decoder_memory_.get())),
+ max_samples_per_frame_(
+ kOpusMaxFrameDurationMillis * sampling_rate / 1000),
+ buffer_(new float[max_samples_per_frame_ * num_channels]) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ return;
+ if (opus_decoder_init(opus_decoder_, sampling_rate, num_channels) !=
+ OPUS_OK) {
+ ImplBase::cast_initialization_status_ =
+ STATUS_INVALID_AUDIO_CONFIGURATION;
+ return;
+ }
+ ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ }
+
+ private:
+ virtual ~OpusImpl() {}
+
+ virtual void RecoverBecauseFramesWereDropped() OVERRIDE {
+ // Passing NULL for the input data notifies the decoder of frame loss.
+ const opus_int32 result =
+ opus_decode_float(
+ opus_decoder_, NULL, 0, buffer_.get(), max_samples_per_frame_, 0);
+ DCHECK_GE(result, 0);
+ }
+
+ virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) OVERRIDE {
+ scoped_ptr<AudioBus> audio_bus;
+ const opus_int32 num_samples_decoded = opus_decode_float(
+ opus_decoder_, data, len, buffer_.get(), max_samples_per_frame_, 0);
+ if (num_samples_decoded <= 0)
+ return audio_bus.Pass(); // Decode error.
+
+ // Copy interleaved samples from |buffer_| into a new AudioBus (where
+ // samples are stored in planar format, for each channel).
+ audio_bus = AudioBus::Create(num_channels_, num_samples_decoded).Pass();
+ // TODO(miu): This should be moved into AudioBus::FromInterleaved().
+ for (int ch = 0; ch < num_channels_; ++ch) {
+ const float* src = buffer_.get() + ch;
+ const float* const src_end = src + num_samples_decoded * num_channels_;
+ float* dest = audio_bus->channel(ch);
+ for (; src < src_end; src += num_channels_, ++dest)
+ *dest = *src;
+ }
+ return audio_bus.Pass();
+ }
+
+ const scoped_ptr<uint8[]> decoder_memory_;
+ OpusDecoder* const opus_decoder_;
+ const int max_samples_per_frame_;
+ const scoped_ptr<float[]> buffer_;
+
+ // According to documentation in third_party/opus/src/include/opus.h, we must
+ // provide enough space in |buffer_| to contain 120ms of samples. At 48 kHz,
+ // then, that means 5760 samples times the number of channels.
+ static const int kOpusMaxFrameDurationMillis = 120;
+
+ DISALLOW_COPY_AND_ASSIGN(OpusImpl);
+};
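
The |buffer_| sizing above follows from the 120 ms maximum Opus frame duration noted in the comment. A sketch of that arithmetic; the function name is illustrative, the constant comes from the code.

// Maximum decoded samples per channel for one Opus frame.
int MaxSamplesPerChannel(int sampling_rate) {
  const int kOpusMaxFrameDurationMillis = 120;  // Per opus.h documentation.
  return kOpusMaxFrameDurationMillis * sampling_rate / 1000;
}

// MaxSamplesPerChannel(48000) == 5760, so a stereo |buffer_| must hold
// 5760 * 2 == 11520 floats; MaxSamplesPerChannel(8000) == 960.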
+
+class AudioDecoder::Pcm16Impl : public AudioDecoder::ImplBase {
+ public:
+ Pcm16Impl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate)
+ : ImplBase(cast_environment,
+ transport::kPcm16,
+ num_channels,
+ sampling_rate) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ return;
+ ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ }
+
+ private:
+ virtual ~Pcm16Impl() {}
+
+ virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) OVERRIDE {
+ scoped_ptr<AudioBus> audio_bus;
+ const int num_samples = len / sizeof(int16) / num_channels_;
+ if (num_samples <= 0)
+ return audio_bus.Pass();
+
+ int16* const pcm_data = reinterpret_cast<int16*>(data);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ // Convert endianness.
+ const int num_elements = num_samples * num_channels_;
+ for (int i = 0; i < num_elements; ++i)
+ pcm_data[i] = static_cast<int16>(base::NetToHost16(pcm_data[i]));
+#endif
+ audio_bus = AudioBus::Create(num_channels_, num_samples).Pass();
+ audio_bus->FromInterleaved(pcm_data, num_samples, sizeof(int16));
+ return audio_bus.Pass();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(Pcm16Impl);
+};
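
Pcm16Impl assumes the wire carries big-endian (network-order) 16-bit samples, which is why little-endian builds swap bytes in place. A self-contained sketch of the per-sample conversion; Swap16() is an illustrative stand-in for what base::NetToHost16() does on a little-endian host.

#include <stdint.h>

// Byte-swap one 16-bit sample: network (big-endian) order to a
// little-endian host.
uint16_t Swap16(uint16_t v) {
  return static_cast<uint16_t>((v << 8) | (v >> 8));
}

// A sample sent on the wire as bytes {0x12, 0x34} reads back as 0x3412 on a
// little-endian host; Swap16(0x3412) == 0x1234 restores the intended value.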
+
+AudioDecoder::AudioDecoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ int channels,
+ int sampling_rate,
+ transport::AudioCodec codec)
+ : cast_environment_(cast_environment) {
+ switch (codec) {
+ case transport::kOpus:
+ impl_ = new OpusImpl(cast_environment, channels, sampling_rate);
+ break;
+ case transport::kPcm16:
+ impl_ = new Pcm16Impl(cast_environment, channels, sampling_rate);
+ break;
+ default:
+ NOTREACHED() << "Unknown or unspecified codec.";
+ break;
+ }
+}
+
+AudioDecoder::~AudioDecoder() {}
+
+CastInitializationStatus AudioDecoder::InitializationResult() const {
+ if (impl_)
+ return impl_->InitializationResult();
+ return STATUS_UNSUPPORTED_AUDIO_CODEC;
+}
+
+void AudioDecoder::DecodeFrame(
+ scoped_ptr<transport::EncodedFrame> encoded_frame,
+ const DecodeFrameCallback& callback) {
+ DCHECK(encoded_frame.get());
+ DCHECK(!callback.is_null());
+ if (!impl_ || impl_->InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ callback.Run(make_scoped_ptr<AudioBus>(NULL), false);
+ return;
+ }
+ cast_environment_->PostTask(CastEnvironment::AUDIO,
+ FROM_HERE,
+ base::Bind(&AudioDecoder::ImplBase::DecodeFrame,
+ impl_,
+ base::Passed(&encoded_frame),
+ callback));
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/receiver/audio_decoder.h b/chromium/media/cast/receiver/audio_decoder.h
new file mode 100644
index 00000000000..c66735e4e64
--- /dev/null
+++ b/chromium/media/cast/receiver/audio_decoder.h
@@ -0,0 +1,64 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RECEIVER_AUDIO_DECODER_H_
+#define MEDIA_CAST_RECEIVER_AUDIO_DECODER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/audio_bus.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/transport/cast_transport_config.h"
+
+namespace media {
+namespace cast {
+
+class AudioDecoder {
+ public:
+ // Callback passed to DecodeFrame, to deliver decoded audio data from the
+ // decoder. The number of samples in |audio_bus| may vary, and |audio_bus|
+ // can be NULL when errors occur. |is_continuous| is normally true, but will
+ // be false if the decoder has detected a frame skip since the last decode
+  // operation, in which case the client should take steps to smooth over the
+  // audio discontinuity.
+ typedef base::Callback<void(scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous)> DecodeFrameCallback;
+
+ AudioDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
+ int channels,
+ int sampling_rate,
+ transport::AudioCodec codec);
+ virtual ~AudioDecoder();
+
+ // Returns STATUS_AUDIO_INITIALIZED if the decoder was successfully
+ // constructed from the given FrameReceiverConfig. If this method returns any
+ // other value, calls to DecodeFrame() will not succeed.
+ CastInitializationStatus InitializationResult() const;
+
+ // Decode the payload in |encoded_frame| asynchronously. |callback| will be
+ // invoked on the CastEnvironment::MAIN thread with the result.
+ //
+ // In the normal case, |encoded_frame->frame_id| will be
+  // monotonically increasing by 1 for each successive call to this method.
+ // When it is not, the decoder will assume one or more frames have been
+ // dropped (e.g., due to packet loss), and will perform recovery actions.
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ const DecodeFrameCallback& callback);
+
+ private:
+ class ImplBase;
+ class OpusImpl;
+ class Pcm16Impl;
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<ImplBase> impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RECEIVER_AUDIO_DECODER_H_
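
The header above defines a small asynchronous API: construct, check InitializationResult(), then hand frames to DecodeFrame() with a callback. A hedged usage sketch; OnDecoded() and DecodeOne() are hypothetical client code, and a real decoder must outlive the posted callback.

// Hypothetical client callback matching DecodeFrameCallback.
void OnDecoded(scoped_ptr<AudioBus> audio_bus, bool is_continuous) {
  if (!audio_bus)
    return;  // NULL signals a decode error.
  // ...hand |audio_bus| to playback, smoothing if |is_continuous| is false...
}

void DecodeOne(const scoped_refptr<CastEnvironment>& env,
               scoped_ptr<transport::EncodedFrame> frame) {
  // Stack-local only to keep the sketch short; in real code the decoder
  // must outlive the asynchronous DecodeFrame() callback.
  AudioDecoder decoder(env, 2, 48000, transport::kOpus);
  if (decoder.InitializationResult() != STATUS_AUDIO_INITIALIZED)
    return;
  decoder.DecodeFrame(frame.Pass(), base::Bind(&OnDecoded));
}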
diff --git a/chromium/media/cast/receiver/audio_decoder_unittest.cc b/chromium/media/cast/receiver/audio_decoder_unittest.cc
new file mode 100644
index 00000000000..6985a694232
--- /dev/null
+++ b/chromium/media/cast/receiver/audio_decoder_unittest.cc
@@ -0,0 +1,241 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/sys_byteorder.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/receiver/audio_decoder.h"
+#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/test/utility/standalone_cast_environment.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/opus/src/include/opus.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+struct TestScenario {
+ transport::AudioCodec codec;
+ int num_channels;
+ int sampling_rate;
+
+ TestScenario(transport::AudioCodec c, int n, int s)
+ : codec(c), num_channels(n), sampling_rate(s) {}
+};
+} // namespace
+
+class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
+ public:
+ AudioDecoderTest()
+ : cast_environment_(new StandaloneCastEnvironment()),
+ cond_(&lock_) {}
+
+ protected:
+ virtual void SetUp() OVERRIDE {
+ audio_decoder_.reset(new AudioDecoder(cast_environment_,
+ GetParam().num_channels,
+ GetParam().sampling_rate,
+ GetParam().codec));
+ CHECK_EQ(STATUS_AUDIO_INITIALIZED, audio_decoder_->InitializationResult());
+
+ audio_bus_factory_.reset(
+ new TestAudioBusFactory(GetParam().num_channels,
+ GetParam().sampling_rate,
+ TestAudioBusFactory::kMiddleANoteFreq,
+ 0.5f));
+ last_frame_id_ = 0;
+ seen_a_decoded_frame_ = false;
+
+ if (GetParam().codec == transport::kOpus) {
+ opus_encoder_memory_.reset(
+ new uint8[opus_encoder_get_size(GetParam().num_channels)]);
+ OpusEncoder* const opus_encoder =
+ reinterpret_cast<OpusEncoder*>(opus_encoder_memory_.get());
+ CHECK_EQ(OPUS_OK, opus_encoder_init(opus_encoder,
+ GetParam().sampling_rate,
+ GetParam().num_channels,
+ OPUS_APPLICATION_AUDIO));
+ CHECK_EQ(OPUS_OK,
+ opus_encoder_ctl(opus_encoder, OPUS_SET_BITRATE(OPUS_AUTO)));
+ }
+
+ total_audio_feed_in_ = base::TimeDelta();
+ total_audio_decoded_ = base::TimeDelta();
+ }
+
+ // Called from the unit test thread to create another EncodedFrame and push it
+ // into the decoding pipeline.
+ void FeedMoreAudio(const base::TimeDelta& duration,
+ int num_dropped_frames) {
+ // Prepare a simulated EncodedFrame to feed into the AudioDecoder.
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ encoded_frame->dependency = transport::EncodedFrame::KEY;
+ encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
+ last_frame_id_ = encoded_frame->frame_id;
+
+ const scoped_ptr<AudioBus> audio_bus(
+ audio_bus_factory_->NextAudioBus(duration).Pass());
+
+ // Encode |audio_bus| into |encoded_frame->data|.
+ const int num_elements = audio_bus->channels() * audio_bus->frames();
+ std::vector<int16> interleaved(num_elements);
+ audio_bus->ToInterleaved(
+ audio_bus->frames(), sizeof(int16), &interleaved.front());
+ if (GetParam().codec == transport::kPcm16) {
+ encoded_frame->data.resize(num_elements * sizeof(int16));
+ int16* const pcm_data =
+ reinterpret_cast<int16*>(encoded_frame->mutable_bytes());
+ for (size_t i = 0; i < interleaved.size(); ++i)
+ pcm_data[i] = static_cast<int16>(base::HostToNet16(interleaved[i]));
+ } else if (GetParam().codec == transport::kOpus) {
+ OpusEncoder* const opus_encoder =
+ reinterpret_cast<OpusEncoder*>(opus_encoder_memory_.get());
+ const int kOpusEncodeBufferSize = 4000;
+ encoded_frame->data.resize(kOpusEncodeBufferSize);
+ const int payload_size =
+ opus_encode(opus_encoder,
+ &interleaved.front(),
+ audio_bus->frames(),
+ encoded_frame->mutable_bytes(),
+ encoded_frame->data.size());
+ CHECK_GT(payload_size, 1);
+ encoded_frame->data.resize(payload_size);
+ } else {
+ ASSERT_TRUE(false); // Not reached.
+ }
+
+ {
+ base::AutoLock auto_lock(lock_);
+ total_audio_feed_in_ += duration;
+ }
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&AudioDecoder::DecodeFrame,
+ base::Unretained(audio_decoder_.get()),
+ base::Passed(&encoded_frame),
+ base::Bind(&AudioDecoderTest::OnDecodedFrame,
+ base::Unretained(this),
+ num_dropped_frames == 0)));
+ }
+
+  // Blocks the caller until all audio that has been fed in has been decoded.
+ void WaitForAllAudioToBeDecoded() {
+ DCHECK(!cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::AutoLock auto_lock(lock_);
+ while (total_audio_decoded_ < total_audio_feed_in_)
+ cond_.Wait();
+ EXPECT_EQ(total_audio_feed_in_.InMicroseconds(),
+ total_audio_decoded_.InMicroseconds());
+ }
+
+ private:
+ // Called by |audio_decoder_| to deliver each frame of decoded audio.
+ void OnDecodedFrame(bool should_be_continuous,
+ scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // A NULL |audio_bus| indicates a decode error, which we don't expect.
+ ASSERT_FALSE(!audio_bus);
+
+ // Did the decoder detect whether frames were dropped?
+ EXPECT_EQ(should_be_continuous, is_continuous);
+
+ // Does the audio data seem to be intact? For Opus, we have to ignore the
+ // first frame seen at the start (and immediately after dropped packet
+    // recovery) because it introduces a tiny but significant delay.
+ bool examine_signal = true;
+ if (GetParam().codec == transport::kOpus) {
+ examine_signal = seen_a_decoded_frame_ && should_be_continuous;
+ seen_a_decoded_frame_ = true;
+ }
+ if (examine_signal) {
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ EXPECT_NEAR(
+ TestAudioBusFactory::kMiddleANoteFreq * 2 * audio_bus->frames() /
+ GetParam().sampling_rate,
+ CountZeroCrossings(audio_bus->channel(ch), audio_bus->frames()),
+ 1);
+ }
+ }
+
+ // Signal the main test thread that more audio was decoded.
+ base::AutoLock auto_lock(lock_);
+ total_audio_decoded_ += base::TimeDelta::FromSeconds(1) *
+ audio_bus->frames() / GetParam().sampling_rate;
+ cond_.Signal();
+ }
+
+ const scoped_refptr<StandaloneCastEnvironment> cast_environment_;
+ scoped_ptr<AudioDecoder> audio_decoder_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
+ uint32 last_frame_id_;
+ bool seen_a_decoded_frame_;
+ scoped_ptr<uint8[]> opus_encoder_memory_;
+
+ base::Lock lock_;
+ base::ConditionVariable cond_;
+ base::TimeDelta total_audio_feed_in_;
+ base::TimeDelta total_audio_decoded_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioDecoderTest);
+};
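
The EXPECT_NEAR in OnDecodedFrame() checks signal integrity by counting zero crossings: a pure tone at f Hz crosses zero about 2f times per second, so N frames at rate R should contain roughly f * 2 * N / R crossings. A worked instance, assuming kMiddleANoteFreq is 440 Hz (an assumption; the constant is defined in the test utility headers).

// Expected zero-crossing count for a pure tone.
double ExpectedZeroCrossings(double tone_hz, int frames, int sampling_rate) {
  return tone_hz * 2.0 * frames / sampling_rate;
}

// For a 10 ms buffer at 48000 Hz (480 frames):
// ExpectedZeroCrossings(440.0, 480, 48000) == 8.8, so the +/-1 tolerance
// above accepts a measured count of 8 or 9.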
+
+TEST_P(AudioDecoderTest, DecodesFramesWithSameDuration) {
+ const base::TimeDelta kTenMilliseconds =
+ base::TimeDelta::FromMilliseconds(10);
+ const int kNumFrames = 10;
+ for (int i = 0; i < kNumFrames; ++i)
+ FeedMoreAudio(kTenMilliseconds, 0);
+ WaitForAllAudioToBeDecoded();
+}
+
+TEST_P(AudioDecoderTest, DecodesFramesWithVaryingDuration) {
+ // These are the set of frame durations supported by the Opus encoder.
+ const int kFrameDurationMs[] = { 5, 10, 20, 40, 60 };
+
+ const int kNumFrames = 10;
+ for (size_t i = 0; i < arraysize(kFrameDurationMs); ++i)
+ for (int j = 0; j < kNumFrames; ++j)
+ FeedMoreAudio(base::TimeDelta::FromMilliseconds(kFrameDurationMs[i]), 0);
+ WaitForAllAudioToBeDecoded();
+}
+
+TEST_P(AudioDecoderTest, RecoversFromDroppedFrames) {
+ const base::TimeDelta kTenMilliseconds =
+ base::TimeDelta::FromMilliseconds(10);
+ const int kNumFrames = 100;
+ int next_drop_at = 3;
+ int next_num_dropped = 1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ if (i == next_drop_at) {
+ const int num_dropped = next_num_dropped++;
+ next_drop_at *= 2;
+ i += num_dropped;
+ FeedMoreAudio(kTenMilliseconds, num_dropped);
+ } else {
+ FeedMoreAudio(kTenMilliseconds, 0);
+ }
+ }
+ WaitForAllAudioToBeDecoded();
+}
+
+INSTANTIATE_TEST_CASE_P(AudioDecoderTestScenarios,
+ AudioDecoderTest,
+ ::testing::Values(
+ TestScenario(transport::kPcm16, 1, 8000),
+ TestScenario(transport::kPcm16, 2, 48000),
+ TestScenario(transport::kOpus, 1, 8000),
+ TestScenario(transport::kOpus, 2, 48000)));
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/receiver/cast_receiver_impl.cc b/chromium/media/cast/receiver/cast_receiver_impl.cc
new file mode 100644
index 00000000000..7cff354c146
--- /dev/null
+++ b/chromium/media/cast/receiver/cast_receiver_impl.cc
@@ -0,0 +1,232 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/receiver/cast_receiver_impl.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/receiver/audio_decoder.h"
+#include "media/cast/receiver/video_decoder.h"
+
+namespace media {
+namespace cast {
+
+scoped_ptr<CastReceiver> CastReceiver::Create(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
+ transport::PacketSender* const packet_sender) {
+ return scoped_ptr<CastReceiver>(new CastReceiverImpl(
+ cast_environment, audio_config, video_config, packet_sender));
+}
+
+CastReceiverImpl::CastReceiverImpl(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
+ transport::PacketSender* const packet_sender)
+ : cast_environment_(cast_environment),
+ pacer_(cast_environment->Clock(),
+ cast_environment->Logging(),
+ packet_sender,
+ cast_environment->GetTaskRunner(CastEnvironment::MAIN)),
+ audio_receiver_(cast_environment, audio_config, AUDIO_EVENT, &pacer_),
+ video_receiver_(cast_environment, video_config, VIDEO_EVENT, &pacer_),
+ ssrc_of_audio_sender_(audio_config.incoming_ssrc),
+ ssrc_of_video_sender_(video_config.incoming_ssrc),
+ num_audio_channels_(audio_config.channels),
+ audio_sampling_rate_(audio_config.frequency),
+ audio_codec_(audio_config.codec.audio),
+ video_codec_(video_config.codec.video) {}
+
+CastReceiverImpl::~CastReceiverImpl() {}
+
+void CastReceiverImpl::DispatchReceivedPacket(scoped_ptr<Packet> packet) {
+ const uint8_t* const data = &packet->front();
+ const size_t length = packet->size();
+
+ uint32 ssrc_of_sender;
+ if (Rtcp::IsRtcpPacket(data, length)) {
+ ssrc_of_sender = Rtcp::GetSsrcOfSender(data, length);
+ } else if (!FrameReceiver::ParseSenderSsrc(data, length, &ssrc_of_sender)) {
+ VLOG(1) << "Invalid RTP packet.";
+ return;
+ }
+
+ base::WeakPtr<FrameReceiver> target;
+ if (ssrc_of_sender == ssrc_of_video_sender_) {
+ target = video_receiver_.AsWeakPtr();
+ } else if (ssrc_of_sender == ssrc_of_audio_sender_) {
+ target = audio_receiver_.AsWeakPtr();
+ } else {
+    VLOG(1) << "Dropping packet with a non-matching sender SSRC: "
+ << ssrc_of_sender;
+ return;
+ }
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&FrameReceiver::ProcessPacket),
+ target,
+ base::Passed(&packet)));
+}
+
+transport::PacketReceiverCallback CastReceiverImpl::packet_receiver() {
+ return base::Bind(&CastReceiverImpl::DispatchReceivedPacket,
+ // TODO(miu): This code structure is dangerous, since the
+ // callback could be stored and then invoked after
+ // destruction of |this|.
+ base::Unretained(this));
+}
+
+void CastReceiverImpl::RequestDecodedAudioFrame(
+ const AudioFrameDecodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!callback.is_null());
+ audio_receiver_.RequestEncodedFrame(base::Bind(
+ &CastReceiverImpl::DecodeEncodedAudioFrame,
+ // Note: Use of Unretained is safe since this Closure is guaranteed to be
+ // invoked or discarded by |audio_receiver_| before destruction of |this|.
+ base::Unretained(this),
+ callback));
+}
+
+void CastReceiverImpl::RequestEncodedAudioFrame(
+ const ReceiveEncodedFrameCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ audio_receiver_.RequestEncodedFrame(callback);
+}
+
+void CastReceiverImpl::RequestDecodedVideoFrame(
+ const VideoFrameDecodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!callback.is_null());
+ video_receiver_.RequestEncodedFrame(base::Bind(
+ &CastReceiverImpl::DecodeEncodedVideoFrame,
+ // Note: Use of Unretained is safe since this Closure is guaranteed to be
+ // invoked or discarded by |video_receiver_| before destruction of |this|.
+ base::Unretained(this),
+ callback));
+}
+
+void CastReceiverImpl::RequestEncodedVideoFrame(
+ const ReceiveEncodedFrameCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ video_receiver_.RequestEncodedFrame(callback);
+}
+
+void CastReceiverImpl::DecodeEncodedAudioFrame(
+ const AudioFrameDecodedCallback& callback,
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!encoded_frame) {
+ callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false);
+ return;
+ }
+
+ if (!audio_decoder_) {
+ audio_decoder_.reset(new AudioDecoder(cast_environment_,
+ num_audio_channels_,
+ audio_sampling_rate_,
+ audio_codec_));
+ }
+ const uint32 frame_id = encoded_frame->frame_id;
+ const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ const base::TimeTicks playout_time = encoded_frame->reference_time;
+ audio_decoder_->DecodeFrame(
+ encoded_frame.Pass(),
+ base::Bind(&CastReceiverImpl::EmitDecodedAudioFrame,
+ cast_environment_,
+ callback,
+ frame_id,
+ rtp_timestamp,
+ playout_time));
+}
+
+void CastReceiverImpl::DecodeEncodedVideoFrame(
+ const VideoFrameDecodedCallback& callback,
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!encoded_frame) {
+ callback.Run(
+ make_scoped_refptr<VideoFrame>(NULL), base::TimeTicks(), false);
+ return;
+ }
+
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT2(
+ "cast_perf_test", "PullEncodedVideoFrame",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", encoded_frame->rtp_timestamp,
+ "render_time", encoded_frame->reference_time.ToInternalValue());
+
+ if (!video_decoder_)
+ video_decoder_.reset(new VideoDecoder(cast_environment_, video_codec_));
+ const uint32 frame_id = encoded_frame->frame_id;
+ const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ const base::TimeTicks playout_time = encoded_frame->reference_time;
+ video_decoder_->DecodeFrame(
+ encoded_frame.Pass(),
+ base::Bind(&CastReceiverImpl::EmitDecodedVideoFrame,
+ cast_environment_,
+ callback,
+ frame_id,
+ rtp_timestamp,
+ playout_time));
+}
+
+// static
+void CastReceiverImpl::EmitDecodedAudioFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous) {
+ DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
+ if (audio_bus.get()) {
+ const base::TimeTicks now = cast_environment->Clock()->NowTicks();
+ cast_environment->Logging()->InsertFrameEvent(
+ now, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp, frame_id);
+ cast_environment->Logging()->InsertFrameEventWithDelay(
+ now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp, frame_id,
+ playout_time - now);
+ }
+ callback.Run(audio_bus.Pass(), playout_time, is_continuous);
+}
+
+// static
+void CastReceiverImpl::EmitDecodedVideoFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ const scoped_refptr<VideoFrame>& video_frame,
+ bool is_continuous) {
+ DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
+ if (video_frame) {
+ const base::TimeTicks now = cast_environment->Clock()->NowTicks();
+ cast_environment->Logging()->InsertFrameEvent(
+ now, FRAME_DECODED, VIDEO_EVENT, rtp_timestamp, frame_id);
+ cast_environment->Logging()->InsertFrameEventWithDelay(
+ now, FRAME_PLAYOUT, VIDEO_EVENT, rtp_timestamp, frame_id,
+ playout_time - now);
+
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT1(
+ "cast_perf_test", "FrameDecoded",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", rtp_timestamp);
+ }
+ callback.Run(video_frame, playout_time, is_continuous);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/receiver/cast_receiver_impl.h b/chromium/media/cast/receiver/cast_receiver_impl.h
new file mode 100644
index 00000000000..c0dd5f38d10
--- /dev/null
+++ b/chromium/media/cast/receiver/cast_receiver_impl.h
@@ -0,0 +1,122 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RECEIVER_CAST_RECEIVER_IMPL_H_
+#define MEDIA_CAST_RECEIVER_CAST_RECEIVER_IMPL_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/receiver/frame_receiver.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+
+namespace media {
+namespace cast {
+
+class AudioDecoder;
+class VideoDecoder;
+
+// This is a pure owner class that groups all required receiver-related objects
+// together, such as the paced packet sender, audio/video RTP frame receivers,
+// and software decoders (created on-demand).
+class CastReceiverImpl : public CastReceiver {
+ public:
+ CastReceiverImpl(scoped_refptr<CastEnvironment> cast_environment,
+ const FrameReceiverConfig& audio_config,
+ const FrameReceiverConfig& video_config,
+ transport::PacketSender* const packet_sender);
+
+ virtual ~CastReceiverImpl();
+
+ // CastReceiver implementation.
+ virtual transport::PacketReceiverCallback packet_receiver() OVERRIDE;
+ virtual void RequestDecodedAudioFrame(
+ const AudioFrameDecodedCallback& callback) OVERRIDE;
+ virtual void RequestEncodedAudioFrame(
+ const ReceiveEncodedFrameCallback& callback) OVERRIDE;
+ virtual void RequestDecodedVideoFrame(
+ const VideoFrameDecodedCallback& callback) OVERRIDE;
+ virtual void RequestEncodedVideoFrame(
+ const ReceiveEncodedFrameCallback& callback) OVERRIDE;
+
+ private:
+ // Forwards |packet| to a specific RTP frame receiver, or drops it if SSRC
+ // does not map to one of the receivers.
+ void DispatchReceivedPacket(scoped_ptr<Packet> packet);
+
+ // Feeds an EncodedFrame into |audio_decoder_|. RequestDecodedAudioFrame()
+ // uses this as a callback for RequestEncodedAudioFrame().
+ void DecodeEncodedAudioFrame(
+ const AudioFrameDecodedCallback& callback,
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
+
+ // Feeds an EncodedFrame into |video_decoder_|. RequestDecodedVideoFrame()
+ // uses this as a callback for RequestEncodedVideoFrame().
+ void DecodeEncodedVideoFrame(
+ const VideoFrameDecodedCallback& callback,
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
+
+ // Receives an AudioBus from |audio_decoder_|, logs the event, and passes the
+ // data on by running the given |callback|. This method is static to ensure
+ // it can be called after a CastReceiverImpl instance is destroyed.
+ // DecodeEncodedAudioFrame() uses this as a callback for
+ // AudioDecoder::DecodeFrame().
+ static void EmitDecodedAudioFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ scoped_ptr<AudioBus> audio_bus,
+ bool is_continuous);
+
+ // Receives a VideoFrame from |video_decoder_|, logs the event, and passes the
+ // data on by running the given |callback|. This method is static to ensure
+ // it can be called after a CastReceiverImpl instance is destroyed.
+ // DecodeEncodedVideoFrame() uses this as a callback for
+ // VideoDecoder::DecodeFrame().
+ static void EmitDecodedVideoFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ const scoped_refptr<VideoFrame>& video_frame,
+ bool is_continuous);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ transport::PacedSender pacer_;
+ FrameReceiver audio_receiver_;
+ FrameReceiver video_receiver_;
+
+ // Used by DispatchReceivedPacket() to direct packets to the appropriate frame
+ // receiver.
+ const uint32 ssrc_of_audio_sender_;
+ const uint32 ssrc_of_video_sender_;
+
+ // Parameters for the decoders that are created on-demand. The values here
+ // might be nonsense if the client of CastReceiverImpl never intends to use
+ // the internal software-based decoders.
+ const int num_audio_channels_;
+ const int audio_sampling_rate_;
+ const transport::AudioCodec audio_codec_;
+ const transport::VideoCodec video_codec_;
+
+ // Created on-demand to decode frames from |audio_receiver_| into AudioBuses
+ // for playback.
+ scoped_ptr<AudioDecoder> audio_decoder_;
+
+ // Created on-demand to decode frames from |video_receiver_| into VideoFrame
+ // images for playback.
+ scoped_ptr<VideoDecoder> video_decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastReceiverImpl);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif  // MEDIA_CAST_RECEIVER_CAST_RECEIVER_IMPL_H_
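
EmitDecodedAudioFrame() and EmitDecodedVideoFrame() are static, and the CastEnvironment travels with the callback as a scoped_refptr, so the bound callback never dereferences a possibly destroyed |this|. A minimal sketch of that pattern; Owner and Emit() are hypothetical names.

// Static-trampoline pattern: bind a static method plus a ref-counted
// argument instead of |this|, so the callback outlives its creator safely.
class Owner {
 public:
  base::Closure MakeSafeCallback(const scoped_refptr<CastEnvironment>& env) {
    return base::Bind(&Owner::Emit, env);  // Note: no base::Unretained(this).
  }

 private:
  static void Emit(const scoped_refptr<CastEnvironment>& env) {
    // May run after the Owner is destroyed; the bound scoped_refptr keeps
    // |env| alive for as long as the callback exists.
  }
};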
diff --git a/chromium/media/cast/receiver/frame_receiver.cc b/chromium/media/cast/receiver/frame_receiver.cc
new file mode 100644
index 00000000000..e189cc99a7f
--- /dev/null
+++ b/chromium/media/cast/receiver/frame_receiver.cc
@@ -0,0 +1,326 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/receiver/frame_receiver.h"
+
+#include <algorithm>
+
+#include "base/big_endian.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_environment.h"
+
+namespace {
+const int kMinSchedulingDelayMs = 1;
+} // namespace
+
+namespace media {
+namespace cast {
+
+FrameReceiver::FrameReceiver(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const FrameReceiverConfig& config,
+ EventMediaType event_media_type,
+ transport::PacedPacketSender* const packet_sender)
+ : cast_environment_(cast_environment),
+ packet_parser_(config.incoming_ssrc, config.rtp_payload_type),
+ stats_(cast_environment->Clock()),
+ event_media_type_(event_media_type),
+ event_subscriber_(kReceiverRtcpEventHistorySize, event_media_type),
+ rtp_timebase_(config.frequency),
+ target_playout_delay_(
+ base::TimeDelta::FromMilliseconds(config.rtp_max_delay_ms)),
+ expected_frame_duration_(
+ base::TimeDelta::FromSeconds(1) / config.max_frame_rate),
+ reports_are_scheduled_(false),
+ framer_(cast_environment->Clock(),
+ this,
+ config.incoming_ssrc,
+ true,
+ config.rtp_max_delay_ms * config.max_frame_rate / 1000),
+ rtcp_(cast_environment_,
+ NULL,
+ NULL,
+ packet_sender,
+ &stats_,
+ config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(config.rtcp_interval),
+ config.feedback_ssrc,
+ config.incoming_ssrc,
+ config.rtcp_c_name,
+ event_media_type),
+ is_waiting_for_consecutive_frame_(false),
+ lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
+ weak_factory_(this) {
+ DCHECK_GT(config.rtp_max_delay_ms, 0);
+ DCHECK_GT(config.max_frame_rate, 0);
+ decryptor_.Initialize(config.aes_key, config.aes_iv_mask);
+ rtcp_.SetTargetDelay(target_playout_delay_);
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
+ memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
+}
+
+FrameReceiver::~FrameReceiver() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
+}
+
+void FrameReceiver::RequestEncodedFrame(
+ const ReceiveEncodedFrameCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ frame_request_queue_.push_back(callback);
+ EmitAvailableEncodedFrames();
+}
+
+bool FrameReceiver::ProcessPacket(scoped_ptr<Packet> packet) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) {
+ rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
+ } else {
+ RtpCastHeader rtp_header;
+ const uint8* payload_data;
+ size_t payload_size;
+ if (!packet_parser_.ParsePacket(&packet->front(),
+ packet->size(),
+ &rtp_header,
+ &payload_data,
+ &payload_size)) {
+ return false;
+ }
+
+ ProcessParsedPacket(rtp_header, payload_data, payload_size);
+ stats_.UpdateStatistics(rtp_header);
+ }
+
+ if (!reports_are_scheduled_) {
+ ScheduleNextRtcpReport();
+ ScheduleNextCastMessage();
+ reports_are_scheduled_ = true;
+ }
+
+ return true;
+}
+
+// static
+bool FrameReceiver::ParseSenderSsrc(const uint8* packet,
+ size_t length,
+ uint32* ssrc) {
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(packet), length);
+ return big_endian_reader.Skip(8) && big_endian_reader.ReadU32(ssrc);
+}
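
ParseSenderSsrc() works because an RTP header stores the sender SSRC as a big-endian 32-bit word at bytes 8..11, hence the Skip(8) followed by ReadU32(). The same parse done by hand, as a sketch; ParseSsrcByHand() is an illustrative name.

#include <stddef.h>
#include <stdint.h>

bool ParseSsrcByHand(const uint8_t* packet, size_t length, uint32_t* ssrc) {
  if (length < 12)  // Minimum RTP header size.
    return false;
  // Assemble bytes 8..11 in network (big-endian) order.
  *ssrc = (static_cast<uint32_t>(packet[8]) << 24) |
          (static_cast<uint32_t>(packet[9]) << 16) |
          (static_cast<uint32_t>(packet[10]) << 8) |
          static_cast<uint32_t>(packet[11]);
  return true;
}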
+
+void FrameReceiver::ProcessParsedPacket(const RtpCastHeader& rtp_header,
+ const uint8* payload_data,
+ size_t payload_size) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+
+ frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
+ rtp_header.rtp_timestamp;
+ cast_environment_->Logging()->InsertPacketEvent(
+ now, PACKET_RECEIVED, event_media_type_, rtp_header.rtp_timestamp,
+ rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id,
+ payload_size);
+
+ bool duplicate = false;
+ const bool complete =
+ framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);
+
+ // Duplicate packets are ignored.
+ if (duplicate)
+ return;
+
+  // Update lip-sync values upon receiving the first packet of each frame, or
+  // if they have not been set yet.
+ if (rtp_header.packet_id == 0 || lip_sync_reference_time_.is_null()) {
+ RtpTimestamp fresh_sync_rtp;
+ base::TimeTicks fresh_sync_reference;
+ if (!rtcp_.GetLatestLipSyncTimes(&fresh_sync_rtp, &fresh_sync_reference)) {
+ // HACK: The sender should have provided Sender Reports before the first
+ // frame was sent. However, the spec does not currently require this.
+ // Therefore, when the data is missing, the local clock is used to
+ // generate reference timestamps.
+      VLOG(2) << "Lip sync info missing. Falling back to local clock.";
+ fresh_sync_rtp = rtp_header.rtp_timestamp;
+ fresh_sync_reference = now;
+ }
+ // |lip_sync_reference_time_| is always incremented according to the time
+ // delta computed from the difference in RTP timestamps. Then,
+    // |lip_sync_drift_| accounts for clock drift and also smooths out any
+ // sudden/discontinuous shifts in the series of reference time values.
+ if (lip_sync_reference_time_.is_null()) {
+ lip_sync_reference_time_ = fresh_sync_reference;
+ } else {
+ lip_sync_reference_time_ += RtpDeltaToTimeDelta(
+ static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_),
+ rtp_timebase_);
+ }
+ lip_sync_rtp_timestamp_ = fresh_sync_rtp;
+ lip_sync_drift_.Update(
+ now, fresh_sync_reference - lip_sync_reference_time_);
+ }
+
+ // Another frame is complete from a non-duplicate packet. Attempt to emit
+ // more frames to satisfy enqueued requests.
+ if (complete)
+ EmitAvailableEncodedFrames();
+}
+
+void FrameReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ RtpTimestamp rtp_timestamp =
+ frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
+ cast_environment_->Logging()->InsertFrameEvent(
+ now, FRAME_ACK_SENT, event_media_type_,
+ rtp_timestamp, cast_message.ack_frame_id_);
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
+ rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
+}
+
+void FrameReceiver::EmitAvailableEncodedFrames() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ while (!frame_request_queue_.empty()) {
+ // Attempt to peek at the next completed frame from the |framer_|.
+ // TODO(miu): We should only be peeking at the metadata, and not copying the
+ // payload yet! Or, at least, peek using a StringPiece instead of a copy.
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ bool is_consecutively_next_frame = false;
+ bool have_multiple_complete_frames = false;
+ if (!framer_.GetEncodedFrame(encoded_frame.get(),
+ &is_consecutively_next_frame,
+ &have_multiple_complete_frames)) {
+ VLOG(1) << "Wait for more packets to produce a completed frame.";
+ return; // ProcessParsedPacket() will invoke this method in the future.
+ }
+
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks playout_time =
+ GetPlayoutTime(encoded_frame->rtp_timestamp);
+
+ // If we have multiple decodable frames, and the current frame is
+ // too old, then skip it and decode the next frame instead.
+ if (have_multiple_complete_frames && now > playout_time) {
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ continue;
+ }
+
+ // If |framer_| has a frame ready that is out of sequence, examine the
+ // playout time to determine whether it's acceptable to continue, thereby
+ // skipping one or more frames. Skip if the missing frame wouldn't complete
+ // playing before the start of playback of the available frame.
+ if (!is_consecutively_next_frame) {
+ // TODO(miu): Also account for expected decode time here?
+ const base::TimeTicks earliest_possible_end_time_of_missing_frame =
+ now + expected_frame_duration_;
+ if (earliest_possible_end_time_of_missing_frame < playout_time) {
+ VLOG(1) << "Wait for next consecutive frame instead of skipping.";
+ if (!is_waiting_for_consecutive_frame_) {
+ is_waiting_for_consecutive_frame_ = true;
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&FrameReceiver::EmitAvailableEncodedFramesAfterWaiting,
+ weak_factory_.GetWeakPtr()),
+ playout_time - now);
+ }
+ return;
+ }
+ }
+
+ // Decrypt the payload data in the frame, if crypto is being used.
+ if (decryptor_.initialized()) {
+ std::string decrypted_data;
+ if (!decryptor_.Decrypt(encoded_frame->frame_id,
+ encoded_frame->data,
+ &decrypted_data)) {
+ // Decryption failed. Give up on this frame.
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ continue;
+ }
+ encoded_frame->data.swap(decrypted_data);
+ }
+
+ // At this point, we have a decrypted EncodedFrame ready to be emitted.
+ encoded_frame->reference_time = playout_time;
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(frame_request_queue_.front(),
+ base::Passed(&encoded_frame)));
+ frame_request_queue_.pop_front();
+ }
+}
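
The loop above implements a four-way policy per queued request. A condensed restatement as a sketch; the enum and the predicate parameters are illustrative names for values the real code derives from |framer_| and the clock.

enum EmitDecision { kEmit, kSkipLateFrame, kWaitForPackets, kWaitForMissing };

EmitDecision Decide(bool have_complete_frame,
                    bool is_consecutively_next,
                    bool have_multiple_complete_frames,
                    base::TimeTicks now,
                    base::TimeTicks playout_time,
                    base::TimeDelta expected_frame_duration) {
  if (!have_complete_frame)
    return kWaitForPackets;    // ProcessParsedPacket() will retry later.
  if (have_multiple_complete_frames && now > playout_time)
    return kSkipLateFrame;     // Too old; a newer frame is decodable.
  if (!is_consecutively_next && now + expected_frame_duration < playout_time)
    return kWaitForMissing;    // The gap frame could still arrive in time.
  return kEmit;                // Decrypt (if needed) and deliver.
}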
+
+void FrameReceiver::EmitAvailableEncodedFramesAfterWaiting() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(is_waiting_for_consecutive_frame_);
+ is_waiting_for_consecutive_frame_ = false;
+ EmitAvailableEncodedFrames();
+}
+
+base::TimeTicks FrameReceiver::GetPlayoutTime(uint32 rtp_timestamp) const {
+ return lip_sync_reference_time_ +
+ lip_sync_drift_.Current() +
+ RtpDeltaToTimeDelta(
+ static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_),
+ rtp_timebase_) +
+ target_playout_delay_;
+}
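
GetPlayoutTime() is a straight sum of four terms; the only conversion involved is RTP units to wall-clock time. A sketch of that conversion, stating the arithmetic RtpDeltaToTimeDelta() is expected to perform (its definition lives elsewhere in media/cast):

// delta_in_rtp_units / rtp_timebase seconds, computed in TimeDelta space.
base::TimeDelta RtpDeltaToTimeDeltaSketch(int32 rtp_delta, int rtp_timebase) {
  return base::TimeDelta::FromSeconds(1) * rtp_delta / rtp_timebase;
}

// With a 48000 Hz timebase, RtpDeltaToTimeDeltaSketch(4800, 48000) is 100 ms,
// so a frame 4800 RTP units past the lip-sync anchor plays out at
// lip_sync_reference_time_ + drift + 100 ms + target_playout_delay_.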
+
+void FrameReceiver::ScheduleNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeTicks send_time;
+ framer_.TimeToSendNextCastMessage(&send_time);
+ base::TimeDelta time_to_send =
+ send_time - cast_environment_->Clock()->NowTicks();
+ time_to_send = std::max(
+ time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&FrameReceiver::SendNextCastMessage,
+ weak_factory_.GetWeakPtr()),
+ time_to_send);
+}
+
+void FrameReceiver::SendNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ framer_.SendCastMessage(); // Will only send a message if it is time.
+ ScheduleNextCastMessage();
+}
+
+void FrameReceiver::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeDelta time_to_next = rtcp_.TimeToSendNextRtcpReport() -
+ cast_environment_->Clock()->NowTicks();
+
+ time_to_next = std::max(
+ time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&FrameReceiver::SendNextRtcpReport,
+ weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void FrameReceiver::SendNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
+ ScheduleNextRtcpReport();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/receiver/frame_receiver.h b/chromium/media/cast/receiver/frame_receiver.h
new file mode 100644
index 00000000000..ac14ab1e0fb
--- /dev/null
+++ b/chromium/media/cast/receiver/frame_receiver.h
@@ -0,0 +1,184 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RECEIVER_FRAME_RECEIVER_H_
+#define MEDIA_CAST_RECEIVER_FRAME_RECEIVER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "media/cast/base/clock_drift_smoother.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/framer/framer.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_receiver/receiver_stats.h"
+#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/transport/utility/transport_encryption_handler.h"
+
+namespace media {
+namespace cast {
+
+class CastEnvironment;
+
+// FrameReceiver receives packets out-of-order while clients make requests for
+// complete frames in-order. (A frame consists of one or more packets.)
+//
+// FrameReceiver also includes logic for computing the playout time for each
+// frame, accounting for a constant targeted playout delay. The purpose of the
+// playout delay is to provide a fixed window of time between the capture event
+// on the sender and the playout on the receiver. This is important because
+// each step of the pipeline (i.e., encode frame, then transmit/retransmit from
+// the sender, then receive and re-order packets on the receiver, then decode
+// frame) can vary in duration and is typically very hard to predict.
+//
+// Each request for a frame includes a callback which FrameReceiver guarantees
+// will be called at some point in the future unless the FrameReceiver is
+// destroyed. Clients should generally limit the number of outstanding requests
+// (perhaps to just one or two).
+//
+// This class is not thread safe. It should only be called from the main cast
+// thread.
+class FrameReceiver : public RtpPayloadFeedback,
+ public base::SupportsWeakPtr<FrameReceiver> {
+ public:
+ FrameReceiver(const scoped_refptr<CastEnvironment>& cast_environment,
+ const FrameReceiverConfig& config,
+ EventMediaType event_media_type,
+ transport::PacedPacketSender* const packet_sender);
+
+ virtual ~FrameReceiver();
+
+ // Request an encoded frame.
+ //
+ // The given |callback| is guaranteed to be run at some point in the future,
+ // except for those requests still enqueued at destruction time.
+ void RequestEncodedFrame(const ReceiveEncodedFrameCallback& callback);
+
+ // Called to deliver another packet, possibly a duplicate, and possibly
+ // out-of-order. Returns true if the parsing of the packet succeeded.
+ bool ProcessPacket(scoped_ptr<Packet> packet);
+
+ // TODO(miu): This is the wrong place for this, but the (de)serialization
+ // implementation needs to be consolidated first.
+ static bool ParseSenderSsrc(const uint8* packet, size_t length, uint32* ssrc);
+
+ protected:
+ friend class FrameReceiverTest; // Invokes ProcessParsedPacket().
+
+ void ProcessParsedPacket(const RtpCastHeader& rtp_header,
+ const uint8* payload_data,
+ size_t payload_size);
+
+ // RtpPayloadFeedback implementation.
+ virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE;
+
+ private:
+  // Processes ready-to-consume frames from |framer_|, decrypting each frame's
+  // payload data and then running the enqueued callbacks in order (one for
+  // each frame). This method may post a delayed task to re-invoke itself in
+ // the future to wait for missing/incomplete frames.
+ void EmitAvailableEncodedFrames();
+
+ // Clears the |is_waiting_for_consecutive_frame_| flag and invokes
+ // EmitAvailableEncodedFrames().
+ void EmitAvailableEncodedFramesAfterWaiting();
+
+ // Computes the playout time for a frame with the given |rtp_timestamp|.
+ // Because lip-sync info is refreshed regularly, calling this method with the
+ // same argument may return different results.
+ base::TimeTicks GetPlayoutTime(uint32 rtp_timestamp) const;
+
+ // Schedule timing for the next cast message.
+ void ScheduleNextCastMessage();
+
+ // Schedule timing for the next RTCP report.
+ void ScheduleNextRtcpReport();
+
+ // Actually send the next cast message.
+ void SendNextCastMessage();
+
+ // Actually send the next RTCP report.
+ void SendNextRtcpReport();
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ // Deserializes a packet into a RtpHeader + payload bytes.
+ RtpParser packet_parser_;
+
+ // Accumulates packet statistics, including packet loss, counts, and jitter.
+ ReceiverStats stats_;
+
+ // Partitions logged events by the type of media passing through.
+ EventMediaType event_media_type_;
+
+ // Subscribes to raw events.
+ // Processes raw events to be sent over to the cast sender via RTCP.
+ ReceiverRtcpEventSubscriber event_subscriber_;
+
+ // RTP timebase: The number of RTP units advanced per one second.
+ const int rtp_timebase_;
+
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ const base::TimeDelta target_playout_delay_;
+
+ // Hack: This is used in logic that determines whether to skip frames.
+ // TODO(miu): Revisit this. Logic needs to also account for expected decode
+ // time.
+ const base::TimeDelta expected_frame_duration_;
+
+ // Set to false initially, then set to true after scheduling the periodic
+ // sending of reports back to the sender. Reports are first scheduled just
+ // after receiving a first packet (since the first packet identifies the
+ // sender for the remainder of the session).
+ bool reports_are_scheduled_;
+
+ // Assembles packets into frames, providing this receiver with complete,
+ // decodable EncodedFrames.
+ Framer framer_;
+
+ // Manages sending/receiving of RTCP packets, including sender/receiver
+ // reports.
+ Rtcp rtcp_;
+
+ // Decrypts encrypted frames.
+ transport::TransportEncryptionHandler decryptor_;
+
+ // Outstanding callbacks to run to deliver on client requests for frames.
+ std::list<ReceiveEncodedFrameCallback> frame_request_queue_;
+
+ // True while there's an outstanding task to re-invoke
+ // EmitAvailableEncodedFrames().
+ bool is_waiting_for_consecutive_frame_;
+
+  // This mapping allows us to log FRAME_ACK_SENT as a frame event. In
+  // addition, it allows the event to be transmitted via RTCP.
+ RtpTimestamp frame_id_to_rtp_timestamp_[256];
+
+ // Lip-sync values used to compute the playout time of each frame from its RTP
+ // timestamp. These are updated each time the first packet of a frame is
+ // received.
+ RtpTimestamp lip_sync_rtp_timestamp_;
+ base::TimeTicks lip_sync_reference_time_;
+ ClockDriftSmoother lip_sync_drift_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<FrameReceiver> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameReceiver);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RECEIVER_FRAME_RECEIVER_H_
diff --git a/chromium/media/cast/receiver/frame_receiver_unittest.cc b/chromium/media/cast/receiver/frame_receiver_unittest.cc
new file mode 100644
index 00000000000..4d8273e132a
--- /dev/null
+++ b/chromium/media/cast/receiver/frame_receiver_unittest.cc
@@ -0,0 +1,419 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/receiver/frame_receiver.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/default_config.h"
+#include "media/cast/transport/pacing/mock_paced_packet_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+
+namespace media {
+namespace cast {
+
+namespace {
+
+const int kPacketSize = 1500;
+const uint32 kFirstFrameId = 1234;
+const int kPlayoutDelayMillis = 100;
+
+class FakeFrameClient {
+ public:
+ FakeFrameClient() : num_called_(0) {}
+ virtual ~FakeFrameClient() {}
+
+ void AddExpectedResult(uint32 expected_frame_id,
+ const base::TimeTicks& expected_playout_time) {
+ expected_results_.push_back(
+ std::make_pair(expected_frame_id, expected_playout_time));
+ }
+
+ void DeliverEncodedFrame(scoped_ptr<transport::EncodedFrame> frame) {
+ SCOPED_TRACE(::testing::Message() << "num_called_ is " << num_called_);
+ ASSERT_FALSE(!frame)
+ << "If at shutdown: There were unsatisfied requests enqueued.";
+ ASSERT_FALSE(expected_results_.empty());
+ EXPECT_EQ(expected_results_.front().first, frame->frame_id);
+ EXPECT_EQ(expected_results_.front().second, frame->reference_time);
+ expected_results_.pop_front();
+ ++num_called_;
+ }
+
+ int number_times_called() const { return num_called_; }
+
+ private:
+ std::deque<std::pair<uint32, base::TimeTicks> > expected_results_;
+ int num_called_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeFrameClient);
+};
+} // namespace
+
+class FrameReceiverTest : public ::testing::Test {
+ protected:
+ FrameReceiverTest() {
+ testing_clock_ = new base::SimpleTestTickClock();
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ start_time_ = testing_clock_->NowTicks();
+ task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
+
+ cast_environment_ =
+ new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_);
+ }
+
+ virtual ~FrameReceiverTest() {}
+
+ virtual void SetUp() {
+ payload_.assign(kPacketSize, 0);
+
+ // Always start with a key frame.
+ rtp_header_.is_key_frame = true;
+ rtp_header_.frame_id = kFirstFrameId;
+ rtp_header_.packet_id = 0;
+ rtp_header_.max_packet_id = 0;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id;
+ rtp_header_.rtp_timestamp = 0;
+ }
+
+ void CreateFrameReceiverOfAudio() {
+ config_ = GetDefaultAudioReceiverConfig();
+ config_.rtp_max_delay_ms = kPlayoutDelayMillis;
+
+ receiver_.reset(new FrameReceiver(
+ cast_environment_, config_, AUDIO_EVENT, &mock_transport_));
+ }
+
+ void CreateFrameReceiverOfVideo() {
+ config_ = GetDefaultVideoReceiverConfig();
+ config_.rtp_max_delay_ms = kPlayoutDelayMillis;
+ // Note: Frame rate must divide 1000 without remainder so the test code
+ // doesn't have to account for rounding errors.
+ config_.max_frame_rate = 25;
+
+ receiver_.reset(new FrameReceiver(
+ cast_environment_, config_, VIDEO_EVENT, &mock_transport_));
+ }
+
+ void FeedOneFrameIntoReceiver() {
+ // Note: For testing purposes, a frame consists of only a single packet.
+ receiver_->ProcessParsedPacket(
+ rtp_header_, payload_.data(), payload_.size());
+ }
+
+ void FeedLipSyncInfoIntoReceiver() {
+ const base::TimeTicks now = testing_clock_->NowTicks();
+ const int64 rtp_timestamp = (now - start_time_) *
+ config_.frequency / base::TimeDelta::FromSeconds(1);
+ CHECK_LE(0, rtp_timestamp);
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
+ TestRtcpPacketBuilder rtcp_packet;
+ rtcp_packet.AddSrWithNtp(config_.incoming_ssrc,
+ ntp_seconds, ntp_fraction,
+ static_cast<uint32>(rtp_timestamp));
+ ASSERT_TRUE(receiver_->ProcessPacket(rtcp_packet.GetPacket().Pass()));
+ }
+
+ FrameReceiverConfig config_;
+ std::vector<uint8> payload_;
+ RtpCastHeader rtp_header_;
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ base::TimeTicks start_time_;
+ transport::MockPacedPacketSender mock_transport_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ FakeFrameClient frame_client_;
+
+ // Important: The FrameReceiver must be declared last, since its
+ // dependencies must remain alive until after its destruction.
+ scoped_ptr<FrameReceiver> receiver_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameReceiverTest);
+};
+
+TEST_F(FrameReceiverTest, RejectsUnparsablePackets) {
+ CreateFrameReceiverOfVideo();
+
+ SimpleEventSubscriber event_subscriber;
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
+
+ const bool success = receiver_->ProcessPacket(
+ scoped_ptr<Packet>(new Packet(kPacketSize, 0xff)).Pass());
+ EXPECT_FALSE(success);
+
+ // Confirm no log events.
+ std::vector<FrameEvent> frame_events;
+ event_subscriber.GetFrameEventsAndReset(&frame_events);
+ EXPECT_TRUE(frame_events.empty());
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
+}
+
+TEST_F(FrameReceiverTest, ReceivesOneFrame) {
+ CreateFrameReceiverOfAudio();
+
+ SimpleEventSubscriber event_subscriber;
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
+
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
+ .WillRepeatedly(testing::Return(true));
+
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
+
+ // Enqueue a request for a frame.
+ receiver_->RequestEncodedFrame(
+ base::Bind(&FakeFrameClient::DeliverEncodedFrame,
+ base::Unretained(&frame_client_)));
+
+ // The request should not be satisfied since no packets have been received.
+ task_runner_->RunTasks();
+ EXPECT_EQ(0, frame_client_.number_times_called());
+
+ // Deliver one frame to the receiver and expect to get one frame back.
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ frame_client_.AddExpectedResult(
+ kFirstFrameId, testing_clock_->NowTicks() + target_playout_delay);
+ FeedOneFrameIntoReceiver();
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Was the frame logged?
+ std::vector<FrameEvent> frame_events;
+ event_subscriber.GetFrameEventsAndReset(&frame_events);
+ ASSERT_FALSE(frame_events.empty());
+ EXPECT_EQ(FRAME_ACK_SENT, frame_events.begin()->type);
+ EXPECT_EQ(AUDIO_EVENT, frame_events.begin()->media_type);
+ EXPECT_EQ(rtp_header_.frame_id, frame_events.begin()->frame_id);
+ EXPECT_EQ(rtp_header_.rtp_timestamp, frame_events.begin()->rtp_timestamp);
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
+}
+
+TEST_F(FrameReceiverTest, ReceivesFramesSkippingWhenAppropriate) {
+ CreateFrameReceiverOfAudio();
+
+ SimpleEventSubscriber event_subscriber;
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
+
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
+ .WillRepeatedly(testing::Return(true));
+
+ const uint32 rtp_advance_per_frame =
+ config_.frequency / config_.max_frame_rate;
+ const base::TimeDelta time_advance_per_frame =
+ base::TimeDelta::FromSeconds(1) / config_.max_frame_rate;
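+ // Worked example (illustrative numbers; the actual values come from the
+ // default audio config): a 48 kHz RTP clock at 100 frames/sec yields
+ // rtp_advance_per_frame == 480 ticks and time_advance_per_frame == 10 ms.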
+
+ // Feed and process lip sync in receiver.
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
+ const base::TimeTicks first_frame_capture_time = testing_clock_->NowTicks();
+
+ // Enqueue a request for a frame.
+ const ReceiveEncodedFrameCallback frame_encoded_callback =
+ base::Bind(&FakeFrameClient::DeliverEncodedFrame,
+ base::Unretained(&frame_client_));
+ receiver_->RequestEncodedFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(0, frame_client_.number_times_called());
+
+ // Receive one frame and expect to see the first request satisfied.
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ frame_client_.AddExpectedResult(
+ kFirstFrameId, first_frame_capture_time + target_playout_delay);
+ rtp_header_.rtp_timestamp = 0;
+ FeedOneFrameIntoReceiver(); // Frame 1
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Enqueue a second request for a frame, but it should not be fulfilled yet.
+ receiver_->RequestEncodedFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Receive one frame out-of-order: Ensure the frame sequence is not
+ // continuous and that the RTP timestamp represents a time in the future.
+ rtp_header_.frame_id = kFirstFrameId + 2; // "Frame 3"
+ rtp_header_.reference_frame_id = rtp_header_.frame_id;
+ rtp_header_.rtp_timestamp += 2 * rtp_advance_per_frame;
+ frame_client_.AddExpectedResult(
+ kFirstFrameId + 2,
+ first_frame_capture_time + 2 * time_advance_per_frame +
+ target_playout_delay);
+ FeedOneFrameIntoReceiver(); // Frame 3
+
+ // Frame 2 should not come out at this point in time.
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Enqueue a third request for a frame.
+ receiver_->RequestEncodedFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Now, advance time forward such that the receiver is convinced it should
+ // skip Frame 2. Frame 3 is emitted (to satisfy the second request) because a
+ // decision was made to skip over the no-show Frame 2.
+ testing_clock_->Advance(2 * time_advance_per_frame + target_playout_delay);
+ task_runner_->RunTasks();
+ EXPECT_EQ(2, frame_client_.number_times_called());
+
+ // Receive Frame 4 and expect it to fulfill the third request immediately.
+ rtp_header_.frame_id = kFirstFrameId + 3; // "Frame 4"
+ rtp_header_.reference_frame_id = rtp_header_.frame_id;
+ rtp_header_.rtp_timestamp += rtp_advance_per_frame;
+ frame_client_.AddExpectedResult(
+ kFirstFrameId + 3, first_frame_capture_time + 3 * time_advance_per_frame +
+ target_playout_delay);
+ FeedOneFrameIntoReceiver(); // Frame 4
+ task_runner_->RunTasks();
+ EXPECT_EQ(3, frame_client_.number_times_called());
+
+ // Move forward to the playout time of an unreceived Frame 5. Expect that
+ // no additional frames are emitted.
+ testing_clock_->Advance(3 * time_advance_per_frame);
+ task_runner_->RunTasks();
+ EXPECT_EQ(3, frame_client_.number_times_called());
+
+ // Were only non-skipped frames logged?
+ std::vector<FrameEvent> frame_events;
+ event_subscriber.GetFrameEventsAndReset(&frame_events);
+ ASSERT_FALSE(frame_events.empty());
+ for (size_t i = 0; i < frame_events.size(); ++i) {
+ EXPECT_EQ(FRAME_ACK_SENT, frame_events[i].type);
+ EXPECT_EQ(AUDIO_EVENT, frame_events[i].media_type);
+ EXPECT_LE(kFirstFrameId, frame_events[i].frame_id);
+ EXPECT_GE(kFirstFrameId + 4, frame_events[i].frame_id);
+ const int frame_offset = frame_events[i].frame_id - kFirstFrameId;
+ EXPECT_NE(frame_offset, 1); // Frame 2 never received.
+ EXPECT_EQ(frame_offset * rtp_advance_per_frame,
+ frame_events[i].rtp_timestamp);
+ }
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
+}
+
+TEST_F(FrameReceiverTest, ReceivesFramesRefusingToSkipAny) {
+ CreateFrameReceiverOfVideo();
+
+ SimpleEventSubscriber event_subscriber;
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
+
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(_, _))
+ .WillRepeatedly(testing::Return(true));
+
+ const uint32 rtp_advance_per_frame =
+ config_.frequency / config_.max_frame_rate;
+ const base::TimeDelta time_advance_per_frame =
+ base::TimeDelta::FromSeconds(1) / config_.max_frame_rate;
+
+ // Feed and process lip sync in receiver.
+ FeedLipSyncInfoIntoReceiver();
+ task_runner_->RunTasks();
+ const base::TimeTicks first_frame_capture_time = testing_clock_->NowTicks();
+
+ // Enqueue a request for a frame.
+ const ReceiveEncodedFrameCallback frame_encoded_callback =
+ base::Bind(&FakeFrameClient::DeliverEncodedFrame,
+ base::Unretained(&frame_client_));
+ receiver_->RequestEncodedFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(0, frame_client_.number_times_called());
+
+ // Receive one frame and expect to see the first request satisfied.
+ const base::TimeDelta target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kPlayoutDelayMillis);
+ frame_client_.AddExpectedResult(
+ kFirstFrameId, first_frame_capture_time + target_playout_delay);
+ rtp_header_.rtp_timestamp = 0;
+ FeedOneFrameIntoReceiver(); // Frame 1
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Enqueue a second request for a frame, but it should not be fulfilled yet.
+ receiver_->RequestEncodedFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Receive one frame out-of-order: Ensure the frame sequence is not
+ // continuous and that the RTP timestamp represents a time in the future.
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = kFirstFrameId + 2; // "Frame 3"
+ rtp_header_.reference_frame_id = kFirstFrameId + 1; // "Frame 2"
+ rtp_header_.rtp_timestamp += 2 * rtp_advance_per_frame;
+ FeedOneFrameIntoReceiver(); // Frame 3
+
+ // Frame 2 should not come out at this point in time.
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Enqueue a third request for a frame.
+ receiver_->RequestEncodedFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Now, advance time forward such that Frame 2 is now too late for playback.
+ // Regardless, the receiver must NOT emit Frame 3 yet because it is not
+ // allowed to skip frames when dependencies are not satisfied. In other
+ // words, Frame 3 is not decodable without Frame 2.
+ testing_clock_->Advance(2 * time_advance_per_frame + target_playout_delay);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, frame_client_.number_times_called());
+
+ // Now receive Frame 2 and expect both the second and third requests to be
+ // fulfilled immediately.
+ frame_client_.AddExpectedResult(
+ kFirstFrameId + 1, // "Frame 2"
+ first_frame_capture_time + 1 * time_advance_per_frame +
+ target_playout_delay);
+ frame_client_.AddExpectedResult(
+ kFirstFrameId + 2, // "Frame 3"
+ first_frame_capture_time + 2 * time_advance_per_frame +
+ target_playout_delay);
+ --rtp_header_.frame_id; // "Frame 2"
+ --rtp_header_.reference_frame_id; // "Frame 1"
+ rtp_header_.rtp_timestamp -= rtp_advance_per_frame;
+ FeedOneFrameIntoReceiver(); // Frame 2
+ task_runner_->RunTasks();
+ EXPECT_EQ(3, frame_client_.number_times_called());
+
+ // Move forward to the playout time of an unreceived Frame 5. Expect that
+ // no additional frames are emitted.
+ testing_clock_->Advance(3 * time_advance_per_frame);
+ task_runner_->RunTasks();
+ EXPECT_EQ(3, frame_client_.number_times_called());
+
+ // Sanity-check logging results.
+ std::vector<FrameEvent> frame_events;
+ event_subscriber.GetFrameEventsAndReset(&frame_events);
+ ASSERT_FALSE(frame_events.empty());
+ for (size_t i = 0; i < frame_events.size(); ++i) {
+ EXPECT_EQ(FRAME_ACK_SENT, frame_events[i].type);
+ EXPECT_EQ(VIDEO_EVENT, frame_events[i].media_type);
+ EXPECT_LE(kFirstFrameId, frame_events[i].frame_id);
+ EXPECT_GE(kFirstFrameId + 3, frame_events[i].frame_id);
+ const int frame_offset = frame_events[i].frame_id - kFirstFrameId;
+ EXPECT_EQ(frame_offset * rtp_advance_per_frame,
+ frame_events[i].rtp_timestamp);
+ }
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/receiver/video_decoder.cc b/chromium/media/cast/receiver/video_decoder.cc
new file mode 100644
index 00000000000..6db3fd35f39
--- /dev/null
+++ b/chromium/media/cast/receiver/video_decoder.cc
@@ -0,0 +1,259 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/receiver/video_decoder.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/json/json_reader.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/values.h"
+#include "media/base/video_util.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
+// backwards compatibility for legacy applications using the library.
+#define VPX_CODEC_DISABLE_COMPAT 1
+#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+namespace cast {
+
+// Base class that handles the common problem of detecting dropped frames, and
+// then invokes the Decode() method implemented by the subclasses to convert
+// the encoded payload data into a usable video frame.
+class VideoDecoder::ImplBase
+ : public base::RefCountedThreadSafe<VideoDecoder::ImplBase> {
+ public:
+ ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
+ transport::VideoCodec codec)
+ : cast_environment_(cast_environment),
+ codec_(codec),
+ cast_initialization_status_(STATUS_VIDEO_UNINITIALIZED),
+ seen_first_frame_(false) {}
+
+ CastInitializationStatus InitializationResult() const {
+ return cast_initialization_status_;
+ }
+
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ const DecodeFrameCallback& callback) {
+ DCHECK_EQ(cast_initialization_status_, STATUS_VIDEO_INITIALIZED);
+
+ COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
+ size_of_frame_id_types_do_not_match);
+ bool is_continuous = true;
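+ // Unsigned 32-bit subtraction handles frame id wraparound naturally: if
+ // last_frame_id_ == 0xFFFFFFFF, a next frame id of 0x0 gives
+ // frames_ahead == 1 (continuous), while 0x1 gives frames_ahead == 2,
+ // signaling one dropped frame.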
+ if (seen_first_frame_) {
+ const uint32 frames_ahead = encoded_frame->frame_id - last_frame_id_;
+ if (frames_ahead > 1) {
+ RecoverBecauseFramesWereDropped();
+ is_continuous = false;
+ }
+ } else {
+ seen_first_frame_ = true;
+ }
+ last_frame_id_ = encoded_frame->frame_id;
+
+ const scoped_refptr<VideoFrame> decoded_frame = Decode(
+ encoded_frame->mutable_bytes(),
+ static_cast<int>(encoded_frame->data.size()));
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(callback, decoded_frame, is_continuous));
+ }
+
+ protected:
+ friend class base::RefCountedThreadSafe<ImplBase>;
+ virtual ~ImplBase() {}
+
+ virtual void RecoverBecauseFramesWereDropped() {}
+
+ // Note: Implementation of Decode() is allowed to mutate |data|.
+ virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) = 0;
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ const transport::VideoCodec codec_;
+
+ // Subclass' ctor is expected to set this to STATUS_VIDEO_INITIALIZED.
+ CastInitializationStatus cast_initialization_status_;
+
+ private:
+ bool seen_first_frame_;
+ uint32 last_frame_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImplBase);
+};
+
+class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
+ public:
+ explicit Vp8Impl(const scoped_refptr<CastEnvironment>& cast_environment)
+ : ImplBase(cast_environment, transport::kVp8) {
+ if (ImplBase::cast_initialization_status_ != STATUS_VIDEO_UNINITIALIZED)
+ return;
+
+ vpx_codec_dec_cfg_t cfg = {0};
+ // TODO(miu): Revisit this for typical multi-core desktop use case. This
+ // feels like it should be 4 or 8.
+ cfg.threads = 1;
+
+ DCHECK(vpx_codec_get_caps(vpx_codec_vp8_dx()) & VPX_CODEC_CAP_POSTPROC);
+ if (vpx_codec_dec_init(&context_,
+ vpx_codec_vp8_dx(),
+ &cfg,
+ VPX_CODEC_USE_POSTPROC) != VPX_CODEC_OK) {
+ ImplBase::cast_initialization_status_ =
+ STATUS_INVALID_VIDEO_CONFIGURATION;
+ return;
+ }
+ ImplBase::cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
+ }
+
+ private:
+ virtual ~Vp8Impl() {
+ if (ImplBase::cast_initialization_status_ == STATUS_VIDEO_INITIALIZED)
+ CHECK_EQ(VPX_CODEC_OK, vpx_codec_destroy(&context_));
+ }
+
+ virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) OVERRIDE {
+ if (len <= 0 || vpx_codec_decode(&context_,
+ data,
+ static_cast<unsigned int>(len),
+ NULL,
+ 0) != VPX_CODEC_OK) {
+ return NULL;
+ }
+
+ vpx_codec_iter_t iter = NULL;
+ vpx_image_t* const image = vpx_codec_get_frame(&context_, &iter);
+ if (!image)
+ return NULL;
+ if (image->fmt != VPX_IMG_FMT_I420 && image->fmt != VPX_IMG_FMT_YV12) {
+ NOTREACHED();
+ return NULL;
+ }
+ DCHECK(vpx_codec_get_frame(&context_, &iter) == NULL)
+ << "Should have only decoded exactly one frame.";
+
+ const gfx::Size frame_size(image->d_w, image->d_h);
+ // Note: Timestamp for the VideoFrame will be set in VideoReceiver.
+ const scoped_refptr<VideoFrame> decoded_frame =
+ VideoFrame::CreateFrame(VideoFrame::YV12,
+ frame_size,
+ gfx::Rect(frame_size),
+ frame_size,
+ base::TimeDelta());
+ CopyYPlane(image->planes[VPX_PLANE_Y],
+ image->stride[VPX_PLANE_Y],
+ image->d_h,
+ decoded_frame);
+ CopyUPlane(image->planes[VPX_PLANE_U],
+ image->stride[VPX_PLANE_U],
+ (image->d_h + 1) / 2,
+ decoded_frame);
+ CopyVPlane(image->planes[VPX_PLANE_V],
+ image->stride[VPX_PLANE_V],
+ (image->d_h + 1) / 2,
+ decoded_frame);
+ return decoded_frame;
+ }
+
+ // VPX decoder context (i.e., an instantiation).
+ vpx_codec_ctx_t context_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8Impl);
+};
+
+#ifndef OFFICIAL_BUILD
+// A fake video decoder that always outputs 2x2 black frames.
+class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
+ public:
+ explicit FakeImpl(const scoped_refptr<CastEnvironment>& cast_environment)
+ : ImplBase(cast_environment, transport::kFakeSoftwareVideo),
+ last_decoded_id_(-1) {
+ if (ImplBase::cast_initialization_status_ != STATUS_VIDEO_UNINITIALIZED)
+ return;
+ ImplBase::cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
+ }
+
+ private:
+ virtual ~FakeImpl() {}
+
+ virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) OVERRIDE {
+ base::JSONReader reader;
+ scoped_ptr<base::Value> values(
+ reader.Read(base::StringPiece(reinterpret_cast<char*>(data))));
+ base::DictionaryValue* dict = NULL;
+ values->GetAsDictionary(&dict);
+
+ bool key = false;
+ int id = 0;
+ int ref = 0;
+ dict->GetBoolean("key", &key);
+ dict->GetInteger("id", &id);
+ dict->GetInteger("ref", &ref);
+ DCHECK_EQ(id, last_decoded_id_ + 1);
+ last_decoded_id_ = id;
+ return media::VideoFrame::CreateBlackFrame(gfx::Size(2, 2));
+ }
+
+ int last_decoded_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeImpl);
+};
+#endif
+
+VideoDecoder::VideoDecoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ transport::VideoCodec codec)
+ : cast_environment_(cast_environment) {
+ switch (codec) {
+#ifndef OFFICIAL_BUILD
+ case transport::kFakeSoftwareVideo:
+ impl_ = new FakeImpl(cast_environment);
+ break;
+#endif
+ case transport::kVp8:
+ impl_ = new Vp8Impl(cast_environment);
+ break;
+ case transport::kH264:
+ // TODO(miu): Need implementation.
+ NOTIMPLEMENTED();
+ break;
+ default:
+ NOTREACHED() << "Unknown or unspecified codec.";
+ break;
+ }
+}
+
+VideoDecoder::~VideoDecoder() {}
+
+CastInitializationStatus VideoDecoder::InitializationResult() const {
+ if (impl_)
+ return impl_->InitializationResult();
+ return STATUS_UNSUPPORTED_VIDEO_CODEC;
+}
+
+void VideoDecoder::DecodeFrame(
+ scoped_ptr<transport::EncodedFrame> encoded_frame,
+ const DecodeFrameCallback& callback) {
+ DCHECK(encoded_frame.get());
+ DCHECK(!callback.is_null());
+ if (!impl_ || impl_->InitializationResult() != STATUS_VIDEO_INITIALIZED) {
+ callback.Run(make_scoped_refptr<VideoFrame>(NULL), false);
+ return;
+ }
+ cast_environment_->PostTask(CastEnvironment::VIDEO,
+ FROM_HERE,
+ base::Bind(&VideoDecoder::ImplBase::DecodeFrame,
+ impl_,
+ base::Passed(&encoded_frame),
+ callback));
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/receiver/video_decoder.h b/chromium/media/cast/receiver/video_decoder.h
new file mode 100644
index 00000000000..66dc36bb2ac
--- /dev/null
+++ b/chromium/media/cast/receiver/video_decoder.h
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RECEIVER_VIDEO_DECODER_H_
+#define MEDIA_CAST_RECEIVER_VIDEO_DECODER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/transport/cast_transport_config.h"
+
+namespace media {
+namespace cast {
+
+class CastEnvironment;
+
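+// A minimal usage sketch (illustrative only): |env| is assumed to be a live
+// CastEnvironment, |frame| an encoded VP8 frame, and OnDecoded a hypothetical
+// free function matching DecodeFrameCallback.
+//
+//   VideoDecoder decoder(env, transport::kVp8);
+//   CHECK_EQ(STATUS_VIDEO_INITIALIZED, decoder.InitializationResult());
+//   decoder.DecodeFrame(frame.Pass(), base::Bind(&OnDecoded));
+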
+class VideoDecoder {
+ public:
+ // Callback passed to DecodeFrame, to deliver a decoded video frame from the
+ // decoder. |frame| can be NULL when errors occur. |is_continuous| is
+ // normally true, but will be false if the decoder has detected a frame skip
+ // since the last decode operation; and the client might choose to take steps
+ // to smooth/interpolate video discontinuities in this case.
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>& frame,
+ bool is_continuous)> DecodeFrameCallback;
+
+ VideoDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
+ transport::VideoCodec codec);
+ virtual ~VideoDecoder();
+
+ // Returns STATUS_VIDEO_INITIALIZED if the decoder was successfully
+ // constructed for the given codec. If this method returns any other value,
+ // calls to DecodeFrame() will not succeed.
+ CastInitializationStatus InitializationResult() const;
+
+ // Decode the payload in |encoded_frame| asynchronously. |callback| will be
+ // invoked on the CastEnvironment::MAIN thread with the result.
+ //
+ // In the normal case, |encoded_frame->frame_id| will be
+ // monotonically-increasing by 1 for each successive call to this method.
+ // When it is not, the decoder will assume one or more frames have been
+ // dropped (e.g., due to packet loss), and will perform recovery actions.
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ const DecodeFrameCallback& callback);
+
+ private:
+ class FakeImpl;
+ class ImplBase;
+ class Vp8Impl;
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<ImplBase> impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RECEIVER_VIDEO_DECODER_H_
diff --git a/chromium/media/cast/receiver/video_decoder_unittest.cc b/chromium/media/cast/receiver/video_decoder_unittest.cc
new file mode 100644
index 00000000000..1d16534b968
--- /dev/null
+++ b/chromium/media/cast/receiver/video_decoder_unittest.cc
@@ -0,0 +1,183 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdlib>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/receiver/video_decoder.h"
+#include "media/cast/test/utility/standalone_cast_environment.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+
+const int kWidth = 360;
+const int kHeight = 240;
+const int kFrameRate = 10;
+
+VideoSenderConfig GetVideoSenderConfigForTest() {
+ VideoSenderConfig config;
+ config.width = kWidth;
+ config.height = kHeight;
+ config.max_frame_rate = kFrameRate;
+ return config;
+}
+
+} // namespace
+
+class VideoDecoderTest
+ : public ::testing::TestWithParam<transport::VideoCodec> {
+ public:
+ VideoDecoderTest()
+ : cast_environment_(new StandaloneCastEnvironment()),
+ vp8_encoder_(GetVideoSenderConfigForTest(), 0),
+ cond_(&lock_) {
+ vp8_encoder_.Initialize();
+ }
+
+ protected:
+ virtual void SetUp() OVERRIDE {
+ video_decoder_.reset(new VideoDecoder(cast_environment_, GetParam()));
+ CHECK_EQ(STATUS_VIDEO_INITIALIZED, video_decoder_->InitializationResult());
+
+ next_frame_timestamp_ = base::TimeDelta();
+ last_frame_id_ = 0;
+ seen_a_decoded_frame_ = false;
+
+ total_video_frames_feed_in_ = 0;
+ total_video_frames_decoded_ = 0;
+ }
+
+ // Called from the unit test thread to create another EncodedFrame and push it
+ // into the decoding pipeline.
+ void FeedMoreVideo(int num_dropped_frames) {
+ // Prepare a simulated EncodedFrame to feed into the VideoDecoder.
+
+ const gfx::Size frame_size(kWidth, kHeight);
+ const scoped_refptr<VideoFrame> video_frame =
+ VideoFrame::CreateFrame(VideoFrame::YV12,
+ frame_size,
+ gfx::Rect(frame_size),
+ frame_size,
+ next_frame_timestamp_);
+ next_frame_timestamp_ += base::TimeDelta::FromSeconds(1) / kFrameRate;
+ PopulateVideoFrame(video_frame, 0);
+
+ // Encode |video_frame| into |encoded_frame->data|.
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ CHECK_EQ(transport::kVp8, GetParam()); // Only VP8 is supported currently.
+ vp8_encoder_.Encode(video_frame, encoded_frame.get());
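+ // Simulating |num_dropped_frames| losses means skipping that many frame
+ // ids; e.g., if the last frame id was 5, num_dropped_frames == 2 produces
+ // frame id 8, leaving ids 6 and 7 unused ("dropped").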
+ encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
+ last_frame_id_ = encoded_frame->frame_id;
+
+ {
+ base::AutoLock auto_lock(lock_);
+ ++total_video_frames_feed_in_;
+ }
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&VideoDecoder::DecodeFrame,
+ base::Unretained(video_decoder_.get()),
+ base::Passed(&encoded_frame),
+ base::Bind(&VideoDecoderTest::OnDecodedFrame,
+ base::Unretained(this),
+ video_frame,
+ num_dropped_frames == 0)));
+ }
+
+ // Blocks the caller until all video that has been fed in has been decoded.
+ void WaitForAllVideoToBeDecoded() {
+ DCHECK(!cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::AutoLock auto_lock(lock_);
+ while (total_video_frames_decoded_ < total_video_frames_feed_in_)
+ cond_.Wait();
+ EXPECT_EQ(total_video_frames_feed_in_, total_video_frames_decoded_);
+ }
+
+ private:
+ // Called by |video_decoder_| to deliver each frame of decoded video.
+ void OnDecodedFrame(const scoped_refptr<VideoFrame>& expected_video_frame,
+ bool should_be_continuous,
+ const scoped_refptr<VideoFrame>& video_frame,
+ bool is_continuous) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // A NULL |video_frame| indicates a decode error, which we don't expect.
+ ASSERT_FALSE(!video_frame);
+
+ // Did the decoder detect whether frames were dropped?
+ EXPECT_EQ(should_be_continuous, is_continuous);
+
+ // Does the video data seem to be intact?
+ EXPECT_EQ(expected_video_frame->coded_size().width(),
+ video_frame->coded_size().width());
+ EXPECT_EQ(expected_video_frame->coded_size().height(),
+ video_frame->coded_size().height());
+ EXPECT_LT(40.0, I420PSNR(expected_video_frame, video_frame));
+ // TODO(miu): Once we start using VideoFrame::timestamp_, check that here.
+
+ // Signal the main test thread that more video was decoded.
+ base::AutoLock auto_lock(lock_);
+ ++total_video_frames_decoded_;
+ cond_.Signal();
+ }
+
+ const scoped_refptr<StandaloneCastEnvironment> cast_environment_;
+ scoped_ptr<VideoDecoder> video_decoder_;
+ base::TimeDelta next_frame_timestamp_;
+ uint32 last_frame_id_;
+ bool seen_a_decoded_frame_;
+
+ Vp8Encoder vp8_encoder_;
+
+ base::Lock lock_;
+ base::ConditionVariable cond_;
+ int total_video_frames_feed_in_;
+ int total_video_frames_decoded_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoderTest);
+};
+
+TEST_P(VideoDecoderTest, DecodesFrames) {
+ const int kNumFrames = 10;
+ for (int i = 0; i < kNumFrames; ++i)
+ FeedMoreVideo(0);
+ WaitForAllVideoToBeDecoded();
+}
+
+TEST_P(VideoDecoderTest, RecoversFromDroppedFrames) {
+ const int kNumFrames = 100;
+ int next_drop_at = 3;
+ int next_num_dropped = 1;
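+ // The schedule below drops frames when the counter reaches 3, 6, 12, 24,
+ // 48, and 96, skipping 1, 2, 3, 4, 5, and 6 frame ids respectively.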
+ for (int i = 0; i < kNumFrames; ++i) {
+ if (i == next_drop_at) {
+ const int num_dropped = next_num_dropped++;
+ next_drop_at *= 2;
+ i += num_dropped;
+ FeedMoreVideo(num_dropped);
+ } else {
+ FeedMoreVideo(0);
+ }
+ }
+ WaitForAllVideoToBeDecoded();
+}
+
+INSTANTIATE_TEST_CASE_P(VideoDecoderTestScenarios,
+ VideoDecoderTest,
+ ::testing::Values(transport::kVp8));
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc
index daaa1ad0883..9ff2d48f03c 100644
--- a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc
+++ b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc
@@ -7,17 +7,13 @@
namespace media {
namespace cast {
-MockRtcpReceiverFeedback::MockRtcpReceiverFeedback() {
-}
+MockRtcpReceiverFeedback::MockRtcpReceiverFeedback() {}
-MockRtcpReceiverFeedback::~MockRtcpReceiverFeedback() {
-}
+MockRtcpReceiverFeedback::~MockRtcpReceiverFeedback() {}
-MockRtcpRttFeedback::MockRtcpRttFeedback() {
-}
+MockRtcpRttFeedback::MockRtcpRttFeedback() {}
-MockRtcpRttFeedback::~MockRtcpRttFeedback() {
-}
+MockRtcpRttFeedback::~MockRtcpRttFeedback() {}
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
index 0316d9819f2..56fe1ca6995 100644
--- a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
+++ b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
@@ -7,7 +7,9 @@
#include <vector>
+#include "media/cast/rtcp/rtcp_defines.h"
#include "media/cast/rtcp/rtcp_receiver.h"
+#include "media/cast/transport/cast_transport_defines.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -19,7 +21,7 @@ class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
virtual ~MockRtcpReceiverFeedback();
MOCK_METHOD1(OnReceivedSenderReport,
- void(const RtcpSenderInfo& remote_sender_info));
+ void(const transport::RtcpSenderInfo& remote_sender_info));
MOCK_METHOD1(OnReceiverReferenceTimeReport,
void(const RtcpReceiverReferenceTimeReport& remote_time_report));
@@ -28,8 +30,6 @@ class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
MOCK_METHOD1(OnReceivedReceiverLog,
void(const RtcpReceiverLogMessage& receiver_log));
- MOCK_METHOD1(OnReceivedSenderLog,
- void(const RtcpSenderLogMessage& sender_log));
};
class MockRtcpRttFeedback : public RtcpRttFeedback {
diff --git a/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc
index 65c630148c2..e44e0bfdef4 100644
--- a/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc
+++ b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc
@@ -7,11 +7,9 @@
namespace media {
namespace cast {
-MockRtcpSenderFeedback::MockRtcpSenderFeedback() {
-}
+MockRtcpSenderFeedback::MockRtcpSenderFeedback() {}
-MockRtcpSenderFeedback::~MockRtcpSenderFeedback() {
-}
+MockRtcpSenderFeedback::~MockRtcpSenderFeedback() {}
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.cc b/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.cc
new file mode 100644
index 00000000000..9a9c0aeeb74
--- /dev/null
+++ b/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.cc
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
+
+#include <utility>
+
+namespace media {
+namespace cast {
+
+ReceiverRtcpEventSubscriber::ReceiverRtcpEventSubscriber(
+ const size_t max_size_to_retain, EventMediaType type)
+ : max_size_to_retain_(max_size_to_retain), type_(type) {
+ DCHECK_GT(max_size_to_retain_, 0u);
+ DCHECK(type_ == AUDIO_EVENT || type_ == VIDEO_EVENT);
+}
+
+ReceiverRtcpEventSubscriber::~ReceiverRtcpEventSubscriber() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void ReceiverRtcpEventSubscriber::OnReceiveFrameEvent(
+ const FrameEvent& frame_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (ShouldProcessEvent(frame_event.type, frame_event.media_type)) {
+ RtcpEvent rtcp_event;
+ switch (frame_event.type) {
+ case FRAME_PLAYOUT:
+ rtcp_event.delay_delta = frame_event.delay_delta;
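+ // Fall through to also record the event type and timestamp.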
+ case FRAME_ACK_SENT:
+ case FRAME_DECODED:
+ rtcp_event.type = frame_event.type;
+ rtcp_event.timestamp = frame_event.timestamp;
+ rtcp_events_.insert(
+ std::make_pair(frame_event.rtp_timestamp, rtcp_event));
+ break;
+ default:
+ break;
+ }
+ }
+
+ TruncateMapIfNeeded();
+
+ DCHECK_LE(rtcp_events_.size(), max_size_to_retain_);
+}
+
+void ReceiverRtcpEventSubscriber::OnReceivePacketEvent(
+ const PacketEvent& packet_event) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (ShouldProcessEvent(packet_event.type, packet_event.media_type)) {
+ RtcpEvent rtcp_event;
+ if (packet_event.type == PACKET_RECEIVED) {
+ rtcp_event.type = packet_event.type;
+ rtcp_event.timestamp = packet_event.timestamp;
+ rtcp_event.packet_id = packet_event.packet_id;
+ rtcp_events_.insert(
+ std::make_pair(packet_event.rtp_timestamp, rtcp_event));
+ }
+ }
+
+ TruncateMapIfNeeded();
+
+ DCHECK_LE(rtcp_events_.size(), max_size_to_retain_);
+}
+
+void ReceiverRtcpEventSubscriber::GetRtcpEventsAndReset(
+ RtcpEventMultiMap* rtcp_events) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(rtcp_events);
+ rtcp_events->swap(rtcp_events_);
+ rtcp_events_.clear();
+}
+
+void ReceiverRtcpEventSubscriber::TruncateMapIfNeeded() {
+ // If map size has exceeded |max_size_to_retain_|, remove entry with
+ // the smallest RTP timestamp.
+ if (rtcp_events_.size() > max_size_to_retain_) {
+ DVLOG(3) << "RTCP event map exceeded size limit; "
+ << "removing oldest entry";
+ // Erasing one entry suffices since we only insert elements one at a time.
+ rtcp_events_.erase(rtcp_events_.begin());
+ }
+}
+
+bool ReceiverRtcpEventSubscriber::ShouldProcessEvent(
+ CastLoggingEvent event_type, EventMediaType event_media_type) {
+ return type_ == event_media_type &&
+ (event_type == FRAME_ACK_SENT || event_type == FRAME_DECODED ||
+ event_type == FRAME_PLAYOUT || event_type == PACKET_RECEIVED);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.h b/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.h
new file mode 100644
index 00000000000..84af7cbaf3f
--- /dev/null
+++ b/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber.h
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTCP_RECEIVER_RTCP_EVENT_SUBSCRIBER_H_
+#define MEDIA_CAST_RTCP_RECEIVER_RTCP_EVENT_SUBSCRIBER_H_
+
+#include <map>
+
+#include "base/threading/thread_checker.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/raw_event_subscriber.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+
+namespace media {
+namespace cast {
+
+// A RawEventSubscriber implementation with the following properties:
+// - Only processes raw event types that are relevant for sending from cast
+// receiver to cast sender via RTCP.
+// - Captures information to be sent over to RTCP from raw event logs into the
+// more compact RtcpEvent struct.
+// - Orders events by RTP timestamp with a multimap.
+// - Internally, the map is capped at a maximum size configurable by the caller.
+// The subscriber only keeps the most recent events (determined by RTP
+// timestamp) up to the size limit.
+class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
+ public:
+ typedef std::multimap<RtpTimestamp, RtcpEvent> RtcpEventMultiMap;
+
+ // |max_size_to_retain|: The object will keep up to |max_size_to_retain|
+ // events in the map. Once the threshold has been reached, the event with
+ // the smallest RTP timestamp will be removed.
+ // |type|: Determines whether the subscriber will process only audio or video
+ // events.
+ ReceiverRtcpEventSubscriber(const size_t max_size_to_retain,
+ EventMediaType type);
+
+ virtual ~ReceiverRtcpEventSubscriber();
+
+ // RawEventSubscriber implementation.
+ virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
+ virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+
+ // Assigns events collected to |rtcp_events| and clears them from this
+ // object.
+ void GetRtcpEventsAndReset(RtcpEventMultiMap* rtcp_events);
+
+ private:
+ // If |rtcp_events_.size()| exceeds |max_size_to_retain_|, removes the
+ // oldest entry (determined by RTP timestamp) so that the size is no
+ // greater than |max_size_to_retain_|.
+ void TruncateMapIfNeeded();
+
+ // Returns |true| if events of |event_type| and |media_type|
+ // should be processed.
+ bool ShouldProcessEvent(CastLoggingEvent event_type,
+ EventMediaType media_type);
+
+ const size_t max_size_to_retain_;
+ EventMediaType type_;
+
+ // The key should really be something more than just an RTP timestamp in
+ // order to differentiate between video and audio frames, but since the
+ // implementation doesn't mix audio and video frame events, using the RTP
+ // timestamp alone as the key is fine.
+ RtcpEventMultiMap rtcp_events_;
+
+ // Ensures methods are only called on the main thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReceiverRtcpEventSubscriber);
+};
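+
+// A minimal usage sketch (illustrative only; |logging| is assumed to be the
+// LoggingImpl owned by the CastEnvironment):
+//
+//   ReceiverRtcpEventSubscriber subscriber(256u, VIDEO_EVENT);
+//   logging->AddRawEventSubscriber(&subscriber);
+//   ...
+//   ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+//   subscriber.GetRtcpEventsAndReset(&rtcp_events);
+//   logging->RemoveRawEventSubscriber(&subscriber);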
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTCP_RECEIVER_RTCP_EVENT_SUBSCRIBER_H_
diff --git a/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc b/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc
new file mode 100644
index 00000000000..e0d0f172160
--- /dev/null
+++ b/chromium/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc
@@ -0,0 +1,131 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+
+const size_t kMaxEventEntries = 10u;
+const int64 kDelayMs = 20L;
+
+} // namespace
+
+class ReceiverRtcpEventSubscriberTest : public ::testing::Test {
+ protected:
+ ReceiverRtcpEventSubscriberTest()
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)) {}
+
+ virtual ~ReceiverRtcpEventSubscriberTest() {}
+
+ virtual void TearDown() OVERRIDE {
+ if (event_subscriber_) {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(
+ event_subscriber_.get());
+ }
+ }
+
+ void Init(EventMediaType type) {
+ event_subscriber_.reset(
+ new ReceiverRtcpEventSubscriber(kMaxEventEntries, type));
+ cast_environment_->Logging()->AddRawEventSubscriber(
+ event_subscriber_.get());
+ }
+
+ void InsertEvents() {
+ // Video events
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ testing_clock_->NowTicks(), FRAME_PLAYOUT, VIDEO_EVENT,
+ /*rtp_timestamp*/ 100u, /*frame_id*/ 2u,
+ base::TimeDelta::FromMilliseconds(kDelayMs));
+ cast_environment_->Logging()->InsertFrameEvent(
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ 200u, /*frame_id*/ 1u);
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(), PACKET_RECEIVED, VIDEO_EVENT,
+ /*rtp_timestamp */ 200u, /*frame_id*/ 2u, /*packet_id*/ 1u,
+ /*max_packet_id*/ 10u, /*size*/ 1024u);
+
+ // Audio events
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ testing_clock_->NowTicks(), FRAME_PLAYOUT, AUDIO_EVENT,
+ /*rtp_timestamp*/ 300u, /*frame_id*/ 4u,
+ base::TimeDelta::FromMilliseconds(kDelayMs));
+ cast_environment_->Logging()->InsertFrameEvent(
+ testing_clock_->NowTicks(), FRAME_DECODED, AUDIO_EVENT,
+ /*rtp_timestamp*/ 400u, /*frame_id*/ 3u);
+ cast_environment_->Logging()->InsertPacketEvent(
+ testing_clock_->NowTicks(), PACKET_RECEIVED, AUDIO_EVENT,
+ /*rtp_timestamp */ 400u, /*frame_id*/ 5u, /*packet_id*/ 1u,
+ /*max_packet_id*/ 10u, /*size*/ 128u);
+
+ // Unrelated events
+ cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
+ FRAME_CAPTURE_END,
+ VIDEO_EVENT,
+ /*rtp_timestamp*/ 100u,
+ /*frame_id*/ 1u);
+ cast_environment_->Logging()->InsertFrameEvent(testing_clock_->NowTicks(),
+ FRAME_CAPTURE_END,
+ AUDIO_EVENT,
+ /*rtp_timestamp*/ 100u,
+ /*frame_id*/ 1u);
+ }
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<ReceiverRtcpEventSubscriber> event_subscriber_;
+};
+
+TEST_F(ReceiverRtcpEventSubscriberTest, LogVideoEvents) {
+ Init(VIDEO_EVENT);
+
+ InsertEvents();
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber_->GetRtcpEventsAndReset(&rtcp_events);
+ EXPECT_EQ(3u, rtcp_events.size());
+}
+
+TEST_F(ReceiverRtcpEventSubscriberTest, LogAudioEvents) {
+ Init(AUDIO_EVENT);
+
+ InsertEvents();
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber_->GetRtcpEventsAndReset(&rtcp_events);
+ EXPECT_EQ(3u, rtcp_events.size());
+}
+
+TEST_F(ReceiverRtcpEventSubscriberTest, DropEventsWhenSizeExceeded) {
+ Init(VIDEO_EVENT);
+
+ for (uint32 i = 1u; i <= 10u; ++i) {
+ cast_environment_->Logging()->InsertFrameEvent(
+ testing_clock_->NowTicks(), FRAME_DECODED, VIDEO_EVENT,
+ /*rtp_timestamp*/ i * 10, /*frame_id*/ i);
+ }
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber_->GetRtcpEventsAndReset(&rtcp_events);
+ EXPECT_EQ(10u, rtcp_events.size());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp.cc b/chromium/media/cast/rtcp/rtcp.cc
index 4ea4bc99ba9..480b2ac3990 100644
--- a/chromium/media/cast/rtcp/rtcp.cc
+++ b/chromium/media/cast/rtcp/rtcp.cc
@@ -4,6 +4,7 @@
#include "media/cast/rtcp/rtcp.h"
+#include "base/big_endian.h"
#include "base/rand_util.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
@@ -12,28 +13,22 @@
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_sender.h"
#include "media/cast/rtcp/rtcp_utility.h"
-#include "net/base/big_endian.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace media {
namespace cast {
static const int kMaxRttMs = 10000; // 10 seconds.
-
-// Time limit for received RTCP messages when we stop using it for lip-sync.
-static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
+static const int kMaxDelay = 2000; // 2 seconds.
class LocalRtcpRttFeedback : public RtcpRttFeedback {
public:
- explicit LocalRtcpRttFeedback(Rtcp* rtcp)
- : rtcp_(rtcp) {
- }
+ explicit LocalRtcpRttFeedback(Rtcp* rtcp) : rtcp_(rtcp) {}
virtual void OnReceivedDelaySinceLastReport(
- uint32 receivers_ssrc,
- uint32 last_report,
+ uint32 receivers_ssrc, uint32 last_report,
uint32 delay_since_last_report) OVERRIDE {
- rtcp_->OnReceivedDelaySinceLastReport(receivers_ssrc,
- last_report,
+ rtcp_->OnReceivedDelaySinceLastReport(receivers_ssrc, last_report,
delay_since_last_report);
}
@@ -41,31 +36,14 @@ class LocalRtcpRttFeedback : public RtcpRttFeedback {
Rtcp* rtcp_;
};
-RtcpCastMessage::RtcpCastMessage(uint32 media_ssrc)
- : media_ssrc_(media_ssrc) {}
-
-RtcpCastMessage::~RtcpCastMessage() {}
-
-RtcpNackMessage::RtcpNackMessage() {}
-RtcpNackMessage::~RtcpNackMessage() {}
-
-RtcpRembMessage::RtcpRembMessage() {}
-RtcpRembMessage::~RtcpRembMessage() {}
-
-RtcpReceiverFrameLogMessage::RtcpReceiverFrameLogMessage(uint32 timestamp)
- : rtp_timestamp_(timestamp) {}
-
-RtcpReceiverFrameLogMessage::~RtcpReceiverFrameLogMessage() {}
-
class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
public:
LocalRtcpReceiverFeedback(Rtcp* rtcp,
- scoped_refptr<CastEnvironment> cast_environment)
- : rtcp_(rtcp), cast_environment_(cast_environment) {
- }
+ scoped_refptr<CastEnvironment> cast_environment)
+ : rtcp_(rtcp), cast_environment_(cast_environment) {}
virtual void OnReceivedSenderReport(
- const RtcpSenderInfo& remote_sender_info) OVERRIDE {
+ const transport::RtcpSenderInfo& remote_sender_info) OVERRIDE {
rtcp_->OnReceivedNtp(remote_sender_info.ntp_seconds,
remote_sender_info.ntp_fraction);
if (remote_sender_info.send_packet_count != 0) {
@@ -85,80 +63,9 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
rtcp_->OnReceivedSendReportRequest();
}
- virtual void OnReceivedReceiverLog(
- const RtcpReceiverLogMessage& receiver_log) OVERRIDE {
- // Add received log messages into our log system.
- RtcpReceiverLogMessage::const_iterator it = receiver_log.begin();
-
- for (; it != receiver_log.end(); ++it) {
- uint32 rtp_timestamp = it->rtp_timestamp_;
-
- RtcpReceiverEventLogMessages::const_iterator event_it =
- it->event_log_messages_.begin();
- for (; event_it != it->event_log_messages_.end(); ++event_it) {
- // TODO(pwestin): we need to send in the event_it->event_timestamp to
- // the log system too.
- switch (event_it->type) {
- case kPacketReceived:
- cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
- rtp_timestamp, kFrameIdUnknown, event_it->packet_id, 0, 0);
- break;
- case kAckSent:
- case kAudioFrameDecoded:
- case kVideoFrameDecoded:
- cast_environment_->Logging()->InsertFrameEvent(event_it->type,
- rtp_timestamp, kFrameIdUnknown);
- break;
- case kAudioPlayoutDelay:
- case kVideoRenderDelay:
- cast_environment_->Logging()->InsertFrameEventWithDelay(
- event_it->type, rtp_timestamp, kFrameIdUnknown,
- event_it->delay_delta);
- break;
- default:
- VLOG(2) << "Received log message via RTCP that we did not expect: "
- << static_cast<int>(event_it->type);
- break;
- }
- }
- }
- }
-
- virtual void OnReceivedSenderLog(
- const RtcpSenderLogMessage& sender_log) OVERRIDE {
- RtcpSenderLogMessage::const_iterator it = sender_log.begin();
-
- for (; it != sender_log.end(); ++it) {
- uint32 rtp_timestamp = it->rtp_timestamp;
- CastLoggingEvent log_event = kUnknown;
-
- // These events are provided to know the status of frames that never
- // reached the receiver. The timing information for these events are not
- // relevant and is not sent over the wire.
- switch (it->frame_status) {
- case kRtcpSenderFrameStatusDroppedByFlowControl:
- // A frame that have been dropped by the flow control would have
- // kVideoFrameCaptured as its last event in the log.
- log_event = kVideoFrameCaptured;
- break;
- case kRtcpSenderFrameStatusDroppedByEncoder:
- // A frame that have been dropped by the encoder would have
- // kVideoFrameSentToEncoder as its last event in the log.
- log_event = kVideoFrameSentToEncoder;
- break;
- case kRtcpSenderFrameStatusSentToNetwork:
- // A frame that have be encoded is always sent to the network. We
- // do not add a new log entry for this.
- log_event = kVideoFrameEncoded;
- break;
- default:
- continue;
- }
- // TODO(pwestin): how do we handle the truncated rtp_timestamp?
- // Add received log messages into our log system.
- cast_environment_->Logging()->InsertFrameEvent(log_event, rtp_timestamp,
- kFrameIdUnknown);
- }
+ virtual void OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log)
+ OVERRIDE {
+ rtcp_->OnReceivedReceiverLog(receiver_log);
}
private:
@@ -168,36 +75,34 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
- PacedPacketSender* paced_packet_sender,
- RtpSenderStatistics* rtp_sender_statistics,
- RtpReceiverStatistics* rtp_receiver_statistics,
- RtcpMode rtcp_mode,
- const base::TimeDelta& rtcp_interval,
- uint32 local_ssrc,
- uint32 remote_ssrc,
- const std::string& c_name)
- : rtcp_interval_(rtcp_interval),
+ transport::CastTransportSender* const transport_sender,
+ transport::PacedPacketSender* paced_packet_sender,
+ RtpReceiverStatistics* rtp_receiver_statistics, RtcpMode rtcp_mode,
+ const base::TimeDelta& rtcp_interval, uint32 local_ssrc,
+ uint32 remote_ssrc, const std::string& c_name,
+ EventMediaType event_media_type)
+ : cast_environment_(cast_environment),
+ transport_sender_(transport_sender),
+ rtcp_interval_(rtcp_interval),
rtcp_mode_(rtcp_mode),
local_ssrc_(local_ssrc),
remote_ssrc_(remote_ssrc),
- rtp_sender_statistics_(rtp_sender_statistics),
+ c_name_(c_name),
+ event_media_type_(event_media_type),
rtp_receiver_statistics_(rtp_receiver_statistics),
- receiver_feedback_(new LocalRtcpReceiverFeedback(this, cast_environment)),
rtt_feedback_(new LocalRtcpRttFeedback(this)),
+ receiver_feedback_(new LocalRtcpReceiverFeedback(this, cast_environment)),
rtcp_sender_(new RtcpSender(cast_environment, paced_packet_sender,
local_ssrc, c_name)),
- last_report_received_(0),
- last_received_rtp_timestamp_(0),
- last_received_ntp_seconds_(0),
- last_received_ntp_fraction_(0),
+ last_report_truncated_ntp_(0),
+ local_clock_ahead_by_(ClockDriftSmoother::GetDefaultTimeConstant()),
+ lip_sync_rtp_timestamp_(0),
+ lip_sync_ntp_timestamp_(0),
min_rtt_(base::TimeDelta::FromMilliseconds(kMaxRttMs)),
- number_of_rtt_in_avg_(0),
- cast_environment_(cast_environment) {
- rtcp_receiver_.reset(new RtcpReceiver(cast_environment,
- sender_feedback,
+ number_of_rtt_in_avg_(0) {
+ rtcp_receiver_.reset(new RtcpReceiver(cast_environment, sender_feedback,
receiver_feedback_.get(),
- rtt_feedback_.get(),
- local_ssrc));
+ rtt_feedback_.get(), local_ssrc));
rtcp_receiver_->SetRemoteSSRC(remote_ssrc);
}
@@ -209,7 +114,8 @@ bool Rtcp::IsRtcpPacket(const uint8* packet, size_t length) {
if (length < kMinLengthOfRtcp) return false;
uint8 packet_type = packet[1];
- if (packet_type >= kPacketTypeLow && packet_type <= kPacketTypeHigh) {
+ if (packet_type >= transport::kPacketTypeLow &&
+ packet_type <= transport::kPacketTypeHigh) {
return true;
}
return false;
@@ -219,7 +125,8 @@ bool Rtcp::IsRtcpPacket(const uint8* packet, size_t length) {
uint32 Rtcp::GetSsrcOfSender(const uint8* rtcp_buffer, size_t length) {
DCHECK_GE(length, kMinLengthOfRtcp) << "Invalid RTCP packet";
uint32 ssrc_of_sender;
- net::BigEndianReader big_endian_reader(rtcp_buffer, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_buffer), length);
big_endian_reader.Skip(4); // Skip header
big_endian_reader.ReadU32(&ssrc_of_sender);
return ssrc_of_sender;
@@ -242,117 +149,145 @@ void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length) {
rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
}
-void Rtcp::SendRtcpFromRtpReceiver(const RtcpCastMessage* cast_message,
- RtcpReceiverLogMessage* receiver_log) {
+void Rtcp::SendRtcpFromRtpReceiver(
+ const RtcpCastMessage* cast_message,
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
uint32 packet_type_flags = 0;
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- RtcpReportBlock report_block;
+ transport::RtcpReportBlock report_block;
RtcpReceiverReferenceTimeReport rrtr;
+ // Attach our NTP to all RTCP packets; with this information a "smart" sender
+ // can make decisions based on how old the RTCP message is.
+ packet_type_flags |= transport::kRtcpRrtr;
+ ConvertTimeTicksToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
+ SaveLastSentNtpTime(now, rrtr.ntp_seconds, rrtr.ntp_fraction);
+
if (cast_message) {
- packet_type_flags |= RtcpSender::kRtcpCast;
- cast_environment_->Logging()->InsertGenericEvent(kAckSent,
- cast_message->ack_frame_id_);
+ packet_type_flags |= transport::kRtcpCast;
}
- if (receiver_log) {
- packet_type_flags |= RtcpSender::kRtcpReceiverLog;
+ if (rtcp_events) {
+ packet_type_flags |= transport::kRtcpReceiverLog;
}
if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
- packet_type_flags |= RtcpSender::kRtcpRr;
+ packet_type_flags |= transport::kRtcpRr;
- report_block.remote_ssrc = 0; // Not needed to set send side.
+ report_block.remote_ssrc = 0; // Not needed to set send side.
report_block.media_ssrc = remote_ssrc_; // SSRC of the RTP packet sender.
if (rtp_receiver_statistics_) {
rtp_receiver_statistics_->GetStatistics(
- &report_block.fraction_lost,
- &report_block.cumulative_lost,
- &report_block.extended_high_sequence_number,
- &report_block.jitter);
- cast_environment_->Logging()->InsertGenericEvent(kJitterMs,
- report_block.jitter);
- cast_environment_->Logging()->InsertGenericEvent(kPacketLoss,
- report_block.fraction_lost);
-
+ &report_block.fraction_lost, &report_block.cumulative_lost,
+ &report_block.extended_high_sequence_number, &report_block.jitter);
}
- report_block.last_sr = last_report_received_;
+ report_block.last_sr = last_report_truncated_ntp_;
if (!time_last_report_received_.is_null()) {
uint32 delay_seconds = 0;
uint32 delay_fraction = 0;
base::TimeDelta delta = now - time_last_report_received_;
- ConvertTimeToFractions(delta.InMicroseconds(),
- &delay_seconds,
+ ConvertTimeToFractions(delta.InMicroseconds(), &delay_seconds,
&delay_fraction);
report_block.delay_since_last_sr =
ConvertToNtpDiff(delay_seconds, delay_fraction);
} else {
report_block.delay_since_last_sr = 0;
}
-
- packet_type_flags |= RtcpSender::kRtcpRrtr;
- ConvertTimeTicksToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
- SaveLastSentNtpTime(now, rrtr.ntp_seconds, rrtr.ntp_fraction);
UpdateNextTimeToSendRtcp();
}
rtcp_sender_->SendRtcpFromRtpReceiver(packet_type_flags,
&report_block,
&rrtr,
cast_message,
- receiver_log);
+ rtcp_events,
+ target_delay_ms_);
}
-void Rtcp::SendRtcpFromRtpSender(
- RtcpSenderLogMessage* sender_log_message) {
- uint32 packet_type_flags = RtcpSender::kRtcpSr;
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
-
- if (sender_log_message) {
- packet_type_flags |= RtcpSender::kRtcpSenderLog;
- }
-
- RtcpSenderInfo sender_info;
- if (rtp_sender_statistics_) {
- rtp_sender_statistics_->GetStatistics(now, &sender_info);
- } else {
- memset(&sender_info, 0, sizeof(sender_info));
- }
- SaveLastSentNtpTime(now, sender_info.ntp_seconds, sender_info.ntp_fraction);
-
- RtcpDlrrReportBlock dlrr;
+void Rtcp::SendRtcpFromRtpSender(base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp) {
+ DCHECK(transport_sender_);
+ uint32 packet_type_flags = transport::kRtcpSr;
+ uint32 current_ntp_seconds = 0;
+ uint32 current_ntp_fractions = 0;
+ ConvertTimeTicksToNtp(current_time, &current_ntp_seconds,
+ &current_ntp_fractions);
+ SaveLastSentNtpTime(current_time, current_ntp_seconds,
+ current_ntp_fractions);
+
+ transport::RtcpDlrrReportBlock dlrr;
if (!time_last_report_received_.is_null()) {
- packet_type_flags |= RtcpSender::kRtcpDlrr;
- dlrr.last_rr = last_report_received_;
+ packet_type_flags |= transport::kRtcpDlrr;
+ dlrr.last_rr = last_report_truncated_ntp_;
uint32 delay_seconds = 0;
uint32 delay_fraction = 0;
- base::TimeDelta delta = now - time_last_report_received_;
- ConvertTimeToFractions(delta.InMicroseconds(),
- &delay_seconds,
+ base::TimeDelta delta = current_time - time_last_report_received_;
+ ConvertTimeToFractions(delta.InMicroseconds(), &delay_seconds,
&delay_fraction);
dlrr.delay_since_last_rr = ConvertToNtpDiff(delay_seconds, delay_fraction);
}
- rtcp_sender_->SendRtcpFromRtpSender(packet_type_flags,
- &sender_info,
- &dlrr,
- sender_log_message);
+ transport_sender_->SendRtcpFromRtpSender(
+ packet_type_flags, current_ntp_seconds, current_ntp_fractions,
+ current_time_as_rtp_timestamp, dlrr, local_ssrc_, c_name_);
UpdateNextTimeToSendRtcp();
}
void Rtcp::OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction) {
- last_report_received_ = (ntp_seconds << 16) + (ntp_fraction >> 16);
+ last_report_truncated_ntp_ = ConvertToNtpDiff(ntp_seconds, ntp_fraction);
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
time_last_report_received_ = now;
+
+ // TODO(miu): This clock offset calculation does not account for packet
+ // transit time over the network. End2EndTest.EvilNetwork confirms that this
+ // is a very significant source of error here. Fix this along with
+ // the RTT clean-up.
+ const base::TimeDelta measured_offset =
+ now - ConvertNtpToTimeTicks(ntp_seconds, ntp_fraction);
+ local_clock_ahead_by_.Update(now, measured_offset);
+ if (measured_offset < local_clock_ahead_by_.Current()) {
+ // Logically, the minimum offset between the clocks has to be the correct
+ // one. For example, the time it took to transmit the current report may
+ // have been lower than usual, and so some of the error introduced by the
+ // transmission time can be eliminated.
+ local_clock_ahead_by_.Reset(now, measured_offset);
+ }
+ VLOG(1) << "Local clock is ahead of the remote clock by: "
+ << "measured=" << measured_offset.InMicroseconds() << " usec, "
+ << "filtered=" << local_clock_ahead_by_.Current().InMicroseconds()
+ << " usec.";
}
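
The minimum-offset rule in the comment above has a simple justification: every measured offset equals the true clock offset plus that report's (unknown, nonnegative) network transit time, so the smallest measurement is always the closest to the truth. A toy standalone model (this is not the ClockDriftSmoother API, whose behavior is only inferred here from its call sites):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical numbers: the local clock is truly 2500 usec ahead, and
      // four reports arrive with varying one-way transit delays.
      const int64_t true_offset_us = 2500;
      const int64_t transit_us[] = {800, 1200, 150, 900};
      int64_t best = INT64_MAX;
      for (const int64_t t : transit_us)
        best = std::min(best, true_offset_us + t);
      // Prints 2650: the report with the smallest transit delay wins.
      std::printf("best estimate: %lld usec\n", static_cast<long long>(best));
      return 0;
    }
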
-void Rtcp::OnReceivedLipSyncInfo(uint32 rtp_timestamp,
- uint32 ntp_seconds,
+void Rtcp::OnReceivedLipSyncInfo(uint32 rtp_timestamp, uint32 ntp_seconds,
uint32 ntp_fraction) {
- last_received_rtp_timestamp_ = rtp_timestamp;
- last_received_ntp_seconds_ = ntp_seconds;
- last_received_ntp_fraction_ = ntp_fraction;
+ if (ntp_seconds == 0) {
+ NOTREACHED();
+ return;
+ }
+ lip_sync_rtp_timestamp_ = rtp_timestamp;
+ lip_sync_ntp_timestamp_ =
+ (static_cast<uint64>(ntp_seconds) << 32) | ntp_fraction;
+}
+
+bool Rtcp::GetLatestLipSyncTimes(uint32* rtp_timestamp,
+ base::TimeTicks* reference_time) const {
+ if (!lip_sync_ntp_timestamp_)
+ return false;
+
+ const base::TimeTicks local_reference_time =
+ ConvertNtpToTimeTicks(static_cast<uint32>(lip_sync_ntp_timestamp_ >> 32),
+ static_cast<uint32>(lip_sync_ntp_timestamp_)) +
+ local_clock_ahead_by_.Current();
+
+ // Sanity-check: Getting regular lip sync updates?
+ DCHECK((cast_environment_->Clock()->NowTicks() - local_reference_time) <
+ base::TimeDelta::FromMinutes(1));
+
+ *rtp_timestamp = lip_sync_rtp_timestamp_;
+ *reference_time = local_reference_time;
+ return true;
}
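
Rejecting ntp_seconds == 0 in OnReceivedLipSyncInfo does double duty: besides filtering out bogus reports, it guarantees that a zero lip_sync_ntp_timestamp_ can only mean "no update received yet", which is exactly the test GetLatestLipSyncTimes performs above. A standalone round-trip sketch of the 64-bit packing (names are illustrative):

    #include <cassert>
    #include <cstdint>

    uint64_t PackNtp(uint32_t seconds, uint32_t fraction) {
      return (static_cast<uint64_t>(seconds) << 32) | fraction;
    }

    void UnpackNtp(uint64_t packed, uint32_t* seconds, uint32_t* fraction) {
      *seconds = static_cast<uint32_t>(packed >> 32);  // High half.
      *fraction = static_cast<uint32_t>(packed);       // Low half.
    }

    int main() {
      uint32_t s = 0, f = 0;
      UnpackNtp(PackNtp(0x12345678u, 0x9ABCDEF0u), &s, &f);
      assert(s == 0x12345678u && f == 0x9ABCDEF0u);
      return 0;
    }
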
void Rtcp::OnReceivedSendReportRequest() {
@@ -362,35 +297,13 @@ void Rtcp::OnReceivedSendReportRequest() {
next_time_to_send_rtcp_ = now;
}
-bool Rtcp::RtpTimestampInSenderTime(int frequency, uint32 rtp_timestamp,
- base::TimeTicks* rtp_timestamp_in_ticks) const {
- if (last_received_ntp_seconds_ == 0) return false;
-
- int wrap = CheckForWrapAround(rtp_timestamp, last_received_rtp_timestamp_);
- int64 rtp_timestamp_int64 = rtp_timestamp;
- int64 last_received_rtp_timestamp_int64 = last_received_rtp_timestamp_;
-
- if (wrap == 1) {
- rtp_timestamp_int64 += (1LL << 32);
- } else if (wrap == -1) {
- last_received_rtp_timestamp_int64 += (1LL << 32);
- }
- // Time since the last RTCP message.
- // Note that this can be negative since we can compare a rtp timestamp from
- // a frame older than the last received RTCP message.
- int64 rtp_timestamp_diff =
- rtp_timestamp_int64 - last_received_rtp_timestamp_int64;
-
- int frequency_khz = frequency / 1000;
- int64 rtp_time_diff_ms = rtp_timestamp_diff / frequency_khz;
-
- // Sanity check.
- if (abs(rtp_time_diff_ms) > kMaxDiffSinceReceivedRtcpMs) return false;
+void Rtcp::SetCastReceiverEventHistorySize(size_t size) {
+ rtcp_receiver_->SetCastReceiverEventHistorySize(size);
+}
- *rtp_timestamp_in_ticks = ConvertNtpToTimeTicks(last_received_ntp_seconds_,
- last_received_ntp_fraction_) +
- base::TimeDelta::FromMilliseconds(rtp_time_diff_ms);
- return true;
+void Rtcp::SetTargetDelay(base::TimeDelta target_delay) {
+ DCHECK(target_delay.InMilliseconds() < kMaxDelay);
+ target_delay_ms_ = static_cast<uint16>(target_delay.InMilliseconds());
}
void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
@@ -401,8 +314,8 @@ void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
return; // Feedback on another report.
}
- base::TimeDelta sender_delay = cast_environment_->Clock()->NowTicks()
- - it->second;
+ base::TimeDelta sender_delay =
+ cast_environment_->Clock()->NowTicks() - it->second;
UpdateRtt(sender_delay, ConvertFromNtpDiff(delay_since_last_report));
}
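
The UpdateRtt call above feeds the standard RTCP round-trip measurement: the sender knows the total wall time between emitting a report and receiving feedback about it, the receiver reports how long it held the report before replying, and the difference is the network round trip. A worked example with hypothetical numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Timeline (ms): t=0 sender emits a report; t=30 it reaches the
      // receiver; t=70 the receiver replies (held it 40 ms); t=100 the
      // reply reaches the sender.
      const int64_t sender_delay_ms = 100;   // Send -> feedback received.
      const int64_t receiver_delay_ms = 40;  // Reported by the peer.
      // Prints 60 ms: the two 30 ms one-way trips.
      std::printf("rtt = %lld ms\n",
                  static_cast<long long>(sender_delay_ms - receiver_delay_ms));
      return 0;
    }
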
@@ -411,9 +324,8 @@ void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
uint32 last_ntp_fraction) {
// Make sure |now| is always greater than the last element in
// |last_reports_sent_queue_|.
- if (!last_reports_sent_queue_.empty()) {
+ if (!last_reports_sent_queue_.empty())
DCHECK(now >= last_reports_sent_queue_.back().second);
- }
uint32 last_report = ConvertToNtpDiff(last_ntp_seconds, last_ntp_fraction);
last_reports_sent_map_[last_report] = now;
@@ -436,66 +348,85 @@ void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
const base::TimeDelta& receiver_delay) {
base::TimeDelta rtt = sender_delay - receiver_delay;
+ // TODO(miu): Find out why this must be >= 1 ms, and remove the fudge if it's
+ // bogus.
rtt = std::max(rtt, base::TimeDelta::FromMilliseconds(1));
rtt_ = rtt;
min_rtt_ = std::min(min_rtt_, rtt);
max_rtt_ = std::max(max_rtt_, rtt);
+ // TODO(miu): Replace "average for all time" with an EWMA, or suitable
+ // "average over recent past" mechanism.
if (number_of_rtt_in_avg_ != 0) {
- float ac = static_cast<float>(number_of_rtt_in_avg_);
- avg_rtt_ms_= ((ac / (ac + 1.0)) * avg_rtt_ms_) +
- ((1.0 / (ac + 1.0)) * rtt.InMilliseconds());
+ const double ac = static_cast<double>(number_of_rtt_in_avg_);
+ avg_rtt_ms_ = ((ac / (ac + 1.0)) * avg_rtt_ms_) +
+ ((1.0 / (ac + 1.0)) * rtt.InMillisecondsF());
} else {
- avg_rtt_ms_ = rtt.InMilliseconds();
+ avg_rtt_ms_ = rtt.InMillisecondsF();
}
number_of_rtt_in_avg_++;
}
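
The average above is a cumulative moving average: after n samples, avg_n = (n/(n+1))*avg_(n-1) + (1/(n+1))*x_n, so every sample ever taken carries equal weight — which is precisely what the TODO wants replaced. A sketch contrasting it with the EWMA the TODO suggests (the 0.125 smoothing factor is an assumption borrowed from the classic TCP RTT estimator, not from this code):

    // Cumulative moving average: all history weighted equally.
    double UpdateCumulativeAvg(double avg, int samples_so_far, double sample) {
      const double n = static_cast<double>(samples_so_far);
      return (n / (n + 1.0)) * avg + (1.0 / (n + 1.0)) * sample;
    }

    // Exponentially weighted moving average: recent samples dominate.
    double UpdateEwma(double avg, double sample, double alpha = 0.125) {
      return (1.0 - alpha) * avg + alpha * sample;
    }
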
-bool Rtcp::Rtt(base::TimeDelta* rtt,
- base::TimeDelta* avg_rtt,
- base::TimeDelta* min_rtt,
- base::TimeDelta* max_rtt) const {
+bool Rtcp::Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
+ base::TimeDelta* min_rtt, base::TimeDelta* max_rtt) const {
DCHECK(rtt) << "Invalid argument";
DCHECK(avg_rtt) << "Invalid argument";
DCHECK(min_rtt) << "Invalid argument";
DCHECK(max_rtt) << "Invalid argument";
if (number_of_rtt_in_avg_ == 0) return false;
- cast_environment_->Logging()->InsertGenericEvent(kRttMs,
- rtt->InMilliseconds());
*rtt = rtt_;
- *avg_rtt = base::TimeDelta::FromMilliseconds(avg_rtt_ms_);
+ *avg_rtt = base::TimeDelta::FromMillisecondsD(avg_rtt_ms_);
*min_rtt = min_rtt_;
*max_rtt = max_rtt_;
return true;
}
-int Rtcp::CheckForWrapAround(uint32 new_timestamp,
- uint32 old_timestamp) const {
- if (new_timestamp < old_timestamp) {
- // This difference should be less than -2^31 if we have had a wrap around
- // (e.g. |new_timestamp| = 1, |rtcp_rtp_timestamp| = 2^32 - 1). Since it is
- // cast to a int32_t, it should be positive.
- if (static_cast<int32>(new_timestamp - old_timestamp) > 0) {
- return 1; // Forward wrap around.
- }
- } else if (static_cast<int32>(old_timestamp - new_timestamp) > 0) {
- // This difference should be less than -2^31 if we have had a backward wrap
- // around. Since it is cast to a int32, it should be positive.
- return -1;
- }
- return 0;
-}
-
void Rtcp::UpdateNextTimeToSendRtcp() {
int random = base::RandInt(0, 999);
- base::TimeDelta time_to_next = (rtcp_interval_ / 2) +
- (rtcp_interval_ * random / 1000);
+ base::TimeDelta time_to_next =
+ (rtcp_interval_ / 2) + (rtcp_interval_ * random / 1000);
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
next_time_to_send_rtcp_ = now + time_to_next;
}
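
With random drawn uniformly from [0, 999], time_to_next lands uniformly in roughly [0.5, 1.5) times rtcp_interval_, so reports still average one per interval while the jitter keeps multiple endpoints from synchronizing their RTCP bursts. A standalone sketch of the same computation (std::mt19937 standing in for base::RandInt):

    #include <chrono>
    #include <random>

    std::chrono::milliseconds NextRtcpDelay(std::chrono::milliseconds interval,
                                            std::mt19937& rng) {
      std::uniform_int_distribution<int> dist(0, 999);
      // interval/2 + interval*random/1000: uniform over ~[0.5, 1.5)*interval.
      return interval / 2 + interval * dist(rng) / 1000;
    }
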
+void Rtcp::OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log) {
+ // Add received log messages into our log system.
+ RtcpReceiverLogMessage::const_iterator it = receiver_log.begin();
+ for (; it != receiver_log.end(); ++it) {
+ uint32 rtp_timestamp = it->rtp_timestamp_;
+
+ RtcpReceiverEventLogMessages::const_iterator event_it =
+ it->event_log_messages_.begin();
+ for (; event_it != it->event_log_messages_.end(); ++event_it) {
+ switch (event_it->type) {
+ case PACKET_RECEIVED:
+ cast_environment_->Logging()->InsertPacketEvent(
+ event_it->event_timestamp, event_it->type,
+ event_media_type_, rtp_timestamp,
+ kFrameIdUnknown, event_it->packet_id, 0, 0);
+ break;
+ case FRAME_ACK_SENT:
+ case FRAME_DECODED:
+ cast_environment_->Logging()->InsertFrameEvent(
+ event_it->event_timestamp, event_it->type, event_media_type_,
+ rtp_timestamp, kFrameIdUnknown);
+ break;
+ case FRAME_PLAYOUT:
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ event_it->event_timestamp, event_it->type, event_media_type_,
+ rtp_timestamp, kFrameIdUnknown, event_it->delay_delta);
+ break;
+ default:
+ VLOG(2) << "Received log message via RTCP that we did not expect: "
+ << static_cast<int>(event_it->type);
+ break;
+ }
+ }
+ }
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp.gyp b/chromium/media/cast/rtcp/rtcp.gyp
deleted file mode 100644
index 14119988c8e..00000000000
--- a/chromium/media/cast/rtcp/rtcp.gyp
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_rtcp',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'rtcp_defines.h',
- 'rtcp.h',
- 'rtcp.cc',
- 'rtcp_receiver.cc',
- 'rtcp_receiver.h',
- 'rtcp_sender.cc',
- 'rtcp_sender.h',
- 'rtcp_utility.cc',
- 'rtcp_utility.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/net/net.gyp:net',
- ],
- },
- {
- 'target_name': 'cast_rtcp_test',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'test_rtcp_packet_builder.cc',
- 'test_rtcp_packet_builder.h',
- ], # source
- 'dependencies': [
- 'cast_rtcp',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- ],
- },
- ],
-}
-
diff --git a/chromium/media/cast/rtcp/rtcp.h b/chromium/media/cast/rtcp/rtcp.h
index aa083a5a4dd..9d0184f9033 100644
--- a/chromium/media/cast/rtcp/rtcp.h
+++ b/chromium/media/cast/rtcp/rtcp.h
@@ -5,20 +5,23 @@
#ifndef MEDIA_CAST_RTCP_RTCP_H_
#define MEDIA_CAST_RTCP_RTCP_H_
-#include <list>
#include <map>
#include <queue>
-#include <set>
#include <string>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/base/clock_drift_smoother.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/cast_transport_sender.h"
+#include "media/cast/transport/pacing/paced_sender.h"
namespace media {
namespace cast {
@@ -40,14 +43,6 @@ class RtcpSenderFeedback {
virtual ~RtcpSenderFeedback() {}
};
-class RtpSenderStatistics {
- public:
- virtual void GetStatistics(const base::TimeTicks& now,
- RtcpSenderInfo* sender_info) = 0;
-
- virtual ~RtpSenderStatistics() {}
-};
-
class RtpReceiverStatistics {
public:
virtual void GetStatistics(uint8* fraction_lost,
@@ -60,16 +55,20 @@ class RtpReceiverStatistics {
class Rtcp {
public:
+ // Rtcp accepts two transports: CastTransportSender, to be used only by Cast
+ // senders, and PacedPacketSender, to be used only by Cast receivers and
+ // test applications.
Rtcp(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
- PacedPacketSender* paced_packet_sender,
- RtpSenderStatistics* rtp_sender_statistics,
+ transport::CastTransportSender* const transport_sender, // Send-side.
+ transport::PacedPacketSender* paced_packet_sender, // Receive side.
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
uint32 local_ssrc,
uint32 remote_ssrc,
- const std::string& c_name);
+ const std::string& c_name,
+ EventMediaType event_media_type);
virtual ~Rtcp();
@@ -78,35 +77,55 @@ class Rtcp {
static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
base::TimeTicks TimeToSendNextRtcpReport();
- // |sender_log_message| is optional; without it no log messages will be
- // attached to the RTCP report; instead a normal RTCP send report will be
- // sent.
- // Additionally if all messages in |sender_log_message| does
- // not fit in the packet the |sender_log_message| will contain the remaining
- // unsent messages.
- void SendRtcpFromRtpSender(RtcpSenderLogMessage* sender_log_message);
-
- // |cast_message| and |receiver_log| is optional; if |cast_message| is
+
+ // Send an RTCP sender report.
+ // |current_time| is the current time reported by a tick clock.
+ // |current_time_as_rtp_timestamp| is the corresponding RTP timestamp.
+ void SendRtcpFromRtpSender(base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp);
+
+ // |cast_message| and |rtcp_events| are optional; if |cast_message| is
// provided the RTCP receiver report will append a Cast message containing
- // Acks and Nacks; if |receiver_log| is provided the RTCP receiver report will
- // append the log messages. If no argument is set a normal RTCP receiver
- // report will be sent. Additionally if all messages in |receiver_log| does
- // not fit in the packet the |receiver_log| will contain the remaining unsent
- // messages.
- void SendRtcpFromRtpReceiver(const RtcpCastMessage* cast_message,
- RtcpReceiverLogMessage* receiver_log);
+ // Acks and Nacks; if |rtcp_events| is provided the RTCP receiver report
+ // will append the log messages.
+ void SendRtcpFromRtpReceiver(
+ const RtcpCastMessage* cast_message,
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events);
void IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length);
- bool Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
- base::TimeDelta* min_rtt, base::TimeDelta* max_rtt) const;
- bool RtpTimestampInSenderTime(int frequency,
- uint32 rtp_timestamp,
- base::TimeTicks* rtp_timestamp_in_ticks) const;
- protected:
- int CheckForWrapAround(uint32 new_timestamp,
- uint32 old_timestamp) const;
+ // TODO(miu): Clean up this method and downstream code: Only VideoSender uses
+ // this (for congestion control), and only the |rtt| and |avg_rtt| values, and
+ // it's not clear that any of the downstream code is doing the right thing
+ // with this data.
+ bool Rtt(base::TimeDelta* rtt,
+ base::TimeDelta* avg_rtt,
+ base::TimeDelta* min_rtt,
+ base::TimeDelta* max_rtt) const;
+
+ bool is_rtt_available() const { return number_of_rtt_in_avg_ > 0; }
+
+ // If available, returns true and sets the output arguments to the latest
+ // lip-sync timestamps gleaned from the sender reports. While the sender
+ // provides reference NTP times relative to its own wall clock, the
+ // |reference_time| returned here has been translated to the local
+ // CastEnvironment clock.
+ bool GetLatestLipSyncTimes(uint32* rtp_timestamp,
+ base::TimeTicks* reference_time) const;
+
+ // Set the history size to record Cast receiver events. The event history is
+ // used to remove duplicates. The history will store at most |size| events.
+ void SetCastReceiverEventHistorySize(size_t size);
+ // Update the target delay. Will be added to every report sent back to the
+ // sender.
+ // TODO(miu): Remove this deprecated functionality. The sender ignores this.
+ void SetTargetDelay(base::TimeDelta target_delay);
+
+ void OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log);
+
+ protected:
+ void OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction);
void OnReceivedLipSyncInfo(uint32 rtp_timestamp,
uint32 ntp_seconds,
uint32 ntp_fraction);
@@ -115,13 +134,6 @@ class Rtcp {
friend class LocalRtcpRttFeedback;
friend class LocalRtcpReceiverFeedback;
- void SendRtcp(const base::TimeTicks& now,
- uint32 packet_type_flags,
- uint32 media_ssrc,
- const RtcpCastMessage* cast_message);
-
- void OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction);
-
void OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
uint32 last_report,
uint32 delay_since_last_report);
@@ -133,17 +145,20 @@ class Rtcp {
void UpdateNextTimeToSendRtcp();
- void SaveLastSentNtpTime(const base::TimeTicks& now, uint32 last_ntp_seconds,
+ void SaveLastSentNtpTime(const base::TimeTicks& now,
+ uint32 last_ntp_seconds,
uint32 last_ntp_fraction);
scoped_refptr<CastEnvironment> cast_environment_;
+ transport::CastTransportSender* const transport_sender_;
const base::TimeDelta rtcp_interval_;
const RtcpMode rtcp_mode_;
const uint32 local_ssrc_;
const uint32 remote_ssrc_;
+ const std::string c_name_;
+ const EventMediaType event_media_type_;
// Not owned by this class.
- RtpSenderStatistics* const rtp_sender_statistics_;
RtpReceiverStatistics* const rtp_receiver_statistics_;
scoped_ptr<LocalRtcpRttFeedback> rtt_feedback_;
@@ -154,18 +169,33 @@ class Rtcp {
base::TimeTicks next_time_to_send_rtcp_;
RtcpSendTimeMap last_reports_sent_map_;
RtcpSendTimeQueue last_reports_sent_queue_;
+
+ // The truncated (i.e., 64-->32-bit) NTP timestamp provided in the last report
+ // from the remote peer, along with the local time at which the report was
+ // received. These values are used for ping-pong'ing NTP timestamps between
+ // the peers so that they can estimate the network's round-trip time.
+ uint32 last_report_truncated_ntp_;
base::TimeTicks time_last_report_received_;
- uint32 last_report_received_;
- uint32 last_received_rtp_timestamp_;
- uint32 last_received_ntp_seconds_;
- uint32 last_received_ntp_fraction_;
+ // Maintains a smoothed offset between the local clock and the remote clock.
+ // Calling this member's Current() method is only valid if
+ // |time_last_report_received_| is not "null."
+ ClockDriftSmoother local_clock_ahead_by_;
+
+ // Latest "lip sync" info from the sender. The sender provides the RTP
+ // timestamp of some frame of its choosing and also a corresponding reference
+ // NTP timestamp sampled from a clock common to all media streams. It is
+ // expected that the sender will update this data regularly and in a timely
+ // manner (e.g., about once per second).
+ uint32 lip_sync_rtp_timestamp_;
+ uint64 lip_sync_ntp_timestamp_;
base::TimeDelta rtt_;
base::TimeDelta min_rtt_;
base::TimeDelta max_rtt_;
int number_of_rtt_in_avg_;
- float avg_rtt_ms_;
+ double avg_rtt_ms_;
+ uint16 target_delay_ms_;
DISALLOW_COPY_AND_ASSIGN(Rtcp);
};
diff --git a/chromium/media/cast/rtcp/rtcp_defines.cc b/chromium/media/cast/rtcp/rtcp_defines.cc
new file mode 100644
index 00000000000..214100d4d9e
--- /dev/null
+++ b/chromium/media/cast/rtcp/rtcp_defines.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/rtcp_defines.h"
+
+#include "media/cast/logging/logging_defines.h"
+
+namespace media {
+namespace cast {
+
+RtcpCastMessage::RtcpCastMessage(uint32 media_ssrc)
+ : media_ssrc_(media_ssrc), ack_frame_id_(0u), target_delay_ms_(0) {}
+RtcpCastMessage::~RtcpCastMessage() {}
+
+void RtcpCastMessage::Copy(const RtcpCastMessage& cast_message) {
+ media_ssrc_ = cast_message.media_ssrc_;
+ ack_frame_id_ = cast_message.ack_frame_id_;
+ target_delay_ms_ = cast_message.target_delay_ms_;
+ missing_frames_and_packets_ = cast_message.missing_frames_and_packets_;
+}
+
+RtcpReceiverEventLogMessage::RtcpReceiverEventLogMessage()
+ : type(UNKNOWN), packet_id(0u) {}
+RtcpReceiverEventLogMessage::~RtcpReceiverEventLogMessage() {}
+
+RtcpReceiverFrameLogMessage::RtcpReceiverFrameLogMessage(uint32 timestamp)
+ : rtp_timestamp_(timestamp) {}
+RtcpReceiverFrameLogMessage::~RtcpReceiverFrameLogMessage() {}
+
+RtcpRpsiMessage::RtcpRpsiMessage()
+ : remote_ssrc(0u), payload_type(0u), picture_id(0u) {}
+RtcpRpsiMessage::~RtcpRpsiMessage() {}
+
+RtcpNackMessage::RtcpNackMessage() : remote_ssrc(0u) {}
+RtcpNackMessage::~RtcpNackMessage() {}
+
+RtcpRembMessage::RtcpRembMessage() : remb_bitrate(0u) {}
+RtcpRembMessage::~RtcpRembMessage() {}
+
+RtcpReceiverReferenceTimeReport::RtcpReceiverReferenceTimeReport()
+ : remote_ssrc(0u), ntp_seconds(0u), ntp_fraction(0u) {}
+RtcpReceiverReferenceTimeReport::~RtcpReceiverReferenceTimeReport() {}
+
+RtcpEvent::RtcpEvent() : type(UNKNOWN), packet_id(0u) {}
+RtcpEvent::~RtcpEvent() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_defines.h b/chromium/media/cast/rtcp/rtcp_defines.h
index 0277bd1feaf..31795648c64 100644
--- a/chromium/media/cast/rtcp/rtcp_defines.h
+++ b/chromium/media/cast/rtcp/rtcp_defines.h
@@ -5,45 +5,42 @@
#ifndef MEDIA_CAST_RTCP_RTCP_DEFINES_H_
#define MEDIA_CAST_RTCP_RTCP_DEFINES_H_
-#include <list>
#include <map>
#include <set>
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/logging/logging_defines.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace media {
namespace cast {
+static const size_t kRtcpCastLogHeaderSize = 12;
+static const size_t kRtcpReceiverFrameLogSize = 8;
+static const size_t kRtcpReceiverEventLogSize = 4;
+
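
The three constants above suggest the wire-format budget for a receiver log: assuming, from the names, a 12-byte log header, 8 bytes per frame entry, and 4 bytes per event entry, the serialized size is easy to estimate (a sketch; the actual serializer lives in the RTCP sender code, which is not shown here):

    #include <cstddef>

    // Assumed layout: header + one frame record per frame + one event
    // record per event.
    size_t ReceiverLogBytes(size_t frames, size_t events) {
      return 12 + 8 * frames + 4 * events;
    }
    // e.g. one frame carrying three events: 12 + 8 + 12 = 32 bytes.
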
// Handle the per frame ACK and NACK messages.
class RtcpCastMessage {
public:
explicit RtcpCastMessage(uint32 media_ssrc);
~RtcpCastMessage();
+ void Copy(const RtcpCastMessage& cast_message);
+
uint32 media_ssrc_;
uint32 ack_frame_id_;
+ uint16 target_delay_ms_;
MissingFramesAndPacketsMap missing_frames_and_packets_;
-};
-
-// Log messages form sender to receiver.
-enum RtcpSenderFrameStatus {
- kRtcpSenderFrameStatusUnknown = 0,
- kRtcpSenderFrameStatusDroppedByEncoder = 1,
- kRtcpSenderFrameStatusDroppedByFlowControl = 2,
- kRtcpSenderFrameStatusSentToNetwork = 3,
-};
-struct RtcpSenderFrameLogMessage {
- RtcpSenderFrameStatus frame_status;
- uint32 rtp_timestamp;
+ DISALLOW_COPY_AND_ASSIGN(RtcpCastMessage);
};
-typedef std::list<RtcpSenderFrameLogMessage> RtcpSenderLogMessage;
-
// Log messages from receiver to sender.
struct RtcpReceiverEventLogMessage {
+ RtcpReceiverEventLogMessage();
+ ~RtcpReceiverEventLogMessage();
+
CastLoggingEvent type;
base::TimeTicks event_timestamp;
base::TimeDelta delay_delta;
@@ -52,99 +49,84 @@ struct RtcpReceiverEventLogMessage {
typedef std::list<RtcpReceiverEventLogMessage> RtcpReceiverEventLogMessages;
-class RtcpReceiverFrameLogMessage {
- public:
+struct RtcpReceiverFrameLogMessage {
explicit RtcpReceiverFrameLogMessage(uint32 rtp_timestamp);
~RtcpReceiverFrameLogMessage();
uint32 rtp_timestamp_;
RtcpReceiverEventLogMessages event_log_messages_;
-};
-typedef std::list<RtcpReceiverFrameLogMessage> RtcpReceiverLogMessage;
-
-struct RtcpSenderInfo {
- // First three members are used for lipsync.
- // First two members are used for rtt.
- uint32 ntp_seconds;
- uint32 ntp_fraction;
- uint32 rtp_timestamp;
- uint32 send_packet_count;
- size_t send_octet_count;
+ // TODO(mikhal): Investigate the best way to allow adding
+ // DISALLOW_COPY_AND_ASSIGN, as it currently conflicts with the
+ // implementation, and possible changes would have a big impact on the design.
};
-struct RtcpReportBlock {
- uint32 remote_ssrc; // SSRC of sender of this report.
- uint32 media_ssrc; // SSRC of the RTP packet sender.
- uint8 fraction_lost;
- uint32 cumulative_lost; // 24 bits valid.
- uint32 extended_high_sequence_number;
- uint32 jitter;
- uint32 last_sr;
- uint32 delay_since_last_sr;
-};
+typedef std::list<RtcpReceiverFrameLogMessage> RtcpReceiverLogMessage;
struct RtcpRpsiMessage {
+ RtcpRpsiMessage();
+ ~RtcpRpsiMessage();
+
uint32 remote_ssrc;
uint8 payload_type;
uint64 picture_id;
};
-class RtcpNackMessage {
- public:
+struct RtcpNackMessage {
RtcpNackMessage();
~RtcpNackMessage();
uint32 remote_ssrc;
std::list<uint16> nack_list;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpNackMessage);
};
-class RtcpRembMessage {
- public:
+struct RtcpRembMessage {
RtcpRembMessage();
~RtcpRembMessage();
uint32 remb_bitrate;
std::list<uint32> remb_ssrcs;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpRembMessage);
};
struct RtcpReceiverReferenceTimeReport {
+ RtcpReceiverReferenceTimeReport();
+ ~RtcpReceiverReferenceTimeReport();
+
uint32 remote_ssrc;
uint32 ntp_seconds;
uint32 ntp_fraction;
};
-struct RtcpDlrrReportBlock {
- uint32 last_rr;
- uint32 delay_since_last_rr;
-};
-
-inline bool operator==(RtcpReportBlock lhs, RtcpReportBlock rhs) {
- return lhs.remote_ssrc == rhs.remote_ssrc &&
- lhs.media_ssrc == rhs.media_ssrc &&
- lhs.fraction_lost == rhs.fraction_lost &&
- lhs.cumulative_lost == rhs.cumulative_lost &&
- lhs.extended_high_sequence_number == rhs.extended_high_sequence_number &&
- lhs.jitter == rhs.jitter &&
- lhs.last_sr == rhs.last_sr &&
- lhs.delay_since_last_sr == rhs.delay_since_last_sr;
-}
-
-inline bool operator==(RtcpSenderInfo lhs, RtcpSenderInfo rhs) {
- return lhs.ntp_seconds == rhs.ntp_seconds &&
- lhs.ntp_fraction == rhs.ntp_fraction &&
- lhs.rtp_timestamp == rhs.rtp_timestamp &&
- lhs.send_packet_count == rhs.send_packet_count &&
- lhs.send_octet_count == rhs.send_octet_count;
-}
-
inline bool operator==(RtcpReceiverReferenceTimeReport lhs,
RtcpReceiverReferenceTimeReport rhs) {
return lhs.remote_ssrc == rhs.remote_ssrc &&
- lhs.ntp_seconds == rhs.ntp_seconds &&
- lhs.ntp_fraction == rhs.ntp_fraction;
+ lhs.ntp_seconds == rhs.ntp_seconds &&
+ lhs.ntp_fraction == rhs.ntp_fraction;
}
+// Struct used by raw event subscribers as an intermediate format before
+// sending off to the other side via RTCP.
+// (i.e., {Sender,Receiver}RtcpEventSubscriber)
+struct RtcpEvent {
+ RtcpEvent();
+ ~RtcpEvent();
+
+ CastLoggingEvent type;
+
+ // Time of event logged.
+ base::TimeTicks timestamp;
+
+ // Render/playout delay. Only set for FRAME_PLAYOUT events.
+ base::TimeDelta delay_delta;
+
+ // Only set for packet events.
+ uint16 packet_id;
+};
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_receiver.cc b/chromium/media/cast/rtcp/rtcp_receiver.cc
index 152ebc00d7b..3be8e921c46 100644
--- a/chromium/media/cast/rtcp/rtcp_receiver.cc
+++ b/chromium/media/cast/rtcp/rtcp_receiver.cc
@@ -6,50 +6,31 @@
#include "base/logging.h"
#include "media/cast/rtcp/rtcp_utility.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace {
-media::cast::CastLoggingEvent TranslateToLogEventFromWireFormat(uint8 event) {
- switch (event) {
- case 1:
- return media::cast::kAckSent;
- case 2:
- return media::cast::kAudioPlayoutDelay;
- case 3:
- return media::cast::kAudioFrameDecoded;
- case 4:
- return media::cast::kVideoFrameDecoded;
- case 5:
- return media::cast::kVideoRenderDelay;
- case 6:
- return media::cast::kPacketReceived;
- default:
- // If the sender adds new log messages we will end up here until we add
- // the new messages in the receiver.
- VLOG(1) << "Unexpected log message received: " << static_cast<int>(event);
- NOTREACHED();
- return media::cast::kUnknown;
- }
-}
-
-media::cast::RtcpSenderFrameStatus TranslateToFrameStatusFromWireFormat(
- uint8 status) {
- switch (status) {
- case 0:
- return media::cast::kRtcpSenderFrameStatusUnknown;
- case 1:
- return media::cast::kRtcpSenderFrameStatusDroppedByEncoder;
- case 2:
- return media::cast::kRtcpSenderFrameStatusDroppedByFlowControl;
- case 3:
- return media::cast::kRtcpSenderFrameStatusSentToNetwork;
- default:
- // If the sender adds new log messages we will end up here until we add
- // the new messages in the receiver.
- NOTREACHED();
- VLOG(1) << "Unexpected status received: " << static_cast<int>(status);
- return media::cast::kRtcpSenderFrameStatusUnknown;
- }
+// A receiver frame event is identified by frame RTP timestamp, event timestamp
+// and event type.
+// A receiver packet event is identified by all of the above plus packet id.
+// The key format is as follows:
+// First uint64 (bit 0 = least significant):
+// bits 0-31: RTP timestamp.
+// bits 32-47: packet ID if packet event, 0 otherwise.
+// bits 48-51: event type ID.
+// bits 52-63: zeroes (unused).
+// Second uint64:
+// bits 0-63: event TimeTicks internal value.
+std::pair<uint64, uint64> GetReceiverEventKey(
+ uint32 frame_rtp_timestamp, const base::TimeTicks& event_timestamp,
+ uint8 event_type, uint16 packet_id_or_zero) {
+ uint64 value1 = event_type;
+ value1 <<= 16;
+ value1 |= packet_id_or_zero;
+ value1 <<= 32;
+ value1 |= frame_rtp_timestamp;
+ return std::make_pair(
+ value1, static_cast<uint64>(event_timestamp.ToInternalValue()));
}
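
Decoding a key again is just the mirror image of the shifts above; a quick self-check (illustrative, not part of the Chromium code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Pack event type 7, packet ID 0x1234, RTP timestamp 0xDEADBEEF the
      // same way GetReceiverEventKey does, then pull the fields back out.
      const uint64_t value1 =
          (uint64_t{7} << 48) | (uint64_t{0x1234} << 32) | 0xDEADBEEFu;
      assert(static_cast<uint32_t>(value1) == 0xDEADBEEFu);    // RTP timestamp.
      assert(static_cast<uint16_t>(value1 >> 32) == 0x1234u);  // Packet ID.
      assert(static_cast<uint8_t>(value1 >> 48) == 7u);        // Event type.
      return 0;
    }
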
} // namespace
@@ -67,12 +48,15 @@ RtcpReceiver::RtcpReceiver(scoped_refptr<CastEnvironment> cast_environment,
sender_feedback_(sender_feedback),
receiver_feedback_(receiver_feedback),
rtt_feedback_(rtt_feedback),
- cast_environment_(cast_environment) {}
+ cast_environment_(cast_environment),
+ receiver_event_history_size_(0) {}
RtcpReceiver::~RtcpReceiver() {}
-void RtcpReceiver::SetRemoteSSRC(uint32 ssrc) {
- remote_ssrc_ = ssrc;
+void RtcpReceiver::SetRemoteSSRC(uint32 ssrc) { remote_ssrc_ = ssrc; }
+
+void RtcpReceiver::SetCastReceiverEventHistorySize(size_t size) {
+ receiver_event_history_size_ = size;
}
void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
@@ -117,9 +101,6 @@ void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
case kRtcpApplicationSpecificCastReceiverLogCode:
HandleApplicationSpecificCastReceiverLog(rtcp_parser);
break;
- case kRtcpApplicationSpecificCastSenderLogCode:
- HandleApplicationSpecificCastSenderLog(rtcp_parser);
- break;
case kRtcpPayloadSpecificRembCode:
case kRtcpPayloadSpecificRembItemCode:
case kRtcpPayloadSpecificCastCode:
@@ -151,16 +132,15 @@ void RtcpReceiver::HandleSenderReport(RtcpParser* rtcp_parser) {
// Synchronization source identifier for the originator of this SR packet.
uint32 remote_ssrc = rtcp_field.sender_report.sender_ssrc;
- VLOG(1) << "Cast RTCP received SR from SSRC " << remote_ssrc;
+ VLOG(2) << "Cast RTCP received SR from SSRC " << remote_ssrc;
if (remote_ssrc_ == remote_ssrc) {
- RtcpSenderInfo remote_sender_info;
+ transport::RtcpSenderInfo remote_sender_info;
remote_sender_info.ntp_seconds =
rtcp_field.sender_report.ntp_most_significant;
remote_sender_info.ntp_fraction =
rtcp_field.sender_report.ntp_least_significant;
- remote_sender_info.rtp_timestamp =
- rtcp_field.sender_report.rtp_timestamp;
+ remote_sender_info.rtp_timestamp = rtcp_field.sender_report.rtp_timestamp;
remote_sender_info.send_packet_count =
rtcp_field.sender_report.sender_packet_count;
remote_sender_info.send_octet_count =
@@ -184,7 +164,7 @@ void RtcpReceiver::HandleReceiverReport(RtcpParser* rtcp_parser) {
uint32 remote_ssrc = rtcp_field.receiver_report.sender_ssrc;
- VLOG(1) << "Cast RTCP received RR from SSRC " << remote_ssrc;
+ VLOG(2) << "Cast RTCP received RR from SSRC " << remote_ssrc;
rtcp_field_type = rtcp_parser->Iterate();
while (rtcp_field_type == kRtcpReportBlockItemCode) {
@@ -211,13 +191,9 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
// This block is not for us ignore it.
return;
}
- VLOG(1) << "Cast RTCP received RB from SSRC " << remote_ssrc;
- cast_environment_->Logging()->InsertGenericEvent(kPacketLoss,
- rb.fraction_lost);
- cast_environment_->Logging()->InsertGenericEvent(kJitterMs,
- rb.jitter);
+ VLOG(2) << "Cast RTCP received RB from SSRC " << remote_ssrc;
- RtcpReportBlock report_block;
+ transport::RtcpReportBlock report_block;
report_block.remote_ssrc = remote_ssrc;
report_block.media_ssrc = rb.ssrc;
report_block.fraction_lost = rb.fraction_lost;
@@ -229,9 +205,8 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
report_block.delay_since_last_sr = rb.delay_last_sender_report;
if (rtt_feedback_) {
- rtt_feedback_->OnReceivedDelaySinceLastReport(rb.ssrc,
- rb.last_sender_report,
- rb.delay_last_sender_report);
+ rtt_feedback_->OnReceivedDelaySinceLastReport(
+ rb.ssrc, rb.last_sender_report, rb.delay_last_sender_report);
}
}
@@ -245,7 +220,7 @@ void RtcpReceiver::HandleSDES(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleSDESChunk(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
- VLOG(1) << "Cast RTCP received SDES with cname " << rtcp_field.c_name.name;
+ VLOG(2) << "Cast RTCP received SDES with cname " << rtcp_field.c_name.name;
}
void RtcpReceiver::HandleXr(RtcpParser* rtcp_parser) {
@@ -337,7 +312,7 @@ void RtcpReceiver::HandleBYE(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
uint32 remote_ssrc = rtcp_field.bye.sender_ssrc;
if (remote_ssrc_ == remote_ssrc) {
- VLOG(1) << "Cast RTCP received BYE from SSRC " << remote_ssrc;
+ VLOG(2) << "Cast RTCP received BYE from SSRC " << remote_ssrc;
}
rtcp_parser->Iterate();
}
@@ -346,7 +321,7 @@ void RtcpReceiver::HandlePLI(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
if (ssrc_ == rtcp_field.pli.media_ssrc) {
// Received a signal that we need to send a new key frame.
- VLOG(1) << "Cast RTCP received PLI on our SSRC " << ssrc_;
+ VLOG(2) << "Cast RTCP received PLI on our SSRC " << ssrc_;
}
rtcp_parser->Iterate();
}
@@ -377,7 +352,7 @@ void RtcpReceiver::HandleRpsi(RtcpParser* rtcp_parser) {
}
rpsi_picture_id += (rtcp_field.rpsi.native_bit_string[bytes - 1] & 0x7f);
- VLOG(1) << "Cast RTCP received RPSI with picture_id " << rpsi_picture_id;
+ VLOG(2) << "Cast RTCP received RPSI with picture_id " << rpsi_picture_id;
}
void RtcpReceiver::HandlePayloadSpecificApp(RtcpParser* rtcp_parser) {
@@ -421,7 +396,7 @@ void RtcpReceiver::HandlePayloadSpecificRembItem(RtcpParser* rtcp_parser) {
for (int i = 0; i < rtcp_field.remb_item.number_of_ssrcs; ++i) {
if (rtcp_field.remb_item.ssrcs[i] == ssrc_) {
// Found matching ssrc.
- VLOG(1) << "Cast RTCP received REMB with received_bitrate "
+ VLOG(2) << "Cast RTCP received REMB with received_bitrate "
<< rtcp_field.remb_item.bitrate;
return;
}
@@ -450,11 +425,15 @@ void RtcpReceiver::HandleApplicationSpecificCastReceiverLog(
field_type = rtcp_parser->Iterate();
while (field_type == kRtcpApplicationSpecificCastReceiverLogEventCode) {
- HandleApplicationSpecificCastReceiverEventLog(rtcp_parser,
+ HandleApplicationSpecificCastReceiverEventLog(
+ rtcp_field.cast_receiver_log.rtp_timestamp,
+ rtcp_parser,
&frame_log.event_log_messages_);
field_type = rtcp_parser->Iterate();
}
- receiver_log.push_back(frame_log);
+
+ if (!frame_log.event_log_messages_.empty())
+ receiver_log.push_back(frame_log);
}
if (receiver_feedback_ && !receiver_log.empty()) {
@@ -463,52 +442,50 @@ void RtcpReceiver::HandleApplicationSpecificCastReceiverLog(
}
void RtcpReceiver::HandleApplicationSpecificCastReceiverEventLog(
+ uint32 frame_rtp_timestamp,
RtcpParser* rtcp_parser,
RtcpReceiverEventLogMessages* event_log_messages) {
const RtcpField& rtcp_field = rtcp_parser->Field();
- RtcpReceiverEventLogMessage event_log;
- event_log.type = TranslateToLogEventFromWireFormat(
- rtcp_field.cast_receiver_log.event);
- event_log.event_timestamp = base::TimeTicks() +
+ const uint8 event = rtcp_field.cast_receiver_log.event;
+ const CastLoggingEvent event_type = TranslateToLogEventFromWireFormat(event);
+ uint16 packet_id = event_type == PACKET_RECEIVED ?
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id.packet_id : 0;
+ const base::TimeTicks event_timestamp =
+ base::TimeTicks() +
base::TimeDelta::FromMilliseconds(
rtcp_field.cast_receiver_log.event_timestamp_base +
rtcp_field.cast_receiver_log.event_timestamp_delta);
- event_log.delay_delta = base::TimeDelta::FromMilliseconds(
- rtcp_field.cast_receiver_log.delay_delta_or_packet_id);
- event_log.packet_id =
- rtcp_field.cast_receiver_log.delay_delta_or_packet_id;
- event_log_messages->push_back(event_log);
-}
-
-void RtcpReceiver::HandleApplicationSpecificCastSenderLog(
- RtcpParser* rtcp_parser) {
- const RtcpField& rtcp_field = rtcp_parser->Field();
- uint32 remote_ssrc = rtcp_field.cast_sender_log.sender_ssrc;
- if (remote_ssrc_ != remote_ssrc) {
- RtcpFieldTypes field_type;
- // Message not to us. Iterate until we have passed this message.
- do {
- field_type = rtcp_parser->Iterate();
- } while (field_type == kRtcpApplicationSpecificCastSenderLogCode);
+ // The following code checks to see if we have already seen this event.
+ // The algorithm works by maintaining a sliding window of events. We have
+ // a queue and a set of events. We enqueue every new event and insert it
+ // into the set. When the queue becomes too big we remove the oldest event
+ // from both the queue and the set.
+ ReceiverEventKey key =
+ GetReceiverEventKey(
+ frame_rtp_timestamp, event_timestamp, event, packet_id);
+ if (receiver_event_key_set_.find(key) != receiver_event_key_set_.end()) {
return;
+ } else {
+ receiver_event_key_set_.insert(key);
+ receiver_event_key_queue_.push(key);
+
+ if (receiver_event_key_queue_.size() > receiver_event_history_size_) {
+ const ReceiverEventKey oldest_key = receiver_event_key_queue_.front();
+ receiver_event_key_queue_.pop();
+ receiver_event_key_set_.erase(oldest_key);
+ }
}
- RtcpSenderLogMessage sender_log;
- RtcpFieldTypes field_type = rtcp_parser->Iterate();
- while (field_type == kRtcpApplicationSpecificCastSenderLogCode) {
- const RtcpField& rtcp_field = rtcp_parser->Field();
- RtcpSenderFrameLogMessage frame_log;
- frame_log.frame_status =
- TranslateToFrameStatusFromWireFormat(rtcp_field.cast_sender_log.status);
- frame_log.rtp_timestamp = rtcp_field.cast_sender_log.rtp_timestamp;
- sender_log.push_back(frame_log);
- field_type = rtcp_parser->Iterate();
- }
- if (receiver_feedback_) {
- receiver_feedback_->OnReceivedSenderLog(sender_log);
- }
+ RtcpReceiverEventLogMessage event_log;
+ event_log.type = event_type;
+ event_log.event_timestamp = event_timestamp;
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id.delay_delta);
+ event_log.packet_id =
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id.packet_id;
+ event_log_messages->push_back(event_log);
}
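
The queue-plus-set window above is self-contained enough to sketch in isolation. A generic version (std::unordered_set standing in for base::hash_set, and a single uint64 key instead of the pair used above; names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <queue>
    #include <unordered_set>

    // Sliding-window de-duplicator: remembers the last |capacity| keys and
    // reports whether a key was seen within that window.
    class RecentKeyFilter {
     public:
      explicit RecentKeyFilter(size_t capacity) : capacity_(capacity) {}

      // Returns true if |key| is a duplicate; otherwise records it.
      bool SeenBefore(uint64_t key) {
        if (seen_.count(key) != 0)
          return true;
        seen_.insert(key);
        order_.push(key);
        if (order_.size() > capacity_) {
          seen_.erase(order_.front());  // Evict the oldest key.
          order_.pop();
        }
        return false;
      }

     private:
      const size_t capacity_;
      std::unordered_set<uint64_t> seen_;
      std::queue<uint64_t> order_;
    };
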
void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
@@ -516,6 +493,7 @@ void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
RtcpCastMessage cast_message(remote_ssrc_);
cast_message.ack_frame_id_ = ack_frame_id_wrap_helper_.MapTo32bitsFrameId(
rtcp_field.cast_item.last_frame_id);
+ cast_message.target_delay_ms_ = rtcp_field.cast_item.target_delay_ms;
RtcpFieldTypes packet_type = rtcp_parser->Iterate();
while (packet_type == kRtcpPayloadSpecificCastNackItemCode) {
@@ -545,15 +523,15 @@ void RtcpReceiver::HandlePayloadSpecificCastNackItem(
frame_it = ret.first;
DCHECK(frame_it != missing_frames_and_packets->end()) << "Invalid state";
}
- if (rtcp_field->cast_nack_item.packet_id == kRtcpCastAllPacketsLost) {
+ uint16 packet_id = rtcp_field->cast_nack_item.packet_id;
+ frame_it->second.insert(packet_id);
+
+ if (packet_id == kRtcpCastAllPacketsLost) {
// Special case all packets in a frame is missing.
return;
}
- uint16 packet_id = rtcp_field->cast_nack_item.packet_id;
uint8 bitmask = rtcp_field->cast_nack_item.bitmask;
- frame_it->second.insert(packet_id);
-
if (bitmask) {
for (int i = 1; i <= 8; ++i) {
if (bitmask & 1) {
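
The hunk cuts off mid-loop, but the shape of the NACK compaction is visible: each Cast NACK item names one missing packet ID plus an 8-bit bitmask flagging which of the next eight packets are also missing (kRtcpCastAllPacketsLost being the whole-frame special case handled above). A sketch of the expansion — the shift-and-test body is an assumption extrapolated from the visible loop header:

    #include <cstdint>
    #include <set>

    // |packet_id| is always missing; bit i-1 of |bitmask| marks
    // packet_id + i as missing too, for i in 1..8.
    void ExpandNackItem(uint16_t packet_id, uint8_t bitmask,
                        std::set<uint16_t>* missing) {
      missing->insert(packet_id);
      for (int i = 1; i <= 8; ++i) {
        if (bitmask & 1)
          missing->insert(static_cast<uint16_t>(packet_id + i));
        bitmask >>= 1;
      }
    }
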
@@ -576,9 +554,10 @@ void RtcpReceiver::HandleFIR(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleFIRItem(const RtcpField* rtcp_field) {
// Is it our sender that is requested to generate a new keyframe.
- if (ssrc_ != rtcp_field->fir_item.ssrc) return;
+ if (ssrc_ != rtcp_field->fir_item.ssrc)
+ return;
- VLOG(1) << "Cast RTCP received FIR on our SSRC " << ssrc_;
+ VLOG(2) << "Cast RTCP received FIR on our SSRC " << ssrc_;
}
} // namespace cast
diff --git a/chromium/media/cast/rtcp/rtcp_receiver.h b/chromium/media/cast/rtcp/rtcp_receiver.h
index 81383c4ec10..d3cef9e57b4 100644
--- a/chromium/media/cast/rtcp/rtcp_receiver.h
+++ b/chromium/media/cast/rtcp/rtcp_receiver.h
@@ -5,10 +5,13 @@
#ifndef MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
#define MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
-#include "media/cast/net/cast_net_defines.h"
+#include <queue>
+
+#include "base/containers/hash_tables.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtcp/rtcp_defines.h"
#include "media/cast/rtcp/rtcp_utility.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace media {
namespace cast {
@@ -16,7 +19,7 @@ namespace cast {
class RtcpReceiverFeedback {
public:
virtual void OnReceivedSenderReport(
- const RtcpSenderInfo& remote_sender_info) = 0;
+ const transport::RtcpSenderInfo& remote_sender_info) = 0;
virtual void OnReceiverReferenceTimeReport(
const RtcpReceiverReferenceTimeReport& remote_time_report) = 0;
@@ -26,9 +29,6 @@ class RtcpReceiverFeedback {
virtual void OnReceivedReceiverLog(
const RtcpReceiverLogMessage& receiver_log) = 0;
- virtual void OnReceivedSenderLog(
- const RtcpSenderLogMessage& sender_log) = 0;
-
virtual ~RtcpReceiverFeedback() {}
};
@@ -53,6 +53,10 @@ class RtcpReceiver {
void SetRemoteSSRC(uint32 ssrc);
+ // Set the history size to record Cast receiver events. Event history is
+ // used to remove duplicates. The history has no more than |size| events.
+ void SetCastReceiverEventHistorySize(size_t size);
+
void IncomingRtcpPacket(RtcpParser* rtcp_parser);
private:
@@ -60,8 +64,7 @@ class RtcpReceiver {
void HandleReceiverReport(RtcpParser* rtcp_parser);
- void HandleReportBlock(const RtcpField* rtcp_field,
- uint32 remote_ssrc);
+ void HandleReportBlock(const RtcpField* rtcp_field, uint32 remote_ssrc);
void HandleSDES(RtcpParser* rtcp_parser);
void HandleSDESChunk(RtcpParser* rtcp_parser);
@@ -100,6 +103,7 @@ class RtcpReceiver {
void HandleApplicationSpecificCastReceiverLog(RtcpParser* rtcp_parser);
void HandleApplicationSpecificCastSenderLog(RtcpParser* rtcp_parser);
void HandleApplicationSpecificCastReceiverEventLog(
+ uint32 frame_rtp_timestamp,
RtcpParser* rtcp_parser,
RtcpReceiverEventLogMessages* event_log_messages);
@@ -112,7 +116,13 @@ class RtcpReceiver {
RtcpRttFeedback* const rtt_feedback_;
scoped_refptr<CastEnvironment> cast_environment_;
- FrameIdWrapHelper ack_frame_id_wrap_helper_;
+ transport::FrameIdWrapHelper ack_frame_id_wrap_helper_;
+
+ // Maintains a history of receiver events.
+ size_t receiver_event_history_size_;
+ typedef std::pair<uint64, uint64> ReceiverEventKey;
+ base::hash_set<ReceiverEventKey> receiver_event_key_set_;
+ std::queue<ReceiverEventKey> receiver_event_key_queue_;
DISALLOW_COPY_AND_ASSIGN(RtcpReceiver);
};
diff --git a/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc b/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
index b5c5d2d3889..51026d1554b 100644
--- a/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
+++ b/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
@@ -10,7 +10,8 @@
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_utility.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
-#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/cast_transport_defines.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -21,6 +22,7 @@ using testing::_;
static const uint32 kSenderSsrc = 0x10203;
static const uint32 kSourceSsrc = 0x40506;
static const uint32 kUnknownSsrc = 0xDEAD;
+static const uint16 kTargetDelayMs = 100;
static const std::string kCName("test@10.1.1.1");
namespace {
@@ -28,8 +30,8 @@ class SenderFeedbackCastVerification : public RtcpSenderFeedback {
public:
SenderFeedbackCastVerification() : called_(false) {}
- virtual void OnReceivedCastFeedback(
- const RtcpCastMessage& cast_feedback) OVERRIDE {
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
+ OVERRIDE {
EXPECT_EQ(cast_feedback.media_ssrc_, kSenderSsrc);
EXPECT_EQ(cast_feedback.ack_frame_id_, kAckFrameId);
@@ -38,7 +40,8 @@ class SenderFeedbackCastVerification : public RtcpSenderFeedback {
EXPECT_TRUE(frame_it != cast_feedback.missing_frames_and_packets_.end());
EXPECT_EQ(kLostFrameId, frame_it->first);
- EXPECT_TRUE(frame_it->second.empty());
+ EXPECT_EQ(frame_it->second.size(), 1UL);
+ EXPECT_EQ(*frame_it->second.begin(), kRtcpCastAllPacketsLost);
++frame_it;
EXPECT_TRUE(frame_it != cast_feedback.missing_frames_and_packets_.end());
EXPECT_EQ(kFrameIdWithLostPackets, frame_it->first);
@@ -58,6 +61,8 @@ class SenderFeedbackCastVerification : public RtcpSenderFeedback {
private:
bool called_;
+
+ DISALLOW_COPY_AND_ASSIGN(SenderFeedbackCastVerification);
};
class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
@@ -67,18 +72,18 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
called_on_received_receiver_log_(false) {}
virtual void OnReceivedSenderReport(
- const RtcpSenderInfo& remote_sender_info) OVERRIDE {};
+ const transport::RtcpSenderInfo& remote_sender_info) OVERRIDE{};
virtual void OnReceiverReferenceTimeReport(
- const RtcpReceiverReferenceTimeReport& remote_time_report) OVERRIDE {};
+ const RtcpReceiverReferenceTimeReport& remote_time_report) OVERRIDE{};
- virtual void OnReceivedSendReportRequest() OVERRIDE {};
+ virtual void OnReceivedSendReportRequest() OVERRIDE{};
- virtual void OnReceivedReceiverLog(
- const RtcpReceiverLogMessage& receiver_log) OVERRIDE {
+ virtual void OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log)
+ OVERRIDE {
EXPECT_EQ(expected_receiver_log_.size(), receiver_log.size());
RtcpReceiverLogMessage::const_iterator expected_it =
- expected_receiver_log_.begin();
+ expected_receiver_log_.begin();
RtcpReceiverLogMessage::const_iterator incoming_it = receiver_log.begin();
for (; incoming_it != receiver_log.end(); ++incoming_it) {
EXPECT_EQ(expected_it->rtp_timestamp_, incoming_it->rtp_timestamp_);
@@ -94,7 +99,7 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
EXPECT_EQ(event_expected_it->type, event_incoming_it->type);
EXPECT_EQ(event_expected_it->event_timestamp,
event_incoming_it->event_timestamp);
- if (event_expected_it->type == kPacketReceived) {
+ if (event_expected_it->type == PACKET_RECEIVED) {
EXPECT_EQ(event_expected_it->packet_id, event_incoming_it->packet_id);
} else {
EXPECT_EQ(event_expected_it->delay_delta,
@@ -107,26 +112,6 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
called_on_received_receiver_log_ = true;
}
- virtual void OnReceivedSenderLog(
- const RtcpSenderLogMessage& sender_log) OVERRIDE {
- EXPECT_EQ(expected_sender_log_.size(), sender_log.size());
-
- RtcpSenderLogMessage::const_iterator expected_it =
- expected_sender_log_.begin();
- RtcpSenderLogMessage::const_iterator incoming_it = sender_log.begin();
- for (; expected_it != expected_sender_log_.end();
- ++expected_it, ++incoming_it) {
- EXPECT_EQ(expected_it->frame_status, incoming_it->frame_status);
- EXPECT_EQ(0xffffff & expected_it->rtp_timestamp,
- incoming_it->rtp_timestamp);
- }
- called_on_received_sender_log_ = true;
- }
-
- bool OnReceivedSenderLogCalled() {
- return called_on_received_sender_log_;
- }
-
bool OnReceivedReceiverLogCalled() {
return called_on_received_receiver_log_ && expected_receiver_log_.empty();
}
@@ -135,15 +120,12 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
expected_receiver_log_ = receiver_log;
}
- void SetExpectedSenderLog(const RtcpSenderLogMessage& sender_log) {
- expected_sender_log_ = sender_log;
- }
-
private:
RtcpReceiverLogMessage expected_receiver_log_;
- RtcpSenderLogMessage expected_sender_log_;
bool called_on_received_sender_log_;
bool called_on_received_receiver_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpReceiverCastLogVerification);
};
} // namespace
@@ -151,28 +133,26 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
class RtcpReceiverTest : public ::testing::Test {
protected:
RtcpReceiverTest()
- : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
- cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig())),
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
rtcp_receiver_(new RtcpReceiver(cast_environment_,
&mock_sender_feedback_,
&mock_receiver_feedback_,
&mock_rtt_feedback_,
kSourceSsrc)) {
- }
-
- virtual ~RtcpReceiverTest() {}
-
- virtual void SetUp() OVERRIDE {
EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
- EXPECT_CALL(mock_receiver_feedback_,
- OnReceiverReferenceTimeReport(_)).Times(0);
- EXPECT_CALL(mock_receiver_feedback_,
- OnReceivedSendReportRequest()).Times(0);
+ EXPECT_CALL(mock_receiver_feedback_, OnReceiverReferenceTimeReport(_))
+ .Times(0);
+ EXPECT_CALL(mock_receiver_feedback_, OnReceivedSendReportRequest())
+ .Times(0);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
- EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(_, _, _)).Times(0);
+ EXPECT_CALL(mock_rtt_feedback_, OnReceivedDelaySinceLastReport(_, _, _))
+ .Times(0);
expected_sender_info_.ntp_seconds = kNtpHigh;
expected_sender_info_.ntp_fraction = kNtpLow;
@@ -193,22 +173,26 @@ class RtcpReceiverTest : public ::testing::Test {
expected_receiver_reference_report_.ntp_fraction = kNtpLow;
}
+ virtual ~RtcpReceiverTest() {}
+
// Injects an RTCP packet into the receiver.
void InjectRtcpPacket(const uint8* packet, uint16 length) {
RtcpParser rtcp_parser(packet, length);
rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
}
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
MockRtcpReceiverFeedback mock_receiver_feedback_;
MockRtcpRttFeedback mock_rtt_feedback_;
MockRtcpSenderFeedback mock_sender_feedback_;
scoped_ptr<RtcpReceiver> rtcp_receiver_;
- RtcpSenderInfo expected_sender_info_;
- RtcpReportBlock expected_report_block_;
+ transport::RtcpSenderInfo expected_sender_info_;
+ transport::RtcpReportBlock expected_report_block_;
RtcpReceiverReferenceTimeReport expected_receiver_reference_report_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpReceiverTest);
};
TEST_F(RtcpReceiverTest, BrokenPacketIsIgnored) {
@@ -222,14 +206,14 @@ TEST_F(RtcpReceiverTest, InjectSenderReportPacket) {
// Expected to be ignored since the sender ssrc does not match our
// remote ssrc.
- InjectRtcpPacket(p.Packet(), p.Length());
+ InjectRtcpPacket(p.Data(), p.Length());
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSenderReport(expected_sender_info_)).Times(1);
rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
// Expected to be pass through since the sender ssrc match our remote ssrc.
- InjectRtcpPacket(p.Packet(), p.Length());
+ InjectRtcpPacket(p.Data(), p.Length());
}
TEST_F(RtcpReceiverTest, InjectReceiveReportPacket) {
@@ -239,19 +223,18 @@ TEST_F(RtcpReceiverTest, InjectReceiveReportPacket) {
// Expected to be ignored since the source ssrc does not match our
// local ssrc.
- InjectRtcpPacket(p1.Packet(), p1.Length());
+ InjectRtcpPacket(p1.Data(), p1.Length());
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
TestRtcpPacketBuilder p2;
p2.AddRr(kSenderSsrc, 1);
p2.AddRb(kSourceSsrc);
// Expected to be pass through since the sender ssrc match our local ssrc.
- InjectRtcpPacket(p2.Packet(), p2.Length());
+ InjectRtcpPacket(p2.Data(), p2.Length());
}
TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
@@ -263,7 +246,7 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
// our remote ssrc.
// Report block expected to be ignored since the source ssrc does not match
// our local ssrc.
- InjectRtcpPacket(p1.Packet(), p1.Length());
+ InjectRtcpPacket(p1.Data(), p1.Length());
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSenderReport(expected_sender_info_)).Times(1);
@@ -273,13 +256,12 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
// remote ssrc.
// Report block expected to be ignored since the source ssrc does not match
// our local ssrc.
- InjectRtcpPacket(p1.Packet(), p1.Length());
+ InjectRtcpPacket(p1.Data(), p1.Length());
EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
rtcp_receiver_->SetRemoteSSRC(0);
@@ -291,14 +273,13 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
// our remote ssrc.
// Receiver report expected to be pass through since the sender ssrc match
// our local ssrc.
- InjectRtcpPacket(p2.Packet(), p2.Length());
+ InjectRtcpPacket(p2.Data(), p2.Length());
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSenderReport(expected_sender_info_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
@@ -306,7 +287,7 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
// remote ssrc.
// Receiver report expected to be pass through since the sender ssrc match
// our local ssrc.
- InjectRtcpPacket(p2.Packet(), p2.Length());
+ InjectRtcpPacket(p2.Data(), p2.Length());
}
TEST_F(RtcpReceiverTest, InjectSenderReportPacketWithDlrr) {
@@ -320,20 +301,19 @@ TEST_F(RtcpReceiverTest, InjectSenderReportPacketWithDlrr) {
// Expected to be ignored since the source ssrc does not match our
// local ssrc.
- InjectRtcpPacket(p.Packet(), p.Length());
+ InjectRtcpPacket(p.Data(), p.Length());
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSenderReport(expected_sender_info_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSenderSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSenderSsrc, kLastSr, kDelayLastSr)).Times(1);
// Enable receiving sender report.
rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
// Expected to be pass through since the sender ssrc match our local ssrc.
- InjectRtcpPacket(p.Packet(), p.Length());
+ InjectRtcpPacket(p.Data(), p.Length());
}
TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithRrtr) {
@@ -345,14 +325,14 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithRrtr) {
// Expected to be ignored since the source ssrc does not match our
// local ssrc.
- InjectRtcpPacket(p1.Packet(), p1.Length());
+ InjectRtcpPacket(p1.Data(), p1.Length());
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
- EXPECT_CALL(mock_receiver_feedback_, OnReceiverReferenceTimeReport(
- expected_receiver_reference_report_)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
+ EXPECT_CALL(mock_receiver_feedback_,
+ OnReceiverReferenceTimeReport(
+ expected_receiver_reference_report_)).Times(1);
// Enable receiving reference time report.
rtcp_receiver_->SetRemoteSSRC(kSenderSsrc);
@@ -364,7 +344,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithRrtr) {
p2.AddXrRrtrBlock();
// Expected to be pass through since the sender ssrc match our local ssrc.
- InjectRtcpPacket(p2.Packet(), p2.Length());
+ InjectRtcpPacket(p2.Data(), p2.Length());
}
TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithIntraFrameRequest) {
@@ -375,12 +355,11 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithIntraFrameRequest) {
// Expected to be ignored since the source ssrc does not match our
// local ssrc.
- InjectRtcpPacket(p1.Packet(), p1.Length());
+ InjectRtcpPacket(p1.Data(), p1.Length());
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
TestRtcpPacketBuilder p2;
p2.AddRr(kSenderSsrc, 1);
@@ -388,23 +367,22 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithIntraFrameRequest) {
p2.AddPli(kSenderSsrc, kSourceSsrc);
// Expected to be pass through since the sender ssrc match our local ssrc.
- InjectRtcpPacket(p2.Packet(), p2.Length());
+ InjectRtcpPacket(p2.Data(), p2.Length());
}
TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
TestRtcpPacketBuilder p1;
p1.AddRr(kSenderSsrc, 1);
p1.AddRb(kUnknownSsrc);
- p1.AddCast(kSenderSsrc, kUnknownSsrc);
+ p1.AddCast(kSenderSsrc, kUnknownSsrc, kTargetDelayMs);
// Expected to be ignored since the source ssrc does not match our
// local ssrc.
- InjectRtcpPacket(p1.Packet(), p1.Length());
+ InjectRtcpPacket(p1.Data(), p1.Length());
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(1);
// Enable receiving the cast feedback.
@@ -413,10 +391,10 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
TestRtcpPacketBuilder p2;
p2.AddRr(kSenderSsrc, 1);
p2.AddRb(kSourceSsrc);
- p2.AddCast(kSenderSsrc, kSourceSsrc);
+ p2.AddCast(kSenderSsrc, kSourceSsrc, kTargetDelayMs);
// Expected to be pass through since the sender ssrc match our local ssrc.
- InjectRtcpPacket(p2.Packet(), p2.Length());
+ InjectRtcpPacket(p2.Data(), p2.Length());
}
TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
@@ -428,9 +406,8 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
kSourceSsrc);
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc,
- kLastSr,
- kDelayLastSr)).Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
// Enable receiving the cast feedback.
rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
@@ -438,48 +415,15 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
TestRtcpPacketBuilder p;
p.AddRr(kSenderSsrc, 1);
p.AddRb(kSourceSsrc);
- p.AddCast(kSenderSsrc, kSourceSsrc);
+ p.AddCast(kSenderSsrc, kSourceSsrc, kTargetDelayMs);
// Expected to pass through since the sender ssrc matches our local ssrc.
- RtcpParser rtcp_parser(p.Packet(), p.Length());
+ RtcpParser rtcp_parser(p.Data(), p.Length());
rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
EXPECT_TRUE(sender_feedback_cast_verification.called());
}
-TEST_F(RtcpReceiverTest, InjectSenderReportWithCastSenderLogVerification) {
- RtcpReceiverCastLogVerification cast_log_verification;
- RtcpReceiver rtcp_receiver(cast_environment_,
- &mock_sender_feedback_,
- &cast_log_verification,
- &mock_rtt_feedback_,
- kSourceSsrc);
- rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
-
- RtcpSenderLogMessage sender_log;
- for (int j = 0; j < 359; ++j) {
- RtcpSenderFrameLogMessage sender_frame_log;
- sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
- sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
- sender_log.push_back(sender_frame_log);
- }
- cast_log_verification.SetExpectedSenderLog(sender_log);
-
- TestRtcpPacketBuilder p;
- p.AddSr(kSenderSsrc, 0);
- p.AddSdesCname(kSenderSsrc, kCName);
- p.AddSenderLog(kSenderSsrc);
-
- for (int i = 0; i < 359; ++i) {
- p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork,
- kRtpTimestamp + i * 90);
- }
- RtcpParser rtcp_parser(p.Packet(), p.Length());
- rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
-
- EXPECT_TRUE(cast_log_verification.OnReceivedSenderLogCalled());
-}
-
TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
static const uint32 kTimeBaseMs = 12345678;
static const uint32 kTimeDelayMs = 10;
@@ -494,21 +438,28 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
&mock_rtt_feedback_,
kSourceSsrc);
rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+ rtcp_receiver.SetCastReceiverEventHistorySize(100);
RtcpReceiverLogMessage receiver_log;
RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
RtcpReceiverEventLogMessage event_log;
- event_log.type = kAckSent;
+ event_log.type = FRAME_ACK_SENT;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
frame_log.event_log_messages_.push_back(event_log);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
- event_log.type = kPacketReceived;
+ event_log.type = PACKET_RECEIVED;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.packet_id = kLostPacketId1;
frame_log.event_log_messages_.push_back(event_log);
+
+ event_log.type = PACKET_RECEIVED;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.packet_id = kLostPacketId2;
+ frame_log.event_log_messages_.push_back(event_log);
+
receiver_log.push_back(frame_log);
cast_log_verification.SetExpectedReceiverLog(receiver_log);
@@ -517,15 +468,22 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
p.AddRr(kSenderSsrc, 1);
p.AddRb(kSourceSsrc);
p.AddReceiverLog(kSenderSsrc);
- p.AddReceiverFrameLog(kRtpTimestamp, 2, kTimeBaseMs);
- p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
- p.AddReceiverEventLog(kLostPacketId1, 6, kTimeDelayMs);
+ p.AddReceiverFrameLog(kRtpTimestamp, 3, kTimeBaseMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, FRAME_ACK_SENT, 0);
+ p.AddReceiverEventLog(kLostPacketId1, PACKET_RECEIVED, kTimeDelayMs);
+ p.AddReceiverEventLog(kLostPacketId2, PACKET_RECEIVED, kTimeDelayMs);
+
+ // Adds a duplicated receiver event log.
+ p.AddReceiverFrameLog(kRtpTimestamp, 3, kTimeBaseMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, FRAME_ACK_SENT, 0);
+ p.AddReceiverEventLog(kLostPacketId1, PACKET_RECEIVED, kTimeDelayMs);
+ p.AddReceiverEventLog(kLostPacketId2, PACKET_RECEIVED, kTimeDelayMs);
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc, kLastSr, kDelayLastSr)).
- Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
- RtcpParser rtcp_parser(p.Packet(), p.Length());
+ RtcpParser rtcp_parser(p.Data(), p.Length());
rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
EXPECT_TRUE(cast_log_verification.OnReceivedReceiverLogCalled());
@@ -551,7 +509,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationMulti) {
for (int j = 0; j < 100; ++j) {
RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
RtcpReceiverEventLogMessage event_log;
- event_log.type = kAckSent;
+ event_log.type = FRAME_ACK_SENT;
event_log.event_timestamp = testing_clock.NowTicks();
event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
frame_log.event_log_messages_.push_back(event_log);
@@ -566,21 +524,19 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationMulti) {
p.AddRb(kSourceSsrc);
p.AddReceiverLog(kSenderSsrc);
for (int i = 0; i < 100; ++i) {
- p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
- p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, FRAME_ACK_SENT, 0);
}
EXPECT_CALL(mock_rtt_feedback_,
- OnReceivedDelaySinceLastReport(kSourceSsrc, kLastSr, kDelayLastSr)).
- Times(1);
+ OnReceivedDelaySinceLastReport(
+ kSourceSsrc, kLastSr, kDelayLastSr)).Times(1);
- RtcpParser rtcp_parser(p.Packet(), p.Length());
+ RtcpParser rtcp_parser(p.Data(), p.Length());
rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
EXPECT_TRUE(cast_log_verification.OnReceivedReceiverLogCalled());
}
-
-
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_sender.cc b/chromium/media/cast/rtcp/rtcp_sender.cc
index b5cf4ce4ced..bf7d30c84c8 100644
--- a/chromium/media/cast/rtcp/rtcp_sender.cc
+++ b/chromium/media/cast/rtcp/rtcp_sender.cc
@@ -4,263 +4,226 @@
#include "media/cast/rtcp/rtcp_sender.h"
+#include <stdint.h>
+
#include <algorithm>
#include <vector>
+#include "base/big_endian.h"
#include "base/logging.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/rtcp/rtcp_defines.h"
#include "media/cast/rtcp/rtcp_utility.h"
-#include "net/base/big_endian.h"
-
-static const size_t kRtcpCastLogHeaderSize = 12;
-static const size_t kRtcpSenderFrameLogSize = 4;
-static const size_t kRtcpReceiverFrameLogSize = 8;
-static const size_t kRtcpReceiverEventLogSize = 4;
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+namespace media {
+namespace cast {
namespace {
+
+// Max delta is 4095 milliseconds because we need to be able to encode it in
+// 12 bits.
+const int64 kMaxWireFormatTimeDeltaMs = INT64_C(0xfff);
+
uint16 MergeEventTypeAndTimestampForWireFormat(
- const media::cast::CastLoggingEvent& event,
+ const CastLoggingEvent& event,
const base::TimeDelta& time_delta) {
int64 time_delta_ms = time_delta.InMilliseconds();
- // Max delta is 4096 milliseconds.
- DCHECK_GE(GG_INT64_C(0xfff), time_delta_ms);
-
- uint16 event_type_and_timestamp_delta =
- static_cast<uint16>(time_delta_ms & 0xfff);
-
- uint16 event_type = 0;
- switch (event) {
- case media::cast::kAckSent:
- event_type = 1;
- break;
- case media::cast::kAudioPlayoutDelay:
- event_type = 2;
- break;
- case media::cast::kAudioFrameDecoded:
- event_type = 3;
- break;
- case media::cast::kVideoFrameDecoded:
- event_type = 4;
- break;
- case media::cast::kVideoRenderDelay:
- event_type = 5;
- break;
- case media::cast::kPacketReceived:
- event_type = 6;
- break;
- default:
- NOTREACHED();
- }
- DCHECK(!(event_type & 0xfff0));
- return (event_type << 12) + event_type_and_timestamp_delta;
-}
-bool ScanRtcpReceiverLogMessage(
- const media::cast::RtcpReceiverLogMessage& receiver_log_message,
- size_t start_size,
- size_t* number_of_frames,
- size_t* total_number_of_messages_to_send,
- size_t* rtcp_log_size) {
- if (receiver_log_message.empty()) return false;
+ DCHECK_GE(time_delta_ms, 0);
+ DCHECK_LE(time_delta_ms, kMaxWireFormatTimeDeltaMs);
- size_t remaining_space = media::cast::kIpPacketSize - start_size;
+ uint16 time_delta_12_bits =
+ static_cast<uint16>(time_delta_ms & kMaxWireFormatTimeDeltaMs);
- // We must have space for at least one message
- DCHECK_GE(remaining_space, kRtcpCastLogHeaderSize +
- kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize)
- << "Not enough buffer space";
+ uint16 event_type_4_bits = ConvertEventTypeToWireFormat(event);
+ DCHECK(event_type_4_bits);
+ DCHECK(~(event_type_4_bits & 0xfff0));
+ return (event_type_4_bits << 12) | time_delta_12_bits;
+}
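
As a worked sketch of the packing above (the helper name Pack is invented for
illustration; the real 4-bit codes come from ConvertEventTypeToWireFormat, and
the value 6 used here is the code the removed switch assigned to
kPacketReceived):

    #include <cassert>
    #include <stdint.h>

    // Pack a 4-bit event code and a 12-bit millisecond delta into one
    // uint16, event type in the high nibble, mirroring
    // MergeEventTypeAndTimestampForWireFormat above.
    uint16_t Pack(uint16_t event_4_bits, int64_t delta_ms) {
      assert(delta_ms >= 0 && delta_ms <= 0xfff);  // Must fit in 12 bits.
      return static_cast<uint16_t>((event_4_bits << 12) |
                                   (delta_ms & 0xfff));
    }

    int main() {
      // A 10 ms delta with event code 6: (6 << 12) | 0x00a == 0x600a.
      assert(Pack(6, 10) == 0x600a);
      return 0;
    }
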
- if (remaining_space < kRtcpCastLogHeaderSize + kRtcpReceiverFrameLogSize +
- kRtcpReceiverEventLogSize) {
- return false;
- }
- // Account for the RTCP header for an application-defined packet.
- remaining_space -= kRtcpCastLogHeaderSize;
+bool EventTimestampLessThan(const RtcpReceiverEventLogMessage& lhs,
+ const RtcpReceiverEventLogMessage& rhs) {
+ return lhs.event_timestamp < rhs.event_timestamp;
+}
- media::cast::RtcpReceiverLogMessage::const_iterator frame_it =
- receiver_log_message.begin();
- for (; frame_it != receiver_log_message.end(); ++frame_it) {
- (*number_of_frames)++;
+void AddReceiverLog(
+ const RtcpReceiverLogMessage& redundancy_receiver_log_message,
+ RtcpReceiverLogMessage* receiver_log_message,
+ size_t* remaining_space,
+ size_t* number_of_frames,
+ size_t* total_number_of_messages_to_send) {
+ RtcpReceiverLogMessage::const_iterator it =
+ redundancy_receiver_log_message.begin();
+ while (it != redundancy_receiver_log_message.end() &&
+ *remaining_space >=
+ kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize) {
+ receiver_log_message->push_front(*it);
+ size_t num_event_logs = (*remaining_space - kRtcpReceiverFrameLogSize) /
+ kRtcpReceiverEventLogSize;
+ RtcpReceiverEventLogMessages& event_log_messages =
+ receiver_log_message->front().event_log_messages_;
+ if (num_event_logs < event_log_messages.size())
+ event_log_messages.resize(num_event_logs);
+
+ *remaining_space -= kRtcpReceiverFrameLogSize +
+ event_log_messages.size() * kRtcpReceiverEventLogSize;
+ ++*number_of_frames;
+ *total_number_of_messages_to_send += event_log_messages.size();
+ ++it;
+ }
+}
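
The space accounting in AddReceiverLog relies on the per-item wire sizes used
throughout this file: a 12-byte Cast log header, 8 bytes per frame header, and
4 bytes per event (the constants this patch removes from the anonymous
namespace, now presumably supplied by a shared header). A minimal sketch of
the resulting size formula, which BuildRtcpReceiverLogMessage below computes
the same way:

    #include <stddef.h>

    const size_t kRtcpCastLogHeaderSize = 12;
    const size_t kRtcpReceiverFrameLogSize = 8;
    const size_t kRtcpReceiverEventLogSize = 4;

    // Total wire size of a receiver log block carrying |frames| frame
    // headers and |events| event entries overall.
    size_t ReceiverLogSize(size_t frames, size_t events) {
      return kRtcpCastLogHeaderSize +
             frames * kRtcpReceiverFrameLogSize +
             events * kRtcpReceiverEventLogSize;
    }
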
- remaining_space -= kRtcpReceiverFrameLogSize;
+// A class to build a string representing the NACK list in Cast message.
+//
+// The string will look like "23:3-6, 25:1,5-6", meaning packets 3 to 6 in frame
+// 23 are being NACK'ed (i.e. they are missing from the receiver's point of
+// view) and packets 1, 5 and 6 are missing in frame 25. A frame that is
+// completely missing will show as "26:65535".
+class NackStringBuilder {
+ public:
+ NackStringBuilder()
+ : frame_count_(0),
+ packet_count_(0),
+ last_frame_id_(-1),
+ last_packet_id_(-1),
+ contiguous_sequence_(false) {}
+ ~NackStringBuilder() {}
+
+ bool Empty() const { return frame_count_ == 0; }
+
+ void PushFrame(int frame_id) {
+ DCHECK_GE(frame_id, 0);
+ if (frame_count_ > 0) {
+ if (frame_id == last_frame_id_) {
+ return;
+ }
+ if (contiguous_sequence_) {
+ stream_ << "-" << last_packet_id_;
+ }
+ stream_ << ", ";
+ }
+ stream_ << frame_id;
+ last_frame_id_ = frame_id;
+ packet_count_ = 0;
+ contiguous_sequence_ = false;
+ ++frame_count_;
+ }
- size_t messages_in_frame = frame_it->event_log_messages_.size();
- size_t remaining_space_in_messages =
- remaining_space / kRtcpReceiverEventLogSize;
- size_t messages_to_send = std::min(messages_in_frame,
- remaining_space_in_messages);
- if (messages_to_send > media::cast::kRtcpMaxReceiverLogMessages) {
- // We can't send more than 256 messages.
- remaining_space -= media::cast::kRtcpMaxReceiverLogMessages *
- kRtcpReceiverEventLogSize;
- *total_number_of_messages_to_send +=
- media::cast::kRtcpMaxReceiverLogMessages;
- break;
+ void PushPacket(int packet_id) {
+ DCHECK_GE(last_frame_id_, 0);
+ DCHECK_GE(packet_id, 0);
+ if (packet_count_ == 0) {
+ stream_ << ":" << packet_id;
+ } else if (packet_id == last_packet_id_ + 1) {
+ contiguous_sequence_ = true;
+ } else {
+ if (contiguous_sequence_) {
+ stream_ << "-" << last_packet_id_;
+ contiguous_sequence_ = false;
+ }
+ stream_ << "," << packet_id;
}
- remaining_space -= messages_to_send * kRtcpReceiverEventLogSize;
- *total_number_of_messages_to_send += messages_to_send;
+ ++packet_count_;
+ last_packet_id_ = packet_id;
+ }
- if (remaining_space <
- kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize) {
- // Make sure that we have room for at least one more message.
- break;
+ std::string GetString() {
+ if (contiguous_sequence_) {
+ stream_ << "-" << last_packet_id_;
+ contiguous_sequence_ = false;
}
+ return stream_.str();
}
- *rtcp_log_size = kRtcpCastLogHeaderSize +
- *number_of_frames * kRtcpReceiverFrameLogSize +
- *total_number_of_messages_to_send * kRtcpReceiverEventLogSize;
- DCHECK_GE(media::cast::kIpPacketSize,
- start_size + *rtcp_log_size) << "Not enough buffer space";
- VLOG(1) << "number of frames " << *number_of_frames;
- VLOG(1) << "total messages to send " << *total_number_of_messages_to_send;
- VLOG(1) << "rtcp log size " << *rtcp_log_size;
- return true;
-}
+ private:
+ std::ostringstream stream_;
+ int frame_count_;
+ int packet_count_;
+ int last_frame_id_;
+ int last_packet_id_;
+ bool contiguous_sequence_;
+};
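
A usage sketch for NackStringBuilder as defined above; tracing the
PushFrame/PushPacket logic shows that frames are joined with ", ", so the
example from the class comment renders as follows (assumes the class above is
visible):

    #include <iostream>

    int main() {
      NackStringBuilder builder;
      builder.PushFrame(23);
      for (int p = 3; p <= 6; ++p)
        builder.PushPacket(p);           // Contiguous run, rendered "3-6".
      builder.PushFrame(25);             // Flushes "-6", starts ", 25".
      builder.PushPacket(1);
      builder.PushPacket(5);
      builder.PushPacket(6);             // 5 and 6 form a second run.
      std::cout << builder.GetString();  // Prints "23:3-6, 25:1,5-6".
      return 0;
    }
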
} // namespace
-namespace media {
-namespace cast {
-
+// TODO(mikhal): This is only used by the receiver. Consider renaming.
RtcpSender::RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
- PacedPacketSender* outgoing_transport,
+ transport::PacedPacketSender* outgoing_transport,
uint32 sending_ssrc,
const std::string& c_name)
- : ssrc_(sending_ssrc),
- c_name_(c_name),
- transport_(outgoing_transport),
- cast_environment_(cast_environment) {
+ : ssrc_(sending_ssrc),
+ c_name_(c_name),
+ transport_(outgoing_transport),
+ cast_environment_(cast_environment) {
DCHECK_LT(c_name_.length(), kRtcpCnameSize) << "Invalid config";
}
RtcpSender::~RtcpSender() {}
-void RtcpSender::SendRtcpFromRtpSender(uint32 packet_type_flags,
- const RtcpSenderInfo* sender_info,
- const RtcpDlrrReportBlock* dlrr,
- RtcpSenderLogMessage* sender_log) {
- if (packet_type_flags & kRtcpRr ||
- packet_type_flags & kRtcpPli ||
- packet_type_flags & kRtcpRrtr ||
- packet_type_flags & kRtcpCast ||
- packet_type_flags & kRtcpReceiverLog ||
- packet_type_flags & kRtcpRpsi ||
- packet_type_flags & kRtcpRemb ||
- packet_type_flags & kRtcpNack) {
- NOTREACHED() << "Invalid argument";
- }
-
- std::vector<uint8> packet;
- packet.reserve(kIpPacketSize);
- if (packet_type_flags & kRtcpSr) {
- DCHECK(sender_info) << "Invalid argument";
- BuildSR(*sender_info, NULL, &packet);
- BuildSdec(&packet);
- }
- if (packet_type_flags & kRtcpBye) {
- BuildBye(&packet);
- }
- if (packet_type_flags & kRtcpDlrr) {
- DCHECK(dlrr) << "Invalid argument";
- BuildDlrrRb(dlrr, &packet);
- }
- if (packet_type_flags & kRtcpSenderLog) {
- DCHECK(sender_log) << "Invalid argument";
- BuildSenderLog(sender_log, &packet);
- }
- if (packet.empty())
- return; // Sanity don't send empty packets.
-
- transport_->SendRtcpPacket(packet);
-}
-
void RtcpSender::SendRtcpFromRtpReceiver(
uint32 packet_type_flags,
- const RtcpReportBlock* report_block,
+ const transport::RtcpReportBlock* report_block,
const RtcpReceiverReferenceTimeReport* rrtr,
const RtcpCastMessage* cast_message,
- RtcpReceiverLogMessage* receiver_log) {
- if (packet_type_flags & kRtcpSr ||
- packet_type_flags & kRtcpDlrr ||
- packet_type_flags & kRtcpSenderLog) {
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
+ uint16 target_delay_ms) {
+ if (packet_type_flags & transport::kRtcpSr ||
+ packet_type_flags & transport::kRtcpDlrr ||
+ packet_type_flags & transport::kRtcpSenderLog) {
NOTREACHED() << "Invalid argument";
}
- if (packet_type_flags & kRtcpPli ||
- packet_type_flags & kRtcpRpsi ||
- packet_type_flags & kRtcpRemb ||
- packet_type_flags & kRtcpNack) {
+ if (packet_type_flags & transport::kRtcpPli ||
+ packet_type_flags & transport::kRtcpRpsi ||
+ packet_type_flags & transport::kRtcpRemb ||
+ packet_type_flags & transport::kRtcpNack) {
// Implement these for webrtc interop.
NOTIMPLEMENTED();
}
- std::vector<uint8> packet;
- packet.reserve(kIpPacketSize);
+ transport::PacketRef packet(new base::RefCountedData<Packet>);
+ packet->data.reserve(kMaxIpPacketSize);
- if (packet_type_flags & kRtcpRr) {
- BuildRR(report_block, &packet);
+ if (packet_type_flags & transport::kRtcpRr) {
+ BuildRR(report_block, &packet->data);
if (!c_name_.empty()) {
- BuildSdec(&packet);
+ BuildSdec(&packet->data);
}
}
- if (packet_type_flags & kRtcpBye) {
- BuildBye(&packet);
+ if (packet_type_flags & transport::kRtcpBye) {
+ BuildBye(&packet->data);
}
- if (packet_type_flags & kRtcpRrtr) {
+ if (packet_type_flags & transport::kRtcpRrtr) {
DCHECK(rrtr) << "Invalid argument";
- BuildRrtr(rrtr, &packet);
+ BuildRrtr(rrtr, &packet->data);
}
- if (packet_type_flags & kRtcpCast) {
+ if (packet_type_flags & transport::kRtcpCast) {
DCHECK(cast_message) << "Invalid argument";
- BuildCast(cast_message, &packet);
+ BuildCast(cast_message, target_delay_ms, &packet->data);
}
- if (packet_type_flags & kRtcpReceiverLog) {
- DCHECK(receiver_log) << "Invalid argument";
- BuildReceiverLog(receiver_log, &packet);
+ if (packet_type_flags & transport::kRtcpReceiverLog) {
+ DCHECK(rtcp_events) << "Invalid argument";
+ BuildReceiverLog(*rtcp_events, &packet->data);
}
- if (packet.empty()) return; // Sanity don't send empty packets.
- transport_->SendRtcpPacket(packet);
-}
-
-void RtcpSender::BuildSR(const RtcpSenderInfo& sender_info,
- const RtcpReportBlock* report_block,
- std::vector<uint8>* packet) const {
- // Sender report.
- size_t start_size = packet->size();
- DCHECK_LT(start_size + 52, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 52 > kIpPacketSize) return;
-
- uint16 number_of_rows = (report_block) ? 12 : 6;
- packet->resize(start_size + 28);
-
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 28);
- big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
- big_endian_writer.WriteU8(kPacketTypeSenderReport);
- big_endian_writer.WriteU16(number_of_rows);
- big_endian_writer.WriteU32(ssrc_);
- big_endian_writer.WriteU32(sender_info.ntp_seconds);
- big_endian_writer.WriteU32(sender_info.ntp_fraction);
- big_endian_writer.WriteU32(sender_info.rtp_timestamp);
- big_endian_writer.WriteU32(sender_info.send_packet_count);
- big_endian_writer.WriteU32(static_cast<uint32>(sender_info.send_octet_count));
+ if (packet->data.empty())
+ return; // Sanity check: don't send empty packets.
- if (report_block) {
- AddReportBlocks(*report_block, packet); // Adds 24 bytes.
- }
+ transport_->SendRtcpPacket(ssrc_, packet);
}
-void RtcpSender::BuildRR(const RtcpReportBlock* report_block,
- std::vector<uint8>* packet) const {
+void RtcpSender::BuildRR(const transport::RtcpReportBlock* report_block,
+ Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 32, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 32 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 32, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 32 > kMaxIpPacketSize)
+ return;
uint16 number_of_rows = (report_block) ? 7 : 1;
packet->resize(start_size + 8);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 8);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 8);
big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
- big_endian_writer.WriteU8(kPacketTypeReceiverReport);
+ big_endian_writer.WriteU8(transport::kPacketTypeReceiverReport);
big_endian_writer.WriteU16(number_of_rows);
big_endian_writer.WriteU32(ssrc_);
@@ -269,15 +232,17 @@ void RtcpSender::BuildRR(const RtcpReportBlock* report_block,
}
}
-void RtcpSender::AddReportBlocks(const RtcpReportBlock& report_block,
- std::vector<uint8>* packet) const {
+void RtcpSender::AddReportBlocks(const transport::RtcpReportBlock& report_block,
+ Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 24, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 24 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 24, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 24 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 24);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 24);
big_endian_writer.WriteU32(report_block.media_ssrc);
big_endian_writer.WriteU8(report_block.fraction_lost);
big_endian_writer.WriteU8(report_block.cumulative_lost >> 16);
@@ -297,30 +262,32 @@ void RtcpSender::AddReportBlocks(const RtcpReportBlock& report_block,
big_endian_writer.WriteU32(report_block.delay_since_last_sr);
}
-void RtcpSender::BuildSdec(std::vector<uint8>* packet) const {
+void RtcpSender::BuildSdec(Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 12 + c_name_.length(), kIpPacketSize)
+ DCHECK_LT(start_size + 12 + c_name_.length(), kMaxIpPacketSize)
<< "Not enough buffer space";
- if (start_size + 12 > kIpPacketSize) return;
+ if (start_size + 12 > kMaxIpPacketSize)
+ return;
// SDES Source Description.
packet->resize(start_size + 10);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 10);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 10);
// We always need to add one SDES CNAME.
big_endian_writer.WriteU8(0x80 + 1);
- big_endian_writer.WriteU8(kPacketTypeSdes);
+ big_endian_writer.WriteU8(transport::kPacketTypeSdes);
// Handle SDES length later on.
uint32 sdes_length_position = static_cast<uint32>(start_size) + 3;
big_endian_writer.WriteU16(0);
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
- big_endian_writer.WriteU8(1); // CNAME = 1
+ big_endian_writer.WriteU8(1); // CNAME = 1
big_endian_writer.WriteU8(static_cast<uint8>(c_name_.length()));
size_t sdes_length = 10 + c_name_.length();
- packet->insert(packet->end(), c_name_.c_str(),
- c_name_.c_str() + c_name_.length());
+ packet->insert(
+ packet->end(), c_name_.c_str(), c_name_.c_str() + c_name_.length());
size_t padding = 0;
@@ -340,20 +307,21 @@ void RtcpSender::BuildSdec(std::vector<uint8>* packet) const {
(*packet)[sdes_length_position] = buffer_length;
}
-void RtcpSender::BuildPli(uint32 remote_ssrc,
- std::vector<uint8>* packet) const {
+void RtcpSender::BuildPli(uint32 remote_ssrc, Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 12, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 12 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 12, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 12 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 12);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 12);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 12);
uint8 FMT = 1; // Picture loss indicator.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
- big_endian_writer.WriteU16(2); // Used fixed length of 2.
- big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU16(2); // Used fixed length of 2.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(remote_ssrc); // Add the remote SSRC.
}
@@ -366,18 +334,19 @@ void RtcpSender::BuildPli(uint32 remote_ssrc,
| defined per codec ... | Padding (0) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi,
- std::vector<uint8>* packet) const {
+void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi, Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 24, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 24 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 24, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 24 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 24);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 24);
uint8 FMT = 3; // Reference Picture Selection Indication.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
// Calculate length.
uint32 bits_required = 7;
@@ -407,8 +376,8 @@ void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi,
// Add picture ID.
for (int i = bytes_required - 1; i > 0; i--) {
- big_endian_writer.WriteU8(
- 0x80 | static_cast<uint8>(rpsi->picture_id >> (i * 7)));
+ big_endian_writer.WriteU8(0x80 |
+ static_cast<uint8>(rpsi->picture_id >> (i * 7)));
}
// Add last byte of picture ID.
big_endian_writer.WriteU8(static_cast<uint8>(rpsi->picture_id & 0x7f));
@@ -419,38 +388,38 @@ void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi,
}
}
-void RtcpSender::BuildRemb(const RtcpRembMessage* remb,
- std::vector<uint8>* packet) const {
+void RtcpSender::BuildRemb(const RtcpRembMessage* remb, Packet* packet) const {
size_t start_size = packet->size();
size_t remb_size = 20 + 4 * remb->remb_ssrcs.size();
- DCHECK_LT(start_size + remb_size, kIpPacketSize)
+ DCHECK_LT(start_size + remb_size, kMaxIpPacketSize)
<< "Not enough buffer space";
- if (start_size + remb_size > kIpPacketSize) return;
+ if (start_size + remb_size > kMaxIpPacketSize)
+ return;
packet->resize(start_size + remb_size);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), remb_size);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), remb_size);
// Add application layer feedback.
uint8 FMT = 15;
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
big_endian_writer.WriteU8(0);
big_endian_writer.WriteU8(static_cast<uint8>(remb->remb_ssrcs.size() + 4));
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
- big_endian_writer.WriteU32(0); // Remote SSRC must be 0.
+ big_endian_writer.WriteU32(0); // Remote SSRC must be 0.
big_endian_writer.WriteU32(kRemb);
big_endian_writer.WriteU8(static_cast<uint8>(remb->remb_ssrcs.size()));
// A 6-bit exponent and an 18-bit mantissa.
uint8 bitrate_exponent;
uint32 bitrate_mantissa;
- BitrateToRembExponentBitrate(remb->remb_bitrate,
- &bitrate_exponent,
- &bitrate_mantissa);
+ BitrateToRembExponentBitrate(
+ remb->remb_bitrate, &bitrate_exponent, &bitrate_mantissa);
- big_endian_writer.WriteU8(static_cast<uint8>((bitrate_exponent << 2) +
- ((bitrate_mantissa >> 16) & 0x03)));
+ big_endian_writer.WriteU8(static_cast<uint8>(
+ (bitrate_exponent << 2) + ((bitrate_mantissa >> 16) & 0x03)));
big_endian_writer.WriteU8(static_cast<uint8>(bitrate_mantissa >> 8));
big_endian_writer.WriteU8(static_cast<uint8>(bitrate_mantissa));
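
The REMB bitrate travels as an 18-bit mantissa scaled by a 6-bit exponent,
i.e. bitrate is approximately mantissa << exponent. BitrateToRembExponentBitrate
is declared elsewhere in this class; the sketch below is an assumed
implementation consistent with the bit layout written above:

    #include <stdint.h>
    #include <stdio.h>

    // Split |bitrate| into mantissa * 2^exponent with the mantissa capped
    // at 18 bits (assumed behavior of BitrateToRembExponentBitrate).
    void BitrateToExponentMantissa(uint32_t bitrate,
                                   uint8_t* exponent,
                                   uint32_t* mantissa) {
      *exponent = 0;
      while (bitrate > 0x3ffff) {  // 18-bit mantissa limit.
        bitrate >>= 1;
        ++*exponent;
      }
      *mantissa = bitrate;
    }

    int main() {
      uint8_t exp;
      uint32_t man;
      BitrateToExponentMantissa(4000000, &exp, &man);  // 4 Mbps.
      // Reconstructed value is man << exp, within rounding of the input.
      printf("exp=%u mantissa=%u approx=%u\n",
             (unsigned)exp, (unsigned)man, (unsigned)(man << exp));
      return 0;
    }
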
@@ -458,34 +427,33 @@ void RtcpSender::BuildRemb(const RtcpRembMessage* remb,
for (; it != remb->remb_ssrcs.end(); ++it) {
big_endian_writer.WriteU32(*it);
}
- cast_environment_->Logging()->InsertGenericEvent(kRembBitrate,
- remb->remb_bitrate);
}
-void RtcpSender::BuildNack(const RtcpNackMessage* nack,
- std::vector<uint8>* packet) const {
+void RtcpSender::BuildNack(const RtcpNackMessage* nack, Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 16, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 16 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 16, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 16 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 16);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 16);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 16);
uint8 FMT = 1;
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(kPacketTypeGenericRtpFeedback);
+ big_endian_writer.WriteU8(transport::kPacketTypeGenericRtpFeedback);
big_endian_writer.WriteU8(0);
size_t nack_size_pos = start_size + 3;
big_endian_writer.WriteU8(3);
- big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(nack->remote_ssrc); // Add the remote SSRC.
// Build NACK bitmasks and write them to the Rtcp message.
// The nack list should be sorted and not contain duplicates.
size_t number_of_nack_fields = 0;
- size_t max_number_of_nack_fields = std::min<size_t>(kRtcpMaxNackFields,
- (kIpPacketSize - packet->size()) / 4);
+ size_t max_number_of_nack_fields = std::min<size_t>(
+ kRtcpMaxNackFields, (kMaxIpPacketSize - packet->size()) / 4);
std::list<uint16>::const_iterator it = nack->nack_list.begin();
while (it != nack->nack_list.end() &&
@@ -504,11 +472,13 @@ void RtcpSender::BuildNack(const RtcpNackMessage* nack,
}
// Write the sequence number and the bitmask to the packet.
start_size = packet->size();
- DCHECK_LT(start_size + 4, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 4 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 4, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 4 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 4);
- net::BigEndianWriter big_endian_nack_writer(&((*packet)[start_size]), 4);
+ base::BigEndianWriter big_endian_nack_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 4);
big_endian_nack_writer.WriteU16(nack_sequence_number);
big_endian_nack_writer.WriteU16(bitmask);
number_of_nack_fields++;
@@ -517,75 +487,41 @@ void RtcpSender::BuildNack(const RtcpNackMessage* nack,
(*packet)[nack_size_pos] = static_cast<uint8>(2 + number_of_nack_fields);
}
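
Each 4-byte NACK field written above holds a 16-bit base sequence number
followed by a 16-bit bitmask in which bit i flags base + i + 1 as also
missing, matching the standard generic NACK FCI layout. A self-contained
sketch of grouping a sorted, duplicate-free list into such fields (the helper
name is illustrative):

    #include <stdint.h>
    #include <list>
    #include <utility>
    #include <vector>

    std::vector<std::pair<uint16_t, uint16_t> > BuildNackFields(
        const std::list<uint16_t>& nacks) {
      std::vector<std::pair<uint16_t, uint16_t> > fields;
      std::list<uint16_t>::const_iterator it = nacks.begin();
      while (it != nacks.end()) {
        uint16_t base = *it++;
        uint16_t mask = 0;
        while (it != nacks.end()) {
          uint16_t shift = static_cast<uint16_t>(*it - base) - 1;
          if (shift > 15)
            break;  // Does not fit this field; start a new one.
          mask |= static_cast<uint16_t>(1 << shift);
          ++it;
        }
        fields.push_back(std::make_pair(base, mask));
      }
      return fields;
    }
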
-void RtcpSender::BuildBye(std::vector<uint8>* packet) const {
+void RtcpSender::BuildBye(Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 8, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 8 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 8, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 8 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 8);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 8);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 8);
big_endian_writer.WriteU8(0x80 + 1);
- big_endian_writer.WriteU8(kPacketTypeBye);
- big_endian_writer.WriteU16(1); // Length.
- big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
-}
-
-/*
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |V=2|P|reserved | PT=XR=207 | length |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | SSRC |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | BT=5 | reserved | block length |
- +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
- | SSRC_1 (SSRC of first receiver) | sub-
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
- | last RR (LRR) | 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | delay since last RR (DLRR) |
- +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-*/
-void RtcpSender::BuildDlrrRb(const RtcpDlrrReportBlock* dlrr,
- std::vector<uint8>* packet) const {
- size_t start_size = packet->size();
- DCHECK_LT(start_size + 24, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 24 > kIpPacketSize) return;
-
- packet->resize(start_size + 24);
-
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
- big_endian_writer.WriteU8(0x80);
- big_endian_writer.WriteU8(kPacketTypeXr);
- big_endian_writer.WriteU16(5); // Length.
+ big_endian_writer.WriteU8(transport::kPacketTypeBye);
+ big_endian_writer.WriteU16(1); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
- big_endian_writer.WriteU8(5); // Add block type.
- big_endian_writer.WriteU8(0); // Add reserved.
- big_endian_writer.WriteU16(3); // Block length.
- big_endian_writer.WriteU32(ssrc_); // Add the media (received RTP) SSRC.
- big_endian_writer.WriteU32(dlrr->last_rr);
- big_endian_writer.WriteU32(dlrr->delay_since_last_rr);
}
void RtcpSender::BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
- std::vector<uint8>* packet) const {
+ Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 20, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 20 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 20, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 20 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 20);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 20);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 20);
big_endian_writer.WriteU8(0x80);
- big_endian_writer.WriteU8(kPacketTypeXr);
- big_endian_writer.WriteU16(4); // Length.
+ big_endian_writer.WriteU8(transport::kPacketTypeXr);
+ big_endian_writer.WriteU16(4); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
- big_endian_writer.WriteU8(4); // Add block type.
- big_endian_writer.WriteU8(0); // Add reserved.
- big_endian_writer.WriteU16(2); // Block length.
+ big_endian_writer.WriteU8(4); // Add block type.
+ big_endian_writer.WriteU8(0); // Add reserved.
+ big_endian_writer.WriteU16(2); // Block length.
// Add the media (received RTP) SSRC.
big_endian_writer.WriteU32(rrtr->ntp_seconds);
@@ -593,47 +529,54 @@ void RtcpSender::BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
}
void RtcpSender::BuildCast(const RtcpCastMessage* cast,
- std::vector<uint8>* packet) const {
+ uint16 target_delay_ms,
+ Packet* packet) const {
size_t start_size = packet->size();
- DCHECK_LT(start_size + 20, kIpPacketSize) << "Not enough buffer space";
- if (start_size + 20 > kIpPacketSize) return;
+ DCHECK_LT(start_size + 20, kMaxIpPacketSize) << "Not enough buffer space";
+ if (start_size + 20 > kMaxIpPacketSize)
+ return;
packet->resize(start_size + 20);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 20);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 20);
uint8 FMT = 15; // Application layer feedback.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
big_endian_writer.WriteU8(0);
- size_t cast_size_pos = start_size + 3; // Save length position.
+ size_t cast_size_pos = start_size + 3; // Save length position.
big_endian_writer.WriteU8(4);
- big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(cast->media_ssrc_); // Remote SSRC.
big_endian_writer.WriteU32(kCast);
big_endian_writer.WriteU8(static_cast<uint8>(cast->ack_frame_id_));
size_t cast_loss_field_pos = start_size + 17; // Save loss field position.
big_endian_writer.WriteU8(0); // Overwritten with number_of_loss_fields.
- big_endian_writer.WriteU8(0); // Reserved.
- big_endian_writer.WriteU8(0); // Reserved.
+ big_endian_writer.WriteU16(target_delay_ms);
size_t number_of_loss_fields = 0;
- size_t max_number_of_loss_fields = std::min<size_t>(kRtcpMaxCastLossFields,
- (kIpPacketSize - packet->size()) / 4);
+ size_t max_number_of_loss_fields = std::min<size_t>(
+ kRtcpMaxCastLossFields, (kMaxIpPacketSize - packet->size()) / 4);
MissingFramesAndPacketsMap::const_iterator frame_it =
cast->missing_frames_and_packets_.begin();
+ NackStringBuilder nack_string_builder;
for (; frame_it != cast->missing_frames_and_packets_.end() &&
- number_of_loss_fields < max_number_of_loss_fields; ++frame_it) {
+ number_of_loss_fields < max_number_of_loss_fields;
+ ++frame_it) {
+ nack_string_builder.PushFrame(frame_it->first);
// Iterate through all frames with missing packets.
if (frame_it->second.empty()) {
// Special case: all packets in a frame are missing.
start_size = packet->size();
packet->resize(start_size + 4);
- net::BigEndianWriter big_endian_nack_writer(&((*packet)[start_size]), 4);
+ base::BigEndianWriter big_endian_nack_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 4);
big_endian_nack_writer.WriteU8(static_cast<uint8>(frame_it->first));
big_endian_nack_writer.WriteU16(kRtcpCastAllPacketsLost);
big_endian_nack_writer.WriteU8(0);
+ nack_string_builder.PushPacket(kRtcpCastAllPacketsLost);
++number_of_loss_fields;
} else {
PacketIdSet::const_iterator packet_it = frame_it->second.begin();
@@ -642,18 +585,20 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
start_size = packet->size();
packet->resize(start_size + 4);
- net::BigEndianWriter big_endian_nack_writer(
- &((*packet)[start_size]), 4);
+ base::BigEndianWriter big_endian_nack_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 4);
// Write frame and packet id to buffer before calculating bitmask.
big_endian_nack_writer.WriteU8(static_cast<uint8>(frame_it->first));
big_endian_nack_writer.WriteU16(packet_id);
+ nack_string_builder.PushPacket(packet_id);
uint8 bitmask = 0;
++packet_it;
while (packet_it != frame_it->second.end()) {
int shift = static_cast<uint8>(*packet_it - packet_id) - 1;
if (shift >= 0 && shift <= 7) {
+ nack_string_builder.PushPacket(*packet_it);
bitmask |= (1 << shift);
++packet_it;
} else {
@@ -665,80 +610,48 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
}
}
}
+ VLOG_IF(1, !nack_string_builder.Empty())
+ << "SSRC: " << cast->media_ssrc_
+ << ", ACK: " << cast->ack_frame_id_
+ << ", NACK: " << nack_string_builder.GetString();
DCHECK_LE(number_of_loss_fields, kRtcpMaxCastLossFields);
(*packet)[cast_size_pos] = static_cast<uint8>(4 + number_of_loss_fields);
(*packet)[cast_loss_field_pos] = static_cast<uint8>(number_of_loss_fields);
}
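
In the Cast feedback header above, this patch replaces two reserved bytes with
the 16-bit target_delay_ms, while each loss field stays 4 bytes: an 8-bit
frame id, a 16-bit packet id (65535, i.e. kRtcpCastAllPacketsLost, when the
whole frame is missing, per the NackStringBuilder comment), and an 8-bit
bitmask covering the next eight packet ids. A sketch of serializing one loss
field (the helper name is illustrative):

    #include <stdint.h>
    #include <vector>

    void AppendLossField(uint8_t frame_id,
                         uint16_t packet_id,
                         uint8_t bitmask,
                         std::vector<uint8_t>* packet) {
      packet->push_back(frame_id);
      packet->push_back(static_cast<uint8_t>(packet_id >> 8));  // Big-endian.
      packet->push_back(static_cast<uint8_t>(packet_id & 0xff));
      packet->push_back(bitmask);
    }
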
-void RtcpSender::BuildSenderLog(RtcpSenderLogMessage* sender_log_message,
- std::vector<uint8>* packet) const {
- DCHECK(sender_log_message);
- DCHECK(packet);
- size_t start_size = packet->size();
- size_t remaining_space = kIpPacketSize - start_size;
- DCHECK_GE(remaining_space, kRtcpCastLogHeaderSize + kRtcpSenderFrameLogSize)
- << "Not enough buffer space";
- if (remaining_space < kRtcpCastLogHeaderSize + kRtcpSenderFrameLogSize)
- return;
-
- size_t space_for_x_messages =
- (remaining_space - kRtcpCastLogHeaderSize) / kRtcpSenderFrameLogSize;
- size_t number_of_messages = std::min(space_for_x_messages,
- sender_log_message->size());
-
- size_t log_size = kRtcpCastLogHeaderSize +
- number_of_messages * kRtcpSenderFrameLogSize;
- packet->resize(start_size + log_size);
-
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), log_size);
- big_endian_writer.WriteU8(0x80 + kSenderLogSubtype);
- big_endian_writer.WriteU8(kPacketTypeApplicationDefined);
- big_endian_writer.WriteU16(static_cast<uint16>(2 + number_of_messages));
- big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
- big_endian_writer.WriteU32(kCast);
-
- for (; number_of_messages > 0; --number_of_messages) {
- DCHECK(!sender_log_message->empty());
- const RtcpSenderFrameLogMessage& message = sender_log_message->front();
- big_endian_writer.WriteU8(static_cast<uint8>(message.frame_status));
- // We send the 24 least significant bits of the RTP timestamp.
- big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp >> 16));
- big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp >> 8));
- big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp));
- sender_log_message->pop_front();
- }
-}
-
-void RtcpSender::BuildReceiverLog(RtcpReceiverLogMessage* receiver_log_message,
- std::vector<uint8>* packet) const {
- DCHECK(receiver_log_message);
+void RtcpSender::BuildReceiverLog(
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events,
+ Packet* packet) {
const size_t packet_start_size = packet->size();
size_t number_of_frames = 0;
size_t total_number_of_messages_to_send = 0;
size_t rtcp_log_size = 0;
-
- if (!ScanRtcpReceiverLogMessage(*receiver_log_message,
- packet_start_size,
- &number_of_frames,
- &total_number_of_messages_to_send,
- &rtcp_log_size)) {
+ RtcpReceiverLogMessage receiver_log_message;
+
+ if (!BuildRtcpReceiverLogMessage(rtcp_events,
+ packet_start_size,
+ &receiver_log_message,
+ &number_of_frames,
+ &total_number_of_messages_to_send,
+ &rtcp_log_size)) {
return;
}
packet->resize(packet_start_size + rtcp_log_size);
- net::BigEndianWriter big_endian_writer(&((*packet)[packet_start_size]),
- rtcp_log_size);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[packet_start_size])), rtcp_log_size);
big_endian_writer.WriteU8(0x80 + kReceiverLogSubtype);
- big_endian_writer.WriteU8(kPacketTypeApplicationDefined);
- big_endian_writer.WriteU16(static_cast<uint16>(2 + 2 * number_of_frames +
- total_number_of_messages_to_send));
+ big_endian_writer.WriteU8(transport::kPacketTypeApplicationDefined);
+ big_endian_writer.WriteU16(static_cast<uint16>(
+ 2 + 2 * number_of_frames + total_number_of_messages_to_send));
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(kCast);
- while (!receiver_log_message->empty() &&
+ while (!receiver_log_message.empty() &&
total_number_of_messages_to_send > 0) {
- RtcpReceiverFrameLogMessage& frame_log_messages =
- receiver_log_message->front();
+ RtcpReceiverFrameLogMessage& frame_log_messages(
+ receiver_log_message.front());
+
// Add our frame header.
big_endian_writer.WriteU32(frame_log_messages.rtp_timestamp_);
size_t messages_in_frame = frame_log_messages.event_log_messages_.size();
@@ -765,19 +678,18 @@ void RtcpSender::BuildReceiverLog(RtcpReceiverLogMessage* receiver_log_message,
const RtcpReceiverEventLogMessage& event_message =
frame_log_messages.event_log_messages_.front();
uint16 event_type_and_timestamp_delta =
- MergeEventTypeAndTimestampForWireFormat(event_message.type,
- event_message.event_timestamp - event_timestamp_base);
+ MergeEventTypeAndTimestampForWireFormat(
+ event_message.type,
+ event_message.event_timestamp - event_timestamp_base);
switch (event_message.type) {
- case kAckSent:
- case kAudioPlayoutDelay:
- case kAudioFrameDecoded:
- case kVideoFrameDecoded:
- case kVideoRenderDelay:
- big_endian_writer.WriteU16(static_cast<uint16>(
- event_message.delay_delta.InMilliseconds()));
+ case FRAME_ACK_SENT:
+ case FRAME_PLAYOUT:
+ case FRAME_DECODED:
+ big_endian_writer.WriteU16(
+ static_cast<uint16>(event_message.delay_delta.InMilliseconds()));
big_endian_writer.WriteU16(event_type_and_timestamp_delta);
break;
- case kPacketReceived:
+ case PACKET_RECEIVED:
big_endian_writer.WriteU16(event_message.packet_id);
big_endian_writer.WriteU16(event_type_and_timestamp_delta);
break;
@@ -789,10 +701,124 @@ void RtcpSender::BuildReceiverLog(RtcpReceiverLogMessage* receiver_log_message,
}
if (frame_log_messages.event_log_messages_.empty()) {
// We sent all messages on this frame; pop the frame header.
- receiver_log_message->pop_front();
+ receiver_log_message.pop_front();
}
}
- DCHECK_EQ(total_number_of_messages_to_send, 0);
+ DCHECK_EQ(total_number_of_messages_to_send, 0u);
+}
+
+bool RtcpSender::BuildRtcpReceiverLogMessage(
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events,
+ size_t start_size,
+ RtcpReceiverLogMessage* receiver_log_message,
+ size_t* number_of_frames,
+ size_t* total_number_of_messages_to_send,
+ size_t* rtcp_log_size) {
+ size_t remaining_space =
+ std::min(kMaxReceiverLogBytes, kMaxIpPacketSize - start_size);
+ if (remaining_space < kRtcpCastLogHeaderSize + kRtcpReceiverFrameLogSize +
+ kRtcpReceiverEventLogSize) {
+ return false;
+ }
+
+ // Scratch buffer used to sort, and if necessary truncate, the events of
+ // a single frame by event timestamp.
+ std::vector<RtcpReceiverEventLogMessage> sorted_log_messages;
+
+ // Account for the RTCP header for an application-defined packet.
+ remaining_space -= kRtcpCastLogHeaderSize;
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap::const_reverse_iterator rit =
+ rtcp_events.rbegin();
+
+ while (rit != rtcp_events.rend() &&
+ remaining_space >=
+ kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize) {
+ const RtpTimestamp rtp_timestamp = rit->first;
+ RtcpReceiverFrameLogMessage frame_log(rtp_timestamp);
+ remaining_space -= kRtcpReceiverFrameLogSize;
+ ++*number_of_frames;
+
+ // Get all events of a single frame.
+ sorted_log_messages.clear();
+ do {
+ RtcpReceiverEventLogMessage event_log_message;
+ event_log_message.type = rit->second.type;
+ event_log_message.event_timestamp = rit->second.timestamp;
+ event_log_message.delay_delta = rit->second.delay_delta;
+ event_log_message.packet_id = rit->second.packet_id;
+ sorted_log_messages.push_back(event_log_message);
+ ++rit;
+ } while (rit != rtcp_events.rend() && rit->first == rtp_timestamp);
+
+ std::sort(sorted_log_messages.begin(),
+ sorted_log_messages.end(),
+ &EventTimestampLessThan);
+
+ // From |sorted_log_messages|, only take events that are no more than
+ // |kMaxWireFormatTimeDeltaMs| milliseconds away from the latest event.
+ // Older events cannot be encoded over the wire.
+ std::vector<RtcpReceiverEventLogMessage>::reverse_iterator sorted_rit =
+ sorted_log_messages.rbegin();
+ base::TimeTicks first_event_timestamp = sorted_rit->event_timestamp;
+ size_t events_in_frame = 0;
+ while (sorted_rit != sorted_log_messages.rend() &&
+ events_in_frame < kRtcpMaxReceiverLogMessages &&
+ remaining_space >= kRtcpReceiverEventLogSize) {
+ base::TimeDelta delta(first_event_timestamp -
+ sorted_rit->event_timestamp);
+ if (delta.InMilliseconds() > kMaxWireFormatTimeDeltaMs)
+ break;
+ frame_log.event_log_messages_.push_front(*sorted_rit);
+ ++events_in_frame;
+ ++*total_number_of_messages_to_send;
+ remaining_space -= kRtcpReceiverEventLogSize;
+ ++sorted_rit;
+ }
+
+ receiver_log_message->push_front(frame_log);
+ }
+
+ rtcp_events_history_.push_front(*receiver_log_message);
+
+ // We don't try to match RTP timestamps of redundancy frame logs with those
+ // from the newest set (which would save the space of an extra RTP timestamp
+ // over the wire). Unless the redundancy frame logs are very recent, it's
+ // unlikely there will be a match anyway.
+ if (rtcp_events_history_.size() > kFirstRedundancyOffset) {
+ // Add the first set of redundancy messages, if enough space remains.
+ AddReceiverLog(rtcp_events_history_[kFirstRedundancyOffset],
+ receiver_log_message,
+ &remaining_space,
+ number_of_frames,
+ total_number_of_messages_to_send);
+ }
+
+ if (rtcp_events_history_.size() > kSecondRedundancyOffset) {
+ // Add the second set of redundancy messages, if enough space remains.
+ AddReceiverLog(rtcp_events_history_[kSecondRedundancyOffset],
+ receiver_log_message,
+ &remaining_space,
+ number_of_frames,
+ total_number_of_messages_to_send);
+ }
+
+ if (rtcp_events_history_.size() > kReceiveLogMessageHistorySize) {
+ rtcp_events_history_.pop_back();
+ }
+
+ DCHECK_LE(rtcp_events_history_.size(), kReceiveLogMessageHistorySize);
+
+ *rtcp_log_size =
+ kRtcpCastLogHeaderSize + *number_of_frames * kRtcpReceiverFrameLogSize +
+ *total_number_of_messages_to_send * kRtcpReceiverEventLogSize;
+ DCHECK_GE(kMaxIpPacketSize, start_size + *rtcp_log_size)
+ << "Not enough buffer space.";
+
+ VLOG(3) << "number of frames: " << *number_of_frames;
+ VLOG(3) << "total messages to send: " << *total_number_of_messages_to_send;
+ VLOG(3) << "rtcp log size: " << *rtcp_log_size;
+ return *number_of_frames > 0;
}
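
Taken together, the three history checks above implement the redundancy scheme
whose constants live in rtcp_sender.h below: every receiver log report is
pushed onto rtcp_events_history_, and the batches from 10 and 20 reports ago
ride along with each new report, so each event is transmitted up to three
times. A minimal sketch of that pattern (LogBatch stands in for
RtcpReceiverLogMessage):

    #include <deque>
    #include <vector>

    typedef std::vector<int> LogBatch;  // Stand-in for RtcpReceiverLogMessage.

    // One report cycle: send the fresh batch plus the batches queued 10 and
    // 20 reports ago, then cap the history at 20 entries.
    std::vector<LogBatch> BatchesToSend(const LogBatch& fresh,
                                        std::deque<LogBatch>* history) {
      history->push_front(fresh);
      std::vector<LogBatch> out;
      out.push_back(fresh);
      if (history->size() > 10)   // kFirstRedundancyOffset.
        out.push_back((*history)[10]);
      if (history->size() > 20)   // kSecondRedundancyOffset.
        out.push_back((*history)[20]);
      if (history->size() > 20)   // kReceiveLogMessageHistorySize.
        history->pop_back();
      return out;
    }
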
} // namespace cast
diff --git a/chromium/media/cast/rtcp/rtcp_sender.h b/chromium/media/cast/rtcp/rtcp_sender.h
index e931c693c0f..f09a4fb0e53 100644
--- a/chromium/media/cast/rtcp/rtcp_sender.h
+++ b/chromium/media/cast/rtcp/rtcp_sender.h
@@ -5,95 +5,96 @@
#ifndef MEDIA_CAST_RTCP_RTCP_SENDER_H_
#define MEDIA_CAST_RTCP_RTCP_SENDER_H_
+#include <deque>
#include <list>
#include <string>
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/rtcp/rtcp_builder.h"
namespace media {
namespace cast {
+// We limit the size of receiver logs to avoid queuing up packets.
+const size_t kMaxReceiverLogBytes = 200;
+
+// This determines how long to hold receiver log events, based on how
+// many "receiver log message reports" ago the events were sent.
+const size_t kReceiveLogMessageHistorySize = 20;
+
+// This determines when to send events the second time.
+const size_t kFirstRedundancyOffset = 10;
+COMPILE_ASSERT(kFirstRedundancyOffset > 0 &&
+ kFirstRedundancyOffset <= kReceiveLogMessageHistorySize,
+ redundancy_offset_out_of_range);
+
+// When to send events the third time.
+const size_t kSecondRedundancyOffset = 20;
+COMPILE_ASSERT(kSecondRedundancyOffset >
+ kFirstRedundancyOffset && kSecondRedundancyOffset <=
+ kReceiveLogMessageHistorySize,
+ redundancy_offset_out_of_range);
+
+// TODO(mikhal): Resolve duplication between this and RtcpBuilder.
class RtcpSender {
public:
RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
- PacedPacketSender* const paced_packet_sender,
+ transport::PacedPacketSender* outgoing_transport,
uint32 sending_ssrc,
const std::string& c_name);
virtual ~RtcpSender();
- void SendRtcpFromRtpSender(uint32 packet_type_flags,
- const RtcpSenderInfo* sender_info,
- const RtcpDlrrReportBlock* dlrr,
- RtcpSenderLogMessage* sender_log);
-
- void SendRtcpFromRtpReceiver(uint32 packet_type_flags,
- const RtcpReportBlock* report_block,
- const RtcpReceiverReferenceTimeReport* rrtr,
- const RtcpCastMessage* cast_message,
- RtcpReceiverLogMessage* receiver_log);
-
- enum RtcpPacketType {
- kRtcpSr = 0x0002,
- kRtcpRr = 0x0004,
- kRtcpBye = 0x0008,
- kRtcpPli = 0x0010,
- kRtcpNack = 0x0020,
- kRtcpFir = 0x0040,
- kRtcpSrReq = 0x0200,
- kRtcpDlrr = 0x0400,
- kRtcpRrtr = 0x0800,
- kRtcpRpsi = 0x8000,
- kRtcpRemb = 0x10000,
- kRtcpCast = 0x20000,
- kRtcpSenderLog = 0x40000,
- kRtcpReceiverLog = 0x80000,
- };
+ void SendRtcpFromRtpReceiver(
+ uint32 packet_type_flags,
+ const transport::RtcpReportBlock* report_block,
+ const RtcpReceiverReferenceTimeReport* rrtr,
+ const RtcpCastMessage* cast_message,
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
+ uint16 target_delay_ms);
private:
- void BuildSR(const RtcpSenderInfo& sender_info,
- const RtcpReportBlock* report_block,
- std::vector<uint8>* packet) const;
+ void BuildRR(const transport::RtcpReportBlock* report_block,
+ Packet* packet) const;
- void BuildRR(const RtcpReportBlock* report_block,
- std::vector<uint8>* packet) const;
+ void AddReportBlocks(const transport::RtcpReportBlock& report_block,
+ Packet* packet) const;
- void AddReportBlocks(const RtcpReportBlock& report_block,
- std::vector<uint8>* packet) const;
+ void BuildSdec(Packet* packet) const;
- void BuildSdec(std::vector<uint8>* packet) const;
+ void BuildPli(uint32 remote_ssrc, Packet* packet) const;
- void BuildPli(uint32 remote_ssrc,
- std::vector<uint8>* packet) const;
+ void BuildRemb(const RtcpRembMessage* remb, Packet* packet) const;
- void BuildRemb(const RtcpRembMessage* remb,
- std::vector<uint8>* packet) const;
+ void BuildRpsi(const RtcpRpsiMessage* rpsi, Packet* packet) const;
- void BuildRpsi(const RtcpRpsiMessage* rpsi,
- std::vector<uint8>* packet) const;
+ void BuildNack(const RtcpNackMessage* nack, Packet* packet) const;
- void BuildNack(const RtcpNackMessage* nack,
- std::vector<uint8>* packet) const;
-
- void BuildBye(std::vector<uint8>* packet) const;
-
- void BuildDlrrRb(const RtcpDlrrReportBlock* dlrr,
- std::vector<uint8>* packet) const;
+ void BuildBye(Packet* packet) const;
void BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
- std::vector<uint8>* packet) const;
+ Packet* packet) const;
void BuildCast(const RtcpCastMessage* cast_message,
- std::vector<uint8>* packet) const;
+ uint16 target_delay_ms,
+ Packet* packet) const;
- void BuildSenderLog(RtcpSenderLogMessage* sender_log_message,
- std::vector<uint8>* packet) const;
+ void BuildReceiverLog(
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events,
+ Packet* packet);
- void BuildReceiverLog(RtcpReceiverLogMessage* receiver_log_message,
- std::vector<uint8>* packet) const;
+ bool BuildRtcpReceiverLogMessage(
+ const ReceiverRtcpEventSubscriber::RtcpEventMultiMap& rtcp_events,
+ size_t start_size,
+ RtcpReceiverLogMessage* receiver_log_message,
+ size_t* number_of_frames,
+ size_t* total_number_of_messages_to_send,
+ size_t* rtcp_log_size);
inline void BitrateToRembExponentBitrate(uint32 bitrate,
uint8* exponent,
@@ -113,9 +114,11 @@ class RtcpSender {
const std::string c_name_;
// Not owned by this class.
- PacedPacketSender* transport_;
+ transport::PacedPacketSender* const transport_;
scoped_refptr<CastEnvironment> cast_environment_;
+ std::deque<RtcpReceiverLogMessage> rtcp_events_history_;
+
DISALLOW_COPY_AND_ASSIGN(RtcpSender);
};
diff --git a/chromium/media/cast/rtcp/rtcp_sender_unittest.cc b/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
index 16e9ee18ffb..0b0c7d3ab89 100644
--- a/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
+++ b/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
@@ -6,11 +6,13 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
#include "media/cast/rtcp/rtcp_sender.h"
#include "media/cast/rtcp/rtcp_utility.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
-#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -19,95 +21,99 @@ namespace cast {
namespace {
static const uint32 kSendingSsrc = 0x12345678;
static const uint32 kMediaSsrc = 0x87654321;
+static const int16 kDefaultDelay = 100;
static const std::string kCName("test@10.1.1.1");
+
+transport::RtcpReportBlock GetReportBlock() {
+ transport::RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+ return report_block;
+}
+
} // namespace
-class TestRtcpTransport : public PacedPacketSender {
+class TestRtcpTransport : public transport::PacedPacketSender {
public:
- TestRtcpTransport()
- : expected_packet_length_(0),
- packet_count_(0) {
- }
-
- virtual bool SendRtcpPacket(const Packet& packet) OVERRIDE {
- EXPECT_EQ(expected_packet_length_, packet.size());
- EXPECT_EQ(0, memcmp(expected_packet_, &(packet[0]), packet.size()));
+ TestRtcpTransport() : packet_count_(0) {}
+
+ virtual bool SendRtcpPacket(uint32 ssrc,
+ transport::PacketRef packet) OVERRIDE {
+ EXPECT_EQ(expected_packet_.size(), packet->data.size());
+ EXPECT_EQ(0, memcmp(expected_packet_.data(),
+ packet->data.data(),
+ packet->data.size()));
packet_count_++;
return true;
}
- virtual bool SendPackets(const PacketList& packets) OVERRIDE {
+ virtual bool SendPackets(
+ const transport::SendPacketVector& packets) OVERRIDE {
return false;
}
-
- virtual bool ResendPackets(const PacketList& packets) OVERRIDE {
+ virtual bool ResendPackets(
+ const transport::SendPacketVector& packets,
+ base::TimeDelta dedupe_window) OVERRIDE {
return false;
}
- void SetExpectedRtcpPacket(const uint8* rtcp_buffer, size_t length) {
- expected_packet_length_ = length;
- memcpy(expected_packet_, rtcp_buffer, length);
+ virtual void CancelSendingPacket(
+ const transport::PacketKey& packet_key) OVERRIDE {
+ }
+
+ void SetExpectedRtcpPacket(scoped_ptr<Packet> packet) {
+ expected_packet_.swap(*packet);
}
int packet_count() const { return packet_count_; }
private:
- uint8 expected_packet_[kIpPacketSize];
- size_t expected_packet_length_;
+ Packet expected_packet_;
int packet_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestRtcpTransport);
};
class RtcpSenderTest : public ::testing::Test {
protected:
RtcpSenderTest()
- : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
- cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig())),
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
rtcp_sender_(new RtcpSender(cast_environment_,
&test_transport_,
kSendingSsrc,
- kCName)) {
- }
+ kCName)) {}
- base::SimpleTestTickClock testing_clock_;
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
TestRtcpTransport test_transport_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<RtcpSender> rtcp_sender_;
-};
-
-TEST_F(RtcpSenderTest, RtcpSenderReport) {
- RtcpSenderInfo sender_info;
- sender_info.ntp_seconds = kNtpHigh;
- sender_info.ntp_fraction = kNtpLow;
- sender_info.rtp_timestamp = kRtpTimestamp;
- sender_info.send_packet_count = kSendPacketCount;
- sender_info.send_octet_count = kSendOctetCount;
- // Sender report + c_name.
- TestRtcpPacketBuilder p;
- p.AddSr(kSendingSsrc, 0);
- p.AddSdesCname(kSendingSsrc, kCName);
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
-
- rtcp_sender_->SendRtcpFromRtpSender(RtcpSender::kRtcpSr,
- &sender_info,
- NULL,
- NULL);
-
- EXPECT_EQ(1, test_transport_.packet_count());
-}
+ DISALLOW_COPY_AND_ASSIGN(RtcpSenderTest);
+};
TEST_F(RtcpSenderTest, RtcpReceiverReport) {
// Empty receiver report + c_name.
TestRtcpPacketBuilder p1;
p1.AddRr(kSendingSsrc, 0);
p1.AddSdesCname(kSendingSsrc, kCName);
- test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
+ test_transport_.SetExpectedRtcpPacket(p1.GetPacket());
- rtcp_sender_->SendRtcpFromRtpReceiver(RtcpSender::kRtcpRr,
- NULL, NULL, NULL, NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ transport::kRtcpRr, NULL, NULL, NULL, NULL, kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
@@ -116,133 +122,16 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
p2.AddRr(kSendingSsrc, 1);
p2.AddRb(kMediaSsrc);
p2.AddSdesCname(kSendingSsrc, kCName);
- test_transport_.SetExpectedRtcpPacket(p2.Packet(), p2.Length());
+ test_transport_.SetExpectedRtcpPacket(p2.GetPacket().Pass());
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
- rtcp_sender_->SendRtcpFromRtpReceiver(RtcpSender::kRtcpRr, &report_block,
- NULL, NULL, NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ transport::kRtcpRr, &report_block, NULL, NULL, NULL, kDefaultDelay);
EXPECT_EQ(2, test_transport_.packet_count());
}
-TEST_F(RtcpSenderTest, RtcpSenderReportWithDlrr) {
- RtcpSenderInfo sender_info;
- sender_info.ntp_seconds = kNtpHigh;
- sender_info.ntp_fraction = kNtpLow;
- sender_info.rtp_timestamp = kRtpTimestamp;
- sender_info.send_packet_count = kSendPacketCount;
- sender_info.send_octet_count = kSendOctetCount;
-
- // Sender report + c_name + dlrr.
- TestRtcpPacketBuilder p1;
- p1.AddSr(kSendingSsrc, 0);
- p1.AddSdesCname(kSendingSsrc, kCName);
- p1.AddXrHeader(kSendingSsrc);
- p1.AddXrDlrrBlock(kSendingSsrc);
- test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
-
- RtcpDlrrReportBlock dlrr_rb;
- dlrr_rb.last_rr = kLastRr;
- dlrr_rb.delay_since_last_rr = kDelayLastRr;
-
- rtcp_sender_->SendRtcpFromRtpSender(
- RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr,
- &sender_info,
- &dlrr_rb,
- NULL);
-
- EXPECT_EQ(1, test_transport_.packet_count());
-}
-
-TEST_F(RtcpSenderTest, RtcpSenderReportWithDlrrAndLog) {
- RtcpSenderInfo sender_info;
- sender_info.ntp_seconds = kNtpHigh;
- sender_info.ntp_fraction = kNtpLow;
- sender_info.rtp_timestamp = kRtpTimestamp;
- sender_info.send_packet_count = kSendPacketCount;
- sender_info.send_octet_count = kSendOctetCount;
-
- // Sender report + c_name + dlrr + sender log.
- TestRtcpPacketBuilder p;
- p.AddSr(kSendingSsrc, 0);
- p.AddSdesCname(kSendingSsrc, kCName);
- p.AddXrHeader(kSendingSsrc);
- p.AddXrDlrrBlock(kSendingSsrc);
- p.AddSenderLog(kSendingSsrc);
- p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork, kRtpTimestamp);
-
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
-
- RtcpDlrrReportBlock dlrr_rb;
- dlrr_rb.last_rr = kLastRr;
- dlrr_rb.delay_since_last_rr = kDelayLastRr;
-
- RtcpSenderFrameLogMessage sender_frame_log;
- sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
- sender_frame_log.rtp_timestamp = kRtpTimestamp;
-
- RtcpSenderLogMessage sender_log;
- sender_log.push_back(sender_frame_log);
-
- rtcp_sender_->SendRtcpFromRtpSender(
- RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr | RtcpSender::kRtcpSenderLog,
- &sender_info,
- &dlrr_rb,
- &sender_log);
-
- EXPECT_EQ(1, test_transport_.packet_count());
- EXPECT_TRUE(sender_log.empty());
-}
-
-TEST_F(RtcpSenderTest, RtcpSenderReporWithTooManyLogFrames) {
- RtcpSenderInfo sender_info;
- sender_info.ntp_seconds = kNtpHigh;
- sender_info.ntp_fraction = kNtpLow;
- sender_info.rtp_timestamp = kRtpTimestamp;
- sender_info.send_packet_count = kSendPacketCount;
- sender_info.send_octet_count = kSendOctetCount;
-
- // Sender report + c_name + sender log.
- TestRtcpPacketBuilder p;
- p.AddSr(kSendingSsrc, 0);
- p.AddSdesCname(kSendingSsrc, kCName);
- p.AddSenderLog(kSendingSsrc);
-
- for (int i = 0; i < 359; ++i) {
- p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork,
- kRtpTimestamp + i * 90);
- }
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
-
-
- RtcpSenderLogMessage sender_log;
- for (int j = 0; j < 400; ++j) {
- RtcpSenderFrameLogMessage sender_frame_log;
- sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
- sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
- sender_log.push_back(sender_frame_log);
- }
-
- rtcp_sender_->SendRtcpFromRtpSender(
- RtcpSender::kRtcpSr | RtcpSender::kRtcpSenderLog,
- &sender_info,
- NULL,
- &sender_log);
-
- EXPECT_EQ(1, test_transport_.packet_count());
- EXPECT_EQ(41u, sender_log.size());
-}
-
TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
// Receiver report with report block + c_name.
TestRtcpPacketBuilder p;
@@ -251,29 +140,21 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
p.AddSdesCname(kSendingSsrc, kCName);
p.AddXrHeader(kSendingSsrc);
p.AddXrRrtrBlock();
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
RtcpReceiverReferenceTimeReport rrtr;
rrtr.ntp_seconds = kNtpHigh;
rrtr.ntp_fraction = kNtpLow;
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr,
+ transport::kRtcpRr | transport::kRtcpRrtr,
&report_block,
&rrtr,
NULL,
- NULL);
+ NULL,
+ kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -284,19 +165,10 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
p.AddRr(kSendingSsrc, 1);
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- p.AddCast(kSendingSsrc, kMediaSsrc);
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ p.AddCast(kSendingSsrc, kMediaSsrc, kDefaultDelay);
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
RtcpCastMessage cast_message(kMediaSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
@@ -310,11 +182,12 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
missing_packets;
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpCast,
+ transport::kRtcpRr | transport::kRtcpCast,
&report_block,
NULL,
&cast_message,
- NULL);
+ NULL,
+ kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -326,19 +199,10 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtraAndCastMessage) {
p.AddSdesCname(kSendingSsrc, kCName);
p.AddXrHeader(kSendingSsrc);
p.AddXrRrtrBlock();
- p.AddCast(kSendingSsrc, kMediaSsrc);
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ p.AddCast(kSendingSsrc, kMediaSsrc, kDefaultDelay);
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
RtcpReceiverReferenceTimeReport rrtr;
rrtr.ntp_seconds = kNtpHigh;
@@ -356,11 +220,12 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtraAndCastMessage) {
missing_packets;
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr | RtcpSender::kRtcpCast,
+ transport::kRtcpRr | transport::kRtcpRrtr | transport::kRtcpCast,
&report_block,
&rrtr,
&cast_message,
- NULL);
+ NULL,
+ kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -368,7 +233,6 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtraAndCastMessage) {
TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
static const uint32 kTimeBaseMs = 12345678;
static const uint32 kTimeDelayMs = 10;
- static const uint32 kDelayDeltaMs = 123;
TestRtcpPacketBuilder p;
p.AddRr(kSendingSsrc, 1);
@@ -376,19 +240,10 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
p.AddSdesCname(kSendingSsrc, kCName);
p.AddXrHeader(kSendingSsrc);
p.AddXrRrtrBlock();
- p.AddCast(kSendingSsrc, kMediaSsrc);
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ p.AddCast(kSendingSsrc, kMediaSsrc, kDefaultDelay);
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
RtcpReceiverReferenceTimeReport rrtr;
rrtr.ntp_seconds = kNtpHigh;
@@ -405,185 +260,296 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
missing_packets;
- // Test empty Log message.
- RtcpReceiverLogMessage receiver_log;
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
- VLOG(0) << " Test empty Log " ;
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr | RtcpSender::kRtcpCast |
- RtcpSender::kRtcpReceiverLog,
+ transport::kRtcpRr | transport::kRtcpRrtr | transport::kRtcpCast |
+ transport::kRtcpReceiverLog,
&report_block,
&rrtr,
&cast_message,
- &receiver_log);
-
+ &rtcp_events,
+ kDefaultDelay);
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
p.AddReceiverLog(kSendingSsrc);
p.AddReceiverFrameLog(kRtpTimestamp, 2, kTimeBaseMs);
- p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
- p.AddReceiverEventLog(kLostPacketId1, 6, kTimeDelayMs);
-
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+ p.AddReceiverEventLog(kLostPacketId1, PACKET_RECEIVED, kTimeDelayMs);
- RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
- RtcpReceiverEventLogMessage event_log;
-
- event_log.type = kAckSent;
- event_log.event_timestamp = testing_clock.NowTicks();
- event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
- frame_log.event_log_messages_.push_back(event_log);
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
+ FrameEvent frame_event;
+ frame_event.rtp_timestamp = kRtpTimestamp;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
+ frame_event.timestamp = testing_clock.NowTicks();
+ event_subscriber.OnReceiveFrameEvent(frame_event);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
- event_log.type = kPacketReceived;
- event_log.event_timestamp = testing_clock.NowTicks();
- event_log.packet_id = kLostPacketId1;
- frame_log.event_log_messages_.push_back(event_log);
- receiver_log.push_back(frame_log);
+ PacketEvent packet_event;
+ packet_event.rtp_timestamp = kRtpTimestamp;
+ packet_event.type = PACKET_RECEIVED;
+ packet_event.media_type = VIDEO_EVENT;
+ packet_event.timestamp = testing_clock.NowTicks();
+ packet_event.packet_id = kLostPacketId1;
+ event_subscriber.OnReceivePacketEvent(packet_event);
+ event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+ EXPECT_EQ(2u, rtcp_events.size());
- VLOG(0) << " Test Log " ;
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr | RtcpSender::kRtcpCast |
- RtcpSender::kRtcpReceiverLog,
+ transport::kRtcpRr | transport::kRtcpRrtr | transport::kRtcpCast |
+ transport::kRtcpReceiverLog,
&report_block,
&rrtr,
&cast_message,
- &receiver_log);
+ &rtcp_events,
+ kDefaultDelay);
- EXPECT_TRUE(receiver_log.empty());
EXPECT_EQ(2, test_transport_.packet_count());
}
TEST_F(RtcpSenderTest, RtcpReceiverReportWithOversizedFrameLog) {
static const uint32 kTimeBaseMs = 12345678;
static const uint32 kTimeDelayMs = 10;
- static const uint32 kDelayDeltaMs = 123;
TestRtcpPacketBuilder p;
p.AddRr(kSendingSsrc, 1);
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
p.AddReceiverLog(kSendingSsrc);
- p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs);
- p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
- p.AddReceiverFrameLog(kRtpTimestamp + 2345,
- kRtcpMaxReceiverLogMessages, kTimeBaseMs);
-
- for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
+ int remaining_bytes = kMaxReceiverLogBytes;
+ remaining_bytes -= kRtcpCastLogHeaderSize;
+
+ remaining_bytes -= kRtcpReceiverFrameLogSize;
+ int num_events = remaining_bytes / kRtcpReceiverEventLogSize;
+ EXPECT_LE(num_events, static_cast<int>(kRtcpMaxReceiverLogMessages));
+ // Only the last |num_events| events are sent due to the receiver log size cap.
+ p.AddReceiverFrameLog(
+ kRtpTimestamp + 2345,
+ num_events,
+ kTimeBaseMs + (kRtcpMaxReceiverLogMessages - num_events) * kTimeDelayMs);
+ for (int i = 0; i < num_events; i++) {
p.AddReceiverEventLog(
- kLostPacketId1, 6, static_cast<uint16>(kTimeDelayMs * i));
+ kLostPacketId1, PACKET_RECEIVED,
+ static_cast<uint16>(kTimeDelayMs * i));
}
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
-
- RtcpReceiverFrameLogMessage frame_1_log(kRtpTimestamp);
- RtcpReceiverEventLogMessage event_log;
-
- event_log.type = kAckSent;
- event_log.event_timestamp = testing_clock.NowTicks();
- event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
- frame_1_log.event_log_messages_.push_back(event_log);
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- RtcpReceiverLogMessage receiver_log;
- receiver_log.push_back(frame_1_log);
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
+ FrameEvent frame_event;
+ frame_event.rtp_timestamp = kRtpTimestamp;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
+ frame_event.timestamp = testing_clock.NowTicks();
+ event_subscriber.OnReceiveFrameEvent(frame_event);
- RtcpReceiverFrameLogMessage frame_2_log(kRtpTimestamp + 2345);
-
- for (int j = 0; j < 300; ++j) {
- event_log.type = kPacketReceived;
- event_log.event_timestamp = testing_clock.NowTicks();
- event_log.packet_id = kLostPacketId1;
- frame_2_log.event_log_messages_.push_back(event_log);
+ for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
+ PacketEvent packet_event;
+ packet_event.rtp_timestamp = kRtpTimestamp + 2345;
+ packet_event.type = PACKET_RECEIVED;
+ packet_event.media_type = VIDEO_EVENT;
+ packet_event.timestamp = testing_clock.NowTicks();
+ packet_event.packet_id = kLostPacketId1;
+ event_subscriber.OnReceivePacketEvent(packet_event);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
}
- receiver_log.push_back(frame_2_log);
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpReceiverLog,
+ transport::kRtcpRr | transport::kRtcpReceiverLog,
&report_block,
NULL,
NULL,
- &receiver_log);
+ &rtcp_events,
+ kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
- EXPECT_EQ(1u, receiver_log.size());
- EXPECT_EQ(300u - kRtcpMaxReceiverLogMessages,
- receiver_log.front().event_log_messages_.size());
}
TEST_F(RtcpSenderTest, RtcpReceiverReportWithTooManyLogFrames) {
static const uint32 kTimeBaseMs = 12345678;
static const uint32 kTimeDelayMs = 10;
- static const uint32 kDelayDeltaMs = 123;
TestRtcpPacketBuilder p;
p.AddRr(kSendingSsrc, 1);
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- RtcpReportBlock report_block;
- // Initialize remote_ssrc to a "clearly illegal" value.
- report_block.remote_ssrc = 0xDEAD;
- report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
- report_block.fraction_lost = kLoss >> 24;
- report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kTestJitter;
- report_block.last_sr = kLastSr;
- report_block.delay_since_last_sr = kDelayLastSr;
+ transport::RtcpReportBlock report_block = GetReportBlock();
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
p.AddReceiverLog(kSendingSsrc);
- for (int i = 0; i < 119; ++i) {
- p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
- p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
- }
- test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ int remaining_bytes = kMaxReceiverLogBytes;
+ remaining_bytes -= kRtcpCastLogHeaderSize;
+
+ int num_events =
+ remaining_bytes / (kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize);
- RtcpReceiverLogMessage receiver_log;
+ // The last |num_events| events are sent due to the receiver log size cap.
+ for (size_t i = kRtcpMaxReceiverLogMessages - num_events;
+ i < kRtcpMaxReceiverLogMessages;
+ ++i) {
+ p.AddReceiverFrameLog(kRtpTimestamp + i, 1, kTimeBaseMs + i * kTimeDelayMs);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+ }
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- for (int j = 0; j < 200; ++j) {
- RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
- RtcpReceiverEventLogMessage event_log;
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
- event_log.type = kAckSent;
- event_log.event_timestamp = testing_clock.NowTicks();
- event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
- frame_log.event_log_messages_.push_back(event_log);
- receiver_log.push_back(frame_log);
+ for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
+ FrameEvent frame_event;
+ frame_event.rtp_timestamp = kRtpTimestamp + static_cast<int>(i);
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
+ frame_event.timestamp = testing_clock.NowTicks();
+ event_subscriber.OnReceiveFrameEvent(frame_event);
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
}
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+
rtcp_sender_->SendRtcpFromRtpReceiver(
- RtcpSender::kRtcpRr | RtcpSender::kRtcpReceiverLog,
+ transport::kRtcpRr | transport::kRtcpReceiverLog,
&report_block,
NULL,
NULL,
- &receiver_log);
+ &rtcp_events,
+ kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
- EXPECT_EQ(81u, receiver_log.size());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithOldLogFrames) {
+ static const uint32 kTimeBaseMs = 12345678;
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+
+ transport::RtcpReportBlock report_block = GetReportBlock();
+
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ p.AddReceiverLog(kSendingSsrc);
+
+ // Log 11 events for a single frame, each |kTimeBetweenEventsMs| apart.
+ // Only the last 10 events will be sent because the first event is more than
+ // 4095 milliseconds away from the latest event.
+ const int kTimeBetweenEventsMs = 410;
+ p.AddReceiverFrameLog(kRtpTimestamp, 10, kTimeBaseMs + kTimeBetweenEventsMs);
+ for (int i = 0; i < 10; ++i) {
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, i * kTimeBetweenEventsMs);
+ }
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
+
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
+ for (int i = 0; i < 11; ++i) {
+ FrameEvent frame_event;
+ frame_event.rtp_timestamp = kRtpTimestamp;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
+ frame_event.timestamp = testing_clock.NowTicks();
+ event_subscriber.OnReceiveFrameEvent(frame_event);
+ testing_clock.Advance(
+ base::TimeDelta::FromMilliseconds(kTimeBetweenEventsMs));
+ }
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ transport::kRtcpRr | transport::kRtcpReceiverLog,
+ &report_block,
+ NULL,
+ NULL,
+ &rtcp_events,
+ kDefaultDelay);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportRedundancy) {
+ uint32 time_base_ms = 12345678;
+ int kTimeBetweenEventsMs = 10;
+
+ transport::RtcpReportBlock report_block = GetReportBlock();
+
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(time_base_ms));
+
+ ReceiverRtcpEventSubscriber event_subscriber(500, VIDEO_EVENT);
+ size_t packet_count = kReceiveLogMessageHistorySize + 10;
+ for (size_t i = 0; i < packet_count; i++) {
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+
+ p.AddReceiverLog(kSendingSsrc);
+
+ if (i >= kSecondRedundancyOffset) {
+ p.AddReceiverFrameLog(
+ kRtpTimestamp,
+ 1,
+ time_base_ms - kSecondRedundancyOffset * kTimeBetweenEventsMs);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+ }
+ if (i >= kFirstRedundancyOffset) {
+ p.AddReceiverFrameLog(
+ kRtpTimestamp,
+ 1,
+ time_base_ms - kFirstRedundancyOffset * kTimeBetweenEventsMs);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+ }
+ p.AddReceiverFrameLog(kRtpTimestamp, 1, time_base_ms);
+ p.AddReceiverEventLog(0, FRAME_ACK_SENT, 0);
+
+ test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
+
+ FrameEvent frame_event;
+ frame_event.rtp_timestamp = kRtpTimestamp;
+ frame_event.type = FRAME_ACK_SENT;
+ frame_event.media_type = VIDEO_EVENT;
+ frame_event.timestamp = testing_clock.NowTicks();
+ event_subscriber.OnReceiveFrameEvent(frame_event);
+
+ ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+ event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
+
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ transport::kRtcpRr | transport::kRtcpReceiverLog,
+ &report_block,
+ NULL,
+ NULL,
+ &rtcp_events,
+ kDefaultDelay);
+
+ testing_clock.Advance(
+ base::TimeDelta::FromMilliseconds(kTimeBetweenEventsMs));
+ time_base_ms += kTimeBetweenEventsMs;
+ }
+
+ EXPECT_EQ(static_cast<int>(packet_count), test_transport_.packet_count());
}
} // namespace cast
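
The oversized-frame-log test above derives its expected event count purely from serialized RTCP sizes: subtract the cast log header and one frame-log header from the byte budget, then count how many event entries fit in what remains. A minimal standalone sketch of that arithmetic follows; the byte values are illustrative placeholders, since the real constants live in the cast headers and are not shown in this diff.

// Sketch only: the constant values below are placeholders, not the real
// values from media/cast.
#include <cstdio>

int main() {
  const int kMaxReceiverLogBytes = 160;     // placeholder byte budget
  const int kRtcpCastLogHeaderSize = 12;    // placeholder header size
  const int kRtcpReceiverFrameLogSize = 8;  // placeholder per-frame size
  const int kRtcpReceiverEventLogSize = 4;  // placeholder per-event size

  // Same derivation as the test: subtract the fixed headers, then see how
  // many event entries fit in the remaining budget.
  int remaining_bytes = kMaxReceiverLogBytes - kRtcpCastLogHeaderSize;
  remaining_bytes -= kRtcpReceiverFrameLogSize;
  const int num_events = remaining_bytes / kRtcpReceiverEventLogSize;
  std::printf("events that fit: %d\n", num_events);
  return 0;
}

The too-many-log-frames test uses the same budget logic, except each event there carries its own frame-log header, so the divisor becomes kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize.
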
diff --git a/chromium/media/cast/rtcp/rtcp_unittest.cc b/chromium/media/cast/rtcp/rtcp_unittest.cc
index 535f3c34f83..095e6d24df9 100644
--- a/chromium/media/cast/rtcp/rtcp_unittest.cc
+++ b/chromium/media/cast/rtcp/rtcp_unittest.cc
@@ -2,15 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
-#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_sender_impl.h"
+#include "media/cast/transport/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -22,60 +26,109 @@ static const uint32 kSenderSsrc = 0x10203;
static const uint32 kReceiverSsrc = 0x40506;
static const std::string kCName("test@10.1.1.1");
static const uint32 kRtcpIntervalMs = 500;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
static const int64 kAddedDelay = 123;
-static const int64 kAddedShortDelay= 100;
+static const int64 kAddedShortDelay = 100;
-class LocalRtcpTransport : public PacedPacketSender {
+class RtcpTestPacketSender : public transport::PacketSender {
public:
- explicit LocalRtcpTransport(scoped_refptr<CastEnvironment> cast_environment,
- base::SimpleTestTickClock* testing_clock)
+ explicit RtcpTestPacketSender(base::SimpleTestTickClock* testing_clock)
: drop_packets_(false),
short_delay_(false),
+ rtcp_receiver_(NULL),
testing_clock_(testing_clock) {}
+ virtual ~RtcpTestPacketSender() {}
+ // Packet lists imply an RTP packet.
+ void set_rtcp_receiver(Rtcp* rtcp) { rtcp_receiver_ = rtcp; }
+
+ void set_short_delay() { short_delay_ = true; }
+
+ void set_drop_packets(bool drop_packets) { drop_packets_ = drop_packets; }
+
+ // A singular packet implies an RTCP packet.
+ virtual bool SendPacket(transport::PacketRef packet,
+ const base::Closure& cb) OVERRIDE {
+ if (short_delay_) {
+ testing_clock_->Advance(
+ base::TimeDelta::FromMilliseconds(kAddedShortDelay));
+ } else {
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(kAddedDelay));
+ }
+ if (drop_packets_)
+ return true;
+
+ rtcp_receiver_->IncomingRtcpPacket(&packet->data[0], packet->data.size());
+ return true;
+ }
- void SetRtcpReceiver(Rtcp* rtcp) { rtcp_ = rtcp; }
+ private:
+ bool drop_packets_;
+ bool short_delay_;
+ Rtcp* rtcp_receiver_;
+ base::SimpleTestTickClock* testing_clock_;
- void SetShortDelay() { short_delay_ = true; }
+ DISALLOW_COPY_AND_ASSIGN(RtcpTestPacketSender);
+};
- void SetDropPackets(bool drop_packets) { drop_packets_ = drop_packets; }
+class LocalRtcpTransport : public transport::PacedPacketSender {
+ public:
+ LocalRtcpTransport(scoped_refptr<CastEnvironment> cast_environment,
+ base::SimpleTestTickClock* testing_clock)
+ : drop_packets_(false),
+ short_delay_(false),
+ testing_clock_(testing_clock) {}
+ void set_rtcp_receiver(Rtcp* rtcp) { rtcp_ = rtcp; }
- virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
+ void set_short_delay() { short_delay_ = true; }
+
+ void set_drop_packets(bool drop_packets) { drop_packets_ = drop_packets; }
+
+ virtual bool SendRtcpPacket(uint32 ssrc,
+ transport::PacketRef packet) OVERRIDE {
if (short_delay_) {
testing_clock_->Advance(
base::TimeDelta::FromMilliseconds(kAddedShortDelay));
} else {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(kAddedDelay));
}
- if (drop_packets_) return true;
+ if (drop_packets_)
+ return true;
- rtcp_->IncomingRtcpPacket(&(packet[0]), packet.size());
+ rtcp_->IncomingRtcpPacket(&packet->data[0], packet->data.size());
return true;
}
- virtual bool SendPackets(const PacketList& packets) OVERRIDE {
+ virtual bool SendPackets(
+ const transport::SendPacketVector& packets) OVERRIDE {
return false;
}
- virtual bool ResendPackets(const PacketList& packets) OVERRIDE {
+ virtual bool ResendPackets(
+ const transport::SendPacketVector& packets,
+ base::TimeDelta dedupe_window) OVERRIDE {
return false;
}
+ virtual void CancelSendingPacket(
+ const transport::PacketKey& packet_key) OVERRIDE {
+ }
+
private:
bool drop_packets_;
bool short_delay_;
Rtcp* rtcp_;
base::SimpleTestTickClock* testing_clock_;
scoped_refptr<CastEnvironment> cast_environment_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocalRtcpTransport);
};
class RtcpPeer : public Rtcp {
public:
RtcpPeer(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
- PacedPacketSender* const paced_packet_sender,
- RtpSenderStatistics* rtp_sender_statistics,
+ transport::CastTransportSender* const transport_sender,
+ transport::PacedPacketSender* paced_packet_sender,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
@@ -84,95 +137,129 @@ class RtcpPeer : public Rtcp {
const std::string& c_name)
: Rtcp(cast_environment,
sender_feedback,
+ transport_sender,
paced_packet_sender,
- rtp_sender_statistics,
rtp_receiver_statistics,
rtcp_mode,
rtcp_interval,
local_ssrc,
remote_ssrc,
- c_name) {
- }
+ c_name,
+ AUDIO_EVENT) {}
- using Rtcp::CheckForWrapAround;
+ using Rtcp::OnReceivedNtp;
using Rtcp::OnReceivedLipSyncInfo;
};
class RtcpTest : public ::testing::Test {
protected:
RtcpTest()
- : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
- cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig())),
- transport_(cast_environment_, &testing_clock_) {
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ : testing_clock_(new base::SimpleTestTickClock()),
+ task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
+ cast_environment_(new CastEnvironment(
+ scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_)),
+ sender_to_receiver_(testing_clock_),
+ receiver_to_sender_(cast_environment_, testing_clock_) {
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ net::IPEndPoint dummy_endpoint;
+ transport_sender_.reset(new transport::CastTransportSenderImpl(
+ NULL,
+ testing_clock_,
+ dummy_endpoint,
+ base::Bind(&UpdateCastTransportStatus),
+ transport::BulkRawEventsCallback(),
+ base::TimeDelta(),
+ task_runner_,
+ &sender_to_receiver_));
+ transport::CastTransportAudioConfig config;
+ config.rtp.config.ssrc = kSenderSsrc;
+ config.rtp.max_outstanding_frames = 1;
+ transport_sender_->InitializeAudio(config);
+ EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
}
virtual ~RtcpTest() {}
- virtual void SetUp() {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
+ static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ bool result = (status == transport::TRANSPORT_AUDIO_INITIALIZED ||
+ status == transport::TRANSPORT_VIDEO_INITIALIZED);
+ EXPECT_TRUE(result);
+ }
+
+ void RunTasks(int during_ms) {
+ for (int i = 0; i < during_ms; ++i) {
+ // Call process the timers every 1 ms.
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(1));
+ task_runner_->RunTasks();
+ }
}
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
- LocalRtcpTransport transport_;
+ RtcpTestPacketSender sender_to_receiver_;
+ scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ LocalRtcpTransport receiver_to_sender_;
MockRtcpSenderFeedback mock_sender_feedback_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpTest);
};
TEST_F(RtcpTest, TimeToSend) {
- base::TimeTicks start_time;
- start_time += base::TimeDelta::FromMilliseconds(kStartMillisecond);
+ const base::TimeTicks start_time = testing_clock_->NowTicks();
Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
- &transport_,
- NULL,
+ transport_sender_.get(),
+ &receiver_to_sender_,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
- transport_.SetRtcpReceiver(&rtcp);
+ kCName,
+ AUDIO_EVENT);
+ receiver_to_sender_.set_rtcp_receiver(&rtcp);
EXPECT_LE(start_time, rtcp.TimeToSendNextRtcpReport());
- EXPECT_GE(start_time + base::TimeDelta::FromMilliseconds(
- kRtcpIntervalMs * 3 / 2),
- rtcp.TimeToSendNextRtcpReport());
+ EXPECT_GE(
+ start_time + base::TimeDelta::FromMilliseconds(kRtcpIntervalMs * 3 / 2),
+ rtcp.TimeToSendNextRtcpReport());
base::TimeDelta delta = rtcp.TimeToSendNextRtcpReport() - start_time;
- testing_clock_.Advance(delta);
- EXPECT_EQ(testing_clock_.NowTicks(), rtcp.TimeToSendNextRtcpReport());
+ testing_clock_->Advance(delta);
+ EXPECT_EQ(testing_clock_->NowTicks(), rtcp.TimeToSendNextRtcpReport());
}
TEST_F(RtcpTest, BasicSenderReport) {
Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
- &transport_,
+ transport_sender_.get(),
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
- transport_.SetRtcpReceiver(&rtcp);
- rtcp.SendRtcpFromRtpSender(NULL);
+ kCName,
+ AUDIO_EVENT);
+ sender_to_receiver_.set_rtcp_receiver(&rtcp);
+ rtcp.SendRtcpFromRtpSender(base::TimeTicks(), 0);
}
TEST_F(RtcpTest, BasicReceiverReport) {
Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
- &transport_,
NULL,
+ &receiver_to_sender_,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
- transport_.SetRtcpReceiver(&rtcp);
+ kCName,
+ AUDIO_EVENT);
+ receiver_to_sender_.set_rtcp_receiver(&rtcp);
rtcp.SendRtcpFromRtpReceiver(NULL, NULL);
}
@@ -182,20 +269,20 @@ TEST_F(RtcpTest, BasicCast) {
// Media receiver.
Rtcp rtcp(cast_environment_,
&mock_sender_feedback_,
- &transport_,
NULL,
+ &receiver_to_sender_,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kSenderSsrc,
- kCName);
- transport_.SetRtcpReceiver(&rtcp);
+ kCName,
+ AUDIO_EVENT);
+ receiver_to_sender_.set_rtcp_receiver(&rtcp);
RtcpCastMessage cast_message(kSenderSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
PacketIdSet missing_packets;
- cast_message.missing_frames_and_packets_[
- kLostFrameId] = missing_packets;
+ cast_message.missing_frames_and_packets_[kLostFrameId] = missing_packets;
missing_packets.insert(kLostPacketId1);
missing_packets.insert(kLostPacketId2);
@@ -207,334 +294,254 @@ TEST_F(RtcpTest, BasicCast) {
TEST_F(RtcpTest, RttReducedSizeRtcp) {
// Media receiver.
- LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
Rtcp rtcp_receiver(cast_environment_,
&mock_sender_feedback_,
- &receiver_transport,
NULL,
+ &receiver_to_sender_,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kReceiverSsrc,
kSenderSsrc,
- kCName);
+ kCName,
+ AUDIO_EVENT);
// Media sender.
- LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
Rtcp rtcp_sender(cast_environment_,
&mock_sender_feedback_,
- &sender_transport,
+ transport_sender_.get(),
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ AUDIO_EVENT);
- receiver_transport.SetRtcpReceiver(&rtcp_sender);
- sender_transport.SetRtcpReceiver(&rtcp_receiver);
+ sender_to_receiver_.set_rtcp_receiver(&rtcp_receiver);
+ receiver_to_sender_.set_rtcp_receiver(&rtcp_sender);
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
- EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 1);
+ RunTasks(33);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- rtcp_sender.SendRtcpFromRtpSender(NULL);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
-
- EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 2);
+ RunTasks(33);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
}
TEST_F(RtcpTest, Rtt) {
// Media receiver.
- LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
Rtcp rtcp_receiver(cast_environment_,
&mock_sender_feedback_,
- &receiver_transport,
NULL,
+ &receiver_to_sender_,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kReceiverSsrc,
kSenderSsrc,
- kCName);
+ kCName,
+ AUDIO_EVENT);
// Media sender.
- LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
Rtcp rtcp_sender(cast_environment_,
&mock_sender_feedback_,
- &sender_transport,
+ transport_sender_.get(),
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kCName,
+ AUDIO_EVENT);
- receiver_transport.SetRtcpReceiver(&rtcp_sender);
- sender_transport.SetRtcpReceiver(&rtcp_receiver);
+ receiver_to_sender_.set_rtcp_receiver(&rtcp_sender);
+ sender_to_receiver_.set_rtcp_receiver(&rtcp_receiver);
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
- EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 1);
+ RunTasks(33);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- rtcp_sender.SendRtcpFromRtpSender(NULL);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ RunTasks(33);
- receiver_transport.SetShortDelay();
- sender_transport.SetShortDelay();
- rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR((kAddedShortDelay + 3 * kAddedDelay) / 2,
- avg_rtt.InMilliseconds(),
- 1);
- EXPECT_NEAR(kAddedDelay + kAddedShortDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ RunTasks(33);
- rtcp_sender.SendRtcpFromRtpSender(NULL);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
+
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 2);
+ RunTasks(33);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
+
+ receiver_to_sender_.set_short_delay();
+ sender_to_receiver_.set_short_delay();
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(
+ (kAddedShortDelay + 3 * kAddedDelay) / 2, avg_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
+
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 3);
+ RunTasks(33);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR((2 * kAddedShortDelay + 2 * kAddedDelay) / 2,
- avg_rtt.InMilliseconds(),
- 1);
- EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ avg_rtt.InMilliseconds(),
+ 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
- EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 2);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 2);
}
TEST_F(RtcpTest, RttWithPacketLoss) {
// Media receiver.
- LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
Rtcp rtcp_receiver(cast_environment_,
&mock_sender_feedback_,
- &receiver_transport,
NULL,
+ &receiver_to_sender_,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kSenderSsrc,
kReceiverSsrc,
- kCName);
+ kSenderSsrc,
+ kCName,
+ AUDIO_EVENT);
// Media sender.
- LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
Rtcp rtcp_sender(cast_environment_,
&mock_sender_feedback_,
- &sender_transport,
+ transport_sender_.get(),
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kReceiverSsrc,
kSenderSsrc,
- kCName);
+ kReceiverSsrc,
+ kCName,
+ AUDIO_EVENT);
- receiver_transport.SetRtcpReceiver(&rtcp_sender);
- sender_transport.SetRtcpReceiver(&rtcp_receiver);
+ receiver_to_sender_.set_rtcp_receiver(&rtcp_sender);
+ sender_to_receiver_.set_rtcp_receiver(&rtcp_receiver);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 0);
+ RunTasks(33);
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
- EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- receiver_transport.SetShortDelay();
- sender_transport.SetShortDelay();
- receiver_transport.SetDropPackets(true);
+ receiver_to_sender_.set_short_delay();
+ sender_to_receiver_.set_short_delay();
+ receiver_to_sender_.set_drop_packets(true);
rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
- rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_sender.SendRtcpFromRtpSender(testing_clock_->NowTicks(), 1);
+ RunTasks(33);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 2);
}
TEST_F(RtcpTest, NtpAndTime) {
- const int64 kSecondsbetweenYear1900and2010 = GG_INT64_C(40176 * 24 * 60 * 60);
- const int64 kSecondsbetweenYear1900and2030 = GG_INT64_C(47481 * 24 * 60 * 60);
+ const int64 kSecondsbetweenYear1900and2010 = INT64_C(40176 * 24 * 60 * 60);
+ const int64 kSecondsbetweenYear1900and2030 = INT64_C(47481 * 24 * 60 * 60);
uint32 ntp_seconds_1 = 0;
- uint32 ntp_fractions_1 = 0;
+ uint32 ntp_fraction_1 = 0;
base::TimeTicks input_time = base::TimeTicks::Now();
- ConvertTimeTicksToNtp(input_time, &ntp_seconds_1, &ntp_fractions_1);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_1, &ntp_fraction_1);
// Verify absolute value.
EXPECT_GT(ntp_seconds_1, kSecondsbetweenYear1900and2010);
EXPECT_LT(ntp_seconds_1, kSecondsbetweenYear1900and2030);
- base::TimeTicks out_1 = ConvertNtpToTimeTicks(ntp_seconds_1, ntp_fractions_1);
+ base::TimeTicks out_1 = ConvertNtpToTimeTicks(ntp_seconds_1, ntp_fraction_1);
EXPECT_EQ(input_time, out_1); // Verify inverse.
base::TimeDelta time_delta = base::TimeDelta::FromMilliseconds(1000);
input_time += time_delta;
uint32 ntp_seconds_2 = 0;
- uint32 ntp_fractions_2 = 0;
+ uint32 ntp_fraction_2 = 0;
- ConvertTimeTicksToNtp(input_time, &ntp_seconds_2, &ntp_fractions_2);
- base::TimeTicks out_2 = ConvertNtpToTimeTicks(ntp_seconds_2, ntp_fractions_2);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_2, &ntp_fraction_2);
+ base::TimeTicks out_2 = ConvertNtpToTimeTicks(ntp_seconds_2, ntp_fraction_2);
EXPECT_EQ(input_time, out_2); // Verify inverse.
// Verify delta.
EXPECT_EQ((out_2 - out_1), time_delta);
- EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), GG_UINT32_C(1));
- EXPECT_NEAR(ntp_fractions_2, ntp_fractions_1, 1);
+ EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), UINT32_C(1));
+ EXPECT_NEAR(ntp_fraction_2, ntp_fraction_1, 1);
time_delta = base::TimeDelta::FromMilliseconds(500);
input_time += time_delta;
uint32 ntp_seconds_3 = 0;
- uint32 ntp_fractions_3 = 0;
+ uint32 ntp_fraction_3 = 0;
- ConvertTimeTicksToNtp(input_time, &ntp_seconds_3, &ntp_fractions_3);
- base::TimeTicks out_3 = ConvertNtpToTimeTicks(ntp_seconds_3, ntp_fractions_3);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_3, &ntp_fraction_3);
+ base::TimeTicks out_3 = ConvertNtpToTimeTicks(ntp_seconds_3, ntp_fraction_3);
EXPECT_EQ(input_time, out_3); // Verify inverse.
// Verify delta.
EXPECT_EQ((out_3 - out_2), time_delta);
- EXPECT_NEAR((ntp_fractions_3 - ntp_fractions_2), 0xffffffff / 2, 1);
-}
-
-TEST_F(RtcpTest, WrapAround) {
- RtcpPeer rtcp_peer(cast_environment_,
- &mock_sender_feedback_,
- NULL,
- NULL,
- NULL,
- kRtcpReducedSize,
- base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kReceiverSsrc,
- kSenderSsrc,
- kCName);
- uint32 new_timestamp = 0;
- uint32 old_timestamp = 0;
- EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 1234567890;
- old_timestamp = 1234567000;
- EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 1234567000;
- old_timestamp = 1234567890;
- EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 123;
- old_timestamp = 4234567890u;
- EXPECT_EQ(1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 4234567890u;
- old_timestamp = 123;
- EXPECT_EQ(-1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
-}
-
-TEST_F(RtcpTest, RtpTimestampInSenderTime) {
- RtcpPeer rtcp_peer(cast_environment_,
- &mock_sender_feedback_,
- NULL,
- NULL,
- NULL,
- kRtcpReducedSize,
- base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- kReceiverSsrc,
- kSenderSsrc,
- kCName);
- int frequency = 32000;
- uint32 rtp_timestamp = 64000;
- base::TimeTicks rtp_timestamp_in_ticks;
-
- // Test fail before we get a OnReceivedLipSyncInfo.
- EXPECT_FALSE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
-
- uint32 ntp_seconds = 0;
- uint32 ntp_fractions = 0;
- uint64 input_time_us = 12345678901000LL;
- base::TimeTicks input_time;
- input_time += base::TimeDelta::FromMicroseconds(input_time_us);
-
- // Test exact match.
- ConvertTimeTicksToNtp(input_time, &ntp_seconds, &ntp_fractions);
- rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time, rtp_timestamp_in_ticks);
-
- // Test older rtp_timestamp.
- rtp_timestamp = 32000;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(1000),
- rtp_timestamp_in_ticks);
-
- // Test older rtp_timestamp with wrap.
- rtp_timestamp = 4294903296u;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(4000),
- rtp_timestamp_in_ticks);
-
- // Test newer rtp_timestamp.
- rtp_timestamp = 128000;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(2000),
- rtp_timestamp_in_ticks);
-
- // Test newer rtp_timestamp with wrap.
- rtp_timestamp = 4294903296u;
- rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
- rtp_timestamp = 64000;
- EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
- EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(4000),
- rtp_timestamp_in_ticks);
+ EXPECT_NEAR((ntp_fraction_3 - ntp_fraction_2), 0xffffffff / 2, 1);
}
} // namespace cast
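
The NtpAndTime assertions above follow from the NTP timestamp layout: 32 bits of seconds since 1900 plus a 32-bit fraction in which one second equals 2^32 units, so advancing by 500 ms moves the fraction by roughly 0xffffffff / 2. Below is a minimal sketch of that 32.32 split, independent of the ConvertTimeTicksToNtp helper; it works on raw microsecond counts and omits the 1900 epoch offset.

// Sketch of the NTP 32.32 fixed-point split exercised by the test's
// round trips; not the Chromium implementation.
#include <cstdint>
#include <cstdio>

void MicrosecondsToNtp(uint64_t us, uint32_t* seconds, uint32_t* fraction) {
  *seconds = static_cast<uint32_t>(us / 1000000);
  // One second maps to 2^32 fractional units.
  *fraction = static_cast<uint32_t>(((us % 1000000) << 32) / 1000000);
}

int main() {
  uint32_t s = 0;
  uint32_t f = 0;
  MicrosecondsToNtp(1500000, &s, &f);  // 1.5 seconds.
  // Prints seconds=1 fraction=0x80000000, i.e. half of 2^32 for the 500 ms.
  std::printf("seconds=%u fraction=0x%08x\n", s, f);
  return 0;
}
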
diff --git a/chromium/media/cast/rtcp/rtcp_utility.cc b/chromium/media/cast/rtcp/rtcp_utility.cc
index daeaa8aaceb..e29f82e9cf9 100644
--- a/chromium/media/cast/rtcp/rtcp_utility.cc
+++ b/chromium/media/cast/rtcp/rtcp_utility.cc
@@ -4,8 +4,9 @@
#include "media/cast/rtcp/rtcp_utility.h"
+#include "base/big_endian.h"
#include "base/logging.h"
-#include "net/base/big_endian.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace media {
namespace cast {
@@ -19,18 +20,15 @@ RtcpParser::RtcpParser(const uint8* rtcpData, size_t rtcpDataLength)
state_(kStateTopLevel),
number_of_blocks_(0),
field_type_(kRtcpNotValidCode) {
+ memset(&field_, 0, sizeof(field_));
Validate();
}
RtcpParser::~RtcpParser() {}
-RtcpFieldTypes RtcpParser::FieldType() const {
- return field_type_;
-}
+RtcpFieldTypes RtcpParser::FieldType() const { return field_type_; }
-const RtcpField& RtcpParser::Field() const {
- return field_;
-}
+const RtcpField& RtcpParser::Field() const { return field_; }
RtcpFieldTypes RtcpParser::Begin() {
rtcp_data_ = rtcp_data_begin_;
@@ -41,7 +39,8 @@ RtcpFieldTypes RtcpParser::Iterate() {
// Reset packet type
field_type_ = kRtcpNotValidCode;
- if (!IsValid()) return kRtcpNotValidCode;
+ if (!IsValid())
+ return kRtcpNotValidCode;
switch (state_) {
case kStateTopLevel:
@@ -62,9 +61,6 @@ RtcpFieldTypes RtcpParser::Iterate() {
case kStateApplicationSpecificCastReceiverEventLog:
IterateCastReceiverLogEvent();
break;
- case kStateApplicationSpecificCastSenderLog:
- IterateCastSenderLog();
- break;
case kStateExtendedReportBlock:
IterateExtendedReportItem();
break;
@@ -101,51 +97,53 @@ void RtcpParser::IterateTopLevel() {
RtcpCommonHeader header;
bool success = RtcpParseCommonHeader(rtcp_data_, rtcp_data_end_, &header);
- if (!success) return;
+ if (!success)
+ return;
rtcp_block_end_ = rtcp_data_ + header.length_in_octets;
- if (rtcp_block_end_ > rtcp_data_end_) return; // Bad block!
+ if (rtcp_block_end_ > rtcp_data_end_)
+ return; // Bad block!
switch (header.PT) {
- case kPacketTypeSenderReport:
+ case transport::kPacketTypeSenderReport:
// number of Report blocks
number_of_blocks_ = header.IC;
ParseSR();
return;
- case kPacketTypeReceiverReport:
+ case transport::kPacketTypeReceiverReport:
// number of Report blocks
number_of_blocks_ = header.IC;
ParseRR();
return;
- case kPacketTypeSdes:
+ case transport::kPacketTypeSdes:
// number of Sdes blocks
number_of_blocks_ = header.IC;
if (!ParseSdes()) {
break; // Nothing supported found, continue to next block!
}
return;
- case kPacketTypeBye:
+ case transport::kPacketTypeBye:
number_of_blocks_ = header.IC;
if (!ParseBye()) {
// Nothing supported found, continue to next block!
break;
}
return;
- case kPacketTypeApplicationDefined:
+ case transport::kPacketTypeApplicationDefined:
if (!ParseApplicationDefined(header.IC)) {
// Nothing supported found, continue to next block!
break;
}
return;
- case kPacketTypeGenericRtpFeedback: // Fall through!
- case kPacketTypePayloadSpecific:
+ case transport::kPacketTypeGenericRtpFeedback: // Fall through!
+ case transport::kPacketTypePayloadSpecific:
if (!ParseFeedBackCommon(header)) {
// Nothing supported found, continue to next block!
break;
}
return;
- case kPacketTypeXr:
+ case transport::kPacketTypeXr:
if (!ParseExtendedReport()) {
break; // Nothing supported found, continue to next block!
}
@@ -160,103 +158,111 @@ void RtcpParser::IterateTopLevel() {
void RtcpParser::IterateReportBlockItem() {
bool success = ParseReportBlockItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateSdesItem() {
bool success = ParseSdesItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateByeItem() {
bool success = ParseByeItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateExtendedReportItem() {
bool success = ParseExtendedReportItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateExtendedReportDelaySinceLastReceiverReportItem() {
bool success = ParseExtendedReportDelaySinceLastReceiverReport();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateNackItem() {
bool success = ParseNackItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateRpsiItem() {
bool success = ParseRpsiItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateFirItem() {
bool success = ParseFirItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IteratePayloadSpecificAppItem() {
bool success = ParsePayloadSpecificAppItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IteratePayloadSpecificRembItem() {
bool success = ParsePayloadSpecificRembItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IteratePayloadSpecificCastItem() {
bool success = ParsePayloadSpecificCastItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IteratePayloadSpecificCastNackItem() {
bool success = ParsePayloadSpecificCastNackItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateCastReceiverLogFrame() {
bool success = ParseCastReceiverLogFrameItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::IterateCastReceiverLogEvent() {
bool success = ParseCastReceiverLogEventItem();
- if (!success) Iterate();
-}
-
-void RtcpParser::IterateCastSenderLog() {
- bool success = ParseCastSenderLogItem();
- if (!success) Iterate();
+ if (!success)
+ Iterate();
}
void RtcpParser::Validate() {
- if (rtcp_data_ == NULL) return; // NOT VALID
+ if (rtcp_data_ == NULL)
+ return; // NOT VALID
RtcpCommonHeader header;
- bool success = RtcpParseCommonHeader(rtcp_data_begin_, rtcp_data_end_,
- &header);
+ bool success =
+ RtcpParseCommonHeader(rtcp_data_begin_, rtcp_data_end_, &header);
- if (!success) return; // NOT VALID!
+ if (!success)
+ return; // NOT VALID!
valid_packet_ = true;
}
-bool RtcpParser::IsValid() const {
- return valid_packet_;
-}
+bool RtcpParser::IsValid() const { return valid_packet_; }
-void RtcpParser::EndCurrentBlock() {
- rtcp_data_ = rtcp_block_end_;
-}
+void RtcpParser::EndCurrentBlock() { rtcp_data_ = rtcp_block_end_; }
bool RtcpParser::RtcpParseCommonHeader(const uint8* data_begin,
const uint8* data_end,
RtcpCommonHeader* parsed_header) const {
- if (!data_begin || !data_end) return false;
+ if (!data_begin || !data_end)
+ return false;
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -266,31 +272,36 @@ bool RtcpParser::RtcpParseCommonHeader(const uint8* data_begin,
//
// Common header for all Rtcp packets, 4 octets.
- if ((data_end - data_begin) < 4) return false;
+ if ((data_end - data_begin) < 4)
+ return false;
- parsed_header->V = data_begin[0] >> 6;
- parsed_header->P = ((data_begin[0] & 0x20) == 0) ? false : true;
+ parsed_header->V = data_begin[0] >> 6;
+ parsed_header->P = ((data_begin[0] & 0x20) == 0) ? false : true;
parsed_header->IC = data_begin[0] & 0x1f;
parsed_header->PT = data_begin[1];
parsed_header->length_in_octets =
((data_begin[2] << 8) + data_begin[3] + 1) * 4;
- if (parsed_header->length_in_octets == 0) return false;
+ if (parsed_header->length_in_octets == 0)
+ return false;
// Check if RTP version field == 2.
- if (parsed_header->V != 2) return false;
+ if (parsed_header->V != 2)
+ return false;
return true;
}
bool RtcpParser::ParseRR() {
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
- if (length < 8) return false;
+ if (length < 8)
+ return false;
field_type_ = kRtcpRrCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.Skip(4); // Skip header
big_endian_reader.ReadU32(&field_.receiver_report.sender_ssrc);
field_.receiver_report.number_of_report_blocks = number_of_blocks_;
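
For readers unfamiliar with the RTCP framing handled by RtcpParseCommonHeader() above, a standalone sketch (plain C++, not tied to the Chromium types) decoding the same four-octet layout:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct CommonHeader {
      uint8_t v, ic, pt;
      bool p;
      uint16_t length_in_octets;
    };

    bool ParseCommonHeader(const uint8_t* d, size_t len, CommonHeader* h) {
      if (len < 4) return false;
      h->v = d[0] >> 6;           // Version, must be 2.
      h->p = (d[0] & 0x20) != 0;  // Padding bit.
      h->ic = d[0] & 0x1f;        // Item count / subtype.
      h->pt = d[1];               // Packet type.
      // The wire length counts 32-bit words minus one, hence the "+ 1) * 4".
      h->length_in_octets = ((d[2] << 8) + d[3] + 1) * 4;
      return h->v == 2;
    }

    int main() {
      // 0x81: version 2, no padding, one report block. 0xc9: PT 201, a
      // receiver report. 0x00 0x07: length field 7 -> (7 + 1) * 4 = 32 octets,
      // which is exactly an RR (8 octets) plus one report block (24 octets).
      const uint8_t packet[] = {0x81, 0xc9, 0x00, 0x07};
      CommonHeader h;
      if (ParseCommonHeader(packet, sizeof(packet), &h))
        std::printf("V=%d P=%d IC=%d PT=%d len=%d\n", h.v, h.p, h.ic, h.pt,
                    h.length_in_octets);
      return 0;
    }
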
@@ -309,7 +320,8 @@ bool RtcpParser::ParseSR() {
}
field_type_ = kRtcpSrCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.Skip(4); // Skip header
big_endian_reader.ReadU32(&field_.sender_report.sender_ssrc);
big_endian_reader.ReadU32(&field_.sender_report.ntp_most_significant);
@@ -339,7 +351,8 @@ bool RtcpParser::ParseReportBlockItem() {
return false;
}
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&field_.report_block_item.ssrc);
big_endian_reader.ReadU8(&field_.report_block_item.fraction_lost);
@@ -399,7 +412,8 @@ bool RtcpParser::ParseSdesItem() {
}
uint32 ssrc;
- net::BigEndianReader big_endian_reader(rtcp_data_, data_length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), data_length);
big_endian_reader.ReadU32(&ssrc);
rtcp_data_ += 4;
@@ -418,7 +432,8 @@ bool RtcpParser::ParseSdesTypes() {
// Only the c_name item is mandatory. RFC 3550 page 46.
bool found_c_name = false;
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
while (big_endian_reader.remaining() > 0) {
uint8 tag;
@@ -484,7 +499,8 @@ bool RtcpParser::ParseByeItem() {
field_type_ = kRtcpByeCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&field_.bye.sender_ssrc);
rtcp_data_ += 4;
@@ -498,8 +514,7 @@ bool RtcpParser::ParseByeItem() {
bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
- if (length < 16 ||
- !(subtype == kSenderLogSubtype || subtype == kReceiverLogSubtype)) {
+ if (length < 16 || subtype != kReceiverLogSubtype) {
state_ = kStateTopLevel;
EndCurrentBlock();
return false;
@@ -508,7 +523,8 @@ bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
uint32 sender_ssrc;
uint32 name;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.Skip(4); // Skip header.
big_endian_reader.ReadU32(&sender_ssrc);
big_endian_reader.ReadU32(&name);
@@ -520,11 +536,6 @@ bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
}
rtcp_data_ += 12;
switch (subtype) {
- case kSenderLogSubtype:
- state_ = kStateApplicationSpecificCastSenderLog;
- field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
- field_.cast_sender_log.sender_ssrc = sender_ssrc;
- break;
case kReceiverLogSubtype:
state_ = kStateApplicationSpecificCastReceiverFrameLog;
field_type_ = kRtcpApplicationSpecificCastReceiverLogCode;
@@ -545,7 +556,8 @@ bool RtcpParser::ParseCastReceiverLogFrameItem() {
}
uint32 rtp_timestamp;
uint32 data;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&rtp_timestamp);
big_endian_reader.ReadU32(&data);
@@ -577,7 +589,8 @@ bool RtcpParser::ParseCastReceiverLogEventItem() {
uint16 delay_delta_or_packet_id;
uint16 event_type_and_timestamp_delta;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU16(&delay_delta_or_packet_id);
big_endian_reader.ReadU16(&event_type_and_timestamp_delta);
@@ -585,7 +598,9 @@ bool RtcpParser::ParseCastReceiverLogEventItem() {
field_.cast_receiver_log.event =
static_cast<uint8>(event_type_and_timestamp_delta >> 12);
- field_.cast_receiver_log.delay_delta_or_packet_id = delay_delta_or_packet_id;
+ // delay_delta is union'ed with packet_id.
+ field_.cast_receiver_log.delay_delta_or_packet_id.packet_id =
+ delay_delta_or_packet_id;
field_.cast_receiver_log.event_timestamp_delta =
event_type_and_timestamp_delta & 0xfff;
@@ -593,30 +608,10 @@ bool RtcpParser::ParseCastReceiverLogEventItem() {
return true;
}
-bool RtcpParser::ParseCastSenderLogItem() {
- ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
-
- if (length < 4) {
- state_ = kStateTopLevel;
- EndCurrentBlock();
- return false;
- }
- uint32 data;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
- big_endian_reader.ReadU32(&data);
-
- rtcp_data_ += 4;
-
- field_.cast_sender_log.status = static_cast<uint8>(data >> 24);
- // We have 24 LSB of the RTP timestamp on the wire.
- field_.cast_sender_log.rtp_timestamp = data & 0xffffff;
- field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
- return true;
-}
-
bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
- DCHECK((header.PT == kPacketTypeGenericRtpFeedback) ||
- (header.PT == kPacketTypePayloadSpecific)) << "Invalid state";
+ DCHECK((header.PT == transport::kPacketTypeGenericRtpFeedback) ||
+ (header.PT == transport::kPacketTypePayloadSpecific))
+ << "Invalid state";
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
@@ -627,21 +622,22 @@ bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
uint32 sender_ssrc;
uint32 media_ssrc;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.Skip(4); // Skip header.
big_endian_reader.ReadU32(&sender_ssrc);
big_endian_reader.ReadU32(&media_ssrc);
rtcp_data_ += 12;
- if (header.PT == kPacketTypeGenericRtpFeedback) {
+ if (header.PT == transport::kPacketTypeGenericRtpFeedback) {
// Transport layer feedback
switch (header.IC) {
case 1:
// Nack
field_type_ = kRtcpGenericRtpFeedbackNackCode;
field_.nack.sender_ssrc = sender_ssrc;
- field_.nack.media_ssrc = media_ssrc;
+ field_.nack.media_ssrc = media_ssrc;
state_ = kStateGenericRtpFeedbackNack;
return true;
case 2:
@@ -667,14 +663,14 @@ bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
EndCurrentBlock();
return false;
- } else if (header.PT == kPacketTypePayloadSpecific) {
+ } else if (header.PT == transport::kPacketTypePayloadSpecific) {
// Payload specific feedback
switch (header.IC) {
case 1:
// PLI
field_type_ = kRtcpPayloadSpecificPliCode;
field_.pli.sender_ssrc = sender_ssrc;
- field_.pli.media_ssrc = media_ssrc;
+ field_.pli.media_ssrc = media_ssrc;
// Note: No state transition, PLI FCI is empty!
return true;
@@ -684,7 +680,7 @@ bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
case 3:
field_type_ = kRtcpPayloadSpecificRpsiCode;
field_.rpsi.sender_ssrc = sender_ssrc;
- field_.rpsi.media_ssrc = media_ssrc;
+ field_.rpsi.media_ssrc = media_ssrc;
state_ = kStatePayloadSpecificRpsi;
return true;
case 4:
@@ -693,7 +689,7 @@ bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
case 15:
field_type_ = kRtcpPayloadSpecificAppCode;
field_.application_specific.sender_ssrc = sender_ssrc;
- field_.application_specific.media_ssrc = media_ssrc;
+ field_.application_specific.media_ssrc = media_ssrc;
state_ = kStatePayloadSpecificApplication;
return true;
default:
@@ -736,7 +732,8 @@ bool RtcpParser::ParseRpsiItem() {
field_type_ = kRtcpPayloadSpecificRpsiCode;
uint8 padding_bits;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU8(&padding_bits);
big_endian_reader.ReadU8(&field_.rpsi.payload_type);
big_endian_reader.ReadBytes(&field_.rpsi.native_bit_string, length - 2);
@@ -759,7 +756,8 @@ bool RtcpParser::ParseNackItem() {
field_type_ = kRtcpGenericRtpFeedbackNackItemCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU16(&field_.nack_item.packet_id);
big_endian_reader.ReadU16(&field_.nack_item.bitmask);
rtcp_data_ += 4;
@@ -775,7 +773,8 @@ bool RtcpParser::ParsePayloadSpecificAppItem() {
return false;
}
uint32 name;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&name);
rtcp_data_ += 4;
@@ -802,7 +801,8 @@ bool RtcpParser::ParsePayloadSpecificRembItem() {
return false;
}
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU8(&field_.remb_item.number_of_ssrcs);
uint8 byte_1;
@@ -841,9 +841,11 @@ bool RtcpParser::ParsePayloadSpecificCastItem() {
}
field_type_ = kRtcpPayloadSpecificCastCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU8(&field_.cast_item.last_frame_id);
big_endian_reader.ReadU8(&field_.cast_item.number_of_lost_fields);
+ big_endian_reader.ReadU16(&field_.cast_item.target_delay_ms);
rtcp_data_ += 4;
@@ -867,7 +869,8 @@ bool RtcpParser::ParsePayloadSpecificCastNackItem() {
}
field_type_ = kRtcpPayloadSpecificCastNackItemCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU8(&field_.cast_nack_item.frame_id);
big_endian_reader.ReadU16(&field_.cast_nack_item.packet_id);
big_endian_reader.ReadU8(&field_.cast_nack_item.bitmask);
@@ -887,7 +890,8 @@ bool RtcpParser::ParseFirItem() {
}
field_type_ = kRtcpPayloadSpecificFirItemCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&field_.fir_item.ssrc);
big_endian_reader.ReadU8(&field_.fir_item.command_sequence_number);
@@ -897,11 +901,13 @@ bool RtcpParser::ParseFirItem() {
bool RtcpParser::ParseExtendedReport() {
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
- if (length < 8) return false;
+ if (length < 8)
+ return false;
field_type_ = kRtcpXrCode;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.Skip(4); // Skip header.
big_endian_reader.ReadU32(&field_.extended_report.sender_ssrc);
@@ -921,7 +927,8 @@ bool RtcpParser::ParseExtendedReportItem() {
uint8 block_type;
uint16 block_length;
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU8(&block_type);
big_endian_reader.Skip(1); // Ignore reserved.
big_endian_reader.ReadU16(&block_length);
@@ -970,7 +977,8 @@ bool RtcpParser::ParseExtendedReportReceiverReferenceTimeReport() {
return false;
}
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&field_.rrtr.ntp_most_significant);
big_endian_reader.ReadU32(&field_.rrtr.ntp_least_significant);
@@ -993,7 +1001,8 @@ bool RtcpParser::ParseExtendedReportDelaySinceLastReceiverReport() {
return false;
}
- net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ base::BigEndianReader big_endian_reader(
+ reinterpret_cast<const char*>(rtcp_data_), length);
big_endian_reader.ReadU32(&field_.dlrr.receivers_ssrc);
big_endian_reader.ReadU32(&field_.dlrr.last_receiver_report);
big_endian_reader.ReadU32(&field_.dlrr.delay_last_receiver_report);
@@ -1005,5 +1014,53 @@ bool RtcpParser::ParseExtendedReportDelaySinceLastReceiverReport() {
return true;
}
+// Converts a log event type to an integer value.
+// NOTE: We have only allocated 4 bits to represent the type of event over the
+// wire. Therefore, this function can only return values from 0 to 15.
+uint8 ConvertEventTypeToWireFormat(CastLoggingEvent event) {
+ switch (event) {
+ case FRAME_ACK_SENT:
+ return 11;
+ case FRAME_PLAYOUT:
+ return 12;
+ case FRAME_DECODED:
+ return 13;
+ case PACKET_RECEIVED:
+ return 14;
+ default:
+ return 0; // Not an interesting event.
+ }
+}
+
+CastLoggingEvent TranslateToLogEventFromWireFormat(uint8 event) {
+ // TODO(imcheng): Remove the old mappings once they are no longer used.
+ switch (event) {
+ case 1: // AudioAckSent
+ case 5: // VideoAckSent
+ case 11: // Unified
+ return FRAME_ACK_SENT;
+ case 2: // AudioPlayoutDelay
+ case 7: // VideoRenderDelay
+ case 12: // Unified
+ return FRAME_PLAYOUT;
+ case 3: // AudioFrameDecoded
+ case 6: // VideoFrameDecoded
+ case 13: // Unified
+ return FRAME_DECODED;
+ case 4: // AudioPacketReceived
+ case 8: // VideoPacketReceived
+ case 14: // Unified
+ return PACKET_RECEIVED;
+ case 9: // DuplicateAudioPacketReceived
+ case 10: // DuplicateVideoPacketReceived
+ default:
+ // If the sender adds new log messages, we will end up here until we add
+ // the new messages in the receiver.
+ VLOG(1) << "Unexpected log message received: " << static_cast<int>(event);
+ NOTREACHED();
+ return UNKNOWN;
+ }
+}
+
} // namespace cast
} // namespace media
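
The two translation functions added above are inverses on the "unified" codes 11 through 14, while FromWire also folds the old per-media codes into the same events. A compact standalone round-trip check (the stand-in enum mirrors media/cast's CastLoggingEvent in name only):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // Stand-in enum; the real CastLoggingEvent lives in logging_defines.h.
    enum Event { UNKNOWN, FRAME_ACK_SENT, FRAME_PLAYOUT, FRAME_DECODED,
                 PACKET_RECEIVED };

    uint8_t ToWire(Event e) {  // Mirrors ConvertEventTypeToWireFormat().
      switch (e) {
        case FRAME_ACK_SENT:  return 11;
        case FRAME_PLAYOUT:   return 12;
        case FRAME_DECODED:   return 13;
        case PACKET_RECEIVED: return 14;
        default:              return 0;
      }
    }

    Event FromWire(uint8_t w) {  // Mirrors TranslateToLogEventFromWireFormat(),
      switch (w) {               // folding the old audio/video codes together.
        case 1: case 5: case 11: return FRAME_ACK_SENT;
        case 2: case 7: case 12: return FRAME_PLAYOUT;
        case 3: case 6: case 13: return FRAME_DECODED;
        case 4: case 8: case 14: return PACKET_RECEIVED;
        default:                 return UNKNOWN;
      }
    }

    int main() {
      for (Event e : {FRAME_ACK_SENT, FRAME_PLAYOUT, FRAME_DECODED,
                      PACKET_RECEIVED})
        std::printf("%d -> wire %d -> %d\n", e, ToWire(e), FromWire(ToWire(e)));
      return 0;
    }
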
diff --git a/chromium/media/cast/rtcp/rtcp_utility.h b/chromium/media/cast/rtcp/rtcp_utility.h
index 5cf55d91060..34f3f25a889 100644
--- a/chromium/media/cast/rtcp/rtcp_utility.h
+++ b/chromium/media/cast/rtcp/rtcp_utility.h
@@ -7,6 +7,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/logging/logging_defines.h"
#include "media/cast/rtcp/rtcp_defines.h"
namespace media {
@@ -21,7 +22,6 @@ static const int kRtcpMaxNumberOfRembFeedbackSsrcs = 255;
static const uint32 kRemb = ('R' << 24) + ('E' << 16) + ('M' << 8) + 'B';
static const uint32 kCast = ('C' << 24) + ('A' << 16) + ('S' << 8) + 'T';
-static const uint8 kSenderLogSubtype = 1;
static const uint8 kReceiverLogSubtype = 2;
static const size_t kRtcpMaxReceiverLogMessages = 256;
@@ -37,7 +37,7 @@ struct RtcpFieldReceiverReport {
struct RtcpFieldSenderReport {
// RFC 3550.
uint32 sender_ssrc;
- uint8 number_of_report_blocks;
+ uint8 number_of_report_blocks;
uint32 ntp_most_significant;
uint32 ntp_least_significant;
uint32 rtp_timestamp;
@@ -48,7 +48,7 @@ struct RtcpFieldSenderReport {
struct RtcpFieldReportBlockItem {
// RFC 3550.
uint32 ssrc;
- uint8 fraction_lost;
+ uint8 fraction_lost;
uint32 cumulative_number_of_packets_lost;
uint32 extended_highest_sequence_number;
uint32 jitter;
@@ -101,9 +101,9 @@ struct RtcpFieldPayloadSpecificRpsi {
// RFC 4585.
uint32 sender_ssrc;
uint32 media_ssrc;
- uint8 payload_type;
+ uint8 payload_type;
uint16 number_of_valid_bits;
- uint8 native_bit_string[kRtcpRpsiDataSize];
+ uint8 native_bit_string[kRtcpRpsiDataSize];
};
struct RtcpFieldXr {
@@ -138,6 +138,7 @@ struct RtcpFieldPayloadSpecificRembItem {
struct RtcpFieldPayloadSpecificCastItem {
uint8 last_frame_id;
uint8 number_of_lost_fields;
+ uint16 target_delay_ms;
};
struct RtcpFieldPayloadSpecificCastNackItem {
@@ -151,41 +152,37 @@ struct RtcpFieldApplicationSpecificCastReceiverLogItem {
uint32 rtp_timestamp;
uint32 event_timestamp_base;
uint8 event;
- uint16 delay_delta_or_packet_id;
+ union {
+ uint16 packet_id;
+ int16 delay_delta;
+ } delay_delta_or_packet_id;
uint16 event_timestamp_delta;
};
-struct RtcpFieldApplicationSpecificCastSenderLogItem {
- uint32 sender_ssrc;
- uint8 status;
- uint32 rtp_timestamp;
-};
-
union RtcpField {
- RtcpFieldReceiverReport receiver_report;
- RtcpFieldSenderReport sender_report;
- RtcpFieldReportBlockItem report_block_item;
- RtcpFieldSdesCName c_name;
- RtcpFieldBye bye;
-
- RtcpFieldXr extended_report;
- RtcpFieldXrRrtr rrtr;
- RtcpFieldXrDlrr dlrr;
-
- RtcpFieldGenericRtpFeedbackNack nack;
- RtcpFieldGenericRtpFeedbackNackItem nack_item;
-
- RtcpFieldPayloadSpecificPli pli;
- RtcpFieldPayloadSpecificRpsi rpsi;
- RtcpFieldPayloadSpecificFir fir;
- RtcpFieldPayloadSpecificFirItem fir_item;
- RtcpFieldPayloadSpecificApplication application_specific;
- RtcpFieldPayloadSpecificRembItem remb_item;
- RtcpFieldPayloadSpecificCastItem cast_item;
- RtcpFieldPayloadSpecificCastNackItem cast_nack_item;
+ RtcpFieldReceiverReport receiver_report;
+ RtcpFieldSenderReport sender_report;
+ RtcpFieldReportBlockItem report_block_item;
+ RtcpFieldSdesCName c_name;
+ RtcpFieldBye bye;
+
+ RtcpFieldXr extended_report;
+ RtcpFieldXrRrtr rrtr;
+ RtcpFieldXrDlrr dlrr;
+
+ RtcpFieldGenericRtpFeedbackNack nack;
+ RtcpFieldGenericRtpFeedbackNackItem nack_item;
+
+ RtcpFieldPayloadSpecificPli pli;
+ RtcpFieldPayloadSpecificRpsi rpsi;
+ RtcpFieldPayloadSpecificFir fir;
+ RtcpFieldPayloadSpecificFirItem fir_item;
+ RtcpFieldPayloadSpecificApplication application_specific;
+ RtcpFieldPayloadSpecificRembItem remb_item;
+ RtcpFieldPayloadSpecificCastItem cast_item;
+ RtcpFieldPayloadSpecificCastNackItem cast_nack_item;
RtcpFieldApplicationSpecificCastReceiverLogItem cast_receiver_log;
- RtcpFieldApplicationSpecificCastSenderLogItem cast_sender_log;
};
enum RtcpFieldTypes {
@@ -195,7 +192,6 @@ enum RtcpFieldTypes {
kRtcpRrCode,
kRtcpSrCode,
kRtcpReportBlockItemCode,
-
kRtcpSdesCode,
kRtcpSdesChunkCode,
kRtcpByeCode,
@@ -209,7 +205,6 @@ enum RtcpFieldTypes {
// RFC 4585.
kRtcpGenericRtpFeedbackNackCode,
kRtcpGenericRtpFeedbackNackItemCode,
-
kRtcpPayloadSpecificPliCode,
kRtcpPayloadSpecificRpsiCode,
kRtcpPayloadSpecificAppCode,
@@ -222,7 +217,6 @@ enum RtcpFieldTypes {
kRtcpApplicationSpecificCastReceiverLogCode,
kRtcpApplicationSpecificCastReceiverLogFrameCode,
kRtcpApplicationSpecificCastReceiverLogEventCode,
- kRtcpApplicationSpecificCastSenderLogCode,
// RFC 5104.
kRtcpPayloadSpecificFirCode,
@@ -233,27 +227,13 @@ enum RtcpFieldTypes {
};
struct RtcpCommonHeader {
- uint8 V; // Version.
- bool P; // Padding.
- uint8 IC; // Item count / subtype.
- uint8 PT; // Packet Type.
+ uint8 V; // Version.
+ bool P; // Padding.
+ uint8 IC; // Item count / subtype.
+ uint8 PT; // Packet Type.
uint16 length_in_octets;
};
-enum RtcpPacketTypes {
- kPacketTypeLow = 194, // SMPTE time-code mapping.
- kPacketTypeInterArrivalJitterReport = 195,
- kPacketTypeSenderReport = 200,
- kPacketTypeReceiverReport = 201,
- kPacketTypeSdes = 202,
- kPacketTypeBye = 203,
- kPacketTypeApplicationDefined = 204,
- kPacketTypeGenericRtpFeedback = 205,
- kPacketTypePayloadSpecific = 206,
- kPacketTypeXr = 207,
- kPacketTypeHigh = 210, // Port Mapping.
-};
-
class RtcpParser {
public:
RtcpParser(const uint8* rtcp_data, size_t rtcp_length);
@@ -275,7 +255,6 @@ class RtcpParser {
kStateBye,
kStateApplicationSpecificCastReceiverFrameLog,
kStateApplicationSpecificCastReceiverEventLog,
- kStateApplicationSpecificCastSenderLog,
kStateExtendedReportBlock,
kStateExtendedReportDelaySinceLastReceiverReport,
kStateGenericRtpFeedbackNack,
@@ -297,7 +276,6 @@ class RtcpParser {
void IterateByeItem();
void IterateCastReceiverLogFrame();
void IterateCastReceiverLogEvent();
- void IterateCastSenderLog();
void IterateExtendedReportItem();
void IterateExtendedReportDelaySinceLastReceiverReportItem();
void IterateNackItem();
@@ -323,7 +301,6 @@ class RtcpParser {
bool ParseApplicationDefined(uint8 subtype);
bool ParseCastReceiverLogFrameItem();
bool ParseCastReceiverLogEventItem();
- bool ParseCastSenderLogItem();
bool ParseExtendedReport();
bool ParseExtendedReportItem();
@@ -355,6 +332,14 @@ class RtcpParser {
DISALLOW_COPY_AND_ASSIGN(RtcpParser);
};
+// Converts a log event type to an integer value.
+// NOTE: We have only allocated 4 bits to represent the type of event over the
+// wire. Therefore, this function can only return values from 0 to 15.
+uint8 ConvertEventTypeToWireFormat(CastLoggingEvent event);
+
+// The inverse of |ConvertEventTypeToWireFormat()|.
+CastLoggingEvent TranslateToLogEventFromWireFormat(uint8 event);
+
} // namespace cast
} // namespace media
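
The new union in RtcpFieldApplicationSpecificCastReceiverLogItem exists because the same 16 wire bits carry either an unsigned packet id or a signed millisecond delay, depending on the event type. Which arm is valid is the parser's decision, not the struct's; a small sketch of the reinterpretation (type-punning through the union, the same way the parser code uses it):

    #include <cstdint>
    #include <cstdio>

    union DelayDeltaOrPacketId {
      uint16_t packet_id;   // For packet-level events.
      int16_t delay_delta;  // For playout events: a signed millisecond delta.
    };

    int main() {
      DelayDeltaOrPacketId field;
      field.packet_id = 0xfff6;  // The raw 16 bits read off the wire.
      // Reinterpreted as a signed delay, 0xfff6 means -10.
      std::printf("packet_id=%u delay_delta=%d\n",
                  static_cast<unsigned>(field.packet_id),
                  static_cast<int>(field.delay_delta));
      return 0;
    }
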
diff --git a/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc b/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
index f4117f53dec..8d0809d928e 100644
--- a/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
+++ b/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
@@ -5,14 +5,14 @@
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
#include "base/logging.h"
+#include "media/cast/rtcp/rtcp_utility.h"
namespace media {
namespace cast {
TestRtcpPacketBuilder::TestRtcpPacketBuilder()
: ptr_of_length_(NULL),
- big_endian_writer_(buffer_, kIpPacketSize) {
-}
+ big_endian_writer_(reinterpret_cast<char*>(buffer_), kMaxIpPacketSize) {}
void TestRtcpPacketBuilder::AddSr(uint32 sender_ssrc,
int number_of_report_blocks) {
@@ -91,8 +91,8 @@ void TestRtcpPacketBuilder::AddXrHeader(uint32 sender_ssrc) {
}
void TestRtcpPacketBuilder::AddXrUnknownBlock() {
- big_endian_writer_.WriteU8(9); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU8(9); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
big_endian_writer_.WriteU16(4); // Block length.
// First receiver same as sender of this report.
big_endian_writer_.WriteU32(0);
@@ -102,8 +102,8 @@ void TestRtcpPacketBuilder::AddXrUnknownBlock() {
}
void TestRtcpPacketBuilder::AddXrDlrrBlock(uint32 sender_ssrc) {
- big_endian_writer_.WriteU8(5); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU8(5); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
big_endian_writer_.WriteU16(3); // Block length.
// First receiver same as sender of this report.
@@ -113,8 +113,8 @@ void TestRtcpPacketBuilder::AddXrDlrrBlock(uint32 sender_ssrc) {
}
void TestRtcpPacketBuilder::AddXrExtendedDlrrBlock(uint32 sender_ssrc) {
- big_endian_writer_.WriteU8(5); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU8(5); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
big_endian_writer_.WriteU16(9); // Block length.
big_endian_writer_.WriteU32(0xaaaaaaaa);
big_endian_writer_.WriteU32(0xaaaaaaaa);
@@ -130,8 +130,8 @@ void TestRtcpPacketBuilder::AddXrExtendedDlrrBlock(uint32 sender_ssrc) {
}
void TestRtcpPacketBuilder::AddXrRrtrBlock() {
- big_endian_writer_.WriteU8(4); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
+ big_endian_writer_.WriteU8(4); // Block type.
+ big_endian_writer_.WriteU8(0); // Reserved.
big_endian_writer_.WriteU16(2); // Block length.
big_endian_writer_.WriteU32(kNtpHigh);
big_endian_writer_.WriteU32(kNtpLow);
@@ -167,8 +167,8 @@ void TestRtcpPacketBuilder::AddRpsi(uint32 sender_ssrc, uint32 media_ssrc) {
uint64 picture_id = kPictureId;
for (int i = 9; i > 0; i--) {
- big_endian_writer_.WriteU8(
- 0x80 | static_cast<uint8>(picture_id >> (i * 7)));
+ big_endian_writer_.WriteU8(0x80 |
+ static_cast<uint8>(picture_id >> (i * 7)));
}
// Add last byte of picture ID.
big_endian_writer_.WriteU8(static_cast<uint8>(picture_id & 0x7f));
@@ -189,7 +189,9 @@ void TestRtcpPacketBuilder::AddRemb(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU32(media_ssrc);
}
-void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc, uint32 media_ssrc) {
+void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc,
+ uint32 media_ssrc,
+ uint16 target_delay_ms) {
AddRtcpHeader(206, 15);
big_endian_writer_.WriteU32(sender_ssrc);
big_endian_writer_.WriteU32(media_ssrc);
@@ -198,8 +200,8 @@ void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU8('S');
big_endian_writer_.WriteU8('T');
big_endian_writer_.WriteU8(kAckFrameId);
- big_endian_writer_.WriteU8(3); // Loss fields.
- big_endian_writer_.WriteU16(0); // Reserved.
+ big_endian_writer_.WriteU8(3); // Loss fields.
+ big_endian_writer_.WriteU16(target_delay_ms);
big_endian_writer_.WriteU8(kLostFrameId);
big_endian_writer_.WriteU16(kRtcpCastAllPacketsLost);
big_endian_writer_.WriteU8(0); // Lost packet id mask.
@@ -211,21 +213,6 @@ void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU8(0); // Lost packet id mask.
}
-void TestRtcpPacketBuilder::AddSenderLog(uint32 sender_ssrc) {
- AddRtcpHeader(204, 1);
- big_endian_writer_.WriteU32(sender_ssrc);
- big_endian_writer_.WriteU8('C');
- big_endian_writer_.WriteU8('A');
- big_endian_writer_.WriteU8('S');
- big_endian_writer_.WriteU8('T');
-}
-
-void TestRtcpPacketBuilder::AddSenderFrameLog(uint8 event_id,
- uint32 rtp_timestamp) {
- big_endian_writer_.WriteU32(
- (static_cast<uint32>(event_id) << 24) + (rtp_timestamp & 0xffffff));
-}
-
void TestRtcpPacketBuilder::AddReceiverLog(uint32 sender_ssrc) {
AddRtcpHeader(204, 2);
big_endian_writer_.WriteU32(sender_ssrc);
@@ -236,7 +223,8 @@ void TestRtcpPacketBuilder::AddReceiverLog(uint32 sender_ssrc) {
}
void TestRtcpPacketBuilder::AddReceiverFrameLog(uint32 rtp_timestamp,
- int num_events, uint32 event_timesamp_base) {
+ int num_events,
+ uint32 event_timesamp_base) {
big_endian_writer_.WriteU32(rtp_timestamp);
big_endian_writer_.WriteU8(static_cast<uint8>(num_events - 1));
big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base >> 16));
@@ -245,14 +233,22 @@ void TestRtcpPacketBuilder::AddReceiverFrameLog(uint32 rtp_timestamp,
}
void TestRtcpPacketBuilder::AddReceiverEventLog(uint16 event_data,
- uint8 event_id, uint16 event_timesamp_delta) {
+ CastLoggingEvent event,
+ uint16 event_timesamp_delta) {
big_endian_writer_.WriteU16(event_data);
+ uint8 event_id = ConvertEventTypeToWireFormat(event);
uint16 type_and_delta = static_cast<uint16>(event_id) << 12;
type_and_delta += event_timesamp_delta & 0x0fff;
big_endian_writer_.WriteU16(type_and_delta);
}
-const uint8* TestRtcpPacketBuilder::Packet() {
+scoped_ptr<media::cast::Packet> TestRtcpPacketBuilder::GetPacket() {
+ PatchLengthField();
+ return scoped_ptr<media::cast::Packet>(
+ new media::cast::Packet(buffer_, buffer_ + Length()));
+}
+
+const uint8* TestRtcpPacketBuilder::Data() {
PatchLengthField();
return buffer_;
}
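
AddCast() now writes target_delay_ms into the 16 bits it previously emitted as a reserved zero, matching the new ReadU16() in ParsePayloadSpecificCastItem(). A standalone sketch of that one 32-bit word's layout:

    #include <cstdint>
    #include <cstdio>

    // last_frame_id (8) | number_of_lost_fields (8) | target_delay_ms (16, BE).
    void WriteCastFeedbackWord(uint8_t out[4], uint8_t last_frame_id,
                               uint8_t lost_fields, uint16_t target_delay_ms) {
      out[0] = last_frame_id;
      out[1] = lost_fields;
      out[2] = static_cast<uint8_t>(target_delay_ms >> 8);
      out[3] = static_cast<uint8_t>(target_delay_ms & 0xff);
    }

    int main() {
      uint8_t word[4];
      WriteCastFeedbackWord(word, 17 /* frame id */, 3, 100);
      // -> 11 03 00 64; a zero target delay reproduces the old reserved field.
      std::printf("%02x %02x %02x %02x\n", word[0], word[1], word[2], word[3]);
      return 0;
    }
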
diff --git a/chromium/media/cast/rtcp/test_rtcp_packet_builder.h b/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
index 9b63a37fa4a..d4266670aba 100644
--- a/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
+++ b/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
@@ -7,8 +7,9 @@
#ifndef MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
#define MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
+#include "base/big_endian.h"
+#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp_defines.h"
-#include "net/base/big_endian.h"
namespace media {
namespace cast {
@@ -58,7 +59,9 @@ class TestRtcpPacketBuilder {
TestRtcpPacketBuilder();
void AddSr(uint32 sender_ssrc, int number_of_report_blocks);
- void AddSrWithNtp(uint32 sender_ssrc, uint32 ntp_high, uint32 ntp_low,
+ void AddSrWithNtp(uint32 sender_ssrc,
+ uint32 ntp_high,
+ uint32 ntp_low,
uint32 rtp_timestamp);
void AddRr(uint32 sender_ssrc, int number_of_report_blocks);
void AddRb(uint32 rtp_ssrc);
@@ -76,17 +79,18 @@ class TestRtcpPacketBuilder {
void AddPli(uint32 sender_ssrc, uint32 media_ssrc);
void AddRpsi(uint32 sender_ssrc, uint32 media_ssrc);
void AddRemb(uint32 sender_ssrc, uint32 media_ssrc);
- void AddCast(uint32 sender_ssrc, uint32 media_ssrc);
- void AddSenderLog(uint32 sender_ssrc);
- void AddSenderFrameLog(uint8 event_id, uint32 rtp_timestamp);
+ void AddCast(uint32 sender_ssrc, uint32 media_ssrc, uint16 target_delay_ms);
void AddReceiverLog(uint32 sender_ssrc);
- void AddReceiverFrameLog(uint32 rtp_timestamp, int num_events,
+ void AddReceiverFrameLog(uint32 rtp_timestamp,
+ int num_events,
uint32 event_timesamp_base);
- void AddReceiverEventLog(uint16 event_data, uint8 event_id,
+ void AddReceiverEventLog(uint16 event_data,
+ CastLoggingEvent event,
uint16 event_timesamp_delta);
- const uint8* Packet();
- int Length() { return kIpPacketSize - big_endian_writer_.remaining(); }
+ scoped_ptr<Packet> GetPacket();
+ const uint8* Data();
+ int Length() { return kMaxIpPacketSize - big_endian_writer_.remaining(); }
private:
void AddRtcpHeader(int payload, int format_or_count);
@@ -94,12 +98,14 @@ class TestRtcpPacketBuilder {
// Where the length field of the current packet is.
// Note: 0 is not a legal value, it is used for "uninitialized".
- uint8 buffer_[kIpPacketSize];
+ uint8 buffer_[kMaxIpPacketSize];
char* ptr_of_length_;
- net::BigEndianWriter big_endian_writer_;
+ base::BigEndianWriter big_endian_writer_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestRtcpPacketBuilder);
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
+#endif // MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
diff --git a/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc
index 8681d087aa3..02b6c0be454 100644
--- a/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc
+++ b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc
@@ -7,11 +7,9 @@
namespace media {
namespace cast {
-MockRtpPayloadFeedback::MockRtpPayloadFeedback() {
-}
+MockRtpPayloadFeedback::MockRtpPayloadFeedback() {}
-MockRtpPayloadFeedback::~MockRtpPayloadFeedback() {
-}
+MockRtpPayloadFeedback::~MockRtpPayloadFeedback() {}
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h
index 003b67bc0da..14e48673bd3 100644
--- a/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h
+++ b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h
@@ -16,8 +16,7 @@ class MockRtpPayloadFeedback : public RtpPayloadFeedback {
MockRtpPayloadFeedback();
virtual ~MockRtpPayloadFeedback();
- MOCK_METHOD1(CastFeedback,
- void(const RtcpCastMessage& cast_feedback));
+ MOCK_METHOD1(CastFeedback, void(const RtcpCastMessage& cast_feedback));
};
} // namespace cast
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats.cc b/chromium/media/cast/rtp_receiver/receiver_stats.cc
index 9d34583a769..7eff86763f8 100644
--- a/chromium/media/cast/rtp_receiver/receiver_stats.cc
+++ b/chromium/media/cast/rtp_receiver/receiver_stats.cc
@@ -37,15 +37,16 @@ void ReceiverStats::GetStatistics(uint8* fraction_lost,
diff = max_sequence_number_ - interval_min_sequence_number_ + 1;
} else {
diff = kMaxSequenceNumber * (interval_wrap_count_ - 1) +
- (max_sequence_number_ - interval_min_sequence_number_ +
- kMaxSequenceNumber + 1);
+ (max_sequence_number_ - interval_min_sequence_number_ +
+ kMaxSequenceNumber + 1);
}
if (diff < 1) {
*fraction_lost = 0;
} else {
- *fraction_lost = static_cast<uint8>((256 * (1 -
- static_cast<float>(interval_number_packets_) / abs(diff))));
+ float tmp_ratio =
+ (1 - static_cast<float>(interval_number_packets_) / abs(diff));
+ *fraction_lost = static_cast<uint8>(256 * tmp_ratio);
}
}
@@ -55,16 +56,17 @@ void ReceiverStats::GetStatistics(uint8* fraction_lost,
} else if (sequence_number_cycles_ == 0) {
*cumulative_lost = expected_packets_num - total_number_packets_;
} else {
- *cumulative_lost = kMaxSequenceNumber * (sequence_number_cycles_ - 1) +
+ *cumulative_lost =
+ kMaxSequenceNumber * (sequence_number_cycles_ - 1) +
(expected_packets_num - total_number_packets_ + kMaxSequenceNumber);
}
// Extended high sequence number consists of the highest seq number and the
// number of cycles (wrap).
- *extended_high_sequence_number = (sequence_number_cycles_ << 16) +
- max_sequence_number_;
+ *extended_high_sequence_number =
+ (sequence_number_cycles_ << 16) + max_sequence_number_;
- *jitter = static_cast<uint32>(abs(jitter_.InMillisecondsRoundedUp()));
+ *jitter = static_cast<uint32>(std::abs(jitter_.InMillisecondsRoundedUp()));
// Reset interval values.
interval_min_sequence_number_ = 0;
@@ -73,7 +75,7 @@ void ReceiverStats::GetStatistics(uint8* fraction_lost,
}
void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
- uint16 new_seq_num = header.webrtc.header.sequenceNumber;
+ const uint16 new_seq_num = header.sequence_number;
if (interval_number_packets_ == 0) {
// First packet in the interval.
@@ -97,10 +99,11 @@ void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
// Compute Jitter.
base::TimeTicks now = clock_->NowTicks();
base::TimeDelta delta_new_timestamp =
- base::TimeDelta::FromMilliseconds(header.webrtc.header.timestamp);
+ base::TimeDelta::FromMilliseconds(header.rtp_timestamp);
if (total_number_packets_ > 0) {
// Update jitter.
- base::TimeDelta delta = (now - last_received_packet_time_) -
+ base::TimeDelta delta =
+ (now - last_received_packet_time_) -
((delta_new_timestamp - last_received_timestamp_) / 90);
jitter_ += (delta - jitter_) / 16;
}
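
The jitter update above is the RFC 3550 interarrival estimator, jitter += (delta - jitter) / 16, where delta compares the wall-clock gap between packets against the RTP-timestamp gap (divided by 90, since the timestamps tick at 90 kHz). A toy convergence check:

    #include <cstdio>

    int main() {
      double jitter_ms = 0;
      const double kMediaSpacingMs = (33 * 90) / 90.0;  // 33 ms per packet.
      // Packets carrying 33 ms of media arrive 38 ms apart: 5 ms of jitter.
      for (int i = 0; i < 300; ++i) {
        const double delta_ms = 38.0 - kMediaSpacingMs;
        jitter_ms += (delta_ms - jitter_ms) / 16.0;
      }
      std::printf("jitter ~= %.3f ms\n", jitter_ms);  // Converges toward 5.
      return 0;
    }
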
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats.h b/chromium/media/cast/rtp_receiver/receiver_stats.h
index c91ee507e0c..05a067f7870 100644
--- a/chromium/media/cast/rtp_receiver/receiver_stats.h
+++ b/chromium/media/cast/rtp_receiver/receiver_stats.h
@@ -7,20 +7,21 @@
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
-class ReceiverStats {
+class ReceiverStats : public RtpReceiverStatistics {
public:
explicit ReceiverStats(base::TickClock* clock);
- ~ReceiverStats();
+ virtual ~ReceiverStats() OVERRIDE;
- void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter);
+ virtual void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter) OVERRIDE;
void UpdateStatistics(const RtpCastHeader& header);
private:
@@ -39,6 +40,8 @@ class ReceiverStats {
int interval_min_sequence_number_;
int interval_number_packets_;
int interval_wrap_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReceiverStats);
};
} // namespace cast
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc b/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
index 2788cb592de..98059cdde71 100644
--- a/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
+++ b/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
@@ -4,6 +4,8 @@
#include <gtest/gtest.h>
+#include <stdint.h>
+
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
#include "media/cast/rtp_receiver/receiver_stats.h"
@@ -12,30 +14,24 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const int64 kStartMillisecond = INT64_C(12345678900000);
static const uint32 kStdTimeIncrementMs = 33;
class ReceiverStatsTest : public ::testing::Test {
protected:
ReceiverStatsTest()
: stats_(&testing_clock_),
- rtp_header_(),
fraction_lost_(0),
cumulative_lost_(0),
extended_high_sequence_number_(0),
jitter_(0) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
- start_time_ = testing_clock_.NowTicks();
+ start_time_ = testing_clock_.NowTicks();
delta_increments_ = base::TimeDelta::FromMilliseconds(kStdTimeIncrementMs);
}
virtual ~ReceiverStatsTest() {}
- virtual void SetUp() {
- rtp_header_.webrtc.header.sequenceNumber = 0;
- rtp_header_.webrtc.header.timestamp = 0;
- }
-
uint32 ExpectedJitter(uint32 const_interval, int num_packets) {
float jitter = 0;
// Assume timestamps have a constant kStdTimeIncrementMs interval.
@@ -56,11 +52,15 @@ class ReceiverStatsTest : public ::testing::Test {
base::SimpleTestTickClock testing_clock_;
base::TimeTicks start_time_;
base::TimeDelta delta_increments_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReceiverStatsTest);
};
TEST_F(ReceiverStatsTest, ResetState) {
- stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
- &extended_high_sequence_number_, &jitter_);
+ stats_.GetStatistics(&fraction_lost_,
+ &cumulative_lost_,
+ &extended_high_sequence_number_,
+ &jitter_);
EXPECT_EQ(0u, fraction_lost_);
EXPECT_EQ(0u, cumulative_lost_);
EXPECT_EQ(0u, extended_high_sequence_number_);
@@ -72,75 +72,81 @@ TEST_F(ReceiverStatsTest, LossCount) {
if (i % 4)
stats_.UpdateStatistics(rtp_header_);
if (i % 3) {
- rtp_header_.webrtc.header.timestamp += 33 * 90;
+ rtp_header_.rtp_timestamp += 33 * 90;
}
- ++rtp_header_.webrtc.header.sequenceNumber;
+ ++rtp_header_.sequence_number;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
- &extended_high_sequence_number_, &jitter_);
+ stats_.GetStatistics(&fraction_lost_,
+ &cumulative_lost_,
+ &extended_high_sequence_number_,
+ &jitter_);
EXPECT_EQ(63u, fraction_lost_);
EXPECT_EQ(74u, cumulative_lost_);
// Build extended sequence number.
- uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ const uint32 extended_seq_num = rtp_header_.sequence_number - 1;
EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
}
TEST_F(ReceiverStatsTest, NoLossWrap) {
- rtp_header_.webrtc.header.sequenceNumber = 65500;
+ rtp_header_.sequence_number = 65500;
for (int i = 0; i < 300; ++i) {
- stats_.UpdateStatistics(rtp_header_);
+ stats_.UpdateStatistics(rtp_header_);
if (i % 3) {
- rtp_header_.webrtc.header.timestamp += 33 * 90;
+ rtp_header_.rtp_timestamp += 33 * 90;
}
- ++rtp_header_.webrtc.header.sequenceNumber;
+ ++rtp_header_.sequence_number;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
- &extended_high_sequence_number_, &jitter_);
+ stats_.GetStatistics(&fraction_lost_,
+ &cumulative_lost_,
+ &extended_high_sequence_number_,
+ &jitter_);
EXPECT_EQ(0u, fraction_lost_);
EXPECT_EQ(0u, cumulative_lost_);
// Build extended sequence number (one wrap cycle).
- uint32 extended_seq_num = (1 << 16) +
- rtp_header_.webrtc.header.sequenceNumber - 1;
+ const uint32 extended_seq_num = (1 << 16) + rtp_header_.sequence_number - 1;
EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
}
TEST_F(ReceiverStatsTest, LossCountWrap) {
- const uint32 start_sequence_number = 65500;
- rtp_header_.webrtc.header.sequenceNumber = start_sequence_number;
+ const uint32 kStartSequenceNumber = 65500;
+ rtp_header_.sequence_number = kStartSequenceNumber;
for (int i = 0; i < 300; ++i) {
if (i % 4)
stats_.UpdateStatistics(rtp_header_);
if (i % 3)
// Update timestamp.
- ++rtp_header_.webrtc.header.timestamp;
- ++rtp_header_.webrtc.header.sequenceNumber;
+ ++rtp_header_.rtp_timestamp;
+ ++rtp_header_.sequence_number;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
- &extended_high_sequence_number_, &jitter_);
+ stats_.GetStatistics(&fraction_lost_,
+ &cumulative_lost_,
+ &extended_high_sequence_number_,
+ &jitter_);
EXPECT_EQ(63u, fraction_lost_);
EXPECT_EQ(74u, cumulative_lost_);
// Build extended sequence number (one wrap cycle).
- uint32 extended_seq_num = (1 << 16) +
- rtp_header_.webrtc.header.sequenceNumber - 1;
+ const uint32 extended_seq_num = (1 << 16) + rtp_header_.sequence_number - 1;
EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
}
TEST_F(ReceiverStatsTest, BasicJitter) {
for (int i = 0; i < 300; ++i) {
stats_.UpdateStatistics(rtp_header_);
- ++rtp_header_.webrtc.header.sequenceNumber;
- rtp_header_.webrtc.header.timestamp += 33 * 90;
+ ++rtp_header_.sequence_number;
+ rtp_header_.rtp_timestamp += 33 * 90;
testing_clock_.Advance(delta_increments_);
}
- stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
- &extended_high_sequence_number_, &jitter_);
+ stats_.GetStatistics(&fraction_lost_,
+ &cumulative_lost_,
+ &extended_high_sequence_number_,
+ &jitter_);
EXPECT_FALSE(fraction_lost_);
EXPECT_FALSE(cumulative_lost_);
// Build extended sequence number (one wrap cycle).
- uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ const uint32 extended_seq_num = rtp_header_.sequence_number - 1;
EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs, 300), jitter_);
}
@@ -149,21 +155,23 @@ TEST_F(ReceiverStatsTest, NonTrivialJitter) {
const int kAdditionalIncrement = 5;
for (int i = 0; i < 300; ++i) {
stats_.UpdateStatistics(rtp_header_);
- ++rtp_header_.webrtc.header.sequenceNumber;
- rtp_header_.webrtc.header.timestamp += 33 * 90;
+ ++rtp_header_.sequence_number;
+ rtp_header_.rtp_timestamp += 33 * 90;
base::TimeDelta additional_delta =
base::TimeDelta::FromMilliseconds(kAdditionalIncrement);
testing_clock_.Advance(delta_increments_ + additional_delta);
}
- stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
- &extended_high_sequence_number_, &jitter_);
+ stats_.GetStatistics(&fraction_lost_,
+ &cumulative_lost_,
+ &extended_high_sequence_number_,
+ &jitter_);
EXPECT_FALSE(fraction_lost_);
EXPECT_FALSE(cumulative_lost_);
// Build extended sequence number (one wrap cycle).
- uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ const uint32 extended_seq_num = rtp_header_.sequence_number - 1;
EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
- EXPECT_EQ(
- ExpectedJitter(kStdTimeIncrementMs + kAdditionalIncrement, 300), jitter_);
+ EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs + kAdditionalIncrement, 300),
+ jitter_);
}
} // namespace cast
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h b/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
index b6647a835be..f5edf7c43fe 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
@@ -14,19 +14,17 @@ namespace cast {
class MockRtpFeedback : public RtpFeedback {
public:
MOCK_METHOD4(OnInitializeDecoder,
- int32(const int8 payloadType,
- const int frequency,
- const uint8 channels,
- const uint32 rate));
+ int32(const int8 payloadType,
+ const int frequency,
+ const uint8 channels,
+ const uint32 rate));
- MOCK_METHOD1(OnPacketTimeout,
- void(const int32 id));
+ MOCK_METHOD1(OnPacketTimeout, void(const int32 id));
MOCK_METHOD2(OnReceivedPacket,
- void(const int32 id, const RtpRtcpPacketType packet_type));
+ void(const int32 id, const RtpRtcpPacketField packet_type));
MOCK_METHOD2(OnPeriodicDeadOrAlive,
void(const int32 id, const RTPAliveType alive));
- MOCK_METHOD2(OnIncomingSSRCChanged,
- void(const int32 id, const uint32 ssrc));
+ MOCK_METHOD2(OnIncomingSSRCChanged, void(const int32 id, const uint32 ssrc));
MOCK_METHOD3(OnIncomingCSRCChanged,
void(const int32 id, const uint32 csrc, const bool added));
};
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
index 6ef20fe64e3..f44e82dac2b 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
@@ -4,104 +4,113 @@
#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+#include "base/big_endian.h"
#include "base/logging.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/rtp_receiver/rtp_receiver.h"
-#include "net/base/big_endian.h"
namespace media {
namespace cast {
-static const size_t kRtpCommonHeaderLength = 12;
-static const size_t kRtpCastHeaderLength = 7;
+static const size_t kRtpHeaderLength = 12;
+static const size_t kCastHeaderLength = 7;
+static const uint8 kRtpExtensionBitMask = 0x10;
+static const uint8 kRtpMarkerBitMask = 0x80;
static const uint8 kCastKeyFrameBitMask = 0x80;
static const uint8 kCastReferenceFrameIdBitMask = 0x40;
-RtpParser::RtpParser(RtpData* incoming_payload_callback,
- const RtpParserConfig parser_config)
- : data_callback_(incoming_payload_callback),
- parser_config_(parser_config) {}
+RtpParser::RtpParser(uint32 expected_sender_ssrc, uint8 expected_payload_type)
+ : expected_sender_ssrc_(expected_sender_ssrc),
+ expected_payload_type_(expected_payload_type) {}
RtpParser::~RtpParser() {}
-bool RtpParser::ParsePacket(const uint8* packet, size_t length,
- RtpCastHeader* rtp_header) {
- if (length == 0) return false;
- // Get RTP general header.
- if (!ParseCommon(packet, length, rtp_header)) return false;
- if (rtp_header->webrtc.header.payloadType == parser_config_.payload_type &&
- rtp_header->webrtc.header.ssrc == parser_config_.ssrc) {
- return ParseCast(packet + kRtpCommonHeaderLength,
- length - kRtpCommonHeaderLength, rtp_header);
- }
- // Not a valid payload type / ssrc combination.
- return false;
-}
-
-bool RtpParser::ParseCommon(const uint8* packet,
+bool RtpParser::ParsePacket(const uint8* packet,
size_t length,
- RtpCastHeader* rtp_header) {
- if (length < kRtpCommonHeaderLength) return false;
- uint8 version = packet[0] >> 6;
- if (version != 2) return false;
- uint8 cc = packet[0] & 0x0f;
- bool marker = ((packet[1] & 0x80) != 0);
- int payload_type = packet[1] & 0x7f;
-
- uint16 sequence_number;
- uint32 rtp_timestamp, ssrc;
- net::BigEndianReader big_endian_reader(packet + 2, 10);
- big_endian_reader.ReadU16(&sequence_number);
- big_endian_reader.ReadU32(&rtp_timestamp);
- big_endian_reader.ReadU32(&ssrc);
-
- if (ssrc != parser_config_.ssrc) return false;
-
- rtp_header->webrtc.header.markerBit = marker;
- rtp_header->webrtc.header.payloadType = payload_type;
- rtp_header->webrtc.header.sequenceNumber = sequence_number;
- rtp_header->webrtc.header.timestamp = rtp_timestamp;
- rtp_header->webrtc.header.ssrc = ssrc;
- rtp_header->webrtc.header.numCSRCs = cc;
-
- uint8 csrc_octs = cc * 4;
- rtp_header->webrtc.type.Audio.numEnergy = rtp_header->webrtc.header.numCSRCs;
- rtp_header->webrtc.header.headerLength = kRtpCommonHeaderLength + csrc_octs;
- rtp_header->webrtc.type.Audio.isCNG = false;
- rtp_header->webrtc.type.Audio.channel = parser_config_.audio_channels;
- // TODO(pwestin): look at x bit and skip data.
- return true;
-}
-
-bool RtpParser::ParseCast(const uint8* packet,
- size_t length,
- RtpCastHeader* rtp_header) {
- if (length < kRtpCastHeaderLength) return false;
-
- // Extract header.
- const uint8* data_ptr = packet;
- size_t data_length = length;
- rtp_header->is_key_frame = (data_ptr[0] & kCastKeyFrameBitMask);
- rtp_header->is_reference = (data_ptr[0] & kCastReferenceFrameIdBitMask);
- rtp_header->frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[1]);
-
- net::BigEndianReader big_endian_reader(data_ptr + 2, 4);
- big_endian_reader.ReadU16(&rtp_header->packet_id);
- big_endian_reader.ReadU16(&rtp_header->max_packet_id);
-
- if (rtp_header->is_reference) {
- rtp_header->reference_frame_id =
- reference_frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[6]);
- data_ptr += kRtpCastHeaderLength;
- data_length -= kRtpCastHeaderLength;
- } else {
- data_ptr += kRtpCastHeaderLength - 1;
- data_length -= kRtpCastHeaderLength - 1;
+ RtpCastHeader* header,
+ const uint8** payload_data,
+ size_t* payload_size) {
+ DCHECK(packet);
+ DCHECK(header);
+ DCHECK(payload_data);
+ DCHECK(payload_size);
+
+ if (length < (kRtpHeaderLength + kCastHeaderLength))
+ return false;
+
+ base::BigEndianReader reader(reinterpret_cast<const char*>(packet), length);
+
+ // Parse the RTP header. See
+ // http://en.wikipedia.org/wiki/Real-time_Transport_Protocol for an
+ // explanation of the standard RTP packet header.
+ uint8 bits;
+ if (!reader.ReadU8(&bits))
+ return false;
+ const uint8 version = bits >> 6;
+ if (version != 2)
+ return false;
+ if (bits & kRtpExtensionBitMask)
+ return false; // We lack the implementation to skip over an extension.
+ if (!reader.ReadU8(&bits))
+ return false;
+ header->marker = !!(bits & kRtpMarkerBitMask);
+ header->payload_type = bits & ~kRtpMarkerBitMask;
+ if (header->payload_type != expected_payload_type_)
+ return false; // Punt: Unexpected payload type.
+ if (!reader.ReadU16(&header->sequence_number) ||
+ !reader.ReadU32(&header->rtp_timestamp) ||
+ !reader.ReadU32(&header->sender_ssrc)) {
+ return false;
+ }
+ if (header->sender_ssrc != expected_sender_ssrc_)
+ return false; // Punt: Sender's SSRC does not match the expected one.
+
+ // Parse the Cast header. Note that, from the RTP protocol's perspective, the
+ // Cast header is part of the payload (and not meant to be an extension
+ // header).
+ if (!reader.ReadU8(&bits))
+ return false;
+ header->is_key_frame = !!(bits & kCastKeyFrameBitMask);
+ const bool includes_specific_frame_reference =
+ !!(bits & kCastReferenceFrameIdBitMask);
+ uint8 truncated_frame_id;
+ if (!reader.ReadU8(&truncated_frame_id) ||
+ !reader.ReadU16(&header->packet_id) ||
+ !reader.ReadU16(&header->max_packet_id)) {
+ return false;
+ }
+ // Sanity-check: Do the packet ID values make sense w.r.t. each other?
+ if (header->max_packet_id < header->packet_id)
+ return false;
+ uint8 truncated_reference_frame_id;
+ if (!includes_specific_frame_reference) {
+ // By default, a key frame only references itself, and non-key frames
+ // reference their direct predecessor.
+ truncated_reference_frame_id = truncated_frame_id;
+ if (!header->is_key_frame)
+ --truncated_reference_frame_id;
+ } else if (!reader.ReadU8(&truncated_reference_frame_id)) {
+ return false;
}
- if (rtp_header->max_packet_id < rtp_header->packet_id) return false;
+ // Only the lower 8 bits of the |frame_id| were serialized, so do some magic
+ // to restore the upper 24 bits.
+ //
+ // Note: The call to |frame_id_wrap_helper_| has side effects, so we must not
+ // call it until we know the entire deserialization will succeed.
+ header->frame_id =
+ frame_id_wrap_helper_.MapTo32bitsFrameId(truncated_frame_id);
+ // When the upper 24 bits are restored to |reference_frame_id|, make sure
+ // |reference_frame_id| will be less than or equal to |frame_id|.
+ if (truncated_reference_frame_id <= truncated_frame_id)
+ header->reference_frame_id = header->frame_id & 0xffffff00;
+ else
+ header->reference_frame_id = (header->frame_id & 0xffffff00) - 0x00000100;
+ header->reference_frame_id |= truncated_reference_frame_id;
+
+ // All remaining data in the packet is the payload.
+ *payload_data = reinterpret_cast<const uint8*>(reader.ptr());
+ *payload_size = reader.remaining();
- data_callback_->OnReceivedPayloadData(data_ptr, data_length, rtp_header);
return true;
}
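
The trickiest part of the rewritten ParsePacket() is re-expanding the reference frame id, since only its low byte travels on the wire. A standalone sketch of that page-selection rule with two worked cases:

    #include <cstdint>
    #include <cstdio>

    // Given the already-expanded 32-bit |frame_id| and the truncated low byte
    // of the reference, choose frame_id's 256-frame "page" -- or the previous
    // page when the truncated reference is numerically larger, which can only
    // mean the low byte wrapped between the two frames.
    uint32_t ExpandReferenceFrameId(uint32_t frame_id, uint8_t truncated_ref) {
      const uint8_t truncated_frame = static_cast<uint8_t>(frame_id);
      const uint32_t page = (truncated_ref <= truncated_frame)
                                ? (frame_id & 0xffffff00)
                                : (frame_id & 0xffffff00) - 0x00000100;
      return page | truncated_ref;
    }

    int main() {
      // Same page: frame 0x102 referencing low byte 0x01 -> 0x00000101.
      std::printf("0x%08x\n", ExpandReferenceFrameId(0x102, 0x01));
      // Wrapped: frame 0x102 referencing low byte 0xff -> 0x000000ff,
      // i.e. three frames back, across the page boundary.
      std::printf("0x%08x\n", ExpandReferenceFrameId(0x102, 0xff));
      return 0;
    }
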
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
deleted file mode 100644
index 258b0bff532..00000000000
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_rtp_parser',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- ],
- 'sources': [
- 'rtp_parser.cc',
- 'rtp_parser.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
index 33bc92a6e6e..35118cf1446 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
@@ -5,49 +5,40 @@
#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
-#include "media/cast/net/cast_net_defines.h"
#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace media {
namespace cast {
-class RtpData;
-
-struct RtpParserConfig {
- RtpParserConfig() {
- ssrc = 0;
- payload_type = 0;
- audio_channels = 0;
- }
-
- uint32 ssrc;
- int payload_type;
- AudioCodec audio_codec;
- VideoCodec video_codec;
- int audio_channels;
-};
-
+// TODO(miu): RtpParser and RtpPacketizer should be consolidated into a single
+// module that handles all RTP/Cast packet serialization and deserialization
+// throughout the media/cast library.
class RtpParser {
public:
- RtpParser(RtpData* incoming_payload_callback,
- const RtpParserConfig parser_config);
-
- ~RtpParser();
-
- bool ParsePacket(const uint8* packet, size_t length,
- RtpCastHeader* rtp_header);
+ RtpParser(uint32 expected_sender_ssrc, uint8 expected_payload_type);
+
+ virtual ~RtpParser();
+
+ // Parses the |packet|, expecting an RTP header along with a Cast header at
+ // the beginning of the the RTP payload. This method populates the structure
+ // pointed to by |rtp_header| and sets the |payload_data| pointer and
+ // |payload_size| to the memory region within |packet| containing the Cast
+ // payload data. Returns false if the data appears to be invalid, is not from
+ // the expected sender (as identified by the SSRC field), or is not the
+ // expected payload type.
+ bool ParsePacket(const uint8* packet,
+ size_t length,
+ RtpCastHeader* rtp_header,
+ const uint8** payload_data,
+ size_t* payload_size);
private:
- bool ParseCommon(const uint8* packet, size_t length,
- RtpCastHeader* rtp_header);
-
- bool ParseCast(const uint8* packet, size_t length,
- RtpCastHeader* rtp_header);
+ const uint32 expected_sender_ssrc_;
+ const uint8 expected_payload_type_;
+ transport::FrameIdWrapHelper frame_id_wrap_helper_;
- RtpData* data_callback_;
- RtpParserConfig parser_config_;
- FrameIdWrapHelper frame_id_wrap_helper_;
- FrameIdWrapHelper reference_frame_id_wrap_helper_;
+ DISALLOW_COPY_AND_ASSIGN(RtpParser);
};
} // namespace cast
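
For orientation, a minimal caller sketch of the new interface, assuming
|packet| and |packet_size| describe a received datagram; the SSRC and payload
type values here are placeholders, not from this patch:

  RtpParser parser(0x1234 /* expected_sender_ssrc */,
                   127 /* expected_payload_type */);
  RtpCastHeader header;
  const uint8* payload_data = NULL;
  size_t payload_size = 0;
  if (parser.ParsePacket(packet, packet_size, &header,
                         &payload_data, &payload_size)) {
    // |payload_data| and |payload_size| point into |packet|, so |packet|
    // must outlive any use of the payload.
  }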
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
index c0f91d10fff..47c79139ffc 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <gtest/gtest.h>
-
#include "base/memory/scoped_ptr.h"
+#include "base/rand_util.h"
#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
-#include "media/cast/rtp_receiver/rtp_receiver.h"
#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
@@ -20,178 +19,178 @@ static const uint32 kTestTimestamp = 111111;
static const uint16 kTestSeqNum = 4321;
static const uint8 kRefFrameId = 17;
-class RtpDataTest : public RtpData {
- public:
- RtpDataTest() {
- expected_header_.reset(new RtpCastHeader());
- }
-
- virtual ~RtpDataTest() {}
-
- void SetExpectedHeader(const RtpCastHeader& cast_header) {
- memcpy(expected_header_.get(), &cast_header, sizeof(RtpCastHeader));
- }
-
- virtual void OnReceivedPayloadData(const uint8* payloadData,
- size_t payloadSize,
- const RtpCastHeader* rtpHeader) OVERRIDE {
- VerifyCommonHeader(*rtpHeader);
- VerifyCastHeader(*rtpHeader);
- }
-
- void VerifyCommonHeader(const RtpCastHeader& parsed_header) {
- EXPECT_EQ(expected_header_->packet_id == expected_header_->max_packet_id,
- parsed_header.webrtc.header.markerBit);
- EXPECT_EQ(kTestPayloadType, parsed_header.webrtc.header.payloadType);
- EXPECT_EQ(kTestSsrc, parsed_header.webrtc.header.ssrc);
- EXPECT_EQ(0, parsed_header.webrtc.header.numCSRCs);
- }
-
- void VerifyCastHeader(const RtpCastHeader& parsed_header) {
- EXPECT_EQ(expected_header_->is_key_frame, parsed_header.is_key_frame);
- EXPECT_EQ(expected_header_->frame_id, parsed_header.frame_id);
- EXPECT_EQ(expected_header_->packet_id, parsed_header.packet_id);
- EXPECT_EQ(expected_header_->max_packet_id, parsed_header.max_packet_id);
- EXPECT_EQ(expected_header_->is_reference, parsed_header.is_reference);
- }
-
- private:
- scoped_ptr<RtpCastHeader> expected_header_;
-};
-
class RtpParserTest : public ::testing::Test {
protected:
- RtpParserTest() {
- PopulateConfig();
- rtp_data_.reset(new RtpDataTest());
- rtp_parser_.reset(new RtpParser(rtp_data_.get(), config_));
- }
-
- virtual ~RtpParserTest() {}
-
- virtual void SetUp() {
- cast_header_.is_reference = true;
- cast_header_.reference_frame_id = kRefFrameId;
+ RtpParserTest() : rtp_parser_(kTestSsrc, kTestPayloadType) {
packet_builder_.SetSsrc(kTestSsrc);
- packet_builder_.SetReferenceFrameId(kRefFrameId, true);
packet_builder_.SetSequenceNumber(kTestSeqNum);
packet_builder_.SetTimestamp(kTestTimestamp);
packet_builder_.SetPayloadType(kTestPayloadType);
packet_builder_.SetMarkerBit(true); // Only one packet.
+ cast_header_.sender_ssrc = kTestSsrc;
+ cast_header_.sequence_number = kTestSeqNum;
+ cast_header_.rtp_timestamp = kTestTimestamp;
+ cast_header_.payload_type = kTestPayloadType;
+ cast_header_.marker = true;
+ }
+
+ virtual ~RtpParserTest() {}
+
+ void ExpectParsesPacket() {
+ RtpCastHeader parsed_header;
+ const uint8* payload = NULL;
+ size_t payload_size = -1;
+ EXPECT_TRUE(rtp_parser_.ParsePacket(
+ packet_, kPacketLength, &parsed_header, &payload, &payload_size));
+
+ EXPECT_EQ(cast_header_.marker, parsed_header.marker);
+ EXPECT_EQ(cast_header_.payload_type, parsed_header.payload_type);
+ EXPECT_EQ(cast_header_.sequence_number, parsed_header.sequence_number);
+ EXPECT_EQ(cast_header_.rtp_timestamp, parsed_header.rtp_timestamp);
+ EXPECT_EQ(cast_header_.sender_ssrc, parsed_header.sender_ssrc);
+
+ EXPECT_EQ(cast_header_.is_key_frame, parsed_header.is_key_frame);
+ EXPECT_EQ(cast_header_.frame_id, parsed_header.frame_id);
+ EXPECT_EQ(cast_header_.packet_id, parsed_header.packet_id);
+ EXPECT_EQ(cast_header_.max_packet_id, parsed_header.max_packet_id);
+ EXPECT_EQ(cast_header_.reference_frame_id,
+ parsed_header.reference_frame_id);
+
+ EXPECT_TRUE(!!payload);
+ EXPECT_NE(static_cast<size_t>(-1), payload_size);
}
- void PopulateConfig() {
- config_.payload_type = kTestPayloadType;
- config_.ssrc = kTestSsrc;
+ void ExpectDoesNotParsePacket() {
+ RtpCastHeader parsed_header;
+ const uint8* payload = NULL;
+ size_t payload_size = -1;
+ EXPECT_FALSE(rtp_parser_.ParsePacket(
+ packet_, kPacketLength, &parsed_header, &payload, &payload_size));
}
- scoped_ptr<RtpDataTest> rtp_data_;
RtpPacketBuilder packet_builder_;
- scoped_ptr<RtpParser> rtp_parser_;
- RtpParserConfig config_;
+ uint8 packet_[kPacketLength];
+ RtpParser rtp_parser_;
RtpCastHeader cast_header_;
};
TEST_F(RtpParserTest, ParseDefaultCastPacket) {
- // Build generic data packet.
- uint8 packet[kPacketLength];
- packet_builder_.BuildHeader(packet, kPacketLength);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- rtp_data_->SetExpectedHeader(cast_header_);
- EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ ExpectParsesPacket();
}
TEST_F(RtpParserTest, ParseNonDefaultCastPacket) {
- // Build generic data packet.
- uint8 packet[kPacketLength];
packet_builder_.SetKeyFrame(true);
- packet_builder_.SetFrameId(10);
+ packet_builder_.SetFrameIds(10, 10);
packet_builder_.SetPacketId(5);
packet_builder_.SetMaxPacketId(15);
packet_builder_.SetMarkerBit(false);
- packet_builder_.BuildHeader(packet, kPacketLength);
+ packet_builder_.BuildHeader(packet_, kPacketLength);
cast_header_.is_key_frame = true;
cast_header_.frame_id = 10;
+ cast_header_.reference_frame_id = 10;
cast_header_.packet_id = 5;
cast_header_.max_packet_id = 15;
- rtp_data_->SetExpectedHeader(cast_header_);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+ cast_header_.marker = false;
+ ExpectParsesPacket();
}
TEST_F(RtpParserTest, TooBigPacketId) {
- // Build generic data packet.
- uint8 packet[kPacketLength];
packet_builder_.SetKeyFrame(true);
- packet_builder_.SetFrameId(10);
+ packet_builder_.SetFrameIds(10, 10);
packet_builder_.SetPacketId(15);
packet_builder_.SetMaxPacketId(5);
- packet_builder_.BuildHeader(packet, kPacketLength);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.reference_frame_id = 10;
+ cast_header_.packet_id = 15;
+ cast_header_.max_packet_id = 5;
+ ExpectDoesNotParsePacket();
}
TEST_F(RtpParserTest, MaxPacketId) {
- // Build generic data packet.
- uint8 packet[kPacketLength];
packet_builder_.SetKeyFrame(true);
- packet_builder_.SetFrameId(10);
+ packet_builder_.SetFrameIds(10, 10);
packet_builder_.SetPacketId(65535);
packet_builder_.SetMaxPacketId(65535);
- packet_builder_.BuildHeader(packet, kPacketLength);
+ packet_builder_.BuildHeader(packet_, kPacketLength);
cast_header_.is_key_frame = true;
cast_header_.frame_id = 10;
+ cast_header_.reference_frame_id = 10;
cast_header_.packet_id = 65535;
cast_header_.max_packet_id = 65535;
- rtp_data_->SetExpectedHeader(cast_header_);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+ ExpectParsesPacket();
}
TEST_F(RtpParserTest, InvalidPayloadType) {
- // Build generic data packet.
- uint8 packet[kPacketLength];
packet_builder_.SetKeyFrame(true);
- packet_builder_.SetFrameId(10);
+ packet_builder_.SetFrameIds(10, 10);
packet_builder_.SetPacketId(65535);
packet_builder_.SetMaxPacketId(65535);
packet_builder_.SetPayloadType(kTestPayloadType - 1);
- packet_builder_.BuildHeader(packet, kPacketLength);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.reference_frame_id = 10;
+ cast_header_.packet_id = 65535;
+ cast_header_.max_packet_id = 65535;
+ cast_header_.payload_type = kTestPayloadType - 1;
+ ExpectDoesNotParsePacket();
}
TEST_F(RtpParserTest, InvalidSsrc) {
- // Build generic data packet.
- uint8 packet[kPacketLength];
packet_builder_.SetKeyFrame(true);
- packet_builder_.SetFrameId(10);
+ packet_builder_.SetFrameIds(10, 10);
packet_builder_.SetPacketId(65535);
packet_builder_.SetMaxPacketId(65535);
packet_builder_.SetSsrc(kTestSsrc - 1);
- packet_builder_.BuildHeader(packet, kPacketLength);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- EXPECT_FALSE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ cast_header_.is_key_frame = true;
+ cast_header_.frame_id = 10;
+ cast_header_.reference_frame_id = 10;
+ cast_header_.packet_id = 65535;
+ cast_header_.max_packet_id = 65535;
+ cast_header_.sender_ssrc = kTestSsrc - 1;
+ ExpectDoesNotParsePacket();
+}
+
+TEST_F(RtpParserTest, ParseCastPacketWithSpecificFrameReference) {
+ packet_builder_.SetFrameIds(kRefFrameId + 3, kRefFrameId);
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ cast_header_.frame_id = kRefFrameId + 3;
+ cast_header_.reference_frame_id = kRefFrameId;
+ ExpectParsesPacket();
+}
+
+TEST_F(RtpParserTest, ParseExpandingFrameIdTo32Bits) {
+ const uint32 kMaxFrameId = 1000;
+ packet_builder_.SetKeyFrame(true);
+ cast_header_.is_key_frame = true;
+ for (uint32 frame_id = 0; frame_id <= kMaxFrameId; ++frame_id) {
+ packet_builder_.SetFrameIds(frame_id, frame_id);
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ cast_header_.frame_id = frame_id;
+ cast_header_.reference_frame_id = frame_id;
+ ExpectParsesPacket();
+ }
}
-TEST_F(RtpParserTest, ParseCastPacketWithoutReference) {
- cast_header_.is_reference = false;
- cast_header_.reference_frame_id = 0;
- packet_builder_.SetReferenceFrameId(kRefFrameId, false);
-
- // Build generic data packet.
- uint8 packet[kPacketLength];
- packet_builder_.BuildHeader(packet, kPacketLength);
- // Parse packet as is.
- RtpCastHeader rtp_header;
- rtp_data_->SetExpectedHeader(cast_header_);
- EXPECT_TRUE(rtp_parser_->ParsePacket(packet, kPacketLength, &rtp_header));
+TEST_F(RtpParserTest, ParseExpandingReferenceFrameIdTo32Bits) {
+ const uint32 kMaxFrameId = 1000;
+ const uint32 kMaxBackReferenceOffset = 10;
+ packet_builder_.SetKeyFrame(false);
+ cast_header_.is_key_frame = false;
+ for (uint32 frame_id = kMaxBackReferenceOffset;
+ frame_id <= kMaxFrameId; ++frame_id) {
+ const uint32 reference_frame_id =
+ frame_id - base::RandInt(1, kMaxBackReferenceOffset);
+ packet_builder_.SetFrameIds(frame_id, reference_frame_id);
+ packet_builder_.BuildHeader(packet_, kPacketLength);
+ cast_header_.frame_id = frame_id;
+ cast_header_.reference_frame_id = reference_frame_id;
+ ExpectParsesPacket();
+ }
}
} // namespace cast
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.cc b/chromium/media/cast/rtp_receiver/rtp_receiver.cc
deleted file mode 100644
index 3c804d9bd9b..00000000000
--- a/chromium/media/cast/rtp_receiver/rtp_receiver.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/rtp_receiver/rtp_receiver.h"
-
-#include "base/logging.h"
-#include "media/cast/rtp_receiver/receiver_stats.h"
-#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-#include "net/base/big_endian.h"
-
-namespace media {
-namespace cast {
-
-RtpReceiver::RtpReceiver(base::TickClock* clock,
- const AudioReceiverConfig* audio_config,
- const VideoReceiverConfig* video_config,
- RtpData* incoming_payload_callback) {
- DCHECK(incoming_payload_callback) << "Invalid argument";
- DCHECK(audio_config || video_config) << "Invalid argument";
-
- // Configure parser.
- RtpParserConfig config;
- if (audio_config) {
- config.ssrc = audio_config->incoming_ssrc;
- config.payload_type = audio_config->rtp_payload_type;
- config.audio_codec = audio_config->codec;
- config.audio_channels = audio_config->channels;
- } else {
- config.ssrc = video_config->incoming_ssrc;
- config.payload_type = video_config->rtp_payload_type;
- config.video_codec = video_config->codec;
- }
- stats_.reset(new ReceiverStats(clock));
- parser_.reset(new RtpParser(incoming_payload_callback, config));
-}
-
-RtpReceiver::~RtpReceiver() {}
-
-// static
-uint32 RtpReceiver::GetSsrcOfSender(const uint8* rtcp_buffer, size_t length) {
- DCHECK_GE(length, kMinLengthOfRtp) << "Invalid RTP packet";
- uint32 ssrc_of_sender;
- net::BigEndianReader big_endian_reader(rtcp_buffer, length);
- big_endian_reader.Skip(8); // Skip header
- big_endian_reader.ReadU32(&ssrc_of_sender);
- return ssrc_of_sender;
-}
-
-bool RtpReceiver::ReceivedPacket(const uint8* packet, size_t length) {
- RtpCastHeader rtp_header;
- if (!parser_->ParsePacket(packet, length, &rtp_header)) return false;
-
- stats_->UpdateStatistics(rtp_header);
- return true;
-}
-
-void RtpReceiver::GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost,
- uint32* extended_high_sequence_number,
- uint32* jitter) {
- stats_->GetStatistics(fraction_lost,
- cumulative_lost,
- extended_high_sequence_number,
- jitter);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.gyp b/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
deleted file mode 100644
index b612964c070..00000000000
--- a/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_rtp_receiver',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- ],
- 'sources': [
- 'receiver_stats.cc',
- 'receiver_stats.h',
- 'rtp_receiver.cc',
- 'rtp_receiver.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- 'rtp_parser/rtp_parser.gyp:*',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.h b/chromium/media/cast/rtp_receiver/rtp_receiver.h
deleted file mode 100644
index 5639d7d8c36..00000000000
--- a/chromium/media/cast/rtp_receiver/rtp_receiver.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Interface to the rtp receiver.
-
-#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
-#define MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-
-namespace media {
-namespace cast {
-
-class RtpData {
- public:
- virtual void OnReceivedPayloadData(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader* rtp_header) = 0;
-
- protected:
- virtual ~RtpData() {}
-};
-
-class ReceiverStats;
-class RtpParser;
-
-class RtpReceiver {
- public:
- RtpReceiver(base::TickClock* clock,
- const AudioReceiverConfig* audio_config,
- const VideoReceiverConfig* video_config,
- RtpData* incoming_payload_callback);
- ~RtpReceiver();
-
- static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
-
- bool ReceivedPacket(const uint8* packet, size_t length);
-
- void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter);
-
- private:
- scoped_ptr<ReceiverStats> stats_;
- scoped_ptr<RtpParser> parser_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_H_
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver_defines.cc b/chromium/media/cast/rtp_receiver/rtp_receiver_defines.cc
new file mode 100644
index 00000000000..e42b2b733c7
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver_defines.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+
+namespace media {
+namespace cast {
+
+RtpCastHeader::RtpCastHeader()
+ : marker(false),
+ payload_type(0),
+ sequence_number(0),
+ rtp_timestamp(0),
+ sender_ssrc(0),
+ is_key_frame(false),
+ frame_id(0),
+ packet_id(0),
+ max_packet_id(0),
+ reference_frame_id(0) {}
+
+RtpPayloadFeedback::~RtpPayloadFeedback() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver_defines.h b/chromium/media/cast/rtp_receiver/rtp_receiver_defines.h
index ae957e3ae6b..d907436f489 100644
--- a/chromium/media/cast/rtp_receiver/rtp_receiver_defines.h
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver_defines.h
@@ -8,29 +8,25 @@
#include "base/basictypes.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp_defines.h"
-#include "third_party/webrtc/modules/interface/module_common_types.h"
namespace media {
namespace cast {
-const uint8 kRtpMarkerBitMask = 0x80;
-
struct RtpCastHeader {
- RtpCastHeader() {
- is_key_frame = false;
- frame_id = 0;
- packet_id = 0;
- max_packet_id = 0;
- is_reference = false;
- reference_frame_id = 0;
- }
- webrtc::WebRtcRTPHeader webrtc;
+ RtpCastHeader();
+
+ // Elements from RTP packet header.
+ bool marker;
+ uint8 payload_type;
+ uint16 sequence_number;
+ uint32 rtp_timestamp;
+ uint32 sender_ssrc;
+
+ // Elements from Cast header (at beginning of RTP payload).
bool is_key_frame;
uint32 frame_id;
uint16 packet_id;
uint16 max_packet_id;
- bool is_reference; // Set to true if the previous frame is not available,
- // and the reference frame id is available.
uint32 reference_frame_id;
};
@@ -39,7 +35,7 @@ class RtpPayloadFeedback {
virtual void CastFeedback(const RtcpCastMessage& cast_feedback) = 0;
protected:
- virtual ~RtpPayloadFeedback() {}
+ virtual ~RtpPayloadFeedback();
};
} // namespace cast
diff --git a/chromium/media/cast/rtp_timestamp_helper.cc b/chromium/media/cast/rtp_timestamp_helper.cc
new file mode 100644
index 00000000000..3349e7b33fd
--- /dev/null
+++ b/chromium/media/cast/rtp_timestamp_helper.cc
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_timestamp_helper.h"
+
+namespace media {
+namespace cast {
+
+RtpTimestampHelper::RtpTimestampHelper(int frequency)
+ : frequency_(frequency),
+ last_rtp_timestamp_(0) {
+}
+
+RtpTimestampHelper::~RtpTimestampHelper() {
+}
+
+bool RtpTimestampHelper::GetCurrentTimeAsRtpTimestamp(
+ const base::TimeTicks& now, uint32* rtp_timestamp) const {
+ if (last_capture_time_.is_null())
+ return false;
+ const base::TimeDelta elapsed_time = now - last_capture_time_;
+ const int64 rtp_delta =
+ elapsed_time * frequency_ / base::TimeDelta::FromSeconds(1);
+ *rtp_timestamp = last_rtp_timestamp_ + static_cast<uint32>(rtp_delta);
+ return true;
+}
+
+void RtpTimestampHelper::StoreLatestTime(
+ base::TimeTicks capture_time, uint32 rtp_timestamp) {
+ last_capture_time_ = capture_time;
+ last_rtp_timestamp_ = rtp_timestamp;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_timestamp_helper.h b/chromium/media/cast/rtp_timestamp_helper.h
new file mode 100644
index 00000000000..b9c650c5063
--- /dev/null
+++ b/chromium/media/cast/rtp_timestamp_helper.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
+#define MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// A helper class used to convert current time ticks into an RTP timestamp.
+class RtpTimestampHelper {
+ public:
+ explicit RtpTimestampHelper(int frequency);
+ ~RtpTimestampHelper();
+
+ // Computes an RTP timestamp for |now| by extrapolating from the last
+ // stored capture time and RTP timestamp.
+ // Returns true if |rtp_timestamp| was computed.
+ bool GetCurrentTimeAsRtpTimestamp(const base::TimeTicks& now,
+ uint32* rtp_timestamp) const;
+
+ // Store the capture time and the corresponding RTP timestamp for the
+ // last encoded frame.
+ void StoreLatestTime(base::TimeTicks capture_time, uint32 rtp_timestamp);
+
+ private:
+ int frequency_;
+ base::TimeTicks last_capture_time_;
+ uint32 last_rtp_timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtpTimestampHelper);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
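
A usage sketch with made-up numbers: at a 90 kHz clock, 10 ms of elapsed time
advances the RTP timestamp by 900 ticks.

  RtpTimestampHelper helper(90000);
  const base::TimeTicks t0 = base::TimeTicks::Now();
  helper.StoreLatestTime(t0, 450000u);  // Last encoded frame's timestamp.
  uint32 extrapolated = 0;
  if (helper.GetCurrentTimeAsRtpTimestamp(
          t0 + base::TimeDelta::FromMilliseconds(10), &extrapolated)) {
    // 10 ms at 90 kHz is 900 ticks, so extrapolated == 450900.
  }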
diff --git a/chromium/media/cast/test/transport/transport.gyp b/chromium/media/cast/test/transport/transport.gyp
deleted file mode 100644
index 79be3d28e6d..00000000000
--- a/chromium/media/cast/test/transport/transport.gyp
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_transport',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'sources': [
- 'transport.cc',
- 'transport.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/net/net.gyp:net',
- ],
- },
- ],
-} \ No newline at end of file
diff --git a/chromium/media/cast/test/utility/utility.gyp b/chromium/media/cast/test/utility/utility.gyp
deleted file mode 100644
index 021c2d9a416..00000000000
--- a/chromium/media/cast/test/utility/utility.gyp
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_test_utility',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
-
- ],
- 'sources': [
- 'input_helper.cc',
- 'input_helper.h',
- '<(DEPTH)/media/cast/test/audio_utility.cc',
- '<(DEPTH)/media/cast/test/fake_task_runner.cc',
- '<(DEPTH)/media/cast/test/video_utility.cc',
- ], # source
- },
- ],
-} \ No newline at end of file
diff --git a/chromium/media/cast/transport/cast_transport_config.cc b/chromium/media/cast/transport/cast_transport_config.cc
new file mode 100644
index 00000000000..16e90347137
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_config.cc
@@ -0,0 +1,82 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/cast_transport_config.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+namespace {
+const int kDefaultRtpMaxDelayMs = 100;
+} // namespace
+
+RtpConfig::RtpConfig()
+ : ssrc(0),
+ max_delay_ms(kDefaultRtpMaxDelayMs),
+ payload_type(0) {}
+
+RtpConfig::~RtpConfig() {}
+
+CastTransportRtpConfig::CastTransportRtpConfig()
+ : max_outstanding_frames(-1) {}
+
+CastTransportRtpConfig::~CastTransportRtpConfig() {}
+
+CastTransportAudioConfig::CastTransportAudioConfig()
+ : codec(kOpus), frequency(0), channels(0) {}
+
+CastTransportAudioConfig::~CastTransportAudioConfig() {}
+
+CastTransportVideoConfig::CastTransportVideoConfig() : codec(kVp8) {}
+
+CastTransportVideoConfig::~CastTransportVideoConfig() {}
+
+EncodedFrame::EncodedFrame()
+ : dependency(UNKNOWN_DEPENDENCY),
+ frame_id(0),
+ referenced_frame_id(0),
+ rtp_timestamp(0) {}
+
+EncodedFrame::~EncodedFrame() {}
+
+void EncodedFrame::CopyMetadataTo(EncodedFrame* dest) const {
+ DCHECK(dest);
+ dest->dependency = this->dependency;
+ dest->frame_id = this->frame_id;
+ dest->referenced_frame_id = this->referenced_frame_id;
+ dest->rtp_timestamp = this->rtp_timestamp;
+ dest->reference_time = this->reference_time;
+}
+
+RtcpSenderInfo::RtcpSenderInfo()
+ : ntp_seconds(0),
+ ntp_fraction(0),
+ rtp_timestamp(0),
+ send_packet_count(0),
+ send_octet_count(0) {}
+RtcpSenderInfo::~RtcpSenderInfo() {}
+
+RtcpReportBlock::RtcpReportBlock()
+ : remote_ssrc(0),
+ media_ssrc(0),
+ fraction_lost(0),
+ cumulative_lost(0),
+ extended_high_sequence_number(0),
+ jitter(0),
+ last_sr(0),
+ delay_since_last_sr(0) {}
+RtcpReportBlock::~RtcpReportBlock() {}
+
+RtcpDlrrReportBlock::RtcpDlrrReportBlock()
+ : last_rr(0), delay_since_last_rr(0) {}
+RtcpDlrrReportBlock::~RtcpDlrrReportBlock() {}
+
+SendRtcpFromRtpSenderData::SendRtcpFromRtpSenderData()
+ : packet_type_flags(0), sending_ssrc(0) {}
+SendRtcpFromRtpSenderData::~SendRtcpFromRtpSenderData() {}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/cast_transport_config.h b/chromium/media/cast/transport/cast_transport_config.h
new file mode 100644
index 00000000000..96b771acb99
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_config.h
@@ -0,0 +1,221 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_CONFIG_H_
+#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_CONFIG_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/stl_util.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "net/base/ip_endpoint.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+enum RtcpMode {
+ kRtcpCompound, // Compound RTCP mode is described by RFC 4585.
+ kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
+};
+
+enum VideoCodec {
+ kUnknownVideoCodec,
+ kFakeSoftwareVideo,
+ kVp8,
+ kH264,
+ kVideoCodecLast = kH264
+};
+
+enum AudioCodec {
+ kUnknownAudioCodec,
+ kOpus,
+ kPcm16,
+ kAudioCodecLast = kPcm16
+};
+
+struct RtpConfig {
+ RtpConfig();
+ ~RtpConfig();
+ uint32 ssrc;
+ int max_delay_ms;
+ int payload_type;
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesBlockSize.
+};
+
+struct CastTransportRtpConfig {
+ CastTransportRtpConfig();
+ ~CastTransportRtpConfig();
+ RtpConfig config;
+ int max_outstanding_frames;
+};
+
+struct CastTransportAudioConfig {
+ CastTransportAudioConfig();
+ ~CastTransportAudioConfig();
+
+ CastTransportRtpConfig rtp;
+ AudioCodec codec;
+ int frequency;
+ int channels;
+};
+
+struct CastTransportVideoConfig {
+ CastTransportVideoConfig();
+ ~CastTransportVideoConfig();
+
+ CastTransportRtpConfig rtp;
+ VideoCodec codec;
+};
+
+// A combination of metadata and data for one encoded frame. This can contain
+// audio data, video data, or other encoded data.
+struct EncodedFrame {
+ enum Dependency {
+ // "null" value, used to indicate whether |dependency| has been set.
+ UNKNOWN_DEPENDENCY,
+
+ // Not decodable without the reference frame indicated by
+ // |referenced_frame_id|.
+ DEPENDENT,
+
+ // Independently decodable.
+ INDEPENDENT,
+
+ // Independently decodable, and no future frames will depend on any frames
+ // before this one.
+ KEY,
+
+ DEPENDENCY_LAST = KEY
+ };
+
+ EncodedFrame();
+ ~EncodedFrame();
+
+ // Convenience accessors to data as an array of uint8 elements.
+ const uint8* bytes() const {
+ return reinterpret_cast<uint8*>(string_as_array(
+ const_cast<std::string*>(&data)));
+ }
+ uint8* mutable_bytes() {
+ return reinterpret_cast<uint8*>(string_as_array(&data));
+ }
+
+ // Copies all data members except |data| to |dest|.
+ // Does not modify |dest->data|.
+ void CopyMetadataTo(EncodedFrame* dest) const;
+
+ // This frame's dependency relationship with respect to other frames.
+ Dependency dependency;
+
+ // The label associated with this frame. Implies an ordering relative to
+ // other frames in the same stream.
+ uint32 frame_id;
+
+ // The label associated with the frame upon which this frame depends. If
+ // this frame does not require any other frame in order to become decodable
+ // (e.g., key frames), |referenced_frame_id| must equal |frame_id|.
+ uint32 referenced_frame_id;
+
+ // The stream timestamp, on the timeline of the signal data. For example, RTP
+ // timestamps for audio are usually defined as the total number of audio
+ // samples encoded in all prior frames. A playback system uses this value to
+ // detect gaps in the stream, and otherwise stretch the signal to match
+ // playout targets.
+ uint32 rtp_timestamp;
+
+ // The common reference clock timestamp for this frame. This value originates
+ // from a sender and is used to provide lip synchronization between streams in
+ // a receiver. Thus, in the sender context, this is set to the time at which
+ // the frame was captured/recorded. In the receiver context, this is set to
+ // the target playout time. Over a sequence of frames, this time value is
+ // expected to drift with respect to the elapsed time implied by the RTP
+ // timestamps; and it may not necessarily increment with precise regularity.
+ base::TimeTicks reference_time;
+
+ // The encoded signal data.
+ std::string data;
+};
+
+typedef std::vector<uint8> Packet;
+typedef scoped_refptr<base::RefCountedData<Packet> > PacketRef;
+typedef std::vector<PacketRef> PacketList;
+
+typedef base::Callback<void(scoped_ptr<Packet> packet)> PacketReceiverCallback;
+
+class PacketSender {
+ public:
+ // Send a packet to the network. Returns false if the network is blocked,
+ // in which case the caller should wait for |cb| to be called. It is not
+ // allowed to call SendPacket again until |cb| has been called. Any other
+ // errors that occur will be reported through side channels; in such cases,
+ // this function will return true, indicating that the channel is not
+ // blocked.
+ virtual bool SendPacket(PacketRef packet, const base::Closure& cb) = 0;
+ virtual ~PacketSender() {}
+};
+
+struct RtcpSenderInfo {
+ RtcpSenderInfo();
+ ~RtcpSenderInfo();
+ // The first three members are used for lip sync; the first two for RTT.
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ uint32 rtp_timestamp;
+ uint32 send_packet_count;
+ size_t send_octet_count;
+};
+
+struct RtcpReportBlock {
+ RtcpReportBlock();
+ ~RtcpReportBlock();
+ uint32 remote_ssrc; // SSRC of sender of this report.
+ uint32 media_ssrc; // SSRC of the RTP packet sender.
+ uint8 fraction_lost;
+ uint32 cumulative_lost; // 24 bits valid.
+ uint32 extended_high_sequence_number;
+ uint32 jitter;
+ uint32 last_sr;
+ uint32 delay_since_last_sr;
+};
+
+struct RtcpDlrrReportBlock {
+ RtcpDlrrReportBlock();
+ ~RtcpDlrrReportBlock();
+ uint32 last_rr;
+ uint32 delay_since_last_rr;
+};
+
+// This is only needed because IPC messages don't support more than
+// 5 arguments.
+struct SendRtcpFromRtpSenderData {
+ SendRtcpFromRtpSenderData();
+ ~SendRtcpFromRtpSenderData();
+ uint32 packet_type_flags;
+ uint32 sending_ssrc;
+ std::string c_name;
+ uint32 ntp_seconds;
+ uint32 ntp_fraction;
+ uint32 rtp_timestamp;
+};
+
+inline bool operator==(RtcpSenderInfo lhs, RtcpSenderInfo rhs) {
+ return lhs.ntp_seconds == rhs.ntp_seconds &&
+ lhs.ntp_fraction == rhs.ntp_fraction &&
+ lhs.rtp_timestamp == rhs.rtp_timestamp &&
+ lhs.send_packet_count == rhs.send_packet_count &&
+ lhs.send_octet_count == rhs.send_octet_count;
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_CONFIG_H_
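
A short sketch of how EncodedFrame metadata is meant to be populated, per the
comments above (illustrative values only):

  transport::EncodedFrame key;
  key.dependency = transport::EncodedFrame::KEY;
  key.frame_id = 10;
  key.referenced_frame_id = 10;  // Key frames must reference themselves.
  key.rtp_timestamp = 450000u;

  transport::EncodedFrame delta;
  key.CopyMetadataTo(&delta);  // Copies everything except |data|.
  delta.dependency = transport::EncodedFrame::DEPENDENT;
  delta.frame_id = 11;
  delta.referenced_frame_id = 10;  // Not decodable without frame 10.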
diff --git a/chromium/media/cast/transport/cast_transport_defines.h b/chromium/media/cast/transport/cast_transport_defines.h
new file mode 100644
index 00000000000..a34f7c539ab
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_defines.h
@@ -0,0 +1,169 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
+#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+// TODO(mikhal): Implement and add more types.
+enum CastTransportStatus {
+ TRANSPORT_AUDIO_UNINITIALIZED = 0,
+ TRANSPORT_VIDEO_UNINITIALIZED,
+ TRANSPORT_AUDIO_INITIALIZED,
+ TRANSPORT_VIDEO_INITIALIZED,
+ TRANSPORT_INVALID_CRYPTO_CONFIG,
+ TRANSPORT_SOCKET_ERROR,
+ CAST_TRANSPORT_STATUS_LAST = TRANSPORT_SOCKET_ERROR
+};
+
+const size_t kMaxIpPacketSize = 1500;
+// Each uint16 represents one packet id within a cast frame.
+typedef std::set<uint16> PacketIdSet;
+// Each uint8 represents one cast frame.
+typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
+
+// Crypto.
+const size_t kAesBlockSize = 16;
+const size_t kAesKeySize = 16;
+
+inline std::string GetAesNonce(uint32 frame_id, const std::string& iv_mask) {
+ std::string aes_nonce(kAesBlockSize, 0);
+
+ // Serializing frame_id in big-endian order (aes_nonce[8] is the most
+ // significant byte of frame_id).
+ aes_nonce[11] = frame_id & 0xff;
+ aes_nonce[10] = (frame_id >> 8) & 0xff;
+ aes_nonce[9] = (frame_id >> 16) & 0xff;
+ aes_nonce[8] = (frame_id >> 24) & 0xff;
+
+ for (size_t i = 0; i < kAesBlockSize; ++i) {
+ aes_nonce[i] ^= iv_mask[i];
+ }
+ return aes_nonce;
+}
+
+// Rtcp defines.
+
+enum RtcpPacketFields {
+ kPacketTypeLow = 194, // SMPTE time-code mapping.
+ kPacketTypeInterArrivalJitterReport = 195,
+ kPacketTypeSenderReport = 200,
+ kPacketTypeReceiverReport = 201,
+ kPacketTypeSdes = 202,
+ kPacketTypeBye = 203,
+ kPacketTypeApplicationDefined = 204,
+ kPacketTypeGenericRtpFeedback = 205,
+ kPacketTypePayloadSpecific = 206,
+ kPacketTypeXr = 207,
+ kPacketTypeHigh = 210, // Port Mapping.
+};
+
+enum RtcpPacketField {
+ kRtcpSr = 0x0002,
+ kRtcpRr = 0x0004,
+ kRtcpBye = 0x0008,
+ kRtcpPli = 0x0010,
+ kRtcpNack = 0x0020,
+ kRtcpFir = 0x0040,
+ kRtcpSrReq = 0x0200,
+ kRtcpDlrr = 0x0400,
+ kRtcpRrtr = 0x0800,
+ kRtcpRpsi = 0x8000,
+ kRtcpRemb = 0x10000,
+ kRtcpCast = 0x20000,
+ kRtcpSenderLog = 0x40000,
+ kRtcpReceiverLog = 0x80000,
+};
+
+// Each uint16 represents one packet id within a cast frame.
+typedef std::set<uint16> PacketIdSet;
+// Each uint8 represents one cast frame.
+typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
+
+// TODO(miu): UGLY IN-LINE DEFINITION IN HEADER FILE! Move to appropriate
+// location, separated into .h and .cc files.
+class FrameIdWrapHelper {
+ public:
+ FrameIdWrapHelper()
+ : first_(true), frame_id_wrap_count_(0), range_(kLowRange) {}
+
+ uint32 MapTo32bitsFrameId(const uint8 over_the_wire_frame_id) {
+ if (first_) {
+ first_ = false;
+ if (over_the_wire_frame_id == 0xff) {
+ // Special case for startup.
+ return kStartFrameId;
+ }
+ }
+
+ uint32 wrap_count = frame_id_wrap_count_;
+ switch (range_) {
+ case kLowRange:
+ if (over_the_wire_frame_id > kLowRangeThreshold &&
+ over_the_wire_frame_id < kHighRangeThreshold) {
+ range_ = kMiddleRange;
+ }
+ if (over_the_wire_frame_id >= kHighRangeThreshold) {
+ // Wrap count was incremented in High->Low transition, but this frame
+ // is 'old', actually from before the wrap count got incremented.
+ --wrap_count;
+ }
+ break;
+ case kMiddleRange:
+ if (over_the_wire_frame_id >= kHighRangeThreshold) {
+ range_ = kHighRange;
+ }
+ break;
+ case kHighRange:
+ if (over_the_wire_frame_id <= kLowRangeThreshold) {
+ // Wrap-around detected.
+ range_ = kLowRange;
+ ++frame_id_wrap_count_;
+ // This frame triggered the wrap-around, so the local wrap count must be
+ // incremented as well to match |frame_id_wrap_count_|.
+ ++wrap_count;
+ }
+ break;
+ }
+ return (wrap_count << 8) + over_the_wire_frame_id;
+ }
+
+ private:
+ enum Range { kLowRange, kMiddleRange, kHighRange, };
+
+ static const uint8 kLowRangeThreshold = 63;
+ static const uint8 kHighRangeThreshold = 192;
+ static const uint32 kStartFrameId = UINT32_C(0xffffffff);
+
+ bool first_;
+ uint32 frame_id_wrap_count_;
+ Range range_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameIdWrapHelper);
+};
+
+inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
+ base::TimeTicks zero_time;
+ base::TimeDelta recorded_delta = time_ticks - zero_time;
+ // Video timestamps use a 90 kHz clock.
+ return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
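
The wrap tracking in FrameIdWrapHelper can be exercised directly; this mirrors
the Rollover unit test further below:

  transport::FrameIdWrapHelper helper;
  uint32 id = 0;
  // Feed 0..255 and then 0 again; the low byte wraps, so the helper bumps
  // its wrap count and the 257th call yields (1 << 8) + 0 == 256.
  for (int i = 0; i <= 256; ++i)
    id = helper.MapTo32bitsFrameId(static_cast<uint8>(i));
  // id == 256u here.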
diff --git a/chromium/media/cast/transport/cast_transport_sender.h b/chromium/media/cast/transport/cast_transport_sender.h
new file mode 100644
index 00000000000..e88f2f4f098
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_sender.h
@@ -0,0 +1,113 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is the main interface for the cast transport sender. It accepts encoded
+// frames (both audio and video), encrypts their encoded data, packetizes them
+// and feeds them into a transport (e.g., UDP).
+
+// Construction of the Cast Sender and the Cast Transport Sender should be done
+// in the following order:
+// 1. Create CastTransportSender.
+// 2. Create CastSender (accepts CastTransportSender as an input).
+// 3. Call CastTransportSender::SetPacketReceiver to ensure that the packets
+// received by the CastTransportSender will be sent to the CastSender.
+// Steps 2 and 3 can be done in either order.
+
+// Destruction: The CastTransportSender is assumed to be valid as long as the
+// CastSender is alive. Therefore the CastSender should be destructed before the
+// CastTransportSender.
+// This also works when the CastSender acts as a receiver for the RTCP packets
+// due to the weak pointers in the ReceivedPacket method in cast_sender_impl.cc.
+
+#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_SENDER_H_
+#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_SENDER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_defines.h"
+
+namespace net {
+class NetLog;
+} // namespace net
+
+namespace media {
+namespace cast {
+namespace transport {
+
+// Following the initialization of either audio or video, an initialization
+// status will be reported via this callback.
+typedef base::Callback<void(CastTransportStatus status)>
+ CastTransportStatusCallback;
+
+typedef base::Callback<void(const std::vector<PacketEvent>&)>
+ BulkRawEventsCallback;
+
+// The application should only use this class from the transport thread.
+class CastTransportSender : public base::NonThreadSafe {
+ public:
+ static scoped_ptr<CastTransportSender> Create(
+ net::NetLog* net_log,
+ base::TickClock* clock,
+ const net::IPEndPoint& remote_end_point,
+ const CastTransportStatusCallback& status_callback,
+ const BulkRawEventsCallback& raw_events_callback,
+ base::TimeDelta raw_events_callback_interval,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner);
+
+ virtual ~CastTransportSender() {}
+
+ // Audio/Video initialization.
+ // Encoded frames cannot be transmitted until the relevant initialize method
+ // is called. Usually called by CastSender.
+ virtual void InitializeAudio(const CastTransportAudioConfig& config) = 0;
+
+ virtual void InitializeVideo(const CastTransportVideoConfig& config) = 0;
+
+ // Sets the Cast packet receiver. Should be called after creation on the
+ // Cast sender. Packets won't be received until this function is called.
+ virtual void SetPacketReceiver(
+ const PacketReceiverCallback& packet_receiver) = 0;
+
+ // The following two functions handle the encoded media frames (audio and
+ // video) to be processed.
+ // Frames will be encrypted, packetized and transmitted to the network.
+ virtual void InsertCodedAudioFrame(const EncodedFrame& audio_frame) = 0;
+ virtual void InsertCodedVideoFrame(const EncodedFrame& video_frame) = 0;
+
+ // Builds an RTCP packet and sends it to the network.
+ // |ntp_seconds|, |ntp_fraction| and |rtp_timestamp| are used in the
+ // RTCP Sender Report.
+ virtual void SendRtcpFromRtpSender(uint32 packet_type_flags,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction,
+ uint32 rtp_timestamp,
+ const RtcpDlrrReportBlock& dlrr,
+ uint32 sending_ssrc,
+ const std::string& c_name) = 0;
+
+ // Retransmission request.
+ // |missing_packets| includes the list of frames and packets in each
+ // frame to be re-transmitted.
+ // |cancel_rtx_if_not_in_list| is used as an optimization to cancel
+ // pending re-transmission requests for packets not listed in
+ // |missing_packets|. If the requested packet(s) were sent recently
+ // (how long is specified by |dedupe_window|) then this re-transmit
+ // will be ignored.
+ virtual void ResendPackets(
+ bool is_audio,
+ const MissingFramesAndPacketsMap& missing_packets,
+ bool cancel_rtx_if_not_in_list,
+ base::TimeDelta dedupe_window) = 0;
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_SENDER_H_
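
Putting the construction-order comment above into code, a hedged lifecycle
sketch (|clock|, |remote_endpoint|, |task_runner|, and the callbacks are
assumed to exist; they are not defined in this patch excerpt):

  scoped_ptr<transport::CastTransportSender> sender =
      transport::CastTransportSender::Create(
          NULL /* net_log */, clock, remote_endpoint, status_callback,
          transport::BulkRawEventsCallback(), base::TimeDelta(), task_runner);
  sender->SetPacketReceiver(packet_receiver_callback);
  transport::CastTransportAudioConfig audio_config;
  sender->InitializeAudio(audio_config);      // Status arrives via callback.
  sender->InsertCodedAudioFrame(audio_frame); // Encrypt, packetize, send.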
diff --git a/chromium/media/cast/transport/cast_transport_sender_impl.cc b/chromium/media/cast/transport/cast_transport_sender_impl.cc
new file mode 100644
index 00000000000..6fd848f27bf
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_sender_impl.cc
@@ -0,0 +1,212 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/cast_transport_sender_impl.h"
+
+#include "base/single_thread_task_runner.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "net/base/net_util.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+scoped_ptr<CastTransportSender> CastTransportSender::Create(
+ net::NetLog* net_log,
+ base::TickClock* clock,
+ const net::IPEndPoint& remote_end_point,
+ const CastTransportStatusCallback& status_callback,
+ const BulkRawEventsCallback& raw_events_callback,
+ base::TimeDelta raw_events_callback_interval,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner) {
+ return scoped_ptr<CastTransportSender>(
+ new CastTransportSenderImpl(net_log,
+ clock,
+ remote_end_point,
+ status_callback,
+ raw_events_callback,
+ raw_events_callback_interval,
+ transport_task_runner.get(),
+ NULL));
+}
+
+CastTransportSenderImpl::CastTransportSenderImpl(
+ net::NetLog* net_log,
+ base::TickClock* clock,
+ const net::IPEndPoint& remote_end_point,
+ const CastTransportStatusCallback& status_callback,
+ const BulkRawEventsCallback& raw_events_callback,
+ base::TimeDelta raw_events_callback_interval,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
+ PacketSender* external_transport)
+ : clock_(clock),
+ status_callback_(status_callback),
+ transport_task_runner_(transport_task_runner),
+ transport_(external_transport ? NULL
+ : new UdpTransport(net_log,
+ transport_task_runner,
+ net::IPEndPoint(),
+ remote_end_point,
+ status_callback)),
+ logging_(),
+ pacer_(clock,
+ &logging_,
+ external_transport ? external_transport : transport_.get(),
+ transport_task_runner),
+ rtcp_builder_(&pacer_),
+ raw_events_callback_(raw_events_callback) {
+ DCHECK(clock_);
+ if (!raw_events_callback_.is_null()) {
+ DCHECK(raw_events_callback_interval > base::TimeDelta());
+ event_subscriber_.reset(new SimpleEventSubscriber);
+ logging_.AddRawEventSubscriber(event_subscriber_.get());
+ raw_events_timer_.Start(FROM_HERE,
+ raw_events_callback_interval,
+ this,
+ &CastTransportSenderImpl::SendRawEvents);
+ }
+ if (transport_) {
+ // The default DSCP value for Cast is AF41, which gives it higher
+ // priority than other traffic.
+ transport_->SetDscp(net::DSCP_AF41);
+ }
+}
+
+CastTransportSenderImpl::~CastTransportSenderImpl() {
+ if (event_subscriber_.get())
+ logging_.RemoveRawEventSubscriber(event_subscriber_.get());
+}
+
+void CastTransportSenderImpl::InitializeAudio(
+ const CastTransportAudioConfig& config) {
+ LOG_IF(WARNING, config.rtp.config.aes_key.empty() ||
+ config.rtp.config.aes_iv_mask.empty())
+ << "Unsafe to send audio with encryption DISABLED.";
+ if (!audio_encryptor_.Initialize(config.rtp.config.aes_key,
+ config.rtp.config.aes_iv_mask)) {
+ status_callback_.Run(TRANSPORT_AUDIO_UNINITIALIZED);
+ return;
+ }
+ audio_sender_.reset(new RtpSender(clock_, transport_task_runner_, &pacer_));
+ if (audio_sender_->InitializeAudio(config)) {
+ pacer_.RegisterAudioSsrc(config.rtp.config.ssrc);
+ status_callback_.Run(TRANSPORT_AUDIO_INITIALIZED);
+ } else {
+ audio_sender_.reset();
+ status_callback_.Run(TRANSPORT_AUDIO_UNINITIALIZED);
+ }
+}
+
+void CastTransportSenderImpl::InitializeVideo(
+ const CastTransportVideoConfig& config) {
+ LOG_IF(WARNING, config.rtp.config.aes_key.empty() ||
+ config.rtp.config.aes_iv_mask.empty())
+ << "Unsafe to send video with encryption DISABLED.";
+ if (!video_encryptor_.Initialize(config.rtp.config.aes_key,
+ config.rtp.config.aes_iv_mask)) {
+ status_callback_.Run(TRANSPORT_VIDEO_UNINITIALIZED);
+ return;
+ }
+ video_sender_.reset(new RtpSender(clock_, transport_task_runner_, &pacer_));
+ if (video_sender_->InitializeVideo(config)) {
+ pacer_.RegisterVideoSsrc(config.rtp.config.ssrc);
+ status_callback_.Run(TRANSPORT_VIDEO_INITIALIZED);
+ } else {
+ video_sender_.reset();
+ status_callback_.Run(TRANSPORT_VIDEO_UNINITIALIZED);
+ }
+}
+
+void CastTransportSenderImpl::SetPacketReceiver(
+ const PacketReceiverCallback& packet_receiver) {
+ transport_->StartReceiving(packet_receiver);
+}
+
+namespace {
+void EncryptAndSendFrame(const EncodedFrame& frame,
+ TransportEncryptionHandler* encryptor,
+ RtpSender* sender) {
+ if (encryptor->initialized()) {
+ EncodedFrame encrypted_frame;
+ frame.CopyMetadataTo(&encrypted_frame);
+ if (encryptor->Encrypt(frame.frame_id, frame.data, &encrypted_frame.data)) {
+ sender->SendFrame(encrypted_frame);
+ } else {
+ LOG(ERROR) << "Encryption failed. Not sending frame with ID "
+ << frame.frame_id;
+ }
+ } else {
+ sender->SendFrame(frame);
+ }
+}
+} // namespace
+
+void CastTransportSenderImpl::InsertCodedAudioFrame(
+ const EncodedFrame& audio_frame) {
+ DCHECK(audio_sender_) << "Audio sender uninitialized";
+ EncryptAndSendFrame(audio_frame, &audio_encryptor_, audio_sender_.get());
+}
+
+void CastTransportSenderImpl::InsertCodedVideoFrame(
+ const EncodedFrame& video_frame) {
+ DCHECK(video_sender_) << "Video sender uninitialized";
+ EncryptAndSendFrame(video_frame, &video_encryptor_, video_sender_.get());
+}
+
+void CastTransportSenderImpl::SendRtcpFromRtpSender(
+ uint32 packet_type_flags,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction,
+ uint32 rtp_timestamp,
+ const RtcpDlrrReportBlock& dlrr,
+ uint32 sending_ssrc,
+ const std::string& c_name) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = ntp_seconds;
+ sender_info.ntp_fraction = ntp_fraction;
+ sender_info.rtp_timestamp = rtp_timestamp;
+ if (audio_sender_ && audio_sender_->ssrc() == sending_ssrc) {
+ sender_info.send_packet_count = audio_sender_->send_packet_count();
+ sender_info.send_octet_count = audio_sender_->send_octet_count();
+ } else if (video_sender_ && video_sender_->ssrc() == sending_ssrc) {
+ sender_info.send_packet_count = video_sender_->send_packet_count();
+ sender_info.send_octet_count = video_sender_->send_octet_count();
+ } else {
+ LOG(ERROR) << "Sending RTCP with an invalid SSRC.";
+ return;
+ }
+ rtcp_builder_.SendRtcpFromRtpSender(
+ packet_type_flags, sender_info, dlrr, sending_ssrc, c_name);
+}
+
+void CastTransportSenderImpl::ResendPackets(
+ bool is_audio,
+ const MissingFramesAndPacketsMap& missing_packets,
+ bool cancel_rtx_if_not_in_list,
+ base::TimeDelta dedupe_window) {
+ if (is_audio) {
+ DCHECK(audio_sender_) << "Audio sender uninitialized";
+ audio_sender_->ResendPackets(missing_packets,
+ cancel_rtx_if_not_in_list,
+ dedupe_window);
+ } else {
+ DCHECK(video_sender_) << "Video sender uninitialized";
+ video_sender_->ResendPackets(missing_packets,
+ cancel_rtx_if_not_in_list,
+ dedupe_window);
+ }
+}
+
+void CastTransportSenderImpl::SendRawEvents() {
+ DCHECK(event_subscriber_.get());
+ DCHECK(!raw_events_callback_.is_null());
+ std::vector<PacketEvent> packet_events;
+ event_subscriber_->GetPacketEventsAndReset(&packet_events);
+ raw_events_callback_.Run(packet_events);
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/cast_transport_sender_impl.h b/chromium/media/cast/transport/cast_transport_sender_impl.h
new file mode 100644
index 00000000000..035ef844b68
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_sender_impl.h
@@ -0,0 +1,110 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_IMPL_H_
+#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_IMPL_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_sender.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/transport/rtcp/rtcp_builder.h"
+#include "media/cast/transport/rtp_sender/rtp_sender.h"
+#include "media/cast/transport/utility/transport_encryption_handler.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+class CastTransportSenderImpl : public CastTransportSender {
+ public:
+ // |external_transport| is only used for testing.
+ // Note that SetPacketReceiver does not work if an external
+ // transport is provided.
+ // |raw_events_callback|: Raw events will be returned on this callback,
+ // which will be invoked every |raw_events_callback_interval|.
+ // This can be a null callback, i.e., if the user is not interested in raw
+ // events.
+ // |raw_events_callback_interval|: This can be |base::TimeDelta()| if
+ // |raw_events_callback| is a null callback.
+ CastTransportSenderImpl(
+ net::NetLog* net_log,
+ base::TickClock* clock,
+ const net::IPEndPoint& remote_end_point,
+ const CastTransportStatusCallback& status_callback,
+ const BulkRawEventsCallback& raw_events_callback,
+ base::TimeDelta raw_events_callback_interval,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
+ PacketSender* external_transport);
+
+ virtual ~CastTransportSenderImpl();
+
+ virtual void InitializeAudio(const CastTransportAudioConfig& config) OVERRIDE;
+
+ virtual void InitializeVideo(const CastTransportVideoConfig& config) OVERRIDE;
+
+ // CastTransportSender implementation.
+ virtual void SetPacketReceiver(const PacketReceiverCallback& packet_receiver)
+ OVERRIDE;
+
+ virtual void InsertCodedAudioFrame(const EncodedFrame& audio_frame) OVERRIDE;
+ virtual void InsertCodedVideoFrame(const EncodedFrame& video_frame) OVERRIDE;
+
+ virtual void SendRtcpFromRtpSender(uint32 packet_type_flags,
+ uint32 ntp_seconds,
+ uint32 ntp_fraction,
+ uint32 rtp_timestamp,
+ const RtcpDlrrReportBlock& dlrr,
+ uint32 sending_ssrc,
+ const std::string& c_name) OVERRIDE;
+
+ virtual void ResendPackets(bool is_audio,
+ const MissingFramesAndPacketsMap& missing_packets,
+ bool cancel_rtx_if_not_in_list,
+ base::TimeDelta dedupe_window)
+ OVERRIDE;
+
+ private:
+ // If |raw_events_callback_| is non-null, calls it with events collected
+ // by |event_subscriber_| since last call.
+ void SendRawEvents();
+
+ base::TickClock* clock_; // Not owned by this class.
+ CastTransportStatusCallback status_callback_;
+ scoped_refptr<base::SingleThreadTaskRunner> transport_task_runner_;
+
+ scoped_ptr<UdpTransport> transport_;
+ LoggingImpl logging_;
+ PacedSender pacer_;
+ RtcpBuilder rtcp_builder_;
+ scoped_ptr<RtpSender> audio_sender_;
+ scoped_ptr<RtpSender> video_sender_;
+
+ // Encrypts data in EncodedFrames before they are sent. Note that it's
+ // important for the encryption to happen here, in code that would execute in
+ // the main browser process, for security reasons. This helps to mitigate
+ // the damage that could be caused by a compromised renderer process.
+ TransportEncryptionHandler audio_encryptor_;
+ TransportEncryptionHandler video_encryptor_;
+
+ // This is non-null iff |raw_events_callback_| is non-null.
+ scoped_ptr<SimpleEventSubscriber> event_subscriber_;
+ base::RepeatingTimer<CastTransportSenderImpl> raw_events_timer_;
+
+ BulkRawEventsCallback raw_events_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastTransportSenderImpl);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_IMPL_H_
diff --git a/chromium/media/cast/transport/cast_transport_sender_impl_unittest.cc b/chromium/media/cast/transport/cast_transport_sender_impl_unittest.cc
new file mode 100644
index 00000000000..67eb39a47aa
--- /dev/null
+++ b/chromium/media/cast/transport/cast_transport_sender_impl_unittest.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_sender_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+static const int64 kStartMillisecond = INT64_C(12345678900000);
+
+class FakePacketSender : public transport::PacketSender {
+ public:
+ FakePacketSender() {}
+
+ virtual bool SendPacket(PacketRef packet, const base::Closure& cb) OVERRIDE {
+ return true;
+ }
+};
+
+class CastTransportSenderImplTest : public ::testing::Test {
+ protected:
+ CastTransportSenderImplTest()
+ : num_times_callback_called_(0) {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ task_runner_ = new test::FakeSingleThreadTaskRunner(&testing_clock_);
+ }
+
+ virtual ~CastTransportSenderImplTest() {}
+
+ void InitWithoutLogging() {
+ transport_sender_.reset(
+ new CastTransportSenderImpl(NULL,
+ &testing_clock_,
+ net::IPEndPoint(),
+ base::Bind(&UpdateCastTransportStatus),
+ BulkRawEventsCallback(),
+ base::TimeDelta(),
+ task_runner_,
+ &transport_));
+ task_runner_->RunTasks();
+ }
+
+ void InitWithLogging() {
+ transport_sender_.reset(new CastTransportSenderImpl(
+ NULL,
+ &testing_clock_,
+ net::IPEndPoint(),
+ base::Bind(&UpdateCastTransportStatus),
+ base::Bind(&CastTransportSenderImplTest::LogRawEvents,
+ base::Unretained(this)),
+ base::TimeDelta::FromMilliseconds(10),
+ task_runner_,
+ &transport_));
+ task_runner_->RunTasks();
+ }
+
+ void LogRawEvents(const std::vector<PacketEvent>& packet_events) {
+ num_times_callback_called_++;
+ if (num_times_callback_called_ == 3) {
+ run_loop_.Quit();
+ }
+ }
+
+ static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_ptr<CastTransportSenderImpl> transport_sender_;
+ FakePacketSender transport_;
+ base::MessageLoopForIO message_loop_;
+ base::RunLoop run_loop_;
+ int num_times_callback_called_;
+};
+
+TEST_F(CastTransportSenderImplTest, InitWithoutLogging) {
+ InitWithoutLogging();
+ message_loop_.PostDelayedTask(FROM_HERE,
+ run_loop_.QuitClosure(),
+ base::TimeDelta::FromMilliseconds(50));
+ run_loop_.Run();
+ EXPECT_EQ(0, num_times_callback_called_);
+}
+
+TEST_F(CastTransportSenderImplTest, InitWithLogging) {
+ InitWithLogging();
+ message_loop_.PostDelayedTask(FROM_HERE,
+ run_loop_.QuitClosure(),
+ base::TimeDelta::FromMilliseconds(50));
+ run_loop_.Run();
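+  // The repeating 10 ms raw-events timer should have fired several times
+  // during the 50 ms wait, so the callback runs more than once.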
+ EXPECT_GT(num_times_callback_called_, 1);
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/net/frame_id_wrap_helper_test.cc b/chromium/media/cast/transport/frame_id_wrap_helper_test.cc
index f6b89b01d22..3a2060d3aaf 100644
--- a/chromium/media/cast/net/frame_id_wrap_helper_test.cc
+++ b/chromium/media/cast/transport/frame_id_wrap_helper_test.cc
@@ -3,10 +3,11 @@
// found in the LICENSE file.
#include <gtest/gtest.h>
-#include "media/cast/net/cast_net_defines.h"
+#include "media/cast/transport/cast_transport_defines.h"
namespace media {
namespace cast {
+namespace transport {
class FrameIdWrapHelperTest : public ::testing::Test {
protected:
@@ -14,6 +15,8 @@ class FrameIdWrapHelperTest : public ::testing::Test {
virtual ~FrameIdWrapHelperTest() {}
FrameIdWrapHelper frame_id_wrap_helper_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameIdWrapHelperTest);
};
TEST_F(FrameIdWrapHelperTest, FirstFrame) {
@@ -23,8 +26,8 @@ TEST_F(FrameIdWrapHelperTest, FirstFrame) {
TEST_F(FrameIdWrapHelperTest, Rollover) {
uint32 new_frame_id = 0u;
for (int i = 0; i <= 256; ++i) {
- new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(
- static_cast<uint8>(i));
+ new_frame_id =
+ frame_id_wrap_helper_.MapTo32bitsFrameId(static_cast<uint8>(i));
}
EXPECT_EQ(256u, new_frame_id);
}
@@ -32,8 +35,8 @@ TEST_F(FrameIdWrapHelperTest, Rollover) {
TEST_F(FrameIdWrapHelperTest, OutOfOrder) {
uint32 new_frame_id = 0u;
for (int i = 0; i < 255; ++i) {
- new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(
- static_cast<uint8>(i));
+ new_frame_id =
+ frame_id_wrap_helper_.MapTo32bitsFrameId(static_cast<uint8>(i));
}
EXPECT_EQ(254u, new_frame_id);
new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(0u);
@@ -44,5 +47,6 @@ TEST_F(FrameIdWrapHelperTest, OutOfOrder) {
EXPECT_EQ(257u, new_frame_id);
}
+} // namespace transport
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc b/chromium/media/cast/transport/pacing/mock_paced_packet_sender.cc
index 6caf8f6390e..5e325f02335 100644
--- a/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc
+++ b/chromium/media/cast/transport/pacing/mock_paced_packet_sender.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+#include "media/cast/transport/pacing/mock_paced_packet_sender.h"
namespace media {
namespace cast {
+namespace transport {
-MockPacedPacketSender::MockPacedPacketSender() {
-}
+MockPacedPacketSender::MockPacedPacketSender() {}
-MockPacedPacketSender::~MockPacedPacketSender() {
-}
+MockPacedPacketSender::~MockPacedPacketSender() {}
+} // namespace transport
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/transport/pacing/mock_paced_packet_sender.h b/chromium/media/cast/transport/pacing/mock_paced_packet_sender.h
new file mode 100644
index 00000000000..20b76470351
--- /dev/null
+++ b/chromium/media/cast/transport/pacing/mock_paced_packet_sender.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_PACING_MOCK_PACED_PACKET_SENDER_H_
+#define MEDIA_CAST_TRANSPORT_PACING_MOCK_PACED_PACKET_SENDER_H_
+
+#include "media/cast/transport/pacing/paced_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+class MockPacedPacketSender : public PacedPacketSender {
+ public:
+ MockPacedPacketSender();
+ virtual ~MockPacedPacketSender();
+
+ MOCK_METHOD1(SendPackets, bool(const SendPacketVector& packets));
+ MOCK_METHOD2(ResendPackets, bool(const SendPacketVector& packets,
+ base::TimeDelta dedupe_window));
+ MOCK_METHOD2(SendRtcpPacket, bool(unsigned int ssrc, PacketRef packet));
+ MOCK_METHOD1(CancelSendingPacket, void(const PacketKey& packet_key));
+};
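+
+// Example use in a test body (illustrative sketch, not part of this patch):
+//
+//   MockPacedPacketSender mock_pacer;
+//   EXPECT_CALL(mock_pacer, SendPackets(testing::_))
+//       .WillOnce(testing::Return(true));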
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_PACING_MOCK_PACED_PACKET_SENDER_H_
diff --git a/chromium/media/cast/transport/pacing/paced_sender.cc b/chromium/media/cast/transport/pacing/paced_sender.cc
new file mode 100644
index 00000000000..20cbde85be9
--- /dev/null
+++ b/chromium/media/cast/transport/pacing/paced_sender.cc
@@ -0,0 +1,260 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/pacing/paced_sender.h"
+
+#include "base/big_endian.h"
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+namespace {
+
+static const int64 kPacingIntervalMs = 10;
+// Each frame will be split into no more than kPacingMaxBurstsPerFrame
+// bursts of packets.
+static const size_t kPacingMaxBurstsPerFrame = 3;
+static const size_t kTargetBurstSize = 10;
+static const size_t kMaxBurstSize = 20;
+static const size_t kMaxDedupeWindowMs = 500;
+
+} // namespace
+
+// static
+PacketKey PacedPacketSender::MakePacketKey(const base::TimeTicks& ticks,
+ uint32 ssrc,
+ uint16 packet_id) {
+ return std::make_pair(ticks, std::make_pair(ssrc, packet_id));
+}
+
+PacedSender::PacedSender(
+ base::TickClock* clock,
+ LoggingImpl* logging,
+ PacketSender* transport,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner)
+ : clock_(clock),
+ logging_(logging),
+ transport_(transport),
+ transport_task_runner_(transport_task_runner),
+ audio_ssrc_(0),
+ video_ssrc_(0),
+ max_burst_size_(kTargetBurstSize),
+ next_max_burst_size_(kTargetBurstSize),
+ next_next_max_burst_size_(kTargetBurstSize),
+ current_burst_size_(0),
+ state_(State_Unblocked),
+ weak_factory_(this) {
+}
+
+PacedSender::~PacedSender() {}
+
+void PacedSender::RegisterAudioSsrc(uint32 audio_ssrc) {
+ audio_ssrc_ = audio_ssrc;
+}
+
+void PacedSender::RegisterVideoSsrc(uint32 video_ssrc) {
+ video_ssrc_ = video_ssrc;
+}
+
+bool PacedSender::SendPackets(const SendPacketVector& packets) {
+ if (packets.empty()) {
+ return true;
+ }
+ for (size_t i = 0; i < packets.size(); i++) {
+ packet_list_[packets[i].first] =
+ make_pair(PacketType_Normal, packets[i].second);
+ }
+ if (state_ == State_Unblocked) {
+ SendStoredPackets();
+ }
+ return true;
+}
+
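+// Queues |packets| for retransmission, skipping any packet that was already
+// sent within |dedupe_window|. e.g. with a 10 ms window, a NACK for a packet
+// that went out 3 ms ago is logged as PACKET_RTX_REJECTED and dropped rather
+// than resent.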
+bool PacedSender::ResendPackets(const SendPacketVector& packets,
+ base::TimeDelta dedupe_window) {
+ if (packets.empty()) {
+ return true;
+ }
+ base::TimeTicks now = clock_->NowTicks();
+ for (size_t i = 0; i < packets.size(); i++) {
+ std::map<PacketKey, base::TimeTicks>::const_iterator j =
+ sent_time_.find(packets[i].first);
+
+ if (j != sent_time_.end() && now - j->second < dedupe_window) {
+ LogPacketEvent(packets[i].second->data, PACKET_RTX_REJECTED);
+ continue;
+ }
+
+ packet_list_[packets[i].first] =
+ make_pair(PacketType_Resend, packets[i].second);
+ }
+ if (state_ == State_Unblocked) {
+ SendStoredPackets();
+ }
+ return true;
+}
+
+bool PacedSender::SendRtcpPacket(uint32 ssrc, PacketRef packet) {
+ if (state_ == State_TransportBlocked) {
+ packet_list_[PacedPacketSender::MakePacketKey(base::TimeTicks(), ssrc, 0)] =
+ make_pair(PacketType_RTCP, packet);
+ } else {
+ // We pass the RTCP packets straight through.
+ if (!transport_->SendPacket(
+ packet,
+ base::Bind(&PacedSender::SendStoredPackets,
+ weak_factory_.GetWeakPtr()))) {
+ state_ = State_TransportBlocked;
+ }
+  }
+ return true;
+}
+
+void PacedSender::CancelSendingPacket(const PacketKey& packet_key) {
+ packet_list_.erase(packet_key);
+}
+
+PacketRef PacedSender::GetNextPacket(PacketType* packet_type,
+ PacketKey* packet_key) {
+ std::map<PacketKey, std::pair<PacketType, PacketRef> >::iterator i;
+ i = packet_list_.begin();
+ DCHECK(i != packet_list_.end());
+ *packet_type = i->second.first;
+ *packet_key = i->first;
+ PacketRef ret = i->second.second;
+ packet_list_.erase(i);
+ return ret;
+}
+
+bool PacedSender::empty() const {
+ return packet_list_.empty();
+}
+
+size_t PacedSender::size() const {
+ return packet_list_.size();
+}
+
+// This function can be called from three places:
+// 1. User called one of the Send* functions and we were in an unblocked state.
+// 2. state_ == State_TransportBlocked and the transport is calling us to
+// let us know that it's ok to send again.
+// 3. state_ == State_BurstFull and there are still packets to send. In this
+// case we called PostDelayedTask on this function to start a new burst.
+void PacedSender::SendStoredPackets() {
+ State previous_state = state_;
+ state_ = State_Unblocked;
+ if (empty()) {
+ return;
+ }
+
+ base::TimeTicks now = clock_->NowTicks();
+  // I don't actually trust that PostDelayedTask(x - now) will mean that
+ // now >= x when the call happens, so check if the previous state was
+ // State_BurstFull too.
+ if (now >= burst_end_ || previous_state == State_BurstFull) {
+ // Start a new burst.
+ current_burst_size_ = 0;
+ burst_end_ = now + base::TimeDelta::FromMilliseconds(kPacingIntervalMs);
+
+ // The goal here is to try to send out the queued packets over the next
+ // three bursts, while trying to keep the burst size below 10 if possible.
+ // We have some evidence that sending more than 12 packets in a row doesn't
+ // work very well, but we don't actually know why yet. Sending out packets
+ // sooner is better than sending out packets later as that gives us more
+ // time to re-send them if needed. So if we have less than 30 packets, just
+ // send 10 at a time. If we have less than 60 packets, send n / 3 at a time.
+ // if we have more than 60, we send 20 at a time. 20 packets is ~24Mbit/s
+ // which is more bandwidth than the cast library should need, and sending
+ // out more data per second is unlikely to be helpful.
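+    // Worked example: with 45 packets queued, 45 / 3 = 15, so the burst
+    // size is min(20, max(10, 15)) = 15; with 90 queued, 90 / 3 = 30 is
+    // capped at kMaxBurstSize = 20.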
+ size_t max_burst_size = std::min(
+ kMaxBurstSize,
+ std::max(kTargetBurstSize, size() / kPacingMaxBurstsPerFrame));
+
+ // If the queue is long, issue a warning. Try to limit the number of
+ // warnings issued by only issuing the warning when the burst size
+ // grows. Otherwise we might get 100 warnings per second.
+ if (max_burst_size > next_next_max_burst_size_ && size() > 100) {
+      LOG(WARNING) << "Packet queue is very long: " << size();
+ }
+
+ max_burst_size_ = std::max(next_max_burst_size_, max_burst_size);
+ next_max_burst_size_ = std::max(next_next_max_burst_size_, max_burst_size);
+ next_next_max_burst_size_ = max_burst_size;
+ }
+
+ base::Closure cb = base::Bind(&PacedSender::SendStoredPackets,
+ weak_factory_.GetWeakPtr());
+ while (!empty()) {
+ if (current_burst_size_ >= max_burst_size_) {
+ transport_task_runner_->PostDelayedTask(FROM_HERE,
+ cb,
+ burst_end_ - now);
+ state_ = State_BurstFull;
+ return;
+ }
+ PacketType packet_type;
+ PacketKey packet_key;
+ PacketRef packet = GetNextPacket(&packet_type, &packet_key);
+ sent_time_[packet_key] = now;
+ sent_time_buffer_[packet_key] = now;
+
+ switch (packet_type) {
+ case PacketType_Resend:
+ LogPacketEvent(packet->data, PACKET_RETRANSMITTED);
+ break;
+ case PacketType_Normal:
+ LogPacketEvent(packet->data, PACKET_SENT_TO_NETWORK);
+ break;
+ case PacketType_RTCP:
+ break;
+ }
+ if (!transport_->SendPacket(packet, cb)) {
+ state_ = State_TransportBlocked;
+ return;
+ }
+ current_burst_size_++;
+ }
+ // Keep ~0.5 seconds of data (1000 packets)
+ if (sent_time_buffer_.size() >=
+ kMaxBurstSize * kMaxDedupeWindowMs / kPacingIntervalMs) {
+ sent_time_.swap(sent_time_buffer_);
+ sent_time_buffer_.clear();
+ }
+ DCHECK_LE(sent_time_buffer_.size(),
+ kMaxBurstSize * kMaxDedupeWindowMs / kPacingIntervalMs);
+ DCHECK_LE(sent_time_.size(),
+ 2 * kMaxBurstSize * kMaxDedupeWindowMs / kPacingIntervalMs);
+ state_ = State_Unblocked;
+}
+
+void PacedSender::LogPacketEvent(const Packet& packet, CastLoggingEvent event) {
+ // Get SSRC from packet and compare with the audio_ssrc / video_ssrc to see
+ // if the packet is audio or video.
+ DCHECK_GE(packet.size(), 12u);
+ base::BigEndianReader reader(reinterpret_cast<const char*>(&packet[8]), 4);
+ uint32 ssrc;
+ bool success = reader.ReadU32(&ssrc);
+ DCHECK(success);
+ bool is_audio;
+ if (ssrc == audio_ssrc_) {
+ is_audio = true;
+ } else if (ssrc == video_ssrc_) {
+ is_audio = false;
+ } else {
+ DVLOG(3) << "Got unknown ssrc " << ssrc << " when logging packet event";
+ return;
+ }
+
+ EventMediaType media_type = is_audio ? AUDIO_EVENT : VIDEO_EVENT;
+ logging_->InsertSinglePacketEvent(clock_->NowTicks(), event, media_type,
+ packet);
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/pacing/paced_sender.h b/chromium/media/cast/transport/pacing/paced_sender.h
new file mode 100644
index 00000000000..9fc0c8b8b85
--- /dev/null
+++ b/chromium/media/cast/transport/pacing/paced_sender.h
@@ -0,0 +1,147 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_PACING_PACED_SENDER_H_
+#define MEDIA_CAST_TRANSPORT_PACING_PACED_SENDER_H_
+
+#include <list>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/transport/udp_transport.h"
+
+namespace media {
+namespace cast {
+
+class LoggingImpl;
+
+namespace transport {
+
+// Use std::pair for free comparison operators.
+// { capture_time, ssrc, packet_id }
+// The PacketKey is designed to meet two criteria:
+// 1. When we re-send the same packet again, we can use the packet key
+// to identify it so that we can de-duplicate packets in the queue.
+// 2. The sort order of the PacketKey determines the order that packets
+// are sent out. Using the capture_time as the first member basically
+// means that older packets are sent first.
+typedef std::pair<base::TimeTicks, std::pair<uint32, uint16> > PacketKey;
+typedef std::vector<std::pair<PacketKey, PacketRef> > SendPacketVector;
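+//
+// For example, RTCP packets are queued under a default-constructed (null)
+// base::TimeTicks (see SendRtcpPacket), so their keys sort ahead of every
+// media packet key and RTCP is drained first.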
+
+// We have this pure virtual class to enable mocking.
+class PacedPacketSender {
+ public:
+ virtual bool SendPackets(const SendPacketVector& packets) = 0;
+ virtual bool ResendPackets(const SendPacketVector& packets,
+ base::TimeDelta dedupe_window) = 0;
+ virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) = 0;
+ virtual void CancelSendingPacket(const PacketKey& packet_key) = 0;
+
+ virtual ~PacedPacketSender() {}
+
+ static PacketKey MakePacketKey(const base::TimeTicks& ticks,
+ uint32 ssrc,
+ uint16 packet_id);
+};
+
+class PacedSender : public PacedPacketSender,
+ public base::NonThreadSafe,
+ public base::SupportsWeakPtr<PacedSender> {
+ public:
+ // The |external_transport| should only be used by the Cast receiver and for
+ // testing.
+ PacedSender(
+ base::TickClock* clock,
+ LoggingImpl* logging,
+ PacketSender* external_transport,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner);
+
+ virtual ~PacedSender();
+
+ // These must be called before non-RTCP packets are sent.
+ void RegisterAudioSsrc(uint32 audio_ssrc);
+ void RegisterVideoSsrc(uint32 video_ssrc);
+
+ // PacedPacketSender implementation.
+ virtual bool SendPackets(const SendPacketVector& packets) OVERRIDE;
+ virtual bool ResendPackets(const SendPacketVector& packets,
+ base::TimeDelta dedupe_window) OVERRIDE;
+ virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) OVERRIDE;
+ virtual void CancelSendingPacket(const PacketKey& packet_key) OVERRIDE;
+
+ private:
+ // Actually sends the packets to the transport.
+ void SendStoredPackets();
+ void LogPacketEvent(const Packet& packet, CastLoggingEvent event);
+
+ enum PacketType {
+ PacketType_RTCP,
+ PacketType_Resend,
+ PacketType_Normal
+ };
+ enum State {
+ // In an unblocked state, we can send more packets.
+ // We have to check the current time against |burst_end_| to see if we are
+ // appending to the current burst or if we can start a new one.
+ State_Unblocked,
+ // In this state, we are waiting for a callback from the udp transport.
+ // This happens when the OS-level buffer is full. Once we receive the
+ // callback, we go to State_Unblocked and see if we can write more packets
+ // to the current burst. (Or the next burst if enough time has passed.)
+ State_TransportBlocked,
+ // Once we've written enough packets for a time slice, we go into this
+    // state and post a delayed task to ourselves to wake up when we can
+ // send more data.
+ State_BurstFull
+ };
+
+ bool empty() const;
+ size_t size() const;
+
+ // Returns the next packet to send. RTCP packets have highest priority,
+ // resend packets have second highest priority and then comes everything
+ // else.
+ PacketRef GetNextPacket(PacketType* packet_type,
+ PacketKey* packet_key);
+
+ base::TickClock* const clock_; // Not owned by this class.
+ LoggingImpl* const logging_; // Not owned by this class.
+ PacketSender* transport_; // Not owned by this class.
+ scoped_refptr<base::SingleThreadTaskRunner> transport_task_runner_;
+ uint32 audio_ssrc_;
+ uint32 video_ssrc_;
+ std::map<PacketKey, std::pair<PacketType, PacketRef> > packet_list_;
+ std::map<PacketKey, base::TimeTicks> sent_time_;
+ std::map<PacketKey, base::TimeTicks> sent_time_buffer_;
+
+ // Maximum burst size for the next three bursts.
+ size_t max_burst_size_;
+ size_t next_max_burst_size_;
+ size_t next_next_max_burst_size_;
+ // Number of packets already sent in the current burst.
+ size_t current_burst_size_;
+ // This is when the current burst ends.
+ base::TimeTicks burst_end_;
+
+ State state_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<PacedSender> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(PacedSender);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_PACING_PACED_SENDER_H_
diff --git a/chromium/media/cast/transport/pacing/paced_sender_unittest.cc b/chromium/media/cast/transport/pacing/paced_sender_unittest.cc
new file mode 100644
index 00000000000..5e24fca4b56
--- /dev/null
+++ b/chromium/media/cast/transport/pacing/paced_sender_unittest.cc
@@ -0,0 +1,351 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/big_endian.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+using testing::_;
+
+static const uint8 kValue = 123;
+static const size_t kSize1 = 100;
+static const size_t kSize2 = 101;
+static const size_t kSize3 = 102;
+static const size_t kSize4 = 103;
+static const size_t kNackSize = 104;
+static const int64 kStartMillisecond = INT64_C(12345678900000);
+static const uint32 kVideoSsrc = 0x1234;
+static const uint32 kAudioSsrc = 0x5678;
+
+class TestPacketSender : public PacketSender {
+ public:
+ TestPacketSender() {}
+
+ virtual bool SendPacket(PacketRef packet, const base::Closure& cb) OVERRIDE {
+ EXPECT_FALSE(expected_packet_size_.empty());
+ size_t expected_packet_size = expected_packet_size_.front();
+ expected_packet_size_.pop_front();
+ EXPECT_EQ(expected_packet_size, packet->data.size());
+ return true;
+ }
+
+ void AddExpectedSize(int expected_packet_size, int repeat_count) {
+ for (int i = 0; i < repeat_count; ++i) {
+ expected_packet_size_.push_back(expected_packet_size);
+ }
+ }
+
+ public:
+ std::list<int> expected_packet_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestPacketSender);
+};
+
+class PacedSenderTest : public ::testing::Test {
+ protected:
+ PacedSenderTest() {
+ logging_.AddRawEventSubscriber(&subscriber_);
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ task_runner_ = new test::FakeSingleThreadTaskRunner(&testing_clock_);
+ paced_sender_.reset(new PacedSender(
+ &testing_clock_, &logging_, &mock_transport_, task_runner_));
+ paced_sender_->RegisterAudioSsrc(kAudioSsrc);
+ paced_sender_->RegisterVideoSsrc(kVideoSsrc);
+ }
+
+ virtual ~PacedSenderTest() {
+ logging_.RemoveRawEventSubscriber(&subscriber_);
+ }
+
+ static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ NOTREACHED();
+ }
+
+ SendPacketVector CreateSendPacketVector(size_t packet_size,
+ int num_of_packets_in_frame,
+ bool audio) {
+ DCHECK_GE(packet_size, 12u);
+ SendPacketVector packets;
+ base::TimeTicks frame_tick = testing_clock_.NowTicks();
+ // Advance the clock so that we don't get the same frame_tick
+ // next time this function is called.
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1));
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ PacketKey key = PacedPacketSender::MakePacketKey(
+ frame_tick,
+ audio ? kAudioSsrc : kVideoSsrc, // ssrc
+ i);
+
+ PacketRef packet(new base::RefCountedData<Packet>);
+ packet->data.resize(packet_size, kValue);
+      // Write the ssrc into the packet so that the event logger can
+      // classify it as an audio or video packet.
+ base::BigEndianWriter writer(
+ reinterpret_cast<char*>(&packet->data[8]), 4);
+ bool success = writer.WriteU32(audio ? kAudioSsrc : kVideoSsrc);
+ DCHECK(success);
+ packets.push_back(std::make_pair(key, packet));
+ }
+ return packets;
+ }
+
+ // Use this function to drain the packet list in PacedSender without having
+ // to test the pacing implementation details.
+ bool RunUntilEmpty(int max_tries) {
+ for (int i = 0; i < max_tries; i++) {
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+ task_runner_->RunTasks();
+ if (mock_transport_.expected_packet_size_.empty())
+ return true;
+ }
+
+ return mock_transport_.expected_packet_size_.empty();
+ }
+
+ LoggingImpl logging_;
+ SimpleEventSubscriber subscriber_;
+ base::SimpleTestTickClock testing_clock_;
+ TestPacketSender mock_transport_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_ptr<PacedSender> paced_sender_;
+
+ DISALLOW_COPY_AND_ASSIGN(PacedSenderTest);
+};
+
+TEST_F(PacedSenderTest, PassThroughRtcp) {
+ mock_transport_.AddExpectedSize(kSize1, 2);
+ SendPacketVector packets = CreateSendPacketVector(kSize1, 1, true);
+
+ EXPECT_TRUE(paced_sender_->SendPackets(packets));
+ EXPECT_TRUE(paced_sender_->ResendPackets(packets, base::TimeDelta()));
+
+ mock_transport_.AddExpectedSize(kSize2, 1);
+ Packet tmp(kSize2, kValue);
+ EXPECT_TRUE(paced_sender_->SendRtcpPacket(
+ 1,
+ new base::RefCountedData<Packet>(tmp)));
+}
+
+TEST_F(PacedSenderTest, BasicPace) {
+ int num_of_packets = 27;
+ SendPacketVector packets = CreateSendPacketVector(kSize1,
+ num_of_packets,
+ false);
+
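+  // With kTargetBurstSize = 10, the 27 queued packets should drain in
+  // bursts of 10, 10, and 7 at 10 ms pacing intervals.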
+ mock_transport_.AddExpectedSize(kSize1, 10);
+ EXPECT_TRUE(paced_sender_->SendPackets(packets));
+
+ // Check that we get the next burst.
+ mock_transport_.AddExpectedSize(kSize1, 10);
+
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // If we call process too early make sure we don't send any packets.
+ timeout = base::TimeDelta::FromMilliseconds(5);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Check that we get the next burst.
+ mock_transport_.AddExpectedSize(kSize1, 7);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Check that we don't get any more packets.
+ EXPECT_TRUE(RunUntilEmpty(3));
+
+ std::vector<PacketEvent> packet_events;
+ subscriber_.GetPacketEventsAndReset(&packet_events);
+ EXPECT_EQ(num_of_packets, static_cast<int>(packet_events.size()));
+ int sent_to_network_event_count = 0;
+ for (std::vector<PacketEvent>::iterator it = packet_events.begin();
+ it != packet_events.end();
+ ++it) {
+ if (it->type == PACKET_SENT_TO_NETWORK)
+ sent_to_network_event_count++;
+ else
+ FAIL() << "Got unexpected event type " << CastLoggingToString(it->type);
+ }
+ EXPECT_EQ(num_of_packets, sent_to_network_event_count);
+}
+
+TEST_F(PacedSenderTest, PaceWithNack) {
+  // Tests what happens when we get multiple NACK requests for a fully lost
+  // frame just as we have sent the first packets of a frame.
+ int num_of_packets_in_frame = 12;
+ int num_of_packets_in_nack = 12;
+
+ SendPacketVector nack_packets =
+ CreateSendPacketVector(kNackSize, num_of_packets_in_nack, false);
+
+ SendPacketVector first_frame_packets =
+ CreateSendPacketVector(kSize1, num_of_packets_in_frame, false);
+
+ SendPacketVector second_frame_packets =
+ CreateSendPacketVector(kSize2, num_of_packets_in_frame, true);
+
+  // Check that the first burst of the frame goes out on the wire.
+ mock_transport_.AddExpectedSize(kSize1, 10);
+ EXPECT_TRUE(paced_sender_->SendPackets(first_frame_packets));
+
+ // Add first NACK request.
+ EXPECT_TRUE(paced_sender_->ResendPackets(nack_packets, base::TimeDelta()));
+
+ // Check that we get the first NACK burst.
+ mock_transport_.AddExpectedSize(kNackSize, 10);
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Add second NACK request.
+ EXPECT_TRUE(paced_sender_->ResendPackets(nack_packets, base::TimeDelta()));
+
+ // Check that we get the next NACK burst.
+ mock_transport_.AddExpectedSize(kNackSize, 10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // End of NACK plus two packets from the oldest frame.
+ // Note that two of the NACKs have been de-duped.
+ mock_transport_.AddExpectedSize(kNackSize, 2);
+ mock_transport_.AddExpectedSize(kSize1, 2);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Add second frame.
+ // Make sure we don't delay the second frame due to the previous packets.
+ mock_transport_.AddExpectedSize(kSize2, 10);
+ EXPECT_TRUE(paced_sender_->SendPackets(second_frame_packets));
+
+ // Last packets of frame 2.
+ mock_transport_.AddExpectedSize(kSize2, 2);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // No more packets.
+ EXPECT_TRUE(RunUntilEmpty(5));
+
+ std::vector<PacketEvent> packet_events;
+ subscriber_.GetPacketEventsAndReset(&packet_events);
+ int expected_video_network_event_count = num_of_packets_in_frame;
+ int expected_video_retransmitted_event_count = 2 * num_of_packets_in_nack;
+ expected_video_retransmitted_event_count -= 2; // 2 packets deduped
+ int expected_audio_network_event_count = num_of_packets_in_frame;
+ EXPECT_EQ(expected_video_network_event_count +
+ expected_video_retransmitted_event_count +
+ expected_audio_network_event_count,
+ static_cast<int>(packet_events.size()));
+ int audio_network_event_count = 0;
+ int video_network_event_count = 0;
+ int video_retransmitted_event_count = 0;
+ for (std::vector<PacketEvent>::iterator it = packet_events.begin();
+ it != packet_events.end();
+ ++it) {
+ if (it->type == PACKET_SENT_TO_NETWORK) {
+ if (it->media_type == VIDEO_EVENT)
+ video_network_event_count++;
+ else
+ audio_network_event_count++;
+ } else if (it->type == PACKET_RETRANSMITTED) {
+ if (it->media_type == VIDEO_EVENT)
+ video_retransmitted_event_count++;
+ } else {
+ FAIL() << "Got unexpected event type " << CastLoggingToString(it->type);
+ }
+ }
+ EXPECT_EQ(expected_audio_network_event_count, audio_network_event_count);
+ EXPECT_EQ(expected_video_network_event_count, video_network_event_count);
+ EXPECT_EQ(expected_video_retransmitted_event_count,
+ video_retransmitted_event_count);
+}
+
+TEST_F(PacedSenderTest, PaceWith60fps) {
+  // Tests pacing at 60fps: a new frame arrives roughly every 16 ms while
+  // packets from earlier frames are still being drained.
+ int num_of_packets_in_frame = 17;
+
+ SendPacketVector first_frame_packets =
+ CreateSendPacketVector(kSize1, num_of_packets_in_frame, false);
+
+ SendPacketVector second_frame_packets =
+ CreateSendPacketVector(kSize2, num_of_packets_in_frame, false);
+
+ SendPacketVector third_frame_packets =
+ CreateSendPacketVector(kSize3, num_of_packets_in_frame, false);
+
+ SendPacketVector fourth_frame_packets =
+ CreateSendPacketVector(kSize4, num_of_packets_in_frame, false);
+
+ base::TimeDelta timeout_10ms = base::TimeDelta::FromMilliseconds(10);
+
+  // Check that the first burst of the frame goes out on the wire.
+ mock_transport_.AddExpectedSize(kSize1, 10);
+ EXPECT_TRUE(paced_sender_->SendPackets(first_frame_packets));
+
+ mock_transport_.AddExpectedSize(kSize1, 7);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
+
+ // Add second frame, after 16 ms.
+ mock_transport_.AddExpectedSize(kSize2, 3);
+ EXPECT_TRUE(paced_sender_->SendPackets(second_frame_packets));
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(4));
+
+ mock_transport_.AddExpectedSize(kSize2, 10);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ mock_transport_.AddExpectedSize(kSize2, 4);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(3));
+
+ // Add third frame, after 33 ms.
+ mock_transport_.AddExpectedSize(kSize3, 6);
+ EXPECT_TRUE(paced_sender_->SendPackets(third_frame_packets));
+
+ mock_transport_.AddExpectedSize(kSize3, 10);
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(7));
+ task_runner_->RunTasks();
+
+ // Add fourth frame, after 50 ms.
+ EXPECT_TRUE(paced_sender_->SendPackets(fourth_frame_packets));
+
+ mock_transport_.AddExpectedSize(kSize3, 1);
+ mock_transport_.AddExpectedSize(kSize4, 9);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ mock_transport_.AddExpectedSize(kSize4, 8);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ // No more packets.
+ EXPECT_TRUE(RunUntilEmpty(5));
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/rtcp/rtcp_builder.cc b/chromium/media/cast/transport/rtcp/rtcp_builder.cc
new file mode 100644
index 00000000000..b8875fc96bd
--- /dev/null
+++ b/chromium/media/cast/transport/rtcp/rtcp_builder.cc
@@ -0,0 +1,197 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/rtcp/rtcp_builder.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/big_endian.h"
+#include "base/logging.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+RtcpBuilder::RtcpBuilder(PacedSender* const outgoing_transport)
+ : transport_(outgoing_transport),
+ ssrc_(0) {
+}
+
+RtcpBuilder::~RtcpBuilder() {}
+
+void RtcpBuilder::SendRtcpFromRtpSender(
+ uint32 packet_type_flags,
+ const RtcpSenderInfo& sender_info,
+ const RtcpDlrrReportBlock& dlrr,
+ uint32 sending_ssrc,
+ const std::string& c_name) {
+ if (packet_type_flags & kRtcpRr ||
+ packet_type_flags & kRtcpPli ||
+ packet_type_flags & kRtcpRrtr ||
+ packet_type_flags & kRtcpCast ||
+ packet_type_flags & kRtcpReceiverLog ||
+ packet_type_flags & kRtcpRpsi ||
+ packet_type_flags & kRtcpRemb ||
+ packet_type_flags & kRtcpNack) {
+ NOTREACHED() << "Invalid argument";
+ }
+ ssrc_ = sending_ssrc;
+ c_name_ = c_name;
+ PacketRef packet(new base::RefCountedData<Packet>);
+ packet->data.reserve(kMaxIpPacketSize);
+ if (packet_type_flags & kRtcpSr) {
+ if (!BuildSR(sender_info, &packet->data)) return;
+ if (!BuildSdec(&packet->data)) return;
+ }
+ if (packet_type_flags & kRtcpBye) {
+ if (!BuildBye(&packet->data)) return;
+ }
+ if (packet_type_flags & kRtcpDlrr) {
+ if (!BuildDlrrRb(dlrr, &packet->data)) return;
+ }
+ if (packet->data.empty())
+ return; // Sanity - don't send empty packets.
+
+ transport_->SendRtcpPacket(ssrc_, packet);
+}
+
+bool RtcpBuilder::BuildSR(const RtcpSenderInfo& sender_info,
+ Packet* packet) const {
+ // Sender report.
+ size_t start_size = packet->size();
+ if (start_size + 52 > kMaxIpPacketSize) {
+ DLOG(FATAL) << "Not enough buffer space";
+ return false;
+ }
+
+ uint16 number_of_rows = 6;
+ packet->resize(start_size + 28);
+
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 28);
+ big_endian_writer.WriteU8(0x80);
+ big_endian_writer.WriteU8(kPacketTypeSenderReport);
+ big_endian_writer.WriteU16(number_of_rows);
+ big_endian_writer.WriteU32(ssrc_);
+ big_endian_writer.WriteU32(sender_info.ntp_seconds);
+ big_endian_writer.WriteU32(sender_info.ntp_fraction);
+ big_endian_writer.WriteU32(sender_info.rtp_timestamp);
+ big_endian_writer.WriteU32(sender_info.send_packet_count);
+ big_endian_writer.WriteU32(static_cast<uint32>(sender_info.send_octet_count));
+ return true;
+}
+
+bool RtcpBuilder::BuildSdec(Packet* packet) const {
+ size_t start_size = packet->size();
+ if (start_size + 12 + c_name_.length() > kMaxIpPacketSize) {
+ DLOG(FATAL) << "Not enough buffer space";
+ return false;
+ }
+
+ // SDES Source Description.
+ packet->resize(start_size + 10);
+
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 10);
+ // We always need to add one SDES CNAME.
+ big_endian_writer.WriteU8(0x80 + 1);
+ big_endian_writer.WriteU8(kPacketTypeSdes);
+
+ // Handle SDES length later on.
+ uint32 sdes_length_position = static_cast<uint32>(start_size) + 3;
+ big_endian_writer.WriteU16(0);
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU8(1); // CNAME = 1
+ big_endian_writer.WriteU8(static_cast<uint8>(c_name_.length()));
+
+ size_t sdes_length = 10 + c_name_.length();
+ packet->insert(packet->end(), c_name_.c_str(),
+ c_name_.c_str() + c_name_.length());
+
+ size_t padding = 0;
+
+ // We must have a zero field even if we have an even multiple of 4 bytes.
+ if ((packet->size() % 4) == 0) {
+ padding++;
+ packet->push_back(0);
+ }
+ while ((packet->size() % 4) != 0) {
+ padding++;
+ packet->push_back(0);
+ }
+ sdes_length += padding;
+
+  // The length field is the packet size in 32-bit words minus one, i.e. the
+  // header word is not counted.
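+  // e.g. the 13-byte CNAME "test@10.1.1.1" gives 10 + 13 = 23 bytes, padded
+  // with one zero byte to 24, so the field is 24 / 4 - 1 = 5.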
+ uint8 buffer_length = static_cast<uint8>((sdes_length / 4) - 1);
+ (*packet)[sdes_length_position] = buffer_length;
+ return true;
+}
+
+bool RtcpBuilder::BuildBye(Packet* packet) const {
+ size_t start_size = packet->size();
+ if (start_size + 8 > kMaxIpPacketSize) {
+ DLOG(FATAL) << "Not enough buffer space";
+ return false;
+ }
+
+ packet->resize(start_size + 8);
+
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 8);
+ big_endian_writer.WriteU8(0x80 + 1);
+ big_endian_writer.WriteU8(kPacketTypeBye);
+ big_endian_writer.WriteU16(1); // Length.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ return true;
+}
+
+/*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |V=2|P|reserved | PT=XR=207 | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SSRC |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | BT=5 | reserved | block length |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ | SSRC_1 (SSRC of first receiver) | sub-
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
+ | last RR (LRR) | 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | delay since last RR (DLRR) |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+*/
+bool RtcpBuilder::BuildDlrrRb(const RtcpDlrrReportBlock& dlrr,
+ Packet* packet) const {
+ size_t start_size = packet->size();
+ if (start_size + 24 > kMaxIpPacketSize) {
+ DLOG(FATAL) << "Not enough buffer space";
+ return false;
+ }
+
+ packet->resize(start_size + 24);
+
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 24);
+ big_endian_writer.WriteU8(0x80);
+ big_endian_writer.WriteU8(kPacketTypeXr);
+ big_endian_writer.WriteU16(5); // Length.
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU8(5); // Add block type.
+ big_endian_writer.WriteU8(0); // Add reserved.
+ big_endian_writer.WriteU16(3); // Block length.
+ big_endian_writer.WriteU32(ssrc_); // Add the media (received RTP) SSRC.
+ big_endian_writer.WriteU32(dlrr.last_rr);
+ big_endian_writer.WriteU32(dlrr.delay_since_last_rr);
+ return true;
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/rtcp/rtcp_builder.h b/chromium/media/cast/transport/rtcp/rtcp_builder.h
new file mode 100644
index 00000000000..f095ae9ee54
--- /dev/null
+++ b/chromium/media/cast/transport/rtcp/rtcp_builder.h
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_RTCP_RTCP_BUILDER_H_
+#define MEDIA_CAST_TRANSPORT_RTCP_RTCP_BUILDER_H_
+
+#include <list>
+#include <string>
+#include <vector>
+
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+class RtcpBuilder {
+ public:
+ explicit RtcpBuilder(PacedSender* const paced_packet_sender);
+
+ virtual ~RtcpBuilder();
+
+ void SendRtcpFromRtpSender(uint32 packet_type_flags,
+ const RtcpSenderInfo& sender_info,
+ const RtcpDlrrReportBlock& dlrr,
+ uint32 ssrc,
+ const std::string& c_name);
+
+ private:
+ bool BuildSR(const RtcpSenderInfo& sender_info, Packet* packet) const;
+ bool BuildSdec(Packet* packet) const;
+ bool BuildBye(Packet* packet) const;
+ bool BuildDlrrRb(const RtcpDlrrReportBlock& dlrr,
+ Packet* packet) const;
+
+ PacedSender* const transport_; // Not owned by this class.
+ uint32 ssrc_;
+ std::string c_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtcpBuilder);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_RTCP_RTCP_BUILDER_H_
diff --git a/chromium/media/cast/transport/rtcp/rtcp_builder_unittest.cc b/chromium/media/cast/transport/rtcp/rtcp_builder_unittest.cc
new file mode 100644
index 00000000000..0322612f27e
--- /dev/null
+++ b/chromium/media/cast/transport/rtcp/rtcp_builder_unittest.cc
@@ -0,0 +1,164 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/rtcp/rtcp_utility.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/transport/rtcp/rtcp_builder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+namespace {
+static const uint32 kSendingSsrc = 0x12345678;
+static const std::string kCName("test@10.1.1.1");
+} // namespace
+
+class TestRtcpTransport : public PacedPacketSender {
+ public:
+ TestRtcpTransport()
+ : expected_packet_length_(0),
+ packet_count_(0) {
+ }
+
+  virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) OVERRIDE {
+    EXPECT_EQ(expected_packet_length_, packet->data.size());
+    EXPECT_EQ(0, memcmp(expected_packet_, &(packet->data[0]),
+                        packet->data.size()));
+    packet_count_++;
+    return true;
+  }
+
+  virtual bool SendPackets(const SendPacketVector& packets) OVERRIDE {
+    return false;
+  }
+
+  virtual bool ResendPackets(const SendPacketVector& packets,
+                             base::TimeDelta dedupe_window) OVERRIDE {
+    return false;
+  }
+
+  virtual void CancelSendingPacket(const PacketKey& packet_key) OVERRIDE {}
+
+ void SetExpectedRtcpPacket(const uint8* rtcp_buffer, size_t length) {
+ expected_packet_length_ = length;
+ memcpy(expected_packet_, rtcp_buffer, length);
+ }
+
+ int packet_count() const { return packet_count_; }
+
+ private:
+ uint8 expected_packet_[kMaxIpPacketSize];
+ size_t expected_packet_length_;
+ int packet_count_;
+};
+
+class RtcpBuilderTest : public ::testing::Test {
+ protected:
+ RtcpBuilderTest()
+ : task_runner_(new test::FakeSingleThreadTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastSenderLoggingConfig())),
+        rtcp_builder_(new RtcpBuilder(&test_transport_)) {
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ TestRtcpTransport test_transport_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<RtcpBuilder> rtcp_builder_;
+};
+
+TEST_F(RtcpBuilderTest, RtcpSenderReport) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name.
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSendingSsrc, 0);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+  RtcpDlrrReportBlock empty_dlrr;
+  rtcp_builder_->SendRtcpFromRtpSender(
+      kRtcpSr, sender_info, empty_dlrr, kSendingSsrc, kCName);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrr) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name + dlrr.
+ TestRtcpPacketBuilder p1;
+ p1.AddSr(kSendingSsrc, 0);
+ p1.AddSdesCname(kSendingSsrc, kCName);
+ p1.AddXrHeader(kSendingSsrc);
+ p1.AddXrDlrrBlock(kSendingSsrc);
+ test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
+
+ RtcpDlrrReportBlock dlrr_rb;
+ dlrr_rb.last_rr = kLastRr;
+ dlrr_rb.delay_since_last_rr = kDelayLastRr;
+
+  rtcp_builder_->SendRtcpFromRtpSender(
+      kRtcpSr | kRtcpDlrr, sender_info, dlrr_rb, kSendingSsrc, kCName);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpBuilderTest, RtcpSenderReportWithDlrrAndLog) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name + dlrr + sender log.
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSendingSsrc, 0);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddXrHeader(kSendingSsrc);
+ p.AddXrDlrrBlock(kSendingSsrc);
+
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpDlrrReportBlock dlrr_rb;
+ dlrr_rb.last_rr = kLastRr;
+ dlrr_rb.delay_since_last_rr = kDelayLastRr;
+
+  rtcp_builder_->SendRtcpFromRtpSender(
+      kRtcpSr | kRtcpDlrr | kRtcpSenderLog,
+      sender_info, dlrr_rb, kSendingSsrc, kCName);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.cc b/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.cc
new file mode 100644
index 00000000000..a748baa27ab
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.cc
@@ -0,0 +1,65 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+
+#include <string>
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+PacketStorage::PacketStorage(size_t stored_frames)
+ : max_stored_frames_(stored_frames),
+ first_frame_id_in_list_(0),
+ last_frame_id_in_list_(0) {
+}
+
+PacketStorage::~PacketStorage() {
+}
+
+bool PacketStorage::IsValid() const {
+ return max_stored_frames_ > 0 &&
+ static_cast<int>(max_stored_frames_) <= kMaxUnackedFrames;
+}
+
+size_t PacketStorage::GetNumberOfStoredFrames() const {
+ return frames_.size();
+}
+
+void PacketStorage::StoreFrame(uint32 frame_id,
+ const SendPacketVector& packets) {
+ if (frames_.empty()) {
+ first_frame_id_in_list_ = frame_id;
+ } else {
+ // Make sure frame IDs are consecutive.
+ DCHECK_EQ(last_frame_id_in_list_ + 1, frame_id);
+ }
+
+ // Save new frame to the end of the list.
+ last_frame_id_in_list_ = frame_id;
+ frames_.push_back(packets);
+
+ // Evict the oldest frame if the list is too long.
+ if (frames_.size() > max_stored_frames_) {
+ frames_.pop_front();
+ ++first_frame_id_in_list_;
+ }
+}
+
+const SendPacketVector* PacketStorage::GetFrame8(uint8 frame_id_8bits) const {
+  // The requested frame ID has only 8 bits, so convert the first frame ID
+  // in the list to match before computing the offset.
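+  // e.g. if |first_frame_id_in_list_| is 0xFFFFFFFE (low byte 0xFE) and
+  // |frame_id_8bits| is 0x01, the offset is 0x01 - 0xFE = 0x03 (mod 256),
+  // i.e. the fourth stored frame.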
+ uint8 index_8bits = first_frame_id_in_list_ & 0xFF;
+ index_8bits = frame_id_8bits - index_8bits;
+ if (index_8bits >= frames_.size())
+ return NULL;
+ return &(frames_[index_8bits]);
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.h b/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.h
new file mode 100644
index 00000000000..037ead1edf6
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage.h
@@ -0,0 +1,62 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
+#define MEDIA_CAST_TRANSPORT_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
+
+#include <deque>
+#include <list>
+#include <map>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+// Stores a list of frames. Each frame consists of a list of packets.
+typedef std::deque<SendPacketVector> FrameQueue;
+
+class PacketStorage {
+ public:
+ explicit PacketStorage(size_t stored_frames);
+ virtual ~PacketStorage();
+
+  // Returns true if this class is configured correctly
+  // (0 < stored frames <= kMaxUnackedFrames).
+ bool IsValid() const;
+
+ // Store all of the packets for a frame.
+ void StoreFrame(uint32 frame_id, const SendPacketVector& packets);
+
+  // Returns the list of packets for the frame indexed by an 8-bit ID, which
+  // is the lowest 8 bits of the frame ID.
+  // Returns NULL if the frame cannot be found.
+ const SendPacketVector* GetFrame8(uint8 frame_id_8bits) const;
+
+ // Get the number of stored frames.
+ size_t GetNumberOfStoredFrames() const;
+
+ private:
+ const size_t max_stored_frames_;
+ FrameQueue frames_;
+ uint32 first_frame_id_in_list_;
+ uint32 last_frame_id_in_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(PacketStorage);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
diff --git a/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc b/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc
new file mode 100644
index 00000000000..298942c80a5
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+static const size_t kStoredFrames = 10;
+
+// Generate |number_of_frames| frames and store them in |*storage|.
+// First frame has 1 packet, second frame has 2 packets, etc.
+static void StoreFrames(size_t number_of_frames,
+ uint32 first_frame_id,
+ PacketStorage* storage) {
+ const base::TimeTicks kTicks;
+ const int kSsrc = 1;
+ for (size_t i = 0; i < number_of_frames; ++i) {
+ SendPacketVector packets;
+ // First frame has 1 packet, second frame has 2 packets, etc.
+ const size_t kNumberOfPackets = i + 1;
+ for (size_t j = 0; j < kNumberOfPackets; ++j) {
+ Packet test_packet(1, 0);
+ packets.push_back(
+ std::make_pair(
+ PacedPacketSender::MakePacketKey(kTicks, kSsrc, j),
+ new base::RefCountedData<Packet>(test_packet)));
+ }
+ storage->StoreFrame(first_frame_id, packets);
+ ++first_frame_id;
+ }
+}
+
+TEST(PacketStorageTest, NumberOfStoredFrames) {
+ PacketStorage storage(kStoredFrames);
+
+ uint32 frame_id = 0;
+ frame_id = ~frame_id; // The maximum value of uint32.
+ StoreFrames(200, frame_id, &storage);
+ EXPECT_EQ(kStoredFrames, storage.GetNumberOfStoredFrames());
+}
+
+TEST(PacketStorageTest, GetFrameWrapAround8bits) {
+ PacketStorage storage(kStoredFrames);
+
+ const uint32 kFirstFrameId = 250;
+ StoreFrames(kStoredFrames, kFirstFrameId, &storage);
+ EXPECT_EQ(kStoredFrames, storage.GetNumberOfStoredFrames());
+
+ // Expect we get the correct frames by looking at the number of
+ // packets.
+ uint32 frame_id = kFirstFrameId;
+ for (size_t i = 0; i < kStoredFrames; ++i) {
+ ASSERT_TRUE(storage.GetFrame8(frame_id));
+ EXPECT_EQ(i + 1, storage.GetFrame8(frame_id)->size());
+ ++frame_id;
+ }
+}
+
+TEST(PacketStorageTest, GetFrameWrapAround32bits) {
+ PacketStorage storage(kStoredFrames);
+
+ // First frame ID is close to the maximum value of uint32.
+ uint32 first_frame_id = 0xffffffff - 5;
+ StoreFrames(kStoredFrames, first_frame_id, &storage);
+ EXPECT_EQ(kStoredFrames, storage.GetNumberOfStoredFrames());
+
+ // Expect we get the correct frames by looking at the number of
+ // packets.
+ uint32 frame_id = first_frame_id;
+ for (size_t i = 0; i < kStoredFrames; ++i) {
+ ASSERT_TRUE(storage.GetFrame8(frame_id));
+ EXPECT_EQ(i + 1, storage.GetFrame8(frame_id)->size());
+ ++frame_id;
+ }
+}
+
+TEST(PacketStorageTest, GetFrameTooOld) {
+ PacketStorage storage(kStoredFrames);
+
+ // First frame ID is close to the maximum value of uint32.
+ uint32 first_frame_id = 0xffffffff - 5;
+
+ // Store two times the capacity.
+ StoreFrames(2 * kStoredFrames, first_frame_id, &storage);
+ EXPECT_EQ(kStoredFrames, storage.GetNumberOfStoredFrames());
+
+ uint32 frame_id = first_frame_id;
+ // Old frames are evicted.
+ for (size_t i = 0; i < kStoredFrames; ++i) {
+ EXPECT_FALSE(storage.GetFrame8(frame_id));
+ ++frame_id;
+ }
+ // Check recent frames are there.
+ for (size_t i = 0; i < kStoredFrames; ++i) {
+ ASSERT_TRUE(storage.GetFrame8(frame_id));
+ EXPECT_EQ(kStoredFrames + i + 1,
+ storage.GetFrame8(frame_id)->size());
+ ++frame_id;
+ }
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
new file mode 100644
index 00000000000..d40f99f1446
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -0,0 +1,137 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+
+#include "base/big_endian.h"
+#include "base/logging.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+static const uint16 kCommonRtpHeaderLength = 12;
+static const uint16 kCastRtpHeaderLength = 7;
+static const uint8 kCastKeyFrameBitMask = 0x80;
+static const uint8 kCastReferenceFrameIdBitMask = 0x40;
+static const uint8 kRtpMarkerBitMask = 0x80;
+
+RtpPacketizerConfig::RtpPacketizerConfig()
+ : audio(false),
+ payload_type(-1),
+ max_payload_length(kMaxIpPacketSize - 28), // Default is IP-v4/UDP.
+ sequence_number(0),
+ frequency(8000),
+ ssrc(0),
+ channels(0) {}
+
+RtpPacketizerConfig::~RtpPacketizerConfig() {}
+
+RtpPacketizer::RtpPacketizer(PacedSender* const transport,
+ PacketStorage* packet_storage,
+ RtpPacketizerConfig rtp_packetizer_config)
+ : config_(rtp_packetizer_config),
+ transport_(transport),
+ packet_storage_(packet_storage),
+ sequence_number_(config_.sequence_number),
+ rtp_timestamp_(0),
+ packet_id_(0),
+ send_packet_count_(0),
+ send_octet_count_(0) {
+ DCHECK(transport) << "Invalid argument";
+}
+
+RtpPacketizer::~RtpPacketizer() {}
+
+uint16 RtpPacketizer::NextSequenceNumber() {
+ ++sequence_number_;
+ return sequence_number_ - 1;
+}
+
+void RtpPacketizer::SendFrameAsPackets(const EncodedFrame& frame) {
+ uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
+ uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
+ rtp_timestamp_ = frame.rtp_timestamp;
+
+ // Split the payload evenly (round number up).
+ size_t num_packets = (frame.data.size() + max_length) / max_length;
+ size_t payload_length = (frame.data.size() + num_packets) / num_packets;
+ DCHECK_LE(payload_length, max_length) << "Invalid argument";
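+  // e.g. if kMaxIpPacketSize is 1500, the default max_payload_length is
+  // 1472 and max_length = 1472 - 19 - 1 = 1452, so a 3000-byte frame yields
+  // num_packets = (3000 + 1452) / 1452 = 3 and
+  // payload_length = (3000 + 3) / 3 = 1001 bytes.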
+
+ SendPacketVector packets;
+
+ size_t remaining_size = frame.data.size();
+ std::string::const_iterator data_iter = frame.data.begin();
+ while (remaining_size > 0) {
+ PacketRef packet(new base::RefCountedData<Packet>);
+
+ if (remaining_size < payload_length) {
+ payload_length = remaining_size;
+ }
+ remaining_size -= payload_length;
+ BuildCommonRTPheader(
+ &packet->data, remaining_size == 0, frame.rtp_timestamp);
+
+ // Build Cast header.
+ // TODO(miu): Should we always set the ref frame bit and the ref_frame_id?
+ DCHECK_NE(frame.dependency, EncodedFrame::UNKNOWN_DEPENDENCY);
+ packet->data.push_back(
+ ((frame.dependency == EncodedFrame::KEY) ? kCastKeyFrameBitMask : 0) |
+ kCastReferenceFrameIdBitMask);
+ packet->data.push_back(static_cast<uint8>(frame.frame_id));
+ size_t start_size = packet->data.size();
+ packet->data.resize(start_size + 4);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&(packet->data[start_size])), 4);
+ big_endian_writer.WriteU16(packet_id_);
+ big_endian_writer.WriteU16(static_cast<uint16>(num_packets - 1));
+ packet->data.push_back(static_cast<uint8>(frame.referenced_frame_id));
+
+ // Copy payload data.
+ packet->data.insert(packet->data.end(),
+ data_iter,
+ data_iter + payload_length);
+ data_iter += payload_length;
+
+ const PacketKey key =
+ PacedPacketSender::MakePacketKey(frame.reference_time,
+ config_.ssrc,
+ packet_id_++);
+ packets.push_back(make_pair(key, packet));
+
+ // Update stats.
+ ++send_packet_count_;
+ send_octet_count_ += payload_length;
+ }
+ DCHECK(packet_id_ == num_packets) << "Invalid state";
+
+ packet_storage_->StoreFrame(frame.frame_id, packets);
+
+ // Send to network.
+ transport_->SendPackets(packets);
+
+ // Prepare for next frame.
+ packet_id_ = 0;
+}
+
+void RtpPacketizer::BuildCommonRTPheader(Packet* packet,
+ bool marker_bit,
+ uint32 time_stamp) {
+ packet->push_back(0x80);
+ packet->push_back(static_cast<uint8>(config_.payload_type) |
+ (marker_bit ? kRtpMarkerBitMask : 0));
+ size_t start_size = packet->size();
+ packet->resize(start_size + 10);
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>(&((*packet)[start_size])), 10);
+ big_endian_writer.WriteU16(sequence_number_);
+ big_endian_writer.WriteU32(time_stamp);
+ big_endian_writer.WriteU32(config_.ssrc);
+ ++sequence_number_;
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
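
The even-split arithmetic above is easiest to see with concrete numbers. A
minimal standalone sketch (plain C++; the frame and payload sizes are assumed
example values, not taken from the patch):

// Standalone illustration of the even-split packetization math in
// SendFrameAsPackets(); not Chromium code.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t frame_size = 5000;  // encoded frame bytes (example value)
  const size_t max_length = 1455;  // max payload per packet (example value)

  // Same rounding as the packetizer: adding max_length (not max_length - 1)
  // guarantees payload_length <= max_length below, even when frame_size is
  // an exact multiple of max_length.
  const size_t num_packets = (frame_size + max_length) / max_length;
  size_t payload_length = (frame_size + num_packets) / num_packets;

  size_t remaining = frame_size;
  size_t packet_id = 0;
  while (remaining > 0) {
    if (remaining < payload_length)
      payload_length = remaining;  // the last packet takes the remainder
    remaining -= payload_length;
    std::printf("packet %zu: %zu bytes, marker=%d\n",
                packet_id++, payload_length, remaining == 0 ? 1 : 0);
  }
  return 0;
}

For a 5000-byte frame with a 1455-byte payload limit this prints four
near-equal packets (1251, 1251, 1251 and 1247 bytes) instead of three full
packets plus a small tail, spreading loss impact evenly across the frame.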
diff --git a/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h b/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
new file mode 100644
index 00000000000..ebdbf010183
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -0,0 +1,86 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#define MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+
+#include <cmath>
+#include <list>
+#include <map>
+
+#include "base/time/time.h"
+#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+
+namespace base {
+class TickClock;
+}
+
+namespace media {
+namespace cast {
+
+namespace transport {
+
+class PacedSender;
+
+struct RtpPacketizerConfig {
+ RtpPacketizerConfig();
+ ~RtpPacketizerConfig();
+
+ // General.
+ bool audio;
+ int payload_type;
+ uint16 max_payload_length;
+ uint16 sequence_number;
+ int frequency;
+
+ // SSRC.
+ unsigned int ssrc;
+
+ // Video.
+ VideoCodec video_codec;
+
+ // Audio.
+ uint8 channels;
+ AudioCodec audio_codec;
+};
+
+// This object is only called from the main cast thread.
+// This class breaks encoded audio and video frames into packets and adds an
+// RTP header to each packet.
+class RtpPacketizer {
+ public:
+ RtpPacketizer(PacedSender* const transport,
+ PacketStorage* packet_storage,
+ RtpPacketizerConfig rtp_packetizer_config);
+ ~RtpPacketizer();
+
+ void SendFrameAsPackets(const EncodedFrame& frame);
+
+  // Returns the next sequence number and increments the internal counter,
+  // giving every packet (including retransmissions) a unique sequence number.
+ uint16 NextSequenceNumber();
+
+ size_t send_packet_count() const { return send_packet_count_; }
+ size_t send_octet_count() const { return send_octet_count_; }
+
+ private:
+ void BuildCommonRTPheader(Packet* packet, bool marker_bit, uint32 time_stamp);
+
+ RtpPacketizerConfig config_;
+ PacedSender* const transport_; // Not owned by this class.
+ PacketStorage* packet_storage_;
+
+ uint16 sequence_number_;
+ uint32 rtp_timestamp_;
+ uint16 packet_id_;
+
+ size_t send_packet_count_;
+ size_t send_octet_count_;
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
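
For reference, the 12-byte common RTP header that BuildCommonRTPheader()
writes can be reproduced in isolation. A standalone sketch (illustrative
field values; the push helpers are stand-ins for base::BigEndianWriter):

// Standalone illustration of the common RTP header layout; not Chromium code.
#include <cstdint>
#include <cstdio>
#include <vector>

static void PushU16(std::vector<uint8_t>* out, uint16_t v) {
  out->push_back(static_cast<uint8_t>(v >> 8));  // big-endian
  out->push_back(static_cast<uint8_t>(v));
}

static void PushU32(std::vector<uint8_t>* out, uint32_t v) {
  PushU16(out, static_cast<uint16_t>(v >> 16));
  PushU16(out, static_cast<uint16_t>(v));
}

int main() {
  const uint8_t kRtpMarkerBitMask = 0x80;
  std::vector<uint8_t> header;
  header.push_back(0x80);                     // RTP version 2, no CSRCs
  header.push_back(127 | kRtpMarkerBitMask);  // payload type 127 + marker
  PushU16(&header, 33);                       // sequence number
  PushU32(&header, 0x00000457);               // RTP timestamp
  PushU32(&header, 0x00012345);               // SSRC
  for (uint8_t byte : header)
    std::printf("%02x ", byte);
  std::printf("(%zu bytes)\n", header.size());  // 12 == kCommonRtpHeaderLength
  return 0;
}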
diff --git a/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
new file mode 100644
index 00000000000..64def4ce7fa
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+
+#include <stdint.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+namespace {
+static const int kPayload = 127;
+static const uint32 kTimestampMs = 10;
+static const uint16 kSeqNum = 33;
+static const int kMaxPacketLength = 1500;
+static const int kSsrc = 0x12345;
+static const unsigned int kFrameSize = 5000;
+static const uint32 kStartFrameId = UINT32_C(0xffffffff);
+}
+
+class TestRtpPacketTransport : public PacketSender {
+ public:
+ explicit TestRtpPacketTransport(RtpPacketizerConfig config)
+ : config_(config),
+ sequence_number_(kSeqNum),
+ packets_sent_(0),
+ expected_number_of_packets_(0),
+ expected_packet_id_(0),
+ expected_frame_id_(0),
+        expected_rtp_timestamp_(0) {}
+
+ void VerifyRtpHeader(const RtpCastTestHeader& rtp_header) {
+ VerifyCommonRtpHeader(rtp_header);
+ VerifyCastRtpHeader(rtp_header);
+ }
+
+ void VerifyCommonRtpHeader(const RtpCastTestHeader& rtp_header) {
+ EXPECT_EQ(kPayload, rtp_header.payload_type);
+ EXPECT_EQ(sequence_number_, rtp_header.sequence_number);
+    EXPECT_EQ(expected_rtp_timestamp_, rtp_header.rtp_timestamp);
+ EXPECT_EQ(config_.ssrc, rtp_header.ssrc);
+ EXPECT_EQ(0, rtp_header.num_csrcs);
+ }
+
+ void VerifyCastRtpHeader(const RtpCastTestHeader& rtp_header) {
+ EXPECT_FALSE(rtp_header.is_key_frame);
+ EXPECT_EQ(expected_frame_id_, rtp_header.frame_id);
+ EXPECT_EQ(expected_packet_id_, rtp_header.packet_id);
+ EXPECT_EQ(expected_number_of_packets_ - 1, rtp_header.max_packet_id);
+ EXPECT_TRUE(rtp_header.is_reference);
+ EXPECT_EQ(expected_frame_id_ - 1u, rtp_header.reference_frame_id);
+ }
+
+ virtual bool SendPacket(PacketRef packet, const base::Closure& cb) OVERRIDE {
+ ++packets_sent_;
+ RtpHeaderParser parser(packet->data.data(), packet->data.size());
+ RtpCastTestHeader rtp_header;
+ parser.Parse(&rtp_header);
+ VerifyRtpHeader(rtp_header);
+ ++sequence_number_;
+ ++expected_packet_id_;
+ return true;
+ }
+
+ size_t number_of_packets_received() const { return packets_sent_; }
+
+ void set_expected_number_of_packets(size_t expected_number_of_packets) {
+ expected_number_of_packets_ = expected_number_of_packets;
+ }
+
+ void set_rtp_timestamp(uint32 rtp_timestamp) {
+    expected_rtp_timestamp_ = rtp_timestamp;
+ }
+
+ RtpPacketizerConfig config_;
+ uint32 sequence_number_;
+ size_t packets_sent_;
+ size_t number_of_packets_;
+ size_t expected_number_of_packets_;
+ // Assuming packets arrive in sequence.
+ int expected_packet_id_;
+ uint32 expected_frame_id_;
+  uint32 expected_rtp_timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestRtpPacketTransport);
+};
+
+class RtpPacketizerTest : public ::testing::Test {
+ protected:
+ RtpPacketizerTest()
+ : task_runner_(new test::FakeSingleThreadTaskRunner(&testing_clock_)),
+ video_frame_(),
+ packet_storage_(200) {
+ config_.sequence_number = kSeqNum;
+ config_.ssrc = kSsrc;
+ config_.payload_type = kPayload;
+ config_.max_payload_length = kMaxPacketLength;
+ transport_.reset(new TestRtpPacketTransport(config_));
+ pacer_.reset(new PacedSender(
+ &testing_clock_, &logging_, transport_.get(), task_runner_));
+ pacer_->RegisterVideoSsrc(config_.ssrc);
+ rtp_packetizer_.reset(new RtpPacketizer(
+ pacer_.get(), &packet_storage_, config_));
+ video_frame_.dependency = EncodedFrame::DEPENDENT;
+ video_frame_.frame_id = 0;
+ video_frame_.referenced_frame_id = kStartFrameId;
+ video_frame_.data.assign(kFrameSize, 123);
+ video_frame_.rtp_timestamp =
+ GetVideoRtpTimestamp(testing_clock_.NowTicks());
+ }
+
+ void RunTasks(int during_ms) {
+ for (int i = 0; i < during_ms; ++i) {
+      // Process the timers every 1 ms.
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1));
+ task_runner_->RunTasks();
+ }
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ EncodedFrame video_frame_;
+ PacketStorage packet_storage_;
+ RtpPacketizerConfig config_;
+ scoped_ptr<TestRtpPacketTransport> transport_;
+ LoggingImpl logging_;
+ scoped_ptr<PacedSender> pacer_;
+ scoped_ptr<RtpPacketizer> rtp_packetizer_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtpPacketizerTest);
+};
+
+TEST_F(RtpPacketizerTest, SendStandardPackets) {
+ size_t expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+ transport_->set_expected_number_of_packets(expected_num_of_packets);
+ transport_->set_rtp_timestamp(video_frame_.rtp_timestamp);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
+ video_frame_.reference_time = testing_clock_.NowTicks();
+ rtp_packetizer_->SendFrameAsPackets(video_frame_);
+ RunTasks(33 + 1);
+ EXPECT_EQ(expected_num_of_packets, transport_->number_of_packets_received());
+}
+
+TEST_F(RtpPacketizerTest, Stats) {
+ EXPECT_FALSE(rtp_packetizer_->send_packet_count());
+ EXPECT_FALSE(rtp_packetizer_->send_octet_count());
+  // Send one frame; the resulting packets vary in length (the last is shorter).
+ size_t expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+ transport_->set_expected_number_of_packets(expected_num_of_packets);
+ transport_->set_rtp_timestamp(video_frame_.rtp_timestamp);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
+ video_frame_.reference_time = testing_clock_.NowTicks();
+ rtp_packetizer_->SendFrameAsPackets(video_frame_);
+ RunTasks(33 + 1);
+ EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packet_count());
+ EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
+ EXPECT_EQ(expected_num_of_packets, transport_->number_of_packets_received());
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/rtp_sender/rtp_sender.cc b/chromium/media/cast/transport/rtp_sender/rtp_sender.cc
new file mode 100644
index 00000000000..b807b347576
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/rtp_sender.cc
@@ -0,0 +1,150 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/rtp_sender/rtp_sender.h"
+
+#include "base/big_endian.h"
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+namespace {
+
+// If there is only one reference to the packet, return that reference
+// directly; otherwise return a deep copy of the packet.
+PacketRef FastCopyPacket(const PacketRef& packet) {
+ if (packet->HasOneRef())
+ return packet;
+ return make_scoped_refptr(new base::RefCountedData<Packet>(packet->data));
+}
+
+} // namespace
+
+RtpSender::RtpSender(
+ base::TickClock* clock,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
+ PacedSender* const transport)
+ : clock_(clock),
+ transport_(transport),
+ transport_task_runner_(transport_task_runner),
+ weak_factory_(this) {
+ // Randomly set sequence number start value.
+ config_.sequence_number = base::RandInt(0, 65535);
+}
+
+RtpSender::~RtpSender() {}
+
+bool RtpSender::InitializeAudio(const CastTransportAudioConfig& config) {
+ storage_.reset(new PacketStorage(config.rtp.max_outstanding_frames));
+ if (!storage_->IsValid()) {
+ return false;
+ }
+ config_.audio = true;
+ config_.ssrc = config.rtp.config.ssrc;
+ config_.payload_type = config.rtp.config.payload_type;
+ config_.frequency = config.frequency;
+ config_.audio_codec = config.codec;
+ packetizer_.reset(new RtpPacketizer(transport_, storage_.get(), config_));
+ return true;
+}
+
+bool RtpSender::InitializeVideo(const CastTransportVideoConfig& config) {
+ storage_.reset(new PacketStorage(config.rtp.max_outstanding_frames));
+ if (!storage_->IsValid()) {
+ return false;
+ }
+ config_.audio = false;
+ config_.ssrc = config.rtp.config.ssrc;
+ config_.payload_type = config.rtp.config.payload_type;
+ config_.frequency = kVideoFrequency;
+ config_.video_codec = config.codec;
+ packetizer_.reset(new RtpPacketizer(transport_, storage_.get(), config_));
+ return true;
+}
+
+void RtpSender::SendFrame(const EncodedFrame& frame) {
+ DCHECK(packetizer_);
+ packetizer_->SendFrameAsPackets(frame);
+}
+
+void RtpSender::ResendPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets,
+ bool cancel_rtx_if_not_in_list,
+ base::TimeDelta dedupe_window) {
+ DCHECK(storage_);
+ // Iterate over all frames in the list.
+ for (MissingFramesAndPacketsMap::const_iterator it =
+ missing_frames_and_packets.begin();
+ it != missing_frames_and_packets.end();
+ ++it) {
+ SendPacketVector packets_to_resend;
+ uint8 frame_id = it->first;
+    // Set of packets that the receiver wants us to re-send. The sentinel
+    // entries checked below request all packets, or the last packet, of
+    // this frame.
+ const PacketIdSet& missing_packet_set = it->second;
+
+ bool resend_all = missing_packet_set.find(kRtcpCastAllPacketsLost) !=
+ missing_packet_set.end();
+ bool resend_last = missing_packet_set.find(kRtcpCastLastPacket) !=
+ missing_packet_set.end();
+
+ const SendPacketVector* stored_packets = storage_->GetFrame8(frame_id);
+ if (!stored_packets)
+ continue;
+
+    for (SendPacketVector::const_iterator packet_it = stored_packets->begin();
+         packet_it != stored_packets->end(); ++packet_it) {
+      const PacketKey& packet_key = packet_it->first;
+      const uint16 packet_id = packet_key.second.second;
+
+      // Should we resend the packet?
+      bool resend = resend_all;
+
+      // Should we resend it because it's in the missing_packet_set?
+      if (!resend &&
+          missing_packet_set.find(packet_id) != missing_packet_set.end()) {
+        resend = true;
+      }
+
+      // If we were asked to resend the last packet, check whether this is
+      // the last stored packet of the frame.
+      if (!resend && resend_last && (packet_it + 1) == stored_packets->end()) {
+        resend = true;
+      }
+
+      if (resend) {
+        // Resend packet to the network.
+        VLOG(3) << "Resend " << static_cast<int>(frame_id) << ":"
+                << packet_id;
+        // Set a unique incremental sequence number for every packet.
+        PacketRef packet_copy = FastCopyPacket(packet_it->second);
+        UpdateSequenceNumber(&packet_copy->data);
+        packets_to_resend.push_back(std::make_pair(packet_key, packet_copy));
+      } else if (cancel_rtx_if_not_in_list) {
+        transport_->CancelSendingPacket(packet_it->first);
+      }
+ }
+ transport_->ResendPackets(packets_to_resend, dedupe_window);
+ }
+}
+
+void RtpSender::UpdateSequenceNumber(Packet* packet) {
+ // TODO(miu): This is an abstraction violation. This needs to be a part of
+ // the overall packet (de)serialization consolidation.
+ static const int kByteOffsetToSequenceNumber = 2;
+ base::BigEndianWriter big_endian_writer(
+ reinterpret_cast<char*>((&packet->front()) + kByteOffsetToSequenceNumber),
+ sizeof(uint16));
+ big_endian_writer.WriteU16(packetizer_->NextSequenceNumber());
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
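
The copy-on-shared-reference trick in FastCopyPacket() above can be sketched
with std::shared_ptr standing in for base::RefCountedData (an assumed
equivalence, for illustration only): the buffer is reused when this code is
the sole owner, and deep-copied when the pacer may still hold it, so
restamping a resent packet cannot corrupt a queued one.

// Standalone illustration of copy-on-shared-reference; not Chromium code.
#include <cstdint>
#include <memory>
#include <vector>

using Packet = std::vector<uint8_t>;
using PacketRef = std::shared_ptr<Packet>;

PacketRef FastCopyPacket(const PacketRef& packet) {
  if (packet.use_count() == 1)
    return packet;                           // sole owner: reuse the buffer
  return std::make_shared<Packet>(*packet);  // shared: deep copy
}

int main() {
  PacketRef original = std::make_shared<Packet>(12, 0);
  PacketRef queued = original;  // e.g. the pacer still references it
  PacketRef copy = FastCopyPacket(original);
  (*copy)[2] = 0xBE;            // restamp without touching the queued copy
  return ((*queued)[2] == 0 && copy != queued) ? 0 : 1;
}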
diff --git a/chromium/media/cast/transport/rtp_sender/rtp_sender.h b/chromium/media/cast/transport/rtp_sender/rtp_sender.h
new file mode 100644
index 00000000000..e65326abf16
--- /dev/null
+++ b/chromium/media/cast/transport/rtp_sender/rtp_sender.h
@@ -0,0 +1,85 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the interface to the cast RTP sender.
+
+#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_SENDER_H_
+#define MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_SENDER_H_
+
+#include <map>
+#include <set>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "base/memory/weak_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/transport/cast_transport_sender.h"
+#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+
+namespace media {
+namespace cast {
+
+namespace transport {
+
+// This object is only called from the main cast thread.
+// This class handles splitting encoded audio and video frames into packets
+// and adding an RTP header to each packet. Sent packets are stored until
+// they are acknowledged by the remote peer or they time out.
+class RtpSender {
+ public:
+ RtpSender(
+ base::TickClock* clock,
+ const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
+ PacedSender* const transport);
+
+ ~RtpSender();
+
+ // Initialize audio stack. Audio must be initialized prior to sending encoded
+ // audio frames. Returns false if configuration is invalid.
+ bool InitializeAudio(const CastTransportAudioConfig& config);
+
+ // Initialize video stack. Video must be initialized prior to sending encoded
+ // video frames. Returns false if configuration is invalid.
+ bool InitializeVideo(const CastTransportVideoConfig& config);
+
+ void SendFrame(const EncodedFrame& frame);
+
+ void ResendPackets(const MissingFramesAndPacketsMap& missing_packets,
+ bool cancel_rtx_if_not_in_list,
+ base::TimeDelta dedupe_window);
+
+ size_t send_packet_count() const {
+ return packetizer_ ? packetizer_->send_packet_count() : 0;
+ }
+ size_t send_octet_count() const {
+ return packetizer_ ? packetizer_->send_octet_count() : 0;
+ }
+ uint32 ssrc() const { return config_.ssrc; }
+
+ private:
+ void UpdateSequenceNumber(Packet* packet);
+
+ base::TickClock* clock_; // Not owned by this class.
+ RtpPacketizerConfig config_;
+ scoped_ptr<RtpPacketizer> packetizer_;
+ scoped_ptr<PacketStorage> storage_;
+ PacedSender* const transport_;
+ scoped_refptr<base::SingleThreadTaskRunner> transport_task_runner_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<RtpSender> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(RtpSender);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_SENDER_H_
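
UpdateSequenceNumber() relies on the RTP sequence number living at a fixed
offset: big-endian, two bytes into the packet. A standalone sketch of that
in-place restamp (example values only; not Chromium code):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  std::vector<uint8_t> packet(12, 0);  // a common RTP header, zeroed
  const int kByteOffsetToSequenceNumber = 2;
  const uint16_t new_sequence_number = 0xBEEF;  // example value
  // Write the new sequence number big-endian at the fixed offset.
  packet[kByteOffsetToSequenceNumber] =
      static_cast<uint8_t>(new_sequence_number >> 8);
  packet[kByteOffsetToSequenceNumber + 1] =
      static_cast<uint8_t>(new_sequence_number);
  assert(packet[2] == 0xBE && packet[3] == 0xEF);
  return 0;
}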
diff --git a/chromium/media/cast/transport/transport/udp_transport.cc b/chromium/media/cast/transport/transport/udp_transport.cc
new file mode 100644
index 00000000000..9669b17d438
--- /dev/null
+++ b/chromium/media/cast/transport/transport/udp_transport.cc
@@ -0,0 +1,242 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/transport/udp_transport.h"
+
+#include <algorithm>
+#include <string>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/rand_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/rand_callback.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+namespace {
+const int kMaxPacketSize = 1500;
+
+bool IsEmpty(const net::IPEndPoint& addr) {
+ net::IPAddressNumber empty_addr(addr.address().size());
+ return std::equal(
+ empty_addr.begin(), empty_addr.end(), addr.address().begin()) &&
+ !addr.port();
+}
+
+bool IsEqual(const net::IPEndPoint& addr1, const net::IPEndPoint& addr2) {
+ return addr1.port() == addr2.port() && std::equal(addr1.address().begin(),
+ addr1.address().end(),
+ addr2.address().begin());
+}
+} // namespace
+
+UdpTransport::UdpTransport(
+ net::NetLog* net_log,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_thread_proxy,
+ const net::IPEndPoint& local_end_point,
+ const net::IPEndPoint& remote_end_point,
+ const CastTransportStatusCallback& status_callback)
+ : io_thread_proxy_(io_thread_proxy),
+ local_addr_(local_end_point),
+ remote_addr_(remote_end_point),
+ udp_socket_(new net::UDPSocket(net::DatagramSocket::DEFAULT_BIND,
+ net::RandIntCallback(),
+ net_log,
+ net::NetLog::Source())),
+ send_pending_(false),
+ receive_pending_(false),
+ client_connected_(false),
+ next_dscp_value_(net::DSCP_NO_CHANGE),
+ status_callback_(status_callback),
+ weak_factory_(this) {
+ DCHECK(!IsEmpty(local_end_point) || !IsEmpty(remote_end_point));
+}
+
+UdpTransport::~UdpTransport() {}
+
+void UdpTransport::StartReceiving(
+ const PacketReceiverCallback& packet_receiver) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+
+ packet_receiver_ = packet_receiver;
+ udp_socket_->AllowAddressReuse();
+ udp_socket_->SetMulticastLoopbackMode(true);
+ if (!IsEmpty(local_addr_)) {
+ if (udp_socket_->Bind(local_addr_) < 0) {
+ status_callback_.Run(TRANSPORT_SOCKET_ERROR);
+ LOG(ERROR) << "Failed to bind local address.";
+ return;
+ }
+ } else if (!IsEmpty(remote_addr_)) {
+ if (udp_socket_->Connect(remote_addr_) < 0) {
+ status_callback_.Run(TRANSPORT_SOCKET_ERROR);
+ LOG(ERROR) << "Failed to connect to remote address.";
+ return;
+ }
+ client_connected_ = true;
+ } else {
+ NOTREACHED() << "Either local or remote address has to be defined.";
+ }
+
+ ScheduleReceiveNextPacket();
+}
+
+void UdpTransport::SetDscp(net::DiffServCodePoint dscp) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ next_dscp_value_ = dscp;
+}
+
+void UdpTransport::ScheduleReceiveNextPacket() {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+ if (!packet_receiver_.is_null() && !receive_pending_) {
+ receive_pending_ = true;
+ io_thread_proxy_->PostTask(FROM_HERE,
+ base::Bind(&UdpTransport::ReceiveNextPacket,
+ weak_factory_.GetWeakPtr(),
+ net::ERR_IO_PENDING));
+ }
+}
+
+void UdpTransport::ReceiveNextPacket(int length_or_status) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+
+ // Loop while UdpSocket is delivering data synchronously. When it responds
+ // with a "pending" status, break and expect this method to be called back in
+ // the future when a packet is ready.
+ while (true) {
+ if (length_or_status == net::ERR_IO_PENDING) {
+ next_packet_.reset(new Packet(kMaxPacketSize));
+ recv_buf_ = new net::WrappedIOBuffer(
+ reinterpret_cast<char*>(&next_packet_->front()));
+ length_or_status = udp_socket_->RecvFrom(
+ recv_buf_,
+ kMaxPacketSize,
+ &recv_addr_,
+ base::Bind(&UdpTransport::ReceiveNextPacket,
+ weak_factory_.GetWeakPtr()));
+ if (length_or_status == net::ERR_IO_PENDING) {
+ receive_pending_ = true;
+ return;
+ }
+ }
+
+ // Note: At this point, either a packet is ready or an error has occurred.
+ if (length_or_status < 0) {
+ VLOG(1) << "Failed to receive packet: Status code is "
+ << length_or_status;
+ status_callback_.Run(TRANSPORT_SOCKET_ERROR);
+ receive_pending_ = false;
+ return;
+ }
+
+ // Confirm the packet has come from the expected remote address; otherwise,
+ // ignore it. If this is the first packet being received and no remote
+ // address has been set, set the remote address and expect all future
+ // packets to come from the same one.
+ // TODO(hubbe): We should only do this if the caller used a valid ssrc.
+ if (IsEmpty(remote_addr_)) {
+ remote_addr_ = recv_addr_;
+ VLOG(1) << "Setting remote address from first received packet: "
+ << remote_addr_.ToString();
+ } else if (!IsEqual(remote_addr_, recv_addr_)) {
+ VLOG(1) << "Ignoring packet received from an unrecognized address: "
+ << recv_addr_.ToString() << ".";
+ length_or_status = net::ERR_IO_PENDING;
+ continue;
+ }
+
+ next_packet_->resize(length_or_status);
+ packet_receiver_.Run(next_packet_.Pass());
+ length_or_status = net::ERR_IO_PENDING;
+ }
+}
+
+bool UdpTransport::SendPacket(PacketRef packet, const base::Closure& cb) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+
+ DCHECK(!send_pending_);
+ if (send_pending_) {
+ VLOG(1) << "Cannot send because of pending IO.";
+ return true;
+ }
+
+ if (next_dscp_value_ != net::DSCP_NO_CHANGE) {
+ int result = udp_socket_->SetDiffServCodePoint(next_dscp_value_);
+ if (result != net::OK) {
+ LOG(ERROR) << "Unable to set DSCP: " << next_dscp_value_
+ << " to socket; Error: " << result;
+ }
+ // Don't change DSCP in next send.
+ next_dscp_value_ = net::DSCP_NO_CHANGE;
+ }
+
+ scoped_refptr<net::IOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&packet->data.front()));
+
+ int result;
+ base::Callback<void(int)> callback = base::Bind(&UdpTransport::OnSent,
+ weak_factory_.GetWeakPtr(),
+ buf,
+ packet,
+ cb);
+ if (client_connected_) {
+    // If we called Connect() earlier, we must call Write() instead of
+    // SendTo(); otherwise some platforms return ERR_SOCKET_IS_CONNECTED.
+ result = udp_socket_->Write(buf,
+ static_cast<int>(packet->data.size()),
+ callback);
+ } else if (!IsEmpty(remote_addr_)) {
+ result = udp_socket_->SendTo(buf,
+ static_cast<int>(packet->data.size()),
+ remote_addr_,
+ callback);
+ } else {
+ return true;
+ }
+
+ if (result == net::ERR_IO_PENDING) {
+ send_pending_ = true;
+ return false;
+ } else if (result < 0) {
+ LOG(ERROR) << "Failed to send packet: " << result << ".";
+ status_callback_.Run(TRANSPORT_SOCKET_ERROR);
+ return true;
+ } else {
+ // Successful send, re-start reading if needed.
+ ScheduleReceiveNextPacket();
+ return true;
+ }
+}
+
+void UdpTransport::OnSent(const scoped_refptr<net::IOBuffer>& buf,
+ PacketRef packet,
+ const base::Closure& cb,
+ int result) {
+ DCHECK(io_thread_proxy_->RunsTasksOnCurrentThread());
+
+ send_pending_ = false;
+ if (result < 0) {
+ LOG(ERROR) << "Failed to send packet: " << result << ".";
+ status_callback_.Run(TRANSPORT_SOCKET_ERROR);
+ } else {
+ // Successful send, re-start reading if needed.
+ ScheduleReceiveNextPacket();
+ }
+
+ if (!cb.is_null()) {
+ cb.Run();
+ }
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
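
The control flow of ReceiveNextPacket() above is worth isolating: reads that
complete synchronously are drained in a loop, and a "pending" status returns
early, with the same function re-entered later as the socket's completion
callback. A standalone sketch (the fake socket below is an assumption made
for illustration; not Chromium code):

#include <cstdio>

const int kErrIoPending = -1;  // stand-in for net::ERR_IO_PENDING

// Fake socket: two synchronously delivered packets, then "pending".
int RecvFrom(int* call_count) {
  static const int kResults[] = {16, 24, kErrIoPending};
  return kResults[(*call_count)++];
}

void ReceiveNextPacket(int length_or_status, int* call_count) {
  while (true) {
    if (length_or_status == kErrIoPending) {
      length_or_status = RecvFrom(call_count);
      if (length_or_status == kErrIoPending) {
        std::printf("read pending; wait for the callback\n");
        return;  // called again later with the real result
      }
    }
    if (length_or_status < 0)
      return;  // socket error
    std::printf("received a %d-byte packet\n", length_or_status);
    length_or_status = kErrIoPending;  // request the next packet
  }
}

int main() {
  int call_count = 0;
  ReceiveNextPacket(kErrIoPending, &call_count);
  return 0;
}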
diff --git a/chromium/media/cast/transport/transport/udp_transport.h b/chromium/media/cast/transport/transport/udp_transport.h
new file mode 100644
index 00000000000..1a568501d5f
--- /dev/null
+++ b/chromium/media/cast/transport/transport/udp_transport.h
@@ -0,0 +1,97 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_TRANSPORT_UDP_TRANSPORT_H_
+#define MEDIA_CAST_TRANSPORT_TRANSPORT_UDP_TRANSPORT_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_sender.h"
+#include "net/base/ip_endpoint.h"
+#include "net/base/net_util.h"
+#include "net/udp/udp_socket.h"
+
+namespace net {
+class IOBuffer;
+class IPEndPoint;
+class NetLog;
+} // namespace net
+
+namespace media {
+namespace cast {
+namespace transport {
+
+// This class implements the UDP transport mechanism for Cast.
+class UdpTransport : public PacketSender {
+ public:
+ // Construct a UDP transport.
+ // All methods must be called on |io_thread_proxy|.
+  // |local_end_point| specifies the address and port to bind to and listen
+  // on for incoming packets. If the value is 0.0.0.0:0 then no bind is
+  // performed.
+  // |remote_end_point| specifies the address and port to send packets
+  // to. If the value is 0.0.0.0:0 then the end point is set to the source
+  // address of the first packet received.
+ UdpTransport(
+ net::NetLog* net_log,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_thread_proxy,
+ const net::IPEndPoint& local_end_point,
+ const net::IPEndPoint& remote_end_point,
+ const CastTransportStatusCallback& status_callback);
+ virtual ~UdpTransport();
+
+ // Start receiving packets. Packets are submitted to |packet_receiver|.
+ void StartReceiving(const PacketReceiverCallback& packet_receiver);
+
+  // Set a new DSCP value on the socket. The value is applied right before
+  // the next send.
+ void SetDscp(net::DiffServCodePoint dscp);
+
+  // PacketSender implementation.
+ virtual bool SendPacket(PacketRef packet,
+ const base::Closure& cb) OVERRIDE;
+
+ private:
+ // Requests and processes packets from |udp_socket_|. This method is called
+ // once with |length_or_status| set to net::ERR_IO_PENDING to start receiving
+ // packets. Thereafter, it is called with some other value as the callback
+ // response from UdpSocket::RecvFrom().
+ void ReceiveNextPacket(int length_or_status);
+
+ // Schedule packet receiving, if needed.
+ void ScheduleReceiveNextPacket();
+
+ void OnSent(const scoped_refptr<net::IOBuffer>& buf,
+ PacketRef packet,
+ const base::Closure& cb,
+ int result);
+
+ const scoped_refptr<base::SingleThreadTaskRunner> io_thread_proxy_;
+ const net::IPEndPoint local_addr_;
+ net::IPEndPoint remote_addr_;
+ const scoped_ptr<net::UDPSocket> udp_socket_;
+ bool send_pending_;
+ bool receive_pending_;
+ bool client_connected_;
+ net::DiffServCodePoint next_dscp_value_;
+ scoped_ptr<Packet> next_packet_;
+ scoped_refptr<net::WrappedIOBuffer> recv_buf_;
+ net::IPEndPoint recv_addr_;
+ PacketReceiverCallback packet_receiver_;
+ const CastTransportStatusCallback status_callback_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<UdpTransport> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(UdpTransport);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_TRANSPORT_UDP_TRANSPORT_H_
diff --git a/chromium/media/cast/transport/transport/udp_transport_unittest.cc b/chromium/media/cast/transport/transport/udp_transport_unittest.cc
new file mode 100644
index 00000000000..26879492f05
--- /dev/null
+++ b/chromium/media/cast/transport/transport/udp_transport_unittest.cc
@@ -0,0 +1,100 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/transport/udp_transport.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "media/cast/test/utility/net_utility.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "net/base/net_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+class MockPacketReceiver {
+ public:
+ MockPacketReceiver(const base::Closure& callback)
+ : packet_callback_(callback) {}
+
+ void ReceivedPacket(scoped_ptr<Packet> packet) {
+ packet_ = std::string(packet->size(), '\0');
+ std::copy(packet->begin(), packet->end(), packet_.begin());
+ packet_callback_.Run();
+ }
+
+ std::string packet() const { return packet_; }
+ transport::PacketReceiverCallback packet_receiver() {
+ return base::Bind(&MockPacketReceiver::ReceivedPacket,
+ base::Unretained(this));
+ }
+
+ private:
+ std::string packet_;
+ base::Closure packet_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockPacketReceiver);
+};
+
+void SendPacket(UdpTransport* transport, Packet packet) {
+ base::Closure cb;
+ transport->SendPacket(new base::RefCountedData<Packet>(packet), cb);
+}
+
+static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ NOTREACHED();
+}
+
+TEST(UdpTransport, SendAndReceive) {
+ base::MessageLoopForIO message_loop;
+
+ net::IPEndPoint free_local_port1 = test::GetFreeLocalPort();
+ net::IPEndPoint free_local_port2 = test::GetFreeLocalPort();
+ net::IPAddressNumber empty_addr_number;
+ net::ParseIPLiteralToNumber("0.0.0.0", &empty_addr_number);
+
+ UdpTransport send_transport(NULL,
+ message_loop.message_loop_proxy(),
+ free_local_port1,
+ free_local_port2,
+ base::Bind(&UpdateCastTransportStatus));
+ UdpTransport recv_transport(NULL,
+ message_loop.message_loop_proxy(),
+ free_local_port2,
+ net::IPEndPoint(empty_addr_number, 0),
+ base::Bind(&UpdateCastTransportStatus));
+
+ Packet packet;
+ packet.push_back('t');
+ packet.push_back('e');
+ packet.push_back('s');
+ packet.push_back('t');
+
+ base::RunLoop run_loop;
+ MockPacketReceiver receiver1(run_loop.QuitClosure());
+ MockPacketReceiver receiver2(
+ base::Bind(&SendPacket, &recv_transport, packet));
+ send_transport.StartReceiving(receiver1.packet_receiver());
+ recv_transport.StartReceiving(receiver2.packet_receiver());
+
+ base::Closure cb;
+ send_transport.SendPacket(new base::RefCountedData<Packet>(packet), cb);
+ run_loop.Run();
+ EXPECT_TRUE(
+ std::equal(packet.begin(), packet.end(), receiver1.packet().begin()));
+ EXPECT_TRUE(
+ std::equal(packet.begin(), packet.end(), receiver2.packet().begin()));
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/transport/utility/transport_encryption_handler.cc b/chromium/media/cast/transport/utility/transport_encryption_handler.cc
new file mode 100644
index 00000000000..89db2cf95b3
--- /dev/null
+++ b/chromium/media/cast/transport/utility/transport_encryption_handler.cc
@@ -0,0 +1,76 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/transport/utility/transport_encryption_handler.h"
+
+#include "base/logging.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
+#include "media/cast/transport/cast_transport_defines.h"
+
+namespace media {
+namespace cast {
+namespace transport {
+
+TransportEncryptionHandler::TransportEncryptionHandler()
+ : key_(), encryptor_(), iv_mask_(), initialized_(false) {}
+
+TransportEncryptionHandler::~TransportEncryptionHandler() {}
+
+bool TransportEncryptionHandler::Initialize(std::string aes_key,
+ std::string aes_iv_mask) {
+ initialized_ = false;
+ if (aes_iv_mask.size() == kAesKeySize && aes_key.size() == kAesKeySize) {
+ iv_mask_ = aes_iv_mask;
+ key_.reset(
+ crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, aes_key));
+ encryptor_.reset(new crypto::Encryptor());
+ encryptor_->Init(key_.get(), crypto::Encryptor::CTR, std::string());
+ initialized_ = true;
+ } else if (aes_iv_mask.size() != 0 || aes_key.size() != 0) {
+ DCHECK_EQ(aes_iv_mask.size(), 0u)
+ << "Invalid Crypto configuration: aes_iv_mask.size";
+ DCHECK_EQ(aes_key.size(), 0u)
+ << "Invalid Crypto configuration: aes_key.size";
+ return false;
+ }
+ return true;
+}
+
+bool TransportEncryptionHandler::Encrypt(uint32 frame_id,
+ const base::StringPiece& data,
+ std::string* encrypted_data) {
+ if (!initialized_)
+ return false;
+ if (!encryptor_->SetCounter(GetAesNonce(frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ if (!encryptor_->Encrypt(data, encrypted_data)) {
+ NOTREACHED() << "Encrypt error";
+ return false;
+ }
+ return true;
+}
+
+bool TransportEncryptionHandler::Decrypt(uint32 frame_id,
+ const base::StringPiece& ciphertext,
+ std::string* plaintext) {
+ if (!initialized_) {
+ return false;
+ }
+ if (!encryptor_->SetCounter(transport::GetAesNonce(frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ if (!encryptor_->Decrypt(ciphertext, plaintext)) {
+ VLOG(1) << "Decryption error";
+ return false;
+ }
+ return true;
+}
+
+} // namespace transport
+} // namespace cast
+} // namespace media
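
The CTR counter passed to SetCounter() above is derived per frame. A hedged
standalone sketch of that derivation (this mirrors what GetAesNonce() is
understood to do; the byte offsets are assumptions for illustration): the
frame id, serialized big-endian into a 16-byte block and XORed with the
shared IV mask, gives each frame a distinct counter that both sender and
receiver can reproduce independently.

#include <cstddef>
#include <cstdint>
#include <string>

std::string MakeFrameNonce(uint32_t frame_id, const std::string& iv_mask) {
  const size_t kAesBlockSize = 16;
  std::string nonce(kAesBlockSize, '\0');
  nonce[8] = static_cast<char>(frame_id >> 24);  // big-endian frame id
  nonce[9] = static_cast<char>(frame_id >> 16);
  nonce[10] = static_cast<char>(frame_id >> 8);
  nonce[11] = static_cast<char>(frame_id);
  for (size_t i = 0; i < kAesBlockSize && i < iv_mask.size(); ++i)
    nonce[i] ^= iv_mask[i];  // mix in the shared IV mask
  return nonce;
}

int main() {
  const std::string mask(16, '\x5a');
  // Distinct frames must yield distinct counters.
  return MakeFrameNonce(1, mask) != MakeFrameNonce(2, mask) ? 0 : 1;
}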
diff --git a/chromium/media/cast/transport/utility/transport_encryption_handler.h b/chromium/media/cast/transport/utility/transport_encryption_handler.h
new file mode 100644
index 00000000000..06d0e3f34d6
--- /dev/null
+++ b/chromium/media/cast/transport/utility/transport_encryption_handler.h
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TRANSPORT_TRANSPORT_UTILITY_ENCRYPTION_HANDLER_H_
+#define MEDIA_CAST_TRANSPORT_TRANSPORT_UTILITY_ENCRYPTION_HANDLER_H_
+
+// Helper class to handle encryption for the Cast Transport library.
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/non_thread_safe.h"
+
+namespace crypto {
+class Encryptor;
+class SymmetricKey;
+}
+
+namespace media {
+namespace cast {
+namespace transport {
+
+class TransportEncryptionHandler : public base::NonThreadSafe {
+ public:
+ TransportEncryptionHandler();
+ ~TransportEncryptionHandler();
+
+ bool Initialize(std::string aes_key, std::string aes_iv_mask);
+
+ bool Encrypt(uint32 frame_id,
+ const base::StringPiece& data,
+ std::string* encrypted_data);
+
+ bool Decrypt(uint32 frame_id,
+ const base::StringPiece& ciphertext,
+ std::string* plaintext);
+
+ // TODO(miu): This naming is very misleading. It should be called
+  // is_activated() since Initialize() without keys (i.e., crypto is disabled)
+ // may have succeeded.
+ bool initialized() const { return initialized_; }
+
+ private:
+ scoped_ptr<crypto::SymmetricKey> key_;
+ scoped_ptr<crypto::Encryptor> encryptor_;
+ std::string iv_mask_;
+ bool initialized_;
+
+ DISALLOW_COPY_AND_ASSIGN(TransportEncryptionHandler);
+};
+
+} // namespace transport
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TRANSPORT_TRANSPORT_UTILITY_ENCRYPTION_HANDLER_H_
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
deleted file mode 100644
index 10fcb85d36e..00000000000
--- a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "media/base/video_frame.h"
-#include "media/base/video_util.h"
-#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-namespace cast {
-
-void LogFrameDecodedEvent(CastEnvironment* const cast_environment,
- uint32 frame_id) {
-// TODO(mikhal): Sort out passing of rtp_timestamp.
-// cast_environment->Logging()->InsertFrameEvent(kVideoFrameDecoded,
-// 0, frame_id);
-}
-
-Vp8Decoder::Vp8Decoder(scoped_refptr<CastEnvironment> cast_environment)
- : decoder_(new vpx_dec_ctx_t()),
- cast_environment_(cast_environment) {
- // Make sure that we initialize the decoder from the correct thread.
- cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
- base::Bind(&Vp8Decoder::InitDecoder, base::Unretained(this)));
-}
-
-Vp8Decoder::~Vp8Decoder() {}
-
-void Vp8Decoder::InitDecoder() {
- vpx_codec_dec_cfg_t cfg;
- // Initializing to use one core.
- cfg.threads = 1;
- vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC;
-
- if (vpx_codec_dec_init(decoder_.get(), vpx_codec_vp8_dx(), &cfg, flags)) {
- DCHECK(false) << "VP8 decode error";
- }
-}
-
-bool Vp8Decoder::Decode(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_cb) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
- const int frame_id_int = static_cast<int>(encoded_frame->frame_id);
- VLOG(1) << "VP8 decode frame:" << frame_id_int
- << " sized:" << encoded_frame->data.size();
-
- if (encoded_frame->data.empty()) return false;
-
- vpx_codec_iter_t iter = NULL;
- vpx_image_t* img;
- if (vpx_codec_decode(
- decoder_.get(),
- reinterpret_cast<const uint8*>(encoded_frame->data.data()),
- static_cast<unsigned int>(encoded_frame->data.size()),
- 0,
- 1 /* real time*/)) {
- VLOG(1) << "Failed to decode VP8 frame.";
- return false;
- }
-
- img = vpx_codec_get_frame(decoder_.get(), &iter);
- if (img == NULL) {
- VLOG(1) << "Skip rendering VP8 frame:" << frame_id_int;
- return false;
- }
-
- gfx::Size visible_size(img->d_w, img->d_h);
- gfx::Size full_size(img->stride[VPX_PLANE_Y], img->d_h);
- DCHECK(VideoFrame::IsValidConfig(VideoFrame::I420, visible_size,
- gfx::Rect(visible_size), full_size));
- // Temp timing setting - will sort out timing in a follow up cl.
- scoped_refptr<VideoFrame> decoded_frame =
- VideoFrame::CreateFrame(VideoFrame::I420, visible_size,
- gfx::Rect(visible_size), full_size, base::TimeDelta());
-
- // Copy each plane individually (need to account for stride).
- // TODO(mikhal): Eliminate copy once http://crbug.com/321856 is resolved.
- CopyPlane(VideoFrame::kYPlane, img->planes[VPX_PLANE_Y],
- img->stride[VPX_PLANE_Y], img->d_h, decoded_frame.get());
- CopyPlane(VideoFrame::kUPlane, img->planes[VPX_PLANE_U],
- img->stride[VPX_PLANE_U], (img->d_h + 1) / 2, decoded_frame.get());
- CopyPlane(VideoFrame::kVPlane, img->planes[VPX_PLANE_V],
- img->stride[VPX_PLANE_V], (img->d_h + 1) / 2, decoded_frame.get());
-
- // Log:: Decoding complete (should be called from the main thread).
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
- LogFrameDecodedEvent, cast_environment_,encoded_frame->frame_id));
-
- VLOG(1) << "Decoded frame " << frame_id_int;
- // Frame decoded - return frame to the user via callback.
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(frame_decoded_cb, decoded_frame, render_time));
-
- return true;
-}
-
-} // namespace cast
-} // namespace media
-
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
deleted file mode 100644
index 4bc9434d2d2..00000000000
--- a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of the source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_vp8_decoder',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/libvpx/',
- ],
- 'sources': [
- 'vp8_decoder.cc',
- 'vp8_decoder.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
deleted file mode 100644
index 6a93c41abc9..00000000000
--- a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
-#define MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-
-typedef struct vpx_codec_ctx vpx_dec_ctx_t;
-
-// TODO(mikhal): Look into reusing VpxVideoDecoder.
-namespace media {
-namespace cast {
-
-// This class is not thread safe; it's only called from the cast video decoder
-// thread.
-class Vp8Decoder : public base::NonThreadSafe {
- public:
- explicit Vp8Decoder(scoped_refptr<CastEnvironment> cast_environment);
- ~Vp8Decoder();
-
- // Decode frame - The decoded frame will be passed via the callback.
- // Will return false in case of error, and then it's up to the caller to
- // release the memory.
- // Ownership of the encoded_frame does not pass to the Vp8Decoder.
- bool Decode(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_cb);
-
- private:
- // Initialize the decoder.
- void InitDecoder();
- scoped_ptr<vpx_dec_ctx_t> decoder_;
- scoped_refptr<CastEnvironment> cast_environment_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
diff --git a/chromium/media/cast/video_receiver/video_decoder.cc b/chromium/media/cast/video_receiver/video_decoder.cc
deleted file mode 100644
index 360cdaa36e9..00000000000
--- a/chromium/media/cast/video_receiver/video_decoder.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/video_receiver/video_decoder.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
-
-namespace media {
-namespace cast {
-
-VideoDecoder::VideoDecoder(const VideoReceiverConfig& video_config,
- scoped_refptr<CastEnvironment> cast_environment)
- : codec_(video_config.codec),
- vp8_decoder_() {
- switch (video_config.codec) {
- case kVp8:
- vp8_decoder_.reset(new Vp8Decoder(cast_environment));
- break;
- case kH264:
- NOTIMPLEMENTED();
- break;
- case kExternalVideo:
- DCHECK(false) << "Invalid codec";
- break;
- }
-}
-
-VideoDecoder::~VideoDecoder() {}
-
-bool VideoDecoder::DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback&
- frame_decoded_cb) {
- DCHECK(encoded_frame->codec == codec_) << "Invalid codec";
- DCHECK_GT(encoded_frame->data.size(), GG_UINT64_C(0)) << "Empty video frame";
- return vp8_decoder_->Decode(encoded_frame, render_time, frame_decoded_cb);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/video_receiver/video_decoder.h b/chromium/media/cast/video_receiver/video_decoder.h
deleted file mode 100644
index 97a8a62cc70..00000000000
--- a/chromium/media/cast/video_receiver/video_decoder.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
-#define MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_receiver.h"
-
-namespace media {
-namespace cast {
-
-class Vp8Decoder;
-class VideoFrame;
-
-// This class is not thread safe; it's only called from the cast video decoder
-// thread.
-class VideoDecoder : public base::NonThreadSafe {
- public:
- VideoDecoder(const VideoReceiverConfig& video_config,
- scoped_refptr<CastEnvironment> cast_environment);
- virtual ~VideoDecoder();
-
- // Decode a video frame. Decoded (raw) frame will be returned via the
- // provided callback
- bool DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_cb);
-
- private:
- VideoCodec codec_;
- scoped_ptr<Vp8Decoder> vp8_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
diff --git a/chromium/media/cast/video_receiver/video_decoder_unittest.cc b/chromium/media/cast/video_receiver/video_decoder_unittest.cc
deleted file mode 100644
index 6405d1d7bee..00000000000
--- a/chromium/media/cast/video_receiver/video_decoder_unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "base/time/tick_clock.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "media/cast/video_receiver/video_decoder.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-// Random frame size for testing.
-const int kFrameSize = 2345;
-static const int64 kStartMillisecond = GG_INT64_C(1245);
-
-namespace {
-class DecodeTestFrameCallback :
- public base::RefCountedThreadSafe<DecodeTestFrameCallback> {
- public:
- DecodeTestFrameCallback() {}
-
- void DecodeComplete(const scoped_refptr<media::VideoFrame>& decoded_frame,
- const base::TimeTicks& render_time) {}
- protected:
- virtual ~DecodeTestFrameCallback() {}
- private:
- friend class base::RefCountedThreadSafe<DecodeTestFrameCallback>;
-};
-} // namespace
-
-class VideoDecoderTest : public ::testing::Test {
- protected:
- VideoDecoderTest()
- : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
- cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig())),
- test_callback_(new DecodeTestFrameCallback()) {
- // Configure to vp8.
- config_.codec = kVp8;
- config_.use_external_decoder = false;
- decoder_.reset(new VideoDecoder(config_, cast_environment_));
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- }
-
- virtual ~VideoDecoderTest() {}
-
- scoped_ptr<VideoDecoder> decoder_;
- VideoReceiverConfig config_;
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<DecodeTestFrameCallback> test_callback_;
-};
-
-// TODO(pwestin): EXPECT_DEATH tests can not pass valgrind.
-TEST_F(VideoDecoderTest, DISABLED_SizeZero) {
- EncodedVideoFrame encoded_frame;
- base::TimeTicks render_time;
- encoded_frame.codec = kVp8;
- EXPECT_DEATH(
- decoder_->DecodeVideoFrame(
- &encoded_frame, render_time,
- base::Bind(&DecodeTestFrameCallback::DecodeComplete, test_callback_)),
- "Empty frame");
-}
-
-// TODO(pwestin): EXPECT_DEATH tests can not pass valgrind.
-TEST_F(VideoDecoderTest, DISABLED_InvalidCodec) {
- EncodedVideoFrame encoded_frame;
- base::TimeTicks render_time;
- encoded_frame.data.assign(kFrameSize, 0);
- encoded_frame.codec = kExternalVideo;
- EXPECT_DEATH(
- decoder_->DecodeVideoFrame(&encoded_frame, render_time, base::Bind(
- &DecodeTestFrameCallback::DecodeComplete, test_callback_)),
- "Invalid codec");
-}
-
-// TODO(pwestin): Test decoding a real frame.
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/video_receiver/video_receiver.cc b/chromium/media/cast/video_receiver/video_receiver.cc
deleted file mode 100644
index 98bed1fc699..00000000000
--- a/chromium/media/cast/video_receiver/video_receiver.cc
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/video_receiver/video_receiver.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "crypto/encryptor.h"
-#include "crypto/symmetric_key.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/framer/framer.h"
-#include "media/cast/video_receiver/video_decoder.h"
-
-namespace media {
-namespace cast {
-
-const int64 kMinSchedulingDelayMs = 1;
-
-static const int64 kMinTimeBetweenOffsetUpdatesMs = 2000;
-static const int kTimeOffsetFilter = 8;
-static const int64_t kMinProcessIntervalMs = 5;
-
-// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
-// Used to pass payload data into the video receiver.
-class LocalRtpVideoData : public RtpData {
- public:
- explicit LocalRtpVideoData(VideoReceiver* video_receiver)
- : video_receiver_(video_receiver) {}
-
- virtual ~LocalRtpVideoData() {}
-
- virtual void OnReceivedPayloadData(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader* rtp_header) OVERRIDE {
- video_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
- *rtp_header);
- }
-
- private:
- VideoReceiver* video_receiver_;
-};
-
-// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
-// Used to convey cast-specific feedback from receiver to sender.
-// Callback triggered by the Framer (cast message builder).
-class LocalRtpVideoFeedback : public RtpPayloadFeedback {
- public:
- explicit LocalRtpVideoFeedback(VideoReceiver* video_receiver)
- : video_receiver_(video_receiver) {
- }
-
- virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
- video_receiver_->CastFeedback(cast_message);
- }
-
- private:
- VideoReceiver* video_receiver_;
-};
-
-// Local implementation of RtpReceiverStatistics (defined by rtcp.h).
-// Used to pass statistics data from the RTP module to the RTCP module.
-class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
- public:
- explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver)
- : rtp_receiver_(rtp_receiver) {
- }
-
- virtual void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter) OVERRIDE {
- rtp_receiver_->GetStatistics(fraction_lost,
- cumulative_lost,
- extended_high_sequence_number,
- jitter);
- }
-
- private:
- RtpReceiver* rtp_receiver_;
-};
-
-VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const VideoReceiverConfig& video_config,
- PacedPacketSender* const packet_sender)
- : cast_environment_(cast_environment),
- codec_(video_config.codec),
- target_delay_delta_(
- base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
- frame_delay_(base::TimeDelta::FromMilliseconds(
- 1000 / video_config.max_frame_rate)),
- incoming_payload_callback_(new LocalRtpVideoData(this)),
- incoming_payload_feedback_(new LocalRtpVideoFeedback(this)),
- rtp_receiver_(cast_environment_->Clock(), NULL, &video_config,
- incoming_payload_callback_.get()),
- rtp_video_receiver_statistics_(
- new LocalRtpReceiverStatistics(&rtp_receiver_)),
- time_incoming_packet_updated_(false),
- incoming_rtp_timestamp_(0),
- weak_factory_(this) {
- int max_unacked_frames = video_config.rtp_max_delay_ms *
- video_config.max_frame_rate / 1000;
- DCHECK(max_unacked_frames) << "Invalid argument";
-
- if (video_config.aes_iv_mask.size() == kAesKeySize &&
- video_config.aes_key.size() == kAesKeySize) {
- iv_mask_ = video_config.aes_iv_mask;
- crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
- crypto::SymmetricKey::AES, video_config.aes_key);
- decryptor_.reset(new crypto::Encryptor());
- decryptor_->Init(key, crypto::Encryptor::CTR, std::string());
- } else if (video_config.aes_iv_mask.size() != 0 ||
- video_config.aes_key.size() != 0) {
- DCHECK(false) << "Invalid crypto configuration";
- }
-
- framer_.reset(new Framer(cast_environment->Clock(),
- incoming_payload_feedback_.get(),
- video_config.incoming_ssrc,
- video_config.decoder_faster_than_max_frame_rate,
- max_unacked_frames));
- if (!video_config.use_external_decoder) {
- video_decoder_.reset(new VideoDecoder(video_config, cast_environment));
- }
-
- rtcp_.reset(
- new Rtcp(cast_environment_,
- NULL,
- packet_sender,
- NULL,
- rtp_video_receiver_statistics_.get(),
- video_config.rtcp_mode,
- base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- video_config.feedback_ssrc,
- video_config.incoming_ssrc,
- video_config.rtcp_c_name));
-}
-
-VideoReceiver::~VideoReceiver() {}
-
-void VideoReceiver::InitializeTimers() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- ScheduleNextRtcpReport();
- ScheduleNextCastMessage();
-}
-
-void VideoReceiver::GetRawVideoFrame(
- const VideoFrameDecodedCallback& callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- GetEncodedVideoFrame(base::Bind(&VideoReceiver::DecodeVideoFrame,
- base::Unretained(this), callback));
-}
-
-// Called when we have a frame to decode.
-void VideoReceiver::DecodeVideoFrame(
- const VideoFrameDecodedCallback& callback,
- scoped_ptr<EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& render_time) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // Hand the ownership of the encoded frame to the decode thread.
- cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
- base::Bind(&VideoReceiver::DecodeVideoFrameThread, base::Unretained(this),
- base::Passed(&encoded_frame), render_time, callback));
-}
-
-// Utility function to run the decoder on a designated decoding thread.
-void VideoReceiver::DecodeVideoFrameThread(
- scoped_ptr<EncodedVideoFrame> encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
- DCHECK(video_decoder_);
-
- if (!(video_decoder_->DecodeVideoFrame(encoded_frame.get(), render_time,
- frame_decoded_callback))) {
- // This will happen if we decide to decode but not show a frame.
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetRawVideoFrame, base::Unretained(this),
- frame_decoded_callback));
- }
-}
-
-bool VideoReceiver::DecryptVideoFrame(
- scoped_ptr<EncodedVideoFrame>* video_frame) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(decryptor_) << "Invalid state";
-
- if (!decryptor_->SetCounter(GetAesNonce((*video_frame)->frame_id,
- iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
- return false;
- }
- std::string decrypted_video_data;
- if (!decryptor_->Decrypt((*video_frame)->data, &decrypted_video_data)) {
- VLOG(1) << "Decryption error";
- // Give up on this frame, release it from jitter buffer.
- framer_->ReleaseFrame((*video_frame)->frame_id);
- return false;
- }
- (*video_frame)->data.swap(decrypted_video_data);
- return true;
-}
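
A minimal sketch of the decrypt path above, using only the crypto:: calls
already visible in this file (SymmetricKey::Import, Encryptor::Init with CTR
mode, SetCounter, Decrypt). The nonce layout produced by GetAesNonce() is not
reproduced here; a caller-supplied counter string stands in for it, and the
helper name is hypothetical.

#include <string>

#include "base/memory/scoped_ptr.h"
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"

// Hypothetical helper; |counter| stands in for GetAesNonce(frame_id, iv_mask).
bool DecryptFrameData(const std::string& aes_key,
                      const std::string& counter,
                      const std::string& cipher_text,
                      std::string* plain_text) {
  scoped_ptr<crypto::SymmetricKey> key(
      crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, aes_key));
  crypto::Encryptor decryptor;
  if (!key || !decryptor.Init(key.get(), crypto::Encryptor::CTR, std::string()))
    return false;
  return decryptor.SetCounter(counter) &&
         decryptor.Decrypt(cipher_text, plain_text);
}
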
-
-// Called from the main cast thread.
-void VideoReceiver::GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
- uint32 rtp_timestamp = 0;
- bool next_frame = false;
-
- if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &rtp_timestamp,
- &next_frame)) {
- // We have no video frames. Wait for new packet(s).
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
-
- if (decryptor_ && !DecryptVideoFrame(&encoded_frame)) {
- // Logging already done.
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
-
- base::TimeTicks render_time;
- if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
- &render_time)) {
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback, base::Passed(&encoded_frame), render_time));
- } else {
-    // We have a video frame; however, we are missing packets and we have
-    // time to wait for new packet(s).
- queued_encoded_callbacks_.push_back(callback);
- }
-}
-
-// Decides whether to pull the encoded video frame from the framer: either
-// this is the next frame in sequence, or we are running out of time and have
-// to pull the following frame anyway.
-// If the frame is too old to be rendered, we set the don't-show flag in the
-// video bitstream where possible.
-bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
- bool next_frame, scoped_ptr<EncodedVideoFrame>* encoded_frame,
- base::TimeTicks* render_time) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- *render_time = GetRenderTime(now, rtp_timestamp);
-
- // TODO(mikhal): Store actual render time and not diff.
- cast_environment_->Logging()->InsertFrameEventWithDelay(kVideoRenderDelay,
- rtp_timestamp, (*encoded_frame)->frame_id, now - *render_time);
-
-  // Minimum time remaining before a frame is due to be rendered at which we
-  // still pull it for decode.
- base::TimeDelta min_wait_delta = frame_delay_;
- base::TimeDelta time_until_render = *render_time - now;
- if (!next_frame && (time_until_render > min_wait_delta)) {
-    // Example:
-    // We have decoded frame 1 and we have received the complete frame 3, but
-    // not frame 2. If we still have time before frame 3 should be rendered,
-    // we will wait for 2 to arrive; however, if 2 never shows up, this timer
-    // will fire and we will pull out frame 3 for decoding and rendering.
- base::TimeDelta time_until_release = time_until_render - min_wait_delta;
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
- time_until_release);
- VLOG(1) << "Wait before releasing frame "
- << static_cast<int>((*encoded_frame)->frame_id)
- << " time " << time_until_release.InMilliseconds();
- return false;
- }
-
- base::TimeDelta dont_show_timeout_delta =
- base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
- if (codec_ == kVp8 && time_until_render < dont_show_timeout_delta) {
- (*encoded_frame)->data[0] &= 0xef;
- VLOG(1) << "Don't show frame "
- << static_cast<int>((*encoded_frame)->frame_id)
- << " time_until_render:" << time_until_render.InMilliseconds();
- } else {
- VLOG(1) << "Show frame "
- << static_cast<int>((*encoded_frame)->frame_id)
- << " time_until_render:" << time_until_render.InMilliseconds();
- }
- // We have a copy of the frame, release this one.
- framer_->ReleaseFrame((*encoded_frame)->frame_id);
- (*encoded_frame)->codec = codec_;
- return true;
-}
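
A note on the data[0] &= 0xef above: it clears bit 4 of the first byte of the
VP8 uncompressed data chunk header, which is the show_frame flag (RFC 6386).
A minimal standalone sketch of that operation:

#include <cstdint>
#include <vector>

// Clear the VP8 show_frame flag (bit 4, mask 0x10) in the frame tag so the
// decoder consumes the frame for prediction without displaying it.
void MarkFrameAsDontShow(std::vector<uint8_t>* vp8_payload) {
  if (!vp8_payload->empty())
    (*vp8_payload)[0] &= 0xef;
}
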
-
-void VideoReceiver::PlayoutTimeout() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (queued_encoded_callbacks_.empty()) return;
-
- uint32 rtp_timestamp = 0;
- bool next_frame = false;
- scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
-
- if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &rtp_timestamp,
- &next_frame)) {
-    // We have no video frames. Wait for new packet(s).
-    // Since the application can post multiple VideoFrameEncodedCallbacks and
-    // we only check the next frame to play out, we might have multiple
-    // timeout events firing after each other; however, this should be rare.
-    VLOG(1) << "Failed to retrieve a complete frame at this point in time";
- return;
- }
- VLOG(1) << "PlayoutTimeout retrieved frame "
- << static_cast<int>(encoded_frame->frame_id);
-
- if (decryptor_ && !DecryptVideoFrame(&encoded_frame)) {
- // Logging already done.
- return;
- }
-
- base::TimeTicks render_time;
- if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
- &render_time)) {
- if (!queued_encoded_callbacks_.empty()) {
- VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
- queued_encoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(callback, base::Passed(&encoded_frame), render_time));
- }
- } else {
-    // We have a video frame; however, we are missing packets and we have
-    // time to wait for new packet(s).
- }
-}
-
-base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
- uint32 rtp_timestamp) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  // Sender's time in ms when this frame was captured.
-  // Note: the sender's clock and our local clock might not be synced.
- base::TimeTicks rtp_timestamp_in_ticks;
-
- if (time_offset_.InMilliseconds() == 0) {
- if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
-      // We have not received any RTCP to sync the stream; play it out as
-      // soon as possible.
- return now;
- }
- time_offset_ = time_incoming_packet_ - rtp_timestamp_in_ticks;
- } else if (time_incoming_packet_updated_) {
- if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
- // Time to update the time_offset.
- base::TimeDelta time_offset =
- time_incoming_packet_ - rtp_timestamp_in_ticks;
- time_offset_ = ((kTimeOffsetFilter - 1) * time_offset_ + time_offset)
- / kTimeOffsetFilter;
- }
- }
- // Reset |time_incoming_packet_updated_| to enable a future measurement.
- time_incoming_packet_updated_ = false;
- if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- rtp_timestamp,
- &rtp_timestamp_in_ticks)) {
- // This can fail if we have not received any RTCP packets in a long time.
- return now;
- }
- return (rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_);
-}
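
The offset update above is a simple recursive averaging filter. A standalone
sketch of the arithmetic, assuming kTimeOffsetFilter is 8 (the real value is
defined in cast_defines.h and is an assumption here):

#include <cstdint>

int64_t UpdateClockOffsetUs(int64_t filtered_offset_us,
                            int64_t measured_offset_us) {
  const int64_t kTimeOffsetFilter = 8;  // Assumed filter length.
  // new = ((N - 1) * old + measurement) / N: each measurement moves the
  // estimate by 1/N of the error, damping jitter in individual samples.
  return ((kTimeOffsetFilter - 1) * filtered_offset_us + measured_offset_us) /
         kTimeOffsetFilter;
}
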
-
-void VideoReceiver::IncomingPacket(const uint8* packet, size_t length,
- const base::Closure callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (Rtcp::IsRtcpPacket(packet, length)) {
- rtcp_->IncomingRtcpPacket(packet, length);
- } else {
- rtp_receiver_.ReceivedPacket(packet, length);
- }
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
-}
-
-void VideoReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- if (time_incoming_packet_.is_null() || now - time_incoming_packet_ >
- base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
- if (time_incoming_packet_.is_null()) InitializeTimers();
- incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
- time_incoming_packet_ = now;
- time_incoming_packet_updated_ = true;
- }
-
- cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
- rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
- rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
-
- bool complete = framer_->InsertPacket(payload_data, payload_size, rtp_header);
-
- if (!complete) return; // Video frame not complete; wait for more packets.
- if (queued_encoded_callbacks_.empty()) return; // No pending callback.
-
- VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
- queued_encoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::GetEncodedVideoFrame,
- weak_factory_.GetWeakPtr(), callback));
-}
-
-// Send a cast feedback message. The actual message is created in the framer
-// (cast message builder).
-void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- // TODO(pwestin): wire up log messages.
- rtcp_->SendRtcpFromRtpReceiver(&cast_message, NULL);
-  time_last_sent_cast_message_ = cast_environment_->Clock()->NowTicks();
-}
-
-// Cast messages should be sent within a maximum interval. Schedule a call
-// if not triggered elsewhere, e.g., by the cast message builder.
-void VideoReceiver::ScheduleNextCastMessage() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks send_time;
- framer_->TimeToSendNextCastMessage(&send_time);
-
- base::TimeDelta time_to_send = send_time -
- cast_environment_->Clock()->NowTicks();
- time_to_send = std::max(time_to_send,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::SendNextCastMessage,
- weak_factory_.GetWeakPtr()), time_to_send);
-}
-
-void VideoReceiver::SendNextCastMessage() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- framer_->SendCastMessage(); // Will only send a message if it is time.
- ScheduleNextCastMessage();
-}
-
-// Schedule the next RTCP report to be sent back to the sender.
-void VideoReceiver::ScheduleNextRtcpReport() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
- cast_environment_->Clock()->NowTicks();
-
- time_to_next = std::max(time_to_next,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
-
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoReceiver::SendNextRtcpReport,
- weak_factory_.GetWeakPtr()), time_to_next);
-}
-
-void VideoReceiver::SendNextRtcpReport() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
- ScheduleNextRtcpReport();
-}
-
-} // namespace cast
-} // namespace media
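
ScheduleNextCastMessage() and ScheduleNextRtcpReport() above share one
pattern: compute the delay until the next event and clamp it to a minimum so
a past-due event can never schedule a zero or negative delay. A standalone
sketch (the value of kMinSchedulingDelayMs is an assumption; the constant
lives in cast_defines.h):

#include <algorithm>
#include <cstdint>

int64_t ClampedDelayMs(int64_t event_time_ms, int64_t now_ms) {
  const int64_t kMinSchedulingDelayMs = 1;  // Assumed minimum delay.
  return std::max(event_time_ms - now_ms, kMinSchedulingDelayMs);
}
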
diff --git a/chromium/media/cast/video_receiver/video_receiver.gypi b/chromium/media/cast/video_receiver/video_receiver.gypi
deleted file mode 100644
index e1a9902872e..00000000000
--- a/chromium/media/cast/video_receiver/video_receiver.gypi
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'cast_video_receiver',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc',
- ],
- 'sources': [
- 'video_decoder.h',
- 'video_decoder.cc',
- 'video_receiver.h',
- 'video_receiver.cc',
- ], # source
- 'dependencies': [
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- 'framer/framer.gyp:cast_framer',
- 'video_receiver/codecs/vp8/vp8_decoder.gyp:cast_vp8_decoder',
- 'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
- ],
- },
- ],
-}
-
-
diff --git a/chromium/media/cast/video_receiver/video_receiver.h b/chromium/media/cast/video_receiver/video_receiver.h
deleted file mode 100644
index fbc3653a514..00000000000
--- a/chromium/media/cast/video_receiver/video_receiver.h
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
-#define MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-
-namespace crypto {
- class Encryptor;
-}
-
-namespace media {
-namespace cast {
-
-class Framer;
-class LocalRtpVideoData;
-class LocalRtpVideoFeedback;
-class PacedPacketSender;
-class PeerVideoReceiver;
-class Rtcp;
-class RtpReceiverStatistics;
-class VideoDecoder;
-
-// Should only be called from the main cast thread.
-class VideoReceiver : public base::NonThreadSafe,
- public base::SupportsWeakPtr<VideoReceiver> {
- public:
- VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const VideoReceiverConfig& video_config,
- PacedPacketSender* const packet_sender);
-
- virtual ~VideoReceiver();
-
- // Request a raw frame. Will return frame via callback when available.
- void GetRawVideoFrame(const VideoFrameDecodedCallback& callback);
-
- // Request an encoded frame. Will return frame via callback when available.
- void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback);
-
-  // Insert an RTP packet into the video receiver.
- void IncomingPacket(const uint8* packet, size_t length,
- const base::Closure callback);
-
- protected:
- void IncomingParsedRtpPacket(const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader& rtp_header);
-
- void DecodeVideoFrameThread(
- scoped_ptr<EncodedVideoFrame> encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback);
-
- private:
- friend class LocalRtpVideoData;
- friend class LocalRtpVideoFeedback;
-
- void CastFeedback(const RtcpCastMessage& cast_message);
-
- void DecodeVideoFrame(const VideoFrameDecodedCallback& callback,
- scoped_ptr<EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& render_time);
-
- bool DecryptVideoFrame(scoped_ptr<EncodedVideoFrame>* video_frame);
-
- bool PullEncodedVideoFrame(uint32 rtp_timestamp,
- bool next_frame,
- scoped_ptr<EncodedVideoFrame>* encoded_frame,
- base::TimeTicks* render_time);
-
- void PlayoutTimeout();
-
-  // Returns the render time based on the current time and the RTP timestamp.
- base::TimeTicks GetRenderTime(base::TimeTicks now, uint32 rtp_timestamp);
-
- void InitializeTimers();
-
- // Schedule timing for the next cast message.
- void ScheduleNextCastMessage();
-
- // Schedule timing for the next RTCP report.
- void ScheduleNextRtcpReport();
-
- // Actually send the next cast message.
- void SendNextCastMessage();
-
- // Actually send the next RTCP report.
- void SendNextRtcpReport();
-
- scoped_ptr<VideoDecoder> video_decoder_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<Framer> framer_;
- const VideoCodec codec_;
- base::TimeDelta target_delay_delta_;
- base::TimeDelta frame_delay_;
- scoped_ptr<LocalRtpVideoData> incoming_payload_callback_;
- scoped_ptr<LocalRtpVideoFeedback> incoming_payload_feedback_;
- RtpReceiver rtp_receiver_;
- scoped_ptr<Rtcp> rtcp_;
- scoped_ptr<RtpReceiverStatistics> rtp_video_receiver_statistics_;
- base::TimeTicks time_last_sent_cast_message_;
- base::TimeDelta time_offset_; // Sender-receiver offset estimation.
- scoped_ptr<crypto::Encryptor> decryptor_;
- std::string iv_mask_;
- std::list<VideoFrameEncodedCallback> queued_encoded_callbacks_;
- bool time_incoming_packet_updated_;
- base::TimeTicks time_incoming_packet_;
- uint32 incoming_rtp_timestamp_;
-
- base::WeakPtrFactory<VideoReceiver> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoReceiver);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
diff --git a/chromium/media/cast/video_receiver/video_receiver_unittest.cc b/chromium/media/cast/video_receiver/video_receiver_unittest.cc
deleted file mode 100644
index 8001ac430d6..00000000000
--- a/chromium/media/cast/video_receiver/video_receiver_unittest.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "media/cast/video_receiver/video_receiver.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-static const int kPacketSize = 1500;
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-namespace {
-// Ref-counted so it can safely be used across threads.
-class TestVideoReceiverCallback :
- public base::RefCountedThreadSafe<TestVideoReceiverCallback> {
- public:
- TestVideoReceiverCallback()
- : num_called_(0) {}
-
- // TODO(mikhal): Set and check expectations.
- void DecodeComplete(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& render_time) {
- ++num_called_;
- }
-
- void FrameToDecode(scoped_ptr<EncodedVideoFrame> video_frame,
- const base::TimeTicks& render_time) {
- EXPECT_TRUE(video_frame->key_frame);
- EXPECT_EQ(kVp8, video_frame->codec);
- ++num_called_;
- }
-
- int number_times_called() const { return num_called_;}
-
- protected:
- virtual ~TestVideoReceiverCallback() {}
-
- private:
- friend class base::RefCountedThreadSafe<TestVideoReceiverCallback>;
-
- int num_called_;
-};
-} // namespace
-
-class PeerVideoReceiver : public VideoReceiver {
- public:
- PeerVideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
- const VideoReceiverConfig& video_config,
- PacedPacketSender* const packet_sender)
- : VideoReceiver(cast_environment, video_config, packet_sender) {
- }
- using VideoReceiver::IncomingParsedRtpPacket;
-};
-
-
-class VideoReceiverTest : public ::testing::Test {
- protected:
- VideoReceiverTest() {
-    // Configure to use the VP8 software implementation.
- config_.codec = kVp8;
- config_.use_external_decoder = false;
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
- receiver_.reset(new
- PeerVideoReceiver(cast_environment_, config_, &mock_transport_));
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- video_receiver_callback_ = new TestVideoReceiverCallback();
- }
-
- virtual ~VideoReceiverTest() {}
-
- virtual void SetUp() {
- payload_.assign(kPacketSize, 0);
-
- // Always start with a key frame.
- rtp_header_.is_key_frame = true;
- rtp_header_.frame_id = 0;
- rtp_header_.packet_id = 0;
- rtp_header_.max_packet_id = 0;
- rtp_header_.is_reference = false;
- rtp_header_.reference_frame_id = 0;
- }
-
- MockPacedPacketSender mock_transport_;
- VideoReceiverConfig config_;
- scoped_ptr<PeerVideoReceiver> receiver_;
- std::vector<uint8> payload_;
- RtpCastHeader rtp_header_;
- base::SimpleTestTickClock testing_clock_;
-
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<TestVideoReceiverCallback> video_receiver_callback_;
-};
-
-TEST_F(VideoReceiverTest, GetOnePacketEncodedframe) {
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
- testing::Return(true));
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
-
- VideoFrameEncodedCallback frame_to_decode_callback =
- base::Bind(&TestVideoReceiverCallback::FrameToDecode,
- video_receiver_callback_);
-
- receiver_->GetEncodedVideoFrame(frame_to_decode_callback);
- task_runner_->RunTasks();
- EXPECT_EQ(video_receiver_callback_->number_times_called(), 1);
-}
-
-TEST_F(VideoReceiverTest, MultiplePackets) {
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
- testing::Return(true));
- rtp_header_.max_packet_id = 2;
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
- ++rtp_header_.packet_id;
- ++rtp_header_.webrtc.header.sequenceNumber;
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
- ++rtp_header_.packet_id;
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
-
- VideoFrameEncodedCallback frame_to_decode_callback =
- base::Bind(&TestVideoReceiverCallback::FrameToDecode,
- video_receiver_callback_);
-
- receiver_->GetEncodedVideoFrame(frame_to_decode_callback);
-
- task_runner_->RunTasks();
- EXPECT_EQ(video_receiver_callback_->number_times_called(), 1);
-}
-
-TEST_F(VideoReceiverTest, GetOnePacketRawframe) {
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
- testing::Return(true));
- receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
- rtp_header_);
-  // Decode error: requires valid input.
- VideoFrameDecodedCallback frame_decoded_callback =
- base::Bind(&TestVideoReceiverCallback::DecodeComplete,
- video_receiver_callback_);
- receiver_->GetRawVideoFrame(frame_decoded_callback);
- task_runner_->RunTasks();
- EXPECT_EQ(video_receiver_callback_->number_times_called(), 0);
-}
-
-// TODO(pwestin): add encoded frames.
-
-} // namespace cast
-} // namespace media
-
-
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
index 099be63a2c8..c7374babd19 100644
--- a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/transport/cast_transport_config.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
namespace media {
@@ -18,31 +19,31 @@ namespace cast {
static const uint32 kMinIntra = 300;
+static int ComputeMaxNumOfRepeatedBuffes(int max_unacked_frames) {
+ if (max_unacked_frames > kNumberOfVp8VideoBuffers)
+ return (max_unacked_frames - 1) / kNumberOfVp8VideoBuffers;
+
+ return 0;
+}
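
A quick worked example of the helper above, as a standalone sketch
(kNumberOfVp8VideoBuffers is 3, per vp8_encoder.h; the helper name here is
illustrative):

#include <cassert>

int ComputeMaxRepeats(int max_unacked_frames, int num_buffers) {
  return max_unacked_frames > num_buffers
             ? (max_unacked_frames - 1) / num_buffers
             : 0;
}

int main() {
  // With 3 VP8 buffers and 12 unacked frames allowed in flight, each buffer
  // may be repeated (12 - 1) / 3 == 3 times in a row.
  assert(ComputeMaxRepeats(12, 3) == 3);
  // At or below the buffer count, repetition is disabled.
  assert(ComputeMaxRepeats(3, 3) == 0);
  return 0;
}
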
+
Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
- uint8 max_unacked_frames)
+ int max_unacked_frames)
: cast_config_(video_config),
use_multiple_video_buffers_(
cast_config_.max_number_of_video_buffers_used ==
kNumberOfVp8VideoBuffers),
max_number_of_repeated_buffers_in_a_row_(
- (max_unacked_frames > kNumberOfVp8VideoBuffers) ?
- ((max_unacked_frames - 1) / kNumberOfVp8VideoBuffers) : 0),
- config_(new vpx_codec_enc_cfg_t()),
- encoder_(new vpx_codec_ctx_t()),
- // Creating a wrapper to the image - setting image data to NULL. Actual
- // pointer will be set during encode. Setting align to 1, as it is
- // meaningless (actual memory is not allocated).
- raw_image_(vpx_img_wrap(NULL, IMG_FMT_I420, video_config.width,
- video_config.height, 1, NULL)),
+ ComputeMaxNumOfRepeatedBuffes(max_unacked_frames)),
key_frame_requested_(true),
- timestamp_(0),
+ first_frame_received_(false),
last_encoded_frame_id_(kStartFrameId),
number_of_repeated_buffers_(0) {
// TODO(pwestin): we need to figure out how to synchronize the acking with the
// internal state of the encoder, ideally the encoder will tell if we can
// send another frame.
DCHECK(!use_multiple_video_buffers_ ||
- max_number_of_repeated_buffers_in_a_row_ == 0) << "Invalid config";
+ max_number_of_repeated_buffers_in_a_row_ == 0)
+ << "Invalid config";
// VP8 has 3 buffers available for prediction; with
// max_number_of_video_buffers_used set to 1 we maximize the coding efficiency
@@ -52,21 +53,37 @@ Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
// propagation.
DCHECK(cast_config_.max_number_of_video_buffers_used == 1 ||
cast_config_.max_number_of_video_buffers_used ==
- kNumberOfVp8VideoBuffers) << "Invalid argument";
+ kNumberOfVp8VideoBuffers)
+ << "Invalid argument";
- for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
- acked_frame_buffers_[i] = true;
- used_buffers_frame_id_[i] = kStartFrameId;
- }
- InitEncode(video_config.number_of_cores);
+ thread_checker_.DetachFromThread();
}
Vp8Encoder::~Vp8Encoder() {
- vpx_codec_destroy(encoder_);
+ vpx_codec_destroy(encoder_.get());
vpx_img_free(raw_image_);
}
-void Vp8Encoder::InitEncode(int number_of_cores) {
+void Vp8Encoder::Initialize() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ config_.reset(new vpx_codec_enc_cfg_t());
+ encoder_.reset(new vpx_codec_ctx_t());
+
+  // Create a wrapper for the image, setting the image data to NULL. The
+  // actual pointer will be set during encode. Align is set to 1, as it is
+  // meaningless (no actual memory is allocated).
+ raw_image_ = vpx_img_wrap(
+ NULL, IMG_FMT_I420, cast_config_.width, cast_config_.height, 1, NULL);
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ acked_frame_buffers_[i] = true;
+ used_buffers_frame_id_[i] = kStartFrameId;
+ }
+ InitEncode(cast_config_.number_of_encode_threads);
+}
+
+void Vp8Encoder::InitEncode(int number_of_encode_threads) {
+ DCHECK(thread_checker_.CalledOnValidThread());
// Populate encoder configuration with default values.
if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_.get(), 0)) {
DCHECK(false) << "Invalid return value";
@@ -85,17 +102,11 @@ void Vp8Encoder::InitEncode(int number_of_cores) {
// codec requirements.
config_->g_error_resilient = 1;
}
-
- if (cast_config_.width * cast_config_.height > 640 * 480
- && number_of_cores >= 2) {
- config_->g_threads = 2; // 2 threads for qHD/HD.
- } else {
- config_->g_threads = 1; // 1 thread for VGA or less.
- }
+ config_->g_threads = number_of_encode_threads;
// Rate control settings.
- // TODO(pwestin): revisit these constants. Currently identical to webrtc.
- config_->rc_dropframe_thresh = 30;
+ // Never allow the encoder to drop frame internally.
+ config_->rc_dropframe_thresh = 0;
config_->rc_end_usage = VPX_CBR;
config_->g_pass = VPX_RC_ONE_PASS;
config_->rc_resize_allowed = 0;
@@ -110,19 +121,22 @@ void Vp8Encoder::InitEncode(int number_of_cores) {
// set the maximum target size of any key-frame.
uint32 rc_max_intra_target = MaxIntraTarget(config_->rc_buf_optimal_sz);
vpx_codec_flags_t flags = 0;
- // TODO(mikhal): Tune settings.
- if (vpx_codec_enc_init(encoder_, vpx_codec_vp8_cx(), config_.get(), flags)) {
- DCHECK(false) << "Invalid return value";
+ if (vpx_codec_enc_init(
+ encoder_.get(), vpx_codec_vp8_cx(), config_.get(), flags)) {
+ DCHECK(false) << "vpx_codec_enc_init() failed.";
+ encoder_.reset();
+ return;
}
- vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
- vpx_codec_control(encoder_, VP8E_SET_NOISE_SENSITIVITY, 0);
- vpx_codec_control(encoder_, VP8E_SET_CPUUSED, -6);
- vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
- rc_max_intra_target);
+ vpx_codec_control(encoder_.get(), VP8E_SET_STATIC_THRESHOLD, 1);
+ vpx_codec_control(encoder_.get(), VP8E_SET_NOISE_SENSITIVITY, 0);
+ vpx_codec_control(encoder_.get(), VP8E_SET_CPUUSED, -6);
+ vpx_codec_control(
+ encoder_.get(), VP8E_SET_MAX_INTRA_BITRATE_PCT, rc_max_intra_target);
}
bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedVideoFrame* encoded_image) {
+ transport::EncodedFrame* encoded_image) {
+ DCHECK(thread_checker_.CalledOnValidThread());
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_image_->planes[PLANE_Y] =
@@ -142,8 +156,7 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
if (key_frame_requested_) {
flags = VPX_EFLAG_FORCE_KF;
// Self reference.
- latest_frame_id_to_reference =
- static_cast<uint8>(last_encoded_frame_id_ + 1);
+ latest_frame_id_to_reference = last_encoded_frame_id_ + 1;
// We can pick any buffer as buffer_to_update since we update
// them all.
buffer_to_update = kLastBuffer;
@@ -157,45 +170,63 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
// Note: The duration does not reflect the real time between frames. This is
// done to keep the encoder happy.
+ //
+ // TODO(miu): This is a semi-hack. We should consider using
+ // |video_frame->timestamp()| instead.
uint32 duration = kVideoFrequency / cast_config_.max_frame_rate;
- if (vpx_codec_encode(encoder_, raw_image_, timestamp_, duration, flags,
- VPX_DL_REALTIME)) {
+
+ // Note: Timestamp here is used for bitrate calculation. The absolute value
+ // is not important.
+ if (!first_frame_received_) {
+ first_frame_received_ = true;
+ first_frame_timestamp_ = video_frame->timestamp();
+ }
+
+ vpx_codec_pts_t timestamp =
+ (video_frame->timestamp() - first_frame_timestamp_).InMicroseconds() *
+ kVideoFrequency / base::Time::kMicrosecondsPerSecond;
+
+ if (vpx_codec_encode(encoder_.get(),
+ raw_image_,
+ timestamp,
+ duration,
+ flags,
+ VPX_DL_REALTIME) != VPX_CODEC_OK) {
+    LOG(ERROR) << "Failed to encode frame.";
return false;
}
- timestamp_ += duration;
// Get encoded frame.
- const vpx_codec_cx_pkt_t *pkt = NULL;
+ const vpx_codec_cx_pkt_t* pkt = NULL;
vpx_codec_iter_t iter = NULL;
- size_t total_size = 0;
- while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
- if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
- total_size += pkt->data.frame.sz;
- encoded_image->data.reserve(total_size);
- encoded_image->data.insert(
- encoded_image->data.end(),
- static_cast<const uint8*>(pkt->data.frame.buf),
- static_cast<const uint8*>(pkt->data.frame.buf) +
- pkt->data.frame.sz);
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- encoded_image->key_frame = true;
- } else {
- encoded_image->key_frame = false;
- }
- }
+ bool is_key_frame = false;
+ while ((pkt = vpx_codec_get_cx_data(encoder_.get(), &iter)) != NULL) {
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ continue;
+ encoded_image->data.assign(
+ static_cast<const uint8*>(pkt->data.frame.buf),
+ static_cast<const uint8*>(pkt->data.frame.buf) + pkt->data.frame.sz);
+ is_key_frame = !!(pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+ break; // Done, since all data is provided in one CX_FRAME_PKT packet.
}
// Don't update frame_id for zero size frames.
- if (total_size == 0) return true;
+ if (encoded_image->data.empty())
+ return true;
// Populate the encoded frame.
- encoded_image->codec = kVp8;
- encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
encoded_image->frame_id = ++last_encoded_frame_id_;
+ if (is_key_frame) {
+ encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->referenced_frame_id = encoded_image->frame_id;
+ } else {
+ encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->referenced_frame_id = latest_frame_id_to_reference;
+ }
- VLOG(1) << "VP8 encoded frame:" << static_cast<int>(encoded_image->frame_id)
- << " sized:" << total_size;
+ DVLOG(1) << "VP8 encoded frame_id " << encoded_image->frame_id
+ << ", sized:" << encoded_image->data.size();
- if (encoded_image->key_frame) {
+ if (is_key_frame) {
key_frame_requested_ = false;
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
@@ -215,12 +246,14 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
}
void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
- if (!use_multiple_video_buffers_) return;
+ if (!use_multiple_video_buffers_)
+ return;
// We need to reference something.
DCHECK(acked_frame_buffers_[kAltRefBuffer] ||
acked_frame_buffers_[kGoldenBuffer] ||
- acked_frame_buffers_[kLastBuffer]) << "Invalid state";
+ acked_frame_buffers_[kLastBuffer])
+ << "Invalid state";
if (!acked_frame_buffers_[kAltRefBuffer]) {
*flags |= VP8_EFLAG_NO_REF_ARF;
@@ -234,7 +267,8 @@ void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
}
uint32 Vp8Encoder::GetLatestFrameIdToReference() {
- if (!use_multiple_video_buffers_) return last_encoded_frame_id_;
+ if (!use_multiple_video_buffers_)
+ return last_encoded_frame_id_;
int latest_frame_id_to_reference = -1;
if (acked_frame_buffers_[kAltRefBuffer]) {
@@ -265,9 +299,12 @@ uint32 Vp8Encoder::GetLatestFrameIdToReference() {
}
Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
+ if (!use_multiple_video_buffers_)
+ return kNoBuffer;
+
// Update at most one buffer, except for key-frames.
- Vp8Buffers buffer_to_update;
+ Vp8Buffers buffer_to_update = kNoBuffer;
if (number_of_repeated_buffers_ < max_number_of_repeated_buffers_in_a_row_) {
// TODO(pwestin): experiment with this. The issue with only this change is
// that we can end up with only 4 frames in flight when we expect 6.
@@ -299,7 +336,8 @@ Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
vpx_codec_flags_t* flags) {
- if (!use_multiple_video_buffers_) return;
+ if (!use_multiple_video_buffers_)
+ return;
// Update at most one buffer, except for key-frames.
switch (buffer_to_update) {
@@ -325,19 +363,23 @@ void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
}
void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
+ DCHECK(thread_checker_.CalledOnValidThread());
uint32 new_bitrate_kbit = new_bitrate / 1000;
- if (config_->rc_target_bitrate == new_bitrate_kbit) return;
+ if (config_->rc_target_bitrate == new_bitrate_kbit)
+ return;
config_->rc_target_bitrate = new_bitrate_kbit;
// Update encoder context.
- if (vpx_codec_enc_config_set(encoder_, config_.get())) {
+ if (vpx_codec_enc_config_set(encoder_.get(), config_.get())) {
DCHECK(false) << "Invalid return value";
}
}
void Vp8Encoder::LatestFrameIdToReference(uint32 frame_id) {
- if (!use_multiple_video_buffers_) return;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!use_multiple_video_buffers_)
+ return;
VLOG(1) << "VP8 ok to reference frame:" << static_cast<int>(frame_id);
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
@@ -348,6 +390,7 @@ void Vp8Encoder::LatestFrameIdToReference(uint32 frame_id) {
}
void Vp8Encoder::GenerateKeyFrame() {
+ DCHECK(thread_checker_.CalledOnValidThread());
key_frame_requested_ = true;
}
@@ -362,7 +405,7 @@ uint32 Vp8Encoder::MaxIntraTarget(uint32 optimal_buffer_size_ms) const {
float scale_parameter = 0.5;
uint32 target_pct = optimal_buffer_size_ms * scale_parameter *
- cast_config_.max_frame_rate / 10;
+ cast_config_.max_frame_rate / 10;
// Don't go below 3 times the per frame bandwidth.
return std::max(target_pct, kMinIntra);
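
The timestamp computation introduced above rebases each frame's media
timestamp against the first received frame and scales to the RTP video clock.
A plain-integer sketch, assuming kVideoFrequency is the standard 90 kHz RTP
video clock rate:

#include <cstdint>

int64_t ToVpxPts(int64_t frame_timestamp_us,
                 int64_t first_frame_timestamp_us) {
  const int64_t kVideoFrequency = 90000;          // Assumed 90 kHz RTP clock.
  const int64_t kMicrosecondsPerSecond = 1000000;
  // Rebase against the first frame so the absolute value stays small; only
  // deltas matter for the encoder's bitrate calculation.
  return (frame_timestamp_us - first_frame_timestamp_us) * kVideoFrequency /
         kMicrosecondsPerSecond;
}
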
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
deleted file mode 100644
index fa9c2944a15..00000000000
--- a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- 'targets': [
- {
- 'target_name': 'cast_vp8_encoder',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- ],
- 'sources': [
- 'vp8_encoder.cc',
- 'vp8_encoder.h',
- ], # source
- 'dependencies': [
- '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
index d09cc27dabc..2421cf15114 100644
--- a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -7,7 +7,10 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/video_sender/software_video_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
namespace media {
@@ -22,24 +25,27 @@ namespace cast {
const int kNumberOfVp8VideoBuffers = 3;
-class Vp8Encoder {
+class Vp8Encoder : public SoftwareVideoEncoder {
public:
- Vp8Encoder(const VideoSenderConfig& video_config,
- uint8 max_unacked_frames);
+ Vp8Encoder(const VideoSenderConfig& video_config, int max_unacked_frames);
- ~Vp8Encoder();
+ virtual ~Vp8Encoder();
+
+  // Initialize the encoder before Encode() can be called. This method
+  // must be called on the same thread that Encode() is called on.
+ virtual void Initialize() OVERRIDE;
// Encode a raw image (as a part of a video stream).
- bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedVideoFrame* encoded_image);
+ virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ transport::EncodedFrame* encoded_image) OVERRIDE;
// Update the encoder with a new target bit rate.
- void UpdateRates(uint32 new_bitrate);
+ virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
// Set the next frame to be a key frame.
- void GenerateKeyFrame();
+ virtual void GenerateKeyFrame() OVERRIDE;
- void LatestFrameIdToReference(uint32 frame_id);
+ virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
private:
enum Vp8Buffers {
@@ -73,16 +79,22 @@ class Vp8Encoder {
// VP8 internal objects.
scoped_ptr<vpx_codec_enc_cfg_t> config_;
- vpx_enc_ctx_t* encoder_;
+ scoped_ptr<vpx_enc_ctx_t> encoder_;
vpx_image_t* raw_image_;
bool key_frame_requested_;
- int64 timestamp_;
+ bool first_frame_received_;
+ base::TimeDelta first_frame_timestamp_;
uint32 last_encoded_frame_id_;
uint32 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
bool acked_frame_buffers_[kNumberOfVp8VideoBuffers];
Vp8Buffers last_used_vp8_buffer_;
int number_of_repeated_buffers_;
+
+ // This is bound to the thread where Initialize() is called.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8Encoder);
};
} // namespace cast
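
The thread_checker_ added above enforces that Initialize() and all later
calls happen on one thread; DetachFromThread() in the constructor defers the
binding until first use. A minimal standalone analogue of that contract:

#include <cassert>
#include <thread>

class SingleThreadGuard {
 public:
  // Bind lazily: the first Check() pins the guard to the calling thread,
  // mirroring DetachFromThread() followed by CalledOnValidThread().
  void Check() {
    if (bound_id_ == std::thread::id())
      bound_id_ = std::this_thread::get_id();
    assert(bound_id_ == std::this_thread::get_id());
  }

 private:
  std::thread::id bound_id_;  // Default id means "not yet bound".
};
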
diff --git a/chromium/media/cast/video_sender/external_video_encoder.cc b/chromium/media/cast/video_sender/external_video_encoder.cc
new file mode 100644
index 00000000000..ca30bcd47af
--- /dev/null
+++ b/chromium/media/cast/video_sender/external_video_encoder.cc
@@ -0,0 +1,436 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/external_video_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/scoped_vector.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace media {
+namespace cast {
+class LocalVideoEncodeAcceleratorClient;
+} // namespace cast
+} // namespace media
+
+namespace {
+static const size_t kOutputBufferCount = 3;
+
+void LogFrameEncodedEvent(
+ const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
+ base::TimeTicks event_time,
+ media::cast::RtpTimestamp rtp_timestamp,
+ uint32 frame_id) {
+ cast_environment->Logging()->InsertFrameEvent(
+ event_time, media::cast::FRAME_ENCODED, media::cast::VIDEO_EVENT,
+ rtp_timestamp, frame_id);
+}
+
+// Proxy this call to ExternalVideoEncoder on the cast main thread.
+void ProxyCreateVideoEncodeAccelerator(
+ const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
+ const base::WeakPtr<media::cast::ExternalVideoEncoder>& weak_ptr,
+ const media::cast::CreateVideoEncodeMemoryCallback&
+ create_video_encode_mem_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea) {
+ cast_environment->PostTask(
+ media::cast::CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(
+ &media::cast::ExternalVideoEncoder::OnCreateVideoEncodeAccelerator,
+ weak_ptr,
+ create_video_encode_mem_cb,
+ encoder_task_runner,
+ base::Passed(&vea)));
+}
+} // namespace
+
+namespace media {
+namespace cast {
+
+// Container for the associated data of a video frame being processed.
+struct EncodedFrameReturnData {
+ EncodedFrameReturnData(base::TimeTicks c_time,
+ VideoEncoder::FrameEncodedCallback callback) {
+ capture_time = c_time;
+ frame_encoded_callback = callback;
+ }
+ base::TimeTicks capture_time;
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback;
+};
+
+// The ExternalVideoEncoder class can be deleted directly by cast, while
+// LocalVideoEncodeAcceleratorClient stays around long enough to properly shut
+// down the VideoEncodeAccelerator.
+class LocalVideoEncodeAcceleratorClient
+ : public VideoEncodeAccelerator::Client,
+ public base::RefCountedThreadSafe<LocalVideoEncodeAcceleratorClient> {
+ public:
+ LocalVideoEncodeAcceleratorClient(
+ scoped_refptr<CastEnvironment> cast_environment,
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
+ const base::WeakPtr<ExternalVideoEncoder>& weak_owner)
+ : cast_environment_(cast_environment),
+ encoder_task_runner_(encoder_task_runner),
+ video_encode_accelerator_(vea.Pass()),
+ create_video_encode_memory_cb_(create_video_encode_mem_cb),
+ weak_owner_(weak_owner),
+ last_encoded_frame_id_(kStartFrameId),
+ key_frame_encountered_(false) {
+ DCHECK(encoder_task_runner_);
+ }
+
+ // Initialize the real HW encoder.
+ void Initialize(const VideoSenderConfig& video_config) {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+
+ VideoCodecProfile output_profile = media::VIDEO_CODEC_PROFILE_UNKNOWN;
+ switch (video_config.codec) {
+ case transport::kVp8:
+ output_profile = media::VP8PROFILE_MAIN;
+ break;
+ case transport::kH264:
+ output_profile = media::H264PROFILE_MAIN;
+ break;
+ case transport::kFakeSoftwareVideo:
+ NOTREACHED() << "Fake software video encoder cannot be external";
+ break;
+ case transport::kUnknownVideoCodec:
+ NOTREACHED() << "Video codec not specified";
+ break;
+ }
+ codec_ = video_config.codec;
+ max_frame_rate_ = video_config.max_frame_rate;
+
+ if (!video_encode_accelerator_->Initialize(
+ media::VideoFrame::I420,
+ gfx::Size(video_config.width, video_config.height),
+ output_profile,
+ video_config.start_bitrate,
+ this)) {
+ NotifyError(VideoEncodeAccelerator::kInvalidArgumentError);
+ return;
+ }
+
+    // Wait until shared memory is allocated to indicate that the encoder is
+    // initialized.
+ }
+
+ // Free the HW.
+ void Destroy() {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+
+ video_encode_accelerator_.reset();
+ }
+
+ void SetBitRate(uint32 bit_rate) {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+
+ video_encode_accelerator_->RequestEncodingParametersChange(bit_rate,
+ max_frame_rate_);
+ }
+
+ void EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ bool key_frame_requested,
+ const VideoEncoder::FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+
+ encoded_frame_data_storage_.push_back(
+ EncodedFrameReturnData(capture_time, frame_encoded_callback));
+
+ // BitstreamBufferReady will be called once the encoder is done.
+ video_encode_accelerator_->Encode(video_frame, key_frame_requested);
+ }
+
+ protected:
+ virtual void NotifyError(VideoEncodeAccelerator::Error error) OVERRIDE {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ VLOG(1) << "ExternalVideoEncoder NotifyError: " << error;
+
+ video_encode_accelerator_.reset();
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&ExternalVideoEncoder::EncoderError, weak_owner_));
+ }
+
+ // Called to allocate the input and output buffers.
+ virtual void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) OVERRIDE {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ DCHECK(video_encode_accelerator_);
+
+ for (size_t j = 0; j < kOutputBufferCount; ++j) {
+ create_video_encode_memory_cb_.Run(
+ output_buffer_size,
+ base::Bind(&LocalVideoEncodeAcceleratorClient::OnCreateSharedMemory,
+ this));
+ }
+ }
+
+  // Encoder has encoded a frame and it's available in one of our output
+  // buffers.
+ virtual void BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) OVERRIDE {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+ if (bitstream_buffer_id < 0 ||
+ bitstream_buffer_id >= static_cast<int32>(output_buffers_.size())) {
+ NOTREACHED();
+ VLOG(1) << "BitstreamBufferReady(): invalid bitstream_buffer_id="
+ << bitstream_buffer_id;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ base::SharedMemory* output_buffer = output_buffers_[bitstream_buffer_id];
+ if (payload_size > output_buffer->mapped_size()) {
+ NOTREACHED();
+ VLOG(1) << "BitstreamBufferReady(): invalid payload_size = "
+ << payload_size;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ if (key_frame)
+ key_frame_encountered_ = true;
+ if (!key_frame_encountered_) {
+ // Do not send video until we have encountered the first key frame.
+ // Save the bitstream buffer in |stream_header_| to be sent later along
+ // with the first key frame.
+ stream_header_.append(static_cast<const char*>(output_buffer->memory()),
+ payload_size);
+ } else if (!encoded_frame_data_storage_.empty()) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ encoded_frame->dependency = key_frame ? transport::EncodedFrame::KEY :
+ transport::EncodedFrame::DEPENDENT;
+ encoded_frame->frame_id = ++last_encoded_frame_id_;
+ if (key_frame)
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
+ else
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
+ encoded_frame->reference_time =
+ encoded_frame_data_storage_.front().capture_time;
+ encoded_frame->rtp_timestamp =
+ GetVideoRtpTimestamp(encoded_frame->reference_time);
+ if (!stream_header_.empty()) {
+ encoded_frame->data = stream_header_;
+ stream_header_.clear();
+ }
+ encoded_frame->data.append(
+ static_cast<const char*>(output_buffer->memory()), payload_size);
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&LogFrameEncodedEvent,
+ cast_environment_,
+ cast_environment_->Clock()->NowTicks(),
+ encoded_frame->rtp_timestamp,
+ encoded_frame->frame_id));
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(encoded_frame_data_storage_.front().frame_encoded_callback,
+ base::Passed(&encoded_frame)));
+
+ encoded_frame_data_storage_.pop_front();
+ } else {
+ VLOG(1) << "BitstreamBufferReady(): no encoded frame data available";
+ }
+
+ // We need to re-add the output buffer to the encoder after we are done
+ // with it.
+ video_encode_accelerator_->UseOutputBitstreamBuffer(media::BitstreamBuffer(
+ bitstream_buffer_id,
+ output_buffers_[bitstream_buffer_id]->handle(),
+ output_buffers_[bitstream_buffer_id]->mapped_size()));
+ }
+
+ private:
+ // Note: This method can be called on any thread.
+ void OnCreateSharedMemory(scoped_ptr<base::SharedMemory> memory) {
+ encoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&LocalVideoEncodeAcceleratorClient::ReceivedSharedMemory,
+ this,
+ base::Passed(&memory)));
+ }
+
+ void ReceivedSharedMemory(scoped_ptr<base::SharedMemory> memory) {
+ DCHECK(encoder_task_runner_);
+ DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
+
+ output_buffers_.push_back(memory.release());
+
+ // Wait until all requested buffers are received.
+ if (output_buffers_.size() < kOutputBufferCount)
+ return;
+
+ // Immediately provide all output buffers to the VEA.
+ for (size_t i = 0; i < output_buffers_.size(); ++i) {
+ video_encode_accelerator_->UseOutputBitstreamBuffer(
+ media::BitstreamBuffer(static_cast<int32>(i),
+ output_buffers_[i]->handle(),
+ output_buffers_[i]->mapped_size()));
+ }
+
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&ExternalVideoEncoder::EncoderInitialized, weak_owner_));
+ }
+
+ friend class base::RefCountedThreadSafe<LocalVideoEncodeAcceleratorClient>;
+
+ virtual ~LocalVideoEncodeAcceleratorClient() {}
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner_;
+ scoped_ptr<media::VideoEncodeAccelerator> video_encode_accelerator_;
+ const CreateVideoEncodeMemoryCallback create_video_encode_memory_cb_;
+ const base::WeakPtr<ExternalVideoEncoder> weak_owner_;
+ int max_frame_rate_;
+ transport::VideoCodec codec_;
+ uint32 last_encoded_frame_id_;
+ bool key_frame_encountered_;
+ std::string stream_header_;
+
+ // Shared memory buffers for output with the VideoAccelerator.
+ ScopedVector<base::SharedMemory> output_buffers_;
+
+ // FIFO list.
+ std::list<EncodedFrameReturnData> encoded_frame_data_storage_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocalVideoEncodeAcceleratorClient);
+};
+
+ExternalVideoEncoder::ExternalVideoEncoder(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const VideoSenderConfig& video_config,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb)
+ : video_config_(video_config),
+ cast_environment_(cast_environment),
+ encoder_active_(false),
+ key_frame_requested_(false),
+ weak_factory_(this) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ create_vea_cb.Run(base::Bind(&ProxyCreateVideoEncodeAccelerator,
+ cast_environment,
+ weak_factory_.GetWeakPtr(),
+ create_video_encode_mem_cb));
+}
+
+ExternalVideoEncoder::~ExternalVideoEncoder() {
+ encoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&LocalVideoEncodeAcceleratorClient::Destroy,
+ video_accelerator_client_));
+}
+
+void ExternalVideoEncoder::EncoderInitialized() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ encoder_active_ = true;
+}
+
+void ExternalVideoEncoder::EncoderError() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ encoder_active_ = false;
+}
+
+void ExternalVideoEncoder::OnCreateVideoEncodeAccelerator(
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ encoder_task_runner_ = encoder_task_runner;
+
+ video_accelerator_client_ =
+ new LocalVideoEncodeAcceleratorClient(cast_environment_,
+ encoder_task_runner,
+ vea.Pass(),
+ create_video_encode_mem_cb,
+ weak_factory_.GetWeakPtr());
+ encoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&LocalVideoEncodeAcceleratorClient::Initialize,
+ video_accelerator_client_,
+ video_config_));
+}
+
+bool ExternalVideoEncoder::EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ if (!encoder_active_)
+ return false;
+
+ encoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&LocalVideoEncodeAcceleratorClient::EncodeVideoFrame,
+ video_accelerator_client_,
+ video_frame,
+ capture_time,
+ key_frame_requested_,
+ frame_encoded_callback));
+
+ key_frame_requested_ = false;
+ return true;
+}
+
+// Inform the encoder about the new target bit rate.
+void ExternalVideoEncoder::SetBitRate(int new_bit_rate) {
+ if (!encoder_active_) {
+ // If we receive SetBitRate() before VEA creation callback is invoked,
+ // cache the new bit rate in the encoder config and use the new settings
+ // to initialize VEA.
+ video_config_.start_bitrate = new_bit_rate;
+ return;
+ }
+
+ encoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&LocalVideoEncodeAcceleratorClient::SetBitRate,
+ video_accelerator_client_,
+ new_bit_rate));
+}
+
+// Inform the encoder to encode the next frame as a key frame.
+void ExternalVideoEncoder::GenerateKeyFrame() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ key_frame_requested_ = true;
+}
+
+// Inform the encoder to only reference frames older than or equal to
+// frame_id.
+void ExternalVideoEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
+  // Do nothing; not supported.
+}
+
+} // namespace cast
+} // namespace media
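
LocalVideoEncodeAcceleratorClient above pairs encode requests with hardware
outputs purely by order: EncodeVideoFrame() pushes the capture time and
callback onto a list, and BitstreamBufferReady() pops the oldest entry. A
standalone sketch of that FIFO bookkeeping (names are illustrative):

#include <cstdint>
#include <functional>
#include <list>
#include <string>
#include <utility>

struct PendingEncode {
  int64_t capture_time_us;
  std::function<void(const std::string& payload)> done;
};

class EncodeFifo {
 public:
  // One Push() per frame handed to the hardware encoder.
  void Push(PendingEncode entry) { pending_.push_back(std::move(entry)); }

  // One OnOutput() per bitstream buffer the encoder completes, in order.
  void OnOutput(const std::string& payload) {
    if (pending_.empty())
      return;  // Mirrors the "no encoded frame data available" log above.
    pending_.front().done(payload);
    pending_.pop_front();
  }

 private:
  std::list<PendingEncode> pending_;
};
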
diff --git a/chromium/media/cast/video_sender/external_video_encoder.h b/chromium/media/cast/video_sender/external_video_encoder.h
new file mode 100644
index 00000000000..29fe0c5fcdb
--- /dev/null
+++ b/chromium/media/cast/video_sender/external_video_encoder.h
@@ -0,0 +1,86 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/video_sender/video_encoder.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace media {
+class VideoFrame;
+}
+
+namespace media {
+namespace cast {
+
+class LocalVideoEncodeAcceleratorClient;
+
+// This object is called externally from the main cast thread and internally
+// from the video encoder thread.
+class ExternalVideoEncoder : public VideoEncoder {
+ public:
+ ExternalVideoEncoder(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const VideoSenderConfig& video_config,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb);
+
+ virtual ~ExternalVideoEncoder();
+
+  // Called from the main cast thread. This function posts the encode task to
+  // the video encoder thread.
+  // The video_frame must remain valid until the closure callback is called.
+  // The closure callback is called from the video encoder thread as soon as
+  // the encoder is done with the frame; it does not mean that the encoded
+  // frame has been sent out.
+  // Once the encoded frame is ready, the frame_encoded_callback is called.
+ virtual bool EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback) OVERRIDE;
+
+ // The following functions are called from the main cast thread.
+ virtual void SetBitRate(int new_bit_rate) OVERRIDE;
+ virtual void GenerateKeyFrame() OVERRIDE;
+ virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+
+ // Called when a VEA is created.
+ void OnCreateVideoEncodeAccelerator(
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner,
+ scoped_ptr<media::VideoEncodeAccelerator> vea);
+
+ protected:
+ void EncoderInitialized();
+ void EncoderError();
+
+ private:
+ friend class LocalVideoEncodeAcceleratorClient;
+
+ VideoSenderConfig video_config_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ bool encoder_active_;
+ bool key_frame_requested_;
+
+ scoped_refptr<LocalVideoEncodeAcceleratorClient> video_accelerator_client_;
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_task_runner_;
+
+ // Weak pointer factory for posting back LocalVideoEncodeAcceleratorClient
+ // notifications to ExternalVideoEncoder.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<ExternalVideoEncoder> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalVideoEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_EXTERNAL_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/video_sender/external_video_encoder_unittest.cc b/chromium/media/cast/video_sender/external_video_encoder_unittest.cc
new file mode 100644
index 00000000000..853258ce30a
--- /dev/null
+++ b/chromium/media/cast/video_sender/external_video_encoder_unittest.cc
@@ -0,0 +1,191 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/fake_video_encode_accelerator.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "media/cast/video_sender/external_video_encoder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+namespace {
+
+void CreateVideoEncodeAccelerator(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ scoped_ptr<VideoEncodeAccelerator> fake_vea,
+ const ReceiveVideoEncodeAcceleratorCallback& callback) {
+ callback.Run(task_runner, fake_vea.Pass());
+}
+
+void CreateSharedMemory(
+ size_t size, const ReceiveVideoEncodeMemoryCallback& callback) {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ if (!shm->CreateAndMapAnonymous(size)) {
+ NOTREACHED();
+ return;
+ }
+ callback.Run(shm.Pass());
+}
+
+class TestVideoEncoderCallback
+ : public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
+ public:
+ TestVideoEncoderCallback() {}
+
+ void SetExpectedResult(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
+ const base::TimeTicks& expected_capture_time) {
+ expected_frame_id_ = expected_frame_id;
+ expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
+ expected_capture_time_ = expected_capture_time;
+ }
+
+ void DeliverEncodedVideoFrame(
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
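+ // A frame that is expected to reference itself must be a key frame.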
+ if (expected_frame_id_ == expected_last_referenced_frame_id_) {
+ EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ encoded_frame->dependency);
+ }
+ EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
+ EXPECT_EQ(expected_last_referenced_frame_id_,
+ encoded_frame->referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
+ }
+
+ protected:
+ virtual ~TestVideoEncoderCallback() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
+
+ uint32 expected_frame_id_;
+ uint32 expected_last_referenced_frame_id_;
+ base::TimeTicks expected_capture_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
+};
+} // namespace
+
+class ExternalVideoEncoderTest : public ::testing::Test {
+ protected:
+ ExternalVideoEncoderTest()
+ : test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+ video_config_.rtp_config.ssrc = 1;
+ video_config_.incoming_feedback_ssrc = 2;
+ video_config_.rtp_config.payload_type = 127;
+ video_config_.use_external_encoder = true;
+ video_config_.width = 320;
+ video_config_.height = 240;
+ video_config_.max_bitrate = 5000000;
+ video_config_.min_bitrate = 1000000;
+ video_config_.start_bitrate = 2000000;
+ video_config_.max_qp = 56;
+ video_config_.min_qp = 0;
+ video_config_.max_frame_rate = 30;
+ video_config_.max_number_of_video_buffers_used = 3;
+ video_config_.codec = transport::kVp8;
+ gfx::Size size(video_config_.width, video_config_.height);
+ video_frame_ = media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(video_frame_, 123);
+
+ testing_clock_ = new base::SimpleTestTickClock();
+ task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
+ cast_environment_ =
+ new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_);
+
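+ // Keep a raw pointer to the fake VEA for direct control in the tests;
+ // ownership is transferred to the encoder via the creation callback.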
+ fake_vea_ = new test::FakeVideoEncodeAccelerator(task_runner_);
+ scoped_ptr<VideoEncodeAccelerator> fake_vea(fake_vea_);
+ video_encoder_.reset(
+ new ExternalVideoEncoder(cast_environment_,
+ video_config_,
+ base::Bind(&CreateVideoEncodeAccelerator,
+ task_runner_,
+ base::Passed(&fake_vea)),
+ base::Bind(&CreateSharedMemory)));
+ }
+
+ virtual ~ExternalVideoEncoderTest() {}
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ test::FakeVideoEncodeAccelerator* fake_vea_; // Owned by video_encoder_.
+ scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
+ VideoSenderConfig video_config_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_ptr<VideoEncoder> video_encoder_;
+ scoped_refptr<media::VideoFrame> video_frame_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalVideoEncoderTest);
+};
+
+TEST_F(ExternalVideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
+ task_runner_->RunTasks(); // Run the initializer on the correct thread.
+
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ base::TimeTicks capture_time;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
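+ // Each subsequent frame is expected to reference the immediately
+ // preceding frame.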
+ for (int i = 0; i < 6; ++i) {
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(i + 1, i, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+ }
+ // We need to run the task to clean up the GPU instance.
+ video_encoder_.reset(NULL);
+ task_runner_->RunTasks();
+}
+
+TEST_F(ExternalVideoEncoderTest, StreamHeader) {
+ task_runner_->RunTasks(); // Run the initializer on the correct thread.
+
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ // Force the FakeVideoEncodeAccelerator to return a dummy non-key frame first.
+ fake_vea_->SendDummyFrameForTesting(false);
+
+ // Verify the first returned bitstream buffer is still a key frame.
+ base::TimeTicks capture_time;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ // We need to run the task to clean up the GPU instance.
+ video_encoder_.reset(NULL);
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/fake_software_video_encoder.cc b/chromium/media/cast/video_sender/fake_software_video_encoder.cc
new file mode 100644
index 00000000000..7c5c9526419
--- /dev/null
+++ b/chromium/media/cast/video_sender/fake_software_video_encoder.cc
@@ -0,0 +1,69 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/fake_software_video_encoder.h"
+
+#include "base/json/json_writer.h"
+#include "base/values.h"
+#include "media/cast/transport/cast_transport_config.h"
+
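+// The fake encoder is test-only and is compiled out of official builds.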
+#ifndef OFFICIAL_BUILD
+
+namespace media {
+namespace cast {
+
+FakeSoftwareVideoEncoder::FakeSoftwareVideoEncoder(
+ const VideoSenderConfig& video_config)
+ : video_config_(video_config),
+ next_frame_is_key_(true),
+ frame_id_(0),
+ frame_id_to_reference_(0),
+ frame_size_(0) {
+}
+
+FakeSoftwareVideoEncoder::~FakeSoftwareVideoEncoder() {}
+
+void FakeSoftwareVideoEncoder::Initialize() {}
+
+bool FakeSoftwareVideoEncoder::Encode(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ transport::EncodedFrame* encoded_image) {
+ encoded_image->frame_id = frame_id_++;
+ if (next_frame_is_key_) {
+ encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->referenced_frame_id = encoded_image->frame_id;
+ next_frame_is_key_ = false;
+ } else {
+ encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->referenced_frame_id = encoded_image->frame_id - 1;
+ }
+
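+ // Serialize the frame metadata as JSON; this stands in for the encoded
+ // bitstream that a real encoder would produce.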
+ base::DictionaryValue values;
+ values.SetBoolean("key",
+ encoded_image->dependency == transport::EncodedFrame::KEY);
+ values.SetInteger("ref", encoded_image->referenced_frame_id);
+ values.SetInteger("id", encoded_image->frame_id);
+ values.SetInteger("size", frame_size_);
+ base::JSONWriter::Write(&values, &encoded_image->data);
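+ // Pad the payload up to the simulated frame size derived from the target
+ // bitrate in UpdateRates().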
+ encoded_image->data.resize(
+ std::max<size_t>(encoded_image->data.size(), frame_size_));
+ return true;
+}
+
+void FakeSoftwareVideoEncoder::UpdateRates(uint32 new_bitrate) {
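+ // Convert the target bitrate (bits/sec) into a per-frame payload size in
+ // bytes: bits/sec -> bits per frame -> bytes per frame.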
+ frame_size_ = new_bitrate / video_config_.max_frame_rate / 8;
+}
+
+void FakeSoftwareVideoEncoder::GenerateKeyFrame() {
+ next_frame_is_key_ = true;
+}
+
+void FakeSoftwareVideoEncoder::LatestFrameIdToReference(uint32 frame_id) {
+ frame_id_to_reference_ = frame_id;
+}
+
+} // namespace cast
+} // namespace media
+
+#endif
diff --git a/chromium/media/cast/video_sender/fake_software_video_encoder.h b/chromium/media/cast/video_sender/fake_software_video_encoder.h
new file mode 100644
index 00000000000..0eb88ddfe17
--- /dev/null
+++ b/chromium/media/cast/video_sender/fake_software_video_encoder.h
@@ -0,0 +1,38 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+
+#include "media/cast/cast_config.h"
+#include "media/cast/video_sender/software_video_encoder.h"
+
+namespace media {
+namespace cast {
+
+class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
+ public:
+ FakeSoftwareVideoEncoder(const VideoSenderConfig& video_config);
+ virtual ~FakeSoftwareVideoEncoder();
+
+ // SoftwareVideoEncoder implementation.
+ virtual void Initialize() OVERRIDE;
+ virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ transport::EncodedFrame* encoded_image) OVERRIDE;
+ virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
+ virtual void GenerateKeyFrame() OVERRIDE;
+ virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+
+ private:
+ VideoSenderConfig video_config_;
+ bool next_frame_is_key_;
+ uint32 frame_id_;
+ uint32 frame_id_to_reference_;
+ int frame_size_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/video_sender/mock_video_encoder_controller.cc b/chromium/media/cast/video_sender/mock_video_encoder_controller.cc
deleted file mode 100644
index 4f649aa44fe..00000000000
--- a/chromium/media/cast/video_sender/mock_video_encoder_controller.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/video_sender/mock_video_encoder_controller.h"
-
-namespace media {
-namespace cast {
-
-MockVideoEncoderController::MockVideoEncoderController() {
-}
-
-MockVideoEncoderController::~MockVideoEncoderController() {
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/video_sender/mock_video_encoder_controller.h b/chromium/media/cast/video_sender/mock_video_encoder_controller.h
deleted file mode 100644
index cfc58a9eb8f..00000000000
--- a/chromium/media/cast/video_sender/mock_video_encoder_controller.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
-#define MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
-
-#include "media/cast/cast_config.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockVideoEncoderController : public VideoEncoderController {
- public:
- MockVideoEncoderController();
- virtual ~MockVideoEncoderController();
-
- MOCK_METHOD1(SetBitRate, void(int new_bit_rate));
-
- MOCK_METHOD1(SkipNextFrame, void(bool skip_next_frame));
-
- MOCK_METHOD0(GenerateKeyFrame, void());
-
- MOCK_METHOD1(LatestFrameIdToReference, void(uint32 frame_id));
-
- MOCK_CONST_METHOD0(NumberOfSkippedFrames, int());
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
-
diff --git a/chromium/media/cast/video_sender/software_video_encoder.h b/chromium/media/cast/video_sender/software_video_encoder.h
new file mode 100644
index 00000000000..f1bf6f63316
--- /dev/null
+++ b/chromium/media/cast/video_sender/software_video_encoder.h
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+
+namespace media {
+class VideoFrame;
+}
+
+namespace media {
+namespace cast {
+namespace transport {
+struct EncodedFrame;
+} // namespace transport
+
+class SoftwareVideoEncoder {
+ public:
+ virtual ~SoftwareVideoEncoder() {}
+
+ // Initialize the encoder before Encode() can be called. This method
+ // must be called on the same thread that Encode() is called on.
+ virtual void Initialize() = 0;
+
+ // Encode a raw image (as part of a video stream).
+ virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ transport::EncodedFrame* encoded_image) = 0;
+
+ // Update the encoder with a new target bit rate.
+ virtual void UpdateRates(uint32 new_bitrate) = 0;
+
+ // Set the next frame to be a key frame.
+ virtual void GenerateKeyFrame() = 0;
+
+ // Set the last frame to reference.
+ virtual void LatestFrameIdToReference(uint32 frame_id) = 0;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_SOFTWARE_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/video_sender/video_encoder.cc b/chromium/media/cast/video_sender/video_encoder.cc
deleted file mode 100644
index faa78d3a3e7..00000000000
--- a/chromium/media/cast/video_sender/video_encoder.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/video_sender/video_encoder.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "media/base/video_frame.h"
-#include "media/cast/cast_defines.h"
-
-namespace media {
-namespace cast {
-
-void LogFrameEncodedEvent(CastEnvironment* const cast_environment,
- const base::TimeTicks& capture_time) {
- cast_environment->Logging()->InsertFrameEvent(kVideoFrameEncoded,
- GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
-}
-
-VideoEncoder::VideoEncoder(scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config,
- uint8 max_unacked_frames)
- : video_config_(video_config),
- cast_environment_(cast_environment),
- skip_next_frame_(false),
- skip_count_(0) {
- if (video_config.codec == kVp8) {
- vp8_encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
- } else {
- DCHECK(false) << "Invalid config"; // Codec not supported.
- }
-
- dynamic_config_.key_frame_requested = false;
- dynamic_config_.latest_frame_id_to_reference = kStartFrameId;
- dynamic_config_.bit_rate = video_config.start_bitrate;
-}
-
-VideoEncoder::~VideoEncoder() {}
-
-bool VideoEncoder::EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
- const FrameEncodedCallback& frame_encoded_callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (video_config_.codec != kVp8) return false;
-
- if (skip_next_frame_) {
- ++skip_count_;
- VLOG(1) << "Skip encoding frame";
- return false;
- }
-
- cast_environment_->Logging()->InsertFrameEvent(kVideoFrameSentToEncoder,
- GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
- cast_environment_->PostTask(CastEnvironment::VIDEO_ENCODER, FROM_HERE,
- base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread,
- base::Unretained(this), video_frame, capture_time,
- dynamic_config_, frame_encoded_callback));
-
- dynamic_config_.key_frame_requested = false;
- return true;
-}
-
-void VideoEncoder::EncodeVideoFrameEncoderThread(
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
- const CodecDynamicConfig& dynamic_config,
- const FrameEncodedCallback& frame_encoded_callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_ENCODER));
- if (dynamic_config.key_frame_requested) {
- vp8_encoder_->GenerateKeyFrame();
- }
- vp8_encoder_->LatestFrameIdToReference(
- dynamic_config.latest_frame_id_to_reference);
- vp8_encoder_->UpdateRates(dynamic_config.bit_rate);
-
- scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
- bool retval = vp8_encoder_->Encode(video_frame, encoded_frame.get());
-
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(LogFrameEncodedEvent, cast_environment_, capture_time));
-
- if (!retval) {
- VLOG(1) << "Encoding failed";
- return;
- }
- if (encoded_frame->data.size() <= 0) {
- VLOG(1) << "Encoding resulted in an empty frame";
- return;
- }
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(frame_encoded_callback,
- base::Passed(&encoded_frame), capture_time));
-}
-
-// Inform the encoder about the new target bit rate.
-void VideoEncoder::SetBitRate(int new_bit_rate) {
- dynamic_config_.bit_rate = new_bit_rate;
-}
-
-// Inform the encoder to not encode the next frame.
-void VideoEncoder::SkipNextFrame(bool skip_next_frame) {
- skip_next_frame_ = skip_next_frame;
-}
-
-// Inform the encoder to encode the next frame as a key frame.
-void VideoEncoder::GenerateKeyFrame() {
- dynamic_config_.key_frame_requested = true;
-}
-
-// Inform the encoder to only reference frames older or equal to frame_id;
-void VideoEncoder::LatestFrameIdToReference(uint32 frame_id) {
- dynamic_config_.latest_frame_id_to_reference = frame_id;
-}
-
-int VideoEncoder::NumberOfSkippedFrames() const {
- return skip_count_;
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/video_sender/video_encoder.h b/chromium/media/cast/video_sender/video_encoder.h
index 559dff16734..c7b1049ce67 100644
--- a/chromium/media/cast/video_sender/video_encoder.h
+++ b/chromium/media/cast/video_sender/video_encoder.h
@@ -5,76 +5,44 @@
#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
-
-namespace media {
-class VideoFrame;
-}
namespace media {
namespace cast {
-// This object is called external from the main cast thread and internally from
-// the video encoder thread.
-class VideoEncoder : public VideoEncoderController {
+// All these functions are called from the main cast thread.
+class VideoEncoder {
public:
- typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
-
- VideoEncoder(scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config,
- uint8 max_unacked_frames);
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
- virtual ~VideoEncoder();
+ virtual ~VideoEncoder() {}
- // Called from the main cast thread. This function post the encode task to the
- // video encoder thread;
// The video_frame must be valid until the closure callback is called.
// The closure callback is called from the video encoder thread as soon as
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
// Once the encoded frame is ready the frame_encoded_callback is called.
- bool EncodeVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
- const FrameEncodedCallback& frame_encoded_callback);
-
- protected:
- struct CodecDynamicConfig {
- bool key_frame_requested;
- uint32 latest_frame_id_to_reference;
- int bit_rate;
- };
-
- // The actual encode, called from the video encoder thread.
- void EncodeVideoFrameEncoderThread(
+ virtual bool EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
- const CodecDynamicConfig& dynamic_config,
- const FrameEncodedCallback& frame_encoded_callback);
-
- // The following functions are called from the main cast thread.
- virtual void SetBitRate(int new_bit_rate) OVERRIDE;
- virtual void SkipNextFrame(bool skip_next_frame) OVERRIDE;
- virtual void GenerateKeyFrame() OVERRIDE;
- virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
- virtual int NumberOfSkippedFrames() const OVERRIDE;
+ const FrameEncodedCallback& frame_encoded_callback) = 0;
- private:
- friend class base::RefCountedThreadSafe<VideoEncoder>;
+ // Inform the encoder about the new target bit rate.
+ virtual void SetBitRate(int new_bit_rate) = 0;
- const VideoSenderConfig video_config_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<Vp8Encoder> vp8_encoder_;
- CodecDynamicConfig dynamic_config_;
- bool skip_next_frame_;
- int skip_count_;
+ // Inform the encoder to encode the next frame as a key frame.
+ virtual void GenerateKeyFrame() = 0;
- DISALLOW_COPY_AND_ASSIGN(VideoEncoder);
+ // Inform the encoder to only reference frames older than or equal to frame_id.
+ virtual void LatestFrameIdToReference(uint32 frame_id) = 0;
};
} // namespace cast
diff --git a/chromium/media/cast/video_sender/video_encoder_impl.cc b/chromium/media/cast/video_sender/video_encoder_impl.cc
new file mode 100644
index 00000000000..b90ef0f07e3
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_encoder_impl.cc
@@ -0,0 +1,139 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_encoder_impl.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+#include "media/cast/video_sender/fake_software_video_encoder.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+
+typedef base::Callback<void(Vp8Encoder*)> PassEncoderCallback;
+
+void InitializeEncoderOnEncoderThread(
+ const scoped_refptr<CastEnvironment>& environment,
+ SoftwareVideoEncoder* encoder) {
+ DCHECK(environment->CurrentlyOn(CastEnvironment::VIDEO));
+ encoder->Initialize();
+}
+
+void EncodeVideoFrameOnEncoderThread(
+ scoped_refptr<CastEnvironment> environment,
+ SoftwareVideoEncoder* encoder,
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const VideoEncoderImpl::CodecDynamicConfig& dynamic_config,
+ const VideoEncoderImpl::FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(environment->CurrentlyOn(CastEnvironment::VIDEO));
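+ // Apply the dynamic settings captured on the main thread (key-frame
+ // request, latest reference frame, target bitrate) before encoding.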
+ if (dynamic_config.key_frame_requested) {
+ encoder->GenerateKeyFrame();
+ }
+ encoder->LatestFrameIdToReference(
+ dynamic_config.latest_frame_id_to_reference);
+ encoder->UpdateRates(dynamic_config.bit_rate);
+
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ if (!encoder->Encode(video_frame, encoded_frame.get())) {
+ VLOG(1) << "Encoding failed";
+ return;
+ }
+ if (encoded_frame->data.empty()) {
+ VLOG(1) << "Encoding resulted in an empty frame";
+ return;
+ }
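+ // Stamp the frame with its RTP timestamp and capture (reference) time
+ // before posting it back to the main thread.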
+ encoded_frame->rtp_timestamp = transport::GetVideoRtpTimestamp(capture_time);
+ encoded_frame->reference_time = capture_time;
+
+ environment->PostTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(
+ frame_encoded_callback, base::Passed(&encoded_frame)));
+}
+} // namespace
+
+VideoEncoderImpl::VideoEncoderImpl(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const VideoSenderConfig& video_config,
+ int max_unacked_frames)
+ : video_config_(video_config),
+ cast_environment_(cast_environment) {
+ if (video_config.codec == transport::kVp8) {
+ encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
+ cast_environment_->PostTask(CastEnvironment::VIDEO,
+ FROM_HERE,
+ base::Bind(&InitializeEncoderOnEncoderThread,
+ cast_environment,
+ encoder_.get()));
+#ifndef OFFICIAL_BUILD
+ } else if (video_config.codec == transport::kFakeSoftwareVideo) {
+ encoder_.reset(new FakeSoftwareVideoEncoder(video_config));
+#endif
+ } else {
+ DCHECK(false) << "Invalid config"; // Codec not supported.
+ }
+
+ dynamic_config_.key_frame_requested = false;
+ dynamic_config_.latest_frame_id_to_reference = kStartFrameId;
+ dynamic_config_.bit_rate = video_config.start_bitrate;
+}
+
+VideoEncoderImpl::~VideoEncoderImpl() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (encoder_) {
+ cast_environment_->PostTask(
+ CastEnvironment::VIDEO,
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<SoftwareVideoEncoder>,
+ encoder_.release()));
+ }
+}
+
+bool VideoEncoderImpl::EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->PostTask(CastEnvironment::VIDEO,
+ FROM_HERE,
+ base::Bind(&EncodeVideoFrameOnEncoderThread,
+ cast_environment_,
+ encoder_.get(),
+ video_frame,
+ capture_time,
+ dynamic_config_,
+ frame_encoded_callback));
+
+ dynamic_config_.key_frame_requested = false;
+ return true;
+}
+
+// Inform the encoder about the new target bit rate.
+void VideoEncoderImpl::SetBitRate(int new_bit_rate) {
+ dynamic_config_.bit_rate = new_bit_rate;
+}
+
+// Inform the encoder to encode the next frame as a key frame.
+void VideoEncoderImpl::GenerateKeyFrame() {
+ dynamic_config_.key_frame_requested = true;
+}
+
+// Inform the encoder to only reference frames older than or equal to frame_id.
+void VideoEncoderImpl::LatestFrameIdToReference(uint32 frame_id) {
+ dynamic_config_.latest_frame_id_to_reference = frame_id;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/video_encoder_impl.h b/chromium/media/cast/video_sender/video_encoder_impl.h
new file mode 100644
index 00000000000..b34b440c935
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_encoder_impl.h
@@ -0,0 +1,72 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_IMPL_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_IMPL_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/video_sender/software_video_encoder.h"
+#include "media/cast/video_sender/video_encoder.h"
+
+namespace media {
+class VideoFrame;
+
+namespace cast {
+
+// This object is called externally from the main cast thread and internally from
+// the video encoder thread.
+class VideoEncoderImpl : public VideoEncoder {
+ public:
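+ // Per-frame encoder settings captured on the main thread and applied on
+ // the video encoder thread.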
+ struct CodecDynamicConfig {
+ bool key_frame_requested;
+ uint32 latest_frame_id_to_reference;
+ int bit_rate;
+ };
+
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
+
+ VideoEncoderImpl(scoped_refptr<CastEnvironment> cast_environment,
+ const VideoSenderConfig& video_config,
+ int max_unacked_frames);
+
+ virtual ~VideoEncoderImpl();
+
+ // Called from the main cast thread. This function posts the encode task to
+ // the video encoder thread.
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the video encoder thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ // Once the encoded frame is ready the frame_encoded_callback is called.
+ virtual bool EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback) OVERRIDE;
+
+ // The following functions are called from the main cast thread.
+ virtual void SetBitRate(int new_bit_rate) OVERRIDE;
+ virtual void GenerateKeyFrame() OVERRIDE;
+ virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+
+ private:
+ const VideoSenderConfig video_config_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ CodecDynamicConfig dynamic_config_;
+
+ // This member belongs to the video encoder thread. It must not be
+ // dereferenced on the main thread. We manage its lifetime manually because
+ // it needs to be initialized, used, and destroyed on the video encoder
+ // thread, and the video encoder thread can outlive the main thread.
+ scoped_ptr<SoftwareVideoEncoder> encoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoEncoderImpl);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_IMPL_H_
diff --git a/chromium/media/cast/video_sender/video_encoder_impl_unittest.cc b/chromium/media/cast/video_sender/video_encoder_impl_unittest.cc
new file mode 100644
index 00000000000..a60812304f2
--- /dev/null
+++ b/chromium/media/cast/video_sender/video_encoder_impl_unittest.cc
@@ -0,0 +1,260 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "media/cast/video_sender/video_encoder_impl.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+namespace {
+class TestVideoEncoderCallback
+ : public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
+ public:
+ TestVideoEncoderCallback() {}
+
+ void SetExpectedResult(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
+ const base::TimeTicks& expected_capture_time) {
+ expected_frame_id_ = expected_frame_id;
+ expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
+ expected_capture_time_ = expected_capture_time;
+ }
+
+ void DeliverEncodedVideoFrame(
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
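+ // A frame that is expected to reference itself must be a key frame.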
+ if (expected_frame_id_ == expected_last_referenced_frame_id_) {
+ EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ encoded_frame->dependency);
+ }
+ EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
+ EXPECT_EQ(expected_last_referenced_frame_id_,
+ encoded_frame->referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
+ }
+
+ protected:
+ virtual ~TestVideoEncoderCallback() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
+
+ uint32 expected_frame_id_;
+ uint32 expected_last_referenced_frame_id_;
+ base::TimeTicks expected_capture_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
+};
+} // namespace
+
+class VideoEncoderImplTest : public ::testing::Test {
+ protected:
+ VideoEncoderImplTest()
+ : test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+ video_config_.rtp_config.ssrc = 1;
+ video_config_.incoming_feedback_ssrc = 2;
+ video_config_.rtp_config.payload_type = 127;
+ video_config_.use_external_encoder = false;
+ video_config_.width = 320;
+ video_config_.height = 240;
+ video_config_.max_bitrate = 5000000;
+ video_config_.min_bitrate = 1000000;
+ video_config_.start_bitrate = 2000000;
+ video_config_.max_qp = 56;
+ video_config_.min_qp = 0;
+ video_config_.max_frame_rate = 30;
+ video_config_.max_number_of_video_buffers_used = 3;
+ video_config_.codec = transport::kVp8;
+ gfx::Size size(video_config_.width, video_config_.height);
+ video_frame_ = media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(video_frame_, 123);
+ }
+
+ virtual ~VideoEncoderImplTest() {}
+
+ virtual void SetUp() OVERRIDE {
+ testing_clock_ = new base::SimpleTestTickClock();
+ task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
+ cast_environment_ =
+ new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_);
+ }
+
+ virtual void TearDown() OVERRIDE {
+ video_encoder_.reset();
+ task_runner_->RunTasks();
+ }
+
+ void Configure(int max_unacked_frames) {
+ video_encoder_.reset(new VideoEncoderImpl(
+ cast_environment_, video_config_, max_unacked_frames));
+ }
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
+ VideoSenderConfig video_config_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
+ scoped_ptr<VideoEncoder> video_encoder_;
+ scoped_refptr<media::VideoFrame> video_frame_;
+
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoEncoderImplTest);
+};
+
+TEST_F(VideoEncoderImplTest, EncodePattern30fpsRunningOutOfAck) {
+ Configure(3);
+
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ base::TimeTicks capture_time;
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ video_encoder_->LatestFrameIdToReference(0);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ video_encoder_->LatestFrameIdToReference(1);
+ test_video_encoder_callback_->SetExpectedResult(2, 1, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(2);
+
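+ // With no newer acks arriving, every remaining frame keeps referencing
+ // frame 2.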
+ for (int i = 3; i < 6; ++i) {
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(i, 2, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+ }
+}
+
+// TODO(pwestin): Re-enable after redesigning the encoder to control the
+// number of frames in flight.
+TEST_F(VideoEncoderImplTest, DISABLED_EncodePattern60fpsRunningOutOfAck) {
+ video_config_.max_number_of_video_buffers_used = 1;
+ Configure(6);
+
+ base::TimeTicks capture_time;
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(0);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(1);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(2, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(2);
+
+ for (int i = 3; i < 9; ++i) {
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(i, 2, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+ }
+}
+
+// TODO(pwestin): Re-enable after redesigning the encoder to control the
+// number of frames in flight.
+TEST_F(VideoEncoderImplTest,
+ DISABLED_EncodePattern60fps200msDelayRunningOutOfAck) {
+ Configure(12);
+
+ base::TimeTicks capture_time;
+ VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+ test_video_encoder_callback_.get());
+
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(0);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(1);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(2, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(2);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(3, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(3);
+ capture_time += base::TimeDelta::FromMilliseconds(33);
+ test_video_encoder_callback_->SetExpectedResult(4, 0, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+
+ video_encoder_->LatestFrameIdToReference(4);
+
+ for (int i = 5; i < 17; ++i) {
+ test_video_encoder_callback_->SetExpectedResult(i, 4, capture_time);
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
+ video_frame_, capture_time, frame_encoded_callback));
+ task_runner_->RunTasks();
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/video_encoder_unittest.cc b/chromium/media/cast/video_sender/video_encoder_unittest.cc
deleted file mode 100644
index b68b8364c43..00000000000
--- a/chromium/media/cast/video_sender/video_encoder_unittest.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "media/cast/test/video_utility.h"
-#include "media/cast/video_sender/video_encoder.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-namespace {
-class TestVideoEncoderCallback :
- public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
- public:
- TestVideoEncoderCallback() {}
-
- void SetExpectedResult(bool expected_key_frame,
- uint8 expected_frame_id,
- uint8 expected_last_referenced_frame_id,
- const base::TimeTicks& expected_capture_time) {
- expected_key_frame_ = expected_key_frame;
- expected_frame_id_ = expected_frame_id;
- expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
- expected_capture_time_ = expected_capture_time;
- }
-
- void DeliverEncodedVideoFrame(scoped_ptr<EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
- EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
- EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
- EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->last_referenced_frame_id);
- EXPECT_EQ(expected_capture_time_, capture_time);
- }
-
- protected:
- virtual ~TestVideoEncoderCallback() {}
-
- private:
- friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
-
- bool expected_key_frame_;
- uint8 expected_frame_id_;
- uint8 expected_last_referenced_frame_id_;
- base::TimeTicks expected_capture_time_;
-};
-} // namespace
-
-class VideoEncoderTest : public ::testing::Test {
- protected:
- VideoEncoderTest()
- : test_video_encoder_callback_(new TestVideoEncoderCallback()) {
- video_config_.sender_ssrc = 1;
- video_config_.incoming_feedback_ssrc = 2;
- video_config_.rtp_payload_type = 127;
- video_config_.use_external_encoder = false;
- video_config_.width = 320;
- video_config_.height = 240;
- video_config_.max_bitrate = 5000000;
- video_config_.min_bitrate = 1000000;
- video_config_.start_bitrate = 2000000;
- video_config_.max_qp = 56;
- video_config_.min_qp = 0;
- video_config_.max_frame_rate = 30;
- video_config_.max_number_of_video_buffers_used = 3;
- video_config_.codec = kVp8;
- gfx::Size size(video_config_.width, video_config_.height);
- video_frame_ = media::VideoFrame::CreateFrame(VideoFrame::I420,
- size, gfx::Rect(size), size, base::TimeDelta());
- PopulateVideoFrame(video_frame_, 123);
- }
-
- virtual ~VideoEncoderTest() {}
-
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
- }
-
- void Configure(uint8 max_unacked_frames) {
- video_encoder_.reset(new VideoEncoder(cast_environment_, video_config_,
- max_unacked_frames));
- video_encoder_controller_ = video_encoder_.get();
- }
-
- base::SimpleTestTickClock testing_clock_;
- scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
- VideoSenderConfig video_config_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_ptr<VideoEncoder> video_encoder_;
- VideoEncoderController* video_encoder_controller_;
- scoped_refptr<media::VideoFrame> video_frame_;
-
- scoped_refptr<CastEnvironment> cast_environment_;
-};
-
-TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
- Configure(3);
-
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- base::TimeTicks capture_time;
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- capture_time += base::TimeDelta::FromMilliseconds(33);
- video_encoder_controller_->LatestFrameIdToReference(0);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- capture_time += base::TimeDelta::FromMilliseconds(33);
- video_encoder_controller_->LatestFrameIdToReference(1);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(2);
-
- for (int i = 3; i < 6; ++i) {
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
- }
-}
-
-// TODO(pwestin): Re-enabled after redesign the encoder to control number of
-// frames in flight.
-TEST_F(VideoEncoderTest,DISABLED_EncodePattern60fpsRunningOutOfAck) {
- video_config_.max_number_of_video_buffers_used = 1;
- Configure(6);
-
- base::TimeTicks capture_time;
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(0);
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(1);
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(2);
-
- for (int i = 3; i < 9; ++i) {
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
- }
-}
-
-// TODO(pwestin): Re-enabled after redesign the encoder to control number of
-// frames in flight.
-TEST_F(VideoEncoderTest, DISABLED_EncodePattern60fps200msDelayRunningOutOfAck) {
- Configure(12);
-
- base::TimeTicks capture_time;
- VideoEncoder::FrameEncodedCallback frame_encoded_callback =
- base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
- test_video_encoder_callback_.get());
-
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(0);
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(1);
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(2);
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(3);
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
-
- video_encoder_controller_->LatestFrameIdToReference(4);
-
- for (int i = 5; i < 17; ++i) {
- test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
- frame_encoded_callback));
- task_runner_->RunTasks();
- }
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/video_sender/video_sender.cc b/chromium/media/cast/video_sender/video_sender.cc
index 7391fe8e645..cf050b7f10c 100644
--- a/chromium/media/cast/video_sender/video_sender.cc
+++ b/chromium/media/cast/video_sender/video_sender.cc
@@ -4,451 +4,395 @@
#include "media/cast/video_sender/video_sender.h"
-#include <list>
+#include <algorithm>
+#include <cstring>
#include "base/bind.h"
+#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "crypto/encryptor.h"
-#include "crypto/symmetric_key.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/video_sender/video_encoder.h"
+#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/video_sender/external_video_encoder.h"
+#include "media/cast/video_sender/video_encoder_impl.h"
namespace media {
namespace cast {
-const int64 kMinSchedulingDelayMs = 1;
-
-class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {
- public:
- explicit LocalRtcpVideoSenderFeedback(VideoSender* video_sender)
- : video_sender_(video_sender) {
- }
-
- virtual void OnReceivedCastFeedback(
- const RtcpCastMessage& cast_feedback) OVERRIDE {
- video_sender_->OnReceivedCastFeedback(cast_feedback);
- }
-
- private:
- VideoSender* video_sender_;
-};
-
-class LocalRtpVideoSenderStatistics : public RtpSenderStatistics {
- public:
- explicit LocalRtpVideoSenderStatistics(RtpSender* rtp_sender)
- : rtp_sender_(rtp_sender) {
- }
-
- virtual void GetStatistics(const base::TimeTicks& now,
- RtcpSenderInfo* sender_info) OVERRIDE {
- rtp_sender_->RtpStatistics(now, sender_info);
- }
-
- private:
- RtpSender* rtp_sender_;
-};
+const int kNumAggressiveReportsSentAtStart = 100;
+const int kMinSchedulingDelayMs = 1;
VideoSender::VideoSender(
scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacedPacketSender* const paced_packet_sender)
- : rtp_max_delay_(
- base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
- max_frame_rate_(video_config.max_frame_rate),
- cast_environment_(cast_environment),
- rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
- rtp_sender_(new RtpSender(cast_environment, NULL, &video_config,
- paced_packet_sender)),
- last_acked_frame_id_(-1),
- last_sent_frame_id_(-1),
- duplicate_ack_(0),
- last_skip_count_(0),
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
+ transport::CastTransportSender* const transport_sender)
+ : cast_environment_(cast_environment),
+ target_playout_delay_(base::TimeDelta::FromMilliseconds(
+ video_config.rtp_config.max_delay_ms)),
+ transport_sender_(transport_sender),
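+ // Bound the number of frames in flight by how many frames fit within the
+ // target playout delay at the maximum frame rate, capped at
+ // kMaxUnackedFrames.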
+ max_unacked_frames_(
+ std::min(kMaxUnackedFrames,
+ 1 + static_cast<int>(target_playout_delay_ *
+ video_config.max_frame_rate /
+ base::TimeDelta::FromSeconds(1)))),
+ rtcp_(cast_environment_,
+ this,
+ transport_sender_,
+ NULL, // paced sender.
+ NULL,
+ video_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+ video_config.rtp_config.ssrc,
+ video_config.incoming_feedback_ssrc,
+ video_config.rtcp_c_name,
+ VIDEO_EVENT),
+ rtp_timestamp_helper_(kVideoFrequency),
+ num_aggressive_rtcp_reports_sent_(0),
+ frames_in_encoder_(0),
+ last_sent_frame_id_(0),
+ latest_acked_frame_id_(0),
+ duplicate_ack_counter_(0),
congestion_control_(cast_environment->Clock(),
- video_config.congestion_control_back_off,
video_config.max_bitrate,
video_config.min_bitrate,
- video_config.start_bitrate),
- initialized_(false),
+ max_unacked_frames_),
+ cast_initialization_status_(STATUS_VIDEO_UNINITIALIZED),
weak_factory_(this) {
- max_unacked_frames_ = static_cast<uint8>(video_config.rtp_max_delay_ms *
- video_config.max_frame_rate / 1000) + 1;
- VLOG(1) << "max_unacked_frames " << static_cast<int>(max_unacked_frames_);
- DCHECK_GT(max_unacked_frames_, 0) << "Invalid argument";
-
- rtp_video_sender_statistics_.reset(
- new LocalRtpVideoSenderStatistics(rtp_sender_.get()));
+ VLOG(1) << "max_unacked_frames " << max_unacked_frames_;
+ DCHECK_GT(max_unacked_frames_, 0);
if (video_config.use_external_encoder) {
- DCHECK(video_encoder_controller) << "Invalid argument";
- video_encoder_controller_ = video_encoder_controller;
+ video_encoder_.reset(new ExternalVideoEncoder(cast_environment,
+ video_config,
+ create_vea_cb,
+ create_video_encode_mem_cb));
} else {
- video_encoder_.reset(new VideoEncoder(cast_environment, video_config,
- max_unacked_frames_));
- video_encoder_controller_ = video_encoder_.get();
+ video_encoder_.reset(new VideoEncoderImpl(
+ cast_environment, video_config, max_unacked_frames_));
}
+ cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
- if (video_config.aes_iv_mask.size() == kAesKeySize &&
- video_config.aes_key.size() == kAesKeySize) {
- iv_mask_ = video_config.aes_iv_mask;
- crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
- crypto::SymmetricKey::AES, video_config.aes_key);
- encryptor_.reset(new crypto::Encryptor());
- encryptor_->Init(key, crypto::Encryptor::CTR, std::string());
- } else if (video_config.aes_iv_mask.size() != 0 ||
- video_config.aes_key.size() != 0) {
- DCHECK(false) << "Invalid crypto configuration";
- }
+ media::cast::transport::CastTransportVideoConfig transport_config;
+ transport_config.codec = video_config.codec;
+ transport_config.rtp.config = video_config.rtp_config;
+ transport_config.rtp.max_outstanding_frames = max_unacked_frames_;
+ transport_sender_->InitializeVideo(transport_config);
- rtcp_.reset(new Rtcp(
- cast_environment_,
- rtcp_feedback_.get(),
- paced_packet_sender,
- rtp_video_sender_statistics_.get(),
- NULL,
- video_config.rtcp_mode,
- base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- video_config.sender_ssrc,
- video_config.incoming_feedback_ssrc,
- video_config.rtcp_c_name));
-}
+ rtcp_.SetCastReceiverEventHistorySize(kReceiverRtcpEventHistorySize);
-VideoSender::~VideoSender() {}
+ memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
+}
-void VideoSender::InitializeTimers() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (!initialized_) {
- initialized_ = true;
- ScheduleNextRtcpReport();
- ScheduleNextResendCheck();
- ScheduleNextSkippedFramesCheck();
- }
+VideoSender::~VideoSender() {
}
void VideoSender::InsertRawVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (cast_initialization_status_ != STATUS_VIDEO_INITIALIZED) {
+ NOTREACHED();
+ return;
+ }
DCHECK(video_encoder_.get()) << "Invalid state";
- cast_environment_->Logging()->InsertFrameEvent(kVideoFrameReceived,
- GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
- if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
- base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
- weak_factory_.GetWeakPtr()))) {
+ RtpTimestamp rtp_timestamp = GetVideoRtpTimestamp(capture_time);
+ cast_environment_->Logging()->InsertFrameEvent(
+ capture_time, FRAME_CAPTURE_BEGIN, VIDEO_EVENT,
+ rtp_timestamp, kFrameIdUnknown);
+ cast_environment_->Logging()->InsertFrameEvent(
+ cast_environment_->Clock()->NowTicks(),
+ FRAME_CAPTURE_END, VIDEO_EVENT,
+ rtp_timestamp,
+ kFrameIdUnknown);
+
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT2(
+ "cast_perf_test", "InsertRawVideoFrame",
+ TRACE_EVENT_SCOPE_THREAD,
+ "timestamp", capture_time.ToInternalValue(),
+ "rtp_timestamp", rtp_timestamp);
+
+ if (AreTooManyFramesInFlight()) {
+ VLOG(1) << "Dropping frame due to too many frames currently in-flight.";
+ return;
}
-}
-
-void VideoSender::InsertCodedVideoFrame(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) {
- DCHECK(!video_encoder_.get()) << "Invalid state";
- DCHECK(encoded_frame) << "Invalid argument";
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- SendEncodedVideoFrame(encoded_frame, capture_time);
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
-}
-void VideoSender::SendEncodedVideoFrameMainThread(
- scoped_ptr<EncodedVideoFrame> video_frame,
- const base::TimeTicks& capture_time) {
- SendEncodedVideoFrame(video_frame.get(), capture_time);
-}
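+  // Query congestion control for a send bitrate, given the frame's target
+  // playout deadline.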
+ uint32 bitrate = congestion_control_.GetBitrate(
+ capture_time + target_playout_delay_, target_playout_delay_);
-bool VideoSender::EncryptVideoFrame(const EncodedVideoFrame& video_frame,
- EncodedVideoFrame* encrypted_frame) {
- DCHECK(encryptor_) << "Invalid state";
+ video_encoder_->SetBitRate(bitrate);
- if (!encryptor_->SetCounter(GetAesNonce(video_frame.frame_id, iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
- return false;
- }
-
- if (!encryptor_->Encrypt(video_frame.data, &encrypted_frame->data)) {
- NOTREACHED() << "Encrypt error";
- return false;
+ if (video_encoder_->EncodeVideoFrame(
+ video_frame,
+ capture_time,
+ base::Bind(&VideoSender::SendEncodedVideoFrame,
+ weak_factory_.GetWeakPtr(),
+ bitrate))) {
+ frames_in_encoder_++;
+ } else {
+ VLOG(1) << "Encoder rejected a frame. Skipping...";
}
- encrypted_frame->codec = video_frame.codec;
- encrypted_frame->key_frame = video_frame.key_frame;
- encrypted_frame->frame_id = video_frame.frame_id;
- encrypted_frame->last_referenced_frame_id =
- video_frame.last_referenced_frame_id;
- return true;
}
-void VideoSender::SendEncodedVideoFrame(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks& capture_time) {
+void VideoSender::SendEncodedVideoFrame(
+ int requested_bitrate_before_encode,
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- last_send_time_ = cast_environment_->Clock()->NowTicks();
- if (encryptor_) {
- EncodedVideoFrame encrypted_video_frame;
+ DCHECK_GT(frames_in_encoder_, 0);
+ frames_in_encoder_--;
- if (!EncryptVideoFrame(*encoded_frame, &encrypted_video_frame)) {
- // Logging already done.
- return;
- }
- rtp_sender_->IncomingEncodedVideoFrame(&encrypted_video_frame,
- capture_time);
- } else {
- rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+ const uint32 frame_id = encoded_frame->frame_id;
+
+ const bool is_first_frame_to_be_sent = last_send_time_.is_null();
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
+ last_sent_frame_id_ = frame_id;
+ // If this is the first frame about to be sent, fake the value of
+ // |latest_acked_frame_id_| to indicate the receiver starts out all caught up.
+ // Also, schedule the periodic frame re-send checks.
+ if (is_first_frame_to_be_sent) {
+ latest_acked_frame_id_ = frame_id - 1;
+ ScheduleNextResendCheck();
}
- if (encoded_frame->key_frame) {
- VLOG(1) << "Send encoded key frame; frame_id:"
- << static_cast<int>(encoded_frame->frame_id);
+
+ VLOG_IF(1, encoded_frame->dependency == transport::EncodedFrame::KEY)
+ << "Send encoded key frame; frame_id: " << frame_id;
+
+ cast_environment_->Logging()->InsertEncodedFrameEvent(
+ last_send_time_, FRAME_ENCODED, VIDEO_EVENT, encoded_frame->rtp_timestamp,
+ frame_id, static_cast<int>(encoded_frame->data.size()),
+ encoded_frame->dependency == transport::EncodedFrame::KEY,
+ requested_bitrate_before_encode);
+ // Only use lowest 8 bits as key.
+ frame_id_to_rtp_timestamp_[frame_id & 0xff] = encoded_frame->rtp_timestamp;
+
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT1(
+ "cast_perf_test", "VideoFrameEncoded",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", encoded_frame->rtp_timestamp);
+
+ DCHECK(!encoded_frame->reference_time.is_null());
+ rtp_timestamp_helper_.StoreLatestTime(encoded_frame->reference_time,
+ encoded_frame->rtp_timestamp);
+
+ // At the start of the session, it's important to send reports before each
+  // frame so that the receiver can properly compute playout times. More than
+  // one report is sent because transmission is best-effort rather than
+  // guaranteed, so send enough that one should almost certainly get through.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ // SendRtcpReport() will schedule future reports to be made if this is the
+ // last "aggressive report."
+ ++num_aggressive_rtcp_reports_sent_;
+ const bool is_last_aggressive_report =
+ (num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
+ VLOG_IF(1, is_last_aggressive_report) << "Sending last aggressive report.";
+ SendRtcpReport(is_last_aggressive_report);
}
- last_sent_frame_id_ = static_cast<int>(encoded_frame->frame_id);
- UpdateFramesInFlight();
- InitializeTimers();
+
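+  // Inform congestion control that the frame is going out; the frame size is
+  // converted from bytes to bits.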
+ congestion_control_.SendFrameToTransport(
+ frame_id, encoded_frame->data.size() * 8, last_send_time_);
+
+ transport_sender_->InsertCodedVideoFrame(*encoded_frame);
}
-void VideoSender::IncomingRtcpPacket(const uint8* packet, size_t length,
- const base::Closure callback) {
+void VideoSender::IncomingRtcpPacket(scoped_ptr<Packet> packet) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtcp_->IncomingRtcpPacket(packet, length);
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+ rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
}
void VideoSender::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
- cast_environment_->Clock()->NowTicks();
-
- time_to_next = std::max(time_to_next,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
-
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
- time_to_next);
+ base::TimeDelta time_to_next = rtcp_.TimeToSendNextRtcpReport() -
+ cast_environment_->Clock()->NowTicks();
+
+ time_to_next = std::max(
+ time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&VideoSender::SendRtcpReport,
+ weak_factory_.GetWeakPtr(),
+ true),
+ time_to_next);
}
-void VideoSender::SendRtcpReport() {
+void VideoSender::SendRtcpReport(bool schedule_future_reports) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- RtcpSenderLogMessage sender_log_message;
- const FrameRawMap& frame_raw_map =
- cast_environment_->Logging()->GetFrameRawData();
-
- FrameRawMap::const_iterator it = frame_raw_map.begin();
- while (it != frame_raw_map.end()) {
- RtcpSenderFrameLogMessage frame_message;
- frame_message.rtp_timestamp = it->first;
- frame_message.frame_status = kRtcpSenderFrameStatusUnknown;
- if (it->second.type.empty()) {
- ++it;
- continue;
- }
- CastLoggingEvent last_event = it->second.type.back();
- switch (last_event) {
- case kVideoFrameCaptured:
- frame_message.frame_status = kRtcpSenderFrameStatusDroppedByFlowControl;
- break;
- case kVideoFrameSentToEncoder:
- frame_message.frame_status = kRtcpSenderFrameStatusDroppedByEncoder;
- break;
- case kVideoFrameEncoded:
- frame_message.frame_status = kRtcpSenderFrameStatusSentToNetwork;
- break;
- default:
- ++it;
- continue;
- }
- ++it;
- if (it == frame_raw_map.end()) {
- // Last message on our map; only send if it is kVideoFrameEncoded.
- if (last_event != kVideoFrameEncoded) {
- // For other events we will wait for it to finish and report the result
- // in the next report.
- break;
- }
- }
- sender_log_message.push_back(frame_message);
- }
- rtcp_->SendRtcpFromRtpSender(&sender_log_message);
- if (!sender_log_message.empty()) {
- VLOG(1) << "Failed to send all log messages";
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ uint32 now_as_rtp_timestamp = 0;
+ if (rtp_timestamp_helper_.GetCurrentTimeAsRtpTimestamp(
+ now, &now_as_rtp_timestamp)) {
+ rtcp_.SendRtcpFromRtpSender(now, now_as_rtp_timestamp);
+ } else {
+ // |rtp_timestamp_helper_| should have stored a mapping by this point.
+ NOTREACHED();
}
-
- // TODO(pwestin): When we start pulling out the logging by other means we need
- // to synchronize this.
- cast_environment_->Logging()->Reset();
- ScheduleNextRtcpReport();
+ if (schedule_future_reports)
+ ScheduleNextRtcpReport();
}
void VideoSender::ScheduleNextResendCheck() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next;
- if (last_send_time_.is_null()) {
- time_to_next = rtp_max_delay_;
- } else {
- time_to_next = last_send_time_ - cast_environment_->Clock()->NowTicks() +
- rtp_max_delay_;
- }
- time_to_next = std::max(time_to_next,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
-
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ DCHECK(!last_send_time_.is_null());
+ base::TimeDelta time_to_next =
+ last_send_time_ - cast_environment_->Clock()->NowTicks() +
+ target_playout_delay_;
+ time_to_next = std::max(
+ time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
base::Bind(&VideoSender::ResendCheck, weak_factory_.GetWeakPtr()),
- time_to_next);
+ time_to_next);
}
void VideoSender::ResendCheck() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (!last_send_time_.is_null() && last_sent_frame_id_ != -1) {
- base::TimeDelta time_since_last_send =
- cast_environment_->Clock()->NowTicks() - last_send_time_;
- if (time_since_last_send > rtp_max_delay_) {
- if (last_acked_frame_id_ == -1) {
- // We have not received any ack, send a key frame.
- video_encoder_controller_->GenerateKeyFrame();
- last_acked_frame_id_ = -1;
- last_sent_frame_id_ = -1;
- UpdateFramesInFlight();
- } else {
- DCHECK_LE(0, last_acked_frame_id_);
-
- uint32 frame_id = static_cast<uint32>(last_acked_frame_id_ + 1);
- VLOG(1) << "ACK timeout resend frame:" << static_cast<int>(frame_id);
- ResendFrame(frame_id);
- }
+ DCHECK(!last_send_time_.is_null());
+ const base::TimeDelta time_since_last_send =
+ cast_environment_->Clock()->NowTicks() - last_send_time_;
+ if (time_since_last_send > target_playout_delay_) {
+ if (latest_acked_frame_id_ == last_sent_frame_id_) {
+      // Last frame acked; no point in doing anything.
+ } else {
+ VLOG(1) << "ACK timeout; last acked frame: " << latest_acked_frame_id_;
+ ResendForKickstart();
}
}
ScheduleNextResendCheck();
}
-void VideoSender::ScheduleNextSkippedFramesCheck() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next;
- if (last_checked_skip_count_time_.is_null()) {
- time_to_next =
- base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
- } else {
- time_to_next = last_checked_skip_count_time_ -
- cast_environment_->Clock()->NowTicks() +
- base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
- }
- time_to_next = std::max(time_to_next,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
-
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&VideoSender::SkippedFramesCheck, weak_factory_.GetWeakPtr()),
- time_to_next);
-}
-
-void VideoSender::SkippedFramesCheck() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- int skip_count = video_encoder_controller_->NumberOfSkippedFrames();
- if (skip_count - last_skip_count_ >
- kSkippedFramesThreshold * max_frame_rate_) {
- // TODO(pwestin): Propagate this up to the application.
- }
- last_skip_count_ = skip_count;
- last_checked_skip_count_time_ = cast_environment_->Clock()->NowTicks();
- ScheduleNextSkippedFramesCheck();
-}
-
void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
+ if (rtcp_.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
+ congestion_control_.UpdateRtt(rtt);
- if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
- cast_environment_->Logging()->InsertGenericEvent(kRttMs,
- rtt.InMilliseconds());
     // Don't use an RTT lower than our average.
rtt = std::max(rtt, avg_rtt);
+
+ // Having the RTT values implies the receiver sent back a receiver report
+ // based on it having received a report from here. Therefore, ensure this
+ // sender stops aggressively sending reports.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ VLOG(1) << "No longer a need to send reports aggressively (sent "
+ << num_aggressive_rtcp_reports_sent_ << ").";
+ num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
+ ScheduleNextRtcpReport();
+ }
} else {
     // We have no measured value; use the default.
rtt = base::TimeDelta::FromMilliseconds(kStartRttMs);
}
+
+ if (last_send_time_.is_null())
+ return; // Cannot get an ACK without having first sent a frame.
+
if (cast_feedback.missing_frames_and_packets_.empty()) {
- // No lost packets.
- int resend_frame = -1;
- if (last_sent_frame_id_ == -1) return;
-
- video_encoder_controller_->LatestFrameIdToReference(
- cast_feedback.ack_frame_id_);
-
- if (static_cast<uint32>(last_acked_frame_id_ + 1) ==
- cast_feedback.ack_frame_id_) {
- uint32 new_bitrate = 0;
- if (congestion_control_.OnAck(rtt, &new_bitrate)) {
- video_encoder_controller_->SetBitRate(new_bitrate);
- }
- }
- if (static_cast<uint32>(last_acked_frame_id_) == cast_feedback.ack_frame_id_
- // We only count duplicate ACKs when we have sent newer frames.
- && IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
- duplicate_ack_++;
+ video_encoder_->LatestFrameIdToReference(cast_feedback.ack_frame_id_);
+
+ // We only count duplicate ACKs when we have sent newer frames.
+ if (latest_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+ latest_acked_frame_id_ != last_sent_frame_id_) {
+ duplicate_ack_counter_++;
} else {
- duplicate_ack_ = 0;
+ duplicate_ack_counter_ = 0;
}
- if (duplicate_ack_ >= 2 && duplicate_ack_ % 3 == 2) {
- // Resend last ACK + 1 frame.
- resend_frame = static_cast<uint32>(last_acked_frame_id_ + 1);
- }
- if (resend_frame != -1) {
- DCHECK_LE(0, resend_frame);
- VLOG(1) << "Received duplicate ACK for frame:"
- << static_cast<int>(resend_frame);
- ResendFrame(static_cast<uint32>(resend_frame));
+ // TODO(miu): The values "2" and "3" should be derived from configuration.
+ if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
+ VLOG(1) << "Received duplicate ACK for frame " << latest_acked_frame_id_;
+ ResendForKickstart();
}
} else {
- rtp_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
- last_send_time_ = cast_environment_->Clock()->NowTicks();
+ // Only count duplicated ACKs if there is no NACK request in between.
+    // This is to avoid aggressive resends.
+ duplicate_ack_counter_ = 0;
- uint32 new_bitrate = 0;
- if (congestion_control_.OnNack(rtt, &new_bitrate)) {
- video_encoder_controller_->SetBitRate(new_bitrate);
- }
+ // A NACK is also used to cancel pending re-transmissions.
+ transport_sender_->ResendPackets(
+ false, cast_feedback.missing_frames_and_packets_, true, rtt);
}
- ReceivedAck(cast_feedback.ack_frame_id_);
-}
-void VideoSender::ReceivedAck(uint32 acked_frame_id) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- last_acked_frame_id_ = static_cast<int>(acked_frame_id);
- cast_environment_->Logging()->InsertGenericEvent(kAckReceived,
- acked_frame_id);
- VLOG(1) << "ReceivedAck:" << static_cast<int>(acked_frame_id);
- last_acked_frame_id_ = acked_frame_id;
- UpdateFramesInFlight();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ congestion_control_.AckFrame(cast_feedback.ack_frame_id_, now);
+
+ RtpTimestamp rtp_timestamp =
+ frame_id_to_rtp_timestamp_[cast_feedback.ack_frame_id_ & 0xff];
+ cast_environment_->Logging()->InsertFrameEvent(now,
+ FRAME_ACK_RECEIVED,
+ VIDEO_EVENT,
+ rtp_timestamp,
+ cast_feedback.ack_frame_id_);
+
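+  // Frame IDs wrap around, so detect out-of-order ACKs via the signed
+  // difference.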
+ const bool is_acked_out_of_order =
+ static_cast<int32>(cast_feedback.ack_frame_id_ -
+ latest_acked_frame_id_) < 0;
+ VLOG(2) << "Received ACK" << (is_acked_out_of_order ? " out-of-order" : "")
+ << " for frame " << cast_feedback.ack_frame_id_;
+ if (!is_acked_out_of_order) {
+ // Cancel resends of acked frames.
+ MissingFramesAndPacketsMap missing_frames_and_packets;
+ PacketIdSet missing;
+ while (latest_acked_frame_id_ != cast_feedback.ack_frame_id_) {
+ latest_acked_frame_id_++;
+ missing_frames_and_packets[latest_acked_frame_id_] = missing;
+ }
+ transport_sender_->ResendPackets(
+ false, missing_frames_and_packets, true, rtt);
+ latest_acked_frame_id_ = cast_feedback.ack_frame_id_;
+ }
}
-void VideoSender::UpdateFramesInFlight() {
+bool VideoSender::AreTooManyFramesInFlight() const {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (last_sent_frame_id_ != -1) {
- DCHECK_LE(0, last_sent_frame_id_);
- uint32 frames_in_flight;
- if (last_acked_frame_id_ != -1) {
- DCHECK_LE(0, last_acked_frame_id_);
- frames_in_flight = static_cast<uint32>(last_sent_frame_id_) -
- static_cast<uint32>(last_acked_frame_id_);
- } else {
- frames_in_flight = static_cast<uint32>(last_sent_frame_id_) + 1;
- }
- VLOG(1) << "Frames in flight; last sent: " << last_sent_frame_id_
- << " last acked:" << last_acked_frame_id_;
- if (frames_in_flight >= max_unacked_frames_) {
- video_encoder_controller_->SkipNextFrame(true);
- return;
- }
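+  // Count frames still in the encoder plus frames sent but not yet acked.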
+ int frames_in_flight = frames_in_encoder_;
+ if (!last_send_time_.is_null()) {
+ frames_in_flight +=
+ static_cast<int32>(last_sent_frame_id_ - latest_acked_frame_id_);
}
- video_encoder_controller_->SkipNextFrame(false);
+ VLOG(2) << frames_in_flight
+ << " frames in flight; last sent: " << last_sent_frame_id_
+ << " latest acked: " << latest_acked_frame_id_
+ << " frames in encoder: " << frames_in_encoder_;
+ return frames_in_flight >= max_unacked_frames_;
}
-void VideoSender::ResendFrame(uint32 resend_frame_id) {
+void VideoSender::ResendForKickstart() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!last_send_time_.is_null());
+ VLOG(1) << "Resending last packet of frame " << last_sent_frame_id_
+ << " to kick-start.";
+  // Send the last packet of the last encoded frame to kick-start
+  // retransmission. This gives the receiver enough information to determine
+  // which packets and frames are missing.
MissingFramesAndPacketsMap missing_frames_and_packets;
PacketIdSet missing;
- missing_frames_and_packets.insert(std::make_pair(resend_frame_id, missing));
- rtp_sender_->ResendPackets(missing_frames_and_packets);
+ missing.insert(kRtcpCastLastPacket);
+ missing_frames_and_packets.insert(
+ std::make_pair(last_sent_frame_id_, missing));
last_send_time_ = cast_environment_->Clock()->NowTicks();
+
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ rtcp_.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt);
+
+  // This extra packet is sent to kick-start the session; there is no need to
+  // optimize re-transmission for this case.
+ transport_sender_->ResendPackets(false, missing_frames_and_packets,
+ false, rtt);
}
} // namespace cast
diff --git a/chromium/media/cast/video_sender/video_sender.gypi b/chromium/media/cast/video_sender/video_sender.gypi
deleted file mode 100644
index e91a8c97efe..00000000000
--- a/chromium/media/cast/video_sender/video_sender.gypi
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'includes': [
- 'codecs/vp8/vp8_encoder.gypi',
- ],
- 'targets': [
- {
- 'target_name': 'video_sender',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
- ],
- 'sources': [
- 'video_encoder.h',
- 'video_encoder.cc',
- 'video_sender.h',
- 'video_sender.cc',
- ], # source
- 'dependencies': [
- '<(DEPTH)/crypto/crypto.gyp:crypto',
- '<(DEPTH)/media/cast/rtcp/rtcp.gyp:*',
- '<(DEPTH)/media/cast/net/rtp_sender/rtp_sender.gyp:*',
- '<(DEPTH)/media/media.gyp:media',
- '<(DEPTH)/media/media.gyp:shared_memory_support',
- 'congestion_control',
- 'cast_vp8_encoder',
- ],
- },
- ],
-}
diff --git a/chromium/media/cast/video_sender/video_sender.h b/chromium/media/cast/video_sender/video_sender.h
index eb7b5ea4f21..cf8d27511c2 100644
--- a/chromium/media/cast/video_sender/video_sender.h
+++ b/chromium/media/cast/video_sender/video_sender.h
@@ -15,25 +15,22 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/congestion_control/congestion_control.h"
-#include "media/cast/net/rtp_sender/rtp_sender.h"
+#include "media/cast/logging/logging_defines.h"
#include "media/cast/rtcp/rtcp.h"
-
-namespace crypto {
- class Encryptor;
-}
+#include "media/cast/rtp_timestamp_helper.h"
namespace media {
+
class VideoFrame;
-}
-namespace media {
namespace cast {
-class VideoEncoder;
-class LocalRtcpVideoSenderFeedback;
-class LocalRtpVideoSenderStatistics;
class LocalVideoEncoderCallback;
-class PacedPacketSender;
+class VideoEncoder;
+
+namespace transport {
+class CastTransportSender;
+}
// Not thread safe. Only called from the main cast thread.
// This class owns all objects related to sending video, objects that create RTP
@@ -41,101 +38,139 @@ class PacedPacketSender;
// RTCP packets.
// Additionally it posts a bunch of delayed tasks to the main thread for various
// timeouts.
-class VideoSender : public base::NonThreadSafe,
+class VideoSender : public RtcpSenderFeedback,
+ public base::NonThreadSafe,
public base::SupportsWeakPtr<VideoSender> {
public:
VideoSender(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacedPacketSender* const paced_packet_sender);
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
+ transport::CastTransportSender* const transport_sender);
virtual ~VideoSender();
- // The video_frame must be valid until the closure callback is called.
- // The closure callback is called from the video encoder thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- void InsertRawVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time);
-
- // The video_frame must be valid until the closure callback is called.
- // The closure callback is called from the main thread as soon as
- // the cast sender is done with the frame; it does not mean that the encoded
- // frame has been sent out.
- void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback);
+ CastInitializationStatus InitializationResult() const {
+ return cast_initialization_status_;
+ }
+
+ // Note: It is not guaranteed that |video_frame| will actually be encoded and
+  // sent if VideoSender detects too many frames in flight. Therefore, clients
+ // should be careful about the rate at which this method is called.
+ //
+ // Note: It is invalid to call this method if InitializationResult() returns
+ // anything but STATUS_VIDEO_INITIALIZED.
+ void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time);
// Only called from the main cast thread.
- void IncomingRtcpPacket(const uint8* packet, size_t length,
- const base::Closure callback);
+ void IncomingRtcpPacket(scoped_ptr<Packet> packet);
protected:
// Protected for testability.
- void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback)
+ OVERRIDE;
private:
- friend class LocalRtcpVideoSenderFeedback;
-
- // Schedule when we should send the next RTPC report,
- // via a PostDelayedTask to the main cast thread.
+ // Schedule and execute periodic sending of RTCP report.
void ScheduleNextRtcpReport();
- void SendRtcpReport();
-
- // Schedule when we should check that we have received an acknowledgment, or a
- // loss report from our remote peer. If we have not heard back from our remote
- // peer we speculatively resend our oldest unacknowledged frame (the whole
- // frame). Note for this to happen we need to lose all pending packets (in
- // normal operation 3 full frames), hence this is the last resort to prevent
- // us getting stuck after a long outage.
+ void SendRtcpReport(bool schedule_future_reports);
+
+ // Schedule and execute periodic checks for re-sending packets. If no
+ // acknowledgements have been received for "too long," VideoSender will
+ // speculatively re-send certain packets of an unacked frame to kick-start
+ // re-transmission. This is a last resort tactic to prevent the session from
+ // getting stuck after a long outage.
void ScheduleNextResendCheck();
void ResendCheck();
+ void ResendForKickstart();
+
+ // Returns true if there are too many frames in flight, as defined by the
+ // configured target playout delay plus simple logic. When this is true,
+  // InsertRawVideoFrame() will silently drop frames instead of sending them to
+ // the video encoder.
+ bool AreTooManyFramesInFlight() const;
+
+  // Called by the |video_encoder_| with the next EncodedFrame to send.
+ void SendEncodedVideoFrame(int requested_bitrate_before_encode,
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ const base::TimeDelta target_playout_delay_;
+
+ // Sends encoded frames over the configured transport (e.g., UDP). In
+ // Chromium, this could be a proxy that first sends the frames from a renderer
+ // process to the browser process over IPC, with the browser process being
+ // responsible for "packetizing" the frames and pushing packets into the
+ // network layer.
+ transport::CastTransportSender* const transport_sender_;
+
+ // Maximum number of outstanding frames before the encoding and sending of
+ // new frames shall halt.
+ const int max_unacked_frames_;
+
+ // Encodes media::VideoFrame images into EncodedFrames. Per configuration,
+ // this will point to either the internal software-based encoder or a proxy to
+ // a hardware-based encoder.
+ scoped_ptr<VideoEncoder> video_encoder_;
- // Monitor how many frames that are silently dropped by the video sender
- // per time unit.
- void ScheduleNextSkippedFramesCheck();
- void SkippedFramesCheck();
-
- void SendEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
- void ResendFrame(uint32 resend_frame_id);
- void ReceivedAck(uint32 acked_frame_id);
- void UpdateFramesInFlight();
-
- void SendEncodedVideoFrameMainThread(
- scoped_ptr<EncodedVideoFrame> video_frame,
- const base::TimeTicks& capture_time);
+ // Manages sending/receiving of RTCP packets, including sender/receiver
+ // reports.
+ Rtcp rtcp_;
- void InitializeTimers();
+ // Records lip-sync (i.e., mapping of RTP <--> NTP timestamps), and
+ // extrapolates this mapping to any other point in time.
+ RtpTimestampHelper rtp_timestamp_helper_;
- // Caller must allocate the destination |encrypted_video_frame| the data
- // member will be resized to hold the encrypted size.
- bool EncryptVideoFrame(const EncodedVideoFrame& encoded_frame,
- EncodedVideoFrame* encrypted_video_frame);
+ // Counts how many RTCP reports are being "aggressively" sent (i.e., one per
+ // frame) at the start of the session. Once a threshold is reached, RTCP
+ // reports are instead sent at the configured interval + random drift.
+ int num_aggressive_rtcp_reports_sent_;
- const base::TimeDelta rtp_max_delay_;
- const int max_frame_rate_;
+ // The number of frames currently being processed in |video_encoder_|.
+ int frames_in_encoder_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
- scoped_ptr<LocalRtpVideoSenderStatistics> rtp_video_sender_statistics_;
- scoped_ptr<VideoEncoder> video_encoder_;
- scoped_ptr<Rtcp> rtcp_;
- scoped_ptr<RtpSender> rtp_sender_;
- VideoEncoderController* video_encoder_controller_;
- uint8 max_unacked_frames_;
- scoped_ptr<crypto::Encryptor> encryptor_;
- std::string iv_mask_;
- int last_acked_frame_id_;
- int last_sent_frame_id_;
- int duplicate_ack_;
+ // This is "null" until the first frame is sent. Thereafter, this tracks the
+ // last time any frame was sent or re-sent.
base::TimeTicks last_send_time_;
- base::TimeTicks last_checked_skip_count_time_;
- int last_skip_count_;
+
+ // The ID of the last frame sent. Logic throughout VideoSender assumes this
+ // can safely wrap-around. This member is invalid until
+ // |!last_send_time_.is_null()|.
+ uint32 last_sent_frame_id_;
+
+ // The ID of the latest (not necessarily the last) frame that has been
+ // acknowledged. Logic throughout VideoSender assumes this can safely
+ // wrap-around. This member is invalid until |!last_send_time_.is_null()|.
+ uint32 latest_acked_frame_id_;
+
+  // Counts the number of duplicate ACKs that are being received. When this
+ // number reaches a threshold, the sender will take this as a sign that the
+ // receiver hasn't yet received the first packet of the next frame. In this
+ // case, VideoSender will trigger a re-send of the next frame.
+ int duplicate_ack_counter_;
+
+  // When we get close to the max number of un-acked frames, we drastically
+  // lower the bitrate to ensure that we catch up. Without this we risk
+  // getting stuck in a catch-up state forever.
CongestionControl congestion_control_;
- bool initialized_;
+ // If this sender is ready for use, this is STATUS_VIDEO_INITIALIZED.
+ CastInitializationStatus cast_initialization_status_;
+
+ // This is a "good enough" mapping for finding the RTP timestamp associated
+ // with a video frame. The key is the lowest 8 bits of frame id (which is
+ // what is sent via RTCP). This map is used for logging purposes.
+ RtpTimestamp frame_id_to_rtp_timestamp_[256];
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<VideoSender> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VideoSender);
@@ -145,4 +180,3 @@ class VideoSender : public base::NonThreadSafe,
} // namespace media
#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
-
diff --git a/chromium/media/cast/video_sender/video_sender_unittest.cc b/chromium/media/cast/video_sender/video_sender_unittest.cc
index c4968415ffb..49fae46c73d 100644
--- a/chromium/media/cast/video_sender/video_sender_unittest.cc
+++ b/chromium/media/cast/video_sender/video_sender_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include <vector>
#include "base/bind.h"
@@ -9,11 +11,14 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
-#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "media/cast/test/video_utility.h"
-#include "media/cast/video_sender/mock_video_encoder_controller.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/fake_video_encode_accelerator.h"
+#include "media/cast/test/utility/default_config.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/transport/cast_transport_sender_impl.h"
+#include "media/cast/transport/pacing/paced_sender.h"
#include "media/cast/video_sender/video_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -22,25 +27,94 @@ namespace media {
namespace cast {
namespace {
-static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
static const uint8 kPixelValue = 123;
static const int kWidth = 320;
static const int kHeight = 240;
-}
using testing::_;
using testing::AtLeast;
-namespace {
-class PeerVideoSender : public VideoSender {
+void CreateVideoEncodeAccelerator(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ scoped_ptr<VideoEncodeAccelerator> fake_vea,
+ const ReceiveVideoEncodeAcceleratorCallback& callback) {
+ callback.Run(task_runner, fake_vea.Pass());
+}
+
+void CreateSharedMemory(
+ size_t size, const ReceiveVideoEncodeMemoryCallback& callback) {
+ scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
+ if (!shm->CreateAndMapAnonymous(size)) {
+ NOTREACHED();
+ return;
+ }
+ callback.Run(shm.Pass());
+}
+
+class TestPacketSender : public transport::PacketSender {
public:
- PeerVideoSender(scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config,
- VideoEncoderController* const video_encoder_controller,
- PacedPacketSender* const paced_packet_sender)
- : VideoSender(cast_environment, video_config,
- video_encoder_controller, paced_packet_sender) {
+ TestPacketSender()
+ : number_of_rtp_packets_(0),
+ number_of_rtcp_packets_(0),
+ paused_(false) {}
+
+  // A singular packet implies an RTCP packet.
+ virtual bool SendPacket(transport::PacketRef packet,
+ const base::Closure& cb) OVERRIDE {
+ if (paused_) {
+ stored_packet_ = packet;
+ callback_ = cb;
+ return false;
+ }
+ if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
+ ++number_of_rtcp_packets_;
+ } else {
+ // Check that at least one RTCP packet was sent before the first RTP
+ // packet. This confirms that the receiver will have the necessary lip
+ // sync info before it has to calculate the playout time of the first
+ // frame.
+ if (number_of_rtp_packets_ == 0)
+ EXPECT_LE(1, number_of_rtcp_packets_);
+ ++number_of_rtp_packets_;
+ }
+ return true;
}
+
+ int number_of_rtp_packets() const { return number_of_rtp_packets_; }
+
+ int number_of_rtcp_packets() const { return number_of_rtcp_packets_; }
+
+ void SetPause(bool paused) {
+ paused_ = paused;
+ if (!paused && stored_packet_) {
+ SendPacket(stored_packet_, callback_);
+ callback_.Run();
+ }
+ }
+
+ private:
+ int number_of_rtp_packets_;
+ int number_of_rtcp_packets_;
+ bool paused_;
+ base::Closure callback_;
+ transport::PacketRef stored_packet_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestPacketSender);
+};
+
+class PeerVideoSender : public VideoSender {
+ public:
+ PeerVideoSender(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const VideoSenderConfig& video_config,
+ const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
+ const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
+ transport::CastTransportSender* const transport_sender)
+ : VideoSender(cast_environment,
+ video_config,
+ create_vea_cb,
+ create_video_encode_mem_cb,
+ transport_sender) {}
using VideoSender::OnReceivedCastFeedback;
};
} // namespace
@@ -48,17 +122,44 @@ class PeerVideoSender : public VideoSender {
class VideoSenderTest : public ::testing::Test {
protected:
VideoSenderTest() {
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ testing_clock_ = new base::SimpleTestTickClock();
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
+ task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
+ cast_environment_ =
+ new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
+ task_runner_,
+ task_runner_,
+ task_runner_);
+ last_pixel_value_ = kPixelValue;
+ net::IPEndPoint dummy_endpoint;
+ transport_sender_.reset(new transport::CastTransportSenderImpl(
+ NULL,
+ testing_clock_,
+ dummy_endpoint,
+ base::Bind(&UpdateCastTransportStatus),
+ transport::BulkRawEventsCallback(),
+ base::TimeDelta(),
+ task_runner_,
+ &transport_));
}
virtual ~VideoSenderTest() {}
+ virtual void TearDown() OVERRIDE {
+ video_sender_.reset();
+ task_runner_->RunTasks();
+ }
+
+ static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ EXPECT_EQ(transport::TRANSPORT_VIDEO_INITIALIZED, status);
+ }
+
void InitEncoder(bool external) {
VideoSenderConfig video_config;
- video_config.sender_ssrc = 1;
+ video_config.rtp_config.ssrc = 1;
video_config.incoming_feedback_ssrc = 2;
- video_config.rtp_payload_type = 127;
+ video_config.rtcp_c_name = "video_test@10.1.1.1";
+ video_config.rtp_config.payload_type = 127;
video_config.use_external_encoder = external;
video_config.width = kWidth;
video_config.height = kHeight;
@@ -69,111 +170,123 @@ class VideoSenderTest : public ::testing::Test {
video_config.min_qp = 0;
video_config.max_frame_rate = 30;
video_config.max_number_of_video_buffers_used = 1;
- video_config.codec = kVp8;
+ video_config.codec = transport::kVp8;
if (external) {
- video_sender_.reset(new PeerVideoSender(cast_environment_,
- video_config, &mock_video_encoder_controller_, &mock_transport_));
+ scoped_ptr<VideoEncodeAccelerator> fake_vea(
+ new test::FakeVideoEncodeAccelerator(task_runner_));
+ video_sender_.reset(
+ new PeerVideoSender(cast_environment_,
+ video_config,
+ base::Bind(&CreateVideoEncodeAccelerator,
+ task_runner_,
+ base::Passed(&fake_vea)),
+ base::Bind(&CreateSharedMemory),
+ transport_sender_.get()));
} else {
- video_sender_.reset(new PeerVideoSender(cast_environment_, video_config,
- NULL, &mock_transport_));
+ video_sender_.reset(
+ new PeerVideoSender(cast_environment_,
+ video_config,
+ CreateDefaultVideoEncodeAcceleratorCallback(),
+ CreateDefaultVideoEncodeMemoryCallback(),
+ transport_sender_.get()));
}
+ ASSERT_EQ(STATUS_VIDEO_INITIALIZED, video_sender_->InitializationResult());
}
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
- task_runner_, task_runner_, task_runner_, task_runner_,
- GetDefaultCastLoggingConfig());
+ scoped_refptr<media::VideoFrame> GetNewVideoFrame() {
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<media::VideoFrame> video_frame =
+ media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(video_frame, last_pixel_value_++);
+ return video_frame;
}
- scoped_refptr<media::VideoFrame> GetNewVideoFrame() {
+ scoped_refptr<media::VideoFrame> GetLargeNewVideoFrame() {
gfx::Size size(kWidth, kHeight);
scoped_refptr<media::VideoFrame> video_frame =
- media::VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
- size, base::TimeDelta());
- PopulateVideoFrame(video_frame, kPixelValue);
+ media::VideoFrame::CreateFrame(
+ VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrameWithNoise(video_frame);
return video_frame;
}
- MockVideoEncoderController mock_video_encoder_controller_;
- base::SimpleTestTickClock testing_clock_;
- MockPacedPacketSender mock_transport_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
+ void RunTasks(int during_ms) {
+ task_runner_->Sleep(base::TimeDelta::FromMilliseconds(during_ms));
+ }
+
+ base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
+ TestPacketSender transport_;
+ scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_ptr<PeerVideoSender> video_sender_;
scoped_refptr<CastEnvironment> cast_environment_;
+ int last_pixel_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoSenderTest);
};
TEST_F(VideoSenderTest, BuiltInEncoder) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(1);
-
InitEncoder(false);
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
TEST_F(VideoSenderTest, ExternalEncoder) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(1);
- EXPECT_CALL(mock_video_encoder_controller_, SkipNextFrame(false)).Times(1);
InitEncoder(true);
+ task_runner_->RunTasks();
+
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- EncodedVideoFrame video_frame;
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time);
- video_frame.codec = kVp8;
- video_frame.key_frame = true;
- video_frame.frame_id = 0;
- video_frame.last_referenced_frame_id = 0;
- video_frame.data.insert(video_frame.data.begin(), 1000, kPixelValue);
+ task_runner_->RunTasks();
- video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
- base::Bind(base::DoNothing));
+  // We need to run the task to clean up the GPU instance.
+ video_sender_.reset(NULL);
+ task_runner_->RunTasks();
}
TEST_F(VideoSenderTest, RtcpTimer) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
- EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
- EXPECT_CALL(mock_video_encoder_controller_,
- SkipNextFrame(false)).Times(AtLeast(1));
- InitEncoder(true);
-
- EncodedVideoFrame video_frame;
- base::TimeTicks capture_time;
+ InitEncoder(false);
- video_frame.codec = kVp8;
- video_frame.key_frame = true;
- video_frame.frame_id = 0;
- video_frame.last_referenced_frame_id = 0;
- video_frame.data.insert(video_frame.data.begin(), 1000, kPixelValue);
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
- base::Bind(base::DoNothing));
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time);
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
- testing_clock_.Advance(max_rtcp_timeout);
- task_runner_->RunTasks();
+ RunTasks(max_rtcp_timeout.InMilliseconds());
+ EXPECT_LE(1, transport_.number_of_rtp_packets());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
+ // Build Cast msg and expect RTCP packet.
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+ RunTasks(max_rtcp_timeout.InMilliseconds());
+ EXPECT_LE(1, transport_.number_of_rtcp_packets());
}
TEST_F(VideoSenderTest, ResendTimer) {
- EXPECT_CALL(mock_transport_, SendPackets(_)).Times(2);
- EXPECT_CALL(mock_transport_, ResendPackets(_)).Times(1);
-
InitEncoder(false);
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
- task_runner_->RunTasks();
-
// ACK the key frame.
RtcpCastMessage cast_feedback(1);
cast_feedback.media_ssrc_ = 2;
@@ -183,16 +296,230 @@ TEST_F(VideoSenderTest, ResendTimer) {
video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
- task_runner_->RunTasks();
-
base::TimeDelta max_resend_timeout =
base::TimeDelta::FromMilliseconds(1 + kDefaultRtpMaxDelayMs);
// Make sure that we do a re-send.
- testing_clock_.Advance(max_resend_timeout);
+ RunTasks(max_resend_timeout.InMilliseconds());
+ // Should have sent at least 3 packets.
+ EXPECT_LE(
+ 3,
+ transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets());
+}
+
+TEST_F(VideoSenderTest, LogAckReceivedEvent) {
+ InitEncoder(false);
+ SimpleEventSubscriber event_subscriber;
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber);
+
+ int num_frames = 10;
+ for (int i = 0; i < num_frames; i++) {
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ RunTasks(33);
+ }
+
task_runner_->RunTasks();
+
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.ack_frame_id_ = num_frames - 1;
+
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+
+ std::vector<FrameEvent> frame_events;
+ event_subscriber.GetFrameEventsAndReset(&frame_events);
+
+ ASSERT_TRUE(!frame_events.empty());
+ EXPECT_EQ(FRAME_ACK_RECEIVED, frame_events.rbegin()->type);
+ EXPECT_EQ(VIDEO_EVENT, frame_events.rbegin()->media_type);
+ EXPECT_EQ(num_frames - 1u, frame_events.rbegin()->frame_id);
+
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber);
+}
+
+TEST_F(VideoSenderTest, StopSendingInTheAbsenceOfAck) {
+ InitEncoder(false);
+ // Send a stream of frames and don't ACK; by default we shouldn't have more
+ // than 4 frames in flight.
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+
+ // Send 3 more frames and record the number of packets sent.
+ for (int i = 0; i < 3; ++i) {
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ }
+ const int number_of_packets_sent = transport_.number_of_rtp_packets();
+
+ // Send 3 more frames - they should not be encoded, as we have not received
+ // any acks.
+ for (int i = 0; i < 3; ++i) {
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ }
+
+ // We expect a frame to be retransmitted because of duplicated ACKs.
+ // Only one packet of the frame is re-transmitted.
+ EXPECT_EQ(number_of_packets_sent + 1,
+ transport_.number_of_rtp_packets());
+
+ // Start acking and make sure we're back to steady-state.
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+ EXPECT_LE(
+ 4,
+ transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets());
+
+ // Empty the pipeline.
+ RunTasks(100);
+ // Should have sent at least 7 packets.
+ EXPECT_LE(
+ 7,
+ transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets());
+}
+
+TEST_F(VideoSenderTest, DuplicateAckRetransmit) {
+ InitEncoder(false);
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+
+ // Send 3 more frames but don't ACK.
+ for (int i = 0; i < 3; ++i) {
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ }
+ const int number_of_packets_sent = transport_.number_of_rtp_packets();
+
+ // Send duplicated ACKs and mix some invalid NACKs.
+ for (int i = 0; i < 10; ++i) {
+ RtcpCastMessage ack_feedback(1);
+ ack_feedback.media_ssrc_ = 2;
+ ack_feedback.ack_frame_id_ = 0;
+ RtcpCastMessage nack_feedback(1);
+ nack_feedback.media_ssrc_ = 2;
+ nack_feedback.missing_frames_and_packets_[255] = PacketIdSet();
+ video_sender_->OnReceivedCastFeedback(ack_feedback);
+ video_sender_->OnReceivedCastFeedback(nack_feedback);
+ }
+ EXPECT_EQ(number_of_packets_sent, transport_.number_of_rtp_packets());
+
+ // Re-transmit one packet because of duplicated ACKs.
+ for (int i = 0; i < 3; ++i) {
+ RtcpCastMessage ack_feedback(1);
+ ack_feedback.media_ssrc_ = 2;
+ ack_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(ack_feedback);
+ }
+ EXPECT_EQ(number_of_packets_sent + 1, transport_.number_of_rtp_packets());
+}
+
+TEST_F(VideoSenderTest, DuplicateAckRetransmitDoesNotCancelRetransmits) {
+ InitEncoder(false);
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+
+ // Send 2 more frames but don't ACK.
+ for (int i = 0; i < 2; ++i) {
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ }
+ // Pause the transport
+ transport_.SetPause(true);
+
+ // Insert one more video frame.
+ video_frame = GetLargeNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+
+ const int number_of_packets_sent = transport_.number_of_rtp_packets();
+
+ // Send duplicated ACKs and mix some invalid NACKs.
+ for (int i = 0; i < 10; ++i) {
+ RtcpCastMessage ack_feedback(1);
+ ack_feedback.media_ssrc_ = 2;
+ ack_feedback.ack_frame_id_ = 0;
+ RtcpCastMessage nack_feedback(1);
+ nack_feedback.media_ssrc_ = 2;
+ nack_feedback.missing_frames_and_packets_[255] = PacketIdSet();
+ video_sender_->OnReceivedCastFeedback(ack_feedback);
+ video_sender_->OnReceivedCastFeedback(nack_feedback);
+ }
+ EXPECT_EQ(number_of_packets_sent, transport_.number_of_rtp_packets());
+
+ // Re-transmit one packet because of duplicated ACKs.
+ for (int i = 0; i < 3; ++i) {
+ RtcpCastMessage ack_feedback(1);
+ ack_feedback.media_ssrc_ = 2;
+ ack_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(ack_feedback);
+ }
+
+ transport_.SetPause(false);
+ RunTasks(100);
+ EXPECT_LT(number_of_packets_sent + 1, transport_.number_of_rtp_packets());
+}
+
+TEST_F(VideoSenderTest, AcksCancelRetransmits) {
+ InitEncoder(false);
+ transport_.SetPause(true);
+ scoped_refptr<media::VideoFrame> video_frame = GetLargeNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+
+ // Frame should be in buffer, waiting. Now let's ack it.
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+
+ transport_.SetPause(false);
+ RunTasks(33);
+ EXPECT_EQ(0, transport_.number_of_rtp_packets());
+}
+
+TEST_F(VideoSenderTest, NAcksCancelRetransmits) {
+ InitEncoder(false);
+ transport_.SetPause(true);
+ // Send two video frames.
+ scoped_refptr<media::VideoFrame> video_frame = GetLargeNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+ video_frame = GetLargeNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
+ RunTasks(33);
+
+ // Frames should be in buffer, waiting. Now let's ack the first one and nack
+ // one packet in the second one.
+ RtcpCastMessage cast_feedback(1);
+ cast_feedback.media_ssrc_ = 2;
+ cast_feedback.ack_frame_id_ = 0;
+ PacketIdSet missing_packets;
+ missing_packets.insert(0);
+ cast_feedback.missing_frames_and_packets_[1] = missing_packets;
+ video_sender_->OnReceivedCastFeedback(cast_feedback);
+
+ transport_.SetPause(false);
+ RunTasks(33);
+ // Only one packet should be retransmitted.
+ EXPECT_EQ(1, transport_.number_of_rtp_packets());
}
} // namespace cast
} // namespace media
-
diff --git a/chromium/media/cdm/aes_decryptor.cc b/chromium/media/cdm/aes_decryptor.cc
index de6f83474f0..3530c3e9de0 100644
--- a/chromium/media/cdm/aes_decryptor.cc
+++ b/chromium/media/cdm/aes_decryptor.cc
@@ -13,6 +13,7 @@
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
#include "media/base/audio_decoder_config.h"
+#include "media/base/cdm_promise.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/video_decoder_config.h"
@@ -28,7 +29,7 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
// Use a std::list to actually hold the data. Insertion is always done
// at the front, so the "latest" decryption key is always the first one
// in the list.
- typedef std::list<std::pair<uint32, DecryptionKey*> > KeyList;
+ typedef std::list<std::pair<std::string, DecryptionKey*> > KeyList;
public:
SessionIdDecryptionKeyMap() {}
@@ -37,10 +38,11 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
// Replaces value if |session_id| is already present, or adds it if not.
// This |decryption_key| becomes the latest until another insertion or
// |session_id| is erased.
- void Insert(uint32 session_id, scoped_ptr<DecryptionKey> decryption_key);
+ void Insert(const std::string& web_session_id,
+ scoped_ptr<DecryptionKey> decryption_key);
// Deletes the entry for |session_id| if present.
- void Erase(const uint32 session_id);
+ void Erase(const std::string& web_session_id);
   // Returns whether the list is empty.
bool Empty() const { return key_list_.empty(); }
@@ -52,8 +54,8 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
}
private:
- // Searches the list for an element with |session_id|.
- KeyList::iterator Find(const uint32 session_id);
+ // Searches the list for an element with |web_session_id|.
+ KeyList::iterator Find(const std::string& web_session_id);
// Deletes the entry pointed to by |position|.
void Erase(KeyList::iterator position);
@@ -64,26 +66,28 @@ class AesDecryptor::SessionIdDecryptionKeyMap {
};
void AesDecryptor::SessionIdDecryptionKeyMap::Insert(
- uint32 session_id,
+ const std::string& web_session_id,
scoped_ptr<DecryptionKey> decryption_key) {
- KeyList::iterator it = Find(session_id);
+ KeyList::iterator it = Find(web_session_id);
if (it != key_list_.end())
Erase(it);
DecryptionKey* raw_ptr = decryption_key.release();
- key_list_.push_front(std::make_pair(session_id, raw_ptr));
+ key_list_.push_front(std::make_pair(web_session_id, raw_ptr));
}
-void AesDecryptor::SessionIdDecryptionKeyMap::Erase(const uint32 session_id) {
- KeyList::iterator it = Find(session_id);
+void AesDecryptor::SessionIdDecryptionKeyMap::Erase(
+ const std::string& web_session_id) {
+ KeyList::iterator it = Find(web_session_id);
if (it == key_list_.end())
return;
Erase(it);
}
AesDecryptor::SessionIdDecryptionKeyMap::KeyList::iterator
-AesDecryptor::SessionIdDecryptionKeyMap::Find(const uint32 session_id) {
+AesDecryptor::SessionIdDecryptionKeyMap::Find(
+ const std::string& web_session_id) {
for (KeyList::iterator it = key_list_.begin(); it != key_list_.end(); ++it) {
- if (it->first == session_id)
+ if (it->first == web_session_id)
return it;
}
return key_list_.end();
@@ -141,11 +145,8 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
return NULL;
}
- const int data_offset = input.decrypt_config()->data_offset();
- const char* sample =
- reinterpret_cast<const char*>(input.data() + data_offset);
- DCHECK_GT(input.data_size(), data_offset);
- size_t sample_size = static_cast<size_t>(input.data_size() - data_offset);
+ const char* sample = reinterpret_cast<const char*>(input.data());
+ size_t sample_size = static_cast<size_t>(input.data_size());
DCHECK_GT(sample_size, 0U) << "No sample data to be decrypted.";
if (sample_size == 0)
@@ -218,60 +219,73 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
return output;
}
-AesDecryptor::AesDecryptor(const SessionCreatedCB& session_created_cb,
- const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
- const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb)
- : session_created_cb_(session_created_cb),
- session_message_cb_(session_message_cb),
- session_ready_cb_(session_ready_cb),
- session_closed_cb_(session_closed_cb),
- session_error_cb_(session_error_cb) {}
+AesDecryptor::AesDecryptor(const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb)
+ : session_message_cb_(session_message_cb),
+ session_closed_cb_(session_closed_cb) {
+ DCHECK(!session_message_cb_.is_null());
+ DCHECK(!session_closed_cb_.is_null());
+}
AesDecryptor::~AesDecryptor() {
key_map_.clear();
}
-bool AesDecryptor::CreateSession(uint32 session_id,
- const std::string& type,
+void AesDecryptor::CreateSession(const std::string& init_data_type,
const uint8* init_data,
- int init_data_length) {
- // Validate that this is a new session.
- DCHECK(valid_sessions_.find(session_id) == valid_sessions_.end());
- valid_sessions_.insert(session_id);
-
- std::string web_session_id_string(base::UintToString(next_web_session_id_++));
-
- // For now, the AesDecryptor does not care about |type|;
- // just fire the event with the |init_data| as the request.
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ std::string web_session_id(base::UintToString(next_web_session_id_++));
+ valid_sessions_.insert(web_session_id);
+
+ // For now, the AesDecryptor does not care about |init_data_type| or
+ // |session_type|; just resolve the promise and then fire a message event
+ // with the |init_data| as the request.
+ // TODO(jrummell): Validate |init_data_type| and |session_type|.
std::vector<uint8> message;
if (init_data && init_data_length)
message.assign(init_data, init_data + init_data_length);
- session_created_cb_.Run(session_id, web_session_id_string);
- session_message_cb_.Run(session_id, message, std::string());
- return true;
+ promise->resolve(web_session_id);
+
+ session_message_cb_.Run(web_session_id, message, GURL());
}
-void AesDecryptor::UpdateSession(uint32 session_id,
+void AesDecryptor::LoadSession(const std::string& web_session_id,
+ scoped_ptr<NewSessionCdmPromise> promise) {
+ // TODO(xhwang): Change this to NOTREACHED() when blink checks for key systems
+ // that do not support loadSession. See http://crbug.com/342481
+ promise->reject(NOT_SUPPORTED_ERROR, 0, "LoadSession() is not supported.");
+}
+
+void AesDecryptor::UpdateSession(const std::string& web_session_id,
const uint8* response,
- int response_length) {
+ int response_length,
+ scoped_ptr<SimpleCdmPromise> promise) {
CHECK(response);
CHECK_GT(response_length, 0);
- DCHECK(valid_sessions_.find(session_id) != valid_sessions_.end());
+
+ // TODO(jrummell): Convert back to a DCHECK once prefixed EME is removed.
+ if (valid_sessions_.find(web_session_id) == valid_sessions_.end()) {
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Session does not exist.");
+ return;
+ }
std::string key_string(reinterpret_cast<const char*>(response),
response_length);
+
KeyIdAndKeyPairs keys;
if (!ExtractKeysFromJWKSet(key_string, &keys)) {
- session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ promise->reject(
+ INVALID_ACCESS_ERROR, 0, "response is not a valid JSON Web Key Set.");
return;
}
// Make sure that at least one key was extracted.
if (keys.empty()) {
- session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ promise->reject(
+ INVALID_ACCESS_ERROR, 0, "response does not contain any keys.");
return;
}
@@ -279,32 +293,44 @@ void AesDecryptor::UpdateSession(uint32 session_id,
if (it->second.length() !=
static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
DVLOG(1) << "Invalid key length: " << key_string.length();
- session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Invalid key length.");
return;
}
- if (!AddDecryptionKey(session_id, it->first, it->second)) {
- session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ if (!AddDecryptionKey(web_session_id, it->first, it->second)) {
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Unable to add key.");
return;
}
}
- if (!new_audio_key_cb_.is_null())
- new_audio_key_cb_.Run();
+ {
+ base::AutoLock auto_lock(new_key_cb_lock_);
+
+ if (!new_audio_key_cb_.is_null())
+ new_audio_key_cb_.Run();
- if (!new_video_key_cb_.is_null())
- new_video_key_cb_.Run();
+ if (!new_video_key_cb_.is_null())
+ new_video_key_cb_.Run();
+ }
- session_ready_cb_.Run(session_id);
+ promise->resolve();
}
-void AesDecryptor::ReleaseSession(uint32 session_id) {
+void AesDecryptor::ReleaseSession(const std::string& web_session_id,
+ scoped_ptr<SimpleCdmPromise> promise) {
// Validate that this is a reference to an active session and then forget it.
- std::set<uint32>::iterator it = valid_sessions_.find(session_id);
- DCHECK(it != valid_sessions_.end());
+ std::set<std::string>::iterator it = valid_sessions_.find(web_session_id);
+ // TODO(jrummell): Convert back to a DCHECK once prefixed EME is removed.
+ if (it == valid_sessions_.end()) {
+ promise->reject(INVALID_ACCESS_ERROR, 0, "Session does not exist.");
+ return;
+ }
+
valid_sessions_.erase(it);
- DeleteKeysForSession(session_id);
- session_closed_cb_.Run(session_id);
+ // Close the session.
+ DeleteKeysForSession(web_session_id);
+ promise->resolve();
+ session_closed_cb_.Run(web_session_id);
}
Decryptor* AesDecryptor::GetDecryptor() {
@@ -313,6 +339,8 @@ Decryptor* AesDecryptor::GetDecryptor() {
void AesDecryptor::RegisterNewKeyCB(StreamType stream_type,
const NewKeyCB& new_key_cb) {
+ base::AutoLock auto_lock(new_key_cb_lock_);
+
switch (stream_type) {
case kAudio:
new_audio_key_cb_ = new_key_cb;
@@ -333,9 +361,8 @@ void AesDecryptor::Decrypt(StreamType stream_type,
scoped_refptr<DecoderBuffer> decrypted;
// An empty iv string signals that the frame is unencrypted.
if (encrypted->decrypt_config()->iv().empty()) {
- int data_offset = encrypted->decrypt_config()->data_offset();
- decrypted = DecoderBuffer::CopyFrom(encrypted->data() + data_offset,
- encrypted->data_size() - data_offset);
+ decrypted = DecoderBuffer::CopyFrom(encrypted->data(),
+ encrypted->data_size());
} else {
const std::string& key_id = encrypted->decrypt_config()->key_id();
DecryptionKey* key = GetKey(key_id);
@@ -395,15 +422,10 @@ void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
NOTREACHED() << "AesDecryptor does not support audio/video decoding";
}
-bool AesDecryptor::AddDecryptionKey(const uint32 session_id,
+bool AesDecryptor::AddDecryptionKey(const std::string& web_session_id,
const std::string& key_id,
const std::string& key_string) {
scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
- if (!decryption_key) {
- DVLOG(1) << "Could not create key.";
- return false;
- }
-
if (!decryption_key->Init()) {
DVLOG(1) << "Could not initialize decryption key.";
return false;
@@ -412,14 +434,14 @@ bool AesDecryptor::AddDecryptionKey(const uint32 session_id,
base::AutoLock auto_lock(key_map_lock_);
KeyIdToSessionKeysMap::iterator key_id_entry = key_map_.find(key_id);
if (key_id_entry != key_map_.end()) {
- key_id_entry->second->Insert(session_id, decryption_key.Pass());
+ key_id_entry->second->Insert(web_session_id, decryption_key.Pass());
return true;
}
// |key_id| not found, so need to create new entry.
scoped_ptr<SessionIdDecryptionKeyMap> inner_map(
new SessionIdDecryptionKeyMap());
- inner_map->Insert(session_id, decryption_key.Pass());
+ inner_map->Insert(web_session_id, decryption_key.Pass());
key_map_.add(key_id, inner_map.Pass());
return true;
}
@@ -435,14 +457,15 @@ AesDecryptor::DecryptionKey* AesDecryptor::GetKey(
return key_id_found->second->LatestDecryptionKey();
}
-void AesDecryptor::DeleteKeysForSession(const uint32 session_id) {
+void AesDecryptor::DeleteKeysForSession(const std::string& web_session_id) {
base::AutoLock auto_lock(key_map_lock_);
- // Remove all keys associated with |session_id|. Since the data is optimized
- // for access in GetKey(), we need to look at each entry in |key_map_|.
+ // Remove all keys associated with |web_session_id|. Since the data is
+ // optimized for access in GetKey(), we need to look at each entry in
+ // |key_map_|.
KeyIdToSessionKeysMap::iterator it = key_map_.begin();
while (it != key_map_.end()) {
- it->second->Erase(session_id);
+ it->second->Erase(web_session_id);
if (it->second->Empty()) {
// Need to get rid of the entry for this key_id. This will mess up the
// iterator, so we need to increment it first.
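
For orientation before the header diff below: the promise plumbing changes the calling convention for every session operation, with results delivered through the promise rather than through return values and a separate error callback. A minimal caller-side sketch, assuming the CdmPromise constructor shapes used in the unit tests further down; the free functions and the "webm" init data type are illustrative, not part of the patch.

// Caller-side sketch (not part of the patch). Assumed includes:
// "base/bind.h", "media/base/cdm_promise.h", "media/cdm/aes_decryptor.h".
void OnSessionCreated(const std::string& web_session_id) {}  // hypothetical
void OnPromiseRejected(media::MediaKeys::Exception exception_code,
                       uint32 system_code,
                       const std::string& error_message) {}  // hypothetical

void CreateSessionExample(media::MediaKeys* cdm,
                          const uint8* init_data,
                          int init_data_length) {
  scoped_ptr<media::NewSessionCdmPromise> promise(
      new media::NewSessionCdmPromise(base::Bind(&OnSessionCreated),
                                      base::Bind(&OnPromiseRejected)));
  // AesDecryptor resolves this promise synchronously with the generated
  // web session ID, then fires the session message callback.
  cdm->CreateSession("webm", init_data, init_data_length,
                     media::MediaKeys::TEMPORARY_SESSION, promise.Pass());
}
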
diff --git a/chromium/media/cdm/aes_decryptor.h b/chromium/media/cdm/aes_decryptor.h
index a72674c102d..3a177701960 100644
--- a/chromium/media/cdm/aes_decryptor.h
+++ b/chromium/media/cdm/aes_decryptor.h
@@ -27,22 +27,24 @@ namespace media {
// encryption must be CTR with a key size of 128bits.
class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
public:
- AesDecryptor(const SessionCreatedCB& session_created_cb,
- const SessionMessageCB& session_message_cb,
- const SessionReadyCB& session_ready_cb,
- const SessionClosedCB& session_closed_cb,
- const SessionErrorCB& session_error_cb);
+ AesDecryptor(const SessionMessageCB& session_message_cb,
+ const SessionClosedCB& session_closed_cb);
virtual ~AesDecryptor();
// MediaKeys implementation.
- virtual bool CreateSession(uint32 session_id,
- const std::string& type,
+ virtual void CreateSession(const std::string& init_data_type,
const uint8* init_data,
- int init_data_length) OVERRIDE;
- virtual void UpdateSession(uint32 session_id,
+ int init_data_length,
+ SessionType session_type,
+ scoped_ptr<NewSessionCdmPromise> promise) OVERRIDE;
+ virtual void LoadSession(const std::string& web_session_id,
+ scoped_ptr<NewSessionCdmPromise> promise) OVERRIDE;
+ virtual void UpdateSession(const std::string& web_session_id,
const uint8* response,
- int response_length) OVERRIDE;
- virtual void ReleaseSession(uint32 session_id) OVERRIDE;
+ int response_length,
+ scoped_ptr<SimpleCdmPromise> promise) OVERRIDE;
+ virtual void ReleaseSession(const std::string& web_session_id,
+ scoped_ptr<SimpleCdmPromise> promise) OVERRIDE;
virtual Decryptor* GetDecryptor() OVERRIDE;
// Decryptor implementation.
@@ -101,7 +103,7 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
// Creates a DecryptionKey using |key_string| and associates it with |key_id|.
// Returns true if successful.
- bool AddDecryptionKey(const uint32 session_id,
+ bool AddDecryptionKey(const std::string& web_session_id,
const std::string& key_id,
const std::string& key_string);
@@ -109,15 +111,12 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
// the key. Returns NULL if no key is associated with |key_id|.
DecryptionKey* GetKey(const std::string& key_id) const;
- // Deletes all keys associated with |session_id|.
- void DeleteKeysForSession(const uint32 session_id);
+ // Deletes all keys associated with |web_session_id|.
+ void DeleteKeysForSession(const std::string& web_session_id);
// Callbacks for firing session events.
- SessionCreatedCB session_created_cb_;
SessionMessageCB session_message_cb_;
- SessionReadyCB session_ready_cb_;
SessionClosedCB session_closed_cb_;
- SessionErrorCB session_error_cb_;
// Since only Decrypt() is called off the renderer thread, we only need to
// protect |key_map_|, the only member variable that is shared between
@@ -125,8 +124,8 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
KeyIdToSessionKeysMap key_map_; // Protected by |key_map_lock_|.
mutable base::Lock key_map_lock_; // Protects the |key_map_|.
- // Keeps track of current valid session IDs.
- std::set<uint32> valid_sessions_;
+ // Keeps track of current valid sessions.
+ std::set<std::string> valid_sessions_;
// Make web session ID unique per renderer by making it static. Web session
// IDs seen by the app will be "1", "2", etc.
@@ -135,6 +134,10 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
NewKeyCB new_audio_key_cb_;
NewKeyCB new_video_key_cb_;
+ // Protect |new_audio_key_cb_| and |new_video_key_cb_| as they are set on the
+ // main thread but called on the media thread.
+ mutable base::Lock new_key_cb_lock_;
+
DISALLOW_COPY_AND_ASSIGN(AesDecryptor);
};
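
The slimmed constructor keeps only the message and closed callbacks, both DCHECKed non-null. A minimal construction sketch with hypothetical free-function handlers; the signatures follow SessionMessageCB and SessionClosedCB as exercised by the tests below.

// Hypothetical handlers; signatures mirror the mocks in the unit test.
void OnSessionMessage(const std::string& web_session_id,
                      const std::vector<uint8>& message,
                      const GURL& destination_url) {}
void OnSessionClosed(const std::string& web_session_id) {}

void BuildDecryptorExample() {
  media::AesDecryptor decryptor(base::Bind(&OnSessionMessage),
                                base::Bind(&OnSessionClosed));
  // |decryptor| is now ready for CreateSession()/UpdateSession() calls.
}
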
diff --git a/chromium/media/cdm/aes_decryptor_unittest.cc b/chromium/media/cdm/aes_decryptor_unittest.cc
index 3076d5a3f2e..d2d7ee0c8e5 100644
--- a/chromium/media/cdm/aes_decryptor_unittest.cc
+++ b/chromium/media/cdm/aes_decryptor_unittest.cc
@@ -7,11 +7,11 @@
#include "base/basictypes.h"
#include "base/bind.h"
+#include "media/base/cdm_promise.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/mock_filters.h"
#include "media/cdm/aes_decryptor.h"
-#include "media/webm/webm_constants.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -23,6 +23,9 @@ using ::testing::SaveArg;
using ::testing::StrNe;
MATCHER(IsEmpty, "") { return arg.empty(); }
+MATCHER(IsNotEmpty, "") { return !arg.empty(); }
+
+class GURL;
namespace media {
@@ -177,12 +180,10 @@ static scoped_refptr<DecoderBuffer> CreateEncryptedBuffer(
const std::vector<uint8>& data,
const std::vector<uint8>& key_id,
const std::vector<uint8>& iv,
- int offset,
const std::vector<SubsampleEntry>& subsample_entries) {
DCHECK(!data.empty());
- int padded_size = offset + data.size();
- scoped_refptr<DecoderBuffer> encrypted_buffer(new DecoderBuffer(padded_size));
- memcpy(encrypted_buffer->writable_data() + offset, &data[0], data.size());
+ scoped_refptr<DecoderBuffer> encrypted_buffer(new DecoderBuffer(data.size()));
+ memcpy(encrypted_buffer->writable_data(), &data[0], data.size());
CHECK(encrypted_buffer.get());
std::string key_id_string(
reinterpret_cast<const char*>(key_id.empty() ? NULL : &key_id[0]),
@@ -190,22 +191,18 @@ static scoped_refptr<DecoderBuffer> CreateEncryptedBuffer(
std::string iv_string(
reinterpret_cast<const char*>(iv.empty() ? NULL : &iv[0]), iv.size());
encrypted_buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(
- new DecryptConfig(key_id_string, iv_string, offset, subsample_entries)));
+ new DecryptConfig(key_id_string, iv_string, subsample_entries)));
return encrypted_buffer;
}
+enum PromiseResult { RESOLVED, REJECTED };
+
class AesDecryptorTest : public testing::Test {
public:
AesDecryptorTest()
- : decryptor_(base::Bind(&AesDecryptorTest::OnSessionCreated,
- base::Unretained(this)),
- base::Bind(&AesDecryptorTest::OnSessionMessage,
- base::Unretained(this)),
- base::Bind(&AesDecryptorTest::OnSessionReady,
+ : decryptor_(base::Bind(&AesDecryptorTest::OnSessionMessage,
base::Unretained(this)),
base::Bind(&AesDecryptorTest::OnSessionClosed,
- base::Unretained(this)),
- base::Bind(&AesDecryptorTest::OnSessionError,
base::Unretained(this))),
decrypt_cb_(base::Bind(&AesDecryptorTest::BufferDecrypted,
base::Unretained(this))),
@@ -219,52 +216,80 @@ class AesDecryptorTest : public testing::Test {
iv_(kIv, kIv + arraysize(kIv)),
normal_subsample_entries_(
kSubsampleEntriesNormal,
- kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)),
- next_session_id_(1) {
+ kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)) {
}
protected:
+ void OnResolveWithSession(PromiseResult expected,
+ const std::string& web_session_id) {
+ EXPECT_EQ(expected, RESOLVED);
+ EXPECT_GT(web_session_id.length(), 0ul);
+ web_session_id_ = web_session_id;
+ }
+
+ void OnResolve(PromiseResult expected) {
+ EXPECT_EQ(expected, RESOLVED);
+ }
+
+ void OnReject(PromiseResult expected,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ EXPECT_EQ(expected, REJECTED);
+ }
+
+ scoped_ptr<SimpleCdmPromise> CreatePromise(PromiseResult expected) {
+ scoped_ptr<SimpleCdmPromise> promise(new SimpleCdmPromise(
+ base::Bind(
+ &AesDecryptorTest::OnResolve, base::Unretained(this), expected),
+ base::Bind(
+ &AesDecryptorTest::OnReject, base::Unretained(this), expected)));
+ return promise.Pass();
+ }
+
+ scoped_ptr<NewSessionCdmPromise> CreateSessionPromise(
+ PromiseResult expected) {
+ scoped_ptr<NewSessionCdmPromise> promise(new NewSessionCdmPromise(
+ base::Bind(&AesDecryptorTest::OnResolveWithSession,
+ base::Unretained(this),
+ expected),
+ base::Bind(
+ &AesDecryptorTest::OnReject, base::Unretained(this), expected)));
+ return promise.Pass();
+ }
+
// Creates a new session using |key_id|. Returns the session ID.
- uint32 CreateSession(const std::vector<uint8>& key_id) {
+ std::string CreateSession(const std::vector<uint8>& key_id) {
DCHECK(!key_id.empty());
- uint32 session_id = next_session_id_++;
- EXPECT_CALL(*this, OnSessionCreated(session_id, StrNe(std::string())));
- EXPECT_CALL(*this, OnSessionMessage(session_id, key_id, ""));
- EXPECT_TRUE(decryptor_.CreateSession(
- session_id, std::string(), &key_id[0], key_id.size()));
- return session_id;
+ EXPECT_CALL(*this,
+ OnSessionMessage(IsNotEmpty(), key_id, GURL::EmptyGURL()));
+ decryptor_.CreateSession(std::string(),
+ &key_id[0],
+ key_id.size(),
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
+ // This expects the promise to be called synchronously, which is the case
+ // for AesDecryptor.
+ return web_session_id_;
}
// Releases the session specified by |session_id|.
- void ReleaseSession(uint32 session_id) {
+ void ReleaseSession(const std::string& session_id) {
EXPECT_CALL(*this, OnSessionClosed(session_id));
- decryptor_.ReleaseSession(session_id);
+ decryptor_.ReleaseSession(session_id, CreatePromise(RESOLVED));
}
- enum UpdateSessionExpectation {
- SESSION_READY,
- SESSION_ERROR
- };
-
// Updates the session specified by |session_id| with |key|. |result|
// tests that the update succeeds or generates an error.
- void UpdateSessionAndExpect(uint32 session_id,
+ void UpdateSessionAndExpect(std::string session_id,
const std::string& key,
- UpdateSessionExpectation result) {
+ PromiseResult result) {
DCHECK(!key.empty());
- switch (result) {
- case SESSION_READY:
- EXPECT_CALL(*this, OnSessionReady(session_id));
- break;
- case SESSION_ERROR:
- EXPECT_CALL(*this,
- OnSessionError(session_id, MediaKeys::kUnknownError, 0));
- break;
- }
-
- decryptor_.UpdateSession(
- session_id, reinterpret_cast<const uint8*>(key.c_str()), key.length());
+ decryptor_.UpdateSession(session_id,
+ reinterpret_cast<const uint8*>(key.c_str()),
+ key.length(),
+ CreatePromise(result));
}
MOCK_METHOD2(BufferDecrypted, void(Decryptor::Status,
@@ -326,19 +351,15 @@ class AesDecryptorTest : public testing::Test {
}
}
- MOCK_METHOD2(OnSessionCreated,
- void(uint32 session_id, const std::string& web_session_id));
MOCK_METHOD3(OnSessionMessage,
- void(uint32 session_id,
+ void(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& default_url));
- MOCK_METHOD1(OnSessionReady, void(uint32 session_id));
- MOCK_METHOD1(OnSessionClosed, void(uint32 session_id));
- MOCK_METHOD3(OnSessionError,
- void(uint32 session_id, MediaKeys::KeyError, int system_code));
+ const GURL& destination_url));
+ MOCK_METHOD1(OnSessionClosed, void(const std::string& web_session_id));
AesDecryptor decryptor_;
AesDecryptor::DecryptCB decrypt_cb_;
+ std::string web_session_id_;
// Constants for testing.
const std::vector<uint8> original_data_;
@@ -348,101 +369,102 @@ class AesDecryptorTest : public testing::Test {
const std::vector<uint8> iv_;
const std::vector<SubsampleEntry> normal_subsample_entries_;
const std::vector<SubsampleEntry> no_subsample_entries_;
-
- // Generate new session ID every time
- uint32 next_session_id_;
};
TEST_F(AesDecryptorTest, CreateSessionWithNullInitData) {
- uint32 session_id = 8;
- EXPECT_CALL(*this, OnSessionMessage(session_id, IsEmpty(), ""));
- EXPECT_CALL(*this, OnSessionCreated(session_id, StrNe(std::string())));
- EXPECT_TRUE(decryptor_.CreateSession(session_id, std::string(), NULL, 0));
+ EXPECT_CALL(*this,
+ OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSession(std::string(),
+ NULL,
+ 0,
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
}
TEST_F(AesDecryptorTest, MultipleCreateSession) {
- uint32 session_id1 = 10;
- EXPECT_CALL(*this, OnSessionMessage(session_id1, IsEmpty(), ""));
- EXPECT_CALL(*this, OnSessionCreated(session_id1, StrNe(std::string())));
- EXPECT_TRUE(decryptor_.CreateSession(session_id1, std::string(), NULL, 0));
-
- uint32 session_id2 = 11;
- EXPECT_CALL(*this, OnSessionMessage(session_id2, IsEmpty(), ""));
- EXPECT_CALL(*this, OnSessionCreated(session_id2, StrNe(std::string())));
- EXPECT_TRUE(decryptor_.CreateSession(session_id2, std::string(), NULL, 0));
-
- uint32 session_id3 = 23;
- EXPECT_CALL(*this, OnSessionMessage(session_id3, IsEmpty(), ""));
- EXPECT_CALL(*this, OnSessionCreated(session_id3, StrNe(std::string())));
- EXPECT_TRUE(decryptor_.CreateSession(session_id3, std::string(), NULL, 0));
+ EXPECT_CALL(*this,
+ OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSession(std::string(),
+ NULL,
+ 0,
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
+
+ EXPECT_CALL(*this,
+ OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSession(std::string(),
+ NULL,
+ 0,
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
+
+ EXPECT_CALL(*this,
+ OnSessionMessage(IsNotEmpty(), IsEmpty(), GURL::EmptyGURL()));
+ decryptor_.CreateSession(std::string(),
+ NULL,
+ 0,
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
}
TEST_F(AesDecryptorTest, NormalDecryption) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
- DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
-}
-
-TEST_F(AesDecryptorTest, DecryptionWithOffset) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
- scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 23, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, UnencryptedFrame) {
// An empty iv string signals that the frame is unencrypted.
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- original_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
+ original_data_, key_id_, std::vector<uint8>(), no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, WrongKey) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, RESOLVED);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, NoKey) {
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
EXPECT_CALL(*this, BufferDecrypted(AesDecryptor::kNoKey, IsNull()));
decryptor_.Decrypt(Decryptor::kVideo, encrypted_buffer, decrypt_cb_);
}
TEST_F(AesDecryptorTest, KeyReplacement) {
- uint32 session_id = CreateSession(key_id_);
+ std::string session_id = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, RESOLVED);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
encrypted_buffer, original_data_, DATA_MISMATCH));
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
}
TEST_F(AesDecryptorTest, WrongSizedKey) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kWrongSizedKeyAsJWK, SESSION_ERROR);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kWrongSizedKeyAsJWK, REJECTED);
}
TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 10, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
- UpdateSessionAndExpect(session_id, kKey2AsJWK, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kKey2AsJWK, RESOLVED);
// The first key is still available after we added a second key.
ASSERT_NO_FATAL_FAILURE(
@@ -454,7 +476,6 @@ TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
kEncryptedData2 + arraysize(kEncryptedData2)),
std::vector<uint8>(kKeyId2, kKeyId2 + arraysize(kKeyId2)),
std::vector<uint8>(kIv2, kIv2 + arraysize(kIv2)),
- 30,
no_subsample_entries_);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
encrypted_buffer,
@@ -464,43 +485,43 @@ TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
}
TEST_F(AesDecryptorTest, CorruptedIv) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
std::vector<uint8> bad_iv = iv_;
bad_iv[1]++;
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, bad_iv, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, bad_iv, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, CorruptedData) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
std::vector<uint8> bad_data = encrypted_data_;
bad_data[1]++;
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- bad_data, key_id_, iv_, 0, no_subsample_entries_);
+ bad_data, key_id_, iv_, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
+ encrypted_data_, key_id_, std::vector<uint8>(), no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, SubsampleDecryption) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- subsample_encrypted_data_, key_id_, iv_, 0, normal_subsample_entries_);
+ subsample_encrypted_data_, key_id_, iv_, normal_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
@@ -508,29 +529,29 @@ TEST_F(AesDecryptorTest, SubsampleDecryption) {
// expect to encounter this in the wild, but since the DecryptConfig doesn't
// disallow such a configuration, it should be covered.
TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- subsample_encrypted_data_, key_id_, iv_, 23, normal_subsample_entries_);
+ subsample_encrypted_data_, key_id_, iv_, normal_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, SubsampleWrongSize) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
std::vector<SubsampleEntry> subsample_entries_wrong_size(
kSubsampleEntriesWrongSize,
kSubsampleEntriesWrongSize + arraysize(kSubsampleEntriesWrongSize));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- subsample_encrypted_data_, key_id_, iv_, 0, subsample_entries_wrong_size);
+ subsample_encrypted_data_, key_id_, iv_, subsample_entries_wrong_size);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
std::vector<SubsampleEntry> subsample_entries_invalid_total_size(
kSubsampleEntriesInvalidTotalSize,
@@ -538,45 +559,45 @@ TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
arraysize(kSubsampleEntriesInvalidTotalSize));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- subsample_encrypted_data_, key_id_, iv_, 0,
+ subsample_encrypted_data_, key_id_, iv_,
subsample_entries_invalid_total_size);
DecryptAndExpect(encrypted_buffer, original_data_, DECRYPT_ERROR);
}
// No cypher bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
std::vector<SubsampleEntry> clear_only_subsample_entries(
kSubsampleEntriesClearOnly,
kSubsampleEntriesClearOnly + arraysize(kSubsampleEntriesClearOnly));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- original_data_, key_id_, iv_, 0, clear_only_subsample_entries);
+ original_data_, key_id_, iv_, clear_only_subsample_entries);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
// No clear bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
- uint32 session_id = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ std::string session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
std::vector<SubsampleEntry> cypher_only_subsample_entries(
kSubsampleEntriesCypherOnly,
kSubsampleEntriesCypherOnly + arraysize(kSubsampleEntriesCypherOnly));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, cypher_only_subsample_entries);
+ encrypted_data_, key_id_, iv_, cypher_only_subsample_entries);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, ReleaseSession) {
- uint32 session_id = CreateSession(key_id_);
+ std::string session_id = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
@@ -584,11 +605,11 @@ TEST_F(AesDecryptorTest, ReleaseSession) {
}
TEST_F(AesDecryptorTest, NoKeyAfterReleaseSession) {
- uint32 session_id = CreateSession(key_id_);
+ std::string session_id = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
- UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, RESOLVED);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
@@ -598,18 +619,18 @@ TEST_F(AesDecryptorTest, NoKeyAfterReleaseSession) {
}
TEST_F(AesDecryptorTest, LatestKeyUsed) {
- uint32 session_id1 = CreateSession(key_id_);
+ std::string session_id1 = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
// Add alternate key, buffer should not be decoded properly.
- UpdateSessionAndExpect(session_id1, kKeyAlternateAsJWK, SESSION_READY);
+ UpdateSessionAndExpect(session_id1, kKeyAlternateAsJWK, RESOLVED);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH));
// Create a second session with a correct key value for key_id_.
- uint32 session_id2 = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id2, kKeyAsJWK, SESSION_READY);
+ std::string session_id2 = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id2, kKeyAsJWK, RESOLVED);
// Should be able to decode with latest key.
ASSERT_NO_FATAL_FAILURE(
@@ -617,16 +638,16 @@ TEST_F(AesDecryptorTest, LatestKeyUsed) {
}
TEST_F(AesDecryptorTest, LatestKeyUsedAfterReleaseSession) {
- uint32 session_id1 = CreateSession(key_id_);
+ std::string session_id1 = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
- UpdateSessionAndExpect(session_id1, kKeyAsJWK, SESSION_READY);
+ encrypted_data_, key_id_, iv_, no_subsample_entries_);
+ UpdateSessionAndExpect(session_id1, kKeyAsJWK, RESOLVED);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
// Create a second session with a different key value for key_id_.
- uint32 session_id2 = CreateSession(key_id_);
- UpdateSessionAndExpect(session_id2, kKeyAlternateAsJWK, SESSION_READY);
+ std::string session_id2 = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id2, kKeyAlternateAsJWK, RESOLVED);
// Should not be able to decode with new key.
ASSERT_NO_FATAL_FAILURE(
@@ -639,7 +660,7 @@ TEST_F(AesDecryptorTest, LatestKeyUsedAfterReleaseSession) {
}
TEST_F(AesDecryptorTest, JWKKey) {
- uint32 session_id = CreateSession(key_id_);
+ std::string session_id = CreateSession(key_id_);
// Try a simple JWK key (i.e. not in a set)
const std::string kJwkSimple =
@@ -648,7 +669,7 @@ TEST_F(AesDecryptorTest, JWKKey) {
" \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
" \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
"}";
- UpdateSessionAndExpect(session_id, kJwkSimple, SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, kJwkSimple, REJECTED);
// Try a key list with multiple entries.
const std::string kJwksMultipleEntries =
@@ -666,40 +687,38 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksMultipleEntries, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kJwksMultipleEntries, RESOLVED);
// Try a key with no spaces and some \n plus additional fields.
const std::string kJwksNoSpaces =
"\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
"\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"GawgguFyGrWKav7AX4VKUg"
"\",\"foo\":\"bar\"}]}\n\n";
- UpdateSessionAndExpect(session_id, kJwksNoSpaces, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kJwksNoSpaces, RESOLVED);
// Try some non-ASCII characters.
- UpdateSessionAndExpect(session_id,
- "This is not ASCII due to \xff\xfe\xfd in it.",
- SESSION_ERROR);
+ UpdateSessionAndExpect(
+ session_id, "This is not ASCII due to \xff\xfe\xfd in it.", REJECTED);
// Try a badly formatted key. Assume that the JSON parser is fully tested,
// so we won't try a lot of combinations. However, we need a test to ensure
// that the code doesn't crash if invalid JSON is received.
- UpdateSessionAndExpect(session_id, "This is not a JSON key.", SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, "This is not a JSON key.", REJECTED);
// Try passing some valid JSON that is not a dictionary at the top level.
- UpdateSessionAndExpect(session_id, "40", SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, "40", REJECTED);
// Try an empty dictionary.
- UpdateSessionAndExpect(session_id, "{ }", SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, "{ }", REJECTED);
// Try an empty 'keys' dictionary.
- UpdateSessionAndExpect(session_id, "{ \"keys\": [] }", SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, "{ \"keys\": [] }", REJECTED);
// Try with 'keys' not a dictionary.
- UpdateSessionAndExpect(session_id, "{ \"keys\":\"1\" }", SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, "{ \"keys\":\"1\" }", REJECTED);
// Try with 'keys' a list of integers.
- UpdateSessionAndExpect(
- session_id, "{ \"keys\": [ 1, 2, 3 ] }", SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, "{ \"keys\": [ 1, 2, 3 ] }", REJECTED);
// Try padding(=) at end of 'k' base64 string.
const std::string kJwksWithPaddedKey =
@@ -712,7 +731,7 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithPaddedKey, SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, kJwksWithPaddedKey, REJECTED);
// Try padding(=) at end of 'kid' base64 string.
const std::string kJwksWithPaddedKeyId =
@@ -725,7 +744,7 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithPaddedKeyId, SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, kJwksWithPaddedKeyId, REJECTED);
// Try a key with invalid base64 encoding.
const std::string kJwksWithInvalidBase64 =
@@ -738,7 +757,7 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithInvalidBase64, SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, kJwksWithInvalidBase64, REJECTED);
// Try a 3-byte 'kid' where no base64 padding is required.
// |kJwksMultipleEntries| above has 2 'kid's that require 1 and 2 padding
@@ -753,7 +772,7 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithNoPadding, SESSION_READY);
+ UpdateSessionAndExpect(session_id, kJwksWithNoPadding, RESOLVED);
// Empty key id.
const std::string kJwksWithEmptyKeyId =
@@ -766,7 +785,7 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- UpdateSessionAndExpect(session_id, kJwksWithEmptyKeyId, SESSION_ERROR);
+ UpdateSessionAndExpect(session_id, kJwksWithEmptyKeyId, REJECTED);
ReleaseSession(session_id);
}
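
Alongside the promise migration, every CreateEncryptedBuffer() call above lost its offset argument, mirroring the removal of |data_offset| from DecryptConfig. A sketch of the resulting three-argument form; MakeEncryptedBuffer is a hypothetical helper, and the assumption is that clear leaders are now expressed solely through subsample entries.

// Sketch: building an encrypted buffer with the offset-free DecryptConfig.
scoped_refptr<media::DecoderBuffer> MakeEncryptedBuffer(
    const std::vector<uint8>& data,  // assumed non-empty, as in the tests
    const std::string& key_id,
    const std::string& iv,
    const std::vector<media::SubsampleEntry>& subsamples) {
  scoped_refptr<media::DecoderBuffer> buffer(
      media::DecoderBuffer::CopyFrom(&data[0], data.size()));
  buffer->set_decrypt_config(scoped_ptr<media::DecryptConfig>(
      new media::DecryptConfig(key_id, iv, subsamples)));
  return buffer;
}
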
diff --git a/chromium/media/cdm/json_web_key.cc b/chromium/media/cdm/json_web_key.cc
index 522f1c9b367..4b9d8221f76 100644
--- a/chromium/media/cdm/json_web_key.cc
+++ b/chromium/media/cdm/json_web_key.cc
@@ -81,7 +81,7 @@ std::string GenerateJWKSet(const uint8* key, int key_length,
// Processes a JSON Web Key to extract the key id and key value. Sets |jwk_key|
// to the id/value pair and returns true on success.
-static bool ConvertJwkToKeyPair(const DictionaryValue& jwk,
+static bool ConvertJwkToKeyPair(const base::DictionaryValue& jwk,
KeyIdAndKeyPair* jwk_key) {
// Have found a JWK, start by checking that it is a symmetric key.
std::string type;
@@ -121,16 +121,17 @@ static bool ConvertJwkToKeyPair(const DictionaryValue& jwk,
}
bool ExtractKeysFromJWKSet(const std::string& jwk_set, KeyIdAndKeyPairs* keys) {
- if (!IsStringASCII(jwk_set))
+ if (!base::IsStringASCII(jwk_set))
return false;
- scoped_ptr<Value> root(base::JSONReader().ReadToValue(jwk_set));
- if (!root.get() || root->GetType() != Value::TYPE_DICTIONARY)
+ scoped_ptr<base::Value> root(base::JSONReader().ReadToValue(jwk_set));
+ if (!root.get() || root->GetType() != base::Value::TYPE_DICTIONARY)
return false;
// Locate the set from the dictionary.
- DictionaryValue* dictionary = static_cast<DictionaryValue*>(root.get());
- ListValue* list_val = NULL;
+ base::DictionaryValue* dictionary =
+ static_cast<base::DictionaryValue*>(root.get());
+ base::ListValue* list_val = NULL;
if (!dictionary->GetList(kKeysTag, &list_val)) {
DVLOG(1) << "Missing '" << kKeysTag
<< "' parameter or not a list in JWK Set";
@@ -141,7 +142,7 @@ bool ExtractKeysFromJWKSet(const std::string& jwk_set, KeyIdAndKeyPairs* keys) {
// success.
KeyIdAndKeyPairs local_keys;
for (size_t i = 0; i < list_val->GetSize(); ++i) {
- DictionaryValue* jwk = NULL;
+ base::DictionaryValue* jwk = NULL;
if (!list_val->GetDictionary(i, &jwk)) {
DVLOG(1) << "Unable to access '" << kKeysTag << "'[" << i
<< "] in JWK Set";
diff --git a/chromium/media/cdm/key_system_names.cc b/chromium/media/cdm/key_system_names.cc
index b9eceb2f4db..32b74754d69 100644
--- a/chromium/media/cdm/key_system_names.cc
+++ b/chromium/media/cdm/key_system_names.cc
@@ -8,8 +8,7 @@
namespace media {
-const char kPrefixedClearKey[] = "webkit-org.w3.clearkey";
-const char kUnprefixedClearKey[] = "org.w3.clearkey";
+const char kClearKey[] = "org.w3.clearkey";
const char kExternalClearKey[] = "org.chromium.externalclearkey";
static bool IsParentKeySystemOf(const std::string& parent_key_system,
@@ -18,7 +17,6 @@ static bool IsParentKeySystemOf(const std::string& parent_key_system,
return key_system.substr(0, prefix.size()) == prefix;
}
-
bool IsExternalClearKey(const std::string& key_system) {
return key_system == kExternalClearKey ||
IsParentKeySystemOf(kExternalClearKey, key_system);
diff --git a/chromium/media/cdm/key_system_names.h b/chromium/media/cdm/key_system_names.h
index 1b2686911c4..c1818490bb0 100644
--- a/chromium/media/cdm/key_system_names.h
+++ b/chromium/media/cdm/key_system_names.h
@@ -13,16 +13,15 @@ namespace media {
// TODO(jrummell): Change other uses of Clear Key to use this common value.
-// The key system names for Clear Key.
-MEDIA_EXPORT extern const char kPrefixedClearKey[];
-MEDIA_EXPORT extern const char kUnprefixedClearKey[];
+// The key system name for Clear Key.
+MEDIA_EXPORT extern const char kClearKey[];
// The key system name for External Clear Key.
MEDIA_EXPORT extern const char kExternalClearKey[];
// Returns true if |key_system| is Clear Key, false otherwise.
MEDIA_EXPORT inline bool IsClearKey(const std::string& key_system) {
- return key_system == kPrefixedClearKey || key_system == kUnprefixedClearKey;
+ return key_system == kClearKey;
}
// Returns true if |key_system| is External Clear Key, false otherwise.
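
Call sites are expected to branch through these helpers rather than compare key system strings directly. A trivial sketch; UsesClearKeyCdm is a hypothetical wrapper.

// Sketch: dispatching on the unified Clear Key name.
bool UsesClearKeyCdm(const std::string& key_system) {
  // IsClearKey() now matches only "org.w3.clearkey"; the prefixed alias
  // is gone. IsExternalClearKey() also matches child key systems.
  return media::IsClearKey(key_system) ||
         media::IsExternalClearKey(key_system);
}
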
diff --git a/chromium/media/cdm/player_tracker_impl.cc b/chromium/media/cdm/player_tracker_impl.cc
new file mode 100644
index 00000000000..8f102bd724e
--- /dev/null
+++ b/chromium/media/cdm/player_tracker_impl.cc
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/player_tracker_impl.h"
+
+#include <utility>
+
+#include "base/stl_util.h"
+
+namespace media {
+
+PlayerTrackerImpl::PlayerCallbacks::PlayerCallbacks(
+ base::Closure new_key_cb,
+ base::Closure cdm_unset_cb)
+ : new_key_cb(new_key_cb), cdm_unset_cb(cdm_unset_cb) {
+}
+
+PlayerTrackerImpl::PlayerCallbacks::~PlayerCallbacks() {
+}
+
+PlayerTrackerImpl::PlayerTrackerImpl() : next_registration_id_(1) {}
+
+PlayerTrackerImpl::~PlayerTrackerImpl() {}
+
+int PlayerTrackerImpl::RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int registration_id = next_registration_id_++;
+ DCHECK(!ContainsKey(player_callbacks_map_, registration_id));
+ player_callbacks_map_.insert(std::make_pair(
+ registration_id, PlayerCallbacks(new_key_cb, cdm_unset_cb)));
+ return registration_id;
+}
+
+void PlayerTrackerImpl::UnregisterPlayer(int registration_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(ContainsKey(player_callbacks_map_, registration_id))
+ << registration_id;
+ player_callbacks_map_.erase(registration_id);
+}
+
+void PlayerTrackerImpl::NotifyNewKey() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ std::map<int, PlayerCallbacks>::iterator iter = player_callbacks_map_.begin();
+ for (; iter != player_callbacks_map_.end(); ++iter)
+ iter->second.new_key_cb.Run();
+}
+
+void PlayerTrackerImpl::NotifyCdmUnset() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ std::map<int, PlayerCallbacks>::iterator iter = player_callbacks_map_.begin();
+ for (; iter != player_callbacks_map_.end(); ++iter)
+ iter->second.cdm_unset_cb.Run();
+}
+
+} // namespace media
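
A sketch of how a CDM owning a PlayerTrackerImpl would drive it, all on one thread per |thread_checker_|. The Player type and its callbacks are hypothetical; the tracker calls match the implementation above.

// Hypothetical player hooking into the tracker.
class Player {
 public:
  void OnNewKey() {}    // e.g. resume a decrypt that was waiting for a key
  void OnCdmUnset() {}  // e.g. drop the CDM reference
};

void TrackPlayerExample(media::PlayerTrackerImpl* tracker, Player* player) {
  int id = tracker->RegisterPlayer(
      base::Bind(&Player::OnNewKey, base::Unretained(player)),
      base::Bind(&Player::OnCdmUnset, base::Unretained(player)));
  tracker->NotifyNewKey();        // runs every registered new_key_cb
  tracker->UnregisterPlayer(id);  // must happen before |player| goes away
}
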
diff --git a/chromium/media/cdm/player_tracker_impl.h b/chromium/media/cdm/player_tracker_impl.h
new file mode 100644
index 00000000000..bcbeaf8c3c2
--- /dev/null
+++ b/chromium/media/cdm/player_tracker_impl.h
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PLAYER_TRACKER_IMPL_H_
+#define MEDIA_CDM_PLAYER_TRACKER_IMPL_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "media/base/media_export.h"
+#include "media/base/player_tracker.h"
+
+namespace media {
+
+// A common implementation that can be shared by different PlayerTracker
+// implementations. This class is not thread safe and should only be called
+// on one thread.
+class MEDIA_EXPORT PlayerTrackerImpl : public PlayerTracker {
+ public:
+ PlayerTrackerImpl();
+ virtual ~PlayerTrackerImpl();
+
+ // PlayerTracker implementation.
+ virtual int RegisterPlayer(const base::Closure& new_key_cb,
+ const base::Closure& cdm_unset_cb) OVERRIDE;
+ virtual void UnregisterPlayer(int registration_id) OVERRIDE;
+
+ // Helper methods to fire registered callbacks.
+ void NotifyNewKey();
+ void NotifyCdmUnset();
+
+ private:
+ struct PlayerCallbacks {
+ PlayerCallbacks(base::Closure new_key_cb, base::Closure cdm_unset_cb);
+ ~PlayerCallbacks();
+
+ base::Closure new_key_cb;
+ base::Closure cdm_unset_cb;
+ };
+
+ int next_registration_id_;
+ std::map<int, PlayerCallbacks> player_callbacks_map_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlayerTrackerImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_CDM_PLAYER_TRACKER_IMPL_H_
diff --git a/chromium/media/cdm/ppapi/api/content_decryption_module.h b/chromium/media/cdm/ppapi/api/content_decryption_module.h
index 07f5bbd62df..1ac32908bbf 100644
--- a/chromium/media/cdm/ppapi/api/content_decryption_module.h
+++ b/chromium/media/cdm/ppapi/api/content_decryption_module.h
@@ -75,13 +75,11 @@ CDM_EXPORT const char* GetCdmVersion();
namespace cdm {
-class AudioFrames_1;
class AudioFrames_2;
typedef AudioFrames_2 AudioFrames;
-class Host_1;
-class Host_2;
-class Host_3;
+class Host_4;
+class Host_5;
class DecryptedBlock;
class VideoFrame;
@@ -97,15 +95,39 @@ enum Status {
};
// This must be consistent with MediaKeyError defined in the spec:
-// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#error-codes
-// The error codes are in the process of changing. For now, support the minimum
-// required set with backwards compatible values.
+// https://dvcs.w3.org/hg/html-media/raw-file/eme-v0.1b/encrypted-media/encrypted-media.html#error-codes
+// Support the minimum required set with backwards compatible values.
enum MediaKeyError {
- kUnknownError = 1,
- kClientError = 2,
- kOutputError = 4
+ kPrefixedUnknownError = 1,
+ kPrefixedClientError = 2,
+ kPrefixedOutputError = 4
};
+// This must at least contain the exceptions defined in the spec:
+// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#exceptions
+// The following starts with the list of DOM4 exceptions from:
+// http://www.w3.org/TR/dom/#domexception
+// Some DOM4 exceptions are not included as they are not expected to be used.
+enum Error {
+ kNotSupportedError = 9,
+ kInvalidStateError = 11,
+ kInvalidAccessError = 15,
+ kQuotaExceededError = 22,
+
+ // Additional exceptions that don't have assigned codes.
+ // There are other non-EME-specific values, not included in this list.
+ kUnknownError = 30,
+
+ // Additional values from previous EME versions. They currently have no
+ // matching DOMException.
+ kClientError = 100,
+ kOutputError = 101
+};
+
+// Time is defined as the number of seconds since the
+// Epoch (00:00:00 UTC, January 1, 1970).
+typedef double Time;
+
// An input buffer can be split into several continuous subsamples.
// A SubsampleEntry specifies the number of clear and cipher bytes in each
// subsample. For example, the following buffer has three subsamples:
@@ -224,12 +246,13 @@ struct VideoDecoderConfig {
enum VideoCodec {
kUnknownVideoCodec = 0,
kCodecVp8,
- kCodecH264
+ kCodecH264,
+ kCodecVp9
};
enum VideoCodecProfile {
kUnknownVideoCodecProfile = 0,
- kVp8ProfileMain,
+ kProfileNotNeeded,
kH264ProfileBaseline,
kH264ProfileMain,
kH264ProfileExtended,
@@ -302,137 +325,88 @@ enum OutputLinkTypes {
kLinkTypeNetwork = 1 << 6
};
-//
-// WARNING: Deprecated. Will be removed in the near future. CDMs should
-// implement ContentDecryptionModule_2 instead.
+// The type of session to create. The valid types are defined in the spec:
+// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#dom-sessiontype
+enum SessionType {
+ kTemporary = 0,
+ kPersistent = 1
+};
-// ContentDecryptionModule interface that all CDMs need to implement.
-// The interface is versioned for backward compatibility.
-// Note: ContentDecryptionModule implementations must use the allocator
-// provided in CreateCdmInstance() to allocate any Buffer that needs to
-// be passed back to the caller. Implementations must call Buffer::Destroy()
-// when a Buffer is created that will never be returned to the caller.
-class ContentDecryptionModule_1 {
+// FileIO interface provides a way for the CDM to store data in a file in
+// persistent storage. This interface aims only at providing basic read/write
+// capabilities and should not be used as a full-fledged file IO API.
+// Each domain (e.g. "example.com") and each CDM has its own persistent
+// storage. All instances of a given CDM associated with a given domain share
+// the same persistent storage.
+class FileIO {
public:
- static const int kVersion = 1;
- typedef Host_1 Host;
-
- // Generates a |key_request| given |type| and |init_data|.
- //
- // Returns kSuccess if the key request was successfully generated, in which
- // case the CDM must send the key message by calling Host::SendKeyMessage().
- // Returns kSessionError if any error happened, in which case the CDM must
- // send a key error by calling Host::SendKeyError().
- virtual Status GenerateKeyRequest(
- const char* type, uint32_t type_size,
- const uint8_t* init_data, uint32_t init_data_size) = 0;
-
- // Adds the |key| to the CDM to be associated with |key_id|.
- //
- // Returns kSuccess if the key was successfully added, kSessionError
- // otherwise.
- virtual Status AddKey(const char* session_id, uint32_t session_id_size,
- const uint8_t* key, uint32_t key_size,
- const uint8_t* key_id, uint32_t key_id_size) = 0;
-
- // Cancels any pending key request made to the CDM for |session_id|.
- //
- // Returns kSuccess if all pending key requests for |session_id| were
- // successfully canceled or there was no key request to be canceled,
- // kSessionError otherwise.
- virtual Status CancelKeyRequest(
- const char* session_id, uint32_t session_id_size) = 0;
-
- // Performs scheduled operation with |context| when the timer fires.
- virtual void TimerExpired(void* context) = 0;
-
- // Decrypts the |encrypted_buffer|.
- //
- // Returns kSuccess if decryption succeeded, in which case the callee
- // should have filled the |decrypted_buffer| and passed the ownership of
- // |data| in |decrypted_buffer| to the caller.
- // Returns kNoKey if the CDM did not have the necessary decryption key
- // to decrypt.
- // Returns kDecryptError if any other error happened.
- // If the return value is not kSuccess, |decrypted_buffer| should be ignored
- // by the caller.
- virtual Status Decrypt(const InputBuffer& encrypted_buffer,
- DecryptedBlock* decrypted_buffer) = 0;
-
- // Initializes the CDM audio decoder with |audio_decoder_config|. This
- // function must be called before DecryptAndDecodeSamples() is called.
- //
- // Returns kSuccess if the |audio_decoder_config| is supported and the CDM
- // audio decoder is successfully initialized.
- // Returns kSessionError if |audio_decoder_config| is not supported. The CDM
- // may still be able to do Decrypt().
- virtual Status InitializeAudioDecoder(
- const AudioDecoderConfig& audio_decoder_config) = 0;
-
- // Initializes the CDM video decoder with |video_decoder_config|. This
- // function must be called before DecryptAndDecodeFrame() is called.
- //
- // Returns kSuccess if the |video_decoder_config| is supported and the CDM
- // video decoder is successfully initialized.
- // Returns kSessionError if |video_decoder_config| is not supported. The CDM
- // may still be able to do Decrypt().
- virtual Status InitializeVideoDecoder(
- const VideoDecoderConfig& video_decoder_config) = 0;
+ // Opens the file with |file_name| for read and write.
+ // FileIOClient::OnOpenComplete() will be called after the opening
+ // operation finishes.
+ // - When the file is opened by a CDM instance, it will be classified as "in
+ // use". In this case other CDM instances in the same domain may receive
+ // kInUse status when trying to open it.
+ // - |file_name| should not include path separators.
+ virtual void Open(const char* file_name, uint32_t file_name_size) = 0;
+
+ // Reads the contents of the file. FileIOClient::OnReadComplete() will be
+ // called with the read status. Read() should not be called if a previous
+ // Read() or Write() call is still pending; otherwise OnReadComplete() will
+ // be called with kInUse.
+ virtual void Read() = 0;
+
+ // Writes |data_size| bytes of |data| into the file.
+ // FileIOClient::OnWriteComplete() will be called with the write status.
+ // All existing contents in the file will be overwritten. Calling Write() with
+ // NULL |data| will clear all contents in the file. Write() should not be
+ // called if a previous Write() or Read() call is still pending; otherwise
+ // OnWriteComplete() will be called with kInUse.
+ virtual void Write(const uint8_t* data, uint32_t data_size) = 0;
+
+ // Closes the file if opened, destroys this FileIO object and releases any
+ // resources allocated. The CDM must call this method when it has finished using
+ // this object. A FileIO object must not be used after Close() is called.
+ virtual void Close() = 0;
- // De-initializes the CDM decoder and sets it to an uninitialized state. The
- // caller can initialize the decoder again after this call to re-initialize
- // it. This can be used to reconfigure the decoder if the configuration
- // changes.
- virtual void DeinitializeDecoder(StreamType decoder_type) = 0;
-
- // Resets the CDM decoder to an initialized clean state. All internal buffers
- // MUST be flushed.
- virtual void ResetDecoder(StreamType decoder_type) = 0;
-
- // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into a
- // |video_frame|. Upon end-of-stream, the caller should call this function
- // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
- // |video_frame| (|format| == kEmptyVideoFrame) is produced.
- //
- // Returns kSuccess if decryption and decoding both succeeded, in which case
- // the callee will have filled the |video_frame| and passed the ownership of
- // |frame_buffer| in |video_frame| to the caller.
- // Returns kNoKey if the CDM did not have the necessary decryption key
- // to decrypt.
- // Returns kNeedMoreData if more data was needed by the decoder to generate
- // a decoded frame (e.g. during initialization and end-of-stream).
- // Returns kDecryptError if any decryption error happened.
- // Returns kDecodeError if any decoding error happened.
- // If the return value is not kSuccess, |video_frame| should be ignored by
- // the caller.
- virtual Status DecryptAndDecodeFrame(const InputBuffer& encrypted_buffer,
- VideoFrame* video_frame) = 0;
+ protected:
+ FileIO() {}
+ virtual ~FileIO() {}
+};
- // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into
- // |audio_frames|. Upon end-of-stream, the caller should call this function
- // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
- // |audio_frames| is produced.
- //
- // Returns kSuccess if decryption and decoding both succeeded, in which case
- // the callee will have filled |audio_frames| and passed the ownership of
- // |data| in |audio_frames| to the caller.
- // Returns kNoKey if the CDM did not have the necessary decryption key
- // to decrypt.
- // Returns kNeedMoreData if more data was needed by the decoder to generate
- // audio samples (e.g. during initialization and end-of-stream).
- // Returns kDecryptError if any decryption error happened.
- // Returns kDecodeError if any decoding error happened.
- // If the return value is not kSuccess, |audio_frames| should be ignored by
- // the caller.
- virtual Status DecryptAndDecodeSamples(const InputBuffer& encrypted_buffer,
- AudioFrames_1* audio_frames) = 0;
+// Responses to FileIO calls. All responses will be called asynchronously.
+class FileIOClient {
+ public:
+ enum Status {
+ kSuccess = 0,
+ kInUse,
+ kError
+ };
- // Destroys the object in the same context as it was created.
- virtual void Destroy() = 0;
+ // Response to a FileIO::Open() call with the open |status|.
+ virtual void OnOpenComplete(Status status) = 0;
+
+ // Response to a FileIO::Read() call to provide |data_size| bytes of |data|
+ // read from the file.
+ // - kSuccess indicates that all contents of the file have been successfully
+ //   read. In this case, a |data_size| of 0 means that the file is empty.
+ // - kInUse indicates that there are other read/write operations pending.
+ // - kError indicates read failure, e.g. the storage isn't open or cannot be
+ // fully read.
+ virtual void OnReadComplete(Status status,
+ const uint8_t* data, uint32_t data_size) = 0;
+
+ // Response to a FileIO::Write() call.
+ // - kSuccess indicates that all the data has been written into the file
+ // successfully.
+ // - kInUse indicates that there are other read/write operations pending.
+ // - kError indicates write failure, e.g. the storage isn't open or cannot be
+ // fully written. Upon write failure, the contents of the file should be
+ //   regarded as corrupt and should not be used.
+ virtual void OnWriteComplete(Status status) = 0;
protected:
- ContentDecryptionModule_1() {}
- virtual ~ContentDecryptionModule_1() {}
+ FileIOClient() {}
+ virtual ~FileIOClient() {}
};
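
A minimal editorial sketch (not part of the diff) of the intended call pattern
for the two interfaces above. MyCdmStorage, the file name, and the use of
Host_4::CreateFileIO() are hypothetical; only the FileIO/FileIOClient
signatures declared in this header are assumed.

class MyCdmStorage : public cdm::FileIOClient {
 public:
  // |host| must outlive this object; CreateFileIO() may return NULL.
  explicit MyCdmStorage(cdm::Host_4* host) : file_io_(NULL) {
    file_io_ = host->CreateFileIO(this);
    if (file_io_)
      file_io_->Open("license_store", 13);  // No path separators allowed.
  }

  virtual void OnOpenComplete(Status status) {
    if (status == kSuccess)
      file_io_->Read();  // Only one Read()/Write() may be pending at a time.
  }

  virtual void OnReadComplete(Status status,
                              const uint8_t* data, uint32_t data_size) {
    // kSuccess with |data_size| == 0 means the file exists but is empty.
    file_io_->Close();  // Destroys |file_io_|; never use it afterwards.
    file_io_ = NULL;
  }

  virtual void OnWriteComplete(Status status) {
    // On kError the file contents must be treated as corrupt.
  }

 private:
  cdm::FileIO* file_io_;
};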
// ContentDecryptionModule interface that all CDMs need to implement.
@@ -441,36 +415,35 @@ class ContentDecryptionModule_1 {
// provided in CreateCdmInstance() to allocate any Buffer that needs to
// be passed back to the caller. Implementations must call Buffer::Destroy()
// when a Buffer is created that will never be returned to the caller.
-class ContentDecryptionModule_2 {
+class ContentDecryptionModule_4 {
public:
- static const int kVersion = 2;
- typedef Host_2 Host;
+ static const int kVersion = 4;
+ typedef Host_4 Host;
- // Generates a |key_request| given |type| and |init_data|.
- //
- // Returns kSuccess if the key request was successfully generated, in which
- // case the CDM must send the key message by calling Host::SendKeyMessage().
- // Returns kSessionError if any error happened, in which case the CDM must
- // send a key error by calling Host::SendKeyError().
- virtual Status GenerateKeyRequest(
+ // CreateSession(), UpdateSession(), and ReleaseSession() get passed a
+ // |session_id| for a MediaKeySession object. It must be used in the reply via
+ // Host methods (e.g. Host::OnSessionMessage()).
+ // Note: |session_id| is different from MediaKeySession's sessionId attribute,
+ // which is referred to as |web_session_id| in this file.
+
+ // Creates a session given |type| and |init_data|.
+ virtual void CreateSession(
+ uint32_t session_id,
const char* type, uint32_t type_size,
const uint8_t* init_data, uint32_t init_data_size) = 0;
- // Adds the |key| to the CDM to be associated with |key_id|.
- //
- // Returns kSuccess if the key was successfully added, kSessionError
- // otherwise.
- virtual Status AddKey(const char* session_id, uint32_t session_id_size,
- const uint8_t* key, uint32_t key_size,
- const uint8_t* key_id, uint32_t key_id_size) = 0;
+ // Loads a session that has |web_session_id|.
+ virtual void LoadSession(
+ uint32_t session_id,
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
- // Cancels any pending key request made to the CDM for |session_id|.
- //
- // Returns kSuccess if all pending key requests for |session_id| were
- // successfully canceled or there was no key request to be canceled,
- // kSessionError otherwise.
- virtual Status CancelKeyRequest(
- const char* session_id, uint32_t session_id_size) = 0;
+ // Updates the session with |response|.
+ virtual void UpdateSession(
+ uint32_t session_id,
+ const uint8_t* response, uint32_t response_size) = 0;
+
+ // Releases the resources for the session.
+ virtual void ReleaseSession(uint32_t session_id) = 0;
// Performs scheduled operation with |context| when the timer fires.
virtual void TimerExpired(void* context) = 0;
@@ -577,40 +550,63 @@ class ContentDecryptionModule_2 {
virtual void Destroy() = 0;
protected:
- ContentDecryptionModule_2() {}
- virtual ~ContentDecryptionModule_2() {}
+ ContentDecryptionModule_4() {}
+ virtual ~ContentDecryptionModule_4() {}
};
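
A minimal editorial sketch (not part of the diff) of the ContentDecryptionModule_4
reply flow described above. MyCdm4, |host_|, and the two helper functions are
hypothetical; the Host_4 callbacks used here are declared later in this header.

void MyCdm4::CreateSession(uint32_t session_id,
                           const char* type, uint32_t type_size,
                           const uint8_t* init_data, uint32_t init_data_size) {
  // Pick the value for the MediaKeySession's sessionId attribute.
  std::string web_session_id = GenerateWebSessionId();  // Hypothetical helper.
  // OnSessionCreated() must precede OnSessionMessage()/OnSessionReady().
  host_->OnSessionCreated(session_id,
                          web_session_id.data(), web_session_id.length());
  // Send the license request for this session.
  std::string request = BuildRequest(init_data, init_data_size);  // Hypothetical.
  host_->OnSessionMessage(session_id,
                          request.data(), request.length(),
                          NULL, 0);  // No destination URL in this sketch.
}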
+
// ContentDecryptionModule interface that all CDMs need to implement.
// The interface is versioned for backward compatibility.
// Note: ContentDecryptionModule implementations must use the allocator
// provided in CreateCdmInstance() to allocate any Buffer that needs to
// be passed back to the caller. Implementations must call Buffer::Destroy()
// when a Buffer is created that will never be returned to the caller.
-class ContentDecryptionModule_3 {
+class ContentDecryptionModule_5 {
public:
- static const int kVersion = 3;
- typedef Host_3 Host;
+ static const int kVersion = 5;
+ typedef Host_5 Host;
- // CreateSession(), UpdateSession(), and ReleaseSession() get passed a
- // |session_id| for a MediaKeySession object. It must be used in the reply via
- // Host methods (e.g. Host::OnSessionMessage()).
- // Note: |session_id| is different from MediaKeySession's sessionId attribute,
- // which is referred to as |web_session_id| in this file.
+ // CreateSession(), LoadSession(), UpdateSession(), and ReleaseSession()
+ // accept a |promise_id|, which must be passed to the completion Host method
+ // (e.g. Host::OnResolveNewSessionPromise()).
- // Creates a session given |type| and |init_data|.
+ // Creates a session given |init_data_type|, |init_data| and |session_type|.
+ // The CDM must respond by calling either Host::OnResolveNewSessionPromise()
+ // or Host::OnRejectPromise().
virtual void CreateSession(
- uint32_t session_id,
- const char* type, uint32_t type_size,
- const uint8_t* init_data, uint32_t init_data_size) = 0;
+ uint32_t promise_id,
+ const char* init_data_type, uint32_t init_data_type_size,
+ const uint8_t* init_data, uint32_t init_data_size,
+ SessionType session_type) = 0;
+
+ // Loads the session with |web_session_id|. The CDM must respond by calling
+ // either Host::OnResolveNewSessionPromise() or Host::OnRejectPromise().
+ // If the session is not found, call Host::OnResolveNewSessionPromise()
+ // with web_session_id = NULL.
+ virtual void LoadSession(
+ uint32_t promise_id,
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
- // Updates the session with |response|.
+ // Updates the session with |response|. The CDM must respond by calling
+ // either Host::OnResolvePromise() or Host::OnRejectPromise().
virtual void UpdateSession(
- uint32_t session_id,
+ uint32_t promise_id,
+ const char* web_session_id, uint32_t web_session_id_length,
const uint8_t* response, uint32_t response_size) = 0;
- // Releases the resources for the session.
- virtual void ReleaseSession(uint32_t session_id) = 0;
+ // Releases the resources for the session. The CDM must respond by calling
+ // either Host::OnResolvePromise() or Host::OnRejectPromise().
+ // Once the session is closed, Host::OnSessionClosed() must also be called.
+ virtual void ReleaseSession(
+ uint32_t promise_id,
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
+
+ // Provides a server certificate to be used to encrypt messages to the
+ // license server.
+ virtual void SetServerCertificate(
+ uint32_t promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) = 0;
// Performs scheduled operation with |context| when the timer fires.
virtual void TimerExpired(void* context) = 0;
@@ -717,11 +713,11 @@ class ContentDecryptionModule_3 {
virtual void Destroy() = 0;
protected:
- ContentDecryptionModule_3() {}
- virtual ~ContentDecryptionModule_3() {}
+ ContentDecryptionModule_5() {}
+ virtual ~ContentDecryptionModule_5() {}
};
-typedef ContentDecryptionModule_3 ContentDecryptionModule;
+typedef ContentDecryptionModule_5 ContentDecryptionModule;
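
A minimal editorial sketch (not part of the diff) of the promise discipline for
ContentDecryptionModule_5: each call that takes a |promise_id| must end in
exactly one resolve or reject on the Host. MyCdm5, |host_|, and ApplyResponse()
are hypothetical; the Host_5 callbacks are declared further down in this header.

void MyCdm5::UpdateSession(uint32_t promise_id,
                           const char* web_session_id,
                           uint32_t web_session_id_length,
                           const uint8_t* response, uint32_t response_size) {
  cdm::Time new_expiry_time = 0;  // 0 represents "undefined".
  if (!ApplyResponse(response, response_size, &new_expiry_time)) {
    const char kMsg[] = "Invalid license response.";
    host_->OnRejectPromise(promise_id, cdm::kInvalidAccessError, 0,
                           kMsg, sizeof(kMsg) - 1);
    return;
  }
  // An expiration change caused by Update() must be reported before the
  // promise is resolved (see Host_5::OnExpirationChange() below).
  host_->OnExpirationChange(web_session_id, web_session_id_length,
                            new_expiry_time);
  host_->OnResolvePromise(promise_id);
}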
// Represents a buffer created by Allocator implementations.
class Buffer {
@@ -743,12 +739,9 @@ class Buffer {
void operator=(const Buffer&);
};
-// Host interface that the CDM can call into to access browser side services.
-// Host interfaces are versioned for backward compatibility. CDM should use
-// HostFactory object to request a Host interface of a particular version.
-class Host_1 {
+class Host_4 {
public:
- static const int kVersion = 1;
+ static const int kVersion = 4;
// Returns a Buffer* containing non-zero members upon success, or NULL on
// failure. The caller owns the Buffer* after this call. The buffer is not
@@ -763,64 +756,39 @@ class Host_1 {
// Returns the current epoch wall time in seconds.
virtual double GetCurrentWallTimeInSeconds() = 0;
- // Sends a keymessage event to the application.
+ // Called by the CDM when a session is created or loaded and the value for the
+ // MediaKeySession's sessionId attribute is available (|web_session_id|).
+ // This must be called before OnSessionMessage() or OnSessionReady() is called
+ // for |session_id|. |web_session_id_length| should not include null
+ // termination.
+ // When called in response to LoadSession(), the |web_session_id| must be the
+ // same as the |web_session_id| passed in LoadSession().
+ virtual void OnSessionCreated(
+ uint32_t session_id,
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
+
+ // Called by the CDM when it has a message for session |session_id|.
// Length parameters should not include null termination.
- virtual void SendKeyMessage(
- const char* session_id, uint32_t session_id_length,
+ virtual void OnSessionMessage(
+ uint32_t session_id,
const char* message, uint32_t message_length,
- const char* default_url, uint32_t default_url_length) = 0;
-
- // Sends a keyerror event to the application.
- // |session_id_length| should not include null termination.
- virtual void SendKeyError(const char* session_id,
- uint32_t session_id_length,
- MediaKeyError error_code,
- uint32_t system_code) = 0;
-
- // Get private data from the host. This function is limited to internal use.
- typedef const void* (*GetPrivateInterface)(const char* interface_name);
- virtual void GetPrivateData(int32_t* instance,
- GetPrivateInterface* get_interface) = 0;
-
- protected:
- Host_1() {}
- virtual ~Host_1() {}
-};
-
-class Host_2 {
- public:
- static const int kVersion = 2;
-
- // Returns a Buffer* containing non-zero members upon success, or NULL on
- // failure. The caller owns the Buffer* after this call. The buffer is not
- // guaranteed to be zero initialized. The capacity of the allocated Buffer
- // is guaranteed to be not less than |capacity|.
- virtual Buffer* Allocate(uint32_t capacity) = 0;
-
- // Requests the host to call ContentDecryptionModule::TimerFired() |delay_ms|
- // from now with |context|.
- virtual void SetTimer(int64_t delay_ms, void* context) = 0;
+ const char* destination_url, uint32_t destination_url_length) = 0;
- // Returns the current epoch wall time in seconds.
- virtual double GetCurrentWallTimeInSeconds() = 0;
+ // Called by the CDM when session |session_id| is ready.
+ // Note: "ready" event is deprecated. This is only used for the prefixed EME
+ // API's "keyAdded" event. Drop this when we deprecate prefixed EME API.
+ virtual void OnSessionReady(uint32_t session_id) = 0;
- // Sends a keymessage event to the application.
- // Length parameters should not include null termination.
- virtual void SendKeyMessage(
- const char* session_id, uint32_t session_id_length,
- const char* message, uint32_t message_length,
- const char* default_url, uint32_t default_url_length) = 0;
+ // Called by the CDM when session |session_id| is closed.
+ virtual void OnSessionClosed(uint32_t session_id) = 0;
- // Sends a keyerror event to the application.
- // |session_id_length| should not include null termination.
- virtual void SendKeyError(const char* session_id,
- uint32_t session_id_length,
- MediaKeyError error_code,
- uint32_t system_code) = 0;
+ // Called by the CDM when an error occurs in session |session_id|.
+ virtual void OnSessionError(uint32_t session_id,
+ MediaKeyError error_code,
+ uint32_t system_code) = 0;
- // Get private data from the host. This function is limited to internal use.
- virtual void GetPrivateData(int32_t* instance,
- Host_1::GetPrivateInterface* get_interface) = 0;
+ // The following are optional methods that may not be implemented on all
+ // platforms.
// Sends a platform challenge for the given |service_id|. |challenge| is at
// most 256 bits of data to be signed. Once the challenge has been completed,
@@ -846,14 +814,20 @@ class Host_2 {
virtual void OnDeferredInitializationDone(StreamType stream_type,
Status decoder_status) = 0;
+ // Creates a FileIO object from the host to do file IO operations. Returns NULL
+ // if a FileIO object cannot be obtained. Once a valid FileIO object is
+ // returned, |client| must be valid until FileIO::Close() is called. The
+ // CDM can call this method multiple times to operate on different files.
+ virtual FileIO* CreateFileIO(FileIOClient* client) = 0;
+
protected:
- Host_2() {}
- virtual ~Host_2() {}
+ Host_4() {}
+ virtual ~Host_4() {}
};
-class Host_3 {
+class Host_5 {
public:
- static const int kVersion = 3;
+ static const int kVersion = 5;
// Returns a Buffer* containing non-zero members upon success, or NULL on
// failure. The caller owns the Buffer* after this call. The buffer is not
@@ -865,35 +839,82 @@ class Host_3 {
// from now with |context|.
virtual void SetTimer(int64_t delay_ms, void* context) = 0;
- // Returns the current epoch wall time in seconds.
- virtual double GetCurrentWallTimeInSeconds() = 0;
+ // Returns the current wall time in seconds.
+ virtual Time GetCurrentTime() = 0;
- // Called by the CDM when a session is created and the value for the
+ // Called by the CDM when a session is created or loaded and the value for the
// MediaKeySession's sessionId attribute is available (|web_session_id|).
// This must be called before OnSessionMessage() or OnSessionReady() is called
- // for |session_id|. |web_session_id_length| should not include null
+ // for the same session. |web_session_id_length| should not include null
// termination.
- virtual void OnSessionCreated(
- uint32_t session_id,
+ // When called in response to LoadSession(), the |web_session_id| must be the
+ // same as the |web_session_id| passed in LoadSession(), or NULL if the
+ // session could not be loaded.
+ virtual void OnResolveNewSessionPromise(
+ uint32_t promise_id,
const char* web_session_id, uint32_t web_session_id_length) = 0;
- // Called by the CDM when it has a message for session |session_id|.
+ // Called by the CDM when a session is updated or released.
+ virtual void OnResolvePromise(uint32_t promise_id) = 0;
+
+ // Called by the CDM when an error occurs as a result of one of the
+ // ContentDecryptionModule calls that accept a |promise_id|.
+ // |error| must be specified, |error_message| and |system_code|
+ // are optional. Length parameters should not include null termination.
+ virtual void OnRejectPromise(
+ uint32_t promise_id,
+ Error error,
+ uint32_t system_code,
+ const char* error_message, uint32_t error_message_length) = 0;
+
+ // Called by the CDM when it has a message for session |web_session_id|.
// Length parameters should not include null termination.
virtual void OnSessionMessage(
- uint32_t session_id,
+ const char* web_session_id, uint32_t web_session_id_length,
const char* message, uint32_t message_length,
const char* destination_url, uint32_t destination_url_length) = 0;
- // Called by the CDM when session |session_id| is ready.
- virtual void OnSessionReady(uint32_t session_id) = 0;
+ // Called by the CDM when there has been a change in usable keys for
+ // session |web_session_id|. |has_additional_usable_key| should be set if a
+ // key is newly usable (e.g. new key available, previously expired key has
+ // been renewed, etc.) and the browser should attempt to resume playback.
+ // Length parameter should not include null termination.
+ virtual void OnSessionKeysChange(
+ const char* web_session_id, uint32_t web_session_id_length,
+ bool has_additional_usable_key) = 0;
+
+ // Called by the CDM when there has been a change in the expiration time for
+ // session |web_session_id|. This can happen as the result of an Update() call
+ // or some other event. If this happens as a result of a call to Update(),
+ // it must be called before resolving the Update() promise. |new_expiry_time|
+ // can be 0 to represent "undefined". Length parameter should not include
+ // null termination.
+ virtual void OnExpirationChange(
+ const char* web_session_id, uint32_t web_session_id_length,
+ Time new_expiry_time) = 0;
+
+ // Called by the CDM when session |web_session_id| is ready.
+ // Note: "ready" event is deprecated. This is only used for the prefixed EME
+ // API's "keyAdded" event. Drop this when we deprecate prefixed EME API.
+ // Length parameter should not include null termination.
+ virtual void OnSessionReady(
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
- // Called by the CDM when session |session_id| is closed.
- virtual void OnSessionClosed(uint32_t session_id) = 0;
+ // Called by the CDM when session |web_session_id| is closed. Length
+ // parameter should not include null termination.
+ virtual void OnSessionClosed(
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
- // Called by the CDM when an error occurs in session |session_id|.
- virtual void OnSessionError(uint32_t session_id,
- MediaKeyError error_code,
- uint32_t system_code) = 0;
+ // Called by the CDM when an error occurs in session |web_session_id|
+ // unrelated to one of the ContentDecryptionModule calls that accept a
+ // |promise_id|. |error| must be specified, |error_message| and
+ // |system_code| are optional. Length parameters should not include null
+ // termination.
+ virtual void OnSessionError(
+ const char* web_session_id, uint32_t web_session_id_length,
+ Error error,
+ uint32_t system_code,
+ const char* error_message, uint32_t error_message_length) = 0;
// The following are optional methods that may not be implemented on all
// platforms.
@@ -922,9 +943,15 @@ class Host_3 {
virtual void OnDeferredInitializationDone(StreamType stream_type,
Status decoder_status) = 0;
+ // Creates a FileIO object from the host to do file IO operations. Returns NULL
+ // if a FileIO object cannot be obtained. Once a valid FileIO object is
+ // returned, |client| must be valid until FileIO::Close() is called. The
+ // CDM can call this method multiple times to operate on different files.
+ virtual FileIO* CreateFileIO(FileIOClient* client) = 0;
+
protected:
- Host_3() {}
- virtual ~Host_3() {}
+ Host_5() {}
+ virtual ~Host_5() {}
};
// Represents a decrypted block that has not been decoded.
@@ -975,10 +1002,6 @@ class VideoFrame {
virtual ~VideoFrame() {}
};
-//
-// WARNING: Deprecated. Will be removed in the near future. CDMs should be
-// implementing ContentDecryptionModule_2 instead which uses AudioFrames_2.
-//
// Represents decrypted and decoded audio frames. AudioFrames can contain
// multiple audio output buffers, which are serialized into this format:
//
@@ -990,25 +1013,14 @@ class VideoFrame {
//
// |<----------------- AudioFrames ------------------>|
// | audio buffer 0 | audio buffer 1 | audio buffer 2 |
-class AudioFrames_1 {
- public:
- virtual void SetFrameBuffer(Buffer* buffer) = 0;
- virtual Buffer* FrameBuffer() = 0;
-
- protected:
- AudioFrames_1() {}
- virtual ~AudioFrames_1() {}
-};
-
-// Same as AudioFrames except the format of the data may be specified to avoid
-// unnecessary conversion steps. Planar data should be stored end to end; e.g.,
-// |ch1 sample1||ch1 sample2|....|ch1 sample_last||ch2 sample1|...
class AudioFrames_2 {
public:
virtual void SetFrameBuffer(Buffer* buffer) = 0;
virtual Buffer* FrameBuffer() = 0;
- // Layout of the audio data. Defaults to kAudioFormatS16.
+ // The CDM must call this method with a valid format when providing frame
+ // buffers. Planar data should be stored end to end; e.g.,
+ // |ch1 sample1||ch1 sample2|....|ch1 sample_last||ch2 sample1|...
virtual void SetFormat(AudioFormat format) = 0;
virtual AudioFormat Format() const = 0;
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.cc b/chromium/media/cdm/ppapi/cdm_adapter.cc
index d92890328a8..ab02df3fb56 100644
--- a/chromium/media/cdm/ppapi/cdm_adapter.cc
+++ b/chromium/media/cdm/ppapi/cdm_adapter.cc
@@ -4,10 +4,12 @@
#include "media/cdm/ppapi/cdm_adapter.h"
+#include "media/cdm/ppapi/cdm_file_io_impl.h"
#include "media/cdm/ppapi/cdm_helpers.h"
#include "media/cdm/ppapi/cdm_logging.h"
#include "media/cdm/ppapi/supported_cdm_versions.h"
#include "ppapi/c/ppb_console.h"
+#include "ppapi/cpp/private/uma_private.h"
#if defined(CHECK_DOCUMENT_URL)
#include "ppapi/cpp/dev/url_util_dev.h"
@@ -56,7 +58,6 @@ void ConfigureInputBuffer(
input_buffer->data = static_cast<uint8_t*>(encrypted_buffer.data());
input_buffer->data_size = encrypted_block_info.data_size;
PP_DCHECK(encrypted_buffer.size() >= input_buffer->data_size);
- input_buffer->data_offset = encrypted_block_info.data_offset;
PP_DCHECK(encrypted_block_info.key_id_size <=
arraysize(encrypted_block_info.key_id));
@@ -154,6 +155,8 @@ cdm::VideoDecoderConfig::VideoCodec PpVideoCodecToCdmVideoCodec(
return cdm::VideoDecoderConfig::kCodecVp8;
case PP_VIDEOCODEC_H264:
return cdm::VideoDecoderConfig::kCodecH264;
+ case PP_VIDEOCODEC_VP9:
+ return cdm::VideoDecoderConfig::kCodecVp9;
default:
return cdm::VideoDecoderConfig::kUnknownVideoCodec;
}
@@ -162,8 +165,8 @@ cdm::VideoDecoderConfig::VideoCodec PpVideoCodecToCdmVideoCodec(
cdm::VideoDecoderConfig::VideoCodecProfile PpVCProfileToCdmVCProfile(
PP_VideoCodecProfile profile) {
switch (profile) {
- case PP_VIDEOCODECPROFILE_VP8_MAIN:
- return cdm::VideoDecoderConfig::kVp8ProfileMain;
+ case PP_VIDEOCODECPROFILE_NOT_NEEDED:
+ return cdm::VideoDecoderConfig::kProfileNotNeeded;
case PP_VIDEOCODECPROFILE_H264_BASELINE:
return cdm::VideoDecoderConfig::kH264ProfileBaseline;
case PP_VIDEOCODECPROFILE_H264_MAIN:
@@ -208,6 +211,40 @@ cdm::StreamType PpDecryptorStreamTypeToCdmStreamType(
return cdm::kStreamTypeVideo;
}
+cdm::SessionType PpSessionTypeToCdmSessionType(PP_SessionType session_type) {
+ switch (session_type) {
+ case PP_SESSIONTYPE_TEMPORARY:
+ return cdm::kTemporary;
+ case PP_SESSIONTYPE_PERSISTENT:
+ return cdm::kPersistent;
+ default:
+ PP_NOTREACHED();
+ return cdm::kTemporary;
+ }
+}
+
+PP_CdmExceptionCode CdmExceptionTypeToPpCdmExceptionType(cdm::Error error) {
+ switch (error) {
+ case cdm::kNotSupportedError:
+ return PP_CDMEXCEPTIONCODE_NOTSUPPORTEDERROR;
+ case cdm::kInvalidStateError:
+ return PP_CDMEXCEPTIONCODE_INVALIDSTATEERROR;
+ case cdm::kInvalidAccessError:
+ return PP_CDMEXCEPTIONCODE_INVALIDACCESSERROR;
+ case cdm::kQuotaExceededError:
+ return PP_CDMEXCEPTIONCODE_QUOTAEXCEEDEDERROR;
+ case cdm::kUnknownError:
+ return PP_CDMEXCEPTIONCODE_UNKNOWNERROR;
+ case cdm::kClientError:
+ return PP_CDMEXCEPTIONCODE_CLIENTERROR;
+ case cdm::kOutputError:
+ return PP_CDMEXCEPTIONCODE_OUTPUTERROR;
+ default:
+ PP_NOTREACHED();
+ return PP_CDMEXCEPTIONCODE_UNKNOWNERROR;
+ }
+}
+
} // namespace
namespace media {
@@ -222,6 +259,8 @@ CdmAdapter::CdmAdapter(PP_Instance instance, pp::Module* module)
output_link_mask_(0),
output_protection_mask_(0),
query_output_protection_in_progress_(false),
+ uma_for_output_protection_query_reported_(false),
+ uma_for_output_protection_positive_result_reported_(false),
#endif
allocator_(this),
cdm_(NULL),
@@ -248,13 +287,36 @@ bool CdmAdapter::CreateCdmInstance(const std::string& key_system) {
return success;
}
-// No KeyErrors should be reported in this function because they cannot be
-// bubbled up in the WD EME API. Those errors will be reported during session
-// creation (CreateSession).
+// No errors should be reported in this function because the spec says:
+// "Store this new error object internally with the MediaKeys instance being
+// created. This will be used to fire an error against any session created for
+// this instance." These errors will be reported during session creation
+// (CreateSession()) or session loading (LoadSession()).
+// TODO(xhwang): If necessary, we need to store the error here if we want to
+// support more specific error reporting (other than "Unknown").
void CdmAdapter::Initialize(const std::string& key_system) {
PP_DCHECK(!key_system.empty());
PP_DCHECK(key_system_.empty() || (key_system_ == key_system && cdm_));
+#if defined(CHECK_DOCUMENT_URL)
+ PP_URLComponents_Dev url_components = {};
+ const pp::URLUtil_Dev* url_util = pp::URLUtil_Dev::Get();
+ if (!url_util)
+ return;
+ pp::Var href = url_util->GetDocumentURL(pp::InstanceHandle(pp_instance()),
+ &url_components);
+ PP_DCHECK(href.is_string());
+ std::string url = href.AsString();
+ PP_DCHECK(!url.empty());
+ std::string url_scheme =
+ url.substr(url_components.scheme.begin, url_components.scheme.len);
+ if (url_scheme != "file") {
+ // Skip this check for file:// URLs as they don't have a host component.
+ PP_DCHECK(url_components.host.begin);
+ PP_DCHECK(0 < url_components.host.len);
+ }
+#endif // defined(CHECK_DOCUMENT_URL)
+
if (!cdm_ && !CreateCdmInstance(key_system))
return;
@@ -262,87 +324,70 @@ void CdmAdapter::Initialize(const std::string& key_system) {
key_system_ = key_system;
}
-void CdmAdapter::CreateSession(uint32_t session_id,
- const std::string& type,
- pp::VarArrayBuffer init_data) {
+void CdmAdapter::CreateSession(uint32_t promise_id,
+ const std::string& init_data_type,
+ pp::VarArrayBuffer init_data,
+ PP_SessionType session_type) {
// Initialize() doesn't report an error, so CreateSession() can be called
// even if Initialize() failed.
+ // TODO(jrummell): Remove this code when prefixed EME gets removed.
+ // TODO(jrummell): Verify that Initialize() failing does not resolve the
+ // MediaKeys.create() promise.
if (!cdm_) {
- OnSessionError(session_id, cdm::kUnknownError, 0);
+ RejectPromise(promise_id,
+ cdm::kInvalidStateError,
+ 0,
+ "CDM has not been initialized.");
return;
}
-#if defined(CHECK_DOCUMENT_URL)
- PP_URLComponents_Dev url_components = {};
- const pp::URLUtil_Dev* url_util = pp::URLUtil_Dev::Get();
- if (!url_util) {
- OnSessionError(session_id, cdm::kUnknownError, 0);
- return;
- }
- pp::Var href = url_util->GetDocumentURL(
- pp::InstanceHandle(pp_instance()), &url_components);
- PP_DCHECK(href.is_string());
- PP_DCHECK(!href.AsString().empty());
- PP_DCHECK(url_components.host.begin);
- PP_DCHECK(0 < url_components.host.len);
-#endif // defined(CHECK_DOCUMENT_URL)
-
- cdm_->CreateSession(session_id,
- type.data(),
- type.size(),
+ cdm_->CreateSession(promise_id,
+ init_data_type.data(),
+ init_data_type.size(),
static_cast<const uint8_t*>(init_data.Map()),
- init_data.ByteLength());
+ init_data.ByteLength(),
+ PpSessionTypeToCdmSessionType(session_type));
}
-void CdmAdapter::UpdateSession(uint32_t session_id,
- pp::VarArrayBuffer response) {
- // TODO(jrummell): In EME WD, AddKey() can only be called on valid sessions.
- // We should be able to DCHECK(cdm_) when addressing http://crbug.com/249976.
+void CdmAdapter::LoadSession(uint32_t promise_id,
+ const std::string& web_session_id) {
+ // Initialize() doesn't report an error, so LoadSession() can be called
+ // even if Initialize() failed.
+ // TODO(jrummell): Remove this code when prefixed EME gets removed.
+ // TODO(jrummell): Verify that Initialize() failing does not resolve the
+ // MediaKeys.create() promise.
if (!cdm_) {
- OnSessionError(session_id, cdm::kUnknownError, 0);
+ RejectPromise(promise_id,
+ cdm::kInvalidStateError,
+ 0,
+ "CDM has not been initialized.");
return;
}
+ cdm_->LoadSession(promise_id, web_session_id.data(), web_session_id.size());
+}
+
+void CdmAdapter::UpdateSession(uint32_t promise_id,
+ const std::string& web_session_id,
+ pp::VarArrayBuffer response) {
const uint8_t* response_ptr = static_cast<const uint8_t*>(response.Map());
const uint32_t response_size = response.ByteLength();
- if (!response_ptr || response_size <= 0) {
- OnSessionError(session_id, cdm::kUnknownError, 0);
- return;
- }
- CdmWrapper::Result result =
- cdm_->UpdateSession(session_id, response_ptr, response_size);
- switch (result) {
- case CdmWrapper::NO_ACTION:
- break;
- case CdmWrapper::CALL_KEY_ADDED:
- OnSessionReady(session_id);
- break;
- case CdmWrapper::CALL_KEY_ERROR:
- OnSessionError(session_id, cdm::kUnknownError, 0);
- break;
- }
-}
+ PP_DCHECK(!web_session_id.empty());
+ PP_DCHECK(response_ptr);
+ PP_DCHECK(response_size > 0);
-void CdmAdapter::ReleaseSession(uint32_t session_id) {
- // TODO(jrummell): In EME WD, AddKey() can only be called on valid sessions.
- // We should be able to DCHECK(cdm_) when addressing http://crbug.com/249976.
- if (!cdm_) {
- OnSessionError(session_id, cdm::kUnknownError, 0);
- return;
- }
+ cdm_->UpdateSession(promise_id,
+ web_session_id.data(),
+ web_session_id.length(),
+ response_ptr,
+ response_size);
+}
- CdmWrapper::Result result = cdm_->ReleaseSession(session_id);
- switch (result) {
- case CdmWrapper::NO_ACTION:
- break;
- case CdmWrapper::CALL_KEY_ADDED:
- PP_NOTREACHED();
- break;
- case CdmWrapper::CALL_KEY_ERROR:
- OnSessionError(session_id, cdm::kUnknownError, 0);
- break;
- }
+void CdmAdapter::ReleaseSession(uint32_t promise_id,
+ const std::string& web_session_id) {
+ cdm_->ReleaseSession(
+ promise_id, web_session_id.data(), web_session_id.length());
}
// Note: In the following decryption/decoding related functions, errors are NOT
@@ -539,93 +584,221 @@ void CdmAdapter::TimerExpired(int32_t result, void* context) {
cdm_->TimerExpired(context);
}
+// cdm::Host_4 methods
+
double CdmAdapter::GetCurrentWallTimeInSeconds() {
- return pp::Module::Get()->core()->GetTime();
+ return GetCurrentTime();
}
-void CdmAdapter::SendKeyMessage(
- const char* session_id, uint32_t session_id_length,
- const char* message, uint32_t message_length,
- const char* default_url, uint32_t default_url_length) {
- PP_DCHECK(!key_system_.empty());
+void CdmAdapter::OnSessionCreated(uint32_t session_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) {
+ uint32_t promise_id = cdm_->LookupPromiseId(session_id);
+ cdm_->AssignWebSessionId(session_id, web_session_id, web_session_id_length);
+ OnResolveNewSessionPromise(promise_id, web_session_id, web_session_id_length);
+}
- std::string session_id_str(session_id, session_id_length);
- PP_DCHECK(!session_id_str.empty());
- uint32_t session_reference_id = cdm_->LookupSessionId(session_id_str);
+void CdmAdapter::OnSessionMessage(uint32_t session_id,
+ const char* message,
+ uint32_t message_length,
+ const char* destination_url,
+ uint32_t destination_url_length) {
+ std::string web_session_id = cdm_->LookupWebSessionId(session_id);
+ OnSessionMessage(web_session_id.data(),
+ web_session_id.length(),
+ message,
+ message_length,
+ destination_url,
+ destination_url_length);
+}
- OnSessionCreated(session_reference_id, session_id, session_id_length);
- OnSessionMessage(session_reference_id,
- message, message_length,
- default_url, default_url_length);
+void CdmAdapter::OnSessionReady(uint32_t session_id) {
+ uint32_t promise_id = cdm_->LookupPromiseId(session_id);
+ if (promise_id) {
+ OnResolvePromise(promise_id);
+ } else {
+ std::string web_session_id = cdm_->LookupWebSessionId(session_id);
+ OnSessionReady(web_session_id.data(), web_session_id.length());
+ }
}
-void CdmAdapter::SendKeyError(const char* session_id,
- uint32_t session_id_length,
- cdm::MediaKeyError error_code,
- uint32_t system_code) {
- std::string session_id_str(session_id, session_id_length);
- uint32_t session_reference_id = cdm_->LookupSessionId(session_id_str);
- OnSessionError(session_reference_id, error_code, system_code);
+void CdmAdapter::OnSessionClosed(uint32_t session_id) {
+ uint32_t promise_id = cdm_->LookupPromiseId(session_id);
+ std::string web_session_id = cdm_->LookupWebSessionId(session_id);
+ cdm_->DropWebSessionId(web_session_id);
+ if (promise_id) {
+ OnResolvePromise(promise_id);
+ } else {
+ OnSessionClosed(web_session_id.data(), web_session_id.length());
+ }
}
-void CdmAdapter::GetPrivateData(int32_t* instance,
- GetPrivateInterface* get_interface) {
- *instance = pp_instance();
- *get_interface = pp::Module::Get()->get_browser_interface();
+void CdmAdapter::OnSessionError(uint32_t session_id,
+ cdm::MediaKeyError error_code,
+ uint32_t system_code) {
+ uint32_t promise_id = cdm_->LookupPromiseId(session_id);
+
+ // Existing cdm::MediaKeyError values don't map to DOM error names. Convert
+ // them into non-standard names so that the prefixed API can extract them.
+ // TODO(jrummell): Remove this conversion and the inverse when CDM4 is gone.
+ cdm::Error error;
+ switch (error_code) {
+ case cdm::kPrefixedClientError:
+ error = cdm::kClientError;
+ break;
+ case cdm::kPrefixedOutputError:
+ error = cdm::kOutputError;
+ break;
+ case cdm::kPrefixedUnknownError:
+ default:
+ error = cdm::kUnknownError;
+ break;
+ }
+
+ if (promise_id) {
+ RejectPromise(promise_id, error, system_code, std::string());
+ } else {
+ std::string web_session_id = cdm_->LookupWebSessionId(session_id);
+ OnSessionError(web_session_id.data(),
+ web_session_id.length(),
+ error,
+ system_code,
+ NULL,
+ 0);
+ }
}
-void CdmAdapter::OnSessionCreated(uint32_t session_id,
- const char* web_session_id,
- uint32_t web_session_id_length) {
+// cdm::Host_5 methods
+
+cdm::Time CdmAdapter::GetCurrentTime() {
+ return pp::Module::Get()->core()->GetTime();
+}
+
+void CdmAdapter::OnResolvePromise(uint32_t promise_id) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendPromiseResolvedInternal, promise_id));
+}
+
+void CdmAdapter::OnResolveNewSessionPromise(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) {
PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendSessionCreatedInternal,
- session_id,
+ &CdmAdapter::SendPromiseResolvedWithSessionInternal,
+ promise_id,
std::string(web_session_id, web_session_id_length)));
}
-void CdmAdapter::OnSessionMessage(uint32_t session_id,
+void CdmAdapter::OnRejectPromise(uint32_t promise_id,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_length) {
+ RejectPromise(promise_id,
+ error,
+ system_code,
+ std::string(error_message, error_message_length));
+}
+
+void CdmAdapter::RejectPromise(uint32_t promise_id,
+ cdm::Error error,
+ uint32_t system_code,
+ const std::string& error_message) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendPromiseRejectedInternal,
+ promise_id,
+ SessionError(error, system_code, error_message)));
+}
+
+void CdmAdapter::OnSessionMessage(const char* web_session_id,
+ uint32_t web_session_id_length,
const char* message,
uint32_t message_length,
const char* destination_url,
uint32_t destination_url_length) {
PostOnMain(callback_factory_.NewCallback(
&CdmAdapter::SendSessionMessageInternal,
- session_id,
+ std::string(web_session_id, web_session_id_length),
std::vector<uint8>(message, message + message_length),
std::string(destination_url, destination_url_length)));
}
-void CdmAdapter::OnSessionReady(uint32_t session_id) {
+void CdmAdapter::OnSessionKeysChange(const char* web_session_id,
+ uint32_t web_session_id_length,
+ bool has_additional_usable_key) {
+ // TODO(jrummell): Implement this event in a subsequent CL
+ // (http://crbug.com/370251).
+ PP_NOTREACHED();
+}
+
+void CdmAdapter::OnExpirationChange(const char* web_session_id,
+ uint32_t web_session_id_length,
+ cdm::Time new_expiry_time) {
+ // TODO(jrummell): Implement this event in a subsequent CL
+ // (http://crbug.com/370251).
+ PP_NOTREACHED();
+}
+
+void CdmAdapter::OnSessionReady(const char* web_session_id,
+ uint32_t web_session_id_length) {
PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendSessionReadyInternal, session_id));
+ &CdmAdapter::SendSessionReadyInternal,
+ std::string(web_session_id, web_session_id_length)));
}
-void CdmAdapter::OnSessionClosed(uint32_t session_id) {
+void CdmAdapter::OnSessionClosed(const char* web_session_id,
+ uint32_t web_session_id_length) {
PostOnMain(callback_factory_.NewCallback(
- &CdmAdapter::SendSessionClosedInternal, session_id));
+ &CdmAdapter::SendSessionClosedInternal,
+ std::string(web_session_id, web_session_id_length)));
}
-void CdmAdapter::OnSessionError(uint32_t session_id,
- cdm::MediaKeyError error_code,
- uint32_t system_code) {
+void CdmAdapter::OnSessionError(const char* web_session_id,
+ uint32_t web_session_id_length,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_length) {
PostOnMain(callback_factory_.NewCallback(
&CdmAdapter::SendSessionErrorInternal,
- session_id,
- error_code,
- system_code));
+ std::string(web_session_id, web_session_id_length),
+ SessionError(error,
+ system_code,
+ std::string(error_message, error_message_length))));
}
-void CdmAdapter::SendSessionCreatedInternal(int32_t result,
- uint32_t session_id,
- const std::string& web_session_id) {
+// Helpers to pass the event to Pepper.
+
+void CdmAdapter::SendPromiseResolvedInternal(int32_t result,
+ uint32_t promise_id) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionCreated(session_id, web_session_id);
+ pp::ContentDecryptor_Private::PromiseResolved(promise_id);
}
-void CdmAdapter::SendSessionMessageInternal(int32_t result,
- uint32_t session_id,
- const std::vector<uint8>& message,
- const std::string& default_url) {
+void CdmAdapter::SendPromiseResolvedWithSessionInternal(
+ int32_t result,
+ uint32_t promise_id,
+ const std::string& web_session_id) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::PromiseResolvedWithSession(promise_id,
+ web_session_id);
+}
+
+void CdmAdapter::SendPromiseRejectedInternal(int32_t result,
+ uint32_t promise_id,
+ const SessionError& error) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::PromiseRejected(
+ promise_id,
+ CdmExceptionTypeToPpCdmExceptionType(error.error),
+ error.system_code,
+ error.error_description);
+}
+
+void CdmAdapter::SendSessionMessageInternal(
+ int32_t result,
+ const std::string& web_session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) {
PP_DCHECK(result == PP_OK);
pp::VarArrayBuffer message_array_buffer(message.size());
@@ -634,27 +807,30 @@ void CdmAdapter::SendSessionMessageInternal(int32_t result,
}
pp::ContentDecryptor_Private::SessionMessage(
- session_id, message_array_buffer, default_url);
+ web_session_id, message_array_buffer, destination_url);
}
-void CdmAdapter::SendSessionReadyInternal(int32_t result, uint32_t session_id) {
+void CdmAdapter::SendSessionReadyInternal(int32_t result,
+ const std::string& web_session_id) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionReady(session_id);
+ pp::ContentDecryptor_Private::SessionReady(web_session_id);
}
void CdmAdapter::SendSessionClosedInternal(int32_t result,
- uint32_t session_id) {
+ const std::string& web_session_id) {
PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::SessionClosed(session_id);
+ pp::ContentDecryptor_Private::SessionClosed(web_session_id);
}
void CdmAdapter::SendSessionErrorInternal(int32_t result,
- uint32_t session_id,
- cdm::MediaKeyError error_code,
- uint32_t system_code) {
+ const std::string& web_session_id,
+ const SessionError& error) {
PP_DCHECK(result == PP_OK);
pp::ContentDecryptor_Private::SessionError(
- session_id, error_code, system_code);
+ web_session_id,
+ CdmExceptionTypeToPpCdmExceptionType(error.error),
+ error.system_code,
+ error.error_description);
}
void CdmAdapter::DeliverBlock(int32_t result,
@@ -662,7 +838,7 @@ void CdmAdapter::DeliverBlock(int32_t result,
const LinkedDecryptedBlock& decrypted_block,
const PP_DecryptTrackingInfo& tracking_info) {
PP_DCHECK(result == PP_OK);
- PP_DecryptedBlockInfo decrypted_block_info;
+ PP_DecryptedBlockInfo decrypted_block_info = {};
decrypted_block_info.tracking_info = tracking_info;
decrypted_block_info.tracking_info.timestamp = decrypted_block->Timestamp();
decrypted_block_info.tracking_info.buffer_id = 0;
@@ -679,9 +855,10 @@ void CdmAdapter::DeliverBlock(int32_t result,
} else {
PpbBuffer* ppb_buffer =
static_cast<PpbBuffer*>(decrypted_block->DecryptedBuffer());
- buffer = ppb_buffer->buffer_dev();
decrypted_block_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
decrypted_block_info.data_size = ppb_buffer->Size();
+
+ buffer = ppb_buffer->TakeBuffer();
}
}
@@ -717,7 +894,7 @@ void CdmAdapter::DeliverFrame(
const LinkedVideoFrame& video_frame,
const PP_DecryptTrackingInfo& tracking_info) {
PP_DCHECK(result == PP_OK);
- PP_DecryptedFrameInfo decrypted_frame_info;
+ PP_DecryptedFrameInfo decrypted_frame_info = {};
decrypted_frame_info.tracking_info.request_id = tracking_info.request_id;
decrypted_frame_info.tracking_info.buffer_id = 0;
decrypted_frame_info.result = CdmStatusToPpDecryptResult(status);
@@ -732,8 +909,6 @@ void CdmAdapter::DeliverFrame(
PpbBuffer* ppb_buffer =
static_cast<PpbBuffer*>(video_frame->FrameBuffer());
- buffer = ppb_buffer->buffer_dev();
-
decrypted_frame_info.tracking_info.timestamp = video_frame->Timestamp();
decrypted_frame_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
decrypted_frame_info.format =
@@ -752,8 +927,11 @@ void CdmAdapter::DeliverFrame(
video_frame->Stride(cdm::VideoFrame::kUPlane);
decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_V] =
video_frame->Stride(cdm::VideoFrame::kVPlane);
+
+ buffer = ppb_buffer->TakeBuffer();
}
}
+
pp::ContentDecryptor_Private::DeliverFrame(buffer, decrypted_frame_info);
}
@@ -763,7 +941,7 @@ void CdmAdapter::DeliverSamples(int32_t result,
const PP_DecryptTrackingInfo& tracking_info) {
PP_DCHECK(result == PP_OK);
- PP_DecryptedSampleInfo decrypted_sample_info;
+ PP_DecryptedSampleInfo decrypted_sample_info = {};
decrypted_sample_info.tracking_info = tracking_info;
decrypted_sample_info.tracking_info.timestamp = 0;
decrypted_sample_info.tracking_info.buffer_id = 0;
@@ -780,11 +958,13 @@ void CdmAdapter::DeliverSamples(int32_t result,
} else {
PpbBuffer* ppb_buffer =
static_cast<PpbBuffer*>(audio_frames->FrameBuffer());
- buffer = ppb_buffer->buffer_dev();
+
decrypted_sample_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
decrypted_sample_info.data_size = ppb_buffer->Size();
decrypted_sample_info.format =
CdmAudioFormatToPpDecryptedSampleFormat(audio_frames->Format());
+
+ buffer = ppb_buffer->TakeBuffer();
}
}
@@ -885,6 +1065,7 @@ void CdmAdapter::QueryOutputProtectionStatus() {
&CdmAdapter::QueryOutputProtectionStatusDone));
if (result == PP_OK_COMPLETIONPENDING) {
query_output_protection_in_progress_ = true;
+ ReportOutputProtectionQuery();
return;
}
@@ -921,7 +1102,58 @@ void CdmAdapter::OnDeferredInitializationDone(cdm::StreamType stream_type,
}
}
+// The CDM owns the returned object and must call FileIO::Close() to release it.
+cdm::FileIO* CdmAdapter::CreateFileIO(cdm::FileIOClient* client) {
+ return new CdmFileIOImpl(client, pp_instance());
+}
+
#if defined(OS_CHROMEOS)
+void CdmAdapter::ReportOutputProtectionUMA(OutputProtectionStatus status) {
+ pp::UMAPrivate uma_interface_(this);
+ uma_interface_.HistogramEnumeration(
+ "Media.EME.OutputProtection", status, OUTPUT_PROTECTION_MAX);
+}
+
+void CdmAdapter::ReportOutputProtectionQuery() {
+ if (uma_for_output_protection_query_reported_)
+ return;
+
+ ReportOutputProtectionUMA(OUTPUT_PROTECTION_QUERIED);
+ uma_for_output_protection_query_reported_ = true;
+}
+
+void CdmAdapter::ReportOutputProtectionQueryResult() {
+ if (uma_for_output_protection_positive_result_reported_)
+ return;
+
+ // Report UMAs for output protection query result.
+ uint32_t external_links = (output_link_mask_ & ~cdm::kLinkTypeInternal);
+
+ if (!external_links) {
+ ReportOutputProtectionUMA(OUTPUT_PROTECTION_NO_EXTERNAL_LINK);
+ uma_for_output_protection_positive_result_reported_ = true;
+ return;
+ }
+
+ const uint32_t kProtectableLinks =
+ cdm::kLinkTypeHDMI | cdm::kLinkTypeDVI | cdm::kLinkTypeDisplayPort;
+ bool is_unprotectable_link_connected = external_links & ~kProtectableLinks;
+ bool is_hdcp_enabled_on_all_protectable_links =
+ output_protection_mask_ & cdm::kProtectionHDCP;
+
+ if (!is_unprotectable_link_connected &&
+ is_hdcp_enabled_on_all_protectable_links) {
+ ReportOutputProtectionUMA(
+ OUTPUT_PROTECTION_ALL_EXTERNAL_LINKS_PROTECTED);
+ uma_for_output_protection_positive_result_reported_ = true;
+ return;
+ }
+
+ // Do not report a negative result because it could be a false negative.
+ // Instead, we will calculate the number of negatives using the total number
+ // of queries and success results.
+}
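
A worked example of the mask arithmetic above (editorial, not part of the
diff). The bit values are assumed for illustration only; the real constants
come from the cdm:: enums in content_decryption_module.h.

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed bit values for the sketch only.
  const uint32_t kLinkTypeInternal = 2, kLinkTypeHDMI = 8,
                 kLinkTypeDVI = 16, kLinkTypeDisplayPort = 32;
  const uint32_t kProtectionHDCP = 1;
  const uint32_t kProtectableLinks =
      kLinkTypeHDMI | kLinkTypeDVI | kLinkTypeDisplayPort;

  // Internal panel plus an HDMI monitor, with HDCP engaged.
  uint32_t link_mask = kLinkTypeInternal | kLinkTypeHDMI;
  uint32_t protection_mask = kProtectionHDCP;

  uint32_t external = link_mask & ~kLinkTypeInternal;  // HDMI only.
  bool unprotectable = (external & ~kProtectableLinks) != 0;
  bool hdcp_enabled = (protection_mask & kProtectionHDCP) != 0;

  // Mirrors OUTPUT_PROTECTION_ALL_EXTERNAL_LINKS_PROTECTED above: the only
  // external link is protectable and HDCP is on, so the positive-result UMA
  // would be reported.
  std::printf("positive=%d\n", external && !unprotectable && hdcp_enabled);
  return 0;
}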
+
void CdmAdapter::SendPlatformChallengeDone(int32_t result) {
challenge_in_progress_ = false;
@@ -938,15 +1170,12 @@ void CdmAdapter::SendPlatformChallengeDone(int32_t result) {
platform_key_certificate_output_.AsString();
cdm::PlatformChallengeResponse response = {
- static_cast<uint8_t*>(signed_data_var.Map()),
- signed_data_var.ByteLength(),
-
- static_cast<uint8_t*>(signed_data_signature_var.Map()),
- signed_data_signature_var.ByteLength(),
-
- reinterpret_cast<const uint8_t*>(platform_key_certificate_string.c_str()),
- static_cast<uint32_t>(platform_key_certificate_string.length())
- };
+ static_cast<uint8_t*>(signed_data_var.Map()),
+ signed_data_var.ByteLength(),
+ static_cast<uint8_t*>(signed_data_signature_var.Map()),
+ signed_data_signature_var.ByteLength(),
+ reinterpret_cast<const uint8_t*>(platform_key_certificate_string.data()),
+ static_cast<uint32_t>(platform_key_certificate_string.length())};
cdm_->OnPlatformChallengeResponse(response);
signed_data_var.Unmap();
@@ -966,45 +1195,53 @@ void CdmAdapter::QueryOutputProtectionStatusDone(int32_t result) {
// Return a protection status of none on error.
if (result != PP_OK)
output_link_mask_ = output_protection_mask_ = 0;
+ else
+ ReportOutputProtectionQueryResult();
cdm_->OnQueryOutputProtectionStatus(output_link_mask_,
output_protection_mask_);
}
#endif
+CdmAdapter::SessionError::SessionError(cdm::Error error,
+ uint32_t system_code,
+ std::string error_description)
+ : error(error),
+ system_code(system_code),
+ error_description(error_description) {
+}
+
void* GetCdmHost(int host_interface_version, void* user_data) {
if (!host_interface_version || !user_data)
return NULL;
- COMPILE_ASSERT(cdm::ContentDecryptionModule::Host::kVersion ==
- cdm::ContentDecryptionModule_3::Host::kVersion,
- update_code_below);
+ COMPILE_ASSERT(
+ cdm::ContentDecryptionModule::Host::kVersion == cdm::Host_5::kVersion,
+ update_code_below);
// Ensure IsSupportedCdmHostVersion matches implementation of this function.
// Always update this DCHECK when updating this function.
// If this check fails, update this function and DCHECK or update
// IsSupportedCdmHostVersion.
+
PP_DCHECK(
// Future version is not supported.
- !IsSupportedCdmHostVersion(
- cdm::ContentDecryptionModule::Host::kVersion + 1) &&
+ !IsSupportedCdmHostVersion(cdm::Host_5::kVersion + 1) &&
// Current version is supported.
- IsSupportedCdmHostVersion(cdm::ContentDecryptionModule::Host::kVersion) &&
- // Include all previous supported versions here.
- IsSupportedCdmHostVersion(cdm::Host_1::kVersion) &&
+ IsSupportedCdmHostVersion(cdm::Host_5::kVersion) &&
+ // Include all previous supported versions (if any) here.
+ IsSupportedCdmHostVersion(cdm::Host_4::kVersion) &&
// One older than the oldest supported version is not supported.
- !IsSupportedCdmHostVersion(cdm::Host_1::kVersion - 1));
+ !IsSupportedCdmHostVersion(cdm::Host_4::kVersion - 1));
PP_DCHECK(IsSupportedCdmHostVersion(host_interface_version));
CdmAdapter* cdm_adapter = static_cast<CdmAdapter*>(user_data);
CDM_DLOG() << "Create CDM Host with version " << host_interface_version;
switch (host_interface_version) {
- case cdm::Host_3::kVersion:
- return static_cast<cdm::Host_3*>(cdm_adapter);
- case cdm::Host_2::kVersion:
- return static_cast<cdm::Host_2*>(cdm_adapter);
- case cdm::Host_1::kVersion:
- return static_cast<cdm::Host_1*>(cdm_adapter);
+ case cdm::Host_4::kVersion:
+ return static_cast<cdm::Host_4*>(cdm_adapter);
+ case cdm::Host_5::kVersion:
+ return static_cast<cdm::Host_5*>(cdm_adapter);
default:
PP_NOTREACHED();
return NULL;
@@ -1027,6 +1264,9 @@ class CdmAdapterModule : public pp::Module {
virtual pp::Instance* CreateInstance(PP_Instance instance) {
return new CdmAdapter(instance, this);
}
+
+ private:
+ CdmFileIOImpl::ResourceTracker cdm_file_io_impl_resource_tracker;
};
} // namespace media
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.h b/chromium/media/cdm/ppapi/cdm_adapter.h
index d256913aede..cd65b181f07 100644
--- a/chromium/media/cdm/ppapi/cdm_adapter.h
+++ b/chromium/media/cdm/ppapi/cdm_adapter.h
@@ -28,6 +28,11 @@
#include "ppapi/cpp/private/platform_verification.h"
#endif
+#if defined(GetCurrentTime)
+// winbase.h defines this, which messes up calls to Host_5::GetCurrentTime.
+#undef GetCurrentTime
+#endif
+
namespace media {
// GetCdmHostFunc implementation.
@@ -37,9 +42,8 @@ void* GetCdmHost(int host_interface_version, void* user_data);
// Content Decryption Module (CDM).
class CdmAdapter : public pp::Instance,
public pp::ContentDecryptor_Private,
- public cdm::Host_1,
- public cdm::Host_2,
- public cdm::Host_3 {
+ public cdm::Host_4,
+ public cdm::Host_5 {
public:
CdmAdapter(PP_Instance instance, pp::Module* module);
virtual ~CdmAdapter();
@@ -53,12 +57,17 @@ class CdmAdapter : public pp::Instance,
// Note: Results of calls to these methods must be reported through the
// PPB_ContentDecryptor_Private interface.
virtual void Initialize(const std::string& key_system) OVERRIDE;
- virtual void CreateSession(uint32_t session_id,
- const std::string& type,
- pp::VarArrayBuffer init_data) OVERRIDE;
- virtual void UpdateSession(uint32_t session_id,
+ virtual void CreateSession(uint32_t promise_id,
+ const std::string& init_data_type,
+ pp::VarArrayBuffer init_data,
+ PP_SessionType session_type) OVERRIDE;
+ virtual void LoadSession(uint32_t promise_id,
+ const std::string& web_session_id) OVERRIDE;
+ virtual void UpdateSession(uint32_t promise_id,
+ const std::string& web_session_id,
pp::VarArrayBuffer response) OVERRIDE;
- virtual void ReleaseSession(uint32_t session_id) OVERRIDE;
+ virtual void ReleaseSession(uint32_t promise_id,
+ const std::string& web_session_id) OVERRIDE;
virtual void Decrypt(
pp::Buffer_Dev encrypted_buffer,
const PP_EncryptedBlockInfo& encrypted_block_info) OVERRIDE;
@@ -77,33 +86,12 @@ class CdmAdapter : public pp::Instance,
pp::Buffer_Dev encrypted_buffer,
const PP_EncryptedBlockInfo& encrypted_block_info) OVERRIDE;
- // cdm::Host implementation.
+ // cdm::Host_4 and cdm::Host_5 implementation.
virtual cdm::Buffer* Allocate(uint32_t capacity) OVERRIDE;
virtual void SetTimer(int64_t delay_ms, void* context) OVERRIDE;
- virtual double GetCurrentWallTimeInSeconds() OVERRIDE;
- virtual void SendKeyMessage(
- const char* session_id, uint32_t session_id_length,
- const char* message, uint32_t message_length,
- const char* default_url, uint32_t default_url_length) OVERRIDE;
- virtual void SendKeyError(const char* session_id,
- uint32_t session_id_length,
- cdm::MediaKeyError error_code,
- uint32_t system_code) OVERRIDE;
- virtual void GetPrivateData(int32_t* instance,
- GetPrivateInterface* get_interface) OVERRIDE;
-
- // cdm::Host_2 implementation.
- virtual void SendPlatformChallenge(
- const char* service_id, uint32_t service_id_length,
- const char* challenge, uint32_t challenge_length) OVERRIDE;
- virtual void EnableOutputProtection(
- uint32_t desired_protection_mask) OVERRIDE;
- virtual void QueryOutputProtectionStatus() OVERRIDE;
- virtual void OnDeferredInitializationDone(
- cdm::StreamType stream_type,
- cdm::Status decoder_status) OVERRIDE;
- // cdm::Host_3 implementation.
+ // cdm::Host_4 implementation.
+ virtual double GetCurrentWallTimeInSeconds() OVERRIDE;
virtual void OnSessionCreated(uint32_t session_id,
const char* web_session_id,
uint32_t web_session_id_length) OVERRIDE;
@@ -118,29 +106,104 @@ class CdmAdapter : public pp::Instance,
cdm::MediaKeyError error_code,
uint32_t system_code) OVERRIDE;
+ // cdm::Host_5 implementation.
+ virtual cdm::Time GetCurrentTime() OVERRIDE;
+ virtual void OnResolveNewSessionPromise(
+ uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) OVERRIDE;
+ virtual void OnResolvePromise(uint32_t promise_id) OVERRIDE;
+ virtual void OnRejectPromise(uint32_t promise_id,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_length) OVERRIDE;
+ virtual void OnSessionMessage(const char* web_session_id,
+ uint32_t web_session_id_length,
+ const char* message,
+ uint32_t message_length,
+ const char* destination_url,
+ uint32_t destination_url_length) OVERRIDE;
+ virtual void OnSessionKeysChange(const char* web_session_id,
+ uint32_t web_session_id_length,
+ bool has_additional_usable_key);
+ virtual void OnExpirationChange(const char* web_session_id,
+ uint32_t web_session_id_length,
+ cdm::Time new_expiry_time);
+ virtual void OnSessionReady(const char* web_session_id,
+ uint32_t web_session_id_length) OVERRIDE;
+ virtual void OnSessionClosed(const char* web_session_id,
+ uint32_t web_session_id_length) OVERRIDE;
+ virtual void OnSessionError(const char* web_session_id,
+ uint32_t web_session_id_length,
+ cdm::Error error,
+ uint32_t system_code,
+ const char* error_message,
+ uint32_t error_message_length) OVERRIDE;
+
+ // cdm::Host_4 and cdm::Host_5 implementation.
+ virtual void SendPlatformChallenge(const char* service_id,
+ uint32_t service_id_length,
+ const char* challenge,
+ uint32_t challenge_length) OVERRIDE;
+ virtual void EnableOutputProtection(
+ uint32_t desired_protection_mask) OVERRIDE;
+ virtual void QueryOutputProtectionStatus() OVERRIDE;
+ virtual void OnDeferredInitializationDone(
+ cdm::StreamType stream_type,
+ cdm::Status decoder_status) OVERRIDE;
+ virtual cdm::FileIO* CreateFileIO(cdm::FileIOClient* client) OVERRIDE;
+
private:
+ // These are reported to the UMA server. Do not change the existing values!
+ enum OutputProtectionStatus {
+ OUTPUT_PROTECTION_QUERIED = 0,
+ OUTPUT_PROTECTION_NO_EXTERNAL_LINK = 1,
+ OUTPUT_PROTECTION_ALL_EXTERNAL_LINKS_PROTECTED = 2,
+ OUTPUT_PROTECTION_MAX = 3
+ };
+
typedef linked_ptr<DecryptedBlockImpl> LinkedDecryptedBlock;
typedef linked_ptr<VideoFrameImpl> LinkedVideoFrame;
typedef linked_ptr<AudioFramesImpl> LinkedAudioFrames;
+ struct SessionError {
+ SessionError(cdm::Error error,
+ uint32_t system_code,
+ std::string error_description);
+ cdm::Error error;
+ uint32_t system_code;
+ std::string error_description;
+ };
+
bool CreateCdmInstance(const std::string& key_system);
// <code>PPB_ContentDecryptor_Private</code> dispatchers. These are passed to
// <code>callback_factory_</code> to ensure that calls into
// <code>PPP_ContentDecryptor_Private</code> are asynchronous.
- void SendSessionCreatedInternal(int32_t result,
- uint32_t session_id,
- const std::string& web_session_id);
+ void SendPromiseResolvedInternal(int32_t result, uint32_t promise_id);
+ void SendPromiseResolvedWithSessionInternal(
+ int32_t result,
+ uint32_t promise_id,
+ const std::string& web_session_id);
+ void SendPromiseRejectedInternal(int32_t result,
+ uint32_t promise_id,
+ const SessionError& error);
void SendSessionMessageInternal(int32_t result,
- uint32_t session_id,
+ const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& default_url);
- void SendSessionReadyInternal(int32_t result, uint32_t session_id);
- void SendSessionClosedInternal(int32_t result, uint32_t session_id);
+ const std::string& destination_url);
+ void SendSessionReadyInternal(int32_t result,
+ const std::string& web_session_id);
+ void SendSessionClosedInternal(int32_t result,
+ const std::string& web_session_id);
void SendSessionErrorInternal(int32_t result,
- uint32_t session_id,
- cdm::MediaKeyError error_code,
- uint32_t system_code);
+ const std::string& web_session_id,
+ const SessionError& error);
+ void RejectPromise(uint32_t promise_id,
+ cdm::Error error,
+ uint32_t system_code,
+ const std::string& error_message);
void DeliverBlock(int32_t result,
const cdm::Status& status,
@@ -178,6 +241,10 @@ class CdmAdapter : public pp::Instance,
#endif // !defined(NDEBUG)
#if defined(OS_CHROMEOS)
+ void ReportOutputProtectionUMA(OutputProtectionStatus status);
+ void ReportOutputProtectionQuery();
+ void ReportOutputProtectionQueryResult();
+
void SendPlatformChallengeDone(int32_t result);
void EnableProtectionDone(int32_t result);
void QueryOutputProtectionStatusDone(int32_t result);
@@ -197,6 +264,11 @@ class CdmAdapter : public pp::Instance,
uint32_t output_link_mask_;
uint32_t output_protection_mask_;
bool query_output_protection_in_progress_;
+
+ // Tracks whether an output protection query and a positive query result (no
+ // unprotected external link) have been reported to UMA.
+ bool uma_for_output_protection_query_reported_;
+ bool uma_for_output_protection_positive_result_reported_;
#endif
PpbBufferAllocator allocator_;
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_impl.cc b/chromium/media/cdm/ppapi/cdm_file_io_impl.cc
new file mode 100644
index 00000000000..726388949ec
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_file_io_impl.cc
@@ -0,0 +1,457 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/ppapi/cdm_file_io_impl.h"
+
+#include <algorithm>
+#include <sstream>
+
+#include "media/cdm/ppapi/cdm_logging.h"
+#include "ppapi/c/pp_errors.h"
+#include "ppapi/cpp/dev/url_util_dev.h"
+
+namespace media {
+
+const int kReadSize = 4 * 1024; // Arbitrary choice.
+
+// Call func_call and check the result. If the result is not
+// PP_OK_COMPLETIONPENDING, print out logs, call OnError() and return.
+#define CHECK_PP_OK_COMPLETIONPENDING(func_call, error_type) \
+ do { \
+ int32_t result = func_call; \
+ PP_DCHECK(result != PP_OK); \
+ if (result != PP_OK_COMPLETIONPENDING) { \
+ CDM_DLOG() << #func_call << " failed with result: " << result; \
+ OnError(error_type); \
+ return; \
+ } \
+ } while (0)
+
+#if !defined(NDEBUG)
+// PPAPI calls should only be made on the main thread. In this file, main thread
+// checking is only performed in public APIs and the completion callbacks. This
+// ensures all functions are running on the main thread since internal methods
+// are called either by the public APIs or by the completion callbacks.
+static bool IsMainThread() {
+ return pp::Module::Get()->core()->IsMainThread();
+}
+#endif // !defined(NDEBUG)
+
+// Posts a task to run |cb| on the main thread. The task is posted even if the
+// current thread is the main thread.
+static void PostOnMain(pp::CompletionCallback cb) {
+ pp::Module::Get()->core()->CallOnMainThread(0, cb, PP_OK);
+}
+
+CdmFileIOImpl::FileLockMap* CdmFileIOImpl::file_lock_map_ = NULL;
+
+CdmFileIOImpl::ResourceTracker::ResourceTracker() {
+ // Do nothing here since we lazy-initialize CdmFileIOImpl::file_lock_map_
+ // in CdmFileIOImpl::AcquireFileLock().
+}
+
+CdmFileIOImpl::ResourceTracker::~ResourceTracker() {
+ delete CdmFileIOImpl::file_lock_map_;
+}
+
+CdmFileIOImpl::CdmFileIOImpl(cdm::FileIOClient* client, PP_Instance pp_instance)
+ : state_(FILE_UNOPENED),
+ client_(client),
+ pp_instance_handle_(pp_instance),
+ callback_factory_(this),
+ io_offset_(0) {
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(pp_instance); // 0 indicates a "NULL handle".
+}
+
+CdmFileIOImpl::~CdmFileIOImpl() {
+ PP_DCHECK(state_ == FILE_CLOSED);
+}
+
+// Call sequence: Open() -> OpenFileSystem() -> OpenFile() -> FILE_OPENED.
+void CdmFileIOImpl::Open(const char* file_name, uint32_t file_name_size) {
+ CDM_DLOG() << __FUNCTION__;
+ PP_DCHECK(IsMainThread());
+
+ if (state_ != FILE_UNOPENED) {
+ CDM_DLOG() << "Open() called in an invalid state.";
+ OnError(OPEN_ERROR);
+ return;
+ }
+
+ // File name should not contain any path separators.
+ std::string file_name_str(file_name, file_name_size);
+ if (file_name_str.find('/') != std::string::npos ||
+ file_name_str.find('\\') != std::string::npos) {
+ CDM_DLOG() << "Invalid file name.";
+ OnError(OPEN_ERROR);
+ return;
+ }
+
+  // pp::FileRef only accepts paths that begin with a '/' character.
+ file_name_ = '/' + file_name_str;
+
+ if (!AcquireFileLock()) {
+ CDM_DLOG() << "File is in use by other cdm::FileIO objects.";
+ OnError(OPEN_WHILE_IN_USE);
+ return;
+ }
+
+ state_ = OPENING_FILE_SYSTEM;
+ OpenFileSystem();
+}
+
+// Call sequence:
+// finished
+// Read() -> ReadFile() -> OnFileRead() ----------> Done.
+// ^ |
+// | not finished |
+// |--------------|
+void CdmFileIOImpl::Read() {
+ CDM_DLOG() << __FUNCTION__;
+ PP_DCHECK(IsMainThread());
+
+ if (state_ == READING_FILE || state_ == WRITING_FILE) {
+ CDM_DLOG() << "Read() called during pending read/write.";
+ OnError(READ_WHILE_IN_USE);
+ return;
+ }
+
+ if (state_ != FILE_OPENED) {
+ CDM_DLOG() << "Read() called in an invalid state.";
+ OnError(READ_ERROR);
+ return;
+ }
+
+ PP_DCHECK(io_buffer_.empty());
+ PP_DCHECK(cumulative_read_buffer_.empty());
+
+ io_buffer_.resize(kReadSize);
+ io_offset_ = 0;
+
+ state_ = READING_FILE;
+ ReadFile();
+}
+
+// Call sequence:
+// finished
+// Write() -> WriteFile() -> OnFileWritten() ----------> Done.
+// ^ |
+// | | not finished
+// |------------------|
+void CdmFileIOImpl::Write(const uint8_t* data, uint32_t data_size) {
+ CDM_DLOG() << __FUNCTION__;
+ PP_DCHECK(IsMainThread());
+
+ if (state_ == READING_FILE || state_ == WRITING_FILE) {
+ CDM_DLOG() << "Write() called during pending read/write.";
+ OnError(WRITE_WHILE_IN_USE);
+ return;
+ }
+
+ if (state_ != FILE_OPENED) {
+ CDM_DLOG() << "Write() called in an invalid state.";
+ OnError(WRITE_ERROR);
+ return;
+ }
+
+ PP_DCHECK(io_offset_ == 0);
+ PP_DCHECK(io_buffer_.empty());
+ if (data_size > 0)
+ io_buffer_.assign(data, data + data_size);
+ else
+ PP_DCHECK(!data);
+
+ state_ = WRITING_FILE;
+
+ // Always SetLength() in case |data_size| is less than the file size.
+ SetLength(data_size);
+}
+
+void CdmFileIOImpl::Close() {
+ CDM_DLOG() << __FUNCTION__;
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(state_ != FILE_CLOSED);
+ CloseFile();
+ ReleaseFileLock();
+ // All pending callbacks are canceled since |callback_factory_| is destroyed.
+ delete this;
+}
+
+bool CdmFileIOImpl::SetFileID() {
+ PP_DCHECK(file_id_.empty());
+ PP_DCHECK(!file_name_.empty() && file_name_[0] == '/');
+
+ // Not taking ownership of |url_util_dev| (which is a singleton).
+ const pp::URLUtil_Dev* url_util_dev = pp::URLUtil_Dev::Get();
+ PP_URLComponents_Dev components;
+ pp::Var url_var =
+ url_util_dev->GetDocumentURL(pp_instance_handle_, &components);
+ if (!url_var.is_string())
+ return false;
+ std::string url = url_var.AsString();
+
+ file_id_.append(url, components.scheme.begin, components.scheme.len);
+ file_id_ += ':';
+ file_id_.append(url, components.host.begin, components.host.len);
+ file_id_ += ':';
+ file_id_.append(url, components.port.begin, components.port.len);
+ file_id_ += file_name_;
+
+ return true;
+}
+
+bool CdmFileIOImpl::AcquireFileLock() {
+ PP_DCHECK(IsMainThread());
+
+ if (file_id_.empty() && !SetFileID())
+ return false;
+
+ if (!file_lock_map_) {
+ file_lock_map_ = new FileLockMap();
+ } else {
+ FileLockMap::iterator found = file_lock_map_->find(file_id_);
+ if (found != file_lock_map_->end() && found->second)
+ return false;
+ }
+
+ (*file_lock_map_)[file_id_] = true;
+ return true;
+}
+
+void CdmFileIOImpl::ReleaseFileLock() {
+ PP_DCHECK(IsMainThread());
+
+ if (!file_lock_map_)
+ return;
+
+ FileLockMap::iterator found = file_lock_map_->find(file_id_);
+ if (found != file_lock_map_->end() && found->second)
+ found->second = false;
+}
+
+void CdmFileIOImpl::OpenFileSystem() {
+ PP_DCHECK(state_ == OPENING_FILE_SYSTEM);
+
+ pp::CompletionCallbackWithOutput<pp::FileSystem> cb =
+ callback_factory_.NewCallbackWithOutput(
+ &CdmFileIOImpl::OnFileSystemOpened);
+ isolated_file_system_ = pp::IsolatedFileSystemPrivate(
+ pp_instance_handle_, PP_ISOLATEDFILESYSTEMTYPE_PRIVATE_PLUGINPRIVATE);
+
+ CHECK_PP_OK_COMPLETIONPENDING(isolated_file_system_.Open(cb), OPEN_ERROR);
+}
+
+void CdmFileIOImpl::OnFileSystemOpened(int32_t result,
+ pp::FileSystem file_system) {
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(state_ == OPENING_FILE_SYSTEM);
+
+ if (result != PP_OK) {
+ CDM_DLOG() << "File system open failed asynchronously.";
+ ReleaseFileLock();
+ OnError(OPEN_ERROR);
+ return;
+ }
+
+ file_system_ = file_system;
+ state_ = OPENING_FILE;
+ OpenFile();
+}
+
+void CdmFileIOImpl::OpenFile() {
+ PP_DCHECK(state_ == OPENING_FILE);
+
+ file_io_ = pp::FileIO(pp_instance_handle_);
+ file_ref_ = pp::FileRef(file_system_, file_name_.c_str());
+ int32_t file_open_flag = PP_FILEOPENFLAG_READ |
+ PP_FILEOPENFLAG_WRITE |
+ PP_FILEOPENFLAG_CREATE;
+ pp::CompletionCallback cb =
+ callback_factory_.NewCallback(&CdmFileIOImpl::OnFileOpened);
+ CHECK_PP_OK_COMPLETIONPENDING(file_io_.Open(file_ref_, file_open_flag, cb),
+ OPEN_ERROR);
+}
+
+void CdmFileIOImpl::OnFileOpened(int32_t result) {
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(state_ == OPENING_FILE);
+
+ if (result != PP_OK) {
+ CDM_DLOG() << "File open failed.";
+ ReleaseFileLock();
+ OnError(OPEN_ERROR);
+ return;
+ }
+
+ state_ = FILE_OPENED;
+ client_->OnOpenComplete(cdm::FileIOClient::kSuccess);
+}
+
+void CdmFileIOImpl::ReadFile() {
+ PP_DCHECK(state_ == READING_FILE);
+ PP_DCHECK(!io_buffer_.empty());
+
+ pp::CompletionCallback cb =
+ callback_factory_.NewCallback(&CdmFileIOImpl::OnFileRead);
+ CHECK_PP_OK_COMPLETIONPENDING(
+ file_io_.Read(io_offset_, &io_buffer_[0], io_buffer_.size(), cb),
+ READ_ERROR);
+}
+
+void CdmFileIOImpl::OnFileRead(int32_t bytes_read) {
+ CDM_DLOG() << __FUNCTION__ << ": " << bytes_read;
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(state_ == READING_FILE);
+
+ // 0 |bytes_read| indicates end-of-file reached.
+ if (bytes_read < PP_OK) {
+ CDM_DLOG() << "Read file failed.";
+ OnError(READ_ERROR);
+ return;
+ }
+
+ PP_DCHECK(static_cast<size_t>(bytes_read) <= io_buffer_.size());
+ // Append |bytes_read| in |io_buffer_| to |cumulative_read_buffer_|.
+ cumulative_read_buffer_.insert(cumulative_read_buffer_.end(),
+ io_buffer_.begin(),
+ io_buffer_.begin() + bytes_read);
+ io_offset_ += bytes_read;
+
+ // Not received end-of-file yet.
+ if (bytes_read > 0) {
+ ReadFile();
+ return;
+ }
+
+ // We hit end-of-file. Return read data to the client.
+ io_buffer_.clear();
+ io_offset_ = 0;
+ // Clear |cumulative_read_buffer_| in case OnReadComplete() calls Read() or
+ // Write().
+ std::vector<char> local_buffer;
+ std::swap(cumulative_read_buffer_, local_buffer);
+
+ state_ = FILE_OPENED;
+ const uint8_t* data = local_buffer.empty() ?
+ NULL : reinterpret_cast<const uint8_t*>(&local_buffer[0]);
+ client_->OnReadComplete(
+ cdm::FileIOClient::kSuccess, data, local_buffer.size());
+}
+
+void CdmFileIOImpl::SetLength(uint32_t length) {
+ PP_DCHECK(state_ == WRITING_FILE);
+
+ pp::CompletionCallback cb =
+ callback_factory_.NewCallback(&CdmFileIOImpl::OnLengthSet);
+ CHECK_PP_OK_COMPLETIONPENDING(file_io_.SetLength(length, cb), WRITE_ERROR);
+}
+
+void CdmFileIOImpl::OnLengthSet(int32_t result) {
+ CDM_DLOG() << __FUNCTION__ << ": " << result;
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(state_ == WRITING_FILE);
+
+ if (result != PP_OK) {
+ CDM_DLOG() << "File SetLength failed.";
+ OnError(WRITE_ERROR);
+ return;
+ }
+
+ if (io_buffer_.empty()) {
+ state_ = FILE_OPENED;
+ client_->OnWriteComplete(cdm::FileIOClient::kSuccess);
+ return;
+ }
+
+ WriteFile();
+}
+
+void CdmFileIOImpl::WriteFile() {
+ PP_DCHECK(state_ == WRITING_FILE);
+ PP_DCHECK(io_offset_ < io_buffer_.size());
+
+ pp::CompletionCallback cb =
+ callback_factory_.NewCallback(&CdmFileIOImpl::OnFileWritten);
+ CHECK_PP_OK_COMPLETIONPENDING(file_io_.Write(io_offset_,
+ &io_buffer_[io_offset_],
+ io_buffer_.size() - io_offset_,
+ cb),
+ WRITE_ERROR);
+}
+
+void CdmFileIOImpl::OnFileWritten(int32_t bytes_written) {
+ CDM_DLOG() << __FUNCTION__ << ": " << bytes_written;
+ PP_DCHECK(IsMainThread());
+ PP_DCHECK(state_ == WRITING_FILE);
+
+ if (bytes_written <= PP_OK) {
+ CDM_DLOG() << "Write file failed.";
+    OnError(WRITE_ERROR);
+ return;
+ }
+
+ io_offset_ += bytes_written;
+ PP_DCHECK(io_offset_ <= io_buffer_.size());
+
+ if (io_offset_ < io_buffer_.size()) {
+ WriteFile();
+ return;
+ }
+
+ io_buffer_.clear();
+ io_offset_ = 0;
+ state_ = FILE_OPENED;
+ client_->OnWriteComplete(cdm::FileIOClient::kSuccess);
+}
+
+void CdmFileIOImpl::CloseFile() {
+ PP_DCHECK(IsMainThread());
+
+ state_ = FILE_CLOSED;
+
+ file_io_.Close();
+ io_buffer_.clear();
+ io_offset_ = 0;
+ cumulative_read_buffer_.clear();
+}
+
+void CdmFileIOImpl::OnError(ErrorType error_type) {
+ // For *_WHILE_IN_USE errors, do not reset these values. Otherwise, the
+ // existing read/write operation will fail.
+ if (error_type == READ_ERROR || error_type == WRITE_ERROR) {
+ io_buffer_.clear();
+ io_offset_ = 0;
+ cumulative_read_buffer_.clear();
+ }
+ PostOnMain(callback_factory_.NewCallback(&CdmFileIOImpl::NotifyClientOfError,
+ error_type));
+}
+
+void CdmFileIOImpl::NotifyClientOfError(int32_t result,
+ ErrorType error_type) {
+ PP_DCHECK(result == PP_OK);
+ switch (error_type) {
+ case OPEN_ERROR:
+ client_->OnOpenComplete(cdm::FileIOClient::kError);
+ break;
+ case READ_ERROR:
+ client_->OnReadComplete(cdm::FileIOClient::kError, NULL, 0);
+ break;
+ case WRITE_ERROR:
+ client_->OnWriteComplete(cdm::FileIOClient::kError);
+ break;
+ case OPEN_WHILE_IN_USE:
+ client_->OnOpenComplete(cdm::FileIOClient::kInUse);
+ break;
+ case READ_WHILE_IN_USE:
+ client_->OnReadComplete(cdm::FileIOClient::kInUse, NULL, 0);
+ break;
+ case WRITE_WHILE_IN_USE:
+ client_->OnWriteComplete(cdm::FileIOClient::kInUse);
+ break;
+ }
+}
+
+} // namespace media
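
Note on the CHECK_PP_OK_COMPLETIONPENDING pattern above: every PPAPI call in
this file must return PP_OK_COMPLETIONPENDING and finish through its
completion callback. Hand-expanded (a sketch for reference, not part of the
patch), the call site in OpenFile() is equivalent to:

    do {
      int32_t result = file_io_.Open(file_ref_, file_open_flag, cb);
      // A synchronous PP_OK is unexpected when a completion callback is given.
      PP_DCHECK(result != PP_OK);
      if (result != PP_OK_COMPLETIONPENDING) {
        CDM_DLOG() << "file_io_.Open(...)" << " failed with result: " << result;
        OnError(OPEN_ERROR);  // Reports kError to |client_| asynchronously.
        return;
      }
    } while (0);
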
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_impl.h b/chromium/media/cdm/ppapi/cdm_file_io_impl.h
new file mode 100644
index 00000000000..fc8215a17a8
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_file_io_impl.h
@@ -0,0 +1,166 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_CDM_FILE_IO_IMPL_H_
+#define MEDIA_CDM_PPAPI_CDM_FILE_IO_IMPL_H_
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "ppapi/c/ppb_file_io.h"
+#include "ppapi/cpp/file_io.h"
+#include "ppapi/cpp/file_ref.h"
+#include "ppapi/cpp/instance.h"
+#include "ppapi/cpp/module.h"
+#include "ppapi/cpp/private/isolated_file_system_private.h"
+#include "ppapi/utility/completion_callback_factory.h"
+
+namespace media {
+
+// Due to PPAPI limitations, all functions must be called on the main thread.
+class CdmFileIOImpl : public cdm::FileIO {
+ public:
+ // A class that helps release |file_lock_map_|.
+ // There should be only one instance of ResourceTracker in a process. Also,
+ // ResourceTracker should outlive all CdmFileIOImpl instances.
+ class ResourceTracker {
+ public:
+ ResourceTracker();
+ ~ResourceTracker();
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ResourceTracker);
+ };
+
+ CdmFileIOImpl(cdm::FileIOClient* client, PP_Instance pp_instance);
+
+ // cdm::FileIO implementation.
+ virtual void Open(const char* file_name, uint32_t file_name_size) OVERRIDE;
+ virtual void Read() OVERRIDE;
+ virtual void Write(const uint8_t* data, uint32_t data_size) OVERRIDE;
+ virtual void Close() OVERRIDE;
+
+ private:
+ enum State {
+ FILE_UNOPENED,
+ OPENING_FILE_SYSTEM,
+ OPENING_FILE,
+ FILE_OPENED,
+ READING_FILE,
+ WRITING_FILE,
+ FILE_CLOSED
+ };
+
+ enum ErrorType {
+ OPEN_WHILE_IN_USE,
+ READ_WHILE_IN_USE,
+ WRITE_WHILE_IN_USE,
+ OPEN_ERROR,
+ READ_ERROR,
+ WRITE_ERROR
+ };
+
+ // Always use Close() to release |this| object.
+ virtual ~CdmFileIOImpl();
+
+ // |file_id_| -> |is_file_lock_acquired_| map.
+ // Design detail:
+ // - We never erase an entry from this map.
+ // - Pros: When the same file is read or written repeatedly, we don't need to
+ // insert/erase the entry repeatedly, which is expensive.
+ // - Cons: If there are a lot of one-off files used, this map will be
+ // unnecessarily large. But this should be a rare case.
+  // - Ideally we could use unordered_map for this. But unordered_map is only
+ // available in C++11.
+ typedef std::map<std::string, bool> FileLockMap;
+
+ // File lock map shared by all CdmFileIOImpl objects to prevent read/write
+ // race. A CdmFileIOImpl object tries to acquire a lock before opening a
+ // file. If the file open failed, the lock is released. Otherwise, the
+ // CdmFileIOImpl object holds the lock until Close() is called.
+ // TODO(xhwang): Investigate the following cases and make sure we are good:
+ // - This assumes all CDM instances run in the same process for a given file
+ // system.
+ // - When multiple CDM instances are running in different profiles (e.g.
+ // normal/incognito window, multiple profiles), we may be overlocking.
+ static FileLockMap* file_lock_map_;
+
+ // Sets |file_id_|. Returns false if |file_id_| cannot be set (e.g. origin URL
+ // cannot be fetched).
+ bool SetFileID();
+
+ // Acquires the file lock. Returns true if the lock is successfully acquired.
+ // After the lock is acquired, other cdm::FileIO objects in the same process
+ // and in the same origin will get kInUse when trying to open the same file.
+ bool AcquireFileLock();
+
+ // Releases the file lock so that the file can be opened by other cdm::FileIO
+ // objects.
+ void ReleaseFileLock();
+
+ void OpenFileSystem();
+ void OnFileSystemOpened(int32_t result, pp::FileSystem file_system);
+ void OpenFile();
+ void OnFileOpened(int32_t result);
+ void ReadFile();
+ void OnFileRead(int32_t bytes_read);
+ void SetLength(uint32_t length);
+ void OnLengthSet(int32_t result);
+ void WriteFile();
+ void OnFileWritten(int32_t bytes_written);
+
+ void CloseFile();
+
+ // Calls client_->OnXxxxComplete with kError asynchronously. In some cases we
+ // could actually call them synchronously, but since these errors shouldn't
+ // happen in normal cases, we are not optimizing such cases.
+ void OnError(ErrorType error_type);
+
+ // Callback to notify client of error asynchronously.
+ void NotifyClientOfError(int32_t result, ErrorType error_type);
+
+ State state_;
+
+ // Non-owning pointer.
+ cdm::FileIOClient* const client_;
+
+ const pp::InstanceHandle pp_instance_handle_;
+
+ std::string file_name_;
+
+ // A string ID that uniquely identifies a file in the user's profile.
+ // It consists of the origin of the document URL (including scheme, host and
+ // port, delimited by colons) and the |file_name_|.
+ // For example: http:example.com:8080/foo_file.txt
+ std::string file_id_;
+
+ pp::IsolatedFileSystemPrivate isolated_file_system_;
+ pp::FileSystem file_system_;
+ pp::FileIO file_io_;
+ pp::FileRef file_ref_;
+
+ pp::CompletionCallbackFactory<CdmFileIOImpl> callback_factory_;
+
+ // A temporary buffer to hold (partial) data to write or the data that has
+ // been read. The size of |io_buffer_| is always "bytes to write" or "bytes to
+ // read". Use "char" instead of "unit8_t" because PPB_FileIO uses char* for
+ // binary data read and write.
+ std::vector<char> io_buffer_;
+
+ // Offset into the file for reading/writing data. When writing data to the
+ // file, this is also the offset to the |io_buffer_|.
+ size_t io_offset_;
+
+ // Buffer to hold all read data requested. This buffer is passed to |client_|
+ // when read completes.
+ std::vector<char> cumulative_read_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmFileIOImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CDM_FILE_IO_IMPL_H_
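
A usage sketch of the class above (ExampleClient is a hypothetical name; the
callback signatures are those of cdm::FileIOClient). Every call completes
asynchronously on the main thread, and Close() both releases the per-file lock
and deletes the CdmFileIOImpl:

    class ExampleClient : public cdm::FileIOClient {
     public:
      explicit ExampleClient(cdm::FileIO* file_io) : file_io_(file_io) {}

      void Start() {
        // File names must not contain path separators; see Open().
        static const char kName[] = "example_file";
        file_io_->Open(kName, sizeof(kName) - 1);
      }

      virtual void OnOpenComplete(Status status) OVERRIDE {
        if (status == kSuccess)
          file_io_->Read();  // Reads the whole file in kReadSize chunks.
      }

      virtual void OnReadComplete(Status status,
                                  const uint8_t* data,
                                  uint32_t data_size) OVERRIDE {
        // |data| is only valid during this callback; copy it if needed.
        file_io_->Close();  // Also deletes the cdm::FileIO instance.
        file_io_ = NULL;
      }

      virtual void OnWriteComplete(Status status) OVERRIDE {}

     private:
      cdm::FileIO* file_io_;  // Not owned; released via Close().
    };
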
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_test.cc b/chromium/media/cdm/ppapi/cdm_file_io_test.cc
new file mode 100644
index 00000000000..ee6aa0b58ba
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_file_io_test.cc
@@ -0,0 +1,454 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/ppapi/cdm_file_io_test.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+
+namespace media {
+
+#define FILE_IO_DVLOG(level) DVLOG(level) << "File IO Test: "
+
+const uint8 kData[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
+const uint32 kDataSize = arraysize(kData);
+
+const uint8 kBigData[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
+ 0x00 };
+const uint32 kBigDataSize = arraysize(kBigData);
+
+// Must be > kReadSize in cdm_file_io_impl.cc.
+const uint32 kLargeDataSize = 9 * 1024 + 7;
+
+// Macros to help add test cases/steps.
+#define START_TEST_CASE(test_name) \
+ do { \
+ FileIOTest test_case(create_file_io_cb_, "FileIOTest." test_name); \
+ CREATE_FILE_IO // Create FileIO for each test case.
+
+#define ADD_TEST_STEP(type, status, data, data_size) \
+ test_case.AddTestStep(FileIOTest::type, cdm::FileIOClient::status, \
+ (data), (data_size));
+
+#define END_TEST_CASE \
+ remaining_tests_.push_back(test_case); \
+  } while (0);
+
+#define CREATE_FILE_IO \
+ ADD_TEST_STEP(ACTION_CREATE, kSuccess, NULL, 0)
+
+#define OPEN_FILE \
+ ADD_TEST_STEP(ACTION_OPEN, kSuccess, NULL, 0)
+
+#define EXPECT_FILE_OPENED(status) \
+ ADD_TEST_STEP(RESULT_OPEN, status, NULL, 0)
+
+#define READ_FILE \
+ ADD_TEST_STEP(ACTION_READ, kSuccess, NULL, 0)
+
+#define EXPECT_FILE_READ(status, data, data_size) \
+ ADD_TEST_STEP(RESULT_READ, status, data, data_size)
+
+#define WRITE_FILE(data, data_size) \
+ ADD_TEST_STEP(ACTION_WRITE, kSuccess, data, data_size)
+
+#define EXPECT_FILE_WRITTEN(status) \
+ ADD_TEST_STEP(RESULT_WRITE, status, NULL, 0)
+
+#define CLOSE_FILE \
+ ADD_TEST_STEP(ACTION_CLOSE, kSuccess, NULL, 0)
+
+// FileIOTestRunner implementation.
+
+FileIOTestRunner::FileIOTestRunner(const CreateFileIOCB& create_file_io_cb)
+ : create_file_io_cb_(create_file_io_cb),
+ total_num_tests_(0),
+ num_passed_tests_(0) {
+ // Generate |large_data_|.
+ large_data_.resize(kLargeDataSize);
+ for (size_t i = 0; i < kLargeDataSize; ++i)
+ large_data_[i] = i % kuint8max;
+
+ AddTests();
+}
+
+FileIOTestRunner::~FileIOTestRunner() {
+ if (remaining_tests_.empty())
+ return;
+
+ DCHECK_LT(num_passed_tests_, total_num_tests_);
+ FILE_IO_DVLOG(1) << "Not Finished (probably due to timeout). "
+ << num_passed_tests_ << " passed in "
+ << total_num_tests_ << " tests.";
+}
+
+// Note: Consecutive expectations (EXPECT*) can happen in any order.
+void FileIOTestRunner::AddTests() {
+ START_TEST_CASE("ReadBeforeOpeningFile")
+ READ_FILE
+ EXPECT_FILE_READ(kError, NULL, 0)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteBeforeOpeningFile")
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kError)
+ END_TEST_CASE
+
+ START_TEST_CASE("ReadBeforeFileOpened")
+ OPEN_FILE
+ READ_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ EXPECT_FILE_READ(kError, NULL, 0)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteBeforeFileOpened")
+ OPEN_FILE
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kError)
+ EXPECT_FILE_OPENED(kSuccess)
+ END_TEST_CASE
+
+ START_TEST_CASE("ReadDuringPendingRead")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ READ_FILE
+ EXPECT_FILE_READ(kInUse, NULL, 0)
+ EXPECT_FILE_READ(kSuccess, kData, kDataSize)
+ END_TEST_CASE
+
+ START_TEST_CASE("ReadDuringPendingWrite")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ READ_FILE
+ EXPECT_FILE_READ(kInUse, NULL, 0)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteDuringPendingRead")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ READ_FILE
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kInUse)
+ EXPECT_FILE_READ(kSuccess, NULL, 0)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteDuringPendingWrite")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ WRITE_FILE(kBigData, kBigDataSize)
+ EXPECT_FILE_WRITTEN(kInUse)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ END_TEST_CASE
+
+ START_TEST_CASE("ReadEmptyFile")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, NULL, 0)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteAndRead")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, kData, kDataSize)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteZeroBytes")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(NULL, 0)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, NULL, 0)
+ END_TEST_CASE
+
+ START_TEST_CASE("WriteAndReadLargeData")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(&large_data_[0], kLargeDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, &large_data_[0], kLargeDataSize)
+ END_TEST_CASE
+
+ START_TEST_CASE("OverwriteZeroBytes")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, kData, kDataSize)
+ WRITE_FILE(NULL, 0)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, NULL, 0)
+ END_TEST_CASE
+
+ START_TEST_CASE("OverwriteWithSmallerData")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kBigData, kBigDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, kData, kDataSize)
+ END_TEST_CASE
+
+ START_TEST_CASE("OverwriteWithLargerData")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ WRITE_FILE(kBigData, kBigDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, kBigData, kBigDataSize)
+ END_TEST_CASE
+
+ START_TEST_CASE("ReadExistingFile")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ CLOSE_FILE
+ CREATE_FILE_IO
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ READ_FILE
+ EXPECT_FILE_READ(kSuccess, kData, kDataSize)
+ END_TEST_CASE
+
+ START_TEST_CASE("ReopenFileInTheSameFileIO")
+ OPEN_FILE
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kError) // The second Open() failed.
+ EXPECT_FILE_OPENED(kSuccess) // The first Open() succeeded.
+ END_TEST_CASE
+
+ START_TEST_CASE("ReopenFileInSeparateFileIO")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ CREATE_FILE_IO // Create a second FileIO without closing the first one.
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kInUse)
+ END_TEST_CASE
+
+ START_TEST_CASE("CloseAfterCreation")
+ CLOSE_FILE
+ END_TEST_CASE
+
+ START_TEST_CASE("CloseDuringPendingOpen")
+ OPEN_FILE
+ CLOSE_FILE
+ END_TEST_CASE
+
+ START_TEST_CASE("CloseDuringPendingWrite")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ CLOSE_FILE
+ END_TEST_CASE
+
+ START_TEST_CASE("CloseDuringPendingRead")
+ OPEN_FILE
+ EXPECT_FILE_OPENED(kSuccess)
+ WRITE_FILE(kData, kDataSize)
+ EXPECT_FILE_WRITTEN(kSuccess)
+ READ_FILE
+ CLOSE_FILE
+ END_TEST_CASE
+}
+
+void FileIOTestRunner::RunAllTests(const CompletionCB& completion_cb) {
+ completion_cb_ = completion_cb;
+ total_num_tests_ = remaining_tests_.size();
+ RunNextTest();
+}
+
+void FileIOTestRunner::RunNextTest() {
+ if (remaining_tests_.empty()) {
+ FILE_IO_DVLOG(1) << num_passed_tests_ << " passed and "
+ << (total_num_tests_ - num_passed_tests_) << " failed in "
+ << total_num_tests_ << " tests.";
+ bool success = (num_passed_tests_ == total_num_tests_);
+ base::ResetAndReturn(&completion_cb_).Run(success);
+ return;
+ }
+
+ remaining_tests_.front().Run(
+ base::Bind(&FileIOTestRunner::OnTestComplete, base::Unretained(this)));
+}
+
+void FileIOTestRunner::OnTestComplete(bool success) {
+ if (success)
+ num_passed_tests_++;
+ remaining_tests_.pop_front();
+ RunNextTest();
+}
+
+// FileIOTest implementation.
+
+FileIOTest::FileIOTest(const CreateFileIOCB& create_file_io_cb,
+ const std::string& test_name)
+ : create_file_io_cb_(create_file_io_cb),
+ test_name_(test_name) {}
+
+FileIOTest::~FileIOTest() {}
+
+void FileIOTest::AddTestStep(
+ StepType type, Status status, const uint8* data, uint32 data_size) {
+ test_steps_.push_back(TestStep(type, status, data, data_size));
+}
+
+void FileIOTest::Run(const CompletionCB& completion_cb) {
+ FILE_IO_DVLOG(3) << "Run " << test_name_;
+ completion_cb_ = completion_cb;
+ DCHECK(!test_steps_.empty() && !IsResult(test_steps_.front()));
+ RunNextStep();
+}
+
+void FileIOTest::OnOpenComplete(Status status) {
+ OnResult(TestStep(RESULT_OPEN, status, NULL, 0));
+}
+
+void FileIOTest::OnReadComplete(Status status,
+ const uint8_t* data,
+ uint32_t data_size) {
+ OnResult(TestStep(RESULT_READ, status, data, data_size));
+}
+
+void FileIOTest::OnWriteComplete(Status status) {
+ OnResult(TestStep(RESULT_WRITE, status, NULL, 0));
+}
+
+bool FileIOTest::IsResult(const TestStep& test_step) {
+ switch (test_step.type) {
+ case RESULT_OPEN:
+ case RESULT_READ:
+ case RESULT_WRITE:
+ return true;
+ case ACTION_CREATE:
+ case ACTION_OPEN:
+ case ACTION_READ:
+ case ACTION_WRITE:
+ case ACTION_CLOSE:
+ return false;
+ }
+ NOTREACHED();
+ return false;
+}
+
+bool FileIOTest::MatchesResult(const TestStep& a, const TestStep& b) {
+ DCHECK(IsResult(a) && IsResult(b));
+ if (a.type != b.type || a.status != b.status)
+ return false;
+
+ if (a.type != RESULT_READ || a.status != cdm::FileIOClient::kSuccess)
+ return true;
+
+  return (a.data_size == b.data_size &&
+          std::equal(a.data, a.data + a.data_size, b.data));
+}
+
+void FileIOTest::RunNextStep() {
+ // Run all actions in the current action group.
+ while (!test_steps_.empty()) {
+ // Start to wait for test results when the next step is a test result.
+ if (IsResult(test_steps_.front()))
+ return;
+
+ TestStep test_step = test_steps_.front();
+ test_steps_.pop_front();
+
+    cdm::FileIO* file_io =
+        file_io_stack_.empty() ? NULL : file_io_stack_.top();
+
+ switch (test_step.type) {
+ case ACTION_CREATE:
+ file_io = create_file_io_cb_.Run(this);
+ if (!file_io) {
+ FILE_IO_DVLOG(3) << "Cannot create FileIO object.";
+ OnTestComplete(false);
+ return;
+ }
+ file_io_stack_.push(file_io);
+ break;
+ case ACTION_OPEN:
+ // Use test name as the test file name.
+ file_io->Open(test_name_.data(), test_name_.size());
+ break;
+ case ACTION_READ:
+ file_io->Read();
+ break;
+ case ACTION_WRITE:
+ file_io->Write(test_step.data, test_step.data_size);
+ break;
+ case ACTION_CLOSE:
+ file_io->Close();
+ file_io_stack_.pop();
+ break;
+ default:
+ NOTREACHED();
+ }
+ }
+
+ OnTestComplete(true);
+}
+
+void FileIOTest::OnResult(const TestStep& result) {
+ DCHECK(IsResult(result));
+ if (!CheckResult(result)) {
+ OnTestComplete(false);
+ return;
+ }
+
+ RunNextStep();
+}
+
+bool FileIOTest::CheckResult(const TestStep& result) {
+ if (test_steps_.empty() || !IsResult(test_steps_.front()))
+ return false;
+
+ // If there are multiple results expected, the order does not matter.
+ std::list<TestStep>::iterator iter = test_steps_.begin();
+ for (; iter != test_steps_.end(); ++iter) {
+ if (!IsResult(*iter))
+ return false;
+
+ if (!MatchesResult(*iter, result))
+ continue;
+
+ test_steps_.erase(iter);
+ return true;
+ }
+
+ return false;
+}
+
+void FileIOTest::OnTestComplete(bool success) {
+ while (!file_io_stack_.empty()) {
+ file_io_stack_.top()->Close();
+ file_io_stack_.pop();
+ }
+ FILE_IO_DVLOG(3) << test_name_ << (success ? " PASSED" : " FAILED");
+ base::ResetAndReturn(&completion_cb_).Run(success);
+}
+
+} // namespace media
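
For reference, a hand-expansion of the macros above (a sketch, not part of the
patch): the "WriteAndRead" case is equivalent to the following.

    do {
      FileIOTest test_case(create_file_io_cb_, "FileIOTest.WriteAndRead");
      test_case.AddTestStep(FileIOTest::ACTION_CREATE,   // CREATE_FILE_IO
                            cdm::FileIOClient::kSuccess, NULL, 0);
      test_case.AddTestStep(FileIOTest::ACTION_OPEN,     // OPEN_FILE
                            cdm::FileIOClient::kSuccess, NULL, 0);
      test_case.AddTestStep(FileIOTest::RESULT_OPEN,     // EXPECT_FILE_OPENED
                            cdm::FileIOClient::kSuccess, NULL, 0);
      test_case.AddTestStep(FileIOTest::ACTION_WRITE,    // WRITE_FILE
                            cdm::FileIOClient::kSuccess, kData, kDataSize);
      test_case.AddTestStep(FileIOTest::RESULT_WRITE,    // EXPECT_FILE_WRITTEN
                            cdm::FileIOClient::kSuccess, NULL, 0);
      test_case.AddTestStep(FileIOTest::ACTION_READ,     // READ_FILE
                            cdm::FileIOClient::kSuccess, NULL, 0);
      test_case.AddTestStep(FileIOTest::RESULT_READ,     // EXPECT_FILE_READ
                            cdm::FileIOClient::kSuccess, kData, kDataSize);
      remaining_tests_.push_back(test_case);             // END_TEST_CASE
    } while (0);
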
diff --git a/chromium/media/cdm/ppapi/cdm_file_io_test.h b/chromium/media/cdm/ppapi/cdm_file_io_test.h
new file mode 100644
index 00000000000..3e0060cc3bf
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_file_io_test.h
@@ -0,0 +1,157 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_CDM_FILE_IO_TEST_H_
+#define MEDIA_CDM_PPAPI_CDM_FILE_IO_TEST_H_
+
+#include <list>
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+
+namespace media {
+
+typedef base::Callback<void(bool success)> CompletionCB;
+typedef base::Callback<cdm::FileIO*(cdm::FileIOClient* client)> CreateFileIOCB;
+
+// A customizable test class that tests cdm::FileIO implementation.
+// - To create a test, call AddTestStep() to add a test step. A test step can be
+// either an action to make (use ACTION_* types), or a result to verify (use
+// RESULT_* types).
+// - To run the test, simply call Run() with a completion callback. The result
+// will be reported in the completion callback when the test is finished.
+//
+// The following rules apply to the test steps:
+// - Test steps are ordered (with the exception that results within a result
+//   group are not ordered).
+// - Consecutive action steps form an "action group". Consecutive result
+//   steps form a "result group". An action group is followed by a result
+// group and vice versa.
+// - A test must start with an action group.
+// - To process an action group, the test runner runs (and clears) all steps
+// in the group in the order they were added. Then it waits for test
+// results.
+// - When a cdm::FileIOClient method is called, the test runner compares the
+// result with all results in the current result group. If no result in that
+// group matches the test result, the test fails. Otherwise, the matching
+// result is cleared from the group. If the group is empty, the test runner
+// starts to process the next action group. Otherwise, the test runner keeps
+// waiting for the next test result.
+// - After all steps are cleared, the test passes.
+class FileIOTest : public cdm::FileIOClient {
+ public:
+ // Types of allowed test steps:
+ // - ACTION_* specifies the next step to test.
+ // - RESULT_* specifies the expected result of the previous step(s).
+ enum StepType {
+ ACTION_CREATE,
+    ACTION_OPEN,  // |test_name_| will be used as the file name to open.
+ RESULT_OPEN,
+ ACTION_READ,
+ RESULT_READ,
+ ACTION_WRITE,
+ RESULT_WRITE,
+ ACTION_CLOSE // If ACTION_CLOSE is not specified, FileIO::Close() will be
+ // automatically called at the end of the test.
+ };
+
+ FileIOTest(const CreateFileIOCB& create_file_io_cb,
+ const std::string& test_name);
+ ~FileIOTest();
+
+  // Adds a test step to this test. |this| object doesn't take ownership of
+  // |data|, which must remain valid throughout the lifetime of |this| object.
+ void AddTestStep(
+ StepType type, Status status, const uint8* data, uint32 data_size);
+
+ // Runs this test case and returns the test result through |completion_cb|.
+ void Run(const CompletionCB& completion_cb);
+
+ private:
+ struct TestStep {
+    // |this| object doesn't take ownership of |data|, which must remain
+    // valid throughout the lifetime of |this| object.
+ TestStep(StepType type, Status status, const uint8* data, uint32 data_size)
+ : type(type), status(status), data(data), data_size(data_size) {}
+
+ StepType type;
+
+ // Expected status for RESULT* steps.
+ Status status;
+
+ // Data to write in ACTION_WRITE, or read data in RESULT_READ.
+ const uint8* data;
+ uint32 data_size;
+ };
+
+ // Returns whether |test_step| is a RESULT_* step.
+ static bool IsResult(const TestStep& test_step);
+
+ // Returns whether two results match.
+ static bool MatchesResult(const TestStep& a, const TestStep& b);
+
+ // cdm::FileIOClient implementation.
+ virtual void OnOpenComplete(Status status) OVERRIDE;
+ virtual void OnReadComplete(Status status,
+ const uint8_t* data,
+ uint32_t data_size) OVERRIDE;
+ virtual void OnWriteComplete(Status status) OVERRIDE;
+
+ // Runs the next step in this test case.
+ void RunNextStep();
+
+ void OnResult(const TestStep& result);
+
+ // Checks whether the test result matches this step. This can only be called
+ // when this step is a RESULT_* step.
+ bool CheckResult(const TestStep& result);
+
+ void OnTestComplete(bool success);
+
+ CreateFileIOCB create_file_io_cb_;
+ CompletionCB completion_cb_;
+
+ std::string test_name_;
+ std::list<TestStep> test_steps_;
+
+  // All opened cdm::FileIO objects. Multiple objects are kept open so that
+  // concurrent access to the same file by several cdm::FileIO objects can be
+  // tested.
+ // In the current implementation, all ACTION_* are performed on the latest
+ // opened cdm::FileIO object, hence the stack.
+ std::stack<cdm::FileIO*> file_io_stack_;
+};
+
+// Tests cdm::FileIO implementation.
+class FileIOTestRunner {
+ public:
+ explicit FileIOTestRunner(const CreateFileIOCB& create_file_io_cb);
+ ~FileIOTestRunner();
+
+ void AddTests();
+
+  // Runs all tests. When all tests complete, the result is reported through
+  // |completion_cb|.
+ void RunAllTests(const CompletionCB& completion_cb);
+
+ private:
+ void OnTestComplete(bool success);
+ void RunNextTest();
+
+ CreateFileIOCB create_file_io_cb_;
+ CompletionCB completion_cb_;
+ std::list<FileIOTest> remaining_tests_;
+ std::vector<uint8> large_data_;
+ size_t total_num_tests_; // Total number of tests.
+ size_t num_passed_tests_; // Number of passed tests.
+
+  DISALLOW_COPY_AND_ASSIGN(FileIOTestRunner);
+};
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CDM_FILE_IO_TEST_H_
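
A minimal driver sketch for FileIOTestRunner (StartFileIOTests and
OnFileIOTestsDone are hypothetical names; the runner is deliberately leaked
for brevity because it must outlive the asynchronous run):

    #include "base/bind.h"

    void OnFileIOTestsDone(bool success) {
      // |success| is true only if every test case passed.
    }

    void StartFileIOTests(const CreateFileIOCB& create_file_io_cb) {
      // The constructor already populated the test list via AddTests().
      FileIOTestRunner* runner = new FileIOTestRunner(create_file_io_cb);
      runner->RunAllTests(base::Bind(&OnFileIOTestsDone));
    }
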
diff --git a/chromium/media/cdm/ppapi/cdm_helpers.cc b/chromium/media/cdm/ppapi/cdm_helpers.cc
index 36b95021f8c..62f93a009dd 100644
--- a/chromium/media/cdm/ppapi/cdm_helpers.cc
+++ b/chromium/media/cdm/ppapi/cdm_helpers.cc
@@ -4,6 +4,7 @@
#include "media/cdm/ppapi/cdm_helpers.h"
+#include <algorithm>
#include <utility>
#include "base/basictypes.h"
@@ -20,6 +21,61 @@
namespace media {
+// static
+PpbBuffer* PpbBuffer::Create(const pp::Buffer_Dev& buffer,
+ uint32_t buffer_id,
+ PpbBufferAllocator* allocator) {
+ PP_DCHECK(buffer.data());
+ PP_DCHECK(buffer.size());
+ PP_DCHECK(buffer_id);
+ PP_DCHECK(allocator);
+ return new PpbBuffer(buffer, buffer_id, allocator);
+}
+
+void PpbBuffer::Destroy() {
+ delete this;
+}
+
+uint32_t PpbBuffer::Capacity() const {
+ return buffer_.size();
+}
+
+uint8_t* PpbBuffer::Data() {
+ return static_cast<uint8_t*>(buffer_.data());
+}
+
+void PpbBuffer::SetSize(uint32_t size) {
+ PP_DCHECK(size <= Capacity());
+ if (size > Capacity()) {
+ size_ = 0;
+ return;
+ }
+
+ size_ = size;
+}
+
+pp::Buffer_Dev PpbBuffer::TakeBuffer() {
+ PP_DCHECK(!buffer_.is_null());
+ pp::Buffer_Dev buffer;
+ std::swap(buffer, buffer_);
+ buffer_id_ = 0;
+ size_ = 0;
+ return buffer;
+}
+
+PpbBuffer::PpbBuffer(pp::Buffer_Dev buffer,
+ uint32_t buffer_id,
+ PpbBufferAllocator* allocator)
+ : buffer_(buffer), buffer_id_(buffer_id), size_(0), allocator_(allocator) {
+}
+
+PpbBuffer::~PpbBuffer() {
+ PP_DCHECK(!buffer_id_ == buffer_.is_null());
+ // If still owning the |buffer_|, release it in the |allocator_|.
+ if (buffer_id_)
+ allocator_->Release(buffer_id_);
+}
+
cdm::Buffer* PpbBufferAllocator::Allocate(uint32_t capacity) {
PP_DCHECK(pp::Module::Get()->core()->IsMainThread());
@@ -46,7 +102,7 @@ cdm::Buffer* PpbBufferAllocator::Allocate(uint32_t capacity) {
allocated_buffers_.insert(std::make_pair(buffer_id, buffer));
- return PpbBuffer::Create(buffer, buffer_id);
+ return PpbBuffer::Create(buffer, buffer_id, this);
}
void PpbBufferAllocator::Release(uint32_t buffer_id) {
diff --git a/chromium/media/cdm/ppapi/cdm_helpers.h b/chromium/media/cdm/ppapi/cdm_helpers.h
index cb9203e105a..1ee579b8f0f 100644
--- a/chromium/media/cdm/ppapi/cdm_helpers.h
+++ b/chromium/media/cdm/ppapi/cdm_helpers.h
@@ -20,6 +20,8 @@
namespace media {
+class PpbBufferAllocator;
+
// cdm::Buffer implementation that provides access to memory owned by a
// pp::Buffer_Dev.
// This class holds a reference to the Buffer_Dev throughout its lifetime.
@@ -27,48 +29,37 @@ namespace media {
// pp::Buffer_Dev and PPB_Buffer_Dev.
class PpbBuffer : public cdm::Buffer {
public:
- static PpbBuffer* Create(const pp::Buffer_Dev& buffer, uint32_t buffer_id) {
- PP_DCHECK(buffer.data());
- PP_DCHECK(buffer.size());
- PP_DCHECK(buffer_id);
- return new PpbBuffer(buffer, buffer_id);
- }
+ static PpbBuffer* Create(const pp::Buffer_Dev& buffer, uint32_t buffer_id,
+ PpbBufferAllocator* allocator);
// cdm::Buffer implementation.
- virtual void Destroy() OVERRIDE { delete this; }
-
- virtual uint32_t Capacity() const OVERRIDE { return buffer_.size(); }
-
- virtual uint8_t* Data() OVERRIDE {
- return static_cast<uint8_t*>(buffer_.data());
- }
-
- virtual void SetSize(uint32_t size) OVERRIDE {
- PP_DCHECK(size <= Capacity());
- if (size > Capacity()) {
- size_ = 0;
- return;
- }
-
- size_ = size;
- }
-
+ virtual void Destroy() OVERRIDE;
+ virtual uint32_t Capacity() const OVERRIDE;
+ virtual uint8_t* Data() OVERRIDE;
+ virtual void SetSize(uint32_t size) OVERRIDE;
virtual uint32_t Size() const OVERRIDE { return size_; }
- pp::Buffer_Dev buffer_dev() const { return buffer_; }
+ // Takes the |buffer_| from this class and returns it.
+ // Note: The caller must ensure |allocator->Release()| is called later so that
+ // the buffer can be reused by the allocator.
+ // Since pp::Buffer_Dev is ref-counted, the caller now holds one reference to
+ // the buffer and this class holds no reference. Note that other references
+ // may still exist. For example, PpbBufferAllocator always holds a reference
+ // to all allocated buffers.
+ pp::Buffer_Dev TakeBuffer();
uint32_t buffer_id() const { return buffer_id_; }
private:
- PpbBuffer(pp::Buffer_Dev buffer, uint32_t buffer_id)
- : buffer_(buffer),
- buffer_id_(buffer_id),
- size_(0) {}
- virtual ~PpbBuffer() {}
+ PpbBuffer(pp::Buffer_Dev buffer,
+ uint32_t buffer_id,
+ PpbBufferAllocator* allocator);
+ virtual ~PpbBuffer();
pp::Buffer_Dev buffer_;
uint32_t buffer_id_;
uint32_t size_;
+ PpbBufferAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(PpbBuffer);
};
@@ -189,8 +180,7 @@ class VideoFrameImpl : public cdm::VideoFrame {
DISALLOW_COPY_AND_ASSIGN(VideoFrameImpl);
};
-class AudioFramesImpl : public cdm::AudioFrames_1,
- public cdm::AudioFrames_2 {
+class AudioFramesImpl : public cdm::AudioFrames_2 {
public:
AudioFramesImpl() : buffer_(NULL), format_(cdm::kUnknownAudioFormat) {}
virtual ~AudioFramesImpl() {
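
The TakeBuffer() contract documented above can be illustrated with a
hypothetical caller (HandOffBuffer is an assumed name, not part of the patch):

    void HandOffBuffer(PpbBuffer* ppb_buffer, PpbBufferAllocator* allocator) {
      uint32_t buffer_id = ppb_buffer->buffer_id();
      // One pp::Buffer_Dev reference moves to the caller; |ppb_buffer| keeps
      // none and forgets its id.
      pp::Buffer_Dev buffer = ppb_buffer->TakeBuffer();
      ppb_buffer->Destroy();  // Safe: the destructor no longer calls Release().
      // ... hand |buffer| to the host; ref-counting keeps the memory alive ...
      allocator->Release(buffer_id);  // Later: allow the allocator to reuse it.
    }
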
diff --git a/chromium/media/cdm/ppapi/cdm_wrapper.h b/chromium/media/cdm/ppapi/cdm_wrapper.h
index d827336a8c6..665b6b68dad 100644
--- a/chromium/media/cdm/ppapi/cdm_wrapper.h
+++ b/chromium/media/cdm/ppapi/cdm_wrapper.h
@@ -35,16 +35,6 @@ namespace media {
// (just a shim layer in most cases), everything is done in this header file.
class CdmWrapper {
public:
- // CDM_1 and CDM_2 methods AddKey() and CancelKeyRequest() may require
- // callbacks to fire. Use this enum to indicate the additional calls required.
- // TODO(jrummell): Remove return value once CDM_1 and CDM_2 are no longer
- // supported.
- enum Result {
- NO_ACTION,
- CALL_KEY_ADDED,
- CALL_KEY_ERROR
- };
-
static CdmWrapper* Create(const char* key_system,
uint32_t key_system_size,
GetCdmHostFunc get_cdm_host_func,
@@ -52,15 +42,23 @@ class CdmWrapper {
virtual ~CdmWrapper() {};
- virtual void CreateSession(uint32_t session_id,
- const char* type,
- uint32_t type_size,
+ virtual void CreateSession(uint32_t promise_id,
+ const char* init_data_type,
+ uint32_t init_data_type_size,
const uint8_t* init_data,
- uint32_t init_data_size) = 0;
- virtual Result UpdateSession(uint32_t session_id,
- const uint8_t* response,
- uint32_t response_size) = 0;
- virtual Result ReleaseSession(uint32_t session_id) = 0;
+ uint32_t init_data_size,
+ cdm::SessionType session_type) = 0;
+ virtual void LoadSession(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) = 0;
+ virtual void UpdateSession(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size,
+ const uint8_t* response,
+ uint32_t response_size) = 0;
+ virtual void ReleaseSession(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) = 0;
virtual void TimerExpired(void* context) = 0;
virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
cdm::DecryptedBlock* decrypted_buffer) = 0;
@@ -82,42 +80,23 @@ class CdmWrapper {
uint32_t link_mask,
uint32_t output_protection_mask) = 0;
- // ContentDecryptionModule_1 and ContentDecryptionModule_2 interface methods
- // AddKey() and CancelKeyRequest() (older versions of UpdateSession() and
- // ReleaseSession(), respectively) pass in the web_session_id rather than the
- // session_id. As well, Host_1 and Host_2 callbacks SendKeyMessage() and
- // SendKeyError() include the web_session_id, but the actual callbacks need
- // session_id.
- //
- // The following functions maintain the session_id <-> web_session_id mapping.
- // These can be removed once _1 and _2 interfaces are no longer supported.
-
- // Determine the corresponding session_id for |web_session_id|.
- virtual uint32_t LookupSessionId(const std::string& web_session_id) = 0;
-
- // Determine the corresponding session_id for |session_id|.
- virtual const std::string LookupWebSessionId(uint32_t session_id) = 0;
-
- // Map between session_id and web_session_id.
- // TODO(jrummell): The following can be removed once CDM_1 and CDM_2 are
- // no longer supported.
- typedef std::map<uint32_t, std::string> SessionMap;
- SessionMap session_map_;
-
- static const uint32_t kInvalidSessionId = 0;
-
- // As the response from PrefixedGenerateKeyRequest() may be synchronous or
- // asynchronous, keep track of the current request during the call to handle
- // synchronous responses or errors. If no response received, add this request
- // to a queue and assume that the subsequent responses come back in the order
- // issued.
- // TODO(jrummell): Remove once all supported CDM host interfaces support
- // session_id.
- uint32_t current_key_request_session_id_;
- std::queue<uint32_t> pending_key_request_session_ids_;
+  // Helper functions for the cdm::Host_4 methods. Calls to CreateSession(),
+ // LoadSession(), UpdateSession(), and ReleaseSession() pass in promise ids,
+ // but the CDM interface needs session ids. For create and load, we need to
+ // create a new session_id to pass to the CDM. For update and release, we need
+ // to look up |web_session_id| and convert it into the existing |session_id|.
+ // Since the callbacks don't come through this interface, cdm_adapter needs to
+ // create the mapping (and delete it on release).
+ // TODO(jrummell): Remove these once Host_4 interface is removed.
+ virtual uint32_t LookupPromiseId(uint32_t session_id) = 0;
+ virtual void AssignWebSessionId(uint32_t session_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) = 0;
+ virtual std::string LookupWebSessionId(uint32_t session_id) = 0;
+ virtual void DropWebSessionId(std::string web_session_id) = 0;
protected:
- CdmWrapper() : current_key_request_session_id_(kInvalidSessionId) {}
+ CdmWrapper() {}
private:
DISALLOW_COPY_AND_ASSIGN(CdmWrapper);
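
To make the promise/session-id plumbing above concrete, a sketch of the
adapter-side handler (OnSessionCreatedFromHost4Cdm is a hypothetical name;
only the CdmWrapper methods come from this patch):

    void OnSessionCreatedFromHost4Cdm(CdmWrapper* wrapper,
                                      uint32_t session_id,
                                      const char* web_session_id,
                                      uint32_t web_session_id_length) {
      // Recover (and erase) the promise registered when CreateSession() or
      // LoadSession() generated |session_id|.
      uint32_t promise_id = wrapper->LookupPromiseId(session_id);
      // Remember the mapping so UpdateSession()/ReleaseSession() can translate
      // |web_session_id| back to |session_id| until DropWebSessionId().
      wrapper->AssignWebSessionId(session_id, web_session_id,
                                  web_session_id_length);
      // The adapter would now resolve the original promise, e.g. via the
      // Host_5-style callback OnResolveNewSessionPromise(promise_id, ...).
      CDM_DLOG() << "Session created; resolving promise " << promise_id;
    }
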
@@ -147,24 +126,42 @@ class CdmWrapperImpl : public CdmWrapper {
cdm_->Destroy();
}
- virtual void CreateSession(uint32_t session_id,
- const char* type,
- uint32_t type_size,
+ virtual void CreateSession(uint32_t promise_id,
+ const char* init_data_type,
+ uint32_t init_data_type_size,
const uint8_t* init_data,
- uint32_t init_data_size) OVERRIDE {
- cdm_->CreateSession(session_id, type, type_size, init_data, init_data_size);
+ uint32_t init_data_size,
+ cdm::SessionType session_type) OVERRIDE {
+ cdm_->CreateSession(promise_id,
+ init_data_type,
+ init_data_type_size,
+ init_data,
+ init_data_size,
+ session_type);
+ }
+
+ virtual void LoadSession(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) OVERRIDE {
+ cdm_->LoadSession(promise_id, web_session_id, web_session_id_size);
}
- virtual Result UpdateSession(uint32_t session_id,
- const uint8_t* response,
- uint32_t response_size) OVERRIDE {
- cdm_->UpdateSession(session_id, response, response_size);
- return NO_ACTION;
+ virtual void UpdateSession(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size,
+ const uint8_t* response,
+ uint32_t response_size) OVERRIDE {
+ cdm_->UpdateSession(promise_id,
+ web_session_id,
+ web_session_id_size,
+ response,
+ response_size);
}
- virtual Result ReleaseSession(uint32_t session_id) OVERRIDE {
- cdm_->ReleaseSession(session_id);
- return NO_ACTION;
+ virtual void ReleaseSession(uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) OVERRIDE {
+ cdm_->ReleaseSession(promise_id, web_session_id, web_session_id_size);
}
virtual void TimerExpired(void* context) OVERRIDE {
@@ -217,223 +214,125 @@ class CdmWrapperImpl : public CdmWrapper {
cdm_->OnQueryOutputProtectionStatus(link_mask, output_protection_mask);
}
- uint32_t LookupSessionId(const std::string& web_session_id) {
- for (SessionMap::iterator it = session_map_.begin();
- it != session_map_.end();
- ++it) {
- if (it->second == web_session_id)
- return it->first;
- }
-
- // There is no entry in the map; assume it came from the current
- // PrefixedGenerateKeyRequest() call (if possible). If no current request,
- // assume it came from the oldest PrefixedGenerateKeyRequest() call.
- uint32_t session_id = current_key_request_session_id_;
- if (current_key_request_session_id_) {
- // Only 1 response is allowed for the current
- // PrefixedGenerateKeyRequest().
- current_key_request_session_id_ = kInvalidSessionId;
- } else {
- PP_DCHECK(!pending_key_request_session_ids_.empty());
- session_id = pending_key_request_session_ids_.front();
- pending_key_request_session_ids_.pop();
- }
-
- // If this is a valid |session_id|, add it to the list. Otherwise, avoid
- // adding empty string as a mapping to prevent future calls with an empty
- // string from using the wrong session_id.
- if (!web_session_id.empty()) {
- PP_DCHECK(session_map_.find(session_id) == session_map_.end());
- session_map_[session_id] = web_session_id;
- }
-
- return session_id;
+ uint32_t CreateSessionId() {
+ return next_session_id_++;
}
- const std::string LookupWebSessionId(uint32_t session_id) {
- // Session may not exist if error happens during CreateSession().
- SessionMap::iterator it = session_map_.find(session_id);
- return (it != session_map_.end()) ? it->second : std::string();
+ void RegisterPromise(uint32_t session_id, uint32_t promise_id) {
+ PP_DCHECK(promise_to_session_id_map_.find(session_id) ==
+ promise_to_session_id_map_.end());
+ promise_to_session_id_map_.insert(std::make_pair(session_id, promise_id));
}
- private:
- CdmWrapperImpl(CdmInterface* cdm) : cdm_(cdm) {
- PP_DCHECK(cdm_);
+ virtual uint32_t LookupPromiseId(uint32_t session_id) {
+ std::map<uint32_t, uint32_t>::iterator it =
+ promise_to_session_id_map_.find(session_id);
+ if (it == promise_to_session_id_map_.end())
+ return 0;
+ uint32_t promise_id = it->second;
+ promise_to_session_id_map_.erase(it);
+ return promise_id;
}
- CdmInterface* cdm_;
-
- DISALLOW_COPY_AND_ASSIGN(CdmWrapperImpl);
-};
-
-// For ContentDecryptionModule_1 and ContentDecryptionModule_2,
-// CreateSession(), UpdateSession(), and ReleaseSession() call methods
-// are incompatible with ContentDecryptionModule_3. Use the following
-// templated functions to handle this.
-
-template <class CdmInterface>
-void PrefixedGenerateKeyRequest(CdmWrapper* wrapper,
- CdmInterface* cdm,
- uint32_t session_id,
- const char* type,
- uint32_t type_size,
- const uint8_t* init_data,
- uint32_t init_data_size) {
- // As it is possible for CDMs to reply synchronously during the call to
- // GenerateKeyRequest(), keep track of |session_id|.
- wrapper->current_key_request_session_id_ = session_id;
-
- cdm::Status status =
- cdm->GenerateKeyRequest(type, type_size, init_data, init_data_size);
- PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess) {
- // If GenerateKeyRequest() failed, no subsequent asynchronous replies
- // will be sent. Verify that a response was sent synchronously.
- PP_DCHECK(wrapper->current_key_request_session_id_ ==
- CdmWrapper::kInvalidSessionId);
- wrapper->current_key_request_session_id_ = CdmWrapper::kInvalidSessionId;
- return;
+ virtual void AssignWebSessionId(uint32_t session_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) {
+ web_session_to_session_id_map_.insert(std::make_pair(
+ std::string(web_session_id, web_session_id_size), session_id));
}
- if (wrapper->current_key_request_session_id_) {
- // If this request is still pending (SendKeyMessage() or SendKeyError()
- // not called synchronously), add |session_id| to the end of the queue.
- // Without CDM support, it is impossible to match SendKeyMessage()
- // (or SendKeyError()) responses to the |session_id|. Doing the best
- // we can by keeping track of this in a queue, and assuming the responses
- // come back in order.
- wrapper->pending_key_request_session_ids_.push(session_id);
- wrapper->current_key_request_session_id_ = CdmWrapper::kInvalidSessionId;
+ uint32_t LookupSessionId(std::string web_session_id) {
+ return web_session_to_session_id_map_.find(web_session_id)->second;
}
-}
-template <class CdmInterface>
-CdmWrapper::Result PrefixedAddKey(CdmWrapper* wrapper,
- CdmInterface* cdm,
- uint32_t session_id,
- const uint8_t* response,
- uint32_t response_size) {
- const std::string web_session_id = wrapper->LookupWebSessionId(session_id);
- if (web_session_id.empty()) {
- // Possible if UpdateSession() called before CreateSession().
- return CdmWrapper::CALL_KEY_ERROR;
+ virtual std::string LookupWebSessionId(uint32_t session_id) {
+ std::map<std::string, uint32_t>::iterator it;
+ for (it = web_session_to_session_id_map_.begin();
+ it != web_session_to_session_id_map_.end();
+ ++it) {
+ if (it->second == session_id)
+ return it->first;
+ }
+ PP_NOTREACHED();
+ return std::string();
}
- // CDM_1 and CDM_2 accept initdata, which is no longer needed.
- // In it's place pass in NULL.
- cdm::Status status = cdm->AddKey(web_session_id.data(), web_session_id.size(),
- response, response_size,
- NULL, 0);
- PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess) {
- // Some CDMs using Host_1/2 don't call keyerror, so send one.
- return CdmWrapper::CALL_KEY_ERROR;
+ virtual void DropWebSessionId(std::string web_session_id) {
+ web_session_to_session_id_map_.erase(web_session_id);
}
- return CdmWrapper::CALL_KEY_ADDED;
-}
-
-template <class CdmInterface>
-CdmWrapper::Result PrefixedCancelKeyRequest(CdmWrapper* wrapper,
- CdmInterface* cdm,
- uint32_t session_id) {
- const std::string web_session_id = wrapper->LookupWebSessionId(session_id);
- if (web_session_id.empty()) {
- // Possible if ReleaseSession() called before CreateSession().
- return CdmWrapper::CALL_KEY_ERROR;
+ private:
+ CdmWrapperImpl(CdmInterface* cdm) : cdm_(cdm), next_session_id_(100) {
+ PP_DCHECK(cdm_);
}
- wrapper->session_map_.erase(session_id);
- cdm::Status status =
- cdm->CancelKeyRequest(web_session_id.data(), web_session_id.size());
+ CdmInterface* cdm_;
- PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess) {
- // Some CDMs using Host_1/2 don't call keyerror, so send one.
- return CdmWrapper::CALL_KEY_ERROR;
- }
+ std::map<uint32_t, uint32_t> promise_to_session_id_map_;
+ uint32_t next_session_id_;
+ std::map<std::string, uint32_t> web_session_to_session_id_map_;
- return CdmWrapper::NO_ACTION;
-}
+ DISALLOW_COPY_AND_ASSIGN(CdmWrapperImpl);
+};
-// Specializations for ContentDecryptionModule_1.
+// Specializations for cdm::ContentDecryptionModule_4. Calls to
+// CreateSession(), LoadSession(), UpdateSession(), and ReleaseSession() pass
+// in promise ids, but the CDM interface needs session ids. For create and
+// load, we need to create a new session_id to pass to the CDM. For update and
+// release, we need to look up |web_session_id| and convert it into the
+// existing |session_id|. Since the callbacks don't come through this
+// interface, cdm_adapter needs to create the mapping (and delete it on
+// release).
+// TODO(jrummell): Remove these once Host_4 interface is removed.
template <>
-void CdmWrapperImpl<cdm::ContentDecryptionModule_1>::CreateSession(
- uint32_t session_id,
- const char* type,
- uint32_t type_size,
+void CdmWrapperImpl<cdm::ContentDecryptionModule_4>::CreateSession(
+ uint32_t promise_id,
+ const char* init_data_type,
+ uint32_t init_data_type_size,
const uint8_t* init_data,
- uint32_t init_data_size) {
- PrefixedGenerateKeyRequest(
- this, cdm_, session_id, type, type_size, init_data, init_data_size);
+ uint32_t init_data_size,
+ cdm::SessionType session_type) {
+ uint32_t session_id = CreateSessionId();
+ RegisterPromise(session_id, promise_id);
+ cdm_->CreateSession(session_id,
+ init_data_type,
+ init_data_type_size,
+ init_data,
+ init_data_size);
}
template <>
-CdmWrapper::Result CdmWrapperImpl<
- cdm::ContentDecryptionModule_1>::UpdateSession(uint32_t session_id,
- const uint8_t* response,
- uint32_t response_size) {
- return PrefixedAddKey(this, cdm_, session_id, response, response_size);
+void CdmWrapperImpl<cdm::ContentDecryptionModule_4>::LoadSession(
+ uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) {
+ uint32_t session_id = CreateSessionId();
+ RegisterPromise(session_id, promise_id);
+ cdm_->LoadSession(session_id, web_session_id, web_session_id_size);
}
template <>
-CdmWrapper::Result CdmWrapperImpl<
- cdm::ContentDecryptionModule_1>::ReleaseSession(uint32_t session_id) {
- return PrefixedCancelKeyRequest(this, cdm_, session_id);
-}
-
-template <> void CdmWrapperImpl<cdm::ContentDecryptionModule_1>::
- OnPlatformChallengeResponse(
- const cdm::PlatformChallengeResponse& response) {
- PP_NOTREACHED();
-}
-
-template <> void CdmWrapperImpl<cdm::ContentDecryptionModule_1>::
- OnQueryOutputProtectionStatus(uint32_t link_mask,
- uint32_t output_protection_mask) {
- PP_NOTREACHED();
-}
-
-template <> cdm::Status CdmWrapperImpl<cdm::ContentDecryptionModule_1>::
- DecryptAndDecodeSamples(const cdm::InputBuffer& encrypted_buffer,
- cdm::AudioFrames* audio_frames) {
- AudioFramesImpl audio_frames_1;
- cdm::Status status =
- cdm_->DecryptAndDecodeSamples(encrypted_buffer, &audio_frames_1);
- if (status != cdm::kSuccess)
- return status;
-
- audio_frames->SetFrameBuffer(audio_frames_1.PassFrameBuffer());
- audio_frames->SetFormat(cdm::kAudioFormatS16);
- return cdm::kSuccess;
+void CdmWrapperImpl<cdm::ContentDecryptionModule_4>::UpdateSession(
+ uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size,
+ const uint8_t* response,
+ uint32_t response_size) {
+ std::string web_session_str(web_session_id, web_session_id_size);
+ uint32_t session_id = LookupSessionId(web_session_str);
+ RegisterPromise(session_id, promise_id);
+ cdm_->UpdateSession(session_id, response, response_size);
}
-// Specializations for ContentDecryptionModule_2.
-
template <>
-void CdmWrapperImpl<cdm::ContentDecryptionModule_2>::CreateSession(
- uint32_t session_id,
- const char* type,
- uint32_t type_size,
- const uint8_t* init_data,
- uint32_t init_data_size) {
- PrefixedGenerateKeyRequest(
- this, cdm_, session_id, type, type_size, init_data, init_data_size);
-}
-
-template <>
-CdmWrapper::Result CdmWrapperImpl<
- cdm::ContentDecryptionModule_2>::UpdateSession(uint32_t session_id,
- const uint8_t* response,
- uint32_t response_size) {
- return PrefixedAddKey(this, cdm_, session_id, response, response_size);
-}
-
-template <>
-CdmWrapper::Result CdmWrapperImpl<
- cdm::ContentDecryptionModule_2>::ReleaseSession(uint32_t session_id) {
- return PrefixedCancelKeyRequest(this, cdm_, session_id);
+void CdmWrapperImpl<cdm::ContentDecryptionModule_4>::ReleaseSession(
+ uint32_t promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) {
+ std::string web_session_str(web_session_id, web_session_id_size);
+ uint32_t session_id = LookupSessionId(web_session_str);
+ RegisterPromise(session_id, promise_id);
+ cdm_->ReleaseSession(session_id);
}
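
A minimal sketch (illustrative, not part of this patch) of the promise-id/session-id bookkeeping described in the Host_4 comment above; the class is hypothetical, though the member names mirror CdmWrapperImpl:

#include <cstdint>
#include <map>
#include <string>

// Hypothetical condensation of the bookkeeping above: a CDM-internal
// session id links the promise that created the session to the web
// session id that the CDM later reports.
class SessionIdRegistry {
 public:
  SessionIdRegistry() : next_session_id_(100) {}  // Same seed as above.

  uint32_t CreateSessionId() { return next_session_id_++; }

  void RegisterPromise(uint32_t session_id, uint32_t promise_id) {
    promise_to_session_id_map_[promise_id] = session_id;
  }

  void AssignWebSessionId(uint32_t session_id, const std::string& web_id) {
    web_session_to_session_id_map_[web_id] = session_id;
  }

  // UpdateSession()/ReleaseSession() arrive with a web session id and must
  // be converted back to the CDM-internal session id.
  uint32_t LookupSessionId(const std::string& web_id) const {
    std::map<std::string, uint32_t>::const_iterator it =
        web_session_to_session_id_map_.find(web_id);
    return it == web_session_to_session_id_map_.end() ? 0 : it->second;
  }

 private:
  uint32_t next_session_id_;
  std::map<uint32_t, uint32_t> promise_to_session_id_map_;
  std::map<std::string, uint32_t> web_session_to_session_id_map_;
};
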
CdmWrapper* CdmWrapper::Create(const char* key_system,
@@ -441,23 +340,21 @@ CdmWrapper* CdmWrapper::Create(const char* key_system,
GetCdmHostFunc get_cdm_host_func,
void* user_data) {
COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
- cdm::ContentDecryptionModule_3::kVersion,
+ cdm::ContentDecryptionModule_5::kVersion,
update_code_below);
- // Ensure IsSupportedCdmInterfaceVersion matches this implementation.
+ // Ensure IsSupportedCdmInterfaceVersion() matches this implementation.
// Always update this DCHECK when updating this function.
// If this check fails, update this function and DCHECK or update
- // IsSupportedCdmInterfaceVersion.
+ // IsSupportedCdmInterfaceVersion().
PP_DCHECK(
- !IsSupportedCdmInterfaceVersion(
- cdm::ContentDecryptionModule::kVersion + 1) &&
+ !IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule::kVersion +
+ 1) &&
IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule::kVersion) &&
IsSupportedCdmInterfaceVersion(
- cdm::ContentDecryptionModule_2::kVersion) &&
- IsSupportedCdmInterfaceVersion(
- cdm::ContentDecryptionModule_1::kVersion) &&
- !IsSupportedCdmInterfaceVersion(
- cdm::ContentDecryptionModule_1::kVersion - 1));
+ cdm::ContentDecryptionModule_4::kVersion) &&
+ !IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule_4::kVersion -
+ 1));
// Try to create the CDM using the latest CDM interface version.
CdmWrapper* cdm_wrapper =
@@ -466,13 +363,9 @@ CdmWrapper* CdmWrapper::Create(const char* key_system,
if (cdm_wrapper)
return cdm_wrapper;
- // Try to see if the CDM supports older version(s) of the CDM interface.
- cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_2>::Create(
- key_system, key_system_size, get_cdm_host_func, user_data);
- if (cdm_wrapper)
- return cdm_wrapper;
-
- cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_1>::Create(
+ // If |cdm_wrapper| is NULL, try to create the CDM using older supported
+ // versions of the CDM interface.
+ cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_4>::Create(
key_system, key_system_size, get_cdm_host_func, user_data);
return cdm_wrapper;
}
@@ -482,7 +375,7 @@ CdmWrapper* CdmWrapper::Create(const char* key_system,
// does not have.
// Also update supported_cdm_versions.h.
COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
- cdm::ContentDecryptionModule_3::kVersion,
+ cdm::ContentDecryptionModule_5::kVersion,
ensure_cdm_wrapper_templates_have_old_version_support);
} // namespace media
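
As a usage note on Create() above: the adapter always tries the newest interface first and falls back only on failure. A condensed, illustrative restatement of the fallback chain (the real function is in the hunk above):

// Illustrative condensation of CdmWrapper::Create()'s fallback chain. A NULL
// return from CdmWrapperImpl<T>::Create() means the CDM library does not
// implement interface version T.
CdmWrapper* cdm_wrapper =
    CdmWrapperImpl<cdm::ContentDecryptionModule>::Create(  // == _5 per the
        key_system, key_system_size, get_cdm_host_func, user_data);  // assert
if (!cdm_wrapper) {
  // Fall back to Host_4 semantics: promise ids mapped to session ids.
  cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_4>::Create(
      key_system, key_system_size, get_cdm_host_func, user_data);
}
return cdm_wrapper;
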
diff --git a/chromium/media/cdm/ppapi/cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc
index 0477c0a3835..297ea844918 100644
--- a/chromium/media/cdm/ppapi/cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/cdm_video_decoder.cc
@@ -4,18 +4,18 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "media/cdm/ppapi/cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/cdm_video_decoder.h"
#if defined(CLEAR_KEY_CDM_USE_FAKE_VIDEO_DECODER)
-#include "media/cdm/ppapi/fake_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h"
#endif
#if defined(CLEAR_KEY_CDM_USE_FFMPEG_DECODER)
-#include "media/cdm/ppapi/ffmpeg_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h"
#endif
#if defined(CLEAR_KEY_CDM_USE_LIBVPX_DECODER)
-#include "media/cdm/ppapi/libvpx_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h"
#endif
namespace media {
@@ -31,7 +31,8 @@ scoped_ptr<CdmVideoDecoder> CreateVideoDecoder(
#else
#if defined(CLEAR_KEY_CDM_USE_LIBVPX_DECODER)
- if (config.codec == cdm::VideoDecoderConfig::kCodecVp8) {
+ if (config.codec == cdm::VideoDecoderConfig::kCodecVp8 ||
+ config.codec == cdm::VideoDecoderConfig::kCodecVp9) {
video_decoder.reset(new LibvpxCdmVideoDecoder(host));
if (!video_decoder->Initialize(config))
diff --git a/chromium/media/cdm/ppapi/cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/cdm_video_decoder.h
index 3eefb63584d..fd84b773619 100644
--- a/chromium/media/cdm/ppapi/cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/cdm_video_decoder.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_CDM_VIDEO_DECODER_H_
-#define MEDIA_CDM_PPAPI_CDM_VIDEO_DECODER_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CDM_VIDEO_DECODER_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CDM_VIDEO_DECODER_H_
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/cdm/ppapi/api/content_decryption_module.h"
-#include "media/cdm/ppapi/clear_key_cdm_common.h"
+#include "media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h"
namespace media {
@@ -41,4 +41,4 @@ scoped_ptr<CdmVideoDecoder> CreateVideoDecoder(
} // namespace media
-#endif // MEDIA_CDM_PPAPI_CDM_VIDEO_DECODER_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CDM_VIDEO_DECODER_H_
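
The cdm_video_decoder.cc hunk above extends libvpx selection to VP9. Illustrative shape of that selection step inside CreateVideoDecoder(), assuming the |config|, |host|, and |video_decoder| names from the surrounding function:

// Illustrative: pick a decoder backend by codec under the build flags used
// in this file. Initialization failure discards the candidate decoder so the
// caller can fall back.
scoped_ptr<CdmVideoDecoder> video_decoder;
#if defined(CLEAR_KEY_CDM_USE_LIBVPX_DECODER)
if (config.codec == cdm::VideoDecoderConfig::kCodecVp8 ||
    config.codec == cdm::VideoDecoderConfig::kCodecVp9) {
  video_decoder.reset(new LibvpxCdmVideoDecoder(host));
  if (!video_decoder->Initialize(config))
    video_decoder.reset();
}
#endif
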
diff --git a/chromium/media/cdm/ppapi/clear_key_cdm.cc b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
index 6d3a68ce5ac..6f0809f89ed 100644
--- a/chromium/media/cdm/ppapi/clear_key_cdm.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cdm/ppapi/clear_key_cdm.h"
+#include "media/cdm/ppapi/external_clear_key/clear_key_cdm.h"
#include <algorithm>
+#include <cstring>
#include <sstream>
#include <string>
#include <vector>
@@ -13,9 +14,12 @@
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/time/time.h"
+#include "media/base/cdm_promise.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
-#include "media/cdm/ppapi/cdm_video_decoder.h"
+#include "media/cdm/json_web_key.h"
+#include "media/cdm/ppapi/cdm_file_io_test.h"
+#include "media/cdm/ppapi/external_clear_key/cdm_video_decoder.h"
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
#include "base/basictypes.h"
@@ -27,8 +31,8 @@ const int64 kNoTimestamp = kint64min;
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "media/base/media.h"
-#include "media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h"
-#include "media/cdm/ppapi/ffmpeg_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h"
// Include FFmpeg avformat.h for av_register_all().
extern "C" {
@@ -63,6 +67,21 @@ const char kClearKeyCdmVersion[] = "0.1.0.1";
const char kExternalClearKeyKeySystem[] = "org.chromium.externalclearkey";
const char kExternalClearKeyDecryptOnlyKeySystem[] =
"org.chromium.externalclearkey.decryptonly";
+const char kExternalClearKeyFileIOTestKeySystem[] =
+ "org.chromium.externalclearkey.fileiotest";
+const char kExternalClearKeyCrashKeySystem[] =
+ "org.chromium.externalclearkey.crash";
+
+// Constants for the emulated session that can be loaded by LoadSession().
+// These constants need to be in sync with
+// chrome/test/data/media/encrypted_media_utils.js
+const char kLoadableWebSessionId[] = "LoadableSession";
+const char kLoadableSessionContentType[] = "video/webm";
+const uint8 kLoadableSessionKeyId[] = "0123456789012345";
+const uint8 kLoadableSessionKey[] =
+ {0xeb, 0xdd, 0x62, 0xf1, 0x68, 0x14, 0xd2, 0x7b,
+ 0x68, 0xef, 0x12, 0x2a, 0xfc, 0xe4, 0xae, 0x3c};
+
const int64 kSecondsPerMinute = 60;
const int64 kMsPerSecond = 1000;
const int64 kInitialTimerDelayMs = 200;
@@ -70,6 +89,8 @@ const int64 kMaxTimerDelayMs = 1 * kSecondsPerMinute * kMsPerSecond;
// Heart beat message header. If a key message starts with |kHeartBeatHeader|,
// it's a heart beat message. Otherwise, it's a key request.
const char kHeartBeatHeader[] = "HEARTBEAT";
+// CDM file IO test result header.
+const char kFileIOTestResultHeader[] = "FILEIOTESTRESULT";
// Copies |input_buffer| into a media::DecoderBuffer. If the |input_buffer| is
// empty, an empty (end-of-stream) media::DecoderBuffer is returned.
@@ -80,7 +101,7 @@ static scoped_refptr<media::DecoderBuffer> CopyDecoderBufferFrom(
return media::DecoderBuffer::CreateEOSBuffer();
}
- // TODO(tomfinegan): Get rid of this copy.
+ // TODO(xhwang): Get rid of this copy.
scoped_refptr<media::DecoderBuffer> output_buffer =
media::DecoderBuffer::CopyFrom(input_buffer.data, input_buffer.data_size);
@@ -92,12 +113,12 @@ static scoped_refptr<media::DecoderBuffer> CopyDecoderBufferFrom(
subsamples.push_back(subsample);
}
+ DCHECK_EQ(input_buffer.data_offset, 0u);
scoped_ptr<media::DecryptConfig> decrypt_config(new media::DecryptConfig(
std::string(reinterpret_cast<const char*>(input_buffer.key_id),
input_buffer.key_id_size),
std::string(reinterpret_cast<const char*>(input_buffer.iv),
input_buffer.iv_size),
- input_buffer.data_offset,
subsamples));
output_buffer->set_decrypt_config(decrypt_config.Pass());
@@ -107,6 +128,45 @@ static scoped_refptr<media::DecoderBuffer> CopyDecoderBufferFrom(
return output_buffer;
}
+static std::string GetFileIOTestResultMessage(bool success) {
+ std::string message(kFileIOTestResultHeader);
+ message += success ? '1' : '0';
+ return message;
+}
+
+static cdm::Error ConvertException(media::MediaKeys::Exception exception_code) {
+ switch (exception_code) {
+ case media::MediaKeys::NOT_SUPPORTED_ERROR:
+ return cdm::kNotSupportedError;
+ case media::MediaKeys::INVALID_STATE_ERROR:
+ return cdm::kInvalidStateError;
+ case media::MediaKeys::INVALID_ACCESS_ERROR:
+ return cdm::kInvalidAccessError;
+ case media::MediaKeys::QUOTA_EXCEEDED_ERROR:
+ return cdm::kQuotaExceededError;
+ case media::MediaKeys::UNKNOWN_ERROR:
+ return cdm::kUnknownError;
+ case media::MediaKeys::CLIENT_ERROR:
+ return cdm::kClientError;
+ case media::MediaKeys::OUTPUT_ERROR:
+ return cdm::kOutputError;
+ }
+ NOTIMPLEMENTED();
+ return cdm::kUnknownError;
+}
+
+static media::MediaKeys::SessionType ConvertSessionType(
+ cdm::SessionType session_type) {
+ switch (session_type) {
+ case cdm::kPersistent:
+ return media::MediaKeys::PERSISTENT_SESSION;
+ case cdm::kTemporary:
+ return media::MediaKeys::TEMPORARY_SESSION;
+ }
+ NOTIMPLEMENTED();
+ return media::MediaKeys::TEMPORARY_SESSION;
+}
+
template<typename Type>
class ScopedResetter {
public:
@@ -135,7 +195,9 @@ void* CreateCdmInstance(int cdm_interface_version,
std::string key_system_string(key_system, key_system_size);
if (key_system_string != kExternalClearKeyKeySystem &&
- key_system_string != kExternalClearKeyDecryptOnlyKeySystem) {
+ key_system_string != kExternalClearKeyDecryptOnlyKeySystem &&
+ key_system_string != kExternalClearKeyFileIOTestKeySystem &&
+ key_system_string != kExternalClearKeyCrashKeySystem) {
DVLOG(1) << "Unsupported key system:" << key_system_string;
return NULL;
}
@@ -148,8 +210,7 @@ void* CreateCdmInstance(int cdm_interface_version,
if (!host)
return NULL;
- return new media::ClearKeyCdm(
- host, key_system_string == kExternalClearKeyDecryptOnlyKeySystem);
+ return new media::ClearKeyCdm(host, key_system_string);
}
const char* GetCdmVersion() {
@@ -158,66 +219,14 @@ const char* GetCdmVersion() {
namespace media {
-// Since all the calls to AesDecryptor are synchronous, pass a dummy value for
-// session_id that is never exposed outside this class.
-// TODO(jrummell): Remove usage of this when the CDM interface is updated
-// to use session_id.
-
-ClearKeyCdm::Client::Client()
- : status_(kNone), error_code_(MediaKeys::kUnknownError), system_code_(0) {}
-
-ClearKeyCdm::Client::~Client() {}
-
-void ClearKeyCdm::Client::Reset() {
- status_ = kNone;
- web_session_id_.clear();
- message_.clear();
- destination_url_.clear();
- error_code_ = MediaKeys::kUnknownError;
- system_code_ = 0;
-}
-
-void ClearKeyCdm::Client::OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) {
- status_ = static_cast<Status>(status_ | kCreated);
- web_session_id_ = web_session_id;
-}
-
-void ClearKeyCdm::Client::OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) {
- status_ = static_cast<Status>(status_ | kMessage);
- message_ = message;
- destination_url_ = destination_url;
-}
-
-void ClearKeyCdm::Client::OnSessionReady(uint32 session_id) {
- status_ = static_cast<Status>(status_ | kReady);
-}
-
-void ClearKeyCdm::Client::OnSessionClosed(uint32 session_id) {
- status_ = static_cast<Status>(status_ | kClosed);
-}
-
-void ClearKeyCdm::Client::OnSessionError(uint32 session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
- status_ = static_cast<Status>(status_ | kError);
- error_code_ = error_code;
- system_code_ = system_code;
-}
-
-ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host, bool is_decrypt_only)
+ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host, const std::string& key_system)
: decryptor_(
- base::Bind(&Client::OnSessionCreated, base::Unretained(&client_)),
- base::Bind(&Client::OnSessionMessage, base::Unretained(&client_)),
- base::Bind(&Client::OnSessionReady, base::Unretained(&client_)),
- base::Bind(&Client::OnSessionClosed, base::Unretained(&client_)),
- base::Bind(&Client::OnSessionError, base::Unretained(&client_))),
+ base::Bind(&ClearKeyCdm::OnSessionMessage, base::Unretained(this)),
+ base::Bind(&ClearKeyCdm::OnSessionClosed, base::Unretained(this))),
host_(host),
- is_decrypt_only_(is_decrypt_only),
+ key_system_(key_system),
timer_delay_ms_(kInitialTimerDelayMs),
- timer_set_(false) {
+ heartbeat_timer_set_(false) {
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
channel_count_ = 0;
bits_per_channel_ = 0;
@@ -229,85 +238,118 @@ ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host, bool is_decrypt_only)
ClearKeyCdm::~ClearKeyCdm() {}
-cdm::Status ClearKeyCdm::GenerateKeyRequest(const char* type,
- uint32_t type_size,
- const uint8_t* init_data,
- uint32_t init_data_size) {
- DVLOG(1) << "GenerateKeyRequest()";
- base::AutoLock auto_lock(client_lock_);
- ScopedResetter<Client> auto_resetter(&client_);
- decryptor_.CreateSession(MediaKeys::kInvalidSessionId,
- std::string(type, type_size),
- init_data, init_data_size);
-
- if (client_.status() != (Client::kMessage | Client::kCreated)) {
- // Use values returned to client if possible.
- host_->SendKeyError(client_.web_session_id().data(),
- client_.web_session_id().size(),
- static_cast<cdm::MediaKeyError>(client_.error_code()),
- client_.system_code());
- return cdm::kSessionError;
- }
-
- host_->SendKeyMessage(
- client_.web_session_id().data(), client_.web_session_id().size(),
- reinterpret_cast<const char*>(&client_.message()[0]),
- client_.message().size(),
- client_.destination_url().data(), client_.destination_url().size());
-
- // Only save the latest session ID for heartbeat messages.
- heartbeat_session_id_ = client_.web_session_id();
-
- return cdm::kSuccess;
-}
-
-cdm::Status ClearKeyCdm::AddKey(const char* session_id,
- uint32_t session_id_size,
- const uint8_t* key,
- uint32_t key_size,
- const uint8_t* key_id,
- uint32_t key_id_size) {
- DVLOG(1) << "AddKey()";
- DCHECK(!key_id && !key_id_size);
- base::AutoLock auto_lock(client_lock_);
- ScopedResetter<Client> auto_resetter(&client_);
- decryptor_.UpdateSession(MediaKeys::kInvalidSessionId, key, key_size);
-
- if (client_.status() != Client::kReady) {
- host_->SendKeyError(session_id, session_id_size,
- static_cast<cdm::MediaKeyError>(client_.error_code()),
- client_.system_code());
- return cdm::kSessionError;
+void ClearKeyCdm::CreateSession(uint32 promise_id,
+ const char* init_data_type,
+ uint32 init_data_type_size,
+ const uint8* init_data,
+ uint32 init_data_size,
+ cdm::SessionType session_type) {
+ DVLOG(1) << __FUNCTION__;
+
+ scoped_ptr<media::NewSessionCdmPromise> promise(
+ new media::NewSessionCdmPromise(base::Bind(&ClearKeyCdm::OnSessionCreated,
+ base::Unretained(this),
+ promise_id),
+ base::Bind(&ClearKeyCdm::OnPromiseFailed,
+ base::Unretained(this),
+ promise_id)));
+ decryptor_.CreateSession(std::string(init_data_type, init_data_type_size),
+ init_data,
+ init_data_size,
+ ConvertSessionType(session_type),
+ promise.Pass());
+
+ if (key_system_ == kExternalClearKeyFileIOTestKeySystem)
+ StartFileIOTest();
+}
+
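Every session entry point below follows the same promise pattern: wrap |promise_id| into a CdmPromise whose two callbacks route the asynchronous AesDecryptor result back to the host. Condensed from CreateSession() above (illustrative, not additional patch code):

// Resolve path: OnSessionCreated() -> host_->OnResolveNewSessionPromise().
// Reject path:  OnPromiseFailed()  -> host_->OnRejectPromise().
scoped_ptr<media::NewSessionCdmPromise> promise(
    new media::NewSessionCdmPromise(
        base::Bind(&ClearKeyCdm::OnSessionCreated,
                   base::Unretained(this), promise_id),
        base::Bind(&ClearKeyCdm::OnPromiseFailed,
                   base::Unretained(this), promise_id)));

base::Unretained(this) is safe here because |decryptor_|, which holds these callbacks, is a member of ClearKeyCdm and therefore cannot outlive it.
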
+// Loads an emulated stored session. Currently only |kLoadableWebSessionId|
+// (containing a |kLoadableSessionKey| for |kLoadableSessionKeyId|) is
+// supported.
+void ClearKeyCdm::LoadSession(uint32 promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) {
+ DVLOG(1) << __FUNCTION__;
+
+ if (std::string(kLoadableWebSessionId) !=
+ std::string(web_session_id, web_session_id_length)) {
+ std::string message("Incorrect session id specified for LoadSession().");
+ host_->OnRejectPromise(promise_id,
+ cdm::kInvalidAccessError,
+ 0,
+ message.data(),
+ message.length());
+ return;
}
- if (!timer_set_) {
+ scoped_ptr<media::NewSessionCdmPromise> promise(
+ new media::NewSessionCdmPromise(base::Bind(&ClearKeyCdm::OnSessionLoaded,
+ base::Unretained(this),
+ promise_id),
+ base::Bind(&ClearKeyCdm::OnPromiseFailed,
+ base::Unretained(this),
+ promise_id)));
+ decryptor_.CreateSession(std::string(kLoadableSessionContentType),
+ NULL,
+ 0,
+ MediaKeys::TEMPORARY_SESSION,
+ promise.Pass());
+}
+
+void ClearKeyCdm::UpdateSession(uint32 promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size,
+ const uint8* response,
+ uint32 response_size) {
+ DVLOG(1) << __FUNCTION__;
+ std::string web_session_str(web_session_id, web_session_id_size);
+
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::SimpleCdmPromise(
+ base::Bind(&ClearKeyCdm::OnSessionUpdated,
+ base::Unretained(this),
+ promise_id,
+ web_session_str),
+ base::Bind(
+ &ClearKeyCdm::OnPromiseFailed, base::Unretained(this), promise_id)));
+ decryptor_.UpdateSession(
+ web_session_str, response, response_size, promise.Pass());
+
+ if (!heartbeat_timer_set_) {
ScheduleNextHeartBeat();
- timer_set_ = true;
+ heartbeat_timer_set_ = true;
}
-
- return cdm::kSuccess;
}
-cdm::Status ClearKeyCdm::CancelKeyRequest(const char* session_id,
- uint32_t session_id_size) {
- DVLOG(1) << "CancelKeyRequest()";
- base::AutoLock auto_lock(client_lock_);
- ScopedResetter<Client> auto_resetter(&client_);
- decryptor_.ReleaseSession(MediaKeys::kInvalidSessionId);
+void ClearKeyCdm::ReleaseSession(uint32 promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_size) {
+ DVLOG(1) << __FUNCTION__;
+ std::string web_session_str(web_session_id, web_session_id_size);
- // No message normally sent by Release(), but if an error occurred,
- // report it as a failure.
- if (client_.status() == Client::kError) {
- host_->SendKeyError(session_id, session_id_size,
- static_cast<cdm::MediaKeyError>(client_.error_code()),
- client_.system_code());
- return cdm::kSessionError;
- }
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::SimpleCdmPromise(
+ base::Bind(&ClearKeyCdm::OnSessionReleased,
+ base::Unretained(this),
+ promise_id,
+ web_session_str),
+ base::Bind(
+ &ClearKeyCdm::OnPromiseFailed, base::Unretained(this), promise_id)));
+ decryptor_.ReleaseSession(web_session_str, promise.Pass());
+}
- return cdm::kSuccess;
+void ClearKeyCdm::SetServerCertificate(uint32 promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) {
+ // ClearKey doesn't use a server certificate.
+ host_->OnResolvePromise(promise_id);
}
void ClearKeyCdm::TimerExpired(void* context) {
+ if (context == &session_id_for_emulated_loadsession_) {
+ LoadLoadableSession();
+ return;
+ }
+
+ DCHECK(heartbeat_timer_set_);
std::string heartbeat_message;
if (!next_heartbeat_message_.empty() &&
context == &next_heartbeat_message_[0]) {
@@ -320,10 +362,12 @@ void ClearKeyCdm::TimerExpired(void* context) {
// There is no service at this URL, so applications should ignore it.
const char url[] = "http://test.externalclearkey.chromium.org";
- host_->SendKeyMessage(
- heartbeat_session_id_.data(), heartbeat_session_id_.size(),
- heartbeat_message.data(), heartbeat_message.size(),
- url, arraysize(url) - 1);
+ host_->OnSessionMessage(last_session_id_.data(),
+ last_session_id_.length(),
+ heartbeat_message.data(),
+ heartbeat_message.length(),
+ url,
+ arraysize(url) - 1);
ScheduleNextHeartBeat();
}
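
Host-side consumers distinguish these session messages purely by header prefix (kHeartBeatHeader and kFileIOTestResultHeader, defined earlier in this file). A minimal receiver-side sketch, assuming those constant values:

#include <string>

// Illustrative only: classify a session message by its header prefix. The
// literals match the constants defined earlier in clear_key_cdm.cc.
bool IsHeartbeat(const std::string& msg) {
  return msg.compare(0, 9, "HEARTBEAT") == 0;
}

// GetFileIOTestResultMessage() appends '1' (pass) or '0' (fail) to the
// header, so the complete success message is "FILEIOTESTRESULT1".
bool FileIOTestPassed(const std::string& msg) {
  return msg == "FILEIOTESTRESULT1";
}
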
@@ -363,7 +407,7 @@ cdm::Status ClearKeyCdm::Decrypt(
cdm::Status ClearKeyCdm::InitializeAudioDecoder(
const cdm::AudioDecoderConfig& audio_decoder_config) {
- if (is_decrypt_only_)
+ if (key_system_ == kExternalClearKeyDecryptOnlyKeySystem)
return cdm::kSessionError;
#if defined(CLEAR_KEY_CDM_USE_FFMPEG_DECODER)
@@ -387,7 +431,7 @@ cdm::Status ClearKeyCdm::InitializeAudioDecoder(
cdm::Status ClearKeyCdm::InitializeVideoDecoder(
const cdm::VideoDecoderConfig& video_decoder_config) {
- if (is_decrypt_only_)
+ if (key_system_ == kExternalClearKeyDecryptOnlyKeySystem)
return cdm::kSessionError;
if (video_decoder_ && video_decoder_->is_initialized()) {
@@ -472,6 +516,10 @@ cdm::Status ClearKeyCdm::DecryptAndDecodeSamples(
cdm::AudioFrames* audio_frames) {
DVLOG(1) << "DecryptAndDecodeSamples()";
+  // Trigger a crash on purpose, for testing.
+ if (key_system_ == kExternalClearKeyCrashKeySystem)
+ CHECK(false);
+
scoped_refptr<media::DecoderBuffer> buffer;
cdm::Status status = DecryptToMediaDecoderBuffer(encrypted_buffer, &buffer);
@@ -510,7 +558,7 @@ void ClearKeyCdm::ScheduleNextHeartBeat() {
// Prepare the next heartbeat message and set timer.
std::ostringstream msg_stream;
msg_stream << kHeartBeatHeader << " from ClearKey CDM set at time "
- << host_->GetCurrentWallTimeInSeconds() << ".";
+ << host_->GetCurrentTime() << ".";
next_heartbeat_message_ = msg_stream.str();
host_->SetTimer(timer_delay_ms_, &next_heartbeat_message_[0]);
@@ -562,6 +610,115 @@ void ClearKeyCdm::OnQueryOutputProtectionStatus(
NOTIMPLEMENTED();
};
+void ClearKeyCdm::LoadLoadableSession() {
+ std::string jwk_set = GenerateJWKSet(kLoadableSessionKey,
+ sizeof(kLoadableSessionKey),
+ kLoadableSessionKeyId,
+ sizeof(kLoadableSessionKeyId) - 1);
+ // TODO(xhwang): This triggers OnSessionUpdated(). For prefixed EME support,
+ // this is okay. Check WD EME support.
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::SimpleCdmPromise(
+ base::Bind(&ClearKeyCdm::OnSessionUpdated,
+ base::Unretained(this),
+ promise_id_for_emulated_loadsession_,
+ session_id_for_emulated_loadsession_),
+ base::Bind(&ClearKeyCdm::OnPromiseFailed,
+ base::Unretained(this),
+ promise_id_for_emulated_loadsession_)));
+ decryptor_.UpdateSession(session_id_for_emulated_loadsession_,
+ reinterpret_cast<const uint8*>(jwk_set.data()),
+ jwk_set.size(),
+ promise.Pass());
+}
+
+void ClearKeyCdm::OnSessionMessage(const std::string& web_session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url) {
+ DVLOG(1) << "OnSessionMessage: " << message.size();
+
+ // Ignore the message when we are waiting to update the loadable session.
+ if (web_session_id == session_id_for_emulated_loadsession_)
+ return;
+
+  // OnSessionMessage() is only called during CreateSession(), so no promise
+  // is involved (OnSessionCreated() is called to resolve the CreateSession()
+  // promise).
+ host_->OnSessionMessage(web_session_id.data(),
+ web_session_id.length(),
+ reinterpret_cast<const char*>(message.data()),
+ message.size(),
+ destination_url.spec().data(),
+ destination_url.spec().size());
+}
+
+void ClearKeyCdm::OnSessionClosed(const std::string& web_session_id) {
+ host_->OnSessionClosed(web_session_id.data(), web_session_id.length());
+}
+
+void ClearKeyCdm::OnSessionCreated(uint32 promise_id,
+ const std::string& web_session_id) {
+ // Save the latest session ID for heartbeat and file IO test messages.
+ last_session_id_ = web_session_id;
+
+ host_->OnResolveNewSessionPromise(
+ promise_id, web_session_id.data(), web_session_id.length());
+}
+
+void ClearKeyCdm::OnSessionLoaded(uint32 promise_id,
+ const std::string& web_session_id) {
+ // Save the latest session ID for heartbeat and file IO test messages.
+ last_session_id_ = web_session_id;
+
+ // |decryptor_| created some session as |web_session_id|, but going forward
+ // we need to map that to |kLoadableWebSessionId|, as that is what callers
+ // expect.
+ session_id_for_emulated_loadsession_ = web_session_id;
+
+ // Delay LoadLoadableSession() to test the case where Decrypt*() calls are
+ // made before the session is fully loaded.
+ const int64 kDelayToLoadSessionMs = 500;
+
+ // Defer resolving the promise until the session is loaded.
+ promise_id_for_emulated_loadsession_ = promise_id;
+
+ // Use the address of |session_id_for_emulated_loadsession_| as the timer
+ // context so that we can call LoadLoadableSession() when the timer expires.
+ host_->SetTimer(kDelayToLoadSessionMs, &session_id_for_emulated_loadsession_);
+}
+
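OnSessionLoaded() above defers the promise and passes the address of a member as the timer context, so TimerExpired() can tell this timer apart from the heartbeat timer by pointer identity. A self-contained sketch of that technique (hypothetical class, not patch code):

#include <string>

// Illustrative: one TimerExpired() entry point serving two one-shot timers,
// distinguished by the context pointer originally handed to SetTimer().
class TimerDispatchDemo {
 public:
  void ArmLoadTimer() { SetTimer(500, &load_context_); }
  void ArmHeartbeat() { SetTimer(200, &heartbeat_context_); }

  void TimerExpired(void* context) {
    if (context == &load_context_) {
      // Finish the deferred LoadSession() here.
      return;
    }
    if (context == &heartbeat_context_) {
      // Send the next heartbeat and re-arm.
    }
  }

 private:
  void SetTimer(int delay_ms, void* context) {
    // Provided by the host in the real code; elided here.
  }

  std::string load_context_;
  std::string heartbeat_context_;
};
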
+void ClearKeyCdm::OnSessionUpdated(uint32 promise_id,
+ const std::string& web_session_id) {
+  // OnSessionReady() is only called on success for UpdateSession(). However,
+  // UpdateSession() is also called to finish loading sessions, so handle
+  // that case appropriately.
+ if (web_session_id == session_id_for_emulated_loadsession_) {
+ session_id_for_emulated_loadsession_ = std::string();
+ // |promise_id| is the LoadSession() promise, so resolve appropriately.
+ host_->OnResolveNewSessionPromise(
+ promise_id, kLoadableWebSessionId, strlen(kLoadableWebSessionId));
+ host_->OnSessionReady(kLoadableWebSessionId, strlen(kLoadableWebSessionId));
+ return;
+ }
+
+ host_->OnResolvePromise(promise_id);
+}
+
+void ClearKeyCdm::OnSessionReleased(uint32 promise_id,
+ const std::string& web_session_id) {
+ host_->OnResolvePromise(promise_id);
+}
+
+void ClearKeyCdm::OnPromiseFailed(uint32 promise_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ host_->OnRejectPromise(promise_id,
+ ConvertException(exception_code),
+ system_code,
+ error_message.data(),
+ error_message.length());
+}
+
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
int64 ClearKeyCdm::CurrentTimeStampInMicroseconds() const {
return output_timestamp_base_in_microseconds_ +
@@ -621,4 +778,23 @@ cdm::Status ClearKeyCdm::GenerateFakeAudioFrames(
}
#endif // CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER
+void ClearKeyCdm::StartFileIOTest() {
+ file_io_test_runner_.reset(new FileIOTestRunner(
+ base::Bind(&ClearKeyCdmHost::CreateFileIO, base::Unretained(host_))));
+ file_io_test_runner_->RunAllTests(
+ base::Bind(&ClearKeyCdm::OnFileIOTestComplete, base::Unretained(this)));
+}
+
+void ClearKeyCdm::OnFileIOTestComplete(bool success) {
+ DVLOG(1) << __FUNCTION__ << ": " << success;
+ std::string message = GetFileIOTestResultMessage(success);
+ host_->OnSessionMessage(last_session_id_.data(),
+ last_session_id_.length(),
+ message.data(),
+ message.length(),
+ NULL,
+ 0);
+ file_io_test_runner_.reset();
+}
+
} // namespace media
diff --git a/chromium/media/cdm/ppapi/clear_key_cdm.h b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h
index 0ec18a101a0..5903642a582 100644
--- a/chromium/media/cdm/ppapi/clear_key_cdm.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_H_
-#define MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CLEAR_KEY_CDM_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CLEAR_KEY_CDM_H_
#include <string>
#include <vector>
@@ -14,7 +14,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "media/cdm/aes_decryptor.h"
-#include "media/cdm/ppapi/clear_key_cdm_common.h"
+#include "media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h"
// Enable this to use the fake decoder for testing.
// TODO(tomfinegan): Move fake audio decoder into a separate class.
@@ -23,6 +23,7 @@
#endif
namespace media {
+class FileIOTestRunner;
class CdmVideoDecoder;
class DecoderBuffer;
class FFmpegCdmAudioDecoder;
@@ -30,21 +31,31 @@ class FFmpegCdmAudioDecoder;
// Clear key implementation of the cdm::ContentDecryptionModule interface.
class ClearKeyCdm : public ClearKeyCdmInterface {
public:
- explicit ClearKeyCdm(Host* host, bool is_decrypt_only);
+ ClearKeyCdm(Host* host, const std::string& key_system);
virtual ~ClearKeyCdm();
// ContentDecryptionModule implementation.
- virtual cdm::Status GenerateKeyRequest(
- const char* type, uint32_t type_size,
- const uint8_t* init_data, uint32_t init_data_size) OVERRIDE;
- virtual cdm::Status AddKey(const char* session_id,
- uint32_t session_id_size,
- const uint8_t* key,
- uint32_t key_size,
- const uint8_t* key_id,
- uint32_t key_id_size) OVERRIDE;
- virtual cdm::Status CancelKeyRequest(const char* session_id,
- uint32_t session_id_size) OVERRIDE;
+ virtual void CreateSession(uint32 promise_id,
+ const char* init_data_type,
+ uint32 init_data_type_size,
+ const uint8* init_data,
+ uint32 init_data_size,
+ cdm::SessionType session_type) OVERRIDE;
+ virtual void LoadSession(uint32 promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) OVERRIDE;
+ virtual void UpdateSession(uint32 promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length,
+ const uint8* response,
+ uint32 response_size) OVERRIDE;
+ virtual void ReleaseSession(uint32 promise_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) OVERRIDE;
+ virtual void SetServerCertificate(
+ uint32 promise_id,
+ const uint8_t* server_certificate_data,
+ uint32_t server_certificate_data_size) OVERRIDE;
virtual void TimerExpired(void* context) OVERRIDE;
virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
cdm::DecryptedBlock* decrypted_block) OVERRIDE;
@@ -67,52 +78,26 @@ class ClearKeyCdm : public ClearKeyCdmInterface {
uint32_t link_mask, uint32_t output_protection_mask) OVERRIDE;
private:
- // TODO(xhwang): After we removed DecryptorClient. We probably can also remove
- // this Client class as well. Investigate this possibility.
- class Client {
- public:
- // TODO(jrummell): Remove bitmask and rename kNone to kInvalid once CDM
- // interface supports session_id passing completely.
- enum Status {
- kNone = 0,
- kCreated = 1 << 0,
- kMessage = 1 << 1,
- kReady = 1 << 2,
- kClosed = 1 << 3,
- kError = 1 << 4
- };
-
- Client();
- virtual ~Client();
-
- Status status() { return status_; }
- const std::string& web_session_id() { return web_session_id_; }
- const std::vector<uint8>& message() { return message_; }
- const std::string& destination_url() { return destination_url_; }
- MediaKeys::KeyError error_code() { return error_code_; }
- int system_code() { return system_code_; }
-
- // Resets the Client to a clean state.
- void Reset();
-
- void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
- void OnSessionMessage(uint32 session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url);
- void OnSessionReady(uint32 session_id);
- void OnSessionClosed(uint32 session_id);
- void OnSessionError(uint32 session_id,
- MediaKeys::KeyError error_code,
- int system_code);
-
- private:
- Status status_;
- std::string web_session_id_;
- std::vector<uint8> message_;
- std::string destination_url_;
- MediaKeys::KeyError error_code_;
- int system_code_;
- };
+ // Emulates a session stored for |session_id_for_emulated_loadsession_|. This
+ // is necessary since aes_decryptor.cc does not support storing sessions.
+ void LoadLoadableSession();
+
+ // ContentDecryptionModule callbacks.
+ void OnSessionMessage(const std::string& web_session_id,
+ const std::vector<uint8>& message,
+ const GURL& destination_url);
+ void OnSessionClosed(const std::string& web_session_id);
+
+ // Handle the success/failure of a promise. These methods are responsible for
+ // calling |host_| to resolve or reject the promise.
+ void OnSessionCreated(uint32 promise_id, const std::string& web_session_id);
+ void OnSessionLoaded(uint32 promise_id, const std::string& web_session_id);
+ void OnSessionUpdated(uint32 promise_id, const std::string& web_session_id);
+ void OnSessionReleased(uint32 promise_id, const std::string& web_session_id);
+ void OnPromiseFailed(uint32 promise_id,
+ MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message);
// Prepares next heartbeat message and sets a timer for it.
void ScheduleNextHeartBeat();
@@ -143,26 +128,34 @@ class ClearKeyCdm : public ClearKeyCdmInterface {
cdm::AudioFrames* audio_frames);
#endif // CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER
- Client client_;
- AesDecryptor decryptor_;
+ void StartFileIOTest();
+
+ // Callback for CDM File IO test.
+ void OnFileIOTestComplete(bool success);
+
+ // Keep track of the last session created.
+ void SetSessionId(const std::string& web_session_id);
- // Protects the |client_| from being accessed by the |decryptor_|
- // simultaneously.
- base::Lock client_lock_;
+ AesDecryptor decryptor_;
ClearKeyCdmHost* host_;
- const bool is_decrypt_only_;
+ const std::string key_system_;
- std::string heartbeat_session_id_;
+ std::string last_session_id_;
std::string next_heartbeat_message_;
+ // TODO(xhwang): Extract testing code from main implementation.
+ // See http://crbug.com/341751
+ std::string session_id_for_emulated_loadsession_;
+ uint32_t promise_id_for_emulated_loadsession_;
+
// Timer delay in milliseconds for the next host_->SetTimer() call.
int64 timer_delay_ms_;
- // Indicates whether a timer has been set to prevent multiple timers from
- // running.
- bool timer_set_;
+ // Indicates whether a heartbeat timer has been set to prevent multiple timers
+ // from running.
+ bool heartbeat_timer_set_;
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
int channel_count_;
@@ -178,9 +171,11 @@ class ClearKeyCdm : public ClearKeyCdmInterface {
scoped_ptr<CdmVideoDecoder> video_decoder_;
+ scoped_ptr<FileIOTestRunner> file_io_test_runner_;
+
DISALLOW_COPY_AND_ASSIGN(ClearKeyCdm);
};
} // namespace media
-#endif // MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CLEAR_KEY_CDM_H_
diff --git a/chromium/media/cdm/ppapi/clear_key_cdm_common.h b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h
index 8f843974eca..2bbc5b1c324 100644
--- a/chromium/media/cdm/ppapi/clear_key_cdm_common.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_COMMON_H_
-#define MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_COMMON_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CLEAR_KEY_CDM_COMMON_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CLEAR_KEY_CDM_COMMON_H_
#include "media/cdm/ppapi/api/content_decryption_module.h"
namespace media {
// Aliases for the version of the interfaces that this CDM implements.
-typedef cdm::ContentDecryptionModule_2 ClearKeyCdmInterface;
+typedef cdm::ContentDecryptionModule_5 ClearKeyCdmInterface;
typedef ClearKeyCdmInterface::Host ClearKeyCdmHost;
} // namespace media
-#endif // MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_COMMON_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_CLEAR_KEY_CDM_COMMON_H_
diff --git a/chromium/media/cdm/ppapi/fake_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.cc
index b23e5a721cb..4c0d870e6a4 100644
--- a/chromium/media/cdm/ppapi/fake_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cdm/ppapi/fake_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h"
#include "base/logging.h"
diff --git a/chromium/media/cdm/ppapi/fake_cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h
index 05b16ad0565..57469d15bda 100644
--- a/chromium/media/cdm/ppapi/fake_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_FAKE_CDM_VIDEO_DECODER_H_
-#define MEDIA_CDM_PPAPI_FAKE_CDM_VIDEO_DECODER_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FAKE_CDM_VIDEO_DECODER_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FAKE_CDM_VIDEO_DECODER_H_
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/cdm/ppapi/api/content_decryption_module.h"
-#include "media/cdm/ppapi/cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/cdm_video_decoder.h"
namespace media {
@@ -38,4 +38,4 @@ class FakeCdmVideoDecoder : public CdmVideoDecoder {
} // namespace media
-#endif // MEDIA_CDM_PPAPI_FAKE_CDM_VIDEO_DECODER_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FAKE_CDM_VIDEO_DECODER_H_
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
index 082b35eccc6..c35b1789c29 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h"
#include <algorithm>
@@ -271,7 +271,7 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
// skipping end of stream packets since they have a size of zero.
do {
// Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_.get());
+ av_frame_unref(av_frame_.get());
int frame_decoded = 0;
int result = avcodec_decode_audio4(
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h
index 81362d498fb..e32b227fb95 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_FFMPEG_CDM_AUDIO_DECODER_H_
-#define MEDIA_CDM_PPAPI_FFMPEG_CDM_AUDIO_DECODER_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FFMPEG_CDM_AUDIO_DECODER_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FFMPEG_CDM_AUDIO_DECODER_H_
#include <vector>
@@ -11,7 +11,8 @@
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
-#include "media/cdm/ppapi/clear_key_cdm_common.h"
+#include "media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
struct AVCodecContext;
struct AVFrame;
@@ -19,8 +20,6 @@ struct AVFrame;
namespace media {
class AudioBus;
class AudioTimestampHelper;
-class ScopedPtrAVFreeContext;
-class ScopedPtrAVFreeFrame;
}
namespace media {
@@ -64,8 +63,8 @@ class FFmpegCdmAudioDecoder {
ClearKeyCdmHost* const host_;
// FFmpeg structures owned by this object.
- scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
+ scoped_ptr<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
// Audio format.
int samples_per_second_;
@@ -92,4 +91,4 @@ class FFmpegCdmAudioDecoder {
} // namespace media
-#endif // MEDIA_CDM_PPAPI_FFMPEG_CDM_AUDIO_DECODER_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FFMPEG_CDM_AUDIO_DECODER_H_
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
index 7ffda24a727..942dce11e8e 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cdm/ppapi/ffmpeg_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
@@ -51,6 +51,8 @@ static AVCodecID CdmVideoCodecToCodecID(
return AV_CODEC_ID_VP8;
case cdm::VideoDecoderConfig::kCodecH264:
return AV_CODEC_ID_H264;
+ case cdm::VideoDecoderConfig::kCodecVp9:
+ return AV_CODEC_ID_VP9;
case cdm::VideoDecoderConfig::kUnknownVideoCodec:
default:
NOTREACHED() << "Unsupported cdm::VideoCodec: " << video_codec;
@@ -61,8 +63,10 @@ static AVCodecID CdmVideoCodecToCodecID(
static int CdmVideoCodecProfileToProfileID(
cdm::VideoDecoderConfig::VideoCodecProfile profile) {
switch (profile) {
- case cdm::VideoDecoderConfig::kVp8ProfileMain:
- return FF_PROFILE_UNKNOWN; // VP8 does not define an FFmpeg profile.
+ case cdm::VideoDecoderConfig::kProfileNotNeeded:
+      // Codecs that do not need a profile (e.g. VP8/VP9) do not map to an
+      // FFmpeg profile.
+ return FF_PROFILE_UNKNOWN;
case cdm::VideoDecoderConfig::kH264ProfileBaseline:
return FF_PROFILE_H264_BASELINE;
case cdm::VideoDecoderConfig::kH264ProfileMain:
@@ -222,7 +226,7 @@ cdm::Status FFmpegCdmVideoDecoder::DecodeFrame(
codec_context_->reordered_opaque = timestamp;
// Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_.get());
+ av_frame_unref(av_frame_.get());
// This is for codecs not using get_buffer to initialize
// |av_frame_->reordered_opaque|
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h
index 9ce87e6c860..446fb283fb4 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h
@@ -2,23 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_FFMPEG_CDM_VIDEO_DECODER_H_
-#define MEDIA_CDM_PPAPI_FFMPEG_CDM_VIDEO_DECODER_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FFMPEG_CDM_VIDEO_DECODER_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FFMPEG_CDM_VIDEO_DECODER_H_
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
-#include "media/cdm/ppapi/cdm_video_decoder.h"
-#include "media/cdm/ppapi/clear_key_cdm_common.h"
+#include "media/cdm/ppapi/external_clear_key/cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/clear_key_cdm_common.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
struct AVCodecContext;
struct AVFrame;
namespace media {
-class ScopedPtrAVFreeContext;
-class ScopedPtrAVFreeFrame;
-
class FFmpegCdmVideoDecoder : public CdmVideoDecoder {
public:
explicit FFmpegCdmVideoDecoder(ClearKeyCdmHost* host);
@@ -47,8 +45,8 @@ class FFmpegCdmVideoDecoder : public CdmVideoDecoder {
void ReleaseFFmpegResources();
// FFmpeg structures owned by this object.
- scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
+ scoped_ptr<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
bool is_initialized_;
@@ -59,4 +57,4 @@ class FFmpegCdmVideoDecoder : public CdmVideoDecoder {
} // namespace media
-#endif // MEDIA_CDM_PPAPI_FFMPEG_CDM_VIDEO_DECODER_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_FFMPEG_CDM_VIDEO_DECODER_H_
diff --git a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc
index cb8f19e9306..b5a804389fd 100644
--- a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cdm/ppapi/libvpx_cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
@@ -15,8 +15,8 @@
#define VPX_CODEC_DISABLE_COMPAT 1
extern "C" {
// Note: vpx_decoder.h must be first or compile will fail.
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h" // NOLINT
#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h" // NOLINT
}
// Enable USE_COPYPLANE_WITH_LIBVPX to use |CopyPlane()| instead of memcpy to
diff --git a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h
index d3ad264e638..efa0e3664b3 100644
--- a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CDM_PPAPI_LIBVPX_CDM_VIDEO_DECODER_H_
-#define MEDIA_CDM_PPAPI_LIBVPX_CDM_VIDEO_DECODER_H_
+#ifndef MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_LIBVPX_CDM_VIDEO_DECODER_H_
+#define MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_LIBVPX_CDM_VIDEO_DECODER_H_
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/cdm/ppapi/api/content_decryption_module.h"
-#include "media/cdm/ppapi/cdm_video_decoder.h"
+#include "media/cdm/ppapi/external_clear_key/cdm_video_decoder.h"
struct vpx_codec_ctx;
struct vpx_image;
@@ -52,4 +52,4 @@ class LibvpxCdmVideoDecoder : public CdmVideoDecoder {
} // namespace media
-#endif // MEDIA_CDM_PPAPI_LIBVPX_CDM_VIDEO_DECODER_H_
+#endif // MEDIA_CDM_PPAPI_EXTERNAL_CLEAR_KEY_LIBVPX_CDM_VIDEO_DECODER_H_
diff --git a/chromium/media/cdm/ppapi/supported_cdm_versions.h b/chromium/media/cdm/ppapi/supported_cdm_versions.h
index 04723d8e5eb..8de7a8c2acb 100644
--- a/chromium/media/cdm/ppapi/supported_cdm_versions.h
+++ b/chromium/media/cdm/ppapi/supported_cdm_versions.h
@@ -21,13 +21,12 @@ bool IsSupportedCdmModuleVersion(int version) {
bool IsSupportedCdmInterfaceVersion(int version) {
COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
- cdm::ContentDecryptionModule_3::kVersion,
+ cdm::ContentDecryptionModule_5::kVersion,
update_code_below);
switch(version) {
// Supported versions in decreasing order.
- case cdm::ContentDecryptionModule_3::kVersion:
- case cdm::ContentDecryptionModule_2::kVersion:
- case cdm::ContentDecryptionModule_1::kVersion:
+ case cdm::ContentDecryptionModule_5::kVersion:
+ case cdm::ContentDecryptionModule_4::kVersion:
return true;
default:
return false;
@@ -36,13 +35,12 @@ bool IsSupportedCdmInterfaceVersion(int version) {
bool IsSupportedCdmHostVersion(int version) {
COMPILE_ASSERT(cdm::ContentDecryptionModule::Host::kVersion ==
- cdm::ContentDecryptionModule_3::Host::kVersion,
+ cdm::ContentDecryptionModule_5::Host::kVersion,
update_code_below);
switch(version) {
// Supported versions in decreasing order.
- case cdm::Host_3::kVersion:
- case cdm::Host_2::kVersion:
- case cdm::Host_1::kVersion:
+ case cdm::Host_5::kVersion:
+ case cdm::Host_4::kVersion:
return true;
default:
return false;
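
These switches stay honest via the COMPILE_ASSERTs above: the canonical cdm::ContentDecryptionModule alias is pinned to the newest version, so bumping the alias breaks the build until every version list here is revisited. A sketch of how a caller might gate on these helpers (this call site is an assumption, not shown in the patch):

// Hypothetical call site: reject a CDM built against an interface version
// this adapter does not support.
if (!IsSupportedCdmInterfaceVersion(cdm_interface_version))
  return NULL;
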
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 6e7bd155cd8..d87aa820830 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
@@ -269,7 +270,7 @@ static AVSampleFormat SampleFormatToAVSampleFormat(SampleFormat sample_format) {
return AV_SAMPLE_FMT_NONE;
}
-static void AVCodecContextToAudioDecoderConfig(
+void AVCodecContextToAudioDecoderConfig(
const AVCodecContext* codec_context,
bool is_encrypted,
AudioDecoderConfig* config,
@@ -297,12 +298,6 @@ static void AVCodecContextToAudioDecoderConfig(
codec_context->seek_preroll * 1000000.0 / codec_context->sample_rate);
}
- base::TimeDelta codec_delay;
- if (codec_context->delay > 0) {
- codec_delay = base::TimeDelta::FromMicroseconds(
- codec_context->delay * 1000000.0 / codec_context->sample_rate);
- }
-
config->Initialize(codec,
sample_format,
channel_layout,
@@ -312,7 +307,7 @@ static void AVCodecContextToAudioDecoderConfig(
is_encrypted,
record_stats,
seek_preroll,
- codec_delay);
+ codec_context->delay);
if (codec != kCodecOpus) {
DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
config->bits_per_channel());
@@ -389,9 +384,12 @@ void AVStreamToVideoDecoderConfig(
visible_rect.size(), aspect_ratio.num, aspect_ratio.den);
if (record_stats) {
+  // Note the PRESUBMIT_IGNORE_UMA_MAX below; it silences the PRESUBMIT.py
+  // check for UMA enum max usage, since we're abusing
+ // UMA_HISTOGRAM_ENUMERATION to report a discrete value.
UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange",
stream->codec->color_range,
- AVCOL_RANGE_NB);
+ AVCOL_RANGE_NB); // PRESUBMIT_IGNORE_UMA_MAX
}
VideoFrame::Format format = PixelFormatToVideoFormat(stream->codec->pix_fmt);
@@ -401,6 +399,13 @@ void AVStreamToVideoDecoderConfig(
coded_size = natural_size;
}
+ // Pad out |coded_size| for subsampled YUV formats.
+ if (format != VideoFrame::YV24) {
+ coded_size.set_width((coded_size.width() + 1) / 2 * 2);
+ if (format != VideoFrame::YV16)
+ coded_size.set_height((coded_size.height() + 1) / 2 * 2);
+ }
+
bool is_encrypted = false;
AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL, 0);
if (key)
@@ -514,6 +519,8 @@ VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
switch (pixel_format) {
case PIX_FMT_YUV422P:
return VideoFrame::YV16;
+ case PIX_FMT_YUV444P:
+ return VideoFrame::YV24;
case PIX_FMT_YUV420P:
return VideoFrame::YV12;
case PIX_FMT_YUVJ420P:
@@ -536,10 +543,45 @@ PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
return PIX_FMT_YUVJ420P;
case VideoFrame::YV12A:
return PIX_FMT_YUVA420P;
+ case VideoFrame::YV24:
+ return PIX_FMT_YUV444P;
default:
DVLOG(1) << "Unsupported VideoFrame::Format: " << video_format;
}
return PIX_FMT_NONE;
}
+bool FFmpegUTCDateToTime(const char* date_utc,
+ base::Time* out) {
+ DCHECK(date_utc);
+ DCHECK(out);
+
+ std::vector<std::string> fields;
+ std::vector<std::string> date_fields;
+ std::vector<std::string> time_fields;
+ base::Time::Exploded exploded;
+ exploded.millisecond = 0;
+
+ // TODO(acolwell): Update this parsing code when FFmpeg returns sub-second
+ // information.
+ if ((Tokenize(date_utc, " ", &fields) == 2) &&
+ (Tokenize(fields[0], "-", &date_fields) == 3) &&
+ (Tokenize(fields[1], ":", &time_fields) == 3) &&
+ base::StringToInt(date_fields[0], &exploded.year) &&
+ base::StringToInt(date_fields[1], &exploded.month) &&
+ base::StringToInt(date_fields[2], &exploded.day_of_month) &&
+ base::StringToInt(time_fields[0], &exploded.hour) &&
+ base::StringToInt(time_fields[1], &exploded.minute) &&
+ base::StringToInt(time_fields[2], &exploded.second)) {
+ base::Time parsed_time = base::Time::FromUTCExploded(exploded);
+ if (parsed_time.is_null())
+ return false;
+
+ *out = parsed_time;
+ return true;
+ }
+
+ return false;
+}
+
} // namespace media
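
FFmpegUTCDateToTime() above accepts only the exact "YYYY-MM-DD HH:MM:SS" layout; anything else (extra fields, fractional seconds) fails tokenization or integer parsing and returns false. Illustrative usage with a made-up date value:

// Hypothetical caller: parse an FFmpeg-style UTC metadata string.
base::Time timeline_offset;
if (media::FFmpegUTCDateToTime("2012-11-10 12:34:56", &timeline_offset)) {
  // Success: |timeline_offset| now holds the parsed UTC time.
} else {
  // Malformed or out-of-range date; |timeline_offset| is unchanged.
}
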
diff --git a/chromium/media/ffmpeg/ffmpeg_common.h b/chromium/media/ffmpeg/ffmpeg_common.h
index 9a98c85aa79..ef1a7b6a762 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.h
+++ b/chromium/media/ffmpeg/ffmpeg_common.h
@@ -15,9 +15,18 @@
#include "media/base/media_export.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
// Include FFmpeg header files.
extern "C" {
+// Disable deprecated features which result in spammy compile warnings. This
+// list of defines must mirror those in the 'defines' section of the ffmpeg.gyp
+// file or the headers below will generate different structures.
+#define FF_API_PIX_FMT_DESC 0
+#define FF_API_OLD_DECODE_AUDIO 0
+#define FF_API_DESTRUCT_PACKET 0
+#define FF_API_GET_BUFFER 0
+
// Temporarily disable possible loss of data warning.
// TODO(scherkus): fix and upstream the compiler warnings.
MSVC_PUSH_DISABLE_WARNING(4244);
@@ -37,48 +46,31 @@ namespace media {
class AudioDecoderConfig;
class VideoDecoderConfig;
-// Wraps FFmpeg's av_free() in a class that can be passed as a template argument
-// to scoped_ptr_malloc.
-class ScopedPtrAVFree {
- public:
- inline void operator()(void* x) const {
- av_free(x);
- }
-};
-
-// This assumes that the AVPacket being captured was allocated outside of
-// FFmpeg via the new operator. Do not use this with AVPacket instances that
-// are allocated via malloc() or av_malloc().
-class ScopedPtrAVFreePacket {
- public:
- inline void operator()(void* x) const {
- AVPacket* packet = static_cast<AVPacket*>(x);
- av_free_packet(packet);
- delete packet;
- }
-};
-
-// Frees an AVCodecContext object in a class that can be passed as a Deleter
-// argument to scoped_ptr_malloc.
-class ScopedPtrAVFreeContext {
- public:
- inline void operator()(void* x) const {
- AVCodecContext* codec_context = static_cast<AVCodecContext*>(x);
- av_free(codec_context->extradata);
- avcodec_close(codec_context);
- av_free(codec_context);
- }
-};
-
-// Frees an AVFrame object in a class that can be passed as a Deleter argument
-// to scoped_ptr_malloc.
-class ScopedPtrAVFreeFrame {
- public:
- inline void operator()(void* x) const {
- AVFrame* frame = static_cast<AVFrame*>(x);
- avcodec_free_frame(&frame);
- }
-};
+// The following implement the deleters declared in ffmpeg_deleters.h (which
+// contains the declarations needed for use with |scoped_ptr| without #include
+// "pollution").
+
+inline void ScopedPtrAVFree::operator()(void* x) const {
+ av_free(x);
+}
+
+inline void ScopedPtrAVFreePacket::operator()(void* x) const {
+ AVPacket* packet = static_cast<AVPacket*>(x);
+ av_free_packet(packet);
+ delete packet;
+}
+
+inline void ScopedPtrAVFreeContext::operator()(void* x) const {
+ AVCodecContext* codec_context = static_cast<AVCodecContext*>(x);
+ av_free(codec_context->extradata);
+ avcodec_close(codec_context);
+ av_free(codec_context);
+}
+
+inline void ScopedPtrAVFreeFrame::operator()(void* x) const {
+ AVFrame* frame = static_cast<AVFrame*>(x);
+ av_frame_free(&frame);
+}
// Converts an int64 timestamp in |time_base| units to a base::TimeDelta.
// For example if |timestamp| equals 11025 and |time_base| equals {1, 44100}
@@ -110,22 +102,34 @@ void VideoDecoderConfigToAVCodecContext(
const VideoDecoderConfig& config,
AVCodecContext* codec_context);
+MEDIA_EXPORT void AVCodecContextToAudioDecoderConfig(
+ const AVCodecContext* codec_context,
+ bool is_encrypted,
+ AudioDecoderConfig* config,
+ bool record_stats);
+
// Converts FFmpeg's channel layout to chrome's ChannelLayout. |channels| can
// be used when FFmpeg's channel layout is not informative in order to make a
// good guess about the plausible channel layout based on number of channels.
-ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout,
- int channels);
+MEDIA_EXPORT ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout,
+ int channels);
// Converts FFmpeg's audio sample format to Chrome's SampleFormat.
MEDIA_EXPORT SampleFormat
AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
// Converts FFmpeg's pixel formats to its corresponding supported video format.
-VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format);
+MEDIA_EXPORT VideoFrame::Format PixelFormatToVideoFormat(
+ PixelFormat pixel_format);
// Converts video formats to its corresponding FFmpeg's pixel formats.
PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format);
+// Converts an FFmpeg UTC date string (YYYY-MM-DD HH:MM:SS) to base::Time.
+// Returns true and sets |*out| if |date_utc| contains a valid
+// date string. Otherwise returns false and |*out| is left unmodified.
+MEDIA_EXPORT bool FFmpegUTCDateToTime(const char* date_utc, base::Time* out);
+
} // namespace media
#endif // MEDIA_FFMPEG_FFMPEG_COMMON_H_
diff --git a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
index 2fa61ace486..31397df7fac 100644
--- a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -97,4 +97,58 @@ TEST_F(FFmpegCommonTest, VerifyFormatSizes) {
}
}
+TEST_F(FFmpegCommonTest, UTCDateToTime_Valid) {
+ base::Time result;
+ EXPECT_TRUE(FFmpegUTCDateToTime("2012-11-10 12:34:56", &result));
+
+ base::Time::Exploded exploded;
+ result.UTCExplode(&exploded);
+ EXPECT_TRUE(exploded.HasValidValues());
+ EXPECT_EQ(2012, exploded.year);
+ EXPECT_EQ(11, exploded.month);
+ EXPECT_EQ(6, exploded.day_of_week);
+ EXPECT_EQ(10, exploded.day_of_month);
+ EXPECT_EQ(12, exploded.hour);
+ EXPECT_EQ(34, exploded.minute);
+ EXPECT_EQ(56, exploded.second);
+ EXPECT_EQ(0, exploded.millisecond);
+}
+
+TEST_F(FFmpegCommonTest, UTCDateToTime_Invalid) {
+ const char* invalid_date_strings[] = {
+ "",
+ "2012-11-10",
+ "12:34:56",
+ "-- ::",
+ "2012-11-10 12:34:",
+ "2012-11-10 12::56",
+ "2012-11-10 :34:56",
+ "2012-11- 12:34:56",
+ "2012--10 12:34:56",
+ "-11-10 12:34:56",
+ "2012-11 12:34:56",
+ "2012-11-10-12 12:34:56",
+ "2012-11-10 12:34",
+ "2012-11-10 12:34:56:78",
+ "ABCD-11-10 12:34:56",
+ "2012-EF-10 12:34:56",
+ "2012-11-GH 12:34:56",
+ "2012-11-10 IJ:34:56",
+ "2012-11-10 12:JL:56",
+ "2012-11-10 12:34:MN",
+ "2012-11-10 12:34:56.123",
+ "2012-11-1012:34:56",
+ "2012-11-10 12:34:56 UTC",
+ };
+
+ for (size_t i = 0; i < arraysize(invalid_date_strings); ++i) {
+ const char* date_string = invalid_date_strings[i];
+ base::Time result;
+ EXPECT_FALSE(FFmpegUTCDateToTime(date_string, &result))
+ << "date_string '" << date_string << "'";
+ EXPECT_TRUE(result.is_null());
+ }
+}
+
+
} // namespace media
diff --git a/chromium/media/ffmpeg/ffmpeg_deleters.h b/chromium/media/ffmpeg/ffmpeg_deleters.h
new file mode 100644
index 00000000000..3429e05c7a6
--- /dev/null
+++ b/chromium/media/ffmpeg/ffmpeg_deleters.h
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains declarations for deleters for use with |scoped_ptr|. To
+// avoid requiring additional #includes, the (inline) definitions are in
+// ffmpeg_common.h. (Forward declarations of deleters aren't sufficient for
+// |scoped_ptr|.)
+
+#ifndef MEDIA_FFMPEG_FFMPEG_DELETERS_H_
+#define MEDIA_FFMPEG_FFMPEG_DELETERS_H_
+
+namespace media {
+
+// Wraps FFmpeg's av_free() in a class that can be passed as a deleter
+// argument to scoped_ptr.
+struct ScopedPtrAVFree {
+ void operator()(void* x) const;
+};
+
+// This assumes that the AVPacket being captured was allocated outside of
+// FFmpeg via the new operator. Do not use this with AVPacket instances that
+// are allocated via malloc() or av_malloc().
+struct ScopedPtrAVFreePacket {
+ void operator()(void* x) const;
+};
+
+// Frees an AVCodecContext object in a class that can be passed as a Deleter
+// argument to scoped_ptr.
+struct ScopedPtrAVFreeContext {
+ void operator()(void* x) const;
+};
+
+// Frees an AVFrame object in a class that can be passed as a Deleter argument
+// to scoped_ptr.
+struct ScopedPtrAVFreeFrame {
+ void operator()(void* x) const;
+};
+
+} // namespace media
+
+#endif // MEDIA_FFMPEG_FFMPEG_DELETERS_H_
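How these deleters pair with scoped_ptr in practice, sketched from the call sites elsewhere in this patch (e.g. audio_file_reader.cc); include paths follow Chromium conventions:

// Sketch: scoped_ptr + FFmpeg deleter.
#include "base/memory/scoped_ptr.h"
#include "media/ffmpeg/ffmpeg_common.h"  // Brings in the inline definitions.

void ExampleScopedFrame() {
  // av_frame_alloc() pairs with ScopedPtrAVFreeFrame (av_frame_free()).
  scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> frame(av_frame_alloc());
  // |frame| is freed automatically when it goes out of scope.
}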
diff --git a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
index 0b68fd0896c..311a28e103b 100644
--- a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
+++ b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
@@ -323,6 +323,7 @@ FLAKY_FFMPEG_TEST_CASE(BIG_MEM_4, "security/looping4.mov");
FLAKY_FFMPEG_TEST_CASE(Cr99652, "security/99652.webm");
FLAKY_FFMPEG_TEST_CASE(Cr100464, "security/100464.webm");
FLAKY_FFMPEG_TEST_CASE(Cr111342, "security/111342.ogm");
+FLAKY_FFMPEG_TEST_CASE(Cr368980, "security/368980.mp4");
FLAKY_FFMPEG_TEST_CASE(OGV_0, "security/big_dims.ogv");
FLAKY_FFMPEG_TEST_CASE(OGV_3, "security/smclock_1_0.ogv");
FLAKY_FFMPEG_TEST_CASE(OGV_4, "security/smclock.ogv.1.0.ogv");
diff --git a/chromium/media/ffmpeg/ffmpeg_unittest.cc b/chromium/media/ffmpeg/ffmpeg_unittest.cc
index 9f24845dc98..dbc28c55a93 100644
--- a/chromium/media/ffmpeg/ffmpeg_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_unittest.cc
@@ -233,7 +233,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
memcpy(&packet, audio_packets_.peek(), sizeof(packet));
}
- avcodec_get_frame_defaults(audio_buffer_.get());
+ av_frame_unref(audio_buffer_.get());
result = avcodec_decode_audio4(av_audio_context(), audio_buffer_.get(),
&got_audio, &packet);
if (!audio_packets_.empty()) {
@@ -287,7 +287,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
memcpy(&packet, video_packets_.peek(), sizeof(packet));
}
- avcodec_get_frame_defaults(video_buffer_.get());
+ av_frame_unref(video_buffer_.get());
av_video_context()->reordered_opaque = packet.pts;
result = avcodec_decode_video2(av_video_context(), video_buffer_.get(),
&got_picture, &packet);
@@ -382,8 +382,8 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
AVPacketQueue audio_packets_;
AVPacketQueue video_packets_;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> audio_buffer_;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> video_buffer_;
+ scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> audio_buffer_;
+ scoped_ptr<AVFrame, media::ScopedPtrAVFreeFrame> video_buffer_;
int64 decoded_audio_time_;
int64 decoded_audio_duration_;
diff --git a/chromium/media/filters/audio_clock.cc b/chromium/media/filters/audio_clock.cc
new file mode 100644
index 00000000000..0454e85e8f5
--- /dev/null
+++ b/chromium/media/filters/audio_clock.cc
@@ -0,0 +1,135 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/audio_clock.h"
+
+#include "base/logging.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+AudioClock::AudioClock(int sample_rate)
+ : sample_rate_(sample_rate), last_endpoint_timestamp_(kNoTimestamp()) {
+}
+
+AudioClock::~AudioClock() {
+}
+
+void AudioClock::WroteAudio(int frames,
+ int delay_frames,
+ float playback_rate,
+ base::TimeDelta timestamp) {
+ CHECK_GT(playback_rate, 0);
+ CHECK(timestamp != kNoTimestamp());
+ DCHECK_GE(frames, 0);
+ DCHECK_GE(delay_frames, 0);
+
+ if (last_endpoint_timestamp_ == kNoTimestamp())
+ PushBufferedAudio(delay_frames, 0, kNoTimestamp());
+
+ TrimBufferedAudioToMatchDelay(delay_frames);
+ PushBufferedAudio(frames, playback_rate, timestamp);
+
+ last_endpoint_timestamp_ = timestamp;
+}
+
+void AudioClock::WroteSilence(int frames, int delay_frames) {
+ DCHECK_GE(frames, 0);
+ DCHECK_GE(delay_frames, 0);
+
+ if (last_endpoint_timestamp_ == kNoTimestamp())
+ PushBufferedAudio(delay_frames, 0, kNoTimestamp());
+
+ TrimBufferedAudioToMatchDelay(delay_frames);
+ PushBufferedAudio(frames, 0, kNoTimestamp());
+}
+
+base::TimeDelta AudioClock::CurrentMediaTimestamp() const {
+ int silence_frames = 0;
+ for (size_t i = 0; i < buffered_audio_.size(); ++i) {
+ // Account for silence ahead of the buffer closest to being played.
+ if (buffered_audio_[i].playback_rate == 0) {
+ silence_frames += buffered_audio_[i].frames;
+ continue;
+ }
+
+ // Multiply by playback rate as frames represent time-scaled audio.
+ return buffered_audio_[i].endpoint_timestamp -
+ base::TimeDelta::FromMicroseconds(
+ ((buffered_audio_[i].frames * buffered_audio_[i].playback_rate) +
+ silence_frames) /
+ sample_rate_ * base::Time::kMicrosecondsPerSecond);
+ }
+
+ // Either:
+  // 1) AudioClock is uninitialized and we'll return kNoTimestamp()
+ // 2) All previously buffered audio has been replaced by silence,
+ // meaning media time is now at the last endpoint
+ return last_endpoint_timestamp_;
+}
+
+void AudioClock::TrimBufferedAudioToMatchDelay(int delay_frames) {
+ if (buffered_audio_.empty())
+ return;
+
+ size_t i = buffered_audio_.size() - 1;
+ while (true) {
+ if (buffered_audio_[i].frames <= delay_frames) {
+ // Reached the end before accounting for all of |delay_frames|. This
+ // means we haven't written enough audio data yet to account for hardware
+ // delay. In this case, do nothing.
+ if (i == 0)
+ return;
+
+ // Keep accounting for |delay_frames|.
+ delay_frames -= buffered_audio_[i].frames;
+ --i;
+ continue;
+ }
+
+    // All of |delay_frames| has been accounted for: adjust the number of
+    // frames left in the current buffer. All preceding elements with index
+    // < |i| should be considered played out and hence discarded.
+ buffered_audio_[i].frames = delay_frames;
+ break;
+ }
+
+  // At this point |i| points at the new head of |buffered_audio_|; however,
+  // if it contains no audio it should be removed as well.
+ if (buffered_audio_[i].frames == 0)
+ ++i;
+
+ buffered_audio_.erase(buffered_audio_.begin(), buffered_audio_.begin() + i);
+}
+
+void AudioClock::PushBufferedAudio(int frames,
+ float playback_rate,
+ base::TimeDelta endpoint_timestamp) {
+ if (playback_rate == 0)
+ DCHECK(endpoint_timestamp == kNoTimestamp());
+
+ if (frames == 0)
+ return;
+
+ // Avoid creating extra elements where possible.
+ if (!buffered_audio_.empty() &&
+ buffered_audio_.back().playback_rate == playback_rate) {
+ buffered_audio_.back().frames += frames;
+ buffered_audio_.back().endpoint_timestamp = endpoint_timestamp;
+ return;
+ }
+
+ buffered_audio_.push_back(
+ BufferedAudio(frames, playback_rate, endpoint_timestamp));
+}
+
+AudioClock::BufferedAudio::BufferedAudio(int frames,
+ float playback_rate,
+ base::TimeDelta endpoint_timestamp)
+ : frames(frames),
+ playback_rate(playback_rate),
+ endpoint_timestamp(endpoint_timestamp) {
+}
+
+} // namespace media
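A worked trace of CurrentMediaTimestamp(), matching the first expectation in the unit test added below (sample rate 10, so 10 frames equal one second):

// After the first WroteAudio(10, 20, 1.0) with endpoint 1000 ms, the queue
// holds 20 frames of silence (the initial hardware delay) followed by
// 10 frames of real audio, so:
//
//   1000 ms - (10 * 1.0 + 20) / 10 * 1,000,000 us = 1000 ms - 3000 ms
//                                                 = -2000 ms
//
// i.e. the clock reports a negative time equal to the hardware delay until
// those delay frames drain.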
diff --git a/chromium/media/filters/audio_clock.h b/chromium/media/filters/audio_clock.h
new file mode 100644
index 00000000000..a0d8212f948
--- /dev/null
+++ b/chromium/media/filters/audio_clock.h
@@ -0,0 +1,76 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_AUDIO_CLOCK_H_
+#define MEDIA_FILTERS_AUDIO_CLOCK_H_
+
+#include <deque>
+
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Models a queue of buffered audio in a playback pipeline for use with
+// estimating the amount of delay in wall clock time. Takes changes in playback
+// rate into account to handle scenarios where multiple rates may be present in
+// a playback pipeline with large delay.
+class MEDIA_EXPORT AudioClock {
+ public:
+ explicit AudioClock(int sample_rate);
+ ~AudioClock();
+
+ // |frames| amount of audio data scaled to |playback_rate| was written.
+ // |delay_frames| is the current amount of hardware delay.
+ // |timestamp| is the endpoint media timestamp of the audio data written.
+ void WroteAudio(int frames,
+ int delay_frames,
+ float playback_rate,
+ base::TimeDelta timestamp);
+
+ // |frames| amount of silence was written.
+ // |delay_frames| is the current amount of hardware delay.
+ void WroteSilence(int frames, int delay_frames);
+
+ // Calculates the current media timestamp taking silence and changes in
+ // playback rate into account.
+ base::TimeDelta CurrentMediaTimestamp() const;
+
+ // Returns the last endpoint timestamp provided to WroteAudio().
+ base::TimeDelta last_endpoint_timestamp() const {
+ return last_endpoint_timestamp_;
+ }
+
+ private:
+ void TrimBufferedAudioToMatchDelay(int delay_frames);
+ void PushBufferedAudio(int frames,
+ float playback_rate,
+ base::TimeDelta endpoint_timestamp);
+
+ const int sample_rate_;
+
+ // Initially set to kNoTimestamp(), otherwise is the last endpoint timestamp
+ // delivered to WroteAudio(). A copy is kept outside of |buffered_audio_| to
+ // handle the case where all of |buffered_audio_| has been replaced with
+ // silence.
+ base::TimeDelta last_endpoint_timestamp_;
+
+ struct BufferedAudio {
+ BufferedAudio(int frames,
+ float playback_rate,
+ base::TimeDelta endpoint_timestamp);
+
+ int frames;
+ float playback_rate;
+ base::TimeDelta endpoint_timestamp;
+ };
+
+ std::deque<BufferedAudio> buffered_audio_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioClock);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_AUDIO_CLOCK_H_
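A minimal sketch of the intended call pattern from a render callback; the surrounding function and parameter names are assumed for illustration:

// Sketch: feeding AudioClock from a render loop.
#include "media/filters/audio_clock.h"

void OnFramesWritten(media::AudioClock* clock,
                     int frames_filled,
                     int frames_of_silence,
                     int hardware_delay_frames,
                     float playback_rate,
                     base::TimeDelta endpoint_timestamp) {
  if (frames_filled > 0) {
    clock->WroteAudio(frames_filled, hardware_delay_frames, playback_rate,
                      endpoint_timestamp);
  }
  if (frames_of_silence > 0)
    clock->WroteSilence(frames_of_silence, hardware_delay_frames);

  // kNoTimestamp() until the first WroteAudio() call.
  base::TimeDelta media_time = clock->CurrentMediaTimestamp();
  (void)media_time;
}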
diff --git a/chromium/media/filters/audio_clock_unittest.cc b/chromium/media/filters/audio_clock_unittest.cc
new file mode 100644
index 00000000000..a924a24e62e
--- /dev/null
+++ b/chromium/media/filters/audio_clock_unittest.cc
@@ -0,0 +1,177 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+#include "media/filters/audio_clock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class AudioClockTest : public testing::Test {
+ public:
+ AudioClockTest()
+ : sample_rate_(10),
+ timestamp_helper_(sample_rate_),
+ clock_(sample_rate_) {
+ timestamp_helper_.SetBaseTimestamp(base::TimeDelta());
+ }
+
+ virtual ~AudioClockTest() {}
+
+ void WroteAudio(int frames, int delay_frames, float playback_rate) {
+ timestamp_helper_.AddFrames(static_cast<int>(frames * playback_rate));
+ clock_.WroteAudio(
+ frames, delay_frames, playback_rate, timestamp_helper_.GetTimestamp());
+ }
+
+ void WroteSilence(int frames, int delay_frames) {
+ clock_.WroteSilence(frames, delay_frames);
+ }
+
+ int CurrentMediaTimestampInMilliseconds() {
+ return clock_.CurrentMediaTimestamp().InMilliseconds();
+ }
+
+ int LastEndpointTimestampInMilliseconds() {
+ return clock_.last_endpoint_timestamp().InMilliseconds();
+ }
+
+ const int sample_rate_;
+ AudioTimestampHelper timestamp_helper_;
+ AudioClock clock_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioClockTest);
+};
+
+TEST_F(AudioClockTest, TimestampsStartAtNoTimestamp) {
+ EXPECT_EQ(kNoTimestamp(), clock_.CurrentMediaTimestamp());
+ EXPECT_EQ(kNoTimestamp(), clock_.last_endpoint_timestamp());
+}
+
+TEST_F(AudioClockTest, Playback) {
+ // The first time we write data we should expect a negative time matching the
+ // current delay.
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(-2000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(1000, LastEndpointTimestampInMilliseconds());
+
+ // The media time should keep advancing as we write data.
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(-1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(2000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(4000, LastEndpointTimestampInMilliseconds());
+
+ // Introduce a rate change to slow down time. Current time will keep advancing
+ // by one second until it hits the slowed down audio.
+ WroteAudio(10, 20, 0.5);
+ EXPECT_EQ(2000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(4500, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 0.5);
+ EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(5000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 0.5);
+ EXPECT_EQ(4000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(5500, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 0.5);
+ EXPECT_EQ(4500, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(6000, LastEndpointTimestampInMilliseconds());
+
+ // Introduce a rate change to speed up time. Current time will keep advancing
+  // by half a second until it hits the sped up audio.
+ WroteAudio(10, 20, 2);
+ EXPECT_EQ(5000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(8000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 2);
+ EXPECT_EQ(5500, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(10000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 2);
+ EXPECT_EQ(6000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(12000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 20, 2);
+ EXPECT_EQ(8000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds());
+
+ // Write silence to simulate reaching end of stream.
+ WroteSilence(10, 20);
+ EXPECT_EQ(10000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds());
+
+ WroteSilence(10, 20);
+ EXPECT_EQ(12000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds());
+
+ WroteSilence(10, 20);
+ EXPECT_EQ(14000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds());
+
+ // At this point media time should stop increasing.
+ WroteSilence(10, 20);
+ EXPECT_EQ(14000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds());
+}
+
+TEST_F(AudioClockTest, AlternatingAudioAndSilence) {
+ // Buffer #1: [0, 1000)
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(-2000, CurrentMediaTimestampInMilliseconds());
+
+ // Buffer #2: 1000ms of silence
+ WroteSilence(10, 20);
+ EXPECT_EQ(-1000, CurrentMediaTimestampInMilliseconds());
+
+ // Buffer #3: [1000, 2000), buffer #1 is at front
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+
+ // Buffer #4: 1000ms of silence, time shouldn't advance
+ WroteSilence(10, 20);
+ EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+
+ // Buffer #5: [2000, 3000), buffer #3 is at front
+ WroteAudio(10, 20, 1.0);
+ EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+}
+
+TEST_F(AudioClockTest, ZeroDelay) {
+ // The first time we write data we should expect the first timestamp
+ // immediately.
+ WroteAudio(10, 0, 1.0);
+ EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(1000, LastEndpointTimestampInMilliseconds());
+
+ // Ditto for all subsequent buffers.
+ WroteAudio(10, 0, 1.0);
+ EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(2000, LastEndpointTimestampInMilliseconds());
+
+ WroteAudio(10, 0, 1.0);
+ EXPECT_EQ(2000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds());
+
+ // Ditto for silence.
+ WroteSilence(10, 0);
+ EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds());
+
+ WroteSilence(10, 0);
+ EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds());
+}
+
+} // namespace media
diff --git a/chromium/media/filters/audio_decoder_selector.cc b/chromium/media/filters/audio_decoder_selector.cc
deleted file mode 100644
index a08d3c79c3c..00000000000
--- a/chromium/media/filters/audio_decoder_selector.cc
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/audio_decoder_selector.h"
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/demuxer_stream.h"
-#include "media/base/pipeline.h"
-#include "media/filters/decrypting_audio_decoder.h"
-#include "media/filters/decrypting_demuxer_stream.h"
-
-namespace media {
-
-AudioDecoderSelector::AudioDecoderSelector(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- decoders_(decoders.Pass()),
- set_decryptor_ready_cb_(set_decryptor_ready_cb),
- input_stream_(NULL),
- weak_ptr_factory_(this) {
-}
-
-AudioDecoderSelector::~AudioDecoderSelector() {
- DVLOG(2) << __FUNCTION__;
-}
-
-void AudioDecoderSelector::SelectAudioDecoder(
- DemuxerStream* stream,
- const StatisticsCB& statistics_cb,
- const SelectDecoderCB& select_decoder_cb) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(stream);
-
- // Make sure |select_decoder_cb| runs on a different execution stack.
- select_decoder_cb_ = BindToCurrentLoop(select_decoder_cb);
-
- const AudioDecoderConfig& config = stream->audio_decoder_config();
- if (!config.IsValidConfig()) {
- DLOG(ERROR) << "Invalid audio stream config.";
- ReturnNullDecoder();
- return;
- }
-
- input_stream_ = stream;
- statistics_cb_ = statistics_cb;
-
- if (!config.is_encrypted()) {
- InitializeDecoder();
- return;
- }
-
- // This could happen if Encrypted Media Extension (EME) is not enabled.
- if (set_decryptor_ready_cb_.is_null()) {
- ReturnNullDecoder();
- return;
- }
-
- audio_decoder_.reset(new DecryptingAudioDecoder(
- message_loop_, set_decryptor_ready_cb_));
-
- audio_decoder_->Initialize(
- input_stream_,
- base::Bind(&AudioDecoderSelector::DecryptingAudioDecoderInitDone,
- weak_ptr_factory_.GetWeakPtr()),
- statistics_cb_);
-}
-
-void AudioDecoderSelector::Abort() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // This could happen when SelectAudioDecoder() was not called or when
- // |select_decoder_cb_| was already posted but not fired (e.g. in the
- // message loop queue).
- if (select_decoder_cb_.is_null())
- return;
-
- // We must be trying to initialize the |audio_decoder_| or the
- // |decrypted_stream_|. Invalid all weak pointers so that all initialization
- // callbacks won't fire.
- weak_ptr_factory_.InvalidateWeakPtrs();
-
- if (audio_decoder_) {
- // AudioDecoder doesn't provide a Stop() method. Also, |decrypted_stream_|
- // is either NULL or already initialized. We don't need to Stop()
- // |decrypted_stream_| in either case.
- ReturnNullDecoder();
- return;
- }
-
- if (decrypted_stream_) {
- decrypted_stream_->Stop(
- base::Bind(&AudioDecoderSelector::ReturnNullDecoder,
- weak_ptr_factory_.GetWeakPtr()));
- return;
- }
-
- NOTREACHED();
-}
-
-void AudioDecoderSelector::DecryptingAudioDecoderInitDone(
- PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (status == PIPELINE_OK) {
- base::ResetAndReturn(&select_decoder_cb_).Run(
- audio_decoder_.Pass(), scoped_ptr<DecryptingDemuxerStream>());
- return;
- }
-
- audio_decoder_.reset();
-
- decrypted_stream_.reset(new DecryptingDemuxerStream(
- message_loop_, set_decryptor_ready_cb_));
-
- decrypted_stream_->Initialize(
- input_stream_,
- base::Bind(&AudioDecoderSelector::DecryptingDemuxerStreamInitDone,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void AudioDecoderSelector::DecryptingDemuxerStreamInitDone(
- PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (status != PIPELINE_OK) {
- ReturnNullDecoder();
- return;
- }
-
- DCHECK(!decrypted_stream_->audio_decoder_config().is_encrypted());
- input_stream_ = decrypted_stream_.get();
- InitializeDecoder();
-}
-
-void AudioDecoderSelector::InitializeDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!audio_decoder_);
-
- if (decoders_.empty()) {
- ReturnNullDecoder();
- return;
- }
-
- audio_decoder_.reset(decoders_.front());
- decoders_.weak_erase(decoders_.begin());
-
- audio_decoder_->Initialize(input_stream_,
- base::Bind(&AudioDecoderSelector::DecoderInitDone,
- weak_ptr_factory_.GetWeakPtr()),
- statistics_cb_);
-}
-
-void AudioDecoderSelector::DecoderInitDone(PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (status != PIPELINE_OK) {
- audio_decoder_.reset();
- InitializeDecoder();
- return;
- }
-
- base::ResetAndReturn(&select_decoder_cb_).Run(audio_decoder_.Pass(),
- decrypted_stream_.Pass());
-}
-
-void AudioDecoderSelector::ReturnNullDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<AudioDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
-}
-
-} // namespace media
diff --git a/chromium/media/filters/audio_decoder_selector.h b/chromium/media/filters/audio_decoder_selector.h
deleted file mode 100644
index 338aa6c91f7..00000000000
--- a/chromium/media/filters/audio_decoder_selector.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_AUDIO_DECODER_SELECTOR_H_
-#define MEDIA_FILTERS_AUDIO_DECODER_SELECTOR_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
-#include "base/memory/weak_ptr.h"
-#include "media/base/decryptor.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline_status.h"
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace media {
-
-class AudioDecoder;
-class DecoderBuffer;
-class DecryptingDemuxerStream;
-class DemuxerStream;
-
-// AudioDecoderSelector (creates if necessary and) initializes the proper
-// AudioDecoder for a given DemuxerStream. If the given DemuxerStream is
-// encrypted, a DecryptingDemuxerStream may also be created.
-class MEDIA_EXPORT AudioDecoderSelector {
- public:
- // Indicates completion of AudioDecoder selection.
- // - First parameter: The initialized AudioDecoder. If it's set to NULL, then
- // AudioDecoder initialization failed.
- // - Second parameter: The initialized DecryptingDemuxerStream. If it's not
- // NULL, then a DecryptingDemuxerStream is created and initialized to do
- // decryption for the initialized AudioDecoder.
- // Note: The caller owns selected AudioDecoder and DecryptingDemuxerStream.
- // The caller should call DecryptingDemuxerStream::Reset() before
- // calling AudioDecoder::Reset() to release any pending decryption or read.
- typedef base::Callback<
- void(scoped_ptr<AudioDecoder>,
- scoped_ptr<DecryptingDemuxerStream>)> SelectDecoderCB;
-
- // |decoders| contains the AudioDecoders to use when initializing.
- //
- // |set_decryptor_ready_cb| is optional. If |set_decryptor_ready_cb| is null,
- // no decryptor will be available to perform decryption.
- AudioDecoderSelector(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
- ~AudioDecoderSelector();
-
- // Initializes and selects an AudioDecoder that can decode the |stream|.
- // Selected AudioDecoder (and DecryptingDemuxerStream) is returned via
- // the |select_decoder_cb|.
- void SelectAudioDecoder(DemuxerStream* stream,
- const StatisticsCB& statistics_cb,
- const SelectDecoderCB& select_decoder_cb);
-
- // Aborts pending AudioDecoder selection and fires |select_decoder_cb| with
- // NULL and NULL immediately if it's pending.
- void Abort();
-
- private:
- void DecryptingAudioDecoderInitDone(PipelineStatus status);
- void DecryptingDemuxerStreamInitDone(PipelineStatus status);
- void InitializeDecoder();
- void DecoderInitDone(PipelineStatus status);
- void ReturnNullDecoder();
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- ScopedVector<AudioDecoder> decoders_;
- SetDecryptorReadyCB set_decryptor_ready_cb_;
-
- DemuxerStream* input_stream_;
- StatisticsCB statistics_cb_;
- SelectDecoderCB select_decoder_cb_;
-
- scoped_ptr<AudioDecoder> audio_decoder_;
- scoped_ptr<DecryptingDemuxerStream> decrypted_stream_;
-
- base::WeakPtrFactory<AudioDecoderSelector> weak_ptr_factory_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AudioDecoderSelector);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_AUDIO_DECODER_SELECTOR_H_
diff --git a/chromium/media/filters/audio_decoder_selector_unittest.cc b/chromium/media/filters/audio_decoder_selector_unittest.cc
index be2daff6385..56005822b10 100644
--- a/chromium/media/filters/audio_decoder_selector_unittest.cc
+++ b/chromium/media/filters/audio_decoder_selector_unittest.cc
@@ -8,7 +8,8 @@
#include "base/message_loop/message_loop.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
-#include "media/filters/audio_decoder_selector.h"
+#include "media/base/test_helpers.h"
+#include "media/filters/decoder_selector.h"
#include "media/filters/decrypting_demuxer_stream.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -43,15 +44,21 @@ class AudioDecoderSelectorTest : public ::testing::Test {
all_decoders_.push_back(decoder_2_);
}
- MOCK_METHOD1(OnStatistics, void(const PipelineStatistics&));
+ ~AudioDecoderSelectorTest() {
+ if (selected_decoder_)
+ selected_decoder_->Stop();
+
+ message_loop_.RunUntilIdle();
+ }
+
MOCK_METHOD1(SetDecryptorReadyCallback, void(const media::DecryptorReadyCB&));
MOCK_METHOD2(OnDecoderSelected,
void(AudioDecoder*, DecryptingDemuxerStream*));
- void MockOnDecoderSelected(
- scoped_ptr<AudioDecoder> decoder,
- scoped_ptr<DecryptingDemuxerStream> stream) {
+ void MockOnDecoderSelected(scoped_ptr<AudioDecoder> decoder,
+ scoped_ptr<DecryptingDemuxerStream> stream) {
OnDecoderSelected(decoder.get(), stream.get());
+ selected_decoder_ = decoder.Pass();
}
void UseClearStream() {
@@ -91,8 +98,9 @@ class AudioDecoderSelectorTest : public ::testing::Test {
.WillRepeatedly(RunCallback<1>(true));
}
} else if (decryptor_capability == kHoldSetDecryptor) {
- // Set DecryptorReadyCB but the callback is never fired.
- EXPECT_CALL(*this, SetDecryptorReadyCallback(_));
+ // Set and cancel DecryptorReadyCB but the callback is never fired.
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .Times(2);
}
DCHECK_GE(all_decoders_.size(), static_cast<size_t>(num_decoders));
@@ -106,12 +114,12 @@ class AudioDecoderSelectorTest : public ::testing::Test {
}
void SelectDecoder() {
- decoder_selector_->SelectAudioDecoder(
+ decoder_selector_->SelectDecoder(
demuxer_stream_.get(),
- base::Bind(&AudioDecoderSelectorTest::OnStatistics,
- base::Unretained(this)),
+ false,
base::Bind(&AudioDecoderSelectorTest::MockOnDecoderSelected,
- base::Unretained(this)));
+ base::Unretained(this)),
+ base::Bind(&AudioDecoderSelectorTest::OnDecoderOutput));
message_loop_.RunUntilIdle();
}
@@ -123,6 +131,10 @@ class AudioDecoderSelectorTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
+ static void OnDecoderOutput(const scoped_refptr<AudioBuffer>& output) {
+ NOTREACHED();
+ }
+
// Fixture members.
scoped_ptr<AudioDecoderSelector> decoder_selector_;
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
@@ -132,12 +144,18 @@ class AudioDecoderSelectorTest : public ::testing::Test {
StrictMock<MockAudioDecoder>* decoder_1_;
StrictMock<MockAudioDecoder>* decoder_2_;
ScopedVector<AudioDecoder> all_decoders_;
+ scoped_ptr<AudioDecoder> selected_decoder_;
base::MessageLoop message_loop_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioDecoderSelectorTest);
};
+// Note:
+// In all the tests, Stop() is expected to be called on a decoder if it:
+// - is pending initialization and DecoderSelector::Abort() is called, or
+// - has been successfully initialized.
+
// The stream is not encrypted but we have no clear decoder. No decoder can be
// selected.
TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_NoClearDecoder) {
@@ -158,6 +176,7 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_OneClearDecoder) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
@@ -168,6 +187,7 @@ TEST_F(AudioDecoderSelectorTest,
InitializeDecoderSelector(kNoDecryptor, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoderAndAbort();
}
@@ -183,6 +203,7 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
EXPECT_CALL(*decoder_2_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, IsNull()));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
@@ -195,6 +216,7 @@ TEST_F(AudioDecoderSelectorTest,
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
EXPECT_CALL(*decoder_2_, Initialize(_, _, _));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoderAndAbort();
}
@@ -208,6 +230,7 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_HasDecryptor) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
@@ -217,6 +240,7 @@ TEST_F(AudioDecoderSelectorTest, Abort_ClearStream_HasDecryptor) {
InitializeDecoderSelector(kDecryptOnly, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoderAndAbort();
}
@@ -259,6 +283,7 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, NotNull()));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
@@ -269,6 +294,7 @@ TEST_F(AudioDecoderSelectorTest,
InitializeDecoderSelector(kDecryptOnly, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoderAndAbort();
}
@@ -286,6 +312,7 @@ TEST_F(AudioDecoderSelectorTest,
EXPECT_CALL(*decoder_2_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, NotNull()));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
@@ -298,6 +325,7 @@ TEST_F(AudioDecoderSelectorTest,
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
EXPECT_CALL(*decoder_2_, Initialize(_, _, _));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoderAndAbort();
}
diff --git a/chromium/media/filters/audio_file_reader.cc b/chromium/media/filters/audio_file_reader.cc
index 092c8f5329d..b20fd8d9ce3 100644
--- a/chromium/media/filters/audio_file_reader.cc
+++ b/chromium/media/filters/audio_file_reader.cc
@@ -4,6 +4,8 @@
#include "media/filters/audio_file_reader.h"
+#include <cmath>
+
#include "base/logging.h"
#include "base/time/time.h"
#include "media/base/audio_bus.h"
@@ -25,21 +27,6 @@ AudioFileReader::~AudioFileReader() {
Close();
}
-base::TimeDelta AudioFileReader::duration() const {
- const AVRational av_time_base = {1, AV_TIME_BASE};
-
- // Add one microsecond to avoid rounding-down errors which can occur when
- // |duration| has been calculated from an exact number of sample-frames.
- // One microsecond is much less than the time of a single sample-frame
- // at any real-world sample-rate.
- return ConvertFromTimeBase(
- av_time_base, glue_->format_context()->duration + 1);
-}
-
-int64 AudioFileReader::number_of_frames() const {
- return static_cast<int64>(duration().InSecondsF() * sample_rate());
-}
-
bool AudioFileReader::Open() {
glue_.reset(new FFmpegGlue(protocol_));
AVFormatContext* format_context = glue_->format_context();
@@ -131,8 +118,7 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
size_t bytes_per_sample = av_get_bytes_per_sample(codec_context_->sample_fmt);
// Holds decoded audio.
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame(
- av_frame_alloc());
+ scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame(av_frame_alloc());
// Read until we hit EOF or we've read the requested number of frames.
AVPacket packet;
@@ -140,19 +126,14 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
bool continue_decoding = true;
while (current_frame < audio_bus->frames() && continue_decoding &&
- av_read_frame(glue_->format_context(), &packet) >= 0 &&
- av_dup_packet(&packet) >= 0) {
- // Skip packets from other streams.
- if (packet.stream_index != stream_index_) {
- av_free_packet(&packet);
- continue;
- }
-
+ ReadPacket(&packet)) {
// Make a shallow copy of packet so we can slide packet.data as frames are
// decoded from the packet; otherwise av_free_packet() will corrupt memory.
AVPacket packet_temp = packet;
do {
- avcodec_get_frame_defaults(av_frame.get());
+ // Reset frame to default values.
+ av_frame_unref(av_frame.get());
+
int frame_decoded = 0;
int result = avcodec_decode_audio4(
codec_context_, av_frame.get(), &frame_decoded, &packet_temp);
@@ -161,7 +142,6 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
DLOG(WARNING)
<< "AudioFileReader::Read() : error in avcodec_decode_audio4() -"
<< result;
- continue_decoding = false;
break;
}
@@ -203,8 +183,10 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
}
// Truncate, if necessary, if the destination isn't big enough.
- if (current_frame + frames_read > audio_bus->frames())
+ if (current_frame + frames_read > audio_bus->frames()) {
+ DLOG(ERROR) << "Truncating decoded data due to output size.";
frames_read = audio_bus->frames() - current_frame;
+ }
// Deinterleave each channel and convert to 32bit floating-point with
// nominal range -1.0 -> +1.0. If the output is already in float planar
@@ -243,4 +225,36 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
return current_frame;
}
+base::TimeDelta AudioFileReader::GetDuration() const {
+ const AVRational av_time_base = {1, AV_TIME_BASE};
+
+ // Add one microsecond to avoid rounding-down errors which can occur when
+ // |duration| has been calculated from an exact number of sample-frames.
+ // One microsecond is much less than the time of a single sample-frame
+ // at any real-world sample-rate.
+ return ConvertFromTimeBase(av_time_base,
+ glue_->format_context()->duration + 1);
+}
+
+int AudioFileReader::GetNumberOfFrames() const {
+ return static_cast<int>(ceil(GetDuration().InSecondsF() * sample_rate()));
+}
+
+bool AudioFileReader::ReadPacketForTesting(AVPacket* output_packet) {
+ return ReadPacket(output_packet);
+}
+
+bool AudioFileReader::ReadPacket(AVPacket* output_packet) {
+ while (av_read_frame(glue_->format_context(), output_packet) >= 0 &&
+ av_dup_packet(output_packet) >= 0) {
+ // Skip packets from other streams.
+ if (output_packet->stream_index != stream_index_) {
+ av_free_packet(output_packet);
+ continue;
+ }
+ return true;
+ }
+ return false;
+}
+
} // namespace media
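A condensed sketch of how a caller drives the reader (it mirrors the pattern in audio_file_reader_unittest.cc below); note that GetNumberOfFrames() is only an estimate, so Read() may return fewer frames than the bus holds:

// Sketch: decoding a whole file into an AudioBus.
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_bus.h"
#include "media/filters/audio_file_reader.h"

int DecodeWholeFile(media::AudioFileReader* reader) {
  if (!reader->Open())
    return 0;
  scoped_ptr<media::AudioBus> bus = media::AudioBus::Create(
      reader->channels(), reader->GetNumberOfFrames());
  int frames_read = reader->Read(bus.get());  // Actual decoded frame count.
  reader->Close();
  return frames_read;
}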
diff --git a/chromium/media/filters/audio_file_reader.h b/chromium/media/filters/audio_file_reader.h
index e345dc05808..e7b7b4b71f8 100644
--- a/chromium/media/filters/audio_file_reader.h
+++ b/chromium/media/filters/audio_file_reader.h
@@ -10,6 +10,7 @@
#include "media/base/media_export.h"
struct AVCodecContext;
+struct AVPacket;
namespace base { class TimeDelta; }
@@ -28,7 +29,7 @@ class MEDIA_EXPORT AudioFileReader {
virtual ~AudioFileReader();
// Open() reads the audio data format so that the sample_rate(),
- // channels(), duration(), and number_of_frames() methods can be called.
+ // channels(), GetDuration(), and GetNumberOfFrames() methods can be called.
// It returns |true| on success.
bool Open();
void Close();
@@ -46,14 +47,27 @@ class MEDIA_EXPORT AudioFileReader {
int channels() const { return channels_; }
int sample_rate() const { return sample_rate_; }
- // Please note that duration() and number_of_frames() attempt to be accurate,
- // but are only estimates. For some encoded formats, the actual duration
- // of the file can only be determined once all the file data has been read.
- // The Read() method returns the actual number of sample-frames it has read.
- base::TimeDelta duration() const;
- int64 number_of_frames() const;
+ // Please note that GetDuration() and GetNumberOfFrames() attempt to be
+ // accurate, but are only estimates. For some encoded formats, the actual
+ // duration of the file can only be determined once all the file data has been
+ // read. The Read() method returns the actual number of sample-frames it has
+ // read.
+ base::TimeDelta GetDuration() const;
+ int GetNumberOfFrames() const;
+
+  // Helper method that allows AudioFileReader to double as a test utility for
+  // demuxing audio files. Returns true if a packet could be demuxed from the
+  // first audio stream in the file, in which case |output_packet| contains
+  // the demuxed packet.
+ bool ReadPacketForTesting(AVPacket* output_packet);
+
+ const AVCodecContext* codec_context_for_testing() const {
+ return codec_context_;
+ }
private:
+ bool ReadPacket(AVPacket* output_packet);
+
scoped_ptr<FFmpegGlue> glue_;
AVCodecContext* codec_context_;
int stream_index_;
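And a sketch of the test-utility path described above; ReadPacket() dupes each packet, so the caller owns and must free it:

// Sketch: demuxing packets from the first audio stream.
#include "base/logging.h"
#include "media/ffmpeg/ffmpeg_common.h"      // Full AVPacket definition.
#include "media/filters/audio_file_reader.h"

void CountAudioPackets(media::AudioFileReader* reader) {
  AVPacket packet;
  int count = 0;
  while (reader->ReadPacketForTesting(&packet)) {
    ++count;
    av_free_packet(&packet);  // Release the duplicated packet data.
  }
  DVLOG(1) << "Demuxed " << count << " audio packets.";
}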
diff --git a/chromium/media/filters/audio_file_reader_unittest.cc b/chromium/media/filters/audio_file_reader_unittest.cc
index bf4acd176c2..28c9837cb2e 100644
--- a/chromium/media/filters/audio_file_reader_unittest.cc
+++ b/chromium/media/filters/audio_file_reader_unittest.cc
@@ -30,7 +30,7 @@ class AudioFileReaderTest : public testing::Test {
// Reads and the entire file provided to Initialize().
void ReadAndVerify(const char* expected_audio_hash, int expected_frames) {
scoped_ptr<AudioBus> decoded_audio_data = AudioBus::Create(
- reader_->channels(), reader_->number_of_frames());
+ reader_->channels(), reader_->GetNumberOfFrames());
int actual_frames = reader_->Read(decoded_audio_data.get());
ASSERT_LE(actual_frames, decoded_audio_data->frames());
ASSERT_EQ(expected_frames, actual_frames);
@@ -46,8 +46,9 @@ class AudioFileReaderTest : public testing::Test {
ASSERT_TRUE(reader_->Open());
EXPECT_EQ(channels, reader_->channels());
EXPECT_EQ(sample_rate, reader_->sample_rate());
- EXPECT_EQ(duration.InMicroseconds(), reader_->duration().InMicroseconds());
- EXPECT_EQ(frames, reader_->number_of_frames());
+ EXPECT_EQ(duration.InMicroseconds(),
+ reader_->GetDuration().InMicroseconds());
+ EXPECT_EQ(frames, reader_->GetNumberOfFrames());
ReadAndVerify(hash, trimmed_frames);
}
@@ -60,7 +61,7 @@ class AudioFileReaderTest : public testing::Test {
Initialize(fn);
EXPECT_TRUE(reader_->Open());
scoped_ptr<AudioBus> decoded_audio_data = AudioBus::Create(
- reader_->channels(), reader_->number_of_frames());
+ reader_->channels(), reader_->GetNumberOfFrames());
EXPECT_EQ(reader_->Read(decoded_audio_data.get()), 0);
}
@@ -81,44 +82,49 @@ TEST_F(AudioFileReaderTest, InvalidFile) {
}
TEST_F(AudioFileReaderTest, WithVideo) {
- RunTest("bear.ogv", "-2.49,-0.75,0.38,1.60,-0.15,-1.22,", 2, 44100,
- base::TimeDelta::FromMicroseconds(1011520), 44608, 44608);
+ RunTest("bear.ogv", "-2.49,-0.75,0.38,1.60,0.70,-1.22,", 2, 44100,
+ base::TimeDelta::FromMicroseconds(1011520), 44609, 44609);
}
TEST_F(AudioFileReaderTest, Vorbis) {
- RunTest("sfx.ogg", "4.36,4.81,4.84,4.34,4.61,4.63,", 1, 44100,
- base::TimeDelta::FromMicroseconds(350001), 15435, 15435);
+ RunTest("sfx.ogg", "4.36,4.81,4.84,4.45,4.61,4.63,", 1, 44100,
+ base::TimeDelta::FromMicroseconds(350001), 15436, 15436);
}
TEST_F(AudioFileReaderTest, WaveU8) {
RunTest("sfx_u8.wav", "-1.23,-1.57,-1.14,-0.91,-0.87,-0.07,", 1, 44100,
- base::TimeDelta::FromMicroseconds(288414), 12719, 12719);
+ base::TimeDelta::FromMicroseconds(288414), 12720, 12719);
}
TEST_F(AudioFileReaderTest, WaveS16LE) {
RunTest("sfx_s16le.wav", "3.05,2.87,3.00,3.32,3.58,4.08,", 1, 44100,
- base::TimeDelta::FromMicroseconds(288414), 12719, 12719);
+ base::TimeDelta::FromMicroseconds(288414), 12720, 12719);
}
TEST_F(AudioFileReaderTest, WaveS24LE) {
RunTest("sfx_s24le.wav", "3.03,2.86,2.99,3.31,3.57,4.06,", 1, 44100,
- base::TimeDelta::FromMicroseconds(288414), 12719, 12719);
+ base::TimeDelta::FromMicroseconds(288414), 12720, 12719);
}
TEST_F(AudioFileReaderTest, WaveF32LE) {
RunTest("sfx_f32le.wav", "3.03,2.86,2.99,3.31,3.57,4.06,", 1, 44100,
- base::TimeDelta::FromMicroseconds(288414), 12719, 12719);
+ base::TimeDelta::FromMicroseconds(288414), 12720, 12719);
}
#if defined(USE_PROPRIETARY_CODECS)
TEST_F(AudioFileReaderTest, MP3) {
RunTest("sfx.mp3", "3.05,2.87,3.00,3.32,3.58,4.08,", 1, 44100,
- base::TimeDelta::FromMicroseconds(313470), 13824, 12719);
+ base::TimeDelta::FromMicroseconds(313470), 13825, 12719);
+}
+
+TEST_F(AudioFileReaderTest, CorruptMP3) {
+ RunTest("corrupt.mp3", "-4.95,-2.95,-0.44,1.16,0.31,-2.21,", 1, 44100,
+ base::TimeDelta::FromMicroseconds(1018826), 44931, 44928);
}
TEST_F(AudioFileReaderTest, AAC) {
RunTest("sfx.m4a", "1.81,1.66,2.32,3.27,4.46,3.36,", 1, 44100,
- base::TimeDelta::FromMicroseconds(312001), 13759, 13312);
+ base::TimeDelta::FromMicroseconds(312001), 13760, 13312);
}
TEST_F(AudioFileReaderTest, MidStreamConfigChangesFail) {
@@ -132,7 +138,7 @@ TEST_F(AudioFileReaderTest, VorbisInvalidChannelLayout) {
TEST_F(AudioFileReaderTest, WaveValidFourChannelLayout) {
RunTest("4ch.wav", "131.71,38.02,130.31,44.89,135.98,42.52,", 4, 44100,
- base::TimeDelta::FromMicroseconds(100001), 4410, 4410);
+ base::TimeDelta::FromMicroseconds(100001), 4411, 4410);
}
} // namespace media
diff --git a/chromium/media/filters/audio_renderer_algorithm_unittest.cc b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
index aab4a9d8aca..0f639223980 100644
--- a/chromium/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
@@ -69,7 +69,9 @@ class AudioRendererAlgorithmTest : public testing::Test {
AudioRendererAlgorithmTest()
: frames_enqueued_(0),
channels_(0),
+ channel_layout_(CHANNEL_LAYOUT_NONE),
sample_format_(kUnknownSampleFormat),
+ samples_per_second_(0),
bytes_per_sample_(0) {
}
@@ -83,6 +85,8 @@ class AudioRendererAlgorithmTest : public testing::Test {
SampleFormat sample_format,
int samples_per_second) {
channels_ = ChannelLayoutToChannelCount(channel_layout);
+ samples_per_second_ = samples_per_second;
+ channel_layout_ = channel_layout;
sample_format_ = sample_format;
bytes_per_sample_ = SampleFormatToBytesPerChannel(sample_format);
AudioParameters params(media::AudioParameters::AUDIO_PCM_LINEAR,
@@ -101,31 +105,37 @@ class AudioRendererAlgorithmTest : public testing::Test {
while (!algorithm_.IsQueueFull()) {
switch (sample_format_) {
case kSampleFormatU8:
- buffer = MakeInterleavedAudioBuffer<uint8>(sample_format_,
- channels_,
- 1,
- 1,
- kFrameSize,
- kNoTimestamp(),
- kNoTimestamp());
+ buffer = MakeAudioBuffer<uint8>(
+ sample_format_,
+ channel_layout_,
+ ChannelLayoutToChannelCount(channel_layout_),
+ samples_per_second_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp());
break;
case kSampleFormatS16:
- buffer = MakeInterleavedAudioBuffer<int16>(sample_format_,
- channels_,
- 1,
- 1,
- kFrameSize,
- kNoTimestamp(),
- kNoTimestamp());
+ buffer = MakeAudioBuffer<int16>(
+ sample_format_,
+ channel_layout_,
+ ChannelLayoutToChannelCount(channel_layout_),
+ samples_per_second_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp());
break;
case kSampleFormatS32:
- buffer = MakeInterleavedAudioBuffer<int32>(sample_format_,
- channels_,
- 1,
- 1,
- kFrameSize,
- kNoTimestamp(),
- kNoTimestamp());
+ buffer = MakeAudioBuffer<int32>(
+ sample_format_,
+ channel_layout_,
+ ChannelLayoutToChannelCount(channel_layout_),
+ samples_per_second_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp());
break;
default:
NOTREACHED() << "Unrecognized format " << sample_format_;
@@ -225,7 +235,7 @@ class AudioRendererAlgorithmTest : public testing::Test {
void WsolaTest(float playback_rate) {
const int kSampleRateHz = 48000;
- const media::ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+ const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
const int kBytesPerSample = 2;
const int kNumFrames = kSampleRateHz / 100; // 10 milliseconds.
@@ -242,8 +252,12 @@ class AudioRendererAlgorithmTest : public testing::Test {
scoped_ptr<AudioBus> output = AudioBus::Create(channels_, 1);
// Input buffer to inject pulses.
- scoped_refptr<AudioBuffer> input = AudioBuffer::CreateBuffer(
- kSampleFormatPlanarF32, channels_, kPulseWidthSamples);
+ scoped_refptr<AudioBuffer> input =
+ AudioBuffer::CreateBuffer(kSampleFormatPlanarF32,
+ kChannelLayout,
+ channels_,
+ kSampleRateHz,
+ kPulseWidthSamples);
const std::vector<uint8*>& channel_data = input->channel_data();
@@ -300,7 +314,9 @@ class AudioRendererAlgorithmTest : public testing::Test {
AudioRendererAlgorithm algorithm_;
int frames_enqueued_;
int channels_;
+ ChannelLayout channel_layout_;
SampleFormat sample_format_;
+ int samples_per_second_;
int bytes_per_sample_;
};
@@ -578,7 +594,7 @@ TEST_F(AudioRendererAlgorithmTest, FullAndDecimatedSearch) {
exclude_interval));
}
-TEST_F(AudioRendererAlgorithmTest, CubicInterpolation) {
+TEST_F(AudioRendererAlgorithmTest, QuadraticInterpolation) {
// Arbitrary coefficients.
const float kA = 0.7f;
const float kB = 1.2f;
@@ -592,7 +608,7 @@ TEST_F(AudioRendererAlgorithmTest, CubicInterpolation) {
float extremum;
float extremum_value;
- internal::CubicInterpolation(y_values, &extremum, &extremum_value);
+ internal::QuadraticInterpolation(y_values, &extremum, &extremum_value);
float x_star = -kB / (2.f * kA);
float y_star = kA * x_star * x_star + kB * x_star + kC;
@@ -601,6 +617,21 @@ TEST_F(AudioRendererAlgorithmTest, CubicInterpolation) {
EXPECT_FLOAT_EQ(y_star, extremum_value);
}
+TEST_F(AudioRendererAlgorithmTest, QuadraticInterpolation_Colinear) {
+ float y_values[3];
+ y_values[0] = 1.0;
+ y_values[1] = 1.0;
+ y_values[2] = 1.0;
+
+ float extremum;
+ float extremum_value;
+
+ internal::QuadraticInterpolation(y_values, &extremum, &extremum_value);
+
+ EXPECT_FLOAT_EQ(extremum, 0.0);
+ EXPECT_FLOAT_EQ(extremum_value, 1.0);
+}
+
TEST_F(AudioRendererAlgorithmTest, WsolaSlowdown) {
WsolaTest(0.6f);
}
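A worked check of the extremum math these interpolation tests exercise: fitting y = a*x^2 + b*x + c through samples taken at x = -1, 0, 1 gives

  a = (y[0] + y[2]) / 2 - y[1],   b = (y[2] - y[0]) / 2,   c = y[1],

with the extremum at x* = -b / (2a) and value y* = a*x*^2 + b*x* + c. For the colinear case y = {1, 1, 1}, a = b = 0, so no parabola is defined; the new test pins the expected fallback to the middle sample (extremum 0, value 1).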
diff --git a/chromium/media/filters/audio_renderer_impl.cc b/chromium/media/filters/audio_renderer_impl.cc
index 2df537d8831..d07826a243c 100644
--- a/chromium/media/filters/audio_renderer_impl.cc
+++ b/chromium/media/filters/audio_renderer_impl.cc
@@ -12,13 +12,15 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
+#include "base/single_thread_task_runner.h"
#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_converter.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/audio_splicer.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream.h"
-#include "media/filters/audio_decoder_selector.h"
+#include "media/filters/audio_clock.h"
#include "media/filters/decrypting_demuxer_stream.h"
namespace media {
@@ -28,35 +30,41 @@ namespace {
enum AudioRendererEvent {
INITIALIZED,
RENDER_ERROR,
- MAX_EVENTS
+ RENDER_EVENT_MAX = RENDER_ERROR,
};
void HistogramRendererEvent(AudioRendererEvent event) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioRendererEvents", event, MAX_EVENTS);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.AudioRendererEvents", event, RENDER_EVENT_MAX + 1);
}
} // namespace
AudioRendererImpl::AudioRendererImpl(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
media::AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- weak_factory_(this),
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ AudioHardwareConfig* hardware_config)
+ : task_runner_(task_runner),
sink_(sink),
- decoder_selector_(new AudioDecoderSelector(
- message_loop, decoders.Pass(), set_decryptor_ready_cb)),
+ audio_buffer_stream_(task_runner,
+ decoders.Pass(),
+ set_decryptor_ready_cb),
+ hardware_config_(hardware_config),
now_cb_(base::Bind(&base::TimeTicks::Now)),
state_(kUninitialized),
+ rendering_(false),
sink_playing_(false),
pending_read_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
- audio_time_buffered_(kNoTimestamp()),
- current_time_(kNoTimestamp()),
- underflow_disabled_(false),
- preroll_aborted_(false) {
+ preroll_aborted_(false),
+ weak_factory_(this) {
+ audio_buffer_stream_.set_splice_observer(base::Bind(
+ &AudioRendererImpl::OnNewSpliceBuffer, weak_factory_.GetWeakPtr()));
+ audio_buffer_stream_.set_config_change_observer(base::Bind(
+ &AudioRendererImpl::OnConfigChange, weak_factory_.GetWeakPtr()));
}
AudioRendererImpl::~AudioRendererImpl() {
@@ -65,68 +73,74 @@ AudioRendererImpl::~AudioRendererImpl() {
DCHECK(!algorithm_.get());
}
-void AudioRendererImpl::Play(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioRendererImpl::StartRendering() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!rendering_);
+ rendering_ = true;
base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kPaused);
- ChangeState_Locked(kPlaying);
- callback.Run();
- earliest_end_time_ = now_cb_.Run();
-
- if (algorithm_->playback_rate() != 0)
- DoPlay_Locked();
- else
+ // Wait for an eventual call to SetPlaybackRate() to start rendering.
+ if (algorithm_->playback_rate() == 0) {
DCHECK(!sink_playing_);
+ return;
+ }
+
+ StartRendering_Locked();
}
-void AudioRendererImpl::DoPlay_Locked() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioRendererImpl::StartRendering_Locked() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow)
+ << "state_=" << state_;
+ DCHECK(!sink_playing_);
+ DCHECK_NE(algorithm_->playback_rate(), 0);
lock_.AssertAcquired();
- earliest_end_time_ = now_cb_.Run();
- if ((state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow) &&
- !sink_playing_) {
- {
- base::AutoUnlock auto_unlock(lock_);
- sink_->Play();
- }
+ earliest_end_time_ = now_cb_.Run();
+ sink_playing_ = true;
- sink_playing_ = true;
- }
+ base::AutoUnlock auto_unlock(lock_);
+ sink_->Play();
}
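StartRendering_Locked() and StopRendering_Locked() both flip |sink_playing_| under |lock_| and then use base::AutoUnlock so the actual sink call happens with the lock released; calling the sink under |lock_| could deadlock against the audio callback thread, which takes the same lock inside Render(). A minimal sketch of the pattern, with a hypothetical class:

#include "base/synchronization/lock.h"

void Sketch::PlaySink_Locked() {
  lock_.AssertAcquired();
  sink_playing_ = true;                 // Mutate guarded state while locked.
  base::AutoUnlock auto_unlock(lock_);  // Releases |lock_|...
  sink_->Play();                        // ...before calling out to the sink.
}                                       // |lock_| is re-acquired at scope exit.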
-void AudioRendererImpl::Pause(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioRendererImpl::StopRendering() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(rendering_);
+ rendering_ = false;
base::AutoLock auto_lock(lock_);
- DCHECK(state_ == kPlaying || state_ == kUnderflow ||
- state_ == kRebuffering) << "state_ == " << state_;
- ChangeState_Locked(kPaused);
-
- DoPause_Locked();
+ // Rendering should have already been stopped with a zero playback rate.
+ if (algorithm_->playback_rate() == 0) {
+ DCHECK(!sink_playing_);
+ return;
+ }
- callback.Run();
+ StopRendering_Locked();
}
-void AudioRendererImpl::DoPause_Locked() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioRendererImpl::StopRendering_Locked() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow)
+ << "state_=" << state_;
+ DCHECK(sink_playing_);
lock_.AssertAcquired();
- if (sink_playing_) {
- {
- base::AutoUnlock auto_unlock(lock_);
- sink_->Pause();
- }
- sink_playing_ = false;
- }
+ sink_playing_ = false;
+
+ base::AutoUnlock auto_unlock(lock_);
+ sink_->Pause();
}
void AudioRendererImpl::Flush(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kPaused);
+ DCHECK(state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow)
+ << "state_=" << state_;
DCHECK(flush_cb_.is_null());
flush_cb_ = callback;
@@ -136,84 +150,84 @@ void AudioRendererImpl::Flush(const base::Closure& callback) {
return;
}
+ ChangeState_Locked(kFlushed);
DoFlush_Locked();
}
void AudioRendererImpl::DoFlush_Locked() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
lock_.AssertAcquired();
DCHECK(!pending_read_);
- DCHECK_EQ(state_, kPaused);
-
- if (decrypting_demuxer_stream_) {
- decrypting_demuxer_stream_->Reset(BindToCurrentLoop(
- base::Bind(&AudioRendererImpl::ResetDecoder, weak_this_)));
- return;
- }
-
- ResetDecoder();
-}
+ DCHECK_EQ(state_, kFlushed);
-void AudioRendererImpl::ResetDecoder() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- decoder_->Reset(BindToCurrentLoop(
- base::Bind(&AudioRendererImpl::ResetDecoderDone, weak_this_)));
+ audio_buffer_stream_.Reset(base::Bind(&AudioRendererImpl::ResetDecoderDone,
+ weak_factory_.GetWeakPtr()));
}
void AudioRendererImpl::ResetDecoderDone() {
- base::AutoLock auto_lock(lock_);
- if (state_ == kStopped)
- return;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ {
+ base::AutoLock auto_lock(lock_);
+ if (state_ == kStopped)
+ return;
- DCHECK_EQ(state_, kPaused);
- DCHECK(!flush_cb_.is_null());
+ DCHECK_EQ(state_, kFlushed);
+ DCHECK(!flush_cb_.is_null());
- audio_time_buffered_ = kNoTimestamp();
- current_time_ = kNoTimestamp();
- received_end_of_stream_ = false;
- rendered_end_of_stream_ = false;
- preroll_aborted_ = false;
-
- earliest_end_time_ = now_cb_.Run();
- splicer_->Reset();
- algorithm_->FlushBuffers();
+ audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate()));
+ received_end_of_stream_ = false;
+ rendered_end_of_stream_ = false;
+ preroll_aborted_ = false;
+ earliest_end_time_ = now_cb_.Run();
+ splicer_->Reset();
+ if (buffer_converter_)
+ buffer_converter_->Reset();
+ algorithm_->FlushBuffers();
+ }
base::ResetAndReturn(&flush_cb_).Run();
}
void AudioRendererImpl::Stop(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!callback.is_null());
// TODO(scherkus): Consider invalidating |weak_factory_| and replacing
// task-running guards that check |state_| with DCHECK().
- if (sink_) {
- sink_->Stop();
- sink_ = NULL;
- }
-
{
base::AutoLock auto_lock(lock_);
+
+ if (state_ == kStopped) {
+ task_runner_->PostTask(FROM_HERE, callback);
+ return;
+ }
+
ChangeState_Locked(kStopped);
- algorithm_.reset(NULL);
- init_cb_.Reset();
+ algorithm_.reset();
underflow_cb_.Reset();
time_cb_.Reset();
flush_cb_.Reset();
}
- callback.Run();
+ if (sink_) {
+ sink_->Stop();
+ sink_ = NULL;
+ }
+
+ audio_buffer_stream_.Stop(callback);
}
void AudioRendererImpl::Preroll(base::TimeDelta time,
const PipelineStatusCB& cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << "(" << time.InMicroseconds() << ")";
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(!sink_playing_);
- DCHECK_EQ(state_, kPaused);
+ DCHECK_EQ(state_, kFlushed);
DCHECK(!pending_read_) << "Pending read must complete before seeking";
DCHECK(preroll_cb_.is_null());
@@ -230,9 +244,8 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
const base::Closure& underflow_cb,
const TimeCB& time_cb,
const base::Closure& ended_cb,
- const base::Closure& disabled_cb,
const PipelineStatusCB& error_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(stream);
DCHECK_EQ(stream->type(), DemuxerStream::AUDIO);
DCHECK(!init_cb.is_null());
@@ -240,74 +253,99 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
DCHECK(!underflow_cb.is_null());
DCHECK(!time_cb.is_null());
DCHECK(!ended_cb.is_null());
- DCHECK(!disabled_cb.is_null());
DCHECK(!error_cb.is_null());
DCHECK_EQ(kUninitialized, state_);
DCHECK(sink_);
- weak_this_ = weak_factory_.GetWeakPtr();
+ state_ = kInitializing;
+
init_cb_ = init_cb;
- statistics_cb_ = statistics_cb;
underflow_cb_ = underflow_cb;
time_cb_ = time_cb;
ended_cb_ = ended_cb;
- disabled_cb_ = disabled_cb;
error_cb_ = error_cb;
- decoder_selector_->SelectAudioDecoder(
+ expecting_config_changes_ = stream->SupportsConfigChanges();
+ if (!expecting_config_changes_) {
+ // The actual buffer size is controlled via the size of the AudioBus
+ // provided to Render(), so just choose something reasonable here for looks.
+ int buffer_size = stream->audio_decoder_config().samples_per_second() / 100;
+ audio_parameters_.Reset(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ stream->audio_decoder_config().channel_layout(),
+ ChannelLayoutToChannelCount(
+ stream->audio_decoder_config().channel_layout()),
+ 0,
+ stream->audio_decoder_config().samples_per_second(),
+ stream->audio_decoder_config().bits_per_channel(),
+ buffer_size);
+ buffer_converter_.reset();
+ } else {
+ // TODO(rileya): Support hardware config changes
+ const AudioParameters& hw_params = hardware_config_->GetOutputConfig();
+ audio_parameters_.Reset(
+ hw_params.format(),
+ // Always use the source's channel layout and channel count to avoid
+ // premature downmixing (http://crbug.com/379288), platform specific
+ // issues around channel layouts (http://crbug.com/266674), and
+ // unnecessary upmixing overhead.
+ stream->audio_decoder_config().channel_layout(),
+ ChannelLayoutToChannelCount(
+ stream->audio_decoder_config().channel_layout()),
+ hw_params.input_channels(),
+ hw_params.sample_rate(),
+ hw_params.bits_per_sample(),
+ hardware_config_->GetHighLatencyBufferSize());
+ }
+
+ audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate()));
+
+ audio_buffer_stream_.Initialize(
stream,
+ false,
statistics_cb,
- base::Bind(&AudioRendererImpl::OnDecoderSelected, weak_this_));
+ base::Bind(&AudioRendererImpl::OnAudioBufferStreamInitialized,
+ weak_factory_.GetWeakPtr()));
}
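As a quick check on the "reasonable here for looks" buffer size in the fixed-config branch of Initialize() above:

// samples_per_second / 100 is one hundredth of a second of audio, e.g.
// 44100 / 100 = 441 frames (~10 ms), a typical low-latency callback size.
// The sink ignores it anyway: the real request size is whatever AudioBus
// is handed to Render().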
-void AudioRendererImpl::OnDecoderSelected(
- scoped_ptr<AudioDecoder> decoder,
- scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioRendererImpl::OnAudioBufferStreamInitialized(bool success) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- scoped_ptr<AudioDecoderSelector> deleter(decoder_selector_.Pass());
if (state_ == kStopped) {
- DCHECK(!sink_);
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
return;
}
- if (!decoder) {
+ if (!success) {
+ state_ = kUninitialized;
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
- decoder_ = decoder.Pass();
- decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
-
- int sample_rate = decoder_->samples_per_second();
-
- // The actual buffer size is controlled via the size of the AudioBus provided
- // to Render(), so just choose something reasonable here for looks.
- int buffer_size = decoder_->samples_per_second() / 100;
- audio_parameters_ = AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, decoder_->channel_layout(),
- sample_rate, decoder_->bits_per_channel(), buffer_size);
if (!audio_parameters_.IsValid()) {
+ ChangeState_Locked(kUninitialized);
base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_INITIALIZATION_FAILED);
return;
}
- splicer_.reset(new AudioSplicer(sample_rate));
+ if (expecting_config_changes_)
+ buffer_converter_.reset(new AudioBufferConverter(audio_parameters_));
+ splicer_.reset(new AudioSplicer(audio_parameters_.sample_rate()));
- // We're all good! Continue initializing the rest of the audio renderer based
- // on the decoder format.
+ // We're all good! Continue initializing the rest of the audio renderer
+ // based on the decoder format.
algorithm_.reset(new AudioRendererAlgorithm());
algorithm_->Initialize(0, audio_parameters_);
- ChangeState_Locked(kPaused);
+ ChangeState_Locked(kFlushed);
HistogramRendererEvent(INITIALIZED);
{
base::AutoUnlock auto_unlock(lock_);
- sink_->Initialize(audio_parameters_, weak_this_.get());
+ sink_->Initialize(audio_parameters_, this);
sink_->Start();
// Some sinks play on start...
@@ -320,7 +358,7 @@ void AudioRendererImpl::OnDecoderSelected(
}
void AudioRendererImpl::ResumeAfterUnderflow() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
if (state_ == kUnderflow) {
// The "!preroll_aborted_" is a hack. If preroll is aborted, then we
@@ -337,16 +375,16 @@ void AudioRendererImpl::ResumeAfterUnderflow() {
}
void AudioRendererImpl::SetVolume(float volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(sink_);
sink_->SetVolume(volume);
}
void AudioRendererImpl::DecodedAudioReady(
- AudioDecoder::Status status,
+ AudioBufferStream::Status status,
const scoped_refptr<AudioBuffer>& buffer) {
- DVLOG(1) << __FUNCTION__ << "(" << status << ")";
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DVLOG(2) << __FUNCTION__ << "(" << status << ")";
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(state_ != kUninitialized);
@@ -354,28 +392,40 @@ void AudioRendererImpl::DecodedAudioReady(
CHECK(pending_read_);
pending_read_ = false;
- if (status == AudioDecoder::kAborted) {
+ if (status == AudioBufferStream::ABORTED ||
+ status == AudioBufferStream::DEMUXER_READ_ABORTED) {
HandleAbortedReadOrDecodeError(false);
return;
}
- if (status == AudioDecoder::kDecodeError) {
+ if (status == AudioBufferStream::DECODE_ERROR) {
HandleAbortedReadOrDecodeError(true);
return;
}
- DCHECK_EQ(status, AudioDecoder::kOk);
+ DCHECK_EQ(status, AudioBufferStream::OK);
DCHECK(buffer.get());
if (state_ == kFlushing) {
- ChangeState_Locked(kPaused);
+ ChangeState_Locked(kFlushed);
DoFlush_Locked();
return;
}
- if (!splicer_->AddInput(buffer)) {
- HandleAbortedReadOrDecodeError(true);
- return;
+ if (expecting_config_changes_) {
+ DCHECK(buffer_converter_);
+ buffer_converter_->AddInput(buffer);
+ while (buffer_converter_->HasNextBuffer()) {
+ if (!splicer_->AddInput(buffer_converter_->GetNextBuffer())) {
+ HandleAbortedReadOrDecodeError(true);
+ return;
+ }
+ }
+ } else {
+ if (!splicer_->AddInput(buffer)) {
+ HandleAbortedReadOrDecodeError(true);
+ return;
+ }
}
if (!splicer_->HasNextBuffer()) {
@@ -426,18 +476,19 @@ bool AudioRendererImpl::HandleSplicerBuffer(
switch (state_) {
case kUninitialized:
+ case kInitializing:
case kFlushing:
NOTREACHED();
return false;
- case kPaused:
+ case kFlushed:
DCHECK(!pending_read_);
return false;
case kPrerolling:
if (!buffer->end_of_stream() && !algorithm_->IsQueueFull())
return true;
- ChangeState_Locked(kPaused);
+ ChangeState_Locked(kPlaying);
base::ResetAndReturn(&preroll_cb_).Run(PIPELINE_OK);
return false;
@@ -463,14 +514,15 @@ void AudioRendererImpl::AttemptRead() {
}
void AudioRendererImpl::AttemptRead_Locked() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
lock_.AssertAcquired();
if (!CanRead_Locked())
return;
pending_read_ = true;
- decoder_->Read(base::Bind(&AudioRendererImpl::DecodedAudioReady, weak_this_));
+ audio_buffer_stream_.Read(base::Bind(&AudioRendererImpl::DecodedAudioReady,
+ weak_factory_.GetWeakPtr()));
}
bool AudioRendererImpl::CanRead_Locked() {
@@ -478,7 +530,8 @@ bool AudioRendererImpl::CanRead_Locked() {
switch (state_) {
case kUninitialized:
- case kPaused:
+ case kInitializing:
+ case kFlushed:
case kFlushing:
case kStopped:
return false;
@@ -496,7 +549,7 @@ bool AudioRendererImpl::CanRead_Locked() {
void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GE(playback_rate, 0);
DCHECK(sink_);
@@ -506,12 +559,20 @@ void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
// Play: current_playback_rate == 0 && playback_rate != 0
// Pause: current_playback_rate != 0 && playback_rate == 0
float current_playback_rate = algorithm_->playback_rate();
- if (current_playback_rate == 0 && playback_rate != 0)
- DoPlay_Locked();
- else if (current_playback_rate != 0 && playback_rate == 0)
- DoPause_Locked();
-
algorithm_->SetPlaybackRate(playback_rate);
+
+ if (!rendering_)
+ return;
+
+ if (current_playback_rate == 0 && playback_rate != 0) {
+ StartRendering_Locked();
+ return;
+ }
+
+ if (current_playback_rate != 0 && playback_rate == 0) {
+ StopRendering_Locked();
+ return;
+ }
}
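The play/pause inference in SetPlaybackRate() reduces to a small rule on the old and new rates: only transitions across zero touch the sink, while nonzero-to-nonzero changes are absorbed by the algorithm's time stretching. A hypothetical condensation:

enum SinkAction { SINK_NO_OP, SINK_START, SINK_STOP };

SinkAction ActionForRateChange(float current_rate, float new_rate) {
  if (current_rate == 0 && new_rate != 0) return SINK_START;  // "Play".
  if (current_rate != 0 && new_rate == 0) return SINK_STOP;   // "Pause".
  return SINK_NO_OP;  // The algorithm handles rate-to-rate changes.
}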
bool AudioRendererImpl::IsBeforePrerollTime(
@@ -524,27 +585,33 @@ bool AudioRendererImpl::IsBeforePrerollTime(
int AudioRendererImpl::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
const int requested_frames = audio_bus->frames();
- base::TimeDelta current_time = kNoTimestamp();
- base::TimeDelta max_time = kNoTimestamp();
base::TimeDelta playback_delay = base::TimeDelta::FromMilliseconds(
audio_delay_milliseconds);
-
+ const int delay_frames = static_cast<int>(playback_delay.InSecondsF() *
+ audio_parameters_.sample_rate());
int frames_written = 0;
+ base::Closure time_cb;
base::Closure underflow_cb;
{
base::AutoLock auto_lock(lock_);
// Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread.
- if (!algorithm_)
+ if (!algorithm_) {
+ audio_clock_->WroteSilence(requested_frames, delay_frames);
return 0;
+ }
float playback_rate = algorithm_->playback_rate();
- if (playback_rate == 0)
+ if (playback_rate == 0) {
+ audio_clock_->WroteSilence(requested_frames, delay_frames);
return 0;
+ }
// Mute audio by returning 0 when not playing.
- if (state_ != kPlaying)
+ if (state_ != kPlaying) {
+ audio_clock_->WroteSilence(requested_frames, delay_frames);
return 0;
+ }
// We use the following conditions to determine end of playback:
// 1) Algorithm can not fill the audio callback buffer
@@ -561,7 +628,15 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
// 3) We are in the kPlaying state
//
// Otherwise the buffer has data we can send to the device.
- frames_written = algorithm_->FillBuffer(audio_bus, requested_frames);
+ const base::TimeDelta media_timestamp_before_filling =
+ audio_clock_->CurrentMediaTimestamp();
+ if (algorithm_->frames_buffered() > 0) {
+ frames_written = algorithm_->FillBuffer(audio_bus, requested_frames);
+ audio_clock_->WroteAudio(
+ frames_written, delay_frames, playback_rate, algorithm_->GetTime());
+ }
+ audio_clock_->WroteSilence(requested_frames - frames_written, delay_frames);
+
if (frames_written == 0) {
const base::TimeTicks now = now_cb_.Run();
@@ -569,8 +644,7 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
now >= earliest_end_time_) {
rendered_end_of_stream_ = true;
ended_cb_.Run();
- } else if (!received_end_of_stream_ && state_ == kPlaying &&
- !underflow_disabled_) {
+ } else if (!received_end_of_stream_ && state_ == kPlaying) {
ChangeState_Locked(kUnderflow);
underflow_cb = underflow_cb_;
} else {
@@ -581,55 +655,29 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
}
if (CanRead_Locked()) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioRendererImpl::AttemptRead, weak_this_));
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&AudioRendererImpl::AttemptRead,
+ weak_factory_.GetWeakPtr()));
}
- // The |audio_time_buffered_| is the ending timestamp of the last frame
- // buffered at the audio device. |playback_delay| is the amount of time
- // buffered at the audio device. The current time can be computed by their
- // difference.
- if (audio_time_buffered_ != kNoTimestamp()) {
- // Adjust the delay according to playback rate.
- base::TimeDelta adjusted_playback_delay =
- base::TimeDelta::FromMicroseconds(ceil(
- playback_delay.InMicroseconds() * playback_rate));
-
- base::TimeDelta previous_time = current_time_;
- current_time_ = audio_time_buffered_ - adjusted_playback_delay;
-
- // Time can change in one of two ways:
- // 1) The time of the audio data at the audio device changed, or
- // 2) The playback delay value has changed
- //
- // We only want to set |current_time| (and thus execute |time_cb_|) if
- // time has progressed and we haven't signaled end of stream yet.
- //
- // Why? The current latency of the system results in getting the last call
- // to FillBuffer() later than we'd like, which delays firing the 'ended'
- // event, which delays the looping/trigging performance of short sound
- // effects.
- //
- // TODO(scherkus): revisit this and switch back to relying on playback
- // delay after we've revamped our audio IPC subsystem.
- if (current_time_ > previous_time && !rendered_end_of_stream_) {
- current_time = current_time_;
- }
+ // We only want to execute |time_cb_| if time has progressed and we haven't
+ // signaled end of stream yet.
+ if (media_timestamp_before_filling !=
+ audio_clock_->CurrentMediaTimestamp() &&
+ !rendered_end_of_stream_) {
+ time_cb = base::Bind(time_cb_,
+ audio_clock_->CurrentMediaTimestamp(),
+ audio_clock_->last_endpoint_timestamp());
}
- // The call to FillBuffer() on |algorithm_| has increased the amount of
- // buffered audio data. Update the new amount of time buffered.
- max_time = algorithm_->GetTime();
- audio_time_buffered_ = max_time;
-
if (frames_written > 0) {
UpdateEarliestEndTime_Locked(
frames_written, playback_delay, now_cb_.Run());
}
}
- if (current_time != kNoTimestamp() && max_time != kNoTimestamp())
- time_cb_.Run(current_time, max_time);
+ if (!time_cb.is_null())
+ task_runner_->PostTask(FROM_HERE, time_cb);
if (!underflow_cb.is_null())
underflow_cb.Run();
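The AudioClock calls threaded through Render() above replace the old audio_time_buffered_/current_time_ arithmetic: every callback accounts for exactly requested_frames, split between real output (WroteAudio) and padding (WroteSilence), with delay_frames converting the sink's delay to frames (a 20 ms delay at 48 kHz, say, is 960 frames). A toy model of the accounting, deliberately simpler than the real AudioClock interface:

// Media time advances only for frames of real audio, scaled by playback
// rate; silence keeps the device fed without moving the clock.
class ToyAudioClock {
 public:
  explicit ToyAudioClock(int sample_rate) : sample_rate_(sample_rate) {}
  void WroteAudio(int frames, double playback_rate) {
    media_time_us_ += frames * playback_rate * 1000000.0 / sample_rate_;
  }
  void WroteSilence(int /*frames*/) {}  // No media-time progress.
  double media_time_us() const { return media_time_us_; }
 private:
  const int sample_rate_;
  double media_time_us_ = 0;
};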
@@ -653,12 +701,12 @@ void AudioRendererImpl::UpdateEarliestEndTime_Locked(
}
void AudioRendererImpl::OnRenderError() {
+ // UMA data tells us this happens ~0.01% of the time. Trigger an error instead
+ // of trying to gracefully fall back to a fake sink. It's very likely
+ // OnRenderError() should be removed and the audio stack handle errors without
+ // notifying clients. See http://crbug.com/234708 for details.
HistogramRendererEvent(RENDER_ERROR);
- disabled_cb_.Run();
-}
-
-void AudioRendererImpl::DisableUnderflowForTesting() {
- underflow_disabled_ = true;
+ error_cb_.Run(PIPELINE_ERROR_DECODE);
}
void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
@@ -667,14 +715,11 @@ void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
PipelineStatus status = is_decode_error ? PIPELINE_ERROR_DECODE : PIPELINE_OK;
switch (state_) {
case kUninitialized:
+ case kInitializing:
NOTREACHED();
return;
- case kPaused:
- if (status != PIPELINE_OK)
- error_cb_.Run(status);
- return;
case kFlushing:
- ChangeState_Locked(kPaused);
+ ChangeState_Locked(kFlushed);
if (status == PIPELINE_OK) {
DoFlush_Locked();
@@ -687,9 +732,10 @@ void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
case kPrerolling:
// This is a signal for abort if it's not an error.
preroll_aborted_ = !is_decode_error;
- ChangeState_Locked(kPaused);
+ ChangeState_Locked(kPlaying);
base::ResetAndReturn(&preroll_cb_).Run(status);
return;
+ case kFlushed:
case kPlaying:
case kUnderflow:
case kRebuffering:
@@ -706,4 +752,20 @@ void AudioRendererImpl::ChangeState_Locked(State new_state) {
state_ = new_state;
}
+void AudioRendererImpl::OnNewSpliceBuffer(base::TimeDelta splice_timestamp) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ splicer_->SetSpliceTimestamp(splice_timestamp);
+}
+
+void AudioRendererImpl::OnConfigChange() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(expecting_config_changes_);
+ buffer_converter_->ResetTimestampState();
+ // Drain flushed buffers from the converter so the AudioSplicer receives all
+ // data ahead of any OnNewSpliceBuffer() calls. Since discontinuities should
+ // only appear after config changes, AddInput() should never fail here.
+ while (buffer_converter_->HasNextBuffer())
+ CHECK(splicer_->AddInput(buffer_converter_->GetNextBuffer()));
+}
+
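OnConfigChange() and the expecting_config_changes_ path in DecodedAudioReady() share the same drain idiom, which pins down the pipeline order: decoder output flows through the AudioBufferConverter (resampling/remixing to the output parameters) into the AudioSplicer (which smooths config-change seams) before reaching the algorithm. A hypothetical free-function rendering of that drain:

void DrainConverterIntoSplicer(AudioBufferConverter* converter,
                               AudioSplicer* splicer) {
  // Emptying the converter first keeps the splicer fed in timestamp order
  // across a config change; AddInput() is not expected to fail here.
  while (converter->HasNextBuffer())
    CHECK(splicer->AddInput(converter->GetNextBuffer()));
}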
} // namespace media
diff --git a/chromium/media/filters/audio_renderer_impl.h b/chromium/media/filters/audio_renderer_impl.h
index 335a6c7a454..7829366b3f8 100644
--- a/chromium/media/filters/audio_renderer_impl.h
+++ b/chromium/media/filters/audio_renderer_impl.h
@@ -29,15 +29,18 @@
#include "media/base/audio_renderer_sink.h"
#include "media/base/decryptor.h"
#include "media/filters/audio_renderer_algorithm.h"
+#include "media/filters/decoder_stream.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
+class AudioBufferConverter;
class AudioBus;
-class AudioDecoderSelector;
+class AudioClock;
+class AudioHardwareConfig;
class AudioSplicer;
class DecryptingDemuxerStream;
@@ -45,7 +48,7 @@ class MEDIA_EXPORT AudioRendererImpl
: public AudioRenderer,
NON_EXPORTED_BASE(public AudioRendererSink::RenderCallback) {
public:
- // |message_loop| is the thread on which AudioRendererImpl will execute.
+ // |task_runner| is the thread on which AudioRendererImpl will execute.
//
// |sink| is used as the destination for the rendered audio.
//
@@ -53,10 +56,12 @@ class MEDIA_EXPORT AudioRendererImpl
//
// |set_decryptor_ready_cb| is fired when the audio decryptor is available
// (only applicable if the stream is encrypted and we have a decryptor).
- AudioRendererImpl(const scoped_refptr<base::MessageLoopProxy>& message_loop,
- AudioRendererSink* sink,
- ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ AudioRendererImpl(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ AudioRendererSink* sink,
+ ScopedVector<AudioDecoder> decoders,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ AudioHardwareConfig* hardware_config);
virtual ~AudioRendererImpl();
// AudioRenderer implementation.
@@ -66,10 +71,9 @@ class MEDIA_EXPORT AudioRendererImpl
const base::Closure& underflow_cb,
const TimeCB& time_cb,
const base::Closure& ended_cb,
- const base::Closure& disabled_cb,
const PipelineStatusCB& error_cb) OVERRIDE;
- virtual void Play(const base::Closure& callback) OVERRIDE;
- virtual void Pause(const base::Closure& callback) OVERRIDE;
+ virtual void StartRendering() OVERRIDE;
+ virtual void StopRendering() OVERRIDE;
virtual void Flush(const base::Closure& callback) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void SetPlaybackRate(float rate) OVERRIDE;
@@ -78,12 +82,6 @@ class MEDIA_EXPORT AudioRendererImpl
virtual void ResumeAfterUnderflow() OVERRIDE;
virtual void SetVolume(float volume) OVERRIDE;
- // Disables underflow support. When used, |state_| will never transition to
- // kUnderflow resulting in Render calls that underflow returning 0 frames
- // instead of some number of silence frames. Must be called prior to
- // Initialize().
- void DisableUnderflowForTesting();
-
// Allows injection of a custom time callback for non-realtime testing.
typedef base::Callback<base::TimeTicks()> NowCB;
void set_now_cb_for_testing(const NowCB& now_cb) {
@@ -93,11 +91,33 @@ class MEDIA_EXPORT AudioRendererImpl
private:
friend class AudioRendererImplTest;
- // TODO(acolwell): Add a state machine graph.
+ // Important detail: being in kPlaying doesn't imply that audio is being
+ // rendered. Rather, it means that the renderer is ready to go. The actual
+ // rendering of audio is controlled via Start/StopRendering().
+ //
+ // kUninitialized
+ // | Initialize()
+ // |
+ // V
+ // kInitializing
+ // | Decoders initialized
+ // |
+ // V Decoders reset
+ // kFlushed <------------------ kFlushing
+ // | Preroll() ^
+ // | |
+ // V | Flush()
+ // kPrerolling ----------------> kPlaying ---------.
+ // Enough data buffered ^ | Not enough data
+ // | | buffered
+ // Enough data buffered | V
+ // kRebuffering <--- kUnderflow
+ // ResumeAfterUnderflow()
enum State {
kUninitialized,
- kPaused,
+ kInitializing,
kFlushing,
+ kFlushed,
kPrerolling,
kPlaying,
kStopped,
@@ -106,7 +126,7 @@ class MEDIA_EXPORT AudioRendererImpl
};
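A hypothetical table-driven reading of the diagram above; the real code enforces these edges inline with DCHECKs, and Stop() (reachable from nearly every state) is omitted for brevity:

bool CanTransition(State from, State to) {
  switch (from) {
    case kUninitialized: return to == kInitializing;   // Initialize()
    case kInitializing:  return to == kFlushed;        // Decoders initialized
    case kFlushed:       return to == kPrerolling;     // Preroll()
    case kPrerolling:    return to == kPlaying;        // Enough data buffered
    case kPlaying:       return to == kUnderflow || to == kFlushing;
    case kUnderflow:     return to == kRebuffering || to == kFlushing;
    case kRebuffering:   return to == kPlaying || to == kFlushing;
    case kFlushing:      return to == kFlushed;        // Decoders reset
    case kStopped:       return false;
  }
  return false;
}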
// Callback from the audio decoder delivering decoded audio samples.
- void DecodedAudioReady(AudioDecoder::Status status,
+ void DecodedAudioReady(AudioBufferStream::Status status,
const scoped_refptr<AudioBuffer>& buffer);
// Handles buffers that come out of |splicer_|.
@@ -122,8 +142,8 @@ class MEDIA_EXPORT AudioRendererImpl
const base::TimeDelta& playback_delay,
const base::TimeTicks& time_now);
- void DoPlay_Locked();
- void DoPause_Locked();
+ void StartRendering_Locked();
+ void StopRendering_Locked();
// AudioRendererSink::RenderCallback implementation.
//
@@ -151,7 +171,7 @@ class MEDIA_EXPORT AudioRendererImpl
// Helper methods that schedule an asynchronous read from the decoder as long
// as there isn't a pending read.
//
- // Must be called on |message_loop_|.
+ // Must be called on |task_runner_|.
void AttemptRead();
void AttemptRead_Locked();
bool CanRead_Locked();
@@ -162,14 +182,9 @@ class MEDIA_EXPORT AudioRendererImpl
// in the kPrerolling state.
bool IsBeforePrerollTime(const scoped_refptr<AudioBuffer>& buffer);
- // Called when |decoder_selector_| has selected |decoder| or is null if no
- // decoder could be selected.
- //
- // |decrypting_demuxer_stream| is non-null if a DecryptingDemuxerStream was
- // created to help decrypt the encrypted stream.
- void OnDecoderSelected(
- scoped_ptr<AudioDecoder> decoder,
- scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream);
+ // Called upon AudioBufferStream initialization, or failure thereof (indicated
+ // by the value of |success|).
+ void OnAudioBufferStreamInitialized(bool success);
// Used to initiate the flush operation once all pending reads have
// completed.
@@ -182,33 +197,38 @@ class MEDIA_EXPORT AudioRendererImpl
// Called when the |decoder_|.Reset() has completed.
void ResetDecoderDone();
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<AudioRendererImpl> weak_factory_;
- base::WeakPtr<AudioRendererImpl> weak_this_;
+ // Called by the AudioBufferStream when a splice buffer is demuxed.
+ void OnNewSpliceBuffer(base::TimeDelta);
+
+ // Called by the AudioBufferStream when a config change occurs.
+ void OnConfigChange();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_ptr<AudioSplicer> splicer_;
+ scoped_ptr<AudioBufferConverter> buffer_converter_;
+
+ // Whether or not we expect to handle config changes.
+ bool expecting_config_changes_;
// The sink (destination) for rendered audio. |sink_| must only be accessed
- // on |message_loop_|. |sink_| must never be called under |lock_| or else we
- // may deadlock between |message_loop_| and the audio callback thread.
+ // on |task_runner_|. |sink_| must never be called under |lock_| or else we
+ // may deadlock between |task_runner_| and the audio callback thread.
scoped_refptr<media::AudioRendererSink> sink_;
- scoped_ptr<AudioDecoderSelector> decoder_selector_;
+ AudioBufferStream audio_buffer_stream_;
- // These two will be set by AudioDecoderSelector::SelectAudioDecoder().
- scoped_ptr<AudioDecoder> decoder_;
- scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream_;
+ // Interface to the hardware audio params.
+ const AudioHardwareConfig* const hardware_config_;
- // AudioParameters constructed during Initialize() based on |decoder_|.
+ // Audio parameters constructed during Initialize(), from the stream config
+ // and (when config changes are expected) |hardware_config_|.
AudioParameters audio_parameters_;
// Callbacks provided during Initialize().
PipelineStatusCB init_cb_;
- StatisticsCB statistics_cb_;
base::Closure underflow_cb_;
TimeCB time_cb_;
base::Closure ended_cb_;
- base::Closure disabled_cb_;
PipelineStatusCB error_cb_;
// Callback provided to Flush().
@@ -230,7 +250,9 @@ class MEDIA_EXPORT AudioRendererImpl
// Simple state tracking variable.
State state_;
- // Keep track of whether or not the sink is playing.
+ // Keep track of whether or not the sink is playing and whether we should be
+ // rendering.
+ bool rendering_;
bool sink_playing_;
// Keep track of our outstanding read to |decoder_|.
@@ -240,10 +262,7 @@ class MEDIA_EXPORT AudioRendererImpl
bool received_end_of_stream_;
bool rendered_end_of_stream_;
- // The timestamp of the last frame (i.e. furthest in the future) buffered as
- // well as the current time that takes current playback delay into account.
- base::TimeDelta audio_time_buffered_;
- base::TimeDelta current_time_;
+ scoped_ptr<AudioClock> audio_clock_;
base::TimeDelta preroll_timestamp_;
@@ -264,14 +283,15 @@ class MEDIA_EXPORT AudioRendererImpl
base::TimeTicks earliest_end_time_;
size_t total_frames_filled_;
- bool underflow_disabled_;
-
// True if the renderer receives a buffer with kAborted status during preroll,
// false otherwise. This flag is cleared on the next Preroll() call.
bool preroll_aborted_;
// End variables which must be accessed under |lock_|. ----------------------
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<AudioRendererImpl> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRendererImpl);
};
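The NOTE above leans on a language guarantee: non-static members are destroyed in reverse declaration order, so declaring the WeakPtrFactory last ensures outstanding weak pointers are invalidated before any other member is torn down. A hypothetical minimal shape:

class Sketch {
 public:
  Sketch() : weak_factory_(this) {}
 private:
  scoped_ptr<Resource> resource_;              // Destroyed second.
  base::WeakPtrFactory<Sketch> weak_factory_;  // Destroyed first, so pending
                                               // weak callbacks can no longer
                                               // touch |resource_| mid-teardown.
};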
diff --git a/chromium/media/filters/audio_renderer_impl_unittest.cc b/chromium/media/filters/audio_renderer_impl_unittest.cc
index 5adfbc499f7..ef00769db4f 100644
--- a/chromium/media/filters/audio_renderer_impl_unittest.cc
+++ b/chromium/media/filters/audio_renderer_impl_unittest.cc
@@ -7,9 +7,13 @@
#include "base/gtest_prod_util.h"
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_converter.h"
+#include "media/base/audio_hardware_config.h"
+#include "media/base/audio_splicer.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/fake_audio_renderer_sink.h"
#include "media/base/gmock_callback_support.h"
@@ -25,6 +29,7 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
+using ::testing::SaveArg;
namespace media {
@@ -32,8 +37,11 @@ namespace media {
static AudioCodec kCodec = kCodecVorbis;
static SampleFormat kSampleFormat = kSampleFormatPlanarF32;
static ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static int kChannelCount = 2;
static int kChannels = ChannelLayoutToChannelCount(kChannelLayout);
static int kSamplesPerSecond = 44100;
+// Use a different output sample rate so the AudioBufferConverter is invoked.
+static int kOutputSamplesPerSecond = 48000;
// Constants for distinguishing between muted audio and playing audio when using
// ConsumeBufferedData(). Must match the type needed by kSampleFormat.
@@ -42,12 +50,19 @@ static float kPlayingAudio = 0.5f;
static const int kDataSize = 1024;
+ACTION_P(EnterPendingDecoderInitStateAction, test) {
+ test->EnterPendingDecoderInitState(arg1);
+}
+
class AudioRendererImplTest : public ::testing::Test {
public:
// Give the decoder some non-garbage media properties.
AudioRendererImplTest()
- : demuxer_stream_(DemuxerStream::AUDIO),
- decoder_(new MockAudioDecoder()) {
+ : hardware_config_(AudioParameters(), AudioParameters()),
+ needs_stop_(true),
+ demuxer_stream_(DemuxerStream::AUDIO),
+ decoder_(new MockAudioDecoder()),
+ last_time_update_(kNoTimestamp()) {
AudioDecoderConfig audio_config(kCodec,
kSampleFormat,
kChannelLayout,
@@ -58,28 +73,31 @@ class AudioRendererImplTest : public ::testing::Test {
demuxer_stream_.set_audio_decoder_config(audio_config);
// Used to save callbacks and run them at a later time.
- EXPECT_CALL(*decoder_, Read(_))
- .WillRepeatedly(Invoke(this, &AudioRendererImplTest::ReadDecoder));
-
+ EXPECT_CALL(*decoder_, Decode(_, _))
+ .WillRepeatedly(Invoke(this, &AudioRendererImplTest::DecodeDecoder));
EXPECT_CALL(*decoder_, Reset(_))
.WillRepeatedly(Invoke(this, &AudioRendererImplTest::ResetDecoder));
- // Set up audio properties.
- EXPECT_CALL(*decoder_, bits_per_channel())
- .WillRepeatedly(Return(audio_config.bits_per_channel()));
- EXPECT_CALL(*decoder_, channel_layout())
- .WillRepeatedly(Return(audio_config.channel_layout()));
- EXPECT_CALL(*decoder_, samples_per_second())
- .WillRepeatedly(Return(audio_config.samples_per_second()));
-
+ // Mock out demuxer reads.
+ EXPECT_CALL(demuxer_stream_, Read(_)).WillRepeatedly(
+ RunCallback<0>(DemuxerStream::kOk,
+ scoped_refptr<DecoderBuffer>(new DecoderBuffer(0))));
+ EXPECT_CALL(demuxer_stream_, SupportsConfigChanges())
+ .WillRepeatedly(Return(true));
+ AudioParameters out_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kChannelLayout,
+ kOutputSamplesPerSecond,
+ SampleFormatToBytesPerChannel(kSampleFormat) * 8,
+ 512);
+ hardware_config_.UpdateOutputConfig(out_params);
ScopedVector<AudioDecoder> decoders;
decoders.push_back(decoder_);
sink_ = new FakeAudioRendererSink();
- renderer_.reset(new AudioRendererImpl(
- message_loop_.message_loop_proxy(),
- sink_,
- decoders.Pass(),
- SetDecryptorReadyCB()));
+ renderer_.reset(new AudioRendererImpl(message_loop_.message_loop_proxy(),
+ sink_,
+ decoders.Pass(),
+ SetDecryptorReadyCB(),
+ &hardware_config_));
// Stub out time.
renderer_->set_now_cb_for_testing(base::Bind(
@@ -88,52 +106,32 @@ class AudioRendererImplTest : public ::testing::Test {
virtual ~AudioRendererImplTest() {
SCOPED_TRACE("~AudioRendererImplTest()");
- WaitableMessageLoopEvent event;
- renderer_->Stop(event.GetClosure());
- event.RunAndWait();
+ if (needs_stop_) {
+ WaitableMessageLoopEvent event;
+ renderer_->Stop(event.GetClosure());
+ event.RunAndWait();
+ }
}
void ExpectUnsupportedAudioDecoder() {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
- }
-
- void ExpectUnsupportedAudioDecoderConfig() {
- EXPECT_CALL(*decoder_, bits_per_channel())
- .WillRepeatedly(Return(3));
- EXPECT_CALL(*decoder_, channel_layout())
- .WillRepeatedly(Return(CHANNEL_LAYOUT_UNSUPPORTED));
- EXPECT_CALL(*decoder_, samples_per_second())
- .WillRepeatedly(Return(0));
- EXPECT_CALL(*decoder_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(DoAll(SaveArg<2>(&output_cb_),
+ RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED)));
}
MOCK_METHOD1(OnStatistics, void(const PipelineStatistics&));
MOCK_METHOD0(OnUnderflow, void());
- MOCK_METHOD0(OnDisabled, void());
MOCK_METHOD1(OnError, void(PipelineStatus));
void OnAudioTimeCallback(TimeDelta current_time, TimeDelta max_time) {
CHECK(current_time <= max_time);
+ last_time_update_ = current_time;
}
- void Initialize() {
- EXPECT_CALL(*decoder_, Initialize(_, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- InitializeWithStatus(PIPELINE_OK);
-
- next_timestamp_.reset(
- new AudioTimestampHelper(decoder_->samples_per_second()));
- }
-
- void InitializeWithStatus(PipelineStatus expected) {
- SCOPED_TRACE(base::StringPrintf("InitializeWithStatus(%d)", expected));
-
- WaitableMessageLoopEvent event;
+ void InitializeRenderer(const PipelineStatusCB& pipeline_status_cb) {
renderer_->Initialize(
&demuxer_stream_,
- event.GetPipelineStatusCB(),
+ pipeline_status_cb,
base::Bind(&AudioRendererImplTest::OnStatistics,
base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnUnderflow,
@@ -141,14 +139,70 @@ class AudioRendererImplTest : public ::testing::Test {
base::Bind(&AudioRendererImplTest::OnAudioTimeCallback,
base::Unretained(this)),
ended_event_.GetClosure(),
- base::Bind(&AudioRendererImplTest::OnDisabled,
- base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnError,
base::Unretained(this)));
+ }
+
+ void Initialize() {
+ EXPECT_CALL(*decoder_, Initialize(_, _, _))
+ .WillOnce(DoAll(SaveArg<2>(&output_cb_),
+ RunCallback<1>(PIPELINE_OK)));
+ EXPECT_CALL(*decoder_, Stop());
+ InitializeWithStatus(PIPELINE_OK);
+
+ next_timestamp_.reset(new AudioTimestampHelper(
+ hardware_config_.GetOutputConfig().sample_rate()));
+ }
+
+ void InitializeWithStatus(PipelineStatus expected) {
+ SCOPED_TRACE(base::StringPrintf("InitializeWithStatus(%d)", expected));
+
+ WaitableMessageLoopEvent event;
+ InitializeRenderer(event.GetPipelineStatusCB());
event.RunAndWaitForStatus(expected);
// We should have no reads.
- EXPECT_TRUE(read_cb_.is_null());
+ EXPECT_TRUE(decode_cb_.is_null());
+ }
+
+ void InitializeAndStop() {
+ EXPECT_CALL(*decoder_, Initialize(_, _, _))
+ .WillOnce(DoAll(SaveArg<2>(&output_cb_),
+ RunCallback<1>(PIPELINE_OK)));
+ EXPECT_CALL(*decoder_, Stop());
+
+ WaitableMessageLoopEvent event;
+ InitializeRenderer(event.GetPipelineStatusCB());
+
+ // Stop before we let the MessageLoop run; this simulates an interleaving
+ // in which we end up calling Stop() while the decoder-initialization
+ // callback (OnAudioBufferStreamInitialized) is still in flight.
+ renderer_->Stop(NewExpectedClosure());
+ event.RunAndWaitForStatus(PIPELINE_ERROR_ABORT);
+ EXPECT_EQ(renderer_->state_, AudioRendererImpl::kStopped);
+ }
+
+ void InitializeAndStopDuringDecoderInit() {
+ EXPECT_CALL(*decoder_, Initialize(_, _, _))
+ .WillOnce(DoAll(SaveArg<2>(&output_cb_),
+ EnterPendingDecoderInitStateAction(this)));
+ EXPECT_CALL(*decoder_, Stop());
+
+ WaitableMessageLoopEvent event;
+ InitializeRenderer(event.GetPipelineStatusCB());
+
+ base::RunLoop().RunUntilIdle();
+ DCHECK(!init_decoder_cb_.is_null());
+
+ renderer_->Stop(NewExpectedClosure());
+ base::ResetAndReturn(&init_decoder_cb_).Run(PIPELINE_OK);
+
+ event.RunAndWaitForStatus(PIPELINE_ERROR_ABORT);
+ EXPECT_EQ(renderer_->state_, AudioRendererImpl::kStopped);
+ }
+
+ void EnterPendingDecoderInitState(PipelineStatusCB cb) {
+ init_decoder_cb_ = cb;
}
void Flush() {
@@ -175,30 +229,20 @@ class AudioRendererImplTest : public ::testing::Test {
WaitForPendingRead();
DeliverRemainingAudio();
event.RunAndWaitForStatus(PIPELINE_OK);
-
- // We should have no reads.
- EXPECT_TRUE(read_cb_.is_null());
}
- void Play() {
- SCOPED_TRACE("Play()");
- WaitableMessageLoopEvent event;
- renderer_->Play(event.GetClosure());
+ void StartRendering() {
+ renderer_->StartRendering();
renderer_->SetPlaybackRate(1.0f);
- event.RunAndWait();
}
- void Pause() {
- WaitableMessageLoopEvent pause_event;
- renderer_->Pause(pause_event.GetClosure());
- pause_event.RunAndWait();
+ void StopRendering() {
+ renderer_->StopRendering();
}
void Seek() {
- Pause();
-
+ StopRendering();
Flush();
-
Preroll();
}
@@ -208,48 +252,63 @@ class AudioRendererImplTest : public ::testing::Test {
}
bool IsReadPending() const {
- return !read_cb_.is_null();
+ return !decode_cb_.is_null();
}
void WaitForPendingRead() {
SCOPED_TRACE("WaitForPendingRead()");
- if (!read_cb_.is_null())
+ if (!decode_cb_.is_null())
return;
- DCHECK(wait_for_pending_read_cb_.is_null());
+ DCHECK(wait_for_pending_decode_cb_.is_null());
WaitableMessageLoopEvent event;
- wait_for_pending_read_cb_ = event.GetClosure();
+ wait_for_pending_decode_cb_ = event.GetClosure();
event.RunAndWait();
- DCHECK(!read_cb_.is_null());
- DCHECK(wait_for_pending_read_cb_.is_null());
+ DCHECK(!decode_cb_.is_null());
+ DCHECK(wait_for_pending_decode_cb_.is_null());
}
// Delivers |size| frames with value kPlayingAudio to |renderer_|.
void SatisfyPendingRead(int size) {
CHECK_GT(size, 0);
- CHECK(!read_cb_.is_null());
+ CHECK(!decode_cb_.is_null());
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormat,
- kChannels,
- kPlayingAudio,
- 0.0f,
- size,
- next_timestamp_->GetTimestamp(),
- next_timestamp_->GetFrameDuration(size));
+ MakeAudioBuffer<float>(kSampleFormat,
+ kChannelLayout,
+ kChannelCount,
+ kSamplesPerSecond,
+ kPlayingAudio,
+ 0.0f,
+ size,
+ next_timestamp_->GetTimestamp());
next_timestamp_->AddFrames(size);
DeliverBuffer(AudioDecoder::kOk, buffer);
}
- void AbortPendingRead() {
- DeliverBuffer(AudioDecoder::kAborted, NULL);
- }
-
void DeliverEndOfStream() {
- DeliverBuffer(AudioDecoder::kOk, AudioBuffer::CreateEOSBuffer());
+ DCHECK(!decode_cb_.is_null());
+
+ // Return EOS buffer to trigger EOS frame.
+ EXPECT_CALL(demuxer_stream_, Read(_))
+ .WillOnce(RunCallback<0>(DemuxerStream::kOk,
+ DecoderBuffer::CreateEOSBuffer()));
+
+ // Satisfy the pending |decode_cb_| to trigger a new DemuxerStream::Read().
+ message_loop_.PostTask(
+ FROM_HERE,
+ base::Bind(base::ResetAndReturn(&decode_cb_), AudioDecoder::kOk));
+
+ WaitForPendingRead();
+
+ message_loop_.PostTask(
+ FROM_HERE,
+ base::Bind(base::ResetAndReturn(&decode_cb_), AudioDecoder::kOk));
+
+ message_loop_.RunUntilIdle();
}
// Delivers frames until |renderer_|'s internal buffer is full and no longer
@@ -283,8 +342,6 @@ class AudioRendererImplTest : public ::testing::Test {
// number of frames read. Since time is frozen, the audio delay will increase
// as frames come in.
int ConsumeAllBufferedData() {
- renderer_->DisableUnderflowForTesting();
-
int frames_read = 0;
int total_frames_read = 0;
@@ -293,7 +350,7 @@ class AudioRendererImplTest : public ::testing::Test {
do {
TimeDelta audio_delay = TimeDelta::FromMicroseconds(
total_frames_read * Time::kMicrosecondsPerSecond /
- static_cast<float>(decoder_->samples_per_second()));
+ static_cast<float>(hardware_config_.GetOutputConfig().sample_rate()));
frames_read = renderer_->Render(
bus.get(), audio_delay.InMilliseconds());
@@ -333,7 +390,7 @@ class AudioRendererImplTest : public ::testing::Test {
void EndOfStreamTest(float playback_rate) {
Initialize();
Preroll();
- Play();
+ StartRendering();
renderer_->SetPlaybackRate(playback_rate);
// Drain internal buffer, we should have a pending read.
@@ -371,10 +428,31 @@ class AudioRendererImplTest : public ::testing::Test {
time_ += time;
}
+ void force_config_change() {
+ renderer_->OnConfigChange();
+ }
+
+ int converter_input_frames_left() const {
+ return renderer_->buffer_converter_->input_frames_left_for_testing();
+ }
+
+ bool splicer_has_next_buffer() const {
+ return renderer_->splicer_->HasNextBuffer();
+ }
+
+ base::TimeDelta last_time_update() const {
+ return last_time_update_;
+ }
+
// Fixture members.
base::MessageLoop message_loop_;
scoped_ptr<AudioRendererImpl> renderer_;
scoped_refptr<FakeAudioRendererSink> sink_;
+ AudioHardwareConfig hardware_config_;
+
+ // Whether or not the test fixture's destructor should call Stop() on
+ // |renderer_|.
+ bool needs_stop_;
private:
TimeTicks GetTime() {
@@ -382,34 +460,49 @@ class AudioRendererImplTest : public ::testing::Test {
return time_;
}
- void ReadDecoder(const AudioDecoder::ReadCB& read_cb) {
+ void DecodeDecoder(const scoped_refptr<DecoderBuffer>& buffer,
+ const AudioDecoder::DecodeCB& decode_cb) {
+ // We shouldn't ever call Decode() after Stop():
+ EXPECT_TRUE(stop_decoder_cb_.is_null());
+
// TODO(scherkus): Make this a DCHECK after threading semantics are fixed.
if (base::MessageLoop::current() != &message_loop_) {
message_loop_.PostTask(FROM_HERE, base::Bind(
- &AudioRendererImplTest::ReadDecoder,
- base::Unretained(this), read_cb));
+ &AudioRendererImplTest::DecodeDecoder,
+ base::Unretained(this), buffer, decode_cb));
return;
}
- CHECK(read_cb_.is_null()) << "Overlapping reads are not permitted";
- read_cb_ = read_cb;
+ CHECK(decode_cb_.is_null()) << "Overlapping decodes are not permitted";
+ decode_cb_ = decode_cb;
// Wake up WaitForPendingRead() if needed.
- if (!wait_for_pending_read_cb_.is_null())
- base::ResetAndReturn(&wait_for_pending_read_cb_).Run();
+ if (!wait_for_pending_decode_cb_.is_null())
+ base::ResetAndReturn(&wait_for_pending_decode_cb_).Run();
}
void ResetDecoder(const base::Closure& reset_cb) {
- CHECK(read_cb_.is_null())
- << "Reset overlapping with reads is not permitted";
+ if (!decode_cb_.is_null()) {
+ // |reset_cb| will be called in DeliverBuffer(), after the decoder is
+ // flushed.
+ reset_cb_ = reset_cb;
+ return;
+ }
message_loop_.PostTask(FROM_HERE, reset_cb);
}
void DeliverBuffer(AudioDecoder::Status status,
const scoped_refptr<AudioBuffer>& buffer) {
- CHECK(!read_cb_.is_null());
- base::ResetAndReturn(&read_cb_).Run(status, buffer);
+ CHECK(!decode_cb_.is_null());
+ if (buffer && !buffer->end_of_stream())
+ output_cb_.Run(buffer);
+ base::ResetAndReturn(&decode_cb_).Run(status);
+
+ if (!reset_cb_.is_null())
+ base::ResetAndReturn(&reset_cb_).Run();
+
+ message_loop_.RunUntilIdle();
}
MockDemuxerStream demuxer_stream_;
@@ -420,22 +513,23 @@ class AudioRendererImplTest : public ::testing::Test {
TimeTicks time_;
// Used for satisfying reads.
- AudioDecoder::ReadCB read_cb_;
+ AudioDecoder::OutputCB output_cb_;
+ AudioDecoder::DecodeCB decode_cb_;
+ base::Closure reset_cb_;
scoped_ptr<AudioTimestampHelper> next_timestamp_;
WaitableMessageLoopEvent ended_event_;
- // Run during ReadDecoder() to unblock WaitForPendingRead().
- base::Closure wait_for_pending_read_cb_;
+ // Run during DecodeDecoder() to unblock WaitForPendingRead().
+ base::Closure wait_for_pending_decode_cb_;
+ base::Closure stop_decoder_cb_;
+
+ PipelineStatusCB init_decoder_cb_;
+ base::TimeDelta last_time_update_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
};
-TEST_F(AudioRendererImplTest, Initialize_Failed) {
- ExpectUnsupportedAudioDecoderConfig();
- InitializeWithStatus(PIPELINE_ERROR_INITIALIZATION_FAILED);
-}
-
TEST_F(AudioRendererImplTest, Initialize_Successful) {
Initialize();
}
@@ -450,10 +544,10 @@ TEST_F(AudioRendererImplTest, Preroll) {
Preroll();
}
-TEST_F(AudioRendererImplTest, Play) {
+TEST_F(AudioRendererImplTest, StartRendering) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
@@ -478,7 +572,7 @@ TEST_F(AudioRendererImplTest, Underflow) {
int initial_capacity = buffer_capacity();
- Play();
+ StartRendering();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
@@ -506,13 +600,13 @@ TEST_F(AudioRendererImplTest, Underflow) {
EXPECT_FALSE(muted);
}
-TEST_F(AudioRendererImplTest, Underflow_FollowedByFlush) {
+TEST_F(AudioRendererImplTest, Underflow_CapacityResetsAfterFlush) {
Initialize();
Preroll();
int initial_capacity = buffer_capacity();
- Play();
+ StartRendering();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
@@ -523,24 +617,41 @@ TEST_F(AudioRendererImplTest, Underflow_FollowedByFlush) {
EXPECT_CALL(*this, OnUnderflow());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+ // Capacity should be unchanged until we actually resume; it is the resume
+ // after underflow that increases it.
+ EXPECT_EQ(buffer_capacity(), initial_capacity);
renderer_->ResumeAfterUnderflow();
-
- // Verify that the buffer capacity increased as a result of the underflow.
EXPECT_GT(buffer_capacity(), initial_capacity);
- // Deliver data to get the renderer out of the underflow/rebuffer state.
- DeliverRemainingAudio();
-
- Seek();
-
// Verify that the buffer capacity is restored to the |initial_capacity|.
+ DeliverEndOfStream();
+ Flush();
EXPECT_EQ(buffer_capacity(), initial_capacity);
}
+TEST_F(AudioRendererImplTest, Underflow_FlushWhileUnderflowed) {
+ Initialize();
+ Preroll();
+ StartRendering();
+
+ // Drain internal buffer, we should have a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ WaitForPendingRead();
+
+ // Verify the next FillBuffer() call triggers the underflow callback
+ // since the decoder hasn't delivered any data after it was drained.
+ EXPECT_CALL(*this, OnUnderflow());
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+
+ // Verify that we can still Flush() before entering the rebuffering state.
+ DeliverEndOfStream();
+ Flush();
+}
+
TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Figure out how long until the ended event should fire. Since
// ConsumeBufferedData() doesn't provide audio delay information, the time
@@ -561,16 +672,19 @@ TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
SatisfyPendingRead(kDataSize);
WaitForPendingRead();
- // Verify we're getting muted audio during underflow.
+ // Verify we're getting muted audio during underflow. Note: since resampling
+ // is active, frames_buffered() won't always match kDataSize.
bool muted = false;
- EXPECT_EQ(kDataSize, frames_buffered());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
+ const int kInitialFramesBuffered = 1114;
+ EXPECT_EQ(kInitialFramesBuffered, frames_buffered());
+ EXPECT_FALSE(ConsumeBufferedData(kInitialFramesBuffered, &muted));
EXPECT_TRUE(muted);
// Now deliver end of stream, we should get our little bit of data back.
DeliverEndOfStream();
- EXPECT_EQ(kDataSize, frames_buffered());
- EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
+ const int kNextFramesBuffered = 1408;
+ EXPECT_EQ(kNextFramesBuffered, frames_buffered());
+ EXPECT_TRUE(ConsumeBufferedData(kNextFramesBuffered, &muted));
EXPECT_FALSE(muted);
// Attempt to read to make sure we're truly at the end of stream.
@@ -583,7 +697,7 @@ TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
@@ -610,7 +724,7 @@ TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
TEST_F(AudioRendererImplTest, Underflow_SetPlaybackRate) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
@@ -646,7 +760,7 @@ TEST_F(AudioRendererImplTest, Underflow_SetPlaybackRate) {
TEST_F(AudioRendererImplTest, Underflow_PausePlay) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
@@ -674,60 +788,17 @@ TEST_F(AudioRendererImplTest, Underflow_PausePlay) {
EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
}
-TEST_F(AudioRendererImplTest, AbortPendingRead_Preroll) {
- Initialize();
-
- // Start prerolling and wait for a read.
- WaitableMessageLoopEvent event;
- renderer_->Preroll(TimeDelta(), event.GetPipelineStatusCB());
- WaitForPendingRead();
-
- // Simulate the decoder aborting the pending read.
- AbortPendingRead();
- event.RunAndWaitForStatus(PIPELINE_OK);
-
- Flush();
-
- // Preroll again to a different timestamp and verify it completed normally.
- Preroll(1000, PIPELINE_OK);
-}
-
-TEST_F(AudioRendererImplTest, AbortPendingRead_Pause) {
- Initialize();
-
- Preroll();
- Play();
-
- // Partially drain internal buffer so we get a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
- WaitForPendingRead();
-
- // Start pausing.
- WaitableMessageLoopEvent event;
- renderer_->Pause(event.GetClosure());
-
- // Simulate the decoder aborting the pending read.
- AbortPendingRead();
- event.RunAndWait();
-
- Flush();
-
- // Preroll again to a different timestamp and verify it completed normally.
- Preroll(1000, PIPELINE_OK);
-}
-
-
-TEST_F(AudioRendererImplTest, AbortPendingRead_Flush) {
+TEST_F(AudioRendererImplTest, PendingRead_Flush) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Partially drain internal buffer so we get a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
WaitForPendingRead();
- Pause();
+ StopRendering();
EXPECT_TRUE(IsReadPending());
@@ -735,8 +806,8 @@ TEST_F(AudioRendererImplTest, AbortPendingRead_Flush) {
WaitableMessageLoopEvent flush_event;
renderer_->Flush(flush_event.GetClosure());
- // Simulate the decoder aborting the pending read.
- AbortPendingRead();
+ SatisfyPendingRead(kDataSize);
+
flush_event.RunAndWait();
EXPECT_FALSE(IsReadPending());
@@ -745,42 +816,42 @@ TEST_F(AudioRendererImplTest, AbortPendingRead_Flush) {
Preroll(1000, PIPELINE_OK);
}
-TEST_F(AudioRendererImplTest, PendingRead_Pause) {
+TEST_F(AudioRendererImplTest, PendingRead_Stop) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Partially drain internal buffer so we get a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
WaitForPendingRead();
- // Start pausing.
- WaitableMessageLoopEvent event;
- renderer_->Pause(event.GetClosure());
+ StopRendering();
- SatisfyPendingRead(kDataSize);
+ EXPECT_TRUE(IsReadPending());
- event.RunAndWait();
+ WaitableMessageLoopEvent stop_event;
+ renderer_->Stop(stop_event.GetClosure());
+ needs_stop_ = false;
- Flush();
+ SatisfyPendingRead(kDataSize);
- // Preroll again to a different timestamp and verify it completed normally.
- Preroll(1000, PIPELINE_OK);
-}
+ stop_event.RunAndWait();
+ EXPECT_FALSE(IsReadPending());
+}
-TEST_F(AudioRendererImplTest, PendingRead_Flush) {
+TEST_F(AudioRendererImplTest, PendingFlush_Stop) {
Initialize();
Preroll();
- Play();
+ StartRendering();
// Partially drain internal buffer so we get a pending read.
EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
WaitForPendingRead();
- Pause();
+ StopRendering();
EXPECT_TRUE(IsReadPending());
@@ -790,38 +861,132 @@ TEST_F(AudioRendererImplTest, PendingRead_Flush) {
SatisfyPendingRead(kDataSize);
- flush_event.RunAndWait();
+ WaitableMessageLoopEvent event;
+ renderer_->Stop(event.GetClosure());
+ event.RunAndWait();
+ needs_stop_ = false;
+}
- EXPECT_FALSE(IsReadPending());
+TEST_F(AudioRendererImplTest, InitializeThenStop) {
+ InitializeAndStop();
+}
- // Preroll again to a different timestamp and verify it completed normally.
- Preroll(1000, PIPELINE_OK);
+TEST_F(AudioRendererImplTest, InitializeThenStopDuringDecoderInit) {
+ InitializeAndStopDuringDecoderInit();
}
-TEST_F(AudioRendererImplTest, StopDuringFlush) {
+TEST_F(AudioRendererImplTest, ConfigChangeDrainsConverter) {
Initialize();
+ Preroll();
+ StartRendering();
+
+ // Drain internal buffer, we should have a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ WaitForPendingRead();
+
+ // Deliver a little bit of data. Use an odd data size to ensure there is data
+ // left in the AudioBufferConverter. Ensure no buffers are in the splicer.
+ SatisfyPendingRead(2053);
+ EXPECT_FALSE(splicer_has_next_buffer());
+ EXPECT_GT(converter_input_frames_left(), 0);
+
+ // Force a config change and then ensure all buffered data has been put into
+ // the splicer.
+ force_config_change();
+ EXPECT_TRUE(splicer_has_next_buffer());
+ EXPECT_EQ(0, converter_input_frames_left());
+}
+TEST_F(AudioRendererImplTest, TimeUpdatesOnFirstBuffer) {
+ Initialize();
Preroll();
- Play();
+ StartRendering();
- // Partially drain internal buffer so we get a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ AudioTimestampHelper timestamp_helper(kOutputSamplesPerSecond);
+ EXPECT_EQ(kNoTimestamp(), last_time_update());
+
+ // Preroll() should have buffered some data; consume half of it now.
+ int frames_to_consume = frames_buffered() / 2;
+ EXPECT_TRUE(ConsumeBufferedData(frames_to_consume, NULL));
WaitForPendingRead();
+ base::RunLoop().RunUntilIdle();
+
+ // ConsumeBufferedData() uses an audio delay of zero, so ensure we received
+ // a time update covering the |frames_to_consume| frames read above.
+ timestamp_helper.SetBaseTimestamp(base::TimeDelta());
+ timestamp_helper.AddFrames(frames_to_consume);
+ EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update());
+
+ // The next time update should match the remaining frames_buffered(), but only
+ // after running the message loop.
+ frames_to_consume = frames_buffered();
+ EXPECT_TRUE(ConsumeBufferedData(frames_to_consume, NULL));
+ EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update());
+
+ base::RunLoop().RunUntilIdle();
+ timestamp_helper.AddFrames(frames_to_consume);
+ EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update());
+}
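
(Annotation, not part of the patch: the expected values above are pure frame arithmetic. A standalone sketch of the conversion, using hypothetical values; the real AudioTimestampHelper additionally tracks fractional frames, which the integer math below ignores.)

    #include <cstdint>
    #include <cstdio>

    // Frames consumed at |sample_rate| Hz advance the expected time update by
    // frames / sample_rate seconds (shown here in microseconds).
    int64_t FramesToMicroseconds(int64_t frames, int sample_rate) {
      return frames * 1000000 / sample_rate;
    }

    int main() {
      const int kSampleRate = 44100;  // Hypothetical output rate.
      printf("%lld us\n",
             static_cast<long long>(FramesToMicroseconds(256, kSampleRate)));
      return 0;
    }
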
- Pause();
+TEST_F(AudioRendererImplTest, ImmediateEndOfStream) {
+ Initialize();
+ {
+ SCOPED_TRACE("Preroll()");
+ WaitableMessageLoopEvent event;
+ renderer_->Preroll(base::TimeDelta(), event.GetPipelineStatusCB());
+ WaitForPendingRead();
+ DeliverEndOfStream();
+ event.RunAndWaitForStatus(PIPELINE_OK);
+ }
+ StartRendering();
- EXPECT_TRUE(IsReadPending());
+ // Read a single frame. We shouldn't be able to satisfy it.
+ EXPECT_FALSE(ConsumeBufferedData(1, NULL));
+ WaitForEnded();
+}
- // Start flushing.
- WaitableMessageLoopEvent flush_event;
- renderer_->Flush(flush_event.GetClosure());
+TEST_F(AudioRendererImplTest, OnRenderErrorCausesDecodeError) {
+ Initialize();
+ Preroll();
+ StartRendering();
- SatisfyPendingRead(kDataSize);
+ EXPECT_CALL(*this, OnError(PIPELINE_ERROR_DECODE));
+ sink_->OnRenderError();
+}
- // Request a Stop() before the flush completes.
- WaitableMessageLoopEvent stop_event;
- renderer_->Stop(stop_event.GetClosure());
- stop_event.RunAndWait();
+// Test for AudioRendererImpl calling Pause()/Play() on the sink when the
+// playback rate is set to zero and non-zero.
+TEST_F(AudioRendererImplTest, SetPlaybackRate) {
+ Initialize();
+ Preroll();
+
+ // Rendering hasn't started. Sink should always be paused.
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+ renderer_->SetPlaybackRate(0.0f);
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+ renderer_->SetPlaybackRate(1.0f);
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+
+ // Rendering has started with non-zero rate. Rate changes will affect sink
+ // state.
+ renderer_->StartRendering();
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+ renderer_->SetPlaybackRate(0.0f);
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+ renderer_->SetPlaybackRate(1.0f);
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+
+ // Rendering has stopped. Sink should be paused.
+ renderer_->StopRendering();
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+
+ // Start rendering with zero playback rate. Sink should be paused until
+ // non-zero rate is set.
+ renderer_->SetPlaybackRate(0.0f);
+ renderer_->StartRendering();
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+ renderer_->SetPlaybackRate(1.0f);
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
}
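
(Annotation, not part of the patch: the expectations in SetPlaybackRate reduce to one rule, which this hedged standalone sketch restates; the enum mirrors FakeAudioRendererSink::state() but is local to the example.)

    #include <cassert>

    enum SinkState { kPaused, kPlaying };

    // The sink plays only while rendering has started *and* the rate is
    // non-zero; every other combination leaves it paused.
    SinkState ExpectedSinkState(bool rendering, float playback_rate) {
      return (rendering && playback_rate > 0.0f) ? kPlaying : kPaused;
    }

    int main() {
      assert(ExpectedSinkState(false, 1.0f) == kPaused);  // Not rendering yet.
      assert(ExpectedSinkState(true, 0.0f) == kPaused);   // Zero rate.
      assert(ExpectedSinkState(true, 1.0f) == kPlaying);  // Rendering, rate > 0.
      return 0;
    }
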
} // namespace media
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index 57ee3f95a9e..8c6f8b9019c 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -5,8 +5,8 @@
#include "media/filters/chunk_demuxer.h"
#include <algorithm>
-#include <deque>
#include <limits>
+#include <list>
#include "base/bind.h"
#include "base/callback_helpers.h"
@@ -14,15 +14,75 @@
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/video_decoder_config.h"
+#include "media/filters/frame_processor.h"
#include "media/filters/stream_parser_factory.h"
using base::TimeDelta;
namespace media {
+static TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) {
+ return queue.back()->timestamp() + queue.back()->duration();
+}
+
+// List of time ranges for each SourceBuffer.
+typedef std::list<Ranges<TimeDelta> > RangesList;
+static Ranges<TimeDelta> ComputeIntersection(const RangesList& activeRanges,
+ bool ended) {
+ // Implementation of HTMLMediaElement.buffered algorithm in MSE spec.
+ // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#dom-htmlmediaelement.buffered
+
+ // Step 1: If activeSourceBuffers.length equals 0 then return an empty
+ // TimeRanges object and abort these steps.
+ if (activeRanges.empty())
+ return Ranges<TimeDelta>();
+
+ // Step 2: Let active ranges be the ranges returned by buffered for each
+ // SourceBuffer object in activeSourceBuffers.
+ // Step 3: Let highest end time be the largest range end time in the active
+ // ranges.
+ TimeDelta highest_end_time;
+ for (RangesList::const_iterator itr = activeRanges.begin();
+ itr != activeRanges.end(); ++itr) {
+ if (!itr->size())
+ continue;
+
+ highest_end_time = std::max(highest_end_time, itr->end(itr->size() - 1));
+ }
+
+ // Step 4: Let intersection ranges equal a TimeRange object containing a
+ // single range from 0 to highest end time.
+ Ranges<TimeDelta> intersection_ranges;
+ intersection_ranges.Add(TimeDelta(), highest_end_time);
+
+ // Step 5: For each SourceBuffer object in activeSourceBuffers run the
+ // following steps:
+ for (RangesList::const_iterator itr = activeRanges.begin();
+ itr != activeRanges.end(); ++itr) {
+ // Step 5.1: Let source ranges equal the ranges returned by the buffered
+ // attribute on the current SourceBuffer.
+ Ranges<TimeDelta> source_ranges = *itr;
+
+ // Step 5.2: If readyState is "ended", then set the end time on the last
+ // range in source ranges to highest end time.
+ if (ended && source_ranges.size() > 0u) {
+ source_ranges.Add(source_ranges.start(source_ranges.size() - 1),
+ highest_end_time);
+ }
+
+ // Step 5.3: Let new intersection ranges equal the intersection between
+ // the intersection ranges and the source ranges.
+ // Step 5.4: Replace the ranges in intersection ranges with the new
+ // intersection ranges.
+ intersection_ranges = intersection_ranges.IntersectionWith(source_ranges);
+ }
+
+ return intersection_ranges;
+}
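
(Annotation, not part of the patch: a worked example of the spec algorithm above. The Intervals type below is a hypothetical stand-in for media::Ranges<TimeDelta>, kept as sorted, disjoint [start, end) pairs in seconds.)

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    typedef std::vector<std::pair<double, double> > Intervals;

    // Pairwise interval intersection (steps 5.3/5.4 of the algorithm).
    Intervals Intersect(const Intervals& a, const Intervals& b) {
      Intervals out;
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        double lo = std::max(a[i].first, b[j].first);
        double hi = std::min(a[i].second, b[j].second);
        if (lo < hi)
          out.push_back(std::make_pair(lo, hi));
        if (a[i].second < b[j].second)
          ++i;
        else
          ++j;
      }
      return out;
    }

    int main() {
      Intervals audio(1, std::make_pair(0.0, 10.0));
      Intervals video;
      video.push_back(std::make_pair(0.0, 8.0));
      video.push_back(std::make_pair(9.0, 12.0));

      const bool ended = true;
      const double highest_end = 12.0;  // Step 3: max end time across sources.

      Intervals result(1, std::make_pair(0.0, highest_end));  // Step 4.
      Intervals sources[] = { audio, video };
      for (int k = 0; k < 2; ++k) {
        Intervals s = sources[k];
        if (ended && !s.empty())
          s.back().second = highest_end;  // Step 5.2.
        result = Intersect(result, s);
      }
      // Prints [0, 8) and [9, 12); with |ended| false it would be
      // [0, 8) and [9, 10).
      for (size_t k = 0; k < result.size(); ++k)
        printf("[%g, %g)\n", result[k].first, result[k].second);
      return 0;
    }
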
+
// Contains state belonging to a source id.
class SourceState {
public:
@@ -30,17 +90,13 @@ class SourceState {
typedef base::Callback<ChunkDemuxerStream*(
DemuxerStream::Type)> CreateDemuxerStreamCB;
- // Callback signature used to notify ChunkDemuxer of timestamps
- // that may cause the duration to be updated.
- typedef base::Callback<void(
- TimeDelta, ChunkDemuxerStream*)> IncreaseDurationCB;
-
typedef base::Callback<void(
ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB;
- SourceState(scoped_ptr<StreamParser> stream_parser, const LogCB& log_cb,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb,
- const IncreaseDurationCB& increase_duration_cb);
+ SourceState(
+ scoped_ptr<StreamParser> stream_parser,
+ scoped_ptr<FrameProcessor> frame_processor, const LogCB& log_cb,
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb);
~SourceState();
@@ -52,33 +108,63 @@ class SourceState {
// Appends new data to the StreamParser.
// Returns true if the data was successfully appended. Returns false if an
- // error occurred.
- bool Append(const uint8* data, size_t length);
+ // error occurred. |*timestamp_offset| is used and possibly updated by the
+ // append. |append_window_start| and |append_window_end| correspond to the MSE
+ // spec's similarly named source buffer attributes that are used in coded
+ // frame processing.
+ bool Append(const uint8* data, size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset);
// Aborts the current append sequence and resets the parser.
- void Abort();
+ void Abort(TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset);
- // Sets |timestamp_offset_| if possible.
- // Returns if the offset was set. Returns false if the offset could not be
- // updated at this time.
- bool SetTimestampOffset(TimeDelta timestamp_offset);
+ // Calls Remove(|start|, |end|, |duration|) on all
+ // ChunkDemuxerStreams managed by this object.
+ void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
- TimeDelta timestamp_offset() const { return timestamp_offset_; }
+ // Returns true if currently parsing a media segment, or false otherwise.
+ bool parsing_media_segment() const { return parsing_media_segment_; }
- void set_append_window_start(TimeDelta start) {
- append_window_start_ = start;
- }
- void set_append_window_end(TimeDelta end) { append_window_end_ = end; }
+ // Sets |frame_processor_|'s sequence mode to |sequence_mode|.
+ void SetSequenceMode(bool sequence_mode);
+
+ // Signals the coded frame processor to update its group start timestamp to be
+ // |timestamp_offset| if it is in sequence append mode.
+ void SetGroupStartTimestampIfInSequenceMode(base::TimeDelta timestamp_offset);
+
+ // Returns the range of buffered data in this source, capped at |duration|.
+ // |ended| - Set to true if end of stream has been signaled and the special
+ // end of stream range logic needs to be executed.
+ Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration, bool ended) const;
+ // Returns the highest buffered duration across all streams managed
+ // by this object.
+ // Returns TimeDelta() if none of the streams contain buffered data.
+ TimeDelta GetMaxBufferedDuration() const;
+
+ // Helper methods that call methods with similar names on all the
+ // ChunkDemuxerStreams managed by this object.
void StartReturningData();
void AbortReads();
void Seek(TimeDelta seek_time);
void CompletePendingReadIfPossible();
+ void OnSetDuration(TimeDelta duration);
+ void MarkEndOfStream();
+ void UnmarkEndOfStream();
+ void Shutdown();
+ // Sets the memory limit on each stream. |memory_limit| is the
+ // maximum number of bytes each stream is allowed to hold in its buffer.
+ void SetMemoryLimitsForTesting(int memory_limit);
+ bool IsSeekWaitingForData() const;
private:
// Called by the |stream_parser_| when a new initialization segment is
// encountered.
- // Returns true on a successful call. Returns false if an error occured while
+ // Returns true on a successful call. Returns false if an error occurred while
// processing decoder configurations.
bool OnNewConfigs(bool allow_audio, bool allow_video,
const AudioDecoderConfig& audio_config,
@@ -91,47 +177,33 @@ class SourceState {
// Called by the |stream_parser_| at the end of a media segment.
void OnEndOfMediaSegment();
- // Called by the |stream_parser_| when new buffers have been parsed. It
- // applies |timestamp_offset_| to all buffers in |audio_buffers| and
- // |video_buffers| and then calls Append() on |audio_| and/or
- // |video_| with the modified buffers.
- // Returns true on a successful call. Returns false if an error occured while
+ // Called by the |stream_parser_| when new buffers have been parsed.
+ // It processes the new buffers using |frame_processor_|, which includes
+ // appending the processed frames to associated demuxer streams for each
+ // frame's track.
+ // Returns true on a successful call. Returns false if an error occurred while
// processing the buffers.
bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers);
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map);
- // Called by the |stream_parser_| when new text buffers have been parsed. It
- // applies |timestamp_offset_| to all buffers in |buffers| and then appends
- // the (modified) buffers to the demuxer stream associated with
- // the track having |text_track_number|.
- // Returns true on a successful call. Returns false if an error occured while
- // processing the buffers.
- bool OnTextBuffers(int text_track_number,
- const StreamParser::BufferQueue& buffers);
-
- // Helper function that adds |timestamp_offset_| to each buffer in |buffers|.
- void AdjustBufferTimestamps(const StreamParser::BufferQueue& buffers);
-
- // Filters out buffers that are outside of the append window
- // [|append_window_start_|, |append_window_end_|).
- // |needs_keyframe| is a pointer to the |xxx_need_keyframe_| flag
- // associated with the |buffers|. Its state is read an updated as
- // this method filters |buffers|.
- // Buffers that are inside the append window are appended to the end
- // of |filtered_buffers|.
- void FilterWithAppendWindow(const StreamParser::BufferQueue& buffers,
- bool* needs_keyframe,
- StreamParser::BufferQueue* filtered_buffers);
+ void OnSourceInitDone(bool success,
+ const StreamParser::InitParameters& params);
CreateDemuxerStreamCB create_demuxer_stream_cb_;
- IncreaseDurationCB increase_duration_cb_;
NewTextTrackCB new_text_track_cb_;
- // The offset to apply to media segment timestamps.
- TimeDelta timestamp_offset_;
+ // During Append(), if OnNewBuffers() coded frame processing updates the
+ // timestamp offset then |*timestamp_offset_during_append_| is also updated
+ // so Append()'s caller can know the new offset. This pointer is only non-NULL
+ // during the lifetime of an Append() call.
+ TimeDelta* timestamp_offset_during_append_;
- TimeDelta append_window_start_;
- TimeDelta append_window_end_;
+ // During Append(), coded frame processing triggered by OnNewBuffers()
+ // requires these two attributes. These are only valid during the lifetime of
+ // an Append() call.
+ TimeDelta append_window_start_during_append_;
+ TimeDelta append_window_end_during_append_;
// Set to true if the next buffers appended within the append window
// represent the start of a new media segment. This flag being set
@@ -139,207 +211,175 @@ class SourceState {
// appended. The flag is set on actual media segment boundaries and
// when the "append window" filtering causes discontinuities in the
// appended data.
+ // TODO(wolenetz/acolwell): Investigate if we need this, or if coded frame
+ // processing's discontinuity logic is enough. See http://crbug.com/351489.
bool new_media_segment_;
- // Keeps track of whether |timestamp_offset_| can be modified.
- bool can_update_offset_;
+ // Keeps track of whether a media segment is being parsed.
+ bool parsing_media_segment_;
// The object used to parse appended data.
scoped_ptr<StreamParser> stream_parser_;
- ChunkDemuxerStream* audio_;
- bool audio_needs_keyframe_;
-
- ChunkDemuxerStream* video_;
- bool video_needs_keyframe_;
+ ChunkDemuxerStream* audio_; // Not owned by |this|.
+ ChunkDemuxerStream* video_; // Not owned by |this|.
- typedef std::map<int, ChunkDemuxerStream*> TextStreamMap;
- TextStreamMap text_stream_map_;
+ typedef std::map<StreamParser::TrackId, ChunkDemuxerStream*> TextStreamMap;
+ TextStreamMap text_stream_map_; // |this| owns the map's stream pointers.
+ scoped_ptr<FrameProcessor> frame_processor_;
LogCB log_cb_;
+ StreamParser::InitCB init_cb_;
+
+ // Indicates that timestampOffset should be updated automatically during
+ // OnNewBuffers() based on the earliest end timestamp of the buffers provided.
+ // TODO(wolenetz): Refactor this function while integrating April 29, 2014
+ // changes to MSE spec. See http://crbug.com/371499.
+ bool auto_update_timestamp_offset_;
DISALLOW_COPY_AND_ASSIGN(SourceState);
};
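
(Annotation, not part of the patch: a sketch of how the reshaped SourceState interface above is driven, assembled from its declaration; argument values are illustrative only.)

    // Construction now takes the frame processor alongside the parser.
    scoped_ptr<SourceState> source(new SourceState(
        stream_parser.Pass(), frame_processor.Pass(), log_cb,
        create_demuxer_stream_cb));
    source->Init(init_cb, true /* allow_audio */, true /* allow_video */,
                 need_key_cb, new_text_track_cb);
    source->SetSequenceMode(false);  // "segments" append mode.

    // Append window and timestamp offset are now passed per call; |offset|
    // is in/out and may be rewritten by coded frame processing.
    TimeDelta offset;
    source->Append(data, length,
                   TimeDelta() /* append_window_start */,
                   kInfiniteDuration() /* append_window_end */,
                   &offset);
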
-class ChunkDemuxerStream : public DemuxerStream {
- public:
- typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
-
- explicit ChunkDemuxerStream(Type type);
- virtual ~ChunkDemuxerStream();
-
- // ChunkDemuxerStream control methods.
- void StartReturningData();
- void AbortReads();
- void CompletePendingReadIfPossible();
- void Shutdown();
-
- // SourceBufferStream manipulation methods.
- void Seek(TimeDelta time);
- bool IsSeekWaitingForData() const;
-
- // Add buffers to this stream. Buffers are stored in SourceBufferStreams,
- // which handle ordering and overlap resolution.
- // Returns true if buffers were successfully added.
- bool Append(const StreamParser::BufferQueue& buffers);
-
- // Removes buffers between |start| and |end| according to the steps
- // in the "Coded Frame Removal Algorithm" in the Media Source
- // Extensions Spec.
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-coded-frame-removal
- //
- // |duration| is the current duration of the presentation. It is
- // required by the computation outlined in the spec.
- void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
-
- // Signal to the stream that duration has changed to |duration|.
- void OnSetDuration(TimeDelta duration);
-
- // Returns the range of buffered data in this stream, capped at |duration|.
- Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration) const;
-
- // Signal to the stream that buffers handed in through subsequent calls to
- // Append() belong to a media segment that starts at |start_timestamp|.
- void OnNewMediaSegment(TimeDelta start_timestamp);
-
- // Called when midstream config updates occur.
- // Returns true if the new config is accepted.
- // Returns false if the new config should trigger an error.
- bool UpdateAudioConfig(const AudioDecoderConfig& config, const LogCB& log_cb);
- bool UpdateVideoConfig(const VideoDecoderConfig& config, const LogCB& log_cb);
- void UpdateTextConfig(const TextTrackConfig& config, const LogCB& log_cb);
+SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
+ scoped_ptr<FrameProcessor> frame_processor,
+ const LogCB& log_cb,
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb)
+ : create_demuxer_stream_cb_(create_demuxer_stream_cb),
+ timestamp_offset_during_append_(NULL),
+ new_media_segment_(false),
+ parsing_media_segment_(false),
+ stream_parser_(stream_parser.release()),
+ audio_(NULL),
+ video_(NULL),
+ frame_processor_(frame_processor.release()),
+ log_cb_(log_cb),
+ auto_update_timestamp_offset_(false) {
+ DCHECK(!create_demuxer_stream_cb_.is_null());
+ DCHECK(frame_processor_);
+}
- void MarkEndOfStream();
- void UnmarkEndOfStream();
+SourceState::~SourceState() {
+ Shutdown();
- // DemuxerStream methods.
- virtual void Read(const ReadCB& read_cb) OVERRIDE;
- virtual Type type() OVERRIDE;
- virtual void EnableBitstreamConverter() OVERRIDE;
- virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
- virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
+ STLDeleteValues(&text_stream_map_);
+}
- // Returns the text track configuration. It is an error to call this method
- // if type() != TEXT.
- TextTrackConfig text_track_config();
+void SourceState::Init(const StreamParser::InitCB& init_cb,
+ bool allow_audio,
+ bool allow_video,
+ const StreamParser::NeedKeyCB& need_key_cb,
+ const NewTextTrackCB& new_text_track_cb) {
+ new_text_track_cb_ = new_text_track_cb;
+ init_cb_ = init_cb;
- void set_memory_limit_for_testing(int memory_limit) {
- stream_->set_memory_limit_for_testing(memory_limit);
- }
+ stream_parser_->Init(
+ base::Bind(&SourceState::OnSourceInitDone, base::Unretained(this)),
+ base::Bind(&SourceState::OnNewConfigs,
+ base::Unretained(this),
+ allow_audio,
+ allow_video),
+ base::Bind(&SourceState::OnNewBuffers, base::Unretained(this)),
+ new_text_track_cb_.is_null(),
+ need_key_cb,
+ base::Bind(&SourceState::OnNewMediaSegment, base::Unretained(this)),
+ base::Bind(&SourceState::OnEndOfMediaSegment, base::Unretained(this)),
+ log_cb_);
+}
- private:
- enum State {
- UNINITIALIZED,
- RETURNING_DATA_FOR_READS,
- RETURNING_ABORT_FOR_READS,
- SHUTDOWN,
- };
+void SourceState::SetSequenceMode(bool sequence_mode) {
+ DCHECK(!parsing_media_segment_);
- // Assigns |state_| to |state|
- void ChangeState_Locked(State state);
+ frame_processor_->SetSequenceMode(sequence_mode);
+}
- void CompletePendingReadIfPossible_Locked();
+void SourceState::SetGroupStartTimestampIfInSequenceMode(
+ base::TimeDelta timestamp_offset) {
+ DCHECK(!parsing_media_segment_);
- // Gets the value to pass to the next Read() callback. Returns true if
- // |status| and |buffer| should be passed to the callback. False indicates
- // that |status| and |buffer| were not set and more data is needed.
- bool GetNextBuffer_Locked(DemuxerStream::Status* status,
- scoped_refptr<StreamParserBuffer>* buffer);
+ frame_processor_->SetGroupStartTimestampIfInSequenceMode(timestamp_offset);
+}
- // Specifies the type of the stream.
- Type type_;
+bool SourceState::Append(const uint8* data, size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset) {
+ DCHECK(timestamp_offset);
+ DCHECK(!timestamp_offset_during_append_);
+ append_window_start_during_append_ = append_window_start;
+ append_window_end_during_append_ = append_window_end;
+ timestamp_offset_during_append_ = timestamp_offset;
- scoped_ptr<SourceBufferStream> stream_;
+ // TODO(wolenetz/acolwell): Curry and pass a NewBuffersCB here bound with
+ // append window and timestamp offset pointer. See http://crbug.com/351454.
+ bool parse_succeeded = stream_parser_->Parse(data, length);
+ timestamp_offset_during_append_ = NULL;
+ return parse_succeeded;
+}
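
(Annotation, not part of the patch: the |timestamp_offset| argument is an in/out parameter, which the hypothetical standalone reduction below isolates; names are invented for the example.)

    #include <cassert>

    // Mirrors the Append() contract: the callee may rewrite the offset while
    // it holds the pointer, and the caller observes the update on return.
    void AppendLike(double* timestamp_offset) {
      *timestamp_offset += 1.5;  // Coded frame processing moved the offset.
    }

    int main() {
      double offset = 0.0;
      AppendLike(&offset);
      assert(offset == 1.5);
      return 0;
    }
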
- mutable base::Lock lock_;
- State state_;
- ReadCB read_cb_;
+void SourceState::Abort(TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset) {
+ DCHECK(timestamp_offset);
+ DCHECK(!timestamp_offset_during_append_);
+ timestamp_offset_during_append_ = timestamp_offset;
+ append_window_start_during_append_ = append_window_start;
+ append_window_end_during_append_ = append_window_end;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ChunkDemuxerStream);
-};
+ stream_parser_->Flush();
+ timestamp_offset_during_append_ = NULL;
-SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
- const LogCB& log_cb,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb,
- const IncreaseDurationCB& increase_duration_cb)
- : create_demuxer_stream_cb_(create_demuxer_stream_cb),
- increase_duration_cb_(increase_duration_cb),
- append_window_end_(kInfiniteDuration()),
- new_media_segment_(false),
- can_update_offset_(true),
- stream_parser_(stream_parser.release()),
- audio_(NULL),
- audio_needs_keyframe_(true),
- video_(NULL),
- video_needs_keyframe_(true),
- log_cb_(log_cb) {
- DCHECK(!create_demuxer_stream_cb_.is_null());
- DCHECK(!increase_duration_cb_.is_null());
+ frame_processor_->Reset();
+ parsing_media_segment_ = false;
}
-SourceState::~SourceState() {
+void SourceState::Remove(TimeDelta start, TimeDelta end, TimeDelta duration) {
if (audio_)
- audio_->Shutdown();
+ audio_->Remove(start, end, duration);
if (video_)
- video_->Shutdown();
+ video_->Remove(start, end, duration);
for (TextStreamMap::iterator itr = text_stream_map_.begin();
itr != text_stream_map_.end(); ++itr) {
- itr->second->Shutdown();
- delete itr->second;
+ itr->second->Remove(start, end, duration);
}
}
-void SourceState::Init(const StreamParser::InitCB& init_cb,
- bool allow_audio,
- bool allow_video,
- const StreamParser::NeedKeyCB& need_key_cb,
- const NewTextTrackCB& new_text_track_cb) {
- new_text_track_cb_ = new_text_track_cb;
+Ranges<TimeDelta> SourceState::GetBufferedRanges(TimeDelta duration,
+ bool ended) const {
+ // TODO(acolwell): When we start allowing disabled tracks we'll need to update
+ // this code to only add ranges from active tracks.
+ RangesList ranges_list;
+ if (audio_)
+ ranges_list.push_back(audio_->GetBufferedRanges(duration));
- StreamParser::NewTextBuffersCB new_text_buffers_cb;
+ if (video_)
+ ranges_list.push_back(video_->GetBufferedRanges(duration));
- if (!new_text_track_cb_.is_null()) {
- new_text_buffers_cb = base::Bind(&SourceState::OnTextBuffers,
- base::Unretained(this));
+ for (TextStreamMap::const_iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ ranges_list.push_back(itr->second->GetBufferedRanges(duration));
}
- stream_parser_->Init(init_cb,
- base::Bind(&SourceState::OnNewConfigs,
- base::Unretained(this),
- allow_audio,
- allow_video),
- base::Bind(&SourceState::OnNewBuffers,
- base::Unretained(this)),
- new_text_buffers_cb,
- need_key_cb,
- base::Bind(&SourceState::OnNewMediaSegment,
- base::Unretained(this)),
- base::Bind(&SourceState::OnEndOfMediaSegment,
- base::Unretained(this)),
- log_cb_);
-}
-
-bool SourceState::SetTimestampOffset(TimeDelta timestamp_offset) {
- if (!can_update_offset_)
- return false;
-
- timestamp_offset_ = timestamp_offset;
- return true;
+ return ComputeIntersection(ranges_list, ended);
}
-bool SourceState::Append(const uint8* data, size_t length) {
- return stream_parser_->Parse(data, length);
-}
+TimeDelta SourceState::GetMaxBufferedDuration() const {
+ TimeDelta max_duration;
-void SourceState::Abort() {
- stream_parser_->Flush();
- audio_needs_keyframe_ = true;
- video_needs_keyframe_ = true;
- can_update_offset_ = true;
-}
+ if (audio_)
+ max_duration = std::max(max_duration, audio_->GetBufferedDuration());
+
+ if (video_)
+ max_duration = std::max(max_duration, video_->GetBufferedDuration());
+
+ for (TextStreamMap::const_iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ max_duration = std::max(max_duration, itr->second->GetBufferedDuration());
+ }
+ return max_duration;
+}
void SourceState::StartReturningData() {
if (audio_)
@@ -393,19 +433,88 @@ void SourceState::CompletePendingReadIfPossible() {
}
}
-void SourceState::AdjustBufferTimestamps(
- const StreamParser::BufferQueue& buffers) {
- if (timestamp_offset_ == TimeDelta())
- return;
+void SourceState::OnSetDuration(TimeDelta duration) {
+ if (audio_)
+ audio_->OnSetDuration(duration);
+
+ if (video_)
+ video_->OnSetDuration(duration);
- for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
- itr != buffers.end(); ++itr) {
- (*itr)->SetDecodeTimestamp(
- (*itr)->GetDecodeTimestamp() + timestamp_offset_);
- (*itr)->set_timestamp((*itr)->timestamp() + timestamp_offset_);
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->OnSetDuration(duration);
}
}
+void SourceState::MarkEndOfStream() {
+ if (audio_)
+ audio_->MarkEndOfStream();
+
+ if (video_)
+ video_->MarkEndOfStream();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->MarkEndOfStream();
+ }
+}
+
+void SourceState::UnmarkEndOfStream() {
+ if (audio_)
+ audio_->UnmarkEndOfStream();
+
+ if (video_)
+ video_->UnmarkEndOfStream();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->UnmarkEndOfStream();
+ }
+}
+
+void SourceState::Shutdown() {
+ if (audio_)
+ audio_->Shutdown();
+
+ if (video_)
+ video_->Shutdown();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->Shutdown();
+ }
+}
+
+void SourceState::SetMemoryLimitsForTesting(int memory_limit) {
+ if (audio_)
+ audio_->set_memory_limit_for_testing(memory_limit);
+
+ if (video_)
+ video_->set_memory_limit_for_testing(memory_limit);
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->set_memory_limit_for_testing(memory_limit);
+ }
+}
+
+bool SourceState::IsSeekWaitingForData() const {
+ if (audio_ && audio_->IsSeekWaitingForData())
+ return true;
+
+ if (video_ && video_->IsSeekWaitingForData())
+ return true;
+
+ // NOTE: We are intentionally not checking the text tracks
+ // because text tracks are discontinuous and may not have data
+ // for the seek position. This is OK, and playback should not be
+ // stalled just because we don't have cues. If cues with timestamps
+ // after the seek time eventually arrive, they will be delivered
+ // properly in response to ChunkDemuxerStream::Read() calls.
+
+ return false;
+}
+
bool SourceState::OnNewConfigs(
bool allow_audio, bool allow_video,
const AudioDecoderConfig& audio_config,
@@ -451,8 +560,15 @@ bool SourceState::OnNewConfigs(
DVLOG(1) << "Failed to create an audio stream.";
return false;
}
+
+ if (!frame_processor_->AddTrack(FrameProcessorBase::kAudioTrackId,
+ audio_)) {
+ DVLOG(1) << "Failed to add audio track to frame processor.";
+ return false;
+ }
}
+ frame_processor_->OnPossibleAudioConfigUpdate(audio_config);
success &= audio_->UpdateAudioConfig(audio_config, log_cb_);
}
@@ -464,6 +580,12 @@ bool SourceState::OnNewConfigs(
DVLOG(1) << "Failed to create a video stream.";
return false;
}
+
+ if (!frame_processor_->AddTrack(FrameProcessorBase::kVideoTrackId,
+ video_)) {
+ DVLOG(1) << "Failed to add video track to frame processor.";
+ return false;
+ }
}
success &= video_->UpdateVideoConfig(video_config, log_cb_);
@@ -475,6 +597,12 @@ bool SourceState::OnNewConfigs(
itr != text_configs.end(); ++itr) {
ChunkDemuxerStream* const text_stream =
create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
+ if (!frame_processor_->AddTrack(itr->first, text_stream)) {
+ success &= false;
+ MEDIA_LOG(log_cb_) << "Failed to add text track ID " << itr->first
+ << " to frame processor.";
+ break;
+ }
text_stream->UpdateTextConfig(itr->second, log_cb_);
text_stream_map_[itr->first] = text_stream;
new_text_track_cb_.Run(text_stream, itr->second);
@@ -494,8 +622,17 @@ bool SourceState::OnNewConfigs(
success &= false;
MEDIA_LOG(log_cb_) << "New text track config does not match old one.";
} else {
- text_stream_map_.clear();
- text_stream_map_[config_itr->first] = text_stream;
+ StreamParser::TrackId old_id = stream_itr->first;
+ StreamParser::TrackId new_id = config_itr->first;
+ if (new_id != old_id) {
+ if (frame_processor_->UpdateTrack(old_id, new_id)) {
+ text_stream_map_.clear();
+ text_stream_map_[config_itr->first] = text_stream;
+ } else {
+ success &= false;
+ MEDIA_LOG(log_cb_) << "Error remapping single text track number";
+ }
+ }
}
} else {
for (TextConfigItr config_itr = text_configs.begin();
@@ -524,152 +661,81 @@ bool SourceState::OnNewConfigs(
}
}
+ frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint();
+
DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed");
return success;
}
void SourceState::OnNewMediaSegment() {
DVLOG(2) << "OnNewMediaSegment()";
- can_update_offset_ = false;
+ parsing_media_segment_ = true;
new_media_segment_ = true;
}
void SourceState::OnEndOfMediaSegment() {
DVLOG(2) << "OnEndOfMediaSegment()";
- can_update_offset_ = true;
+ parsing_media_segment_ = false;
new_media_segment_ = false;
}
-bool SourceState::OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers) {
- DCHECK(!audio_buffers.empty() || !video_buffers.empty());
- AdjustBufferTimestamps(audio_buffers);
- AdjustBufferTimestamps(video_buffers);
-
- StreamParser::BufferQueue filtered_audio;
- StreamParser::BufferQueue filtered_video;
-
- FilterWithAppendWindow(audio_buffers, &audio_needs_keyframe_,
- &filtered_audio);
-
- FilterWithAppendWindow(video_buffers, &video_needs_keyframe_,
- &filtered_video);
-
- if (filtered_audio.empty() && filtered_video.empty())
- return true;
-
- if (new_media_segment_) {
- // Find the earliest timestamp in the filtered buffers and use that for the
- // segment start timestamp.
- TimeDelta segment_timestamp = kNoTimestamp();
-
- if (!filtered_audio.empty())
- segment_timestamp = filtered_audio.front()->GetDecodeTimestamp();
-
- if (!filtered_video.empty() &&
- (segment_timestamp == kNoTimestamp() ||
- filtered_video.front()->GetDecodeTimestamp() < segment_timestamp)) {
- segment_timestamp = filtered_video.front()->GetDecodeTimestamp();
- }
-
- new_media_segment_ = false;
-
- if (audio_)
- audio_->OnNewMediaSegment(segment_timestamp);
-
- if (video_)
- video_->OnNewMediaSegment(segment_timestamp);
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->OnNewMediaSegment(segment_timestamp);
+bool SourceState::OnNewBuffers(
+ const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map) {
+ DVLOG(2) << "OnNewBuffers()";
+ DCHECK(timestamp_offset_during_append_);
+ DCHECK(parsing_media_segment_);
+
+ const TimeDelta timestamp_offset_before_processing =
+ *timestamp_offset_during_append_;
+
+ // Calculate the new timestamp offset for audio/video tracks if the stream
+ // parser has requested automatic updates.
+ TimeDelta new_timestamp_offset = timestamp_offset_before_processing;
+ if (auto_update_timestamp_offset_) {
+ const bool have_audio_buffers = !audio_buffers.empty();
+ const bool have_video_buffers = !video_buffers.empty();
+ if (have_audio_buffers && have_video_buffers) {
+ new_timestamp_offset +=
+ std::min(EndTimestamp(audio_buffers), EndTimestamp(video_buffers));
+ } else if (have_audio_buffers) {
+ new_timestamp_offset += EndTimestamp(audio_buffers);
+ } else if (have_video_buffers) {
+ new_timestamp_offset += EndTimestamp(video_buffers);
}
}
- if (!filtered_audio.empty()) {
- if (!audio_ || !audio_->Append(filtered_audio))
- return false;
- increase_duration_cb_.Run(filtered_audio.back()->timestamp(), audio_);
+ if (!frame_processor_->ProcessFrames(audio_buffers,
+ video_buffers,
+ text_map,
+ append_window_start_during_append_,
+ append_window_end_during_append_,
+ &new_media_segment_,
+ timestamp_offset_during_append_)) {
+ return false;
}
- if (!filtered_video.empty()) {
- if (!video_ || !video_->Append(filtered_video))
- return false;
- increase_duration_cb_.Run(filtered_video.back()->timestamp(), video_);
+ // Only update the timestamp offset if the frame processor hasn't already.
+ if (auto_update_timestamp_offset_ &&
+ timestamp_offset_before_processing == *timestamp_offset_during_append_) {
+ *timestamp_offset_during_append_ = new_timestamp_offset;
}
return true;
}
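
(Annotation, not part of the patch: a numeric sketch of the automatic offset update above; EndTimestamp() is the last buffer's timestamp plus duration, as defined near the top of this file. Values are hypothetical.)

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double audio_end = 2.00;   // EndTimestamp(audio_buffers), seconds.
      const double video_end = 1.96;   // EndTimestamp(video_buffers), seconds.
      double timestamp_offset = 10.0;  // Offset before this append.
      // Both tracks produced buffers, so advance by the *minimum* end
      // timestamp, exactly as OnNewBuffers() does above.
      timestamp_offset += std::min(audio_end, video_end);
      printf("new offset: %.2f s\n", timestamp_offset);  // 11.96 s
      return 0;
    }
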
-bool SourceState::OnTextBuffers(
- int text_track_number,
- const StreamParser::BufferQueue& buffers) {
- DCHECK(!buffers.empty());
-
- TextStreamMap::iterator itr = text_stream_map_.find(text_track_number);
- if (itr == text_stream_map_.end())
- return false;
-
- AdjustBufferTimestamps(buffers);
-
- return itr->second->Append(buffers);
-}
-
-void SourceState::FilterWithAppendWindow(
- const StreamParser::BufferQueue& buffers, bool* needs_keyframe,
- StreamParser::BufferQueue* filtered_buffers) {
- DCHECK(needs_keyframe);
- DCHECK(filtered_buffers);
-
- // This loop implements steps 1.9, 1.10, & 1.11 of the "Coded frame
- // processing loop" in the Media Source Extensions spec.
- // These steps filter out buffers that are not within the "append
- // window" and handles resyncing on the next random access point
- // (i.e., next keyframe) if a buffer gets dropped.
- for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
- itr != buffers.end(); ++itr) {
- // Filter out buffers that are outside the append window. Anytime
- // a buffer gets dropped we need to set |*needs_keyframe| to true
- // because we can only resume decoding at keyframes.
- TimeDelta presentation_timestamp = (*itr)->timestamp();
-
- // TODO(acolwell): Change |frame_end_timestamp| value to
- // |presentation_timestamp + (*itr)->duration()|, like the spec
- // requires, once frame durations are actually present in all buffers.
- TimeDelta frame_end_timestamp = presentation_timestamp;
- if (presentation_timestamp < append_window_start_ ||
- frame_end_timestamp > append_window_end_) {
- DVLOG(1) << "Dropping buffer outside append window."
- << " presentation_timestamp "
- << presentation_timestamp.InSecondsF();
- *needs_keyframe = true;
-
- // This triggers a discontinuity so we need to treat the next frames
- // appended within the append window as if they were the beginning of a
- // new segment.
- new_media_segment_ = true;
- continue;
- }
-
- // If |*needs_keyframe| is true then filter out buffers until we
- // encounter the next keyframe.
- if (*needs_keyframe) {
- if (!(*itr)->IsKeyframe()) {
- DVLOG(1) << "Dropping non-keyframe. presentation_timestamp "
- << presentation_timestamp.InSecondsF();
- continue;
- }
-
- *needs_keyframe = false;
- }
-
- filtered_buffers->push_back(*itr);
- }
+void SourceState::OnSourceInitDone(bool success,
+ const StreamParser::InitParameters& params) {
+ auto_update_timestamp_offset_ = params.auto_update_timestamp_offset;
+ base::ResetAndReturn(&init_cb_).Run(success, params);
}
-ChunkDemuxerStream::ChunkDemuxerStream(Type type)
+ChunkDemuxerStream::ChunkDemuxerStream(Type type, bool splice_frames_enabled)
: type_(type),
- state_(UNINITIALIZED) {
+ state_(UNINITIALIZED),
+ splice_frames_enabled_(splice_frames_enabled),
+ partial_append_window_trimming_enabled_(false) {
}
void ChunkDemuxerStream::StartReturningData() {
@@ -710,6 +776,11 @@ void ChunkDemuxerStream::Shutdown() {
bool ChunkDemuxerStream::IsSeekWaitingForData() const {
base::AutoLock auto_lock(lock_);
+
+ // This method should not be called for text tracks. See the note in
+ // SourceState::IsSeekWaitingForData().
+ DCHECK_NE(type_, DemuxerStream::TEXT);
+
return stream_->IsSeekPending();
}
@@ -754,6 +825,17 @@ void ChunkDemuxerStream::OnSetDuration(TimeDelta duration) {
Ranges<TimeDelta> ChunkDemuxerStream::GetBufferedRanges(
TimeDelta duration) const {
base::AutoLock auto_lock(lock_);
+
+ if (type_ == TEXT) {
+ // Since text tracks are discontinuous and the lack of cues should not block
+ // playback, report the buffered range for text tracks as [0, |duration|) so
+ // that intersections with audio & video tracks are computed correctly when
+ // no cues are present.
+ Ranges<TimeDelta> text_range;
+ text_range.Add(TimeDelta(), duration);
+ return text_range;
+ }
+
Ranges<TimeDelta> range = stream_->GetBufferedTime();
if (range.size() == 0u)
@@ -767,6 +849,10 @@ Ranges<TimeDelta> ChunkDemuxerStream::GetBufferedRanges(
return range.IntersectionWith(valid_time_range);
}
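
(Annotation, not part of the patch: the partially elided body above clamps the stream's reported ranges so nothing past |duration| is exposed. A numeric sketch of the capping, with hypothetical values.)

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double buffered_end = 14.5;  // Stream buffered [0, 14.5) seconds.
      const double duration = 12.0;      // Current presentation duration.
      // IntersectionWith(valid_time_range) caps the end at |duration|.
      printf("reported: [0, %.1f)\n", std::min(buffered_end, duration));
      return 0;
    }
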
+TimeDelta ChunkDemuxerStream::GetBufferedDuration() const {
+ return stream_->GetBufferedDuration();
+}
+
void ChunkDemuxerStream::OnNewMediaSegment(TimeDelta start_timestamp) {
DVLOG(2) << "ChunkDemuxerStream::OnNewMediaSegment("
<< start_timestamp.InSecondsF() << ")";
@@ -781,7 +867,18 @@ bool ChunkDemuxerStream::UpdateAudioConfig(const AudioDecoderConfig& config,
base::AutoLock auto_lock(lock_);
if (!stream_) {
DCHECK_EQ(state_, UNINITIALIZED);
- stream_.reset(new SourceBufferStream(config, log_cb));
+
+ // On platforms which support splice frames, enable splice frames and
+ // partial append window support for most codecs (notably: not Opus).
+ const bool codec_supported = config.codec() == kCodecMP3 ||
+ config.codec() == kCodecAAC ||
+ config.codec() == kCodecVorbis;
+ splice_frames_enabled_ = splice_frames_enabled_ && codec_supported;
+ partial_append_window_trimming_enabled_ =
+ splice_frames_enabled_ && codec_supported;
+
+ stream_.reset(
+ new SourceBufferStream(config, log_cb, splice_frames_enabled_));
return true;
}
@@ -796,7 +893,8 @@ bool ChunkDemuxerStream::UpdateVideoConfig(const VideoDecoderConfig& config,
if (!stream_) {
DCHECK_EQ(state_, UNINITIALIZED);
- stream_.reset(new SourceBufferStream(config, log_cb));
+ stream_.reset(
+ new SourceBufferStream(config, log_cb, splice_frames_enabled_));
return true;
}
@@ -809,7 +907,7 @@ void ChunkDemuxerStream::UpdateTextConfig(const TextTrackConfig& config,
base::AutoLock auto_lock(lock_);
DCHECK(!stream_);
DCHECK_EQ(state_, UNINITIALIZED);
- stream_.reset(new SourceBufferStream(config, log_cb));
+ stream_.reset(new SourceBufferStream(config, log_cb, splice_frames_enabled_));
}
void ChunkDemuxerStream::MarkEndOfStream() {
@@ -848,6 +946,8 @@ VideoDecoderConfig ChunkDemuxerStream::video_decoder_config() {
return stream_->GetCurrentVideoDecoderConfig();
}
+bool ChunkDemuxerStream::SupportsConfigChanges() { return true; }
+
TextTrackConfig ChunkDemuxerStream::text_track_config() {
CHECK_EQ(type_, TEXT);
base::AutoLock auto_lock(lock_);
@@ -913,7 +1013,8 @@ void ChunkDemuxerStream::CompletePendingReadIfPossible_Locked() {
ChunkDemuxer::ChunkDemuxer(const base::Closure& open_cb,
const NeedKeyCB& need_key_cb,
- const LogCB& log_cb)
+ const LogCB& log_cb,
+ bool splice_frames_enabled)
: state_(WAITING_FOR_INIT),
cancel_next_seek_(false),
host_(NULL),
@@ -922,7 +1023,9 @@ ChunkDemuxer::ChunkDemuxer(const base::Closure& open_cb,
enable_text_(false),
log_cb_(log_cb),
duration_(kNoTimestamp()),
- user_specified_duration_(-1) {
+ user_specified_duration_(-1),
+ liveness_(LIVENESS_UNKNOWN),
+ splice_frames_enabled_(splice_frames_enabled) {
DCHECK(!open_cb_.is_null());
DCHECK(!need_key_cb_.is_null());
}
@@ -985,12 +1088,6 @@ void ChunkDemuxer::Seek(TimeDelta time, const PipelineStatusCB& cb) {
base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
}
-void ChunkDemuxer::OnAudioRendererDisabled() {
- base::AutoLock auto_lock(lock_);
- audio_->Shutdown();
- disabled_audio_ = audio_.Pass();
-}
-
// Demuxer implementation.
DemuxerStream* ChunkDemuxer::GetStream(DemuxerStream::Type type) {
DCHECK_NE(type, DemuxerStream::TEXT);
@@ -1008,6 +1105,14 @@ TimeDelta ChunkDemuxer::GetStartTime() const {
return TimeDelta();
}
+base::Time ChunkDemuxer::GetTimelineOffset() const {
+ return timeline_offset_;
+}
+
+Demuxer::Liveness ChunkDemuxer::GetLiveness() const {
+ return liveness_;
+}
+
void ChunkDemuxer::StartWaitingForSeek(TimeDelta seek_time) {
DVLOG(1) << "StartWaitingForSeek()";
base::AutoLock auto_lock(lock_);
@@ -1072,11 +1177,14 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
if (has_video)
source_id_video_ = id;
+ scoped_ptr<FrameProcessor> frame_processor(
+ new FrameProcessor(base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
+ base::Unretained(this))));
+
scoped_ptr<SourceState> source_state(
- new SourceState(stream_parser.Pass(), log_cb_,
+ new SourceState(stream_parser.Pass(),
+ frame_processor.Pass(), log_cb_,
base::Bind(&ChunkDemuxer::CreateDemuxerStream,
- base::Unretained(this)),
- base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
base::Unretained(this))));
SourceState::NewTextTrackCB new_text_track_cb;
@@ -1114,59 +1222,22 @@ void ChunkDemuxer::RemoveId(const std::string& id) {
Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges(const std::string& id) const {
base::AutoLock auto_lock(lock_);
DCHECK(!id.empty());
- DCHECK(IsValidId(id));
- DCHECK(id == source_id_audio_ || id == source_id_video_);
-
- if (id == source_id_audio_ && id != source_id_video_) {
- // Only include ranges that have been buffered in |audio_|
- return audio_ ? audio_->GetBufferedRanges(duration_) : Ranges<TimeDelta>();
- }
-
- if (id != source_id_audio_ && id == source_id_video_) {
- // Only include ranges that have been buffered in |video_|
- return video_ ? video_->GetBufferedRanges(duration_) : Ranges<TimeDelta>();
- }
-
- return ComputeIntersection();
-}
-
-Ranges<TimeDelta> ChunkDemuxer::ComputeIntersection() const {
- lock_.AssertAcquired();
- if (!audio_ || !video_)
- return Ranges<TimeDelta>();
-
- // Include ranges that have been buffered in both |audio_| and |video_|.
- Ranges<TimeDelta> audio_ranges = audio_->GetBufferedRanges(duration_);
- Ranges<TimeDelta> video_ranges = video_->GetBufferedRanges(duration_);
- Ranges<TimeDelta> result = audio_ranges.IntersectionWith(video_ranges);
-
- if (state_ == ENDED && result.size() > 0) {
- // If appending has ended, extend the last intersection range to include the
- // max end time of the last audio/video range. This allows the buffered
- // information to match the actual time range that will get played out if
- // the streams have slightly different lengths.
- TimeDelta audio_start = audio_ranges.start(audio_ranges.size() - 1);
- TimeDelta audio_end = audio_ranges.end(audio_ranges.size() - 1);
- TimeDelta video_start = video_ranges.start(video_ranges.size() - 1);
- TimeDelta video_end = video_ranges.end(video_ranges.size() - 1);
-
- // Verify the last audio range overlaps with the last video range.
- // This is enforced by the logic that controls the transition to ENDED.
- DCHECK((audio_start <= video_start && video_start <= audio_end) ||
- (video_start <= audio_start && audio_start <= video_end));
- result.Add(result.end(result.size()-1), std::max(audio_end, video_end));
- }
+ SourceStateMap::const_iterator itr = source_state_map_.find(id);
- return result;
+ DCHECK(itr != source_state_map_.end());
+ return itr->second->GetBufferedRanges(duration_, state_ == ENDED);
}
void ChunkDemuxer::AppendData(const std::string& id,
- const uint8* data,
- size_t length) {
+ const uint8* data, size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset) {
DVLOG(1) << "AppendData(" << id << ", " << length << ")";
DCHECK(!id.empty());
+ DCHECK(timestamp_offset);
Ranges<TimeDelta> ranges;
@@ -1186,7 +1257,10 @@ void ChunkDemuxer::AppendData(const std::string& id,
switch (state_) {
case INITIALIZING:
DCHECK(IsValidId(id));
- if (!source_state_map_[id]->Append(data, length)) {
+ if (!source_state_map_[id]->Append(data, length,
+ append_window_start,
+ append_window_end,
+ timestamp_offset)) {
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
}
@@ -1194,7 +1268,10 @@ void ChunkDemuxer::AppendData(const std::string& id,
case INITIALIZED: {
DCHECK(IsValidId(id));
- if (!source_state_map_[id]->Append(data, length)) {
+ if (!source_state_map_[id]->Append(data, length,
+ append_window_start,
+ append_window_end,
+ timestamp_offset)) {
ReportError_Locked(PIPELINE_ERROR_DECODE);
return;
}
@@ -1225,25 +1302,38 @@ void ChunkDemuxer::AppendData(const std::string& id,
host_->AddBufferedTimeRange(ranges.start(i), ranges.end(i));
}
-void ChunkDemuxer::Abort(const std::string& id) {
+void ChunkDemuxer::Abort(const std::string& id,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset) {
DVLOG(1) << "Abort(" << id << ")";
base::AutoLock auto_lock(lock_);
DCHECK(!id.empty());
CHECK(IsValidId(id));
- source_state_map_[id]->Abort();
+ source_state_map_[id]->Abort(append_window_start,
+ append_window_end,
+ timestamp_offset);
}
-void ChunkDemuxer::Remove(const std::string& id, base::TimeDelta start,
- base::TimeDelta end) {
+void ChunkDemuxer::Remove(const std::string& id, TimeDelta start,
+ TimeDelta end) {
DVLOG(1) << "Remove(" << id << ", " << start.InSecondsF()
<< ", " << end.InSecondsF() << ")";
base::AutoLock auto_lock(lock_);
- if (id == source_id_audio_ && audio_)
- audio_->Remove(start, end, duration_);
+ DCHECK(!id.empty());
+ CHECK(IsValidId(id));
+ DCHECK(start >= base::TimeDelta()) << start.InSecondsF();
+ DCHECK(start < end) << "start " << start.InSecondsF()
+ << " end " << end.InSecondsF();
+ DCHECK(duration_ != kNoTimestamp());
+ DCHECK(start <= duration_) << "start " << start.InSecondsF()
+ << " duration " << duration_.InSecondsF();
+
+ if (start == duration_)
+ return;
- if (id == source_id_video_ && video_)
- video_->Remove(start, end, duration_);
+ source_state_map_[id]->Remove(start, end, duration_);
}
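
(Annotation, not part of the patch: the DCHECKs added above pin down Remove()'s valid argument space. A hypothetical validator restating them, in seconds.)

    #include <cassert>

    bool IsValidRemoveRange(double start, double end, double duration) {
      return start >= 0.0 && start < end && start <= duration;
    }

    int main() {
      assert(IsValidRemoveRange(0.0, 5.0, 10.0));    // Typical removal.
      assert(IsValidRemoveRange(10.0, 20.0, 10.0));  // start == duration: no-op.
      assert(!IsValidRemoveRange(5.0, 5.0, 10.0));   // Empty range rejected.
      return 0;
    }
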
double ChunkDemuxer::GetDuration() {
@@ -1279,6 +1369,7 @@ void ChunkDemuxer::SetDuration(double duration) {
// This can be different if the value of |duration| doesn't fit the range or
// precision of TimeDelta.
TimeDelta min_duration = TimeDelta::FromInternalValue(1);
+ // Don't use TimeDelta::Max() here, as we want the largest finite time delta.
TimeDelta max_duration = TimeDelta::FromInternalValue(kint64max - 1);
double min_duration_in_seconds = min_duration.InSecondsF();
double max_duration_in_seconds = max_duration.InSecondsF();
@@ -1301,21 +1392,44 @@ void ChunkDemuxer::SetDuration(double duration) {
duration_ = duration_td;
host_->SetDuration(duration_);
- if (audio_)
- audio_->OnSetDuration(duration_);
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->OnSetDuration(duration_);
+ }
+}
- if (video_)
- video_->OnSetDuration(duration_);
+bool ChunkDemuxer::IsParsingMediaSegment(const std::string& id) {
+ base::AutoLock auto_lock(lock_);
+ DVLOG(1) << "IsParsingMediaSegment(" << id << ")";
+ CHECK(IsValidId(id));
+
+ return source_state_map_[id]->parsing_media_segment();
}
-bool ChunkDemuxer::SetTimestampOffset(const std::string& id, TimeDelta offset) {
+void ChunkDemuxer::SetSequenceMode(const std::string& id,
+ bool sequence_mode) {
base::AutoLock auto_lock(lock_);
- DVLOG(1) << "SetTimestampOffset(" << id << ", " << offset.InSecondsF() << ")";
+ DVLOG(1) << "SetSequenceMode(" << id << ", " << sequence_mode << ")";
CHECK(IsValidId(id));
+ DCHECK_NE(state_, ENDED);
- return source_state_map_[id]->SetTimestampOffset(offset);
+ source_state_map_[id]->SetSequenceMode(sequence_mode);
}
+void ChunkDemuxer::SetGroupStartTimestampIfInSequenceMode(
+ const std::string& id,
+ base::TimeDelta timestamp_offset) {
+ base::AutoLock auto_lock(lock_);
+ DVLOG(1) << "SetGroupStartTimestampIfInSequenceMode(" << id << ", "
+ << timestamp_offset.InSecondsF() << ")";
+ CHECK(IsValidId(id));
+ DCHECK_NE(state_, ENDED);
+
+ source_state_map_[id]->SetGroupStartTimestampIfInSequenceMode(
+ timestamp_offset);
+}
+
void ChunkDemuxer::MarkEndOfStream(PipelineStatus status) {
DVLOG(1) << "MarkEndOfStream(" << status << ")";
base::AutoLock auto_lock(lock_);
@@ -1331,11 +1445,10 @@ void ChunkDemuxer::MarkEndOfStream(PipelineStatus status) {
}
bool old_waiting_for_data = IsSeekWaitingForData_Locked();
- if (audio_)
- audio_->MarkEndOfStream();
-
- if (video_)
- video_->MarkEndOfStream();
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->MarkEndOfStream();
+ }
CompletePendingReadsIfPossible();
@@ -1361,27 +1474,10 @@ void ChunkDemuxer::UnmarkEndOfStream() {
ChangeState_Locked(INITIALIZED);
- if (audio_)
- audio_->UnmarkEndOfStream();
-
- if (video_)
- video_->UnmarkEndOfStream();
-}
-
-void ChunkDemuxer::SetAppendWindowStart(const std::string& id,
- TimeDelta start) {
- base::AutoLock auto_lock(lock_);
- DVLOG(1) << "SetAppendWindowStart(" << id << ", "
- << start.InSecondsF() << ")";
- CHECK(IsValidId(id));
- source_state_map_[id]->set_append_window_start(start);
-}
-
-void ChunkDemuxer::SetAppendWindowEnd(const std::string& id, TimeDelta end) {
- base::AutoLock auto_lock(lock_);
- DVLOG(1) << "SetAppendWindowEnd(" << id << ", " << end.InSecondsF() << ")";
- CHECK(IsValidId(id));
- source_state_map_[id]->set_append_window_end(end);
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->UnmarkEndOfStream();
+ }
}
void ChunkDemuxer::Shutdown() {
@@ -1391,11 +1487,7 @@ void ChunkDemuxer::Shutdown() {
if (state_ == SHUTDOWN)
return;
- if (audio_)
- audio_->Shutdown();
-
- if (video_)
- video_->Shutdown();
+ ShutdownAllStreams();
ChangeState_Locked(SHUTDOWN);
@@ -1404,11 +1496,10 @@ void ChunkDemuxer::Shutdown() {
}
void ChunkDemuxer::SetMemoryLimitsForTesting(int memory_limit) {
- if (audio_)
- audio_->set_memory_limit_for_testing(memory_limit);
-
- if (video_)
- video_->set_memory_limit_for_testing(memory_limit);
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->SetMemoryLimitsForTesting(memory_limit);
+ }
}
void ChunkDemuxer::ChangeState_Locked(State new_state) {
@@ -1420,11 +1511,8 @@ void ChunkDemuxer::ChangeState_Locked(State new_state) {
ChunkDemuxer::~ChunkDemuxer() {
DCHECK_NE(state_, INITIALIZED);
- for (SourceStateMap::iterator it = source_state_map_.begin();
- it != source_state_map_.end(); ++it) {
- delete it->second;
- }
- source_state_map_.clear();
+
+ STLDeleteValues(&source_state_map_);
}
void ChunkDemuxer::ReportError_Locked(PipelineStatus error) {
@@ -1442,11 +1530,7 @@ void ChunkDemuxer::ReportError_Locked(PipelineStatus error) {
if (!seek_cb_.is_null())
std::swap(cb, seek_cb_);
- if (audio_)
- audio_->Shutdown();
-
- if (video_)
- video_->Shutdown();
+ ShutdownAllStreams();
}
if (!cb.is_null()) {
@@ -1460,20 +1544,20 @@ void ChunkDemuxer::ReportError_Locked(PipelineStatus error) {
bool ChunkDemuxer::IsSeekWaitingForData_Locked() const {
lock_.AssertAcquired();
- bool waiting_for_data = false;
-
- if (audio_)
- waiting_for_data = audio_->IsSeekWaitingForData();
-
- if (!waiting_for_data && video_)
- waiting_for_data = video_->IsSeekWaitingForData();
+ for (SourceStateMap::const_iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ if (itr->second->IsSeekWaitingForData())
+ return true;
+ }
- return waiting_for_data;
+ return false;
}
-void ChunkDemuxer::OnSourceInitDone(bool success, TimeDelta duration) {
+void ChunkDemuxer::OnSourceInitDone(
+ bool success,
+ const StreamParser::InitParameters& params) {
DVLOG(1) << "OnSourceInitDone(" << success << ", "
- << duration.InSecondsF() << ")";
+ << params.duration.InSecondsF() << ")";
lock_.AssertAcquired();
DCHECK_EQ(state_, INITIALIZING);
if (!success || (!audio_ && !video_)) {
@@ -1481,13 +1565,37 @@ void ChunkDemuxer::OnSourceInitDone(bool success, TimeDelta duration) {
return;
}
- if (duration != TimeDelta() && duration_ == kNoTimestamp())
- UpdateDuration(duration);
+ if (params.duration != TimeDelta() && duration_ == kNoTimestamp())
+ UpdateDuration(params.duration);
+
+ if (!params.timeline_offset.is_null()) {
+ if (!timeline_offset_.is_null() &&
+ params.timeline_offset != timeline_offset_) {
+ MEDIA_LOG(log_cb_)
+ << "Timeline offset is not the same across all SourceBuffers.";
+ ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
+ return;
+ }
+
+ timeline_offset_ = params.timeline_offset;
+ }
+
+ if (params.liveness != LIVENESS_UNKNOWN) {
+ if (liveness_ != LIVENESS_UNKNOWN && params.liveness != liveness_) {
+ MEDIA_LOG(log_cb_)
+ << "Liveness is not the same across all SourceBuffers.";
+ ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
+ return;
+ }
+
+ liveness_ = params.liveness;
+ }
// Wait until all streams have initialized.
if ((!source_id_audio_.empty() && !audio_) ||
- (!source_id_video_.empty() && !video_))
+ (!source_id_video_.empty() && !video_)) {
return;
+ }
SeekAllSources(GetStartTime());
StartReturningData();
@@ -1506,17 +1614,20 @@ ChunkDemuxer::CreateDemuxerStream(DemuxerStream::Type type) {
case DemuxerStream::AUDIO:
if (audio_)
return NULL;
- audio_.reset(new ChunkDemuxerStream(DemuxerStream::AUDIO));
+ audio_.reset(
+ new ChunkDemuxerStream(DemuxerStream::AUDIO, splice_frames_enabled_));
return audio_.get();
break;
case DemuxerStream::VIDEO:
if (video_)
return NULL;
- video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO));
+ video_.reset(
+ new ChunkDemuxerStream(DemuxerStream::VIDEO, splice_frames_enabled_));
return video_.get();
break;
case DemuxerStream::TEXT: {
- return new ChunkDemuxerStream(DemuxerStream::TEXT);
+ return new ChunkDemuxerStream(DemuxerStream::TEXT,
+ splice_frames_enabled_);
break;
}
case DemuxerStream::UNKNOWN:
@@ -1547,30 +1658,42 @@ void ChunkDemuxer::UpdateDuration(TimeDelta new_duration) {
host_->SetDuration(new_duration);
}
-void ChunkDemuxer::IncreaseDurationIfNecessary(
- TimeDelta last_appended_buffer_timestamp,
- ChunkDemuxerStream* stream) {
- DCHECK(last_appended_buffer_timestamp != kNoTimestamp());
- if (last_appended_buffer_timestamp <= duration_)
+void ChunkDemuxer::IncreaseDurationIfNecessary(TimeDelta new_duration) {
+ DCHECK(new_duration != kNoTimestamp());
+ DCHECK(new_duration != kInfiniteDuration());
+
+ // Per April 1, 2014 MSE spec editor's draft:
+ // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
+ // media-source.html#sourcebuffer-coded-frame-processing
+ // 5. If the media segment contains data beyond the current duration, then run
+ // the duration change algorithm with new duration set to the maximum of
+ // the current duration and the group end timestamp.
+
+ if (new_duration <= duration_)
return;
- Ranges<TimeDelta> ranges = stream->GetBufferedRanges(kInfiniteDuration());
- DCHECK_GT(ranges.size(), 0u);
+ DVLOG(2) << __FUNCTION__ << ": Increasing duration: "
+ << duration_.InSecondsF() << " -> " << new_duration.InSecondsF();
- TimeDelta last_timestamp_buffered = ranges.end(ranges.size() - 1);
- if (last_timestamp_buffered > duration_)
- UpdateDuration(last_timestamp_buffered);
+ UpdateDuration(new_duration);
}
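
(Annotation, not part of the patch: per the spec step quoted in the comment, the new duration is the maximum of the current duration and the group end timestamp; the early return above implements that maximum. A numeric sketch with hypothetical values.)

    #include <algorithm>
    #include <cstdio>

    int main() {
      double duration = 10.0;             // Current duration, seconds.
      double group_end_timestamp = 12.3;  // Segment data extends past it.
      // IncreaseDurationIfNecessary() only ever grows the duration.
      duration = std::max(duration, group_end_timestamp);
      printf("duration: %.1f s\n", duration);  // 12.3 s
      return 0;
    }
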
void ChunkDemuxer::DecreaseDurationIfNecessary() {
lock_.AssertAcquired();
- Ranges<TimeDelta> ranges = GetBufferedRanges_Locked();
- if (ranges.size() == 0u)
+
+ TimeDelta max_duration;
+
+ for (SourceStateMap::const_iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ max_duration = std::max(max_duration,
+ itr->second->GetMaxBufferedDuration());
+ }
+
+ if (max_duration == TimeDelta())
return;
- TimeDelta last_timestamp_buffered = ranges.end(ranges.size() - 1);
- if (last_timestamp_buffered < duration_)
- UpdateDuration(last_timestamp_buffered);
+ if (max_duration < duration_)
+ UpdateDuration(max_duration);
}
Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges() const {
@@ -1580,11 +1703,17 @@ Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges() const {
Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges_Locked() const {
lock_.AssertAcquired();
- if (audio_ && !video_)
- return audio_->GetBufferedRanges(duration_);
- else if (!audio_ && video_)
- return video_->GetBufferedRanges(duration_);
- return ComputeIntersection();
+
+ bool ended = state_ == ENDED;
+ // TODO(acolwell): When we start allowing SourceBuffers that are not active,
+ // we'll need to update this loop to only add ranges from active sources.
+ RangesList ranges_list;
+ for (SourceStateMap::const_iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ ranges_list.push_back(itr->second->GetBufferedRanges(duration_, ended));
+ }
+
+ return ComputeIntersection(ranges_list, ended);
}
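
ComputeIntersection() itself is outside this hunk. As a rough sketch of its core, ignoring the |ended| special case and assuming Ranges<T>::IntersectionWith() from media/base/ranges.h, the per-source range lists are folded pairwise so that only time ranges buffered by every source are reported:

    Ranges<base::TimeDelta> IntersectRangeLists(const RangesList& ranges_list) {
      if (ranges_list.empty())
        return Ranges<base::TimeDelta>();
      Ranges<base::TimeDelta> result = ranges_list[0];
      for (size_t i = 1; i < ranges_list.size(); ++i)
        result = result.IntersectionWith(ranges_list[i]);
      return result;
    }
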
void ChunkDemuxer::StartReturningData() {
@@ -1615,4 +1744,11 @@ void ChunkDemuxer::CompletePendingReadsIfPossible() {
}
}
+void ChunkDemuxer::ShutdownAllStreams() {
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->Shutdown();
+ }
+}
+
} // namespace media
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index 51739dbeeca..1b117b92294 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_FILTERS_CHUNK_DEMUXER_H_
#define MEDIA_FILTERS_CHUNK_DEMUXER_H_
+#include <deque>
#include <map>
#include <string>
#include <utility>
@@ -19,10 +20,113 @@
namespace media {
-class ChunkDemuxerStream;
class FFmpegURLProtocol;
class SourceState;
+class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
+ public:
+ typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+
+ explicit ChunkDemuxerStream(Type type, bool splice_frames_enabled);
+ virtual ~ChunkDemuxerStream();
+
+ // ChunkDemuxerStream control methods.
+ void StartReturningData();
+ void AbortReads();
+ void CompletePendingReadIfPossible();
+ void Shutdown();
+
+ // SourceBufferStream manipulation methods.
+ void Seek(base::TimeDelta time);
+ bool IsSeekWaitingForData() const;
+
+ // Add buffers to this stream. Buffers are stored in SourceBufferStreams,
+ // which handle ordering and overlap resolution.
+ // Returns true if buffers were successfully added.
+ bool Append(const StreamParser::BufferQueue& buffers);
+
+ // Removes buffers between |start| and |end| according to the steps
+ // in the "Coded Frame Removal Algorithm" in the Media Source
+ // Extensions Spec.
+ // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-coded-frame-removal
+ //
+ // |duration| is the current duration of the presentation. It is
+ // required by the computation outlined in the spec.
+ void Remove(base::TimeDelta start, base::TimeDelta end,
+ base::TimeDelta duration);
+
+ // Signal to the stream that duration has changed to |duration|.
+ void OnSetDuration(base::TimeDelta duration);
+
+ // Returns the range of buffered data in this stream, capped at |duration|.
+ Ranges<base::TimeDelta> GetBufferedRanges(base::TimeDelta duration) const;
+
+ // Returns the duration of the buffered data.
+ // Returns base::TimeDelta() if the stream has no buffered data.
+ base::TimeDelta GetBufferedDuration() const;
+
+ // Signal to the stream that buffers handed in through subsequent calls to
+ // Append() belong to a media segment that starts at |start_timestamp|.
+ void OnNewMediaSegment(base::TimeDelta start_timestamp);
+
+ // Called when midstream config updates occur.
+ // Returns true if the new config is accepted.
+ // Returns false if the new config should trigger an error.
+ bool UpdateAudioConfig(const AudioDecoderConfig& config, const LogCB& log_cb);
+ bool UpdateVideoConfig(const VideoDecoderConfig& config, const LogCB& log_cb);
+ void UpdateTextConfig(const TextTrackConfig& config, const LogCB& log_cb);
+
+ void MarkEndOfStream();
+ void UnmarkEndOfStream();
+
+ // DemuxerStream methods.
+ virtual void Read(const ReadCB& read_cb) OVERRIDE;
+ virtual Type type() OVERRIDE;
+ virtual void EnableBitstreamConverter() OVERRIDE;
+ virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
+ virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
+ virtual bool SupportsConfigChanges() OVERRIDE;
+
+ // Returns the text track configuration. It is an error to call this method
+ // if type() != TEXT.
+ TextTrackConfig text_track_config();
+
+ // Sets the memory limit, in bytes, on the SourceBufferStream.
+ void set_memory_limit_for_testing(int memory_limit) {
+ stream_->set_memory_limit_for_testing(memory_limit);
+ }
+
+ bool supports_partial_append_window_trimming() const {
+ return partial_append_window_trimming_enabled_;
+ }
+
+ private:
+ enum State {
+ UNINITIALIZED,
+ RETURNING_DATA_FOR_READS,
+ RETURNING_ABORT_FOR_READS,
+ SHUTDOWN,
+ };
+
+  // Assigns |state| to |state_|.
+ void ChangeState_Locked(State state);
+
+ void CompletePendingReadIfPossible_Locked();
+
+ // Specifies the type of the stream.
+ Type type_;
+
+ scoped_ptr<SourceBufferStream> stream_;
+
+ mutable base::Lock lock_;
+ State state_;
+ ReadCB read_cb_;
+ bool splice_frames_enabled_;
+ bool partial_append_window_trimming_enabled_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ChunkDemuxerStream);
+};
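
A hedged lifecycle sketch for the newly exported class; |config|, |log_cb|, |buffers|, and |read_cb| are assumed to be in scope, and error handling is omitted:

    ChunkDemuxerStream stream(DemuxerStream::AUDIO,
                              true /* splice_frames_enabled */);
    stream.UpdateAudioConfig(config, log_cb);  // leaves UNINITIALIZED
    stream.OnNewMediaSegment(base::TimeDelta());
    stream.Append(buffers);       // SourceBufferStream resolves ordering/overlap
    stream.Seek(base::TimeDelta());
    stream.StartReturningData();  // state -> RETURNING_DATA_FOR_READS
    stream.Read(read_cb);
    stream.Shutdown();            // pending reads complete with EOS buffers
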
+
// Demuxer implementation that allows chunks of media data to be passed
// from JavaScript to the media stack.
class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
@@ -41,9 +145,13 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// otherwise ignore them.
// |log_cb| Run when parsing error messages need to be logged to the error
// console.
+ // |splice_frames_enabled| Indicates that it's okay to generate splice frames
+ // per the MSE specification. Renderers must understand DecoderBuffer's
+ // splice_timestamp() field.
ChunkDemuxer(const base::Closure& open_cb,
const NeedKeyCB& need_key_cb,
- const LogCB& log_cb);
+ const LogCB& log_cb,
+ bool splice_frames_enabled);
virtual ~ChunkDemuxer();
// Demuxer implementation.
@@ -52,9 +160,10 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
bool enable_text_tracks) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
- virtual void OnAudioRendererDisabled() OVERRIDE;
virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
virtual base::TimeDelta GetStartTime() const OVERRIDE;
+ virtual base::Time GetTimelineOffset() const OVERRIDE;
+ virtual Liveness GetLiveness() const OVERRIDE;
// Methods used by an external object to control this demuxer.
//
@@ -98,12 +207,24 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// Gets the currently buffered ranges for the specified ID.
Ranges<base::TimeDelta> GetBufferedRanges(const std::string& id) const;
- // Appends media data to the source buffer associated with |id|.
- void AppendData(const std::string& id, const uint8* data, size_t length);
+ // Appends media data to the source buffer associated with |id|, applying
+ // and possibly updating |*timestamp_offset| during coded frame processing.
+ // |append_window_start| and |append_window_end| correspond to the MSE spec's
+ // similarly named source buffer attributes that are used in coded frame
+ // processing.
+ void AppendData(const std::string& id, const uint8* data, size_t length,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset);
  // Aborts parsing the current segment and resets the parser to a state where
  // it can accept a new segment.
- void Abort(const std::string& id);
+  // Some pending frames can be emitted during that process. These frames
+  // have |timestamp_offset| applied to them.
+ void Abort(const std::string& id,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset);
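
A minimal call sketch for the widened interface, mirroring the updated unit tests: the caller now supplies the append window and owns the in/out timestamp offset that SetTimestampOffset() used to manage (|demuxer|, |id|, |data|, and |length| are assumed to be in scope):

    base::TimeDelta timestamp_offset;  // updated during coded frame processing
    demuxer->AppendData(id, data, length,
                        base::TimeDelta(),    // append_window_start
                        kInfiniteDuration(),  // append_window_end
                        &timestamp_offset);
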
// Remove buffers between |start| and |end| for the source buffer
// associated with |id|.
@@ -118,11 +239,21 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// |duration|.
void SetDuration(double duration);
- // Sets a time |offset| to be applied to subsequent buffers appended to the
- // source buffer associated with |id|. Returns true if the offset is set
- // properly, false if the offset cannot be applied because we're in the
- // middle of parsing a media segment.
- bool SetTimestampOffset(const std::string& id, base::TimeDelta offset);
+ // Returns true if the source buffer associated with |id| is currently parsing
+ // a media segment, or false otherwise.
+ bool IsParsingMediaSegment(const std::string& id);
+
+ // Set the append mode to be applied to subsequent buffers appended to the
+ // source buffer associated with |id|. If |sequence_mode| is true, caller
+ // is requesting "sequence" mode. Otherwise, caller is requesting "segments"
+ // mode.
+ void SetSequenceMode(const std::string& id, bool sequence_mode);
+
+ // Signals the coded frame processor for the source buffer associated with
+ // |id| to update its group start timestamp to be |timestamp_offset| if it is
+ // in sequence append mode.
+ void SetGroupStartTimestampIfInSequenceMode(const std::string& id,
+ base::TimeDelta timestamp_offset);
// Called to signal changes in the "end of stream"
// state. UnmarkEndOfStream() must not be called if a matching
@@ -130,13 +261,10 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
void MarkEndOfStream(PipelineStatus status);
void UnmarkEndOfStream();
- // Set the append window start and end values for the source buffer
- // associated with |id|.
- void SetAppendWindowStart(const std::string& id, base::TimeDelta start);
- void SetAppendWindowEnd(const std::string& id, base::TimeDelta end);
-
void Shutdown();
+ // Sets the memory limit on each stream. |memory_limit| is the
+ // maximum number of bytes each stream is allowed to hold in its buffer.
void SetMemoryLimitsForTesting(int memory_limit);
// Returns the ranges representing the buffered data in the demuxer.
@@ -169,7 +297,8 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
bool CanEndOfStream_Locked() const;
// SourceState callbacks.
- void OnSourceInitDone(bool success, base::TimeDelta duration);
+ void OnSourceInitDone(bool success,
+ const StreamParser::InitParameters& params);
// Creates a DemuxerStream for the specified |type|.
// Returns a new ChunkDemuxerStream instance if a stream of this type
@@ -178,26 +307,12 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
void OnNewTextTrack(ChunkDemuxerStream* text_stream,
const TextTrackConfig& config);
- void OnNewMediaSegment(const std::string& source_id,
- base::TimeDelta start_timestamp);
-
- // Computes the intersection between the video & audio
- // buffered ranges.
- Ranges<base::TimeDelta> ComputeIntersection() const;
-
- // Applies |time_offset| to the timestamps of |buffers|.
- void AdjustBufferTimestamps(const StreamParser::BufferQueue& buffers,
- base::TimeDelta timestamp_offset);
// Returns true if |source_id| is valid, false otherwise.
bool IsValidId(const std::string& source_id) const;
- // Increases |duration_| if |last_appended_buffer_timestamp| exceeds the
- // current |duration_|. The |duration_| is set to the end buffered timestamp
- // of |stream|.
- void IncreaseDurationIfNecessary(
- base::TimeDelta last_appended_buffer_timestamp,
- ChunkDemuxerStream* stream);
+  // Increases |duration_| to |new_duration| if |new_duration| exceeds the
+  // current duration.
+ void IncreaseDurationIfNecessary(base::TimeDelta new_duration);
// Decreases |duration_| if the buffered region is less than |duration_| when
// EndOfStream() is called.
@@ -222,6 +337,10 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// Seeks all SourceBufferStreams to |seek_time|.
void SeekAllSources(base::TimeDelta seek_time);
+ // Shuts down all DemuxerStreams by calling Shutdown() on
+ // all objects in |source_state_map_|.
+ void ShutdownAllStreams();
+
mutable base::Lock lock_;
State state_;
bool cancel_next_seek_;
@@ -244,9 +363,6 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
scoped_ptr<ChunkDemuxerStream> audio_;
scoped_ptr<ChunkDemuxerStream> video_;
- // Keeps |audio_| alive when audio has been disabled.
- scoped_ptr<ChunkDemuxerStream> disabled_audio_;
-
base::TimeDelta duration_;
// The duration passed to the last SetDuration(). If
@@ -256,6 +372,9 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// the actual duration instead of a user specified value.
double user_specified_duration_;
+ base::Time timeline_offset_;
+ Liveness liveness_;
+
typedef std::map<std::string, SourceState*> SourceStateMap;
SourceStateMap source_state_map_;
@@ -265,6 +384,9 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
std::string source_id_audio_;
std::string source_id_video_;
+ // Indicates that splice frame generation is enabled.
+ const bool splice_frames_enabled_;
+
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxer);
};
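
Construction with the new trailing flag, as a short sketch lifted from the updated tests in this patch (|open_cb|, |need_key_cb|, and |log_cb| assumed bound):

    scoped_ptr<ChunkDemuxer> demuxer(new ChunkDemuxer(
        open_cb, need_key_cb, log_cb, true /* splice_frames_enabled */));
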
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index 87c9f7074b6..2326de2de66 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -16,9 +16,9 @@
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
#include "media/filters/chunk_demuxer.h"
-#include "media/webm/cluster_builder.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_crypto_helpers.h"
+#include "media/formats/webm/cluster_builder.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_crypto_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AnyNumber;
@@ -32,47 +32,55 @@ using ::testing::_;
namespace media {
-static const uint8 kTracksHeader[] = {
+const uint8 kTracksHeader[] = {
0x16, 0x54, 0xAE, 0x6B, // Tracks ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
};
// WebM Block bytes that represent a VP8 keyframe.
-static const uint8 kVP8Keyframe[] = {
+const uint8 kVP8Keyframe[] = {
0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
};
// WebM Block bytes that represent a VP8 interframe.
-static const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
+const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
-static const int kTracksHeaderSize = sizeof(kTracksHeader);
-static const int kTracksSizeOffset = 4;
+static const uint8 kCuesHeader[] = {
+ 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
+};
+
+const int kTracksHeaderSize = sizeof(kTracksHeader);
+const int kTracksSizeOffset = 4;
// The size of TrackEntry element in test file "webm_vorbis_track_entry" starts
// at index 1 and spans 8 bytes.
-static const int kAudioTrackSizeOffset = 1;
-static const int kAudioTrackSizeWidth = 8;
-static const int kAudioTrackEntryHeaderSize = kAudioTrackSizeOffset +
- kAudioTrackSizeWidth;
+const int kAudioTrackSizeOffset = 1;
+const int kAudioTrackSizeWidth = 8;
+const int kAudioTrackEntryHeaderSize =
+ kAudioTrackSizeOffset + kAudioTrackSizeWidth;
// The size of TrackEntry element in test file "webm_vp8_track_entry" starts at
// index 1 and spans 8 bytes.
-static const int kVideoTrackSizeOffset = 1;
-static const int kVideoTrackSizeWidth = 8;
-static const int kVideoTrackEntryHeaderSize = kVideoTrackSizeOffset +
- kVideoTrackSizeWidth;
-
-static const int kVideoTrackNum = 1;
-static const int kAudioTrackNum = 2;
-
-static const int kAudioBlockDuration = 23;
-static const int kVideoBlockDuration = 33;
-static const int kBlockSize = 10;
-
-static const char kSourceId[] = "SourceId";
-static const char kDefaultFirstClusterRange[] = "{ [0,46) }";
-static const int kDefaultFirstClusterEndTimestamp = 66;
-static const int kDefaultSecondClusterEndTimestamp = 132;
+const int kVideoTrackSizeOffset = 1;
+const int kVideoTrackSizeWidth = 8;
+const int kVideoTrackEntryHeaderSize =
+ kVideoTrackSizeOffset + kVideoTrackSizeWidth;
+
+const int kVideoTrackNum = 1;
+const int kAudioTrackNum = 2;
+const int kTextTrackNum = 3;
+const int kAlternateTextTrackNum = 4;
+
+const int kAudioBlockDuration = 23;
+const int kVideoBlockDuration = 33;
+const int kTextBlockDuration = 100;
+const int kBlockSize = 10;
+
+const char kSourceId[] = "SourceId";
+const char kDefaultFirstClusterRange[] = "{ [0,46) }";
+const int kDefaultFirstClusterEndTimestamp = 66;
+const int kDefaultSecondClusterEndTimestamp = 132;
base::TimeDelta kDefaultDuration() {
return base::TimeDelta::FromMilliseconds(201224);
@@ -82,7 +90,7 @@ base::TimeDelta kDefaultDuration() {
// The data pointed to by |buffer| should be at least 8 bytes long.
// |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
static void WriteInt64(uint8* buffer, int64 number) {
- DCHECK(number >= 0 && number < GG_LONGLONG(0x00FFFFFFFFFFFFFF));
+ DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
buffer[0] = 0x01;
int64 tmp = number;
for (int i = 7; i > 0; i--) {
@@ -128,7 +136,9 @@ static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
*called = true;
}
-class ChunkDemuxerTest : public testing::Test {
+static void LogFunc(const std::string& str) { DVLOG(1) << str; }
+
+class ChunkDemuxerTest : public ::testing::Test {
protected:
enum CodecsIndex {
AUDIO,
@@ -150,7 +160,8 @@ class ChunkDemuxerTest : public testing::Test {
return GenerateCluster(46, 66, 5);
}
- ChunkDemuxerTest() {
+ ChunkDemuxerTest()
+ : append_window_end_for_next_append_(kInfiniteDuration()) {
CreateNewDemuxer();
}
@@ -159,17 +170,44 @@ class ChunkDemuxerTest : public testing::Test {
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
Demuxer::NeedKeyCB need_key_cb =
base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
- demuxer_.reset(new ChunkDemuxer(open_cb, need_key_cb, LogCB()));
+ demuxer_.reset(
+ new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
}
virtual ~ChunkDemuxerTest() {
ShutdownDemuxer();
}
- void CreateInitSegment(bool has_audio, bool has_video, bool has_text,
- bool is_audio_encrypted, bool is_video_encrypted,
+ void CreateInitSegment(int stream_flags,
+ bool is_audio_encrypted,
+ bool is_video_encrypted,
scoped_ptr<uint8[]>* buffer,
int* size) {
+ CreateInitSegmentInternal(
+ stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
+ size);
+ }
+
+ void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
+ bool is_audio_encrypted,
+ bool is_video_encrypted,
+ scoped_ptr<uint8[]>* buffer,
+ int* size) {
+ DCHECK(stream_flags & HAS_TEXT);
+ CreateInitSegmentInternal(
+ stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
+ size);
+ }
+
+ void CreateInitSegmentInternal(int stream_flags,
+ bool is_audio_encrypted,
+ bool is_video_encrypted,
+ scoped_ptr<uint8[]>* buffer,
+ bool use_alternate_text_track_id,
+ int* size) {
+ bool has_audio = (stream_flags & HAS_AUDIO) != 0;
+ bool has_video = (stream_flags & HAS_VIDEO) != 0;
+ bool has_text = (stream_flags & HAS_TEXT) != 0;
scoped_refptr<DecoderBuffer> ebml_header;
scoped_refptr<DecoderBuffer> info;
scoped_refptr<DecoderBuffer> audio_track_entry;
@@ -209,13 +247,18 @@ class ChunkDemuxerTest : public testing::Test {
//
// This is the track entry for a text track,
// TrackEntry [AE], size=30
- // TrackNum [D7], size=1, val=3
- // TrackUID [73] [C5], size=1, value=3
+ // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
+ // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
+ // track, even if TrackNum changes)
// TrackType [83], size=1, val=0x11
// CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
- const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
- "\x83\x81\x11\x86\x92"
- "D_WEBVTT/SUBTITLES";
+ char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
+ "\x83\x81\x11\x86\x92"
+ "D_WEBVTT/SUBTITLES";
+ DCHECK_EQ(str[4], kTextTrackNum);
+ if (use_alternate_text_track_id)
+ str[4] = kAlternateTextTrackNum;
+
const int len = strlen(str);
DCHECK_EQ(len, 32);
const uint8* const buf = reinterpret_cast<const uint8*>(str);
@@ -281,11 +324,12 @@ class ChunkDemuxerTest : public testing::Test {
}
ChunkDemuxer::Status AddId() {
- return AddId(kSourceId, true, true);
+ return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
}
- ChunkDemuxer::Status AddId(const std::string& source_id,
- bool has_audio, bool has_video) {
+ ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
+ bool has_audio = (stream_flags & HAS_AUDIO) != 0;
+ bool has_video = (stream_flags & HAS_VIDEO) != 0;
std::vector<std::string> codecs;
std::string type;
@@ -300,12 +344,20 @@ class ChunkDemuxerTest : public testing::Test {
}
if (!has_audio && !has_video) {
- return AddId(kSourceId, true, true);
+ return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
}
return demuxer_->AddId(source_id, type, codecs);
}
+ ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
+ std::vector<std::string> codecs;
+ std::string type = "video/mp2t";
+ codecs.push_back("mp4a.40.2");
+ codecs.push_back("avc1.640028");
+ return demuxer_->AddId(source_id, type, codecs);
+ }
+
void AppendData(const uint8* data, size_t length) {
AppendData(kSourceId, data, length);
}
@@ -326,13 +378,17 @@ class ChunkDemuxerTest : public testing::Test {
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
int timecode, int block_count) {
int block_duration = 0;
- switch(track_number) {
+ switch (track_number) {
case kVideoTrackNum:
block_duration = kVideoBlockDuration;
break;
case kAudioTrackNum:
block_duration = kAudioBlockDuration;
break;
+ case kTextTrackNum: // Fall-through.
+ case kAlternateTextTrackNum:
+ block_duration = kTextBlockDuration;
+ break;
}
ASSERT_NE(block_duration, 0);
int end_timecode = timecode + block_count * block_duration;
@@ -341,6 +397,12 @@ class ChunkDemuxerTest : public testing::Test {
timecode, end_timecode, track_number, block_duration));
}
+  // |cluster_description| - A space-delimited string of buffer info used to
+  // construct a cluster. Each buffer info is a timestamp in milliseconds,
+  // optionally followed by a 'K' to indicate that the buffer should be
+  // marked as a keyframe. For example, "0K 30 60" constructs a cluster with
+  // 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes at 30ms and
+  // 60ms.
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
const std::string& cluster_description) {
std::vector<std::string> timestamps;
@@ -362,8 +424,14 @@ class ChunkDemuxerTest : public testing::Test {
if (i == 0)
cb.SetClusterTimecode(timestamp_in_ms);
- cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
- &data[0], data.size());
+ if (track_number == kTextTrackNum ||
+ track_number == kAlternateTextTrackNum) {
+ cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
+ block_flags, &data[0], data.size());
+ } else {
+ cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
+ &data[0], data.size());
+ }
}
AppendCluster(source_id, cb.Finish());
}
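
Usage, per the |cluster_description| grammar documented above:

    // One keyframe at 0ms followed by two non-keyframes at 30ms and 60ms.
    AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30 60");
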
@@ -371,7 +439,11 @@ class ChunkDemuxerTest : public testing::Test {
void AppendData(const std::string& source_id,
const uint8* data, size_t length) {
EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
- demuxer_->AppendData(source_id, data, length);
+
+ demuxer_->AppendData(source_id, data, length,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[source_id]);
}
void AppendDataInPieces(const uint8* data, size_t length) {
@@ -389,29 +461,22 @@ class ChunkDemuxerTest : public testing::Test {
}
}
- void AppendInitSegment(bool has_audio, bool has_video) {
- AppendInitSegmentWithSourceId(kSourceId, has_audio, has_video, false);
- }
-
- void AppendInitSegmentText(bool has_audio, bool has_video) {
- AppendInitSegmentWithSourceId(kSourceId, has_audio, has_video, true);
+ void AppendInitSegment(int stream_flags) {
+ AppendInitSegmentWithSourceId(kSourceId, stream_flags);
}
void AppendInitSegmentWithSourceId(const std::string& source_id,
- bool has_audio, bool has_video,
- bool has_text) {
- AppendInitSegmentWithEncryptedInfo(
- source_id, has_audio, has_video, has_text, false, false);
+ int stream_flags) {
+ AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
}
void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
- bool has_audio, bool has_video,
- bool has_text,
+ int stream_flags,
bool is_audio_encrypted,
bool is_video_encrypted) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
- CreateInitSegment(has_audio, has_video, has_text,
+ CreateInitSegment(stream_flags,
is_audio_encrypted, is_video_encrypted,
&info_tracks, &info_tracks_size);
AppendData(source_id, info_tracks.get(), info_tracks_size);
@@ -448,21 +513,21 @@ class ChunkDemuxerTest : public testing::Test {
expected_status);
}
- bool InitDemuxer(bool has_audio, bool has_video) {
- return InitDemuxerWithEncryptionInfo(has_audio, has_video, false,
- false, false);
- }
+ enum StreamFlags {
+ HAS_AUDIO = 1 << 0,
+ HAS_VIDEO = 1 << 1,
+ HAS_TEXT = 1 << 2
+ };
- bool InitDemuxerText(bool has_audio, bool has_video) {
- return InitDemuxerWithEncryptionInfo(has_audio, has_video, true,
- false, false);
+ bool InitDemuxer(int stream_flags) {
+ return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
}
bool InitDemuxerWithEncryptionInfo(
- bool has_audio, bool has_video, bool has_text,
- bool is_audio_encrypted, bool is_video_encrypted) {
+ int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
+
PipelineStatus expected_status =
- (has_audio || has_video) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
+ (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
base::TimeDelta expected_duration = kNoTimestamp();
if (expected_status == PIPELINE_OK)
@@ -472,11 +537,11 @@ class ChunkDemuxerTest : public testing::Test {
demuxer_->Initialize(
&host_, CreateInitDoneCB(expected_duration, expected_status), true);
- if (AddId(kSourceId, has_audio, has_video) != ChunkDemuxer::kOk)
+ if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
return false;
AppendInitSegmentWithEncryptedInfo(
- kSourceId, has_audio, has_video, has_text,
+ kSourceId, stream_flags,
is_audio_encrypted, is_video_encrypted);
return true;
}
@@ -488,13 +553,21 @@ class ChunkDemuxerTest : public testing::Test {
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
- if (AddId(audio_id, true, false) != ChunkDemuxer::kOk)
+ if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
return false;
- if (AddId(video_id, false, true) != ChunkDemuxer::kOk)
+ if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
return false;
- AppendInitSegmentWithSourceId(audio_id, true, false, has_text);
- AppendInitSegmentWithSourceId(video_id, false, true, has_text);
+ int audio_flags = HAS_AUDIO;
+ int video_flags = HAS_VIDEO;
+
+ if (has_text) {
+ audio_flags |= HAS_TEXT;
+ video_flags |= HAS_TEXT;
+ }
+
+ AppendInitSegmentWithSourceId(audio_id, audio_flags);
+ AppendInitSegmentWithSourceId(video_id, video_flags);
return true;
}
@@ -511,30 +584,40 @@ class ChunkDemuxerTest : public testing::Test {
// bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
// The resulting video stream returns data from each file for the following
// time ranges.
- // bear-320x240.webm : [0-501) [801-2737)
+ // bear-320x240.webm : [0-501) [801-2736)
// bear-640x360.webm : [527-793)
//
// bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
// bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
// The resulting audio stream returns data from each file for the following
// time ranges.
- // bear-320x240.webm : [0-524) [779-2737)
+ // bear-320x240.webm : [0-524) [779-2736)
// bear-640x360.webm : [527-759)
bool InitDemuxerWithConfigChangeData() {
scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
EXPECT_CALL(*this, DemuxerOpened());
+
demuxer_->Initialize(
&host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
PIPELINE_OK), true);
- if (AddId(kSourceId, true, true) != ChunkDemuxer::kOk)
+ if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
return false;
// Append the whole bear1 file.
+ // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
+ // the files are fixed to have the correct duration in their init segments,
+ // and the CreateInitDoneCB() call, above, is fixed to used that duration.
+ // See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
AppendData(bear1->data(), bear1->data_size());
- CheckExpectedRanges(kSourceId, "{ [0,2737) }");
+ // Last audio frame has timestamp 2721 and duration 24 (estimated from max
+ // seen so far for audio track).
+ // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
+ // DefaultDuration for video track).
+ CheckExpectedRanges(kSourceId, "{ [0,2736) }");
// Append initialization segment for bear2.
// Note: Offsets here and below are derived from
@@ -546,13 +629,13 @@ class ChunkDemuxerTest : public testing::Test {
// Append a media segment that goes from [0.527000, 1.014000).
AppendData(bear2->data() + 55290, 18785);
- CheckExpectedRanges(kSourceId, "{ [0,1028) [1201,2737) }");
+ CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
// Append initialization segment for bear1 & fill gap with [779-1197)
// segment.
AppendData(bear1->data(), 4370);
AppendData(bear1->data() + 72737, 28183);
- CheckExpectedRanges(kSourceId, "{ [0,2737) }");
+ CheckExpectedRanges(kSourceId, "{ [0,2736) }");
MarkEndOfStream(PIPELINE_OK);
return true;
@@ -586,6 +669,13 @@ class ChunkDemuxerTest : public testing::Test {
scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
int first_video_timecode,
int block_count) {
+ return GenerateCluster(first_audio_timecode, first_video_timecode,
+ block_count, false);
+ }
+ scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
+ int first_video_timecode,
+ int block_count,
+ bool unknown_size) {
CHECK_GT(block_count, 0);
int size = 10;
@@ -635,7 +725,7 @@ class ChunkDemuxerTest : public testing::Test {
kWebMFlagKeyframe, data.get(), size);
}
- return cb.Finish();
+ return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
}
scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
@@ -650,14 +740,12 @@ class ChunkDemuxerTest : public testing::Test {
cb.SetClusterTimecode(timecode);
// Create simple blocks for everything except the last block.
- for (int i = 0; timecode < (end_timecode - block_duration); i++) {
+ while (timecode < (end_timecode - block_duration)) {
cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
&data[0], data.size());
timecode += block_duration;
}
- // Make the last block a BlockGroup so that it doesn't get delayed by the
- // block duration calculation logic.
if (track_number == kVideoTrackNum) {
AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
kWebMFlagKeyframe);
@@ -665,6 +753,7 @@ class ChunkDemuxerTest : public testing::Test {
cb.AddBlockGroup(track_number, timecode, block_duration,
kWebMFlagKeyframe, &data[0], data.size());
}
+
return cb.Finish();
}
@@ -754,7 +843,7 @@ class ChunkDemuxerTest : public testing::Test {
<< r.end(i).InMilliseconds() << ") ";
}
ss << "}";
- EXPECT_EQ(ss.str(), expected);
+ EXPECT_EQ(expected, ss.str());
}
MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
@@ -812,7 +901,10 @@ class ChunkDemuxerTest : public testing::Test {
base::SplitString(expected, ' ', &timestamps);
std::stringstream ss;
for (size_t i = 0; i < timestamps.size(); ++i) {
- DemuxerStream::Status status;
+ // Initialize status to kAborted since it's possible for Read() to return
+ // without calling StoreStatusAndBuffer() if it doesn't have any buffers
+ // left to return.
+ DemuxerStream::Status status = DemuxerStream::kAborted;
scoped_refptr<DecoderBuffer> buffer;
stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
base::Unretained(this), &status, &buffer));
@@ -823,6 +915,13 @@ class ChunkDemuxerTest : public testing::Test {
if (i > 0)
ss << " ";
ss << buffer->timestamp().InMilliseconds();
+
+ // Handle preroll buffers.
+ if (EndsWith(timestamps[i], "P", true)) {
+ ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
+ ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
+ ss << "P";
+ }
}
EXPECT_EQ(expected, ss.str());
}
@@ -844,18 +943,18 @@ class ChunkDemuxerTest : public testing::Test {
bool ParseWebMFile(const std::string& filename,
const BufferTimestamps* timestamps,
const base::TimeDelta& duration) {
- return ParseWebMFile(filename, timestamps, duration, true, true);
+ return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
}
bool ParseWebMFile(const std::string& filename,
const BufferTimestamps* timestamps,
const base::TimeDelta& duration,
- bool has_audio, bool has_video) {
+ int stream_flags) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
- if (AddId(kSourceId, has_audio, has_video) != ChunkDemuxer::kOk)
+ if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
return false;
// Read a WebM file into memory and send the data to the demuxer.
@@ -915,11 +1014,27 @@ class ChunkDemuxerTest : public testing::Test {
message_loop_.RunUntilIdle();
}
+ bool SetTimestampOffset(const std::string& id,
+ base::TimeDelta timestamp_offset) {
+ if (demuxer_->IsParsingMediaSegment(id))
+ return false;
+
+ timestamp_offset_map_[id] = timestamp_offset;
+ return true;
+ }
+
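
This helper stands in for the ChunkDemuxer::SetTimestampOffset() method removed by this patch; as before, an offset can only be (re)set between media segments:

    EXPECT_TRUE(SetTimestampOffset(
        kSourceId, base::TimeDelta::FromMilliseconds(30)));
    // Fails (returns false) while demuxer_->IsParsingMediaSegment(kSourceId).
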
base::MessageLoop message_loop_;
MockDemuxerHost host_;
scoped_ptr<ChunkDemuxer> demuxer_;
+ base::TimeDelta append_window_start_for_next_append_;
+ base::TimeDelta append_window_end_for_next_append_;
+
+ // Map of source id to timestamp offset to use for the next AppendData()
+ // operation for that source id.
+ std::map<std::string, base::TimeDelta> timestamp_offset_map_;
+
private:
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
};
@@ -949,8 +1064,15 @@ TEST_F(ChunkDemuxerTest, Init) {
.Times(Exactly(need_key_count));
}
+ int stream_flags = 0;
+ if (has_audio)
+ stream_flags |= HAS_AUDIO;
+
+ if (has_video)
+ stream_flags |= HAS_VIDEO;
+
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
- has_audio, has_video, false, is_audio_encrypted, is_video_encrypted));
+ stream_flags, is_audio_encrypted, is_video_encrypted));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
@@ -966,6 +1088,8 @@ TEST_F(ChunkDemuxerTest, Init) {
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
+ EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(audio_stream);
}
@@ -975,6 +1099,8 @@ TEST_F(ChunkDemuxerTest, Init) {
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
+ EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(video_stream);
}
@@ -984,6 +1110,8 @@ TEST_F(ChunkDemuxerTest, Init) {
}
}
+// TODO(acolwell): Fold this test into Init tests since the tests are
+// almost identical.
TEST_F(ChunkDemuxerTest, InitText) {
  // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
// No encryption cases handled here.
@@ -997,15 +1125,24 @@ TEST_F(ChunkDemuxerTest, InitText) {
DemuxerStream* text_stream = NULL;
TextTrackConfig text_config;
- EXPECT_CALL(host_, AddTextStream(_,_))
+ EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(DoAll(SaveArg<0>(&text_stream),
SaveArg<1>(&text_config)));
+ int stream_flags = HAS_TEXT;
+ if (has_audio)
+ stream_flags |= HAS_AUDIO;
+
+ if (has_video)
+ stream_flags |= HAS_VIDEO;
+
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
- has_audio, has_video, true, is_audio_encrypted, is_video_encrypted));
+ stream_flags, is_audio_encrypted, is_video_encrypted));
ASSERT_TRUE(text_stream);
EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
EXPECT_EQ(kTextSubtitles, text_config.kind());
+ EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
+ ->supports_partial_append_window_trimming());
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
@@ -1021,6 +1158,8 @@ TEST_F(ChunkDemuxerTest, InitText) {
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
+ EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(audio_stream);
}
@@ -1030,6 +1169,8 @@ TEST_F(ChunkDemuxerTest, InitText) {
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
+ EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(video_stream);
}
@@ -1039,39 +1180,153 @@ TEST_F(ChunkDemuxerTest, InitText) {
}
}
+TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
+ // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
+ // segment in which the text track ID changes. Verify appended buffers before
+ // and after the second init segment map to the same underlying track buffers.
+ CreateNewDemuxer();
+ DemuxerStream* text_stream = NULL;
+ TextTrackConfig text_config;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(DoAll(SaveArg<0>(&text_stream),
+ SaveArg<1>(&text_config)));
+ ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
+ HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(audio_stream);
+ ASSERT_TRUE(video_stream);
+ ASSERT_TRUE(text_stream);
+
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "10K");
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ scoped_ptr<uint8[]> info_tracks;
+ int info_tracks_size = 0;
+ CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
+ false, false,
+ &info_tracks, &info_tracks_size);
+ demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46K 69K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60K");
+ AppendSingleStreamCluster(kSourceId, kAlternateTextTrackNum, "45K");
+
+ CheckExpectedRanges(kSourceId, "{ [0,92) }");
+ CheckExpectedBuffers(audio_stream, "0 23 46 69");
+ CheckExpectedBuffers(video_stream, "0 30 60");
+ CheckExpectedBuffers(text_stream, "10 45");
+
+ ShutdownDemuxer();
+}
+
+TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
+ // Tests that non-keyframes following an init segment are allowed
+ // and dropped, as expected if the initialization segment received
+ // algorithm correctly sets the needs random access point flag to true for all
+ // track buffers. Note that the first initialization segment is insufficient
+ // to fully test this since needs random access point flag initializes to
+ // true.
+ CreateNewDemuxer();
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
+ HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(audio_stream && video_stream && text_stream);
+
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0 23K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0 30K");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0 40K");
+ CheckExpectedRanges(kSourceId, "{ [30,46) }");
+
+ AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46 69K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60 90K");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "80 90K");
+ CheckExpectedRanges(kSourceId, "{ [30,92) }");
+
+ CheckExpectedBuffers(audio_stream, "23 69");
+ CheckExpectedBuffers(video_stream, "30 90");
+
+ // WebM parser marks all text buffers as keyframes.
+ CheckExpectedBuffers(text_stream, "0 40 80 90");
+}
+
// Make sure that the demuxer reports an error if Shutdown()
// is called before all the initialization segments are appended.
-TEST_F(ChunkDemuxerTest, ShutdownBeforeAllInitSegmentsAppended) {
+TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
- EXPECT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
- EXPECT_EQ(AddId("video", false, true), ChunkDemuxer::kOk);
+ EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
+ EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
- AppendInitSegmentWithSourceId("audio", true, false, false);
+ AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
+
+ ShutdownDemuxer();
}
-TEST_F(ChunkDemuxerTest, ShutdownBeforeAllInitSegmentsAppendedText) {
+TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
- EXPECT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
- EXPECT_EQ(AddId("video", false, true), ChunkDemuxer::kOk);
+ EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
+ EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
- EXPECT_CALL(host_, AddTextStream(_,_))
+ EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(1));
- AppendInitSegmentWithSourceId("video", false, true, true);
+ AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
+
+ ShutdownDemuxer();
+}
+
+// Verifies that all streams waiting for data receive an end of stream
+// buffer when Shutdown() is called.
+TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
+
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ bool audio_read_done = false;
+ bool video_read_done = false;
+ bool text_read_done = false;
+ audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
+ video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
+ text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
+ message_loop_.RunUntilIdle();
+
+ EXPECT_FALSE(audio_read_done);
+ EXPECT_FALSE(video_read_done);
+ EXPECT_FALSE(text_read_done);
+
+ ShutdownDemuxer();
+
+ EXPECT_TRUE(audio_read_done);
+ EXPECT_TRUE(video_read_done);
+ EXPECT_TRUE(text_read_done);
}
// Test that Seek() completes successfully when the first cluster
// arrives.
TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
InSequence s;
@@ -1093,7 +1348,7 @@ TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
// Test that parsing errors are handled for clusters appended after init.
TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
@@ -1104,7 +1359,7 @@ TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
// is in the middle of cluster. This is to verify that the parser
// does not reset itself on a seek.
TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
InSequence s;
@@ -1120,10 +1375,6 @@ TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectRead(DemuxerStream::VIDEO, 0);
ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
- // Note: We skip trying to read a video buffer here because computing
- // the duration for this block relies on successfully parsing the last block
- // in the cluster the cluster.
- ExpectRead(DemuxerStream::AUDIO, 2 * kAudioBlockDuration);
Seek(base::TimeDelta::FromSeconds(5));
@@ -1140,15 +1391,17 @@ TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
- CreateInitSegment(true, true, false,
+ CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
-
- demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size);
+ demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
}
// Make sure Read() callbacks are dispatched with the proper data.
TEST_F(ChunkDemuxerTest, Read) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
@@ -1166,7 +1419,7 @@ TEST_F(ChunkDemuxerTest, Read) {
}
TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
AppendCluster(GenerateCluster(10, 4));
@@ -1176,11 +1429,14 @@ TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
// Verify that AppendData() can still accept more data.
scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
- demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size());
+ demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
}
TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
@@ -1198,11 +1454,14 @@ TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
- demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
+ demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
}
TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
@@ -1220,12 +1479,15 @@ TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
- demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
+ demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
}
TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
@@ -1278,7 +1540,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
}
TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
CheckExpectedRanges("{ }");
MarkEndOfStream(PIPELINE_OK);
@@ -1286,7 +1548,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
}
TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
@@ -1297,7 +1559,7 @@ TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
}
TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
@@ -1357,7 +1619,7 @@ class EndOfStreamHelper {
// Make sure that all pending reads that we don't have media data for get an
// "end of stream" buffer when MarkEndOfStream() is called.
TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 2));
@@ -1392,7 +1654,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
// Make sure that all Read() calls after we get an MarkEndOfStream()
// call return an "end of stream" buffer.
TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 2));
@@ -1431,7 +1693,7 @@ TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
}
TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(0, 10);
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
@@ -1460,6 +1722,45 @@ TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
ASSERT_EQ(status, DemuxerStream::kOk);
}
+// Verify buffered range change behavior for audio/video/text tracks.
+TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
+ DemuxerStream* text_stream = NULL;
+
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
+
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
+
+ // Check expected ranges and verify that an empty text track does not
+ // affect the expected ranges.
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
+ MarkEndOfStream(PIPELINE_OK);
+
+ // Check expected ranges and verify that an empty text track does not
+ // affect the expected ranges.
+ CheckExpectedRanges(kSourceId, "{ [0,66) }");
+
+ // Unmark end of stream state and verify that the ranges return to
+ // their pre-"end of stream" values.
+ demuxer_->UnmarkEndOfStream();
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ // Add text track data and verify that the buffered ranges don't change
+ // since the intersection of all the tracks doesn't change.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ // Mark end of stream and verify that text track data is reflected in
+ // the new range.
+ MarkEndOfStream(PIPELINE_OK);
+ CheckExpectedRanges(kSourceId, "{ [0,200) }");
+}
+
// Make sure AppendData() will accept elements that span multiple calls.
TEST_F(ChunkDemuxerTest, AppendingInPieces) {
EXPECT_CALL(*this, DemuxerOpened());
@@ -1470,7 +1771,7 @@ TEST_F(ChunkDemuxerTest, AppendingInPieces) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
- CreateInitSegment(true, true, false,
+ CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
@@ -1503,6 +1804,11 @@ TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
{kSkip, kSkip},
};
+ // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
+ // ParseWebMFile() call's expected duration, below, once the file is fixed to
+ // have the correct duration in the init segment. See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
+
ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744)));
}
@@ -1531,9 +1837,14 @@ TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
{kSkip, kSkip},
};
+ // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
+ // ParseWebMFile() call's expected duration, below, once the file is fixed to
+ // have the correct duration in the init segment. See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
+
ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744),
- true, false));
+ HAS_AUDIO));
}
TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
@@ -1546,9 +1857,14 @@ TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
{kSkip, kSkip},
};
+ // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
+ // ParseWebMFile() call's expected duration, below, once the file is fixed to
+ // have the correct duration in the init segment. See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
+
ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2703),
- false, true));
+ HAS_VIDEO));
}
TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
@@ -1567,7 +1883,7 @@ TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
// Verify that we output buffers before the entire cluster has been parsed.
TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendEmptyCluster(0);
scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
@@ -1585,27 +1901,17 @@ TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
- // Append data one byte at a time until the audio read completes.
+ // Append data one byte at a time until one or both reads complete.
int i = 0;
- for (; i < cluster->size() && !audio_read_done; ++i) {
+ for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
AppendData(cluster->data() + i, 1);
message_loop_.RunUntilIdle();
}
- EXPECT_TRUE(audio_read_done);
- EXPECT_FALSE(video_read_done);
+ EXPECT_TRUE(audio_read_done || video_read_done);
EXPECT_GT(i, 0);
EXPECT_LT(i, cluster->size());
- // Append data one byte at a time until the video read completes.
- for (; i < cluster->size() && !video_read_done; ++i) {
- AppendData(cluster->data() + i, 1);
- message_loop_.RunUntilIdle();
- }
-
- EXPECT_TRUE(video_read_done);
- EXPECT_LT(i, cluster->size());
-
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
@@ -1638,7 +1944,10 @@ TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
uint8 tmp = 0;
- demuxer_->AppendData(kSourceId, &tmp, 1);
+ demuxer_->AppendData(kSourceId, &tmp, 1,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
}
TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
@@ -1652,7 +1961,7 @@ TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
ChunkDemuxer::kOk);
- AppendInitSegment(true, true);
+ AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
}
TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
@@ -1666,16 +1975,16 @@ TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
ChunkDemuxer::kOk);
- AppendInitSegment(true, true);
+ AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
}
TEST_F(ChunkDemuxerTest, MultipleHeaders) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
// Append another identical initialization segment.
- AppendInitSegment(true, true);
+ AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
AppendCluster(kDefaultSecondCluster());
@@ -1703,7 +2012,7 @@ TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
std::string audio_id = "audio1";
std::string video_id = "video1";
- EXPECT_CALL(host_, AddTextStream(_,_))
+ EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(2));
ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
@@ -1724,15 +2033,15 @@ TEST_F(ChunkDemuxerTest, AddIdFailures) {
std::string audio_id = "audio1";
std::string video_id = "video1";
- ASSERT_EQ(AddId(audio_id, true, false), ChunkDemuxer::kOk);
+ ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
// Adding an id with audio/video should fail because we already added audio.
ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
- AppendInitSegmentWithSourceId(audio_id, true, false, false);
+ AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
// Adding an id after append should fail.
- ASSERT_EQ(AddId(video_id, false, true), ChunkDemuxer::kReachedIdLimit);
+ ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
}
// Test that Read() calls after a RemoveId() return "end of stream" buffers.
@@ -1767,15 +2076,15 @@ TEST_F(ChunkDemuxerTest, RemoveId) {
// quota for new IDs in the future.
TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
std::string audio_id_1 = "audio1";
- ASSERT_TRUE(AddId(audio_id_1, true, false) == ChunkDemuxer::kOk);
+ ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
demuxer_->RemoveId(audio_id_1);
std::string audio_id_2 = "audio2";
- ASSERT_TRUE(AddId(audio_id_2, true, false) == ChunkDemuxer::kOk);
+ ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
}
TEST_F(ChunkDemuxerTest, SeekCanceled) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Append cluster at the beginning of the stream.
AppendCluster(GenerateCluster(0, 4));
@@ -1805,7 +2114,7 @@ TEST_F(ChunkDemuxerTest, SeekCanceled) {
}
TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Append cluster at the beginning of the stream.
AppendCluster(GenerateCluster(0, 4));
@@ -1891,7 +2200,7 @@ TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
// This scenario might be useful if seeking past the end of stream
// of either audio or video (or both).
TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
@@ -1920,7 +2229,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
// Test that EndOfStream is ignored if coming during a pending seek
// whose seek time is before some existing ranges.
TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
@@ -1960,8 +2269,8 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
- ASSERT_EQ(AddId(kSourceId, true, false), ChunkDemuxer::kOk);
- AppendInitSegment(true, false);
+ ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
+ AppendInitSegment(HAS_AUDIO);
// Test a simple cluster.
AppendCluster(
@@ -1982,8 +2291,8 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
- ASSERT_EQ(AddId(kSourceId, false, true), ChunkDemuxer::kOk);
- AppendInitSegment(false, true);
+ ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
+ AppendInitSegment(HAS_VIDEO);
// Test a simple cluster.
AppendCluster(
@@ -1999,7 +2308,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
}
TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Audio: 0 -> 23
// Video: 0 -> 33
@@ -2055,25 +2364,93 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
}
+TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
+ EXPECT_CALL(host_, AddTextStream(_, _));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
+
+ // Append audio & video data
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+
+ // Verify that a text track with no cues does not result in an empty buffered
+ // range.
+ CheckExpectedRanges("{ [0,46) }");
+
+ // Add some text cues.
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
+
+ // Verify that the new cues did not affect the buffered ranges.
+ CheckExpectedRanges("{ [0,46) }");
+
+ // Remove the buffered range.
+ demuxer_->Remove(kSourceId, base::TimeDelta(),
+ base::TimeDelta::FromMilliseconds(46));
+ CheckExpectedRanges("{ }");
+}
+
// Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
// over-hanging tails at the end of the ranges as this is likely due to block
// duration differences.
TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+
+ CheckExpectedRanges("{ [0,46) }");
+
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
+ MarkEndOfStream(PIPELINE_OK);
+
+ // Verify that the range extends to the end of the video data.
+ CheckExpectedRanges("{ [0,66) }");
+
+ // Verify that the range reverts to the intersection when end of stream
+ // has been canceled.
+ demuxer_->UnmarkEndOfStream();
+ CheckExpectedRanges("{ [0,46) }");
+
+ // Append and remove data so that the 2 streams' end ranges do not overlap.
+
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ "200K 233 266 299 332K 365");
+
+ // At this point, the per-stream ranges are as follows:
+ // Audio: [0,46) [200,246)
+ // Video: [0,66) [200,398)
+ CheckExpectedRanges("{ [0,46) [200,246) }");
+
+ demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
+ base::TimeDelta::FromMilliseconds(300));
- AppendCluster(GenerateSingleStreamCluster(0, 90, kAudioTrackNum, 90));
- AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 100));
+ // At this point, the per-stream ranges are as follows:
+ // Audio: [0,46)
+ // Video: [0,66) [332,398)
+ CheckExpectedRanges("{ [0,46) }");
- CheckExpectedRanges("{ [0,90) }");
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
+
+ // At this point, the per-stream ranges are as follows:
+ // Audio: [0,46) [200,246)
+ // Video: [0,66) [200,266) [332,398)
+ // NOTE: The last ranges on each stream do not overlap in time.
+ CheckExpectedRanges("{ [0,46) [200,246) }");
- EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(100)));
MarkEndOfStream(PIPELINE_OK);
- CheckExpectedRanges("{ [0,100) }");
+ // NOTE: The last range on each stream gets extended to the highest
+ // end timestamp according to the spec. The last audio range gets extended
+ // from [200,246) to [200,398) which is why the intersection results in the
+ // middle range getting larger AND the new range appearing.
+ CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
}
TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Create a cluster where the video timecode begins 25ms after the audio.
AppendCluster(GenerateCluster(0, 25, 8));
@@ -2130,7 +2507,7 @@ TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
}
TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Generate and append an empty cluster beginning at 0.
AppendEmptyCluster(0);
@@ -2184,7 +2561,7 @@ TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
}
TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, SetDuration(_))
.Times(AnyNumber());
@@ -2223,8 +2600,8 @@ TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
- ASSERT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
- ASSERT_EQ(AddId("video", false, true), ChunkDemuxer::kOk);
+ ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
+ ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
CheckExpectedRanges("audio", "{ }");
CheckExpectedRanges("video", "{ }");
@@ -2235,7 +2612,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
InSequence s;
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
@@ -2322,8 +2699,9 @@ TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
ExpectRead(DemuxerStream::AUDIO, 0);
+ // The first config change seen is from a splice frame representing an overlap
+ // of buffers from config 1 by buffers from config 2.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
-
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
@@ -2333,22 +2711,18 @@ TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
- ExpectRead(DemuxerStream::AUDIO, 527);
-
- // Read until the next config change.
+ // The next config change is from a splice frame representing an overlap of
+ // buffers from config 2 by buffers from config 1.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
- EXPECT_EQ(last_timestamp.InMilliseconds(), 759);
-
- // Get the new config and verify that it matches the first one.
+ EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
- ExpectRead(DemuxerStream::AUDIO, 779);
-
// Read until the end of the stream just to make sure there aren't any other
// config changes.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
+ EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
}
TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
@@ -2399,10 +2773,9 @@ TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
}
TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(30)));
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
AppendCluster(GenerateCluster(0, 2));
Seek(base::TimeDelta::FromMilliseconds(30000));
@@ -2411,10 +2784,9 @@ TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
}
TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(-1)));
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
AppendCluster(GenerateCluster(1000, 2));
GenerateExpectedReads(0, 2);
@@ -2425,9 +2797,9 @@ TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(-2500)));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(-2500)));
AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
@@ -2438,9 +2810,9 @@ TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
Seek(base::TimeDelta::FromMilliseconds(27300));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(27300)));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(27300)));
AppendCluster(audio_id, GenerateSingleStreamCluster(
0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
@@ -2450,27 +2822,114 @@ TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
GenerateAudioStreamExpectedReads(27300, 4);
}
-TEST_F(ChunkDemuxerTest, TimestampOffsetMidParse) {
- ASSERT_TRUE(InitDemuxer(true, true));
+TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
// Append only part of the cluster data.
AppendData(cluster->data(), cluster->size() - 13);
- // Setting a timestamp should fail because we're in the middle of a cluster.
- ASSERT_FALSE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(25)));
+ // Confirm we're in the middle of parsing a media segment.
+ ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
+
+ demuxer_->Abort(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+
+ // After Abort(), parsing should no longer be in the middle of a media
+ // segment.
+ ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
+}
+
+#if defined(USE_PROPRIETARY_CODECS)
+#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
+TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
+ EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
+
+ // For info:
+ // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
+ // Video: first PES:
+ // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
+ // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Audio: first PES:
+ // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
+ // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Video: last PES:
+ // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
+ // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
+ // Audio: last PES:
+ // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
+
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
+ AppendData(kSourceId, buffer->data(), buffer->data_size());
+
+ // Confirm we're in the middle of parsing a media segment.
+ ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
+
+ // Abort() on the MPEG-2 TS parser triggers the emission of the last video
+ // buffer, which is pending in the stream parser.
+ Ranges<base::TimeDelta> range_before_abort =
+ demuxer_->GetBufferedRanges(kSourceId);
+ demuxer_->Abort(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+ Ranges<base::TimeDelta> range_after_abort =
+ demuxer_->GetBufferedRanges(kSourceId);
+
+ ASSERT_EQ(range_before_abort.size(), 1u);
+ ASSERT_EQ(range_after_abort.size(), 1u);
+ EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
+ EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
+}
+#endif  // defined(ENABLE_MPEG2TS_STREAM_PARSER)
+#endif  // defined(USE_PROPRIETARY_CODECS)
+
+TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
+ const uint8 kBuffer[] = {
+ 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
+ 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
+
+ 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
+ 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
+ /* e.g. put some blocks here... */
+ 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
+ };
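+ // (EBML sizes are variable-length: 0x83 encodes a one-byte size of 3, while
+ // 0xFF is the reserved "unknown size" marker used by the second cluster.)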
+
+ // This array indicates the expected return value of IsParsingMediaSegment()
+ // following each incrementally appended byte in |kBuffer|.
+ const bool kExpectedReturnValues[] = {
+ false, false, false, false, true,
+ true, true, false,
+
+ false, false, false, false, true,
+ true, true, true,
+
+ true, true, true, true, false,
+ };
+
+ COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
+ test_arrays_out_of_sync);
+ COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
+
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- demuxer_->Abort(kSourceId);
- // After Abort(), setting a timestamp should succeed since we're no longer
- // in the middle of a cluster
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(25)));
+ for (size_t i = 0; i < sizeof(kBuffer); i++) {
+ DVLOG(3) << "Appending and testing index " << i;
+ AppendData(kBuffer + i, 1);
+ bool expected_return_value = kExpectedReturnValues[i];
+ EXPECT_EQ(expected_return_value,
+ demuxer_->IsParsingMediaSegment(kSourceId));
+ }
}
TEST_F(ChunkDemuxerTest, DurationChange) {
- ASSERT_TRUE(InitDemuxer(true, true));
- static const int kStreamDuration = kDefaultDuration().InMilliseconds();
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+ const int kStreamDuration = kDefaultDuration().InMilliseconds();
// Add data leading up to the currently set duration.
AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
@@ -2479,38 +2938,41 @@ TEST_F(ChunkDemuxerTest, DurationChange) {
CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
- // Add data at the currently set duration. The duration should not increase.
+ // Add data beginning at the currently set duration and expect a new duration
+ // to be signaled. Note that the last video block will have a higher end
+ // timestamp than the last audio block.
+ const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
+ EXPECT_CALL(host_, SetDuration(
+ base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
- // Range should not be affected.
- CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
+ CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
- // Now add data past the duration and expect a new duration to be signalled.
- static const int kNewStreamDuration =
- kStreamDuration + kAudioBlockDuration * 2;
+ // Add more data to the end of each media type. Note that the last audio block
+ // will have a higher end timestamp than the last video block.
+ const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
EXPECT_CALL(host_, SetDuration(
- base::TimeDelta::FromMilliseconds(kNewStreamDuration)));
+ base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
kStreamDuration + kVideoBlockDuration,
- 2));
+ 3));
- // See that the range has increased appropriately.
- CheckExpectedRanges(kSourceId, "{ [201191,201270) }");
+ // See that the range has increased appropriately (but not to the full
+ // duration of 201293, since there is not enough video appended for that).
+ CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
}
TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
- ASSERT_TRUE(InitDemuxer(true, true));
-
- ASSERT_TRUE(demuxer_->SetTimestampOffset(kSourceId, kDefaultDuration()));
-
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
EXPECT_CALL(host_, SetDuration(
kDefaultDuration() + base::TimeDelta::FromMilliseconds(
- kAudioBlockDuration * 2)));
+ kVideoBlockDuration * 2)));
AppendCluster(GenerateCluster(0, 4));
}
TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
@@ -2521,12 +2983,12 @@ TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendData(NULL, 0);
}
TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, SetDuration(_))
.Times(AnyNumber());
@@ -2543,43 +3005,22 @@ TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
// Test receiving a Shutdown() call before we get an Initialize()
// call. This can happen if video element gets destroyed before
// the pipeline has a chance to initialize the demuxer.
-TEST_F(ChunkDemuxerTest, ShutdownBeforeInitialize) {
+TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
demuxer_->Shutdown();
demuxer_->Initialize(
&host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
message_loop_.RunUntilIdle();
}
-TEST_F(ChunkDemuxerTest, ReadAfterAudioDisabled) {
- ASSERT_TRUE(InitDemuxer(true, true));
- AppendCluster(kDefaultFirstCluster());
-
- DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
- ASSERT_TRUE(stream);
-
- // The stream should no longer be present.
- demuxer_->OnAudioRendererDisabled();
- ASSERT_FALSE(demuxer_->GetStream(DemuxerStream::AUDIO));
-
- // Normally this would return an audio buffer at timestamp zero, but
- // all reads should return EOS buffers when disabled.
- bool audio_read_done = false;
- stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
- message_loop_.RunUntilIdle();
-
- EXPECT_TRUE(audio_read_done);
-}
-
-// Verifies that signalling end of stream while stalled at a gap
+// Verifies that signaling end of stream while stalled at a gap
// boundary does not trigger end of stream buffers to be returned.
TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(0, 10);
AppendCluster(300, 10);
CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
-
GenerateExpectedReads(0, 10);
bool audio_read_done = false;
@@ -2604,18 +3045,18 @@ TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
demuxer_->UnmarkEndOfStream();
- AppendCluster(138, 24);
+ AppendCluster(138, 22);
message_loop_.RunUntilIdle();
- CheckExpectedRanges(kSourceId, "{ [0,438) }");
+ CheckExpectedRanges(kSourceId, "{ [0,435) }");
// Verify that the reads have completed.
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
// Read the rest of the buffers.
- GenerateExpectedReads(161, 171, 22);
+ GenerateExpectedReads(161, 171, 20);
// Verify that reads block because the append cleared the end of stream state.
audio_read_done = false;
@@ -2629,6 +3070,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
MarkEndOfStream(PIPELINE_OK);
EXPECT_TRUE(audio_read_done);
@@ -2636,7 +3078,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
}
TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Cancel preroll.
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
@@ -2650,7 +3092,7 @@ TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
}
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
- ASSERT_TRUE(InitDemuxer(true, false));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
@@ -2692,54 +3134,351 @@ TEST_F(ChunkDemuxerTest, GCDuringSeek) {
CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
}
-TEST_F(ChunkDemuxerTest, RemoveBeforeInitSegment) {
- EXPECT_CALL(*this, DemuxerOpened());
- demuxer_->Initialize(
- &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);
+TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
+ ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ // Set the append window to [50,280).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
- EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, true, true));
+ // Append a cluster that starts before and ends after the append window.
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
- demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(0),
- base::TimeDelta::FromMilliseconds(1));
+ // Verify that GOPs that start outside the window are not included
+ // in the buffer. Also verify that buffers that start inside the
+ // window and extend beyond the end of the window are not included.
+ CheckExpectedRanges(kSourceId, "{ [120,270) }");
+ CheckExpectedBuffers(stream, "120 150 180 210 240");
+
+ // Extend the append window to [50,650).
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
+
+ // Append more data and verify that the newly added buffers start at the
+ // next keyframe.
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ "360 390 420K 450 480 510 540K 570 600 630K");
+ CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}
-TEST_F(ChunkDemuxerTest, AppendWindow) {
- ASSERT_TRUE(InitDemuxer(false, true));
- DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
- // Set the append window to [20,280).
- demuxer_->SetAppendWindowStart(kSourceId,
- base::TimeDelta::FromMilliseconds(20));
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(280));
+ // Set the append window to [50,280).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
+
+ // Append a cluster that starts before and ends after the append window.
+ AppendSingleStreamCluster(
+ kSourceId, kAudioTrackNum,
+ "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
+
+ // Verify that frames that end outside the window are not included
+ // in the buffer. Also verify that buffers that start inside the
+ // window and extend beyond the end of the window are not included.
+ //
+ // The first 50ms of the range should be truncated since it overlaps
+ // the start of the append window.
+ CheckExpectedRanges(kSourceId, "{ [50,270) }");
+
+ // The "50P" buffer is the "0" buffer marked for complete discard. The next
+ // "50" buffer is the "30" buffer marked with 20ms of start discard.
+ CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
+
+ // Extend the append window to [50,650).
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
+
+ // Append more data and verify that a new range is created.
+ AppendSingleStreamCluster(
+ kSourceId, kAudioTrackNum,
+ "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
+ CheckExpectedRanges(kSourceId, "{ [50,270) [360,630) }");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+
+ // Set the append window to [10,20).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
// Append a cluster that starts before and ends after the append window.
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
+
+ // Verify that everything is dropped in this case. No partial append should
+ // be generated.
+ CheckExpectedRanges(kSourceId, "{ }");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_,
+ CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
+ true);
+ ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
+
+ // Set the append window to [50,150).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
+
+ // Read a WebM file into memory and send the data to the demuxer. The chunk
+ // size has been chosen carefully to ensure the preroll buffer used by the
+ // partial append window trim must come from a previous Append() call.
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("bear-320x240-audio-only.webm");
+ AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_,
+ CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
+ true);
+ ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
+
+ // Set the append window such that the first file is completely before the
+ // append window.
+ // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
+ // have the correct duration in their init segments, and the
+ // CreateInitDoneCB() call, above, is fixed to used that duration. See
+ // http://crbug.com/354284.
+ const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
+ append_window_start_for_next_append_ = duration_1;
+
+ // Read a WebM file into memory and append the data.
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("bear-320x240-audio-only.webm");
+ AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
+ CheckExpectedRanges(kSourceId, "{ }");
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ AudioDecoderConfig config_1 = stream->audio_decoder_config();
+
+ // Read in a second WebM with a different config and append the data.
+ scoped_refptr<DecoderBuffer> buffer2 =
+ ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
+ EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
+ AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
+ CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
+
+ Seek(duration_1);
+ ExpectConfigChanged(DemuxerStream::AUDIO);
+ ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
+ CheckExpectedBuffers(stream, "2746 2767 2789 2810");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ // Set the append window to [20,280).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
+
+ // Append a cluster that starts before and ends after the append
+ // window.
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"0K 30 60 90 120K 150 180 210 240K 270 300 330K");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
- // Verify that GOPs that start outside the window are not included
- // in the buffer. Also verify that buffers that extend beyond the
+ // Verify that text cues that start outside the window are not included
+ // in the buffer. Also verify that cues that extend beyond the
// window are not included.
- CheckExpectedRanges(kSourceId, "{ [120,300) }");
- CheckExpectedBuffers(stream, "120 150 180 210 240 270");
+ CheckExpectedRanges(kSourceId, "{ [120,270) }");
+ CheckExpectedBuffers(video_stream, "120 150 180 210 240");
+ CheckExpectedBuffers(text_stream, "100");
// Extend the append window to [20,650).
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(650));
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
- // Append more data and verify that adding buffers start at the next
- // keyframe.
+ // Append more data and verify that a new range is created.
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"360 390 420K 450 480 510 540K 570 600 630K");
- CheckExpectedRanges(kSourceId, "{ [120,300) [420,660) }");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
+ CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
+
+ // Seek to the new range and verify that the expected buffers are returned.
+ Seek(base::TimeDelta::FromMilliseconds(420));
+ CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
+ CheckExpectedBuffers(text_stream, "400 500");
}
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
- ASSERT_TRUE(InitDemuxer(true, true));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendGarbage();
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
demuxer_->StartWaitingForSeek(seek_time);
}
+TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
+
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ "0K 20K 40K 60K 80K 100K 120K 140K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ "0K 30 60 90 120K 150 180");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
+
+ CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+ CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
+ CheckExpectedBuffers(text_stream, "0 100 200");
+
+ // Remove the buffers that were added.
+ demuxer_->Remove(kSourceId, base::TimeDelta(),
+ base::TimeDelta::FromMilliseconds(300));
+
+ // Verify that all the appended data has been removed.
+ CheckExpectedRanges(kSourceId, "{ }");
+
+ // Append new buffers that are clearly different than the original
+ // ones and verify that only the new buffers are returned.
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ "1K 21K 41K 61K 81K 101K 121K 141K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ "1K 31 61 91 121K 151 181");
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
+
+ Seek(base::TimeDelta());
+ CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
+ CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
+ CheckExpectedBuffers(text_stream, "1 101 201");
+}
+
+TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+
+ // Set the duration to something small so that the append that
+ // follows updates the duration to reflect the end of the appended data.
+ EXPECT_CALL(host_, SetDuration(
+ base::TimeDelta::FromMilliseconds(1)));
+ demuxer_->SetDuration(0.001);
+
+ EXPECT_CALL(host_, SetDuration(
+ base::TimeDelta::FromMilliseconds(160)));
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ "0K 20K 40K 60K 80K 100K 120K 140K");
+
+ CheckExpectedRanges(kSourceId, "{ [0,160) }");
+ CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+
+ demuxer_->Remove(kSourceId,
+ base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
+ kInfiniteDuration());
+
+ Seek(base::TimeDelta());
+ CheckExpectedRanges(kSourceId, "{ [0,160) }");
+ CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+}
+
+// Verifies that a Seek() will complete without text cues for
+// the seek point and will return cues after the seek position
+// when they are eventually appended.
+TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
+
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
+ bool seek_cb_was_called = false;
+ demuxer_->StartWaitingForSeek(seek_time);
+ demuxer_->Seek(seek_time,
+ base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
+ message_loop_.RunUntilIdle();
+
+ EXPECT_FALSE(seek_cb_was_called);
+
+ bool text_read_done = false;
+ text_stream->Read(base::Bind(&OnReadDone,
+ base::TimeDelta::FromMilliseconds(125),
+ &text_read_done));
+
+ // Append audio & video data so the seek completes.
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ "0K 30 60 90 120K 150 180 210");
+
+ message_loop_.RunUntilIdle();
+ EXPECT_TRUE(seek_cb_was_called);
+ EXPECT_FALSE(text_read_done);
+
+ // Read some audio & video buffers to further verify seek completion.
+ CheckExpectedBuffers(audio_stream, "120 140");
+ CheckExpectedBuffers(video_stream, "120 150");
+
+ EXPECT_FALSE(text_read_done);
+
+ // Append text cues that start after the seek point and verify that
+ // they are returned by Read() calls.
+ AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
+
+ message_loop_.RunUntilIdle();
+ EXPECT_TRUE(text_read_done);
+
+ // NOTE: we start at 175 here because the buffer at 125 was returned
+ // to the pending read initiated above.
+ CheckExpectedBuffers(text_stream, "175 225");
+
+ // Verify that audio & video streams continue to return expected values.
+ CheckExpectedBuffers(audio_stream, "160 180");
+ CheckExpectedBuffers(video_stream, "180 210");
+}
+
+TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
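+ // The trailing |true| argument asks GenerateCluster() for a cluster whose
+ // size field is unknown.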
+ AppendCluster(GenerateCluster(0, 0, 4, true));
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ // A new cluster indicates end of the previous cluster with unknown size.
+ AppendCluster(GenerateCluster(46, 66, 5, true));
+ CheckExpectedRanges(kSourceId, "{ [0,115) }");
+}
+
+TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ // Add two clusters separated by Cues in a single Append() call.
+ scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
+ std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
+ data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
+ cluster = GenerateCluster(46, 66, 5, true);
+ data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
+ AppendData(&*data.begin(), data.size());
+
+ CheckExpectedRanges(kSourceId, "{ [0,115) }");
+}
+
+TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ AppendCluster(GenerateCluster(0, 0, 4));
+ AppendData(kCuesHeader, sizeof(kCuesHeader));
+ AppendCluster(GenerateCluster(46, 66, 5));
+ CheckExpectedRanges(kSourceId, "{ [0,115) }");
+}
+
} // namespace media
diff --git a/chromium/media/filters/clockless_video_frame_scheduler.cc b/chromium/media/filters/clockless_video_frame_scheduler.cc
new file mode 100644
index 00000000000..b37d4307763
--- /dev/null
+++ b/chromium/media/filters/clockless_video_frame_scheduler.cc
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/clockless_video_frame_scheduler.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+ClocklessVideoFrameScheduler::ClocklessVideoFrameScheduler(
+ const DisplayCB& display_cb)
+ : display_cb_(display_cb) {
+}
+
+ClocklessVideoFrameScheduler::~ClocklessVideoFrameScheduler() {
+}
+
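+// Displays |frame| immediately, ignoring |wall_ticks|, and then posts
+// |done_cb| back to the current message loop instead of running it
+// reentrantly.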
+void ClocklessVideoFrameScheduler::ScheduleVideoFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks /* wall_ticks */,
+ const DoneCB& done_cb) {
+ display_cb_.Run(frame);
+ base::MessageLoopProxy::current()->PostTask(
+ FROM_HERE, base::Bind(done_cb, frame, DISPLAYED));
+}
+
+void ClocklessVideoFrameScheduler::Reset() {
+}
+
+} // namespace media
diff --git a/chromium/media/filters/clockless_video_frame_scheduler.h b/chromium/media/filters/clockless_video_frame_scheduler.h
new file mode 100644
index 00000000000..e52a73b91ed
--- /dev/null
+++ b/chromium/media/filters/clockless_video_frame_scheduler.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_CLOCKLESS_VIDEO_FRAME_SCHEDULER_H_
+#define MEDIA_FILTERS_CLOCKLESS_VIDEO_FRAME_SCHEDULER_H_
+
+#include "media/filters/video_frame_scheduler.h"
+
+namespace media {
+
+// A scheduler that immediately displays frames.
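+// This is chiefly useful in tests and benchmarks, where frames should be
+// consumed as fast as possible rather than at their scheduled display times.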
+class ClocklessVideoFrameScheduler : public VideoFrameScheduler {
+ public:
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> DisplayCB;
+
+ explicit ClocklessVideoFrameScheduler(const DisplayCB& display_cb);
+ virtual ~ClocklessVideoFrameScheduler();
+
+ // VideoFrameScheduler implementation.
+ virtual void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ private:
+ DisplayCB display_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClocklessVideoFrameScheduler);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_CLOCKLESS_VIDEO_FRAME_SCHEDULER_H_
diff --git a/chromium/media/filters/decoder_selector.cc b/chromium/media/filters/decoder_selector.cc
new file mode 100644
index 00000000000..9020597798d
--- /dev/null
+++ b/chromium/media/filters/decoder_selector.cc
@@ -0,0 +1,242 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "decoder_selector.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/pipeline.h"
+#include "media/base/video_decoder.h"
+#include "media/filters/decoder_stream_traits.h"
+#include "media/filters/decrypting_audio_decoder.h"
+#include "media/filters/decrypting_demuxer_stream.h"
+#include "media/filters/decrypting_video_decoder.h"
+
+namespace media {
+
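+// Returns true if |stream| has a valid decoder config for its stream type.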
+static bool HasValidStreamConfig(DemuxerStream* stream) {
+ switch (stream->type()) {
+ case DemuxerStream::AUDIO:
+ return stream->audio_decoder_config().IsValidConfig();
+ case DemuxerStream::VIDEO:
+ return stream->video_decoder_config().IsValidConfig();
+ case DemuxerStream::UNKNOWN:
+ case DemuxerStream::TEXT:
+ case DemuxerStream::NUM_TYPES:
+ NOTREACHED();
+ }
+ return false;
+}
+
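+// Returns true if |stream|'s decoder config reports encrypted content.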
+static bool IsStreamEncrypted(DemuxerStream* stream) {
+ switch (stream->type()) {
+ case DemuxerStream::AUDIO:
+ return stream->audio_decoder_config().is_encrypted();
+ case DemuxerStream::VIDEO:
+ return stream->video_decoder_config().is_encrypted();
+ case DemuxerStream::UNKNOWN:
+ case DemuxerStream::TEXT:
+ case DemuxerStream::NUM_TYPES:
+ NOTREACHED();
+ }
+ return false;
+}
+
+template <DemuxerStream::Type StreamType>
+DecoderSelector<StreamType>::DecoderSelector(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ ScopedVector<Decoder> decoders,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ : task_runner_(task_runner),
+ decoders_(decoders.Pass()),
+ set_decryptor_ready_cb_(set_decryptor_ready_cb),
+ input_stream_(NULL),
+ weak_ptr_factory_(this) {}
+
+template <DemuxerStream::Type StreamType>
+DecoderSelector<StreamType>::~DecoderSelector() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(select_decoder_cb_.is_null());
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::SelectDecoder(
+ DemuxerStream* stream,
+ bool low_delay,
+ const SelectDecoderCB& select_decoder_cb,
+ const typename Decoder::OutputCB& output_cb) {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(stream);
+
+ // Make sure |select_decoder_cb| runs on a different execution stack.
+ select_decoder_cb_ = BindToCurrentLoop(select_decoder_cb);
+
+ if (!HasValidStreamConfig(stream)) {
+ DLOG(ERROR) << "Invalid stream config.";
+ ReturnNullDecoder();
+ return;
+ }
+
+ input_stream_ = stream;
+ low_delay_ = low_delay;
+ output_cb_ = output_cb;
+
+ if (!IsStreamEncrypted(input_stream_)) {
+ InitializeDecoder();
+ return;
+ }
+
+ // This could happen if Encrypted Media Extensions (EME) are not enabled.
+ if (set_decryptor_ready_cb_.is_null()) {
+ ReturnNullDecoder();
+ return;
+ }
+
+ decoder_.reset(new typename StreamTraits::DecryptingDecoderType(
+ task_runner_, set_decryptor_ready_cb_));
+
+ DecoderStreamTraits<StreamType>::Initialize(
+ decoder_.get(),
+ StreamTraits::GetDecoderConfig(*input_stream_),
+ low_delay_,
+ base::Bind(&DecoderSelector<StreamType>::DecryptingDecoderInitDone,
+ weak_ptr_factory_.GetWeakPtr()),
+ output_cb_);
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::Abort() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ // This could happen when SelectDecoder() was not called or when
+ // |select_decoder_cb_| was already posted but not fired (e.g. in the
+ // message loop queue).
+ if (select_decoder_cb_.is_null())
+ return;
+
+ // We must be trying to initialize the |decoder_| or the
+ // |decrypted_stream_|. Invalidate all weak pointers so that pending
+ // initialization callbacks won't fire.
+ weak_ptr_factory_.InvalidateWeakPtrs();
+
+ if (decoder_) {
+ // |decrypted_stream_| is either NULL or already initialized. We don't
+ // need to Stop() |decrypted_stream_| in either case.
+ decoder_->Stop();
+ ReturnNullDecoder();
+ return;
+ }
+
+ if (decrypted_stream_) {
+ decrypted_stream_->Stop(
+ base::Bind(&DecoderSelector<StreamType>::ReturnNullDecoder,
+ weak_ptr_factory_.GetWeakPtr()));
+ return;
+ }
+
+ NOTREACHED();
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::DecryptingDecoderInitDone(
+ PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (status == PIPELINE_OK) {
+ base::ResetAndReturn(&select_decoder_cb_)
+ .Run(decoder_.Pass(), scoped_ptr<DecryptingDemuxerStream>());
+ return;
+ }
+
+ decoder_.reset();
+
+ decrypted_stream_.reset(
+ new DecryptingDemuxerStream(task_runner_, set_decryptor_ready_cb_));
+
+ decrypted_stream_->Initialize(
+ input_stream_,
+ base::Bind(&DecoderSelector<StreamType>::DecryptingDemuxerStreamInitDone,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::DecryptingDemuxerStreamInitDone(
+ PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (status != PIPELINE_OK) {
+ ReturnNullDecoder();
+ return;
+ }
+
+ DCHECK(!IsStreamEncrypted(decrypted_stream_.get()));
+ input_stream_ = decrypted_stream_.get();
+ InitializeDecoder();
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::InitializeDecoder() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!decoder_);
+
+ if (decoders_.empty()) {
+ ReturnNullDecoder();
+ return;
+ }
+
+ decoder_.reset(decoders_.front());
+ decoders_.weak_erase(decoders_.begin());
+
+ DecoderStreamTraits<StreamType>::Initialize(
+ decoder_.get(),
+ StreamTraits::GetDecoderConfig(*input_stream_),
+ low_delay_,
+ base::Bind(&DecoderSelector<StreamType>::DecoderInitDone,
+ weak_ptr_factory_.GetWeakPtr()),
+ output_cb_);
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::DecoderInitDone(PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (status != PIPELINE_OK) {
+ decoder_.reset();
+ InitializeDecoder();
+ return;
+ }
+
+ base::ResetAndReturn(&select_decoder_cb_)
+ .Run(decoder_.Pass(), decrypted_stream_.Pass());
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::ReturnNullDecoder() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ base::ResetAndReturn(&select_decoder_cb_)
+ .Run(scoped_ptr<Decoder>(),
+ scoped_ptr<DecryptingDemuxerStream>());
+}
+
+// These explicit template instantiations tell the compiler that we will use
+// DecoderSelector with these arguments, allowing us to keep these definitions
+// in our .cc without causing linker errors. This also means that if anyone
+// tries to instantiate a DecoderSelector with anything but these two
+// specializations, they'll most likely get linker errors.
+template class DecoderSelector<DemuxerStream::AUDIO>;
+template class DecoderSelector<DemuxerStream::VIDEO>;
+
+} // namespace media
diff --git a/chromium/media/filters/decoder_selector.h b/chromium/media/filters/decoder_selector.h
new file mode 100644
index 00000000000..662e8082ea2
--- /dev/null
+++ b/chromium/media/filters/decoder_selector.h
@@ -0,0 +1,104 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_DECODER_SELECTOR_H_
+#define MEDIA_FILTERS_DECODER_SELECTOR_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/decryptor.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/pipeline_status.h"
+#include "media/filters/decoder_stream_traits.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+class DecoderBuffer;
+class DecryptingDemuxerStream;
+class Decryptor;
+
+// DecoderSelector creates (if necessary) and initializes the proper
+// Decoder for a given DemuxerStream. If the given DemuxerStream is
+// encrypted, a DecryptingDemuxerStream may also be created.
+// The template parameter |StreamType| is the type of stream we will be
+// selecting a decoder for.
+template<DemuxerStream::Type StreamType>
+class MEDIA_EXPORT DecoderSelector {
+ public:
+ typedef DecoderStreamTraits<StreamType> StreamTraits;
+ typedef typename StreamTraits::DecoderType Decoder;
+
+ // Indicates completion of Decoder selection.
+ // - First parameter: The initialized Decoder. If it's set to NULL, then
+ // Decoder initialization failed.
+ // - Second parameter: The initialized DecryptingDemuxerStream. If it's not
+ // NULL, then a DecryptingDemuxerStream is created and initialized to do
+ // decryption for the initialized Decoder.
+ // Note: The caller owns the selected Decoder and DecryptingDemuxerStream.
+ // The caller should call DecryptingDemuxerStream::Reset() before
+ // calling Decoder::Reset() to release any pending decryption or read.
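+ //
+ // A hypothetical completion handler (names here are illustrative only):
+ //   void OnDecoderSelected(scoped_ptr<Decoder> decoder,
+ //                          scoped_ptr<DecryptingDemuxerStream> stream);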
+ typedef base::Callback<
+ void(scoped_ptr<Decoder>,
+ scoped_ptr<DecryptingDemuxerStream>)>
+ SelectDecoderCB;
+
+ // |decoders| contains the Decoders to use when initializing.
+ //
+ // |set_decryptor_ready_cb| is optional. If |set_decryptor_ready_cb| is null,
+ // no decryptor will be available to perform decryption.
+ DecoderSelector(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ ScopedVector<Decoder> decoders,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ ~DecoderSelector();
+
+ // Initializes and selects a Decoder that can decode the |stream|.
+ // Selected Decoder (and DecryptingDemuxerStream) is returned via
+ // the |select_decoder_cb|.
+ void SelectDecoder(DemuxerStream* stream,
+ bool low_delay,
+ const SelectDecoderCB& select_decoder_cb,
+ const typename Decoder::OutputCB& output_cb);
+
+ // Aborts any pending Decoder selection and, if one is pending, immediately
+ // fires |select_decoder_cb| with NULL for both parameters.
+ void Abort();
+
+ private:
+ void DecryptingDecoderInitDone(PipelineStatus status);
+ void DecryptingDemuxerStreamInitDone(PipelineStatus status);
+ void InitializeDecoder();
+ void DecoderInitDone(PipelineStatus status);
+ void ReturnNullDecoder();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ ScopedVector<Decoder> decoders_;
+ SetDecryptorReadyCB set_decryptor_ready_cb_;
+
+ DemuxerStream* input_stream_;
+ bool low_delay_;
+ SelectDecoderCB select_decoder_cb_;
+ typename Decoder::OutputCB output_cb_;
+
+ scoped_ptr<Decoder> decoder_;
+ scoped_ptr<DecryptingDemuxerStream> decrypted_stream_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<DecoderSelector> weak_ptr_factory_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DecoderSelector);
+};
+
+typedef DecoderSelector<DemuxerStream::VIDEO> VideoDecoderSelector;
+typedef DecoderSelector<DemuxerStream::AUDIO> AudioDecoderSelector;
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_DECODER_SELECTOR_H_
diff --git a/chromium/media/filters/decoder_stream.cc b/chromium/media/filters/decoder_stream.cc
new file mode 100644
index 00000000000..a912398f539
--- /dev/null
+++ b/chromium/media/filters/decoder_stream.cc
@@ -0,0 +1,598 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/decoder_stream.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/debug/trace_event.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/video_decoder.h"
+#include "media/filters/decrypting_demuxer_stream.h"
+
+namespace media {
+
+// TODO(rileya): Devise a better way of specifying trace/UMA/etc strings for
+// templated classes such as this.
+template <DemuxerStream::Type StreamType>
+static const char* GetTraceString();
+
+#define FUNCTION_DVLOG(level) \
+ DVLOG(level) << __FUNCTION__ << \
+ "<" << DecoderStreamTraits<StreamType>::ToString() << ">"
+
+template <>
+const char* GetTraceString<DemuxerStream::VIDEO>() {
+ return "DecoderStream<VIDEO>::Decode";
+}
+
+template <>
+const char* GetTraceString<DemuxerStream::AUDIO>() {
+ return "DecoderStream<AUDIO>::Decode";
+}
+
+template <DemuxerStream::Type StreamType>
+DecoderStream<StreamType>::DecoderStream(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ ScopedVector<Decoder> decoders,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ : task_runner_(task_runner),
+ state_(STATE_UNINITIALIZED),
+ stream_(NULL),
+ decoder_selector_(
+ new DecoderSelector<StreamType>(task_runner,
+ decoders.Pass(),
+ set_decryptor_ready_cb)),
+ active_splice_(false),
+ pending_decode_requests_(0),
+ weak_factory_(this) {}
+
+template <DemuxerStream::Type StreamType>
+DecoderStream<StreamType>::~DecoderStream() {
+ DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_STOPPED) << state_;
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::Initialize(DemuxerStream* stream,
+ bool low_delay,
+ const StatisticsCB& statistics_cb,
+ const InitCB& init_cb) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_UNINITIALIZED) << state_;
+ DCHECK(init_cb_.is_null());
+ DCHECK(!init_cb.is_null());
+
+ statistics_cb_ = statistics_cb;
+ init_cb_ = init_cb;
+ stream_ = stream;
+ low_delay_ = low_delay;
+
+ state_ = STATE_INITIALIZING;
+ // TODO(xhwang): DecoderSelector only needs a config to select a decoder.
+ decoder_selector_->SelectDecoder(
+ stream, low_delay,
+ base::Bind(&DecoderStream<StreamType>::OnDecoderSelected,
+ weak_factory_.GetWeakPtr()),
+ base::Bind(&DecoderStream<StreamType>::OnDecodeOutputReady,
+ weak_factory_.GetWeakPtr()));
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::Read(const ReadCB& read_cb) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_INITIALIZING &&
+ state_ != STATE_STOPPED) << state_;
+ // No two reads may be in flight at any time.
+ DCHECK(read_cb_.is_null());
+ // No reads are allowed while resetting or stopping.
+ DCHECK(reset_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
+
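+ // Fail fast on a sticky error; otherwise serve end-of-stream or any queued
+ // output before deciding whether to hold the read pending.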
+ if (state_ == STATE_ERROR) {
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(read_cb, DECODE_ERROR, scoped_refptr<Output>()));
+ return;
+ }
+
+ if (state_ == STATE_END_OF_STREAM && ready_outputs_.empty()) {
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(read_cb, OK, StreamTraits::CreateEOSOutput()));
+ return;
+ }
+
+ if (!ready_outputs_.empty()) {
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(read_cb, OK, ready_outputs_.front()));
+ ready_outputs_.pop_front();
+ } else {
+ read_cb_ = read_cb;
+ }
+
+ if (state_ == STATE_NORMAL && CanDecodeMore())
+ ReadFromDemuxerStream();
+}
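+
+// Illustrative only -- not part of the original change. A client's read
+// handler might look like the sketch below; OnFrameRead(), FinishPlayback(),
+// RenderAndReadAgain(), ReadAgainOrWait() and ReportFatalError() are
+// hypothetical client functions.
+//
+//   void Client::OnFrameRead(VideoFrameStream::Status status,
+//                            const scoped_refptr<VideoFrame>& frame) {
+//     switch (status) {
+//       case VideoFrameStream::OK:
+//         if (frame->end_of_stream())
+//           FinishPlayback();           // Stream fully decoded.
+//         else
+//           RenderAndReadAgain(frame);  // Consume, then issue the next Read().
+//         break;
+//       case VideoFrameStream::ABORTED:
+//         // A Reset() interrupted this read; wait for the reset to complete.
+//         break;
+//       case VideoFrameStream::DEMUXER_READ_ABORTED:
+//         ReadAgainOrWait();  // The demuxer aborted; it is safe to retry.
+//         break;
+//       case VideoFrameStream::DECODE_ERROR:
+//       case VideoFrameStream::DECRYPT_ERROR:
+//         ReportFatalError();
+//         break;
+//     }
+//   }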
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::Reset(const base::Closure& closure) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
+ DCHECK(reset_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
+
+ reset_cb_ = closure;
+
+ if (!read_cb_.is_null()) {
+ task_runner_->PostTask(FROM_HERE, base::Bind(
+ base::ResetAndReturn(&read_cb_), ABORTED, scoped_refptr<Output>()));
+ }
+
+ ready_outputs_.clear();
+
+  // During decoder reinitialization, the Decoder does not need to be (and
+  // cannot be) Reset(). |decrypting_demuxer_stream_| was already reset before
+  // decoder reinitialization started.
+ if (state_ == STATE_REINITIALIZING_DECODER)
+ return;
+
+  // During a pending demuxer read, and when not using DecryptingDemuxerStream,
+  // the Decoder will be reset after the demuxer read returns
+  // (in OnBufferReady()).
+ if (state_ == STATE_PENDING_DEMUXER_READ && !decrypting_demuxer_stream_)
+ return;
+
+ if (decrypting_demuxer_stream_) {
+ decrypting_demuxer_stream_->Reset(base::Bind(
+ &DecoderStream<StreamType>::ResetDecoder, weak_factory_.GetWeakPtr()));
+ return;
+ }
+
+ ResetDecoder();
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::Stop(const base::Closure& closure) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(state_, STATE_STOPPED) << state_;
+ DCHECK(stop_cb_.is_null());
+
+ stop_cb_ = closure;
+
+ if (state_ == STATE_INITIALIZING) {
+ decoder_selector_->Abort();
+ return;
+ }
+
+ DCHECK(init_cb_.is_null());
+
+ // All pending callbacks will be dropped.
+ weak_factory_.InvalidateWeakPtrs();
+
+ // Post callbacks to prevent reentrance into this object.
+ if (!read_cb_.is_null()) {
+ task_runner_->PostTask(FROM_HERE, base::Bind(
+ base::ResetAndReturn(&read_cb_), ABORTED, scoped_refptr<Output>()));
+ }
+ if (!reset_cb_.is_null())
+ task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&reset_cb_));
+
+ if (decrypting_demuxer_stream_) {
+ decrypting_demuxer_stream_->Stop(base::Bind(
+ &DecoderStream<StreamType>::StopDecoder, weak_factory_.GetWeakPtr()));
+ return;
+ }
+
+ // We may not have a |decoder_| if Stop() was called during initialization.
+ if (decoder_) {
+ StopDecoder();
+ return;
+ }
+
+ state_ = STATE_STOPPED;
+ stream_ = NULL;
+ decoder_.reset();
+ decrypting_demuxer_stream_.reset();
+ task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&stop_cb_));
+}
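+
+// Illustrative note: because every callback above is posted rather than run
+// synchronously, a client observes them in FIFO order on |task_runner_|,
+// e.g. read_cb(ABORTED, NULL), then reset_cb(), then stop_cb(). |stop_cb_| is
+// always last, posted either here or by StopDecoder().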
+
+template <DemuxerStream::Type StreamType>
+bool DecoderStream<StreamType>::CanReadWithoutStalling() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ return !ready_outputs_.empty() || decoder_->CanReadWithoutStalling();
+}
+
+template <>
+bool DecoderStream<DemuxerStream::AUDIO>::CanReadWithoutStalling() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ return true;
+}
+
+template <DemuxerStream::Type StreamType>
+int DecoderStream<StreamType>::GetMaxDecodeRequests() const {
+ return decoder_->GetMaxDecodeRequests();
+}
+
+template <>
+int DecoderStream<DemuxerStream::AUDIO>::GetMaxDecodeRequests() const {
+ return 1;
+}
+
+template <DemuxerStream::Type StreamType>
+bool DecoderStream<StreamType>::CanDecodeMore() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+  // Limit the total number of outputs stored in |ready_outputs_| plus those
+  // still being decoded. It only makes sense to saturate the decoder
+  // completely when the output queue is empty.
+ int num_decodes =
+ static_cast<int>(ready_outputs_.size()) + pending_decode_requests_;
+ return num_decodes < GetMaxDecodeRequests();
+}
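+
+// Worked example (illustrative): if the selected decoder reports
+// GetMaxDecodeRequests() == 4 while |ready_outputs_| holds 2 outputs and 1
+// decode is in flight, num_decodes == 3 < 4, so one more Decode() may be
+// issued. Only once the output queue drains can all 4 requests be in flight.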
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::OnDecoderSelected(
+ scoped_ptr<Decoder> selected_decoder,
+ scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK(!init_cb_.is_null());
+ DCHECK(read_cb_.is_null());
+ DCHECK(reset_cb_.is_null());
+
+ decoder_selector_.reset();
+ if (decrypting_demuxer_stream)
+ stream_ = decrypting_demuxer_stream.get();
+
+ if (!selected_decoder) {
+ state_ = STATE_UNINITIALIZED;
+ StreamTraits::FinishInitialization(
+ base::ResetAndReturn(&init_cb_), selected_decoder.get(), stream_);
+ } else {
+ state_ = STATE_NORMAL;
+ decoder_ = selected_decoder.Pass();
+ decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
+ StreamTraits::FinishInitialization(
+ base::ResetAndReturn(&init_cb_), decoder_.get(), stream_);
+ }
+
+ // Stop() called during initialization.
+ if (!stop_cb_.is_null()) {
+ Stop(base::ResetAndReturn(&stop_cb_));
+ return;
+ }
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::SatisfyRead(
+ Status status,
+ const scoped_refptr<Output>& output) {
+ DCHECK(!read_cb_.is_null());
+ base::ResetAndReturn(&read_cb_).Run(status, output);
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::Decode(
+ const scoped_refptr<DecoderBuffer>& buffer) {
+ FUNCTION_DVLOG(2);
+ DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
+ DCHECK_LT(pending_decode_requests_, GetMaxDecodeRequests());
+ DCHECK(reset_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
+ DCHECK(buffer);
+
+ int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
+
+ TRACE_EVENT_ASYNC_BEGIN0("media", GetTraceString<StreamType>(), this);
+ ++pending_decode_requests_;
+ decoder_->Decode(buffer,
+ base::Bind(&DecoderStream<StreamType>::OnDecodeDone,
+ weak_factory_.GetWeakPtr(),
+ buffer_size,
+ buffer->end_of_stream()));
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::FlushDecoder() {
+ Decode(DecoderBuffer::CreateEOSBuffer());
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::OnDecodeDone(int buffer_size,
+ bool end_of_stream,
+ typename Decoder::Status status) {
+ FUNCTION_DVLOG(2) << status;
+ DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
+ state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR)
+ << state_;
+ DCHECK(stop_cb_.is_null());
+ DCHECK_GT(pending_decode_requests_, 0);
+
+ --pending_decode_requests_;
+
+ TRACE_EVENT_ASYNC_END0("media", GetTraceString<StreamType>(), this);
+
+ if (state_ == STATE_ERROR) {
+ DCHECK(read_cb_.is_null());
+ return;
+ }
+
+ // Drop decoding result if Reset() was called during decoding.
+ // The resetting process will be handled when the decoder is reset.
+ if (!reset_cb_.is_null())
+ return;
+
+ switch (status) {
+ case Decoder::kDecodeError:
+ case Decoder::kDecryptError:
+ state_ = STATE_ERROR;
+ ready_outputs_.clear();
+ if (!read_cb_.is_null())
+ SatisfyRead(DECODE_ERROR, NULL);
+ return;
+
+ case Decoder::kAborted:
+ // Decoder can return kAborted only when Reset is pending.
+ NOTREACHED();
+ return;
+
+ case Decoder::kOk:
+ // Any successful decode counts!
+ if (buffer_size > 0)
+ StreamTraits::ReportStatistics(statistics_cb_, buffer_size);
+
+ if (state_ == STATE_NORMAL) {
+ if (end_of_stream) {
+ state_ = STATE_END_OF_STREAM;
+ if (ready_outputs_.empty() && !read_cb_.is_null())
+ SatisfyRead(OK, StreamTraits::CreateEOSOutput());
+ return;
+ }
+
+ if (CanDecodeMore())
+ ReadFromDemuxerStream();
+ return;
+ }
+
+ if (state_ == STATE_FLUSHING_DECODER && !pending_decode_requests_)
+ ReinitializeDecoder();
+ return;
+ }
+}
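+
+// Illustrative state walk (no errors): STATE_NORMAL --Decode(EOS)-->
+// STATE_END_OF_STREAM, after which every Read() is satisfied with
+// StreamTraits::CreateEOSOutput(). A config change instead routes
+// STATE_NORMAL -> STATE_FLUSHING_DECODER -> STATE_REINITIALIZING_DECODER ->
+// STATE_NORMAL (see OnBufferReady() and ReinitializeDecoder() below).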
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::OnDecodeOutputReady(
+ const scoped_refptr<Output>& output) {
+ FUNCTION_DVLOG(2) << ": " << output->timestamp().InMilliseconds() << " ms";
+ DCHECK(output);
+ DCHECK(!output->end_of_stream());
+ DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
+ state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR)
+ << state_;
+
+ if (state_ == STATE_ERROR) {
+ DCHECK(read_cb_.is_null());
+ return;
+ }
+
+ // Drop decoding result if Reset() was called during decoding.
+ // The resetting process will be handled when the decoder is reset.
+ if (!reset_cb_.is_null())
+ return;
+
+ // TODO(xhwang): VideoDecoder doesn't need to return EOS after it's flushed.
+ // Fix all decoders and remove this block.
+ // Store decoded output.
+ ready_outputs_.push_back(output);
+
+ if (read_cb_.is_null())
+ return;
+
+  // Satisfy the outstanding read request with the oldest ready output.
+  scoped_refptr<Output> read_result = ready_outputs_.front();
+  ready_outputs_.pop_front();
+  SatisfyRead(OK, read_result);
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::ReadFromDemuxerStream() {
+ FUNCTION_DVLOG(2);
+ DCHECK_EQ(state_, STATE_NORMAL) << state_;
+ DCHECK(CanDecodeMore());
+ DCHECK(reset_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
+
+ state_ = STATE_PENDING_DEMUXER_READ;
+ stream_->Read(base::Bind(&DecoderStream<StreamType>::OnBufferReady,
+ weak_factory_.GetWeakPtr()));
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::OnBufferReady(
+ DemuxerStream::Status status,
+ const scoped_refptr<DecoderBuffer>& buffer) {
+  FUNCTION_DVLOG(2) << ": " << status << ", "
+                    << (buffer.get() ? buffer->AsHumanReadableString()
+                                     : "NULL");
+
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR ||
+ state_ == STATE_STOPPED)
+ << state_;
+ DCHECK_EQ(buffer.get() != NULL, status == DemuxerStream::kOk) << status;
+ DCHECK(stop_cb_.is_null());
+
+  // Decoding has been stopped (e.g. due to an error).
+ if (state_ != STATE_PENDING_DEMUXER_READ) {
+ DCHECK(state_ == STATE_ERROR || state_ == STATE_STOPPED);
+ DCHECK(read_cb_.is_null());
+ return;
+ }
+
+ state_ = STATE_NORMAL;
+
+ if (status == DemuxerStream::kConfigChanged) {
+    FUNCTION_DVLOG(2) << ": ConfigChanged";
+ DCHECK(stream_->SupportsConfigChanges());
+
+ if (!config_change_observer_cb_.is_null())
+ config_change_observer_cb_.Run();
+
+ state_ = STATE_FLUSHING_DECODER;
+ if (!reset_cb_.is_null()) {
+ // If we are using DecryptingDemuxerStream, we already called DDS::Reset()
+      // which will continue the resetting process in its callback.
+ if (!decrypting_demuxer_stream_)
+ Reset(base::ResetAndReturn(&reset_cb_));
+ // Reinitialization will continue after Reset() is done.
+ } else {
+ FlushDecoder();
+ }
+ return;
+ }
+
+ if (!reset_cb_.is_null()) {
+ // If we are using DecryptingDemuxerStream, we already called DDS::Reset()
+    // which will continue the resetting process in its callback.
+ if (!decrypting_demuxer_stream_)
+ Reset(base::ResetAndReturn(&reset_cb_));
+ return;
+ }
+
+ if (status == DemuxerStream::kAborted) {
+ if (!read_cb_.is_null())
+ SatisfyRead(DEMUXER_READ_ABORTED, NULL);
+ return;
+ }
+
+ if (!splice_observer_cb_.is_null() && !buffer->end_of_stream()) {
+ const bool has_splice_ts = buffer->splice_timestamp() != kNoTimestamp();
+ if (active_splice_ || has_splice_ts) {
+ splice_observer_cb_.Run(buffer->splice_timestamp());
+ active_splice_ = has_splice_ts;
+ }
+ }
+
+ DCHECK(status == DemuxerStream::kOk) << status;
+ Decode(buffer);
+
+ // Read more data if the decoder supports multiple parallel decoding requests.
+ if (CanDecodeMore() && !buffer->end_of_stream())
+ ReadFromDemuxerStream();
+}
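+
+// Illustrative splice timeline (hypothetical timestamps): for buffers whose
+// splice_timestamp() values are [T, T, kNoTimestamp, kNoTimestamp], the
+// observer above fires for both T buffers (setting |active_splice_|) and once
+// for the first kNoTimestamp buffer (clearing it); the trailing kNoTimestamp
+// buffer produces no notification.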
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::ReinitializeDecoder() {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_FLUSHING_DECODER) << state_;
+ DCHECK_EQ(pending_decode_requests_, 0);
+
+ DCHECK(StreamTraits::GetDecoderConfig(*stream_).IsValidConfig());
+ state_ = STATE_REINITIALIZING_DECODER;
+ DecoderStreamTraits<StreamType>::Initialize(
+ decoder_.get(),
+ StreamTraits::GetDecoderConfig(*stream_),
+ low_delay_,
+ base::Bind(&DecoderStream<StreamType>::OnDecoderReinitialized,
+ weak_factory_.GetWeakPtr()),
+ base::Bind(&DecoderStream<StreamType>::OnDecodeOutputReady,
+ weak_factory_.GetWeakPtr()));
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::OnDecoderReinitialized(PipelineStatus status) {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER) << state_;
+ DCHECK(stop_cb_.is_null());
+
+  // ReinitializeDecoder() can be called in two cases:
+  // 1. Flushing the decoder finished (see OnDecodeDone()).
+  // 2. Reset() was called while flushing the decoder (see OnDecoderReset()).
+  // In addition, Reset() can be called during a pending ReinitializeDecoder().
+  // This function needs to handle all of these cases!
+
+ state_ = (status == PIPELINE_OK) ? STATE_NORMAL : STATE_ERROR;
+
+ if (!reset_cb_.is_null()) {
+ base::ResetAndReturn(&reset_cb_).Run();
+ return;
+ }
+
+ if (read_cb_.is_null())
+ return;
+
+ if (state_ == STATE_ERROR) {
+ SatisfyRead(DECODE_ERROR, NULL);
+ return;
+ }
+
+ ReadFromDemuxerStream();
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::ResetDecoder() {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
+ state_ == STATE_ERROR || state_ == STATE_END_OF_STREAM) << state_;
+ DCHECK(!reset_cb_.is_null());
+
+ decoder_->Reset(base::Bind(&DecoderStream<StreamType>::OnDecoderReset,
+ weak_factory_.GetWeakPtr()));
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::OnDecoderReset() {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
+ state_ == STATE_ERROR || state_ == STATE_END_OF_STREAM) << state_;
+  // If Reset() was called during a pending read, the read callback must be
+  // fired before the reset callback is fired.
+ DCHECK(read_cb_.is_null());
+ DCHECK(!reset_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
+
+ if (state_ != STATE_FLUSHING_DECODER) {
+ state_ = STATE_NORMAL;
+ active_splice_ = false;
+ base::ResetAndReturn(&reset_cb_).Run();
+ return;
+ }
+
+ // The resetting process will be continued in OnDecoderReinitialized().
+ ReinitializeDecoder();
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::StopDecoder() {
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
+ DCHECK(!stop_cb_.is_null());
+
+ state_ = STATE_STOPPED;
+ decoder_->Stop();
+ stream_ = NULL;
+ decoder_.reset();
+ decrypting_demuxer_stream_.reset();
+ // Post |stop_cb_| because pending |read_cb_| and/or |reset_cb_| are also
+ // posted in Stop().
+ task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&stop_cb_));
+}
+
+template class DecoderStream<DemuxerStream::VIDEO>;
+template class DecoderStream<DemuxerStream::AUDIO>;
+
+} // namespace media
diff --git a/chromium/media/filters/decoder_stream.h b/chromium/media/filters/decoder_stream.h
new file mode 100644
index 00000000000..7cb78738dac
--- /dev/null
+++ b/chromium/media/filters/decoder_stream.h
@@ -0,0 +1,223 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_DECODER_STREAM_H_
+#define MEDIA_FILTERS_DECODER_STREAM_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/decryptor.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+#include "media/filters/decoder_selector.h"
+#include "media/filters/decoder_stream_traits.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+class DecryptingDemuxerStream;
+
+// Wraps a DemuxerStream and a list of Decoders and provides decoded
+// output to its client (e.g. Audio/VideoRendererImpl).
+template<DemuxerStream::Type StreamType>
+class MEDIA_EXPORT DecoderStream {
+ public:
+ typedef DecoderStreamTraits<StreamType> StreamTraits;
+ typedef typename StreamTraits::DecoderType Decoder;
+ typedef typename StreamTraits::OutputType Output;
+ typedef typename StreamTraits::StreamInitCB InitCB;
+ typedef typename Decoder::Status DecoderStatus;
+
+ enum Status {
+ OK, // Everything went as planned.
+ ABORTED, // Read aborted due to Reset() during pending read.
+ DEMUXER_READ_ABORTED, // Demuxer returned aborted read.
+ DECODE_ERROR, // Decoder returned decode error.
+ DECRYPT_ERROR // Decoder returned decrypt error.
+ };
+
+ // Indicates completion of a DecoderStream read.
+ typedef base::Callback<void(Status, const scoped_refptr<Output>&)> ReadCB;
+
+ DecoderStream(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ ScopedVector<Decoder> decoders,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ virtual ~DecoderStream();
+
+ // Initializes the DecoderStream and returns the initialization result
+ // through |init_cb|. Note that |init_cb| is always called asynchronously.
+ void Initialize(DemuxerStream* stream,
+ bool low_delay,
+ const StatisticsCB& statistics_cb,
+ const InitCB& init_cb);
+
+ // Reads a decoded Output and returns it via the |read_cb|. Note that
+ // |read_cb| is always called asynchronously. This method should only be
+ // called after initialization has succeeded and must not be called during
+ // any pending Reset() and/or Stop().
+ void Read(const ReadCB& read_cb);
+
+ // Resets the decoder, flushes all decoded outputs and/or internal buffers,
+  // fires any pending read callback, and calls |closure| on completion.
+ // Note that |closure| is always called asynchronously. This method should
+ // only be called after initialization has succeeded and must not be called
+ // during any pending Reset() and/or Stop().
+ void Reset(const base::Closure& closure);
+
+  // Stops the decoder, fires any pending read or reset callback, and calls
+  // |closure| on completion. Note that |closure| is always called
+  // asynchronously. The DecoderStream cannot be used anymore after
+ // it is stopped. This method can be called at any time but not during another
+ // pending Stop().
+ void Stop(const base::Closure& closure);
+
+ // Returns true if the decoder currently has the ability to decode and return
+ // an Output.
+ // TODO(rileya): Remove the need for this by refactoring Decoder queueing
+ // behavior.
+ bool CanReadWithoutStalling() const;
+
+ // Returns maximum concurrent decode requests for the current |decoder_|.
+ int GetMaxDecodeRequests() const;
+
+ // Returns true if one more decode request can be submitted to the decoder.
+ bool CanDecodeMore() const;
+
+ // Allows callers to register for notification of splice buffers from the
+ // demuxer. I.e., DecoderBuffer::splice_timestamp() is not kNoTimestamp().
+ //
+  // The observer will be notified of every buffer that has a
+  // splice_timestamp(), and of the first subsequent buffer whose
+  // splice_timestamp() is kNoTimestamp().
+ typedef base::Callback<void(base::TimeDelta)> SpliceObserverCB;
+ void set_splice_observer(const SpliceObserverCB& splice_observer) {
+ splice_observer_cb_ = splice_observer;
+ }
+
+ // Allows callers to register for notification of config changes; this is
+  // called immediately after receiving the 'kConfigChanged' status from the
+ // DemuxerStream, before any action is taken to handle the config change.
+ typedef base::Closure ConfigChangeObserverCB;
+ void set_config_change_observer(
+ const ConfigChangeObserverCB& config_change_observer) {
+ config_change_observer_cb_ = config_change_observer;
+ }
+
+ private:
+ enum State {
+ STATE_UNINITIALIZED,
+ STATE_INITIALIZING,
+ STATE_NORMAL, // Includes idle, pending decoder decode/reset/stop.
+ STATE_FLUSHING_DECODER,
+ STATE_PENDING_DEMUXER_READ,
+ STATE_REINITIALIZING_DECODER,
+ STATE_END_OF_STREAM, // End of stream reached; returns EOS on all reads.
+ STATE_STOPPED,
+ STATE_ERROR
+ };
+
+  // Called when |decoder_selector_| has selected |selected_decoder|.
+  // |decrypting_demuxer_stream| is also populated if a DecryptingDemuxerStream
+  // was created to help decrypt the encrypted stream.
+ void OnDecoderSelected(
+ scoped_ptr<Decoder> selected_decoder,
+ scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream);
+
+  // Satisfies the pending |read_cb_| with |status| and |output|.
+ void SatisfyRead(Status status,
+ const scoped_refptr<Output>& output);
+
+ // Decodes |buffer| and returns the result via OnDecodeOutputReady().
+ void Decode(const scoped_refptr<DecoderBuffer>& buffer);
+
+ // Flushes the decoder with an EOS buffer to retrieve internally buffered
+ // decoder output.
+ void FlushDecoder();
+
+ // Callback for Decoder::Decode().
+ void OnDecodeDone(int buffer_size, bool end_of_stream, DecoderStatus status);
+
+ // Output callback passed to Decoder::Initialize().
+ void OnDecodeOutputReady(const scoped_refptr<Output>& output);
+
+ // Reads a buffer from |stream_| and returns the result via OnBufferReady().
+ void ReadFromDemuxerStream();
+
+ // Callback for DemuxerStream::Read().
+ void OnBufferReady(DemuxerStream::Status status,
+ const scoped_refptr<DecoderBuffer>& buffer);
+
+ void ReinitializeDecoder();
+
+ // Callback for Decoder reinitialization.
+ void OnDecoderReinitialized(PipelineStatus status);
+
+ void ResetDecoder();
+ void OnDecoderReset();
+
+ void StopDecoder();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ State state_;
+
+ StatisticsCB statistics_cb_;
+ InitCB init_cb_;
+
+ ReadCB read_cb_;
+ base::Closure reset_cb_;
+ base::Closure stop_cb_;
+
+ DemuxerStream* stream_;
+ bool low_delay_;
+
+ scoped_ptr<DecoderSelector<StreamType> > decoder_selector_;
+
+ // These two will be set by DecoderSelector::SelectDecoder().
+ scoped_ptr<Decoder> decoder_;
+ scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream_;
+
+ SpliceObserverCB splice_observer_cb_;
+ ConfigChangeObserverCB config_change_observer_cb_;
+
+ // If a splice_timestamp() has been seen, this is true until a
+ // splice_timestamp() of kNoTimestamp() is encountered.
+ bool active_splice_;
+
+ // Decoded buffers that haven't been read yet. Used when the decoder supports
+ // parallel decoding.
+ std::list<scoped_refptr<Output> > ready_outputs_;
+
+ // Number of outstanding decode requests sent to the |decoder_|.
+ int pending_decode_requests_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<DecoderStream<StreamType> > weak_factory_;
+
+ // This is required so the VideoFrameStream can access private members in
+ // FinishInitialization() and ReportStatistics().
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DecoderStream);
+};
+
+template <>
+bool DecoderStream<DemuxerStream::AUDIO>::CanReadWithoutStalling() const;
+
+template <>
+int DecoderStream<DemuxerStream::AUDIO>::GetMaxDecodeRequests() const;
+
+typedef DecoderStream<DemuxerStream::VIDEO> VideoFrameStream;
+typedef DecoderStream<DemuxerStream::AUDIO> AudioBufferStream;
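+
+// Illustrative usage sketch -- not part of the original source. |decoders|,
+// the DemuxerStream and all callbacks are assumed to be supplied by the
+// owning renderer:
+//
+//   AudioBufferStream stream(task_runner, decoders.Pass(),
+//                            set_decryptor_ready_cb);
+//   stream.Initialize(demuxer_stream, false /* low_delay */, statistics_cb,
+//                     init_cb);
+//   // After |init_cb| reports success:
+//   stream.Read(read_cb);         // At most one Read() outstanding.
+//   stream.Reset(reset_done_cb);  // E.g. on seek; aborts a pending Read().
+//   stream.Stop(stop_done_cb);    // Final teardown; unusable afterwards.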
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_DECODER_STREAM_H_
diff --git a/chromium/media/filters/decoder_stream_traits.cc b/chromium/media/filters/decoder_stream_traits.cc
new file mode 100644
index 00000000000..c86862f7fa4
--- /dev/null
+++ b/chromium/media/filters/decoder_stream_traits.cc
@@ -0,0 +1,109 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/decoder_stream_traits.h"
+
+#include "base/logging.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/video_decoder.h"
+#include "media/base/video_decoder_config.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+std::string DecoderStreamTraits<DemuxerStream::AUDIO>::ToString() {
+ return "Audio";
+}
+
+void DecoderStreamTraits<DemuxerStream::AUDIO>::Initialize(
+ DecoderType* decoder,
+ const DecoderConfigType& config,
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ decoder->Initialize(config, status_cb, output_cb);
+}
+
+bool DecoderStreamTraits<DemuxerStream::AUDIO>::FinishInitialization(
+ const StreamInitCB& init_cb,
+ DecoderType* decoder,
+ DemuxerStream* stream) {
+ DCHECK(stream);
+ if (!decoder) {
+ init_cb.Run(false);
+ return false;
+ }
+ init_cb.Run(true);
+ return true;
+}
+
+void DecoderStreamTraits<DemuxerStream::AUDIO>::ReportStatistics(
+ const StatisticsCB& statistics_cb,
+ int bytes_decoded) {
+ PipelineStatistics statistics;
+ statistics.audio_bytes_decoded = bytes_decoded;
+ statistics_cb.Run(statistics);
+}
+
+DecoderStreamTraits<DemuxerStream::AUDIO>::DecoderConfigType
+ DecoderStreamTraits<DemuxerStream::AUDIO>::GetDecoderConfig(
+ DemuxerStream& stream) {
+ return stream.audio_decoder_config();
+}
+
+scoped_refptr<DecoderStreamTraits<DemuxerStream::AUDIO>::OutputType>
+ DecoderStreamTraits<DemuxerStream::AUDIO>::CreateEOSOutput() {
+ return OutputType::CreateEOSBuffer();
+}
+
+std::string DecoderStreamTraits<DemuxerStream::VIDEO>::ToString() {
+ return "Video";
+}
+
+void DecoderStreamTraits<DemuxerStream::VIDEO>::Initialize(
+ DecoderType* decoder,
+ const DecoderConfigType& config,
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ decoder->Initialize(config, low_delay, status_cb, output_cb);
+}
+
+bool DecoderStreamTraits<DemuxerStream::VIDEO>::FinishInitialization(
+ const StreamInitCB& init_cb,
+ DecoderType* decoder,
+ DemuxerStream* stream) {
+ DCHECK(stream);
+ if (!decoder) {
+ init_cb.Run(false);
+ return false;
+ }
+ if (decoder->NeedsBitstreamConversion())
+ stream->EnableBitstreamConverter();
+ init_cb.Run(true);
+ return true;
+}
+
+void DecoderStreamTraits<DemuxerStream::VIDEO>::ReportStatistics(
+ const StatisticsCB& statistics_cb,
+ int bytes_decoded) {
+ PipelineStatistics statistics;
+ statistics.video_bytes_decoded = bytes_decoded;
+ statistics_cb.Run(statistics);
+}
+
+DecoderStreamTraits<DemuxerStream::VIDEO>::DecoderConfigType
+ DecoderStreamTraits<DemuxerStream::VIDEO>::GetDecoderConfig(
+ DemuxerStream& stream) {
+ return stream.video_decoder_config();
+}
+
+scoped_refptr<DecoderStreamTraits<DemuxerStream::VIDEO>::OutputType>
+ DecoderStreamTraits<DemuxerStream::VIDEO>::CreateEOSOutput() {
+ return OutputType::CreateEOSFrame();
+}
+
+} // namespace media
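+
+// Illustrative only: DecoderStream<StreamType> resolves these traits at
+// compile time, so one templated implementation serves both stream types:
+//
+//   typedef DecoderStreamTraits<StreamType> StreamTraits;
+//   StreamTraits::Initialize(decoder_.get(),
+//                            StreamTraits::GetDecoderConfig(*stream_),
+//                            low_delay_, status_cb, output_cb);
+//
+// For AUDIO this dispatches to AudioDecoder::Initialize(), which ignores
+// |low_delay|; for VIDEO it forwards |low_delay| to
+// VideoDecoder::Initialize().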
diff --git a/chromium/media/filters/decoder_stream_traits.h b/chromium/media/filters/decoder_stream_traits.h
new file mode 100644
index 00000000000..59e534d2e68
--- /dev/null
+++ b/chromium/media/filters/decoder_stream_traits.h
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_DECODER_STREAM_TRAITS_H_
+#define MEDIA_FILTERS_DECODER_STREAM_TRAITS_H_
+
+#include "media/base/demuxer_stream.h"
+#include "media/base/pipeline_status.h"
+
+namespace media {
+
+class AudioBuffer;
+class AudioDecoder;
+class DecryptingAudioDecoder;
+class DecryptingVideoDecoder;
+class DemuxerStream;
+class VideoDecoder;
+class VideoFrame;
+
+template <DemuxerStream::Type StreamType>
+struct DecoderStreamTraits {};
+
+template <>
+struct DecoderStreamTraits<DemuxerStream::AUDIO> {
+ typedef AudioBuffer OutputType;
+ typedef AudioDecoder DecoderType;
+ typedef AudioDecoderConfig DecoderConfigType;
+ typedef DecryptingAudioDecoder DecryptingDecoderType;
+ typedef base::Callback<void(bool success)> StreamInitCB;
+ typedef base::Callback<void(const scoped_refptr<OutputType>&)> OutputCB;
+
+ static std::string ToString();
+ static void Initialize(DecoderType* decoder,
+ const DecoderConfigType& config,
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb);
+ static bool FinishInitialization(const StreamInitCB& init_cb,
+ DecoderType* decoder,
+ DemuxerStream* stream);
+ static void ReportStatistics(const StatisticsCB& statistics_cb,
+ int bytes_decoded);
+ static DecoderConfigType GetDecoderConfig(DemuxerStream& stream);
+ static scoped_refptr<OutputType> CreateEOSOutput();
+};
+
+template <>
+struct DecoderStreamTraits<DemuxerStream::VIDEO> {
+ typedef VideoFrame OutputType;
+ typedef VideoDecoder DecoderType;
+ typedef VideoDecoderConfig DecoderConfigType;
+ typedef DecryptingVideoDecoder DecryptingDecoderType;
+ typedef base::Callback<void(bool success)> StreamInitCB;
+ typedef base::Callback<void(const scoped_refptr<OutputType>&)> OutputCB;
+
+ static std::string ToString();
+ static void Initialize(DecoderType* decoder,
+ const DecoderConfigType& config,
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb);
+ static bool FinishInitialization(const StreamInitCB& init_cb,
+ DecoderType* decoder,
+ DemuxerStream* stream);
+ static void ReportStatistics(const StatisticsCB& statistics_cb,
+ int bytes_decoded);
+ static DecoderConfigType GetDecoderConfig(DemuxerStream& stream);
+ static scoped_refptr<OutputType> CreateEOSOutput();
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_DECODER_STREAM_TRAITS_H_
diff --git a/chromium/media/filters/decrypting_audio_decoder.cc b/chromium/media/filters/decrypting_audio_decoder.cc
index 2c144b4fc7e..136e171d738 100644
--- a/chromium/media/filters/decrypting_audio_decoder.cc
+++ b/chromium/media/filters/decrypting_audio_decoder.cc
@@ -10,11 +10,11 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decryptor.h"
@@ -35,33 +35,27 @@ static inline bool IsOutOfSync(const base::TimeDelta& timestamp_1,
}
DecryptingAudioDecoder::DecryptingAudioDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- weak_factory_(this),
+ : task_runner_(task_runner),
state_(kUninitialized),
- demuxer_stream_(NULL),
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
key_added_while_decode_pending_(false),
- bits_per_channel_(0),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- samples_per_second_(0) {
-}
+ weak_factory_(this) {}
-void DecryptingAudioDecoder::Initialize(
- DemuxerStream* stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) {
+void DecryptingAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
DVLOG(2) << "Initialize()";
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, kUninitialized) << state_;
- DCHECK(stream);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_cb_.is_null());
+ DCHECK(reset_cb_.is_null());
weak_this_ = weak_factory_.GetWeakPtr();
init_cb_ = BindToCurrentLoop(status_cb);
+ output_cb_ = BindToCurrentLoop(output_cb);
- const AudioDecoderConfig& config = stream->audio_decoder_config();
if (!config.IsValidConfig()) {
DLOG(ERROR) << "Invalid audio stream config.";
base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_DECODE);
@@ -74,53 +68,60 @@ void DecryptingAudioDecoder::Initialize(
return;
}
- DCHECK(!demuxer_stream_);
- demuxer_stream_ = stream;
- statistics_cb_ = statistics_cb;
+ config_ = config;
+
+ if (state_ == kUninitialized) {
+ state_ = kDecryptorRequested;
+ set_decryptor_ready_cb_.Run(BindToCurrentLoop(
+ base::Bind(&DecryptingAudioDecoder::SetDecryptor, weak_this_)));
+ return;
+ }
- state_ = kDecryptorRequested;
- set_decryptor_ready_cb_.Run(BindToCurrentLoop(
- base::Bind(&DecryptingAudioDecoder::SetDecryptor, weak_this_)));
+ // Reinitialization (i.e. upon a config change)
+ decryptor_->DeinitializeDecoder(Decryptor::kAudio);
+ InitializeDecoder();
}
-void DecryptingAudioDecoder::Read(const ReadCB& read_cb) {
- DVLOG(3) << "Read()";
- DCHECK(message_loop_->BelongsToCurrentThread());
+void DecryptingAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) {
+ DVLOG(3) << "Decode()";
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle || state_ == kDecodeFinished) << state_;
- DCHECK(!read_cb.is_null());
- CHECK(read_cb_.is_null()) << "Overlapping decodes are not supported.";
+ DCHECK(!decode_cb.is_null());
+ CHECK(decode_cb_.is_null()) << "Overlapping decodes are not supported.";
- read_cb_ = BindToCurrentLoop(read_cb);
+ decode_cb_ = BindToCurrentLoop(decode_cb);
// Return empty (end-of-stream) frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&read_cb_).Run(kOk, AudioBuffer::CreateEOSBuffer());
+ output_cb_.Run(AudioBuffer::CreateEOSBuffer());
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
return;
}
- if (!queued_audio_frames_.empty()) {
- base::ResetAndReturn(&read_cb_).Run(kOk, queued_audio_frames_.front());
- queued_audio_frames_.pop_front();
- return;
+ // Initialize the |next_output_timestamp_| to be the timestamp of the first
+ // non-EOS buffer.
+ if (timestamp_helper_->base_timestamp() == kNoTimestamp() &&
+ !buffer->end_of_stream()) {
+ timestamp_helper_->SetBaseTimestamp(buffer->timestamp());
}
- state_ = kPendingDemuxerRead;
- ReadFromDemuxerStream();
+ pending_buffer_to_decode_ = buffer;
+ state_ = kPendingDecode;
+ DecodePendingBuffer();
}
void DecryptingAudioDecoder::Reset(const base::Closure& closure) {
DVLOG(2) << "Reset() - state: " << state_;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle ||
- state_ == kPendingConfigChange ||
- state_ == kPendingDemuxerRead ||
state_ == kPendingDecode ||
state_ == kWaitingForKey ||
state_ == kDecodeFinished) << state_;
DCHECK(init_cb_.is_null()); // No Reset() during pending initialization.
DCHECK(reset_cb_.is_null());
- reset_cb_ = closure;
+ reset_cb_ = BindToCurrentLoop(closure);
decryptor_->ResetDecoder(Decryptor::kAudio);
@@ -128,44 +129,53 @@ void DecryptingAudioDecoder::Reset(const base::Closure& closure) {
// Defer the resetting process in this case. The |reset_cb_| will be fired
// after the read callback is fired - see DecryptAndDecodeBuffer() and
// DeliverFrame().
- if (state_ == kPendingConfigChange ||
- state_ == kPendingDemuxerRead ||
- state_ == kPendingDecode) {
- DCHECK(!read_cb_.is_null());
+ if (state_ == kPendingDecode) {
+ DCHECK(!decode_cb_.is_null());
return;
}
if (state_ == kWaitingForKey) {
- DCHECK(!read_cb_.is_null());
+ DCHECK(!decode_cb_.is_null());
pending_buffer_to_decode_ = NULL;
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kAborted);
}
- DCHECK(read_cb_.is_null());
+ DCHECK(decode_cb_.is_null());
DoReset();
}
-int DecryptingAudioDecoder::bits_per_channel() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return bits_per_channel_;
-}
+void DecryptingAudioDecoder::Stop() {
+ DVLOG(2) << "Stop() - state: " << state_;
+ DCHECK(task_runner_->BelongsToCurrentThread());
-ChannelLayout DecryptingAudioDecoder::channel_layout() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return channel_layout_;
-}
+ // Invalidate all weak pointers so that pending callbacks won't be fired into
+ // this object.
+ weak_factory_.InvalidateWeakPtrs();
-int DecryptingAudioDecoder::samples_per_second() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return samples_per_second_;
+ if (decryptor_) {
+ decryptor_->DeinitializeDecoder(Decryptor::kAudio);
+ decryptor_ = NULL;
+ }
+ if (!set_decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&set_decryptor_ready_cb_).Run(DecryptorReadyCB());
+ pending_buffer_to_decode_ = NULL;
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
+ if (!decode_cb_.is_null())
+ base::ResetAndReturn(&decode_cb_).Run(kAborted);
+ if (!reset_cb_.is_null())
+ base::ResetAndReturn(&reset_cb_).Run();
+
+ state_ = kStopped;
}
DecryptingAudioDecoder::~DecryptingAudioDecoder() {
+ DCHECK(state_ == kUninitialized || state_ == kStopped) << state_;
}
void DecryptingAudioDecoder::SetDecryptor(Decryptor* decryptor) {
DVLOG(2) << "SetDecryptor()";
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kDecryptorRequested) << state_;
DCHECK(!init_cb_.is_null());
DCHECK(!set_decryptor_ready_cb_.is_null());
@@ -175,156 +185,52 @@ void DecryptingAudioDecoder::SetDecryptor(Decryptor* decryptor) {
if (!decryptor) {
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
// TODO(xhwang): Add kError state. See http://crbug.com/251503
- state_ = kDecodeFinished;
+ state_ = kStopped;
return;
}
decryptor_ = decryptor;
- const AudioDecoderConfig& input_config =
- demuxer_stream_->audio_decoder_config();
- AudioDecoderConfig config;
- config.Initialize(input_config.codec(),
- kSampleFormatS16,
- input_config.channel_layout(),
- input_config.samples_per_second(),
- input_config.extra_data(),
- input_config.extra_data_size(),
- input_config.is_encrypted(),
- false,
- base::TimeDelta(),
- base::TimeDelta());
+ InitializeDecoder();
+}
+void DecryptingAudioDecoder::InitializeDecoder() {
state_ = kPendingDecoderInit;
decryptor_->InitializeAudioDecoder(
- config,
+ config_,
BindToCurrentLoop(base::Bind(
&DecryptingAudioDecoder::FinishInitialization, weak_this_)));
}
void DecryptingAudioDecoder::FinishInitialization(bool success) {
DVLOG(2) << "FinishInitialization()";
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, kPendingDecoderInit) << state_;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == kPendingDecoderInit) << state_;
DCHECK(!init_cb_.is_null());
DCHECK(reset_cb_.is_null()); // No Reset() before initialization finished.
- DCHECK(read_cb_.is_null()); // No Read() before initialization finished.
+ DCHECK(decode_cb_.is_null()); // No Decode() before initialization finished.
if (!success) {
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
- state_ = kDecodeFinished;
+ state_ = kStopped;
return;
}
// Success!
- UpdateDecoderConfig();
+ timestamp_helper_.reset(
+ new AudioTimestampHelper(config_.samples_per_second()));
decryptor_->RegisterNewKeyCB(
- Decryptor::kAudio, BindToCurrentLoop(base::Bind(
- &DecryptingAudioDecoder::OnKeyAdded, weak_this_)));
+ Decryptor::kAudio,
+ BindToCurrentLoop(
+ base::Bind(&DecryptingAudioDecoder::OnKeyAdded, weak_this_)));
state_ = kIdle;
base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
}
-void DecryptingAudioDecoder::FinishConfigChange(bool success) {
- DVLOG(2) << "FinishConfigChange()";
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, kPendingConfigChange) << state_;
- DCHECK(!read_cb_.is_null());
-
- if (!success) {
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
- state_ = kDecodeFinished;
- if (!reset_cb_.is_null())
- base::ResetAndReturn(&reset_cb_).Run();
- return;
- }
-
- // Config change succeeded.
- UpdateDecoderConfig();
-
- if (!reset_cb_.is_null()) {
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
- DoReset();
- return;
- }
-
- state_ = kPendingDemuxerRead;
- ReadFromDemuxerStream();
-}
-
-void DecryptingAudioDecoder::ReadFromDemuxerStream() {
- DCHECK_EQ(state_, kPendingDemuxerRead) << state_;
- DCHECK(!read_cb_.is_null());
-
- demuxer_stream_->Read(
- base::Bind(&DecryptingAudioDecoder::DecryptAndDecodeBuffer, weak_this_));
-}
-
-void DecryptingAudioDecoder::DecryptAndDecodeBuffer(
- DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& buffer) {
- DVLOG(3) << "DecryptAndDecodeBuffer()";
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, kPendingDemuxerRead) << state_;
- DCHECK(!read_cb_.is_null());
- DCHECK_EQ(buffer.get() != NULL, status == DemuxerStream::kOk) << status;
-
- if (status == DemuxerStream::kConfigChanged) {
- DVLOG(2) << "DecryptAndDecodeBuffer() - kConfigChanged";
-
- const AudioDecoderConfig& input_config =
- demuxer_stream_->audio_decoder_config();
- AudioDecoderConfig config;
- config.Initialize(input_config.codec(),
- kSampleFormatS16,
- input_config.channel_layout(),
- input_config.samples_per_second(),
- input_config.extra_data(),
- input_config.extra_data_size(),
- input_config.is_encrypted(),
- false,
- base::TimeDelta(),
- base::TimeDelta());
-
- state_ = kPendingConfigChange;
- decryptor_->DeinitializeDecoder(Decryptor::kAudio);
- decryptor_->InitializeAudioDecoder(
- config, BindToCurrentLoop(base::Bind(
- &DecryptingAudioDecoder::FinishConfigChange, weak_this_)));
- return;
- }
-
- if (!reset_cb_.is_null()) {
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
- DoReset();
- return;
- }
-
- if (status == DemuxerStream::kAborted) {
- DVLOG(2) << "DecryptAndDecodeBuffer() - kAborted";
- state_ = kIdle;
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
- return;
- }
-
- DCHECK_EQ(status, DemuxerStream::kOk);
-
- // Initialize the |next_output_timestamp_| to be the timestamp of the first
- // non-EOS buffer.
- if (timestamp_helper_->base_timestamp() == kNoTimestamp() &&
- !buffer->end_of_stream()) {
- timestamp_helper_->SetBaseTimestamp(buffer->timestamp());
- }
-
- pending_buffer_to_decode_ = buffer;
- state_ = kPendingDecode;
- DecodePendingBuffer();
-}
-
void DecryptingAudioDecoder::DecodePendingBuffer() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
int buffer_size = 0;
@@ -343,11 +249,10 @@ void DecryptingAudioDecoder::DeliverFrame(
Decryptor::Status status,
const Decryptor::AudioBuffers& frames) {
DVLOG(3) << "DeliverFrame() - status: " << status;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
- DCHECK(!read_cb_.is_null());
+ DCHECK(!decode_cb_.is_null());
DCHECK(pending_buffer_to_decode_.get());
- DCHECK(queued_audio_frames_.empty());
bool need_to_try_again_if_nokey_is_returned = key_added_while_decode_pending_;
key_added_while_decode_pending_ = false;
@@ -357,7 +262,7 @@ void DecryptingAudioDecoder::DeliverFrame(
pending_buffer_to_decode_ = NULL;
if (!reset_cb_.is_null()) {
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kAborted);
DoReset();
return;
}
@@ -366,8 +271,8 @@ void DecryptingAudioDecoder::DeliverFrame(
if (status == Decryptor::kError) {
DVLOG(2) << "DeliverFrame() - kError";
- state_ = kDecodeFinished;
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
+    state_ = kDecodeFinished;  // TODO(xhwang): Add kError state.
+ base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
return;
}
@@ -387,37 +292,32 @@ void DecryptingAudioDecoder::DeliverFrame(
return;
}
- // The buffer has been accepted by the decoder, let's report statistics.
- if (buffer_size) {
- PipelineStatistics statistics;
- statistics.audio_bytes_decoded = buffer_size;
- statistics_cb_.Run(statistics);
- }
-
if (status == Decryptor::kNeedMoreData) {
DVLOG(2) << "DeliverFrame() - kNeedMoreData";
- if (scoped_pending_buffer_to_decode->end_of_stream()) {
- state_ = kDecodeFinished;
- base::ResetAndReturn(&read_cb_).Run(kOk, AudioBuffer::CreateEOSBuffer());
- return;
- }
-
- state_ = kPendingDemuxerRead;
- ReadFromDemuxerStream();
+ state_ = scoped_pending_buffer_to_decode->end_of_stream() ? kDecodeFinished
+ : kIdle;
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
return;
}
DCHECK_EQ(status, Decryptor::kSuccess);
DCHECK(!frames.empty());
- EnqueueFrames(frames);
+ ProcessDecodedFrames(frames);
+
+ if (scoped_pending_buffer_to_decode->end_of_stream()) {
+ // Set |pending_buffer_to_decode_| back as we need to keep flushing the
+ // decryptor until kNeedMoreData is returned.
+ pending_buffer_to_decode_ = scoped_pending_buffer_to_decode;
+ DecodePendingBuffer();
+ return;
+ }
state_ = kIdle;
- base::ResetAndReturn(&read_cb_).Run(kOk, queued_audio_frames_.front());
- queued_audio_frames_.pop_front();
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
}
void DecryptingAudioDecoder::OnKeyAdded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (state_ == kPendingDecode) {
key_added_while_decode_pending_ = true;
@@ -432,28 +332,18 @@ void DecryptingAudioDecoder::OnKeyAdded() {
void DecryptingAudioDecoder::DoReset() {
DCHECK(init_cb_.is_null());
- DCHECK(read_cb_.is_null());
+ DCHECK(decode_cb_.is_null());
timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
state_ = kIdle;
base::ResetAndReturn(&reset_cb_).Run();
}
-void DecryptingAudioDecoder::UpdateDecoderConfig() {
- const AudioDecoderConfig& config = demuxer_stream_->audio_decoder_config();
- bits_per_channel_ = kSupportedBitsPerChannel;
- channel_layout_ = config.channel_layout();
- samples_per_second_ = config.samples_per_second();
- timestamp_helper_.reset(new AudioTimestampHelper(samples_per_second_));
-}
-
-void DecryptingAudioDecoder::EnqueueFrames(
+void DecryptingAudioDecoder::ProcessDecodedFrames(
const Decryptor::AudioBuffers& frames) {
- queued_audio_frames_ = frames;
-
- for (Decryptor::AudioBuffers::iterator iter = queued_audio_frames_.begin();
- iter != queued_audio_frames_.end();
+ for (Decryptor::AudioBuffers::const_iterator iter = frames.begin();
+ iter != frames.end();
++iter) {
- scoped_refptr<AudioBuffer>& frame = *iter;
+ scoped_refptr<AudioBuffer> frame = *iter;
DCHECK(!frame->end_of_stream()) << "EOS frame returned.";
DCHECK_GT(frame->frame_count(), 0) << "Empty frame returned.";
@@ -467,9 +357,9 @@ void DecryptingAudioDecoder::EnqueueFrames(
}
frame->set_timestamp(current_time);
- frame->set_duration(
- timestamp_helper_->GetFrameDuration(frame->frame_count()));
timestamp_helper_->AddFrames(frame->frame_count());
+
+ output_cb_.Run(frame);
}
}
diff --git a/chromium/media/filters/decrypting_audio_decoder.h b/chromium/media/filters/decrypting_audio_decoder.h
index defe0137d4b..6d1df7c3c93 100644
--- a/chromium/media/filters/decrypting_audio_decoder.h
+++ b/chromium/media/filters/decrypting_audio_decoder.h
@@ -15,7 +15,7 @@
#include "media/base/demuxer_stream.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -26,7 +26,7 @@ class Decryptor;
// Decryptor-based AudioDecoder implementation that can decrypt and decode
// encrypted audio buffers and return decrypted and decompressed audio frames.
-// All public APIs and callbacks are trampolined to the |message_loop_| so
+// All public APIs and callbacks are trampolined to the |task_runner_| so
// that no locks are required for thread safety.
class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
public:
@@ -38,19 +38,18 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
static const int kSupportedBitsPerChannel;
DecryptingAudioDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb);
virtual ~DecryptingAudioDecoder();
// AudioDecoder implementation.
- virtual void Initialize(DemuxerStream* stream,
+ virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) OVERRIDE;
- virtual void Read(const ReadCB& read_cb) OVERRIDE;
+ const OutputCB& output_cb) OVERRIDE;
+ virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual int bits_per_channel() OVERRIDE;
- virtual ChannelLayout channel_layout() OVERRIDE;
- virtual int samples_per_second() OVERRIDE;
+ virtual void Stop() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -62,27 +61,21 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
kDecryptorRequested,
kPendingDecoderInit,
kIdle,
- kPendingConfigChange,
- kPendingDemuxerRead,
kPendingDecode,
kWaitingForKey,
kDecodeFinished,
+ kStopped,
};
// Callback for DecryptorHost::RequestDecryptor().
void SetDecryptor(Decryptor* decryptor);
+ // Initializes the audio decoder on the |decryptor_| with |config_|.
+ void InitializeDecoder();
+
// Callback for Decryptor::InitializeAudioDecoder() during initialization.
void FinishInitialization(bool success);
- // Callback for Decryptor::InitializeAudioDecoder() during config change.
- void FinishConfigChange(bool success);
-
- // Reads from the demuxer stream with corresponding callback method.
- void ReadFromDemuxerStream();
- void DecryptAndDecodeBuffer(DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& buffer);
-
void DecodePendingBuffer();
// Callback for Decryptor::DecryptAndDecodeAudio().
@@ -97,34 +90,28 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// Resets decoder and calls |reset_cb_|.
void DoReset();
- // Updates audio configs from |demuxer_stream_| and resets
- // |output_timestamp_base_| and |total_samples_decoded_|.
- void UpdateDecoderConfig();
+ // Sets timestamps for |frames| and then passes them to |output_cb_|.
+ void ProcessDecodedFrames(const Decryptor::AudioBuffers& frames);
- // Sets timestamp and duration for |queued_audio_frames_| to make sure the
- // renderer always receives continuous frames without gaps and overlaps.
- void EnqueueFrames(const Decryptor::AudioBuffers& frames);
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<DecryptingAudioDecoder> weak_factory_;
- base::WeakPtr<DecryptingAudioDecoder> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
State state_;
PipelineStatusCB init_cb_;
- StatisticsCB statistics_cb_;
- ReadCB read_cb_;
+ OutputCB output_cb_;
+ DecodeCB decode_cb_;
base::Closure reset_cb_;
+ base::Closure stop_cb_;
- // Pointer to the demuxer stream that will feed us compressed buffers.
- DemuxerStream* demuxer_stream_;
+ // The current decoder configuration.
+ AudioDecoderConfig config_;
// Callback to request/cancel decryptor creation notification.
SetDecryptorReadyCB set_decryptor_ready_cb_;
Decryptor* decryptor_;
- // The buffer returned by the demuxer that needs decrypting/decoding.
+ // The buffer that needs decrypting/decoding.
scoped_refptr<media::DecoderBuffer> pending_buffer_to_decode_;
// Indicates the situation where new key is added during pending decode
@@ -134,15 +121,12 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// decryption key.
bool key_added_while_decode_pending_;
- Decryptor::AudioBuffers queued_audio_frames_;
-
- // Decoded audio format.
- int bits_per_channel_;
- ChannelLayout channel_layout_;
- int samples_per_second_;
-
scoped_ptr<AudioTimestampHelper> timestamp_helper_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<DecryptingAudioDecoder> weak_factory_;
+ base::WeakPtr<DecryptingAudioDecoder> weak_this_;
+
DISALLOW_COPY_AND_ASSIGN(DecryptingAudioDecoder);
};
diff --git a/chromium/media/filters/decrypting_audio_decoder_unittest.cc b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
index 2f07e231c03..8f187e1ae75 100644
--- a/chromium/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
@@ -20,17 +20,19 @@
using ::testing::_;
using ::testing::AtMost;
-using ::testing::IsNull;
using ::testing::SaveArg;
using ::testing::StrictMock;
namespace media {
+const int kSampleRate = 44100;
+
// Make sure the kFakeAudioFrameSize is a valid frame size for all audio decoder
// configs used in this test.
-static const int kFakeAudioFrameSize = 48;
-static const uint8 kFakeKeyId[] = { 0x4b, 0x65, 0x79, 0x20, 0x49, 0x44 };
-static const uint8 kFakeIv[DecryptConfig::kDecryptionKeySize] = { 0 };
+const int kFakeAudioFrameSize = 48;
+const uint8 kFakeKeyId[] = { 0x4b, 0x65, 0x79, 0x20, 0x49, 0x44 };
+const uint8 kFakeIv[DecryptConfig::kDecryptionKeySize] = { 0 };
+const int kDecodingDelay = 3;
// Create a fake non-empty encrypted buffer.
static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
@@ -40,7 +42,6 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
std::string(reinterpret_cast<const char*>(kFakeKeyId),
arraysize(kFakeKeyId)),
std::string(reinterpret_cast<const char*>(kFakeIv), arraysize(kFakeIv)),
- 0,
std::vector<SubsampleEntry>())));
return buffer;
}
@@ -50,7 +51,7 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
namespace {
ACTION_P(ReturnBuffer, buffer) {
- arg0.Run(buffer.get() ? DemuxerStream::kOk : DemuxerStream::kAborted, buffer);
+ return buffer;
}
ACTION_P(RunCallbackIfNotNull, param) {
@@ -58,14 +59,6 @@ ACTION_P(RunCallbackIfNotNull, param) {
arg0.Run(param);
}
-ACTION_P2(ResetAndRunCallback, callback, param) {
- base::ResetAndReturn(callback).Run(param);
-}
-
-MATCHER(IsEndOfStream, "end of stream") {
- return (arg->end_of_stream());
-}
-
} // namespace
class DecryptingAudioDecoderTest : public testing::Test {
@@ -77,11 +70,16 @@ class DecryptingAudioDecoderTest : public testing::Test {
&DecryptingAudioDecoderTest::RequestDecryptorNotification,
base::Unretained(this)))),
decryptor_(new StrictMock<MockDecryptor>()),
- demuxer_(new StrictMock<MockDemuxerStream>(DemuxerStream::AUDIO)),
+ num_decrypt_and_decode_calls_(0),
+ num_frames_in_decryptor_(0),
encrypted_buffer_(CreateFakeEncryptedBuffer()),
decoded_frame_(NULL),
- end_of_stream_frame_(AudioBuffer::CreateEOSBuffer()),
- decoded_frame_list_() {
+ decoded_frame_list_() {}
+
+ virtual ~DecryptingAudioDecoderTest() {
+ EXPECT_CALL(*this, RequestDecryptorNotification(_))
+ .Times(testing::AnyNumber());
+ Stop();
}
void InitializeAndExpectStatus(const AudioDecoderConfig& config,
@@ -90,16 +88,18 @@ class DecryptingAudioDecoderTest : public testing::Test {
// invalid values (that CreateEmptyBuffer() doesn't support), tweak them
// just for CreateEmptyBuffer().
int channels = ChannelLayoutToChannelCount(config.channel_layout());
- if (channels < 1)
- channels = 1;
- decoded_frame_ = AudioBuffer::CreateEmptyBuffer(
- channels, kFakeAudioFrameSize, kNoTimestamp(), kNoTimestamp());
+ if (channels < 0)
+ channels = 0;
+ decoded_frame_ = AudioBuffer::CreateEmptyBuffer(config.channel_layout(),
+ channels,
+ kSampleRate,
+ kFakeAudioFrameSize,
+ kNoTimestamp());
decoded_frame_list_.push_back(decoded_frame_);
- demuxer_->set_audio_decoder_config(config);
- decoder_->Initialize(demuxer_.get(), NewExpectedStatusCB(status),
- base::Bind(&MockStatisticsCB::OnStatistics,
- base::Unretained(&statistics_cb_)));
+ decoder_->Initialize(config, NewExpectedStatusCB(status),
+ base::Bind(&DecryptingAudioDecoderTest::FrameReady,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -113,91 +113,98 @@ class DecryptingAudioDecoderTest : public testing::Test {
.WillOnce(SaveArg<1>(&key_added_cb_));
config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true, true,
- base::TimeDelta(), base::TimeDelta());
+ CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, true, true,
+ base::TimeDelta(), 0);
InitializeAndExpectStatus(config_, PIPELINE_OK);
+ }
- EXPECT_EQ(DecryptingAudioDecoder::kSupportedBitsPerChannel,
- decoder_->bits_per_channel());
- EXPECT_EQ(config_.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(config_.samples_per_second(), decoder_->samples_per_second());
+ void Reinitialize() {
+ ReinitializeConfigChange(config_);
}
- void ReadAndExpectFrameReadyWith(
- AudioDecoder::Status status,
- const scoped_refptr<AudioBuffer>& audio_frame) {
- if (status != AudioDecoder::kOk)
- EXPECT_CALL(*this, FrameReady(status, IsNull()));
- else if (audio_frame->end_of_stream())
- EXPECT_CALL(*this, FrameReady(status, IsEndOfStream()));
- else
- EXPECT_CALL(*this, FrameReady(status, audio_frame));
-
- decoder_->Read(base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)));
+ void ReinitializeConfigChange(const AudioDecoderConfig& new_config) {
+ EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio));
+ EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
+ .WillOnce(RunCallback<1>(true));
+ EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kAudio, _))
+ .WillOnce(SaveArg<1>(&key_added_cb_));
+ decoder_->Initialize(new_config, NewExpectedStatusCB(PIPELINE_OK),
+ base::Bind(&DecryptingAudioDecoderTest::FrameReady,
+ base::Unretained(this)));
+ }
+
+ // Decode |buffer| and expect DecodeDone to get called with |status|.
+ void DecodeAndExpect(const scoped_refptr<DecoderBuffer>& buffer,
+ AudioDecoder::Status status) {
+ EXPECT_CALL(*this, DecodeDone(status));
+ decoder_->Decode(buffer,
+ base::Bind(&DecryptingAudioDecoderTest::DecodeDone,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
}
+ // Helper function to simulate the decrypting and decoding process in the
+ // |decryptor_| with a decoding delay of kDecodingDelay buffers.
+ void DecryptAndDecodeAudio(const scoped_refptr<DecoderBuffer>& encrypted,
+ const Decryptor::AudioDecodeCB& audio_decode_cb) {
+ num_decrypt_and_decode_calls_++;
+ if (!encrypted->end_of_stream())
+ num_frames_in_decryptor_++;
+
+ if (num_decrypt_and_decode_calls_ <= kDecodingDelay ||
+ num_frames_in_decryptor_ == 0) {
+ audio_decode_cb.Run(Decryptor::kNeedMoreData, Decryptor::AudioBuffers());
+ return;
+ }
+
+ num_frames_in_decryptor_--;
+ audio_decode_cb.Run(Decryptor::kSuccess,
+ Decryptor::AudioBuffers(1, decoded_frame_));
+ }
+
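// NOTE (editorial): the helper above emulates a decoder with a fixed pipeline
// delay: the first kDecodingDelay calls answer kNeedMoreData while buffers
// queue up, and every later call drains one queued frame. A minimal
// standalone sketch of the same bookkeeping (names are illustrative, not
// part of this patch):
struct FakeDelayedDecoder {
  static const int kDelay = 3;  // mirrors kDecodingDelay
  int calls = 0;
  int queued = 0;
  // Returns true when this call produces a frame (kSuccess) and false when
  // it reports kNeedMoreData.
  bool Decode(bool end_of_stream) {
    ++calls;
    if (!end_of_stream)
      ++queued;
    if (calls <= kDelay || queued == 0)
      return false;
    --queued;  // one buffered frame drains out
    return true;
  }
};
// Feeding four non-EOS buffers yields false, false, false, true, matching
// the kDecodingDelay + 1 decodes in EnterNormalDecodingState(); feeding EOS
// afterwards drains the remaining three queued frames, which is what
// EnterEndOfStreamState() asserts via num_frames_in_decryptor_.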
// Sets up expectations and actions to put DecryptingAudioDecoder in an
// active normal decoding state.
void EnterNormalDecodingState() {
- Decryptor::AudioBuffers end_of_stream_frames_(1, end_of_stream_frame_);
-
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(ReturnBuffer(encrypted_buffer_))
- .WillRepeatedly(ReturnBuffer(DecoderBuffer::CreateEOSBuffer()));
- EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
- .WillOnce(RunCallback<1>(Decryptor::kSuccess, decoded_frame_list_))
- .WillRepeatedly(RunCallback<1>(Decryptor::kNeedMoreData,
- Decryptor::AudioBuffers()));
- EXPECT_CALL(statistics_cb_, OnStatistics(_));
-
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, decoded_frame_);
+ EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _)).WillRepeatedly(
+ Invoke(this, &DecryptingAudioDecoderTest::DecryptAndDecodeAudio));
+ EXPECT_CALL(*this, FrameReady(decoded_frame_));
+ for (int i = 0; i < kDecodingDelay + 1; ++i)
+ DecodeAndExpect(encrypted_buffer_, AudioDecoder::kOk);
}
// Sets up expectations and actions to put DecryptingAudioDecoder in an end
// of stream state. This function must be called after
// EnterNormalDecodingState() to work.
void EnterEndOfStreamState() {
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, end_of_stream_frame_);
- }
-
- // Make the read callback pending by saving and not firing it.
- void EnterPendingReadState() {
- EXPECT_TRUE(pending_demuxer_read_cb_.is_null());
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(SaveArg<0>(&pending_demuxer_read_cb_));
- decoder_->Read(base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)));
- message_loop_.RunUntilIdle();
- // Make sure the Read() on the decoder triggers a Read() on the demuxer.
- EXPECT_FALSE(pending_demuxer_read_cb_.is_null());
+ // The codec in the |decryptor_| will be flushed.
+ EXPECT_CALL(*this, FrameReady(decoded_frame_))
+ .Times(kDecodingDelay);
+ DecodeAndExpect(DecoderBuffer::CreateEOSBuffer(), AudioDecoder::kOk);
+ EXPECT_EQ(0, num_frames_in_decryptor_);
}
// Make the audio decode callback pending by saving and not firing it.
void EnterPendingDecodeState() {
EXPECT_TRUE(pending_audio_decode_cb_.is_null());
- EXPECT_CALL(*demuxer_, Read(_))
- .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(encrypted_buffer_, _))
.WillOnce(SaveArg<1>(&pending_audio_decode_cb_));
- decoder_->Read(base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)));
+ decoder_->Decode(encrypted_buffer_,
+ base::Bind(&DecryptingAudioDecoderTest::DecodeDone,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
- // Make sure the Read() on the decoder triggers a DecryptAndDecode() on the
- // decryptor.
+ // Make sure the Decode() on the decoder triggers a DecryptAndDecode() on
+ // the decryptor.
EXPECT_FALSE(pending_audio_decode_cb_.is_null());
}
void EnterWaitingForKeyState() {
- EXPECT_CALL(*demuxer_, Read(_))
- .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(encrypted_buffer_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kNoKey,
Decryptor::AudioBuffers()));
- decoder_->Read(base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)));
+ decoder_->Decode(encrypted_buffer_,
+ base::Bind(&DecryptingAudioDecoderTest::DecodeDone,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -208,6 +215,16 @@ class DecryptingAudioDecoderTest : public testing::Test {
}
}
+ void AbortAllPendingCBs() {
+ if (!pending_init_cb_.is_null()) {
+ ASSERT_TRUE(pending_audio_decode_cb_.is_null());
+ base::ResetAndReturn(&pending_init_cb_).Run(false);
+ return;
+ }
+
+ AbortPendingAudioDecodeCB();
+ }
+
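// NOTE (editorial): AbortAllPendingCBs() relies on base::ResetAndReturn,
// which empties the stored callback slot before running it, so a re-entrant
// call cannot fire the same callback twice. A rough standalone equivalent of
// that idiom, with std::function standing in for base::Callback:
//   #include <functional>
//   #include <utility>
//   std::function<void(bool)> TakeCallback(std::function<void(bool)>* cb) {
//     return std::exchange(*cb, nullptr);  // clear the slot, hand back value
//   }
//   // if (pending_init_cb_) TakeCallback(&pending_init_cb_)(false);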
void Reset() {
EXPECT_CALL(*decryptor_, ResetDecoder(Decryptor::kAudio))
.WillRepeatedly(InvokeWithoutArgs(
@@ -217,27 +234,36 @@ class DecryptingAudioDecoderTest : public testing::Test {
message_loop_.RunUntilIdle();
}
+ void Stop() {
+ EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio))
+ .WillRepeatedly(InvokeWithoutArgs(
+ this, &DecryptingAudioDecoderTest::AbortAllPendingCBs));
+
+ decoder_->Stop();
+ message_loop_.RunUntilIdle();
+ }
+
MOCK_METHOD1(RequestDecryptorNotification, void(const DecryptorReadyCB&));
- MOCK_METHOD2(FrameReady,
- void(AudioDecoder::Status, const scoped_refptr<AudioBuffer>&));
+ MOCK_METHOD1(FrameReady, void(const scoped_refptr<AudioBuffer>&));
+ MOCK_METHOD1(DecodeDone, void(AudioDecoder::Status));
base::MessageLoop message_loop_;
scoped_ptr<DecryptingAudioDecoder> decoder_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
- scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_;
- MockStatisticsCB statistics_cb_;
AudioDecoderConfig config_;
- DemuxerStream::ReadCB pending_demuxer_read_cb_;
+ // Variables that help the |decryptor_| simulate decoding delay and flushing.
+ int num_decrypt_and_decode_calls_;
+ int num_frames_in_decryptor_;
+
Decryptor::DecoderInitCB pending_init_cb_;
Decryptor::NewKeyCB key_added_cb_;
Decryptor::AudioDecodeCB pending_audio_decode_cb_;
- // Constant buffer/frames to be returned by the |demuxer_| and |decryptor_|.
+ // Constant buffer/frames, to be used/returned by |decoder_| and |decryptor_|.
scoped_refptr<DecoderBuffer> encrypted_buffer_;
scoped_refptr<AudioBuffer> decoded_frame_;
- scoped_refptr<AudioBuffer> end_of_stream_frame_;
Decryptor::AudioBuffers decoded_frame_list_;
private:
@@ -251,7 +277,7 @@ TEST_F(DecryptingAudioDecoderTest, Initialize_Normal) {
// Ensure that DecryptingAudioDecoder only accepts encrypted audio.
TEST_F(DecryptingAudioDecoderTest, Initialize_UnencryptedAudioConfig) {
AudioDecoderConfig config(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, false);
+ CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, false);
InitializeAndExpectStatus(config, DECODER_ERROR_NOT_SUPPORTED);
}
@@ -272,7 +298,7 @@ TEST_F(DecryptingAudioDecoderTest, Initialize_UnsupportedAudioConfig) {
.WillOnce(RunCallbackIfNotNull(decryptor_.get()));
AudioDecoderConfig config(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true);
+ CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, true);
InitializeAndExpectStatus(config, DECODER_ERROR_NOT_SUPPORTED);
}
@@ -281,7 +307,7 @@ TEST_F(DecryptingAudioDecoderTest, Initialize_NullDecryptor) {
.WillRepeatedly(RunCallbackIfNotNull(static_cast<Decryptor*>(NULL)));
AudioDecoderConfig config(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true);
+ CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, true);
InitializeAndExpectStatus(config, DECODER_ERROR_NOT_SUPPORTED);
}
@@ -296,31 +322,11 @@ TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_Normal) {
TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_DecodeError) {
Initialize();
- EXPECT_CALL(*demuxer_, Read(_))
- .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kError,
Decryptor::AudioBuffers()));
- ReadAndExpectFrameReadyWith(AudioDecoder::kDecodeError, NULL);
-}
-
-// Test the case where the decryptor returns kNeedMoreData to ask for more
-// buffers before it can produce a frame.
-TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_NeedMoreData) {
- Initialize();
-
- EXPECT_CALL(*demuxer_, Read(_))
- .Times(2)
- .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
- EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
- .WillOnce(RunCallback<1>(Decryptor::kNeedMoreData,
- Decryptor::AudioBuffers()))
- .WillRepeatedly(RunCallback<1>(Decryptor::kSuccess, decoded_frame_list_));
- EXPECT_CALL(statistics_cb_, OnStatistics(_))
- .Times(2);
-
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, decoded_frame_);
+ DecodeAndExpect(encrypted_buffer_, AudioDecoder::kDecodeError);
}
// Test the case where the decryptor returns multiple decoded frames.
@@ -328,27 +334,27 @@ TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_MultipleFrames) {
Initialize();
scoped_refptr<AudioBuffer> frame_a = AudioBuffer::CreateEmptyBuffer(
+ config_.channel_layout(),
ChannelLayoutToChannelCount(config_.channel_layout()),
+ kSampleRate,
kFakeAudioFrameSize,
- kNoTimestamp(),
kNoTimestamp());
scoped_refptr<AudioBuffer> frame_b = AudioBuffer::CreateEmptyBuffer(
+ config_.channel_layout(),
ChannelLayoutToChannelCount(config_.channel_layout()),
+ kSampleRate,
kFakeAudioFrameSize,
- kNoTimestamp(),
kNoTimestamp());
decoded_frame_list_.push_back(frame_a);
decoded_frame_list_.push_back(frame_b);
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(ReturnBuffer(encrypted_buffer_));
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
.WillOnce(RunCallback<1>(Decryptor::kSuccess, decoded_frame_list_));
- EXPECT_CALL(statistics_cb_, OnStatistics(_));
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, decoded_frame_);
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, frame_a);
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, frame_b);
+ EXPECT_CALL(*this, FrameReady(decoded_frame_));
+ EXPECT_CALL(*this, FrameReady(frame_a));
+ EXPECT_CALL(*this, FrameReady(frame_b));
+ DecodeAndExpect(encrypted_buffer_, AudioDecoder::kOk);
}
// Test the case where the decryptor receives end-of-stream buffer.
@@ -358,61 +364,24 @@ TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_EndOfStream) {
EnterEndOfStreamState();
}
-// Test aborted read on the demuxer stream.
-TEST_F(DecryptingAudioDecoderTest, DemuxerRead_Aborted) {
+// Test reinitializing the decoder with a new config.
+TEST_F(DecryptingAudioDecoderTest, Reinitialize_ConfigChange) {
Initialize();
- // ReturnBuffer() with NULL triggers aborted demuxer read.
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(ReturnBuffer(scoped_refptr<DecoderBuffer>()));
-
- ReadAndExpectFrameReadyWith(AudioDecoder::kAborted, NULL);
-}
-
-// Test config change on the demuxer stream.
-TEST_F(DecryptingAudioDecoderTest, DemuxerRead_ConfigChange) {
- Initialize();
+ EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
+ .Times(AtMost(1))
+ .WillOnce(RunCallback<1>(true));
// The new config is different from the initial config in bits-per-channel,
// channel layout and samples_per_second.
AudioDecoderConfig new_config(kCodecVorbis, kSampleFormatPlanarS16,
- CHANNEL_LAYOUT_5_1, 88200, NULL, 0, false);
+ CHANNEL_LAYOUT_5_1, 88200, NULL, 0, true);
EXPECT_NE(new_config.bits_per_channel(), config_.bits_per_channel());
EXPECT_NE(new_config.channel_layout(), config_.channel_layout());
EXPECT_NE(new_config.samples_per_second(), config_.samples_per_second());
- demuxer_->set_audio_decoder_config(new_config);
- EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio));
- EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
- .WillOnce(RunCallback<1>(true));
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(RunCallback<0>(DemuxerStream::kConfigChanged,
- scoped_refptr<DecoderBuffer>()))
- .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
- EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
- .WillRepeatedly(RunCallback<1>(Decryptor::kSuccess, decoded_frame_list_));
- EXPECT_CALL(statistics_cb_, OnStatistics(_));
-
- ReadAndExpectFrameReadyWith(AudioDecoder::kOk, decoded_frame_);
-
- EXPECT_EQ(new_config.bits_per_channel(), decoder_->bits_per_channel());
- EXPECT_EQ(new_config.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(new_config.samples_per_second(), decoder_->samples_per_second());
-}
-
-// Test config change failure.
-TEST_F(DecryptingAudioDecoderTest, DemuxerRead_ConfigChangeFailed) {
- Initialize();
-
- EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio));
- EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
- .WillOnce(RunCallback<1>(false));
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(RunCallback<0>(DemuxerStream::kConfigChanged,
- scoped_refptr<DecoderBuffer>()))
- .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
-
- ReadAndExpectFrameReadyWith(AudioDecoder::kDecodeError, NULL);
+ ReinitializeConfigChange(new_config);
+ message_loop_.RunUntilIdle();
}
// Test the case where a key is added when the decryptor is in
@@ -423,8 +392,8 @@ TEST_F(DecryptingAudioDecoderTest, KeyAdded_DuringWaitingForKey) {
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kSuccess, decoded_frame_list_));
- EXPECT_CALL(statistics_cb_, OnStatistics(_));
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kOk, decoded_frame_));
+ EXPECT_CALL(*this, FrameReady(decoded_frame_));
+ EXPECT_CALL(*this, DecodeDone(AudioDecoder::kOk));
key_added_cb_.Run();
message_loop_.RunUntilIdle();
}
@@ -437,8 +406,8 @@ TEST_F(DecryptingAudioDecoderTest, KeyAdded_DruingPendingDecode) {
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kSuccess, decoded_frame_list_));
- EXPECT_CALL(statistics_cb_, OnStatistics(_));
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kOk, decoded_frame_));
+ EXPECT_CALL(*this, FrameReady(decoded_frame_));
+ EXPECT_CALL(*this, DecodeDone(AudioDecoder::kOk));
// The audio decode callback is returned after the correct decryption key is
// added.
key_added_cb_.Run();
@@ -462,118 +431,12 @@ TEST_F(DecryptingAudioDecoderTest, Reset_DuringIdleAfterDecodedOneFrame) {
Reset();
}
-// Test resetting when the decoder is in kPendingDemuxerRead state and the read
-// callback is returned with kOk.
-TEST_F(DecryptingAudioDecoderTest, Reset_DuringDemuxerRead_Ok) {
- Initialize();
- EnterPendingReadState();
-
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kAborted, IsNull()));
-
- Reset();
- base::ResetAndReturn(&pending_demuxer_read_cb_).Run(DemuxerStream::kOk,
- encrypted_buffer_);
- message_loop_.RunUntilIdle();
-}
-
-// Test resetting when the decoder is in kPendingDemuxerRead state and the read
-// callback is returned with kAborted.
-TEST_F(DecryptingAudioDecoderTest, Reset_DuringDemuxerRead_Aborted) {
- Initialize();
- EnterPendingReadState();
-
- // Make sure we get a NULL audio frame returned.
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kAborted, IsNull()));
-
- Reset();
- base::ResetAndReturn(&pending_demuxer_read_cb_).Run(DemuxerStream::kAborted,
- NULL);
- message_loop_.RunUntilIdle();
-}
-
-// Test resetting when the decoder is in kPendingDemuxerRead state and the read
-// callback is returned with kConfigChanged.
-TEST_F(DecryptingAudioDecoderTest, Reset_DuringDemuxerRead_ConfigChange) {
- Initialize();
- EnterPendingReadState();
-
- Reset();
-
- // The new config is different from the initial config in bits-per-channel,
- // channel layout and samples_per_second.
- AudioDecoderConfig new_config(kCodecVorbis, kSampleFormatPlanarS16,
- CHANNEL_LAYOUT_5_1, 88200, NULL, 0, false);
- EXPECT_NE(new_config.bits_per_channel(), config_.bits_per_channel());
- EXPECT_NE(new_config.channel_layout(), config_.channel_layout());
- EXPECT_NE(new_config.samples_per_second(), config_.samples_per_second());
-
- // Even during pending reset, the decoder still needs to be initialized with
- // the new config.
- demuxer_->set_audio_decoder_config(new_config);
- EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio));
- EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
- .WillOnce(RunCallback<1>(true));
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kAborted, IsNull()));
-
- base::ResetAndReturn(&pending_demuxer_read_cb_)
- .Run(DemuxerStream::kConfigChanged, NULL);
- message_loop_.RunUntilIdle();
-
- EXPECT_EQ(new_config.bits_per_channel(), decoder_->bits_per_channel());
- EXPECT_EQ(new_config.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(new_config.samples_per_second(), decoder_->samples_per_second());
-}
-
-// Test resetting when the decoder is in kPendingDemuxerRead state, the read
-// callback is returned with kConfigChanged and the config change fails.
-TEST_F(DecryptingAudioDecoderTest, Reset_DuringDemuxerRead_ConfigChangeFailed) {
- Initialize();
- EnterPendingReadState();
-
- Reset();
-
- // Even during pending reset, the decoder still needs to be initialized with
- // the new config.
- EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio));
- EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
- .WillOnce(RunCallback<1>(false));
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kDecodeError, IsNull()));
-
- base::ResetAndReturn(&pending_demuxer_read_cb_)
- .Run(DemuxerStream::kConfigChanged, NULL);
- message_loop_.RunUntilIdle();
-}
-
-// Test resetting when the decoder is in kPendingConfigChange state.
-TEST_F(DecryptingAudioDecoderTest, Reset_DuringPendingConfigChange) {
- Initialize();
- EnterNormalDecodingState();
-
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(RunCallback<0>(DemuxerStream::kConfigChanged,
- scoped_refptr<DecoderBuffer>()));
- EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio));
- EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
- .WillOnce(SaveArg<1>(&pending_init_cb_));
-
- decoder_->Read(base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)));
- message_loop_.RunUntilIdle();
- EXPECT_FALSE(pending_init_cb_.is_null());
-
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kAborted, IsNull()));
-
- Reset();
- base::ResetAndReturn(&pending_init_cb_).Run(true);
- message_loop_.RunUntilIdle();
-}
-
// Test resetting when the decoder is in kPendingDecode state.
TEST_F(DecryptingAudioDecoderTest, Reset_DuringPendingDecode) {
Initialize();
EnterPendingDecodeState();
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kAborted, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(AudioDecoder::kAborted));
Reset();
}
@@ -583,7 +446,7 @@ TEST_F(DecryptingAudioDecoderTest, Reset_DuringWaitingForKey) {
Initialize();
EnterWaitingForKeyState();
- EXPECT_CALL(*this, FrameReady(AudioDecoder::kAborted, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(AudioDecoder::kAborted));
Reset();
}
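// NOTE (editorial): the audio-test rewrite above tracks an interface split:
// the old Read() contract, one callback carrying both a status and a frame,
// becomes a per-frame output callback (FrameReady) plus a per-Decode
// completion callback (DecodeDone). A condensed, hypothetical rendering of
// the new shape, not the real media::AudioDecoder declaration:
#include <functional>
#include <memory>
struct AudioBufferStub {};
struct DecoderBufferStub {};
enum class DecodeStatus { kOk, kAborted, kDecodeError };
class SplitCallbackDecoder {
 public:
  using OutputCB = std::function<void(std::shared_ptr<AudioBufferStub>)>;
  using DecodeCB = std::function<void(DecodeStatus)>;
  // |output| may run zero or more times per Decode(); |done| runs exactly
  // once per Decode(), carrying the status that used to ride with the frame.
  void Initialize(OutputCB output) { output_ = std::move(output); }
  void Decode(std::shared_ptr<DecoderBufferStub> /*buffer*/, DecodeCB done) {
    // A real decoder would emit decoded frames through output_ here.
    done(DecodeStatus::kOk);
  }
 private:
  OutputCB output_;
};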
diff --git a/chromium/media/filters/decrypting_demuxer_stream.cc b/chromium/media/filters/decrypting_demuxer_stream.cc
index a26498cda55..6a1de5f38c4 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream.cc
@@ -8,20 +8,17 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decryptor.h"
#include "media/base/demuxer_stream.h"
#include "media/base/pipeline.h"
+#include "media/base/video_decoder_config.h"
namespace media {
-#define BIND_TO_LOOP(function) \
- media::BindToLoop(message_loop_, base::Bind(function, weak_this_))
-
static bool IsStreamValidAndEncrypted(DemuxerStream* stream) {
return ((stream->type() == DemuxerStream::AUDIO &&
stream->audio_decoder_config().IsValidConfig() &&
@@ -32,21 +29,20 @@ static bool IsStreamValidAndEncrypted(DemuxerStream* stream) {
}
DecryptingDemuxerStream::DecryptingDemuxerStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- weak_factory_(this),
+ : task_runner_(task_runner),
state_(kUninitialized),
demuxer_stream_(NULL),
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
- key_added_while_decrypt_pending_(false) {
-}
+ key_added_while_decrypt_pending_(false),
+ weak_factory_(this) {}
void DecryptingDemuxerStream::Initialize(DemuxerStream* stream,
const PipelineStatusCB& status_cb) {
DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized) << state_;
DCHECK(!demuxer_stream_);
@@ -57,13 +53,13 @@ void DecryptingDemuxerStream::Initialize(DemuxerStream* stream,
InitializeDecoderConfig();
state_ = kDecryptorRequested;
- set_decryptor_ready_cb_.Run(
- BIND_TO_LOOP(&DecryptingDemuxerStream::SetDecryptor));
+ set_decryptor_ready_cb_.Run(BindToCurrentLoop(
+ base::Bind(&DecryptingDemuxerStream::SetDecryptor, weak_this_)));
}
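// NOTE (editorial): BindToCurrentLoop, which replaces the BIND_TO_LOOP macro
// removed above, is a "trampoline": it wraps a callback so that invoking the
// wrapper from any thread re-posts the real invocation onto the task runner
// that created it, which is how this class stays lock-free. A rough sketch
// of the idea with a hypothetical PostTask-style runner (not the real base
// API):
#include <functional>
#include <memory>
struct TaskRunnerStub {
  virtual ~TaskRunnerStub() {}
  virtual void PostTask(std::function<void()> task) = 0;
};
inline std::function<void(bool)> TrampolineTo(
    std::shared_ptr<TaskRunnerStub> runner, std::function<void(bool)> cb) {
  // The returned wrapper is safe to run from any thread; |cb| itself only
  // ever runs as a task on |runner|.
  return [runner, cb](bool ok) { runner->PostTask([cb, ok] { cb(ok); }); };
}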
void DecryptingDemuxerStream::Read(const ReadCB& read_cb) {
DVLOG(3) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kIdle) << state_;
DCHECK(!read_cb.is_null());
CHECK(read_cb_.is_null()) << "Overlapping reads are not supported.";
@@ -76,7 +72,7 @@ void DecryptingDemuxerStream::Read(const ReadCB& read_cb) {
void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
DVLOG(2) << __FUNCTION__ << " - state: " << state_;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized) << state_;
DCHECK(state_ != kStopped) << state_;
DCHECK(reset_cb_.is_null());
@@ -116,19 +112,17 @@ void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
void DecryptingDemuxerStream::Stop(const base::Closure& closure) {
DVLOG(2) << __FUNCTION__ << " - state: " << state_;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized) << state_;
- // Invalidate all weak pointers so that pending callbacks won't fire.
+ // Invalidate all weak pointers so that pending callbacks won't be fired into
+ // this object.
weak_factory_.InvalidateWeakPtrs();
// At this point the render thread is likely paused (in WebMediaPlayerImpl's
// Destroy()), so running |closure| can't wait for anything that requires the
// render thread to process messages to complete (such as PPAPI methods).
if (decryptor_) {
- // Clear the callback.
- decryptor_->RegisterNewKeyCB(GetDecryptorStreamType(),
- Decryptor::NewKeyCB());
decryptor_->CancelDecrypt(GetDecryptorStreamType());
decryptor_ = NULL;
}
@@ -167,13 +161,17 @@ void DecryptingDemuxerStream::EnableBitstreamConverter() {
demuxer_stream_->EnableBitstreamConverter();
}
+bool DecryptingDemuxerStream::SupportsConfigChanges() {
+ return demuxer_stream_->SupportsConfigChanges();
+}
+
DecryptingDemuxerStream::~DecryptingDemuxerStream() {
DVLOG(2) << __FUNCTION__ << " : state_ = " << state_;
}
void DecryptingDemuxerStream::SetDecryptor(Decryptor* decryptor) {
DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kDecryptorRequested) << state_;
DCHECK(!init_cb_.is_null());
DCHECK(!set_decryptor_ready_cb_.is_null());
@@ -190,7 +188,8 @@ void DecryptingDemuxerStream::SetDecryptor(Decryptor* decryptor) {
decryptor_->RegisterNewKeyCB(
GetDecryptorStreamType(),
- BIND_TO_LOOP(&DecryptingDemuxerStream::OnKeyAdded));
+ BindToCurrentLoop(
+ base::Bind(&DecryptingDemuxerStream::OnKeyAdded, weak_this_)));
state_ = kIdle;
base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
@@ -200,7 +199,7 @@ void DecryptingDemuxerStream::DecryptBuffer(
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
DVLOG(3) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDemuxerRead) << state_;
DCHECK(!read_cb_.is_null());
DCHECK_EQ(buffer.get() != NULL, status == kOk) << status;
@@ -246,9 +245,8 @@ void DecryptingDemuxerStream::DecryptBuffer(
// An empty iv string signals that the frame is unencrypted.
if (buffer->decrypt_config()->iv().empty()) {
DVLOG(2) << "DoDecryptBuffer() - clear buffer.";
- int data_offset = buffer->decrypt_config()->data_offset();
scoped_refptr<DecoderBuffer> decrypted = DecoderBuffer::CopyFrom(
- buffer->data() + data_offset, buffer->data_size() - data_offset);
+ buffer->data(), buffer->data_size());
decrypted->set_timestamp(buffer->timestamp());
decrypted->set_duration(buffer->duration());
state_ = kIdle;
@@ -262,19 +260,20 @@ void DecryptingDemuxerStream::DecryptBuffer(
}
void DecryptingDemuxerStream::DecryptPendingBuffer() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecrypt) << state_;
decryptor_->Decrypt(
GetDecryptorStreamType(),
pending_buffer_to_decrypt_,
- BIND_TO_LOOP(&DecryptingDemuxerStream::DeliverBuffer));
+ BindToCurrentLoop(
+ base::Bind(&DecryptingDemuxerStream::DeliverBuffer, weak_this_)));
}
void DecryptingDemuxerStream::DeliverBuffer(
Decryptor::Status status,
const scoped_refptr<DecoderBuffer>& decrypted_buffer) {
DVLOG(3) << __FUNCTION__ << " - status: " << status;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecrypt) << state_;
DCHECK_NE(status, Decryptor::kNeedMoreData);
DCHECK(!read_cb_.is_null());
@@ -319,7 +318,7 @@ void DecryptingDemuxerStream::DeliverBuffer(
}
void DecryptingDemuxerStream::OnKeyAdded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (state_ == kPendingDecrypt) {
key_added_while_decrypt_pending_ = true;
@@ -371,7 +370,7 @@ void DecryptingDemuxerStream::InitializeDecoderConfig() {
false, // Output audio is not encrypted.
false,
base::TimeDelta(),
- base::TimeDelta());
+ 0);
break;
}
diff --git a/chromium/media/filters/decrypting_demuxer_stream.h b/chromium/media/filters/decrypting_demuxer_stream.h
index 394cb5b7fc9..ec9f4b46632 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.h
+++ b/chromium/media/filters/decrypting_demuxer_stream.h
@@ -15,7 +15,7 @@
#include "media/base/video_decoder_config.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -24,12 +24,12 @@ class DecoderBuffer;
// Decryptor-based DemuxerStream implementation that converts a potentially
// encrypted demuxer stream to a clear demuxer stream.
-// All public APIs and callbacks are trampolined to the |message_loop_| so
+// All public APIs and callbacks are trampolined to the |task_runner_| so
// that no locks are required for thread safety.
class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
public:
DecryptingDemuxerStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb);
virtual ~DecryptingDemuxerStream();
@@ -55,6 +55,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
virtual Type type() OVERRIDE;
virtual void EnableBitstreamConverter() OVERRIDE;
+ virtual bool SupportsConfigChanges() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -98,9 +99,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
// |demuxer_stream_|.
void InitializeDecoderConfig();
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<DecryptingDemuxerStream> weak_factory_;
- base::WeakPtr<DecryptingDemuxerStream> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
State state_;
@@ -128,6 +127,10 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
// decrypting again in case the newly added key is the correct decryption key.
bool key_added_while_decrypt_pending_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<DecryptingDemuxerStream> weak_factory_;
+ base::WeakPtr<DecryptingDemuxerStream> weak_this_;
+
DISALLOW_COPY_AND_ASSIGN(DecryptingDemuxerStream);
};
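// NOTE (editorial): the "must be invalidated before all other member
// variables" comment above leans on a C++ guarantee: data members are
// destroyed in reverse declaration order, so a WeakPtrFactory declared last
// is destroyed first and invalidates its weak pointers before the members a
// pending callback might touch disappear. Minimal illustration (not Chromium
// code):
#include <iostream>
struct First  { ~First()  { std::cout << "First destroyed\n"; } };
struct Second { ~Second() { std::cout << "Second destroyed\n"; } };
struct Holder {
  First first;    // declared first, destroyed last
  Second second;  // declared last, destroyed first
};
// Destroying a Holder prints "Second destroyed" then "First destroyed".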
diff --git a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
index 3e41734aa4e..14485c0e4eb 100644
--- a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
@@ -38,7 +38,7 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedStreamBuffer(
buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(new DecryptConfig(
std::string(reinterpret_cast<const char*>(kFakeKeyId),
arraysize(kFakeKeyId)),
- iv, 0, std::vector<SubsampleEntry>())));
+ iv, std::vector<SubsampleEntry>())));
return buffer;
}
@@ -50,6 +50,9 @@ ACTION_P(ReturnBuffer, buffer) {
arg0.Run(buffer.get() ? DemuxerStream::kOk : DemuxerStream::kAborted, buffer);
}
+// Sets the |decryptor| if the DecryptorReadyCB (arg0) is not null. Sets
+// |is_decryptor_set| to true if a non-NULL |decryptor| has been set through the
+// callback.
ACTION_P2(SetDecryptorIfNotNull, decryptor, is_decryptor_set) {
if (!arg0.is_null())
arg0.Run(decryptor);
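// NOTE (editorial): ACTION_P2 is gmock's macro for a two-parameter custom
// action; inside the body, arg0..argN name the mocked call's arguments and
// the macro parameters are captured by value. Generic usage sketch
// (illustrative names, unrelated to this patch):
//   ACTION_P2(StoreSum, dest, bias) { *dest = arg0 + bias; }
//   int sum = 0;
//   EXPECT_CALL(mock, OnValue(_)).WillOnce(StoreSum(&sum, 10));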
@@ -111,7 +114,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.WillOnce(SetDecryptorIfNotNull(decryptor_.get(), &is_decryptor_set_));
EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kAudio, _))
- .WillRepeatedly(SaveArg<1>(&key_added_cb_));
+ .WillOnce(SaveArg<1>(&key_added_cb_));
AudioDecoderConfig input_config(
kCodecVorbis, kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 44100,
@@ -251,7 +254,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
base::MessageLoop message_loop_;
scoped_ptr<DecryptingDemuxerStream> demuxer_stream_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
- // Whether a valid Decryptor is set to the |demuxer_stream_|.
+ // Whether a valid Decryptor has been set in the |demuxer_stream_|.
bool is_decryptor_set_;
scoped_ptr<StrictMock<MockDemuxerStream> > input_audio_stream_;
scoped_ptr<StrictMock<MockDemuxerStream> > input_video_stream_;
diff --git a/chromium/media/filters/decrypting_video_decoder.cc b/chromium/media/filters/decrypting_video_decoder.cc
index b3ea2144558..eb40625f8a3 100644
--- a/chromium/media/filters/decrypting_video_decoder.cc
+++ b/chromium/media/filters/decrypting_video_decoder.cc
@@ -9,8 +9,8 @@
#include "base/debug/trace_event.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/bind_to_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decryptor.h"
#include "media/base/pipeline.h"
@@ -20,21 +20,22 @@
namespace media {
DecryptingVideoDecoder::DecryptingVideoDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- weak_factory_(this),
+ : task_runner_(task_runner),
state_(kUninitialized),
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
key_added_while_decode_pending_(false),
- trace_id_(0) {
-}
+ trace_id_(0),
+ weak_factory_(this) {}
void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) {
+ bool /* low_delay */,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
DVLOG(2) << "Initialize()";
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kUninitialized ||
state_ == kIdle ||
state_ == kDecodeFinished) << state_;
@@ -44,6 +45,7 @@ void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
DCHECK(config.is_encrypted());
init_cb_ = BindToCurrentLoop(status_cb);
+ output_cb_ = BindToCurrentLoop(output_cb);
weak_this_ = weak_factory_.GetWeakPtr();
config_ = config;
@@ -64,7 +66,7 @@ void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
void DecryptingVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
DVLOG(3) << "Decode()";
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle ||
state_ == kDecodeFinished ||
state_ == kError) << state_;
@@ -74,13 +76,13 @@ void DecryptingVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
decode_cb_ = BindToCurrentLoop(decode_cb);
if (state_ == kError) {
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
return;
}
// Return empty frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
return;
}
@@ -91,7 +93,7 @@ void DecryptingVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
void DecryptingVideoDecoder::Reset(const base::Closure& closure) {
DVLOG(2) << "Reset() - state: " << state_;
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle ||
state_ == kPendingDecode ||
state_ == kWaitingForKey ||
@@ -116,23 +118,26 @@ void DecryptingVideoDecoder::Reset(const base::Closure& closure) {
if (state_ == kWaitingForKey) {
DCHECK(!decode_cb_.is_null());
pending_buffer_to_decode_ = NULL;
- base::ResetAndReturn(&decode_cb_).Run(kOk, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kAborted);
}
DCHECK(decode_cb_.is_null());
DoReset();
}
-void DecryptingVideoDecoder::Stop(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void DecryptingVideoDecoder::Stop() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
DVLOG(2) << "Stop() - state: " << state_;
+ // Invalidate all weak pointers so that pending callbacks won't be fired into
+ // this object.
+ weak_factory_.InvalidateWeakPtrs();
+
// At this point the render thread is likely paused (in WebMediaPlayerImpl's
// Destroy()), so running |closure| can't wait for anything that requires the
// render thread to be processing messages to complete (such as PPAPI
// callbacks).
if (decryptor_) {
- decryptor_->RegisterNewKeyCB(Decryptor::kVideo, Decryptor::NewKeyCB());
decryptor_->DeinitializeDecoder(Decryptor::kVideo);
decryptor_ = NULL;
}
@@ -142,11 +147,11 @@ void DecryptingVideoDecoder::Stop(const base::Closure& closure) {
if (!init_cb_.is_null())
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
if (!decode_cb_.is_null())
- base::ResetAndReturn(&decode_cb_).Run(kOk, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kAborted);
if (!reset_cb_.is_null())
base::ResetAndReturn(&reset_cb_).Run();
+
state_ = kStopped;
- BindToCurrentLoop(closure).Run();
}
DecryptingVideoDecoder::~DecryptingVideoDecoder() {
@@ -155,11 +160,7 @@ DecryptingVideoDecoder::~DecryptingVideoDecoder() {
void DecryptingVideoDecoder::SetDecryptor(Decryptor* decryptor) {
DVLOG(2) << "SetDecryptor()";
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (state_ == kStopped)
- return;
-
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kDecryptorRequested) << state_;
DCHECK(!init_cb_.is_null());
DCHECK(!set_decryptor_ready_cb_.is_null());
@@ -182,11 +183,7 @@ void DecryptingVideoDecoder::SetDecryptor(Decryptor* decryptor) {
void DecryptingVideoDecoder::FinishInitialization(bool success) {
DVLOG(2) << "FinishInitialization()";
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (state_ == kStopped)
- return;
-
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecoderInit) << state_;
DCHECK(!init_cb_.is_null());
DCHECK(reset_cb_.is_null()); // No Reset() before initialization finished.
@@ -198,8 +195,10 @@ void DecryptingVideoDecoder::FinishInitialization(bool success) {
return;
}
- decryptor_->RegisterNewKeyCB(Decryptor::kVideo, BindToCurrentLoop(
- base::Bind(&DecryptingVideoDecoder::OnKeyAdded, weak_this_)));
+ decryptor_->RegisterNewKeyCB(
+ Decryptor::kVideo,
+ BindToCurrentLoop(
+ base::Bind(&DecryptingVideoDecoder::OnKeyAdded, weak_this_)));
// Success!
state_ = kIdle;
@@ -208,7 +207,7 @@ void DecryptingVideoDecoder::FinishInitialization(bool success) {
void DecryptingVideoDecoder::DecodePendingBuffer() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
TRACE_EVENT_ASYNC_BEGIN0(
"media", "DecryptingVideoDecoder::DecodePendingBuffer", ++trace_id_);
@@ -228,17 +227,15 @@ void DecryptingVideoDecoder::DeliverFrame(
Decryptor::Status status,
const scoped_refptr<VideoFrame>& frame) {
DVLOG(3) << "DeliverFrame() - status: " << status;
- DCHECK(message_loop_->BelongsToCurrentThread());
- TRACE_EVENT_ASYNC_END0(
- "media", "DecryptingVideoDecoder::DecodePendingBuffer", trace_id_);
-
- if (state_ == kStopped)
- return;
-
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
DCHECK(!decode_cb_.is_null());
DCHECK(pending_buffer_to_decode_.get());
+ TRACE_EVENT_ASYNC_END2(
+ "media", "DecryptingVideoDecoder::DecodePendingBuffer", trace_id_,
+ "buffer_size", buffer_size, "status", status);
+
bool need_to_try_again_if_nokey_is_returned = key_added_while_decode_pending_;
key_added_while_decode_pending_ = false;
@@ -247,7 +244,7 @@ void DecryptingVideoDecoder::DeliverFrame(
pending_buffer_to_decode_ = NULL;
if (!reset_cb_.is_null()) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kAborted);
DoReset();
return;
}
@@ -257,7 +254,7 @@ void DecryptingVideoDecoder::DeliverFrame(
if (status == Decryptor::kError) {
DVLOG(2) << "DeliverFrame() - kError";
state_ = kError;
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
return;
}
@@ -279,28 +276,32 @@ void DecryptingVideoDecoder::DeliverFrame(
if (status == Decryptor::kNeedMoreData) {
DVLOG(2) << "DeliverFrame() - kNeedMoreData";
- if (scoped_pending_buffer_to_decode->end_of_stream()) {
- state_ = kDecodeFinished;
- base::ResetAndReturn(&decode_cb_).Run(
- kOk, media::VideoFrame::CreateEOSFrame());
- return;
- }
-
- state_ = kIdle;
- base::ResetAndReturn(&decode_cb_).Run(kNotEnoughData, NULL);
+ state_ = scoped_pending_buffer_to_decode->end_of_stream() ? kDecodeFinished
+ : kIdle;
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
return;
}
DCHECK_EQ(status, Decryptor::kSuccess);
// No frame returned with kSuccess should be an end-of-stream frame.
DCHECK(!frame->end_of_stream());
+ output_cb_.Run(frame);
+
+ if (scoped_pending_buffer_to_decode->end_of_stream()) {
+ // Set |pending_buffer_to_decode_| back as we need to keep flushing the
+ // decryptor.
+ pending_buffer_to_decode_ = scoped_pending_buffer_to_decode;
+ DecodePendingBuffer();
+ return;
+ }
+
state_ = kIdle;
- base::ResetAndReturn(&decode_cb_).Run(kOk, frame);
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
}
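// NOTE (editorial): the end-of-stream branch above re-feeds the EOS buffer
// until the decryptor answers kNeedMoreData, draining its internal pipeline.
// Condensed control flow (illustrative names; decode_once stands for one
// DecryptAndDecodeVideo round trip, emit_frame for output_cb_.Run):
enum class DrainStatus { kSuccess, kNeedMoreData };
template <typename DecodeOnce, typename EmitFrame>
void FlushAtEndOfStream(DecodeOnce decode_once, EmitFrame emit_frame) {
  int frame = 0;  // stand-in for scoped_refptr<VideoFrame>
  while (decode_once(&frame) == DrainStatus::kSuccess)
    emit_frame(frame);
  // kNeedMoreData in response to EOS means the pipeline is fully drained.
}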
void DecryptingVideoDecoder::OnKeyAdded() {
DVLOG(2) << "OnKeyAdded()";
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (state_ == kPendingDecode) {
key_added_while_decode_pending_ = true;
diff --git a/chromium/media/filters/decrypting_video_decoder.h b/chromium/media/filters/decrypting_video_decoder.h
index 3fbdcae7ca5..ac4caf86951 100644
--- a/chromium/media/filters/decrypting_video_decoder.h
+++ b/chromium/media/filters/decrypting_video_decoder.h
@@ -12,7 +12,7 @@
#include "media/base/video_decoder_config.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -22,22 +22,24 @@ class Decryptor;
// Decryptor-based VideoDecoder implementation that can decrypt and decode
// encrypted video buffers and return decrypted and decompressed video frames.
-// All public APIs and callbacks are trampolined to the |message_loop_| so
+// All public APIs and callbacks are trampolined to the |task_runner_| so
// that no locks are required for thread safety.
class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
public:
DecryptingVideoDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb);
virtual ~DecryptingVideoDecoder();
// VideoDecoder implementation.
virtual void Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) OVERRIDE;
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) OVERRIDE;
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop(const base::Closure& closure) OVERRIDE;
+ virtual void Stop() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -78,13 +80,12 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
// Free decoder resources and call |stop_cb_|.
void DoStop();
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<DecryptingVideoDecoder> weak_factory_;
- base::WeakPtr<DecryptingVideoDecoder> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
State state_;
PipelineStatusCB init_cb_;
+ OutputCB output_cb_;
DecodeCB decode_cb_;
base::Closure reset_cb_;
@@ -109,6 +110,10 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
// matching DecryptCB call (in DoDeliverFrame()).
uint32 trace_id_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<DecryptingVideoDecoder> weak_factory_;
+ base::WeakPtr<DecryptingVideoDecoder> weak_this_;
+
DISALLOW_COPY_AND_ASSIGN(DecryptingVideoDecoder);
};
diff --git a/chromium/media/filters/decrypting_video_decoder_unittest.cc b/chromium/media/filters/decrypting_video_decoder_unittest.cc
index 1e8bee9fece..738753b518b 100644
--- a/chromium/media/filters/decrypting_video_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_video_decoder_unittest.cc
@@ -18,16 +18,15 @@
#include "testing/gmock/include/gmock/gmock.h"
using ::testing::_;
-using ::testing::AtMost;
-using ::testing::IsNull;
-using ::testing::ReturnRef;
+using ::testing::Invoke;
using ::testing::SaveArg;
using ::testing::StrictMock;
namespace media {
-static const uint8 kFakeKeyId[] = { 0x4b, 0x65, 0x79, 0x20, 0x49, 0x44 };
-static const uint8 kFakeIv[DecryptConfig::kDecryptionKeySize] = { 0 };
+const uint8 kFakeKeyId[] = { 0x4b, 0x65, 0x79, 0x20, 0x49, 0x44 };
+const uint8 kFakeIv[DecryptConfig::kDecryptionKeySize] = { 0 };
+const int kDecodingDelay = 3;
// Create a fake non-empty encrypted buffer.
static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
@@ -37,7 +36,6 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
std::string(reinterpret_cast<const char*>(kFakeKeyId),
arraysize(kFakeKeyId)),
std::string(reinterpret_cast<const char*>(kFakeIv), arraysize(kFakeIv)),
- 0,
std::vector<SubsampleEntry>())));
return buffer;
}
@@ -55,10 +53,6 @@ ACTION_P2(ResetAndRunCallback, callback, param) {
base::ResetAndReturn(callback).Run(param);
}
-MATCHER(IsEndOfStream, "end of stream") {
- return (arg->end_of_stream());
-}
-
} // namespace
class DecryptingVideoDecoderTest : public testing::Test {
@@ -70,11 +64,12 @@ class DecryptingVideoDecoderTest : public testing::Test {
&DecryptingVideoDecoderTest::RequestDecryptorNotification,
base::Unretained(this)))),
decryptor_(new StrictMock<MockDecryptor>()),
+ num_decrypt_and_decode_calls_(0),
+ num_frames_in_decryptor_(0),
encrypted_buffer_(CreateFakeEncryptedBuffer()),
decoded_video_frame_(VideoFrame::CreateBlackFrame(
TestVideoConfig::NormalCodedSize())),
- null_video_frame_(scoped_refptr<VideoFrame>()),
- end_of_stream_video_frame_(VideoFrame::CreateEOSFrame()) {
+ null_video_frame_(scoped_refptr<VideoFrame>()) {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.WillRepeatedly(RunCallbackIfNotNull(decryptor_.get()));
}
@@ -83,61 +78,85 @@ class DecryptingVideoDecoderTest : public testing::Test {
Stop();
}
+ // Initializes the |decoder_| and expects |status|. Note the initialization
+ // can succeed or fail.
void InitializeAndExpectStatus(const VideoDecoderConfig& config,
PipelineStatus status) {
- decoder_->Initialize(config, NewExpectedStatusCB(status));
+ decoder_->Initialize(config, false, NewExpectedStatusCB(status),
+ base::Bind(&DecryptingVideoDecoderTest::FrameReady,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
}
+ // Initializes the |decoder_| and expects it to succeed.
void Initialize() {
EXPECT_CALL(*decryptor_, InitializeVideoDecoder(_, _))
- .WillRepeatedly(RunCallback<1>(true));
+ .WillOnce(RunCallback<1>(true));
EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kVideo, _))
- .WillRepeatedly(SaveArg<1>(&key_added_cb_));
+ .WillOnce(SaveArg<1>(&key_added_cb_));
InitializeAndExpectStatus(TestVideoConfig::NormalEncrypted(), PIPELINE_OK);
}
+ // Reinitializes the |decoder_| and expects it to succeed.
void Reinitialize() {
EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kVideo));
+ EXPECT_CALL(*decryptor_, InitializeVideoDecoder(_, _))
+ .WillOnce(RunCallback<1>(true));
+ EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kVideo, _))
+ .WillOnce(SaveArg<1>(&key_added_cb_));
+
InitializeAndExpectStatus(TestVideoConfig::LargeEncrypted(), PIPELINE_OK);
}
- void ReadAndExpectFrameReadyWith(
- const scoped_refptr<DecoderBuffer>& buffer,
- VideoDecoder::Status status,
- const scoped_refptr<VideoFrame>& video_frame) {
- if (status != VideoDecoder::kOk)
- EXPECT_CALL(*this, FrameReady(status, IsNull()));
- else if (video_frame.get() && video_frame->end_of_stream())
- EXPECT_CALL(*this, FrameReady(status, IsEndOfStream()));
- else
- EXPECT_CALL(*this, FrameReady(status, video_frame));
-
+ // Decode |buffer| and expect DecodeDone to get called with |status|.
+ void DecodeAndExpect(const scoped_refptr<DecoderBuffer>& buffer,
+ VideoDecoder::Status status) {
+ EXPECT_CALL(*this, DecodeDone(status));
decoder_->Decode(buffer,
- base::Bind(&DecryptingVideoDecoderTest::FrameReady,
+ base::Bind(&DecryptingVideoDecoderTest::DecodeDone,
base::Unretained(this)));
message_loop_.RunUntilIdle();
}
+ // Helper function to simulate the decrypting and decoding process in the
+ // |decryptor_| with a decoding delay of kDecodingDelay buffers.
+ void DecryptAndDecodeVideo(const scoped_refptr<DecoderBuffer>& encrypted,
+ const Decryptor::VideoDecodeCB& video_decode_cb) {
+ num_decrypt_and_decode_calls_++;
+ if (!encrypted->end_of_stream())
+ num_frames_in_decryptor_++;
+
+ if (num_decrypt_and_decode_calls_ <= kDecodingDelay ||
+ num_frames_in_decryptor_ == 0) {
+ video_decode_cb.Run(Decryptor::kNeedMoreData,
+ scoped_refptr<VideoFrame>());
+ return;
+ }
+
+ num_frames_in_decryptor_--;
+ video_decode_cb.Run(Decryptor::kSuccess, decoded_video_frame_);
+ }
+
// Sets up expectations and actions to put DecryptingVideoDecoder in an
// active normal decoding state.
void EnterNormalDecodingState() {
- EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
- .WillOnce(RunCallback<1>(Decryptor::kSuccess, decoded_video_frame_))
- .WillRepeatedly(RunCallback<1>(Decryptor::kNeedMoreData,
- scoped_refptr<VideoFrame>()));
- ReadAndExpectFrameReadyWith(
- encrypted_buffer_, VideoDecoder::kOk, decoded_video_frame_);
+ EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _)).WillRepeatedly(
+ Invoke(this, &DecryptingVideoDecoderTest::DecryptAndDecodeVideo));
+ EXPECT_CALL(*this, FrameReady(decoded_video_frame_));
+ for (int i = 0; i < kDecodingDelay + 1; ++i)
+ DecodeAndExpect(encrypted_buffer_, VideoDecoder::kOk);
}
// Sets up expectations and actions to put DecryptingVideoDecoder in an end
// of stream state. This function must be called after
// EnterNormalDecodingState() to work.
void EnterEndOfStreamState() {
- ReadAndExpectFrameReadyWith(DecoderBuffer::CreateEOSBuffer(),
- VideoDecoder::kOk,
- end_of_stream_video_frame_);
+ // The codec in the |decryptor_| will be flushed.
+ EXPECT_CALL(*this, FrameReady(decoded_video_frame_))
+ .Times(kDecodingDelay);
+ DecodeAndExpect(DecoderBuffer::CreateEOSBuffer(), VideoDecoder::kOk);
+ EXPECT_EQ(0, num_frames_in_decryptor_);
}
// Make the video decode callback pending by saving and not firing it.
@@ -147,7 +166,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
.WillOnce(SaveArg<1>(&pending_video_decode_cb_));
decoder_->Decode(encrypted_buffer_,
- base::Bind(&DecryptingVideoDecoderTest::FrameReady,
+ base::Bind(&DecryptingVideoDecoderTest::DecodeDone,
base::Unretained(this)));
message_loop_.RunUntilIdle();
// Make sure the Decode() on the decoder triggers a DecryptAndDecode() on
@@ -159,7 +178,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kNoKey, null_video_frame_));
decoder_->Decode(encrypted_buffer_,
- base::Bind(&DecryptingVideoDecoderTest::FrameReady,
+ base::Bind(&DecryptingVideoDecoderTest::DecodeDone,
base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -191,26 +210,27 @@ class DecryptingVideoDecoderTest : public testing::Test {
}
void Stop() {
- EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kVideo,
- IsNullCallback()))
- .Times(AtMost(1));
EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kVideo))
.WillRepeatedly(InvokeWithoutArgs(
this, &DecryptingVideoDecoderTest::AbortAllPendingCBs));
- decoder_->Stop(NewExpectedClosure());
+ decoder_->Stop();
message_loop_.RunUntilIdle();
}
MOCK_METHOD1(RequestDecryptorNotification, void(const DecryptorReadyCB&));
- MOCK_METHOD2(FrameReady, void(VideoDecoder::Status,
- const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(FrameReady, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(DecodeDone, void(VideoDecoder::Status));
base::MessageLoop message_loop_;
scoped_ptr<DecryptingVideoDecoder> decoder_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
+ // Variables that help the |decryptor_| simulate decoding delay and flushing.
+ int num_decrypt_and_decode_calls_;
+ int num_frames_in_decryptor_;
+
Decryptor::DecoderInitCB pending_init_cb_;
Decryptor::NewKeyCB key_added_cb_;
Decryptor::VideoDecodeCB pending_video_decode_cb_;
@@ -219,7 +239,6 @@ class DecryptingVideoDecoderTest : public testing::Test {
scoped_refptr<DecoderBuffer> encrypted_buffer_;
scoped_refptr<VideoFrame> decoded_video_frame_;
scoped_refptr<VideoFrame> null_video_frame_;
- scoped_refptr<VideoFrame> end_of_stream_video_frame_;
private:
DISALLOW_COPY_AND_ASSIGN(DecryptingVideoDecoderTest);
@@ -260,6 +279,8 @@ TEST_F(DecryptingVideoDecoderTest, Reinitialize_Failure) {
EXPECT_CALL(*decryptor_, InitializeVideoDecoder(_, _))
.WillOnce(RunCallback<1>(false));
+ // Reinitialize() expects the reinitialization to succeed. Call
+ // InitializeAndExpectStatus() directly to test the reinitialization failure.
InitializeAndExpectStatus(TestVideoConfig::NormalEncrypted(),
DECODER_ERROR_NOT_SUPPORTED);
}
@@ -279,29 +300,10 @@ TEST_F(DecryptingVideoDecoderTest, DecryptAndDecode_DecodeError) {
.WillRepeatedly(RunCallback<1>(Decryptor::kError,
scoped_refptr<VideoFrame>(NULL)));
- ReadAndExpectFrameReadyWith(
- encrypted_buffer_, VideoDecoder::kDecodeError, null_video_frame_);
+ DecodeAndExpect(encrypted_buffer_, VideoDecoder::kDecodeError);
// After a decode error occurs, all subsequent Decode() calls return kDecodeError.
- ReadAndExpectFrameReadyWith(
- encrypted_buffer_, VideoDecoder::kDecodeError, null_video_frame_);
-}
-
-// Test the case where the decryptor returns kNeedMoreData to ask for more
-// buffers before it can produce a frame.
-TEST_F(DecryptingVideoDecoderTest, DecryptAndDecode_NeedMoreData) {
- Initialize();
-
- EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
- .WillOnce(RunCallback<1>(Decryptor::kNeedMoreData,
- scoped_refptr<VideoFrame>()))
- .WillRepeatedly(RunCallback<1>(Decryptor::kSuccess,
- decoded_video_frame_));
-
- ReadAndExpectFrameReadyWith(
- encrypted_buffer_, VideoDecoder::kNotEnoughData, decoded_video_frame_);
- ReadAndExpectFrameReadyWith(
- encrypted_buffer_, VideoDecoder::kOk, decoded_video_frame_);
+ DecodeAndExpect(encrypted_buffer_, VideoDecoder::kDecodeError);
}
// Test the case where the decryptor receives end-of-stream buffer.
@@ -320,7 +322,8 @@ TEST_F(DecryptingVideoDecoderTest, KeyAdded_DuringWaitingForKey) {
EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kSuccess,
decoded_video_frame_));
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, decoded_video_frame_));
+ EXPECT_CALL(*this, FrameReady(decoded_video_frame_));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kOk));
key_added_cb_.Run();
message_loop_.RunUntilIdle();
}
@@ -334,7 +337,8 @@ TEST_F(DecryptingVideoDecoderTest, KeyAdded_DruingPendingDecode) {
EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kSuccess,
decoded_video_frame_));
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, decoded_video_frame_));
+ EXPECT_CALL(*this, FrameReady(decoded_video_frame_));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kOk));
// The video decode callback is returned after the correct decryption key is
// added.
key_added_cb_.Run();
@@ -363,7 +367,7 @@ TEST_F(DecryptingVideoDecoderTest, Reset_DuringPendingDecode) {
Initialize();
EnterPendingDecodeState();
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
Reset();
}
@@ -373,7 +377,7 @@ TEST_F(DecryptingVideoDecoderTest, Reset_DuringWaitingForKey) {
Initialize();
EnterWaitingForKeyState();
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
Reset();
}
@@ -401,7 +405,10 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringDecryptorRequested) {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.WillOnce(SaveArg<0>(&decryptor_ready_cb));
decoder_->Initialize(TestVideoConfig::NormalEncrypted(),
- NewExpectedStatusCB(DECODER_ERROR_NOT_SUPPORTED));
+ false,
+ NewExpectedStatusCB(DECODER_ERROR_NOT_SUPPORTED),
+ base::Bind(&DecryptingVideoDecoderTest::FrameReady,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
// |decryptor_ready_cb| is saved but not called here.
EXPECT_FALSE(decryptor_ready_cb.is_null());
@@ -446,7 +453,7 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingDecode) {
Initialize();
EnterPendingDecodeState();
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
Stop();
}
@@ -456,7 +463,7 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringWaitingForKey) {
Initialize();
EnterWaitingForKeyState();
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
Stop();
}
@@ -478,7 +485,7 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingReset) {
EnterPendingDecodeState();
EXPECT_CALL(*decryptor_, ResetDecoder(Decryptor::kVideo));
- EXPECT_CALL(*this, FrameReady(VideoDecoder::kOk, IsNull()));
+ EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
decoder_->Reset(NewExpectedClosure());
Stop();
diff --git a/chromium/media/filters/fake_demuxer_stream.cc b/chromium/media/filters/fake_demuxer_stream.cc
index c6daa9f405f..78386e3b046 100644
--- a/chromium/media/filters/fake_demuxer_stream.cc
+++ b/chromium/media/filters/fake_demuxer_stream.cc
@@ -8,8 +8,8 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "media/base/bind_to_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_helpers.h"
#include "media/base/video_frame.h"
@@ -33,26 +33,34 @@ const uint8 kIv[] = {
FakeDemuxerStream::FakeDemuxerStream(int num_configs,
int num_buffers_in_one_config,
bool is_encrypted)
- : message_loop_(base::MessageLoopProxy::current()),
- num_configs_left_(num_configs),
+ : task_runner_(base::MessageLoopProxy::current()),
+ num_configs_(num_configs),
num_buffers_in_one_config_(num_buffers_in_one_config),
+ config_changes_(num_configs > 1),
is_encrypted_(is_encrypted),
- num_buffers_left_in_current_config_(num_buffers_in_one_config),
- num_buffers_returned_(0),
- current_timestamp_(base::TimeDelta::FromMilliseconds(kStartTimestampMs)),
- duration_(base::TimeDelta::FromMilliseconds(kDurationMs)),
- next_coded_size_(kStartWidth, kStartHeight),
- next_read_num_(0),
read_to_hold_(-1) {
- DCHECK_GT(num_configs_left_, 0);
- DCHECK_GT(num_buffers_in_one_config_, 0);
+ DCHECK_GT(num_configs, 0);
+ DCHECK_GT(num_buffers_in_one_config, 0);
+ Initialize();
UpdateVideoDecoderConfig();
}
FakeDemuxerStream::~FakeDemuxerStream() {}
+void FakeDemuxerStream::Initialize() {
+ DCHECK_EQ(-1, read_to_hold_);
+ num_configs_left_ = num_configs_;
+ num_buffers_left_in_current_config_ = num_buffers_in_one_config_;
+ num_buffers_returned_ = 0;
+ current_timestamp_ = base::TimeDelta::FromMilliseconds(kStartTimestampMs);
+ duration_ = base::TimeDelta::FromMilliseconds(kDurationMs);
+ splice_timestamp_ = kNoTimestamp();
+ next_coded_size_ = gfx::Size(kStartWidth, kStartHeight);
+ next_read_num_ = 0;
+}
+
void FakeDemuxerStream::Read(const ReadCB& read_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(read_cb_.is_null());
read_cb_ = BindToCurrentLoop(read_cb);
@@ -65,40 +73,44 @@ void FakeDemuxerStream::Read(const ReadCB& read_cb) {
}
AudioDecoderConfig FakeDemuxerStream::audio_decoder_config() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
NOTREACHED();
return AudioDecoderConfig();
}
VideoDecoderConfig FakeDemuxerStream::video_decoder_config() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return video_decoder_config_;
}
// TODO(xhwang): Support audio if needed.
DemuxerStream::Type FakeDemuxerStream::type() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return VIDEO;
}
void FakeDemuxerStream::EnableBitstreamConverter() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
+}
+
+bool FakeDemuxerStream::SupportsConfigChanges() {
+ return config_changes_;
}
void FakeDemuxerStream::HoldNextRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
read_to_hold_ = next_read_num_;
}
void FakeDemuxerStream::HoldNextConfigChangeRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Set |read_to_hold_| to be the next config change read.
read_to_hold_ = next_read_num_ + num_buffers_in_one_config_ -
next_read_num_ % (num_buffers_in_one_config_ + 1);
}
void FakeDemuxerStream::SatisfyRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(read_to_hold_, next_read_num_);
DCHECK(!read_cb_.is_null());
@@ -106,6 +118,15 @@ void FakeDemuxerStream::SatisfyRead() {
DoRead();
}
+void FakeDemuxerStream::SatisfyReadAndHoldNext() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(read_to_hold_, next_read_num_);
+ DCHECK(!read_cb_.is_null());
+
+ ++read_to_hold_;
+ DoRead();
+}
+
void FakeDemuxerStream::Reset() {
read_to_hold_ = -1;
@@ -113,6 +134,11 @@ void FakeDemuxerStream::Reset() {
base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
}
+void FakeDemuxerStream::SeekToStart() {
+ Reset();
+ Initialize();
+}
+
void FakeDemuxerStream::UpdateVideoDecoderConfig() {
const gfx::Rect kVisibleRect(kStartWidth, kStartHeight);
video_decoder_config_.Initialize(
@@ -123,7 +149,7 @@ void FakeDemuxerStream::UpdateVideoDecoderConfig() {
}
void FakeDemuxerStream::DoRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!read_cb_.is_null());
next_read_num_++;
@@ -151,11 +177,11 @@ void FakeDemuxerStream::DoRead() {
buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(
new DecryptConfig(std::string(kKeyId, kKeyId + arraysize(kKeyId)),
std::string(kIv, kIv + arraysize(kIv)),
- 0,
std::vector<SubsampleEntry>())));
}
buffer->set_timestamp(current_timestamp_);
buffer->set_duration(duration_);
+ buffer->set_splice_timestamp(splice_timestamp_);
current_timestamp_ += duration_;
num_buffers_left_in_current_config_--;
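The index arithmetic in HoldNextConfigChangeRead() above is easiest to verify with concrete numbers. Reads are issued in cycles of num_buffers_in_one_config_ + 1: the buffers of one config followed by a single kConfigChanged read. A worked example, assuming 9 buffers per config (cycle length 10):

    // next_read_num_ = 3  -> hold 3  + 9 - (3  % 10) = 9   (1st config change)
    // next_read_num_ = 9  -> hold 9  + 9 - (9  % 10) = 9   (already due)
    // next_read_num_ = 12 -> hold 12 + 9 - (12 % 10) = 19  (2nd config change)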
diff --git a/chromium/media/filters/fake_demuxer_stream.h b/chromium/media/filters/fake_demuxer_stream.h
index 4ed479687e7..bacf0bddefd 100644
--- a/chromium/media/filters/fake_demuxer_stream.h
+++ b/chromium/media/filters/fake_demuxer_stream.h
@@ -12,7 +12,7 @@
#include "media/base/video_decoder_config.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
} // namespace base
namespace media {
@@ -33,6 +33,9 @@ class FakeDemuxerStream : public DemuxerStream {
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
virtual Type type() OVERRIDE;
virtual void EnableBitstreamConverter() OVERRIDE;
+ virtual bool SupportsConfigChanges() OVERRIDE;
+
+ void Initialize();
int num_buffers_returned() const { return num_buffers_returned_; }
@@ -48,19 +51,34 @@ class FakeDemuxerStream : public DemuxerStream {
// Satisfies the pending read with the next scheduled status and buffer.
void SatisfyRead();
+ // Satisfies the pending read request and then holds the following read.
+ void SatisfyReadAndHoldNext();
+
// Satisfies the pending read (if any) with kAborted and NULL. This call
// always clears |hold_next_read_|.
void Reset();
+ // Resets this demuxer stream and moves the reading position back to the
+ // start of the stream.
+ void SeekToStart();
+
+ // Sets the splice timestamp for all future buffers returned via Read().
+ void set_splice_timestamp(base::TimeDelta splice_timestamp) {
+ splice_timestamp_ = splice_timestamp;
+ }
+
private:
void UpdateVideoDecoderConfig();
void DoRead();
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ const int num_configs_;
+ const int num_buffers_in_one_config_;
+ const bool config_changes_;
+ const bool is_encrypted_;
int num_configs_left_;
- int num_buffers_in_one_config_;
- bool is_encrypted_;
// Number of frames left with the current decoder config.
int num_buffers_left_in_current_config_;
@@ -69,6 +87,7 @@ class FakeDemuxerStream : public DemuxerStream {
base::TimeDelta current_timestamp_;
base::TimeDelta duration_;
+ base::TimeDelta splice_timestamp_;
gfx::Size next_coded_size_;
VideoDecoderConfig video_decoder_config_;
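Taken together, the new declarations let a test rewind and splice-mark a fake stream without constructing a new one. A minimal usage sketch against the interface above (a hypothetical snippet, not code from this patch):

    FakeDemuxerStream stream(3, 5, false);  // 3 configs, 5 buffers each.
    stream.set_splice_timestamp(base::TimeDelta::FromMilliseconds(100));
    // ...Read() some buffers; each one now carries the splice timestamp...
    stream.SeekToStart();  // Reset() plus Initialize(): back to buffer 0.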
diff --git a/chromium/media/filters/fake_demuxer_stream_unittest.cc b/chromium/media/filters/fake_demuxer_stream_unittest.cc
index 553a2f080c3..ced749623cf 100644
--- a/chromium/media/filters/fake_demuxer_stream_unittest.cc
+++ b/chromium/media/filters/fake_demuxer_stream_unittest.cc
@@ -13,9 +13,9 @@
namespace media {
-static const int kNumBuffersInOneConfig = 9;
-static const int kNumBuffersToReadFirst = 5;
-static const int kNumConfigs = 3;
+const int kNumBuffersInOneConfig = 9;
+const int kNumBuffersToReadFirst = 5;
+const int kNumConfigs = 3;
COMPILE_ASSERT(kNumBuffersToReadFirst < kNumBuffersInOneConfig,
do_not_read_too_many_buffers);
COMPILE_ASSERT(kNumConfigs > 0, need_multiple_configs_to_trigger_config_change);
@@ -24,7 +24,8 @@ class FakeDemuxerStreamTest : public testing::Test {
public:
FakeDemuxerStreamTest()
: status_(DemuxerStream::kAborted),
- read_pending_(false) {}
+ read_pending_(false),
+ num_buffers_received_(0) {}
virtual ~FakeDemuxerStreamTest() {}
void BufferReady(DemuxerStream::Status status,
@@ -33,6 +34,8 @@ class FakeDemuxerStreamTest : public testing::Test {
read_pending_ = false;
status_ = status;
buffer_ = buffer;
+ if (status == DemuxerStream::kOk && !buffer->end_of_stream())
+ num_buffers_received_++;
}
enum ReadResult {
@@ -48,12 +51,14 @@ class FakeDemuxerStreamTest : public testing::Test {
new FakeDemuxerStream(kNumConfigs, kNumBuffersInOneConfig, false));
for (int i = 0; i < kNumBuffersToReadFirst; ++i)
ReadAndExpect(OK);
+ DCHECK_EQ(kNumBuffersToReadFirst, num_buffers_received_);
}
void EnterBeforeEOSState() {
stream_.reset(new FakeDemuxerStream(1, kNumBuffersInOneConfig, false));
for (int i = 0; i < kNumBuffersInOneConfig; ++i)
ReadAndExpect(OK);
+ DCHECK_EQ(kNumBuffersInOneConfig, num_buffers_received_);
}
void ExpectReadResult(ReadResult result) {
@@ -72,6 +77,7 @@ class FakeDemuxerStreamTest : public testing::Test {
break;
case CONFIG_CHANGED:
+ EXPECT_TRUE(stream_->SupportsConfigChanges());
EXPECT_FALSE(read_pending_);
EXPECT_EQ(DemuxerStream::kConfigChanged, status_);
EXPECT_FALSE(buffer_.get());
@@ -127,23 +133,12 @@ class FakeDemuxerStreamTest : public testing::Test {
ExpectReadResult(ABORTED);
}
- void TestRead(int num_configs,
- int num_buffers_in_one_config,
- bool is_encrypted) {
- stream_.reset(new FakeDemuxerStream(
- num_configs, num_buffers_in_one_config, is_encrypted));
-
- int num_buffers_received = 0;
-
- const VideoDecoderConfig& config = stream_->video_decoder_config();
- EXPECT_TRUE(config.IsValidConfig());
- EXPECT_EQ(is_encrypted, config.is_encrypted());
-
+ void ReadAllBuffers(int num_configs, int num_buffers_in_one_config) {
+ DCHECK_EQ(0, num_buffers_received_);
for (int i = 0; i < num_configs; ++i) {
for (int j = 0; j < num_buffers_in_one_config; ++j) {
ReadAndExpect(OK);
- num_buffers_received++;
- EXPECT_EQ(num_buffers_received, stream_->num_buffers_returned());
+ EXPECT_EQ(num_buffers_received_, stream_->num_buffers_returned());
}
if (i == num_configs - 1)
@@ -155,7 +150,20 @@ class FakeDemuxerStreamTest : public testing::Test {
// Will always get EOS after we hit EOS.
ReadAndExpect(EOS);
- EXPECT_EQ(num_configs * num_buffers_in_one_config, num_buffers_received);
+ EXPECT_EQ(num_configs * num_buffers_in_one_config, num_buffers_received_);
+ }
+
+ void TestRead(int num_configs,
+ int num_buffers_in_one_config,
+ bool is_encrypted) {
+ stream_.reset(new FakeDemuxerStream(
+ num_configs, num_buffers_in_one_config, is_encrypted));
+
+ const VideoDecoderConfig& config = stream_->video_decoder_config();
+ EXPECT_TRUE(config.IsValidConfig());
+ EXPECT_EQ(is_encrypted, config.is_encrypted());
+
+ ReadAllBuffers(num_configs, num_buffers_in_one_config);
}
base::MessageLoop message_loop_;
@@ -244,4 +252,34 @@ TEST_F(FakeDemuxerStreamTest, Reset_BeforeEOS) {
ReadAndExpect(EOS);
}
+TEST_F(FakeDemuxerStreamTest, NoConfigChanges) {
+ stream_.reset(
+ new FakeDemuxerStream(1, kNumBuffersInOneConfig, false));
+ EXPECT_FALSE(stream_->SupportsConfigChanges());
+ for (int i = 0; i < kNumBuffersInOneConfig; ++i)
+ ReadAndExpect(OK);
+ ReadAndExpect(EOS);
+}
+
+TEST_F(FakeDemuxerStreamTest, SeekToStart_Normal) {
+ EnterNormalReadState();
+ stream_->SeekToStart();
+ num_buffers_received_ = 0;
+ ReadAllBuffers(kNumConfigs, kNumBuffersInOneConfig);
+}
+
+TEST_F(FakeDemuxerStreamTest, SeekToStart_BeforeEOS) {
+ EnterBeforeEOSState();
+ stream_->SeekToStart();
+ num_buffers_received_ = 0;
+ ReadAllBuffers(1, kNumBuffersInOneConfig);
+}
+
+TEST_F(FakeDemuxerStreamTest, SeekToStart_AfterEOS) {
+ TestRead(3, 5, false);
+ stream_->SeekToStart();
+ num_buffers_received_ = 0;
+ ReadAllBuffers(3, 5);
+}
+
} // namespace media
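For the constants above (kNumConfigs = 3, kNumBuffersInOneConfig = 9), ReadAllBuffers() performs a fixed, checkable number of reads; a quick tally, assuming one kConfigChanged read between configs and a terminal EOS:

    // 3 configs * 9 buffers = 27 reads returning kOk
    // kNumConfigs - 1       =  2 reads returning kConfigChanged
    // end of stream         =  1+ reads returning EOS (EOS repeats)
    // num_buffers_returned() == 27 once the stream is exhausted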
diff --git a/chromium/media/filters/fake_video_decoder.cc b/chromium/media/filters/fake_video_decoder.cc
index dbb16db0d9e..1df718227a5 100644
--- a/chromium/media/filters/fake_video_decoder.cc
+++ b/chromium/media/filters/fake_video_decoder.cc
@@ -8,187 +8,238 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/test_helpers.h"
namespace media {
-FakeVideoDecoder::FakeVideoDecoder(int decoding_delay)
- : message_loop_(base::MessageLoopProxy::current()),
- weak_factory_(this),
- decoding_delay_(decoding_delay),
- state_(UNINITIALIZED),
- total_bytes_decoded_(0) {
+FakeVideoDecoder::FakeVideoDecoder(int decoding_delay,
+ int max_parallel_decoding_requests)
+ : decoding_delay_(decoding_delay),
+ max_parallel_decoding_requests_(max_parallel_decoding_requests),
+ state_(STATE_UNINITIALIZED),
+ hold_decode_(false),
+ total_bytes_decoded_(0),
+ weak_factory_(this) {
DCHECK_GE(decoding_delay, 0);
}
FakeVideoDecoder::~FakeVideoDecoder() {
- DCHECK_EQ(state_, UNINITIALIZED);
+ DCHECK_EQ(state_, STATE_UNINITIALIZED);
}
void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(config.IsValidConfig());
- DCHECK(decode_cb_.IsNull()) << "No reinitialization during pending decode.";
+ DCHECK(held_decode_callbacks_.empty())
+ << "No reinitialization during pending decode.";
DCHECK(reset_cb_.IsNull()) << "No reinitialization during pending reset.";
- weak_this_ = weak_factory_.GetWeakPtr();
-
current_config_ = config;
init_cb_.SetCallback(BindToCurrentLoop(status_cb));
+ // Don't need BindToCurrentLoop() because |output_cb_| is only called from
+ // RunDecodeCallback() which is posted from Decode().
+ output_cb_ = output_cb;
+
if (!decoded_frames_.empty()) {
DVLOG(1) << "Decoded frames dropped during reinitialization.";
decoded_frames_.clear();
}
- state_ = NORMAL;
+ state_ = STATE_NORMAL;
init_cb_.RunOrHold(PIPELINE_OK);
}
void FakeVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.IsNull()) << "Overlapping decodes are not supported.";
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(reset_cb_.IsNull());
- DCHECK_LE(decoded_frames_.size(), static_cast<size_t>(decoding_delay_));
+ DCHECK_LE(decoded_frames_.size(),
+ decoding_delay_ + held_decode_callbacks_.size());
+ DCHECK_LT(static_cast<int>(held_decode_callbacks_.size()),
+ max_parallel_decoding_requests_);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
- decode_cb_.SetCallback(BindToCurrentLoop(base::Bind(
- &FakeVideoDecoder::OnFrameDecoded, weak_this_, buffer_size, decode_cb)));
+ DecodeCB wrapped_decode_cb =
+ BindToCurrentLoop(base::Bind(&FakeVideoDecoder::OnFrameDecoded,
+ weak_factory_.GetWeakPtr(),
+ buffer_size, decode_cb));
- if (buffer->end_of_stream() && decoded_frames_.empty()) {
- decode_cb_.RunOrHold(kOk, VideoFrame::CreateEOSFrame());
+ if (state_ == STATE_ERROR) {
+ wrapped_decode_cb.Run(kDecodeError);
return;
}
- if (!buffer->end_of_stream()) {
+ if (buffer->end_of_stream()) {
+ state_ = STATE_END_OF_STREAM;
+ } else {
DCHECK(VerifyFakeVideoBufferForTest(buffer, current_config_));
scoped_refptr<VideoFrame> video_frame = VideoFrame::CreateColorFrame(
current_config_.coded_size(), 0, 0, 0, buffer->timestamp());
decoded_frames_.push_back(video_frame);
-
- if (decoded_frames_.size() <= static_cast<size_t>(decoding_delay_)) {
- decode_cb_.RunOrHold(kNotEnoughData, scoped_refptr<VideoFrame>());
- return;
- }
}
- scoped_refptr<VideoFrame> frame = decoded_frames_.front();
- decoded_frames_.pop_front();
- decode_cb_.RunOrHold(kOk, frame);
+ RunOrHoldDecode(wrapped_decode_cb);
}
void FakeVideoDecoder::Reset(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(reset_cb_.IsNull());
+
reset_cb_.SetCallback(BindToCurrentLoop(closure));
+ decoded_frames_.clear();
// Defer the reset if a decode is pending.
- if (!decode_cb_.IsNull())
+ if (!held_decode_callbacks_.empty())
return;
DoReset();
}
-void FakeVideoDecoder::Stop(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- stop_cb_.SetCallback(BindToCurrentLoop(closure));
+void FakeVideoDecoder::Stop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
- // Defer the stop if an init, a decode or a reset is pending.
- if (!init_cb_.IsNull() || !decode_cb_.IsNull() || !reset_cb_.IsNull())
- return;
+ if (!init_cb_.IsNull())
+ SatisfyInit();
+ if (!held_decode_callbacks_.empty())
+ SatisfyDecode();
+ if (!reset_cb_.IsNull())
+ SatisfyReset();
- DoStop();
+ decoded_frames_.clear();
+ state_ = STATE_UNINITIALIZED;
}
void FakeVideoDecoder::HoldNextInit() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
init_cb_.HoldCallback();
}
-void FakeVideoDecoder::HoldNextRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- decode_cb_.HoldCallback();
+void FakeVideoDecoder::HoldDecode() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ hold_decode_ = true;
}
void FakeVideoDecoder::HoldNextReset() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
reset_cb_.HoldCallback();
}
-void FakeVideoDecoder::HoldNextStop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- stop_cb_.HoldCallback();
-}
-
void FakeVideoDecoder::SatisfyInit() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.IsNull());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(held_decode_callbacks_.empty());
DCHECK(reset_cb_.IsNull());
init_cb_.RunHeldCallback();
+}
- if (!stop_cb_.IsNull())
- DoStop();
+void FakeVideoDecoder::SatisfyDecode() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(hold_decode_);
+
+ hold_decode_ = false;
+
+ while (!held_decode_callbacks_.empty()) {
+ SatisfySingleDecode();
+ }
}
-void FakeVideoDecoder::SatisfyRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- decode_cb_.RunHeldCallback();
+void FakeVideoDecoder::SatisfySingleDecode() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!held_decode_callbacks_.empty());
- if (!reset_cb_.IsNull())
- DoReset();
+ DecodeCB decode_cb = held_decode_callbacks_.front();
+ held_decode_callbacks_.pop_front();
+ RunDecodeCallback(decode_cb);
- if (reset_cb_.IsNull() && !stop_cb_.IsNull())
- DoStop();
+ if (!reset_cb_.IsNull() && held_decode_callbacks_.empty())
+ DoReset();
}
void FakeVideoDecoder::SatisfyReset() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.IsNull());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(held_decode_callbacks_.empty());
reset_cb_.RunHeldCallback();
+}
+
+void FakeVideoDecoder::SimulateError() {
+ DCHECK(thread_checker_.CalledOnValidThread());
- if (!stop_cb_.IsNull())
- DoStop();
+ state_ = STATE_ERROR;
+ while (!held_decode_callbacks_.empty()) {
+ held_decode_callbacks_.front().Run(kDecodeError);
+ held_decode_callbacks_.pop_front();
+ }
+ decoded_frames_.clear();
}
-void FakeVideoDecoder::SatisfyStop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.IsNull());
- DCHECK(reset_cb_.IsNull());
- stop_cb_.RunHeldCallback();
+int FakeVideoDecoder::GetMaxDecodeRequests() const {
+ return max_parallel_decoding_requests_;
}
-void FakeVideoDecoder::DoReset() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.IsNull());
- DCHECK(!reset_cb_.IsNull());
+void FakeVideoDecoder::OnFrameDecoded(int buffer_size,
+ const DecodeCB& decode_cb,
+ Status status) {
+ DCHECK(thread_checker_.CalledOnValidThread());
- decoded_frames_.clear();
- reset_cb_.RunOrHold();
+ if (status == kOk)
+ total_bytes_decoded_ += buffer_size;
+ decode_cb.Run(status);
}
-void FakeVideoDecoder::DoStop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.IsNull());
- DCHECK(reset_cb_.IsNull());
- DCHECK(!stop_cb_.IsNull());
+void FakeVideoDecoder::RunOrHoldDecode(const DecodeCB& decode_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
- state_ = UNINITIALIZED;
- decoded_frames_.clear();
- stop_cb_.RunOrHold();
+ if (hold_decode_) {
+ held_decode_callbacks_.push_back(decode_cb);
+ } else {
+ DCHECK(held_decode_callbacks_.empty());
+ RunDecodeCallback(decode_cb);
+ }
}
-void FakeVideoDecoder::OnFrameDecoded(
- int buffer_size,
- const DecodeCB& decode_cb,
- Status status,
- const scoped_refptr<VideoFrame>& video_frame) {
- if (status == kOk || status == kNotEnoughData)
- total_bytes_decoded_ += buffer_size;
- decode_cb.Run(status, video_frame);
+void FakeVideoDecoder::RunDecodeCallback(const DecodeCB& decode_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (!reset_cb_.IsNull()) {
+ DCHECK(decoded_frames_.empty());
+ decode_cb.Run(kAborted);
+ return;
+ }
+
+ // Make sure we leave |decoding_delay_| frames in the queue, plus one frame
+ // for each pending decode callback other than the current one.
+ if (decoded_frames_.size() >
+ decoding_delay_ + held_decode_callbacks_.size()) {
+ output_cb_.Run(decoded_frames_.front());
+ decoded_frames_.pop_front();
+ } else if (state_ == STATE_END_OF_STREAM) {
+ // Drain the queue if this was the last pending request in the stream;
+ // otherwise pop just the next frame from the queue.
+ if (held_decode_callbacks_.empty()) {
+ while (!decoded_frames_.empty()) {
+ output_cb_.Run(decoded_frames_.front());
+ decoded_frames_.pop_front();
+ }
+ } else if (!decoded_frames_.empty()) {
+ output_cb_.Run(decoded_frames_.front());
+ decoded_frames_.pop_front();
+ }
+ }
+
+ decode_cb.Run(kOk);
+}
+
+void FakeVideoDecoder::DoReset() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(held_decode_callbacks_.empty());
+ DCHECK(!reset_cb_.IsNull());
+
+ reset_cb_.RunOrHold();
}
} // namespace media
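The queueing rule in RunDecodeCallback() above is worth tracing once. With a decoding delay of 2, no held callbacks, and no reset pending, the fake decoder withholds the first two frames and drains the queue at end of stream. A sketch of the observable sequence (illustrative, not from this patch):

    // Decode(b0)  -> DecodeDone(kOk), no output          (queue: f0)
    // Decode(b1)  -> DecodeDone(kOk), no output          (queue: f0 f1)
    // Decode(b2)  -> FrameReady(f0), DecodeDone(kOk)     (queue: f1 f2)
    // Decode(EOS) -> FrameReady(f1), FrameReady(f2), DecodeDone(kOk)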
diff --git a/chromium/media/filters/fake_video_decoder.h b/chromium/media/filters/fake_video_decoder.h
index c1cf1a6a4cf..21cb2a1f796 100644
--- a/chromium/media/filters/fake_video_decoder.h
+++ b/chromium/media/filters/fake_video_decoder.h
@@ -11,6 +11,7 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/base/callback_holder.h"
#include "media/base/decoder_buffer.h"
#include "media/base/pipeline_status.h"
@@ -22,7 +23,7 @@
using base::ResetAndReturn;
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -30,59 +31,75 @@ namespace media {
class FakeVideoDecoder : public VideoDecoder {
public:
// Constructs an object with a decoding delay of |decoding_delay| frames.
- explicit FakeVideoDecoder(int decoding_delay);
+ FakeVideoDecoder(int decoding_delay,
+ int max_parallel_decoding_requests);
virtual ~FakeVideoDecoder();
// VideoDecoder implementation.
virtual void Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) OVERRIDE;
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) OVERRIDE;
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop(const base::Closure& closure) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual int GetMaxDecodeRequests() const OVERRIDE;
- // Holds the next init/read/reset/stop callback from firing.
+ // Holds the next init/decode/reset callback from firing.
void HoldNextInit();
- void HoldNextRead();
+ void HoldDecode();
void HoldNextReset();
- void HoldNextStop();
- // Satisfies the pending init/read/reset/stop callback, which must be ready
- // to fire when these methods are called.
+ // Satisfies the pending init/decode/reset callback, which must be ready to
+ // fire when these methods are called.
void SatisfyInit();
- void SatisfyRead();
+ void SatisfyDecode();
void SatisfyReset();
- void SatisfyStop();
+
+ // Satisfies a single held decode request.
+ void SatisfySingleDecode();
+
+ void SimulateError();
int total_bytes_decoded() const { return total_bytes_decoded_; }
private:
enum State {
- UNINITIALIZED,
- NORMAL
+ STATE_UNINITIALIZED,
+ STATE_NORMAL,
+ STATE_END_OF_STREAM,
+ STATE_ERROR,
};
// Callback for updating |total_bytes_decoded_|.
void OnFrameDecoded(int buffer_size,
- const DecodeCB& read_cb,
- Status status,
- const scoped_refptr<VideoFrame>& video_frame);
+ const DecodeCB& decode_cb,
+ Status status);
+
+ // Runs |decode_cb| or puts it into |held_decode_callbacks_|, depending on
+ // the current value of |hold_decode_|.
+ void RunOrHoldDecode(const DecodeCB& decode_cb);
+
+ // Runs |decode_cb| with a frame from |decoded_frames_|.
+ void RunDecodeCallback(const DecodeCB& decode_cb);
void DoReset();
- void DoStop();
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<FakeVideoDecoder> weak_factory_;
- base::WeakPtr<FakeVideoDecoder> weak_this_;
+ base::ThreadChecker thread_checker_;
- const int decoding_delay_;
+ const size_t decoding_delay_;
+ const int max_parallel_decoding_requests_;
State state_;
CallbackHolder<PipelineStatusCB> init_cb_;
- CallbackHolder<DecodeCB> decode_cb_;
CallbackHolder<base::Closure> reset_cb_;
- CallbackHolder<base::Closure> stop_cb_;
+
+ OutputCB output_cb_;
+
+ bool hold_decode_;
+ std::list<DecodeCB> held_decode_callbacks_;
VideoDecoderConfig current_config_;
@@ -90,6 +107,9 @@ class FakeVideoDecoder : public VideoDecoder {
int total_bytes_decoded_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<FakeVideoDecoder> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FakeVideoDecoder);
};
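The NOTE above about weak-pointer ordering encodes a general Chromium idiom: members are destroyed in reverse declaration order, so declaring the WeakPtrFactory last guarantees that outstanding weak pointers are invalidated before any other member is torn down. A generic sketch of the pattern (not specific to this class):

    class Worker {
     public:
      Worker() : weak_factory_(this) {}
      base::WeakPtr<Worker> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }

     private:
      // ...all other members are declared first...
      base::WeakPtrFactory<Worker> weak_factory_;  // Keep last.
    };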
diff --git a/chromium/media/filters/fake_video_decoder_unittest.cc b/chromium/media/filters/fake_video_decoder_unittest.cc
index 0aa3b5fc31d..2772b54ffbf 100644
--- a/chromium/media/filters/fake_video_decoder_unittest.cc
+++ b/chromium/media/filters/fake_video_decoder_unittest.cc
@@ -14,27 +14,38 @@
namespace media {
-static const int kDecodingDelay = 9;
static const int kTotalBuffers = 12;
static const int kDurationMs = 30;
-class FakeVideoDecoderTest : public testing::Test {
+struct FakeVideoDecoderTestParams {
+ FakeVideoDecoderTestParams(int decoding_delay, int max_decode_requests)
+ : decoding_delay(decoding_delay),
+ max_decode_requests(max_decode_requests) {}
+ int decoding_delay;
+ int max_decode_requests;
+};
+
+class FakeVideoDecoderTest
+ : public testing::Test,
+ public testing::WithParamInterface<FakeVideoDecoderTestParams> {
public:
FakeVideoDecoderTest()
- : decoder_(new FakeVideoDecoder(kDecodingDelay)),
+ : decoder_(new FakeVideoDecoder(GetParam().decoding_delay,
+ GetParam().max_decode_requests)),
num_input_buffers_(0),
num_decoded_frames_(0),
- decode_status_(VideoDecoder::kNotEnoughData),
- is_decode_pending_(false),
- is_reset_pending_(false),
- is_stop_pending_(false) {}
+ last_decode_status_(VideoDecoder::kOk),
+ pending_decode_requests_(0),
+ is_reset_pending_(false) {}
virtual ~FakeVideoDecoderTest() {
- StopAndExpect(OK);
+ Stop();
}
void InitializeWithConfig(const VideoDecoderConfig& config) {
- decoder_->Initialize(config, NewExpectedStatusCB(PIPELINE_OK));
+ decoder_->Initialize(
+ config, false, NewExpectedStatusCB(PIPELINE_OK),
+ base::Bind(&FakeVideoDecoderTest::FrameReady, base::Unretained(this)));
message_loop_.RunUntilIdle();
current_config_ = config;
}
@@ -53,55 +64,45 @@ class FakeVideoDecoderTest : public testing::Test {
message_loop_.RunUntilIdle();
}
- // Callback for VideoDecoder::Read().
- void FrameReady(VideoDecoder::Status status,
- const scoped_refptr<VideoFrame>& frame) {
- DCHECK(is_decode_pending_);
- ASSERT_TRUE(status == VideoDecoder::kOk ||
- status == VideoDecoder::kNotEnoughData);
- is_decode_pending_ = false;
- decode_status_ = status;
- frame_decoded_ = frame;
-
- if (frame && !frame->end_of_stream())
- num_decoded_frames_++;
+ // Callback for VideoDecoder::Decode().
+ void DecodeDone(VideoDecoder::Status status) {
+ DCHECK_GT(pending_decode_requests_, 0);
+ --pending_decode_requests_;
+ last_decode_status_ = status;
+ }
+
+ void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ DCHECK(!frame->end_of_stream());
+ last_decoded_frame_ = frame;
+ num_decoded_frames_++;
}
enum CallbackResult {
PENDING,
OK,
NOT_ENOUGH_DATA,
- ABROTED,
- EOS
+ ABORTED
};
void ExpectReadResult(CallbackResult result) {
switch (result) {
case PENDING:
- EXPECT_TRUE(is_decode_pending_);
- ASSERT_FALSE(frame_decoded_);
+ EXPECT_GT(pending_decode_requests_, 0);
break;
case OK:
- EXPECT_FALSE(is_decode_pending_);
- ASSERT_EQ(VideoDecoder::kOk, decode_status_);
- ASSERT_TRUE(frame_decoded_);
- EXPECT_FALSE(frame_decoded_->end_of_stream());
+ EXPECT_EQ(0, pending_decode_requests_);
+ ASSERT_EQ(VideoDecoder::kOk, last_decode_status_);
+ ASSERT_TRUE(last_decoded_frame_);
break;
case NOT_ENOUGH_DATA:
- EXPECT_FALSE(is_decode_pending_);
- ASSERT_EQ(VideoDecoder::kNotEnoughData, decode_status_);
- ASSERT_FALSE(frame_decoded_);
- break;
- case ABROTED:
- EXPECT_FALSE(is_decode_pending_);
- ASSERT_EQ(VideoDecoder::kOk, decode_status_);
- EXPECT_FALSE(frame_decoded_);
+ EXPECT_EQ(0, pending_decode_requests_);
+ ASSERT_EQ(VideoDecoder::kOk, last_decode_status_);
+ ASSERT_FALSE(last_decoded_frame_);
break;
- case EOS:
- EXPECT_FALSE(is_decode_pending_);
- ASSERT_EQ(VideoDecoder::kOk, decode_status_);
- ASSERT_TRUE(frame_decoded_);
- EXPECT_TRUE(frame_decoded_->end_of_stream());
+ case ABORTED:
+ EXPECT_EQ(0, pending_decode_requests_);
+ ASSERT_EQ(VideoDecoder::kAborted, last_decode_status_);
+ EXPECT_FALSE(last_decoded_frame_);
break;
}
}
@@ -114,50 +115,48 @@ class FakeVideoDecoderTest : public testing::Test {
current_config_,
base::TimeDelta::FromMilliseconds(kDurationMs * num_input_buffers_),
base::TimeDelta::FromMilliseconds(kDurationMs));
- num_input_buffers_++;
} else {
buffer = DecoderBuffer::CreateEOSBuffer();
}
- decode_status_ = VideoDecoder::kDecodeError;
- frame_decoded_ = NULL;
- is_decode_pending_ = true;
+ ++num_input_buffers_;
+ ++pending_decode_requests_;
decoder_->Decode(
buffer,
- base::Bind(&FakeVideoDecoderTest::FrameReady, base::Unretained(this)));
+ base::Bind(&FakeVideoDecoderTest::DecodeDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
void ReadOneFrame() {
+ last_decoded_frame_ = NULL;
do {
Decode();
- } while (decode_status_ == VideoDecoder::kNotEnoughData &&
- !is_decode_pending_);
+ } while (!last_decoded_frame_ && pending_decode_requests_ == 0);
}
- void ReadUntilEOS() {
+ void ReadAllFrames() {
do {
- ReadOneFrame();
- } while (frame_decoded_ && !frame_decoded_->end_of_stream());
+ Decode();
+ } while (num_input_buffers_ <= kTotalBuffers); // All input buffers + EOS.
}
void EnterPendingReadState() {
// Pass the initial NOT_ENOUGH_DATA stage.
ReadOneFrame();
- decoder_->HoldNextRead();
+ decoder_->HoldDecode();
ReadOneFrame();
ExpectReadResult(PENDING);
}
- void SatisfyReadAndExpect(CallbackResult result) {
- decoder_->SatisfyRead();
+ void SatisfyDecodeAndExpect(CallbackResult result) {
+ decoder_->SatisfyDecode();
message_loop_.RunUntilIdle();
ExpectReadResult(result);
}
void SatisfyRead() {
- SatisfyReadAndExpect(OK);
+ SatisfyDecodeAndExpect(OK);
}
// Callback for VideoDecoder::Reset().
@@ -198,42 +197,13 @@ class FakeVideoDecoderTest : public testing::Test {
ExpectResetResult(OK);
}
- // Callback for VideoDecoder::Stop().
- void OnDecoderStopped() {
- DCHECK(is_stop_pending_);
- is_stop_pending_ = false;
- }
-
- void ExpectStopResult(CallbackResult result) {
- switch (result) {
- case PENDING:
- EXPECT_TRUE(is_stop_pending_);
- break;
- case OK:
- EXPECT_FALSE(is_stop_pending_);
- break;
- default:
- NOTREACHED();
- }
- }
-
- void StopAndExpect(CallbackResult result) {
- is_stop_pending_ = true;
- decoder_->Stop(base::Bind(&FakeVideoDecoderTest::OnDecoderStopped,
- base::Unretained(this)));
+ void Stop() {
+ decoder_->Stop();
message_loop_.RunUntilIdle();
- ExpectStopResult(result);
- }
-
- void EnterPendingStopState() {
- decoder_->HoldNextStop();
- StopAndExpect(PENDING);
- }
- void SatisfyStop() {
- decoder_->SatisfyStop();
- message_loop_.RunUntilIdle();
- ExpectStopResult(OK);
+ // All pending callbacks must have been fired.
+ DCHECK_EQ(pending_decode_requests_, 0);
+ DCHECK(!is_reset_pending_);
}
base::MessageLoop message_loop_;
@@ -245,37 +215,46 @@ class FakeVideoDecoderTest : public testing::Test {
int num_decoded_frames_;
// Callback result/status.
- VideoDecoder::Status decode_status_;
- scoped_refptr<VideoFrame> frame_decoded_;
- bool is_decode_pending_;
+ VideoDecoder::Status last_decode_status_;
+ scoped_refptr<VideoFrame> last_decoded_frame_;
+ int pending_decode_requests_;
bool is_reset_pending_;
- bool is_stop_pending_;
private:
DISALLOW_COPY_AND_ASSIGN(FakeVideoDecoderTest);
};
-TEST_F(FakeVideoDecoderTest, Initialize) {
+INSTANTIATE_TEST_CASE_P(NoParallelDecode,
+ FakeVideoDecoderTest,
+ ::testing::Values(FakeVideoDecoderTestParams(9, 1),
+ FakeVideoDecoderTestParams(0, 1)));
+INSTANTIATE_TEST_CASE_P(ParallelDecode,
+ FakeVideoDecoderTest,
+ ::testing::Values(FakeVideoDecoderTestParams(9, 3),
+ FakeVideoDecoderTestParams(0, 3)));
+
+TEST_P(FakeVideoDecoderTest, Initialize) {
Initialize();
}
-TEST_F(FakeVideoDecoderTest, Read_AllFrames) {
+TEST_P(FakeVideoDecoderTest, Read_AllFrames) {
Initialize();
- ReadUntilEOS();
+ ReadAllFrames();
EXPECT_EQ(kTotalBuffers, num_decoded_frames_);
}
-TEST_F(FakeVideoDecoderTest, Read_DecodingDelay) {
+TEST_P(FakeVideoDecoderTest, Read_DecodingDelay) {
Initialize();
while (num_input_buffers_ < kTotalBuffers) {
ReadOneFrame();
- EXPECT_EQ(num_input_buffers_, num_decoded_frames_ + kDecodingDelay);
+ EXPECT_EQ(num_input_buffers_,
+ num_decoded_frames_ + GetParam().decoding_delay);
}
}
-TEST_F(FakeVideoDecoderTest, Read_ZeroDelay) {
- decoder_.reset(new FakeVideoDecoder(0));
+TEST_P(FakeVideoDecoderTest, Read_ZeroDelay) {
+ decoder_.reset(new FakeVideoDecoder(0, 1));
Initialize();
while (num_input_buffers_ < kTotalBuffers) {
@@ -284,22 +263,62 @@ TEST_F(FakeVideoDecoderTest, Read_ZeroDelay) {
}
}
-TEST_F(FakeVideoDecoderTest, Read_Pending_NotEnoughData) {
+TEST_P(FakeVideoDecoderTest, Read_Pending_NotEnoughData) {
+ if (GetParam().decoding_delay < 1)
+ return;
+
Initialize();
- decoder_->HoldNextRead();
+ decoder_->HoldDecode();
ReadOneFrame();
ExpectReadResult(PENDING);
- SatisfyReadAndExpect(NOT_ENOUGH_DATA);
+ SatisfyDecodeAndExpect(NOT_ENOUGH_DATA);
+
+ // Verify that FrameReady() hasn't been called.
+ EXPECT_FALSE(last_decoded_frame_);
}
-TEST_F(FakeVideoDecoderTest, Read_Pending_OK) {
+TEST_P(FakeVideoDecoderTest, Read_Pending_OK) {
Initialize();
- ReadOneFrame();
EnterPendingReadState();
- SatisfyReadAndExpect(OK);
+ SatisfyDecodeAndExpect(OK);
+}
+
+TEST_P(FakeVideoDecoderTest, Read_Parallel) {
+ if (GetParam().max_decode_requests < 2)
+ return;
+
+ Initialize();
+ decoder_->HoldDecode();
+ for (int i = 0; i < GetParam().max_decode_requests; ++i) {
+ ReadOneFrame();
+ ExpectReadResult(PENDING);
+ }
+ EXPECT_EQ(GetParam().max_decode_requests, pending_decode_requests_);
+ SatisfyDecodeAndExpect(
+ GetParam().max_decode_requests > GetParam().decoding_delay
+ ? OK
+ : NOT_ENOUGH_DATA);
+}
+
+TEST_P(FakeVideoDecoderTest, ReadWithHold_DecodingDelay) {
+ Initialize();
+
+ // Hold all decodes and satisfy one decode at a time.
+ decoder_->HoldDecode();
+ int num_decodes_satisfied = 0;
+ while (num_decoded_frames_ == 0) {
+ while (pending_decode_requests_ < decoder_->GetMaxDecodeRequests())
+ Decode();
+ decoder_->SatisfySingleDecode();
+ ++num_decodes_satisfied;
+ message_loop_.RunUntilIdle();
+ }
+
+ DCHECK_EQ(num_decoded_frames_, 1);
+ DCHECK_EQ(num_decodes_satisfied, GetParam().decoding_delay + 1);
}
-TEST_F(FakeVideoDecoderTest, Reinitialize) {
+TEST_P(FakeVideoDecoderTest, Reinitialize) {
Initialize();
ReadOneFrame();
InitializeWithConfig(TestVideoConfig::Large());
@@ -308,114 +327,73 @@ TEST_F(FakeVideoDecoderTest, Reinitialize) {
// Reinitializing the decoder during the middle of the decoding process can
// cause dropped frames.
-TEST_F(FakeVideoDecoderTest, Reinitialize_FrameDropped) {
+TEST_P(FakeVideoDecoderTest, Reinitialize_FrameDropped) {
+ if (GetParam().decoding_delay < 1)
+ return;
+
Initialize();
ReadOneFrame();
Initialize();
- ReadUntilEOS();
+ ReadAllFrames();
EXPECT_LT(num_decoded_frames_, kTotalBuffers);
}
-TEST_F(FakeVideoDecoderTest, Reset) {
+TEST_P(FakeVideoDecoderTest, Reset) {
Initialize();
ReadOneFrame();
ResetAndExpect(OK);
}
-TEST_F(FakeVideoDecoderTest, Reset_DuringPendingRead) {
+TEST_P(FakeVideoDecoderTest, Reset_DuringPendingRead) {
Initialize();
EnterPendingReadState();
ResetAndExpect(PENDING);
- SatisfyRead();
+ SatisfyDecodeAndExpect(ABORTED);
}
-TEST_F(FakeVideoDecoderTest, Reset_Pending) {
+TEST_P(FakeVideoDecoderTest, Reset_Pending) {
Initialize();
EnterPendingResetState();
SatisfyReset();
}
-TEST_F(FakeVideoDecoderTest, Reset_PendingDuringPendingRead) {
+TEST_P(FakeVideoDecoderTest, Reset_PendingDuringPendingRead) {
Initialize();
EnterPendingReadState();
EnterPendingResetState();
- SatisfyRead();
+ SatisfyDecodeAndExpect(ABORTED);
SatisfyReset();
}
-TEST_F(FakeVideoDecoderTest, Stop) {
+TEST_P(FakeVideoDecoderTest, Stop) {
Initialize();
ReadOneFrame();
ExpectReadResult(OK);
- StopAndExpect(OK);
+ Stop();
}
-TEST_F(FakeVideoDecoderTest, Stop_DuringPendingInitialization) {
+TEST_P(FakeVideoDecoderTest, Stop_DuringPendingInitialization) {
EnterPendingInitState();
- EnterPendingStopState();
- SatisfyInit();
- SatisfyStop();
-}
-
-TEST_F(FakeVideoDecoderTest, Stop_DuringPendingRead) {
- Initialize();
- EnterPendingReadState();
- StopAndExpect(PENDING);
- SatisfyRead();
- ExpectStopResult(OK);
+ Stop();
}
-TEST_F(FakeVideoDecoderTest, Stop_DuringPendingReset) {
- Initialize();
- EnterPendingResetState();
- StopAndExpect(PENDING);
- SatisfyReset();
- ExpectStopResult(OK);
-}
-
-TEST_F(FakeVideoDecoderTest, Stop_DuringPendingReadAndPendingReset) {
- Initialize();
- EnterPendingReadState();
- EnterPendingResetState();
- StopAndExpect(PENDING);
- SatisfyRead();
- SatisfyReset();
- ExpectStopResult(OK);
-}
-
-TEST_F(FakeVideoDecoderTest, Stop_Pending) {
- Initialize();
- decoder_->HoldNextStop();
- StopAndExpect(PENDING);
- decoder_->SatisfyStop();
- message_loop_.RunUntilIdle();
- ExpectStopResult(OK);
-}
-
-TEST_F(FakeVideoDecoderTest, Stop_PendingDuringPendingRead) {
+TEST_P(FakeVideoDecoderTest, Stop_DuringPendingRead) {
Initialize();
EnterPendingReadState();
- EnterPendingStopState();
- SatisfyRead();
- SatisfyStop();
+ Stop();
}
-TEST_F(FakeVideoDecoderTest, Stop_PendingDuringPendingReset) {
+TEST_P(FakeVideoDecoderTest, Stop_DuringPendingReset) {
Initialize();
EnterPendingResetState();
- EnterPendingStopState();
- SatisfyReset();
- SatisfyStop();
+ Stop();
}
-TEST_F(FakeVideoDecoderTest, Stop_PendingDuringPendingReadAndPendingReset) {
+TEST_P(FakeVideoDecoderTest, Stop_DuringPendingReadAndPendingReset) {
Initialize();
EnterPendingReadState();
EnterPendingResetState();
- EnterPendingStopState();
- SatisfyRead();
- SatisfyReset();
- SatisfyStop();
+ Stop();
}
} // namespace media
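The TEST_F to TEST_P migration above uses gtest value-parameterized tests: the fixture exposes GetParam() via testing::WithParamInterface<T>, and each INSTANTIATE_TEST_CASE_P fans every TEST_P body out over the listed values. A minimal self-contained sketch of the mechanism (names are illustrative):

    class DelayTest : public testing::TestWithParam<int> {};

    TEST_P(DelayTest, NonNegative) {
      EXPECT_GE(GetParam(), 0);  // Body runs once per instantiated value.
    }

    INSTANTIATE_TEST_CASE_P(Delays, DelayTest, ::testing::Values(0, 9));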
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index 00f7566789b..eec3bdf90e7 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -4,31 +4,21 @@
#include "media/filters/ffmpeg_audio_decoder.h"
-#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/audio_discard_helper.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/demuxer.h"
#include "media/base/limits.h"
-#include "media/base/pipeline.h"
#include "media/base/sample_format.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
namespace media {
-// Helper structure for managing multiple decoded audio frames per packet.
-struct QueuedAudioBuffer {
- AudioDecoder::Status status;
- scoped_refptr<AudioBuffer> buffer;
-};
-
// Returns true if the decode result was end of stream.
static inline bool IsEndOfStream(int result,
int decoded_size,
@@ -50,18 +40,6 @@ static inline int DetermineChannels(AVFrame* frame) {
#endif
}
-// Called by FFmpeg's allocation routine to allocate a buffer. Uses
-// AVCodecContext.opaque to get the object reference in order to call
-// GetAudioBuffer() to do the actual allocation.
-static int GetAudioBufferImpl(struct AVCodecContext* s,
- AVFrame* frame,
- int flags) {
- DCHECK(s->codec->capabilities & CODEC_CAP_DR1);
- DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO);
- FFmpegAudioDecoder* decoder = static_cast<FFmpegAudioDecoder*>(s->opaque);
- return decoder->GetAudioBuffer(s, frame, flags);
-}
-
// Called by FFmpeg's allocation routine to free a buffer. |opaque| is the
// AudioBuffer allocated, so unref it.
static void ReleaseAudioBufferImpl(void* opaque, uint8* data) {
@@ -69,108 +47,20 @@ static void ReleaseAudioBufferImpl(void* opaque, uint8* data) {
buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque));
}
-FFmpegAudioDecoder::FFmpegAudioDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop)
- : message_loop_(message_loop),
- weak_factory_(this),
- demuxer_stream_(NULL),
- bytes_per_channel_(0),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- channels_(0),
- samples_per_second_(0),
- av_sample_format_(0),
- last_input_timestamp_(kNoTimestamp()),
- output_frames_to_drop_(0) {
-}
-
-void FFmpegAudioDecoder::Initialize(
- DemuxerStream* stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);
-
- FFmpegGlue::InitializeFFmpeg();
-
- if (demuxer_stream_) {
- // TODO(scherkus): initialization currently happens more than once in
- // PipelineIntegrationTest.BasicPlayback.
- LOG(ERROR) << "Initialize has already been called.";
- CHECK(false);
- }
-
- weak_this_ = weak_factory_.GetWeakPtr();
- demuxer_stream_ = stream;
-
- if (!ConfigureDecoder()) {
- status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
- return;
- }
-
- statistics_cb_ = statistics_cb;
- initialize_cb.Run(PIPELINE_OK);
-}
-
-void FFmpegAudioDecoder::Read(const ReadCB& read_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!read_cb.is_null());
- CHECK(read_cb_.is_null()) << "Overlapping decodes are not supported.";
-
- read_cb_ = BindToCurrentLoop(read_cb);
-
- // If we don't have any queued audio from the last packet we decoded, ask for
- // more data from the demuxer to satisfy this read.
- if (queued_audio_.empty()) {
- ReadFromDemuxerStream();
- return;
- }
-
- base::ResetAndReturn(&read_cb_).Run(
- queued_audio_.front().status, queued_audio_.front().buffer);
- queued_audio_.pop_front();
-}
-
-int FFmpegAudioDecoder::bits_per_channel() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return bytes_per_channel_ * 8;
-}
-
-ChannelLayout FFmpegAudioDecoder::channel_layout() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return channel_layout_;
-}
-
-int FFmpegAudioDecoder::samples_per_second() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return samples_per_second_;
-}
-
-void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::Closure reset_cb = BindToCurrentLoop(closure);
-
- avcodec_flush_buffers(codec_context_.get());
- ResetTimestampState();
- queued_audio_.clear();
- reset_cb.Run();
-}
-
-FFmpegAudioDecoder::~FFmpegAudioDecoder() {
- // TODO(scherkus): should we require Stop() to be called? this might end up
- // getting called on a random thread due to refcounting.
- ReleaseFFmpegResources();
-}
+// Called by FFmpeg's allocation routine to allocate a buffer. Uses
+// AVCodecContext.opaque to get the object reference in order to call
+// GetAudioBuffer() to do the actual allocation.
+static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) {
+ DCHECK(s->codec->capabilities & CODEC_CAP_DR1);
+ DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO);
-int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
- AVFrame* frame,
- int flags) {
// Since this routine is called by FFmpeg when a buffer is required for audio
// data, use the values supplied by FFmpeg (ignoring the current settings).
- // RunDecodeLoop() gets to determine if the buffer is useable or not.
+ // FFmpegDecode() gets to determine if the buffer is usable or not.
AVSampleFormat format = static_cast<AVSampleFormat>(frame->format);
SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
int channels = DetermineChannels(frame);
- if ((channels <= 0) || (channels >= limits::kMaxChannels)) {
+ if (channels <= 0 || channels >= limits::kMaxChannels) {
DLOG(ERROR) << "Requested number of channels (" << channels
<< ") exceeds limit.";
return AVERROR(EINVAL);
@@ -180,6 +70,11 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
if (frame->nb_samples <= 0)
return AVERROR(EINVAL);
+ if (s->channels != channels) {
+ DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count.";
+ return AVERROR(EINVAL);
+ }
+
// Determine how big the buffer should be and allocate it. FFmpeg may adjust
// how big each channel data is in order to meet the alignment policy, so
// we need to take this into consideration.
@@ -194,8 +89,12 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
return buffer_size_in_bytes;
int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
DCHECK_GE(frames_required, frame->nb_samples);
- scoped_refptr<AudioBuffer> buffer =
- AudioBuffer::CreateBuffer(sample_format, channels, frames_required);
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateBuffer(
+ sample_format,
+ ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels),
+ channels,
+ s->sample_rate,
+ frames_required);
// Initialize the data[] and extended_data[] fields to point into the memory
// allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved
@@ -226,197 +125,142 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
return 0;
}
-void FFmpegAudioDecoder::ReadFromDemuxerStream() {
- DCHECK(!read_cb_.is_null());
- demuxer_stream_->Read(base::Bind(
- &FFmpegAudioDecoder::BufferReady, weak_this_));
+FFmpegAudioDecoder::FFmpegAudioDecoder(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const LogCB& log_cb)
+ : task_runner_(task_runner),
+ state_(kUninitialized),
+ av_sample_format_(0),
+ log_cb_(log_cb) {
}
-void FFmpegAudioDecoder::BufferReady(
- DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& input) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!read_cb_.is_null());
- DCHECK(queued_audio_.empty());
- DCHECK_EQ(status != DemuxerStream::kOk, !input.get()) << status;
-
- if (status == DemuxerStream::kAborted) {
- DCHECK(!input.get());
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
- return;
- }
-
- if (status == DemuxerStream::kConfigChanged) {
- DCHECK(!input.get());
-
- // Send a "end of stream" buffer to the decode loop
- // to output any remaining data still in the decoder.
- RunDecodeLoop(DecoderBuffer::CreateEOSBuffer(), true);
-
- DVLOG(1) << "Config changed.";
+FFmpegAudioDecoder::~FFmpegAudioDecoder() {
+ DCHECK_EQ(state_, kUninitialized);
+ DCHECK(!codec_context_);
+ DCHECK(!av_frame_);
+}
- if (!ConfigureDecoder()) {
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
- return;
- }
+void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!config.is_encrypted());
- ResetTimestampState();
+ FFmpegGlue::InitializeFFmpeg();
- if (queued_audio_.empty()) {
- ReadFromDemuxerStream();
- return;
- }
+ config_ = config;
+ PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);
- base::ResetAndReturn(&read_cb_).Run(
- queued_audio_.front().status, queued_audio_.front().buffer);
- queued_audio_.pop_front();
+ if (!config.IsValidConfig() || !ConfigureDecoder()) {
+ initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
- DCHECK_EQ(status, DemuxerStream::kOk);
- DCHECK(input.get());
-
- // Make sure we are notified if http://crbug.com/49709 returns. Issue also
- // occurs with some damaged files.
- if (!input->end_of_stream() && input->timestamp() == kNoTimestamp() &&
- output_timestamp_helper_->base_timestamp() == kNoTimestamp()) {
- DVLOG(1) << "Received a buffer without timestamps!";
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
- return;
- }
+ // Success!
+ output_cb_ = BindToCurrentLoop(output_cb);
+ state_ = kNormal;
+ initialize_cb.Run(PIPELINE_OK);
+}
- if (!input->end_of_stream()) {
- if (last_input_timestamp_ == kNoTimestamp() &&
- codec_context_->codec_id == AV_CODEC_ID_VORBIS &&
- input->timestamp() < base::TimeDelta()) {
- // Dropping frames for negative timestamps as outlined in section A.2
- // in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html
- output_frames_to_drop_ = floor(
- 0.5 + -input->timestamp().InSecondsF() * samples_per_second_);
- } else {
- if (last_input_timestamp_ != kNoTimestamp() &&
- input->timestamp() < last_input_timestamp_) {
- const base::TimeDelta diff = input->timestamp() - last_input_timestamp_;
- DLOG(WARNING)
- << "Input timestamps are not monotonically increasing! "
- << " ts " << input->timestamp().InMicroseconds() << " us"
- << " diff " << diff.InMicroseconds() << " us";
- }
+void FFmpegAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!decode_cb.is_null());
+ CHECK_NE(state_, kUninitialized);
+ DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb);
- last_input_timestamp_ = input->timestamp();
- }
+ if (state_ == kError) {
+ decode_cb_bound.Run(kDecodeError);
+ return;
}
- RunDecodeLoop(input, false);
-
- // We exhausted the provided packet, but it wasn't enough for a frame. Ask
- // for more data in order to fulfill this read.
- if (queued_audio_.empty()) {
- ReadFromDemuxerStream();
+ // Do nothing if decoding has finished.
+ if (state_ == kDecodeFinished) {
+ decode_cb_bound.Run(kOk);
return;
}
- // Execute callback to return the first frame we decoded.
- base::ResetAndReturn(&read_cb_).Run(
- queued_audio_.front().status, queued_audio_.front().buffer);
- queued_audio_.pop_front();
+ DecodeBuffer(buffer, decode_cb_bound);
}
-bool FFmpegAudioDecoder::ConfigureDecoder() {
- const AudioDecoderConfig& config = demuxer_stream_->audio_decoder_config();
+void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (!config.IsValidConfig()) {
- DLOG(ERROR) << "Invalid audio stream -"
- << " codec: " << config.codec()
- << " channel layout: " << config.channel_layout()
- << " bits per channel: " << config.bits_per_channel()
- << " samples per second: " << config.samples_per_second();
- return false;
- }
+ avcodec_flush_buffers(codec_context_.get());
+ state_ = kNormal;
+ ResetTimestampState();
+ task_runner_->PostTask(FROM_HERE, closure);
+}
- if (config.is_encrypted()) {
- DLOG(ERROR) << "Encrypted audio stream not supported";
- return false;
- }
+void FFmpegAudioDecoder::Stop() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
- if (codec_context_.get() &&
- (bytes_per_channel_ != config.bytes_per_channel() ||
- channel_layout_ != config.channel_layout() ||
- samples_per_second_ != config.samples_per_second())) {
- DVLOG(1) << "Unsupported config change :";
- DVLOG(1) << "\tbytes_per_channel : " << bytes_per_channel_
- << " -> " << config.bytes_per_channel();
- DVLOG(1) << "\tchannel_layout : " << channel_layout_
- << " -> " << config.channel_layout();
- DVLOG(1) << "\tsample_rate : " << samples_per_second_
- << " -> " << config.samples_per_second();
- return false;
- }
+ if (state_ == kUninitialized)
+ return;
- // Release existing decoder resources if necessary.
ReleaseFFmpegResources();
+ ResetTimestampState();
+ state_ = kUninitialized;
+}
- // Initialize AVCodecContext structure.
- codec_context_.reset(avcodec_alloc_context3(NULL));
- AudioDecoderConfigToAVCodecContext(config, codec_context_.get());
+void FFmpegAudioDecoder::DecodeBuffer(
+ const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(state_, kUninitialized);
+ DCHECK_NE(state_, kDecodeFinished);
+ DCHECK_NE(state_, kError);
- codec_context_->opaque = this;
- codec_context_->get_buffer2 = GetAudioBufferImpl;
- codec_context_->refcounted_frames = 1;
+ DCHECK(buffer);
- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
- DLOG(ERROR) << "Could not initialize audio decoder: "
- << codec_context_->codec_id;
- return false;
+ // Make sure we are notified if http://crbug.com/49709 returns. Issue also
+ // occurs with some damaged files.
+ if (!buffer->end_of_stream() && buffer->timestamp() == kNoTimestamp()) {
+ DVLOG(1) << "Received a buffer without timestamps!";
+ decode_cb.Run(kDecodeError);
+ return;
}
- // Success!
- av_frame_.reset(av_frame_alloc());
- channel_layout_ = config.channel_layout();
- samples_per_second_ = config.samples_per_second();
- output_timestamp_helper_.reset(
- new AudioTimestampHelper(config.samples_per_second()));
-
- // Store initial values to guard against midstream configuration changes.
- channels_ = codec_context_->channels;
- if (channels_ != ChannelLayoutToChannelCount(channel_layout_)) {
- DLOG(ERROR) << "Audio configuration specified "
- << ChannelLayoutToChannelCount(channel_layout_)
- << " channels, but FFmpeg thinks the file contains "
- << channels_ << " channels";
- return false;
+ if (!buffer->end_of_stream() && !discard_helper_->initialized() &&
+ codec_context_->codec_id == AV_CODEC_ID_VORBIS &&
+ buffer->timestamp() < base::TimeDelta()) {
+ // Dropping frames for negative timestamps as outlined in section A.2
+ // in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html
+ const int discard_frames =
+ discard_helper_->TimeDeltaToFrames(-buffer->timestamp());
+ discard_helper_->Reset(discard_frames);
}
- av_sample_format_ = codec_context_->sample_fmt;
- sample_format_ = AVSampleFormatToSampleFormat(
- static_cast<AVSampleFormat>(av_sample_format_));
- bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format_);
- return true;
-}
+ bool has_produced_frame;
+ do {
+ has_produced_frame = false;
+ if (!FFmpegDecode(buffer, &has_produced_frame)) {
+ state_ = kError;
+ decode_cb.Run(kDecodeError);
+ return;
+ }
+ // Repeat to flush the decoder after receiving EOS buffer.
+ } while (buffer->end_of_stream() && has_produced_frame);
-void FFmpegAudioDecoder::ReleaseFFmpegResources() {
- codec_context_.reset();
- av_frame_.reset();
-}
+ if (buffer->end_of_stream())
+ state_ = kDecodeFinished;
-void FFmpegAudioDecoder::ResetTimestampState() {
- output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
- last_input_timestamp_ = kNoTimestamp();
- output_frames_to_drop_ = 0;
+ decode_cb.Run(kOk);
}
-void FFmpegAudioDecoder::RunDecodeLoop(
- const scoped_refptr<DecoderBuffer>& input,
- bool skip_eos_append) {
+bool FFmpegAudioDecoder::FFmpegDecode(
+ const scoped_refptr<DecoderBuffer>& buffer,
+ bool* has_produced_frame) {
+ DCHECK(!*has_produced_frame);
+
AVPacket packet;
av_init_packet(&packet);
- if (input->end_of_stream()) {
+ if (buffer->end_of_stream()) {
packet.data = NULL;
packet.size = 0;
} else {
- packet.data = const_cast<uint8*>(input->data());
- packet.size = input->data_size();
+ packet.data = const_cast<uint8*>(buffer->data());
+ packet.size = buffer->data_size();
}
// Each audio packet may contain several frames, so we must call the decoder
@@ -425,20 +269,20 @@ void FFmpegAudioDecoder::RunDecodeLoop(
// skipping end of stream packets since they have a size of zero.
do {
int frame_decoded = 0;
- int result = avcodec_decode_audio4(
+ const int result = avcodec_decode_audio4(
codec_context_.get(), av_frame_.get(), &frame_decoded, &packet);
if (result < 0) {
- DCHECK(!input->end_of_stream())
+ DCHECK(!buffer->end_of_stream())
<< "End of stream buffer produced an error! "
<< "This is quite possibly a bug in the audio decoder not handling "
<< "end of stream AVPackets correctly.";
- DLOG(WARNING)
- << "Failed to decode an audio frame with timestamp: "
- << input->timestamp().InMicroseconds() << " us, duration: "
- << input->duration().InMicroseconds() << " us, packet size: "
- << input->data_size() << " bytes";
+ MEDIA_LOG(log_cb_)
+ << "Dropping audio frame which failed decode with timestamp: "
+ << buffer->timestamp().InMicroseconds() << " us, duration: "
+ << buffer->duration().InMicroseconds() << " us, packet size: "
+ << buffer->data_size() << " bytes";
break;
}
@@ -448,94 +292,121 @@ void FFmpegAudioDecoder::RunDecodeLoop(
packet.size -= result;
packet.data += result;
- if (output_timestamp_helper_->base_timestamp() == kNoTimestamp() &&
- !input->end_of_stream()) {
- DCHECK(input->timestamp() != kNoTimestamp());
- if (output_frames_to_drop_ > 0) {
- // Currently Vorbis is the only codec that causes us to drop samples.
- // If we have to drop samples it always means the timeline starts at 0.
- DCHECK_EQ(codec_context_->codec_id, AV_CODEC_ID_VORBIS);
- output_timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
- } else {
- output_timestamp_helper_->SetBaseTimestamp(input->timestamp());
- }
- }
-
scoped_refptr<AudioBuffer> output;
- int decoded_frames = 0;
- int original_frames = 0;
- int channels = DetermineChannels(av_frame_.get());
+ const int channels = DetermineChannels(av_frame_.get());
if (frame_decoded) {
- if (av_frame_->sample_rate != samples_per_second_ ||
- channels != channels_ ||
+ if (av_frame_->sample_rate != config_.samples_per_second() ||
+ channels != ChannelLayoutToChannelCount(config_.channel_layout()) ||
av_frame_->format != av_sample_format_) {
DLOG(ERROR) << "Unsupported midstream configuration change!"
<< " Sample Rate: " << av_frame_->sample_rate << " vs "
- << samples_per_second_
+ << config_.samples_per_second()
<< ", Channels: " << channels << " vs "
- << channels_
+ << ChannelLayoutToChannelCount(config_.channel_layout())
<< ", Sample Format: " << av_frame_->format << " vs "
<< av_sample_format_;
+ if (config_.codec() == kCodecAAC &&
+ av_frame_->sample_rate == 2 * config_.samples_per_second()) {
+ MEDIA_LOG(log_cb_) << "Implicit HE-AAC signalling is being used."
+ << " Please use mp4a.40.5 instead of mp4a.40.2 in"
+ << " the mimetype.";
+ }
// This is an unrecoverable error, so bail out.
- QueuedAudioBuffer queue_entry = { kDecodeError, NULL };
- queued_audio_.push_back(queue_entry);
av_frame_unref(av_frame_.get());
- break;
+ return false;
}
// Get the AudioBuffer that the data was decoded into. Adjust the number
// of frames, in case fewer than requested were actually decoded.
output = reinterpret_cast<AudioBuffer*>(
av_buffer_get_opaque(av_frame_->buf[0]));
- DCHECK_EQ(channels_, output->channel_count());
- original_frames = av_frame_->nb_samples;
- int unread_frames = output->frame_count() - original_frames;
+
+ DCHECK_EQ(ChannelLayoutToChannelCount(config_.channel_layout()),
+ output->channel_count());
+ const int unread_frames = output->frame_count() - av_frame_->nb_samples;
DCHECK_GE(unread_frames, 0);
if (unread_frames > 0)
output->TrimEnd(unread_frames);
-
- // If there are frames to drop, get rid of as many as we can.
- if (output_frames_to_drop_ > 0) {
- int drop = std::min(output->frame_count(), output_frames_to_drop_);
- output->TrimStart(drop);
- output_frames_to_drop_ -= drop;
- }
-
- decoded_frames = output->frame_count();
av_frame_unref(av_frame_.get());
}
// WARNING: |av_frame_| no longer has valid data at this point.
-
- if (decoded_frames > 0) {
- // Set the timestamp/duration once all the extra frames have been
- // discarded.
- output->set_timestamp(output_timestamp_helper_->GetTimestamp());
- output->set_duration(
- output_timestamp_helper_->GetFrameDuration(decoded_frames));
- output_timestamp_helper_->AddFrames(decoded_frames);
- } else if (IsEndOfStream(result, original_frames, input) &&
- !skip_eos_append) {
+ const int decoded_frames = frame_decoded ? output->frame_count() : 0;
+ if (IsEndOfStream(result, decoded_frames, buffer)) {
DCHECK_EQ(packet.size, 0);
- output = AudioBuffer::CreateEOSBuffer();
- } else {
- // In case all the frames in the buffer were dropped.
- output = NULL;
+ } else if (discard_helper_->ProcessBuffers(buffer, output)) {
+ *has_produced_frame = true;
+ output_cb_.Run(output);
}
+ } while (packet.size > 0);
- if (output.get()) {
- QueuedAudioBuffer queue_entry = { kOk, output };
- queued_audio_.push_back(queue_entry);
- }
+ return true;
+}
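
(The do/while loop in DecodeBuffer() above relies on FFmpeg's standard drain
pattern: once the end-of-stream buffer arrives, the decoder keeps being fed
empty packets until it stops producing frames. A minimal sketch under that
assumption, with a caller-provided |on_frame| handler:)

    // Drains all frames still buffered inside an opened audio decoder.
    void DrainDecoder(AVCodecContext* ctx, AVFrame* frame,
                      void (*on_frame)(AVFrame*)) {
      AVPacket packet;
      av_init_packet(&packet);
      packet.data = NULL;  // A NULL/zero-size packet signals end of stream.
      packet.size = 0;
      int got_frame;
      do {
        got_frame = 0;
        if (avcodec_decode_audio4(ctx, frame, &got_frame, &packet) < 0)
          break;  // Draining should not fail; bail out defensively.
        if (got_frame)
          on_frame(frame);
      } while (got_frame);  // Stop once no buffered frames remain.
    }
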
- // Decoding finished successfully, update statistics.
- if (result > 0) {
- PipelineStatistics statistics;
- statistics.audio_bytes_decoded = result;
- statistics_cb_.Run(statistics);
- }
- } while (packet.size > 0);
+void FFmpegAudioDecoder::ReleaseFFmpegResources() {
+ codec_context_.reset();
+ av_frame_.reset();
+}
+
+bool FFmpegAudioDecoder::ConfigureDecoder() {
+ if (!config_.IsValidConfig()) {
+ DLOG(ERROR) << "Invalid audio stream -"
+ << " codec: " << config_.codec()
+ << " channel layout: " << config_.channel_layout()
+ << " bits per channel: " << config_.bits_per_channel()
+ << " samples per second: " << config_.samples_per_second();
+ return false;
+ }
+
+ if (config_.is_encrypted()) {
+ DLOG(ERROR) << "Encrypted audio stream not supported";
+ return false;
+ }
+
+ // Release existing decoder resources if necessary.
+ ReleaseFFmpegResources();
+
+ // Initialize AVCodecContext structure.
+ codec_context_.reset(avcodec_alloc_context3(NULL));
+ AudioDecoderConfigToAVCodecContext(config_, codec_context_.get());
+
+ codec_context_->opaque = this;
+ codec_context_->get_buffer2 = GetAudioBuffer;
+ codec_context_->refcounted_frames = 1;
+
+ AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+ if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
+ DLOG(ERROR) << "Could not initialize audio decoder: "
+ << codec_context_->codec_id;
+ ReleaseFFmpegResources();
+ state_ = kUninitialized;
+ return false;
+ }
+
+ // Success!
+ av_frame_.reset(av_frame_alloc());
+ discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(),
+ config_.codec_delay()));
+ av_sample_format_ = codec_context_->sample_fmt;
+
+ if (codec_context_->channels !=
+ ChannelLayoutToChannelCount(config_.channel_layout())) {
+ DLOG(ERROR) << "Audio configuration specified "
+ << ChannelLayoutToChannelCount(config_.channel_layout())
+ << " channels, but FFmpeg thinks the file contains "
+ << codec_context_->channels << " channels";
+ ReleaseFFmpegResources();
+ state_ = kUninitialized;
+ return false;
+ }
+
+ ResetTimestampState();
+ return true;
+}
+
+void FFmpegAudioDecoder::ResetTimestampState() {
+ discard_helper_->Reset(config_.codec_delay());
}
} // namespace media
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.h b/chromium/media/filters/ffmpeg_audio_decoder.h
index 40103b8d751..39a408973dc 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.h
+++ b/chromium/media/filters/ffmpeg_audio_decoder.h
@@ -9,93 +9,105 @@
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "media/base/audio_decoder.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/media_log.h"
#include "media/base/sample_format.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
struct AVCodecContext;
struct AVFrame;
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
-class AudioTimestampHelper;
+class AudioDiscardHelper;
class DecoderBuffer;
-struct QueuedAudioBuffer;
-class ScopedPtrAVFreeContext;
-class ScopedPtrAVFreeFrame;
class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
public:
- explicit FFmpegAudioDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
+ FFmpegAudioDecoder(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const LogCB& log_cb);
virtual ~FFmpegAudioDecoder();
// AudioDecoder implementation.
- virtual void Initialize(DemuxerStream* stream,
+ virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) OVERRIDE;
- virtual void Read(const ReadCB& read_cb) OVERRIDE;
- virtual int bits_per_channel() OVERRIDE;
- virtual ChannelLayout channel_layout() OVERRIDE;
- virtual int samples_per_second() OVERRIDE;
+ const OutputCB& output_cb) OVERRIDE;
+ virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
-
- // Callback called from within FFmpeg to allocate a buffer based on
- // the dimensions of |codec_context|. See AVCodecContext.get_buffer2
- // documentation inside FFmpeg.
- int GetAudioBuffer(AVCodecContext* codec, AVFrame* frame, int flags);
+ virtual void Stop() OVERRIDE;
private:
- // Reads from the demuxer stream with corresponding callback method.
- void ReadFromDemuxerStream();
- void BufferReady(DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& input);
-
+ // There are four states the decoder can be in:
+ //
+ // - kUninitialized: The decoder is not initialized.
+ // - kNormal: This is the normal state. The decoder is idle and ready to
+ // decode input buffers, or is decoding an input buffer.
+ // - kDecodeFinished: EOS buffer received, codec flushed and decode finished.
+ // No further Decode() call should be made.
+  //   - kError: An unexpected error occurred.
+ //
+ // These are the possible state transitions.
+ //
+ // kUninitialized -> kNormal:
+ // The decoder is successfully initialized and is ready to decode buffers.
+ // kNormal -> kDecodeFinished:
+ // When buffer->end_of_stream() is true and avcodec_decode_audio4()
+ // returns 0 data.
+ // kNormal -> kError:
+ // A decoding error occurs and decoding needs to stop.
+ // (any state) -> kNormal:
+ // Any time Reset() is called.
+ enum DecoderState {
+ kUninitialized,
+ kNormal,
+ kDecodeFinished,
+ kError
+ };
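
(The transitions documented above condense into a small predicate; the sketch
below is purely illustrative and is not part of the decoder:)

    static bool IsValidTransition(DecoderState from, DecoderState to) {
      if (to == kNormal)
        return true;  // Reset() re-enters kNormal from any state.
      // kDecodeFinished and kError are reachable only from kNormal.
      return from == kNormal && (to == kDecodeFinished || to == kError);
    }
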
+
+ // Reset decoder and call |reset_cb_|.
+ void DoReset();
+
+ // Handles decoding an unencrypted encoded buffer.
+ void DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb);
+ bool FFmpegDecode(const scoped_refptr<DecoderBuffer>& buffer,
+ bool* has_produced_frame);
+
+ // Handles (re-)initializing the decoder with a (new) config.
+ // Returns true if initialization was successful.
bool ConfigureDecoder();
+
+ // Releases resources associated with |codec_context_| and |av_frame_|
+ // and resets them to NULL.
void ReleaseFFmpegResources();
void ResetTimestampState();
- void RunDecodeLoop(const scoped_refptr<DecoderBuffer>& input,
- bool skip_eos_append);
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<FFmpegAudioDecoder> weak_factory_;
- base::WeakPtr<FFmpegAudioDecoder> weak_this_;
- DemuxerStream* demuxer_stream_;
- StatisticsCB statistics_cb_;
- scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- // Decoded audio format.
- int bytes_per_channel_;
- ChannelLayout channel_layout_;
- int channels_;
- int samples_per_second_;
+ OutputCB output_cb_;
- // AVSampleFormat initially requested; not Chrome's SampleFormat.
- int av_sample_format_;
- SampleFormat sample_format_;
+ DecoderState state_;
- // Used for computing output timestamps.
- scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
- base::TimeDelta last_input_timestamp_;
+ // FFmpeg structures owned by this object.
+ scoped_ptr<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
- // Number of frames to drop before generating output buffers.
- int output_frames_to_drop_;
+ AudioDecoderConfig config_;
- // Holds decoded audio.
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
+ // AVSampleFormat initially requested; not Chrome's SampleFormat.
+ int av_sample_format_;
- ReadCB read_cb_;
+ scoped_ptr<AudioDiscardHelper> discard_helper_;
- // Since multiple frames may be decoded from the same packet we need to queue
- // them up and hand them out as we receive Read() calls.
- std::list<QueuedAudioBuffer> queued_audio_;
+ LogCB log_cb_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FFmpegAudioDecoder);
};
diff --git a/chromium/media/filters/ffmpeg_audio_decoder_unittest.cc b/chromium/media/filters/ffmpeg_audio_decoder_unittest.cc
index c19bbabf580..05d1ee28bb0 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder_unittest.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder_unittest.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
#include "media/base/decoder_buffer.h"
@@ -22,15 +23,14 @@ using ::testing::StrictMock;
namespace media {
-ACTION_P(InvokeReadPacket, test) {
- test->ReadPacket(arg0);
-}
-
class FFmpegAudioDecoderTest : public testing::Test {
public:
FFmpegAudioDecoderTest()
- : decoder_(new FFmpegAudioDecoder(message_loop_.message_loop_proxy())),
- demuxer_(new StrictMock<MockDemuxerStream>(DemuxerStream::AUDIO)) {
+ : decoder_(new FFmpegAudioDecoder(message_loop_.message_loop_proxy(),
+ LogCB())),
+ pending_decode_(false),
+ pending_reset_(false),
+ last_decode_status_(AudioDecoder::kOk) {
FFmpegGlue::InitializeFFmpeg();
vorbis_extradata_ = ReadTestDataFile("vorbis-extradata");
@@ -52,9 +52,14 @@ class FFmpegAudioDecoderTest : public testing::Test {
// Push in an EOS buffer.
encoded_audio_.push_back(DecoderBuffer::CreateEOSBuffer());
+
+ Initialize();
}
- virtual ~FFmpegAudioDecoderTest() {}
+ virtual ~FFmpegAudioDecoderTest() {
+ EXPECT_FALSE(pending_decode_);
+ EXPECT_FALSE(pending_reset_);
+ }
void Initialize() {
AudioDecoderConfig config(kCodecVorbis,
@@ -64,108 +69,129 @@ class FFmpegAudioDecoderTest : public testing::Test {
vorbis_extradata_->data(),
vorbis_extradata_->data_size(),
false); // Not encrypted.
- demuxer_->set_audio_decoder_config(config);
- decoder_->Initialize(demuxer_.get(),
+ decoder_->Initialize(config,
NewExpectedStatusCB(PIPELINE_OK),
- base::Bind(&MockStatisticsCB::OnStatistics,
- base::Unretained(&statistics_cb_)));
-
- message_loop_.RunUntilIdle();
+ base::Bind(&FFmpegAudioDecoderTest::OnDecoderOutput,
+ base::Unretained(this)));
+ base::RunLoop().RunUntilIdle();
}
- void ReadPacket(const DemuxerStream::ReadCB& read_cb) {
- CHECK(!encoded_audio_.empty()) << "ReadPacket() called too many times";
+ void SatisfyPendingDecode() {
+ base::RunLoop().RunUntilIdle();
+ }
+ void Decode() {
+ pending_decode_ = true;
scoped_refptr<DecoderBuffer> buffer(encoded_audio_.front());
- DemuxerStream::Status status =
- buffer.get() ? DemuxerStream::kOk : DemuxerStream::kAborted;
encoded_audio_.pop_front();
- read_cb.Run(status, buffer);
+ decoder_->Decode(buffer,
+ base::Bind(&FFmpegAudioDecoderTest::DecodeFinished,
+ base::Unretained(this)));
+ base::RunLoop().RunUntilIdle();
+ EXPECT_FALSE(pending_decode_);
+ EXPECT_EQ(AudioDecoder::kOk, last_decode_status_);
}
- void Read() {
- decoder_->Read(base::Bind(
- &FFmpegAudioDecoderTest::DecodeFinished, base::Unretained(this)));
- message_loop_.RunUntilIdle();
+ void Reset() {
+ pending_reset_ = true;
+ decoder_->Reset(base::Bind(
+ &FFmpegAudioDecoderTest::ResetFinished, base::Unretained(this)));
+ base::RunLoop().RunUntilIdle();
}
- void DecodeFinished(AudioDecoder::Status status,
- const scoped_refptr<AudioBuffer>& buffer) {
+ void Stop() {
+ decoder_->Stop();
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void OnDecoderOutput(const scoped_refptr<AudioBuffer>& buffer) {
+ EXPECT_FALSE(buffer->end_of_stream());
decoded_audio_.push_back(buffer);
}
+ void DecodeFinished(AudioDecoder::Status status) {
+ EXPECT_TRUE(pending_decode_);
+ pending_decode_ = false;
+
+ last_decode_status_ = status;
+ }
+
+ void ResetFinished() {
+ EXPECT_TRUE(pending_reset_);
+ // Reset should always finish after Decode.
+ EXPECT_FALSE(pending_decode_);
+
+ pending_reset_ = false;
+ }
+
void ExpectDecodedAudio(size_t i, int64 timestamp, int64 duration) {
EXPECT_LT(i, decoded_audio_.size());
EXPECT_EQ(timestamp, decoded_audio_[i]->timestamp().InMicroseconds());
EXPECT_EQ(duration, decoded_audio_[i]->duration().InMicroseconds());
- EXPECT_FALSE(decoded_audio_[i]->end_of_stream());
- }
-
- void ExpectEndOfStream(size_t i) {
- EXPECT_LT(i, decoded_audio_.size());
- EXPECT_TRUE(decoded_audio_[i]->end_of_stream());
}
base::MessageLoop message_loop_;
scoped_ptr<FFmpegAudioDecoder> decoder_;
- scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_;
- MockStatisticsCB statistics_cb_;
+ bool pending_decode_;
+ bool pending_reset_;
scoped_refptr<DecoderBuffer> vorbis_extradata_;
std::deque<scoped_refptr<DecoderBuffer> > encoded_audio_;
std::deque<scoped_refptr<AudioBuffer> > decoded_audio_;
+ AudioDecoder::Status last_decode_status_;
};
TEST_F(FFmpegAudioDecoderTest, Initialize) {
- Initialize();
-
- const AudioDecoderConfig& config = demuxer_->audio_decoder_config();
- EXPECT_EQ(config.bits_per_channel(), decoder_->bits_per_channel());
- EXPECT_EQ(config.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(config.samples_per_second(), decoder_->samples_per_second());
+ AudioDecoderConfig config(kCodecVorbis,
+ kSampleFormatPlanarF32,
+ CHANNEL_LAYOUT_STEREO,
+ 44100,
+ vorbis_extradata_->data(),
+ vorbis_extradata_->data_size(),
+ false); // Not encrypted.
+ Stop();
}
TEST_F(FFmpegAudioDecoderTest, ProduceAudioSamples) {
- Initialize();
-
// Vorbis requires N+1 packets to produce audio data for N packets.
//
  // This means four Decode() calls below are needed to produce the three
  // buffers of decoded audio.
- EXPECT_CALL(*demuxer_, Read(_))
- .Times(5)
- .WillRepeatedly(InvokeReadPacket(this));
- EXPECT_CALL(statistics_cb_, OnStatistics(_))
- .Times(4);
-
- Read();
- Read();
- Read();
+ Decode();
+ Decode();
+ Decode();
+ Decode();
ASSERT_EQ(3u, decoded_audio_.size());
ExpectDecodedAudio(0, 0, 2902);
ExpectDecodedAudio(1, 2902, 13061);
- ExpectDecodedAudio(2, 15963, 23220);
+ ExpectDecodedAudio(2, 15963, 23219);
- // Call one more time to trigger EOS.
- Read();
- ASSERT_EQ(4u, decoded_audio_.size());
- ExpectEndOfStream(3);
+ // Call one more time with EOS.
+ Decode();
+ ASSERT_EQ(3u, decoded_audio_.size());
+ Stop();
}
-TEST_F(FFmpegAudioDecoderTest, ReadAbort) {
- Initialize();
-
- encoded_audio_.clear();
- encoded_audio_.push_back(NULL);
+TEST_F(FFmpegAudioDecoderTest, PendingDecode_Stop) {
+ Decode();
+ Stop();
+ SatisfyPendingDecode();
+}
- EXPECT_CALL(*demuxer_, Read(_))
- .WillOnce(InvokeReadPacket(this));
- Read();
+TEST_F(FFmpegAudioDecoderTest, PendingDecode_Reset) {
+ Decode();
+ Reset();
+ SatisfyPendingDecode();
+ Stop();
+}
- EXPECT_EQ(decoded_audio_.size(), 1u);
- EXPECT_TRUE(decoded_audio_[0].get() == NULL);
+TEST_F(FFmpegAudioDecoderTest, PendingDecode_ResetStop) {
+ Decode();
+ Reset();
+ Stop();
+ SatisfyPendingDecode();
}
} // namespace media
diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
index 6b8027164bd..f5b4fddad3b 100644
--- a/chromium/media/filters/ffmpeg_demuxer.cc
+++ b/chromium/media/filters/ffmpeg_demuxer.cc
@@ -12,7 +12,7 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/sparse_histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
@@ -20,7 +20,7 @@
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
@@ -30,10 +30,31 @@
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
#include "media/filters/webvtt_util.h"
-#include "media/webm/webm_crypto_helpers.h"
+#include "media/formats/webm/webm_crypto_helpers.h"
namespace media {
+static base::Time ExtractTimelineOffset(AVFormatContext* format_context) {
+ if (strstr(format_context->iformat->name, "webm") ||
+ strstr(format_context->iformat->name, "matroska")) {
+ const AVDictionaryEntry* entry =
+ av_dict_get(format_context->metadata, "creation_time", NULL, 0);
+
+ base::Time timeline_offset;
+ if (entry != NULL && entry->value != NULL &&
+ FFmpegUTCDateToTime(entry->value, &timeline_offset)) {
+ return timeline_offset;
+ }
+ }
+
+ return base::Time();
+}
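
(A usage note for the helper above; the date string is illustrative:)

    // Example: a WebM/Matroska file whose metadata carries
    //   creation_time = "2014-08-08 12:30:00"
    // yields that instant as the timeline offset; files without the tag, or
    // in other container formats, fall through to a null base::Time.
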
+
+static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) {
+ return base::TimeDelta::FromMicroseconds(
+ frames * base::Time::kMicrosecondsPerSecond / sample_rate);
+}
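
(For concreteness, one worked conversion:)

    // FramesToTimeDelta(441, 44100)
    //   == 441 * 1000000 / 44100 us
    //   == 10000 us (10 ms).
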
+
//
// FFmpegDemuxerStream
//
@@ -41,7 +62,7 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(
FFmpegDemuxer* demuxer,
AVStream* stream)
: demuxer_(demuxer),
- message_loop_(base::MessageLoopProxy::current()),
+ task_runner_(base::MessageLoopProxy::current()),
stream_(stream),
type_(UNKNOWN),
end_of_stream_(false),
@@ -74,10 +95,12 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(
// Calculate the duration.
duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration);
+#if defined(USE_PROPRIETARY_CODECS)
if (stream_->codec->codec_id == AV_CODEC_ID_H264) {
bitstream_converter_.reset(
new FFmpegH264ToAnnexBBitstreamConverter(stream_->codec));
}
+#endif
if (is_encrypted) {
AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL,
@@ -99,23 +122,26 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(
}
void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!demuxer_ || end_of_stream_) {
NOTREACHED() << "Attempted to enqueue packet on a stopped stream";
return;
}
+#if defined(USE_PROPRIETARY_CODECS)
// Convert the packet if there is a bitstream filter.
if (packet->data && bitstream_converter_enabled_ &&
!bitstream_converter_->ConvertPacket(packet.get())) {
LOG(ERROR) << "Format conversion failed.";
}
+#endif
  // Get side data if any. For now, the only type of side_data is VP8 Alpha.
  // We keep this generic so that other side_data types can be handled the
  // same way in the future.
av_packet_split_side_data(packet.get());
+
scoped_refptr<DecoderBuffer> buffer;
if (type() == DemuxerStream::TEXT) {
@@ -145,43 +171,59 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
&side_data_size);
+ scoped_ptr<DecryptConfig> decrypt_config;
+ int data_offset = 0;
+ if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
+ (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
+ if (!WebMCreateDecryptConfig(
+ packet->data, packet->size,
+ reinterpret_cast<const uint8*>(encryption_key_id_.data()),
+ encryption_key_id_.size(),
+ &decrypt_config,
+ &data_offset)) {
+ LOG(ERROR) << "Creation of DecryptConfig failed.";
+ }
+ }
+
// If a packet is returned by FFmpeg's av_parser_parse2() the packet will
// reference inner memory of FFmpeg. As such we should transfer the packet
// into memory we control.
if (side_data_size > 0) {
- buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
+ buffer = DecoderBuffer::CopyFrom(packet.get()->data + data_offset,
+ packet.get()->size - data_offset,
side_data, side_data_size);
} else {
- buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size);
+ buffer = DecoderBuffer::CopyFrom(packet.get()->data + data_offset,
+ packet.get()->size - data_offset);
}
int skip_samples_size = 0;
- uint8* skip_samples = av_packet_get_side_data(packet.get(),
- AV_PKT_DATA_SKIP_SAMPLES,
- &skip_samples_size);
+ const uint32* skip_samples_ptr =
+ reinterpret_cast<const uint32*>(av_packet_get_side_data(
+ packet.get(), AV_PKT_DATA_SKIP_SAMPLES, &skip_samples_size));
const int kSkipSamplesValidSize = 10;
- const int kSkipSamplesOffset = 4;
+ const int kSkipEndSamplesOffset = 1;
if (skip_samples_size >= kSkipSamplesValidSize) {
- int discard_padding_samples = base::ByteSwapToLE32(
- *(reinterpret_cast<const uint32*>(skip_samples +
- kSkipSamplesOffset)));
- // TODO(vigneshv): Change decoder buffer to use number of samples so that
- // this conversion can be avoided.
- buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
- discard_padding_samples * 1000000.0 /
- audio_decoder_config().samples_per_second()));
+      // Because FFmpeg rolls codec delay and skip samples into one, we can only
+ // allow front discard padding on the first buffer. Otherwise the discard
+ // helper can't figure out which data to discard. See AudioDiscardHelper.
+ int discard_front_samples = base::ByteSwapToLE32(*skip_samples_ptr);
+ if (last_packet_timestamp_ != kNoTimestamp()) {
+ DLOG(ERROR) << "Skip samples are only allowed for the first packet.";
+ discard_front_samples = 0;
+ }
+
+ const int discard_end_samples =
+ base::ByteSwapToLE32(*(skip_samples_ptr + kSkipEndSamplesOffset));
+ const int samples_per_second =
+ audio_decoder_config().samples_per_second();
+ buffer->set_discard_padding(std::make_pair(
+ FramesToTimeDelta(discard_front_samples, samples_per_second),
+ FramesToTimeDelta(discard_end_samples, samples_per_second)));
}
- }
- if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
- (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
- scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig(
- packet->data, packet->size,
- reinterpret_cast<const uint8*>(encryption_key_id_.data()),
- encryption_key_id_.size()));
- if (!config)
- LOG(ERROR) << "Creation of DecryptConfig failed.";
- buffer->set_decrypt_config(config.Pass());
+ if (decrypt_config)
+ buffer->set_decrypt_config(decrypt_config.Pass());
}
buffer->set_timestamp(ConvertStreamTimestamp(
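
(For context: the AV_PKT_DATA_SKIP_SAMPLES side data consumed above is a
10-byte blob; the layout below follows FFmpeg's avcodec.h documentation:)

    // Layout of AV_PKT_DATA_SKIP_SAMPLES (10 bytes total):
    //   u32le  samples to skip from the start of this packet
    //   u32le  samples to skip from the end of this packet
    //   u8     reason for the start skip
    //   u8     reason for the end skip
    // Hence kSkipSamplesValidSize == 10, and kSkipEndSamplesOffset == 1 is
    // one uint32 past the front-skip count.
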
@@ -201,13 +243,13 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
}
void FFmpegDemuxerStream::SetEndOfStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
end_of_stream_ = true;
SatisfyPendingRead();
}
void FFmpegDemuxerStream::FlushBuffers() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(read_cb_.is_null()) << "There should be no pending read";
buffer_queue_.Clear();
end_of_stream_ = false;
@@ -215,7 +257,7 @@ void FFmpegDemuxerStream::FlushBuffers() {
}
void FFmpegDemuxerStream::Stop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
buffer_queue_.Clear();
if (!read_cb_.is_null()) {
base::ResetAndReturn(&read_cb_).Run(
@@ -231,12 +273,12 @@ base::TimeDelta FFmpegDemuxerStream::duration() {
}
DemuxerStream::Type FFmpegDemuxerStream::type() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return type_;
}
void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
read_cb_ = BindToCurrentLoop(read_cb);
@@ -254,19 +296,26 @@ void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
}
void FFmpegDemuxerStream::EnableBitstreamConverter() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+#if defined(USE_PROPRIETARY_CODECS)
CHECK(bitstream_converter_.get());
bitstream_converter_enabled_ = true;
+#else
+ NOTREACHED() << "Proprietary codecs not enabled.";
+#endif
}
+bool FFmpegDemuxerStream::SupportsConfigChanges() { return false; }
+
AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CHECK_EQ(type_, AUDIO);
return audio_config_;
}
VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CHECK_EQ(type_, VIDEO);
return video_config_;
}
@@ -286,7 +335,7 @@ Ranges<base::TimeDelta> FFmpegDemuxerStream::GetBufferedRanges() const {
}
void FFmpegDemuxerStream::SatisfyPendingRead() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!read_cb_.is_null()) {
if (!buffer_queue_.IsEmpty()) {
base::ResetAndReturn(&read_cb_).Run(
@@ -304,14 +353,20 @@ void FFmpegDemuxerStream::SatisfyPendingRead() {
}
bool FFmpegDemuxerStream::HasAvailableCapacity() {
- // TODO(scherkus): Remove early return and reenable time-based capacity
+ // TODO(scherkus): Remove this return and reenable time-based capacity
// after our data sources support canceling/concurrent reads, see
// http://crbug.com/165762 for details.
+#if 1
return !read_cb_.is_null();
-
+#else
// Try to have one second's worth of encoded data per stream.
const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1);
return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
+#endif
+}
+
+size_t FFmpegDemuxerStream::MemoryUsage() const {
+ return buffer_queue_.data_size();
}
TextKind FFmpegDemuxerStream::GetTextKind() const {
@@ -348,13 +403,12 @@ base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
// FFmpegDemuxer
//
FFmpegDemuxer::FFmpegDemuxer(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
DataSource* data_source,
const NeedKeyCB& need_key_cb,
const scoped_refptr<MediaLog>& media_log)
: host_(NULL),
- message_loop_(message_loop),
- weak_factory_(this),
+ task_runner_(task_runner),
blocking_thread_("FFmpegDemuxer"),
pending_read_(false),
pending_seek_(false),
@@ -362,29 +416,29 @@ FFmpegDemuxer::FFmpegDemuxer(
media_log_(media_log),
bitrate_(0),
start_time_(kNoTimestamp()),
- audio_disabled_(false),
+ liveness_(LIVENESS_UNKNOWN),
text_enabled_(false),
duration_known_(false),
- url_protocol_(data_source, BindToLoop(message_loop_, base::Bind(
- &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))),
- need_key_cb_(need_key_cb) {
- DCHECK(message_loop_.get());
+ need_key_cb_(need_key_cb),
+ weak_factory_(this) {
+ DCHECK(task_runner_.get());
DCHECK(data_source_);
}
FFmpegDemuxer::~FFmpegDemuxer() {}
void FFmpegDemuxer::Stop(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- url_protocol_.Abort();
- data_source_->Stop(BindToCurrentLoop(base::Bind(
- &FFmpegDemuxer::OnDataSourceStopped, weak_this_,
- BindToCurrentLoop(callback))));
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ url_protocol_->Abort();
+ data_source_->Stop(
+ BindToCurrentLoop(base::Bind(&FFmpegDemuxer::OnDataSourceStopped,
+ weak_factory_.GetWeakPtr(),
+ BindToCurrentLoop(callback))));
data_source_ = NULL;
}
void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CHECK(!pending_seek_);
// TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|,
@@ -406,33 +460,20 @@ void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
-1,
time.InMicroseconds(),
flags),
- base::Bind(&FFmpegDemuxer::OnSeekFrameDone, weak_this_, cb));
-}
-
-void FFmpegDemuxer::OnAudioRendererDisabled() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- audio_disabled_ = true;
- StreamVector::iterator iter;
- for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
- if (*iter && (*iter)->type() == DemuxerStream::AUDIO) {
- (*iter)->Stop();
- }
- }
+ base::Bind(
+ &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb));
}
void FFmpegDemuxer::Initialize(DemuxerHost* host,
const PipelineStatusCB& status_cb,
bool enable_text_tracks) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
host_ = host;
- weak_this_ = weak_factory_.GetWeakPtr();
text_enabled_ = enable_text_tracks;
- // TODO(scherkus): DataSource should have a host by this point,
- // see http://crbug.com/122071
- data_source_->set_host(host);
-
- glue_.reset(new FFmpegGlue(&url_protocol_));
+ url_protocol_.reset(new BlockingUrlProtocol(data_source_, BindToCurrentLoop(
+ base::Bind(&FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))));
+ glue_.reset(new FFmpegGlue(url_protocol_.get()));
AVFormatContext* format_context = glue_->format_context();
// Disable ID3v1 tag reading to avoid costly seeks to end of file for data we
@@ -446,11 +487,13 @@ void FFmpegDemuxer::Initialize(DemuxerHost* host,
blocking_thread_.message_loop_proxy().get(),
FROM_HERE,
base::Bind(&FFmpegGlue::OpenContext, base::Unretained(glue_.get())),
- base::Bind(&FFmpegDemuxer::OnOpenContextDone, weak_this_, status_cb));
+ base::Bind(&FFmpegDemuxer::OnOpenContextDone,
+ weak_factory_.GetWeakPtr(),
+ status_cb));
}
DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return GetFFmpegStream(type);
}
@@ -466,12 +509,21 @@ FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream(
}
base::TimeDelta FFmpegDemuxer::GetStartTime() const {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
return start_time_;
}
+base::Time FFmpegDemuxer::GetTimelineOffset() const {
+ return timeline_offset_;
+}
+
+Demuxer::Liveness FFmpegDemuxer::GetLiveness() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ return liveness_;
+}
+
void FFmpegDemuxer::AddTextStreams() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) {
FFmpegDemuxerStream* stream = streams_[idx];
@@ -527,7 +579,7 @@ static int CalculateBitrate(
void FFmpegDemuxer::OnOpenContextDone(const PipelineStatusCB& status_cb,
bool result) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!blocking_thread_.IsRunning()) {
status_cb.Run(PIPELINE_ERROR_ABORT);
return;
@@ -545,12 +597,14 @@ void FFmpegDemuxer::OnOpenContextDone(const PipelineStatusCB& status_cb,
base::Bind(&avformat_find_stream_info,
glue_->format_context(),
static_cast<AVDictionary**>(NULL)),
- base::Bind(&FFmpegDemuxer::OnFindStreamInfoDone, weak_this_, status_cb));
+ base::Bind(&FFmpegDemuxer::OnFindStreamInfoDone,
+ weak_factory_.GetWeakPtr(),
+ status_cb));
}
void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
int result) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!blocking_thread_.IsRunning() || !data_source_) {
status_cb.Run(PIPELINE_ERROR_ABORT);
return;
@@ -656,13 +710,23 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
if (strcmp(format_context->iformat->name, "avi") == 0)
format_context->flags |= AVFMT_FLAG_GENPTS;
+ timeline_offset_ = ExtractTimelineOffset(format_context);
+
+ if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) {
+ liveness_ = LIVENESS_LIVE;
+ } else if (max_duration != kInfiniteDuration()) {
+ liveness_ = LIVENESS_RECORDED;
+ } else {
+ liveness_ = LIVENESS_UNKNOWN;
+ }
+
// Good to go: set the duration and bitrate and notify we're done
// initializing.
host_->SetDuration(max_duration);
duration_known_ = (max_duration != kInfiniteDuration());
int64 filesize_in_bytes = 0;
- url_protocol_.GetSize(&filesize_in_bytes);
+ url_protocol_->GetSize(&filesize_in_bytes);
bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
if (bitrate_ > 0)
data_source_->SetBitrate(bitrate_);
@@ -720,15 +784,15 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
}
- media_log_->SetDoubleProperty("max_duration", max_duration.InSecondsF());
- media_log_->SetDoubleProperty("start_time", start_time_.InSecondsF());
+ media_log_->SetTimeProperty("max_duration", max_duration);
+ media_log_->SetTimeProperty("start_time", start_time_);
media_log_->SetIntegerProperty("bitrate", bitrate_);
status_cb.Run(PIPELINE_OK);
}
void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CHECK(pending_seek_);
pending_seek_ = false;
@@ -759,7 +823,7 @@ void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
}
void FFmpegDemuxer::ReadFrameIfNeeded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Make sure we have work to do before reading.
if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() ||
@@ -778,12 +842,13 @@ void FFmpegDemuxer::ReadFrameIfNeeded() {
blocking_thread_.message_loop_proxy().get(),
FROM_HERE,
base::Bind(&av_read_frame, glue_->format_context(), packet_ptr),
- base::Bind(
- &FFmpegDemuxer::OnReadFrameDone, weak_this_, base::Passed(&packet)));
+ base::Bind(&FFmpegDemuxer::OnReadFrameDone,
+ weak_factory_.GetWeakPtr(),
+ base::Passed(&packet)));
}
void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(pending_read_);
pending_read_ = false;
@@ -791,22 +856,29 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
return;
}
- if (result < 0) {
- // Update the duration based on the audio stream if
- // it was previously unknown http://crbug.com/86830
+  // Consider the stream as ended if:
+  // - the underlying FFmpeg returned an error, or
+  // - FFmpegDemuxer reached the maximum allowed memory usage.
+ if (result < 0 || IsMaxMemoryUsageReached()) {
+ // Update the duration based on the highest elapsed time across all streams
+ // if it was previously unknown.
if (!duration_known_) {
- // Search streams for AUDIO one.
+ base::TimeDelta max_duration;
+
for (StreamVector::iterator iter = streams_.begin();
iter != streams_.end();
++iter) {
- if (*iter && (*iter)->type() == DemuxerStream::AUDIO) {
- base::TimeDelta duration = (*iter)->GetElapsedTime();
- if (duration != kNoTimestamp() && duration > base::TimeDelta()) {
- host_->SetDuration(duration);
- duration_known_ = true;
- }
- break;
- }
+ if (!*iter)
+ continue;
+
+ base::TimeDelta duration = (*iter)->GetElapsedTime();
+ if (duration != kNoTimestamp() && duration > max_duration)
+ max_duration = duration;
+ }
+
+ if (max_duration > base::TimeDelta()) {
+ host_->SetDuration(max_duration);
+ duration_known_ = true;
}
}
// If we have reached the end of stream, tell the downstream filters about
@@ -822,10 +894,7 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
// Defend against ffmpeg giving us a bad stream index.
if (packet->stream_index >= 0 &&
packet->stream_index < static_cast<int>(streams_.size()) &&
- streams_[packet->stream_index] &&
- (!audio_disabled_ ||
- streams_[packet->stream_index]->type() != DemuxerStream::AUDIO)) {
-
+ streams_[packet->stream_index]) {
// TODO(scherkus): Fix demuxing upstream to never return packets w/o data
// when av_read_frame() returns success code. See bug comment for ideas:
//
@@ -833,15 +902,7 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
if (!packet->data) {
ScopedAVPacket new_packet(new AVPacket());
av_new_packet(new_packet.get(), 0);
-
- new_packet->pts = packet->pts;
- new_packet->dts = packet->dts;
- new_packet->pos = packet->pos;
- new_packet->duration = packet->duration;
- new_packet->convergence_duration = packet->convergence_duration;
- new_packet->flags = packet->flags;
- new_packet->stream_index = packet->stream_index;
-
+ av_packet_copy_props(new_packet.get(), packet.get());
packet.swap(new_packet);
}
@@ -871,7 +932,7 @@ void FFmpegDemuxer::OnDataSourceStopped(const base::Closure& callback) {
// possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this
// thread. Each of the reply task methods must check whether we've stopped the
// thread and drop their results on the floor.
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
blocking_thread_.Stop();
StreamVector::iterator iter;
@@ -884,7 +945,7 @@ void FFmpegDemuxer::OnDataSourceStopped(const base::Closure& callback) {
}
bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
StreamVector::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
if (*iter && (*iter)->HasAvailableCapacity()) {
@@ -894,14 +955,32 @@ bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
return false;
}
+bool FFmpegDemuxer::IsMaxMemoryUsageReached() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ // Max allowed memory usage, all streams combined.
+ const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024;
+
+ size_t memory_left = kDemuxerMemoryLimit;
+ for (StreamVector::const_iterator iter = streams_.begin();
+ iter != streams_.end(); ++iter) {
+ if (!(*iter))
+ continue;
+
+ size_t stream_memory_usage = (*iter)->MemoryUsage();
+ if (stream_memory_usage > memory_left)
+ return true;
+ memory_left -= stream_memory_usage;
+ }
+ return false;
+}
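
(Comparing each stream's usage against the remaining budget lets the loop exit
early and can never underflow |memory_left|. An equivalent running-sum
formulation, shown only for illustration and assuming the per-stream sizes are
too small to wrap size_t:)

    #include <cstddef>
    #include <vector>

    bool IsOverLimit(const std::vector<size_t>& stream_usages, size_t limit) {
      size_t total = 0;
      for (size_t i = 0; i < stream_usages.size(); ++i) {
        total += stream_usages[i];  // May wrap for pathological inputs.
        if (total > limit)
          return true;
      }
      return false;
    }
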
+
void FFmpegDemuxer::StreamHasEnded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
StreamVector::iterator iter;
for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
- if (!*iter ||
- (audio_disabled_ && (*iter)->type() == DemuxerStream::AUDIO)) {
+ if (!*iter)
continue;
- }
(*iter)->SetEndOfStream();
}
}
@@ -914,15 +993,14 @@ void FFmpegDemuxer::FireNeedKey(const std::string& init_data_type,
}
void FFmpegDemuxer::NotifyCapacityAvailable() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
ReadFrameIfNeeded();
}
void FFmpegDemuxer::NotifyBufferingChanged() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
Ranges<base::TimeDelta> buffered;
- FFmpegDemuxerStream* audio =
- audio_disabled_ ? NULL : GetFFmpegStream(DemuxerStream::AUDIO);
+ FFmpegDemuxerStream* audio = GetFFmpegStream(DemuxerStream::AUDIO);
FFmpegDemuxerStream* video = GetFFmpegStream(DemuxerStream::VIDEO);
if (audio && video) {
buffered = audio->GetBufferedRanges().IntersectionWith(
diff --git a/chromium/media/filters/ffmpeg_demuxer.h b/chromium/media/filters/ffmpeg_demuxer.h
index 69f6c969ab0..02682bbc8b2 100644
--- a/chromium/media/filters/ffmpeg_demuxer.h
+++ b/chromium/media/filters/ffmpeg_demuxer.h
@@ -36,6 +36,7 @@
#include "media/base/pipeline.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
#include "media/filters/blocking_url_protocol.h"
// FFmpeg forward declarations.
@@ -49,9 +50,8 @@ class MediaLog;
class FFmpegDemuxer;
class FFmpegGlue;
class FFmpegH264ToAnnexBBitstreamConverter;
-class ScopedPtrAVFreePacket;
-typedef scoped_ptr_malloc<AVPacket, ScopedPtrAVFreePacket> ScopedAVPacket;
+typedef scoped_ptr<AVPacket, ScopedPtrAVFreePacket> ScopedAVPacket;
class FFmpegDemuxerStream : public DemuxerStream {
public:
@@ -81,6 +81,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
virtual Type type() OVERRIDE;
virtual void Read(const ReadCB& read_cb) OVERRIDE;
virtual void EnableBitstreamConverter() OVERRIDE;
+ virtual bool SupportsConfigChanges() OVERRIDE;
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
@@ -94,6 +95,9 @@ class FFmpegDemuxerStream : public DemuxerStream {
// Returns true if this stream has capacity for additional data.
bool HasAvailableCapacity();
+  // Returns the total size of the buffers FFmpegDemuxerStream is holding onto.
+ size_t MemoryUsage() const;
+
TextKind GetTextKind() const;
// Returns the value associated with |key| in the metadata for the avstream.
@@ -112,7 +116,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
int64 timestamp);
FFmpegDemuxer* demuxer_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
AVStream* stream_;
AudioDecoderConfig audio_config_;
VideoDecoderConfig video_config_;
@@ -125,7 +129,10 @@ class FFmpegDemuxerStream : public DemuxerStream {
DecoderBufferQueue buffer_queue_;
ReadCB read_cb_;
+#if defined(USE_PROPRIETARY_CODECS)
scoped_ptr<FFmpegH264ToAnnexBBitstreamConverter> bitstream_converter_;
+#endif
+
bool bitstream_converter_enabled_;
std::string encryption_key_id_;
@@ -135,7 +142,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
public:
- FFmpegDemuxer(const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ FFmpegDemuxer(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
DataSource* data_source,
const NeedKeyCB& need_key_cb,
const scoped_refptr<MediaLog>& media_log);
@@ -147,9 +154,10 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
bool enable_text_tracks) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
- virtual void OnAudioRendererDisabled() OVERRIDE;
virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
virtual base::TimeDelta GetStartTime() const OVERRIDE;
+ virtual base::Time GetTimelineOffset() const OVERRIDE;
+ virtual Liveness GetLiveness() const OVERRIDE;
// Calls |need_key_cb_| with the initialization data encountered in the file.
void FireNeedKey(const std::string& init_data_type,
@@ -182,6 +190,9 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// go over capacity depending on how the file is muxed.
bool StreamsHaveAvailableCapacity();
+ // Returns true if the maximum allowed memory usage has been reached.
+ bool IsMaxMemoryUsageReached() const;
+
// Signal all FFmpegDemuxerStreams that the stream has ended.
void StreamHasEnded();
@@ -198,9 +209,7 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
DemuxerHost* host_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<FFmpegDemuxer> weak_factory_;
- base::WeakPtr<FFmpegDemuxer> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Thread on which all blocking FFmpeg operations are executed.
base::Thread blocking_thread_;
@@ -241,9 +250,12 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// is 0.
base::TimeDelta start_time_;
- // Whether audio has been disabled for this demuxer (in which case this class
- // drops packets destined for AUDIO demuxer streams on the floor).
- bool audio_disabled_;
+  // The Time associated with timestamp 0. Set to a null time if the file
+  // doesn't have an association with Time.
+ base::Time timeline_offset_;
+
+ // Liveness of the stream.
+ Liveness liveness_;
// Whether text streams have been enabled for this demuxer.
bool text_enabled_;
@@ -253,11 +265,14 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
bool duration_known_;
// FFmpegURLProtocol implementation and corresponding glue bits.
- BlockingUrlProtocol url_protocol_;
+ scoped_ptr<BlockingUrlProtocol> url_protocol_;
scoped_ptr<FFmpegGlue> glue_;
const NeedKeyCB need_key_cb_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<FFmpegDemuxer> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FFmpegDemuxer);
};
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index 7c6fcb5b11a..a1614a42129 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/files/file_path.h"
+#include "base/logging.h"
#include "base/path_service.h"
#include "base/threading/thread.h"
#include "media/base/decrypt_config.h"
@@ -17,7 +18,8 @@
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
-#include "media/webm/webm_crypto_helpers.h"
+#include "media/formats/mp4/avc.h"
+#include "media/formats/webm/webm_crypto_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AnyNumber;
@@ -75,8 +77,6 @@ class FFmpegDemuxerTest : public testing::Test {
void CreateDemuxer(const std::string& name) {
CHECK(!demuxer_);
- EXPECT_CALL(host_, SetTotalBytes(_)).Times(AnyNumber());
- EXPECT_CALL(host_, AddBufferedByteRange(_, _)).Times(AnyNumber());
EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
CreateDataSource(name);
@@ -167,14 +167,11 @@ class FFmpegDemuxerTest : public testing::Test {
return demuxer_->glue_->format_context();
}
- void ReadUntilEndOfStream() {
- // We should expect an end of stream buffer.
- DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
-
+ void ReadUntilEndOfStream(DemuxerStream* stream) {
bool got_eos_buffer = false;
const int kMaxBuffers = 170;
for (int i = 0; !got_eos_buffer && i < kMaxBuffers; i++) {
- audio->Read(base::Bind(&EosOnReadDone, &got_eos_buffer));
+ stream->Read(base::Bind(&EosOnReadDone, &got_eos_buffer));
message_loop_.Run();
}
@@ -414,7 +411,7 @@ TEST_F(FFmpegDemuxerTest, Read_EndOfStream) {
// Verify that end of stream buffers are created.
CreateDemuxer("bear-320x240.webm");
InitializeDemuxer();
- ReadUntilEndOfStream();
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::AUDIO));
}
TEST_F(FFmpegDemuxerTest, Read_EndOfStreamText) {
@@ -442,8 +439,37 @@ TEST_F(FFmpegDemuxerTest, Read_EndOfStream_NoDuration) {
CreateDemuxer("bear-320x240.webm");
InitializeDemuxer();
set_duration_known(false);
- EXPECT_CALL(host_, SetDuration(_));
- ReadUntilEndOfStream();
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2767)));
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::AUDIO));
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::VIDEO));
+}
+
+TEST_F(FFmpegDemuxerTest, Read_EndOfStream_NoDuration_VideoOnly) {
+ // Verify that end of stream buffers are created.
+ CreateDemuxer("bear-320x240-video-only.webm");
+ InitializeDemuxer();
+ set_duration_known(false);
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2703)));
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::VIDEO));
+}
+
+TEST_F(FFmpegDemuxerTest, Read_EndOfStream_NoDuration_AudioOnly) {
+ // Verify that end of stream buffers are created.
+ CreateDemuxer("bear-320x240-audio-only.webm");
+ InitializeDemuxer();
+ set_duration_known(false);
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2767)));
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::AUDIO));
+}
+
+TEST_F(FFmpegDemuxerTest, Read_EndOfStream_NoDuration_UnsupportedStream) {
+ // Verify that end of stream buffers are created and we don't crash
+ // if there are streams in the file that we don't support.
+ CreateDemuxer("vorbis_audio_wmv_video.mkv");
+ InitializeDemuxer();
+ set_duration_known(false);
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(1014)));
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::AUDIO));
}
TEST_F(FFmpegDemuxerTest, Seek) {
@@ -574,40 +600,6 @@ TEST_F(FFmpegDemuxerTest, Stop) {
demuxer_.reset();
}
-TEST_F(FFmpegDemuxerTest, DisableAudioStream) {
- // We are doing the following things here:
- // 1. Initialize the demuxer with audio and video stream.
- // 2. Send a "disable audio stream" message to the demuxer.
- // 3. Demuxer will free audio packets even if audio stream was initialized.
- CreateDemuxer("bear-320x240.webm");
- InitializeDemuxer();
-
- // Submit a "disable audio stream" message to the demuxer.
- demuxer_->OnAudioRendererDisabled();
- message_loop_.RunUntilIdle();
-
- // Get our streams.
- DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
- DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
- ASSERT_TRUE(video);
- ASSERT_TRUE(audio);
-
- // The audio stream should have been prematurely stopped.
- EXPECT_FALSE(IsStreamStopped(DemuxerStream::VIDEO));
- EXPECT_TRUE(IsStreamStopped(DemuxerStream::AUDIO));
-
- // Attempt a read from the video stream: it should return valid data.
- video->Read(NewReadCB(FROM_HERE, 22084, 0));
- message_loop_.Run();
-
- // Attempt a read from the audio stream: it should immediately return end of
- // stream without requiring the message loop to read data.
- bool got_eos_buffer = false;
- audio->Read(base::Bind(&EosOnReadDone, &got_eos_buffer));
- message_loop_.RunUntilIdle();
- EXPECT_TRUE(got_eos_buffer);
-}
-
// Verify that seek works properly when the WebM cues data is at the start of
// the file instead of at the end.
TEST_F(FFmpegDemuxerTest, SeekWithCuesBeforeFirstCluster) {
@@ -647,24 +639,21 @@ TEST_F(FFmpegDemuxerTest, SeekWithCuesBeforeFirstCluster) {
message_loop_.Run();
}
+#if defined(USE_PROPRIETARY_CODECS)
// Ensure ID3v1 tag reading is disabled. id3_test.mp3 has an ID3v1 tag with the
// field "title" set to "sample for id3 test".
TEST_F(FFmpegDemuxerTest, NoID3TagData) {
-#if !defined(USE_PROPRIETARY_CODECS)
- return;
-#endif
CreateDemuxer("id3_test.mp3");
InitializeDemuxer();
EXPECT_FALSE(av_dict_get(format_context()->metadata, "title", NULL, 0));
}
+#endif
+#if defined(USE_PROPRIETARY_CODECS)
// Ensure MP3 files with large image/video based ID3 tags demux okay. FFmpeg
// will hand us a video stream for the tag data, which will likely be in a
// format we don't accept as video; e.g. PNG.
TEST_F(FFmpegDemuxerTest, Mp3WithVideoStreamID3TagData) {
-#if !defined(USE_PROPRIETARY_CODECS)
- return;
-#endif
CreateDemuxer("id3_png_test.mp3");
InitializeDemuxer();
@@ -672,6 +661,7 @@ TEST_F(FFmpegDemuxerTest, Mp3WithVideoStreamID3TagData) {
EXPECT_FALSE(demuxer_->GetStream(DemuxerStream::VIDEO));
EXPECT_TRUE(demuxer_->GetStream(DemuxerStream::AUDIO));
}
+#endif
// Ensure a video with an unsupported audio track still results in the video
// stream being demuxed.
@@ -695,15 +685,68 @@ TEST_F(FFmpegDemuxerTest, UnsupportedVideoSupportedAudioDemux) {
EXPECT_TRUE(demuxer_->GetStream(DemuxerStream::AUDIO));
}
+#if defined(USE_PROPRIETARY_CODECS)
// FFmpeg returns null data pointers when samples have zero size, leading to
// mistakenly creating end of stream buffers http://crbug.com/169133
TEST_F(FFmpegDemuxerTest, MP4_ZeroStszEntry) {
-#if !defined(USE_PROPRIETARY_CODECS)
- return;
-#endif
CreateDemuxer("bear-1280x720-zero-stsz-entry.mp4");
InitializeDemuxer();
- ReadUntilEndOfStream();
+ ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::AUDIO));
+}
+
+static void ValidateAnnexB(DemuxerStream* stream,
+ DemuxerStream::Status status,
+ const scoped_refptr<DecoderBuffer>& buffer) {
+ EXPECT_EQ(status, DemuxerStream::kOk);
+
+ if (buffer->end_of_stream()) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
+ return;
+ }
+
+ bool is_valid =
+ mp4::AVC::IsValidAnnexB(buffer->data(), buffer->data_size());
+ EXPECT_TRUE(is_valid);
+
+ if (!is_valid) {
+ LOG(ERROR) << "Buffer contains invalid Annex B data.";
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
+ return;
+ }
+
+ stream->Read(base::Bind(&ValidateAnnexB, stream));
+}
+
+TEST_F(FFmpegDemuxerTest, IsValidAnnexB) {
+ const char* files[] = {
+ "bear-1280x720-av_frag.mp4",
+ "bear-1280x720-av_with-aud-nalus_frag.mp4"
+ };
+
+ for (size_t i = 0; i < arraysize(files); ++i) {
+ DVLOG(1) << "Testing " << files[i];
+ CreateDemuxer(files[i]);
+ InitializeDemuxer();
+
+ // Ensure the expected streams are present.
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(stream);
+ stream->EnableBitstreamConverter();
+
+ stream->Read(base::Bind(&ValidateAnnexB, stream));
+ message_loop_.Run();
+
+ WaitableMessageLoopEvent event;
+ demuxer_->Stop(event.GetClosure());
+ event.RunAndWait();
+ demuxer_.reset();
+ data_source_.reset();
+ }
}
+#endif
+
} // namespace media
diff --git a/chromium/media/filters/ffmpeg_glue.h b/chromium/media/filters/ffmpeg_glue.h
index 17241b9730a..0073ac3c823 100644
--- a/chromium/media/filters/ffmpeg_glue.h
+++ b/chromium/media/filters/ffmpeg_glue.h
@@ -28,14 +28,13 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
struct AVFormatContext;
struct AVIOContext;
namespace media {
-class ScopedPtrAVFree;
-
class MEDIA_EXPORT FFmpegURLProtocol {
public:
  // Read the given number of bytes into data, returns the number of bytes read
@@ -73,7 +72,7 @@ class MEDIA_EXPORT FFmpegGlue {
private:
bool open_called_;
AVFormatContext* format_context_;
- scoped_ptr_malloc<AVIOContext, ScopedPtrAVFree> avio_context_;
+ scoped_ptr<AVIOContext, ScopedPtrAVFree> avio_context_;
DISALLOW_COPY_AND_ASSIGN(FFmpegGlue);
};
diff --git a/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc b/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
index 31f03f5646d..16dbe854d81 100644
--- a/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
+++ b/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
@@ -6,6 +6,7 @@
#include "base/logging.h"
#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/formats/mp4/box_definitions.h"
namespace media {
@@ -19,73 +20,55 @@ FFmpegH264ToAnnexBBitstreamConverter::FFmpegH264ToAnnexBBitstreamConverter(
FFmpegH264ToAnnexBBitstreamConverter::~FFmpegH264ToAnnexBBitstreamConverter() {}
bool FFmpegH264ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
- uint32 output_packet_size = 0;
- uint32 configuration_size = 0;
- uint32 io_size = 0;
- if (packet == NULL) {
+ scoped_ptr<mp4::AVCDecoderConfigurationRecord> avc_config;
+
+ if (packet == NULL || !packet->data)
return false;
- }
// Calculate the needed output buffer size.
if (!configuration_processed_) {
- // FFmpeg's AVCodecContext's extradata field contains the Decoder
- // Specific Information from MP4 headers that contain the H.264 SPS and
- // PPS members. See ISO/IEC 14496-15 Chapter 5.2.4
- // AVCDecoderConfigurationRecord for exact specification.
- // Extradata must be at least 7 bytes long.
- if (stream_context_->extradata == NULL ||
- stream_context_->extradata_size <= 7) {
- return false; // Can't go on with conversion without configuration.
- }
- configuration_size += converter_.ParseConfigurationAndCalculateSize(
- stream_context_->extradata,
- stream_context_->extradata_size);
- if (configuration_size == 0) {
- return false; // Not possible to parse the configuration.
+ if (!stream_context_->extradata || stream_context_->extradata_size <= 0)
+ return false;
+
+ avc_config.reset(new mp4::AVCDecoderConfigurationRecord());
+
+ if (!converter_.ParseConfiguration(
+ stream_context_->extradata,
+ stream_context_->extradata_size,
+ avc_config.get())) {
+ return false;
}
}
- uint32 output_nal_size =
- converter_.CalculateNeededOutputBufferSize(packet->data, packet->size);
- if (output_nal_size == 0) {
+
+ uint32 output_packet_size = converter_.CalculateNeededOutputBufferSize(
+ packet->data, packet->size, avc_config.get());
+
+ if (output_packet_size == 0)
return false; // Invalid input packet.
- }
- output_packet_size = configuration_size + output_nal_size;
// Allocate new packet for the output.
AVPacket dest_packet;
- if (av_new_packet(&dest_packet, output_packet_size) != 0) {
+ if (av_new_packet(&dest_packet, output_packet_size) != 0)
return false; // Memory allocation failure.
- }
+
// This is a bit tricky: since the interface does not allow us to replace
// the pointer of the old packet with a new one, we will initially copy the
// metadata from old packet to new bigger packet.
- dest_packet.pts = packet->pts;
- dest_packet.dts = packet->dts;
- dest_packet.pos = packet->pos;
- dest_packet.duration = packet->duration;
- dest_packet.convergence_duration = packet->convergence_duration;
- dest_packet.flags = packet->flags;
- dest_packet.stream_index = packet->stream_index;
-
- // Process the configuration if not done earlier.
- if (!configuration_processed_) {
- if (!converter_.ConvertAVCDecoderConfigToByteStream(
- stream_context_->extradata, stream_context_->extradata_size,
- dest_packet.data, &configuration_size)) {
- return false; // Failed to convert the buffer.
- }
- configuration_processed_ = true;
- }
+ av_packet_copy_props(&dest_packet, packet);
// Proceed with the conversion of the actual in-band NAL units, leave room
// for configuration in the beginning.
- io_size = dest_packet.size - configuration_size;
+ uint32 io_size = dest_packet.size;
if (!converter_.ConvertNalUnitStreamToByteStream(
packet->data, packet->size,
- dest_packet.data + configuration_size, &io_size)) {
+ avc_config.get(),
+ dest_packet.data, &io_size)) {
return false;
}
+ if (avc_config)
+ configuration_processed_ = true;
+
// At the end we must destroy the old packet.
av_free_packet(packet);
*packet = dest_packet; // Finally, replace the values in the input packet.
@@ -94,4 +77,3 @@ bool FFmpegH264ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
}
} // namespace media
-
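The rewritten ConvertPacket() parses the AVCDecoderConfigurationRecord once,
then lets the converter prepend SPS/PPS from that config while rewriting the
packet's length-prefixed NAL units into start-code form. Reduced to its core,
and assuming 4-byte length prefixes (the real converter also handles 1- and
2-byte prefixes and in-band parameter sets), the rewrite is roughly this
invented helper:

    #include <stddef.h>
    #include <stdint.h>
    #include <vector>

    // AVCC -> Annex B: replace each 4-byte big-endian length prefix with a
    // 4-byte start code and copy the NAL unit payload through unchanged.
    static bool NalStreamToByteStream(const uint8_t* in, size_t in_size,
                                      std::vector<uint8_t>* out) {
      static const uint8_t kStartCode[4] = {0, 0, 0, 1};
      size_t pos = 0;
      while (pos + 4 <= in_size) {
        uint32_t nal_size = (static_cast<uint32_t>(in[pos]) << 24) |
                            (static_cast<uint32_t>(in[pos + 1]) << 16) |
                            (static_cast<uint32_t>(in[pos + 2]) << 8) |
                            static_cast<uint32_t>(in[pos + 3]);
        pos += 4;
        if (nal_size == 0 || nal_size > in_size - pos)
          return false;  // Corrupt length prefix.
        out->insert(out->end(), kStartCode, kStartCode + 4);
        out->insert(out->end(), in + pos, in + pos + nal_size);
        pos += nal_size;
      }
      return pos == in_size;  // No trailing garbage allowed.
    }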
diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
index b8757657548..3436aa9955d 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder.cc
@@ -11,9 +11,9 @@
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
@@ -54,15 +54,32 @@ static int GetThreadCount(AVCodecID codec_id) {
return decode_threads;
}
-FFmpegVideoDecoder::FFmpegVideoDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop)
- : message_loop_(message_loop),
- weak_factory_(this),
- state_(kUninitialized) {
+static int GetVideoBufferImpl(struct AVCodecContext* s,
+ AVFrame* frame,
+ int flags) {
+ FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
+ return decoder->GetVideoBuffer(s, frame, flags);
+}
+
+static void ReleaseVideoBufferImpl(void* opaque, uint8* data) {
+ scoped_refptr<VideoFrame> video_frame;
+ video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
}
-int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
- AVFrame* frame) {
+static size_t RoundUp(size_t value, size_t alignment) {
+ // Check that |alignment| is a power of 2.
+ DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
+ return ((value + (alignment - 1)) & ~(alignment - 1));
+}
+
+FFmpegVideoDecoder::FFmpegVideoDecoder(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner), state_(kUninitialized),
+ decode_nalus_(false) {}
+
+int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
+ AVFrame* frame,
+ int flags) {
// Don't use |codec_context_| here! With threaded decoding,
// it will contain unsynchronized width/height/pix_fmt values,
// whereas |codec_context| contains the current thread's
@@ -72,11 +89,11 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
if (format == VideoFrame::UNKNOWN)
return AVERROR(EINVAL);
DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
- format == VideoFrame::YV12J);
+ format == VideoFrame::YV12J || format == VideoFrame::YV24);
gfx::Size size(codec_context->width, codec_context->height);
- int ret;
- if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0)
+ const int ret = av_image_check_size(size.width(), size.height(), 0, NULL);
+ if (ret < 0)
return ret;
gfx::Size natural_size;
@@ -88,62 +105,70 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
natural_size = config_.natural_size();
}
- if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size))
+ // FFmpeg has specific requirements on the allocation size of the frame. The
+ // following logic replicates FFmpeg's allocation strategy to ensure buffers
+ // are not overread / overwritten. See ff_init_buffer_info() for details.
+ //
+ // When lowres is non-zero, dimensions should be divided by 2^(lowres), but
+ // since we don't use this, just DCHECK that it's zero.
+ //
+ // Always round up to a multiple of two to match VideoFrame restrictions on
+ // frame alignment.
+ DCHECK_EQ(codec_context->lowres, 0);
+ gfx::Size coded_size(
+ RoundUp(std::max(size.width(), codec_context->coded_width), 2),
+ RoundUp(std::max(size.height(), codec_context->coded_height), 2));
+
+ if (!VideoFrame::IsValidConfig(
+ format, coded_size, gfx::Rect(size), natural_size))
return AVERROR(EINVAL);
- scoped_refptr<VideoFrame> video_frame =
- frame_pool_.CreateFrame(format, size, gfx::Rect(size),
- natural_size, kNoTimestamp());
+ scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame(
+ format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp());
for (int i = 0; i < 3; i++) {
- frame->base[i] = video_frame->data(i);
frame->data[i] = video_frame->data(i);
frame->linesize[i] = video_frame->stride(i);
}
- frame->opaque = NULL;
- video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
- frame->type = FF_BUFFER_TYPE_USER;
- frame->width = codec_context->width;
- frame->height = codec_context->height;
+ frame->width = coded_size.width();
+ frame->height = coded_size.height();
frame->format = codec_context->pix_fmt;
-
+ frame->reordered_opaque = codec_context->reordered_opaque;
+
+ // Now create an AVBufferRef for the data just allocated. It will own the
+ // reference to the VideoFrame object.
+ void* opaque = NULL;
+ video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
+ frame->buf[0] =
+ av_buffer_create(frame->data[0],
+ VideoFrame::AllocationSize(format, coded_size),
+ ReleaseVideoBufferImpl,
+ opaque,
+ 0);
return 0;
}
-static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
- FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
- return decoder->GetVideoBuffer(s, frame);
-}
-
-static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
- scoped_refptr<VideoFrame> video_frame;
- video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
-
- // The FFmpeg API expects us to zero the data pointers in
- // this callback
- memset(frame->data, 0, sizeof(frame->data));
- frame->opaque = NULL;
-}
-
void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(decode_cb_.is_null());
- DCHECK(reset_cb_.is_null());
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!config.is_encrypted());
+ DCHECK(!output_cb.is_null());
FFmpegGlue::InitializeFFmpeg();
- weak_this_ = weak_factory_.GetWeakPtr();
config_ = config;
PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);
- if (!config.IsValidConfig() || !ConfigureDecoder()) {
+ if (!config.IsValidConfig() || !ConfigureDecoder(low_delay)) {
initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
+ output_cb_ = BindToCurrentLoop(output_cb);
+
// Success!
state_ = kNormal;
initialize_cb.Run(PIPELINE_OK);
@@ -151,79 +176,24 @@ void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(buffer);
DCHECK(!decode_cb.is_null());
CHECK_NE(state_, kUninitialized);
- CHECK(decode_cb_.is_null()) << "Overlapping decodes are not supported.";
- decode_cb_ = BindToCurrentLoop(decode_cb);
+
+ DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb);
if (state_ == kError) {
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
+ decode_cb_bound.Run(kDecodeError);
return;
}
- // Return empty frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
+ decode_cb_bound.Run(kOk);
return;
}
- DecodeBuffer(buffer);
-}
-
-void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(reset_cb_.is_null());
- reset_cb_ = BindToCurrentLoop(closure);
-
- // Defer the reset if a decode is pending.
- if (!decode_cb_.is_null())
- return;
-
- DoReset();
-}
-
-void FFmpegVideoDecoder::DoReset() {
- DCHECK(decode_cb_.is_null());
-
- avcodec_flush_buffers(codec_context_.get());
- state_ = kNormal;
- base::ResetAndReturn(&reset_cb_).Run();
-}
-
-void FFmpegVideoDecoder::Stop(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::ScopedClosureRunner runner(BindToCurrentLoop(closure));
-
- if (state_ == kUninitialized)
- return;
-
- if (!decode_cb_.is_null()) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, NULL);
- // Reset is pending only when decode is pending.
- if (!reset_cb_.is_null())
- base::ResetAndReturn(&reset_cb_).Run();
- }
-
- ReleaseFFmpegResources();
- state_ = kUninitialized;
-}
-
-FFmpegVideoDecoder::~FFmpegVideoDecoder() {
- DCHECK_EQ(kUninitialized, state_);
- DCHECK(!codec_context_);
- DCHECK(!av_frame_);
-}
-
-void FFmpegVideoDecoder::DecodeBuffer(
- const scoped_refptr<DecoderBuffer>& buffer) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_NE(state_, kUninitialized);
- DCHECK_NE(state_, kDecodeFinished);
- DCHECK_NE(state_, kError);
- DCHECK(reset_cb_.is_null());
- DCHECK(!decode_cb_.is_null());
- DCHECK(buffer);
+ DCHECK_EQ(state_, kNormal);
// During decode, because reads are issued asynchronously, it is possible to
// receive multiple end of stream buffers since each decode is acked. When the
@@ -234,60 +204,63 @@ void FFmpegVideoDecoder::DecodeBuffer(
//
// kNormal: This is the starting state. Buffers are decoded. Decode errors
// are discarded.
- // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2
- // until no more data is returned to flush out remaining
- // frames. The input buffer is ignored at this point.
// kDecodeFinished: All calls return empty frames.
// kError: Unexpected error happened.
//
// These are the possible state transitions.
//
- // kNormal -> kFlushCodec:
- // When buffer->end_of_stream() is first true.
+ // kNormal -> kDecodeFinished:
+ // When EOS buffer is received and the codec has been flushed.
// kNormal -> kError:
// A decoding error occurs and decoding needs to stop.
- // kFlushCodec -> kDecodeFinished:
- // When avcodec_decode_video2() returns 0 data.
- // kFlushCodec -> kError:
- // When avcodec_decode_video2() errors out.
// (any state) -> kNormal:
// Any time Reset() is called.
- // Transition to kFlushCodec on the first end of stream buffer.
- if (state_ == kNormal && buffer->end_of_stream()) {
- state_ = kFlushCodec;
- }
-
- scoped_refptr<VideoFrame> video_frame;
- if (!FFmpegDecode(buffer, &video_frame)) {
- state_ = kError;
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
- return;
- }
-
- if (!video_frame.get()) {
- if (state_ == kFlushCodec) {
- DCHECK(buffer->end_of_stream());
- state_ = kDecodeFinished;
- base::ResetAndReturn(&decode_cb_)
- .Run(kOk, VideoFrame::CreateEOSFrame());
+ bool has_produced_frame;
+ do {
+ has_produced_frame = false;
+ if (!FFmpegDecode(buffer, &has_produced_frame)) {
+ state_ = kError;
+ decode_cb_bound.Run(kDecodeError);
return;
}
+ // Repeat to flush the decoder after receiving EOS buffer.
+ } while (buffer->end_of_stream() && has_produced_frame);
+
+ if (buffer->end_of_stream())
+ state_ = kDecodeFinished;
+
+ decode_cb_bound.Run(kOk);
+}
+
+void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ avcodec_flush_buffers(codec_context_.get());
+ state_ = kNormal;
+ task_runner_->PostTask(FROM_HERE, closure);
+}
- base::ResetAndReturn(&decode_cb_).Run(kNotEnoughData, NULL);
+void FFmpegVideoDecoder::Stop() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ == kUninitialized)
return;
- }
- base::ResetAndReturn(&decode_cb_).Run(kOk, video_frame);
+ ReleaseFFmpegResources();
+ state_ = kUninitialized;
+}
+
+FFmpegVideoDecoder::~FFmpegVideoDecoder() {
+ DCHECK_EQ(kUninitialized, state_);
+ DCHECK(!codec_context_);
+ DCHECK(!av_frame_);
}
bool FFmpegVideoDecoder::FFmpegDecode(
const scoped_refptr<DecoderBuffer>& buffer,
- scoped_refptr<VideoFrame>* video_frame) {
- DCHECK(video_frame);
-
- // Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_.get());
+ bool* has_produced_frame) {
+ DCHECK(!*has_produced_frame);
// Create a packet for input data.
// Due to FFmpeg API changes we no longer have const read-only pointers.
@@ -302,10 +275,6 @@ bool FFmpegVideoDecoder::FFmpegDecode(
// Let FFmpeg handle presentation timestamp reordering.
codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
-
- // This is for codecs not using get_buffer to initialize
- // |av_frame_->reordered_opaque|
- av_frame_->reordered_opaque = codec_context_->reordered_opaque;
}
int frame_decoded = 0;
@@ -316,16 +285,19 @@ bool FFmpegVideoDecoder::FFmpegDecode(
// Log the problem if we can't decode a video frame and exit early.
if (result < 0) {
LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
- *video_frame = NULL;
return false;
}
+ // FFmpeg says some codecs might have multiple frames per packet. Previous
+ // discussions with rbultje@ indicate this shouldn't be true for the codecs
+ // we use.
+ DCHECK_EQ(result, packet.size);
+
// If no frame was produced then signal that more data is required to
// produce more frames. This can happen under two circumstances:
// 1) Decoder was recently initialized/flushed
// 2) End of stream was reached and all internal frames have been output
if (frame_decoded == 0) {
- *video_frame = NULL;
return true;
}
@@ -336,19 +308,18 @@ bool FFmpegVideoDecoder::FFmpegDecode(
!av_frame_->data[VideoFrame::kUPlane] ||
!av_frame_->data[VideoFrame::kVPlane]) {
LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
- *video_frame = NULL;
+ av_frame_unref(av_frame_.get());
return false;
}
- if (!av_frame_->opaque) {
- LOG(ERROR) << "VideoFrame object associated with frame data not set.";
- return false;
- }
- *video_frame = static_cast<VideoFrame*>(av_frame_->opaque);
-
- (*video_frame)->SetTimestamp(
+ scoped_refptr<VideoFrame> frame =
+ reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
+ frame->set_timestamp(
base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
+ *has_produced_frame = true;
+ output_cb_.Run(frame);
+ av_frame_unref(av_frame_.get());
return true;
}
@@ -357,7 +328,7 @@ void FFmpegVideoDecoder::ReleaseFFmpegResources() {
av_frame_.reset();
}
-bool FFmpegVideoDecoder::ConfigureDecoder() {
+bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) {
// Release existing decoder resources if necessary.
ReleaseFFmpegResources();
@@ -365,14 +336,15 @@ bool FFmpegVideoDecoder::ConfigureDecoder() {
codec_context_.reset(avcodec_alloc_context3(NULL));
VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());
- // Enable motion vector search (potentially slow), strong deblocking filter
- // for damaged macroblocks, and set our error detection sensitivity.
- codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
+ codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME;
codec_context_->opaque = this;
codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
- codec_context_->get_buffer = GetVideoBufferImpl;
- codec_context_->release_buffer = ReleaseVideoBufferImpl;
+ codec_context_->get_buffer2 = GetVideoBufferImpl;
+ codec_context_->refcounted_frames = 1;
+
+ if (decode_nalus_)
+ codec_context_->flags2 |= CODEC_FLAG2_CHUNKS;
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
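The decoder now follows FFmpeg's refcounted-frame model: get_buffer2 plus
refcounted_frames = 1 replace the deprecated get_buffer/release_buffer pair,
and the VideoFrame reference is kept alive by stashing it in an AVBufferRef
whose free callback drops it. Stripped of the Chromium types, the ownership
hand-off looks like this (sketch; PinnedFrame is an invented stand-in for
scoped_refptr<VideoFrame>):

    #include <stdint.h>

    extern "C" {
    #include <libavutil/buffer.h>  // av_buffer_create()
    }

    struct PinnedFrame { /* stand-in for a refcounted video frame */ };

    // FFmpeg calls this once the last AVFrame using the buffer is unreffed.
    static void ReleasePinnedFrame(void* opaque, uint8_t* /*data*/) {
      delete static_cast<PinnedFrame*>(opaque);  // Drop the pinned reference.
    }

    static AVBufferRef* WrapPlane(uint8_t* plane, int size,
                                  PinnedFrame* frame) {
      // The AVBufferRef owns |frame| from here on; decode threads may hold
      // it well past the get_buffer2 call that allocated it.
      return av_buffer_create(plane, size, ReleasePinnedFrame, frame, 0);
    }

This is why the pool-backed memory can no longer be recycled eagerly: it is
released only when FFmpeg drops its last reference.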
diff --git a/chromium/media/filters/ffmpeg_video_decoder.h b/chromium/media/filters/ffmpeg_video_decoder.h
index 28bb4e0d0cc..d7b35f1d37b 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.h
+++ b/chromium/media/filters/ffmpeg_video_decoder.h
@@ -9,85 +9,85 @@
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame_pool.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
struct AVCodecContext;
struct AVFrame;
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
class DecoderBuffer;
-class ScopedPtrAVFreeContext;
-class ScopedPtrAVFreeFrame;
class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
public:
explicit FFmpegVideoDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
virtual ~FFmpegVideoDecoder();
+ // Allow decoding of individual NALU. Entire frames are required by default.
+ // Disables low-latency mode. Must be called before Initialize().
+ void set_decode_nalus(bool decode_nalus) { decode_nalus_ = decode_nalus; }
+
// VideoDecoder implementation.
virtual void Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) OVERRIDE;
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) OVERRIDE;
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop(const base::Closure& closure) OVERRIDE;
+ virtual void Stop() OVERRIDE;
// Callback called from within FFmpeg to allocate a buffer based on
- // the dimensions of |codec_context|. See AVCodecContext.get_buffer
+ // the dimensions of |codec_context|. See AVCodecContext.get_buffer2
// documentation inside FFmpeg.
- int GetVideoBuffer(AVCodecContext *codec_context, AVFrame* frame);
+ int GetVideoBuffer(struct AVCodecContext* codec_context,
+ AVFrame* frame,
+ int flags);
private:
enum DecoderState {
kUninitialized,
kNormal,
- kFlushCodec,
kDecodeFinished,
kError
};
// Handles decoding an unencrypted encoded buffer.
- void DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer);
bool FFmpegDecode(const scoped_refptr<DecoderBuffer>& buffer,
- scoped_refptr<VideoFrame>* video_frame);
+ bool* has_produced_frame);
// Handles (re-)initializing the decoder with a (new) config.
// Returns true if initialization was successful.
- bool ConfigureDecoder();
+ bool ConfigureDecoder(bool low_delay);
// Releases resources associated with |codec_context_| and |av_frame_|
// and resets them to NULL.
void ReleaseFFmpegResources();
- // Reset decoder and call |reset_cb_|.
- void DoReset();
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<FFmpegVideoDecoder> weak_factory_;
- base::WeakPtr<FFmpegVideoDecoder> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DecoderState state_;
- DecodeCB decode_cb_;
- base::Closure reset_cb_;
+ OutputCB output_cb_;
// FFmpeg structures owned by this object.
- scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
+ scoped_ptr<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
VideoDecoderConfig config_;
VideoFramePool frame_pool_;
+ bool decode_nalus_;
+
DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoder);
};
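Note how the interface change reshapes the calling convention: Stop() is now
synchronous, Decode() acknowledges only consumption, and every decoded frame
travels through the OutputCB passed to Initialize(). A hypothetical caller
(all names invented) wires it up like this:

    void OnInitDone(media::PipelineStatus status) {}

    void OnFrameReady(const scoped_refptr<media::VideoFrame>& frame) {
      // Every decoded frame, including those flushed at EOS, arrives here.
    }

    void OnDecodeDone(media::VideoDecoder::Status status) {
      // kOk only means the input buffer was consumed, not that a frame
      // was produced.
    }

    void RunDecoder(media::FFmpegVideoDecoder* decoder,
                    const media::VideoDecoderConfig& config,
                    const scoped_refptr<media::DecoderBuffer>& buffer) {
      decoder->Initialize(config, false /* low_delay */,
                          base::Bind(&OnInitDone),
                          base::Bind(&OnFrameReady));
      decoder->Decode(buffer, base::Bind(&OnDecodeDone));
    }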
diff --git a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
index 9663dd13604..9ccdbc2865f 100644
--- a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <list>
#include <string>
#include <vector>
@@ -26,6 +27,7 @@
using ::testing::_;
using ::testing::AtLeast;
+using ::testing::AtMost;
using ::testing::InSequence;
using ::testing::IsNull;
using ::testing::Return;
@@ -47,7 +49,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
public:
FFmpegVideoDecoderTest()
: decoder_(new FFmpegVideoDecoder(message_loop_.message_loop_proxy())),
- decode_cb_(base::Bind(&FFmpegVideoDecoderTest::FrameReady,
+ decode_cb_(base::Bind(&FFmpegVideoDecoderTest::DecodeDone,
base::Unretained(this))) {
FFmpegGlue::InitializeFFmpeg();
@@ -68,7 +70,9 @@ class FFmpegVideoDecoderTest : public testing::Test {
void InitializeWithConfigAndStatus(const VideoDecoderConfig& config,
PipelineStatus status) {
- decoder_->Initialize(config, NewExpectedStatusCB(status));
+ decoder_->Initialize(config, false, NewExpectedStatusCB(status),
+ base::Bind(&FFmpegVideoDecoderTest::FrameReady,
+ base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -86,98 +90,60 @@ class FFmpegVideoDecoderTest : public testing::Test {
}
void Stop() {
- decoder_->Stop(NewExpectedClosure());
+ decoder_->Stop();
message_loop_.RunUntilIdle();
}
// Sets up expectations and actions to put FFmpegVideoDecoder in an active
// decoding state.
void EnterDecodingState() {
- VideoDecoder::Status status;
- scoped_refptr<VideoFrame> video_frame;
- DecodeSingleFrame(i_frame_buffer_, &status, &video_frame);
-
- EXPECT_EQ(VideoDecoder::kOk, status);
- ASSERT_TRUE(video_frame.get());
- EXPECT_FALSE(video_frame->end_of_stream());
+ EXPECT_EQ(VideoDecoder::kOk, DecodeSingleFrame(i_frame_buffer_));
+ ASSERT_EQ(1U, output_frames_.size());
}
// Sets up expectations and actions to put FFmpegVideoDecoder in an end
// of stream state.
void EnterEndOfStreamState() {
- VideoDecoder::Status status;
- scoped_refptr<VideoFrame> video_frame;
- DecodeSingleFrame(end_of_stream_buffer_, &status, &video_frame);
- EXPECT_EQ(VideoDecoder::kOk, status);
- ASSERT_TRUE(video_frame.get());
- EXPECT_TRUE(video_frame->end_of_stream());
+ EXPECT_EQ(VideoDecoder::kOk, DecodeSingleFrame(end_of_stream_buffer_));
+ ASSERT_FALSE(output_frames_.empty());
}
typedef std::vector<scoped_refptr<DecoderBuffer> > InputBuffers;
typedef std::vector<scoped_refptr<VideoFrame> > OutputFrames;
// Decodes all buffers in |input_buffers| and pushes all successfully decoded
- // output frames (excluding EOS frames) into |output_frames|.
+ // output frames into |output_frames_|.
// Returns the last decode status returned by the decoder.
- VideoDecoder::Status DecodeMultipleFrames(const InputBuffers& input_buffers,
- OutputFrames* output_frames) {
- InputBuffers::const_iterator input_iter = input_buffers.begin();
-
- for (;;) {
- // Prepare input buffer.
- scoped_refptr<DecoderBuffer> buffer;
- if (input_iter != input_buffers.end()) {
- buffer = *input_iter;
- ++input_iter;
- } else {
- buffer = end_of_stream_buffer_;
- }
-
- VideoDecoder::Status status;
- scoped_refptr<VideoFrame> frame;
- Decode(buffer, &status, &frame);
-
+ VideoDecoder::Status DecodeMultipleFrames(const InputBuffers& input_buffers) {
+ for (InputBuffers::const_iterator iter = input_buffers.begin();
+ iter != input_buffers.end();
+ ++iter) {
+ VideoDecoder::Status status = Decode(*iter);
switch (status) {
case VideoDecoder::kOk:
- DCHECK(frame);
- if (!frame->end_of_stream()) {
- output_frames->push_back(frame);
- continue;
- } else { // EOS
- return status;
- }
- case VideoDecoder::kNotEnoughData:
- DCHECK(!frame);
- continue;
+ break;
+ case VideoDecoder::kAborted:
+ NOTREACHED();
case VideoDecoder::kDecodeError:
case VideoDecoder::kDecryptError:
- DCHECK(!frame);
+ DCHECK(output_frames_.empty());
return status;
}
}
+ return VideoDecoder::kOk;
}
// Decodes the single compressed frame in |buffer| and collects the
// uncompressed output in |output_frames_|. This method works with single
// and multithreaded decoders. End of stream buffers are used to trigger
// the frame to be returned in the multithreaded decoder case.
- void DecodeSingleFrame(const scoped_refptr<DecoderBuffer>& buffer,
- VideoDecoder::Status* status,
- scoped_refptr<VideoFrame>* video_frame) {
+ VideoDecoder::Status DecodeSingleFrame(
+ const scoped_refptr<DecoderBuffer>& buffer) {
InputBuffers input_buffers;
input_buffers.push_back(buffer);
+ input_buffers.push_back(end_of_stream_buffer_);
- OutputFrames output_frames;
- *status = DecodeMultipleFrames(input_buffers, &output_frames);
-
- if (*status != VideoDecoder::kOk)
- return;
-
- ASSERT_LE(output_frames.size(), 1U);
- if (output_frames.size() == 1U)
- *video_frame = output_frames[0];
- else
- *video_frame = VideoFrame::CreateEOSFrame();
+ return DecodeMultipleFrames(input_buffers);
}
// Decodes |i_frame_buffer_| and then decodes the data contained in
@@ -192,38 +158,42 @@ class FFmpegVideoDecoderTest : public testing::Test {
InputBuffers input_buffers;
input_buffers.push_back(i_frame_buffer_);
input_buffers.push_back(buffer);
+ input_buffers.push_back(end_of_stream_buffer_);
- OutputFrames output_frames;
VideoDecoder::Status status =
- DecodeMultipleFrames(input_buffers, &output_frames);
+ DecodeMultipleFrames(input_buffers);
EXPECT_EQ(VideoDecoder::kOk, status);
- ASSERT_EQ(2U, output_frames.size());
+ ASSERT_EQ(2U, output_frames_.size());
gfx::Size original_size = kVisibleRect.size();
EXPECT_EQ(original_size.width(),
- output_frames[0]->visible_rect().size().width());
+ output_frames_[0]->visible_rect().size().width());
EXPECT_EQ(original_size.height(),
- output_frames[0]->visible_rect().size().height());
+ output_frames_[0]->visible_rect().size().height());
EXPECT_EQ(expected_width,
- output_frames[1]->visible_rect().size().width());
+ output_frames_[1]->visible_rect().size().width());
EXPECT_EQ(expected_height,
- output_frames[1]->visible_rect().size().height());
+ output_frames_[1]->visible_rect().size().height());
}
- void Decode(const scoped_refptr<DecoderBuffer>& buffer,
- VideoDecoder::Status* status,
- scoped_refptr<VideoFrame>* video_frame) {
- EXPECT_CALL(*this, FrameReady(_, _))
- .WillOnce(DoAll(SaveArg<0>(status), SaveArg<1>(video_frame)));
+ VideoDecoder::Status Decode(const scoped_refptr<DecoderBuffer>& buffer) {
+ VideoDecoder::Status status;
+ EXPECT_CALL(*this, DecodeDone(_)).WillOnce(SaveArg<0>(&status));
decoder_->Decode(buffer, decode_cb_);
message_loop_.RunUntilIdle();
+
+ return status;
+ }
+
+ void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ DCHECK(!frame->end_of_stream());
+ output_frames_.push_back(frame);
}
- MOCK_METHOD2(FrameReady, void(VideoDecoder::Status,
- const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(DecodeDone, void(VideoDecoder::Status));
base::MessageLoop message_loop_;
scoped_ptr<FFmpegVideoDecoder> decoder_;
@@ -236,6 +206,8 @@ class FFmpegVideoDecoderTest : public testing::Test {
scoped_refptr<DecoderBuffer> i_frame_buffer_;
scoped_refptr<DecoderBuffer> corrupt_i_frame_buffer_;
+ OutputFrames output_frames_;
+
private:
DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoderTest);
};
@@ -353,13 +325,8 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_Normal) {
Initialize();
// Simulate decoding a single frame.
- VideoDecoder::Status status;
- scoped_refptr<VideoFrame> video_frame;
- DecodeSingleFrame(i_frame_buffer_, &status, &video_frame);
-
- EXPECT_EQ(VideoDecoder::kOk, status);
- ASSERT_TRUE(video_frame.get());
- EXPECT_FALSE(video_frame->end_of_stream());
+ EXPECT_EQ(VideoDecoder::kOk, DecodeSingleFrame(i_frame_buffer_));
+ ASSERT_EQ(1U, output_frames_.size());
}
// Verify current behavior for 0 byte frames. FFmpeg simply ignores
@@ -373,39 +340,29 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_0ByteFrame) {
input_buffers.push_back(i_frame_buffer_);
input_buffers.push_back(zero_byte_buffer);
input_buffers.push_back(i_frame_buffer_);
+ input_buffers.push_back(end_of_stream_buffer_);
- OutputFrames output_frames;
- VideoDecoder::Status status =
- DecodeMultipleFrames(input_buffers, &output_frames);
+ VideoDecoder::Status status = DecodeMultipleFrames(input_buffers);
EXPECT_EQ(VideoDecoder::kOk, status);
- ASSERT_EQ(2U, output_frames.size());
-
- EXPECT_FALSE(output_frames[0]->end_of_stream());
- EXPECT_FALSE(output_frames[1]->end_of_stream());
+ ASSERT_EQ(2U, output_frames_.size());
}
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeError) {
Initialize();
- VideoDecoder::Status status;
- scoped_refptr<VideoFrame> frame;
-
// The error is only raised on the second decode attempt, so we expect at
// least one successful decode, but we don't expect a valid frame to be decoded.
// During the second decode attempt an error is raised.
- Decode(corrupt_i_frame_buffer_, &status, &frame);
- DCHECK(!frame);
- DCHECK_EQ(VideoDecoder::kNotEnoughData, status);
- Decode(i_frame_buffer_, &status, &frame);
- DCHECK(!frame);
- DCHECK_EQ(VideoDecoder::kDecodeError, status);
+ EXPECT_EQ(VideoDecoder::kOk, Decode(corrupt_i_frame_buffer_));
+ EXPECT_TRUE(output_frames_.empty());
+ EXPECT_EQ(VideoDecoder::kDecodeError, Decode(i_frame_buffer_));
+ EXPECT_TRUE(output_frames_.empty());
// After a decode error occurred, all following decodes will return
// kDecodeError.
- Decode(i_frame_buffer_, &status, &frame);
- DCHECK(!frame);
- DCHECK_EQ(VideoDecoder::kDecodeError, status);
+ EXPECT_EQ(VideoDecoder::kDecodeError, Decode(i_frame_buffer_));
+ EXPECT_TRUE(output_frames_.empty());
}
// Multi-threaded decoders have different behavior than single-threaded
@@ -416,13 +373,7 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeError) {
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeErrorAtEndOfStream) {
Initialize();
- VideoDecoder::Status status;
- scoped_refptr<VideoFrame> video_frame;
- DecodeSingleFrame(corrupt_i_frame_buffer_, &status, &video_frame);
-
- EXPECT_EQ(VideoDecoder::kOk, status);
- ASSERT_TRUE(video_frame.get());
- EXPECT_TRUE(video_frame->end_of_stream());
+ EXPECT_EQ(VideoDecoder::kOk, DecodeSingleFrame(corrupt_i_frame_buffer_));
}
// Decode |i_frame_buffer_| and then a frame with a larger width and verify
diff --git a/chromium/media/filters/file_data_source.cc b/chromium/media/filters/file_data_source.cc
index 341347e78a1..e8b3292898f 100644
--- a/chromium/media/filters/file_data_source.cc
+++ b/chromium/media/filters/file_data_source.cc
@@ -15,30 +15,15 @@ FileDataSource::FileDataSource()
force_streaming_(false) {
}
-bool FileDataSource::Initialize(const base::FilePath& file_path) {
- DCHECK(!file_.IsValid());
-
- if (!file_.Initialize(file_path))
- return false;
-
- UpdateHostBytes();
- return true;
+FileDataSource::FileDataSource(base::File file)
+ : force_read_errors_(false),
+ force_streaming_(false) {
+ file_.Initialize(file.Pass());
}
-bool FileDataSource::InitializeFromPlatformFile(
- const base::PlatformFile& file) {
+bool FileDataSource::Initialize(const base::FilePath& file_path) {
DCHECK(!file_.IsValid());
-
- if (!file_.Initialize(file))
- return false;
-
- UpdateHostBytes();
- return true;
-}
-
-void FileDataSource::set_host(DataSourceHost* host) {
- DataSource::set_host(host);
- UpdateHostBytes();
+ return file_.Initialize(file_path);
}
void FileDataSource::Stop(const base::Closure& callback) {
@@ -79,11 +64,4 @@ void FileDataSource::SetBitrate(int bitrate) {}
FileDataSource::~FileDataSource() {}
-void FileDataSource::UpdateHostBytes() {
- if (host() && file_.IsValid()) {
- host()->SetTotalBytes(file_.length());
- host()->AddBufferedByteRange(0, file_.length());
- }
-}
-
} // namespace media
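With set_host() and UpdateHostBytes() gone, total size is pulled by callers
through GetSize() rather than pushed to a DataSourceHost, and the new
constructor takes an already-open file. Hypothetical usage (file name
invented):

    base::File file(base::FilePath(FILE_PATH_LITERAL("clip.webm")),
                    base::File::FLAG_OPEN | base::File::FLAG_READ);
    media::FileDataSource data_source(file.Pass());  // Ownership moves in;
                                                     // the file is mmapped.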
diff --git a/chromium/media/filters/file_data_source.h b/chromium/media/filters/file_data_source.h
index c0164dac574..739bc2ec3ed 100644
--- a/chromium/media/filters/file_data_source.h
+++ b/chromium/media/filters/file_data_source.h
@@ -7,9 +7,9 @@
#include <string>
+#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
-#include "base/platform_file.h"
#include "media/base/data_source.h"
namespace media {
@@ -19,13 +19,12 @@ namespace media {
class MEDIA_EXPORT FileDataSource : public DataSource {
public:
FileDataSource();
+ explicit FileDataSource(base::File file);
virtual ~FileDataSource();
bool Initialize(const base::FilePath& file_path);
- bool InitializeFromPlatformFile(const base::PlatformFile& file);
// Implementation of DataSource.
- virtual void set_host(DataSourceHost* host) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void Read(int64 position, int size, uint8* data,
const DataSource::ReadCB& read_cb) OVERRIDE;
@@ -38,9 +37,6 @@ class MEDIA_EXPORT FileDataSource : public DataSource {
void force_streaming_for_testing() { force_streaming_ = true; }
private:
- // Informs the host of changes in total and buffered bytes.
- void UpdateHostBytes();
-
base::MemoryMappedFile file_;
bool force_read_errors_;
diff --git a/chromium/media/filters/file_data_source_unittest.cc b/chromium/media/filters/file_data_source_unittest.cc
index f6c9b898858..5eb94ca9e37 100644
--- a/chromium/media/filters/file_data_source_unittest.cc
+++ b/chromium/media/filters/file_data_source_unittest.cc
@@ -9,7 +9,6 @@
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "base/strings/utf_string_conversions.h"
-#include "media/base/mock_data_source_host.h"
#include "media/base/test_helpers.h"
#include "media/filters/file_data_source.h"
@@ -44,31 +43,15 @@ base::FilePath TestFileURL() {
return data_dir;
}
-// Test that FileDataSource call the appropriate methods on its filter host.
-TEST(FileDataSourceTest, OpenFile) {
- StrictMock<MockDataSourceHost> host;
- EXPECT_CALL(host, SetTotalBytes(10));
- EXPECT_CALL(host, AddBufferedByteRange(0, 10));
-
- FileDataSource data_source;
- data_source.set_host(&host);
- EXPECT_TRUE(data_source.Initialize(TestFileURL()));
-
- data_source.Stop(NewExpectedClosure());
-}
-
// Directly call the Read and GetPosition methods.
TEST(FileDataSourceTest, ReadData) {
int64 size;
uint8 ten_bytes[10];
// Initialize the data source.
- NiceMock<MockDataSourceHost> host;
FileDataSource data_source;
- data_source.set_host(&host);
EXPECT_TRUE(data_source.Initialize(TestFileURL()));
-
EXPECT_TRUE(data_source.GetSize(&size));
EXPECT_EQ(10, size);
diff --git a/chromium/media/filters/frame_processor.cc b/chromium/media/filters/frame_processor.cc
new file mode 100644
index 00000000000..68f4c613d81
--- /dev/null
+++ b/chromium/media/filters/frame_processor.cc
@@ -0,0 +1,373 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/frame_processor.h"
+
+#include "base/stl_util.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+
+namespace media {
+
+FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb)
+ : update_duration_cb_(update_duration_cb) {
+ DVLOG(2) << __FUNCTION__ << "()";
+ DCHECK(!update_duration_cb.is_null());
+}
+
+FrameProcessor::~FrameProcessor() {
+ DVLOG(2) << __FUNCTION__;
+}
+
+void FrameProcessor::SetSequenceMode(bool sequence_mode) {
+ DVLOG(2) << __FUNCTION__ << "(" << sequence_mode << ")";
+
+ // Per April 1, 2014 MSE spec editor's draft:
+ // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/media-source.html#widl-SourceBuffer-mode
+ // Step 7: If the new mode equals "sequence", then set the group start
+ // timestamp to the group end timestamp.
+ if (sequence_mode) {
+ DCHECK(kNoTimestamp() != group_end_timestamp_);
+ group_start_timestamp_ = group_end_timestamp_;
+ }
+
+ // Step 8: Update the attribute to new mode.
+ sequence_mode_ = sequence_mode;
+}
+
+bool FrameProcessor::ProcessFrames(
+ const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ bool* new_media_segment,
+ base::TimeDelta* timestamp_offset) {
+ StreamParser::BufferQueue frames;
+ if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
+ DVLOG(2) << "Parse error discovered while merging parser's buffers";
+ return false;
+ }
+
+ DCHECK(!frames.empty());
+
+ // Implements the coded frame processing algorithm's outer loop for step 1.
+ // Note that ProcessFrame() implements an inner loop for a single frame that
+ // handles "jump to the Loop Top step to restart processing of the current
+ // coded frame" per April 1, 2014 MSE spec editor's draft:
+ // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
+ // media-source.html#sourcebuffer-coded-frame-processing
+ // 1. For each coded frame in the media segment run the following steps:
+ for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
+ frames_itr != frames.end(); ++frames_itr) {
+ if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
+ timestamp_offset, new_media_segment)) {
+ return false;
+ }
+ }
+
+ // 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.
+
+ // Step 5:
+ update_duration_cb_.Run(group_end_timestamp_);
+
+ return true;
+}
+
+bool FrameProcessor::ProcessFrame(
+ const scoped_refptr<StreamParserBuffer>& frame,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset,
+ bool* new_media_segment) {
+ // Implements the loop within step 1 of the coded frame processing algorithm
+ // for a single input frame per April 1, 2014 MSE spec editor's draft:
+ // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
+ // media-source.html#sourcebuffer-coded-frame-processing
+
+ while (true) {
+ // 1. Loop Top: Let presentation timestamp be a double precision floating
+ // point representation of the coded frame's presentation timestamp in
+ // seconds.
+ // 2. Let decode timestamp be a double precision floating point
+ // representation of the coded frame's decode timestamp in seconds.
+ // 3. Let frame duration be a double precision floating point representation
+ // of the coded frame's duration in seconds.
+ // We use base::TimeDelta instead of double.
+ base::TimeDelta presentation_timestamp = frame->timestamp();
+ base::TimeDelta decode_timestamp = frame->GetDecodeTimestamp();
+ base::TimeDelta frame_duration = frame->duration();
+
+ DVLOG(3) << __FUNCTION__ << ": Processing frame "
+ << "Type=" << frame->type()
+ << ", TrackID=" << frame->track_id()
+ << ", PTS=" << presentation_timestamp.InSecondsF()
+ << ", DTS=" << decode_timestamp.InSecondsF()
+ << ", DUR=" << frame_duration.InSecondsF();
+
+ // Sanity check the timestamps.
+ if (presentation_timestamp == kNoTimestamp()) {
+ DVLOG(2) << __FUNCTION__ << ": Unknown frame PTS";
+ return false;
+ }
+ if (decode_timestamp == kNoTimestamp()) {
+ DVLOG(2) << __FUNCTION__ << ": Unknown frame DTS";
+ return false;
+ }
+ if (decode_timestamp > presentation_timestamp) {
+ // TODO(wolenetz): Determine whether DTS>PTS should really be allowed. See
+ // http://crbug.com/354518.
+ DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
+ << decode_timestamp.InSecondsF() << ") > PTS("
+ << presentation_timestamp.InSecondsF() << ")";
+ }
+
+ // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
+ // frame durations. For now, we allow non-negative frame duration.
+ // See http://crbug.com/351166.
+ if (frame_duration == kNoTimestamp()) {
+ DVLOG(2) << __FUNCTION__ << ": Frame missing duration (kNoTimestamp())";
+ return false;
+ }
+ if (frame_duration < base::TimeDelta()) {
+ DVLOG(2) << __FUNCTION__ << ": Frame duration negative: "
+ << frame_duration.InSecondsF();
+ return false;
+ }
+
+ // 4. If mode equals "sequence" and group start timestamp is set, then run
+ // the following steps:
+ if (sequence_mode_ && group_start_timestamp_ != kNoTimestamp()) {
+ // 4.1. Set timestampOffset equal to group start timestamp -
+ // presentation timestamp.
+ *timestamp_offset = group_start_timestamp_ - presentation_timestamp;
+
+ DVLOG(3) << __FUNCTION__ << ": updated timestampOffset is now "
+ << timestamp_offset->InSecondsF();
+
+ // 4.2. Set group end timestamp equal to group start timestamp.
+ group_end_timestamp_ = group_start_timestamp_;
+
+ // 4.3. Set the need random access point flag on all track buffers to
+ // true.
+ SetAllTrackBuffersNeedRandomAccessPoint();
+
+ // 4.4. Unset group start timestamp.
+ group_start_timestamp_ = kNoTimestamp();
+ }
+
+ // 5. If timestampOffset is not 0, then run the following steps:
+ if (*timestamp_offset != base::TimeDelta()) {
+ // 5.1. Add timestampOffset to the presentation timestamp.
+ // Note: |frame| PTS is only updated if it survives discontinuity
+ // processing.
+ presentation_timestamp += *timestamp_offset;
+
+ // 5.2. Add timestampOffset to the decode timestamp.
+ // Frame DTS is only updated if it survives discontinuity processing.
+ decode_timestamp += *timestamp_offset;
+ }
+
+ // 6. Let track buffer equal the track buffer that the coded frame will be
+ // added to.
+
+ // Remap audio and video track types to their special singleton identifiers.
+ StreamParser::TrackId track_id = kAudioTrackId;
+ switch (frame->type()) {
+ case DemuxerStream::AUDIO:
+ break;
+ case DemuxerStream::VIDEO:
+ track_id = kVideoTrackId;
+ break;
+ case DemuxerStream::TEXT:
+ track_id = frame->track_id();
+ break;
+ case DemuxerStream::UNKNOWN:
+ case DemuxerStream::NUM_TYPES:
+ DCHECK(false) << ": Invalid frame type " << frame->type();
+ return false;
+ }
+
+ MseTrackBuffer* track_buffer = FindTrack(track_id);
+ if (!track_buffer) {
+ DVLOG(2) << __FUNCTION__ << ": Unknown track: type=" << frame->type()
+ << ", frame processor track id=" << track_id
+ << ", parser track id=" << frame->track_id();
+ return false;
+ }
+
+ // 7. If last decode timestamp for track buffer is set and decode timestamp
+ // is less than last decode timestamp
+ // OR
+ // If last decode timestamp for track buffer is set and the difference
+ // between decode timestamp and last decode timestamp is greater than 2
+ // times last frame duration:
+ base::TimeDelta last_decode_timestamp =
+ track_buffer->last_decode_timestamp();
+ if (last_decode_timestamp != kNoTimestamp()) {
+ base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp;
+ if (dts_delta < base::TimeDelta() ||
+ dts_delta > 2 * track_buffer->last_frame_duration()) {
+ // 7.1. If mode equals "segments": Set group end timestamp to
+ // presentation timestamp.
+ // If mode equals "sequence": Set group start timestamp equal to
+ // the group end timestamp.
+ if (!sequence_mode_) {
+ group_end_timestamp_ = presentation_timestamp;
+ // This triggers a discontinuity so we need to treat the next frames
+ // appended within the append window as if they were the beginning of
+ // a new segment.
+ *new_media_segment = true;
+ } else {
+ DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
+ << group_end_timestamp_.InSecondsF();
+ DCHECK(kNoTimestamp() != group_end_timestamp_);
+ group_start_timestamp_ = group_end_timestamp_;
+ }
+
+ // 7.2. - 7.5.:
+ Reset();
+
+ // 7.6. Jump to the Loop Top step above to restart processing of the
+ // current coded frame.
+ DVLOG(3) << __FUNCTION__ << ": Discontinuity: reprocessing frame";
+ continue;
+ }
+ }
+
+ // 9. Let frame end timestamp equal the sum of presentation timestamp and
+ // frame duration.
+ const base::TimeDelta frame_end_timestamp =
+ presentation_timestamp + frame_duration;
+
+ // 10. If presentation timestamp is less than appendWindowStart, then set
+ // the need random access point flag to true, drop the coded frame, and
+ // jump to the top of the loop to start processing the next coded
+ // frame.
+ // Note: We keep the result of partial discard of a buffer that overlaps
+ // |append_window_start| and does not end after |append_window_end|.
+ // 11. If frame end timestamp is greater than appendWindowEnd, then set the
+ // need random access point flag to true, drop the coded frame, and jump
+ // to the top of the loop to start processing the next coded frame.
+ frame->set_timestamp(presentation_timestamp);
+ frame->SetDecodeTimestamp(decode_timestamp);
+ if (track_buffer->stream()->supports_partial_append_window_trimming() &&
+ HandlePartialAppendWindowTrimming(append_window_start,
+ append_window_end,
+ frame)) {
+ // If |frame| was shortened, a discontinuity may exist, so treat the next
+ // frames appended as if they were the beginning of a new media segment.
+ if (frame->timestamp() != presentation_timestamp && !sequence_mode_)
+ *new_media_segment = true;
+
+ // |frame| has been partially trimmed or had preroll added. Though
+ // |frame|'s duration may have changed, do not update |frame_duration|
+ // here, so |track_buffer|'s last frame duration update uses original
+ // frame duration and reduces spurious discontinuity detection.
+ decode_timestamp = frame->GetDecodeTimestamp();
+ presentation_timestamp = frame->timestamp();
+
+ // The end timestamp of the frame should be unchanged.
+ DCHECK(frame_end_timestamp == presentation_timestamp + frame->duration());
+ }
+
+ if (presentation_timestamp < append_window_start ||
+ frame_end_timestamp > append_window_end) {
+ track_buffer->set_needs_random_access_point(true);
+ DVLOG(3) << "Dropping frame that is outside append window.";
+
+ if (!sequence_mode_) {
+ // This also triggers a discontinuity so we need to treat the next
+ // frames appended within the append window as if they were the
+ // beginning of a new segment.
+ *new_media_segment = true;
+ }
+
+ return true;
+ }
+
+ // Note: This step is relocated, versus April 1 spec, to allow append window
+ // processing to first filter coded frames shifted by |timestamp_offset_| in
+ // such a way that their PTS is negative.
+ // 8. If the presentation timestamp or decode timestamp is less than the
+ // presentation start time, then run the end of stream algorithm with the
+ // error parameter set to "decode", and abort these steps.
+ DCHECK(presentation_timestamp >= base::TimeDelta());
+ if (decode_timestamp < base::TimeDelta()) {
+ // B-frames may still result in negative DTS here after being shifted by
+ // |timestamp_offset_|.
+ DVLOG(2) << __FUNCTION__
+ << ": frame PTS=" << presentation_timestamp.InSecondsF()
+ << " has negative DTS=" << decode_timestamp.InSecondsF()
+ << " after applying timestampOffset, handling any discontinuity,"
+ << " and filtering against append window";
+ return false;
+ }
+
+ // 12. If the need random access point flag on track buffer equals true,
+ // then run the following steps:
+ if (track_buffer->needs_random_access_point()) {
+ // 12.1. If the coded frame is not a random access point, then drop the
+ // coded frame and jump to the top of the loop to start processing
+ // the next coded frame.
+ if (!frame->IsKeyframe()) {
+ DVLOG(3) << __FUNCTION__
+ << ": Dropping frame that is not a random access point";
+ return true;
+ }
+
+ // 12.2. Set the need random access point flag on track buffer to false.
+ track_buffer->set_needs_random_access_point(false);
+ }
+
+ // We now have a processed buffer to append to the track buffer's stream.
+ // If it is the first in a new media segment or following a discontinuity,
+ // notify all the track buffers' streams that a new segment is beginning.
+ if (*new_media_segment) {
+ *new_media_segment = false;
+ NotifyNewMediaSegmentStarting(decode_timestamp);
+ }
+
+ DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
+ << "PTS=" << presentation_timestamp.InSecondsF()
+ << ", DTS=" << decode_timestamp.InSecondsF();
+
+ // Steps 13-18:
+ // TODO(wolenetz): Collect and emit more than one buffer at a time, if
+ // possible. Also refactor SourceBufferStream to conform to spec GC timing.
+ // See http://crbug.com/371197.
+ StreamParser::BufferQueue buffer_to_append;
+ buffer_to_append.push_back(frame);
+ if (!track_buffer->stream()->Append(buffer_to_append)) {
+ DVLOG(3) << __FUNCTION__ << ": Failure appending frame to stream";
+ return false;
+ }
+
+ // 19. Set last decode timestamp for track buffer to decode timestamp.
+ track_buffer->set_last_decode_timestamp(decode_timestamp);
+
+ // 20. Set last frame duration for track buffer to frame duration.
+ track_buffer->set_last_frame_duration(frame_duration);
+
+ // 21. If highest presentation timestamp for track buffer is unset or frame
+ // end timestamp is greater than highest presentation timestamp, then
+ // set highest presentation timestamp for track buffer to frame end
+ // timestamp.
+ track_buffer->SetHighestPresentationTimestampIfIncreased(
+ frame_end_timestamp);
+
+ // 22. If frame end timestamp is greater than group end timestamp, then set
+ // group end timestamp equal to frame end timestamp.
+ if (frame_end_timestamp > group_end_timestamp_)
+ group_end_timestamp_ = frame_end_timestamp;
+ DCHECK(group_end_timestamp_ >= base::TimeDelta());
+
+ return true;
+ }
+
+ NOTREACHED();
+ return false;
+}
+
+} // namespace media
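Steps 4-5 above are the heart of sequence mode: the first frame processed
after an append pins the group start timestamp, which fixes timestampOffset
for the rest of the coded frame group. With illustrative numbers:

    // Sketch of steps 4.1 and 5.1-5.2 with made-up values.
    base::TimeDelta group_start = base::TimeDelta::FromSeconds(10);
    base::TimeDelta pts = base::TimeDelta::FromSeconds(0);   // Frame PTS.
    base::TimeDelta offset = group_start - pts;  // timestampOffset = 10s.
    pts += offset;                               // Frame buffers at 10s.
    // group_end_timestamp_ then advances to pts + frame duration (step 22).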
diff --git a/chromium/media/filters/frame_processor.h b/chromium/media/filters/frame_processor.h
new file mode 100644
index 00000000000..fcfe737572a
--- /dev/null
+++ b/chromium/media/filters/frame_processor.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_FRAME_PROCESSOR_H_
+#define MEDIA_FILTERS_FRAME_PROCESSOR_H_
+
+#include "base/basictypes.h"
+#include "base/callback_forward.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
+#include "media/filters/frame_processor_base.h"
+
+namespace media {
+
+// Helper class that implements Media Source Extension's coded frame processing
+// algorithm.
+class MEDIA_EXPORT FrameProcessor : public FrameProcessorBase {
+ public:
+ typedef base::Callback<void(base::TimeDelta)> UpdateDurationCB;
+ explicit FrameProcessor(const UpdateDurationCB& update_duration_cb);
+ virtual ~FrameProcessor();
+
+ // FrameProcessorBase implementation
+ virtual void SetSequenceMode(bool sequence_mode) OVERRIDE;
+ virtual bool ProcessFrames(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ bool* new_media_segment,
+ base::TimeDelta* timestamp_offset) OVERRIDE;
+
+ private:
+ // Helper that processes one frame with the coded frame processing algorithm.
+ // Returns false on error or true on success.
+ bool ProcessFrame(const scoped_refptr<StreamParserBuffer>& frame,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset,
+ bool* new_media_segment);
+
+ // Tracks the MSE coded frame processing variable of the same name. It stores the
+ // highest coded frame end timestamp across all coded frames in the current
+ // coded frame group. It is set to 0 when the SourceBuffer object is created
+ // and gets updated by ProcessFrames().
+ base::TimeDelta group_end_timestamp_;
+
+ UpdateDurationCB update_duration_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameProcessor);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_FRAME_PROCESSOR_H_
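The only state FrameProcessor adds over its base class is
group_end_timestamp_ plus the duration hook that ProcessFrames() runs as spec
step 5. A hypothetical owner (names invented) would construct it like so:

    // The callback receives the group end timestamp after each successful
    // ProcessFrames() pass; a real owner (e.g. ChunkDemuxer's source state)
    // would grow the media duration when the value exceeds it.
    void OnUpdateDuration(base::TimeDelta group_end_timestamp) {}

    scoped_ptr<media::FrameProcessor> processor(
        new media::FrameProcessor(base::Bind(&OnUpdateDuration)));
    processor->SetSequenceMode(true);  // "sequence" mode per the MSE spec.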
diff --git a/chromium/media/filters/frame_processor_base.cc b/chromium/media/filters/frame_processor_base.cc
new file mode 100644
index 00000000000..c0593fe4c02
--- /dev/null
+++ b/chromium/media/filters/frame_processor_base.cc
@@ -0,0 +1,214 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/frame_processor_base.h"
+
+#include <cstdlib>
+
+#include "base/stl_util.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream)
+ : last_decode_timestamp_(kNoTimestamp()),
+ last_frame_duration_(kNoTimestamp()),
+ highest_presentation_timestamp_(kNoTimestamp()),
+ needs_random_access_point_(true),
+ stream_(stream) {
+ DCHECK(stream_);
+}
+
+MseTrackBuffer::~MseTrackBuffer() {
+ DVLOG(2) << __FUNCTION__ << "()";
+}
+
+void MseTrackBuffer::Reset() {
+ DVLOG(2) << __FUNCTION__ << "()";
+
+ last_decode_timestamp_ = kNoTimestamp();
+ last_frame_duration_ = kNoTimestamp();
+ highest_presentation_timestamp_ = kNoTimestamp();
+ needs_random_access_point_ = true;
+}
+
+void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
+ base::TimeDelta timestamp) {
+ if (highest_presentation_timestamp_ == kNoTimestamp() ||
+ timestamp > highest_presentation_timestamp_) {
+ highest_presentation_timestamp_ = timestamp;
+ }
+}
+
+FrameProcessorBase::FrameProcessorBase()
+ : sequence_mode_(false),
+ group_start_timestamp_(kNoTimestamp()) {}
+
+FrameProcessorBase::~FrameProcessorBase() {
+ DVLOG(2) << __FUNCTION__ << "()";
+
+ STLDeleteValues(&track_buffers_);
+}
+
+void FrameProcessorBase::SetGroupStartTimestampIfInSequenceMode(
+ base::TimeDelta timestamp_offset) {
+ DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
+ DCHECK(kNoTimestamp() != timestamp_offset);
+ if (sequence_mode_)
+ group_start_timestamp_ = timestamp_offset;
+
+ // Changes to timestampOffset should invalidate the preroll buffer.
+ audio_preroll_buffer_ = NULL;
+}
+
+bool FrameProcessorBase::AddTrack(StreamParser::TrackId id,
+ ChunkDemuxerStream* stream) {
+ DVLOG(2) << __FUNCTION__ << "(): id=" << id;
+
+ MseTrackBuffer* existing_track = FindTrack(id);
+ DCHECK(!existing_track);
+ if (existing_track)
+ return false;
+
+ track_buffers_[id] = new MseTrackBuffer(stream);
+ return true;
+}
+
+bool FrameProcessorBase::UpdateTrack(StreamParser::TrackId old_id,
+ StreamParser::TrackId new_id) {
+ DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;
+
+ if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id))
+ return false;
+
+ track_buffers_[new_id] = track_buffers_[old_id];
+ CHECK_EQ(1u, track_buffers_.erase(old_id));
+ return true;
+}
+
+void FrameProcessorBase::SetAllTrackBuffersNeedRandomAccessPoint() {
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end();
+ ++itr) {
+ itr->second->set_needs_random_access_point(true);
+ }
+}
+
+void FrameProcessorBase::Reset() {
+ DVLOG(2) << __FUNCTION__ << "()";
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end(); ++itr) {
+ itr->second->Reset();
+ }
+}
+
+MseTrackBuffer* FrameProcessorBase::FindTrack(StreamParser::TrackId id) {
+ TrackBufferMap::iterator itr = track_buffers_.find(id);
+ if (itr == track_buffers_.end())
+ return NULL;
+
+ return itr->second;
+}
+
+void FrameProcessorBase::NotifyNewMediaSegmentStarting(
+ base::TimeDelta segment_timestamp) {
+ DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";
+
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end();
+ ++itr) {
+ itr->second->stream()->OnNewMediaSegment(segment_timestamp);
+ }
+}
+
+bool FrameProcessorBase::HandlePartialAppendWindowTrimming(
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ const scoped_refptr<StreamParserBuffer>& buffer) {
+ DCHECK(buffer->duration() > base::TimeDelta());
+ DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
+
+ const base::TimeDelta frame_end_timestamp =
+ buffer->timestamp() + buffer->duration();
+
+  // Ignore any buffers which start after |append_window_start| or end after
+  // |append_window_end|. For simplicity, this also rejects buffers that start
+  // before |append_window_start| but end after |append_window_end|.
+ if (buffer->timestamp() > append_window_start ||
+ frame_end_timestamp > append_window_end) {
+ // TODO(dalecurtis): Partial append window trimming could also be done
+ // around |append_window_end|, but is not necessary since splice frames
+ // cover overlaps there.
+ return false;
+ }
+
+ // If the buffer is entirely before |append_window_start|, save it as preroll
+ // for the first buffer which overlaps |append_window_start|.
+ if (buffer->timestamp() < append_window_start &&
+ frame_end_timestamp <= append_window_start) {
+ audio_preroll_buffer_ = buffer;
+ return false;
+ }
+
+ // There's nothing to be done if we have no preroll and the buffer starts on
+ // the append window start.
+ if (buffer->timestamp() == append_window_start && !audio_preroll_buffer_)
+ return false;
+
+ // See if a partial discard can be done around |append_window_start|.
+ DCHECK(buffer->timestamp() <= append_window_start);
+ DCHECK(buffer->IsKeyframe());
+ DVLOG(1) << "Truncating buffer which overlaps append window start."
+ << " presentation_timestamp " << buffer->timestamp().InSecondsF()
+ << " append_window_start " << append_window_start.InSecondsF();
+
+ // If this isn't the first buffer discarded by the append window, try to use
+ // the last buffer discarded for preroll. This ensures that the partially
+ // trimmed buffer can be correctly decoded.
+ if (audio_preroll_buffer_) {
+ // We only want to use the preroll buffer if it directly precedes (less than
+ // one sample apart) the current buffer.
+ const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
+ audio_preroll_buffer_->duration() -
+ buffer->timestamp()).InMicroseconds());
+ if (delta < sample_duration_.InMicroseconds()) {
+ buffer->SetPrerollBuffer(audio_preroll_buffer_);
+ } else {
+ // TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
+ }
+ audio_preroll_buffer_ = NULL;
+ }
+
+ // Decrease the duration appropriately. We only need to shorten the buffer if
+ // it overlaps |append_window_start|.
+ if (buffer->timestamp() < append_window_start) {
+ buffer->set_discard_padding(std::make_pair(
+ append_window_start - buffer->timestamp(), base::TimeDelta()));
+ buffer->set_duration(frame_end_timestamp - append_window_start);
+ }
+
+ // Adjust the timestamp of this buffer forward to |append_window_start|. The
+ // timestamps are always set, even if |buffer|'s timestamp is already set to
+  // |append_window_start|, to ensure the preroll buffer is set up correctly.
+ buffer->set_timestamp(append_window_start);
+ buffer->SetDecodeTimestamp(append_window_start);
+ return true;
+}
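+
+// Illustrative example of the trimming above: with |append_window_start| at
+// 10ms and an audio buffer spanning [5ms, 15ms), the first 5ms are marked for
+// post-decode discard, the buffer's duration shrinks to 5ms, and both its
+// presentation and decode timestamps are moved forward to 10ms.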
+
+void FrameProcessorBase::OnPossibleAudioConfigUpdate(
+ const AudioDecoderConfig& config) {
+ DCHECK(config.IsValidConfig());
+
+ // Always clear the preroll buffer when a config update is received.
+ audio_preroll_buffer_ = NULL;
+
+ if (config.Matches(current_audio_config_))
+ return;
+
+ current_audio_config_ = config;
+ sample_duration_ = base::TimeDelta::FromSecondsD(
+ 1.0 / current_audio_config_.samples_per_second());
+}
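+
+// Illustrative example: a 44100 Hz config yields a |sample_duration_| of
+// 1.0 / 44100 seconds (about 22.7 microseconds), which bounds how far apart a
+// saved preroll buffer and a trimmed buffer may be while still being joined in
+// HandlePartialAppendWindowTrimming().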
+
+} // namespace media
diff --git a/chromium/media/filters/frame_processor_base.h b/chromium/media/filters/frame_processor_base.h
new file mode 100644
index 00000000000..7947efb892b
--- /dev/null
+++ b/chromium/media/filters/frame_processor_base.h
@@ -0,0 +1,234 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_FRAME_PROCESSOR_BASE_H_
+#define MEDIA_FILTERS_FRAME_PROCESSOR_BASE_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
+#include "media/filters/chunk_demuxer.h"
+
+namespace media {
+
+// Helper class to capture per-track details needed by a frame processor. Some
+// of this information may be duplicated in the short-term in the associated
+// ChunkDemuxerStream and SourceBufferStream for a track.
+// This parallels each of a SourceBuffer's Track Buffers in the MSE spec at
+// http://www.w3.org/TR/media-source/#track-buffers.
+class MseTrackBuffer {
+ public:
+ explicit MseTrackBuffer(ChunkDemuxerStream* stream);
+ ~MseTrackBuffer();
+
+ // Get/set |last_decode_timestamp_|.
+ base::TimeDelta last_decode_timestamp() const {
+ return last_decode_timestamp_;
+ }
+ void set_last_decode_timestamp(base::TimeDelta timestamp) {
+ last_decode_timestamp_ = timestamp;
+ }
+
+ // Get/set |last_frame_duration_|.
+ base::TimeDelta last_frame_duration() const {
+ return last_frame_duration_;
+ }
+ void set_last_frame_duration(base::TimeDelta duration) {
+ last_frame_duration_ = duration;
+ }
+
+ // Gets |highest_presentation_timestamp_|.
+ base::TimeDelta highest_presentation_timestamp() const {
+ return highest_presentation_timestamp_;
+ }
+
+ // Get/set |needs_random_access_point_|.
+ bool needs_random_access_point() const {
+ return needs_random_access_point_;
+ }
+ void set_needs_random_access_point(bool needs_random_access_point) {
+ needs_random_access_point_ = needs_random_access_point;
+ }
+
+ // Gets a pointer to this track's ChunkDemuxerStream.
+ ChunkDemuxerStream* stream() const { return stream_; }
+
+ // Unsets |last_decode_timestamp_|, unsets |last_frame_duration_|,
+ // unsets |highest_presentation_timestamp_|, and sets
+ // |needs_random_access_point_| to true.
+ void Reset();
+
+ // If |highest_presentation_timestamp_| is unset or |timestamp| is greater
+ // than |highest_presentation_timestamp_|, sets
+ // |highest_presentation_timestamp_| to |timestamp|. Note that bidirectional
+ // prediction between coded frames can cause |timestamp| to not be
+ // monotonically increasing even though the decode timestamps are
+ // monotonically increasing.
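+  // For example, frames appended in decode order with presentation timestamps
+  // 0ms, 20ms, 10ms (B-frame reordering) leave this value at 20ms.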
+ void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);
+
+ private:
+ // The decode timestamp of the last coded frame appended in the current coded
+ // frame group. Initially kNoTimestamp(), meaning "unset".
+ base::TimeDelta last_decode_timestamp_;
+
+ // The coded frame duration of the last coded frame appended in the current
+ // coded frame group. Initially kNoTimestamp(), meaning "unset".
+ base::TimeDelta last_frame_duration_;
+
+ // The highest presentation timestamp encountered in a coded frame appended
+ // in the current coded frame group. Initially kNoTimestamp(), meaning
+ // "unset".
+ base::TimeDelta highest_presentation_timestamp_;
+
+ // Keeps track of whether the track buffer is waiting for a random access
+ // point coded frame. Initially set to true to indicate that a random access
+ // point coded frame is needed before anything can be added to the track
+ // buffer.
+ bool needs_random_access_point_;
+
+ // Pointer to the stream associated with this track. The stream is not owned
+ // by |this|.
+ ChunkDemuxerStream* const stream_;
+
+ DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer);
+};
+
+// Abstract interface for helper class implementation of Media Source
+// Extension's coded frame processing algorithm.
+// TODO(wolenetz): Once the new FrameProcessor implementation stabilizes, remove
+// LegacyFrameProcessor and fold this interface into FrameProcessor. See
+// http://crbug.com/249422.
+class MEDIA_EXPORT FrameProcessorBase {
+ public:
+ // TODO(wolenetz/acolwell): Ensure that all TrackIds are coherent and unique
+ // for each track buffer. For now, special track identifiers are used for each
+ // of audio and video here, and text TrackIds are assumed to be non-negative.
+ // See http://crbug.com/341581.
+ enum {
+ kAudioTrackId = -2,
+ kVideoTrackId = -3
+ };
+
+ virtual ~FrameProcessorBase();
+
+ // Get/set the current append mode, which if true means "sequence" and if
+ // false means "segments".
+ // See http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode.
+ bool sequence_mode() { return sequence_mode_; }
+ virtual void SetSequenceMode(bool sequence_mode) = 0;
+
+ // Processes buffers in |audio_buffers|, |video_buffers|, and |text_map|.
+ // Returns true on success or false on failure which indicates decode error.
+ // |append_window_start| and |append_window_end| correspond to the MSE spec's
+ // similarly named source buffer attributes that are used in coded frame
+ // processing.
+ // |*new_media_segment| tracks whether the next buffers processed within the
+ // append window represent the start of a new media segment. This method may
+ // both use and update this flag.
+ // Uses |*timestamp_offset| according to the coded frame processing algorithm,
+ // including updating it as required in 'sequence' mode frame processing.
+ virtual bool ProcessFrames(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ bool* new_media_segment,
+ base::TimeDelta* timestamp_offset) = 0;
+
+ // Signals the frame processor to update its group start timestamp to be
+ // |timestamp_offset| if it is in sequence append mode.
+ void SetGroupStartTimestampIfInSequenceMode(base::TimeDelta timestamp_offset);
+
+ // Adds a new track with unique track ID |id|.
+ // If |id| has previously been added, returns false to indicate error.
+ // Otherwise, returns true, indicating future ProcessFrames() will emit
+ // frames for the track |id| to |stream|.
+ bool AddTrack(StreamParser::TrackId id, ChunkDemuxerStream* stream);
+
+ // Updates the internal mapping of TrackId to track buffer for the track
+ // buffer formerly associated with |old_id| to be associated with |new_id|.
+ // Returns false to indicate failure due to either no existing track buffer
+ // for |old_id| or collision with previous track buffer already mapped to
+ // |new_id|. Otherwise returns true.
+ bool UpdateTrack(StreamParser::TrackId old_id, StreamParser::TrackId new_id);
+
+ // Sets the need random access point flag on all track buffers to true.
+ void SetAllTrackBuffersNeedRandomAccessPoint();
+
+ // Resets state for the coded frame processing algorithm as described in steps
+ // 2-5 of the MSE Reset Parser State algorithm described at
+ // http://www.w3.org/TR/media-source/#sourcebuffer-reset-parser-state
+ void Reset();
+
+ // Must be called when the audio config is updated. Used to manage when
+ // the preroll buffer is cleared and the allowed "fudge" factor between
+ // preroll buffers.
+ void OnPossibleAudioConfigUpdate(const AudioDecoderConfig& config);
+
+ protected:
+ typedef std::map<StreamParser::TrackId, MseTrackBuffer*> TrackBufferMap;
+
+ FrameProcessorBase();
+
+ // If |track_buffers_| contains |id|, returns a pointer to the associated
+ // MseTrackBuffer. Otherwise, returns NULL.
+ MseTrackBuffer* FindTrack(StreamParser::TrackId id);
+
+ // Signals all track buffers' streams that a new media segment is starting
+ // with timestamp |segment_timestamp|.
+ void NotifyNewMediaSegmentStarting(base::TimeDelta segment_timestamp);
+
+ // Handles partial append window trimming of |buffer|. Returns true if the
+ // given |buffer| can be partially trimmed or have preroll added; otherwise,
+ // returns false.
+ //
+ // If |buffer| overlaps |append_window_start|, the portion of |buffer| before
+ // |append_window_start| will be marked for post-decode discard. Further, if
+ // |audio_preroll_buffer_| exists and abuts |buffer|, it will be set as
+ // preroll on |buffer| and |audio_preroll_buffer_| will be cleared. If the
+ // preroll buffer does not abut |buffer|, it will be discarded, but not used.
+ //
+ // If |buffer| lies entirely before |append_window_start|, and thus would
+ // normally be discarded, |audio_preroll_buffer_| will be set to |buffer| and
+ // the method will return false.
+ bool HandlePartialAppendWindowTrimming(
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ const scoped_refptr<StreamParserBuffer>& buffer);
+
+ // The AppendMode of the associated SourceBuffer.
+ // See SetSequenceMode() for interpretation of |sequence_mode_|.
+ // Per http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode:
+  //   Controls how a sequence of media segments is handled. This is initially
+ // set to false ("segments").
+ bool sequence_mode_;
+
+ // TrackId-indexed map of each track's stream.
+ TrackBufferMap track_buffers_;
+
+ // Tracks the MSE coded frame processing variable of same name.
+ // Initially kNoTimestamp(), meaning "unset".
+ // Note: LegacyFrameProcessor does not use this member; it's here to reduce
+ // short-term plumbing of SetGroupStartTimestampIfInSequenceMode() until
+ // LegacyFrameProcessor is removed.
+ base::TimeDelta group_start_timestamp_;
+
+ private:
+ // The last audio buffer seen by the frame processor that was removed because
+ // it was entirely before the start of the append window.
+ scoped_refptr<StreamParserBuffer> audio_preroll_buffer_;
+
+ // The AudioDecoderConfig associated with buffers handed to ProcessFrames().
+ AudioDecoderConfig current_audio_config_;
+ base::TimeDelta sample_duration_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameProcessorBase);
+};
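+
+// A minimal, hypothetical usage sketch of this interface (the real caller
+// lives in chunk_demuxer.cc):
+//   frame_processor->AddTrack(kAudioTrackId, audio_stream);
+//   frame_processor->SetSequenceMode(false);  // "segments" mode.
+//   bool new_media_segment = true;
+//   base::TimeDelta timestamp_offset;  // Updated in "sequence" mode.
+//   frame_processor->ProcessFrames(audio_buffers, video_buffers, text_map,
+//                                  append_window_start, append_window_end,
+//                                  &new_media_segment, &timestamp_offset);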
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_FRAME_PROCESSOR_BASE_H_
diff --git a/chromium/media/filters/frame_processor_unittest.cc b/chromium/media/filters/frame_processor_unittest.cc
new file mode 100644
index 00000000000..f4cde5a1c0c
--- /dev/null
+++ b/chromium/media/filters/frame_processor_unittest.cc
@@ -0,0 +1,658 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <map>
+#include <string>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "media/base/mock_filters.h"
+#include "media/base/test_helpers.h"
+#include "media/filters/chunk_demuxer.h"
+#include "media/filters/frame_processor.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::InSequence;
+using ::testing::StrictMock;
+using ::testing::Values;
+
+namespace media {
+
+typedef StreamParser::BufferQueue BufferQueue;
+typedef StreamParser::TextBufferQueueMap TextBufferQueueMap;
+typedef StreamParser::TrackId TrackId;
+
+static void LogFunc(const std::string& str) { DVLOG(1) << str; }
+
+// Used for setting expectations on callbacks. Using a StrictMock also lets us
+// test for missing or extra callbacks.
+class FrameProcessorTestCallbackHelper {
+ public:
+ FrameProcessorTestCallbackHelper() {}
+ virtual ~FrameProcessorTestCallbackHelper() {}
+
+ MOCK_METHOD1(PossibleDurationIncrease, void(base::TimeDelta new_duration));
+
+  // Helper that calls the mock method and performs basic sanity checks on
+  // |new_duration|.
+ void OnPossibleDurationIncrease(base::TimeDelta new_duration) {
+ PossibleDurationIncrease(new_duration);
+ ASSERT_NE(kNoTimestamp(), new_duration);
+ ASSERT_NE(kInfiniteDuration(), new_duration);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FrameProcessorTestCallbackHelper);
+};
+
+// The test parameter indicates whether the TEST_P instance targets sequence
+// mode (if true) or segments mode (if false).
+class FrameProcessorTest : public testing::TestWithParam<bool> {
+ protected:
+ FrameProcessorTest()
+ : frame_processor_(new FrameProcessor(base::Bind(
+ &FrameProcessorTestCallbackHelper::OnPossibleDurationIncrease,
+ base::Unretained(&callbacks_)))),
+ append_window_end_(kInfiniteDuration()),
+ new_media_segment_(false),
+ audio_id_(FrameProcessor::kAudioTrackId),
+ video_id_(FrameProcessor::kVideoTrackId),
+ frame_duration_(base::TimeDelta::FromMilliseconds(10)) {
+ }
+
+ enum StreamFlags {
+ HAS_AUDIO = 1 << 0,
+ HAS_VIDEO = 1 << 1
+ };
+
+ void AddTestTracks(int stream_flags) {
+ const bool has_audio = (stream_flags & HAS_AUDIO) != 0;
+ const bool has_video = (stream_flags & HAS_VIDEO) != 0;
+ ASSERT_TRUE(has_audio || has_video);
+
+ if (has_audio) {
+ CreateAndConfigureStream(DemuxerStream::AUDIO);
+ ASSERT_TRUE(audio_);
+ EXPECT_TRUE(frame_processor_->AddTrack(audio_id_, audio_.get()));
+ audio_->Seek(base::TimeDelta());
+ audio_->StartReturningData();
+ }
+ if (has_video) {
+ CreateAndConfigureStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(video_);
+ EXPECT_TRUE(frame_processor_->AddTrack(video_id_, video_.get()));
+ video_->Seek(base::TimeDelta());
+ video_->StartReturningData();
+ }
+ }
+
+ void SetTimestampOffset(base::TimeDelta new_offset) {
+ timestamp_offset_ = new_offset;
+ frame_processor_->SetGroupStartTimestampIfInSequenceMode(timestamp_offset_);
+ }
+
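+  // Parses a space-delimited string of millisecond timestamps into buffers of
+  // |frame_duration_| length; a trailing "K" marks a keyframe. For example,
+  // "0K 10K 20" yields keyframes at 0ms and 10ms and a non-keyframe at 20ms.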
+ BufferQueue StringToBufferQueue(const std::string& buffers_to_append,
+ const TrackId track_id,
+ const DemuxerStream::Type type) {
+ std::vector<std::string> timestamps;
+ base::SplitString(buffers_to_append, ' ', &timestamps);
+
+ BufferQueue buffers;
+ for (size_t i = 0; i < timestamps.size(); i++) {
+ bool is_keyframe = false;
+ if (EndsWith(timestamps[i], "K", true)) {
+ is_keyframe = true;
+ // Remove the "K" off of the token.
+ timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
+ }
+
+ double time_in_ms;
+ CHECK(base::StringToDouble(timestamps[i], &time_in_ms));
+
+ // Create buffer. Encode the original time_in_ms as the buffer's data to
+ // enable later verification of possible buffer relocation in presentation
+ // timeline due to coded frame processing.
+ const uint8* timestamp_as_data = reinterpret_cast<uint8*>(&time_in_ms);
+ scoped_refptr<StreamParserBuffer> buffer =
+ StreamParserBuffer::CopyFrom(timestamp_as_data, sizeof(time_in_ms),
+ is_keyframe, type, track_id);
+ base::TimeDelta timestamp = base::TimeDelta::FromSecondsD(
+ time_in_ms / base::Time::kMillisecondsPerSecond);
+ buffer->set_timestamp(timestamp);
+ buffer->SetDecodeTimestamp(timestamp);
+ buffer->set_duration(frame_duration_);
+ buffers.push_back(buffer);
+ }
+ return buffers;
+ }
+
+ void ProcessFrames(const std::string& audio_timestamps,
+ const std::string& video_timestamps) {
+ ASSERT_TRUE(frame_processor_->ProcessFrames(
+ StringToBufferQueue(audio_timestamps, audio_id_, DemuxerStream::AUDIO),
+ StringToBufferQueue(video_timestamps, video_id_, DemuxerStream::VIDEO),
+ empty_text_buffers_,
+ append_window_start_, append_window_end_,
+ &new_media_segment_, &timestamp_offset_));
+ }
+
+ void CheckExpectedRangesByTimestamp(ChunkDemuxerStream* stream,
+ const std::string& expected) {
+    // Note: DemuxerStream::TEXT streams return [0, duration (== infinity here)).
+ Ranges<base::TimeDelta> r = stream->GetBufferedRanges(kInfiniteDuration());
+
+ std::stringstream ss;
+ ss << "{ ";
+ for (size_t i = 0; i < r.size(); ++i) {
+ int64 start = r.start(i).InMilliseconds();
+ int64 end = r.end(i).InMilliseconds();
+ ss << "[" << start << "," << end << ") ";
+ }
+ ss << "}";
+ EXPECT_EQ(expected, ss.str());
+ }
+
+ void CheckReadStalls(ChunkDemuxerStream* stream) {
+ int loop_count = 0;
+
+ do {
+ read_callback_called_ = false;
+ stream->Read(base::Bind(&FrameProcessorTest::StoreStatusAndBuffer,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ } while (++loop_count < 2 && read_callback_called_ &&
+ last_read_status_ == DemuxerStream::kAborted);
+
+ ASSERT_FALSE(read_callback_called_ &&
+ last_read_status_ == DemuxerStream::kAborted)
+ << "2 kAborted reads in a row. Giving up.";
+ EXPECT_FALSE(read_callback_called_);
+ }
+
+ // Format of |expected| is a space-delimited sequence of
+ // timestamp_in_ms:original_timestamp_in_ms
+ // original_timestamp_in_ms (and the colon) must be omitted if it is the same
+ // as timestamp_in_ms.
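+  // A buffer that was entirely discarded as preroll is suffixed with "P".
+  // For example, "0 10:20 30P" expects reads at 0ms, at 10ms (originally
+  // appended at 20ms), and at 30ms, where the 30ms buffer is a full-discard
+  // preroll buffer.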
+ void CheckReadsThenReadStalls(ChunkDemuxerStream* stream,
+ const std::string& expected) {
+ std::vector<std::string> timestamps;
+ base::SplitString(expected, ' ', &timestamps);
+ std::stringstream ss;
+ for (size_t i = 0; i < timestamps.size(); ++i) {
+ int loop_count = 0;
+
+ do {
+ read_callback_called_ = false;
+ stream->Read(base::Bind(&FrameProcessorTest::StoreStatusAndBuffer,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ EXPECT_TRUE(read_callback_called_);
+ } while (++loop_count < 2 &&
+ last_read_status_ == DemuxerStream::kAborted);
+
+ ASSERT_FALSE(last_read_status_ == DemuxerStream::kAborted)
+ << "2 kAborted reads in a row. Giving up.";
+ EXPECT_EQ(DemuxerStream::kOk, last_read_status_);
+ EXPECT_FALSE(last_read_buffer_->end_of_stream());
+
+ if (i > 0)
+ ss << " ";
+
+ int time_in_ms = last_read_buffer_->timestamp().InMilliseconds();
+ ss << time_in_ms;
+
+ // Decode the original_time_in_ms from the buffer's data.
+ double original_time_in_ms;
+ ASSERT_EQ(static_cast<int>(sizeof(original_time_in_ms)),
+ last_read_buffer_->data_size());
+ original_time_in_ms = *(reinterpret_cast<const double*>(
+ last_read_buffer_->data()));
+ if (original_time_in_ms != time_in_ms)
+ ss << ":" << original_time_in_ms;
+
+ // Detect full-discard preroll buffer.
+ if (last_read_buffer_->discard_padding().first == kInfiniteDuration() &&
+ last_read_buffer_->discard_padding().second == base::TimeDelta()) {
+ ss << "P";
+ }
+ }
+
+ EXPECT_EQ(expected, ss.str());
+ CheckReadStalls(stream);
+ }
+
+ base::MessageLoop message_loop_;
+ StrictMock<FrameProcessorTestCallbackHelper> callbacks_;
+
+ scoped_ptr<FrameProcessor> frame_processor_;
+ base::TimeDelta append_window_start_;
+ base::TimeDelta append_window_end_;
+ bool new_media_segment_;
+ base::TimeDelta timestamp_offset_;
+ scoped_ptr<ChunkDemuxerStream> audio_;
+ scoped_ptr<ChunkDemuxerStream> video_;
+ const TrackId audio_id_;
+ const TrackId video_id_;
+ const base::TimeDelta frame_duration_; // Currently the same for all streams.
+ const BufferQueue empty_queue_;
+ const TextBufferQueueMap empty_text_buffers_;
+
+ // StoreStatusAndBuffer's most recent result.
+ DemuxerStream::Status last_read_status_;
+ scoped_refptr<DecoderBuffer> last_read_buffer_;
+ bool read_callback_called_;
+
+ private:
+ void StoreStatusAndBuffer(DemuxerStream::Status status,
+ const scoped_refptr<DecoderBuffer>& buffer) {
+ if (status == DemuxerStream::kOk && buffer) {
+      DVLOG(3) << __FUNCTION__ << "(): status: " << status << " ts: "
+               << buffer->timestamp().InSecondsF();
+    } else {
+      DVLOG(3) << __FUNCTION__ << "(): status: " << status << " ts: n/a";
+ }
+
+ read_callback_called_ = true;
+ last_read_status_ = status;
+ last_read_buffer_ = buffer;
+ }
+
+ void CreateAndConfigureStream(DemuxerStream::Type type) {
+ // TODO(wolenetz/dalecurtis): Also test with splicing disabled?
+ switch (type) {
+ case DemuxerStream::AUDIO: {
+ ASSERT_FALSE(audio_);
+ audio_.reset(new ChunkDemuxerStream(DemuxerStream::AUDIO, true));
+ AudioDecoderConfig decoder_config(kCodecVorbis,
+ kSampleFormatPlanarF32,
+ CHANNEL_LAYOUT_STEREO,
+ 1000,
+ NULL,
+ 0,
+ false);
+ frame_processor_->OnPossibleAudioConfigUpdate(decoder_config);
+ ASSERT_TRUE(
+ audio_->UpdateAudioConfig(decoder_config, base::Bind(&LogFunc)));
+ break;
+ }
+ case DemuxerStream::VIDEO: {
+ ASSERT_FALSE(video_);
+ video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO, true));
+ ASSERT_TRUE(video_->UpdateVideoConfig(TestVideoConfig::Normal(),
+ base::Bind(&LogFunc)));
+ break;
+ }
+ // TODO(wolenetz): Test text coded frame processing.
+ case DemuxerStream::TEXT:
+ case DemuxerStream::UNKNOWN:
+ case DemuxerStream::NUM_TYPES: {
+ ASSERT_FALSE(true);
+ }
+ }
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(FrameProcessorTest);
+};
+
+TEST_F(FrameProcessorTest, WrongTypeInAppendedBuffer) {
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+
+ ASSERT_FALSE(frame_processor_->ProcessFrames(
+ StringToBufferQueue("0K", audio_id_, DemuxerStream::VIDEO),
+ empty_queue_,
+ empty_text_buffers_,
+ append_window_start_, append_window_end_,
+ &new_media_segment_, &timestamp_offset_));
+ EXPECT_TRUE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ }");
+ CheckReadStalls(audio_.get());
+}
+
+TEST_F(FrameProcessorTest, NonMonotonicallyIncreasingTimestampInOneCall) {
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+
+ ASSERT_FALSE(frame_processor_->ProcessFrames(
+ StringToBufferQueue("10K 0K", audio_id_, DemuxerStream::AUDIO),
+ empty_queue_,
+ empty_text_buffers_,
+ append_window_start_, append_window_end_,
+ &new_media_segment_, &timestamp_offset_));
+ EXPECT_TRUE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ }");
+ CheckReadStalls(audio_.get());
+}
+
+TEST_P(FrameProcessorTest, AudioOnly_SingleFrame) {
+ // Tests A: P(A) -> (a)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_));
+ ProcessFrames("0K", "");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) }");
+ CheckReadsThenReadStalls(audio_.get(), "0");
+}
+
+TEST_P(FrameProcessorTest, VideoOnly_SingleFrame) {
+ // Tests V: P(V) -> (v)
+ InSequence s;
+ AddTestTracks(HAS_VIDEO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_));
+ ProcessFrames("", "0K");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,10) }");
+ CheckReadsThenReadStalls(video_.get(), "0");
+}
+
+TEST_P(FrameProcessorTest, AudioOnly_TwoFrames) {
+ // Tests A: P(A0, A10) -> (a0, a10)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ ProcessFrames("0K 10K", "");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 10");
+}
+
+TEST_P(FrameProcessorTest, AudioOnly_SetOffsetThenSingleFrame) {
+ // Tests A: STSO(50)+P(A0) -> TSO==50,(a0@50)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ const base::TimeDelta fifty_ms = base::TimeDelta::FromMilliseconds(50);
+ SetTimestampOffset(fifty_ms);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ + fifty_ms));
+ ProcessFrames("0K", "");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(fifty_ms, timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [50,60) }");
+
+ // We do not stall on reading without seeking to 50ms due to
+ // SourceBufferStream::kSeekToStartFudgeRoom().
+ CheckReadsThenReadStalls(audio_.get(), "50:0");
+}
+
+TEST_P(FrameProcessorTest, AudioOnly_SetOffsetThenFrameTimestampBelowOffset) {
+ // Tests A: STSO(50)+P(A20) ->
+ // if sequence mode: TSO==30,(a20@50)
+ // if segments mode: TSO==50,(a20@70)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ bool using_sequence_mode = GetParam();
+ if (using_sequence_mode)
+ frame_processor_->SetSequenceMode(true);
+
+ const base::TimeDelta fifty_ms = base::TimeDelta::FromMilliseconds(50);
+ const base::TimeDelta twenty_ms = base::TimeDelta::FromMilliseconds(20);
+ SetTimestampOffset(fifty_ms);
+
+ if (using_sequence_mode) {
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(
+ fifty_ms + frame_duration_));
+ } else {
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(
+ fifty_ms + twenty_ms + frame_duration_));
+ }
+
+ ProcessFrames("20K", "");
+ EXPECT_FALSE(new_media_segment_);
+
+ // We do not stall on reading without seeking to 50ms / 70ms due to
+ // SourceBufferStream::kSeekToStartFudgeRoom().
+ if (using_sequence_mode) {
+ EXPECT_EQ(fifty_ms - twenty_ms, timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [50,60) }");
+ CheckReadsThenReadStalls(audio_.get(), "50:20");
+ } else {
+ EXPECT_EQ(fifty_ms, timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [70,80) }");
+ CheckReadsThenReadStalls(audio_.get(), "70:20");
+ }
+}
+
+TEST_P(FrameProcessorTest, AudioOnly_SequentialProcessFrames) {
+ // Tests A: P(A0,A10)+P(A20,A30) -> (a0,a10,a20,a30)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ ProcessFrames("0K 10K", "");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 4));
+ ProcessFrames("20K 30K", "");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,40) }");
+
+ CheckReadsThenReadStalls(audio_.get(), "0 10 20 30");
+}
+
+TEST_P(FrameProcessorTest, AudioOnly_NonSequentialProcessFrames) {
+ // Tests A: P(A20,A30)+P(A0,A10) ->
+ // if sequence mode: TSO==-20 after first P(), 20 after second P(), and
+ // a(20@0,a30@10,a0@20,a10@30)
+ // if segments mode: TSO==0,(a0,a10,a20,a30)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ bool using_sequence_mode = GetParam();
+ if (using_sequence_mode) {
+ frame_processor_->SetSequenceMode(true);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ } else {
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 4));
+ }
+
+ ProcessFrames("20K 30K", "");
+ EXPECT_FALSE(new_media_segment_);
+
+ if (using_sequence_mode) {
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+ EXPECT_EQ(frame_duration_ * -2, timestamp_offset_);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 4));
+ } else {
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [20,40) }");
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ }
+
+ ProcessFrames("0K 10K", "");
+ EXPECT_FALSE(new_media_segment_);
+
+ if (using_sequence_mode) {
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,40) }");
+ EXPECT_EQ(frame_duration_ * 2, timestamp_offset_);
+ CheckReadsThenReadStalls(audio_.get(), "0:20 10:30 20:0 30:10");
+ } else {
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,40) }");
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ // TODO(wolenetz): Fix this need to seek to 0ms, possibly by having
+ // SourceBufferStream defer initial seek until next read. See
+ // http://crbug.com/371493.
+ audio_->AbortReads();
+ audio_->Seek(base::TimeDelta());
+ audio_->StartReturningData();
+ CheckReadsThenReadStalls(audio_.get(), "0 10 20 30");
+ }
+}
+
+TEST_P(FrameProcessorTest, AudioVideo_SequentialProcessFrames) {
+ // Tests AV: P(A0,A10;V0k,V10,V20)+P(A20,A30,A40,V30) ->
+ // (a0,a10,a20,a30,a40);(v0,v10,v20,v30)
+ InSequence s;
+ AddTestTracks(HAS_AUDIO | HAS_VIDEO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 3));
+ ProcessFrames("0K 10K", "0K 10 20");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,30) }");
+
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 5));
+ ProcessFrames("20K 30K 40K", "30");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,50) }");
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,40) }");
+
+ CheckReadsThenReadStalls(audio_.get(), "0 10 20 30 40");
+ CheckReadsThenReadStalls(video_.get(), "0 10 20 30");
+}
+
+TEST_P(FrameProcessorTest, AudioVideo_Discontinuity) {
+ // Tests AV: P(A0,A10,A30,A40,A50;V0k,V10,V40,V50key) ->
+ // if sequence mode: TSO==10,(a0,a10,a30,a40,a50@60);(v0,v10,v50@60)
+ // if segments mode: TSO==0,(a0,a10,a30,a40,a50);(v0,v10,v50)
+  // This assumes A40K is processed before V40, which currently depends on
+  // MergeBufferQueues() behavior.
+ InSequence s;
+ AddTestTracks(HAS_AUDIO | HAS_VIDEO);
+ new_media_segment_ = true;
+ bool using_sequence_mode = GetParam();
+ if (using_sequence_mode) {
+ frame_processor_->SetSequenceMode(true);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 7));
+ } else {
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 6));
+ }
+
+ ProcessFrames("0K 10K 30K 40K 50K", "0K 10 40 50K");
+ EXPECT_FALSE(new_media_segment_);
+
+ if (using_sequence_mode) {
+ EXPECT_EQ(frame_duration_, timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,70) }");
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,70) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 10 30 40 60:50");
+ CheckReadsThenReadStalls(video_.get(), "0 10 60:50");
+ } else {
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,60) }");
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,20) [50,60) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 10 30 40 50");
+ CheckReadsThenReadStalls(video_.get(), "0 10");
+ video_->AbortReads();
+ video_->Seek(frame_duration_ * 5);
+ video_->StartReturningData();
+ CheckReadsThenReadStalls(video_.get(), "50");
+ }
+}
+
+TEST_P(FrameProcessorTest,
+ AppendWindowFilterOfNegativeBufferTimestampsWithPrerollDiscard) {
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+
+ SetTimestampOffset(frame_duration_ * -2);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_));
+ ProcessFrames("0K 10K 20K", "");
+ EXPECT_FALSE(new_media_segment_);
+ EXPECT_EQ(frame_duration_ * -2, timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) }");
+ CheckReadsThenReadStalls(audio_.get(), "0:10P 0:20");
+}
+
+TEST_P(FrameProcessorTest, AppendWindowFilterWithInexactPreroll) {
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+ SetTimestampOffset(-frame_duration_);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ ProcessFrames("0K 9.75K 20K", "");
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+ CheckReadsThenReadStalls(audio_.get(), "0P 0:9.75 10:20");
+}
+
+TEST_P(FrameProcessorTest, AllowNegativeFramePTSAndDTSBeforeOffsetAdjustment) {
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ bool using_sequence_mode = GetParam();
+ if (using_sequence_mode) {
+ frame_processor_->SetSequenceMode(true);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 3));
+ } else {
+ EXPECT_CALL(callbacks_,
+ PossibleDurationIncrease((frame_duration_ * 5) / 2));
+ }
+
+ ProcessFrames("-5K 5K 15K", "");
+
+ if (using_sequence_mode) {
+ EXPECT_EQ(frame_duration_ / 2, timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,30) }");
+ CheckReadsThenReadStalls(audio_.get(), "0:-5 10:5 20:15");
+ } else {
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,25) }");
+ CheckReadsThenReadStalls(audio_.get(), "0:-5 5 15");
+ }
+}
+
+TEST_P(FrameProcessorTest, PartialAppendWindowFilterNoDiscontinuity) {
+ // Tests that spurious discontinuity is not introduced by a partially
+ // trimmed frame.
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+ EXPECT_CALL(callbacks_,
+ PossibleDurationIncrease(base::TimeDelta::FromMilliseconds(29)));
+
+ append_window_start_ = base::TimeDelta::FromMilliseconds(7);
+ ProcessFrames("0K 19K", "");
+
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [7,29) }");
+ CheckReadsThenReadStalls(audio_.get(), "7:0 19");
+}
+
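+// Each TEST_P above runs twice via these instantiations: once with
+// GetParam() == true ("sequence" append mode) and once with
+// GetParam() == false ("segments" mode).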
+INSTANTIATE_TEST_CASE_P(SequenceMode, FrameProcessorTest, Values(true));
+INSTANTIATE_TEST_CASE_P(SegmentsMode, FrameProcessorTest, Values(false));
+
+} // namespace media
diff --git a/chromium/media/filters/gpu_video_accelerator_factories.h b/chromium/media/filters/gpu_video_accelerator_factories.h
index c152c2a4bda..a6859c78f22 100644
--- a/chromium/media/filters/gpu_video_accelerator_factories.h
+++ b/chromium/media/filters/gpu_video_accelerator_factories.h
@@ -5,62 +5,73 @@
#ifndef MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
#define MEDIA_FILTERS_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#include <vector>
+
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "media/video/video_decode_accelerator.h"
-#include "media/video/video_encode_accelerator.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "media/base/media_export.h"
+
+class SkBitmap;
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
class SharedMemory;
}
-class SkBitmap;
+namespace gfx {
+class Rect;
+class Size;
+}
namespace media {
+class VideoDecodeAccelerator;
+class VideoEncodeAccelerator;
+
// Helper interface for specifying factories needed to instantiate a hardware
// video accelerator.
+// Threading model:
+// * The GpuVideoAcceleratorFactories may be constructed on any thread.
+// * The GpuVideoAcceleratorFactories has an associated task runner, which may
+//   be retrieved as |GetTaskRunner()|.
+// * All calls to the Factories after construction must be made on that task
+//   runner.
class MEDIA_EXPORT GpuVideoAcceleratorFactories
: public base::RefCountedThreadSafe<GpuVideoAcceleratorFactories> {
public:
- // Caller owns returned pointer.
- virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator(
- VideoCodecProfile profile,
- VideoDecodeAccelerator::Client* client) = 0;
+ // Caller owns returned pointer, but should call Destroy() on it (instead of
+ // directly deleting) for proper destruction, as per the
+ // VideoDecodeAccelerator interface.
+ virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator() = 0;
- // Caller owns returned pointer.
- virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator(
- VideoEncodeAccelerator::Client* client) = 0;
+ // Caller owns returned pointer, but should call Destroy() on it (instead of
+ // directly deleting) for proper destruction, as per the
+ // VideoEncodeAccelerator interface.
+ virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator() = 0;
// Allocate & delete native textures.
- virtual uint32 CreateTextures(int32 count,
- const gfx::Size& size,
- std::vector<uint32>* texture_ids,
- std::vector<gpu::Mailbox>* texture_mailboxes,
- uint32 texture_target) = 0;
+ virtual bool CreateTextures(int32 count,
+ const gfx::Size& size,
+ std::vector<uint32>* texture_ids,
+ std::vector<gpu::Mailbox>* texture_mailboxes,
+ uint32 texture_target) = 0;
virtual void DeleteTexture(uint32 texture_id) = 0;
virtual void WaitSyncPoint(uint32 sync_point) = 0;
- // Read pixels from a native texture and store into |pixels| as RGBA.
+ // Read pixels within |visible_rect| boundaries from a native texture and
+ // store into |pixels| as RGBA.
virtual void ReadPixels(uint32 texture_id,
- const gfx::Size& size,
+ const gfx::Rect& visible_rect,
const SkBitmap& pixels) = 0;
// Allocate & return a shared memory segment. Caller is responsible for
// Close()ing the returned pointer.
virtual base::SharedMemory* CreateSharedMemory(size_t size) = 0;
- // Returns the message loop the video accelerator runs on.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() = 0;
-
- // Abort any outstanding factory operations and error any future
- // attempts at factory operations
- virtual void Abort() = 0;
-
- // Returns true if Abort() has been called.
- virtual bool IsAborted() = 0;
+ // Returns the task runner the video accelerator runs on.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
protected:
friend class base::RefCountedThreadSafe<GpuVideoAcceleratorFactories>;
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index 6f2fe93c0ab..bc545b7d652 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -8,18 +8,23 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
+#include "base/command_line.h"
#include "base/cpu.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
+#include "base/synchronization/waitable_event.h"
#include "base/task_runner_util.h"
-#include "media/base/bind_to_loop.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_log.h"
+#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/pipeline_status.h"
#include "media/base/video_decoder_config.h"
#include "media/filters/gpu_video_accelerator_factories.h"
+#include "third_party/skia/include/core/SkBitmap.h"
namespace media {
@@ -38,12 +43,14 @@ GpuVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* m, size_t s)
GpuVideoDecoder::SHMBuffer::~SHMBuffer() {}
-GpuVideoDecoder::BufferPair::BufferPair(
- SHMBuffer* s, const scoped_refptr<DecoderBuffer>& b)
- : shm_buffer(s), buffer(b) {
+GpuVideoDecoder::PendingDecoderBuffer::PendingDecoderBuffer(
+ SHMBuffer* s,
+ const scoped_refptr<DecoderBuffer>& b,
+ const DecodeCB& done_cb)
+ : shm_buffer(s), buffer(b), done_cb(done_cb) {
}
-GpuVideoDecoder::BufferPair::~BufferPair() {}
+GpuVideoDecoder::PendingDecoderBuffer::~PendingDecoderBuffer() {}
GpuVideoDecoder::BufferData::BufferData(
int32 bbid, base::TimeDelta ts, const gfx::Rect& vr, const gfx::Size& ns)
@@ -57,69 +64,71 @@ GpuVideoDecoder::GpuVideoDecoder(
const scoped_refptr<GpuVideoAcceleratorFactories>& factories,
const scoped_refptr<MediaLog>& media_log)
: needs_bitstream_conversion_(false),
- gvd_loop_proxy_(factories->GetMessageLoop()),
- weak_factory_(this),
factories_(factories),
state_(kNormal),
media_log_(media_log),
decoder_texture_target_(0),
next_picture_buffer_id_(0),
next_bitstream_buffer_id_(0),
- available_pictures_(0) {
+ available_pictures_(0),
+ weak_factory_(this) {
DCHECK(factories_.get());
}
void GpuVideoDecoder::Reset(const base::Closure& closure) {
DVLOG(3) << "Reset()";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
-
- if (state_ == kDrainingDecoder && !factories_->IsAborted()) {
- gvd_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &GpuVideoDecoder::Reset, weak_this_, closure));
- // NOTE: if we're deferring Reset() until a Flush() completes, return
- // queued pictures to the VDA so they can be used to finish that Flush().
- if (pending_decode_cb_.is_null())
- ready_video_frames_.clear();
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+
+ if (state_ == kDrainingDecoder) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &GpuVideoDecoder::Reset, weak_factory_.GetWeakPtr(), closure));
return;
}
- // Throw away any already-decoded, not-yet-delivered frames.
- ready_video_frames_.clear();
-
if (!vda_) {
- gvd_loop_proxy_->PostTask(FROM_HERE, closure);
+ base::MessageLoop::current()->PostTask(FROM_HERE, closure);
return;
}
- if (!pending_decode_cb_.is_null())
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
-
DCHECK(pending_reset_cb_.is_null());
pending_reset_cb_ = BindToCurrentLoop(closure);
vda_->Reset();
}
-void GpuVideoDecoder::Stop(const base::Closure& closure) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+void GpuVideoDecoder::Stop() {
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (vda_)
DestroyVDA();
- if (!pending_decode_cb_.is_null())
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
+ DCHECK(bitstream_buffers_in_decoder_.empty());
if (!pending_reset_cb_.is_null())
base::ResetAndReturn(&pending_reset_cb_).Run();
- BindToCurrentLoop(closure).Run();
}
static bool IsCodedSizeSupported(const gfx::Size& coded_size) {
+#if defined(OS_WIN)
+ // Windows Media Foundation H.264 decoding does not support decoding videos
+ // with any dimension smaller than 48 pixels:
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
+ if (coded_size.width() < 48 || coded_size.height() < 48)
+ return false;
+#endif
+
// Only non-Windows, Ivy Bridge+ platforms can support more than 1920x1080.
// We test against 1088 to account for 16x16 macroblocks.
if (coded_size.width() <= 1920 && coded_size.height() <= 1088)
return true;
+ // NOTE: additional autodetection logic may require updating input buffer size
+ // selection in platform-specific implementations, such as
+ // V4L2VideoDecodeAccelerator.
base::CPU cpu;
bool hw_large_video_support =
- (cpu.vendor_name() == "GenuineIntel") && cpu.model() >= 58;
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kIgnoreResolutionLimitsForAcceleratedVideoDecode) ||
+ ((cpu.vendor_name() == "GenuineIntel") && cpu.model() >= 55);
bool os_large_video_support = true;
#if defined(OS_WIN)
os_large_video_support = false;
@@ -134,33 +143,24 @@ static void ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB(
const PipelineStatusCB& cb,
PipelineStatus status) {
UMA_HISTOGRAM_ENUMERATION(
- "Media.GpuVideoDecoderInitializeStatus", status, PIPELINE_STATUS_MAX);
+ "Media.GpuVideoDecoderInitializeStatus", status, PIPELINE_STATUS_MAX + 1);
cb.Run(status);
}
void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& orig_status_cb) {
+ bool /* low_delay */,
+ const PipelineStatusCB& orig_status_cb,
+ const OutputCB& output_cb) {
DVLOG(3) << "Initialize()";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DCHECK(config.IsValidConfig());
DCHECK(!config.is_encrypted());
- weak_this_ = weak_factory_.GetWeakPtr();
-
PipelineStatusCB status_cb =
base::Bind(&ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB,
BindToCurrentLoop(orig_status_cb));
bool previously_initialized = config_.IsValidConfig();
-#if !defined(OS_CHROMEOS) && !defined(OS_WIN)
- if (previously_initialized) {
- // TODO(xhwang): Make GpuVideoDecoder reinitializable.
- // See http://crbug.com/233608
- DVLOG(1) << "GpuVideoDecoder reinitialization not supported.";
- status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
- return;
- }
-#endif
DVLOG(1) << "(Re)initializing GVD with config: "
<< config.AsHumanReadableString();
@@ -179,6 +179,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
config_ = config;
needs_bitstream_conversion_ = (config.codec() == kCodecH264);
+ output_cb_ = BindToCurrentLoop(output_cb);
if (previously_initialized) {
// Reinitialization with a different config (but same codec and profile).
@@ -188,9 +189,8 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
- vda_ =
- factories_->CreateVideoDecodeAccelerator(config.profile(), this).Pass();
- if (!vda_) {
+ vda_ = factories_->CreateVideoDecodeAccelerator().Pass();
+ if (!vda_ || !vda_->Initialize(config.profile(), this)) {
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
@@ -201,7 +201,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
}
void GpuVideoDecoder::DestroyPictureBuffers(PictureBufferMap* buffers) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
for (PictureBufferMap::iterator it = buffers->begin(); it != buffers->end();
++it) {
factories_->DeleteTexture(it->second.texture_id());
@@ -211,66 +211,58 @@ void GpuVideoDecoder::DestroyPictureBuffers(PictureBufferMap* buffers) {
}
void GpuVideoDecoder::DestroyVDA() {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- if (vda_)
- vda_.release()->Destroy();
+ vda_.reset();
- DestroyPictureBuffers(&assigned_picture_buffers_);
- // Not destroying PictureBuffers in |dismissed_picture_buffers_| yet, since
+ // Not destroying PictureBuffers in |picture_buffers_at_display_| yet, since
// their textures may still be in use by the user of this GpuVideoDecoder.
+ for (PictureBufferTextureMap::iterator it =
+ picture_buffers_at_display_.begin();
+ it != picture_buffers_at_display_.end();
+ ++it) {
+ assigned_picture_buffers_.erase(it->first);
+ }
+ DestroyPictureBuffers(&assigned_picture_buffers_);
}
void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DCHECK(pending_reset_cb_.is_null());
- DCHECK(pending_decode_cb_.is_null());
- pending_decode_cb_ = BindToCurrentLoop(decode_cb);
+ DecodeCB bound_decode_cb = BindToCurrentLoop(decode_cb);
if (state_ == kError || !vda_) {
- base::ResetAndReturn(&pending_decode_cb_).Run(kDecodeError, NULL);
+ bound_decode_cb.Run(kDecodeError);
return;
}
switch (state_) {
case kDecoderDrained:
- if (!ready_video_frames_.empty()) {
- EnqueueFrameAndTriggerFrameDelivery(NULL);
- return;
- }
state_ = kNormal;
// Fall-through.
case kNormal:
break;
case kDrainingDecoder:
- DCHECK(buffer->end_of_stream());
- // Do nothing. Will be satisfied either by a PictureReady or
- // NotifyFlushDone below.
- return;
case kError:
NOTREACHED();
return;
}
+ DCHECK_EQ(state_, kNormal);
+
if (buffer->end_of_stream()) {
- if (state_ == kNormal) {
- state_ = kDrainingDecoder;
- vda_->Flush();
- // If we have ready frames, go ahead and process them to ensure that the
- // Flush operation does not block in the VDA due to lack of picture
- // buffers.
- if (!ready_video_frames_.empty())
- EnqueueFrameAndTriggerFrameDelivery(NULL);
- }
+ state_ = kDrainingDecoder;
+ eos_decode_cb_ = bound_decode_cb;
+ vda_->Flush();
return;
}
size_t size = buffer->data_size();
SHMBuffer* shm_buffer = GetSHM(size);
if (!shm_buffer) {
- base::ResetAndReturn(&pending_decode_cb_).Run(kDecodeError, NULL);
+ bound_decode_cb.Run(kDecodeError);
return;
}
@@ -279,24 +271,15 @@ void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
next_bitstream_buffer_id_, shm_buffer->shm->handle(), size);
// Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
- bool inserted = bitstream_buffers_in_decoder_.insert(std::make_pair(
- bitstream_buffer.id(), BufferPair(shm_buffer, buffer))).second;
- DCHECK(inserted);
+ DCHECK(!ContainsKey(bitstream_buffers_in_decoder_, bitstream_buffer.id()));
+ bitstream_buffers_in_decoder_.insert(
+ std::make_pair(bitstream_buffer.id(),
+ PendingDecoderBuffer(shm_buffer, buffer, decode_cb)));
+ DCHECK_LE(static_cast<int>(bitstream_buffers_in_decoder_.size()),
+ kMaxInFlightDecodes);
RecordBufferData(bitstream_buffer, *buffer.get());
vda_->Decode(bitstream_buffer);
-
- if (!ready_video_frames_.empty()) {
- EnqueueFrameAndTriggerFrameDelivery(NULL);
- return;
- }
-
- if (CanMoreDecodeWorkBeDone())
- base::ResetAndReturn(&pending_decode_cb_).Run(kNotEnoughData, NULL);
-}
-
-bool GpuVideoDecoder::CanMoreDecodeWorkBeDone() {
- return bitstream_buffers_in_decoder_.size() < kMaxInFlightDecodes;
}
void GpuVideoDecoder::RecordBufferData(const BitstreamBuffer& bitstream_buffer,
@@ -331,25 +314,20 @@ void GpuVideoDecoder::GetBufferData(int32 id, base::TimeDelta* timestamp,
NOTREACHED() << "Missing bitstreambuffer id: " << id;
}
-bool GpuVideoDecoder::HasAlpha() const {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
- return true;
-}
-
bool GpuVideoDecoder::NeedsBitstreamConversion() const {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
return needs_bitstream_conversion_;
}
bool GpuVideoDecoder::CanReadWithoutStalling() const {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
return
next_picture_buffer_id_ == 0 || // Decode() will ProvidePictureBuffers().
- available_pictures_ > 0 || !ready_video_frames_.empty();
+ available_pictures_ > 0;
}
-void GpuVideoDecoder::NotifyInitializeDone() {
- NOTREACHED() << "GpuVideoDecodeAcceleratorHost::Initialize is synchronous!";
+int GpuVideoDecoder::GetMaxDecodeRequests() const {
+ return kMaxInFlightDecodes;
}
void GpuVideoDecoder::ProvidePictureBuffers(uint32 count,
@@ -357,13 +335,11 @@ void GpuVideoDecoder::ProvidePictureBuffers(uint32 count,
uint32 texture_target) {
DVLOG(3) << "ProvidePictureBuffers(" << count << ", "
<< size.width() << "x" << size.height() << ")";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
std::vector<uint32> texture_ids;
std::vector<gpu::Mailbox> texture_mailboxes;
decoder_texture_target_ = texture_target;
- // Discards the sync point returned here since PictureReady will imply that
- // the produce has already happened, and the texture is ready for use.
if (!factories_->CreateTextures(count,
size,
&texture_ids,
@@ -394,7 +370,7 @@ void GpuVideoDecoder::ProvidePictureBuffers(uint32 count,
void GpuVideoDecoder::DismissPictureBuffer(int32 id) {
DVLOG(3) << "DismissPictureBuffer(" << id << ")";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
PictureBufferMap::iterator it = assigned_picture_buffers_.find(id);
if (it == assigned_picture_buffers_.end()) {
@@ -405,25 +381,46 @@ void GpuVideoDecoder::DismissPictureBuffer(int32 id) {
PictureBuffer buffer_to_dismiss = it->second;
assigned_picture_buffers_.erase(it);
- std::set<int32>::iterator at_display_it =
- picture_buffers_at_display_.find(id);
-
- if (at_display_it == picture_buffers_at_display_.end()) {
+ if (!picture_buffers_at_display_.count(id)) {
// We can delete the texture immediately as it's not being displayed.
factories_->DeleteTexture(buffer_to_dismiss.texture_id());
CHECK_GT(available_pictures_, 0);
--available_pictures_;
- } else {
- // Texture in display. Postpone deletion until after it's returned to us.
- bool inserted = dismissed_picture_buffers_.insert(std::make_pair(
- id, buffer_to_dismiss)).second;
- DCHECK(inserted);
}
+  // A texture still in display (tracked in |picture_buffers_at_display_|) is
+  // not destroyed here; its deletion is postponed until it is returned to us.
+}
+
+static void ReadPixelsSyncInner(
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ uint32 texture_id,
+ const gfx::Rect& visible_rect,
+ const SkBitmap& pixels,
+ base::WaitableEvent* event) {
+ factories->ReadPixels(texture_id, visible_rect, pixels);
+ event->Signal();
+}
+
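+// Hops to the factories' task runner to perform the read, then blocks the
+// calling thread on a manual-reset WaitableEvent until the inner read signals
+// completion; returns without reading if posting the task fails.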
+static void ReadPixelsSync(
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ uint32 texture_id,
+ const gfx::Rect& visible_rect,
+ const SkBitmap& pixels) {
+ base::WaitableEvent event(true, false);
+ if (!factories->GetTaskRunner()->PostTask(FROM_HERE,
+ base::Bind(&ReadPixelsSyncInner,
+ factories,
+ texture_id,
+ visible_rect,
+ pixels,
+ &event)))
+ return;
+ event.Wait();
}
void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DVLOG(3) << "PictureReady()";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
PictureBufferMap::iterator it =
assigned_picture_buffers_.find(picture.picture_buffer_id());
@@ -443,86 +440,88 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DCHECK(decoder_texture_target_);
scoped_refptr<VideoFrame> frame(VideoFrame::WrapNativeTexture(
- make_scoped_ptr(new VideoFrame::MailboxHolder(
- pb.texture_mailbox(),
- 0, // sync_point
- BindToCurrentLoop(base::Bind(&GpuVideoDecoder::ReusePictureBuffer,
- weak_this_,
- picture.picture_buffer_id())))),
- decoder_texture_target_,
+ make_scoped_ptr(new gpu::MailboxHolder(
+ pb.texture_mailbox(), decoder_texture_target_, 0 /* sync_point */)),
+ BindToCurrentLoop(base::Bind(&GpuVideoDecoder::ReleaseMailbox,
+ weak_factory_.GetWeakPtr(),
+ factories_,
+ picture.picture_buffer_id(),
+ pb.texture_id())),
pb.size(),
visible_rect,
natural_size,
timestamp,
- base::Bind(&GpuVideoAcceleratorFactories::ReadPixels,
- factories_,
- pb.texture_id(),
- gfx::Size(visible_rect.width(), visible_rect.height())),
- base::Closure()));
+ base::Bind(&ReadPixelsSync, factories_, pb.texture_id(), visible_rect)));
CHECK_GT(available_pictures_, 0);
--available_pictures_;
bool inserted =
- picture_buffers_at_display_.insert(picture.picture_buffer_id()).second;
+ picture_buffers_at_display_.insert(std::make_pair(
+ picture.picture_buffer_id(),
+ pb.texture_id())).second;
DCHECK(inserted);
- EnqueueFrameAndTriggerFrameDelivery(frame);
+ DeliverFrame(frame);
}
-void GpuVideoDecoder::EnqueueFrameAndTriggerFrameDelivery(
+void GpuVideoDecoder::DeliverFrame(
const scoped_refptr<VideoFrame>& frame) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
// During a pending vda->Reset(), we don't accumulate frames. Drop it on the
// floor and return.
if (!pending_reset_cb_.is_null())
return;
- if (frame.get())
- ready_video_frames_.push_back(frame);
- else
- DCHECK(!ready_video_frames_.empty());
-
- if (pending_decode_cb_.is_null())
- return;
-
- base::ResetAndReturn(&pending_decode_cb_)
- .Run(kOk, ready_video_frames_.front());
- ready_video_frames_.pop_front();
+ output_cb_.Run(frame);
}
-void GpuVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id,
- uint32 sync_point) {
- DVLOG(3) << "ReusePictureBuffer(" << picture_buffer_id << ")";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+// static
+void GpuVideoDecoder::ReleaseMailbox(
+ base::WeakPtr<GpuVideoDecoder> decoder,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ int64 picture_buffer_id,
+ uint32 texture_id,
+ const std::vector<uint32>& release_sync_points) {
+ DCHECK(factories->GetTaskRunner()->BelongsToCurrentThread());
- if (!vda_)
- return;
+ for (size_t i = 0; i < release_sync_points.size(); i++)
+ factories->WaitSyncPoint(release_sync_points[i]);
- CHECK(!picture_buffers_at_display_.empty());
+ if (decoder) {
+ decoder->ReusePictureBuffer(picture_buffer_id);
+ return;
+ }
+ // This is the last chance to delete the texture after display, because
+ // the GpuVideoDecoder itself has been destroyed.
+ factories->DeleteTexture(texture_id);
+}
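
ReleaseMailbox() is deliberately static and keyed on a base::WeakPtr: if the decoder is still alive the picture buffer goes back into rotation, and if not, the callback itself performs the final texture cleanup. A standalone sketch of that ownership pattern with std::weak_ptr follows; Decoder and Resources are illustrative names, not Chromium types.

#include <iostream>
#include <memory>

// Stand-in for GpuVideoAcceleratorFactories.
struct Resources {
  void DeleteTexture(unsigned id) { std::cout << "delete texture " << id << "\n"; }
};

class Decoder {
 public:
  void ReusePictureBuffer(int id) { std::cout << "reuse buffer " << id << "\n"; }

  // Static so it can run even after the Decoder is gone.
  static void ReleaseMailbox(std::weak_ptr<Decoder> decoder,
                             std::shared_ptr<Resources> resources,
                             int picture_buffer_id,
                             unsigned texture_id) {
    if (std::shared_ptr<Decoder> live = decoder.lock()) {
      live->ReusePictureBuffer(picture_buffer_id);  // Owner still alive.
      return;
    }
    // Owner destroyed: last chance to release the GPU-side resource.
    resources->DeleteTexture(texture_id);
  }
};

int main() {
  std::shared_ptr<Resources> resources = std::make_shared<Resources>();
  std::shared_ptr<Decoder> decoder = std::make_shared<Decoder>();
  std::weak_ptr<Decoder> weak = decoder;

  Decoder::ReleaseMailbox(weak, resources, 1, 100);  // "reuse buffer 1"
  decoder.reset();                                   // Decoder destroyed.
  Decoder::ReleaseMailbox(weak, resources, 2, 200);  // "delete texture 200"
  return 0;
}
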
- size_t num_erased = picture_buffers_at_display_.erase(picture_buffer_id);
- DCHECK(num_erased);
+void GpuVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id) {
+ DVLOG(3) << "ReusePictureBuffer(" << picture_buffer_id << ")";
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- PictureBufferMap::iterator it =
- assigned_picture_buffers_.find(picture_buffer_id);
+ DCHECK(!picture_buffers_at_display_.empty());
+ PictureBufferTextureMap::iterator display_iterator =
+ picture_buffers_at_display_.find(picture_buffer_id);
+ DCHECK(display_iterator != picture_buffers_at_display_.end());
+ uint32 texture_id = display_iterator->second;
+ picture_buffers_at_display_.erase(display_iterator);
- if (it == assigned_picture_buffers_.end()) {
+ if (!assigned_picture_buffers_.count(picture_buffer_id)) {
// This picture was dismissed while in display, so we postponed deletion.
- it = dismissed_picture_buffers_.find(picture_buffer_id);
- DCHECK(it != dismissed_picture_buffers_.end());
- factories_->DeleteTexture(it->second.texture_id());
- dismissed_picture_buffers_.erase(it);
+ factories_->DeleteTexture(texture_id);
return;
}
- factories_->WaitSyncPoint(sync_point);
++available_pictures_;
- vda_->ReusePictureBuffer(picture_buffer_id);
+ // DestroyVDA() might already have been called.
+ if (vda_)
+ vda_->ReusePictureBuffer(picture_buffer_id);
}
GpuVideoDecoder::SHMBuffer* GpuVideoDecoder::GetSHM(size_t min_size) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (available_shm_segments_.empty() ||
available_shm_segments_.back()->size < min_size) {
size_t size_to_allocate = std::max(min_size, kSharedMemorySegmentBytes);
@@ -538,15 +537,15 @@ GpuVideoDecoder::SHMBuffer* GpuVideoDecoder::GetSHM(size_t min_size) {
}
void GpuVideoDecoder::PutSHM(SHMBuffer* shm_buffer) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
available_shm_segments_.push_back(shm_buffer);
}
void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
DVLOG(3) << "NotifyEndOfBitstreamBuffer(" << id << ")";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- std::map<int32, BufferPair>::iterator it =
+ std::map<int32, PendingDecoderBuffer>::iterator it =
bitstream_buffers_in_decoder_.find(id);
if (it == bitstream_buffers_in_decoder_.end()) {
NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
@@ -555,46 +554,40 @@ void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
}
PutSHM(it->second.shm_buffer);
+ it->second.done_cb.Run(state_ == kError ? kDecodeError : kOk);
bitstream_buffers_in_decoder_.erase(it);
-
- if (pending_reset_cb_.is_null() && state_ != kDrainingDecoder &&
- CanMoreDecodeWorkBeDone() && !pending_decode_cb_.is_null()) {
- base::ResetAndReturn(&pending_decode_cb_).Run(kNotEnoughData, NULL);
- }
}
GpuVideoDecoder::~GpuVideoDecoder() {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
- DCHECK(!vda_.get()); // Stop should have been already called.
- DCHECK(pending_decode_cb_.is_null());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ // Stop() should already have been called.
+ DCHECK(!vda_.get() && assigned_picture_buffers_.empty());
+ DCHECK(bitstream_buffers_in_decoder_.empty());
for (size_t i = 0; i < available_shm_segments_.size(); ++i) {
available_shm_segments_[i]->shm->Close();
delete available_shm_segments_[i];
}
available_shm_segments_.clear();
- for (std::map<int32, BufferPair>::iterator it =
+ for (std::map<int32, PendingDecoderBuffer>::iterator it =
bitstream_buffers_in_decoder_.begin();
it != bitstream_buffers_in_decoder_.end(); ++it) {
it->second.shm_buffer->shm->Close();
}
bitstream_buffers_in_decoder_.clear();
-
- DestroyPictureBuffers(&assigned_picture_buffers_);
- DestroyPictureBuffers(&dismissed_picture_buffers_);
}
void GpuVideoDecoder::NotifyFlushDone() {
DVLOG(3) << "NotifyFlushDone()";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DCHECK_EQ(state_, kDrainingDecoder);
state_ = kDecoderDrained;
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
+ base::ResetAndReturn(&eos_decode_cb_).Run(kOk);
}
void GpuVideoDecoder::NotifyResetDone() {
DVLOG(3) << "NotifyResetDone()";
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
- DCHECK(ready_video_frames_.empty());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ DCHECK(bitstream_buffers_in_decoder_.empty());
// This needs to happen after the Reset() on vda_ is done to ensure pictures
// delivered during the reset can find their time data.
@@ -602,25 +595,22 @@ void GpuVideoDecoder::NotifyResetDone() {
if (!pending_reset_cb_.is_null())
base::ResetAndReturn(&pending_reset_cb_).Run();
-
- if (!pending_decode_cb_.is_null())
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
}
void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
- DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (!vda_)
return;
+ state_ = kError;
+
DLOG(ERROR) << "VDA Error: " << error;
DestroyVDA();
+}
- state_ = kError;
-
- if (!pending_decode_cb_.is_null()) {
- base::ResetAndReturn(&pending_decode_cb_).Run(kDecodeError, NULL);
- return;
- }
+void GpuVideoDecoder::DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent()
+ const {
+ DCHECK(factories_->GetTaskRunner()->BelongsToCurrentThread());
}
} // namespace media
diff --git a/chromium/media/filters/gpu_video_decoder.h b/chromium/media/filters/gpu_video_decoder.h
index 5f43d84abbc..e15200830b9 100644
--- a/chromium/media/filters/gpu_video_decoder.h
+++ b/chromium/media/filters/gpu_video_decoder.h
@@ -19,8 +19,8 @@
template <class T> class scoped_refptr;
namespace base {
-class MessageLoopProxy;
class SharedMemory;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -30,29 +30,31 @@ class GpuVideoAcceleratorFactories;
class MediaLog;
// GPU-accelerated video decoder implementation. Relies on
-// AcceleratedVideoDecoderMsg_Decode and friends.
+// AcceleratedVideoDecoderMsg_Decode and friends. Can be created on any thread
+// but must be accessed and destroyed on GpuVideoAcceleratorFactories's
+// GetTaskRunner().
class MEDIA_EXPORT GpuVideoDecoder
: public VideoDecoder,
public VideoDecodeAccelerator::Client {
public:
- // The message loop of |factories| will be saved to |gvd_loop_proxy_|.
explicit GpuVideoDecoder(
const scoped_refptr<GpuVideoAcceleratorFactories>& factories,
const scoped_refptr<MediaLog>& media_log);
// VideoDecoder implementation.
virtual void Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) OVERRIDE;
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) OVERRIDE;
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop(const base::Closure& closure) OVERRIDE;
- virtual bool HasAlpha() const OVERRIDE;
+ virtual void Stop() OVERRIDE;
virtual bool NeedsBitstreamConversion() const OVERRIDE;
virtual bool CanReadWithoutStalling() const OVERRIDE;
+ virtual int GetMaxDecodeRequests() const OVERRIDE;
// VideoDecodeAccelerator::Client implementation.
- virtual void NotifyInitializeDone() OVERRIDE;
virtual void ProvidePictureBuffers(uint32 count,
const gfx::Size& size,
uint32 texture_target) OVERRIDE;
@@ -83,28 +85,29 @@ class MEDIA_EXPORT GpuVideoDecoder
};
// A SHMBuffer and the DecoderBuffer its data came from.
- struct BufferPair {
- BufferPair(SHMBuffer* s, const scoped_refptr<DecoderBuffer>& b);
- ~BufferPair();
+ struct PendingDecoderBuffer {
+ PendingDecoderBuffer(SHMBuffer* s,
+ const scoped_refptr<DecoderBuffer>& b,
+ const DecodeCB& done_cb);
+ ~PendingDecoderBuffer();
SHMBuffer* shm_buffer;
scoped_refptr<DecoderBuffer> buffer;
+ DecodeCB done_cb;
};
typedef std::map<int32, PictureBuffer> PictureBufferMap;
- // Return true if more decode work can be piled on to the VDA.
- bool CanMoreDecodeWorkBeDone();
-
- // Enqueue a frame for later delivery (or drop it on the floor if a
- // vda->Reset() is in progress) and trigger out-of-line delivery of the oldest
- // ready frame to the client if there is a pending read. A NULL |frame|
- // merely triggers delivery, and requires the ready_video_frames_ queue not be
- // empty.
- void EnqueueFrameAndTriggerFrameDelivery(
- const scoped_refptr<VideoFrame>& frame);
+ void DeliverFrame(const scoped_refptr<VideoFrame>& frame);
+ // Static so that it can run even after the GpuVideoDecoder is deleted.
+ static void ReleaseMailbox(
+ base::WeakPtr<GpuVideoDecoder> decoder,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories,
+ int64 picture_buffer_id,
+ uint32 texture_id,
+ const std::vector<uint32>& release_sync_points);
// Indicate the picture buffer can be reused by the decoder.
- void ReusePictureBuffer(int64 picture_buffer_id, uint32 sync_point);
+ void ReusePictureBuffer(int64 picture_buffer_id);
void RecordBufferData(
const BitstreamBuffer& bitstream_buffer, const DecoderBuffer& buffer);
@@ -123,12 +126,10 @@ class MEDIA_EXPORT GpuVideoDecoder
// Destroy all PictureBuffers in |buffers|, and delete their textures.
void DestroyPictureBuffers(PictureBufferMap* buffers);
- bool needs_bitstream_conversion_;
+ // Assert the contract that this class is operated on the right thread.
+ void DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent() const;
- // Message loop which this class and |factories_| run on.
- scoped_refptr<base::MessageLoopProxy> gvd_loop_proxy_;
- base::WeakPtrFactory<GpuVideoDecoder> weak_factory_;
- base::WeakPtr<GpuVideoDecoder> weak_this_;
+ bool needs_bitstream_conversion_;
scoped_refptr<GpuVideoAcceleratorFactories> factories_;
@@ -136,9 +137,11 @@ class MEDIA_EXPORT GpuVideoDecoder
// occurs.
scoped_ptr<VideoDecodeAccelerator> vda_;
- // Callbacks that are !is_null() only during their respective operation being
- // asynchronously executed.
- DecodeCB pending_decode_cb_;
+ OutputCB output_cb_;
+
+ DecodeCB eos_decode_cb_;
+
+ // Non-null only while a reset is pending.
base::Closure pending_reset_cb_;
State state_;
@@ -152,13 +155,14 @@ class MEDIA_EXPORT GpuVideoDecoder
scoped_refptr<MediaLog> media_log_;
- std::map<int32, BufferPair> bitstream_buffers_in_decoder_;
+ std::map<int32, PendingDecoderBuffer> bitstream_buffers_in_decoder_;
PictureBufferMap assigned_picture_buffers_;
- PictureBufferMap dismissed_picture_buffers_;
// PictureBuffers given to us by VDA via PictureReady, which we sent forward
// as VideoFrames to be rendered via decode_cb_, and which will be returned
// to us via ReusePictureBuffer.
- std::set<int32> picture_buffers_at_display_;
+ typedef std::map<int32 /* picture_buffer_id */, uint32 /* texture_id */>
+ PictureBufferTextureMap;
+ PictureBufferTextureMap picture_buffers_at_display_;
// The texture target used for decoded pictures.
uint32 decoder_texture_target_;
@@ -176,7 +180,6 @@ class MEDIA_EXPORT GpuVideoDecoder
// picture_buffer_id and the frame wrapping the corresponding Picture, for
// frames that have been decoded but haven't been requested by a Decode() yet.
- std::list<scoped_refptr<VideoFrame> > ready_video_frames_;
int32 next_picture_buffer_id_;
int32 next_bitstream_buffer_id_;
@@ -184,6 +187,10 @@ class MEDIA_EXPORT GpuVideoDecoder
// HasAvailableOutputFrames().
int available_pictures_;
+ // Bound to factories_->GetTaskRunner().
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<GpuVideoDecoder> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
};
diff --git a/chromium/media/filters/h264_bit_reader.cc b/chromium/media/filters/h264_bit_reader.cc
new file mode 100644
index 00000000000..9894d978978
--- /dev/null
+++ b/chromium/media/filters/h264_bit_reader.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "media/filters/h264_bit_reader.h"
+
+namespace media {
+
+H264BitReader::H264BitReader()
+ : data_(NULL),
+ bytes_left_(0),
+ curr_byte_(0),
+ num_remaining_bits_in_curr_byte_(0),
+ prev_two_bytes_(0),
+ emulation_prevention_bytes_(0) {}
+
+H264BitReader::~H264BitReader() {}
+
+bool H264BitReader::Initialize(const uint8* data, off_t size) {
+ DCHECK(data);
+
+ if (size < 1)
+ return false;
+
+ data_ = data;
+ bytes_left_ = size;
+ num_remaining_bits_in_curr_byte_ = 0;
+ // Initially set to 0xffff to accept all initial two-byte sequences.
+ prev_two_bytes_ = 0xffff;
+ emulation_prevention_bytes_ = 0;
+
+ return true;
+}
+
+bool H264BitReader::UpdateCurrByte() {
+ if (bytes_left_ < 1)
+ return false;
+
+ // Emulation prevention three-byte detection.
+ // If a sequence of 0x000003 is found, skip (ignore) the last byte (0x03).
+ if (*data_ == 0x03 && (prev_two_bytes_ & 0xffff) == 0) {
+ // Detected 0x000003, skip last byte.
+ ++data_;
+ --bytes_left_;
+ ++emulation_prevention_bytes_;
+ // Need another full three bytes before we can detect the sequence again.
+ prev_two_bytes_ = 0xffff;
+
+ if (bytes_left_ < 1)
+ return false;
+ }
+
+ // Load a new byte and advance pointers.
+ curr_byte_ = *data_++ & 0xff;
+ --bytes_left_;
+ num_remaining_bits_in_curr_byte_ = 8;
+
+ prev_two_bytes_ = (prev_two_bytes_ << 8) | curr_byte_;
+
+ return true;
+}
+
+// Read |num_bits| (1 to 31 inclusive) from the stream and return them
+// in |out|, with first bit in the stream as MSB in |out| at position
+// (|num_bits| - 1).
+bool H264BitReader::ReadBits(int num_bits, int* out) {
+ int bits_left = num_bits;
+ *out = 0;
+ DCHECK(num_bits <= 31);
+
+ while (num_remaining_bits_in_curr_byte_ < bits_left) {
+ // Take all that's left in current byte, shift to make space for the rest.
+ *out |= (curr_byte_ << (bits_left - num_remaining_bits_in_curr_byte_));
+ bits_left -= num_remaining_bits_in_curr_byte_;
+
+ if (!UpdateCurrByte())
+ return false;
+ }
+
+ *out |= (curr_byte_ >> (num_remaining_bits_in_curr_byte_ - bits_left));
+ *out &= ((1 << num_bits) - 1);
+ num_remaining_bits_in_curr_byte_ -= bits_left;
+
+ return true;
+}
+
+off_t H264BitReader::NumBitsLeft() {
+ return (num_remaining_bits_in_curr_byte_ + bytes_left_ * 8);
+}
+
+bool H264BitReader::HasMoreRBSPData() {
+ // Make sure we have more bits: if we are at 0 bits in the current byte
+ // and updating the current byte fails, we don't have more data anyway.
+ if (num_remaining_bits_in_curr_byte_ == 0 && !UpdateCurrByte())
+ return false;
+
+ // On last byte?
+ if (bytes_left_)
+ return true;
+
+ // Last byte, look for stop bit;
+ // We have more RBSP data if the last non-zero bit we find is not the
+ // first available bit.
+ return (curr_byte_ &
+ ((1 << (num_remaining_bits_in_curr_byte_ - 1)) - 1)) != 0;
+}
+
+size_t H264BitReader::NumEmulationPreventionBytesRead() {
+ return emulation_prevention_bytes_;
+}
+
+} // namespace media
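
The reader strips H.264 emulation-prevention bytes on the fly in UpdateCurrByte(): the encoder inserts 0x03 after any 00 00 pair whose next RBSP byte would be 0x00-0x03, and the decoder must drop it again. A standalone sketch of the same transformation applied to a whole buffer, assuming the input is a raw NAL payload without its start code:

#include <cstdint>
#include <cstdio>
#include <vector>

// Strip emulation-prevention bytes (00 00 03 -> 00 00), the transformation
// H264BitReader::UpdateCurrByte performs incrementally.
std::vector<uint8_t> StripEmulationPrevention(const std::vector<uint8_t>& in) {
  std::vector<uint8_t> out;
  int zeros = 0;  // Length of the current run of 0x00 bytes.
  for (uint8_t b : in) {
    if (zeros >= 2 && b == 0x03) {
      zeros = 0;  // Drop the 0x03; a new zero run must start from scratch.
      continue;
    }
    zeros = (b == 0x00) ? zeros + 1 : 0;
    out.push_back(b);
  }
  return out;
}

int main() {
  // 00 00 03 01 on the wire encodes the RBSP bytes 00 00 01.
  std::vector<uint8_t> ebsp = {0x42, 0x00, 0x00, 0x03, 0x01, 0x65};
  for (uint8_t b : StripEmulationPrevention(ebsp))
    std::printf("%02x ", b);  // Prints: 42 00 00 01 65
  std::printf("\n");
  return 0;
}
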
diff --git a/chromium/media/filters/h264_bit_reader.h b/chromium/media/filters/h264_bit_reader.h
new file mode 100644
index 00000000000..01cfd74109f
--- /dev/null
+++ b/chromium/media/filters/h264_bit_reader.h
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains a bit reader for H264 Annex-B video streams.
+
+#ifndef MEDIA_FILTERS_H264_BIT_READER_H_
+#define MEDIA_FILTERS_H264_BIT_READER_H_
+
+#include <sys/types.h>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A class to provide bit-granularity reading of H.264 streams.
+// This is not a generic bit reader class, as it takes into account
+// H.264 stream-specific constraints, such as skipping emulation-prevention
+// bytes and stop bits. See spec for more details.
+class MEDIA_EXPORT H264BitReader {
+ public:
+ H264BitReader();
+ ~H264BitReader();
+
+ // Initialize the reader to start reading at |data|, |size| being size
+ // of |data| in bytes.
+ // Return false on insufficient stream size.
+ // TODO(posciak,fischman): consider replacing Initialize() with
+ // heap-allocating and creating bit readers on demand instead.
+ bool Initialize(const uint8* data, off_t size);
+
+ // Read the next |num_bits| bits from the stream into |*out|; the first bit
+ // read becomes the MSB of |*out|, at bit position (|num_bits| - 1).
+ // |num_bits| may be 1-31, inclusive.
+ // Return false if the given number of bits cannot be read (not enough
+ // bits in the stream), true otherwise.
+ bool ReadBits(int num_bits, int* out);
+
+ // Return the number of bits left in the stream.
+ off_t NumBitsLeft();
+
+ // See the definition of more_rbsp_data() in spec.
+ bool HasMoreRBSPData();
+
+ // Return the number of emulation prevention bytes already read.
+ size_t NumEmulationPreventionBytesRead();
+
+ private:
+ // Advance to the next byte, loading it into curr_byte_.
+ // Return false on end of stream.
+ bool UpdateCurrByte();
+
+ // Pointer to the next unread (not in curr_byte_) byte in the stream.
+ const uint8* data_;
+
+ // Bytes left in the stream (without the curr_byte_).
+ off_t bytes_left_;
+
+ // Contents of the current byte; first unread bit starting at position
+ // 8 - num_remaining_bits_in_curr_byte_ from MSB.
+ int curr_byte_;
+
+ // Number of bits remaining in curr_byte_.
+ int num_remaining_bits_in_curr_byte_;
+
+ // Used in emulation prevention three byte detection (see spec).
+ // Initially set to 0xffff to accept all initial two-byte sequences.
+ int prev_two_bytes_;
+
+ // Number of emulation prevention bytes (0x000003) encountered so far.
+ size_t emulation_prevention_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264BitReader);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_H264_BIT_READER_H_
diff --git a/chromium/media/filters/h264_bit_reader_unittest.cc b/chromium/media/filters/h264_bit_reader_unittest.cc
new file mode 100644
index 00000000000..e12e75ebcd9
--- /dev/null
+++ b/chromium/media/filters/h264_bit_reader_unittest.cc
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/h264_bit_reader.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(H264BitReaderTest, ReadStreamWithoutEscapeAndTrailingZeroBytes) {
+ H264BitReader reader;
+ const unsigned char rbsp[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xa0};
+ int dummy = 0;
+
+ EXPECT_TRUE(reader.Initialize(rbsp, sizeof(rbsp)));
+
+ EXPECT_TRUE(reader.ReadBits(1, &dummy));
+ EXPECT_EQ(dummy, 0x00);
+ EXPECT_EQ(reader.NumBitsLeft(), 47);
+ EXPECT_TRUE(reader.HasMoreRBSPData());
+
+ EXPECT_TRUE(reader.ReadBits(8, &dummy));
+ EXPECT_EQ(dummy, 0x02);
+ EXPECT_EQ(reader.NumBitsLeft(), 39);
+ EXPECT_TRUE(reader.HasMoreRBSPData());
+
+ EXPECT_TRUE(reader.ReadBits(31, &dummy));
+ EXPECT_EQ(dummy, 0x23456789);
+ EXPECT_EQ(reader.NumBitsLeft(), 8);
+ EXPECT_TRUE(reader.HasMoreRBSPData());
+
+ EXPECT_TRUE(reader.ReadBits(1, &dummy));
+ EXPECT_EQ(dummy, 1);
+ EXPECT_EQ(reader.NumBitsLeft(), 7);
+ EXPECT_TRUE(reader.HasMoreRBSPData());
+
+ EXPECT_TRUE(reader.ReadBits(1, &dummy));
+ EXPECT_EQ(dummy, 0);
+ EXPECT_EQ(reader.NumBitsLeft(), 6);
+ EXPECT_FALSE(reader.HasMoreRBSPData());
+}
+
+TEST(H264BitReaderTest, SingleByteStream) {
+ H264BitReader reader;
+ const unsigned char rbsp[] = {0x18};
+ int dummy = 0;
+
+ EXPECT_TRUE(reader.Initialize(rbsp, sizeof(rbsp)));
+ EXPECT_EQ(reader.NumBitsLeft(), 8);
+ EXPECT_TRUE(reader.HasMoreRBSPData());
+
+ EXPECT_TRUE(reader.ReadBits(4, &dummy));
+ EXPECT_EQ(dummy, 0x01);
+ EXPECT_EQ(reader.NumBitsLeft(), 4);
+ EXPECT_FALSE(reader.HasMoreRBSPData());
+}
+
+TEST(H264BitReaderTest, StopBitOccupyFullByte) {
+ H264BitReader reader;
+ const unsigned char rbsp[] = {0xab, 0x80};
+ int dummy = 0;
+
+ EXPECT_TRUE(reader.Initialize(rbsp, sizeof(rbsp)));
+ EXPECT_EQ(reader.NumBitsLeft(), 16);
+ EXPECT_TRUE(reader.HasMoreRBSPData());
+
+ EXPECT_TRUE(reader.ReadBits(8, &dummy));
+ EXPECT_EQ(dummy, 0xab);
+ EXPECT_EQ(reader.NumBitsLeft(), 8);
+ EXPECT_FALSE(reader.HasMoreRBSPData());
+}
+
+} // namespace media
diff --git a/chromium/media/filters/h264_parser.cc b/chromium/media/filters/h264_parser.cc
new file mode 100644
index 00000000000..4cdc695933f
--- /dev/null
+++ b/chromium/media/filters/h264_parser.cc
@@ -0,0 +1,1264 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/stl_util.h"
+
+#include "media/filters/h264_parser.h"
+
+namespace media {
+
+bool H264SliceHeader::IsPSlice() const {
+ return (slice_type % 5 == kPSlice);
+}
+
+bool H264SliceHeader::IsBSlice() const {
+ return (slice_type % 5 == kBSlice);
+}
+
+bool H264SliceHeader::IsISlice() const {
+ return (slice_type % 5 == kISlice);
+}
+
+bool H264SliceHeader::IsSPSlice() const {
+ return (slice_type % 5 == kSPSlice);
+}
+
+bool H264SliceHeader::IsSISlice() const {
+ return (slice_type % 5 == kSISlice);
+}
+
+H264NALU::H264NALU() {
+ memset(this, 0, sizeof(*this));
+}
+
+H264SPS::H264SPS() {
+ memset(this, 0, sizeof(*this));
+}
+
+H264PPS::H264PPS() {
+ memset(this, 0, sizeof(*this));
+}
+
+H264SliceHeader::H264SliceHeader() {
+ memset(this, 0, sizeof(*this));
+}
+
+H264SEIMessage::H264SEIMessage() {
+ memset(this, 0, sizeof(*this));
+}
+
+#define READ_BITS_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!br_.ReadBits(num_bits, &_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ *out = _out; \
+ } while (0)
+
+#define READ_BOOL_OR_RETURN(out) \
+ do { \
+ int _out; \
+ if (!br_.ReadBits(1, &_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ *out = _out != 0; \
+ } while (0)
+
+#define READ_UE_OR_RETURN(out) \
+ do { \
+ if (ReadUE(out) != kOk) { \
+ DVLOG(1) << "Error in stream: invalid value while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+#define READ_SE_OR_RETURN(out) \
+ do { \
+ if (ReadSE(out) != kOk) { \
+ DVLOG(1) << "Error in stream: invalid value while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+#define IN_RANGE_OR_RETURN(val, min, max) \
+ do { \
+ if ((val) < (min) || (val) > (max)) { \
+ DVLOG(1) << "Error in stream: invalid value, expected " #val " to be" \
+ << " in range [" << (min) << ":" << (max) << "]" \
+ << " found " << (val) << " instead"; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+#define TRUE_OR_RETURN(a) \
+ do { \
+ if (!(a)) { \
+ DVLOG(1) << "Error in stream: invalid value, expected " << #a; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+enum AspectRatioIdc {
+ kExtendedSar = 255,
+};
+
+// ISO 14496 part 10
+// VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
+static const int kTableSarWidth[] = {
+ 0, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160, 4, 3, 2
+};
+static const int kTableSarHeight[] = {
+ 0, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99, 3, 2, 1
+};
+COMPILE_ASSERT(arraysize(kTableSarWidth) == arraysize(kTableSarHeight),
+ sar_tables_must_have_same_size);
+
+H264Parser::H264Parser() {
+ Reset();
+}
+
+H264Parser::~H264Parser() {
+ STLDeleteValues(&active_SPSes_);
+ STLDeleteValues(&active_PPSes_);
+}
+
+void H264Parser::Reset() {
+ stream_ = NULL;
+ bytes_left_ = 0;
+}
+
+void H264Parser::SetStream(const uint8* stream, off_t stream_size) {
+ DCHECK(stream);
+ DCHECK_GT(stream_size, 0);
+
+ stream_ = stream;
+ bytes_left_ = stream_size;
+}
+
+const H264PPS* H264Parser::GetPPS(int pps_id) {
+ return active_PPSes_[pps_id];
+}
+
+const H264SPS* H264Parser::GetSPS(int sps_id) {
+ return active_SPSes_[sps_id];
+}
+
+static inline bool IsStartCode(const uint8* data) {
+ return data[0] == 0x00 && data[1] == 0x00 && data[2] == 0x01;
+}
+
+// static
+bool H264Parser::FindStartCode(const uint8* data, off_t data_size,
+ off_t* offset, off_t* start_code_size) {
+ DCHECK_GE(data_size, 0);
+ off_t bytes_left = data_size;
+
+ while (bytes_left >= 3) {
+ if (IsStartCode(data)) {
+ // Found three-byte start code, set pointer at its beginning.
+ *offset = data_size - bytes_left;
+ *start_code_size = 3;
+
+ // If there is a zero byte before this start code,
+ // then it's actually a four-byte start code, so backtrack one byte.
+ if (*offset > 0 && *(data - 1) == 0x00) {
+ --(*offset);
+ ++(*start_code_size);
+ }
+
+ return true;
+ }
+
+ ++data;
+ --bytes_left;
+ }
+
+ // End of data: offset is pointing to the first byte that was not considered
+ // as a possible start of a start code.
+ // Note: there is no security issue when receiving a negative |data_size|
+ // since in this case, |bytes_left| is equal to |data_size| and thus
+ // |*offset| is equal to 0 (valid offset).
+ *offset = data_size - bytes_left;
+ *start_code_size = 0;
+ return false;
+}
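
FindStartCode() scans for the three-byte pattern 00 00 01 and, when a zero byte precedes the match, widens it to the four-byte form 00 00 00 01. A standalone sketch with a worked example; the function below mirrors the logic above and is not the Chromium entry point:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Find an Annex-B start code in |data|; on success return its |offset| and
// |start_code_size| (3 or 4 bytes).
bool FindStartCode(const uint8_t* data, size_t size,
                   size_t* offset, size_t* start_code_size) {
  for (size_t i = 0; i + 3 <= size; ++i) {
    if (data[i] == 0x00 && data[i + 1] == 0x00 && data[i + 2] == 0x01) {
      *offset = i;
      *start_code_size = 3;
      if (i > 0 && data[i - 1] == 0x00) {  // Four-byte form: 00 00 00 01.
        --*offset;
        ++*start_code_size;
      }
      return true;
    }
  }
  return false;
}

int main() {
  const uint8_t stream[] = {0xaa, 0x00, 0x00, 0x00, 0x01, 0x67};
  size_t offset = 0, size = 0;
  assert(FindStartCode(stream, sizeof(stream), &offset, &size));
  assert(offset == 1 && size == 4);  // The extra leading zero widens it to 4.
  return 0;
}
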
+
+bool H264Parser::LocateNALU(off_t* nalu_size, off_t* start_code_size) {
+ // Find the start code of next NALU.
+ off_t nalu_start_off = 0;
+ off_t annexb_start_code_size = 0;
+ if (!FindStartCode(stream_, bytes_left_,
+ &nalu_start_off, &annexb_start_code_size)) {
+ DVLOG(4) << "Could not find start code, end of stream?";
+ return false;
+ }
+
+ // Move the stream to the beginning of the NALU (pointing at the start code).
+ stream_ += nalu_start_off;
+ bytes_left_ -= nalu_start_off;
+
+ const uint8* nalu_data = stream_ + annexb_start_code_size;
+ off_t max_nalu_data_size = bytes_left_ - annexb_start_code_size;
+ if (max_nalu_data_size <= 0) {
+ DVLOG(3) << "End of stream";
+ return false;
+ }
+
+ // Find the start code of next NALU;
+ // if successful, |nalu_size_without_start_code| is the number of bytes from
+ // after previous start code to before this one;
+ // if next start code is not found, it is still a valid NALU since there
+ // are some bytes left after the first start code: all the remaining bytes
+ // belong to the current NALU.
+ off_t next_start_code_size = 0;
+ off_t nalu_size_without_start_code = 0;
+ if (!FindStartCode(nalu_data, max_nalu_data_size,
+ &nalu_size_without_start_code, &next_start_code_size)) {
+ nalu_size_without_start_code = max_nalu_data_size;
+ }
+ *nalu_size = nalu_size_without_start_code + annexb_start_code_size;
+ *start_code_size = annexb_start_code_size;
+ return true;
+}
+
+H264Parser::Result H264Parser::ReadUE(int* val) {
+ int num_bits = -1;
+ int bit;
+ int rest;
+
+ // Count the number of contiguous zero bits.
+ do {
+ READ_BITS_OR_RETURN(1, &bit);
+ num_bits++;
+ } while (bit == 0);
+
+ if (num_bits > 31)
+ return kInvalidStream;
+
+ // Calculate exp-Golomb code value of size num_bits.
+ *val = (1 << num_bits) - 1;
+
+ if (num_bits > 0) {
+ READ_BITS_OR_RETURN(num_bits, &rest);
+ *val += rest;
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ReadSE(int* val) {
+ int ue;
+ Result res;
+
+ // See Chapter 9 in the spec.
+ res = ReadUE(&ue);
+ if (res != kOk)
+ return res;
+
+ if (ue % 2 == 0)
+ *val = -(ue / 2);
+ else
+ *val = ue / 2 + 1;
+
+ return kOk;
+}
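
ReadUE() and ReadSE() implement Exp-Golomb coding (chapter 9 of the spec): count the leading zero bits n, read n further bits as rest, and ue = 2^n - 1 + rest; se then maps ue = 0, 1, 2, 3, 4, ... onto 0, 1, -1, 2, -2, ... A standalone sketch over a bit string, where BitString is an illustrative stand-in for the bit reader:

#include <cassert>
#include <cstdio>
#include <string>

struct BitString {
  std::string bits;  // e.g. "00100", MSB-first.
  size_t pos = 0;
  int ReadBit() { return bits.at(pos++) - '0'; }
};

int ReadUE(BitString* bs) {
  int leading_zeros = 0;
  while (bs->ReadBit() == 0)
    ++leading_zeros;
  int rest = 0;
  for (int i = 0; i < leading_zeros; ++i)
    rest = (rest << 1) | bs->ReadBit();
  return (1 << leading_zeros) - 1 + rest;
}

int ReadSE(BitString* bs) {
  int ue = ReadUE(bs);
  // ue = 0, 1, 2, 3, 4, ... maps to se = 0, 1, -1, 2, -2, ...
  return (ue % 2 == 0) ? -(ue / 2) : ue / 2 + 1;
}

int main() {
  BitString a{"00100"};      // Two leading zeros, stop bit, then "00".
  assert(ReadUE(&a) == 3);   // (1 << 2) - 1 + 0 = 3.
  BitString b{"00100"};
  assert(ReadSE(&b) == 2);   // ue = 3 (odd) -> se = 3 / 2 + 1 = +2.
  std::puts("ok");
  return 0;
}
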
+
+H264Parser::Result H264Parser::AdvanceToNextNALU(H264NALU* nalu) {
+ off_t start_code_size;
+ off_t nalu_size_with_start_code;
+ if (!LocateNALU(&nalu_size_with_start_code, &start_code_size)) {
+ DVLOG(4) << "Could not find next NALU, bytes left in stream: "
+ << bytes_left_;
+ return kEOStream;
+ }
+
+ nalu->data = stream_ + start_code_size;
+ nalu->size = nalu_size_with_start_code - start_code_size;
+ DVLOG(4) << "NALU found: size=" << nalu_size_with_start_code;
+
+ // Initialize bit reader at the start of found NALU.
+ if (!br_.Initialize(nalu->data, nalu->size))
+ return kEOStream;
+
+ // Move parser state to after this NALU, so next time AdvanceToNextNALU
+ // is called, we will effectively be skipping it;
+ // other parsing functions will use the position saved
+ // in bit reader for parsing, so we don't have to remember it here.
+ stream_ += nalu_size_with_start_code;
+ bytes_left_ -= nalu_size_with_start_code;
+
+ // Read NALU header, skip the forbidden_zero_bit, but check for it.
+ int data;
+ READ_BITS_OR_RETURN(1, &data);
+ TRUE_OR_RETURN(data == 0);
+
+ READ_BITS_OR_RETURN(2, &nalu->nal_ref_idc);
+ READ_BITS_OR_RETURN(5, &nalu->nal_unit_type);
+
+ DVLOG(4) << "NALU type: " << static_cast<int>(nalu->nal_unit_type)
+ << " at: " << reinterpret_cast<const void*>(nalu->data)
+ << " size: " << nalu->size
+ << " ref: " << static_cast<int>(nalu->nal_ref_idc);
+
+ return kOk;
+}
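
Typical use of the parser is a loop over AdvanceToNextNALU() until kEOStream, dispatching on nal_unit_type. A sketch of such a loop, assuming it is built inside Chromium's media target where this parser lives; the surrounding function and the origin of |stream|/|size| are illustrative:

#include "media/filters/h264_parser.h"

// Walk all NAL units of an Annex-B buffer, feeding SPS/PPS NALUs back into
// the parser so later slice headers can resolve them.
bool ParseAllNALUs(const uint8* stream, off_t size) {
  media::H264Parser parser;
  parser.SetStream(stream, size);

  media::H264NALU nalu;
  for (;;) {
    media::H264Parser::Result res = parser.AdvanceToNextNALU(&nalu);
    if (res == media::H264Parser::kEOStream)
      return true;   // No more start codes: done.
    if (res != media::H264Parser::kOk)
      return false;  // Malformed or unsupported stream.

    int id;
    switch (nalu.nal_unit_type) {
      case media::H264NALU::kSPS:
        if (parser.ParseSPS(&id) != media::H264Parser::kOk)
          return false;
        break;
      case media::H264NALU::kPPS:
        if (parser.ParsePPS(&id) != media::H264Parser::kOk)
          return false;
        break;
      default:
        break;  // Slices, SEI, etc. are handled by the caller.
    }
  }
}
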
+
+// Default scaling lists (per spec).
+static const int kDefault4x4Intra[kH264ScalingList4x4Length] = {
+ 6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42, };
+
+static const int kDefault4x4Inter[kH264ScalingList4x4Length] = {
+ 10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34, };
+
+static const int kDefault8x8Intra[kH264ScalingList8x8Length] = {
+ 6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, 18, 18, 18, 23,
+ 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27,
+ 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42, };
+
+static const int kDefault8x8Inter[kH264ScalingList8x8Length] = {
+ 9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, 19, 19, 19, 21,
+ 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27,
+ 27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35, };
+
+static inline void DefaultScalingList4x4(
+ int i,
+ int scaling_list4x4[][kH264ScalingList4x4Length]) {
+ DCHECK_LT(i, 6);
+
+ if (i < 3)
+ memcpy(scaling_list4x4[i], kDefault4x4Intra, sizeof(kDefault4x4Intra));
+ else if (i < 6)
+ memcpy(scaling_list4x4[i], kDefault4x4Inter, sizeof(kDefault4x4Inter));
+}
+
+static inline void DefaultScalingList8x8(
+ int i,
+ int scaling_list8x8[][kH264ScalingList8x8Length]) {
+ DCHECK_LT(i, 6);
+
+ if (i % 2 == 0)
+ memcpy(scaling_list8x8[i], kDefault8x8Intra, sizeof(kDefault8x8Intra));
+ else
+ memcpy(scaling_list8x8[i], kDefault8x8Inter, sizeof(kDefault8x8Inter));
+}
+
+static void FallbackScalingList4x4(
+ int i,
+ const int default_scaling_list_intra[],
+ const int default_scaling_list_inter[],
+ int scaling_list4x4[][kH264ScalingList4x4Length]) {
+ static const int kScalingList4x4ByteSize =
+ sizeof(scaling_list4x4[0][0]) * kH264ScalingList4x4Length;
+
+ switch (i) {
+ case 0:
+ memcpy(scaling_list4x4[i], default_scaling_list_intra,
+ kScalingList4x4ByteSize);
+ break;
+
+ case 1:
+ memcpy(scaling_list4x4[i], scaling_list4x4[0], kScalingList4x4ByteSize);
+ break;
+
+ case 2:
+ memcpy(scaling_list4x4[i], scaling_list4x4[1], kScalingList4x4ByteSize);
+ break;
+
+ case 3:
+ memcpy(scaling_list4x4[i], default_scaling_list_inter,
+ kScalingList4x4ByteSize);
+ break;
+
+ case 4:
+ memcpy(scaling_list4x4[i], scaling_list4x4[3], kScalingList4x4ByteSize);
+ break;
+
+ case 5:
+ memcpy(scaling_list4x4[i], scaling_list4x4[4], kScalingList4x4ByteSize);
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+static void FallbackScalingList8x8(
+ int i,
+ const int default_scaling_list_intra[],
+ const int default_scaling_list_inter[],
+ int scaling_list8x8[][kH264ScalingList8x8Length]) {
+ static const int kScalingList8x8ByteSize =
+ sizeof(scaling_list8x8[0][0]) * kH264ScalingList8x8Length;
+
+ switch (i) {
+ case 0:
+ memcpy(scaling_list8x8[i], default_scaling_list_intra,
+ kScalingList8x8ByteSize);
+ break;
+
+ case 1:
+ memcpy(scaling_list8x8[i], default_scaling_list_inter,
+ kScalingList8x8ByteSize);
+ break;
+
+ case 2:
+ memcpy(scaling_list8x8[i], scaling_list8x8[0], kScalingList8x8ByteSize);
+ break;
+
+ case 3:
+ memcpy(scaling_list8x8[i], scaling_list8x8[1], kScalingList8x8ByteSize);
+ break;
+
+ case 4:
+ memcpy(scaling_list8x8[i], scaling_list8x8[2], kScalingList8x8ByteSize);
+ break;
+
+ case 5:
+ memcpy(scaling_list8x8[i], scaling_list8x8[3], kScalingList8x8ByteSize);
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+H264Parser::Result H264Parser::ParseScalingList(int size,
+ int* scaling_list,
+ bool* use_default) {
+ // See chapter 7.3.2.1.1.1.
+ int last_scale = 8;
+ int next_scale = 8;
+ int delta_scale;
+
+ *use_default = false;
+
+ for (int j = 0; j < size; ++j) {
+ if (next_scale != 0) {
+ READ_SE_OR_RETURN(&delta_scale);
+ IN_RANGE_OR_RETURN(delta_scale, -128, 127);
+ next_scale = (last_scale + delta_scale + 256) & 0xff;
+
+ if (j == 0 && next_scale == 0) {
+ *use_default = true;
+ return kOk;
+ }
+ }
+
+ scaling_list[j] = (next_scale == 0) ? last_scale : next_scale;
+ last_scale = scaling_list[j];
+ }
+
+ return kOk;
+}
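
ParseScalingList() is the spec's delta coding (7.3.2.1.1.1): each entry is the previous one plus a signed delta_scale, wrapped modulo 256, and a next_scale of zero freezes all remaining entries at the last value (a zero produced by the very first delta instead selects the default list). A standalone worked example of the accumulation, with hypothetical delta values:

#include <cstdio>

int main() {
  // Hypothetical delta_scale values as they would come from the stream.
  const int deltas[] = {8, -2, -14};
  int num_deltas_read = 0;

  int last_scale = 8;
  int next_scale = 8;
  int scaling_list[6];
  for (int j = 0; j < 6; ++j) {
    if (next_scale != 0) {
      int delta_scale = deltas[num_deltas_read++];
      next_scale = (last_scale + delta_scale + 256) & 0xff;
    }
    scaling_list[j] = (next_scale == 0) ? last_scale : next_scale;
    last_scale = scaling_list[j];
  }
  // Prints: 16 14 14 14 14 14 -- the third delta drove next_scale to 0,
  // freezing the rest of the list and ending the reads from the stream.
  for (int j = 0; j < 6; ++j)
    std::printf("%d ", scaling_list[j]);
  std::printf("\n");
  return 0;
}
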
+
+H264Parser::Result H264Parser::ParseSPSScalingLists(H264SPS* sps) {
+ // See 7.4.2.1.1.
+ bool seq_scaling_list_present_flag;
+ bool use_default;
+ Result res;
+
+ // Parse scaling_list4x4.
+ for (int i = 0; i < 6; ++i) {
+ READ_BOOL_OR_RETURN(&seq_scaling_list_present_flag);
+
+ if (seq_scaling_list_present_flag) {
+ res = ParseScalingList(arraysize(sps->scaling_list4x4[i]),
+ sps->scaling_list4x4[i],
+ &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList4x4(i, sps->scaling_list4x4);
+
+ } else {
+ FallbackScalingList4x4(
+ i, kDefault4x4Intra, kDefault4x4Inter, sps->scaling_list4x4);
+ }
+ }
+
+ // Parse scaling_list8x8.
+ for (int i = 0; i < ((sps->chroma_format_idc != 3) ? 2 : 6); ++i) {
+ READ_BOOL_OR_RETURN(&seq_scaling_list_present_flag);
+
+ if (seq_scaling_list_present_flag) {
+ res = ParseScalingList(arraysize(sps->scaling_list8x8[i]),
+ sps->scaling_list8x8[i],
+ &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList8x8(i, sps->scaling_list8x8);
+
+ } else {
+ FallbackScalingList8x8(
+ i, kDefault8x8Intra, kDefault8x8Inter, sps->scaling_list8x8);
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParsePPSScalingLists(const H264SPS& sps,
+ H264PPS* pps) {
+ // See 7.4.2.2.
+ bool pic_scaling_list_present_flag;
+ bool use_default;
+ Result res;
+
+ for (int i = 0; i < 6; ++i) {
+ READ_BOOL_OR_RETURN(&pic_scaling_list_present_flag);
+
+ if (pic_scaling_list_present_flag) {
+ res = ParseScalingList(arraysize(pps->scaling_list4x4[i]),
+ pps->scaling_list4x4[i],
+ &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList4x4(i, pps->scaling_list4x4);
+
+ } else {
+ if (sps.seq_scaling_matrix_present_flag) {
+ // Table 7-2 fallback rule A in spec.
+ FallbackScalingList4x4(
+ i, kDefault4x4Intra, kDefault4x4Inter, pps->scaling_list4x4);
+ } else {
+ // Table 7-2 fallback rule B in spec.
+ FallbackScalingList4x4(i,
+ sps.scaling_list4x4[0],
+ sps.scaling_list4x4[3],
+ pps->scaling_list4x4);
+ }
+ }
+ }
+
+ if (pps->transform_8x8_mode_flag) {
+ for (int i = 0; i < ((sps.chroma_format_idc != 3) ? 2 : 6); ++i) {
+ READ_BOOL_OR_RETURN(&pic_scaling_list_present_flag);
+
+ if (pic_scaling_list_present_flag) {
+ res = ParseScalingList(arraysize(pps->scaling_list8x8[i]),
+ pps->scaling_list8x8[i],
+ &use_default);
+ if (res != kOk)
+ return res;
+
+ if (use_default)
+ DefaultScalingList8x8(i, pps->scaling_list8x8);
+
+ } else {
+ if (sps.seq_scaling_matrix_present_flag) {
+ // Table 7-2 fallback rule A in spec.
+ FallbackScalingList8x8(
+ i, kDefault8x8Intra, kDefault8x8Inter, pps->scaling_list8x8);
+ } else {
+ // Table 7-2 fallback rule B in spec.
+ FallbackScalingList8x8(i,
+ sps.scaling_list8x8[0],
+ sps.scaling_list8x8[1],
+ pps->scaling_list8x8);
+ }
+ }
+ }
+ }
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseAndIgnoreHRDParameters(
+ bool* hrd_parameters_present) {
+ int data;
+ READ_BOOL_OR_RETURN(&data); // {nal,vcl}_hrd_parameters_present_flag
+ if (!data)
+ return kOk;
+
+ *hrd_parameters_present = true;
+
+ int cpb_cnt_minus1;
+ READ_UE_OR_RETURN(&cpb_cnt_minus1);
+ IN_RANGE_OR_RETURN(cpb_cnt_minus1, 0, 31);
+ READ_BITS_OR_RETURN(8, &data); // bit_rate_scale, cpb_size_scale
+ for (int i = 0; i <= cpb_cnt_minus1; ++i) {
+ READ_UE_OR_RETURN(&data); // bit_rate_value_minus1[i]
+ READ_UE_OR_RETURN(&data); // cpb_size_value_minus1[i]
+ READ_BOOL_OR_RETURN(&data); // cbr_flag
+ }
+ READ_BITS_OR_RETURN(20, &data); // cpb/dpb delays, etc.
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseVUIParameters(H264SPS* sps) {
+ bool aspect_ratio_info_present_flag;
+ READ_BOOL_OR_RETURN(&aspect_ratio_info_present_flag);
+ if (aspect_ratio_info_present_flag) {
+ int aspect_ratio_idc;
+ READ_BITS_OR_RETURN(8, &aspect_ratio_idc);
+ if (aspect_ratio_idc == kExtendedSar) {
+ READ_BITS_OR_RETURN(16, &sps->sar_width);
+ READ_BITS_OR_RETURN(16, &sps->sar_height);
+ } else {
+ const int max_aspect_ratio_idc = arraysize(kTableSarWidth) - 1;
+ IN_RANGE_OR_RETURN(aspect_ratio_idc, 0, max_aspect_ratio_idc);
+ sps->sar_width = kTableSarWidth[aspect_ratio_idc];
+ sps->sar_height = kTableSarHeight[aspect_ratio_idc];
+ }
+ }
+
+ int data;
+ // Read and ignore overscan and video signal type info.
+ READ_BOOL_OR_RETURN(&data); // overscan_info_present_flag
+ if (data)
+ READ_BOOL_OR_RETURN(&data); // overscan_appropriate_flag
+
+ READ_BOOL_OR_RETURN(&data); // video_signal_type_present_flag
+ if (data) {
+ READ_BITS_OR_RETURN(3, &data); // video_format
+ READ_BOOL_OR_RETURN(&data); // video_full_range_flag
+ READ_BOOL_OR_RETURN(&data); // colour_description_present_flag
+ if (data)
+ READ_BITS_OR_RETURN(24, &data); // color description syntax elements
+ }
+
+ READ_BOOL_OR_RETURN(&data); // chroma_loc_info_present_flag
+ if (data) {
+ READ_UE_OR_RETURN(&data); // chroma_sample_loc_type_top_field
+ READ_UE_OR_RETURN(&data); // chroma_sample_loc_type_bottom_field
+ }
+
+ // Read and ignore timing info.
+ READ_BOOL_OR_RETURN(&data); // timing_info_present_flag
+ if (data) {
+ READ_BITS_OR_RETURN(16, &data); // num_units_in_tick
+ READ_BITS_OR_RETURN(16, &data); // num_units_in_tick
+ READ_BITS_OR_RETURN(16, &data); // time_scale
+ READ_BITS_OR_RETURN(16, &data); // time_scale
+ READ_BOOL_OR_RETURN(&data); // fixed_frame_rate_flag
+ }
+
+ // Read and ignore NAL HRD parameters, if present.
+ bool hrd_parameters_present = false;
+ Result res = ParseAndIgnoreHRDParameters(&hrd_parameters_present);
+ if (res != kOk)
+ return res;
+
+ // Read and ignore VCL HRD parameters, if present.
+ res = ParseAndIgnoreHRDParameters(&hrd_parameters_present);
+ if (res != kOk)
+ return res;
+
+ if (hrd_parameters_present) // One of NAL or VCL params present is enough.
+ READ_BOOL_OR_RETURN(&data); // low_delay_hrd_flag
+
+ READ_BOOL_OR_RETURN(&data); // pic_struct_present_flag
+ READ_BOOL_OR_RETURN(&sps->bitstream_restriction_flag);
+ if (sps->bitstream_restriction_flag) {
+ READ_BOOL_OR_RETURN(&data); // motion_vectors_over_pic_boundaries_flag
+ READ_UE_OR_RETURN(&data); // max_bytes_per_pic_denom
+ READ_UE_OR_RETURN(&data); // max_bits_per_mb_denom
+ READ_UE_OR_RETURN(&data); // log2_max_mv_length_horizontal
+ READ_UE_OR_RETURN(&data); // log2_max_mv_length_vertical
+ READ_UE_OR_RETURN(&sps->max_num_reorder_frames);
+ READ_UE_OR_RETURN(&sps->max_dec_frame_buffering);
+ TRUE_OR_RETURN(sps->max_dec_frame_buffering >= sps->max_num_ref_frames);
+ IN_RANGE_OR_RETURN(
+ sps->max_num_reorder_frames, 0, sps->max_dec_frame_buffering);
+ }
+
+ return kOk;
+}
+
+static void FillDefaultSeqScalingLists(H264SPS* sps) {
+ for (int i = 0; i < 6; ++i)
+ for (int j = 0; j < kH264ScalingList4x4Length; ++j)
+ sps->scaling_list4x4[i][j] = 16;
+
+ for (int i = 0; i < 6; ++i)
+ for (int j = 0; j < kH264ScalingList8x8Length; ++j)
+ sps->scaling_list8x8[i][j] = 16;
+}
+
+H264Parser::Result H264Parser::ParseSPS(int* sps_id) {
+ // See 7.4.2.1.
+ int data;
+ Result res;
+
+ *sps_id = -1;
+
+ scoped_ptr<H264SPS> sps(new H264SPS());
+
+ READ_BITS_OR_RETURN(8, &sps->profile_idc);
+ READ_BOOL_OR_RETURN(&sps->constraint_set0_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set1_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set2_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set3_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set4_flag);
+ READ_BOOL_OR_RETURN(&sps->constraint_set5_flag);
+ READ_BITS_OR_RETURN(2, &data); // reserved_zero_2bits
+ READ_BITS_OR_RETURN(8, &sps->level_idc);
+ READ_UE_OR_RETURN(&sps->seq_parameter_set_id);
+ TRUE_OR_RETURN(sps->seq_parameter_set_id < 32);
+
+ if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
+ sps->profile_idc == 122 || sps->profile_idc == 244 ||
+ sps->profile_idc == 44 || sps->profile_idc == 83 ||
+ sps->profile_idc == 86 || sps->profile_idc == 118 ||
+ sps->profile_idc == 128) {
+ READ_UE_OR_RETURN(&sps->chroma_format_idc);
+ TRUE_OR_RETURN(sps->chroma_format_idc < 4);
+
+ if (sps->chroma_format_idc == 3)
+ READ_BOOL_OR_RETURN(&sps->separate_colour_plane_flag);
+
+ READ_UE_OR_RETURN(&sps->bit_depth_luma_minus8);
+ TRUE_OR_RETURN(sps->bit_depth_luma_minus8 < 7);
+
+ READ_UE_OR_RETURN(&sps->bit_depth_chroma_minus8);
+ TRUE_OR_RETURN(sps->bit_depth_chroma_minus8 < 7);
+
+ READ_BOOL_OR_RETURN(&sps->qpprime_y_zero_transform_bypass_flag);
+ READ_BOOL_OR_RETURN(&sps->seq_scaling_matrix_present_flag);
+
+ if (sps->seq_scaling_matrix_present_flag) {
+ DVLOG(4) << "Scaling matrix present";
+ res = ParseSPSScalingLists(sps.get());
+ if (res != kOk)
+ return res;
+ } else {
+ FillDefaultSeqScalingLists(sps.get());
+ }
+ } else {
+ sps->chroma_format_idc = 1;
+ FillDefaultSeqScalingLists(sps.get());
+ }
+
+ if (sps->separate_colour_plane_flag)
+ sps->chroma_array_type = 0;
+ else
+ sps->chroma_array_type = sps->chroma_format_idc;
+
+ READ_UE_OR_RETURN(&sps->log2_max_frame_num_minus4);
+ TRUE_OR_RETURN(sps->log2_max_frame_num_minus4 < 13);
+
+ READ_UE_OR_RETURN(&sps->pic_order_cnt_type);
+ TRUE_OR_RETURN(sps->pic_order_cnt_type < 3);
+
+ sps->expected_delta_per_pic_order_cnt_cycle = 0;
+ if (sps->pic_order_cnt_type == 0) {
+ READ_UE_OR_RETURN(&sps->log2_max_pic_order_cnt_lsb_minus4);
+ TRUE_OR_RETURN(sps->log2_max_pic_order_cnt_lsb_minus4 < 13);
+ } else if (sps->pic_order_cnt_type == 1) {
+ READ_BOOL_OR_RETURN(&sps->delta_pic_order_always_zero_flag);
+ READ_SE_OR_RETURN(&sps->offset_for_non_ref_pic);
+ READ_SE_OR_RETURN(&sps->offset_for_top_to_bottom_field);
+ READ_UE_OR_RETURN(&sps->num_ref_frames_in_pic_order_cnt_cycle);
+ TRUE_OR_RETURN(sps->num_ref_frames_in_pic_order_cnt_cycle < 255);
+
+ for (int i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; ++i) {
+ READ_SE_OR_RETURN(&sps->offset_for_ref_frame[i]);
+ sps->expected_delta_per_pic_order_cnt_cycle +=
+ sps->offset_for_ref_frame[i];
+ }
+ }
+
+ READ_UE_OR_RETURN(&sps->max_num_ref_frames);
+ READ_BOOL_OR_RETURN(&sps->gaps_in_frame_num_value_allowed_flag);
+
+ if (sps->gaps_in_frame_num_value_allowed_flag)
+ return kUnsupportedStream;
+
+ READ_UE_OR_RETURN(&sps->pic_width_in_mbs_minus1);
+ READ_UE_OR_RETURN(&sps->pic_height_in_map_units_minus1);
+
+ READ_BOOL_OR_RETURN(&sps->frame_mbs_only_flag);
+ if (!sps->frame_mbs_only_flag)
+ READ_BOOL_OR_RETURN(&sps->mb_adaptive_frame_field_flag);
+
+ READ_BOOL_OR_RETURN(&sps->direct_8x8_inference_flag);
+
+ READ_BOOL_OR_RETURN(&sps->frame_cropping_flag);
+ if (sps->frame_cropping_flag) {
+ READ_UE_OR_RETURN(&sps->frame_crop_left_offset);
+ READ_UE_OR_RETURN(&sps->frame_crop_right_offset);
+ READ_UE_OR_RETURN(&sps->frame_crop_top_offset);
+ READ_UE_OR_RETURN(&sps->frame_crop_bottom_offset);
+ }
+
+ READ_BOOL_OR_RETURN(&sps->vui_parameters_present_flag);
+ if (sps->vui_parameters_present_flag) {
+ DVLOG(4) << "VUI parameters present";
+ res = ParseVUIParameters(sps.get());
+ if (res != kOk)
+ return res;
+ }
+
+ // If an SPS with the same id already exists, replace it.
+ *sps_id = sps->seq_parameter_set_id;
+ delete active_SPSes_[*sps_id];
+ active_SPSes_[*sps_id] = sps.release();
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParsePPS(int* pps_id) {
+ // See 7.4.2.2.
+ const H264SPS* sps;
+ Result res;
+
+ *pps_id = -1;
+
+ scoped_ptr<H264PPS> pps(new H264PPS());
+
+ READ_UE_OR_RETURN(&pps->pic_parameter_set_id);
+ READ_UE_OR_RETURN(&pps->seq_parameter_set_id);
+ TRUE_OR_RETURN(pps->seq_parameter_set_id < 32);
+
+ sps = GetSPS(pps->seq_parameter_set_id);
+ TRUE_OR_RETURN(sps);
+
+ READ_BOOL_OR_RETURN(&pps->entropy_coding_mode_flag);
+ READ_BOOL_OR_RETURN(&pps->bottom_field_pic_order_in_frame_present_flag);
+
+ READ_UE_OR_RETURN(&pps->num_slice_groups_minus1);
+ if (pps->num_slice_groups_minus1 > 1) {
+ DVLOG(1) << "Slice groups not supported";
+ return kUnsupportedStream;
+ }
+
+ READ_UE_OR_RETURN(&pps->num_ref_idx_l0_default_active_minus1);
+ TRUE_OR_RETURN(pps->num_ref_idx_l0_default_active_minus1 < 32);
+
+ READ_UE_OR_RETURN(&pps->num_ref_idx_l1_default_active_minus1);
+ TRUE_OR_RETURN(pps->num_ref_idx_l1_default_active_minus1 < 32);
+
+ READ_BOOL_OR_RETURN(&pps->weighted_pred_flag);
+ READ_BITS_OR_RETURN(2, &pps->weighted_bipred_idc);
+ TRUE_OR_RETURN(pps->weighted_bipred_idc < 3);
+
+ READ_SE_OR_RETURN(&pps->pic_init_qp_minus26);
+ IN_RANGE_OR_RETURN(pps->pic_init_qp_minus26, -26, 25);
+
+ READ_SE_OR_RETURN(&pps->pic_init_qs_minus26);
+ IN_RANGE_OR_RETURN(pps->pic_init_qs_minus26, -26, 25);
+
+ READ_SE_OR_RETURN(&pps->chroma_qp_index_offset);
+ IN_RANGE_OR_RETURN(pps->chroma_qp_index_offset, -12, 12);
+ pps->second_chroma_qp_index_offset = pps->chroma_qp_index_offset;
+
+ READ_BOOL_OR_RETURN(&pps->deblocking_filter_control_present_flag);
+ READ_BOOL_OR_RETURN(&pps->constrained_intra_pred_flag);
+ READ_BOOL_OR_RETURN(&pps->redundant_pic_cnt_present_flag);
+
+ if (br_.HasMoreRBSPData()) {
+ READ_BOOL_OR_RETURN(&pps->transform_8x8_mode_flag);
+ READ_BOOL_OR_RETURN(&pps->pic_scaling_matrix_present_flag);
+
+ if (pps->pic_scaling_matrix_present_flag) {
+ DVLOG(4) << "Picture scaling matrix present";
+ res = ParsePPSScalingLists(*sps, pps.get());
+ if (res != kOk)
+ return res;
+ }
+
+ READ_SE_OR_RETURN(&pps->second_chroma_qp_index_offset);
+ }
+
+ // If a PPS with the same id already exists, replace it.
+ *pps_id = pps->pic_parameter_set_id;
+ delete active_PPSes_[*pps_id];
+ active_PPSes_[*pps_id] = pps.release();
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseRefPicListModification(
+ int num_ref_idx_active_minus1,
+ H264ModificationOfPicNum* ref_list_mods) {
+ H264ModificationOfPicNum* pic_num_mod;
+
+ if (num_ref_idx_active_minus1 >= 32)
+ return kInvalidStream;
+
+ for (int i = 0; i < 32; ++i) {
+ pic_num_mod = &ref_list_mods[i];
+ READ_UE_OR_RETURN(&pic_num_mod->modification_of_pic_nums_idc);
+ TRUE_OR_RETURN(pic_num_mod->modification_of_pic_nums_idc < 4);
+
+ switch (pic_num_mod->modification_of_pic_nums_idc) {
+ case 0:
+ case 1:
+ READ_UE_OR_RETURN(&pic_num_mod->abs_diff_pic_num_minus1);
+ break;
+
+ case 2:
+ READ_UE_OR_RETURN(&pic_num_mod->long_term_pic_num);
+ break;
+
+ case 3:
+ // Per spec, list cannot be empty.
+ if (i == 0)
+ return kInvalidStream;
+ return kOk;
+
+ default:
+ return kInvalidStream;
+ }
+ }
+
+ // If we got here, we didn't get loop end marker prematurely,
+ // so make sure it is there for our client.
+ int modification_of_pic_nums_idc;
+ READ_UE_OR_RETURN(&modification_of_pic_nums_idc);
+ TRUE_OR_RETURN(modification_of_pic_nums_idc == 3);
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseRefPicListModifications(
+ H264SliceHeader* shdr) {
+ Result res;
+
+ if (!shdr->IsISlice() && !shdr->IsSISlice()) {
+ READ_BOOL_OR_RETURN(&shdr->ref_pic_list_modification_flag_l0);
+ if (shdr->ref_pic_list_modification_flag_l0) {
+ res = ParseRefPicListModification(shdr->num_ref_idx_l0_active_minus1,
+ shdr->ref_list_l0_modifications);
+ if (res != kOk)
+ return res;
+ }
+ }
+
+ if (shdr->IsBSlice()) {
+ READ_BOOL_OR_RETURN(&shdr->ref_pic_list_modification_flag_l1);
+ if (shdr->ref_pic_list_modification_flag_l1) {
+ res = ParseRefPicListModification(shdr->num_ref_idx_l1_active_minus1,
+ shdr->ref_list_l1_modifications);
+ if (res != kOk)
+ return res;
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseWeightingFactors(
+ int num_ref_idx_active_minus1,
+ int chroma_array_type,
+ int luma_log2_weight_denom,
+ int chroma_log2_weight_denom,
+ H264WeightingFactors* w_facts) {
+
+ int def_luma_weight = 1 << luma_log2_weight_denom;
+ int def_chroma_weight = 1 << chroma_log2_weight_denom;
+
+ for (int i = 0; i < num_ref_idx_active_minus1 + 1; ++i) {
+ READ_BOOL_OR_RETURN(&w_facts->luma_weight_flag);
+ if (w_facts->luma_weight_flag) {
+ READ_SE_OR_RETURN(&w_facts->luma_weight[i]);
+ IN_RANGE_OR_RETURN(w_facts->luma_weight[i], -128, 127);
+
+ READ_SE_OR_RETURN(&w_facts->luma_offset[i]);
+ IN_RANGE_OR_RETURN(w_facts->luma_offset[i], -128, 127);
+ } else {
+ w_facts->luma_weight[i] = def_luma_weight;
+ w_facts->luma_offset[i] = 0;
+ }
+
+ if (chroma_array_type != 0) {
+ READ_BOOL_OR_RETURN(&w_facts->chroma_weight_flag);
+ if (w_facts->chroma_weight_flag) {
+ for (int j = 0; j < 2; ++j) {
+ READ_SE_OR_RETURN(&w_facts->chroma_weight[i][j]);
+ IN_RANGE_OR_RETURN(w_facts->chroma_weight[i][j], -128, 127);
+
+ READ_SE_OR_RETURN(&w_facts->chroma_offset[i][j]);
+ IN_RANGE_OR_RETURN(w_facts->chroma_offset[i][j], -128, 127);
+ }
+ } else {
+ for (int j = 0; j < 2; ++j) {
+ w_facts->chroma_weight[i][j] = def_chroma_weight;
+ w_facts->chroma_offset[i][j] = 0;
+ }
+ }
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParsePredWeightTable(const H264SPS& sps,
+ H264SliceHeader* shdr) {
+ READ_UE_OR_RETURN(&shdr->luma_log2_weight_denom);
+ TRUE_OR_RETURN(shdr->luma_log2_weight_denom < 8);
+
+ if (sps.chroma_array_type != 0)
+ READ_UE_OR_RETURN(&shdr->chroma_log2_weight_denom);
+ TRUE_OR_RETURN(shdr->chroma_log2_weight_denom < 8);
+
+ Result res = ParseWeightingFactors(shdr->num_ref_idx_l0_active_minus1,
+ sps.chroma_array_type,
+ shdr->luma_log2_weight_denom,
+ shdr->chroma_log2_weight_denom,
+ &shdr->pred_weight_table_l0);
+ if (res != kOk)
+ return res;
+
+ if (shdr->IsBSlice()) {
+ res = ParseWeightingFactors(shdr->num_ref_idx_l1_active_minus1,
+ sps.chroma_array_type,
+ shdr->luma_log2_weight_denom,
+ shdr->chroma_log2_weight_denom,
+ &shdr->pred_weight_table_l1);
+ if (res != kOk)
+ return res;
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseDecRefPicMarking(H264SliceHeader* shdr) {
+ if (shdr->idr_pic_flag) {
+ READ_BOOL_OR_RETURN(&shdr->no_output_of_prior_pics_flag);
+ READ_BOOL_OR_RETURN(&shdr->long_term_reference_flag);
+ } else {
+ READ_BOOL_OR_RETURN(&shdr->adaptive_ref_pic_marking_mode_flag);
+
+ H264DecRefPicMarking* marking;
+ if (shdr->adaptive_ref_pic_marking_mode_flag) {
+ size_t i;
+ for (i = 0; i < arraysize(shdr->ref_pic_marking); ++i) {
+ marking = &shdr->ref_pic_marking[i];
+
+ READ_UE_OR_RETURN(&marking->memory_mgmnt_control_operation);
+ if (marking->memory_mgmnt_control_operation == 0)
+ break;
+
+ if (marking->memory_mgmnt_control_operation == 1 ||
+ marking->memory_mgmnt_control_operation == 3)
+ READ_UE_OR_RETURN(&marking->difference_of_pic_nums_minus1);
+
+ if (marking->memory_mgmnt_control_operation == 2)
+ READ_UE_OR_RETURN(&marking->long_term_pic_num);
+
+ if (marking->memory_mgmnt_control_operation == 3 ||
+ marking->memory_mgmnt_control_operation == 6)
+ READ_UE_OR_RETURN(&marking->long_term_frame_idx);
+
+ if (marking->memory_mgmnt_control_operation == 4)
+ READ_UE_OR_RETURN(&marking->max_long_term_frame_idx_plus1);
+
+ if (marking->memory_mgmnt_control_operation > 6)
+ return kInvalidStream;
+ }
+
+ if (i == arraysize(shdr->ref_pic_marking)) {
+ DVLOG(1) << "Ran out of dec ref pic marking fields";
+ return kUnsupportedStream;
+ }
+ }
+ }
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseSliceHeader(const H264NALU& nalu,
+ H264SliceHeader* shdr) {
+ // See 7.4.3.
+ const H264SPS* sps;
+ const H264PPS* pps;
+ Result res;
+
+ memset(shdr, 0, sizeof(*shdr));
+
+ shdr->idr_pic_flag = (nalu.nal_unit_type == 5);
+ shdr->nal_ref_idc = nalu.nal_ref_idc;
+ shdr->nalu_data = nalu.data;
+ shdr->nalu_size = nalu.size;
+
+ READ_UE_OR_RETURN(&shdr->first_mb_in_slice);
+ READ_UE_OR_RETURN(&shdr->slice_type);
+ TRUE_OR_RETURN(shdr->slice_type < 10);
+
+ READ_UE_OR_RETURN(&shdr->pic_parameter_set_id);
+
+ pps = GetPPS(shdr->pic_parameter_set_id);
+ TRUE_OR_RETURN(pps);
+
+ sps = GetSPS(pps->seq_parameter_set_id);
+ TRUE_OR_RETURN(sps);
+
+ if (sps->separate_colour_plane_flag) {
+ DVLOG(1) << "Interlaced streams not supported";
+ return kUnsupportedStream;
+ }
+
+ READ_BITS_OR_RETURN(sps->log2_max_frame_num_minus4 + 4, &shdr->frame_num);
+ if (!sps->frame_mbs_only_flag) {
+ READ_BOOL_OR_RETURN(&shdr->field_pic_flag);
+ if (shdr->field_pic_flag) {
+ DVLOG(1) << "Interlaced streams not supported";
+ return kUnsupportedStream;
+ }
+ }
+
+ if (shdr->idr_pic_flag)
+ READ_UE_OR_RETURN(&shdr->idr_pic_id);
+
+ if (sps->pic_order_cnt_type == 0) {
+ READ_BITS_OR_RETURN(sps->log2_max_pic_order_cnt_lsb_minus4 + 4,
+ &shdr->pic_order_cnt_lsb);
+ if (pps->bottom_field_pic_order_in_frame_present_flag &&
+ !shdr->field_pic_flag)
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt_bottom);
+ }
+
+ if (sps->pic_order_cnt_type == 1 && !sps->delta_pic_order_always_zero_flag) {
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt[0]);
+ if (pps->bottom_field_pic_order_in_frame_present_flag &&
+ !shdr->field_pic_flag)
+ READ_SE_OR_RETURN(&shdr->delta_pic_order_cnt[1]);
+ }
+
+ if (pps->redundant_pic_cnt_present_flag) {
+ READ_UE_OR_RETURN(&shdr->redundant_pic_cnt);
+ TRUE_OR_RETURN(shdr->redundant_pic_cnt < 128);
+ }
+
+ if (shdr->IsBSlice())
+ READ_BOOL_OR_RETURN(&shdr->direct_spatial_mv_pred_flag);
+
+ if (shdr->IsPSlice() || shdr->IsSPSlice() || shdr->IsBSlice()) {
+ READ_BOOL_OR_RETURN(&shdr->num_ref_idx_active_override_flag);
+ if (shdr->num_ref_idx_active_override_flag) {
+ READ_UE_OR_RETURN(&shdr->num_ref_idx_l0_active_minus1);
+ if (shdr->IsBSlice())
+ READ_UE_OR_RETURN(&shdr->num_ref_idx_l1_active_minus1);
+ } else {
+ shdr->num_ref_idx_l0_active_minus1 =
+ pps->num_ref_idx_l0_default_active_minus1;
+ if (shdr->IsBSlice()) {
+ shdr->num_ref_idx_l1_active_minus1 =
+ pps->num_ref_idx_l1_default_active_minus1;
+ }
+ }
+ }
+ if (shdr->field_pic_flag) {
+ TRUE_OR_RETURN(shdr->num_ref_idx_l0_active_minus1 < 32);
+ TRUE_OR_RETURN(shdr->num_ref_idx_l1_active_minus1 < 32);
+ } else {
+ TRUE_OR_RETURN(shdr->num_ref_idx_l0_active_minus1 < 16);
+ TRUE_OR_RETURN(shdr->num_ref_idx_l1_active_minus1 < 16);
+ }
+
+ if (nalu.nal_unit_type == H264NALU::kCodedSliceExtension) {
+ return kUnsupportedStream;
+ } else {
+ res = ParseRefPicListModifications(shdr);
+ if (res != kOk)
+ return res;
+ }
+
+ if ((pps->weighted_pred_flag && (shdr->IsPSlice() || shdr->IsSPSlice())) ||
+ (pps->weighted_bipred_idc == 1 && shdr->IsBSlice())) {
+ res = ParsePredWeightTable(*sps, shdr);
+ if (res != kOk)
+ return res;
+ }
+
+ if (nalu.nal_ref_idc != 0) {
+ res = ParseDecRefPicMarking(shdr);
+ if (res != kOk)
+ return res;
+ }
+
+ if (pps->entropy_coding_mode_flag && !shdr->IsISlice() &&
+ !shdr->IsSISlice()) {
+ READ_UE_OR_RETURN(&shdr->cabac_init_idc);
+ TRUE_OR_RETURN(shdr->cabac_init_idc < 3);
+ }
+
+ READ_SE_OR_RETURN(&shdr->slice_qp_delta);
+
+ if (shdr->IsSPSlice() || shdr->IsSISlice()) {
+ if (shdr->IsSPSlice())
+ READ_BOOL_OR_RETURN(&shdr->sp_for_switch_flag);
+ READ_SE_OR_RETURN(&shdr->slice_qs_delta);
+ }
+
+ if (pps->deblocking_filter_control_present_flag) {
+ READ_UE_OR_RETURN(&shdr->disable_deblocking_filter_idc);
+ TRUE_OR_RETURN(shdr->disable_deblocking_filter_idc < 3);
+
+ if (shdr->disable_deblocking_filter_idc != 1) {
+ READ_SE_OR_RETURN(&shdr->slice_alpha_c0_offset_div2);
+ IN_RANGE_OR_RETURN(shdr->slice_alpha_c0_offset_div2, -6, 6);
+
+ READ_SE_OR_RETURN(&shdr->slice_beta_offset_div2);
+ IN_RANGE_OR_RETURN(shdr->slice_beta_offset_div2, -6, 6);
+ }
+ }
+
+ if (pps->num_slice_groups_minus1 > 0) {
+ DVLOG(1) << "Slice groups not supported";
+ return kUnsupportedStream;
+ }
+
+ size_t epb = br_.NumEmulationPreventionBytesRead();
+ shdr->header_bit_size = (shdr->nalu_size - epb) * 8 - br_.NumBitsLeft();
+
+ return kOk;
+}
+
+H264Parser::Result H264Parser::ParseSEI(H264SEIMessage* sei_msg) {
+ int byte;
+
+ memset(sei_msg, 0, sizeof(*sei_msg));
+
+ READ_BITS_OR_RETURN(8, &byte);
+ while (byte == 0xff) {
+ sei_msg->type += 255;
+ READ_BITS_OR_RETURN(8, &byte);
+ }
+ sei_msg->type += byte;
+
+ READ_BITS_OR_RETURN(8, &byte);
+ while (byte == 0xff) {
+ sei_msg->payload_size += 255;
+ READ_BITS_OR_RETURN(8, &byte);
+ }
+ sei_msg->payload_size += byte;
+
+ DVLOG(4) << "Found SEI message type: " << sei_msg->type
+ << " payload size: " << sei_msg->payload_size;
+
+ switch (sei_msg->type) {
+ case H264SEIMessage::kSEIRecoveryPoint:
+ READ_UE_OR_RETURN(&sei_msg->recovery_point.recovery_frame_cnt);
+ READ_BOOL_OR_RETURN(&sei_msg->recovery_point.exact_match_flag);
+ READ_BOOL_OR_RETURN(&sei_msg->recovery_point.broken_link_flag);
+ READ_BITS_OR_RETURN(2, &sei_msg->recovery_point.changing_slice_group_idc);
+ break;
+
+ default:
+ DVLOG(4) << "Unsupported SEI message";
+ break;
+ }
+
+ return kOk;
+}
+
+} // namespace media
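ParseSEI() above reads the SEI message type and payload size with the ff-extension coding from the spec's sei_message() syntax: every 0xFF byte adds 255, and the first byte below 0xFF terminates the value. A minimal standalone sketch of just that decoding step; the helper name is hypothetical and unrelated to the patch's H264BitReader:

    #include <cstdint>
    #include <cstdio>

    // Decodes one ff-extended value as used for SEI payload type and size:
    // each 0xFF byte contributes 255 and the first non-0xFF byte ends the
    // value. |*p| is advanced past the consumed bytes; the caller must
    // guarantee the buffer is long enough (the real parser bounds-checks
    // every byte via READ_BITS_OR_RETURN).
    static int ReadFFExtended(const uint8_t** p) {
      int value = 0;
      while (**p == 0xFF) {
        value += 255;
        ++(*p);
      }
      return value + *(*p)++;
    }

    int main() {
      const uint8_t data[] = {0xFF, 0xFF, 0x04};  // 255 + 255 + 4
      const uint8_t* p = data;
      printf("decoded: %d\n", ReadFFExtended(&p));  // prints 514
      return 0;
    }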
diff --git a/chromium/media/filters/h264_parser.h b/chromium/media/filters/h264_parser.h
new file mode 100644
index 00000000000..3a60dcea650
--- /dev/null
+++ b/chromium/media/filters/h264_parser.h
@@ -0,0 +1,410 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of an H264 Annex-B video stream parser.
+
+#ifndef MEDIA_FILTERS_H264_PARSER_H_
+#define MEDIA_FILTERS_H264_PARSER_H_
+
+#include <sys/types.h>
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/filters/h264_bit_reader.h"
+
+namespace media {
+
+// For explanations of each struct and its members, see H.264 specification
+// at http://www.itu.int/rec/T-REC-H.264.
+struct MEDIA_EXPORT H264NALU {
+ H264NALU();
+
+ enum Type {
+ kUnspecified = 0,
+ kNonIDRSlice = 1,
+ kSliceDataA = 2,
+ kSliceDataB = 3,
+ kSliceDataC = 4,
+ kIDRSlice = 5,
+ kSEIMessage = 6,
+ kSPS = 7,
+ kPPS = 8,
+ kAUD = 9,
+ kEOSeq = 10,
+ kEOStream = 11,
+ kFiller = 12,
+ kSPSExt = 13,
+ kReserved14 = 14,
+ kReserved15 = 15,
+ kReserved16 = 16,
+ kReserved17 = 17,
+ kReserved18 = 18,
+ kCodedSliceAux = 19,
+ kCodedSliceExtension = 20,
+ };
+
+ // Points to the first byte past the start code (the start code itself is
+ // not included). We don't own the underlying memory, so copying this
+ // struct only makes a shallow copy of this pointer.
+ const uint8* data;
+ off_t size; // From after start code to start code of next NALU (or EOS).
+
+ int nal_ref_idc;
+ int nal_unit_type;
+};
+
+enum {
+ kH264ScalingList4x4Length = 16,
+ kH264ScalingList8x8Length = 64,
+};
+
+struct MEDIA_EXPORT H264SPS {
+ H264SPS();
+
+ int profile_idc;
+ bool constraint_set0_flag;
+ bool constraint_set1_flag;
+ bool constraint_set2_flag;
+ bool constraint_set3_flag;
+ bool constraint_set4_flag;
+ bool constraint_set5_flag;
+ int level_idc;
+ int seq_parameter_set_id;
+
+ int chroma_format_idc;
+ bool separate_colour_plane_flag;
+ int bit_depth_luma_minus8;
+ int bit_depth_chroma_minus8;
+ bool qpprime_y_zero_transform_bypass_flag;
+
+ bool seq_scaling_matrix_present_flag;
+ int scaling_list4x4[6][kH264ScalingList4x4Length];
+ int scaling_list8x8[6][kH264ScalingList8x8Length];
+
+ int log2_max_frame_num_minus4;
+ int pic_order_cnt_type;
+ int log2_max_pic_order_cnt_lsb_minus4;
+ bool delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ int num_ref_frames_in_pic_order_cnt_cycle;
+ int expected_delta_per_pic_order_cnt_cycle; // calculated
+ int offset_for_ref_frame[255];
+ int max_num_ref_frames;
+ bool gaps_in_frame_num_value_allowed_flag;
+ int pic_width_in_mbs_minus1;
+ int pic_height_in_map_units_minus1;
+ bool frame_mbs_only_flag;
+ bool mb_adaptive_frame_field_flag;
+ bool direct_8x8_inference_flag;
+ bool frame_cropping_flag;
+ int frame_crop_left_offset;
+ int frame_crop_right_offset;
+ int frame_crop_top_offset;
+ int frame_crop_bottom_offset;
+
+ bool vui_parameters_present_flag;
+ int sar_width; // Set to 0 when not specified.
+ int sar_height; // Set to 0 when not specified.
+ bool bitstream_restriction_flag;
+ int max_num_reorder_frames;
+ int max_dec_frame_buffering;
+
+ int chroma_array_type;
+};
+
+struct MEDIA_EXPORT H264PPS {
+ H264PPS();
+
+ int pic_parameter_set_id;
+ int seq_parameter_set_id;
+ bool entropy_coding_mode_flag;
+ bool bottom_field_pic_order_in_frame_present_flag;
+ int num_slice_groups_minus1;
+ // TODO(posciak): Slice groups not implemented, could be added at some point.
+ int num_ref_idx_l0_default_active_minus1;
+ int num_ref_idx_l1_default_active_minus1;
+ bool weighted_pred_flag;
+ int weighted_bipred_idc;
+ int pic_init_qp_minus26;
+ int pic_init_qs_minus26;
+ int chroma_qp_index_offset;
+ bool deblocking_filter_control_present_flag;
+ bool constrained_intra_pred_flag;
+ bool redundant_pic_cnt_present_flag;
+ bool transform_8x8_mode_flag;
+
+ bool pic_scaling_matrix_present_flag;
+ int scaling_list4x4[6][kH264ScalingList4x4Length];
+ int scaling_list8x8[6][kH264ScalingList8x8Length];
+
+ int second_chroma_qp_index_offset;
+};
+
+struct MEDIA_EXPORT H264ModificationOfPicNum {
+ int modification_of_pic_nums_idc;
+ union {
+ int abs_diff_pic_num_minus1;
+ int long_term_pic_num;
+ };
+};
+
+struct MEDIA_EXPORT H264WeightingFactors {
+ bool luma_weight_flag;
+ bool chroma_weight_flag;
+ int luma_weight[32];
+ int luma_offset[32];
+ int chroma_weight[32][2];
+ int chroma_offset[32][2];
+};
+
+struct MEDIA_EXPORT H264DecRefPicMarking {
+ int memory_mgmnt_control_operation;
+ int difference_of_pic_nums_minus1;
+ int long_term_pic_num;
+ int long_term_frame_idx;
+ int max_long_term_frame_idx_plus1;
+};
+
+struct MEDIA_EXPORT H264SliceHeader {
+ H264SliceHeader();
+
+ enum {
+ kRefListSize = 32,
+ kRefListModSize = kRefListSize
+ };
+
+ enum Type {
+ kPSlice = 0,
+ kBSlice = 1,
+ kISlice = 2,
+ kSPSlice = 3,
+ kSISlice = 4,
+ };
+
+ bool IsPSlice() const;
+ bool IsBSlice() const;
+ bool IsISlice() const;
+ bool IsSPSlice() const;
+ bool IsSISlice() const;
+
+ bool idr_pic_flag; // from NAL header
+ int nal_ref_idc; // from NAL header
+ const uint8* nalu_data; // from NAL header
+ off_t nalu_size; // from NAL header
+ off_t header_bit_size; // calculated
+
+ int first_mb_in_slice;
+ int slice_type;
+ int pic_parameter_set_id;
+ int colour_plane_id; // TODO(posciak): use this! http://crbug.com/139878
+ int frame_num;
+ bool field_pic_flag;
+ bool bottom_field_flag;
+ int idr_pic_id;
+ int pic_order_cnt_lsb;
+ int delta_pic_order_cnt_bottom;
+ int delta_pic_order_cnt[2];
+ int redundant_pic_cnt;
+ bool direct_spatial_mv_pred_flag;
+
+ bool num_ref_idx_active_override_flag;
+ int num_ref_idx_l0_active_minus1;
+ int num_ref_idx_l1_active_minus1;
+ bool ref_pic_list_modification_flag_l0;
+ bool ref_pic_list_modification_flag_l1;
+ H264ModificationOfPicNum ref_list_l0_modifications[kRefListModSize];
+ H264ModificationOfPicNum ref_list_l1_modifications[kRefListModSize];
+
+ int luma_log2_weight_denom;
+ int chroma_log2_weight_denom;
+
+ bool luma_weight_l0_flag;
+ bool chroma_weight_l0_flag;
+ H264WeightingFactors pred_weight_table_l0;
+
+ bool luma_weight_l1_flag;
+ bool chroma_weight_l1_flag;
+ H264WeightingFactors pred_weight_table_l1;
+
+ bool no_output_of_prior_pics_flag;
+ bool long_term_reference_flag;
+
+ bool adaptive_ref_pic_marking_mode_flag;
+ H264DecRefPicMarking ref_pic_marking[kRefListSize];
+
+ int cabac_init_idc;
+ int slice_qp_delta;
+ bool sp_for_switch_flag;
+ int slice_qs_delta;
+ int disable_deblocking_filter_idc;
+ int slice_alpha_c0_offset_div2;
+ int slice_beta_offset_div2;
+};
+
+struct H264SEIRecoveryPoint {
+ int recovery_frame_cnt;
+ bool exact_match_flag;
+ bool broken_link_flag;
+ int changing_slice_group_idc;
+};
+
+struct MEDIA_EXPORT H264SEIMessage {
+ H264SEIMessage();
+
+ enum Type {
+ kSEIRecoveryPoint = 6,
+ };
+
+ int type;
+ int payload_size;
+ union {
+ // Placeholder; as more SEI message types become supported, their structs
+ // will be added as further union members here.
+ H264SEIRecoveryPoint recovery_point;
+ };
+};
+
+// Class to parse an Annex-B H.264 stream,
+// as specified in chapter 7 and Annex B of the H.264 spec.
+class MEDIA_EXPORT H264Parser {
+ public:
+ enum Result {
+ kOk,
+ kInvalidStream, // error in stream
+ kUnsupportedStream, // stream not supported by the parser
+ kEOStream, // end of stream
+ };
+
+ // Find offset from start of data to next NALU start code
+ // and size of found start code (3 or 4 bytes).
+// If no start code is found, |*offset| points to the first unprocessed byte
+// (i.e. the first byte that has not yet been examined as a possible start
+// of a start code) and |*start_code_size| is set to 0.
+ // Preconditions:
+ // - |data_size| >= 0
+ // Postconditions:
+ // - |*offset| is between 0 and |data_size| included.
+ // It is strictly less than |data_size| if |data_size| > 0.
+ // - |*start_code_size| is either 0, 3 or 4.
+ static bool FindStartCode(const uint8* data, off_t data_size,
+ off_t* offset, off_t* start_code_size);
+
+ H264Parser();
+ ~H264Parser();
+
+ void Reset();
+ // Set the current stream pointer to |stream|, which is |stream_size| bytes
+ // long and remains owned by the caller.
+ void SetStream(const uint8* stream, off_t stream_size);
+
+ // Read the stream to find the next NALU, identify it and return
+ // that information in |*nalu|. This advances the stream to the beginning
+ // of this NALU, but not past it, so subsequent calls to NALU-specific
+ // parsing functions (ParseSPS, etc.) will parse this NALU.
+ // If the caller wishes to skip the current NALU, it can call this function
+ // again, instead of any NALU-type specific parse functions below.
+ Result AdvanceToNextNALU(H264NALU* nalu);
+
+ // NALU-specific parsing functions.
+ // These should be called after AdvanceToNextNALU().
+
+ // SPSes and PPSes are owned by the parser class and the memory for their
+ // structures is managed here, not by the caller, as they are reused
+ // across NALUs.
+ //
+ // Parse an SPS/PPS NALU and save their data in the parser, returning id
+ // of the parsed structure in |*pps_id|/|*sps_id|.
+ // To get a pointer to a given SPS/PPS structure, use GetSPS()/GetPPS(),
+ // passing the returned |*sps_id|/|*pps_id| as parameter.
+ // TODO(posciak,fischman): consider replacing returning Result from Parse*()
+ // methods with a scoped_ptr and adding an AtEOS() function to check for EOS
+ // if Parse*() return NULL.
+ Result ParseSPS(int* sps_id);
+ Result ParsePPS(int* pps_id);
+
+ // Return a pointer to SPS/PPS with given |sps_id|/|pps_id| or NULL if not
+ // present.
+ const H264SPS* GetSPS(int sps_id);
+ const H264PPS* GetPPS(int pps_id);
+
+ // Slice headers and SEI messages are not used across NALUs by the parser
+ // and can be discarded after the current NALU, so the parser does not
+ // store them, nor does it manage their memory.
+ // The caller has to provide and manage that memory instead.
+
+ // Parse a slice header, returning it in |*shdr|. |*nalu| must be set to
+ // the NALU returned from AdvanceToNextNALU() and corresponding to |*shdr|.
+ Result ParseSliceHeader(const H264NALU& nalu, H264SliceHeader* shdr);
+
+ // Parse a SEI message, returning it in |*sei_msg|, provided and managed
+ // by the caller.
+ Result ParseSEI(H264SEIMessage* sei_msg);
+
+ private:
+ // Move the stream pointer to the beginning of the next NALU,
+ // i.e. pointing at the next start code.
+ // Return true if a NALU has been found.
+ // If a NALU is found:
+ // - its size in bytes is returned in |*nalu_size| and includes
+ // the start code as well as the trailing zero bits.
+ // - the size in bytes of the start code is returned in |*start_code_size|.
+ bool LocateNALU(off_t* nalu_size, off_t* start_code_size);
+
+ // Exp-Golomb code parsing as specified in chapter 9.1 of the spec.
+ // Read one unsigned exp-Golomb code from the stream and return in |*val|.
+ Result ReadUE(int* val);
+
+ // Read one signed exp-Golomb code from the stream and return in |*val|.
+ Result ReadSE(int* val);
+
+ // Parse scaling lists (see spec).
+ Result ParseScalingList(int size, int* scaling_list, bool* use_default);
+ Result ParseSPSScalingLists(H264SPS* sps);
+ Result ParsePPSScalingLists(const H264SPS& sps, H264PPS* pps);
+
+ // Parse optional VUI parameters in SPS (see spec).
+ Result ParseVUIParameters(H264SPS* sps);
+ // Set |hrd_parameters_present| to true only if they are present.
+ Result ParseAndIgnoreHRDParameters(bool* hrd_parameters_present);
+
+ // Parse reference picture lists' modifications (see spec).
+ Result ParseRefPicListModifications(H264SliceHeader* shdr);
+ Result ParseRefPicListModification(int num_ref_idx_active_minus1,
+ H264ModificationOfPicNum* ref_list_mods);
+
+ // Parse prediction weight table (see spec).
+ Result ParsePredWeightTable(const H264SPS& sps, H264SliceHeader* shdr);
+
+ // Parse weighting factors (see spec).
+ Result ParseWeightingFactors(int num_ref_idx_active_minus1,
+ int chroma_array_type,
+ int luma_log2_weight_denom,
+ int chroma_log2_weight_denom,
+ H264WeightingFactors* w_facts);
+
+ // Parse decoded reference picture marking information (see spec).
+ Result ParseDecRefPicMarking(H264SliceHeader* shdr);
+
+ // Pointer to the current NALU in the stream.
+ const uint8* stream_;
+
+ // Bytes left in the stream after the current NALU.
+ off_t bytes_left_;
+
+ H264BitReader br_;
+
+ // PPSes and SPSes stored for future reference.
+ typedef std::map<int, H264SPS*> SPSById;
+ typedef std::map<int, H264PPS*> PPSById;
+ SPSById active_SPSes_;
+ PPSById active_PPSes_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264Parser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_H264_PARSER_H_
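The ReadUE()/ReadSE() methods declared above implement the Exp-Golomb codes of chapter 9.1: count leading zero bits, read that many further bits, and map the resulting code number to an unsigned or signed value. A self-contained sketch of the two mappings, using a toy MSB-first bit reader instead of H264BitReader (which additionally strips emulation prevention bytes) and assuming well-formed input:

    #include <cstdint>
    #include <cstdio>

    // Minimal MSB-first bit reader over a byte buffer (illustration only).
    struct BitReader {
      const uint8_t* data;
      size_t bit_pos;
      int ReadBit() {
        int bit = (data[bit_pos / 8] >> (7 - bit_pos % 8)) & 1;
        ++bit_pos;
        return bit;
      }
    };

    // ue(v): n leading zero bits, then value = 2^n - 1 + the next n bits.
    int ReadUE(BitReader* br) {
      int zeros = 0;
      while (br->ReadBit() == 0)
        ++zeros;
      int value = (1 << zeros) - 1;
      for (int i = 0; i < zeros; ++i)
        value += br->ReadBit() << (zeros - 1 - i);
      return value;
    }

    // se(v): map code number k to (-1)^(k+1) * ceil(k / 2),
    // i.e. 0, 1, -1, 2, -2, ...
    int ReadSE(BitReader* br) {
      int k = ReadUE(br);
      return (k & 1) ? (k + 1) / 2 : -(k / 2);
    }

    int main() {
      const uint8_t buf[] = {0x38};  // bits: 0011 1000
      BitReader br = {buf, 0};
      printf("ue = %d\n", ReadUE(&br));   // 2^2 - 1 + 0b11 = 6
      BitReader br2 = {buf, 0};
      printf("se = %d\n", ReadSE(&br2));  // code number 6 maps to -3
      return 0;
    }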
diff --git a/chromium/media/filters/h264_parser_unittest.cc b/chromium/media/filters/h264_parser_unittest.cc
new file mode 100644
index 00000000000..a08cf26a156
--- /dev/null
+++ b/chromium/media/filters/h264_parser_unittest.cc
@@ -0,0 +1,72 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/h264_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(H264ParserTest, StreamFileParsing) {
+ base::FilePath file_path = GetTestDataFilePath("test-25fps.h264");
+ // Number of NALUs in the test stream to be parsed.
+ int num_nalus = 759;
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ H264Parser parser;
+ parser.SetStream(stream.data(), stream.length());
+
+ // Parse until the end of stream/unsupported stream/error in stream is found.
+ int num_parsed_nalus = 0;
+ while (true) {
+ media::H264SliceHeader shdr;
+ media::H264SEIMessage sei_msg;
+ H264NALU nalu;
+ H264Parser::Result res = parser.AdvanceToNextNALU(&nalu);
+ if (res == H264Parser::kEOStream) {
+ DVLOG(1) << "Number of successfully parsed NALUs before EOS: "
+ << num_parsed_nalus;
+ ASSERT_EQ(num_nalus, num_parsed_nalus);
+ return;
+ }
+ ASSERT_EQ(res, H264Parser::kOk);
+
+ ++num_parsed_nalus;
+
+ int id;
+ switch (nalu.nal_unit_type) {
+ case H264NALU::kIDRSlice:
+ case H264NALU::kNonIDRSlice:
+ ASSERT_EQ(parser.ParseSliceHeader(nalu, &shdr), H264Parser::kOk);
+ break;
+
+ case H264NALU::kSPS:
+ ASSERT_EQ(parser.ParseSPS(&id), H264Parser::kOk);
+ break;
+
+ case H264NALU::kPPS:
+ ASSERT_EQ(parser.ParsePPS(&id), H264Parser::kOk);
+ break;
+
+ case H264NALU::kSEIMessage:
+ ASSERT_EQ(parser.ParseSEI(&sei_msg), H264Parser::kOk);
+ break;
+
+ default:
+ // Skip unsupported NALU.
+ DVLOG(4) << "Skipping unsupported NALU";
+ break;
+ }
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/filters/h264_to_annex_b_bitstream_converter.cc b/chromium/media/filters/h264_to_annex_b_bitstream_converter.cc
index fc45607a0b1..49456dd3372 100644
--- a/chromium/media/filters/h264_to_annex_b_bitstream_converter.cc
+++ b/chromium/media/filters/h264_to_annex_b_bitstream_converter.cc
@@ -5,10 +5,13 @@
#include "media/filters/h264_to_annex_b_bitstream_converter.h"
#include "base/logging.h"
+#include "media/filters/h264_parser.h"
+#include "media/formats/mp4/box_definitions.h"
namespace media {
static const uint8 kStartCodePrefix[3] = {0, 0, 1};
+static const uint32 kParamSetStartCodeSize = 1 + sizeof(kStartCodePrefix);
// Helper function which determines whether NAL unit of given type marks
// access unit boundary.
@@ -33,80 +36,55 @@ H264ToAnnexBBitstreamConverter::H264ToAnnexBBitstreamConverter()
H264ToAnnexBBitstreamConverter::~H264ToAnnexBBitstreamConverter() {}
-uint32 H264ToAnnexBBitstreamConverter::ParseConfigurationAndCalculateSize(
+bool H264ToAnnexBBitstreamConverter::ParseConfiguration(
const uint8* configuration_record,
- uint32 configuration_record_size) {
- // FFmpeg's AVCodecContext's extradata field contains the Decoder Specific
- // Information from MP4 headers that contain the H.264 SPS and PPS members.
- // ISO 14496-15 Chapter 5.2.4 AVCDecoderConfigurationRecord.
- // AVCConfigurationRecord must be at least 7 bytes long.
- if (configuration_record == NULL || configuration_record_size < 7) {
- return 0; // Error: invalid input
- }
- const uint8* decoder_configuration = configuration_record;
- uint32 parameter_set_size_bytes = 0;
-
- // We can skip the four first bytes as they're only profile information
- decoder_configuration += 4;
- // Fifth byte's two LSBs contain the interleaving field's size minus one
- uint8 size_of_len_field = (*decoder_configuration & 0x3) + 1;
- if (size_of_len_field != 1 && size_of_len_field != 2 &&
- size_of_len_field != 4) {
- return 0; // Error: invalid input, NAL unit field len is not correct
- }
- decoder_configuration++;
- // Sixth byte's five LSBs contain the number of SPSs
- uint8 sps_count = *decoder_configuration & 0x1F;
- decoder_configuration++;
- // Then we have N * SPS's with two byte length field and actual SPS
- while (sps_count-- > 0) {
- if ((decoder_configuration - configuration_record) + 2 >
- static_cast<int32>(configuration_record_size)) {
- return 0; // Error: ran out of data
- }
- uint16 sps_len = decoder_configuration[0] << 8 | decoder_configuration[1];
- decoder_configuration += 2;
- // write the SPS to output, always with zero byte + start code prefix
- parameter_set_size_bytes += 1 + sizeof(kStartCodePrefix);
- decoder_configuration += sps_len;
- parameter_set_size_bytes += sps_len;
- }
- // Then we have the numner of pps in one byte
- uint8 pps_count = *decoder_configuration;
- decoder_configuration++;
- // And finally, we have N * PPS with two byte length field and actual PPS
- while (pps_count-- > 0) {
- if ((decoder_configuration - configuration_record) + 2 >
- static_cast<int32>(configuration_record_size)) {
- return 0; // Error: ran out of data
- }
- uint16 pps_len = decoder_configuration[0] << 8 | decoder_configuration[1];
- decoder_configuration += 2;
- // write the SPS to output, always with zero byte + start code prefix
- parameter_set_size_bytes += 1 + sizeof(kStartCodePrefix);
- decoder_configuration += pps_len;
- parameter_set_size_bytes += pps_len;
- }
+ int configuration_record_size,
+ mp4::AVCDecoderConfigurationRecord* avc_config) {
+ DCHECK(configuration_record);
+ DCHECK_GT(configuration_record_size, 0);
+ DCHECK(avc_config);
+
+ if (!avc_config->Parse(configuration_record, configuration_record_size))
+ return false; // Error: invalid input
+
// We're done processing the AVCDecoderConfigurationRecord,
// store the needed information for parsing actual payload
- nal_unit_length_field_width_ = size_of_len_field;
+ nal_unit_length_field_width_ = avc_config->length_size;
configuration_processed_ = true;
- return parameter_set_size_bytes;
+ return true;
+}
+
+uint32 H264ToAnnexBBitstreamConverter::GetConfigSize(
+ const mp4::AVCDecoderConfigurationRecord& avc_config) const {
+ uint32 config_size = 0;
+
+ for (size_t i = 0; i < avc_config.sps_list.size(); ++i)
+ config_size += kParamSetStartCodeSize + avc_config.sps_list[i].size();
+
+ for (size_t i = 0; i < avc_config.pps_list.size(); ++i)
+ config_size += kParamSetStartCodeSize + avc_config.pps_list[i].size();
+
+ return config_size;
}
uint32 H264ToAnnexBBitstreamConverter::CalculateNeededOutputBufferSize(
const uint8* input,
- uint32 input_size) const {
+ uint32 input_size,
+ const mp4::AVCDecoderConfigurationRecord* avc_config) const {
uint32 output_size = 0;
uint32 data_left = input_size;
bool first_nal_in_this_access_unit = first_nal_unit_in_access_unit_;
- if (input == NULL || input_size == 0) {
+ if (input_size == 0)
return 0; // Error: invalid input data
- }
+
if (!configuration_processed_) {
return 0; // Error: configuration not handled, we don't know nal unit width
}
+
+ if (avc_config)
+ output_size += GetConfigSize(*avc_config);
+
CHECK(nal_unit_length_field_width_ == 1 ||
nal_unit_length_field_width_ == 2 ||
nal_unit_length_field_width_ == 4);
@@ -152,93 +130,37 @@ uint32 H264ToAnnexBBitstreamConverter::CalculateNeededOutputBufferSize(
}
bool H264ToAnnexBBitstreamConverter::ConvertAVCDecoderConfigToByteStream(
- const uint8* input,
- uint32 input_size,
+ const mp4::AVCDecoderConfigurationRecord& avc_config,
uint8* output,
uint32* output_size) {
- uint8* outscan = output;
- // FFmpeg's AVCodecContext's extradata field contains the Decoder Specific
- // Information from MP4 headers that contain the H.264 SPS and PPS members.
- // ISO 14496-15 Chapter 5.2.4 AVCDecoderConfigurationRecord.
- const uint8* decoder_configuration = input;
- uint32 decoderconfiguration_size = input_size;
- uint32 out_size = 0;
-
- if (decoder_configuration == NULL || decoderconfiguration_size == 0) {
- return 0; // Error: input invalid
+ uint8* out = output;
+ uint32 out_size = *output_size;
+ *output_size = 0;
+ for (size_t i = 0; i < avc_config.sps_list.size(); ++i) {
+ if (!WriteParamSet(avc_config.sps_list[i], &out, &out_size))
+ return false;
}
- // We can skip the four first bytes as they're only profile information.
- decoder_configuration += 4;
- // Fifth byte's two LSBs contain the interleaving field's size minus one
- uint8 size_of_len_field = (*decoder_configuration & 0x3) + 1;
- if (size_of_len_field != 1 && size_of_len_field != 2 &&
- size_of_len_field != 4) {
- return 0; // Error: invalid input, NAL unit field len is not correct
- }
- decoder_configuration++;
- // Sixth byte's five LSBs contain the number of SPSs
- uint8 sps_count = *decoder_configuration & 0x1F;
- decoder_configuration++;
- // Then we have N * SPS's with two byte length field and actual SPS
- while (sps_count-- > 0) {
- uint16 sps_len = decoder_configuration[0] << 8 |
- decoder_configuration[1];
- decoder_configuration += 2;
- if (out_size + 1 + sizeof(kStartCodePrefix) + sps_len >
- *output_size) {
- *output_size = 0;
- return 0; // too small output buffer;
- }
- // write the SPS to output, always with zero byte + start code prefix
- *outscan = 0; // zero byte
- outscan += 1;
- memcpy(outscan, kStartCodePrefix, sizeof(kStartCodePrefix));
- outscan += sizeof(kStartCodePrefix);
- memcpy(outscan, decoder_configuration, sps_len);
- decoder_configuration += sps_len;
- outscan += sps_len;
- out_size += 1 + sizeof(kStartCodePrefix) + sps_len;
+ for (size_t i = 0; i < avc_config.pps_list.size(); ++i) {
+ if (!WriteParamSet(avc_config.pps_list[i], &out, &out_size))
+ return false;
}
- // Then we have the numner of pps in one byte
- uint8 pps_count = *decoder_configuration;
- decoder_configuration++;
- // And finally, we have N * PPS with two byte length field and actual PPS
- while (pps_count-- > 0) {
- uint16 pps_len = decoder_configuration[0] << 8 | decoder_configuration[1];
- decoder_configuration += 2;
- if (out_size + 1 + sizeof(kStartCodePrefix) + pps_len >
- *output_size) {
- *output_size = 0;
- return 0; // too small output buffer;
- }
- // write the SPS to output, always with zero byte + start code prefix
- *outscan = 0; // zero byte
- outscan += 1;
- memcpy(outscan, kStartCodePrefix, sizeof(kStartCodePrefix));
- outscan += sizeof(kStartCodePrefix);
- memcpy(outscan, decoder_configuration, pps_len);
- decoder_configuration += pps_len;
- outscan += pps_len;
- out_size += 1 + sizeof(kStartCodePrefix) + pps_len;
- }
- // We're done processing the AVCDecoderConfigurationRecord, store the needed
- // information
- nal_unit_length_field_width_ = size_of_len_field;
+
+ nal_unit_length_field_width_ = avc_config.length_size;
configuration_processed_ = true;
- *output_size = out_size;
+ *output_size = out - output;
return true;
}
bool H264ToAnnexBBitstreamConverter::ConvertNalUnitStreamToByteStream(
const uint8* input, uint32 input_size,
+ const mp4::AVCDecoderConfigurationRecord* avc_config,
uint8* output, uint32* output_size) {
const uint8* inscan = input; // We read the input from here progressively
uint8* outscan = output; // We write the output to here progressively
uint32 data_left = input_size;
- if (inscan == NULL || input_size == 0 ||
- outscan == NULL || *output_size == 0) {
+ if (input_size == 0 || *output_size == 0) {
*output_size = 0;
return false; // Error: invalid input
}
@@ -249,6 +171,7 @@ bool H264ToAnnexBBitstreamConverter::ConvertNalUnitStreamToByteStream(
nal_unit_length_field_width_ == 4);
// Do the actual conversion for the actual input packet
+ int nal_unit_count = 0;
while (data_left > 0) {
uint8 i;
uint32 nal_unit_length;
@@ -269,6 +192,30 @@ bool H264ToAnnexBBitstreamConverter::ConvertNalUnitStreamToByteStream(
return false; // Error: not enough data for correct conversion
}
+ // The five least significant bits of the first NAL unit byte signify
+ // nal_unit_type.
+ int nal_unit_type = *inscan & 0x1F;
+ nal_unit_count++;
+
+ // Insert the config after the AUD if an AUD is the first NAL unit or
+ // before all NAL units if the first one isn't an AUD.
+ if (avc_config &&
+ (nal_unit_type != H264NALU::kAUD || nal_unit_count > 1)) {
+ uint32 output_bytes_used = outscan - output;
+
+ DCHECK_GE(*output_size, output_bytes_used);
+
+ uint32 config_size = *output_size - output_bytes_used;
+ if (!ConvertAVCDecoderConfigToByteStream(*avc_config,
+ outscan,
+ &config_size)) {
+ DVLOG(1) << "Failed to insert parameter sets.";
+ *output_size = 0;
+ return false; // Failed to convert the buffer.
+ }
+ outscan += config_size;
+ avc_config = NULL;
+ }
uint32 start_code_len;
first_nal_unit_in_access_unit_ ?
start_code_len = sizeof(kStartCodePrefix) + 1 :
@@ -279,10 +226,6 @@ bool H264ToAnnexBBitstreamConverter::ConvertNalUnitStreamToByteStream(
return false; // Error: too small output buffer
}
- // Five least significant bits of first NAL unit byte signify
- // nal_unit_type.
- int nal_unit_type = *inscan & 0x1F;
-
// Check if this packet marks access unit boundary by checking the
// packet type.
if (IsAccessUnitBoundaryNal(nal_unit_type)) {
@@ -313,4 +256,30 @@ bool H264ToAnnexBBitstreamConverter::ConvertNalUnitStreamToByteStream(
return true;
}
+bool H264ToAnnexBBitstreamConverter::WriteParamSet(
+ const std::vector<uint8>& param_set,
+ uint8** out,
+ uint32* out_size) const {
+ uint32 bytes_left = *out_size;
+ if (bytes_left < kParamSetStartCodeSize ||
+ bytes_left - kParamSetStartCodeSize < param_set.size()) {
+ return false;
+ }
+
+ uint8* start = *out;
+ uint8* buf = start;
+
+ // Write the 4-byte Annex B start code.
+ *buf++ = 0; // zero byte
+ memcpy(buf, kStartCodePrefix, sizeof(kStartCodePrefix));
+ buf += sizeof(kStartCodePrefix);
+
+ memcpy(buf, &param_set[0], param_set.size());
+ buf += param_set.size();
+
+ *out = buf;
+ *out_size -= buf - start;
+ return true;
+}
+
} // namespace media
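The conversion above boils down to replacing each NAL unit's big-endian length field (1, 2 or 4 bytes wide, per the AVCDecoderConfigurationRecord) with an Annex B start code. A stripped-down sketch of that core loop, leaving out the parameter-set insertion and the per-access-unit zero-byte logic the real converter handles (names are illustrative; the real code also writes 3-byte prefixes where allowed):

    #include <cstdint>
    #include <vector>

    static const uint8_t kStartCode[] = {0, 0, 0, 1};

    // Converts a buffer of length-prefixed NAL units to Annex B form by
    // replacing each |length_size|-byte big-endian length field with a
    // 4-byte start code. Returns an empty vector on malformed input.
    std::vector<uint8_t> LengthPrefixedToAnnexB(const uint8_t* in,
                                                size_t in_size,
                                                int length_size) {
      std::vector<uint8_t> out;
      size_t pos = 0;
      while (pos + length_size <= in_size) {
        uint32_t nal_size = 0;
        for (int i = 0; i < length_size; ++i)
          nal_size = (nal_size << 8) | in[pos + i];
        pos += length_size;
        if (nal_size > in_size - pos)
          return {};  // Error: NAL unit runs past the end of the input.
        out.insert(out.end(), kStartCode, kStartCode + sizeof(kStartCode));
        out.insert(out.end(), in + pos, in + pos + nal_size);
        pos += nal_size;
      }
      // Trailing bytes that are too short to hold a length field are an error.
      return pos == in_size ? out : std::vector<uint8_t>();
    }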
diff --git a/chromium/media/filters/h264_to_annex_b_bitstream_converter.h b/chromium/media/filters/h264_to_annex_b_bitstream_converter.h
index afb204af98d..b6a27d0606a 100644
--- a/chromium/media/filters/h264_to_annex_b_bitstream_converter.h
+++ b/chromium/media/filters/h264_to_annex_b_bitstream_converter.h
@@ -5,11 +5,17 @@
#ifndef MEDIA_FILTERS_H264_TO_ANNEX_B_BITSTREAM_CONVERTER_H_
#define MEDIA_FILTERS_H264_TO_ANNEX_B_BITSTREAM_CONVERTER_H_
+#include <vector>
+
#include "base/basictypes.h"
#include "media/base/media_export.h"
namespace media {
+namespace mp4 {
+struct AVCDecoderConfigurationRecord;
+}
+
// H264ToAnnexBBitstreamConverter is a class to convert H.264 bitstream from
// MP4 format (as specified in ISO/IEC 14496-15) into H.264 bytestream
// (as specified in ISO/IEC 14496-10 Annex B).
@@ -27,88 +33,112 @@ class MEDIA_EXPORT H264ToAnnexBBitstreamConverter {
// Pointer to buffer containing AVCDecoderConfigurationRecord.
// configuration_record_size
// Size of the buffer in bytes.
+ // avc_config
+ // Pointer to place the parsed AVCDecoderConfigurationRecord data into.
//
// Returns
- // Required buffer size for AVCDecoderConfigurationRecord when converted
- // to bytestream format, or 0 if could not determine the configuration
- // from the input buffer.
- uint32 ParseConfigurationAndCalculateSize(const uint8* configuration_record,
- uint32 configuration_record_size);
+ // Returns true if |configuration_record| was successfully parsed. False
+ // is returned if a parsing error occurred.
+ // |avc_config| only contains valid data when true is returned.
+ bool ParseConfiguration(
+ const uint8* configuration_record,
+ int configuration_record_size,
+ mp4::AVCDecoderConfigurationRecord* avc_config);
+
+ // Returns the buffer size needed to store the parameter sets in |avc_config|
+ // in Annex B form.
+ uint32 GetConfigSize(
+ const mp4::AVCDecoderConfigurationRecord& avc_config) const;
// Calculates needed buffer size for the bitstream converted into bytestream.
// Lightweight implementation that does not do the actual conversion.
//
// Parameters
- // configuration_record
- // Pointer to buffer containing AVCDecoderConfigurationRecord.
- // configuration_record_size
+ // input
+ // Pointer to buffer containing NAL units in MP4 format.
+ // input_size
// Size of the buffer in bytes.
+ // avc_config
+ // The AVCDecoderConfigurationRecord that contains the parameter sets that
+ // will be inserted into the output. NULL if no parameter sets need to be
+ // inserted.
//
// Returns
- // Required buffer size for the input NAL unit buffer when converted
- // to bytestream format, or 0 if could not determine the configuration
- // from the input buffer.
- uint32 CalculateNeededOutputBufferSize(const uint8* input,
- uint32 input_size) const;
+ // Required buffer size for the output NAL unit buffer when converted
+ // to bytestream format, or 0 if the size could not be determined from
+ // the data in |input| and |avc_config|.
+ uint32 CalculateNeededOutputBufferSize(
+ const uint8* input,
+ uint32 input_size,
+ const mp4::AVCDecoderConfigurationRecord* avc_config) const;
// ConvertAVCDecoderConfigToByteStream converts the
// AVCDecoderConfigurationRecord from the MP4 headers to bytestream format.
// Client is responsible for making sure the output buffer is large enough
// to hold the output data. Client can precalculate the needed output buffer
- // size by using ParseConfigurationAndCalculateSize.
- //
- // In case of failed conversion object H264BitstreamConverter may have written
- // some bytes to buffer pointed by pinput but user should ignore those bytes.
- // None of the outputs should be considered valid.
+ // size by using GetConfigSize().
//
// Parameters
- // pinput
- // Pointer to buffer containing AVCDecoderConfigurationRecord.
- // input_size
- // Size of the buffer in bytes.
- // poutput
+ // avc_config
+ // The AVCDecoderConfigurationRecord that contains the parameter sets that
+ // will be written to |output|.
+ // output
// Pointer to buffer where the output should be written to.
- // poutput_size (i/o)
+ // output_size (i/o)
// Pointer to the size of the output buffer. Will contain the number of
// bytes written to output after successful call.
//
// Returns
- // true if successful conversion
- // false if conversion not successful (poutput_size will hold the amount
+ // true if successful conversion
+ // false if conversion not successful (|output_size| will hold the amount
// of converted data)
- bool ConvertAVCDecoderConfigToByteStream(const uint8* input,
- uint32 input_size,
- uint8* output,
- uint32* output_size);
+ bool ConvertAVCDecoderConfigToByteStream(
+ const mp4::AVCDecoderConfigurationRecord& avc_config,
+ uint8* output,
+ uint32* output_size);
// ConvertNalUnitStreamToByteStream converts the NAL unit from MP4 format
// to bytestream format. Client is responsible for making sure the output
// buffer is large enough to hold the output data. Client can precalculate the
// needed output buffer size by using CalculateNeededOutputBufferSize.
//
- // In case of failed conversion object H264BitstreamConverter may have written
- // some bytes to buffer pointed by pinput but user should ignore those bytes.
- // None of the outputs should be considered valid.
- //
// Parameters
- // pinput
- // Pointer to buffer containing AVCDecoderConfigurationRecord.
+ // input
+ // Pointer to buffer containing NAL units in MP4 format.
// input_size
// Size of the buffer in bytes.
- // poutput
+ // avc_config
+ // The AVCDecoderConfigurationRecord that contains the parameter sets to
+ // insert into the output. NULL if no parameter sets need to be inserted.
+ // output
// Pointer to buffer where the output should be written to.
- // poutput_size (i/o)
+ // output_size (i/o)
// Pointer to the size of the output buffer. Will contain the number of
// bytes written to output after successful call.
//
// Returns
// true if successful conversion
- // false if conversion not successful (poutput_size will hold the amount
+ // false if conversion not successful (|output_size| will hold the amount
// of converted data)
- bool ConvertNalUnitStreamToByteStream(const uint8* input, uint32 input_size,
- uint8* output, uint32* output_size);
+ bool ConvertNalUnitStreamToByteStream(
+ const uint8* input,
+ uint32 input_size,
+ const mp4::AVCDecoderConfigurationRecord* avc_config,
+ uint8* output,
+ uint32* output_size);
private:
+ // Writes the Annex B start code and |param_set| to |*out|.
+ // |*out| - Memory location to write the parameter set to.
+ // |*out_size| - Number of bytes available for the parameter set.
+ // Returns true if the start code and param set were successfully
+ // written. On a successful write, |*out| is updated to point to the first
+ // byte after the data that was written. |*out_size| is updated to reflect
+ // the new number of bytes left in |*out|.
+ bool WriteParamSet(const std::vector<uint8>& param_set,
+ uint8** out,
+ uint32* out_size) const;
+
// Flag for indicating whether global parameter sets have been processed.
bool configuration_processed_;
// Flag for indicating whether next NAL unit starts new access unit.
@@ -122,4 +152,3 @@ class MEDIA_EXPORT H264ToAnnexBBitstreamConverter {
} // namespace media
#endif // MEDIA_FILTERS_H264_TO_ANNEX_B_BITSTREAM_CONVERTER_H_
-
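The header above replaces the old parse-and-measure entry point with a three-step flow. A sketch of the intended call sequence (assuming Chromium-era scoped_ptr and uint8/uint32 typedefs; the hypothetical ConvertSample() wrapper and its buffer arguments come from the caller), mirroring what the updated unit tests below exercise:

    #include "base/memory/scoped_ptr.h"
    #include "media/filters/h264_to_annex_b_bitstream_converter.h"
    #include "media/formats/mp4/box_definitions.h"

    // |extra_data| holds the AVCDecoderConfigurationRecord; |input| holds a
    // length-prefixed NAL unit stream for one sample.
    bool ConvertSample(const uint8* extra_data, int extra_data_size,
                       const uint8* input, uint32 input_size,
                       scoped_ptr<uint8[]>* output, uint32* bytes_written) {
      media::H264ToAnnexBBitstreamConverter converter;
      media::mp4::AVCDecoderConfigurationRecord avc_config;
      if (!converter.ParseConfiguration(extra_data, extra_data_size,
                                        &avc_config))
        return false;  // Invalid configuration record.

      // Passing |&avc_config| makes the converter insert the SPS/PPS
      // parameter sets into the converted stream, so no separate
      // ConvertAVCDecoderConfigToByteStream() call is needed here.
      uint32 output_size = converter.CalculateNeededOutputBufferSize(
          input, input_size, &avc_config);
      if (output_size == 0)
        return false;
      output->reset(new uint8[output_size]);
      *bytes_written = output_size;
      return converter.ConvertNalUnitStreamToByteStream(
          input, input_size, &avc_config, output->get(), bytes_written);
    }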
diff --git a/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc b/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc
index a921e6ed6a2..46f3d7b01a0 100644
--- a/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc
+++ b/chromium/media/filters/h264_to_annex_b_bitstream_converter_unittest.cc
@@ -4,6 +4,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/filters/h264_to_annex_b_bitstream_converter.h"
+#include "media/formats/mp4/box_definitions.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -14,6 +15,9 @@ class H264ToAnnexBBitstreamConverterTest : public testing::Test {
virtual ~H264ToAnnexBBitstreamConverterTest() {}
+ protected:
+ mp4::AVCDecoderConfigurationRecord avc_config_;
+
private:
DISALLOW_COPY_AND_ASSIGN(H264ToAnnexBBitstreamConverterTest);
};
@@ -269,65 +273,38 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, Success) {
H264ToAnnexBBitstreamConverter converter;
// Parse the headers.
- uint32 config_size = converter.ParseConfigurationAndCalculateSize(
+ EXPECT_TRUE(converter.ParseConfiguration(
kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4));
+ sizeof(kHeaderDataOkWithFieldLen4),
+ &avc_config_));
+ uint32 config_size = converter.GetConfigSize(avc_config_);
EXPECT_GT(config_size, 0U);
// Go on with converting the headers.
output.reset(new uint8[config_size]);
EXPECT_TRUE(output.get() != NULL);
EXPECT_TRUE(converter.ConvertAVCDecoderConfigToByteStream(
- kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4),
- output.get(),
- &config_size));
+ avc_config_,
+ output.get(),
+ &config_size));
// Calculate buffer size for actual NAL unit.
uint32 output_size = converter.CalculateNeededOutputBufferSize(
kPacketDataOkWithFieldLen4,
- sizeof(kPacketDataOkWithFieldLen4));
+ sizeof(kPacketDataOkWithFieldLen4),
+ &avc_config_);
EXPECT_GT(output_size, 0U);
- output_size += config_size;
output.reset(new uint8[output_size]);
EXPECT_TRUE(output.get() != NULL);
- uint32 output_size_left_for_nal_unit = output_size - config_size;
+ uint32 output_size_left_for_nal_unit = output_size;
// Do the conversion for actual NAL unit.
EXPECT_TRUE(converter.ConvertNalUnitStreamToByteStream(
kPacketDataOkWithFieldLen4,
sizeof(kPacketDataOkWithFieldLen4),
- output.get() + config_size,
+ &avc_config_,
+ output.get(),
&output_size_left_for_nal_unit));
-
- // Classes allocated in stack are automatically destroyed.
-}
-
-TEST_F(H264ToAnnexBBitstreamConverterTest, FailureNullData) {
- // Initialize converter.
- H264ToAnnexBBitstreamConverter converter;
-
- // Simulate situation where there is no header data.
- uint32 config_size = converter.ParseConfigurationAndCalculateSize(NULL, 0);
- EXPECT_EQ(config_size, 0U);
-
- // Go on with converting the headers with NULL parameters.
- EXPECT_FALSE(converter.ConvertAVCDecoderConfigToByteStream(NULL,
- 0,
- NULL,
- &config_size));
-
- // Simulate NULL parameters for buffer calculation.
- uint32 output_size = converter.CalculateNeededOutputBufferSize(NULL, 0);
- EXPECT_EQ(output_size, 0U);
-
- // Do the conversion for actual NAL unit with NULL paramaters.
- EXPECT_FALSE(converter.ConvertNalUnitStreamToByteStream(NULL,
- 0,
- NULL,
- &output_size));
-
- // Classes allocated in stack are automatically destroyed.
}
TEST_F(H264ToAnnexBBitstreamConverterTest, FailureHeaderBufferOverflow) {
@@ -343,12 +320,10 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureHeaderBufferOverflow) {
corrupted_header[5] = corrupted_header[5] | 0xA;
// Parse the headers
- uint32 config_size = converter.ParseConfigurationAndCalculateSize(
+ EXPECT_FALSE(converter.ParseConfiguration(
corrupted_header,
- sizeof(corrupted_header));
- EXPECT_EQ(config_size, 0U); // Failure as a result of buffer overflows.
-
- // Classes allocated in stack are automatically destroyed.
+ sizeof(corrupted_header),
+ &avc_config_));
}
TEST_F(H264ToAnnexBBitstreamConverterTest, FailureNalUnitBreakage) {
@@ -357,19 +332,20 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureNalUnitBreakage) {
H264ToAnnexBBitstreamConverter converter;
// Parse the headers.
- uint32 config_size = converter.ParseConfigurationAndCalculateSize(
+ EXPECT_TRUE(converter.ParseConfiguration(
kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4));
+ sizeof(kHeaderDataOkWithFieldLen4),
+ &avc_config_));
+ uint32 config_size = converter.GetConfigSize(avc_config_);
EXPECT_GT(config_size, 0U);
// Go on with converting the headers.
output.reset(new uint8[config_size]);
EXPECT_TRUE(output.get() != NULL);
EXPECT_TRUE(converter.ConvertAVCDecoderConfigToByteStream(
- kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4),
- output.get(),
- &config_size));
+ avc_config_,
+ output.get(),
+ &config_size));
// Simulate NAL unit broken in middle by writing only some of the data.
uint8 corrupted_nal_unit[sizeof(kPacketDataOkWithFieldLen4) - 100];
@@ -380,24 +356,24 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureNalUnitBreakage) {
// incomplete input buffer.
uint32 output_size = converter.CalculateNeededOutputBufferSize(
corrupted_nal_unit,
- sizeof(corrupted_nal_unit));
+ sizeof(corrupted_nal_unit),
+ &avc_config_);
EXPECT_EQ(output_size, 0U);
// Ignore the error and try to go on with conversion simulating wrong usage.
- output_size = sizeof(kPacketDataOkWithFieldLen4) + config_size;
+ output_size = sizeof(kPacketDataOkWithFieldLen4);
output.reset(new uint8[output_size]);
EXPECT_TRUE(output.get() != NULL);
- uint32 output_size_left_for_nal_unit = output_size - config_size;
+ uint32 output_size_left_for_nal_unit = output_size;
// Do the conversion for actual NAL unit, expecting failure.
EXPECT_FALSE(converter.ConvertNalUnitStreamToByteStream(
corrupted_nal_unit,
sizeof(corrupted_nal_unit),
- output.get() + config_size,
+ &avc_config_,
+ output.get(),
&output_size_left_for_nal_unit));
EXPECT_EQ(output_size_left_for_nal_unit, 0U);
-
- // Classes allocated in stack are automatically destroyed.
}
TEST_F(H264ToAnnexBBitstreamConverterTest, FailureTooSmallOutputBuffer) {
@@ -406,9 +382,11 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureTooSmallOutputBuffer) {
H264ToAnnexBBitstreamConverter converter;
// Parse the headers.
- uint32 config_size = converter.ParseConfigurationAndCalculateSize(
+ EXPECT_TRUE(converter.ParseConfiguration(
kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4));
+ sizeof(kHeaderDataOkWithFieldLen4),
+ &avc_config_));
+ uint32 config_size = converter.GetConfigSize(avc_config_);
EXPECT_GT(config_size, 0U);
uint32 real_config_size = config_size;
@@ -417,10 +395,9 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureTooSmallOutputBuffer) {
output.reset(new uint8[config_size]);
EXPECT_TRUE(output.get() != NULL);
EXPECT_FALSE(converter.ConvertAVCDecoderConfigToByteStream(
- kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4),
- output.get(),
- &config_size));
+ avc_config_,
+ output.get(),
+ &config_size));
EXPECT_EQ(config_size, 0U);
// Still too small (but only 1 byte short).
@@ -428,10 +405,9 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureTooSmallOutputBuffer) {
output.reset(new uint8[config_size]);
EXPECT_TRUE(output.get() != NULL);
EXPECT_FALSE(converter.ConvertAVCDecoderConfigToByteStream(
- kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4),
- output.get(),
- &config_size));
+ avc_config_,
+ output.get(),
+ &config_size));
EXPECT_EQ(config_size, 0U);
// Finally, retry with valid buffer.
@@ -439,32 +415,30 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, FailureTooSmallOutputBuffer) {
output.reset(new uint8[config_size]);
EXPECT_TRUE(output.get() != NULL);
EXPECT_TRUE(converter.ConvertAVCDecoderConfigToByteStream(
- kHeaderDataOkWithFieldLen4,
- sizeof(kHeaderDataOkWithFieldLen4),
- output.get(),
- &config_size));
+ avc_config_,
+ output.get(),
+ &config_size));
// Calculate buffer size for actual NAL unit.
uint32 output_size = converter.CalculateNeededOutputBufferSize(
kPacketDataOkWithFieldLen4,
- sizeof(kPacketDataOkWithFieldLen4));
+ sizeof(kPacketDataOkWithFieldLen4),
+ &avc_config_);
EXPECT_GT(output_size, 0U);
- output_size += config_size;
// Simulate too small output buffer.
output_size -= 1;
output.reset(new uint8[output_size]);
EXPECT_TRUE(output.get() != NULL);
- uint32 output_size_left_for_nal_unit = output_size - config_size;
+ uint32 output_size_left_for_nal_unit = output_size;
// Do the conversion for actual NAL unit (expect failure).
EXPECT_FALSE(converter.ConvertNalUnitStreamToByteStream(
kPacketDataOkWithFieldLen4,
sizeof(kPacketDataOkWithFieldLen4),
- output.get() + config_size,
+ &avc_config_,
+ output.get(),
&output_size_left_for_nal_unit));
EXPECT_EQ(output_size_left_for_nal_unit, 0U);
-
- // Classes allocated in stack are automatically destroyed.
}
// Generated from crash dump in http://crbug.com/234449 using xxd -i [file].
@@ -487,23 +461,25 @@ TEST_F(H264ToAnnexBBitstreamConverterTest, CorruptedPacket) {
H264ToAnnexBBitstreamConverter converter;
// Parse the headers.
- uint32 config_size = converter.ParseConfigurationAndCalculateSize(
+ EXPECT_TRUE(converter.ParseConfiguration(
kCorruptedPacketConfiguration,
- sizeof(kCorruptedPacketConfiguration));
+ sizeof(kCorruptedPacketConfiguration),
+ &avc_config_));
+ uint32 config_size = converter.GetConfigSize(avc_config_);
EXPECT_GT(config_size, 0U);
// Go on with converting the headers.
output.reset(new uint8[config_size]);
EXPECT_TRUE(converter.ConvertAVCDecoderConfigToByteStream(
- kCorruptedPacketConfiguration,
- sizeof(kCorruptedPacketConfiguration),
+ avc_config_,
output.get(),
&config_size));
// Expect an error here.
uint32 output_size = converter.CalculateNeededOutputBufferSize(
kCorruptedPacketData,
- sizeof(kCorruptedPacketData));
+ sizeof(kCorruptedPacketData),
+ &avc_config_);
EXPECT_EQ(output_size, 0U);
}
diff --git a/chromium/media/filters/in_memory_url_protocol.cc b/chromium/media/filters/in_memory_url_protocol.cc
index 85fa290e501..da8a7dd8da9 100644
--- a/chromium/media/filters/in_memory_url_protocol.cc
+++ b/chromium/media/filters/in_memory_url_protocol.cc
@@ -4,6 +4,8 @@
#include "media/filters/in_memory_url_protocol.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+
namespace media {
InMemoryUrlProtocol::InMemoryUrlProtocol(const uint8* data, int64 size,
@@ -17,12 +19,18 @@ InMemoryUrlProtocol::InMemoryUrlProtocol(const uint8* data, int64 size,
InMemoryUrlProtocol::~InMemoryUrlProtocol() {}
int InMemoryUrlProtocol::Read(int size, uint8* data) {
- int available_bytes = size_ - position_;
+ if (size < 0)
+ return AVERROR(EIO);
+
+ int64 available_bytes = size_ - position_;
if (size > available_bytes)
size = available_bytes;
- memcpy(data, data_ + position_, size);
- position_ += size;
+ if (size > 0) {
+ memcpy(data, data_ + position_, size);
+ position_ += size;
+ }
+
return size;
}
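The Read() fix above does two things: it rejects a negative |size| with AVERROR(EIO), and it widens |available_bytes| to int64 so that |size_ - position_| cannot overflow int for very large buffers (the new ReadFromLargeBuffer test below covers exactly that). The essence of the clamped read as a standalone sketch with hypothetical names:

    #include <cstdint>
    #include <cstring>

    // Clamped read over an in-memory buffer. Using int64_t for the remaining
    // byte count avoids overflow when the buffer exceeds INT_MAX bytes; the
    // return value still fits in int because it is bounded above by |size|.
    int ClampedRead(const uint8_t* data, int64_t data_size, int64_t* position,
                    int size, uint8_t* out) {
      if (size < 0)
        return -1;  // Error, mirroring the AVERROR(EIO) return above.
      int64_t available = data_size - *position;
      if (size > available)
        size = static_cast<int>(available);
      if (size > 0) {
        memcpy(out, data + *position, size);
        *position += size;
      }
      return size;  // 0 at end of buffer, matching the zero-size read test.
    }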
diff --git a/chromium/media/filters/in_memory_url_protocol_unittest.cc b/chromium/media/filters/in_memory_url_protocol_unittest.cc
new file mode 100644
index 00000000000..7b615cdd041
--- /dev/null
+++ b/chromium/media/filters/in_memory_url_protocol_unittest.cc
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/in_memory_url_protocol.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const uint8 kData[] = { 0x01, 0x02, 0x03, 0x04 };
+
+TEST(InMemoryUrlProtocolTest, ReadFromLargeBuffer) {
+ InMemoryUrlProtocol protocol(kData, std::numeric_limits<int64>::max(), false);
+
+ uint8 out[sizeof(kData)];
+ EXPECT_EQ(4, protocol.Read(sizeof(out), out));
+ EXPECT_EQ(0, memcmp(out, kData, sizeof(out)));
+}
+
+TEST(InMemoryUrlProtocolTest, ReadWithNegativeSize) {
+ InMemoryUrlProtocol protocol(kData, sizeof(kData), false);
+
+ uint8 out[sizeof(kData)];
+ EXPECT_EQ(AVERROR(EIO), protocol.Read(-2, out));
+}
+
+TEST(InMemoryUrlProtocolTest, ReadWithZeroSize) {
+ InMemoryUrlProtocol protocol(kData, sizeof(kData), false);
+
+ uint8 out;
+ EXPECT_EQ(0, protocol.Read(0, &out));
+}
+
+TEST(InMemoryUrlProtocolTest, SetPosition) {
+ InMemoryUrlProtocol protocol(kData, sizeof(kData), false);
+
+ EXPECT_FALSE(protocol.SetPosition(-1));
+ EXPECT_FALSE(protocol.SetPosition(sizeof(kData) + 1));
+
+ uint8 out;
+ EXPECT_TRUE(protocol.SetPosition(sizeof(kData)));
+ EXPECT_EQ(0, protocol.Read(1, &out));
+
+ int i = sizeof(kData) / 2;
+ EXPECT_TRUE(protocol.SetPosition(i));
+ EXPECT_EQ(1, protocol.Read(1, &out));
+ EXPECT_EQ(kData[i], out);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/mock_gpu_video_accelerator_factories.cc b/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
index f4f39973600..eeb3ba6dce0 100644
--- a/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
+++ b/chromium/media/filters/mock_gpu_video_accelerator_factories.cc
@@ -11,18 +11,13 @@ MockGpuVideoAcceleratorFactories::MockGpuVideoAcceleratorFactories() {}
MockGpuVideoAcceleratorFactories::~MockGpuVideoAcceleratorFactories() {}
scoped_ptr<VideoDecodeAccelerator>
-MockGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator(
- VideoCodecProfile profile,
- VideoDecodeAccelerator::Client* client) {
- return scoped_ptr<VideoDecodeAccelerator>(
- DoCreateVideoDecodeAccelerator(profile, client));
+MockGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator() {
+ return scoped_ptr<VideoDecodeAccelerator>(DoCreateVideoDecodeAccelerator());
}
scoped_ptr<VideoEncodeAccelerator>
-MockGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator(
- VideoEncodeAccelerator::Client* client) {
- return scoped_ptr<VideoEncodeAccelerator>(
- DoCreateVideoEncodeAccelerator(client));
+MockGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator() {
+ return scoped_ptr<VideoEncodeAccelerator>(DoCreateVideoEncodeAccelerator());
}
} // namespace media
diff --git a/chromium/media/filters/mock_gpu_video_accelerator_factories.h b/chromium/media/filters/mock_gpu_video_accelerator_factories.h
index 1dfac3da8b4..fde3b08fea3 100644
--- a/chromium/media/filters/mock_gpu_video_accelerator_factories.h
+++ b/chromium/media/filters/mock_gpu_video_accelerator_factories.h
@@ -6,7 +6,7 @@
#define MEDIA_FILTERS_MOCK_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/filters/gpu_video_accelerator_factories.h"
#include "media/video/video_decode_accelerator.h"
#include "media/video/video_encode_accelerator.h"
@@ -28,35 +28,29 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
// CreateVideo{Decode,Encode}Accelerator returns scoped_ptr, which the mocking
// framework does not want. Trampoline them.
- MOCK_METHOD2(DoCreateVideoDecodeAccelerator,
- VideoDecodeAccelerator*(VideoCodecProfile,
- VideoDecodeAccelerator::Client*));
- MOCK_METHOD1(DoCreateVideoEncodeAccelerator,
- VideoEncodeAccelerator*(VideoEncodeAccelerator::Client*));
+ MOCK_METHOD0(DoCreateVideoDecodeAccelerator, VideoDecodeAccelerator*());
+ MOCK_METHOD0(DoCreateVideoEncodeAccelerator, VideoEncodeAccelerator*());
MOCK_METHOD5(CreateTextures,
- uint32(int32 count,
- const gfx::Size& size,
- std::vector<uint32>* texture_ids,
- std::vector<gpu::Mailbox>* texture_mailboxes,
- uint32 texture_target));
+ bool(int32 count,
+ const gfx::Size& size,
+ std::vector<uint32>* texture_ids,
+ std::vector<gpu::Mailbox>* texture_mailboxes,
+ uint32 texture_target));
MOCK_METHOD1(DeleteTexture, void(uint32 texture_id));
MOCK_METHOD1(WaitSyncPoint, void(uint32 sync_point));
MOCK_METHOD3(ReadPixels,
void(uint32 texture_id,
- const gfx::Size& size,
+ const gfx::Rect& visible_rect,
const SkBitmap& pixels));
MOCK_METHOD1(CreateSharedMemory, base::SharedMemory*(size_t size));
- MOCK_METHOD0(GetMessageLoop, scoped_refptr<base::MessageLoopProxy>());
- MOCK_METHOD0(Abort, void());
- MOCK_METHOD0(IsAborted, bool());
+ MOCK_METHOD0(GetTaskRunner, scoped_refptr<base::SingleThreadTaskRunner>());
- virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator(
- VideoCodecProfile profile,
- VideoDecodeAccelerator::Client* client) OVERRIDE;
+ virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator()
+ OVERRIDE;
- virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator(
- VideoEncodeAccelerator::Client* client) OVERRIDE;
+ virtual scoped_ptr<VideoEncodeAccelerator> CreateVideoEncodeAccelerator()
+ OVERRIDE;
private:
virtual ~MockGpuVideoAcceleratorFactories();
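The Do* trampolines above work around gmock's inability to mock methods returning move-only types such as scoped_ptr: the MOCK_METHOD returns a raw pointer and the real virtual wraps it. A generic sketch of the pattern with hypothetical Widget/Factory names:

    #include "base/compiler_specific.h"
    #include "base/memory/scoped_ptr.h"
    #include "testing/gmock/include/gmock/gmock.h"

    class Widget {};

    class Factory {
     public:
      virtual ~Factory() {}
      virtual scoped_ptr<Widget> CreateWidget() = 0;
    };

    class MockFactory : public Factory {
     public:
      // gmock cannot generate a mock for a scoped_ptr-returning method, so
      // mock a raw-pointer trampoline instead...
      MOCK_METHOD0(DoCreateWidget, Widget*());

      // ...and wrap its result in the real virtual. Ownership of the pointer
      // returned by DoCreateWidget() passes to the scoped_ptr, so tests
      // should return heap-allocated objects (or NULL).
      virtual scoped_ptr<Widget> CreateWidget() OVERRIDE {
        return scoped_ptr<Widget>(DoCreateWidget());
      }
    };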
diff --git a/chromium/media/filters/opus_audio_decoder.cc b/chromium/media/filters/opus_audio_decoder.cc
index c1de6df2158..51fee630304 100644
--- a/chromium/media/filters/opus_audio_decoder.cc
+++ b/chromium/media/filters/opus_audio_decoder.cc
@@ -6,19 +6,14 @@
#include <cmath>
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "base/sys_byteorder.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/audio_discard_helper.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/demuxer.h"
-#include "media/base/pipeline.h"
#include "third_party/opus/src/include/opus.h"
#include "third_party/opus/src/include/opus_multistream.h"
@@ -31,11 +26,6 @@ static uint16 ReadLE16(const uint8* data, size_t data_size, int read_offset) {
return base::ByteSwapToLE16(value);
}
-static int TimeDeltaToAudioFrames(base::TimeDelta time_delta,
- int frame_rate) {
- return std::ceil(time_delta.InSecondsF() * frame_rate);
-}
-
// The Opus specification is part of IETF RFC 6716:
// http://tools.ietf.org/html/rfc6716
@@ -183,7 +173,8 @@ struct OpusExtraData {
channel_mapping(0),
num_streams(0),
num_coupled(0),
- gain_db(0) {
+ gain_db(0),
+ stream_map() {
memcpy(stream_map,
kDefaultOpusChannelLayout,
kMaxChannelsWithDefaultLayout);
@@ -253,234 +244,145 @@ static bool ParseOpusExtraData(const uint8* data, int data_size,
}
OpusAudioDecoder::OpusAudioDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop)
- : message_loop_(message_loop),
- weak_factory_(this),
- demuxer_stream_(NULL),
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner),
opus_decoder_(NULL),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- samples_per_second_(0),
- sample_format_(kSampleFormatF32),
- bits_per_channel_(SampleFormatToBytesPerChannel(sample_format_) * 8),
- last_input_timestamp_(kNoTimestamp()),
- frames_to_discard_(0),
- frame_delay_at_start_(0),
- start_input_timestamp_(kNoTimestamp()) {
-}
+ start_input_timestamp_(kNoTimestamp()) {}
-void OpusAudioDecoder::Initialize(
- DemuxerStream* stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void OpusAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);
- if (demuxer_stream_) {
- // TODO(scherkus): initialization currently happens more than once in
- // PipelineIntegrationTest.BasicPlayback.
- DLOG(ERROR) << "Initialize has already been called.";
- CHECK(false);
- }
-
- weak_this_ = weak_factory_.GetWeakPtr();
- demuxer_stream_ = stream;
+ config_ = config;
+ output_cb_ = BindToCurrentLoop(output_cb);
if (!ConfigureDecoder()) {
initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
- statistics_cb_ = statistics_cb;
initialize_cb.Run(PIPELINE_OK);
}
-void OpusAudioDecoder::Read(const ReadCB& read_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!read_cb.is_null());
- CHECK(read_cb_.is_null()) << "Overlapping decodes are not supported.";
- read_cb_ = BindToCurrentLoop(read_cb);
+void OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!decode_cb.is_null());
- ReadFromDemuxerStream();
+ DecodeBuffer(buffer, BindToCurrentLoop(decode_cb));
}
-int OpusAudioDecoder::bits_per_channel() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return bits_per_channel_;
-}
+void OpusAudioDecoder::Reset(const base::Closure& closure) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
-ChannelLayout OpusAudioDecoder::channel_layout() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return channel_layout_;
+ opus_multistream_decoder_ctl(opus_decoder_, OPUS_RESET_STATE);
+ ResetTimestampState();
+ task_runner_->PostTask(FROM_HERE, closure);
}
-int OpusAudioDecoder::samples_per_second() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return samples_per_second_;
-}
+void OpusAudioDecoder::Stop() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
-void OpusAudioDecoder::Reset(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::Closure reset_cb = BindToCurrentLoop(closure);
+ if (!opus_decoder_)
+ return;
opus_multistream_decoder_ctl(opus_decoder_, OPUS_RESET_STATE);
ResetTimestampState();
- reset_cb.Run();
-}
-
-OpusAudioDecoder::~OpusAudioDecoder() {
- // TODO(scherkus): should we require Stop() to be called? this might end up
- // getting called on a random thread due to refcounting.
CloseDecoder();
}
-void OpusAudioDecoder::ReadFromDemuxerStream() {
- DCHECK(!read_cb_.is_null());
- demuxer_stream_->Read(base::Bind(&OpusAudioDecoder::BufferReady, weak_this_));
-}
-
-void OpusAudioDecoder::BufferReady(
- DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& input) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!read_cb_.is_null());
- DCHECK_EQ(status != DemuxerStream::kOk, !input.get()) << status;
+OpusAudioDecoder::~OpusAudioDecoder() {}
- if (status == DemuxerStream::kAborted) {
- DCHECK(!input.get());
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
- return;
- }
-
- if (status == DemuxerStream::kConfigChanged) {
- DCHECK(!input.get());
- DVLOG(1) << "Config changed.";
-
- if (!ConfigureDecoder()) {
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
- return;
- }
+void OpusAudioDecoder::DecodeBuffer(
+ const scoped_refptr<DecoderBuffer>& input,
+ const DecodeCB& decode_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!decode_cb.is_null());
- ResetTimestampState();
- ReadFromDemuxerStream();
- return;
- }
-
- DCHECK_EQ(status, DemuxerStream::kOk);
DCHECK(input.get());
// Libopus does not buffer output. Decoding is complete when an end of stream
// input buffer is received.
if (input->end_of_stream()) {
- base::ResetAndReturn(&read_cb_).Run(kOk, AudioBuffer::CreateEOSBuffer());
+ decode_cb.Run(kOk);
return;
}
// Make sure we are notified if http://crbug.com/49709 returns. The issue
// also occurs with some damaged files.
- if (input->timestamp() == kNoTimestamp() &&
- output_timestamp_helper_->base_timestamp() == kNoTimestamp()) {
+ if (input->timestamp() == kNoTimestamp()) {
DLOG(ERROR) << "Received a buffer without timestamps!";
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
- return;
- }
-
- if (last_input_timestamp_ != kNoTimestamp() &&
- input->timestamp() != kNoTimestamp() &&
- input->timestamp() < last_input_timestamp_) {
- base::TimeDelta diff = input->timestamp() - last_input_timestamp_;
- DLOG(ERROR) << "Input timestamps are not monotonically increasing! "
- << " ts " << input->timestamp().InMicroseconds() << " us"
- << " diff " << diff.InMicroseconds() << " us";
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
+ decode_cb.Run(kDecodeError);
return;
}
// Apply the necessary codec delay.
if (start_input_timestamp_ == kNoTimestamp())
start_input_timestamp_ = input->timestamp();
- if (last_input_timestamp_ == kNoTimestamp() &&
+ if (!discard_helper_->initialized() &&
input->timestamp() == start_input_timestamp_) {
- frames_to_discard_ = frame_delay_at_start_;
+ discard_helper_->Reset(config_.codec_delay());
}
- last_input_timestamp_ = input->timestamp();
-
scoped_refptr<AudioBuffer> output_buffer;
if (!Decode(input, &output_buffer)) {
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
+ decode_cb.Run(kDecodeError);
return;
}
- if (output_buffer.get()) {
- // Execute callback to return the decoded audio.
- base::ResetAndReturn(&read_cb_).Run(kOk, output_buffer);
- } else {
- // We exhausted the input data, but it wasn't enough for a frame. Ask for
- // more data in order to fulfill this read.
- ReadFromDemuxerStream();
+ if (output_buffer) {
+ output_cb_.Run(output_buffer);
}
+
+ decode_cb.Run(kOk);
}
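This refactor swaps the pull model (Read() -> ReadFromDemuxerStream() -> BufferReady()) for a push model: the caller feeds demuxed buffers in, decoded output is delivered through the |output_cb| registered at Initialize(), and |decode_cb| only acknowledges that the input was consumed. A minimal caller sketch under those assumptions (hypothetical free functions, not taken from this patch):

    // Registered once via Initialize(); may run zero or more times per
    // Decode() call.
    static void OnDecoderOutput(const scoped_refptr<AudioBuffer>& buffer) {
      // Consume decoded audio. An end-of-stream Decode() yields no output.
    }

    // Runs exactly once per Decode(); kOk is reported even when no output
    // was emitted, e.g. while codec-delay frames are still being discarded.
    static void OnDecodeDone(AudioDecoder::Status status) {}

    // decoder->Initialize(config, status_cb, base::Bind(&OnDecoderOutput));
    // decoder->Decode(buffer, base::Bind(&OnDecodeDone));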
bool OpusAudioDecoder::ConfigureDecoder() {
- const AudioDecoderConfig& config = demuxer_stream_->audio_decoder_config();
-
- if (config.codec() != kCodecOpus) {
+ if (config_.codec() != kCodecOpus) {
DVLOG(1) << "Codec must be kCodecOpus.";
return false;
}
const int channel_count =
- ChannelLayoutToChannelCount(config.channel_layout());
- if (!config.IsValidConfig() || channel_count > kMaxVorbisChannels) {
+ ChannelLayoutToChannelCount(config_.channel_layout());
+ if (!config_.IsValidConfig() || channel_count > kMaxVorbisChannels) {
DLOG(ERROR) << "Invalid or unsupported audio stream -"
- << " codec: " << config.codec()
+ << " codec: " << config_.codec()
<< " channel count: " << channel_count
- << " channel layout: " << config.channel_layout()
- << " bits per channel: " << config.bits_per_channel()
- << " samples per second: " << config.samples_per_second();
+ << " channel layout: " << config_.channel_layout()
+ << " bits per channel: " << config_.bits_per_channel()
+ << " samples per second: " << config_.samples_per_second();
return false;
}
- if (config.is_encrypted()) {
+ if (config_.is_encrypted()) {
DLOG(ERROR) << "Encrypted audio stream not supported.";
return false;
}
- if (opus_decoder_ &&
- (channel_layout_ != config.channel_layout() ||
- samples_per_second_ != config.samples_per_second())) {
- DLOG(ERROR) << "Unsupported config change -"
- << ", channel_layout: " << channel_layout_
- << " -> " << config.channel_layout()
- << ", sample_rate: " << samples_per_second_
- << " -> " << config.samples_per_second();
- return false;
- }
-
// Clean up existing decoder if necessary.
CloseDecoder();
// Parse the Opus Extra Data.
OpusExtraData opus_extra_data;
- if (!ParseOpusExtraData(config.extra_data(), config.extra_data_size(),
- config,
+ if (!ParseOpusExtraData(config_.extra_data(), config_.extra_data_size(),
+ config_,
&opus_extra_data))
return false;
- // Convert from seconds to samples.
- timestamp_offset_ = config.codec_delay();
- frame_delay_at_start_ = TimeDeltaToAudioFrames(config.codec_delay(),
- config.samples_per_second());
- if (timestamp_offset_ <= base::TimeDelta() || frame_delay_at_start_ < 0) {
+ if (config_.codec_delay() < 0) {
DLOG(ERROR) << "Invalid file. Incorrect value for codec delay: "
- << config.codec_delay().InMicroseconds();
+ << config_.codec_delay();
return false;
}
- if (frame_delay_at_start_ != opus_extra_data.skip_samples) {
+ if (config_.codec_delay() != opus_extra_data.skip_samples) {
DLOG(ERROR) << "Invalid file. Codec Delay in container does not match the "
- << "value in Opus Extra Data.";
+ << "value in Opus Extra Data. " << config_.codec_delay()
+ << " vs " << opus_extra_data.skip_samples;
return false;
}
@@ -497,7 +399,7 @@ bool OpusAudioDecoder::ConfigureDecoder() {
// Init Opus.
int status = OPUS_INVALID_STATE;
- opus_decoder_ = opus_multistream_decoder_create(config.samples_per_second(),
+ opus_decoder_ = opus_multistream_decoder_create(config_.samples_per_second(),
channel_count,
opus_extra_data.num_streams,
opus_extra_data.num_coupled,
@@ -517,10 +419,8 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- channel_layout_ = config.channel_layout();
- samples_per_second_ = config.samples_per_second();
- output_timestamp_helper_.reset(
- new AudioTimestampHelper(config.samples_per_second()));
+ discard_helper_.reset(
+ new AudioDiscardHelper(config_.samples_per_second(), 0));
start_input_timestamp_ = kNoTimestamp();
return true;
}
@@ -533,24 +433,23 @@ void OpusAudioDecoder::CloseDecoder() {
}
void OpusAudioDecoder::ResetTimestampState() {
- output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
- last_input_timestamp_ = kNoTimestamp();
- frames_to_discard_ = TimeDeltaToAudioFrames(
- demuxer_stream_->audio_decoder_config().seek_preroll(),
- samples_per_second_);
+ discard_helper_->Reset(
+ discard_helper_->TimeDeltaToFrames(config_.seek_preroll()));
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
scoped_refptr<AudioBuffer>* output_buffer) {
// Allocate a buffer for the output samples.
*output_buffer = AudioBuffer::CreateBuffer(
- sample_format_,
- ChannelLayoutToChannelCount(channel_layout_),
+ config_.sample_format(),
+ config_.channel_layout(),
+ ChannelLayoutToChannelCount(config_.channel_layout()),
+ config_.samples_per_second(),
kMaxOpusOutputPacketSizeSamples);
const int buffer_size =
output_buffer->get()->channel_count() *
output_buffer->get()->frame_count() *
- SampleFormatToBytesPerChannel(sample_format_);
+ SampleFormatToBytesPerChannel(config_.sample_format());
float* float_output_buffer = reinterpret_cast<float*>(
output_buffer->get()->channel_data()[0]);
@@ -571,55 +470,14 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
return false;
}
- if (output_timestamp_helper_->base_timestamp() == kNoTimestamp() &&
- !input->end_of_stream()) {
- DCHECK(input->timestamp() != kNoTimestamp());
- output_timestamp_helper_->SetBaseTimestamp(input->timestamp());
- }
-
// Trim off any extraneous allocation.
DCHECK_LE(frames_decoded, output_buffer->get()->frame_count());
const int trim_frames = output_buffer->get()->frame_count() - frames_decoded;
if (trim_frames > 0)
output_buffer->get()->TrimEnd(trim_frames);
- // Handle frame discard and trimming.
- int frames_to_output = frames_decoded;
- if (frames_decoded > frames_to_discard_) {
- if (frames_to_discard_ > 0) {
- output_buffer->get()->TrimStart(frames_to_discard_);
- frames_to_output -= frames_to_discard_;
- frames_to_discard_ = 0;
- }
- if (input->discard_padding().InMicroseconds() > 0) {
- int discard_padding = TimeDeltaToAudioFrames(input->discard_padding(),
- samples_per_second_);
- if (discard_padding < 0 || discard_padding > frames_to_output) {
- DVLOG(1) << "Invalid file. Incorrect discard padding value.";
- return false;
- }
- output_buffer->get()->TrimEnd(discard_padding);
- frames_to_output -= discard_padding;
- }
- } else {
- frames_to_discard_ -= frames_to_output;
- frames_to_output = 0;
- }
-
- // Decoding finished successfully, update statistics.
- PipelineStatistics statistics;
- statistics.audio_bytes_decoded = input->data_size();
- statistics_cb_.Run(statistics);
-
- // Assign timestamp and duration to the buffer.
- output_buffer->get()->set_timestamp(
- output_timestamp_helper_->GetTimestamp() - timestamp_offset_);
- output_buffer->get()->set_duration(
- output_timestamp_helper_->GetFrameDuration(frames_to_output));
- output_timestamp_helper_->AddFrames(frames_decoded);
-
- // Discard the buffer to indicate we need more data.
- if (!frames_to_output)
+ // Handles discards and timestamping. Discard the buffer if more data is needed.
+ if (!discard_helper_->ProcessBuffers(input, *output_buffer))
*output_buffer = NULL;
return true;
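ProcessBuffers() folds the deleted TrimStart()/TrimEnd() bookkeeping, discard-padding validation, and timestamping into one call; a false return means the entire buffer was consumed by discards and the caller should feed more input. A behavior-level sketch of the start-discard half, mirroring the deleted code above rather than the real helper's internals:

    // Sketch only: |frames_to_discard| persists across calls the way the
    // deleted member variable did. The real AudioDiscardHelper additionally
    // applies end trimming from |input->discard_padding()| and timestamps.
    static bool DiscardLeadingFrames(const scoped_refptr<AudioBuffer>& output,
                                     int* frames_to_discard) {
      const int frames = output->frame_count();
      if (frames <= *frames_to_discard) {
        *frames_to_discard -= frames;
        return false;  // Whole buffer discarded; decode more input.
      }
      if (*frames_to_discard > 0) {
        output->TrimStart(*frames_to_discard);
        *frames_to_discard = 0;
      }
      return true;  // Buffer still has audible frames; emit it.
    }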
diff --git a/chromium/media/filters/opus_audio_decoder.h b/chromium/media/filters/opus_audio_decoder.h
index 982458b1afa..504701a52f8 100644
--- a/chromium/media/filters/opus_audio_decoder.h
+++ b/chromium/media/filters/opus_audio_decoder.h
@@ -6,7 +6,6 @@
#define MEDIA_FILTERS_OPUS_AUDIO_DECODER_H_
#include "base/callback.h"
-#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "media/base/audio_decoder.h"
#include "media/base/demuxer_stream.h"
@@ -15,38 +14,36 @@
struct OpusMSDecoder;
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
class AudioBuffer;
-class AudioTimestampHelper;
+class AudioDiscardHelper;
class DecoderBuffer;
struct QueuedAudioBuffer;
class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
public:
explicit OpusAudioDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
virtual ~OpusAudioDecoder();
// AudioDecoder implementation.
- virtual void Initialize(DemuxerStream* stream,
+ virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) OVERRIDE;
- virtual void Read(const ReadCB& read_cb) OVERRIDE;
- virtual int bits_per_channel() OVERRIDE;
- virtual ChannelLayout channel_layout() OVERRIDE;
- virtual int samples_per_second() OVERRIDE;
+ const OutputCB& output_cb) OVERRIDE;
+ virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
+ virtual void Stop() OVERRIDE;
private:
// Reads from the demuxer stream with corresponding callback method.
void ReadFromDemuxerStream();
- void BufferReady(DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& input);
-
+ void DecodeBuffer(const scoped_refptr<DecoderBuffer>& input,
+ const DecodeCB& decode_cb);
bool ConfigureDecoder();
void CloseDecoder();
@@ -54,42 +51,17 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
bool Decode(const scoped_refptr<DecoderBuffer>& input,
scoped_refptr<AudioBuffer>* output_buffer);
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<OpusAudioDecoder> weak_factory_;
- base::WeakPtr<OpusAudioDecoder> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- DemuxerStream* demuxer_stream_;
- StatisticsCB statistics_cb_;
+ AudioDecoderConfig config_;
+ OutputCB output_cb_;
OpusMSDecoder* opus_decoder_;
- // Decoded audio format.
- ChannelLayout channel_layout_;
- int samples_per_second_;
- const SampleFormat sample_format_;
- const int bits_per_channel_;
-
- // Used for computing output timestamps.
- scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
- base::TimeDelta last_input_timestamp_;
-
- ReadCB read_cb_;
-
- // Number of frames to be discarded from the start of the packet. This value
- // is respected for all packets except for the first one in the stream. For
- // the first packet in the stream, |frame_delay_at_start_| is used. This is
- // usually set to the SeekPreRoll value from the container whenever a seek
- // happens.
- int frames_to_discard_;
-
- // Number of frames to be discarded at the start of the stream. This value
- // is typically the CodecDelay value from the container. This value should
- // only be applied when input timestamp is |start_input_timestamp_|.
- int frame_delay_at_start_;
+ // When the input timestamp is |start_input_timestamp_|, the decoder needs to
+ // drop |config_.codec_delay()| frames.
base::TimeDelta start_input_timestamp_;
- // Timestamp to be subtracted from all the frames. This is typically computed
- // from the CodecDelay value in the container.
- base::TimeDelta timestamp_offset_;
+ scoped_ptr<AudioDiscardHelper> discard_helper_;
DISALLOW_IMPLICIT_CONSTRUCTORS(OpusAudioDecoder);
};
diff --git a/chromium/media/filters/opus_audio_decoder_unittest.cc b/chromium/media/filters/opus_audio_decoder_unittest.cc
new file mode 100644
index 00000000000..4e3305675fa
--- /dev/null
+++ b/chromium/media/filters/opus_audio_decoder_unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/test_data_util.h"
+#include "media/base/test_helpers.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/audio_file_reader.h"
+#include "media/filters/in_memory_url_protocol.h"
+#include "media/filters/opus_audio_decoder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class OpusAudioDecoderTest : public testing::Test {
+ public:
+ OpusAudioDecoderTest()
+ : decoder_(new OpusAudioDecoder(message_loop_.message_loop_proxy())),
+ pending_decode_(false),
+ pending_reset_(false) {}
+
+ virtual ~OpusAudioDecoderTest() {
+ EXPECT_FALSE(pending_decode_);
+ EXPECT_FALSE(pending_reset_);
+ }
+
+ protected:
+ void SatisfyPendingDecode() { base::RunLoop().RunUntilIdle(); }
+
+ void SendEndOfStream() {
+ pending_decode_ = true;
+ decoder_->Decode(DecoderBuffer::CreateEOSBuffer(),
+ base::Bind(&OpusAudioDecoderTest::DecodeFinished,
+ base::Unretained(this)));
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void Initialize() {
+ // Load the test data file.
+ data_ = ReadTestDataFile("bear-opus.ogg");
+ protocol_.reset(
+ new InMemoryUrlProtocol(data_->data(), data_->data_size(), false));
+ reader_.reset(new AudioFileReader(protocol_.get()));
+ reader_->Open();
+
+ AudioDecoderConfig config;
+ AVCodecContextToAudioDecoderConfig(
+ reader_->codec_context_for_testing(), false, &config, false);
+ InitializeDecoder(config);
+ }
+
+ void InitializeDecoder(const AudioDecoderConfig& config) {
+ decoder_->Initialize(config,
+ NewExpectedStatusCB(PIPELINE_OK),
+ base::Bind(&OpusAudioDecoderTest::OnDecoderOutput,
+ base::Unretained(this)));
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void Decode() {
+ pending_decode_ = true;
+
+ AVPacket packet;
+ ASSERT_TRUE(reader_->ReadPacketForTesting(&packet));
+ scoped_refptr<DecoderBuffer> buffer =
+ DecoderBuffer::CopyFrom(packet.data, packet.size);
+ buffer->set_timestamp(ConvertFromTimeBase(
+ reader_->codec_context_for_testing()->time_base, packet.pts));
+ buffer->set_duration(ConvertFromTimeBase(
+ reader_->codec_context_for_testing()->time_base, packet.duration));
+ decoder_->Decode(buffer,
+ base::Bind(&OpusAudioDecoderTest::DecodeFinished,
+ base::Unretained(this)));
+ av_free_packet(&packet);
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void Reset() {
+ pending_reset_ = true;
+ decoder_->Reset(base::Bind(&OpusAudioDecoderTest::ResetFinished,
+ base::Unretained(this)));
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void Stop() {
+ decoder_->Stop();
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void OnDecoderOutput(const scoped_refptr<AudioBuffer>& buffer) {
+ decoded_audio_.push_back(buffer);
+ }
+
+ void DecodeFinished(AudioDecoder::Status status) {
+ EXPECT_TRUE(pending_decode_);
+ pending_decode_ = false;
+
+ // If we have a pending reset, we expect an abort.
+ if (pending_reset_) {
+ EXPECT_EQ(status, AudioDecoder::kAborted);
+ return;
+ }
+
+ EXPECT_EQ(status, AudioDecoder::kOk);
+ }
+
+ void ResetFinished() {
+ EXPECT_TRUE(pending_reset_);
+ // Reset should always finish after Decode.
+ EXPECT_FALSE(pending_decode_);
+
+ pending_reset_ = false;
+ }
+
+ void ExpectDecodedAudio(size_t i, int64 timestamp, int64 duration) {
+ EXPECT_LT(i, decoded_audio_.size());
+ EXPECT_EQ(timestamp, decoded_audio_[i]->timestamp().InMicroseconds());
+ EXPECT_EQ(duration, decoded_audio_[i]->duration().InMicroseconds());
+ EXPECT_FALSE(decoded_audio_[i]->end_of_stream());
+ }
+
+ size_t decoded_audio_size() const {
+ return decoded_audio_.size();
+ }
+
+ private:
+ base::MessageLoop message_loop_;
+ scoped_refptr<DecoderBuffer> data_;
+ scoped_ptr<InMemoryUrlProtocol> protocol_;
+ scoped_ptr<AudioFileReader> reader_;
+
+ scoped_ptr<OpusAudioDecoder> decoder_;
+ bool pending_decode_;
+ bool pending_reset_;
+
+ std::deque<scoped_refptr<AudioBuffer> > decoded_audio_;
+
+ DISALLOW_COPY_AND_ASSIGN(OpusAudioDecoderTest);
+};
+
+TEST_F(OpusAudioDecoderTest, Initialize) {
+ Initialize();
+ Stop();
+}
+
+TEST_F(OpusAudioDecoderTest, InitializeWithNoCodecDelay) {
+ const uint8_t kOpusExtraData[] = {
+ 0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64, 0x01, 0x02,
+ // The next two bytes represent the codec delay.
+ 0x00, 0x00, 0x80, 0xbb, 0x00, 0x00, 0x00, 0x00, 0x00};
+ AudioDecoderConfig decoder_config;
+ decoder_config.Initialize(kCodecOpus,
+ kSampleFormatF32,
+ CHANNEL_LAYOUT_STEREO,
+ 48000,
+ kOpusExtraData,
+ ARRAYSIZE_UNSAFE(kOpusExtraData),
+ false,
+ false,
+ base::TimeDelta::FromMilliseconds(80),
+ 0);
+ InitializeDecoder(decoder_config);
+ Stop();
+}
+
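For reference, kOpusExtraData above is a standard OpusHead identification header (RFC 7845, multi-byte fields little-endian); decoded field by field it reads:

    // 'O' 'p' 'u' 's' 'H' 'e' 'a' 'd'  -- magic signature
    // 0x01                             -- version
    // 0x02                             -- output channel count
    // 0x00 0x00                        -- pre-skip, i.e. codec delay (0 here)
    // 0x80 0xbb 0x00 0x00              -- input sample rate (0xbb80 = 48000 Hz)
    // 0x00 0x00                        -- output gain
    // 0x00                             -- channel mapping family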
+TEST_F(OpusAudioDecoderTest, ProduceAudioSamples) {
+ Initialize();
+ Decode();
+ Decode();
+ Decode();
+
+ ASSERT_EQ(3u, decoded_audio_size());
+ ExpectDecodedAudio(0, 0, 3500);
+ ExpectDecodedAudio(1, 3500, 10000);
+ ExpectDecodedAudio(2, 13500, 10000);
+
+ // Call one more time with EOS.
+ SendEndOfStream();
+ ASSERT_EQ(3u, decoded_audio_size());
+ Stop();
+}
+
+TEST_F(OpusAudioDecoderTest, DecodeAbort) {
+ Initialize();
+ Decode();
+ Stop();
+}
+
+TEST_F(OpusAudioDecoderTest, PendingDecode_Stop) {
+ Initialize();
+ Decode();
+ Stop();
+ SatisfyPendingDecode();
+}
+
+TEST_F(OpusAudioDecoderTest, PendingDecode_Reset) {
+ Initialize();
+ Decode();
+ Reset();
+ SatisfyPendingDecode();
+ Stop();
+}
+
+TEST_F(OpusAudioDecoderTest, PendingDecode_ResetStop) {
+ Initialize();
+ Decode();
+ Reset();
+ Stop();
+ SatisfyPendingDecode();
+}
+
+} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test.cc b/chromium/media/filters/pipeline_integration_test.cc
index bdf33f22418..f991dc39977 100644
--- a/chromium/media/filters/pipeline_integration_test.cc
+++ b/chromium/media/filters/pipeline_integration_test.cc
@@ -9,6 +9,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
+#include "media/base/cdm_promise.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_keys.h"
#include "media/base/media_switches.h"
@@ -17,8 +18,10 @@
#include "media/cdm/json_web_key.h"
#include "media/filters/chunk_demuxer.h"
+using testing::_;
using testing::AnyNumber;
using testing::AtMost;
+using testing::SaveArg;
namespace media {
@@ -33,6 +36,7 @@ const char kVideoOnlyWebM[] = "video/webm; codecs=\"vp8\"";
const char kMP4VideoType[] = "video/mp4";
const char kMP4AudioType[] = "audio/mp4";
#if defined(USE_PROPRIETARY_CODECS)
+const char kADTS[] = "audio/aac";
const char kMP4[] = "video/mp4; codecs=\"avc1.4D4041,mp4a.40.2\"";
const char kMP4Video[] = "video/mp4; codecs=\"avc1.4D4041\"";
const char kMP4VideoAVC3[] = "video/mp4; codecs=\"avc3.64001f\"";
@@ -57,19 +61,50 @@ const int kAppendWholeFile = -1;
// Constants for the Media Source config change tests.
const int kAppendTimeSec = 1;
const int kAppendTimeMs = kAppendTimeSec * 1000;
-const int k320WebMFileDurationMs = 2737;
-const int k640WebMFileDurationMs = 2763;
-const int kOpusEndTrimmingWebMFileDurationMs = 2771;
-const int kVP9WebMFileDurationMs = 2735;
-const int kVP8AWebMFileDurationMs = 2700;
+const int k320WebMFileDurationMs = 2736;
+const int k640WebMFileDurationMs = 2749;
+const int kOpusEndTrimmingWebMFileDurationMs = 2741;
+const int kVP9WebMFileDurationMs = 2736;
+const int kVP8AWebMFileDurationMs = 2733;
#if defined(USE_PROPRIETARY_CODECS)
const int k640IsoFileDurationMs = 2737;
const int k640IsoCencFileDurationMs = 2736;
const int k1280IsoFileDurationMs = 2736;
-const int k1280IsoAVC3FileDurationMs = 2735;
+const int k1280IsoAVC3FileDurationMs = 2736;
#endif // defined(USE_PROPRIETARY_CODECS)
+// Returns the timeline offset for bear-320x240-live.webm.
+static base::Time kLiveTimelineOffset() {
+ // The file contains the following UTC timeline offset:
+ // 2012-11-10 12:34:56.789123456
+ // Since base::Time only has a resolution of microseconds,
+ // construct a base::Time for 2012-11-10 12:34:56.789123.
+ base::Time::Exploded exploded_time;
+ exploded_time.year = 2012;
+ exploded_time.month = 11;
+ exploded_time.day_of_month = 10;
+ exploded_time.hour = 12;
+ exploded_time.minute = 34;
+ exploded_time.second = 56;
+ exploded_time.millisecond = 789;
+ base::Time timeline_offset = base::Time::FromUTCExploded(exploded_time);
+
+ timeline_offset += base::TimeDelta::FromMicroseconds(123);
+
+ return timeline_offset;
+}
+
+// FFmpeg only supports a time resolution of seconds, so this helper
+// function truncates a base::Time to seconds resolution.
+static base::Time TruncateToFFmpegTimeResolution(base::Time t) {
+ base::Time::Exploded exploded_time;
+ t.UTCExplode(&exploded_time);
+ exploded_time.millisecond = 0;
+
+ return base::Time::FromUTCExploded(exploded_time);
+}
+
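Taken together, the two helpers pin down the live-playback expectations below; the same instant survives at three resolutions:

    // container (WebM DateUTC): 2012-11-10 12:34:56.789123456  (nanoseconds)
    // base::Time              : 2012-11-10 12:34:56.789123     (microseconds)
    // FFmpeg-reported offset  : 2012-11-10 12:34:56            (whole seconds)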
// Note: Tests using this class only exercise the DecryptingDemuxerStream path.
// They do not exercise the Decrypting{Audio|Video}Decoder path.
class FakeEncryptedMedia {
@@ -79,21 +114,19 @@ class FakeEncryptedMedia {
public:
virtual ~AppBase() {}
- virtual void OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) = 0;
-
- virtual void OnSessionMessage(uint32 session_id,
+ virtual void OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& destination_url) = 0;
+ const GURL& destination_url) = 0;
- virtual void OnSessionReady(uint32 session_id) = 0;
+ virtual void OnSessionReady(const std::string& web_session_id) = 0;
- virtual void OnSessionClosed(uint32 session_id) = 0;
+ virtual void OnSessionClosed(const std::string& web_session_id) = 0;
// Errors are not expected unless overridden.
- virtual void OnSessionError(uint32 session_id,
- MediaKeys::KeyError error_code,
- int system_code) {
+ virtual void OnSessionError(const std::string& web_session_id,
+ const std::string& error_name,
+ uint32 system_code,
+ const std::string& error_message) {
FAIL() << "Unexpected Key Error";
}
@@ -103,15 +136,9 @@ class FakeEncryptedMedia {
};
FakeEncryptedMedia(AppBase* app)
- : decryptor_(base::Bind(&FakeEncryptedMedia::OnSessionCreated,
- base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::OnSessionMessage,
- base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::OnSessionReady,
+ : decryptor_(base::Bind(&FakeEncryptedMedia::OnSessionMessage,
base::Unretained(this)),
base::Bind(&FakeEncryptedMedia::OnSessionClosed,
- base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::OnSessionError,
base::Unretained(this))),
app_(app) {}
@@ -120,28 +147,26 @@ class FakeEncryptedMedia {
}
// Callbacks for firing session events. Delegate to |app_|.
- void OnSessionCreated(uint32 session_id, const std::string& web_session_id) {
- app_->OnSessionCreated(session_id, web_session_id);
- }
-
- void OnSessionMessage(uint32 session_id,
+ void OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& destination_url) {
- app_->OnSessionMessage(session_id, message, destination_url);
+ const GURL& destination_url) {
+ app_->OnSessionMessage(web_session_id, message, destination_url);
}
- void OnSessionReady(uint32 session_id) {
- app_->OnSessionReady(session_id);
+ void OnSessionReady(const std::string& web_session_id) {
+ app_->OnSessionReady(web_session_id);
}
- void OnSessionClosed(uint32 session_id) {
- app_->OnSessionClosed(session_id);
+ void OnSessionClosed(const std::string& web_session_id) {
+ app_->OnSessionClosed(web_session_id);
}
- void OnSessionError(uint32 session_id,
- MediaKeys::KeyError error_code,
- int system_code) {
- app_->OnSessionError(session_id, error_code, system_code);
+ void OnSessionError(const std::string& web_session_id,
+ const std::string& error_name,
+ uint32 system_code,
+ const std::string& error_message) {
+ app_->OnSessionError(
+ web_session_id, error_name, system_code, error_message);
}
void NeedKey(const std::string& type,
@@ -154,44 +179,80 @@ class FakeEncryptedMedia {
scoped_ptr<AppBase> app_;
};
+enum PromiseResult { RESOLVED, REJECTED };
+
// Provides |kSecretKey| in response to needkey.
class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
public:
- KeyProvidingApp() : current_session_id_(0) {}
+ KeyProvidingApp() {}
- virtual void OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) OVERRIDE {
- EXPECT_GT(session_id, 0u);
- EXPECT_FALSE(web_session_id.empty());
+ void OnResolveWithSession(PromiseResult expected,
+ const std::string& web_session_id) {
+ EXPECT_EQ(expected, RESOLVED);
+ EXPECT_GT(web_session_id.length(), 0ul);
+ current_session_id_ = web_session_id;
+ }
+
+ void OnResolve(PromiseResult expected) {
+ EXPECT_EQ(expected, RESOLVED);
+ }
+
+ void OnReject(PromiseResult expected,
+ media::MediaKeys::Exception exception_code,
+ uint32 system_code,
+ const std::string& error_message) {
+ EXPECT_EQ(expected, REJECTED);
+ }
+
+ scoped_ptr<SimpleCdmPromise> CreatePromise(PromiseResult expected) {
+ scoped_ptr<media::SimpleCdmPromise> promise(new media::SimpleCdmPromise(
+ base::Bind(
+ &KeyProvidingApp::OnResolve, base::Unretained(this), expected),
+ base::Bind(
+ &KeyProvidingApp::OnReject, base::Unretained(this), expected)));
+ return promise.Pass();
+ }
+
+ scoped_ptr<NewSessionCdmPromise> CreateSessionPromise(
+ PromiseResult expected) {
+ scoped_ptr<media::NewSessionCdmPromise> promise(
+ new media::NewSessionCdmPromise(
+ base::Bind(&KeyProvidingApp::OnResolveWithSession,
+ base::Unretained(this),
+ expected),
+ base::Bind(
+ &KeyProvidingApp::OnReject, base::Unretained(this), expected)));
+ return promise.Pass();
}
- virtual void OnSessionMessage(uint32 session_id,
+ virtual void OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& default_url) OVERRIDE {
- EXPECT_GT(session_id, 0u);
+ const GURL& destination_url) OVERRIDE {
+ EXPECT_FALSE(web_session_id.empty());
EXPECT_FALSE(message.empty());
-
- current_session_id_ = session_id;
+ EXPECT_EQ(current_session_id_, web_session_id);
}
- virtual void OnSessionReady(uint32 session_id) OVERRIDE {
- EXPECT_GT(session_id, 0u);
+ virtual void OnSessionReady(const std::string& web_session_id) OVERRIDE {
+ EXPECT_EQ(current_session_id_, web_session_id);
}
- virtual void OnSessionClosed(uint32 session_id) OVERRIDE {
- EXPECT_GT(session_id, 0u);
+ virtual void OnSessionClosed(const std::string& web_session_id) OVERRIDE {
+ EXPECT_EQ(current_session_id_, web_session_id);
}
virtual void NeedKey(const std::string& type,
const std::vector<uint8>& init_data,
AesDecryptor* decryptor) OVERRIDE {
- if (current_session_id_ == 0u) {
- EXPECT_TRUE(
- decryptor->CreateSession(12, type, kInitData, arraysize(kInitData)));
+ if (current_session_id_.empty()) {
+ decryptor->CreateSession(type,
+ kInitData,
+ arraysize(kInitData),
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
+ EXPECT_FALSE(current_session_id_.empty());
}
- EXPECT_EQ(current_session_id_, 12u);
-
// Clear Key really needs the key ID in |init_data|. For WebM, they are the
// same, but this is not the case for ISO CENC. Therefore, provide the
// correct key ID.
@@ -207,36 +268,103 @@ class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
kSecretKey, arraysize(kSecretKey), key_id, key_id_length);
decryptor->UpdateSession(current_session_id_,
reinterpret_cast<const uint8*>(jwk.data()),
- jwk.size());
+ jwk.size(),
+ CreatePromise(RESOLVED));
}
- uint32 current_session_id_;
+ std::string current_session_id_;
};
-// Ignores needkey and does not perform a license request
-class NoResponseApp : public FakeEncryptedMedia::AppBase {
+class RotatingKeyProvidingApp : public KeyProvidingApp {
public:
- virtual void OnSessionCreated(uint32 session_id,
- const std::string& web_session_id) OVERRIDE {
- EXPECT_GT(session_id, 0u);
- EXPECT_FALSE(web_session_id.empty());
+ RotatingKeyProvidingApp() : num_distinct_need_key_calls_(0) {}
+ virtual ~RotatingKeyProvidingApp() {
+ // Expect that NeedKey is fired multiple times with different |init_data|.
+ EXPECT_GT(num_distinct_need_key_calls_, 1u);
}
- virtual void OnSessionMessage(uint32 session_id,
+ virtual void NeedKey(const std::string& type,
+ const std::vector<uint8>& init_data,
+ AesDecryptor* decryptor) OVERRIDE {
+ // Skip the request if the |init_data| has been seen.
+ if (init_data == prev_init_data_)
+ return;
+ prev_init_data_ = init_data;
+ ++num_distinct_need_key_calls_;
+
+ decryptor->CreateSession(type,
+ vector_as_array(&init_data),
+ init_data.size(),
+ MediaKeys::TEMPORARY_SESSION,
+ CreateSessionPromise(RESOLVED));
+
+ std::vector<uint8> key_id;
+ std::vector<uint8> key;
+ EXPECT_TRUE(GetKeyAndKeyId(init_data, &key, &key_id));
+
+ // Convert key into a JSON structure and then add it.
+ std::string jwk = GenerateJWKSet(vector_as_array(&key),
+ key.size(),
+ vector_as_array(&key_id),
+ key_id.size());
+ decryptor->UpdateSession(current_session_id_,
+ reinterpret_cast<const uint8*>(jwk.data()),
+ jwk.size(),
+ CreatePromise(RESOLVED));
+ }
+
+ private:
+ bool GetKeyAndKeyId(std::vector<uint8> init_data,
+ std::vector<uint8>* key,
+ std::vector<uint8>* key_id) {
+ // For WebM, init_data is key_id; for ISO CENC, init_data should contain
+ // the key_id. We assume key_id is at the end of init_data here (that is
+ // a reasonable assumption only for WebM and clear key ISO CENC).
+ DCHECK_GE(init_data.size(), arraysize(kKeyId));
+ std::vector<uint8> key_id_from_init_data(
+ init_data.end() - arraysize(kKeyId), init_data.end());
+
+ key->assign(kSecretKey, kSecretKey + arraysize(kSecretKey));
+ key_id->assign(kKeyId, kKeyId + arraysize(kKeyId));
+
+ // The Key and KeyId for this testing key provider are created by left
+ // rotating kSecretKey and kKeyId. Note that this implementation is only
+ // intended for testing purposes. The actual key rotation algorithm can be
+ // much more complicated.
+ // Find the rotation position from |key_id_from_init_data| and apply it to
+ // |key|.
+ for (size_t pos = 0; pos < arraysize(kKeyId); ++pos) {
+ std::rotate(key_id->begin(), key_id->begin() + pos, key_id->end());
+ if (*key_id == key_id_from_init_data) {
+ std::rotate(key->begin(), key->begin() + pos, key->end());
+ return true;
+ }
+ }
+ return false;
+ }
+
+ std::vector<uint8> prev_init_data_;
+ uint32 num_distinct_need_key_calls_;
+};
+
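To make the rotation search above concrete, here is a simplified, self-contained variant that tries each single left-rotation of the key ID (the test walks the same space cumulatively instead); the inputs would be hypothetical stand-ins for kKeyId/kSecretKey:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Finds the left-rotation r mapping |id| onto |target| and applies the
    // same r to |key|. Simplified sketch, not the exact loop used above.
    static bool FindRotation(const std::vector<uint8_t>& id,
                             const std::vector<uint8_t>& target,
                             std::vector<uint8_t>* key) {
      for (size_t r = 0; r < id.size(); ++r) {
        std::vector<uint8_t> rotated(id);
        std::rotate(rotated.begin(), rotated.begin() + r, rotated.end());
        if (rotated == target) {
          std::rotate(key->begin(), key->begin() + r, key->end());
          return true;
        }
      }
      return false;
    }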
+// Ignores needkey and does not perform a license request.
+class NoResponseApp : public FakeEncryptedMedia::AppBase {
+ public:
+ virtual void OnSessionMessage(const std::string& web_session_id,
const std::vector<uint8>& message,
- const std::string& default_url) OVERRIDE {
- EXPECT_GT(session_id, 0u);
+ const GURL& default_url) OVERRIDE {
+ EXPECT_FALSE(web_session_id.empty());
EXPECT_FALSE(message.empty());
- FAIL() << "Unexpected KeyMessage";
+ FAIL() << "Unexpected Message";
}
- virtual void OnSessionReady(uint32 session_id) OVERRIDE {
- EXPECT_GT(session_id, 0u);
+ virtual void OnSessionReady(const std::string& web_session_id) OVERRIDE {
+ EXPECT_FALSE(web_session_id.empty());
FAIL() << "Unexpected Ready";
}
- virtual void OnSessionClosed(uint32 session_id) OVERRIDE {
- EXPECT_GT(session_id, 0u);
+ virtual void OnSessionClosed(const std::string& web_session_id) OVERRIDE {
+ EXPECT_FALSE(web_session_id.empty());
FAIL() << "Unexpected Closed";
}
@@ -250,18 +378,19 @@ class NoResponseApp : public FakeEncryptedMedia::AppBase {
// Media Source API.
class MockMediaSource {
public:
- MockMediaSource(const std::string& filename, const std::string& mimetype,
+ MockMediaSource(const std::string& filename,
+ const std::string& mimetype,
int initial_append_size)
: file_path_(GetTestDataFilePath(filename)),
current_position_(0),
initial_append_size_(initial_append_size),
mimetype_(mimetype),
chunk_demuxer_(new ChunkDemuxer(
- base::Bind(&MockMediaSource::DemuxerOpened,
- base::Unretained(this)),
+ base::Bind(&MockMediaSource::DemuxerOpened, base::Unretained(this)),
base::Bind(&MockMediaSource::DemuxerNeedKey,
base::Unretained(this)),
- LogCB())),
+ LogCB(),
+ true)),
owned_chunk_demuxer_(chunk_demuxer_) {
file_data_ = ReadTestDataFile(filename);
@@ -284,7 +413,9 @@ class MockMediaSource {
void Seek(base::TimeDelta seek_time, int new_position, int seek_append_size) {
chunk_demuxer_->StartWaitingForSeek(seek_time);
- chunk_demuxer_->Abort(kSourceId);
+ chunk_demuxer_->Abort(
+ kSourceId,
+ base::TimeDelta(), kInfiniteDuration(), &last_timestamp_offset_);
DCHECK_GE(new_position, 0);
DCHECK_LT(new_position, file_data_->data_size());
@@ -297,16 +428,36 @@ class MockMediaSource {
DCHECK(chunk_demuxer_);
DCHECK_LT(current_position_, file_data_->data_size());
DCHECK_LE(current_position_ + size, file_data_->data_size());
+
chunk_demuxer_->AppendData(
- kSourceId, file_data_->data() + current_position_, size);
+ kSourceId, file_data_->data() + current_position_, size,
+ base::TimeDelta(), kInfiniteDuration(), &last_timestamp_offset_);
current_position_ += size;
}
- void AppendAtTime(const base::TimeDelta& timestampOffset,
- const uint8* pData, int size) {
- CHECK(chunk_demuxer_->SetTimestampOffset(kSourceId, timestampOffset));
- chunk_demuxer_->AppendData(kSourceId, pData, size);
- CHECK(chunk_demuxer_->SetTimestampOffset(kSourceId, base::TimeDelta()));
+ void AppendAtTime(base::TimeDelta timestamp_offset,
+ const uint8* pData,
+ int size) {
+ CHECK(!chunk_demuxer_->IsParsingMediaSegment(kSourceId));
+ chunk_demuxer_->AppendData(kSourceId, pData, size,
+ base::TimeDelta(), kInfiniteDuration(),
+ &timestamp_offset);
+ last_timestamp_offset_ = timestamp_offset;
+ }
+
+ void AppendAtTimeWithWindow(base::TimeDelta timestamp_offset,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ const uint8* pData,
+ int size) {
+ CHECK(!chunk_demuxer_->IsParsingMediaSegment(kSourceId));
+ chunk_demuxer_->AppendData(kSourceId,
+ pData,
+ size,
+ append_window_start,
+ append_window_end,
+ &timestamp_offset);
+ last_timestamp_offset_ = timestamp_offset;
}
void EndOfStream() {
@@ -352,6 +503,7 @@ class MockMediaSource {
}
CHECK_EQ(chunk_demuxer_->AddId(kSourceId, type, codecs), ChunkDemuxer::kOk);
+
AppendData(initial_append_size_);
}
@@ -362,6 +514,10 @@ class MockMediaSource {
need_key_cb_.Run(type, init_data);
}
+ base::TimeDelta last_timestamp_offset() const {
+ return last_timestamp_offset_;
+ }
+
private:
base::FilePath file_path_;
scoped_refptr<DecoderBuffer> file_data_;
@@ -371,6 +527,7 @@ class MockMediaSource {
ChunkDemuxer* chunk_demuxer_;
scoped_ptr<Demuxer> owned_chunk_demuxer_;
Demuxer::NeedKeyCB need_key_cb_;
+ base::TimeDelta last_timestamp_offset_;
};
class PipelineIntegrationTest
@@ -378,16 +535,17 @@ class PipelineIntegrationTest
public PipelineIntegrationTestBase {
public:
void StartPipelineWithMediaSource(MockMediaSource* source) {
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kHaveMetadata))
- .Times(AtMost(1));
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kPrerollCompleted))
- .Times(AtMost(1));
+ EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ .WillRepeatedly(SaveArg<0>(&metadata_));
+ EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(source->GetDemuxer(), NULL),
base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTest::OnError, base::Unretained(this)),
QuitOnStatusCB(PIPELINE_OK),
- base::Bind(&PipelineIntegrationTest::OnBufferingState,
+ base::Bind(&PipelineIntegrationTest::OnMetadata,
+ base::Unretained(this)),
+ base::Bind(&PipelineIntegrationTest::OnPrerollCompleted,
base::Unretained(this)),
base::Closure());
@@ -402,17 +560,18 @@ class PipelineIntegrationTest
void StartPipelineWithEncryptedMedia(
MockMediaSource* source,
FakeEncryptedMedia* encrypted_media) {
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kHaveMetadata))
- .Times(AtMost(1));
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kPrerollCompleted))
- .Times(AtMost(1));
+ EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ .WillRepeatedly(SaveArg<0>(&metadata_));
+ EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(source->GetDemuxer(),
encrypted_media->decryptor()),
base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTest::OnError, base::Unretained(this)),
QuitOnStatusCB(PIPELINE_OK),
- base::Bind(&PipelineIntegrationTest::OnBufferingState,
+ base::Bind(&PipelineIntegrationTest::OnMetadata,
+ base::Unretained(this)),
+ base::Bind(&PipelineIntegrationTest::OnPrerollCompleted,
base::Unretained(this)),
base::Closure());
@@ -462,6 +621,14 @@ TEST_F(PipelineIntegrationTest, BasicPlayback) {
ASSERT_TRUE(WaitUntilOnEnded());
}
+TEST_F(PipelineIntegrationTest, BasicPlaybackOpusOgg) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear-opus.ogg"), PIPELINE_OK));
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+}
+
TEST_F(PipelineIntegrationTest, BasicPlaybackHashed) {
ASSERT_TRUE(Start(
GetTestDataFilePath("bear-320x240.webm"), PIPELINE_OK, kHashed));
@@ -472,6 +639,24 @@ TEST_F(PipelineIntegrationTest, BasicPlaybackHashed) {
EXPECT_EQ("f0be120a90a811506777c99a2cdf7cc1", GetVideoHash());
EXPECT_EQ("-3.59,-2.06,-0.43,2.15,0.77,-0.95,", GetAudioHash());
+ EXPECT_TRUE(demuxer_->GetTimelineOffset().is_null());
+}
+
+TEST_F(PipelineIntegrationTest, BasicPlaybackLive) {
+ ASSERT_TRUE(Start(
+ GetTestDataFilePath("bear-320x240-live.webm"), PIPELINE_OK, kHashed));
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+
+ EXPECT_EQ("f0be120a90a811506777c99a2cdf7cc1", GetVideoHash());
+ EXPECT_EQ("-3.59,-2.06,-0.43,2.15,0.77,-0.95,", GetAudioHash());
+
+ // TODO: Fix FFmpeg code to return higher-resolution time values so
+ // we don't have to truncate our expectations here.
+ EXPECT_EQ(TruncateToFFmpegTimeResolution(kLiveTimelineOffset()),
+ demuxer_->GetTimelineOffset());
}
TEST_F(PipelineIntegrationTest, F32PlaybackHashed) {
@@ -510,15 +695,34 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource) {
Play();
ASSERT_TRUE(WaitUntilOnEnded());
+
+ EXPECT_TRUE(demuxer_->GetTimelineOffset().is_null());
source.Abort();
Stop();
}
-// TODO(fgalligan): Enable after new vp9 files are landed.
-// http://crbug.com/259116
-TEST_F(PipelineIntegrationTest,
- DISABLED_BasicPlayback_MediaSource_VideoOnly_VP9_WebM) {
- MockMediaSource source("bear-vp9.webm", kWebMVP9, 32393);
+TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Live) {
+ MockMediaSource source("bear-320x240-live.webm", kWebM, 219221);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(k320WebMFileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+
+ EXPECT_EQ(kLiveTimelineOffset(),
+ demuxer_->GetTimelineOffset());
+ source.Abort();
+ Stop();
+}
+
+TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP9_WebM) {
+ MockMediaSource source("bear-vp9.webm", kWebMVP9, 67504);
StartPipelineWithMediaSource(&source);
source.EndOfStream();
@@ -535,7 +739,6 @@ TEST_F(PipelineIntegrationTest,
}
TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
MockMediaSource source("bear-vp8a.webm", kVideoOnlyWebM, kAppendWholeFile);
StartPipelineWithMediaSource(&source);
source.EndOfStream();
@@ -553,7 +756,6 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VP8A_WebM) {
}
TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
kAppendWholeFile);
StartPipelineWithMediaSource(&source);
@@ -572,12 +774,10 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
// Flaky. http://crbug.com/304776
TEST_F(PipelineIntegrationTest, DISABLED_MediaSource_Opus_Seeking_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
kAppendWholeFile);
StartHashedPipelineWithMediaSource(&source);
-
EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
@@ -711,16 +911,110 @@ TEST_F(PipelineIntegrationTest,
}
#if defined(USE_PROPRIETARY_CODECS)
+TEST_F(PipelineIntegrationTest, MediaSource_ADTS) {
+ MockMediaSource source("sfx.adts", kADTS, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(325, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+}
+
+TEST_F(PipelineIntegrationTest, MediaSource_ADTS_TimestampOffset) {
+ MockMediaSource source("sfx.adts", kADTS, kAppendWholeFile);
+ StartHashedPipelineWithMediaSource(&source);
+ EXPECT_EQ(325, source.last_timestamp_offset().InMilliseconds());
+
+ // Trim multiple frames off the beginning of the segment, which will cause
+ // the first decoded frame to be incorrect if preroll isn't implemented.
+ const base::TimeDelta adts_preroll_duration =
+ base::TimeDelta::FromSecondsD(2.5 * 1024 / 44100);
+ const base::TimeDelta append_time =
+ source.last_timestamp_offset() - adts_preroll_duration;
+
+ scoped_refptr<DecoderBuffer> second_file = ReadTestDataFile("sfx.adts");
+ source.AppendAtTimeWithWindow(append_time,
+ append_time + adts_preroll_duration,
+ kInfiniteDuration(),
+ second_file->data(),
+ second_file->data_size());
+ source.EndOfStream();
+
+ EXPECT_EQ(592, source.last_timestamp_offset().InMilliseconds());
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(592, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+
+ // Verify preroll is stripped.
+ EXPECT_EQ("-0.06,0.97,-0.90,-0.70,-0.53,-0.34,", GetAudioHash());
+}
+
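The constants in this test follow from the preroll arithmetic (1024 samples per AAC frame at the file's 44100 Hz rate, both taken from the values above):

    // preroll     = 2.5 * 1024 / 44100 s  ~= 58.05 ms
    // append_time = 325 ms - 58.05 ms     ~= 266.95 ms
    // new end     = 266.95 ms + 325 ms    ~= 592 ms  (the EXPECT_EQ value)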
+TEST_F(PipelineIntegrationTest, BasicPlaybackHashed_MP3) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("sfx.mp3"), PIPELINE_OK, kHashed));
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+
+ // Verify codec delay and preroll are stripped.
+ EXPECT_EQ("3.05,2.87,3.00,3.32,3.58,4.08,", GetAudioHash());
+}
+
TEST_F(PipelineIntegrationTest, MediaSource_MP3) {
MockMediaSource source("sfx.mp3", kMP3, kAppendWholeFile);
- StartPipelineWithMediaSource(&source);
+ StartHashedPipelineWithMediaSource(&source);
source.EndOfStream();
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(313, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
Play();
EXPECT_TRUE(WaitUntilOnEnded());
+
+ // Verify that codec delay was stripped.
+ EXPECT_EQ("1.01,2.71,4.18,4.32,3.04,1.12,", GetAudioHash());
}
+TEST_F(PipelineIntegrationTest, MediaSource_MP3_TimestampOffset) {
+ MockMediaSource source("sfx.mp3", kMP3, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ EXPECT_EQ(313, source.last_timestamp_offset().InMilliseconds());
+
+ // There are 576 silent frames at the start of this mp3. The second append
+ // should trim them off.
+ const base::TimeDelta mp3_preroll_duration =
+ base::TimeDelta::FromSecondsD(576.0 / 44100);
+ const base::TimeDelta append_time =
+ source.last_timestamp_offset() - mp3_preroll_duration;
+
+ scoped_refptr<DecoderBuffer> second_file = ReadTestDataFile("sfx.mp3");
+ source.AppendAtTimeWithWindow(append_time,
+ append_time + mp3_preroll_duration,
+ kInfiniteDuration(),
+ second_file->data(),
+ second_file->data_size());
+ source.EndOfStream();
+
+ EXPECT_EQ(613, source.last_timestamp_offset().InMilliseconds());
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(613, pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+}
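The same bookkeeping checks out here (576 silent frames at 44100 Hz, per the comment above):

    // preroll     = 576 / 44100 s       ~= 13.06 ms
    // append_time = 313 ms - 13.06 ms   ~= 299.94 ms
    // new end     = 299.94 ms + 313 ms  ~= 613 ms  (the EXPECT_EQ value)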
TEST_F(PipelineIntegrationTest, MediaSource_MP3_Icecast) {
MockMediaSource source("icy_sfx.mp3", kMP3, kAppendWholeFile);
@@ -758,8 +1052,8 @@ TEST_F(PipelineIntegrationTest, MediaSource_ConfigChange_MP4) {
TEST_F(PipelineIntegrationTest,
MediaSource_ConfigChange_Encrypted_MP4_CENC_VideoOnly) {
- MockMediaSource source("bear-640x360-v_frag-cenc.mp4",
- kMP4Video, kAppendWholeFile);
+ MockMediaSource source("bear-640x360-v_frag-cenc.mp4", kMP4Video,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -783,6 +1077,33 @@ TEST_F(PipelineIntegrationTest,
Stop();
}
+TEST_F(PipelineIntegrationTest,
+ MediaSource_ConfigChange_Encrypted_MP4_CENC_KeyRotation_VideoOnly) {
+ MockMediaSource source("bear-640x360-v_frag-cenc-key_rotation.mp4", kMP4Video,
+ kAppendWholeFile);
+ FakeEncryptedMedia encrypted_media(new RotatingKeyProvidingApp());
+ StartPipelineWithEncryptedMedia(&source, &encrypted_media);
+
+ scoped_refptr<DecoderBuffer> second_file =
+ ReadTestDataFile("bear-1280x720-v_frag-cenc-key_rotation.mp4");
+
+ source.AppendAtTime(base::TimeDelta::FromSeconds(kAppendTimeSec),
+ second_file->data(), second_file->data_size());
+
+ source.EndOfStream();
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(kAppendTimeMs + k1280IsoFileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ Play();
+
+ EXPECT_TRUE(WaitUntilOnEnded());
+ source.Abort();
+ Stop();
+}
+
// Config changes from clear to encrypted are not currently supported.
// TODO(ddorwin): Figure out why this CHECKs in AppendAtTime().
TEST_F(PipelineIntegrationTest,
@@ -818,8 +1139,8 @@ TEST_F(PipelineIntegrationTest,
// Config changes from encrypted to clear are not currently supported.
TEST_F(PipelineIntegrationTest,
MediaSource_ConfigChange_EncryptedThenClear_MP4_CENC) {
- MockMediaSource source("bear-640x360-v_frag-cenc.mp4",
- kMP4Video, kAppendWholeFile);
+ MockMediaSource source("bear-640x360-v_frag-cenc.mp4", kMP4Video,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -876,8 +1197,8 @@ TEST_F(PipelineIntegrationTest, EncryptedPlayback_WebM) {
}
TEST_F(PipelineIntegrationTest, EncryptedPlayback_ClearStart_WebM) {
- MockMediaSource source("bear-320x240-av_enc-av_clear-1s.webm",
- kWebM, kAppendWholeFile);
+ MockMediaSource source("bear-320x240-av_enc-av_clear-1s.webm", kWebM,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -892,8 +1213,8 @@ TEST_F(PipelineIntegrationTest, EncryptedPlayback_ClearStart_WebM) {
}
TEST_F(PipelineIntegrationTest, EncryptedPlayback_NoEncryptedFrames_WebM) {
- MockMediaSource source("bear-320x240-av_enc-av_clear-all.webm",
- kWebM, kAppendWholeFile);
+ MockMediaSource source("bear-320x240-av_enc-av_clear-all.webm", kWebM,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new NoResponseApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -909,8 +1230,8 @@ TEST_F(PipelineIntegrationTest, EncryptedPlayback_NoEncryptedFrames_WebM) {
#if defined(USE_PROPRIETARY_CODECS)
TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_VideoOnly) {
- MockMediaSource source("bear-1280x720-v_frag-cenc.mp4",
- kMP4Video, kAppendWholeFile);
+ MockMediaSource source("bear-1280x720-v_frag-cenc.mp4", kMP4Video,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -925,8 +1246,8 @@ TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_VideoOnly) {
}
TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_AudioOnly) {
- MockMediaSource source("bear-1280x720-a_frag-cenc.mp4",
- kMP4Audio, kAppendWholeFile);
+ MockMediaSource source("bear-1280x720-a_frag-cenc.mp4", kMP4Audio,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new KeyProvidingApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -942,8 +1263,8 @@ TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_AudioOnly) {
TEST_F(PipelineIntegrationTest,
EncryptedPlayback_NoEncryptedFrames_MP4_CENC_VideoOnly) {
- MockMediaSource source("bear-1280x720-v_frag-cenc_clear-all.mp4",
- kMP4Video, kAppendWholeFile);
+ MockMediaSource source("bear-1280x720-v_frag-cenc_clear-all.mp4", kMP4Video,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new NoResponseApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -959,8 +1280,8 @@ TEST_F(PipelineIntegrationTest,
TEST_F(PipelineIntegrationTest,
EncryptedPlayback_NoEncryptedFrames_MP4_CENC_AudioOnly) {
- MockMediaSource source("bear-1280x720-a_frag-cenc_clear-all.mp4",
- kMP4Audio, kAppendWholeFile);
+ MockMediaSource source("bear-1280x720-a_frag-cenc_clear-all.mp4", kMP4Audio,
+ kAppendWholeFile);
FakeEncryptedMedia encrypted_media(new NoResponseApp());
StartPipelineWithEncryptedMedia(&source, &encrypted_media);
@@ -992,6 +1313,37 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VideoOnly_MP4_AVC3) {
Stop();
}
+TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_KeyRotation_Video) {
+ MockMediaSource source("bear-1280x720-v_frag-cenc-key_rotation.mp4",
+ kMP4Video, kAppendWholeFile);
+ FakeEncryptedMedia encrypted_media(new RotatingKeyProvidingApp());
+ StartPipelineWithEncryptedMedia(&source, &encrypted_media);
+
+ source.EndOfStream();
+ ASSERT_EQ(PIPELINE_OK, pipeline_status_);
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+ source.Abort();
+ Stop();
+}
+
+TEST_F(PipelineIntegrationTest, EncryptedPlayback_MP4_CENC_KeyRotation_Audio) {
+ MockMediaSource source("bear-1280x720-a_frag-cenc-key_rotation.mp4",
+ kMP4Audio, kAppendWholeFile);
+ FakeEncryptedMedia encrypted_media(new RotatingKeyProvidingApp());
+ StartPipelineWithEncryptedMedia(&source, &encrypted_media);
+
+ source.EndOfStream();
+ ASSERT_EQ(PIPELINE_OK, pipeline_status_);
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+ source.Abort();
+ Stop();
+}
#endif
// TODO(acolwell): Fix flakiness http://crbug.com/117921
@@ -1051,7 +1403,7 @@ TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_AudioOnly) {
TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_VideoOnly) {
ASSERT_TRUE(TestSeekDuringRead("bear-320x240-video-only.webm", kVideoOnlyWebM,
32768,
- base::TimeDelta::FromMilliseconds(200),
+ base::TimeDelta::FromMilliseconds(167),
base::TimeDelta::FromMilliseconds(1668),
0x1C896, 65536));
}
@@ -1060,18 +1412,12 @@ TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_VideoOnly) {
TEST_F(PipelineIntegrationTest, BasicPlayback_AudioOnly_Opus_WebM) {
ASSERT_TRUE(Start(GetTestDataFilePath("bear-opus-end-trimming.webm"),
PIPELINE_OK));
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
- EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
- pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
Play();
ASSERT_TRUE(WaitUntilOnEnded());
}
// Verify that VP9 video in WebM containers can be played back.
-// TODO(fgalligan): Enable after new vp9 files are landed.
-// http://crbug.com/259116
-TEST_F(PipelineIntegrationTest, DISABLED_BasicPlayback_VideoOnly_VP9_WebM) {
+TEST_F(PipelineIntegrationTest, BasicPlayback_VideoOnly_VP9_WebM) {
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp9.webm"),
PIPELINE_OK));
Play();
@@ -1080,9 +1426,7 @@ TEST_F(PipelineIntegrationTest, DISABLED_BasicPlayback_VideoOnly_VP9_WebM) {
// Verify that VP9 video and Opus audio in the same WebM container can be played
// back.
-// TODO(fgalligan): Enable after new vp9 files are landed.
-// http://crbug.com/259116
-TEST_F(PipelineIntegrationTest, DISABLED_BasicPlayback_VP9_Opus_WebM) {
+TEST_F(PipelineIntegrationTest, BasicPlayback_VP9_Opus_WebM) {
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp9-opus.webm"),
PIPELINE_OK));
Play();
@@ -1091,7 +1435,6 @@ TEST_F(PipelineIntegrationTest, DISABLED_BasicPlayback_VP9_Opus_WebM) {
// Verify that VP8 video with alpha channel can be played back.
TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a.webm"),
PIPELINE_OK));
Play();
@@ -1101,7 +1444,6 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
// Verify that VP8A video with odd width/height can be played back.
TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_Odd_WebM) {
- EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a-odd-dimensions.webm"),
PIPELINE_OK));
Play();
@@ -1118,4 +1460,21 @@ TEST_F(PipelineIntegrationTest,
ASSERT_TRUE(WaitUntilOnEnded());
}
+// Verify that VP9 video with 4:4:4 subsampling can be played back.
+TEST_F(PipelineIntegrationTest, P444_VP9_WebM) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240-P444.webm"),
+ PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+ EXPECT_EQ(last_video_frame_format_, VideoFrame::YV24);
+}
+
+// Verify that videos with an odd frame size play back successfully.
+TEST_F(PipelineIntegrationTest, BasicPlayback_OddVideoSize) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("butterfly-853x480.webm"),
+ PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+}
+
} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test_base.cc b/chromium/media/filters/pipeline_integration_test_base.cc
index 8dce18cc493..c179903ad32 100644
--- a/chromium/media/filters/pipeline_integration_test_base.cc
+++ b/chromium/media/filters/pipeline_integration_test_base.cc
@@ -17,8 +17,10 @@
#include "media/filters/opus_audio_decoder.h"
#include "media/filters/vpx_video_decoder.h"
+using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtMost;
+using ::testing::SaveArg;
namespace media {
@@ -28,13 +30,13 @@ const char kNullAudioHash[] = "0.00,0.00,0.00,0.00,0.00,0.00,";
PipelineIntegrationTestBase::PipelineIntegrationTestBase()
: hashing_enabled_(false),
clockless_playback_(false),
- pipeline_(new Pipeline(message_loop_.message_loop_proxy(),
- new MediaLog())),
+ pipeline_(
+ new Pipeline(message_loop_.message_loop_proxy(), new MediaLog())),
ended_(false),
pipeline_status_(PIPELINE_OK),
- last_video_frame_format_(VideoFrame::UNKNOWN) {
+ last_video_frame_format_(VideoFrame::UNKNOWN),
+ hardware_config_(AudioParameters(), AudioParameters()) {
base::MD5Init(&md5_context_);
- EXPECT_CALL(*this, OnSetOpaque(true)).Times(AnyNumber());
}
PipelineIntegrationTestBase::~PipelineIntegrationTestBase() {
@@ -102,16 +104,17 @@ void PipelineIntegrationTestBase::OnError(PipelineStatus status) {
bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
PipelineStatus expected_status) {
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kHaveMetadata))
- .Times(AtMost(1));
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kPrerollCompleted))
- .Times(AtMost(1));
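+  // SaveArg<0> stashes the PipelineMetadata argument in |metadata_| so tests
+  // can inspect the stream information captured during startup.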
+ EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ .WillRepeatedly(SaveArg<0>(&metadata_));
+ EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(file_path, NULL),
base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnError, base::Unretained(this)),
QuitOnStatusCB(expected_status),
- base::Bind(&PipelineIntegrationTestBase::OnBufferingState,
+ base::Bind(&PipelineIntegrationTestBase::OnMetadata,
+ base::Unretained(this)),
+ base::Bind(&PipelineIntegrationTestBase::OnPrerollCompleted,
base::Unretained(this)),
base::Closure());
message_loop_.Run();
@@ -135,17 +138,18 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path) {
bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
Decryptor* decryptor) {
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kHaveMetadata))
- .Times(AtMost(1));
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kPrerollCompleted))
- .Times(AtMost(1));
+ EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ .WillRepeatedly(SaveArg<0>(&metadata_));
+ EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(file_path, decryptor),
base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnError, base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnStatusCallback,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnBufferingState,
+ base::Bind(&PipelineIntegrationTestBase::OnMetadata,
+ base::Unretained(this)),
+ base::Bind(&PipelineIntegrationTestBase::OnPrerollCompleted,
base::Unretained(this)),
base::Closure());
message_loop_.Run();
@@ -163,7 +167,7 @@ void PipelineIntegrationTestBase::Pause() {
bool PipelineIntegrationTestBase::Seek(base::TimeDelta seek_time) {
ended_ = false;
- EXPECT_CALL(*this, OnBufferingState(Pipeline::kPrerollCompleted));
+ EXPECT_CALL(*this, OnPrerollCompleted());
pipeline_->Seek(seek_time, QuitOnStatusCB(PIPELINE_OK));
message_loop_.Run();
return (pipeline_status_ == PIPELINE_OK);
@@ -211,7 +215,8 @@ PipelineIntegrationTestBase::CreateFilterCollection(
const base::FilePath& file_path,
Decryptor* decryptor) {
FileDataSource* file_data_source = new FileDataSource();
- CHECK(file_data_source->Initialize(file_path));
+ CHECK(file_data_source->Initialize(file_path)) << "Is " << file_path.value()
+ << " missing?";
data_source_.reset(file_data_source);
Demuxer::NeedKeyCB need_key_cb = base::Bind(
@@ -248,8 +253,6 @@ PipelineIntegrationTestBase::CreateFilterCollection(
decryptor),
base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnSetOpaque,
- base::Unretained(this)),
false));
collection->SetVideoRenderer(renderer.Pass());
@@ -261,10 +264,17 @@ PipelineIntegrationTestBase::CreateFilterCollection(
ScopedVector<AudioDecoder> audio_decoders;
audio_decoders.push_back(
- new FFmpegAudioDecoder(message_loop_.message_loop_proxy()));
+ new FFmpegAudioDecoder(message_loop_.message_loop_proxy(), LogCB()));
audio_decoders.push_back(
new OpusAudioDecoder(message_loop_.message_loop_proxy()));
+ AudioParameters out_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO,
+ 44100,
+ 16,
+ 512);
+ hardware_config_.UpdateOutputConfig(out_params);
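+  // With 512 frames per buffer at a 44100 Hz sample rate, each sink callback
+  // covers 512 / 44100 s of audio, roughly 11.6 ms.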
+
AudioRendererImpl* audio_renderer_impl = new AudioRendererImpl(
message_loop_.message_loop_proxy(),
(clockless_playback_)
@@ -273,12 +283,10 @@ PipelineIntegrationTestBase::CreateFilterCollection(
audio_decoders.Pass(),
base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
base::Unretained(this),
- decryptor));
- // Disable underflow if hashing is enabled.
- if (hashing_enabled_) {
+ decryptor),
+ &hardware_config_);
+ if (hashing_enabled_)
audio_sink_->StartAudioHashForTesting();
- audio_renderer_impl->DisableUnderflowForTesting();
- }
scoped_ptr<AudioRenderer> audio_renderer(audio_renderer_impl);
collection->SetAudioRenderer(audio_renderer.Pass());
diff --git a/chromium/media/filters/pipeline_integration_test_base.h b/chromium/media/filters/pipeline_integration_test_base.h
index d162d0b3187..10cf2620a86 100644
--- a/chromium/media/filters/pipeline_integration_test_base.h
+++ b/chromium/media/filters/pipeline_integration_test_base.h
@@ -9,6 +9,7 @@
#include "base/message_loop/message_loop.h"
#include "media/audio/clockless_audio_sink.h"
#include "media/audio/null_audio_sink.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/demuxer.h"
#include "media/base/filter_collection.h"
#include "media/base/media_keys.h"
@@ -109,6 +110,8 @@ class PipelineIntegrationTestBase {
Demuxer::NeedKeyCB need_key_cb_;
VideoFrame::Format last_video_frame_format_;
DummyTickClock dummy_clock_;
+ AudioHardwareConfig hardware_config_;
+ PipelineMetadata metadata_;
void OnStatusCallbackChecked(PipelineStatus expected_status,
PipelineStatus status);
@@ -130,8 +133,8 @@ class PipelineIntegrationTestBase {
const DecryptorReadyCB& decryptor_ready_cb);
void OnVideoRendererPaint(const scoped_refptr<VideoFrame>& frame);
- MOCK_METHOD1(OnSetOpaque, void(bool));
- MOCK_METHOD1(OnBufferingState, void(Pipeline::BufferingState));
+ MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
+ MOCK_METHOD0(OnPrerollCompleted, void());
};
} // namespace media
diff --git a/chromium/media/filters/skcanvas_video_renderer.cc b/chromium/media/filters/skcanvas_video_renderer.cc
index ec3e92fe754..0f5dd0ecdbb 100644
--- a/chromium/media/filters/skcanvas_video_renderer.cc
+++ b/chromium/media/filters/skcanvas_video_renderer.cc
@@ -7,180 +7,36 @@
#include "base/logging.h"
#include "media/base/video_frame.h"
#include "media/base/yuv_convert.h"
+#include "third_party/libyuv/include/libyuv.h"
#include "third_party/skia/include/core/SkCanvas.h"
-#include "third_party/skia/include/core/SkDevice.h"
+
+// Skia's internal format depends on the platform. On Android it is ABGR; on
+// other platforms it is ARGB.
+#if SK_B32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_R32_SHIFT == 16 && \
+ SK_A32_SHIFT == 24
+#define LIBYUV_I420_TO_ARGB libyuv::I420ToARGB
+#define LIBYUV_I422_TO_ARGB libyuv::I422ToARGB
+#elif SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
+ SK_A32_SHIFT == 24
+#define LIBYUV_I420_TO_ARGB libyuv::I420ToABGR
+#define LIBYUV_I422_TO_ARGB libyuv::I422ToABGR
+#else
+#error Unexpected Skia ARGB_8888 layout!
+#endif
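
For orientation: libyuv names formats by their in-memory byte order read as a
little-endian word, which is exactly what the shift checks above encode. A
compile-time restatement of the two accepted layouts (an illustrative sketch,
not part of the patch; COMPILE_ASSERT is Chromium's static assert of this era):

// SK_B32_SHIFT == 0 puts blue in the low byte -> bytes B,G,R,A in memory,
// which libyuv calls "ARGB". SK_R32_SHIFT == 0 puts red in the low byte ->
// bytes R,G,B,A, libyuv's "ABGR". Alpha sits in the high byte either way.
COMPILE_ASSERT(SK_A32_SHIFT == 24, alpha_must_occupy_the_high_byte);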
namespace media {
-static bool IsEitherYV12OrYV16(media::VideoFrame::Format format) {
+static bool IsYUV(media::VideoFrame::Format format) {
return format == media::VideoFrame::YV12 ||
format == media::VideoFrame::YV16 ||
- format == media::VideoFrame::YV12J;
-}
-
-static bool IsEitherYV12OrYV16OrNative(media::VideoFrame::Format format) {
- return IsEitherYV12OrYV16(format) ||
- format == media::VideoFrame::NATIVE_TEXTURE;
-}
-
-static bool IsEitherYV12OrYV12AOrYV16(media::VideoFrame::Format format) {
- return IsEitherYV12OrYV16(format) ||
- format == media::VideoFrame::YV12A;
+ format == media::VideoFrame::I420 ||
+ format == media::VideoFrame::YV12A ||
+ format == media::VideoFrame::YV12J ||
+ format == media::VideoFrame::YV24;
}
-static bool IsEitherYV12OrYV12AOrYV16OrNative(
- media::VideoFrame::Format format) {
- return IsEitherYV12OrYV16OrNative(format) ||
- format == media::VideoFrame::YV12A;
-}
-
-// CanFastPaint is a helper method to determine the conditions for fast
-// painting. The conditions are:
-// 1. No skew in canvas matrix.
-// 2. No flipping nor mirroring.
-// 3. Canvas has pixel format ARGB8888.
-// 4. Canvas is opaque.
-// 5. Frame format is YV12 or YV16.
-//
-// TODO(hclam): The fast paint method should support flipping and mirroring.
-// Disable the flipping and mirroring checks once we have it.
-static bool CanFastPaint(SkCanvas* canvas, uint8 alpha,
- media::VideoFrame::Format format) {
- if (alpha != 0xFF || !IsEitherYV12OrYV16(format))
- return false;
-
- const SkMatrix& total_matrix = canvas->getTotalMatrix();
- // Perform the following checks here:
- // 1. Check for skewing factors of the transformation matrix. They should be
- // zero.
- // 2. Check for mirroring and flipping. Make sure they are greater than zero.
- if (SkScalarNearlyZero(total_matrix.getSkewX()) &&
- SkScalarNearlyZero(total_matrix.getSkewY()) &&
- total_matrix.getScaleX() > 0 &&
- total_matrix.getScaleY() > 0) {
- SkBaseDevice* device = canvas->getDevice();
- const SkBitmap::Config config = device->config();
-
- if (config == SkBitmap::kARGB_8888_Config && device->isOpaque()) {
- return true;
- }
- }
-
- return false;
-}
-
-// Fast paint does YUV => RGB, scaling, blitting all in one step into the
-// canvas. It's not always safe and appropriate to perform fast paint.
-// CanFastPaint() is used to determine the conditions.
-static void FastPaint(
- const scoped_refptr<media::VideoFrame>& video_frame,
- SkCanvas* canvas,
- const SkRect& dest_rect) {
- DCHECK(IsEitherYV12OrYV16(video_frame->format())) << video_frame->format();
- DCHECK_EQ(video_frame->stride(media::VideoFrame::kUPlane),
- video_frame->stride(media::VideoFrame::kVPlane));
-
- const SkBitmap& bitmap = canvas->getDevice()->accessBitmap(true);
- media::YUVType yuv_type = media::YV16;
- int y_shift = 0;
- if (video_frame->format() == media::VideoFrame::YV12 ||
- video_frame->format() == media::VideoFrame::YV12A) {
- yuv_type = media::YV12;
- y_shift = 1;
- }
-
- if (video_frame->format() == media::VideoFrame::YV12J) {
- yuv_type = media::YV12;
- y_shift = 1;
- }
-
- // Transform the destination rectangle to local coordinates.
- const SkMatrix& local_matrix = canvas->getTotalMatrix();
- SkRect local_dest_rect;
- local_matrix.mapRect(&local_dest_rect, dest_rect);
-
- // After projecting the destination rectangle to local coordinates, round
- // the projected rectangle to integer values, this will give us pixel values
- // of the rectangle.
- SkIRect local_dest_irect, local_dest_irect_saved;
- local_dest_rect.round(&local_dest_irect);
- local_dest_rect.round(&local_dest_irect_saved);
-
- // No point painting if the destination rect doesn't intersect with the
- // clip rect.
- if (!local_dest_irect.intersect(canvas->getTotalClip().getBounds()))
- return;
-
- // At this point |local_dest_irect| contains the rect that we should draw
- // to within the clipping rect.
-
- // Calculate the address for the top left corner of destination rect in
- // the canvas that we will draw to. The address is obtained by the base
- // address of the canvas shifted by "left" and "top" of the rect.
- uint8* dest_rect_pointer = static_cast<uint8*>(bitmap.getPixels()) +
- local_dest_irect.fTop * bitmap.rowBytes() +
- local_dest_irect.fLeft * 4;
-
- // Project the clip rect to the original video frame, obtains the
- // dimensions of the projected clip rect, "left" and "top" of the rect.
- // The math here are all integer math so we won't have rounding error and
- // write outside of the canvas.
- // We have the assumptions of dest_rect.width() and dest_rect.height()
- // being non-zero, these are valid assumptions since finding intersection
- // above rejects empty rectangle so we just do a DCHECK here.
- DCHECK_NE(0, dest_rect.width());
- DCHECK_NE(0, dest_rect.height());
- size_t frame_clip_width = local_dest_irect.width() *
- video_frame->visible_rect().width() / local_dest_irect_saved.width();
- size_t frame_clip_height = local_dest_irect.height() *
- video_frame->visible_rect().height() / local_dest_irect_saved.height();
-
- // Project the "left" and "top" of the final destination rect to local
- // coordinates of the video frame, use these values to find the offsets
- // in the video frame to start reading.
- size_t frame_clip_left =
- video_frame->visible_rect().x() +
- (local_dest_irect.fLeft - local_dest_irect_saved.fLeft) *
- video_frame->visible_rect().width() / local_dest_irect_saved.width();
- size_t frame_clip_top =
- video_frame->visible_rect().y() +
- (local_dest_irect.fTop - local_dest_irect_saved.fTop) *
- video_frame->visible_rect().height() / local_dest_irect_saved.height();
-
- // Use the "left" and "top" of the destination rect to locate the offset
- // in Y, U and V planes.
- size_t y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
- frame_clip_top) + frame_clip_left;
-
- // For format YV12, there is one U, V value per 2x2 block.
- // For format YV16, there is one U, V value per 2x1 block.
- size_t uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
- (frame_clip_top >> y_shift)) + (frame_clip_left >> 1);
- uint8* frame_clip_y =
- video_frame->data(media::VideoFrame::kYPlane) + y_offset;
- uint8* frame_clip_u =
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset;
- uint8* frame_clip_v =
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset;
-
- // TODO(hclam): do rotation and mirroring here.
- // TODO(fbarchard): switch filtering based on performance.
- bitmap.lockPixels();
- media::ScaleYUVToRGB32(frame_clip_y,
- frame_clip_u,
- frame_clip_v,
- dest_rect_pointer,
- frame_clip_width,
- frame_clip_height,
- local_dest_irect.width(),
- local_dest_irect.height(),
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
- bitmap.rowBytes(),
- yuv_type,
- media::ROTATE_0,
- media::FILTER_BILINEAR);
- bitmap.unlockPixels();
+static bool IsYUVOrNative(media::VideoFrame::Format format) {
+ return IsYUV(format) || format == media::VideoFrame::NATIVE_TEXTURE;
}
// Converts a VideoFrame containing YUV data to a SkBitmap containing RGB data.
@@ -189,9 +45,9 @@ static void FastPaint(
static void ConvertVideoFrameToBitmap(
const scoped_refptr<media::VideoFrame>& video_frame,
SkBitmap* bitmap) {
- DCHECK(IsEitherYV12OrYV12AOrYV16OrNative(video_frame->format()))
+ DCHECK(IsYUVOrNative(video_frame->format()))
<< video_frame->format();
- if (IsEitherYV12OrYV12AOrYV16(video_frame->format())) {
+ if (IsYUV(video_frame->format())) {
DCHECK_EQ(video_frame->stride(media::VideoFrame::kUPlane),
video_frame->stride(media::VideoFrame::kVPlane));
}
@@ -211,7 +67,7 @@ static void ConvertVideoFrameToBitmap(
size_t y_offset = 0;
size_t uv_offset = 0;
- if (IsEitherYV12OrYV12AOrYV16(video_frame->format())) {
+ if (IsYUV(video_frame->format())) {
int y_shift = (video_frame->format() == media::VideoFrame::YV16) ? 0 : 1;
// Use the "left" and "top" of the destination rect to locate the offset
// in Y, U and V planes.
@@ -227,6 +83,20 @@ static void ConvertVideoFrameToBitmap(
switch (video_frame->format()) {
case media::VideoFrame::YV12:
+ case media::VideoFrame::I420:
+ LIBYUV_I420_TO_ARGB(
+ video_frame->data(media::VideoFrame::kYPlane) + y_offset,
+ video_frame->stride(media::VideoFrame::kYPlane),
+ video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
+ video_frame->stride(media::VideoFrame::kUPlane),
+ video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
+ video_frame->stride(media::VideoFrame::kVPlane),
+ static_cast<uint8*>(bitmap->getPixels()),
+ bitmap->rowBytes(),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+ break;
+
case media::VideoFrame::YV12J:
media::ConvertYUVToRGB32(
video_frame->data(media::VideoFrame::kYPlane) + y_offset,
@@ -238,24 +108,27 @@ static void ConvertVideoFrameToBitmap(
video_frame->stride(media::VideoFrame::kYPlane),
video_frame->stride(media::VideoFrame::kUPlane),
bitmap->rowBytes(),
- media::YV12);
+ media::YV12J);
break;
case media::VideoFrame::YV16:
- media::ConvertYUVToRGB32(
+ LIBYUV_I422_TO_ARGB(
video_frame->data(media::VideoFrame::kYPlane) + y_offset,
+ video_frame->stride(media::VideoFrame::kYPlane),
video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
+ video_frame->stride(media::VideoFrame::kUPlane),
video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
+ video_frame->stride(media::VideoFrame::kVPlane),
static_cast<uint8*>(bitmap->getPixels()),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
bitmap->rowBytes(),
- media::YV16);
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
break;
case media::VideoFrame::YV12A:
+      // Since libyuv doesn't support YUVA, fall back to
+      // media::ConvertYUVAToARGB(), which is not ARM-optimized.
+ // TODO(fbarchard, mtomasz): Use libyuv, then copy the alpha channel.
media::ConvertYUVAToARGB(
video_frame->data(media::VideoFrame::kYPlane) + y_offset,
video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
@@ -271,6 +144,30 @@ static void ConvertVideoFrameToBitmap(
media::YV12);
break;
+ case media::VideoFrame::YV24:
+ libyuv::I444ToARGB(
+ video_frame->data(media::VideoFrame::kYPlane) + y_offset,
+ video_frame->stride(media::VideoFrame::kYPlane),
+ video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
+ video_frame->stride(media::VideoFrame::kUPlane),
+ video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
+ video_frame->stride(media::VideoFrame::kVPlane),
+ static_cast<uint8*>(bitmap->getPixels()),
+ bitmap->rowBytes(),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
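+      // The swizzle below compensates for libyuv presumably lacking a direct
+      // I444-to-ABGR conversion: on platforms where Skia keeps red in the
+      // low byte, the ARGB output is swapped to ABGR in place.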
+#if SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
+ SK_A32_SHIFT == 24
+ libyuv::ARGBToABGR(
+ static_cast<uint8*>(bitmap->getPixels()),
+ bitmap->rowBytes(),
+ static_cast<uint8*>(bitmap->getPixels()),
+ bitmap->rowBytes(),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+#endif
+ break;
+
case media::VideoFrame::NATIVE_TEXTURE:
DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
video_frame->ReadPixelsFromNativeTexture(*bitmap);
@@ -306,27 +203,20 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
// Paint black rectangle if there isn't a frame available or the
// frame has an unexpected format.
- if (!video_frame ||
- !IsEitherYV12OrYV12AOrYV16OrNative(video_frame->format())) {
+ if (!video_frame || !IsYUVOrNative(video_frame->format())) {
canvas->drawRect(dest, paint);
return;
}
- // Scale and convert to RGB in one step if we can.
- if (CanFastPaint(canvas, alpha, video_frame->format())) {
- FastPaint(video_frame, canvas, dest);
- return;
- }
-
// Check if we should convert and update |last_frame_|.
if (last_frame_.isNull() ||
- video_frame->GetTimestamp() != last_frame_timestamp_) {
+ video_frame->timestamp() != last_frame_timestamp_) {
ConvertVideoFrameToBitmap(video_frame, &last_frame_);
- last_frame_timestamp_ = video_frame->GetTimestamp();
+ last_frame_timestamp_ = video_frame->timestamp();
}
- // Do a slower paint using |last_frame_|.
- paint.setFilterBitmap(true);
+ // Paint using |last_frame_|.
+ paint.setFilterLevel(SkPaint::kLow_FilterLevel);
canvas->drawBitmapRect(last_frame_, NULL, dest, &paint);
}
diff --git a/chromium/media/filters/skcanvas_video_renderer_unittest.cc b/chromium/media/filters/skcanvas_video_renderer_unittest.cc
index 1550dacc63c..dd01c704968 100644
--- a/chromium/media/filters/skcanvas_video_renderer_unittest.cc
+++ b/chromium/media/filters/skcanvas_video_renderer_unittest.cc
@@ -5,7 +5,6 @@
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/skia/include/core/SkBitmapDevice.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "media/filters/skcanvas_video_renderer.h"
@@ -19,19 +18,17 @@ static const gfx::Rect kNaturalRect(0, 0, kWidth, kHeight);
// Helper for filling a |canvas| with a solid |color|.
void FillCanvas(SkCanvas* canvas, SkColor color) {
- const SkBitmap& bitmap = canvas->getDevice()->accessBitmap(true);
- bitmap.lockPixels();
- bitmap.eraseColor(color);
- bitmap.unlockPixels();
+ canvas->clear(color);
}
// Helper for returning the color of a solid |canvas|.
SkColor GetColorAt(SkCanvas* canvas, int x, int y) {
- const SkBitmap& bitmap = canvas->getDevice()->accessBitmap(false);
- bitmap.lockPixels();
- SkColor c = bitmap.getColor(x, y);
- bitmap.unlockPixels();
- return c;
+ SkBitmap bitmap;
+ if (!bitmap.allocN32Pixels(1, 1))
+ return 0;
+ if (!canvas->readPixels(&bitmap, x, y))
+ return 0;
+ return bitmap.getColor(0, 0);
}
SkColor GetColor(SkCanvas* canvas) {
@@ -63,9 +60,8 @@ class SkCanvasVideoRendererTest : public testing::Test {
VideoFrame* smaller_frame() { return smaller_frame_.get(); }
VideoFrame* cropped_frame() { return cropped_frame_.get(); }
- // Getters for canvases that trigger the various painting paths.
- SkCanvas* fast_path_canvas() { return &fast_path_canvas_; }
- SkCanvas* slow_path_canvas() { return &slow_path_canvas_; }
+ // Standard canvas.
+ SkCanvas* target_canvas() { return &target_canvas_; }
private:
SkCanvasVideoRenderer renderer_;
@@ -75,14 +71,18 @@ class SkCanvasVideoRendererTest : public testing::Test {
scoped_refptr<VideoFrame> smaller_frame_;
scoped_refptr<VideoFrame> cropped_frame_;
- SkBitmapDevice fast_path_device_;
- SkCanvas fast_path_canvas_;
- SkBitmapDevice slow_path_device_;
- SkCanvas slow_path_canvas_;
+ SkCanvas target_canvas_;
DISALLOW_COPY_AND_ASSIGN(SkCanvasVideoRendererTest);
};
+static SkBitmap AllocBitmap(int width, int height) {
+ SkBitmap bitmap;
+ bitmap.allocPixels(SkImageInfo::MakeN32(width, height, kPremul_SkAlphaType));
+ bitmap.eraseColor(0);
+ return bitmap;
+}
+
SkCanvasVideoRendererTest::SkCanvasVideoRendererTest()
: natural_frame_(VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight))),
larger_frame_(VideoFrame::CreateBlackFrame(
@@ -95,19 +95,16 @@ SkCanvasVideoRendererTest::SkCanvasVideoRendererTest()
gfx::Rect(6, 6, 8, 6),
gfx::Size(8, 6),
base::TimeDelta::FromMilliseconds(4))),
- fast_path_device_(SkBitmap::kARGB_8888_Config, kWidth, kHeight, true),
- fast_path_canvas_(&fast_path_device_),
- slow_path_device_(SkBitmap::kARGB_8888_Config, kWidth, kHeight, false),
- slow_path_canvas_(&slow_path_device_) {
+ target_canvas_(AllocBitmap(kWidth, kHeight)) {
// Give each frame a unique timestamp.
- natural_frame_->SetTimestamp(base::TimeDelta::FromMilliseconds(1));
- larger_frame_->SetTimestamp(base::TimeDelta::FromMilliseconds(2));
- smaller_frame_->SetTimestamp(base::TimeDelta::FromMilliseconds(3));
+ natural_frame_->set_timestamp(base::TimeDelta::FromMilliseconds(1));
+ larger_frame_->set_timestamp(base::TimeDelta::FromMilliseconds(2));
+ smaller_frame_->set_timestamp(base::TimeDelta::FromMilliseconds(3));
// Make sure the cropped video frame's aspect ratio matches the output device.
// Update cropped_frame_'s crop dimensions if this is not the case.
- EXPECT_EQ(cropped_frame()->natural_size().width() * kHeight,
- cropped_frame()->natural_size().height() * kWidth);
+ EXPECT_EQ(cropped_frame()->visible_rect().width() * kHeight,
+ cropped_frame()->visible_rect().height() * kWidth);
// Fill in the cropped frame's entire data with colors:
//
@@ -210,139 +207,98 @@ void SkCanvasVideoRendererTest::Paint(VideoFrame* video_frame,
renderer_.Paint(video_frame, canvas, kNaturalRect, 0xFF);
}
-TEST_F(SkCanvasVideoRendererTest, FastPaint_NoFrame) {
+TEST_F(SkCanvasVideoRendererTest, NoFrame) {
// Test that black gets painted over canvas.
- FillCanvas(fast_path_canvas(), SK_ColorRED);
- PaintWithoutFrame(fast_path_canvas());
- EXPECT_EQ(SK_ColorBLACK, GetColor(fast_path_canvas()));
-}
-
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_NoFrame) {
- // Test that black gets painted over canvas.
- FillCanvas(slow_path_canvas(), SK_ColorRED);
- PaintWithoutFrame(slow_path_canvas());
- EXPECT_EQ(SK_ColorBLACK, GetColor(slow_path_canvas()));
-}
-
-TEST_F(SkCanvasVideoRendererTest, FastPaint_Natural) {
- Paint(natural_frame(), fast_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(fast_path_canvas()));
+ FillCanvas(target_canvas(), SK_ColorRED);
+ PaintWithoutFrame(target_canvas());
+ EXPECT_EQ(SK_ColorBLACK, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_Natural) {
- Paint(natural_frame(), slow_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(slow_path_canvas()));
-}
-
-TEST_F(SkCanvasVideoRendererTest, FastPaint_Larger) {
- Paint(natural_frame(), fast_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(fast_path_canvas()));
-
- Paint(larger_frame(), fast_path_canvas(), kBlue);
- EXPECT_EQ(SK_ColorBLUE, GetColor(fast_path_canvas()));
-}
-
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_Larger) {
- Paint(natural_frame(), slow_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(slow_path_canvas()));
-
- Paint(larger_frame(), slow_path_canvas(), kBlue);
- EXPECT_EQ(SK_ColorBLUE, GetColor(slow_path_canvas()));
+TEST_F(SkCanvasVideoRendererTest, Natural) {
+ Paint(natural_frame(), target_canvas(), kRed);
+ EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, FastPaint_Smaller) {
- Paint(natural_frame(), fast_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(fast_path_canvas()));
+TEST_F(SkCanvasVideoRendererTest, Larger) {
+ Paint(natural_frame(), target_canvas(), kRed);
+ EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
- Paint(smaller_frame(), fast_path_canvas(), kBlue);
- EXPECT_EQ(SK_ColorBLUE, GetColor(fast_path_canvas()));
+ Paint(larger_frame(), target_canvas(), kBlue);
+ EXPECT_EQ(SK_ColorBLUE, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_Smaller) {
- Paint(natural_frame(), slow_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(slow_path_canvas()));
+TEST_F(SkCanvasVideoRendererTest, Smaller) {
+ Paint(natural_frame(), target_canvas(), kRed);
+ EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
- Paint(smaller_frame(), slow_path_canvas(), kBlue);
- EXPECT_EQ(SK_ColorBLUE, GetColor(slow_path_canvas()));
+ Paint(smaller_frame(), target_canvas(), kBlue);
+ EXPECT_EQ(SK_ColorBLUE, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, FastPaint_NoTimestamp) {
+TEST_F(SkCanvasVideoRendererTest, NoTimestamp) {
VideoFrame* video_frame = natural_frame();
- video_frame->SetTimestamp(media::kNoTimestamp());
- Paint(video_frame, fast_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(fast_path_canvas()));
+ video_frame->set_timestamp(media::kNoTimestamp());
+ Paint(video_frame, target_canvas(), kRed);
+ EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_NoTimestamp) {
- VideoFrame* video_frame = natural_frame();
- video_frame->SetTimestamp(media::kNoTimestamp());
- Paint(video_frame, slow_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(slow_path_canvas()));
-}
-
-TEST_F(SkCanvasVideoRendererTest, FastPaint_SameVideoFrame) {
- Paint(natural_frame(), fast_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(fast_path_canvas()));
-
- // Fast paints always get painted to the canvas.
- Paint(natural_frame(), fast_path_canvas(), kBlue);
- EXPECT_EQ(SK_ColorBLUE, GetColor(fast_path_canvas()));
-}
-
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_SameVideoFrame) {
- Paint(natural_frame(), slow_path_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(slow_path_canvas()));
+TEST_F(SkCanvasVideoRendererTest, SameVideoFrame) {
+ Paint(natural_frame(), target_canvas(), kRed);
+ EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
// Slow paints can get cached, expect the old color value.
- Paint(natural_frame(), slow_path_canvas(), kBlue);
- EXPECT_EQ(SK_ColorRED, GetColor(slow_path_canvas()));
+ Paint(natural_frame(), target_canvas(), kBlue);
+ EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, FastPaint_CroppedFrame) {
- Paint(cropped_frame(), fast_path_canvas(), kNone);
+TEST_F(SkCanvasVideoRendererTest, CroppedFrame) {
+ Paint(cropped_frame(), target_canvas(), kNone);
// Check the corners.
- EXPECT_EQ(SK_ColorBLACK, GetColorAt(fast_path_canvas(), 0, 0));
- EXPECT_EQ(SK_ColorRED, GetColorAt(fast_path_canvas(), kWidth - 1, 0));
- EXPECT_EQ(SK_ColorGREEN, GetColorAt(fast_path_canvas(), 0, kHeight - 1));
- EXPECT_EQ(SK_ColorBLUE, GetColorAt(fast_path_canvas(), kWidth - 1,
- kHeight - 1));
+ EXPECT_EQ(SK_ColorBLACK, GetColorAt(target_canvas(), 0, 0));
+ EXPECT_EQ(SK_ColorRED, GetColorAt(target_canvas(), kWidth - 1, 0));
+ EXPECT_EQ(SK_ColorGREEN, GetColorAt(target_canvas(), 0, kHeight - 1));
+ EXPECT_EQ(SK_ColorBLUE, GetColorAt(target_canvas(), kWidth - 1,
+ kHeight - 1));
// Check the interior along the border between color regions. Note that we're
// bilinearly upscaling, so we'll need to take care to pick sample points that
// are just outside the "zone of resampling".
- // TODO(sheu): commenting out two checks due to http://crbug.com/158462.
-#if 0
- EXPECT_EQ(SK_ColorBLACK, GetColorAt(fast_path_canvas(), kWidth * 1 / 8 - 1,
- kHeight * 1 / 6 - 1));
-#endif
- EXPECT_EQ(SK_ColorRED, GetColorAt(fast_path_canvas(), kWidth * 3 / 8,
- kHeight * 1 / 6 - 1));
-#if 0
- EXPECT_EQ(SK_ColorGREEN, GetColorAt(fast_path_canvas(), kWidth * 1 / 8 - 1,
- kHeight * 3 / 6));
-#endif
- EXPECT_EQ(SK_ColorBLUE, GetColorAt(fast_path_canvas(), kWidth * 3 / 8,
- kHeight * 3 / 6));
+ EXPECT_EQ(SK_ColorBLACK, GetColorAt(target_canvas(), kWidth * 1 / 8 - 1,
+ kHeight * 1 / 6 - 1));
+ EXPECT_EQ(SK_ColorRED, GetColorAt(target_canvas(), kWidth * 3 / 8,
+ kHeight * 1 / 6 - 1));
+ EXPECT_EQ(SK_ColorGREEN, GetColorAt(target_canvas(), kWidth * 1 / 8 - 1,
+ kHeight * 3 / 6));
+ EXPECT_EQ(SK_ColorBLUE, GetColorAt(target_canvas(), kWidth * 3 / 8,
+ kHeight * 3 / 6));
}
-TEST_F(SkCanvasVideoRendererTest, SlowPaint_CroppedFrame) {
- Paint(cropped_frame(), slow_path_canvas(), kNone);
+TEST_F(SkCanvasVideoRendererTest, CroppedFrame_NoScaling) {
+ SkCanvas canvas(AllocBitmap(kWidth, kHeight));
+ const gfx::Rect crop_rect = cropped_frame()->visible_rect();
+
+ // Force painting to a non-zero position on the destination bitmap, to check
+ // if the coordinates are calculated properly.
+ const int offset_x = 10;
+ const int offset_y = 15;
+ canvas.translate(offset_x, offset_y);
+
+  // Scale the destination canvas so that painting into the natural rect does
+  // not involve any scaling of the cropped frame.
+ canvas.scale(static_cast<SkScalar>(crop_rect.width()) / kWidth,
+ static_cast<SkScalar>(crop_rect.height()) / kHeight);
+
+ Paint(cropped_frame(), &canvas, kNone);
+
// Check the corners.
- EXPECT_EQ(SK_ColorBLACK, GetColorAt(slow_path_canvas(), 0, 0));
- EXPECT_EQ(SK_ColorRED, GetColorAt(slow_path_canvas(), kWidth - 1, 0));
- EXPECT_EQ(SK_ColorGREEN, GetColorAt(slow_path_canvas(), 0, kHeight - 1));
- EXPECT_EQ(SK_ColorBLUE, GetColorAt(slow_path_canvas(), kWidth - 1,
- kHeight - 1));
- // Check the interior along the border between color regions. Note that we're
- // bilinearly upscaling, so we'll need to take care to pick sample points that
- // are just outside the "zone of resampling".
- EXPECT_EQ(SK_ColorBLACK, GetColorAt(slow_path_canvas(), kWidth * 1 / 8 - 1,
- kHeight * 1 / 6 - 1));
- EXPECT_EQ(SK_ColorRED, GetColorAt(slow_path_canvas(), kWidth * 3 / 8,
- kHeight * 1 / 6 - 1));
- EXPECT_EQ(SK_ColorGREEN, GetColorAt(slow_path_canvas(), kWidth * 1 / 8 - 1,
- kHeight * 3 / 6));
- EXPECT_EQ(SK_ColorBLUE, GetColorAt(slow_path_canvas(), kWidth * 3 / 8,
- kHeight * 3 / 6));
+ EXPECT_EQ(SK_ColorBLACK, GetColorAt(&canvas, offset_x, offset_y));
+ EXPECT_EQ(SK_ColorRED,
+ GetColorAt(&canvas, offset_x + crop_rect.width() - 1, offset_y));
+ EXPECT_EQ(SK_ColorGREEN,
+ GetColorAt(&canvas, offset_x, offset_y + crop_rect.height() - 1));
+ EXPECT_EQ(SK_ColorBLUE,
+ GetColorAt(&canvas,
+ offset_x + crop_rect.width() - 1,
+ offset_y + crop_rect.height() - 1));
}
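
The scale factors in the test above make the kWidth x kHeight destination rect
cover exactly crop_rect.width() x crop_rect.height() device pixels, so each
pixel of the frame's visible rect maps one-to-one onto the canvas. A sketch of
the mapping (values follow from the fixture's 8x6 visible rect):

const SkScalar sx = static_cast<SkScalar>(crop_rect.width()) / kWidth;
const SkScalar sy = static_cast<SkScalar>(crop_rect.height()) / kHeight;
// Device-space extent of the painted natural rect:
//   kWidth * sx == crop_rect.width(), kHeight * sy == crop_rect.height(),
// i.e. an 8x6 area, so no resampling of the visible region occurs.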
} // namespace media
diff --git a/chromium/media/filters/source_buffer_stream.cc b/chromium/media/filters/source_buffer_stream.cc
index 77fb279550a..fe22b4a795e 100644
--- a/chromium/media/filters/source_buffer_stream.cc
+++ b/chromium/media/filters/source_buffer_stream.cc
@@ -10,31 +10,41 @@
#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "media/base/audio_splicer.h"
namespace media {
+typedef StreamParser::BufferQueue BufferQueue;
+
// Buffers with the same timestamp are only allowed under certain conditions.
-// Video: Allowed when the previous frame and current frame are NOT keyframes.
-// This is the situation for VP8 Alt-Ref frames.
-// Otherwise: Allowed in all situations except where a non-keyframe is followed
-// by a keyframe.
+// More precisely, it is allowed in all situations except when the previous
+// frame is not a key frame and the current frame is a key frame.
+// Examples of situations where the DTS of two consecutive frames can be equal:
+// - Video: VP8 Alt-Ref frames.
+// - Video: IPBPBP...: the DTS of an I frame and a P frame can be equal.
+// - Text: track cues that start at the same time.
// Returns true if |prev_is_keyframe| and |current_is_keyframe| indicate a
// same timestamp situation that is allowed. False is returned otherwise.
static bool AllowSameTimestamp(
- bool prev_is_keyframe, bool current_is_keyframe, bool is_video) {
- if (is_video)
- return !prev_is_keyframe && !current_is_keyframe;
-
+ bool prev_is_keyframe, bool current_is_keyframe,
+ SourceBufferStream::Type type) {
return prev_is_keyframe || !current_is_keyframe;
}
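
The rewritten predicate collapses the old video-specific rule into one check:
only a non-keyframe followed by a keyframe is rejected. Enumerating the four
cases (an illustrative sketch, not a test in the tree; the function is
file-static, so real coverage exercises it through SourceBufferStream::Append()):

// prev keyframe,     current keyframe     -> allowed
// prev keyframe,     current non-keyframe -> allowed
// prev non-keyframe, current non-keyframe -> allowed (VP8 Alt-Ref case)
// prev non-keyframe, current keyframe     -> rejected
EXPECT_TRUE(AllowSameTimestamp(true, true, SourceBufferStream::kVideo));
EXPECT_TRUE(AllowSameTimestamp(true, false, SourceBufferStream::kVideo));
EXPECT_TRUE(AllowSameTimestamp(false, false, SourceBufferStream::kVideo));
EXPECT_FALSE(AllowSameTimestamp(false, true, SourceBufferStream::kVideo));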
+// Returns the config ID of |buffer| if |buffer| has no splice buffers or
+// |index| is out of range. Otherwise returns the config ID for the splice
+// (fade-out) buffer at position |index|.
+static int GetConfigId(StreamParserBuffer* buffer, size_t index) {
+ return index < buffer->splice_buffers().size()
+ ? buffer->splice_buffers()[index]->GetConfigId()
+ : buffer->GetConfigId();
+}
+
// Helper class representing a range of buffered data. All buffers in a
// SourceBufferRange are ordered sequentially in presentation order with no
// gaps.
class SourceBufferRange {
public:
- typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
-
// Returns the maximum distance in time between any buffer seen in this
// stream. Used to estimate the duration of a buffer if its duration is not
// known.
@@ -44,7 +54,7 @@ class SourceBufferRange {
// empty and the front of |new_buffers| must be a keyframe.
// |media_segment_start_time| refers to the starting timestamp for the media
// segment to which these buffers belong.
- SourceBufferRange(bool is_video,
+ SourceBufferRange(SourceBufferStream::Type type,
const BufferQueue& new_buffers,
base::TimeDelta media_segment_start_time,
const InterbufferDistanceCB& interbuffer_distance_cb);
@@ -94,7 +104,9 @@ class SourceBufferRange {
// were removed.
// |deleted_buffers| contains the buffers that were deleted from this range,
// starting at the buffer that had been at |next_buffer_index_|.
- void TruncateAt(base::TimeDelta timestamp,
+ // Returns true if everything in the range was deleted. Otherwise
+ // returns false.
+ bool TruncateAt(base::TimeDelta timestamp,
BufferQueue* deleted_buffers, bool is_exclusive);
// Deletes all buffers in range.
void DeleteAll(BufferQueue* deleted_buffers);
@@ -156,7 +168,9 @@ class SourceBufferRange {
// Gets the timestamp for the keyframe that is after |timestamp|. If
// there isn't a keyframe in the range after |timestamp| then kNoTimestamp()
- // is returned.
+ // is returned. If |timestamp| is in the "gap" between the value returned by
+ // GetStartTimestamp() and the timestamp on the first buffer in |buffers_|,
+ // then |timestamp| is returned.
base::TimeDelta NextKeyframeTimestamp(base::TimeDelta timestamp);
// Gets the timestamp for the closest keyframe that is <= |timestamp|. If
@@ -185,6 +199,11 @@ class SourceBufferRange {
// sequence after |buffers_.back()|, false otherwise.
bool IsNextInSequence(base::TimeDelta timestamp, bool is_keyframe) const;
+  // Adds all buffers which overlap [start, end) to the end of |buffers|.
+  // Returns false if no buffers exist in the range, true otherwise.
+ bool GetBuffersInRange(base::TimeDelta start, base::TimeDelta end,
+ BufferQueue* buffers);
+
int size_in_bytes() const { return size_in_bytes_; }
private:
@@ -213,7 +232,9 @@ class SourceBufferRange {
// Helper method to delete buffers in |buffers_| starting at
// |starting_point|, an iterator in |buffers_|.
- void TruncateAt(const BufferQueue::iterator& starting_point,
+ // Returns true if everything in the range was removed. Returns
+ // false if the range still contains buffers.
+ bool TruncateAt(const BufferQueue::iterator& starting_point,
BufferQueue* deleted_buffers);
// Frees the buffers in |buffers_| from [|start_point|,|ending_point|) and
@@ -228,8 +249,8 @@ class SourceBufferRange {
// Returns the approximate duration of a buffer in this range.
base::TimeDelta GetApproximateDuration() const;
- // True if this object stores video data.
- bool is_video_;
+ // Type of this stream.
+ const SourceBufferStream::Type type_;
// An ordered list of buffers in this range.
BufferQueue buffers_;
@@ -281,11 +302,16 @@ static bool IsRangeListSorted(
return true;
}
-// Comparison function for two Buffers based on timestamp.
-static bool BufferComparator(
- const scoped_refptr<media::StreamParserBuffer>& first,
- const scoped_refptr<media::StreamParserBuffer>& second) {
- return first->GetDecodeTimestamp() < second->GetDecodeTimestamp();
+// Comparison predicates for std::upper_bound() and std::lower_bound().
+static bool CompareTimeDeltaToStreamParserBuffer(
+ const base::TimeDelta& decode_timestamp,
+ const scoped_refptr<media::StreamParserBuffer>& buffer) {
+ return decode_timestamp < buffer->GetDecodeTimestamp();
+}
+static bool CompareStreamParserBufferToTimeDelta(
+ const scoped_refptr<media::StreamParserBuffer>& buffer,
+ const base::TimeDelta& decode_timestamp) {
+ return buffer->GetDecodeTimestamp() < decode_timestamp;
}
// Returns an estimate of how far from the beginning or end of a range a buffer
@@ -308,24 +334,16 @@ static base::TimeDelta kSeekToStartFudgeRoom() {
return base::TimeDelta::FromMilliseconds(1000);
}
// The maximum amount of data in bytes the stream will keep in memory.
-#if defined(GOOGLE_TV)
-// In Google TV, set the size of the buffer to 1 min because of
-// the limited memory of the embedded system.
-// 2MB: approximately 1 minutes of 256Kbps content.
-// 30MB: approximately 1 minutes of 4Mbps content.
-static int kDefaultAudioMemoryLimit = 2 * 1024 * 1024;
-static int kDefaultVideoMemoryLimit = 30 * 1024 * 1024;
-#else
// 12MB: approximately 5 minutes of 320Kbps content.
// 150MB: approximately 5 minutes of 4Mbps content.
static int kDefaultAudioMemoryLimit = 12 * 1024 * 1024;
static int kDefaultVideoMemoryLimit = 150 * 1024 * 1024;
-#endif
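
The byte figures in the comments above check out as rough arithmetic,
bytes = bits_per_second / 8 * seconds (helper name below is illustrative):

static int ApproxBufferBytes(int bits_per_second, int seconds) {
  return bits_per_second / 8 * seconds;
}
// ApproxBufferBytes(320 * 1000, 5 * 60)      ->  12,000,000, ~12 MB.
// ApproxBufferBytes(4 * 1000 * 1000, 5 * 60) -> 150,000,000, ~150 MB.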
namespace media {
SourceBufferStream::SourceBufferStream(const AudioDecoderConfig& audio_config,
- const LogCB& log_cb)
+ const LogCB& log_cb,
+ bool splice_frames_enabled)
: log_cb_(log_cb),
current_config_index_(0),
append_config_index_(0),
@@ -341,13 +359,17 @@ SourceBufferStream::SourceBufferStream(const AudioDecoderConfig& audio_config,
last_output_buffer_timestamp_(kNoTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
memory_limit_(kDefaultAudioMemoryLimit),
- config_change_pending_(false) {
+ config_change_pending_(false),
+ splice_buffers_index_(0),
+ pending_buffers_complete_(false),
+ splice_frames_enabled_(splice_frames_enabled) {
DCHECK(audio_config.IsValidConfig());
audio_configs_.push_back(audio_config);
}
SourceBufferStream::SourceBufferStream(const VideoDecoderConfig& video_config,
- const LogCB& log_cb)
+ const LogCB& log_cb,
+ bool splice_frames_enabled)
: log_cb_(log_cb),
current_config_index_(0),
append_config_index_(0),
@@ -363,13 +385,17 @@ SourceBufferStream::SourceBufferStream(const VideoDecoderConfig& video_config,
last_output_buffer_timestamp_(kNoTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
memory_limit_(kDefaultVideoMemoryLimit),
- config_change_pending_(false) {
+ config_change_pending_(false),
+ splice_buffers_index_(0),
+ pending_buffers_complete_(false),
+ splice_frames_enabled_(splice_frames_enabled) {
DCHECK(video_config.IsValidConfig());
video_configs_.push_back(video_config);
}
SourceBufferStream::SourceBufferStream(const TextTrackConfig& text_config,
- const LogCB& log_cb)
+ const LogCB& log_cb,
+ bool splice_frames_enabled)
: log_cb_(log_cb),
current_config_index_(0),
append_config_index_(0),
@@ -386,8 +412,10 @@ SourceBufferStream::SourceBufferStream(const TextTrackConfig& text_config,
last_output_buffer_timestamp_(kNoTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
memory_limit_(kDefaultAudioMemoryLimit),
- config_change_pending_(false) {
-}
+ config_change_pending_(false),
+ splice_buffers_index_(0),
+ pending_buffers_complete_(false),
+ splice_frames_enabled_(splice_frames_enabled) {}
SourceBufferStream::~SourceBufferStream() {
while (!ranges_.empty()) {
@@ -417,14 +445,14 @@ void SourceBufferStream::OnNewMediaSegment(
}
}
-bool SourceBufferStream::Append(
- const SourceBufferStream::BufferQueue& buffers) {
+bool SourceBufferStream::Append(const BufferQueue& buffers) {
TRACE_EVENT2("media", "SourceBufferStream::Append",
"stream type", GetStreamTypeName(),
"buffers to append", buffers.size());
DCHECK(!buffers.empty());
DCHECK(media_segment_start_time_ != kNoTimestamp());
+ DCHECK(media_segment_start_time_ <= buffers.front()->GetDecodeTimestamp());
DCHECK(!end_of_stream_);
// New media segments must begin with a keyframe.
@@ -468,7 +496,8 @@ bool SourceBufferStream::Append(
last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
} else {
- base::TimeDelta new_range_start_time = media_segment_start_time_;
+ base::TimeDelta new_range_start_time = std::min(
+ media_segment_start_time_, buffers.front()->GetDecodeTimestamp());
const BufferQueue* buffers_for_new_range = &buffers;
BufferQueue trimmed_buffers;
@@ -476,7 +505,7 @@ bool SourceBufferStream::Append(
// segment, then we must make sure that we start with a keyframe.
// This can happen if the GOP in the previous append gets destroyed
// by a Remove() call.
- if (!new_media_segment_ && !buffers.front()->IsKeyframe()) {
+ if (!new_media_segment_) {
BufferQueue::const_iterator itr = buffers.begin();
// Scan past all the non-keyframes.
@@ -490,18 +519,20 @@ bool SourceBufferStream::Append(
last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
return true;
+ } else if (itr != buffers.begin()) {
+ // Copy the first keyframe and everything after it into
+ // |trimmed_buffers|.
+ trimmed_buffers.assign(itr, buffers.end());
+ buffers_for_new_range = &trimmed_buffers;
}
- // Copy the first keyframe and everything after it into |trimmed_buffers|.
- trimmed_buffers.assign(itr, buffers.end());
-
- new_range_start_time = trimmed_buffers.front()->GetDecodeTimestamp();
- buffers_for_new_range = &trimmed_buffers;
+ new_range_start_time =
+ buffers_for_new_range->front()->GetDecodeTimestamp();
}
range_for_next_append_ =
AddToRanges(new SourceBufferRange(
- is_video(), *buffers_for_new_range, new_range_start_time,
+ GetType(), *buffers_for_new_range, new_range_start_time,
base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
base::Unretained(this))));
last_appended_buffer_timestamp_ =
@@ -609,15 +640,10 @@ void SourceBufferStream::RemoveInternal(
SetSelectedRange(new_range);
}
- // If the current range now is completely covered by the removal
- // range then we want to delete it.
- bool delete_range = start < range->GetStartTimestamp() ||
- (!is_exclusive && start == range->GetStartTimestamp());
-
// Truncate the current range so that it only contains data before
// the removal range.
BufferQueue saved_buffers;
- range->TruncateAt(start, &saved_buffers, is_exclusive);
+ bool delete_range = range->TruncateAt(start, &saved_buffers, is_exclusive);
// Check to see if the current playback position was removed and
// update the selected range appropriately.
@@ -670,6 +696,9 @@ void SourceBufferStream::ResetSeekState() {
track_buffer_.clear();
config_change_pending_ = false;
last_output_buffer_timestamp_ = kNoTimestamp();
+ splice_buffers_index_ = 0;
+ pending_buffer_ = NULL;
+ pending_buffers_complete_ = false;
}
bool SourceBufferStream::ShouldSeekToStartOfBuffered(
@@ -701,7 +730,7 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
if (current_timestamp == prev_timestamp &&
!AllowSameTimestamp(prev_is_keyframe, current_is_keyframe,
- is_video())) {
+ GetType())) {
MEDIA_LOG(log_cb_) << "Unexpected combination of buffers with the"
<< " same timestamp detected at "
<< current_timestamp.InSecondsF();
@@ -720,7 +749,7 @@ bool SourceBufferStream::IsNextTimestampValid(
return (last_appended_buffer_timestamp_ != next_timestamp) ||
new_media_segment_ ||
AllowSameTimestamp(last_appended_buffer_is_keyframe_, next_is_keyframe,
- is_video());
+ GetType());
}
@@ -883,7 +912,7 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
DCHECK(!new_range_for_append);
// Create a new range containing these buffers.
new_range_for_append = new SourceBufferRange(
- is_video(), buffers, kNoTimestamp(),
+ GetType(), buffers, kNoTimestamp(),
base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
base::Unretained(this)));
range_for_next_append_ = ranges_.end();
@@ -948,6 +977,12 @@ void SourceBufferStream::PrepareRangesForNextAppend(
}
}
+ // Handle splices between the existing buffers and the new buffers. If a
+  // splice is generated, the timestamp and duration of the first buffer in
+ // |new_buffers| will be modified.
+ if (splice_frames_enabled_)
+ GenerateSpliceFrame(new_buffers);
+
base::TimeDelta prev_timestamp = last_appended_buffer_timestamp_;
bool prev_is_keyframe = last_appended_buffer_is_keyframe_;
base::TimeDelta next_timestamp = new_buffers.front()->GetDecodeTimestamp();
@@ -963,8 +998,15 @@ void SourceBufferStream::PrepareRangesForNextAppend(
// timestamp situation. This prevents the first buffer in the current append
// from deleting the last buffer in the previous append if both buffers
// have the same timestamp.
- bool is_exclusive = (prev_timestamp == next_timestamp) &&
- AllowSameTimestamp(prev_is_keyframe, next_is_keyframe, is_video());
+ //
+ // The delete range should never be exclusive if a splice frame was generated
+  // because we don't generate splice frames for same-timestamp situations.
+ DCHECK(new_buffers.front()->splice_timestamp() !=
+ new_buffers.front()->timestamp());
+ const bool is_exclusive =
+ new_buffers.front()->splice_buffers().empty() &&
+ prev_timestamp == next_timestamp &&
+ AllowSameTimestamp(prev_is_keyframe, next_is_keyframe, GetType());
// Delete the buffers that |new_buffers| overlaps.
base::TimeDelta start = new_buffers.front()->GetDecodeTimestamp();
@@ -1074,10 +1116,15 @@ void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
// Need to partially truncate this range.
if ((*itr)->GetStartTimestamp() < duration) {
- (*itr)->TruncateAt(duration, NULL, false);
+ bool delete_range = (*itr)->TruncateAt(duration, NULL, false);
if ((*itr == selected_range_) && !selected_range_->HasNextBufferPosition())
SetSelectedRange(NULL);
- ++itr;
+
+ if (delete_range) {
+ DeleteAndRemoveRange(&itr);
+ } else {
+ ++itr;
+ }
}
// Delete all ranges that begin after |duration|.
@@ -1092,17 +1139,105 @@ void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
SourceBufferStream::Status SourceBufferStream::GetNextBuffer(
scoped_refptr<StreamParserBuffer>* out_buffer) {
+ if (!pending_buffer_) {
+ const SourceBufferStream::Status status = GetNextBufferInternal(out_buffer);
+ if (status != SourceBufferStream::kSuccess || !SetPendingBuffer(out_buffer))
+ return status;
+ }
+
+ if (!pending_buffer_->splice_buffers().empty())
+ return HandleNextBufferWithSplice(out_buffer);
+
+ DCHECK(pending_buffer_->preroll_buffer());
+ return HandleNextBufferWithPreroll(out_buffer);
+}
+
+SourceBufferStream::Status SourceBufferStream::HandleNextBufferWithSplice(
+ scoped_refptr<StreamParserBuffer>* out_buffer) {
+ const BufferQueue& splice_buffers = pending_buffer_->splice_buffers();
+ const size_t last_splice_buffer_index = splice_buffers.size() - 1;
+
+ // Are there any splice buffers left to hand out? The last buffer should be
+ // handed out separately since it represents the first post-splice buffer.
+ if (splice_buffers_index_ < last_splice_buffer_index) {
+ // Account for config changes which occur between fade out buffers.
+ if (current_config_index_ !=
+ splice_buffers[splice_buffers_index_]->GetConfigId()) {
+ config_change_pending_ = true;
+ DVLOG(1) << "Config change (splice buffer config ID does not match).";
+ return SourceBufferStream::kConfigChange;
+ }
+
+  // Every pre-splice buffer must have the same splice_timestamp().
+ DCHECK(pending_buffer_->splice_timestamp() ==
+ splice_buffers[splice_buffers_index_]->splice_timestamp());
+
+  // No pre-splice buffer should have preroll.
+ DCHECK(!splice_buffers[splice_buffers_index_]->preroll_buffer());
+
+ *out_buffer = splice_buffers[splice_buffers_index_++];
+ return SourceBufferStream::kSuccess;
+ }
+
+ // Did we hand out the last pre-splice buffer on the previous call?
+ if (!pending_buffers_complete_) {
+ DCHECK_EQ(splice_buffers_index_, last_splice_buffer_index);
+ pending_buffers_complete_ = true;
+ config_change_pending_ = true;
+ DVLOG(1) << "Config change (forced for fade in of splice frame).";
+ return SourceBufferStream::kConfigChange;
+ }
+
+ // All pre-splice buffers have been handed out and a config change completed,
+ // so hand out the final buffer for fade in. Because a config change is
+ // always issued prior to handing out this buffer, any changes in config id
+ // have been inherently handled.
+ DCHECK(pending_buffers_complete_);
+ DCHECK_EQ(splice_buffers_index_, splice_buffers.size() - 1);
+ DCHECK(splice_buffers.back()->splice_timestamp() == kNoTimestamp());
+ *out_buffer = splice_buffers.back();
+ pending_buffer_ = NULL;
+
+ // If the last splice buffer has preroll, hand off to the preroll handler.
+ return SetPendingBuffer(out_buffer) ? HandleNextBufferWithPreroll(out_buffer)
+ : SourceBufferStream::kSuccess;
+}
+
+SourceBufferStream::Status SourceBufferStream::HandleNextBufferWithPreroll(
+ scoped_refptr<StreamParserBuffer>* out_buffer) {
+ // Any config change should have already been handled.
+ DCHECK_EQ(current_config_index_, pending_buffer_->GetConfigId());
+
+ // Check if the preroll buffer has already been handed out.
+ if (!pending_buffers_complete_) {
+ pending_buffers_complete_ = true;
+ *out_buffer = pending_buffer_->preroll_buffer();
+ return SourceBufferStream::kSuccess;
+ }
+
+ // Preroll complete, hand out the final buffer.
+ *out_buffer = pending_buffer_;
+ pending_buffer_ = NULL;
+ return SourceBufferStream::kSuccess;
+}
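
With splice and preroll handling folded into GetNextBuffer(), kConfigChange can
now surface mid-splice, so callers must treat it as a retryable status. A
minimal consumer loop under that assumption (a sketch; in the real pipeline
ChunkDemuxerStream drives this, and fetching the current decoder config is
assumed to complete the pending change, as CompleteConfigChange() suggests):

scoped_refptr<StreamParserBuffer> buffer;
SourceBufferStream::Status status = stream->GetNextBuffer(&buffer);
while (status == SourceBufferStream::kConfigChange) {
  // Pick up the new config before retrying so the stream can hand out the
  // buffer that triggered the change.
  stream->GetCurrentAudioDecoderConfig();
  status = stream->GetNextBuffer(&buffer);
}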
+
+SourceBufferStream::Status SourceBufferStream::GetNextBufferInternal(
+ scoped_refptr<StreamParserBuffer>* out_buffer) {
CHECK(!config_change_pending_);
if (!track_buffer_.empty()) {
DCHECK(!selected_range_);
- if (track_buffer_.front()->GetConfigId() != current_config_index_) {
+ scoped_refptr<StreamParserBuffer>& next_buffer = track_buffer_.front();
+
+ // If the next buffer is an audio splice frame, the next effective config id
+ // comes from the first splice buffer.
+ if (GetConfigId(next_buffer, 0) != current_config_index_) {
config_change_pending_ = true;
DVLOG(1) << "Config change (track buffer config ID does not match).";
return kConfigChange;
}
- *out_buffer = track_buffer_.front();
+ *out_buffer = next_buffer;
track_buffer_.pop_front();
last_output_buffer_timestamp_ = (*out_buffer)->GetDecodeTimestamp();
@@ -1204,6 +1339,13 @@ Ranges<base::TimeDelta> SourceBufferStream::GetBufferedTime() const {
return ranges;
}
+base::TimeDelta SourceBufferStream::GetBufferedDuration() const {
+ if (ranges_.empty())
+ return base::TimeDelta();
+
+ return ranges_.back()->GetBufferedEndTimestamp();
+}
+
void SourceBufferStream::MarkEndOfStream() {
DCHECK(!end_of_stream_);
end_of_stream_ = true;
@@ -1256,21 +1398,6 @@ bool SourceBufferStream::UpdateAudioConfig(const AudioDecoderConfig& config) {
return false;
}
- if (audio_configs_[0].samples_per_second() != config.samples_per_second()) {
- MEDIA_LOG(log_cb_) << "Audio sample rate changes not allowed.";
- return false;
- }
-
- if (audio_configs_[0].channel_layout() != config.channel_layout()) {
- MEDIA_LOG(log_cb_) << "Audio channel layout changes not allowed.";
- return false;
- }
-
- if (audio_configs_[0].bits_per_channel() != config.bits_per_channel()) {
- MEDIA_LOG(log_cb_) << "Audio bits per channel changes not allowed.";
- return false;
- }
-
if (audio_configs_[0].is_encrypted() != config.is_encrypted()) {
MEDIA_LOG(log_cb_) << "Audio encryption changes not allowed.";
return false;
@@ -1297,11 +1424,6 @@ bool SourceBufferStream::UpdateVideoConfig(const VideoDecoderConfig& config) {
DCHECK(audio_configs_.empty());
DVLOG(3) << "UpdateVideoConfig.";
- if (video_configs_[0].is_encrypted() != config.is_encrypted()) {
- MEDIA_LOG(log_cb_) << "Video Encryption changes not allowed.";
- return false;
- }
-
if (video_configs_[0].codec() != config.codec()) {
MEDIA_LOG(log_cb_) << "Video codec changes not allowed.";
return false;
@@ -1331,8 +1453,14 @@ bool SourceBufferStream::UpdateVideoConfig(const VideoDecoderConfig& config) {
void SourceBufferStream::CompleteConfigChange() {
config_change_pending_ = false;
+ if (pending_buffer_) {
+ current_config_index_ =
+ GetConfigId(pending_buffer_, splice_buffers_index_);
+ return;
+ }
+
if (!track_buffer_.empty()) {
- current_config_index_ = track_buffer_.front()->GetConfigId();
+ current_config_index_ = GetConfigId(track_buffer_.front(), 0);
return;
}
@@ -1462,13 +1590,25 @@ base::TimeDelta SourceBufferStream::FindKeyframeAfterTimestamp(
}
std::string SourceBufferStream::GetStreamTypeName() const {
- if (!video_configs_.empty()) {
- DCHECK(audio_configs_.empty());
- return "VIDEO";
+ switch (GetType()) {
+ case kAudio:
+ return "AUDIO";
+ case kVideo:
+ return "VIDEO";
+ case kText:
+ return "TEXT";
}
+ NOTREACHED();
+ return "";
+}
- DCHECK(!audio_configs_.empty());
- return "AUDIO";
+SourceBufferStream::Type SourceBufferStream::GetType() const {
+ if (!audio_configs_.empty())
+ return kAudio;
+ if (!video_configs_.empty())
+ return kVideo;
+ DCHECK_NE(text_track_config_.kind(), kTextNone);
+ return kText;
}
void SourceBufferStream::DeleteAndRemoveRange(RangeList::iterator* itr) {
@@ -1483,23 +1623,93 @@ void SourceBufferStream::DeleteAndRemoveRange(RangeList::iterator* itr) {
if (*itr == range_for_next_append_) {
DVLOG(1) << __FUNCTION__ << " deleting range_for_next_append_.";
range_for_next_append_ = ranges_.end();
+ last_appended_buffer_timestamp_ = kNoTimestamp();
+ last_appended_buffer_is_keyframe_ = false;
}
delete **itr;
*itr = ranges_.erase(*itr);
}
+void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
+ DCHECK(!new_buffers.empty());
+
+ // Splice frames are only supported for audio.
+ if (GetType() != kAudio)
+ return;
+
+ // Find the overlapped range (if any).
+ const base::TimeDelta splice_timestamp = new_buffers.front()->timestamp();
+ RangeList::iterator range_itr = FindExistingRangeFor(splice_timestamp);
+ if (range_itr == ranges_.end())
+ return;
+
+ const base::TimeDelta max_splice_end_timestamp =
+ splice_timestamp + base::TimeDelta::FromMilliseconds(
+ AudioSplicer::kCrossfadeDurationInMilliseconds);
+
+ // Find all buffers involved before the splice point.
+ BufferQueue pre_splice_buffers;
+ if (!(*range_itr)->GetBuffersInRange(
+ splice_timestamp, max_splice_end_timestamp, &pre_splice_buffers)) {
+ return;
+ }
+
+ // If there are gaps in the timeline, it's possible that we only find buffers
+ // after the splice point but within the splice range. For simplicity, we do
+ // not generate splice frames in this case.
+ //
+ // We also do not want to generate splices if the first new buffer replaces an
+ // existing buffer exactly.
+ if (pre_splice_buffers.front()->timestamp() >= splice_timestamp)
+ return;
+
+ // If any |pre_splice_buffers| are already splices or preroll, do not generate
+ // a splice.
+ for (size_t i = 0; i < pre_splice_buffers.size(); ++i) {
+ const BufferQueue& original_splice_buffers =
+ pre_splice_buffers[i]->splice_buffers();
+ if (!original_splice_buffers.empty()) {
+ DVLOG(1) << "Can't generate splice: overlapped buffers contain a "
+ "pre-existing splice.";
+ return;
+ }
+
+ if (pre_splice_buffers[i]->preroll_buffer()) {
+ DVLOG(1) << "Can't generate splice: overlapped buffers contain preroll.";
+ return;
+ }
+ }
+
+  // Don't generate splice frames that represent fewer than two frames, since
+  // we need at least that many to generate a crossfade. Per the spec, make
+  // this check using the sample rate of the overlapping buffers.
+ const base::TimeDelta splice_duration =
+ pre_splice_buffers.back()->timestamp() +
+ pre_splice_buffers.back()->duration() - splice_timestamp;
+ const base::TimeDelta minimum_splice_duration = base::TimeDelta::FromSecondsD(
+ 2.0 / audio_configs_[append_config_index_].samples_per_second());
+ if (splice_duration < minimum_splice_duration) {
+ DVLOG(1) << "Can't generate splice: not enough samples for crossfade; have "
+ << splice_duration.InMicroseconds() << " us, but need "
+ << minimum_splice_duration.InMicroseconds() << " us.";
+ return;
+ }
+
+ new_buffers.front()->ConvertToSpliceBuffer(pre_splice_buffers);
+}
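+
+// A quick worked example of the two-frame minimum above (rates assumed for
+// illustration, not taken from this change): at the 1000 Hz rate used by
+// SetAudioStream() in the unit tests, the threshold is 2.0 / 1000 s = 2 ms,
+// so a 1 ms overlap is rejected (see Audio_SpliceFrame_NoTinySplices); at
+// 44100 Hz it would be 2.0 / 44100 s, roughly 45 us.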
+
SourceBufferRange::SourceBufferRange(
- bool is_video, const BufferQueue& new_buffers,
+ SourceBufferStream::Type type, const BufferQueue& new_buffers,
base::TimeDelta media_segment_start_time,
const InterbufferDistanceCB& interbuffer_distance_cb)
- : is_video_(is_video),
+ : type_(type),
keyframe_map_index_base_(0),
next_buffer_index_(-1),
media_segment_start_time_(media_segment_start_time),
interbuffer_distance_cb_(interbuffer_distance_cb),
size_in_bytes_(0) {
- DCHECK(!new_buffers.empty());
+ CHECK(!new_buffers.empty());
DCHECK(new_buffers.front()->IsKeyframe());
DCHECK(!interbuffer_distance_cb.is_null());
AppendBuffersToEnd(new_buffers);
@@ -1507,9 +1717,12 @@ SourceBufferRange::SourceBufferRange(
void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
DCHECK(buffers_.empty() || CanAppendBuffersToEnd(new_buffers));
-
+ DCHECK(media_segment_start_time_ == kNoTimestamp() ||
+ media_segment_start_time_ <=
+ new_buffers.front()->GetDecodeTimestamp());
for (BufferQueue::const_iterator itr = new_buffers.begin();
- itr != new_buffers.end(); ++itr) {
+ itr != new_buffers.end();
+ ++itr) {
DCHECK((*itr)->GetDecodeTimestamp() != kNoTimestamp());
buffers_.push_back(*itr);
size_in_bytes_ += (*itr)->data_size();
@@ -1563,6 +1776,8 @@ void SourceBufferRange::SeekToStart() {
SourceBufferRange* SourceBufferRange::SplitRange(
base::TimeDelta timestamp, bool is_exclusive) {
+ CHECK(!buffers_.empty());
+
// Find the first keyframe after |timestamp|. If |is_exclusive|, do not
// include keyframes at |timestamp|.
KeyframeMap::iterator new_beginning_keyframe =
@@ -1579,13 +1794,25 @@ SourceBufferRange* SourceBufferRange::SplitRange(
DCHECK_LT(keyframe_index, static_cast<int>(buffers_.size()));
BufferQueue::iterator starting_point = buffers_.begin() + keyframe_index;
BufferQueue removed_buffers(starting_point, buffers_.end());
+
+ base::TimeDelta new_range_start_timestamp = kNoTimestamp();
+ if (GetStartTimestamp() < buffers_.front()->GetDecodeTimestamp() &&
+ timestamp < removed_buffers.front()->GetDecodeTimestamp()) {
+    // The split is in the gap between |media_segment_start_time_| and
+    // the first buffer of the new range, so set the start time of the
+    // new range to |timestamp| to preserve part of the gap in the new
+    // range.
+ new_range_start_timestamp = timestamp;
+ }
+
keyframe_map_.erase(new_beginning_keyframe, keyframe_map_.end());
FreeBufferRange(starting_point, buffers_.end());
// Create a new range with |removed_buffers|.
SourceBufferRange* split_range =
new SourceBufferRange(
- is_video_, removed_buffers, kNoTimestamp(), interbuffer_distance_cb_);
+ type_, removed_buffers, new_range_start_timestamp,
+ interbuffer_distance_cb_);
// If the next buffer position is now in |split_range|, update the state of
// this range and |split_range| accordingly.
@@ -1597,20 +1824,18 @@ SourceBufferRange* SourceBufferRange::SplitRange(
return split_range;
}
-SourceBufferRange::BufferQueue::iterator SourceBufferRange::GetBufferItrAt(
- base::TimeDelta timestamp, bool skip_given_timestamp) {
- // Need to make a dummy buffer with timestamp |timestamp| in order to search
- // the |buffers_| container.
- scoped_refptr<StreamParserBuffer> dummy_buffer =
- StreamParserBuffer::CopyFrom(NULL, 0, false);
- dummy_buffer->SetDecodeTimestamp(timestamp);
-
- if (skip_given_timestamp) {
- return std::upper_bound(
- buffers_.begin(), buffers_.end(), dummy_buffer, BufferComparator);
- }
- return std::lower_bound(
- buffers_.begin(), buffers_.end(), dummy_buffer, BufferComparator);
+BufferQueue::iterator SourceBufferRange::GetBufferItrAt(
+ base::TimeDelta timestamp,
+ bool skip_given_timestamp) {
+ return skip_given_timestamp
+ ? std::upper_bound(buffers_.begin(),
+ buffers_.end(),
+ timestamp,
+ CompareTimeDeltaToStreamParserBuffer)
+ : std::lower_bound(buffers_.begin(),
+ buffers_.end(),
+ timestamp,
+ CompareStreamParserBufferToTimeDelta);
}
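+
+// The comparator pair used above is assumed to look roughly like this (a
+// sketch; the real helpers are defined elsewhere in this file):
+//
+//   static bool CompareStreamParserBufferToTimeDelta(
+//       const scoped_refptr<StreamParserBuffer>& buffer,
+//       base::TimeDelta decode_timestamp) {
+//     return buffer->GetDecodeTimestamp() < decode_timestamp;
+//   }
+//
+//   static bool CompareTimeDeltaToStreamParserBuffer(
+//       base::TimeDelta decode_timestamp,
+//       const scoped_refptr<StreamParserBuffer>& buffer) {
+//     return decode_timestamp < buffer->GetDecodeTimestamp();
+//   }
+//
+// Two mirrored signatures are needed because std::lower_bound compares
+// (element, value) while std::upper_bound compares (value, element).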
SourceBufferRange::KeyframeMap::iterator
@@ -1638,13 +1863,13 @@ void SourceBufferRange::DeleteAll(BufferQueue* removed_buffers) {
TruncateAt(buffers_.begin(), removed_buffers);
}
-void SourceBufferRange::TruncateAt(
+bool SourceBufferRange::TruncateAt(
base::TimeDelta timestamp, BufferQueue* removed_buffers,
bool is_exclusive) {
// Find the place in |buffers_| where we will begin deleting data.
BufferQueue::iterator starting_point =
GetBufferItrAt(timestamp, is_exclusive);
- TruncateAt(starting_point, removed_buffers);
+ return TruncateAt(starting_point, removed_buffers);
}
int SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
@@ -1800,13 +2025,13 @@ void SourceBufferRange::FreeBufferRange(
buffers_.erase(starting_point, ending_point);
}
-void SourceBufferRange::TruncateAt(
+bool SourceBufferRange::TruncateAt(
const BufferQueue::iterator& starting_point, BufferQueue* removed_buffers) {
DCHECK(!removed_buffers || removed_buffers->empty());
// Return if we're not deleting anything.
if (starting_point == buffers_.end())
- return;
+ return buffers_.empty();
// Reset the next buffer index if we will be deleting the buffer that's next
// in sequence.
@@ -1832,6 +2057,7 @@ void SourceBufferRange::TruncateAt(
// Remove everything from |starting_point| onward.
FreeBufferRange(starting_point, buffers_.end());
+ return buffers_.empty();
}
bool SourceBufferRange::GetNextBuffer(
@@ -1839,7 +2065,7 @@ bool SourceBufferRange::GetNextBuffer(
if (!HasNextBuffer())
return false;
- *out_buffer = buffers_.at(next_buffer_index_);
+ *out_buffer = buffers_[next_buffer_index_];
next_buffer_index_++;
return true;
}
@@ -1851,7 +2077,9 @@ bool SourceBufferRange::HasNextBuffer() const {
int SourceBufferRange::GetNextConfigId() const {
DCHECK(HasNextBuffer());
- return buffers_.at(next_buffer_index_)->GetConfigId();
+ // If the next buffer is an audio splice frame, the next effective config id
+ // comes from the first fade out preroll buffer.
+ return GetConfigId(buffers_[next_buffer_index_], 0);
}
base::TimeDelta SourceBufferRange::GetNextTimestamp() const {
@@ -1862,7 +2090,7 @@ base::TimeDelta SourceBufferRange::GetNextTimestamp() const {
return kNoTimestamp();
}
- return buffers_.at(next_buffer_index_)->GetDecodeTimestamp();
+ return buffers_[next_buffer_index_]->GetDecodeTimestamp();
}
bool SourceBufferRange::HasNextBufferPosition() const {
@@ -1952,6 +2180,16 @@ base::TimeDelta SourceBufferRange::NextKeyframeTimestamp(
KeyframeMap::iterator itr = GetFirstKeyframeAt(timestamp, false);
if (itr == keyframe_map_.end())
return kNoTimestamp();
+
+ // If the timestamp is inside the gap between the start of the media
+ // segment and the first buffer, then just pretend there is a
+ // keyframe at the specified timestamp.
+ if (itr == keyframe_map_.begin() &&
+ timestamp > media_segment_start_time_ &&
+ timestamp < itr->first) {
+ return timestamp;
+ }
+
return itr->first;
}
@@ -1968,9 +2206,14 @@ base::TimeDelta SourceBufferRange::KeyframeBeforeTimestamp(
bool SourceBufferRange::IsNextInSequence(
base::TimeDelta timestamp, bool is_keyframe) const {
base::TimeDelta end = buffers_.back()->GetDecodeTimestamp();
- return (end < timestamp && timestamp <= end + GetFudgeRoom()) ||
- (timestamp == end && AllowSameTimestamp(
- buffers_.back()->IsKeyframe(), is_keyframe, is_video_));
+ if (end < timestamp &&
+ (type_ == SourceBufferStream::kText ||
+ timestamp <= end + GetFudgeRoom())) {
+ return true;
+ }
+
+ return timestamp == end && AllowSameTimestamp(
+ buffers_.back()->IsKeyframe(), is_keyframe, type_);
}
base::TimeDelta SourceBufferRange::GetFudgeRoom() const {
@@ -1983,4 +2226,50 @@ base::TimeDelta SourceBufferRange::GetApproximateDuration() const {
return max_interbuffer_distance;
}
+bool SourceBufferRange::GetBuffersInRange(base::TimeDelta start,
+ base::TimeDelta end,
+ BufferQueue* buffers) {
+  // Find the nearest keyframe with a decode timestamp <= start.
+ const base::TimeDelta first_timestamp = KeyframeBeforeTimestamp(start);
+ if (first_timestamp == kNoTimestamp())
+ return false;
+
+ // Find all buffers involved in the range.
+ const size_t previous_size = buffers->size();
+ for (BufferQueue::iterator it = GetBufferItrAt(first_timestamp, false);
+ it != buffers_.end();
+ ++it) {
+ const scoped_refptr<StreamParserBuffer>& buffer = *it;
+ // Buffers without duration are not supported, so bail if we encounter any.
+ if (buffer->duration() == kNoTimestamp() ||
+ buffer->duration() <= base::TimeDelta()) {
+ return false;
+ }
+ if (buffer->end_of_stream() || buffer->timestamp() >= end)
+ break;
+ if (buffer->timestamp() + buffer->duration() <= start)
+ continue;
+ buffers->push_back(buffer);
+ }
+ return previous_size < buffers->size();
+}
+
+bool SourceBufferStream::SetPendingBuffer(
+ scoped_refptr<StreamParserBuffer>* out_buffer) {
+ DCHECK(*out_buffer);
+ DCHECK(!pending_buffer_);
+
+ const bool have_splice_buffers = !(*out_buffer)->splice_buffers().empty();
+ const bool have_preroll_buffer = !!(*out_buffer)->preroll_buffer();
+
+ if (!have_splice_buffers && !have_preroll_buffer)
+ return false;
+
+ DCHECK_NE(have_splice_buffers, have_preroll_buffer);
+ splice_buffers_index_ = 0;
+ pending_buffer_.swap(*out_buffer);
+ pending_buffers_complete_ = false;
+ return true;
+}
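+
+// For reference, GetNextBuffer() is assumed to dispatch on SetPendingBuffer()
+// roughly as follows (a sketch, not part of this hunk):
+//
+//   Status SourceBufferStream::GetNextBuffer(
+//       scoped_refptr<StreamParserBuffer>* out_buffer) {
+//     if (!pending_buffer_) {
+//       const Status status = GetNextBufferInternal(out_buffer);
+//       if (status != kSuccess || !SetPendingBuffer(out_buffer))
+//         return status;
+//     }
+//     // A pending splice or preroll buffer is handed out piecewise.
+//     if (!pending_buffer_->splice_buffers().empty())
+//       return HandleNextBufferWithSplice(out_buffer);
+//     DCHECK(pending_buffer_->preroll_buffer());
+//     return HandleNextBufferWithPreroll(out_buffer);
+//   }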
+
} // namespace media
diff --git a/chromium/media/filters/source_buffer_stream.h b/chromium/media/filters/source_buffer_stream.h
index 4b00504cfb2..95b2e0970b1 100644
--- a/chromium/media/filters/source_buffer_stream.h
+++ b/chromium/media/filters/source_buffer_stream.h
@@ -32,7 +32,7 @@ class SourceBufferRange;
// See file-level comment for complete description.
class MEDIA_EXPORT SourceBufferStream {
public:
- typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+ typedef StreamParser::BufferQueue BufferQueue;
// Status returned by GetNextBuffer().
// kSuccess: Indicates that the next buffer was returned.
@@ -46,12 +46,21 @@ class MEDIA_EXPORT SourceBufferStream {
kEndOfStream
};
+ enum Type {
+ kAudio,
+ kVideo,
+ kText
+ };
+
SourceBufferStream(const AudioDecoderConfig& audio_config,
- const LogCB& log_cb);
+ const LogCB& log_cb,
+ bool splice_frames_enabled);
SourceBufferStream(const VideoDecoderConfig& video_config,
- const LogCB& log_cb);
+ const LogCB& log_cb,
+ bool splice_frames_enabled);
SourceBufferStream(const TextTrackConfig& text_config,
- const LogCB& log_cb);
+ const LogCB& log_cb,
+ bool splice_frames_enabled);
~SourceBufferStream();
@@ -102,6 +111,11 @@ class MEDIA_EXPORT SourceBufferStream {
// Returns a list of the buffered time ranges.
Ranges<base::TimeDelta> GetBufferedTime() const;
+ // Returns the duration of the buffered ranges, which is equivalent
+  // to the end timestamp of the last buffered range. If no data is buffered,
+  // base::TimeDelta() is returned.
+ base::TimeDelta GetBufferedDuration() const;
+
// Notifies this object that end of stream has been signalled.
void MarkEndOfStream();
@@ -260,8 +274,8 @@ class MEDIA_EXPORT SourceBufferStream {
// have a keyframe after |timestamp| then kNoTimestamp() is returned.
base::TimeDelta FindKeyframeAfterTimestamp(const base::TimeDelta timestamp);
- // Returns "VIDEO" for a video SourceBufferStream and "AUDIO" for an audio
- // one.
+ // Returns "VIDEO" for a video SourceBufferStream, "AUDIO" for an audio
+ // stream, and "TEXT" for a text stream.
std::string GetStreamTypeName() const;
// Returns true if we don't have any ranges or the last range is selected
@@ -286,7 +300,32 @@ class MEDIA_EXPORT SourceBufferStream {
base::TimeDelta start, base::TimeDelta end, bool is_exclusive,
BufferQueue* deleted_buffers);
- bool is_video() const { return video_configs_.size() > 0; }
+ Type GetType() const;
+
+ // See GetNextBuffer() for additional details. This method handles splice
+ // frame processing.
+ Status HandleNextBufferWithSplice(
+ scoped_refptr<StreamParserBuffer>* out_buffer);
+
+ // See GetNextBuffer() for additional details. This method handles preroll
+ // frame processing.
+ Status HandleNextBufferWithPreroll(
+ scoped_refptr<StreamParserBuffer>* out_buffer);
+
+  // See GetNextBuffer() for additional details. This internal method hands
+  // out single buffers from the |track_buffer_| and |selected_range_| without
+  // additional processing for splice frames or preroll buffers.
+ Status GetNextBufferInternal(scoped_refptr<StreamParserBuffer>* out_buffer);
+
+ // Called by PrepareRangesForNextAppend() before pruning overlapped buffers to
+ // generate a splice frame with a small portion of the overlapped buffers. If
+ // a splice frame is generated, the first buffer in |new_buffers| will have
+ // its timestamps, duration, and fade out preroll updated.
+ void GenerateSpliceFrame(const BufferQueue& new_buffers);
+
+ // If |out_buffer| has splice buffers or preroll, sets |pending_buffer_|
+ // appropriately and returns true. Otherwise returns false.
+ bool SetPendingBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
// Callback used to report error strings that can help the web developer
// figure out what is wrong with the content.
@@ -364,6 +403,21 @@ class MEDIA_EXPORT SourceBufferStream {
// GetCurrentXXXDecoderConfig() has been called.
bool config_change_pending_;
+ // Used by HandleNextBufferWithSplice() or HandleNextBufferWithPreroll() when
+ // a splice frame buffer or buffer with preroll is returned from
+ // GetNextBufferInternal().
+ scoped_refptr<StreamParserBuffer> pending_buffer_;
+
+  // Indicates which of the splice buffers in |pending_buffer_| should be
+  // handed out next.
+ size_t splice_buffers_index_;
+
+ // Indicates that all buffers before |pending_buffer_| have been handed out.
+ bool pending_buffers_complete_;
+
+ // Indicates that splice frame generation is enabled.
+ const bool splice_frames_enabled_;
+
DISALLOW_COPY_AND_ASSIGN(SourceBufferStream);
};
diff --git a/chromium/media/filters/source_buffer_stream_unittest.cc b/chromium/media/filters/source_buffer_stream_unittest.cc
index 9e7373a16ad..50efdacd4f2 100644
--- a/chromium/media/filters/source_buffer_stream_unittest.cc
+++ b/chromium/media/filters/source_buffer_stream_unittest.cc
@@ -6,6 +6,8 @@
#include <string>
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -13,10 +15,14 @@
#include "media/base/data_buffer.h"
#include "media/base/media_log.h"
#include "media/base/test_helpers.h"
+#include "media/base/text_track_config.h"
+#include "media/filters/webvtt_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
+typedef StreamParser::BufferQueue BufferQueue;
+
static const int kDefaultFramesPerSecond = 30;
static const int kDefaultKeyframesPerSecond = 6;
static const uint8 kDataA = 0x11;
@@ -25,10 +31,11 @@ static const int kDataSize = 1;
class SourceBufferStreamTest : public testing::Test {
protected:
- SourceBufferStreamTest() {
- config_ = TestVideoConfig::Normal();
- stream_.reset(new SourceBufferStream(config_, LogCB()));
+ SourceBufferStreamTest()
+ : accurate_durations_(false) {
+ video_config_ = TestVideoConfig::Normal();
SetStreamInfo(kDefaultFramesPerSecond, kDefaultKeyframesPerSecond);
+ stream_.reset(new SourceBufferStream(video_config_, log_cb(), true));
}
void SetMemoryLimit(int buffers_of_data) {
@@ -41,6 +48,32 @@ class SourceBufferStreamTest : public testing::Test {
frame_duration_ = ConvertToFrameDuration(frames_per_second);
}
+ void SetTextStream() {
+ video_config_ = TestVideoConfig::Invalid();
+ TextTrackConfig config(kTextSubtitles, "", "", "");
+ stream_.reset(new SourceBufferStream(config, LogCB(), true));
+ SetStreamInfo(2, 2);
+ }
+
+ void SetAudioStream() {
+ video_config_ = TestVideoConfig::Invalid();
+ accurate_durations_ = true;
+ audio_config_.Initialize(kCodecVorbis,
+ kSampleFormatPlanarF32,
+ CHANNEL_LAYOUT_STEREO,
+ 1000,
+ NULL,
+ 0,
+ false,
+ false,
+ base::TimeDelta(),
+ 0);
+ stream_.reset(new SourceBufferStream(audio_config_, LogCB(), true));
+
+ // Equivalent to 2ms per frame.
+ SetStreamInfo(500, 500);
+ }
+
void NewSegmentAppend(int starting_position, int number_of_buffers) {
AppendBuffers(starting_position, number_of_buffers, true,
base::TimeDelta(), true, &kDataA, kDataSize);
@@ -77,27 +110,32 @@ class SourceBufferStreamTest : public testing::Test {
}
void NewSegmentAppend(const std::string& buffers_to_append) {
- AppendBuffers(buffers_to_append, true, false, true);
+ AppendBuffers(buffers_to_append, true, kNoTimestamp(), false, true);
+ }
+
+ void NewSegmentAppend(base::TimeDelta start_timestamp,
+ const std::string& buffers_to_append) {
+ AppendBuffers(buffers_to_append, true, start_timestamp, false, true);
}
void AppendBuffers(const std::string& buffers_to_append) {
- AppendBuffers(buffers_to_append, false, false, true);
+ AppendBuffers(buffers_to_append, false, kNoTimestamp(), false, true);
}
void NewSegmentAppendOneByOne(const std::string& buffers_to_append) {
- AppendBuffers(buffers_to_append, true, true, true);
+ AppendBuffers(buffers_to_append, true, kNoTimestamp(), true, true);
}
void AppendBuffersOneByOne(const std::string& buffers_to_append) {
- AppendBuffers(buffers_to_append, false, true, true);
+ AppendBuffers(buffers_to_append, false, kNoTimestamp(), true, true);
}
void NewSegmentAppend_ExpectFailure(const std::string& buffers_to_append) {
- AppendBuffers(buffers_to_append, true, false, false);
+ AppendBuffers(buffers_to_append, true, kNoTimestamp(), false, false);
}
void AppendBuffers_ExpectFailure(const std::string& buffers_to_append) {
- AppendBuffers(buffers_to_append, false, false, false);
+ AppendBuffers(buffers_to_append, false, kNoTimestamp(), false, false);
}
void Seek(int position) {
@@ -218,6 +256,8 @@ class SourceBufferStreamTest : public testing::Test {
std::vector<std::string> timestamps;
base::SplitString(expected, ' ', &timestamps);
std::stringstream ss;
+ const SourceBufferStream::Type type = stream_->GetType();
+ base::TimeDelta active_splice_timestamp = kNoTimestamp();
for (size_t i = 0; i < timestamps.size(); i++) {
scoped_refptr<StreamParserBuffer> buffer;
SourceBufferStream::Status status = stream_->GetNextBuffer(&buffer);
@@ -225,13 +265,68 @@ class SourceBufferStreamTest : public testing::Test {
if (i > 0)
ss << " ";
+ if (status == SourceBufferStream::kConfigChange) {
+ switch (type) {
+ case SourceBufferStream::kVideo:
+ stream_->GetCurrentVideoDecoderConfig();
+ break;
+ case SourceBufferStream::kAudio:
+ stream_->GetCurrentAudioDecoderConfig();
+ break;
+ case SourceBufferStream::kText:
+ stream_->GetCurrentTextTrackConfig();
+ break;
+ }
+
+        // Verify the config change arrived where the expected string says it
+        // should; the old form only re-checked |status| against itself.
+        EXPECT_EQ("C", timestamps[i]);
+
+ ss << "C";
+ continue;
+ }
+
EXPECT_EQ(SourceBufferStream::kSuccess, status);
if (status != SourceBufferStream::kSuccess)
break;
ss << buffer->GetDecodeTimestamp().InMilliseconds();
- if (buffer->IsKeyframe())
+
+ // Handle preroll buffers.
+ if (EndsWith(timestamps[i], "P", true)) {
+ ASSERT_TRUE(buffer->IsKeyframe());
+ scoped_refptr<StreamParserBuffer> preroll_buffer;
+ preroll_buffer.swap(buffer);
+
+ // When a preroll buffer is encountered we should be able to request one
+ // more buffer. The first buffer should match the timestamp and config
+ // of the second buffer, except that its discard_padding() should be its
+ // duration.
+ ASSERT_EQ(SourceBufferStream::kSuccess,
+ stream_->GetNextBuffer(&buffer));
+ ASSERT_EQ(buffer->GetConfigId(), preroll_buffer->GetConfigId());
+ ASSERT_EQ(buffer->track_id(), preroll_buffer->track_id());
+ ASSERT_EQ(buffer->timestamp(), preroll_buffer->timestamp());
+ ASSERT_EQ(buffer->GetDecodeTimestamp(),
+ preroll_buffer->GetDecodeTimestamp());
+ ASSERT_EQ(kInfiniteDuration(), preroll_buffer->discard_padding().first);
+ ASSERT_EQ(base::TimeDelta(), preroll_buffer->discard_padding().second);
+ ASSERT_TRUE(buffer->IsKeyframe());
+
+ ss << "P";
+ } else if (buffer->IsKeyframe()) {
ss << "K";
+ }
+
+ // Until the last splice frame is seen, indicated by a matching timestamp,
+ // all buffers must have the same splice_timestamp().
+ if (buffer->timestamp() == active_splice_timestamp) {
+ ASSERT_EQ(buffer->splice_timestamp(), kNoTimestamp());
+ } else {
+ ASSERT_TRUE(active_splice_timestamp == kNoTimestamp() ||
+ active_splice_timestamp == buffer->splice_timestamp());
+ }
+
+ active_splice_timestamp = buffer->splice_timestamp();
}
EXPECT_EQ(expected, ss.str());
}
@@ -241,17 +336,30 @@ class SourceBufferStreamTest : public testing::Test {
EXPECT_EQ(SourceBufferStream::kNeedBuffer, stream_->GetNextBuffer(&buffer));
}
- void CheckConfig(const VideoDecoderConfig& config) {
+ void CheckVideoConfig(const VideoDecoderConfig& config) {
const VideoDecoderConfig& actual = stream_->GetCurrentVideoDecoderConfig();
EXPECT_TRUE(actual.Matches(config))
<< "Expected: " << config.AsHumanReadableString()
<< "\nActual: " << actual.AsHumanReadableString();
}
+ void CheckAudioConfig(const AudioDecoderConfig& config) {
+ const AudioDecoderConfig& actual = stream_->GetCurrentAudioDecoderConfig();
+ EXPECT_TRUE(actual.Matches(config))
+ << "Expected: " << config.AsHumanReadableString()
+ << "\nActual: " << actual.AsHumanReadableString();
+ }
+
+ const LogCB log_cb() {
+ return base::Bind(&SourceBufferStreamTest::DebugMediaLog,
+ base::Unretained(this));
+ }
+
base::TimeDelta frame_duration() const { return frame_duration_; }
scoped_ptr<SourceBufferStream> stream_;
- VideoDecoderConfig config_;
+ VideoDecoderConfig video_config_;
+ AudioDecoderConfig audio_config_;
private:
base::TimeDelta ConvertToFrameDuration(int frames_per_second) {
@@ -271,12 +379,14 @@ class SourceBufferStreamTest : public testing::Test {
int keyframe_interval = frames_per_second_ / keyframes_per_second_;
- SourceBufferStream::BufferQueue queue;
+ BufferQueue queue;
for (int i = 0; i < number_of_buffers; i++) {
int position = starting_position + i;
bool is_keyframe = position % keyframe_interval == 0;
+ // Buffer type and track ID are meaningless to these tests.
scoped_refptr<StreamParserBuffer> buffer =
- StreamParserBuffer::CopyFrom(data, size, is_keyframe);
+ StreamParserBuffer::CopyFrom(data, size, is_keyframe,
+ DemuxerStream::AUDIO, 0);
base::TimeDelta timestamp = frame_duration_ * position;
if (i == 0)
@@ -297,6 +407,8 @@ class SourceBufferStreamTest : public testing::Test {
presentation_timestamp = timestamp - frame_duration_;
}
buffer->set_timestamp(presentation_timestamp);
+ if (accurate_durations_)
+ buffer->set_duration(frame_duration_);
queue.push_back(buffer);
}
@@ -304,37 +416,134 @@ class SourceBufferStreamTest : public testing::Test {
EXPECT_EQ(expect_success, stream_->Append(queue));
}
- void AppendBuffers(const std::string& buffers_to_append,
- bool start_new_segment, bool one_by_one,
- bool expect_success) {
+ // StringToBufferQueue() allows for the generation of StreamParserBuffers from
+ // coded strings of timestamps separated by spaces. Supported syntax:
+ //
+ // ##:
+ // Generates a StreamParserBuffer with decode timestamp ##. E.g., "0 1 2 3".
+ //
+ // ##K:
+ // Indicates the buffer with timestamp ## reflects a keyframe. E.g., "0K 1".
+ //
+ // S(a# ... y# z#)
+ // Indicates a splice frame buffer should be created with timestamp z#. The
+ // preceding timestamps a# ... y# will be treated as the fade out preroll for
+  // the splice frame. If a timestamp within the preroll ends with C, the
+  // config id to use for that and subsequent preroll appends is incremented
+  // by one. The config id for non-splice frame appends will not be affected.
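+  //
+  // For example, "0K S(3K 6C 9 10) 15" (used by the splice tests below)
+  // produces a keyframe at 0, a splice frame at 10 whose fade out preroll is
+  // {3, 6, 9} with a config change starting at 6, and a regular buffer at 15.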
+ BufferQueue StringToBufferQueue(const std::string& buffers_to_append) {
std::vector<std::string> timestamps;
base::SplitString(buffers_to_append, ' ', &timestamps);
CHECK_GT(timestamps.size(), 0u);
- SourceBufferStream::BufferQueue buffers;
+ bool splice_frame = false;
+ size_t splice_config_id = stream_->append_config_index_;
+ BufferQueue pre_splice_buffers;
+ BufferQueue buffers;
for (size_t i = 0; i < timestamps.size(); i++) {
bool is_keyframe = false;
+ bool has_preroll = false;
+ bool last_splice_frame = false;
+ // Handle splice frame starts.
+ if (StartsWithASCII(timestamps[i], "S(", true)) {
+ CHECK(!splice_frame);
+ splice_frame = true;
+ // Remove the "S(" off of the token.
+ timestamps[i] = timestamps[i].substr(2, timestamps[i].length());
+ }
+ if (splice_frame && EndsWith(timestamps[i], ")", true)) {
+ splice_frame = false;
+ last_splice_frame = true;
+ // Remove the ")" off of the token.
+ timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
+ }
+ // Handle config changes within the splice frame.
+ if (splice_frame && EndsWith(timestamps[i], "C", true)) {
+ splice_config_id++;
+ CHECK(splice_config_id < stream_->audio_configs_.size() ||
+ splice_config_id < stream_->video_configs_.size());
+ // Remove the "C" off of the token.
+ timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
+ }
if (EndsWith(timestamps[i], "K", true)) {
is_keyframe = true;
// Remove the "K" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
+ // Handle preroll buffers.
+ if (EndsWith(timestamps[i], "P", true)) {
+ is_keyframe = true;
+ has_preroll = true;
+ // Remove the "P" off of the token.
+ timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
+ }
+
int time_in_ms;
CHECK(base::StringToInt(timestamps[i], &time_in_ms));
- // Create buffer.
+ // Create buffer. Buffer type and track ID are meaningless to these tests.
scoped_refptr<StreamParserBuffer> buffer =
- StreamParserBuffer::CopyFrom(&kDataA, kDataSize, is_keyframe);
+ StreamParserBuffer::CopyFrom(&kDataA, kDataSize, is_keyframe,
+ DemuxerStream::AUDIO, 0);
base::TimeDelta timestamp =
base::TimeDelta::FromMilliseconds(time_in_ms);
+ buffer->set_timestamp(timestamp);
+ if (accurate_durations_)
+ buffer->set_duration(frame_duration_);
buffer->SetDecodeTimestamp(timestamp);
- if (i == 0u && start_new_segment)
- stream_->OnNewMediaSegment(timestamp);
+ // Simulate preroll buffers by just generating another buffer and sticking
+ // it as the preroll.
+ if (has_preroll) {
+ scoped_refptr<StreamParserBuffer> preroll_buffer =
+ StreamParserBuffer::CopyFrom(
+ &kDataA, kDataSize, is_keyframe, DemuxerStream::AUDIO, 0);
+ preroll_buffer->set_duration(frame_duration_);
+ buffer->SetPrerollBuffer(preroll_buffer);
+ }
+
+ if (splice_frame) {
+ if (!pre_splice_buffers.empty()) {
+ // Enforce strictly monotonically increasing timestamps.
+ CHECK_GT(
+ timestamp.InMicroseconds(),
+ pre_splice_buffers.back()->GetDecodeTimestamp().InMicroseconds());
+ }
+ buffer->SetConfigId(splice_config_id);
+ pre_splice_buffers.push_back(buffer);
+ continue;
+ }
+
+ if (last_splice_frame) {
+ // Require at least one additional buffer for a splice.
+ CHECK(!pre_splice_buffers.empty());
+ buffer->SetConfigId(splice_config_id);
+ buffer->ConvertToSpliceBuffer(pre_splice_buffers);
+ pre_splice_buffers.clear();
+ }
buffers.push_back(buffer);
}
+ return buffers;
+ }
+
+ void AppendBuffers(const std::string& buffers_to_append,
+ bool start_new_segment,
+ base::TimeDelta segment_start_timestamp,
+ bool one_by_one,
+ bool expect_success) {
+ BufferQueue buffers = StringToBufferQueue(buffers_to_append);
+
+ if (start_new_segment) {
+ base::TimeDelta start_timestamp = segment_start_timestamp;
+ if (start_timestamp == kNoTimestamp())
+ start_timestamp = buffers[0]->GetDecodeTimestamp();
+
+ ASSERT_TRUE(start_timestamp <= buffers[0]->GetDecodeTimestamp());
+
+ stream_->OnNewMediaSegment(start_timestamp);
+ }
if (!one_by_one) {
EXPECT_EQ(expect_success, stream_->Append(buffers));
@@ -343,15 +552,23 @@ class SourceBufferStreamTest : public testing::Test {
// Append each buffer one by one.
for (size_t i = 0; i < buffers.size(); i++) {
- SourceBufferStream::BufferQueue wrapper;
+ BufferQueue wrapper;
wrapper.push_back(buffers[i]);
EXPECT_TRUE(stream_->Append(wrapper));
}
}
+ void DebugMediaLog(const std::string& log) {
+ DVLOG(1) << log;
+ }
+
int frames_per_second_;
int keyframes_per_second_;
base::TimeDelta frame_duration_;
+ // TODO(dalecurtis): It's silly to have this, all tests should use accurate
+ // durations instead. However, currently all tests are written with an
+ // expectation of 0 duration, so it's an involved change.
+ bool accurate_durations_;
DISALLOW_COPY_AND_ASSIGN(SourceBufferStreamTest);
};
@@ -462,6 +679,27 @@ TEST_F(SourceBufferStreamTest, Complete_Overlap) {
CheckExpectedBuffers(0, 14);
}
+TEST_F(SourceBufferStreamTest,
+ Complete_Overlap_AfterSegmentTimestampAndBeforeFirstBufferTimestamp) {
+ // Append a segment with a start timestamp of 0, but the first
+ // buffer starts at 30ms. This can happen in muxed content where the
+ // audio starts before the first frame.
+ NewSegmentAppend(base::TimeDelta::FromMilliseconds(0), "30K 60K 90K 120K");
+
+ CheckExpectedRangesByTimestamp("{ [0,150) }");
+
+ // Completely overlap the old buffers, with a segment that starts
+ // after the old segment start timestamp, but before the timestamp
+ // of the first buffer in the segment.
+ NewSegmentAppend("20K 50K 80K 110K");
+
+ // Verify that the buffered ranges are updated properly and we don't crash.
+ CheckExpectedRangesByTimestamp("{ [20,150) }");
+
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(20));
+ CheckExpectedBuffers("20K 50K 80K 110K 120K");
+}
+
TEST_F(SourceBufferStreamTest, Complete_Overlap_EdgeCase) {
// Make each frame a keyframe so that it's okay to overlap frames at any point
// (instead of needing to respect keyframe boundaries).
@@ -1346,7 +1584,7 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
CheckExpectedRangesByTimestamp("{ [10,160) }");
// Seek to 70ms.
- SeekToTimestamp(base::TimeDelta::FromMilliseconds(10));
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(70));
CheckExpectedBuffers("10K 40");
// Overlap with a new segment from 0 to 120ms.
@@ -1504,7 +1742,7 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
CheckExpectedRangesByTimestamp("{ [10,160) [200,260) }");
// Seek to 70ms.
- SeekToTimestamp(base::TimeDelta::FromMilliseconds(10));
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(70));
CheckExpectedBuffers("10K 40");
// Overlap with a new segment from 0 to 120ms.
@@ -2662,22 +2900,22 @@ TEST_F(SourceBufferStreamTest, GetRemovalRange_Range) {
TEST_F(SourceBufferStreamTest, ConfigChange_Basic) {
VideoDecoderConfig new_config = TestVideoConfig::Large();
- ASSERT_FALSE(new_config.Matches(config_));
+ ASSERT_FALSE(new_config.Matches(video_config_));
Seek(0);
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
// Append 5 buffers at positions 0 through 4
NewSegmentAppend(0, 5, &kDataA);
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
// Signal a config change.
stream_->UpdateVideoConfig(new_config);
// Make sure updating the config doesn't change anything since new_config
// should not be associated with the buffer GetNextBuffer() will return.
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
// Append 5 buffers at positions 5 through 9.
NewSegmentAppend(5, 5, &kDataB);
@@ -2686,7 +2924,7 @@ TEST_F(SourceBufferStreamTest, ConfigChange_Basic) {
scoped_refptr<StreamParserBuffer> buffer;
for (int i = 0; i < 5; i++) {
EXPECT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kSuccess);
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
}
// Verify the next attempt to get a buffer will signal that a config change
@@ -2694,11 +2932,11 @@ TEST_F(SourceBufferStreamTest, ConfigChange_Basic) {
EXPECT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kConfigChange);
// Verify that the new config is now returned.
- CheckConfig(new_config);
+ CheckVideoConfig(new_config);
// Consume the remaining buffers associated with the new config.
for (int i = 0; i < 5; i++) {
- CheckConfig(new_config);
+ CheckVideoConfig(new_config);
EXPECT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kSuccess);
}
}
@@ -2714,29 +2952,29 @@ TEST_F(SourceBufferStreamTest, ConfigChange_Seek) {
// Seek to the start of the buffers with the new config and make sure a
// config change is signalled.
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
Seek(5);
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
EXPECT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kConfigChange);
- CheckConfig(new_config);
+ CheckVideoConfig(new_config);
CheckExpectedBuffers(5, 9, &kDataB);
// Seek to the start which has a different config. Don't fetch any buffers and
// seek back to buffers with the current config. Make sure a config change
// isn't signalled in this case.
- CheckConfig(new_config);
+ CheckVideoConfig(new_config);
Seek(0);
Seek(7);
CheckExpectedBuffers(5, 9, &kDataB);
// Seek to the start and make sure a config change is signalled.
- CheckConfig(new_config);
+ CheckVideoConfig(new_config);
Seek(0);
- CheckConfig(new_config);
+ CheckVideoConfig(new_config);
EXPECT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kConfigChange);
- CheckConfig(config_);
+ CheckVideoConfig(video_config_);
CheckExpectedBuffers(0, 4, &kDataA);
}
@@ -2889,6 +3127,27 @@ TEST_F(SourceBufferStreamTest, SetExplicitDuration_UpdateSelectedRange) {
CheckExpectedRangesByTimestamp("{ [0,60) [120,180) }");
}
+TEST_F(SourceBufferStreamTest,
+ SetExplicitDuration_AfterSegmentTimestampAndBeforeFirstBufferTimestamp) {
+ NewSegmentAppend("0K 30K 60K");
+
+ // Append a segment with a start timestamp of 200, but the first
+ // buffer starts at 230ms. This can happen in muxed content where the
+ // audio starts before the first frame.
+ NewSegmentAppend(base::TimeDelta::FromMilliseconds(200),
+ "230K 260K 290K 320K");
+
+ NewSegmentAppend("400K 430K 460K");
+
+ CheckExpectedRangesByTimestamp("{ [0,90) [200,350) [400,490) }");
+
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(120));
+
+ // Verify that the buffered ranges are updated properly and we don't crash.
+ CheckExpectedRangesByTimestamp("{ [0,90) }");
+}
+
// Test the case were the current playback position is at the end of the
// buffered data and several overlaps occur that causes the selected
// range to get split and then merged back into a single range.
@@ -2957,16 +3216,18 @@ TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Invalid_2) {
}
// Verify that a keyframe followed by a non-keyframe with the same timestamp
-// is not allowed.
-TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Invalid_3) {
+// is allowed.
+TEST_F(SourceBufferStreamTest, SameTimestamp_VideoKeyFrame_TwoAppends) {
Seek(0);
NewSegmentAppend("0K 30K");
- AppendBuffers_ExpectFailure("30 60");
+ AppendBuffers("30 60");
+ CheckExpectedBuffers("0K 30K 30 60");
}
-TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Invalid_4) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_VideoKeyFrame_SingleAppend) {
Seek(0);
- NewSegmentAppend_ExpectFailure("0K 30K 30 60");
+ NewSegmentAppend("0K 30K 30 60");
+ CheckExpectedBuffers("0K 30K 30 60");
}
TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Overlap_1) {
@@ -3000,7 +3261,7 @@ TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Overlap_3) {
TEST_F(SourceBufferStreamTest, SameTimestamp_Audio) {
AudioDecoderConfig config(kCodecMP3, kSampleFormatF32, CHANNEL_LAYOUT_STEREO,
44100, NULL, 0, false);
- stream_.reset(new SourceBufferStream(config, LogCB()));
+ stream_.reset(new SourceBufferStream(config, log_cb(), true));
Seek(0);
NewSegmentAppend("0K 0K 30K 30 60 60");
CheckExpectedBuffers("0K 0K 30K 30 60 60");
@@ -3009,7 +3270,7 @@ TEST_F(SourceBufferStreamTest, SameTimestamp_Audio) {
TEST_F(SourceBufferStreamTest, SameTimestamp_Audio_Invalid_1) {
AudioDecoderConfig config(kCodecMP3, kSampleFormatF32, CHANNEL_LAYOUT_STEREO,
44100, NULL, 0, false);
- stream_.reset(new SourceBufferStream(config, LogCB()));
+ stream_.reset(new SourceBufferStream(config, log_cb(), true));
Seek(0);
NewSegmentAppend_ExpectFailure("0K 30 30K 60");
}
@@ -3250,6 +3511,32 @@ TEST_F(SourceBufferStreamTest, Remove_GOPBeingAppended) {
CheckExpectedBuffers("240K 270 300");
}
+TEST_F(SourceBufferStreamTest, Remove_WholeGOPBeingAppended) {
+ Seek(0);
+ NewSegmentAppend("0K 30 60 90");
+ CheckExpectedRangesByTimestamp("{ [0,120) }");
+
+ // Remove the keyframe of the current GOP being appended.
+ RemoveInMs(0, 30, 120);
+ CheckExpectedRangesByTimestamp("{ }");
+
+ // Continue appending the current GOP.
+ AppendBuffers("210 240");
+
+ CheckExpectedRangesByTimestamp("{ }");
+
+ // Append the beginning of the next GOP.
+ AppendBuffers("270K 300");
+
+  // Verify that the new range starts at the beginning of the next GOP.
+ CheckExpectedRangesByTimestamp("{ [270,330) }");
+
+ // Verify the buffers in the ranges.
+ CheckNoNextBuffer();
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(270));
+ CheckExpectedBuffers("270K 300");
+}
+
TEST_F(SourceBufferStreamTest,
Remove_PreviousAppendDestroyedAndOverwriteExistingRange) {
@@ -3275,6 +3562,308 @@ TEST_F(SourceBufferStreamTest,
CheckExpectedBuffers("90K 121 151");
}
+TEST_F(SourceBufferStreamTest, Remove_GapAtBeginningOfMediaSegment) {
+ Seek(0);
+
+ // Append a media segment that has a gap at the beginning of it.
+ NewSegmentAppend(base::TimeDelta::FromMilliseconds(0),
+ "30K 60 90 120K 150");
+ CheckExpectedRangesByTimestamp("{ [0,180) }");
+
+ // Remove the gap that doesn't contain any buffers.
+ RemoveInMs(0, 10, 180);
+ CheckExpectedRangesByTimestamp("{ [10,180) }");
+
+  // Verify we still get the first buffer since only part of
+  // the gap was removed.
+ // TODO(acolwell/wolenetz): Consider not returning a buffer at this
+ // point since the current seek position has been explicitly
+ // removed but didn't happen to remove any buffers.
+ // http://crbug.com/384016
+ CheckExpectedBuffers("30K");
+
+ // Remove a range that includes the first GOP.
+ RemoveInMs(0, 60, 180);
+
+ // Verify that no buffer is returned because the current buffer
+ // position has been removed.
+ CheckNoNextBuffer();
+
+ CheckExpectedRangesByTimestamp("{ [120,180) }");
+}
+
+TEST_F(SourceBufferStreamTest, Text_Append_SingleRange) {
+ SetTextStream();
+ NewSegmentAppend("0K 500K 1000K");
+ CheckExpectedRangesByTimestamp("{ [0,1500) }");
+
+ Seek(0);
+ CheckExpectedBuffers("0K 500K 1000K");
+}
+
+TEST_F(SourceBufferStreamTest, Text_Append_DisjointAfter) {
+ SetTextStream();
+ NewSegmentAppend("0K 500K 1000K");
+ CheckExpectedRangesByTimestamp("{ [0,1500) }");
+ NewSegmentAppend("3000K 3500K 4000K");
+ CheckExpectedRangesByTimestamp("{ [0,4500) }");
+
+ Seek(0);
+ CheckExpectedBuffers("0K 500K 1000K 3000K 3500K 4000K");
+}
+
+TEST_F(SourceBufferStreamTest, Text_Append_DisjointBefore) {
+ SetTextStream();
+ NewSegmentAppend("3000K 3500K 4000K");
+ CheckExpectedRangesByTimestamp("{ [3000,4500) }");
+ NewSegmentAppend("0K 500K 1000K");
+ CheckExpectedRangesByTimestamp("{ [0,4500) }");
+
+ Seek(0);
+ CheckExpectedBuffers("0K 500K 1000K 3000K 3500K 4000K");
+}
+
+TEST_F(SourceBufferStreamTest, Text_CompleteOverlap) {
+ SetTextStream();
+ NewSegmentAppend("3000K 3500K 4000K");
+ CheckExpectedRangesByTimestamp("{ [3000,4500) }");
+ NewSegmentAppend("0K 501K 1001K 1501K 2001K 2501K "
+ "3001K 3501K 4001K 4501K 5001K");
+ CheckExpectedRangesByTimestamp("{ [0,5502) }");
+
+ Seek(0);
+ CheckExpectedBuffers("0K 501K 1001K 1501K 2001K 2501K "
+ "3001K 3501K 4001K 4501K 5001K");
+}
+
+TEST_F(SourceBufferStreamTest, Text_OverlapAfter) {
+ SetTextStream();
+ NewSegmentAppend("0K 500K 1000K 1500K 2000K");
+ CheckExpectedRangesByTimestamp("{ [0,2500) }");
+ NewSegmentAppend("1499K 2001K 2501K 3001K");
+ CheckExpectedRangesByTimestamp("{ [0,3503) }");
+
+ Seek(0);
+ CheckExpectedBuffers("0K 500K 1000K 1499K 2001K 2501K 3001K");
+}
+
+TEST_F(SourceBufferStreamTest, Text_OverlapBefore) {
+ SetTextStream();
+ NewSegmentAppend("1500K 2000K 2500K 3000K 3500K");
+ CheckExpectedRangesByTimestamp("{ [1500,4000) }");
+ NewSegmentAppend("0K 501K 1001K 1501K 2001K");
+ CheckExpectedRangesByTimestamp("{ [0,4001) }");
+
+ Seek(0);
+ CheckExpectedBuffers("0K 501K 1001K 1501K 2001K 2500K 3000K 3500K");
+}
+
+TEST_F(SourceBufferStreamTest, SpliceFrame_Basic) {
+ Seek(0);
+ NewSegmentAppend("0K S(3K 6 9 10) 15 20 S(25K 30 35) 40");
+ CheckExpectedBuffers("0K 3K 6 9 C 10 15 20 25K 30 C 35 40");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, SpliceFrame_SeekClearsSplice) {
+ Seek(0);
+ NewSegmentAppend("0K S(3K 6 9 10) 15K 20");
+ CheckExpectedBuffers("0K 3K 6");
+
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(15));
+ CheckExpectedBuffers("15K 20");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, SpliceFrame_SeekClearsSpliceFromTrackBuffer) {
+ Seek(0);
+ NewSegmentAppend("0K 2K S(3K 6 9 10) 15K 20");
+ CheckExpectedBuffers("0K 2K");
+
+ // Overlap the existing segment.
+ NewSegmentAppend("5K 15K 20");
+ CheckExpectedBuffers("3K 6");
+
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(15));
+ CheckExpectedBuffers("15K 20");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, SpliceFrame_ConfigChangeWithinSplice) {
+ VideoDecoderConfig new_config = TestVideoConfig::Large();
+ ASSERT_FALSE(new_config.Matches(video_config_));
+
+ // Add a new video config, then reset the config index back to the original.
+ stream_->UpdateVideoConfig(new_config);
+ stream_->UpdateVideoConfig(video_config_);
+
+ Seek(0);
+ CheckVideoConfig(video_config_);
+ NewSegmentAppend("0K S(3K 6C 9 10) 15");
+
+ CheckExpectedBuffers("0K 3K C");
+ CheckVideoConfig(new_config);
+ CheckExpectedBuffers("6 9 C");
+ CheckExpectedBuffers("10 C");
+ CheckVideoConfig(video_config_);
+ CheckExpectedBuffers("15");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, SpliceFrame_BasicFromTrackBuffer) {
+ Seek(0);
+ NewSegmentAppend("0K 5K S(8K 9 10) 20");
+ CheckExpectedBuffers("0K 5K");
+
+ // Overlap the existing segment.
+ NewSegmentAppend("5K 20");
+ CheckExpectedBuffers("8K 9 C 10 20");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest,
+ SpliceFrame_ConfigChangeWithinSpliceFromTrackBuffer) {
+ VideoDecoderConfig new_config = TestVideoConfig::Large();
+ ASSERT_FALSE(new_config.Matches(video_config_));
+
+ // Add a new video config, then reset the config index back to the original.
+ stream_->UpdateVideoConfig(new_config);
+ stream_->UpdateVideoConfig(video_config_);
+
+ Seek(0);
+ CheckVideoConfig(video_config_);
+ NewSegmentAppend("0K 5K S(7K 8C 9 10) 20");
+ CheckExpectedBuffers("0K 5K");
+
+ // Overlap the existing segment.
+ NewSegmentAppend("5K 20");
+ CheckExpectedBuffers("7K C");
+ CheckVideoConfig(new_config);
+ CheckExpectedBuffers("8 9 C");
+ CheckExpectedBuffers("10 C");
+ CheckVideoConfig(video_config_);
+ CheckExpectedBuffers("20");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_Basic) {
+ SetAudioStream();
+ Seek(0);
+ NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
+ NewSegmentAppend("11K 13K 15K 17K");
+ CheckExpectedBuffers("0K 2K 4K 6K 8K 10K 12K C 11K 13K 15K 17K");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoExactSplices) {
+ SetAudioStream();
+ Seek(0);
+ NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
+ NewSegmentAppend("10K 14K");
+ CheckExpectedBuffers("0K 2K 4K 6K 8K 10K 14K");
+ CheckNoNextBuffer();
+}
+
+// Do not allow splices on top of splices.
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoDoubleSplice) {
+ SetAudioStream();
+ Seek(0);
+ NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
+ NewSegmentAppend("11K 13K 15K 17K");
+
+ // Verify the splice was created.
+ CheckExpectedBuffers("0K 2K 4K 6K 8K 10K 12K C 11K 13K 15K 17K");
+ CheckNoNextBuffer();
+ Seek(0);
+
+ // Create a splice before the first splice which would include it.
+ NewSegmentAppend("9K");
+
+ // A splice on top of a splice should result in a discard of the original
+ // splice and no new splice frame being generated.
+ CheckExpectedBuffers("0K 2K 4K 6K 8K 9K 13K 15K 17K");
+ CheckNoNextBuffer();
+}
+
+// Test that a splice is not created if an end timestamp and start timestamp
+// overlap.
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoSplice) {
+ SetAudioStream();
+ Seek(0);
+ NewSegmentAppend("0K 2K 4K 6K 8K 10K");
+ NewSegmentAppend("12K 14K 16K 18K");
+ CheckExpectedBuffers("0K 2K 4K 6K 8K 10K 12K 14K 16K 18K");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_CorrectMediaSegmentStartTime) {
+ SetAudioStream();
+ Seek(0);
+ NewSegmentAppend("0K 2K 4K");
+ CheckExpectedRangesByTimestamp("{ [0,6) }");
+ NewSegmentAppend("6K 8K 10K");
+ CheckExpectedRangesByTimestamp("{ [0,12) }");
+ NewSegmentAppend("1K 4K");
+ CheckExpectedRangesByTimestamp("{ [0,12) }");
+ CheckExpectedBuffers("0K 2K 4K C 1K 4K 6K 8K 10K");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_ConfigChange) {
+ SetAudioStream();
+
+ AudioDecoderConfig new_config(kCodecVorbis,
+ kSampleFormatPlanarF32,
+ CHANNEL_LAYOUT_MONO,
+ 1000,
+ NULL,
+ 0,
+ false);
+ ASSERT_NE(new_config.channel_layout(), audio_config_.channel_layout());
+
+ Seek(0);
+ CheckAudioConfig(audio_config_);
+ NewSegmentAppend("0K 2K 4K 6K");
+ stream_->UpdateAudioConfig(new_config);
+ NewSegmentAppend("5K 8K 12K");
+ CheckExpectedBuffers("0K 2K 4K 6K C 5K 8K 12K");
+ CheckAudioConfig(new_config);
+ CheckNoNextBuffer();
+}
+
+// Ensure splices are not created if there are not enough frames to crossfade.
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoTinySplices) {
+ SetAudioStream();
+ Seek(0);
+
+ // Overlap the range [0, 2) with [1, 3). Since each frame has a duration of
+ // 2ms this results in an overlap of 1ms between the ranges. A splice frame
+ // should not be generated since it requires at least 2 frames, or 2ms in this
+ // case, of data to crossfade.
+ NewSegmentAppend("0K");
+ CheckExpectedRangesByTimestamp("{ [0,2) }");
+ NewSegmentAppend("1K");
+ CheckExpectedRangesByTimestamp("{ [0,3) }");
+ CheckExpectedBuffers("0K 1K");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_Preroll) {
+ SetAudioStream();
+ Seek(0);
+ NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
+ NewSegmentAppend("11P 13K 15K 17K");
+ CheckExpectedBuffers("0K 2K 4K 6K 8K 10K 12K C 11P 13K 15K 17K");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Audio_PrerollFrame) {
+ Seek(0);
+ NewSegmentAppend("0K 3P 6K");
+ CheckExpectedBuffers("0K 3P 6K");
+ CheckNoNextBuffer();
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index 53ee1b7d190..dd0e95aa82b 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -10,8 +10,9 @@
#include "base/strings/string_util.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
-#include "media/mp3/mp3_stream_parser.h"
-#include "media/webm/webm_stream_parser.h"
+#include "media/formats/mpeg/adts_stream_parser.h"
+#include "media/formats/mpeg/mp3_stream_parser.h"
+#include "media/formats/webm/webm_stream_parser.h"
#if defined(OS_ANDROID)
#include "base/android/build_info.h"
@@ -19,10 +20,10 @@
#if defined(USE_PROPRIETARY_CODECS)
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
-#include "media/mp2t/mp2t_stream_parser.h"
+#include "media/formats/mp2t/mp2t_stream_parser.h"
#endif
-#include "media/mp4/es_descriptor.h"
-#include "media/mp4/mp4_stream_parser.h"
+#include "media/formats/mp4/es_descriptor.h"
+#include "media/formats/mp4/mp4_stream_parser.h"
#endif
namespace media {
@@ -49,7 +50,7 @@ struct CodecInfo {
HISTOGRAM_EAC3,
HISTOGRAM_MP3,
HISTOGRAM_OPUS,
- HISTOGRAM_MAX // Must be the last entry.
+ HISTOGRAM_MAX = HISTOGRAM_OPUS // Must be equal to largest logged entry.
};
const char* pattern;
@@ -141,11 +142,6 @@ static const CodecInfo kMPEG2AACLCCodecInfo = { "mp4a.67", CodecInfo::AUDIO,
NULL,
CodecInfo::HISTOGRAM_MPEG2AAC };
-#if defined(ENABLE_EAC3_PLAYBACK)
-static const CodecInfo kEAC3CodecInfo = { "mp4a.a6", CodecInfo::AUDIO, NULL,
- CodecInfo::HISTOGRAM_EAC3 };
-#endif
-
static const CodecInfo* kVideoMP4Codecs[] = {
&kH264AVC1CodecInfo,
&kH264AVC3CodecInfo,
@@ -157,9 +153,6 @@ static const CodecInfo* kVideoMP4Codecs[] = {
static const CodecInfo* kAudioMP4Codecs[] = {
&kMPEG4AACCodecInfo,
&kMPEG2AACLCCodecInfo,
-#if defined(ENABLE_EAC3_PLAYBACK)
- &kEAC3CodecInfo,
-#endif
NULL
};
@@ -168,10 +161,6 @@ static StreamParser* BuildMP4Parser(
std::set<int> audio_object_types;
bool has_sbr = false;
-#if defined(ENABLE_EAC3_PLAYBACK)
- bool enable_eac3 = CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableEac3Playback);
-#endif
for (size_t i = 0; i < codecs.size(); ++i) {
std::string codec_id = codecs[i];
if (MatchPattern(codec_id, kMPEG2AACLCCodecInfo.pattern)) {
@@ -186,10 +175,6 @@ static StreamParser* BuildMP4Parser(
has_sbr = true;
break;
}
-#if defined(ENABLE_EAC3_PLAYBACK)
- } else if (enable_eac3 && MatchPattern(codec_id, kEAC3CodecInfo.pattern)) {
- audio_object_types.insert(mp4::kEAC3);
-#endif
}
}
@@ -209,6 +194,18 @@ static StreamParser* BuildMP3Parser(
return new MP3StreamParser();
}
+static const CodecInfo kADTSCodecInfo = { NULL, CodecInfo::AUDIO, NULL,
+ CodecInfo::HISTOGRAM_MPEG4AAC };
+static const CodecInfo* kAudioADTSCodecs[] = {
+ &kADTSCodecInfo,
+ NULL
+};
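+
+// Note: the NULL pattern above matches the empty-codecs case handled in
+// CheckTypeAndCodecs(), so "audio/aac" is assumed to carry self-describing
+// ADTS frames; it reuses the HISTOGRAM_MPEG4AAC bucket rather than adding a
+// new one.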
+
+static StreamParser* BuildADTSParser(
+ const std::vector<std::string>& codecs, const LogCB& log_cb) {
+ return new ADTSStreamParser();
+}
+
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
static const CodecInfo* kVideoMP2TCodecs[] = {
&kH264AVC1CodecInfo,
@@ -239,6 +236,7 @@ static const SupportedTypeInfo kSupportedTypeInfo[] = {
{ "video/webm", &BuildWebMParser, kVideoWebMCodecs },
{ "audio/webm", &BuildWebMParser, kAudioWebMCodecs },
#if defined(USE_PROPRIETARY_CODECS)
+ { "audio/aac", &BuildADTSParser, kAudioADTSCodecs },
{ "audio/mpeg", &BuildMP3Parser, kAudioMP3Codecs },
{ "video/mp4", &BuildMP4Parser, kVideoMP4Codecs },
{ "audio/mp4", &BuildMP4Parser, kAudioMP4Codecs },
@@ -270,11 +268,6 @@ static bool VerifyCodec(
return false;
}
#endif
- if (codec_info->tag == CodecInfo::HISTOGRAM_OPUS) {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableOpusPlayback))
- return false;
- }
if (audio_codecs)
audio_codecs->push_back(codec_info->tag);
return true;
@@ -324,16 +317,6 @@ static bool CheckTypeAndCodecs(
const SupportedTypeInfo& type_info = kSupportedTypeInfo[i];
if (type == type_info.type) {
if (codecs.empty()) {
-
-#if defined(USE_PROPRIETARY_CODECS)
- if (type_info.codecs == kAudioMP3Codecs &&
- !CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableMP3StreamParser)) {
- DVLOG(1) << "MP3StreamParser is not enabled.";
- return false;
- }
-#endif
-
const CodecInfo* codec_info = type_info.codecs[0];
if (codec_info && !codec_info->pattern &&
VerifyCodec(codec_info, audio_codecs, video_codecs)) {
@@ -413,12 +396,14 @@ scoped_ptr<StreamParser> StreamParserFactory::Create(
// Log the number of codecs specified, as well as the details on each one.
UMA_HISTOGRAM_COUNTS_100("Media.MSE.NumberOfTracks", codecs.size());
for (size_t i = 0; i < audio_codecs.size(); ++i) {
- UMA_HISTOGRAM_ENUMERATION(
- "Media.MSE.AudioCodec", audio_codecs[i], CodecInfo::HISTOGRAM_MAX);
+ UMA_HISTOGRAM_ENUMERATION("Media.MSE.AudioCodec",
+ audio_codecs[i],
+ CodecInfo::HISTOGRAM_MAX + 1);
}
for (size_t i = 0; i < video_codecs.size(); ++i) {
- UMA_HISTOGRAM_ENUMERATION(
- "Media.MSE.VideoCodec", video_codecs[i], CodecInfo::HISTOGRAM_MAX);
+ UMA_HISTOGRAM_ENUMERATION("Media.MSE.VideoCodec",
+ video_codecs[i],
+ CodecInfo::HISTOGRAM_MAX + 1);
}
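+  // Note: UMA_HISTOGRAM_ENUMERATION treats the boundary as exclusive, hence
+  // HISTOGRAM_MAX + 1 so that HISTOGRAM_MAX itself can still be recorded.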
stream_parser.reset(factory_function(codecs, log_cb));
diff --git a/chromium/media/filters/test_video_frame_scheduler.cc b/chromium/media/filters/test_video_frame_scheduler.cc
new file mode 100644
index 00000000000..9dba38d0759
--- /dev/null
+++ b/chromium/media/filters/test_video_frame_scheduler.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/test_video_frame_scheduler.h"
+
+#include "media/base/video_frame.h"
+
+namespace media {
+
+TestVideoFrameScheduler::ScheduledFrame::ScheduledFrame(
+ const scoped_refptr<VideoFrame> frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb)
+ : frame(frame), wall_ticks(wall_ticks), done_cb(done_cb) {
+}
+
+TestVideoFrameScheduler::ScheduledFrame::~ScheduledFrame() {
+}
+
+TestVideoFrameScheduler::TestVideoFrameScheduler() {
+}
+
+TestVideoFrameScheduler::~TestVideoFrameScheduler() {
+}
+
+void TestVideoFrameScheduler::ScheduleVideoFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) {
+ scheduled_frames_.push_back(ScheduledFrame(frame, wall_ticks, done_cb));
+}
+
+void TestVideoFrameScheduler::Reset() {
+ scheduled_frames_.clear();
+}
+
+void TestVideoFrameScheduler::DisplayFramesUpTo(base::TimeTicks wall_ticks) {
+ RunDoneCBForFramesUpTo(wall_ticks, DISPLAYED);
+}
+
+void TestVideoFrameScheduler::DropFramesUpTo(base::TimeTicks wall_ticks) {
+ RunDoneCBForFramesUpTo(wall_ticks, DROPPED);
+}
+
+void TestVideoFrameScheduler::RunDoneCBForFramesUpTo(base::TimeTicks wall_ticks,
+ Reason reason) {
+ std::vector<ScheduledFrame> done_frames;
+ std::vector<ScheduledFrame> remaining_frames;
+
+ for (size_t i = 0; i < scheduled_frames_.size(); ++i) {
+ if (scheduled_frames_[i].wall_ticks <= wall_ticks) {
+ done_frames.push_back(scheduled_frames_[i]);
+ } else {
+ remaining_frames.push_back(scheduled_frames_[i]);
+ }
+ }
+
+ scheduled_frames_.swap(remaining_frames);
+
+ for (size_t i = 0; i < done_frames.size(); ++i) {
+ done_frames[i].done_cb.Run(done_frames[i].frame, reason);
+ }
+}
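+
+// Design note: frames are partitioned and swapped out of |scheduled_frames_|
+// before any done_cb runs, so a callback that schedules new frames will not
+// invalidate the iteration above.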
+
+} // namespace media
diff --git a/chromium/media/filters/test_video_frame_scheduler.h b/chromium/media/filters/test_video_frame_scheduler.h
new file mode 100644
index 00000000000..c2920627a3e
--- /dev/null
+++ b/chromium/media/filters/test_video_frame_scheduler.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_TEST_VIDEO_FRAME_SCHEDULER_H_
+#define MEDIA_FILTERS_TEST_VIDEO_FRAME_SCHEDULER_H_
+
+#include <vector>
+
+#include "media/filters/video_frame_scheduler.h"
+
+namespace media {
+
+// A scheduler that queues frames until told otherwise.
+class TestVideoFrameScheduler : public VideoFrameScheduler {
+ public:
+ struct ScheduledFrame {
+ ScheduledFrame(const scoped_refptr<VideoFrame> frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb);
+ ~ScheduledFrame();
+
+ scoped_refptr<VideoFrame> frame;
+ base::TimeTicks wall_ticks;
+ DoneCB done_cb;
+ };
+
+ TestVideoFrameScheduler();
+ virtual ~TestVideoFrameScheduler();
+
+ // VideoFrameScheduler implementation.
+ virtual void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ // Displays all frames with scheduled times <= |wall_ticks|.
+ void DisplayFramesUpTo(base::TimeTicks wall_ticks);
+
+ // Drops all frames with scheduled times <= |wall_ticks|.
+ void DropFramesUpTo(base::TimeTicks wall_ticks);
+
+ const std::vector<ScheduledFrame>& scheduled_frames() const {
+ return scheduled_frames_;
+ }
+
+ private:
+ void RunDoneCBForFramesUpTo(base::TimeTicks wall_ticks, Reason reason);
+
+ std::vector<ScheduledFrame> scheduled_frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestVideoFrameScheduler);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_TEST_VIDEO_FRAME_SCHEDULER_H_
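
To make the queue-until-told contract above concrete, here is a minimal sketch of how a test might drive TestVideoFrameScheduler, assuming a gtest environment; FrameDone and the test name are hypothetical helpers, not part of this patch:

#include "base/bind.h"
#include "base/time/time.h"
#include "media/base/video_frame.h"
#include "media/filters/test_video_frame_scheduler.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/size.h"

namespace media {

// Hypothetical done callback: records the scheduler's verdict.
static void FrameDone(VideoFrameScheduler::Reason* out,
                      const scoped_refptr<VideoFrame>& frame,
                      VideoFrameScheduler::Reason reason) {
  *out = reason;
}

TEST(TestVideoFrameSchedulerExample, QueuesUntilToldOtherwise) {
  TestVideoFrameScheduler scheduler;
  scoped_refptr<VideoFrame> frame =
      VideoFrame::CreateBlackFrame(gfx::Size(8, 8));

  VideoFrameScheduler::Reason reason = VideoFrameScheduler::DROPPED;
  scheduler.ScheduleVideoFrame(
      frame,
      base::TimeTicks() + base::TimeDelta::FromMilliseconds(10),
      base::Bind(&FrameDone, &reason));

  // t=5ms is before the frame's deadline, so it stays queued.
  scheduler.DisplayFramesUpTo(base::TimeTicks() +
                              base::TimeDelta::FromMilliseconds(5));
  EXPECT_EQ(1u, scheduler.scheduled_frames().size());

  // t=10ms releases the frame and reports it as DISPLAYED.
  scheduler.DisplayFramesUpTo(base::TimeTicks() +
                              base::TimeDelta::FromMilliseconds(10));
  EXPECT_EQ(VideoFrameScheduler::DISPLAYED, reason);
  EXPECT_TRUE(scheduler.scheduled_frames().empty());
}

}  // namespace media
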
diff --git a/chromium/media/filters/video_decoder_selector.cc b/chromium/media/filters/video_decoder_selector.cc
deleted file mode 100644
index 9e646a77d13..00000000000
--- a/chromium/media/filters/video_decoder_selector.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/video_decoder_selector.h"
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/demuxer_stream.h"
-#include "media/base/pipeline.h"
-#include "media/base/video_decoder_config.h"
-#include "media/filters/decrypting_demuxer_stream.h"
-#include "media/filters/decrypting_video_decoder.h"
-
-namespace media {
-
-VideoDecoderSelector::VideoDecoderSelector(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- decoders_(decoders.Pass()),
- set_decryptor_ready_cb_(set_decryptor_ready_cb),
- input_stream_(NULL),
- weak_ptr_factory_(this) {
-}
-
-VideoDecoderSelector::~VideoDecoderSelector() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(select_decoder_cb_.is_null());
-}
-
-void VideoDecoderSelector::SelectVideoDecoder(
- DemuxerStream* stream,
- const SelectDecoderCB& select_decoder_cb) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(stream);
-
- // Make sure |select_decoder_cb| runs on a different execution stack.
- select_decoder_cb_ = BindToCurrentLoop(select_decoder_cb);
-
- const VideoDecoderConfig& config = stream->video_decoder_config();
- if (!config.IsValidConfig()) {
- DLOG(ERROR) << "Invalid video stream config.";
- ReturnNullDecoder();
- return;
- }
-
- input_stream_ = stream;
-
- if (!config.is_encrypted()) {
- InitializeDecoder();
- return;
- }
-
- // This could happen if Encrypted Media Extension (EME) is not enabled.
- if (set_decryptor_ready_cb_.is_null()) {
- ReturnNullDecoder();
- return;
- }
-
- video_decoder_.reset(new DecryptingVideoDecoder(
- message_loop_, set_decryptor_ready_cb_));
-
- video_decoder_->Initialize(
- input_stream_->video_decoder_config(),
- base::Bind(&VideoDecoderSelector::DecryptingVideoDecoderInitDone,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void VideoDecoderSelector::Abort() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // This could happen when SelectVideoDecoder() was not called or when
- // |select_decoder_cb_| was already posted but not fired (e.g. in the
- // message loop queue).
- if (select_decoder_cb_.is_null())
- return;
-
- // We must be trying to initialize the |video_decoder_| or the
- // |decrypted_stream_|. Invalid all weak pointers so that all initialization
- // callbacks won't fire.
- weak_ptr_factory_.InvalidateWeakPtrs();
-
- if (video_decoder_) {
- // |decrypted_stream_| is either NULL or already initialized. We don't
- // need to Stop() |decrypted_stream_| in either case.
- video_decoder_->Stop(base::Bind(&VideoDecoderSelector::ReturnNullDecoder,
- weak_ptr_factory_.GetWeakPtr()));
- return;
- }
-
- if (decrypted_stream_) {
- decrypted_stream_->Stop(
- base::Bind(&VideoDecoderSelector::ReturnNullDecoder,
- weak_ptr_factory_.GetWeakPtr()));
- return;
- }
-
- NOTREACHED();
-}
-
-void VideoDecoderSelector::DecryptingVideoDecoderInitDone(
- PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (status == PIPELINE_OK) {
- base::ResetAndReturn(&select_decoder_cb_).Run(
- video_decoder_.Pass(), scoped_ptr<DecryptingDemuxerStream>());
- return;
- }
-
- video_decoder_.reset();
-
- decrypted_stream_.reset(new DecryptingDemuxerStream(
- message_loop_, set_decryptor_ready_cb_));
-
- decrypted_stream_->Initialize(
- input_stream_,
- base::Bind(&VideoDecoderSelector::DecryptingDemuxerStreamInitDone,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void VideoDecoderSelector::DecryptingDemuxerStreamInitDone(
- PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (status != PIPELINE_OK) {
- ReturnNullDecoder();
- return;
- }
-
- DCHECK(!decrypted_stream_->video_decoder_config().is_encrypted());
- input_stream_ = decrypted_stream_.get();
- InitializeDecoder();
-}
-
-void VideoDecoderSelector::InitializeDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!video_decoder_);
-
- if (decoders_.empty()) {
- ReturnNullDecoder();
- return;
- }
-
- video_decoder_.reset(decoders_.front());
- decoders_.weak_erase(decoders_.begin());
-
- video_decoder_->Initialize(input_stream_->video_decoder_config(),
- base::Bind(&VideoDecoderSelector::DecoderInitDone,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void VideoDecoderSelector::DecoderInitDone(PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (status != PIPELINE_OK) {
- video_decoder_.reset();
- InitializeDecoder();
- return;
- }
-
- base::ResetAndReturn(&select_decoder_cb_).Run(video_decoder_.Pass(),
- decrypted_stream_.Pass());
-}
-
-void VideoDecoderSelector::ReturnNullDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<VideoDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
-}
-
-} // namespace media
diff --git a/chromium/media/filters/video_decoder_selector.h b/chromium/media/filters/video_decoder_selector.h
deleted file mode 100644
index 90e0dd51a49..00000000000
--- a/chromium/media/filters/video_decoder_selector.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_VIDEO_DECODER_SELECTOR_H_
-#define MEDIA_FILTERS_VIDEO_DECODER_SELECTOR_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
-#include "base/memory/weak_ptr.h"
-#include "media/base/decryptor.h"
-#include "media/base/demuxer_stream.h"
-#include "media/base/video_decoder.h"
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace media {
-
-class DecoderBuffer;
-class DecryptingDemuxerStream;
-class Decryptor;
-
-// VideoDecoderSelector (creates if necessary and) initializes the proper
-// VideoDecoder for a given DemuxerStream. If the given DemuxerStream is
-// encrypted, a DecryptingDemuxerStream may also be created.
-class MEDIA_EXPORT VideoDecoderSelector {
- public:
- // Indicates completion of VideoDecoder selection.
- // - First parameter: The initialized VideoDecoder. If it's set to NULL, then
- // VideoDecoder initialization failed.
- // - Second parameter: The initialized DecryptingDemuxerStream. If it's not
- // NULL, then a DecryptingDemuxerStream is created and initialized to do
- // decryption for the initialized VideoDecoder.
- // Note: The caller owns selected VideoDecoder and DecryptingDemuxerStream.
- // The caller should call DecryptingDemuxerStream::Reset() before
- // calling VideoDecoder::Reset() to release any pending decryption or read.
- typedef base::Callback<
- void(scoped_ptr<VideoDecoder>,
- scoped_ptr<DecryptingDemuxerStream>)> SelectDecoderCB;
-
- // |decoders| contains the VideoDecoders to use when initializing.
- //
- // |set_decryptor_ready_cb| is optional. If |set_decryptor_ready_cb| is null,
- // no decryptor will be available to perform decryption.
- VideoDecoderSelector(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
- ~VideoDecoderSelector();
-
- // Initializes and selects an VideoDecoder that can decode the |stream|.
- // Selected VideoDecoder (and DecryptingDemuxerStream) is returned via
- // the |select_decoder_cb|.
- void SelectVideoDecoder(DemuxerStream* stream,
- const SelectDecoderCB& select_decoder_cb);
-
- // Aborts pending VideoDecoder selection and fires |select_decoder_cb| with
- // NULL and NULL immediately if it's pending.
- void Abort();
-
- private:
- void DecryptingVideoDecoderInitDone(PipelineStatus status);
- void DecryptingDemuxerStreamInitDone(PipelineStatus status);
- void InitializeDecoder();
- void DecoderInitDone(PipelineStatus status);
- void ReturnNullDecoder();
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- ScopedVector<VideoDecoder> decoders_;
- SetDecryptorReadyCB set_decryptor_ready_cb_;
-
- DemuxerStream* input_stream_;
- SelectDecoderCB select_decoder_cb_;
-
- scoped_ptr<VideoDecoder> video_decoder_;
- scoped_ptr<DecryptingDemuxerStream> decrypted_stream_;
-
- base::WeakPtrFactory<VideoDecoderSelector> weak_ptr_factory_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(VideoDecoderSelector);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_VIDEO_DECODER_SELECTOR_H_
diff --git a/chromium/media/filters/video_decoder_selector_unittest.cc b/chromium/media/filters/video_decoder_selector_unittest.cc
index ddb53bc315d..62b1a421456 100644
--- a/chromium/media/filters/video_decoder_selector_unittest.cc
+++ b/chromium/media/filters/video_decoder_selector_unittest.cc
@@ -9,8 +9,8 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
+#include "media/filters/decoder_selector.h"
#include "media/filters/decrypting_demuxer_stream.h"
-#include "media/filters/video_decoder_selector.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -42,17 +42,11 @@ class VideoDecoderSelectorTest : public ::testing::Test {
decoder_2_(new StrictMock<MockVideoDecoder>()) {
all_decoders_.push_back(decoder_1_);
all_decoders_.push_back(decoder_2_);
-
- EXPECT_CALL(*decoder_1_, Stop(_))
- .WillRepeatedly(RunClosure<0>());
- EXPECT_CALL(*decoder_2_, Stop(_))
- .WillRepeatedly(RunClosure<0>());
}
~VideoDecoderSelectorTest() {
- if (selected_decoder_) {
- selected_decoder_->Stop(NewExpectedClosure());
- }
+ if (selected_decoder_)
+ selected_decoder_->Stop();
message_loop_.RunUntilIdle();
}
@@ -115,9 +109,12 @@ class VideoDecoderSelectorTest : public ::testing::Test {
}
void SelectDecoder() {
- decoder_selector_->SelectVideoDecoder(
+ decoder_selector_->SelectDecoder(
demuxer_stream_.get(),
+ false,
base::Bind(&VideoDecoderSelectorTest::MockOnDecoderSelected,
+ base::Unretained(this)),
+ base::Bind(&VideoDecoderSelectorTest::FrameReady,
base::Unretained(this)));
message_loop_.RunUntilIdle();
}
@@ -130,6 +127,10 @@ class VideoDecoderSelectorTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
+ void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ NOTREACHED();
+ }
+
// Fixture members.
scoped_ptr<VideoDecoderSelector> decoder_selector_;
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
@@ -148,6 +149,11 @@ class VideoDecoderSelectorTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(VideoDecoderSelectorTest);
};
+// Note:
+// In all the tests, Stop() is expected to be called on a decoder if it:
+// - is pending initialization and DecoderSelector::Abort() is called, or
+// - has been successfully initialized.
+
// The stream is not encrypted but we have no clear decoder. No decoder can be
// selected.
TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_NoClearDecoder) {
@@ -165,9 +171,10 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_OneClearDecoder) {
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 1);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
@@ -177,7 +184,8 @@ TEST_F(VideoDecoderSelectorTest,
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 1);
- EXPECT_CALL(*decoder_1_, Initialize(_, _));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoderAndAbort();
}
@@ -188,11 +196,12 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 2);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
- EXPECT_CALL(*decoder_2_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, IsNull()));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
@@ -202,9 +211,10 @@ TEST_F(VideoDecoderSelectorTest,
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 2);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
- EXPECT_CALL(*decoder_2_, Initialize(_, _));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoderAndAbort();
}
@@ -215,9 +225,10 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_HasDecryptor) {
UseClearStream();
InitializeDecoderSelector(kDecryptOnly, 1);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
@@ -226,7 +237,8 @@ TEST_F(VideoDecoderSelectorTest, Abort_ClearStream_HasDecryptor) {
UseClearStream();
InitializeDecoderSelector(kDecryptOnly, 1);
- EXPECT_CALL(*decoder_1_, Initialize(_, _));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoderAndAbort();
}
@@ -266,9 +278,10 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 1);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, NotNull()));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
@@ -278,7 +291,8 @@ TEST_F(VideoDecoderSelectorTest,
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 1);
- EXPECT_CALL(*decoder_1_, Initialize(_, _));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _));
+ EXPECT_CALL(*decoder_1_, Stop());
SelectDecoderAndAbort();
}
@@ -291,11 +305,12 @@ TEST_F(VideoDecoderSelectorTest,
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 2);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
- EXPECT_CALL(*decoder_2_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, NotNull()));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
@@ -305,9 +320,10 @@ TEST_F(VideoDecoderSelectorTest,
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 2);
- EXPECT_CALL(*decoder_1_, Initialize(_, _))
- .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
- EXPECT_CALL(*decoder_2_, Initialize(_, _));
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
+ .WillOnce(RunCallback<2>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _));
+ EXPECT_CALL(*decoder_2_, Stop());
SelectDecoderAndAbort();
}
diff --git a/chromium/media/filters/video_frame_scheduler.h b/chromium/media/filters/video_frame_scheduler.h
new file mode 100644
index 00000000000..f90726211cd
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_H_
+#define MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class VideoFrame;
+
+// Defines an abstract video frame scheduler that is capable of managing the
+// display of video frames at explicit times.
+class MEDIA_EXPORT VideoFrameScheduler {
+ public:
+ VideoFrameScheduler() {}
+ virtual ~VideoFrameScheduler() {}
+
+ enum Reason {
+ DISPLAYED, // Frame was displayed.
+ DROPPED, // Frame was dropped.
+ };
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&, Reason)> DoneCB;
+
+ // Schedule |frame| to be displayed at |wall_ticks|, firing |done_cb| when
+ // the scheduler has finished with the frame.
+ //
+ // To avoid reentrancy issues, |done_cb| is run on a separate calling stack.
+ virtual void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) = 0;
+
+ // Causes the scheduler to cancel any previously scheduled frames.
+ //
+ // There is no guarantee that the |done_cb|s for previously scheduled frames
+ // will not be run. Clients should implement callback tracking/cancellation
+ // if they are sensitive to old callbacks being run.
+ virtual void Reset() = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_H_
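
A sketch of how a client might consume the DISPLAYED/DROPPED contract above, tallying the scheduler's verdict per frame; DroppedFrameCounter is hypothetical, not part of this patch:

#include "base/bind.h"
#include "media/filters/video_frame_scheduler.h"

namespace media {

// Hypothetical client that counts how each scheduled frame ended up.
class DroppedFrameCounter {
 public:
  DroppedFrameCounter() : dropped_(0), displayed_(0) {}

  void ScheduleFrame(VideoFrameScheduler* scheduler,
                     const scoped_refptr<VideoFrame>& frame,
                     base::TimeTicks wall_ticks) {
    // base::Unretained() is only safe if |this| outlives all scheduled
    // frames; real code would prefer a WeakPtr.
    scheduler->ScheduleVideoFrame(
        frame, wall_ticks,
        base::Bind(&DroppedFrameCounter::OnDone, base::Unretained(this)));
  }

 private:
  void OnDone(const scoped_refptr<VideoFrame>& frame,
              VideoFrameScheduler::Reason reason) {
    // Calling back into the scheduler here would be safe: the interface
    // guarantees |done_cb| runs on a separate calling stack.
    if (reason == VideoFrameScheduler::DROPPED)
      ++dropped_;
    else
      ++displayed_;
  }

  int dropped_;
  int displayed_;
};

}  // namespace media
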
diff --git a/chromium/media/filters/video_frame_scheduler_impl.cc b/chromium/media/filters/video_frame_scheduler_impl.cc
new file mode 100644
index 00000000000..ee06bb1cd96
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler_impl.cc
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/video_frame_scheduler_impl.h"
+
+#include <list>
+
+#include "base/single_thread_task_runner.h"
+#include "base/time/default_tick_clock.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+VideoFrameSchedulerImpl::VideoFrameSchedulerImpl(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const DisplayCB& display_cb)
+ : task_runner_(task_runner),
+ display_cb_(display_cb),
+ tick_clock_(new base::DefaultTickClock()) {
+}
+
+VideoFrameSchedulerImpl::~VideoFrameSchedulerImpl() {
+}
+
+void VideoFrameSchedulerImpl::ScheduleVideoFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!frame->end_of_stream());
+ pending_frames_.push(PendingFrame(frame, wall_ticks, done_cb));
+ ResetTimerIfNecessary();
+}
+
+void VideoFrameSchedulerImpl::Reset() {
+ pending_frames_ = PendingFrameQueue();
+ timer_.Stop();
+}
+
+void VideoFrameSchedulerImpl::SetTickClockForTesting(
+ scoped_ptr<base::TickClock> tick_clock) {
+ tick_clock_.swap(tick_clock);
+}
+
+void VideoFrameSchedulerImpl::ResetTimerIfNecessary() {
+ if (pending_frames_.empty()) {
+ DCHECK(!timer_.IsRunning());
+ return;
+ }
+
+ // Negative times will schedule the callback to run immediately.
+ timer_.Stop();
+ timer_.Start(FROM_HERE,
+ pending_frames_.top().wall_ticks - tick_clock_->NowTicks(),
+ base::Bind(&VideoFrameSchedulerImpl::OnTimerFired,
+ base::Unretained(this)));
+}
+
+void VideoFrameSchedulerImpl::OnTimerFired() {
+ base::TimeTicks now = tick_clock_->NowTicks();
+
+ // Move all frames that have reached their deadline into a separate queue.
+ std::list<PendingFrame> expired_frames;
+ while (!pending_frames_.empty() && pending_frames_.top().wall_ticks <= now) {
+ expired_frames.push_back(pending_frames_.top());
+ pending_frames_.pop();
+ }
+
+ // Signal all frames except the last one as dropped.
+ while (expired_frames.size() > 1) {
+ expired_frames.front().done_cb.Run(expired_frames.front().frame, DROPPED);
+ expired_frames.pop_front();
+ }
+
+ // Display the last expired frame.
+ if (!expired_frames.empty()) {
+ display_cb_.Run(expired_frames.front().frame);
+ expired_frames.front().done_cb.Run(expired_frames.front().frame, DISPLAYED);
+ expired_frames.pop_front();
+ }
+
+ ResetTimerIfNecessary();
+}
+
+VideoFrameSchedulerImpl::PendingFrame::PendingFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb)
+ : frame(frame), wall_ticks(wall_ticks), done_cb(done_cb) {
+}
+
+VideoFrameSchedulerImpl::PendingFrame::~PendingFrame() {
+}
+
+bool VideoFrameSchedulerImpl::PendingFrame::operator<(
+ const PendingFrame& other) const {
+ // Flip the comparison as std::priority_queue<T>::top() returns the largest
+ // element.
+ //
+ // Assume video frames with identical timestamps contain identical content.
+ return wall_ticks > other.wall_ticks;
+}
+
+} // namespace media
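
The flipped operator< above is the standard trick for turning std::priority_queue's max-heap into an earliest-deadline-first queue: top() returns the largest element under operator<, so comparing with > makes the soonest wall_ticks compare as "largest". A self-contained illustration, independent of the media types:

#include <cassert>
#include <queue>

struct Deadline {
  int wall_ticks;
  // Flipped: a smaller deadline compares as "greater", so the soonest
  // deadline surfaces at top() of the max-heap priority_queue.
  bool operator<(const Deadline& other) const {
    return wall_ticks > other.wall_ticks;
  }
};

int main() {
  std::priority_queue<Deadline> queue;
  Deadline d1 = { 30 };
  Deadline d2 = { 10 };
  Deadline d3 = { 20 };
  queue.push(d1);
  queue.push(d2);
  queue.push(d3);
  assert(queue.top().wall_ticks == 10);  // Soonest deadline comes out first.
  return 0;
}
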
diff --git a/chromium/media/filters/video_frame_scheduler_impl.h b/chromium/media/filters/video_frame_scheduler_impl.h
new file mode 100644
index 00000000000..f6bc78dd275
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler_impl.h
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_IMPL_H_
+#define MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_IMPL_H_
+
+#include <queue>
+
+#include "base/memory/ref_counted.h"
+#include "base/timer/timer.h"
+#include "media/filters/video_frame_scheduler.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+class TickClock;
+}
+
+namespace media {
+
+// A scheduler that uses delayed tasks on a task runner for timing the display
+// of video frames.
+//
+// Single threaded. Calls must be on |task_runner|.
+class MEDIA_EXPORT VideoFrameSchedulerImpl : public VideoFrameScheduler {
+ public:
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> DisplayCB;
+
+ // |task_runner| is used for scheduling the delayed tasks.
+ // |display_cb| is run when a frame is to be displayed.
+ VideoFrameSchedulerImpl(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const DisplayCB& display_cb);
+ virtual ~VideoFrameSchedulerImpl();
+
+ // VideoFrameScheduler implementation.
+ virtual void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ void SetTickClockForTesting(scoped_ptr<base::TickClock> tick_clock);
+
+ private:
+ void ResetTimerIfNecessary();
+ void OnTimerFired();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ DisplayCB display_cb_;
+ scoped_ptr<base::TickClock> tick_clock_;
+ base::OneShotTimer<VideoFrameScheduler> timer_;
+
+ struct PendingFrame {
+ PendingFrame(const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb);
+ ~PendingFrame();
+
+ // For use with std::priority_queue<T>.
+ bool operator<(const PendingFrame& other) const;
+
+ scoped_refptr<VideoFrame> frame;
+ base::TimeTicks wall_ticks;
+ DoneCB done_cb;
+ };
+ typedef std::priority_queue<PendingFrame> PendingFrameQueue;
+ PendingFrameQueue pending_frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_IMPL_H_
diff --git a/chromium/media/filters/video_frame_scheduler_impl_unittest.cc b/chromium/media/filters/video_frame_scheduler_impl_unittest.cc
new file mode 100644
index 00000000000..cf5ee0a3d45
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler_impl_unittest.cc
@@ -0,0 +1,150 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/base/test_helpers.h"
+#include "media/base/video_frame.h"
+#include "media/filters/video_frame_scheduler_impl.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+using testing::_;
+
+// NOTE: millisecond-level resolution is used for times as real delayed tasks
+// are posted. Don't use large values if you want to keep tests running fast.
+class VideoFrameSchedulerImplTest : public testing::Test {
+ public:
+ VideoFrameSchedulerImplTest()
+ : scheduler_(message_loop_.message_loop_proxy(),
+ base::Bind(&VideoFrameSchedulerImplTest::OnDisplay,
+ base::Unretained(this))),
+ tick_clock_(new base::SimpleTestTickClock()) {
+ scheduler_.SetTickClockForTesting(scoped_ptr<base::TickClock>(tick_clock_));
+ }
+
+ virtual ~VideoFrameSchedulerImplTest() {}
+
+ MOCK_METHOD1(OnDisplay, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD2(OnFrameDone,
+ void(const scoped_refptr<VideoFrame>&,
+ VideoFrameScheduler::Reason));
+
+ void Schedule(const scoped_refptr<VideoFrame>& frame, int64 target_ms) {
+ scheduler_.ScheduleVideoFrame(
+ frame,
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(target_ms),
+ base::Bind(&VideoFrameSchedulerImplTest::OnFrameDone,
+ base::Unretained(this)));
+ }
+
+ void RunUntilTimeHasElapsed(int64 ms) {
+ WaitableMessageLoopEvent waiter;
+ message_loop_.PostDelayedTask(
+ FROM_HERE, waiter.GetClosure(), base::TimeDelta::FromMilliseconds(ms));
+ waiter.RunAndWait();
+ }
+
+ void AdvanceTime(int64 ms) {
+ tick_clock_->Advance(base::TimeDelta::FromMilliseconds(ms));
+ }
+
+ void Reset() {
+ scheduler_.Reset();
+ }
+
+ private:
+ base::MessageLoop message_loop_;
+ VideoFrameSchedulerImpl scheduler_;
+ base::SimpleTestTickClock* tick_clock_; // Owned by |scheduler_|.
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerImplTest);
+};
+
+TEST_F(VideoFrameSchedulerImplTest, ImmediateDisplay) {
+ scoped_refptr<VideoFrame> frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ Schedule(frame, 0);
+
+ EXPECT_CALL(*this, OnDisplay(frame));
+ EXPECT_CALL(*this, OnFrameDone(frame, VideoFrameScheduler::DISPLAYED));
+ RunUntilTimeHasElapsed(0);
+}
+
+TEST_F(VideoFrameSchedulerImplTest, EventualDisplay) {
+ scoped_refptr<VideoFrame> frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ Schedule(frame, 10);
+
+ // Nothing should happen.
+ RunUntilTimeHasElapsed(10);
+
+ // Now we should get the frame.
+ EXPECT_CALL(*this, OnDisplay(frame));
+ EXPECT_CALL(*this, OnFrameDone(frame, VideoFrameScheduler::DISPLAYED));
+ AdvanceTime(10);
+ RunUntilTimeHasElapsed(10);
+}
+
+TEST_F(VideoFrameSchedulerImplTest, DroppedFrame) {
+ scoped_refptr<VideoFrame> dropped =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ scoped_refptr<VideoFrame> displayed =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ Schedule(dropped, 10);
+ Schedule(displayed, 20);
+
+ // The frame past its deadline will get dropped.
+ EXPECT_CALL(*this, OnDisplay(displayed));
+ EXPECT_CALL(*this, OnFrameDone(dropped, VideoFrameScheduler::DROPPED));
+ EXPECT_CALL(*this, OnFrameDone(displayed, VideoFrameScheduler::DISPLAYED));
+ AdvanceTime(20);
+ RunUntilTimeHasElapsed(20);
+}
+
+TEST_F(VideoFrameSchedulerImplTest, SingleFrameLate) {
+ scoped_refptr<VideoFrame> frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ Schedule(frame, 10);
+
+ // Despite the frame being late, it should still get displayed as it's the
+ // only one.
+ EXPECT_CALL(*this, OnDisplay(frame));
+ EXPECT_CALL(*this, OnFrameDone(frame, VideoFrameScheduler::DISPLAYED));
+ AdvanceTime(20);
+ RunUntilTimeHasElapsed(20);
+}
+
+TEST_F(VideoFrameSchedulerImplTest, ManyFramesLate) {
+ scoped_refptr<VideoFrame> dropped =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ scoped_refptr<VideoFrame> displayed =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ Schedule(dropped, 10);
+ Schedule(displayed, 20);
+
+ // Despite both being late, the scheduler should always display the latest
+ // expired frame.
+ EXPECT_CALL(*this, OnDisplay(displayed));
+ EXPECT_CALL(*this, OnFrameDone(dropped, VideoFrameScheduler::DROPPED));
+ EXPECT_CALL(*this, OnFrameDone(displayed, VideoFrameScheduler::DISPLAYED));
+ AdvanceTime(30);
+ RunUntilTimeHasElapsed(30);
+}
+
+TEST_F(VideoFrameSchedulerImplTest, Reset) {
+ scoped_refptr<VideoFrame> frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+ Schedule(frame, 10);
+
+ // Despite being on time, the frame's callback isn't run after Reset().
+ EXPECT_CALL(*this, OnFrameDone(_, _)).Times(0);
+ AdvanceTime(10);
+ Reset();
+ RunUntilTimeHasElapsed(10);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/video_frame_scheduler_proxy.cc b/chromium/media/filters/video_frame_scheduler_proxy.cc
new file mode 100644
index 00000000000..590412e6dca
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler_proxy.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/video_frame_scheduler_proxy.h"
+
+#include "base/single_thread_task_runner.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+VideoFrameSchedulerProxy::VideoFrameSchedulerProxy(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& scheduler_runner,
+ scoped_ptr<VideoFrameScheduler> scheduler)
+ : task_runner_(task_runner),
+ scheduler_runner_(scheduler_runner),
+ scheduler_(scheduler.Pass()),
+ weak_factory_(this) {
+}
+
+VideoFrameSchedulerProxy::~VideoFrameSchedulerProxy() {
+ scheduler_runner_->DeleteSoon(FROM_HERE, scheduler_.release());
+}
+
+void VideoFrameSchedulerProxy::ScheduleVideoFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ scheduler_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoFrameScheduler::ScheduleVideoFrame,
+ base::Unretained(scheduler_.get()),
+ frame,
+ wall_ticks,
+ BindToCurrentLoop(done_cb)));
+}
+
+void VideoFrameSchedulerProxy::Reset() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ scheduler_runner_->PostTask(FROM_HERE,
+ base::Bind(&VideoFrameScheduler::Reset,
+ base::Unretained(scheduler_.get())));
+}
+
+} // namespace media
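
The load-bearing call in ScheduleVideoFrame() above is BindToCurrentLoop(): it wraps |done_cb| so that when the real scheduler eventually runs it on |scheduler_runner_|, the invocation is trampolined back to the loop that was current when the proxy was called. A minimal standalone sketch of that trampoline pattern (DoWork is a hypothetical worker-side function, not part of this patch):

#include "base/at_exit.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/threading/thread.h"
#include "media/base/bind_to_current_loop.h"

// Hypothetical worker-side function: runs on the worker thread but must
// report completion on the caller's loop.
static void DoWork(const base::Closure& reply) {
  // ... work happens here, on the worker thread ...
  reply.Run();  // A BindToCurrentLoop-wrapped closure posts instead of
                // running inline, so this is safe from any thread.
}

int main() {
  base::AtExitManager exit_manager;
  base::MessageLoop main_loop;
  base::Thread worker("worker");
  worker.Start();

  base::RunLoop run_loop;
  worker.message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&DoWork,
                 media::BindToCurrentLoop(run_loop.QuitClosure())));
  run_loop.Run();  // Returns once the reply bounces back to this loop.

  worker.Stop();
  return 0;
}
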
diff --git a/chromium/media/filters/video_frame_scheduler_proxy.h b/chromium/media/filters/video_frame_scheduler_proxy.h
new file mode 100644
index 00000000000..9130f603f1b
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler_proxy.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_PROXY_H_
+#define MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_PROXY_H_
+
+#include "base/memory/weak_ptr.h"
+#include "media/filters/video_frame_scheduler.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+// Provides a thread-safe proxy for a VideoFrameScheduler. Typically the real
+// VideoFrameScheduler runs on the task runner responsible for graphics
+// display, while the proxy is used on the task runner responsible for
+// background video decoding.
+class MEDIA_EXPORT VideoFrameSchedulerProxy : public VideoFrameScheduler {
+ public:
+ // |task_runner| is the runner that this object will be called on.
+ // |scheduler_runner| is the runner that |scheduler| will be called on.
+ // |scheduler| will be deleted on |scheduler_runner|.
+ VideoFrameSchedulerProxy(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& scheduler_runner,
+ scoped_ptr<VideoFrameScheduler> scheduler);
+ virtual ~VideoFrameSchedulerProxy();
+
+ // VideoFrameScheduler implementation.
+ virtual void ScheduleVideoFrame(const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks wall_ticks,
+ const DoneCB& done_cb) OVERRIDE;
+ virtual void Reset() OVERRIDE;
+
+ private:
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> scheduler_runner_;
+ scoped_ptr<VideoFrameScheduler> scheduler_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<VideoFrameSchedulerProxy> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerProxy);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VIDEO_FRAME_SCHEDULER_PROXY_H_
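
A sketch of the wiring this class is designed for, assuming the caller decodes on a media task runner while a VideoFrameSchedulerImpl lives on the compositor task runner; MakeProxiedScheduler, DisplayFrame and the runner names are hypothetical, not part of this patch:

#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/single_thread_task_runner.h"
#include "media/base/video_frame.h"
#include "media/filters/video_frame_scheduler_impl.h"
#include "media/filters/video_frame_scheduler_proxy.h"

// Hypothetical display hook, run on the compositor side when a frame is due.
static void DisplayFrame(const scoped_refptr<media::VideoFrame>& frame) {
}

// Builds a scheduler that is safe to call from |media_task_runner| while the
// real timing work happens on |compositor_task_runner|.
scoped_ptr<media::VideoFrameScheduler> MakeProxiedScheduler(
    const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
    const scoped_refptr<base::SingleThreadTaskRunner>&
        compositor_task_runner) {
  // The real scheduler lives, runs, and is eventually deleted on the
  // compositor side.
  scoped_ptr<media::VideoFrameScheduler> real_scheduler(
      new media::VideoFrameSchedulerImpl(compositor_task_runner,
                                         base::Bind(&DisplayFrame)));
  // The proxy is called on the media side and forwards across threads.
  scoped_ptr<media::VideoFrameScheduler> proxy(
      new media::VideoFrameSchedulerProxy(media_task_runner,
                                          compositor_task_runner,
                                          real_scheduler.Pass()));
  return proxy.Pass();
}
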
diff --git a/chromium/media/filters/video_frame_scheduler_unittest.cc b/chromium/media/filters/video_frame_scheduler_unittest.cc
new file mode 100644
index 00000000000..02b64ae8263
--- /dev/null
+++ b/chromium/media/filters/video_frame_scheduler_unittest.cc
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/debug/stack_trace.h"
+#include "base/run_loop.h"
+#include "media/base/video_frame.h"
+#include "media/filters/clockless_video_frame_scheduler.h"
+#include "media/filters/test_video_frame_scheduler.h"
+#include "media/filters/video_frame_scheduler_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static void DoNothing(const scoped_refptr<VideoFrame>& frame) {
+}
+
+static void CheckForReentrancy(std::string* stack_trace,
+ const scoped_refptr<VideoFrame>& frame,
+ VideoFrameScheduler::Reason reason) {
+ *stack_trace = base::debug::StackTrace().ToString();
+ base::MessageLoop::current()->PostTask(FROM_HERE,
+ base::MessageLoop::QuitClosure());
+}
+
+// Type-parameterized test harness for validating the API contract of
+// VideoFrameScheduler implementations.
+//
+// NOTE: C++ requires using "this" for derived class templates when referencing
+// class members.
+template <typename T>
+class VideoFrameSchedulerTest : public testing::Test {
+ public:
+ VideoFrameSchedulerTest() {}
+ virtual ~VideoFrameSchedulerTest() {}
+
+ base::MessageLoop message_loop_;
+ T scheduler_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameSchedulerTest);
+};
+
+template <>
+VideoFrameSchedulerTest<ClocklessVideoFrameScheduler>::VideoFrameSchedulerTest()
+ : scheduler_(base::Bind(&DoNothing)) {
+}
+
+template <>
+VideoFrameSchedulerTest<VideoFrameSchedulerImpl>::VideoFrameSchedulerTest()
+ : scheduler_(message_loop_.message_loop_proxy(), base::Bind(&DoNothing)) {
+}
+
+TYPED_TEST_CASE_P(VideoFrameSchedulerTest);
+
+TYPED_TEST_P(VideoFrameSchedulerTest, ScheduleVideoFrameIsntReentrant) {
+ scoped_refptr<VideoFrame> frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(8, 8));
+
+ std::string stack_trace;
+ this->scheduler_.ScheduleVideoFrame(
+ frame, base::TimeTicks(), base::Bind(&CheckForReentrancy, &stack_trace));
+ EXPECT_TRUE(stack_trace.empty()) << "Reentrancy detected:\n" << stack_trace;
+}
+
+REGISTER_TYPED_TEST_CASE_P(VideoFrameSchedulerTest,
+ ScheduleVideoFrameIsntReentrant);
+
+INSTANTIATE_TYPED_TEST_CASE_P(ClocklessVideoFrameScheduler,
+ VideoFrameSchedulerTest,
+ ClocklessVideoFrameScheduler);
+INSTANTIATE_TYPED_TEST_CASE_P(VideoFrameSchedulerImpl,
+ VideoFrameSchedulerTest,
+ VideoFrameSchedulerImpl);
+INSTANTIATE_TYPED_TEST_CASE_P(TestVideoFrameScheduler,
+ VideoFrameSchedulerTest,
+ TestVideoFrameScheduler);
+
+} // namespace media
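
One payoff of the typed suite above: a new VideoFrameScheduler implementation gets the reentrancy contract checked by adding a single instantiation, plus a constructor specialization when the implementation is not default-constructible. A sketch for a hypothetical MyScheduler (not part of this patch):

// Hypothetical: MyScheduler takes a task runner, so specialize the fixture's
// constructor the same way VideoFrameSchedulerImpl does above.
template <>
VideoFrameSchedulerTest<MyScheduler>::VideoFrameSchedulerTest()
    : scheduler_(message_loop_.message_loop_proxy()) {
}

INSTANTIATE_TYPED_TEST_CASE_P(MyScheduler,
                              VideoFrameSchedulerTest,
                              MyScheduler);
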
diff --git a/chromium/media/filters/video_frame_stream.cc b/chromium/media/filters/video_frame_stream.cc
deleted file mode 100644
index b18cedafd85..00000000000
--- a/chromium/media/filters/video_frame_stream.cc
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/video_frame_stream.h"
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/debug/trace_event.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/demuxer_stream.h"
-#include "media/base/video_decoder_config.h"
-#include "media/filters/decrypting_demuxer_stream.h"
-#include "media/filters/video_decoder_selector.h"
-
-namespace media {
-
-VideoFrameStream::VideoFrameStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
- : message_loop_(message_loop),
- weak_factory_(this),
- state_(STATE_UNINITIALIZED),
- stream_(NULL),
- decoder_selector_(new VideoDecoderSelector(message_loop,
- decoders.Pass(),
- set_decryptor_ready_cb)) {
-}
-
-VideoFrameStream::~VideoFrameStream() {
- DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_STOPPED) << state_;
-}
-
-void VideoFrameStream::Initialize(DemuxerStream* stream,
- const StatisticsCB& statistics_cb,
- const InitCB& init_cb) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_UNINITIALIZED) << state_;
- DCHECK(init_cb_.is_null());
- DCHECK(!init_cb.is_null());
-
- statistics_cb_ = statistics_cb;
- init_cb_ = init_cb;
- stream_ = stream;
-
- state_ = STATE_INITIALIZING;
- // TODO(xhwang): VideoDecoderSelector only needs a config to select a decoder.
- decoder_selector_->SelectVideoDecoder(
- stream,
- base::Bind(&VideoFrameStream::OnDecoderSelected,
- weak_factory_.GetWeakPtr()));
-}
-
-void VideoFrameStream::Read(const ReadCB& read_cb) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
- state_ == STATE_ERROR) << state_;
- // No two reads in the flight at any time.
- DCHECK(read_cb_.is_null());
- // No read during resetting or stopping process.
- DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
-
- if (state_ == STATE_ERROR) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- read_cb, DECODE_ERROR, scoped_refptr<VideoFrame>()));
- return;
- }
-
- read_cb_ = read_cb;
-
- if (state_ == STATE_FLUSHING_DECODER) {
- FlushDecoder();
- return;
- }
-
- ReadFromDemuxerStream();
-}
-
-void VideoFrameStream::Reset(const base::Closure& closure) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
- DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
-
- reset_cb_ = closure;
-
- // During decoder reinitialization, VideoDecoder does not need to be and
- // cannot be Reset(). |decrypting_demuxer_stream_| was reset before decoder
- // reinitialization.
- if (state_ == STATE_REINITIALIZING_DECODER)
- return;
-
- // During pending demuxer read and when not using DecryptingDemuxerStream,
- // VideoDecoder will be reset after demuxer read is returned
- // (in OnBufferReady()).
- if (state_ == STATE_PENDING_DEMUXER_READ && !decrypting_demuxer_stream_)
- return;
-
- // VideoDecoder API guarantees that if VideoDecoder::Reset() is called during
- // a pending decode, the decode callback must be fired before the reset
- // callback is fired. Therefore, we can call VideoDecoder::Reset() regardless
- // of if we have a pending decode and always satisfy the reset callback when
- // the decoder reset is finished.
- if (decrypting_demuxer_stream_) {
- decrypting_demuxer_stream_->Reset(base::Bind(
- &VideoFrameStream::ResetDecoder, weak_factory_.GetWeakPtr()));
- return;
- }
-
- ResetDecoder();
-}
-
-void VideoFrameStream::Stop(const base::Closure& closure) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_NE(state_, STATE_STOPPED) << state_;
- DCHECK(stop_cb_.is_null());
-
- stop_cb_ = closure;
-
- if (state_ == STATE_INITIALIZING) {
- decoder_selector_->Abort();
- return;
- }
-
- DCHECK(init_cb_.is_null());
-
- // All pending callbacks will be dropped.
- weak_factory_.InvalidateWeakPtrs();
-
- // Post callbacks to prevent reentrance into this object.
- if (!read_cb_.is_null())
- message_loop_->PostTask(FROM_HERE, base::Bind(
- base::ResetAndReturn(&read_cb_), ABORTED, scoped_refptr<VideoFrame>()));
- if (!reset_cb_.is_null())
- message_loop_->PostTask(FROM_HERE, base::ResetAndReturn(&reset_cb_));
-
- if (decrypting_demuxer_stream_) {
- decrypting_demuxer_stream_->Stop(base::Bind(
- &VideoFrameStream::StopDecoder, weak_factory_.GetWeakPtr()));
- return;
- }
-
- // We may not have a |decoder_| if Stop() was called during initialization.
- if (decoder_) {
- StopDecoder();
- return;
- }
-
- state_ = STATE_STOPPED;
- stream_ = NULL;
- decoder_.reset();
- decrypting_demuxer_stream_.reset();
- message_loop_->PostTask(FROM_HERE, base::ResetAndReturn(&stop_cb_));
-}
-
-bool VideoFrameStream::CanReadWithoutStalling() const {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return decoder_->CanReadWithoutStalling();
-}
-
-void VideoFrameStream::OnDecoderSelected(
- scoped_ptr<VideoDecoder> selected_decoder,
- scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
- DCHECK(!init_cb_.is_null());
- DCHECK(read_cb_.is_null());
- DCHECK(reset_cb_.is_null());
-
- decoder_selector_.reset();
-
- if (!selected_decoder) {
- state_ = STATE_UNINITIALIZED;
- base::ResetAndReturn(&init_cb_).Run(false, false);
- } else {
- state_ = STATE_NORMAL;
- decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
- if (decrypting_demuxer_stream_)
- stream_ = decrypting_demuxer_stream_.get();
- decoder_ = selected_decoder.Pass();
- if (decoder_->NeedsBitstreamConversion())
- stream_->EnableBitstreamConverter();
- // TODO(xhwang): We assume |decoder_->HasAlpha()| does not change after
- // reinitialization. Check this condition.
- base::ResetAndReturn(&init_cb_).Run(true, decoder_->HasAlpha());
- }
-
- // Stop() called during initialization.
- if (!stop_cb_.is_null()) {
- Stop(base::ResetAndReturn(&stop_cb_));
- return;
- }
-}
-
-void VideoFrameStream::SatisfyRead(Status status,
- const scoped_refptr<VideoFrame>& frame) {
- DCHECK(!read_cb_.is_null());
- base::ResetAndReturn(&read_cb_).Run(status, frame);
-}
-
-void VideoFrameStream::AbortRead() {
- // Abort read during pending reset. It is safe to fire the |read_cb_| directly
- // instead of posting it because VideoRenderBase won't call into this class
- // again when it's in kFlushing state.
- // TODO(xhwang): Improve the resetting process to avoid this dependency on the
- // caller.
- DCHECK(!reset_cb_.is_null());
- SatisfyRead(ABORTED, NULL);
-}
-
-void VideoFrameStream::Decode(const scoped_refptr<DecoderBuffer>& buffer) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
- DCHECK(!read_cb_.is_null());
- DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
- DCHECK(buffer);
-
- int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
-
- TRACE_EVENT_ASYNC_BEGIN0("media", "VideoFrameStream::Decode", this);
- decoder_->Decode(buffer, base::Bind(&VideoFrameStream::OnFrameReady,
- weak_factory_.GetWeakPtr(), buffer_size));
-}
-
-void VideoFrameStream::FlushDecoder() {
- Decode(DecoderBuffer::CreateEOSBuffer());
-}
-
-void VideoFrameStream::OnFrameReady(int buffer_size,
- const VideoDecoder::Status status,
- const scoped_refptr<VideoFrame>& frame) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
- DCHECK(!read_cb_.is_null());
- DCHECK(stop_cb_.is_null());
-
- TRACE_EVENT_ASYNC_END0("media", "VideoFrameStream::Decode", this);
-
- if (status == VideoDecoder::kDecodeError) {
- DCHECK(!frame.get());
- state_ = STATE_ERROR;
- SatisfyRead(DECODE_ERROR, NULL);
- return;
- }
-
- if (status == VideoDecoder::kDecryptError) {
- DCHECK(!frame.get());
- state_ = STATE_ERROR;
- SatisfyRead(DECRYPT_ERROR, NULL);
- return;
- }
-
- // Any successful decode counts!
- if (buffer_size > 0) {
- PipelineStatistics statistics;
- statistics.video_bytes_decoded = buffer_size;
- statistics_cb_.Run(statistics);
- }
-
- // Drop decoding result if Reset() was called during decoding.
- // The resetting process will be handled when the decoder is reset.
- if (!reset_cb_.is_null()) {
- AbortRead();
- return;
- }
-
- // Decoder flushed. Reinitialize the video decoder.
- if (state_ == STATE_FLUSHING_DECODER &&
- status == VideoDecoder::kOk && frame->end_of_stream()) {
- ReinitializeDecoder();
- return;
- }
-
- if (status == VideoDecoder::kNotEnoughData) {
- if (state_ == STATE_NORMAL)
- ReadFromDemuxerStream();
- else if (state_ == STATE_FLUSHING_DECODER)
- FlushDecoder();
- return;
- }
-
- SatisfyRead(OK, frame);
-}
-
-void VideoFrameStream::ReadFromDemuxerStream() {
- DVLOG(2) << __FUNCTION__;
- DCHECK_EQ(state_, STATE_NORMAL) << state_;
- DCHECK(!read_cb_.is_null());
- DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
-
- state_ = STATE_PENDING_DEMUXER_READ;
- stream_->Read(
- base::Bind(&VideoFrameStream::OnBufferReady, weak_factory_.GetWeakPtr()));
-}
-
-void VideoFrameStream::OnBufferReady(
- DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& buffer) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_PENDING_DEMUXER_READ) << state_;
- DCHECK_EQ(buffer.get() != NULL, status == DemuxerStream::kOk) << status;
- DCHECK(!read_cb_.is_null());
- DCHECK(stop_cb_.is_null());
-
- state_ = STATE_NORMAL;
-
- if (status == DemuxerStream::kConfigChanged) {
- state_ = STATE_FLUSHING_DECODER;
- if (!reset_cb_.is_null()) {
- AbortRead();
- // If we are using DecryptingDemuxerStream, we already called DDS::Reset()
- // which will continue the resetting process in it's callback.
- if (!decrypting_demuxer_stream_)
- Reset(base::ResetAndReturn(&reset_cb_));
- // Reinitialization will continue after Reset() is done.
- } else {
- FlushDecoder();
- }
- return;
- }
-
- if (!reset_cb_.is_null()) {
- AbortRead();
- // If we are using DecryptingDemuxerStream, we already called DDS::Reset()
- // which will continue the resetting process in it's callback.
- if (!decrypting_demuxer_stream_)
- Reset(base::ResetAndReturn(&reset_cb_));
- return;
- }
-
- if (status == DemuxerStream::kAborted) {
- SatisfyRead(DEMUXER_READ_ABORTED, NULL);
- return;
- }
-
- DCHECK(status == DemuxerStream::kOk) << status;
- Decode(buffer);
-}
-
-void VideoFrameStream::ReinitializeDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_FLUSHING_DECODER) << state_;
-
- DCHECK(stream_->video_decoder_config().IsValidConfig());
- state_ = STATE_REINITIALIZING_DECODER;
- decoder_->Initialize(stream_->video_decoder_config(),
- base::Bind(&VideoFrameStream::OnDecoderReinitialized,
- weak_factory_.GetWeakPtr()));
-}
-
-void VideoFrameStream::OnDecoderReinitialized(PipelineStatus status) {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER) << state_;
- DCHECK(stop_cb_.is_null());
-
- // ReinitializeDecoder() can be called in two cases:
- // 1, Flushing decoder finished (see OnFrameReady()).
- // 2, Reset() was called during flushing decoder (see OnDecoderReset()).
- // Also, Reset() can be called during pending ReinitializeDecoder().
- // This function needs to handle them all!
-
- state_ = (status == PIPELINE_OK) ? STATE_NORMAL : STATE_ERROR;
-
- if (!reset_cb_.is_null()) {
- if (!read_cb_.is_null())
- AbortRead();
- base::ResetAndReturn(&reset_cb_).Run();
- }
-
- if (read_cb_.is_null())
- return;
-
- if (state_ == STATE_ERROR) {
- SatisfyRead(DECODE_ERROR, NULL);
- return;
- }
-
- ReadFromDemuxerStream();
-}
-
-void VideoFrameStream::ResetDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
- state_ == STATE_ERROR) << state_;
- DCHECK(!reset_cb_.is_null());
-
- decoder_->Reset(base::Bind(&VideoFrameStream::OnDecoderReset,
- weak_factory_.GetWeakPtr()));
-}
-
-void VideoFrameStream::OnDecoderReset() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
- state_ == STATE_ERROR) << state_;
- // If Reset() was called during pending read, read callback should be fired
- // before the reset callback is fired.
- DCHECK(read_cb_.is_null());
- DCHECK(!reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
-
- if (state_ != STATE_FLUSHING_DECODER) {
- base::ResetAndReturn(&reset_cb_).Run();
- return;
- }
-
- // The resetting process will be continued in OnDecoderReinitialized().
- ReinitializeDecoder();
-}
-
-void VideoFrameStream::StopDecoder() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
- DCHECK(!stop_cb_.is_null());
-
- decoder_->Stop(base::Bind(&VideoFrameStream::OnDecoderStopped,
- weak_factory_.GetWeakPtr()));
-}
-
-void VideoFrameStream::OnDecoderStopped() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
- // If Stop() was called during pending read/reset, read/reset callback should
- // be fired before the stop callback is fired.
- DCHECK(read_cb_.is_null());
- DCHECK(reset_cb_.is_null());
- DCHECK(!stop_cb_.is_null());
-
- state_ = STATE_STOPPED;
- stream_ = NULL;
- decoder_.reset();
- decrypting_demuxer_stream_.reset();
- base::ResetAndReturn(&stop_cb_).Run();
-}
-
-} // namespace media
diff --git a/chromium/media/filters/video_frame_stream.h b/chromium/media/filters/video_frame_stream.h
deleted file mode 100644
index f315677247c..00000000000
--- a/chromium/media/filters/video_frame_stream.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_VIDEO_FRAME_STREAM_H_
-#define MEDIA_FILTERS_VIDEO_FRAME_STREAM_H_
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
-#include "base/memory/weak_ptr.h"
-#include "media/base/decryptor.h"
-#include "media/base/demuxer_stream.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline_status.h"
-#include "media/base/video_decoder.h"
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace media {
-
-class DecryptingDemuxerStream;
-class VideoDecoderSelector;
-
-// Wraps a DemuxerStream and a list of VideoDecoders and provides decoded
-// VideoFrames to its client (e.g. VideoRendererImpl).
-class MEDIA_EXPORT VideoFrameStream {
- public:
- // Indicates completion of VideoFrameStream initialization.
- typedef base::Callback<void(bool success, bool has_alpha)> InitCB;
-
- enum Status {
- OK, // Everything went as planned.
- ABORTED, // Read aborted due to Reset() during pending read.
- DEMUXER_READ_ABORTED, // Demuxer returned aborted read.
- DECODE_ERROR, // Decoder returned decode error.
- DECRYPT_ERROR // Decoder returned decrypt error.
- };
-
- // Indicates completion of a VideoFrameStream read.
- typedef base::Callback<void(Status, const scoped_refptr<VideoFrame>&)> ReadCB;
-
- VideoFrameStream(const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
- virtual ~VideoFrameStream();
-
- // Initializes the VideoFrameStream and returns the initialization result
- // through |init_cb|. Note that |init_cb| is always called asynchronously.
- void Initialize(DemuxerStream* stream,
- const StatisticsCB& statistics_cb,
- const InitCB& init_cb);
-
- // Reads a decoded VideoFrame and returns it via the |read_cb|. Note that
- // |read_cb| is always called asynchronously. This method should only be
- // called after initialization has succeeded and must not be called during
- // any pending Reset() and/or Stop().
- void Read(const ReadCB& read_cb);
-
- // Resets the decoder, flushes all decoded frames and/or internal buffers,
- // fires any existing pending read callback and calls |closure| on completion.
- // Note that |closure| is always called asynchronously. This method should
- // only be called after initialization has succeeded and must not be called
- // during any pending Reset() and/or Stop().
- void Reset(const base::Closure& closure);
-
- // Stops the decoder, fires any existing pending read callback or reset
- // callback and calls |closure| on completion. Note that |closure| is always
- // called asynchronously. The VideoFrameStream cannot be used anymore after
- // it is stopped. This method can be called at any time but not during another
- // pending Stop().
- void Stop(const base::Closure& closure);
-
- // Returns true if the decoder currently has the ability to decode and return
- // a VideoFrame.
- bool CanReadWithoutStalling() const;
-
- private:
- enum State {
- STATE_UNINITIALIZED,
- STATE_INITIALIZING,
- STATE_NORMAL, // Includes idle, pending decoder decode/reset/stop.
- STATE_FLUSHING_DECODER,
- STATE_PENDING_DEMUXER_READ,
- STATE_REINITIALIZING_DECODER,
- STATE_STOPPED,
- STATE_ERROR
- };
-
- // Called when |decoder_selector| selected the |selected_decoder|.
- // |decrypting_demuxer_stream| was also populated if a DecryptingDemuxerStream
- // is created to help decrypt the encrypted stream.
- void OnDecoderSelected(
- scoped_ptr<VideoDecoder> selected_decoder,
- scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream);
-
- // Satisfy pending |read_cb_| with |status| and |frame|.
- void SatisfyRead(Status status, const scoped_refptr<VideoFrame>& frame);
-
- // Abort pending |read_cb_|.
- void AbortRead();
-
- // Decodes |buffer| and returns the result via OnFrameReady().
- void Decode(const scoped_refptr<DecoderBuffer>& buffer);
-
- // Flushes the decoder with an EOS buffer to retrieve internally buffered
- // video frames.
- void FlushDecoder();
-
- // Callback for VideoDecoder::Decode().
- void OnFrameReady(int buffer_size,
- const VideoDecoder::Status status,
- const scoped_refptr<VideoFrame>& frame);
-
- // Reads a buffer from |stream_| and returns the result via OnBufferReady().
- void ReadFromDemuxerStream();
-
- // Callback for DemuxerStream::Read().
- void OnBufferReady(DemuxerStream::Status status,
- const scoped_refptr<DecoderBuffer>& buffer);
-
- void ReinitializeDecoder();
-
- // Callback for VideoDecoder reinitialization.
- void OnDecoderReinitialized(PipelineStatus status);
-
- void ResetDecoder();
- void OnDecoderReset();
-
- void StopDecoder();
- void OnDecoderStopped();
-
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<VideoFrameStream> weak_factory_;
-
- State state_;
-
- StatisticsCB statistics_cb_;
- InitCB init_cb_;
-
- ReadCB read_cb_;
- base::Closure reset_cb_;
- base::Closure stop_cb_;
-
- DemuxerStream* stream_;
-
- scoped_ptr<VideoDecoderSelector> decoder_selector_;
-
- // These two will be set by VideoDecoderSelector::SelectVideoDecoder().
- scoped_ptr<VideoDecoder> decoder_;
- scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoFrameStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_VIDEO_FRAME_STREAM_H_
diff --git a/chromium/media/filters/video_frame_stream_unittest.cc b/chromium/media/filters/video_frame_stream_unittest.cc
index c7d22acfa9e..f2494042a19 100644
--- a/chromium/media/filters/video_frame_stream_unittest.cc
+++ b/chromium/media/filters/video_frame_stream_unittest.cc
@@ -8,12 +8,13 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
+#include "media/filters/decoder_stream.h"
#include "media/filters/fake_demuxer_stream.h"
#include "media/filters/fake_video_decoder.h"
-#include "media/filters/video_frame_stream.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
+using ::testing::AnyNumber;
using ::testing::Assign;
using ::testing::Invoke;
using ::testing::NiceMock;
@@ -22,18 +23,33 @@ using ::testing::SaveArg;
static const int kNumConfigs = 3;
static const int kNumBuffersInOneConfig = 5;
-static const int kDecodingDelay = 7;
namespace media {
-class VideoFrameStreamTest : public testing::TestWithParam<bool> {
+struct VideoFrameStreamTestParams {
+ VideoFrameStreamTestParams(bool is_encrypted,
+ int decoding_delay,
+ int parallel_decoding)
+ : is_encrypted(is_encrypted),
+ decoding_delay(decoding_delay),
+ parallel_decoding(parallel_decoding) {}
+
+ bool is_encrypted;
+ int decoding_delay;
+ int parallel_decoding;
+};
+
+class VideoFrameStreamTest
+ : public testing::Test,
+ public testing::WithParamInterface<VideoFrameStreamTestParams> {
public:
VideoFrameStreamTest()
: demuxer_stream_(new FakeDemuxerStream(kNumConfigs,
kNumBuffersInOneConfig,
- GetParam())),
+ GetParam().is_encrypted)),
decryptor_(new NiceMock<MockDecryptor>()),
- decoder_(new FakeVideoDecoder(kDecodingDelay)),
+ decoder_(new FakeVideoDecoder(GetParam().decoding_delay,
+ GetParam().parallel_decoding)),
is_initialized_(false),
num_decoded_frames_(0),
pending_initialize_(false),
@@ -70,13 +86,14 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
EXPECT_FALSE(is_initialized_);
}
+ MOCK_METHOD1(OnNewSpliceBuffer, void(base::TimeDelta));
MOCK_METHOD1(SetDecryptorReadyCallback, void(const media::DecryptorReadyCB&));
void OnStatistics(const PipelineStatistics& statistics) {
total_bytes_decoded_ += statistics.video_bytes_decoded;
}
- void OnInitialized(bool success, bool has_alpha) {
+ void OnInitialized(bool success) {
DCHECK(!pending_read_);
DCHECK(!pending_reset_);
DCHECK(pending_initialize_);
@@ -91,6 +108,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
pending_initialize_ = true;
video_frame_stream_->Initialize(
demuxer_stream_.get(),
+ false,
base::Bind(&VideoFrameStreamTest::OnStatistics, base::Unretained(this)),
base::Bind(&VideoFrameStreamTest::OnInitialized,
base::Unretained(this)));
@@ -109,8 +127,8 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
}
DCHECK_EQ(stream_type, Decryptor::kVideo);
- scoped_refptr<DecoderBuffer> decrypted = DecoderBuffer::CopyFrom(
- encrypted->data(), encrypted->data_size());
+ scoped_refptr<DecoderBuffer> decrypted =
+ DecoderBuffer::CopyFrom(encrypted->data(), encrypted->data_size());
decrypted->set_timestamp(encrypted->timestamp());
decrypted->set_duration(encrypted->duration());
decrypt_cb.Run(Decryptor::kSuccess, decrypted);
@@ -120,16 +138,19 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
void FrameReady(VideoFrameStream::Status status,
const scoped_refptr<VideoFrame>& frame) {
DCHECK(pending_read_);
- // TODO(xhwang): Add test cases where the fake decoder returns error or
- // the fake demuxer aborts demuxer read.
- ASSERT_TRUE(status == VideoFrameStream::OK ||
- status == VideoFrameStream::ABORTED) << status;
frame_read_ = frame;
+ last_read_status_ = status;
if (frame.get() && !frame->end_of_stream())
num_decoded_frames_++;
pending_read_ = false;
}
+ void FrameReadyHoldDemuxer(VideoFrameStream::Status status,
+ const scoped_refptr<VideoFrame>& frame) {
+ FrameReady(status, frame);
+ }
+
void OnReset() {
DCHECK(!pending_read_);
DCHECK(pending_reset_);
@@ -137,6 +158,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
}
void OnStopped() {
+ DCHECK(!pending_initialize_);
DCHECK(!pending_read_);
DCHECK(!pending_reset_);
DCHECK(pending_stop_);
@@ -159,6 +181,15 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
} while (!pending_read_);
}
+ void ReadAllFrames() {
+ do {
+ ReadOneFrame();
+ } while (frame_read_.get() && !frame_read_->end_of_stream());
+
+ const int total_num_frames = kNumConfigs * kNumBuffersInOneConfig;
+ DCHECK_EQ(num_decoded_frames_, total_num_frames);
+ }
+
enum PendingState {
NOT_PENDING,
DEMUXER_READ_NORMAL,
@@ -167,9 +198,8 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
DECRYPTOR_NO_KEY,
DECODER_INIT,
DECODER_REINIT,
- DECODER_READ,
- DECODER_RESET,
- DECODER_STOP
+ DECODER_DECODE,
+ DECODER_RESET
};
void EnterPendingState(PendingState state) {
@@ -212,8 +242,8 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
ReadUntilPending();
break;
- case DECODER_READ:
- decoder_->HoldNextRead();
+ case DECODER_DECODE:
+ decoder_->HoldDecode();
ReadUntilPending();
break;
@@ -225,16 +255,6 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
message_loop_.RunUntilIdle();
break;
- case DECODER_STOP:
- decoder_->HoldNextStop();
- // Check that the pipeline statistics callback was fired correctly.
- EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
- pending_stop_ = true;
- video_frame_stream_->Stop(base::Bind(&VideoFrameStreamTest::OnStopped,
- base::Unretained(this)));
- message_loop_.RunUntilIdle();
- break;
-
case NOT_PENDING:
NOTREACHED();
break;
@@ -264,19 +284,14 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
decoder_->SatisfyInit();
break;
- case DECODER_READ:
- decoder_->SatisfyRead();
+ case DECODER_DECODE:
+ decoder_->SatisfyDecode();
break;
case DECODER_RESET:
decoder_->SatisfyReset();
break;
- case DECODER_STOP:
- DCHECK(pending_stop_);
- decoder_->SatisfyStop();
- break;
-
case NOT_PENDING:
NOTREACHED();
break;
@@ -291,8 +306,8 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
}
void Read() {
- EnterPendingState(DECODER_READ);
- SatisfyPendingCallback(DECODER_READ);
+ EnterPendingState(DECODER_DECODE);
+ SatisfyPendingCallback(DECODER_DECODE);
}
void Reset() {
@@ -301,8 +316,12 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
}
void Stop() {
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_STOP);
+ // Check that the pipeline statistics callback was fired correctly.
+ EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
+ pending_stop_ = true;
+ video_frame_stream_->Stop(base::Bind(&VideoFrameStreamTest::OnStopped,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
}
base::MessageLoop message_loop_;
@@ -322,6 +341,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
bool pending_stop_;
int total_bytes_decoded_;
scoped_refptr<VideoFrame> frame_read_;
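+ // Status of the most recently completed Read(), recorded by FrameReady().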
+ VideoFrameStream::Status last_read_status_;
// Decryptor has no key to decrypt a frame.
bool has_no_key_;
@@ -330,8 +350,26 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
DISALLOW_COPY_AND_ASSIGN(VideoFrameStreamTest);
};
-INSTANTIATE_TEST_CASE_P(Clear, VideoFrameStreamTest, testing::Values(false));
-INSTANTIATE_TEST_CASE_P(Encrypted, VideoFrameStreamTest, testing::Values(true));
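+// Test parameters are (is_encrypted, decoding_delay, parallel_decoding).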
+INSTANTIATE_TEST_CASE_P(
+ Clear,
+ VideoFrameStreamTest,
+ ::testing::Values(
+ VideoFrameStreamTestParams(false, 0, 1),
+ VideoFrameStreamTestParams(false, 3, 1),
+ VideoFrameStreamTestParams(false, 7, 1)));
+INSTANTIATE_TEST_CASE_P(
+ Encrypted,
+ VideoFrameStreamTest,
+ ::testing::Values(
+ VideoFrameStreamTestParams(true, 7, 1)));
+
+INSTANTIATE_TEST_CASE_P(
+ Clear_Parallel,
+ VideoFrameStreamTest,
+ ::testing::Values(
+ VideoFrameStreamTestParams(false, 0, 3),
+ VideoFrameStreamTestParams(false, 2, 3)));
+
TEST_P(VideoFrameStreamTest, Initialization) {
Initialize();
@@ -344,12 +382,7 @@ TEST_P(VideoFrameStreamTest, ReadOneFrame) {
TEST_P(VideoFrameStreamTest, ReadAllFrames) {
Initialize();
- do {
- Read();
- } while (frame_read_.get() && !frame_read_->end_of_stream());
-
- const int total_num_frames = kNumConfigs * kNumBuffersInOneConfig;
- DCHECK_EQ(num_decoded_frames_, total_num_frames);
+ ReadAllFrames();
}
TEST_P(VideoFrameStreamTest, Read_AfterReset) {
@@ -360,6 +393,86 @@ TEST_P(VideoFrameStreamTest, Read_AfterReset) {
Read();
}
+TEST_P(VideoFrameStreamTest, Read_BlockedDemuxer) {
+ Initialize();
+ demuxer_stream_->HoldNextRead();
+ ReadOneFrame();
+ EXPECT_TRUE(pending_read_);
+
+ int demuxed_buffers = 0;
+
+ // Pass frames from the demuxer to the VideoFrameStream until the first read
+ // request is satisfied.
+ while (pending_read_) {
+ ++demuxed_buffers;
+ demuxer_stream_->SatisfyReadAndHoldNext();
+ message_loop_.RunUntilIdle();
+ }
+
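+ // The first frame comes out either once decoding_delay + 1 buffers have been
+ // demuxed, or once all kNumBuffersInOneConfig buffers plus the config-change
+ // read have been demuxed, whichever happens first.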
+ EXPECT_EQ(std::min(GetParam().decoding_delay + 1, kNumBuffersInOneConfig + 1),
+ demuxed_buffers);
+
+ // At this point the stream is waiting on a read from the demuxer, but there
+ // is no pending read from the stream. The stream should stay blocked if we
+ // try reading from it again.
+ ReadUntilPending();
+
+ demuxer_stream_->SatisfyRead();
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(pending_read_);
+}
+
+TEST_P(VideoFrameStreamTest, Read_BlockedDemuxerAndDecoder) {
+ // Test applies only when the decoder allows multiple parallel requests.
+ if (GetParam().parallel_decoding == 1)
+ return;
+
+ Initialize();
+ demuxer_stream_->HoldNextRead();
+ decoder_->HoldDecode();
+ ReadOneFrame();
+ EXPECT_TRUE(pending_read_);
+
+ int demuxed_buffers = 0;
+
+ // Pass frames from the demuxer to the VideoFrameStream until the first read
+ // request is satisfied, while always keeping one decode request pending.
+ while (pending_read_) {
+ ++demuxed_buffers;
+ demuxer_stream_->SatisfyReadAndHoldNext();
+ message_loop_.RunUntilIdle();
+
+ // Always keep one decode request pending.
+ if (demuxed_buffers > 1) {
+ decoder_->SatisfySingleDecode();
+ message_loop_.RunUntilIdle();
+ }
+ }
+
+ ReadUntilPending();
+ EXPECT_TRUE(pending_read_);
+
+ // Unblocking one decode request should unblock the read even while the
+ // demuxer is still blocked.
+ decoder_->SatisfySingleDecode();
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(pending_read_);
+
+ // Stream should still be blocked on the demuxer after unblocking the decoder.
+ decoder_->SatisfyDecode();
+ ReadUntilPending();
+ EXPECT_TRUE(pending_read_);
+
+ // Verify that the stream has returned all frames that have been demuxed,
+ // accounting for the decoder delay.
+ EXPECT_EQ(demuxed_buffers - GetParam().decoding_delay, num_decoded_frames_);
+
+ // Unblocking the demuxer will unblock the stream.
+ demuxer_stream_->SatisfyRead();
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(pending_read_);
+}
+
// No Reset() before initialization is successfully completed.
TEST_P(VideoFrameStreamTest, Reset_AfterInitialization) {
@@ -405,11 +518,11 @@ TEST_P(VideoFrameStreamTest, Reset_DuringDemuxerRead_ConfigChange) {
Read();
}
-TEST_P(VideoFrameStreamTest, Reset_DuringNormalDecoderRead) {
+TEST_P(VideoFrameStreamTest, Reset_DuringNormalDecoderDecode) {
Initialize();
- EnterPendingState(DECODER_READ);
+ EnterPendingState(DECODER_DECODE);
EnterPendingState(DECODER_RESET);
- SatisfyPendingCallback(DECODER_READ);
+ SatisfyPendingCallback(DECODER_DECODE);
SatisfyPendingCallback(DECODER_RESET);
Read();
}
@@ -421,6 +534,26 @@ TEST_P(VideoFrameStreamTest, Reset_AfterNormalRead) {
Read();
}
+TEST_P(VideoFrameStreamTest, Reset_AfterNormalReadWithActiveSplice) {
+ video_frame_stream_->set_splice_observer(base::Bind(
+ &VideoFrameStreamTest::OnNewSpliceBuffer, base::Unretained(this)));
+ Initialize();
+
+ // Send buffers with a splice timestamp, which sets the active splice flag.
+ const base::TimeDelta splice_timestamp = base::TimeDelta();
+ demuxer_stream_->set_splice_timestamp(splice_timestamp);
+ EXPECT_CALL(*this, OnNewSpliceBuffer(splice_timestamp)).Times(AnyNumber());
+ Read();
+
+ // Issue an explicit Reset() and clear the splice timestamp.
+ Reset();
+ demuxer_stream_->set_splice_timestamp(kNoTimestamp());
+
+ // Ensure none of the upcoming calls indicate they have a splice timestamp.
+ EXPECT_CALL(*this, OnNewSpliceBuffer(_)).Times(0);
+ Read();
+}
+
TEST_P(VideoFrameStreamTest, Reset_AfterDemuxerRead_ConfigChange) {
Initialize();
EnterPendingState(DEMUXER_READ_CONFIG_CHANGE);
@@ -429,6 +562,15 @@ TEST_P(VideoFrameStreamTest, Reset_AfterDemuxerRead_ConfigChange) {
Read();
}
+TEST_P(VideoFrameStreamTest, Reset_AfterEndOfStream) {
+ Initialize();
+ ReadAllFrames();
+ Reset();
+ num_decoded_frames_ = 0;
+ demuxer_stream_->SeekToStart();
+ ReadAllFrames();
+}
+
TEST_P(VideoFrameStreamTest, Reset_DuringNoKeyRead) {
Initialize();
EnterPendingState(DECRYPTOR_NO_KEY);
@@ -443,7 +585,7 @@ TEST_P(VideoFrameStreamTest, Stop_BeforeInitialization) {
}
TEST_P(VideoFrameStreamTest, Stop_DuringSetDecryptor) {
- if (!GetParam()) {
+ if (!GetParam().is_encrypted) {
DVLOG(1) << "SetDecryptor test only runs when the stream is encrytped.";
return;
}
@@ -457,9 +599,7 @@ TEST_P(VideoFrameStreamTest, Stop_DuringSetDecryptor) {
TEST_P(VideoFrameStreamTest, Stop_DuringInitialization) {
EnterPendingState(DECODER_INIT);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_INIT);
- SatisfyPendingCallback(DECODER_STOP);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_AfterInitialization) {
@@ -470,9 +610,7 @@ TEST_P(VideoFrameStreamTest, Stop_AfterInitialization) {
TEST_P(VideoFrameStreamTest, Stop_DuringReinitialization) {
Initialize();
EnterPendingState(DECODER_REINIT);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_REINIT);
- SatisfyPendingCallback(DECODER_STOP);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_AfterReinitialization) {
@@ -485,25 +623,19 @@ TEST_P(VideoFrameStreamTest, Stop_AfterReinitialization) {
TEST_P(VideoFrameStreamTest, Stop_DuringDemuxerRead_Normal) {
Initialize();
EnterPendingState(DEMUXER_READ_NORMAL);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DEMUXER_READ_NORMAL);
- SatisfyPendingCallback(DECODER_STOP);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_DuringDemuxerRead_ConfigChange) {
Initialize();
EnterPendingState(DEMUXER_READ_CONFIG_CHANGE);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DEMUXER_READ_CONFIG_CHANGE);
- SatisfyPendingCallback(DECODER_STOP);
+ Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringNormalDecoderRead) {
+TEST_P(VideoFrameStreamTest, Stop_DuringNormalDecoderDecode) {
Initialize();
- EnterPendingState(DECODER_READ);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_READ);
- SatisfyPendingCallback(DECODER_STOP);
+ EnterPendingState(DECODER_DECODE);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_AfterNormalRead) {
@@ -528,9 +660,7 @@ TEST_P(VideoFrameStreamTest, Stop_DuringNoKeyRead) {
TEST_P(VideoFrameStreamTest, Stop_DuringReset) {
Initialize();
EnterPendingState(DECODER_RESET);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_RESET);
- SatisfyPendingCallback(DECODER_STOP);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_AfterReset) {
@@ -541,22 +671,17 @@ TEST_P(VideoFrameStreamTest, Stop_AfterReset) {
TEST_P(VideoFrameStreamTest, Stop_DuringRead_DuringReset) {
Initialize();
- EnterPendingState(DECODER_READ);
+ EnterPendingState(DECODER_DECODE);
EnterPendingState(DECODER_RESET);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_READ);
- SatisfyPendingCallback(DECODER_RESET);
- SatisfyPendingCallback(DECODER_STOP);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_AfterRead_DuringReset) {
Initialize();
- EnterPendingState(DECODER_READ);
+ EnterPendingState(DECODER_DECODE);
EnterPendingState(DECODER_RESET);
- SatisfyPendingCallback(DECODER_READ);
- EnterPendingState(DECODER_STOP);
- SatisfyPendingCallback(DECODER_RESET);
- SatisfyPendingCallback(DECODER_STOP);
+ SatisfyPendingCallback(DECODER_DECODE);
+ Stop();
}
TEST_P(VideoFrameStreamTest, Stop_AfterRead_AfterReset) {
@@ -566,4 +691,38 @@ TEST_P(VideoFrameStreamTest, Stop_AfterRead_AfterReset) {
Stop();
}
+TEST_P(VideoFrameStreamTest, DecoderErrorWhenReading) {
+ Initialize();
+ EnterPendingState(DECODER_DECODE);
+ decoder_->SimulateError();
+ message_loop_.RunUntilIdle();
+ ASSERT_FALSE(pending_read_);
+ ASSERT_EQ(VideoFrameStream::DECODE_ERROR, last_read_status_);
+}
+
+TEST_P(VideoFrameStreamTest, DecoderErrorWhenNotReading) {
+ Initialize();
+
+ decoder_->HoldDecode();
+ ReadOneFrame();
+ EXPECT_TRUE(pending_read_);
+
+ // Satisfy decode requests until we get the first frame out.
+ while (pending_read_) {
+ decoder_->SatisfySingleDecode();
+ message_loop_.RunUntilIdle();
+ }
+
+ // Trigger an error in the decoder.
+ decoder_->SimulateError();
+
+ // The error must surface from Read() as DECODE_ERROR.
+ while (last_read_status_ == VideoFrameStream::OK) {
+ ReadOneFrame();
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(pending_read_);
+ }
+ EXPECT_EQ(VideoFrameStream::DECODE_ERROR, last_read_status_);
+}
+
} // namespace media
diff --git a/chromium/media/filters/video_renderer_impl.cc b/chromium/media/filters/video_renderer_impl.cc
index da07d9a6afb..736a91f2f28 100644
--- a/chromium/media/filters/video_renderer_impl.cc
+++ b/chromium/media/filters/video_renderer_impl.cc
@@ -8,7 +8,8 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
#include "base/threading/platform_thread.h"
#include "media/base/buffers.h"
#include "media/base/limits.h"
@@ -17,22 +18,17 @@
namespace media {
-base::TimeDelta VideoRendererImpl::kMaxLastFrameDuration() {
- return base::TimeDelta::FromMilliseconds(250);
-}
-
VideoRendererImpl::VideoRendererImpl(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
const PaintCB& paint_cb,
- const SetOpaqueCB& set_opaque_cb,
bool drop_frames)
- : message_loop_(message_loop),
- weak_factory_(this),
- video_frame_stream_(
- message_loop, decoders.Pass(), set_decryptor_ready_cb),
+ : task_runner_(task_runner),
+ video_frame_stream_(task_runner, decoders.Pass(), set_decryptor_ready_cb),
+ low_delay_(false),
received_end_of_stream_(false),
+ rendered_end_of_stream_(false),
frame_available_(&lock_),
state_(kUninitialized),
thread_(),
@@ -40,10 +36,10 @@ VideoRendererImpl::VideoRendererImpl(
drop_frames_(drop_frames),
playback_rate_(0),
paint_cb_(paint_cb),
- set_opaque_cb_(set_opaque_cb),
last_timestamp_(kNoTimestamp()),
frames_decoded_(0),
- frames_dropped_(0) {
+ frames_dropped_(0),
+ weak_factory_(this) {
DCHECK(!paint_cb_.is_null());
}
@@ -54,25 +50,17 @@ VideoRendererImpl::~VideoRendererImpl() {
}
void VideoRendererImpl::Play(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_EQ(kPrerolled, state_);
state_ = kPlaying;
callback.Run();
}
-void VideoRendererImpl::Pause(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK(state_ != kUninitialized || state_ == kError);
- state_ = kPaused;
- callback.Run();
-}
-
void VideoRendererImpl::Flush(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kPaused);
+ DCHECK_NE(state_, kUninitialized);
flush_cb_ = callback;
state_ = kFlushing;
@@ -80,12 +68,14 @@ void VideoRendererImpl::Flush(const base::Closure& callback) {
// stream and needs to drain it before flushing it.
ready_frames_.clear();
received_end_of_stream_ = false;
- video_frame_stream_.Reset(base::Bind(
- &VideoRendererImpl::OnVideoFrameStreamResetDone, weak_this_));
+ rendered_end_of_stream_ = false;
+ video_frame_stream_.Reset(
+ base::Bind(&VideoRendererImpl::OnVideoFrameStreamResetDone,
+ weak_factory_.GetWeakPtr()));
}
void VideoRendererImpl::Stop(const base::Closure& callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
if (state_ == kUninitialized || state_ == kStopped) {
callback.Run();
@@ -119,18 +109,18 @@ void VideoRendererImpl::Stop(const base::Closure& callback) {
}
void VideoRendererImpl::SetPlaybackRate(float playback_rate) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
playback_rate_ = playback_rate;
}
void VideoRendererImpl::Preroll(base::TimeDelta time,
const PipelineStatusCB& cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(!cb.is_null());
DCHECK(preroll_cb_.is_null());
- DCHECK(state_ == kFlushed || state_== kPaused) << "state_ " << state_;
+ DCHECK(state_ == kFlushed || state_ == kPlaying) << "state_ " << state_;
if (state_ == kFlushed) {
DCHECK(time != kNoTimestamp());
@@ -153,32 +143,31 @@ void VideoRendererImpl::Preroll(base::TimeDelta time,
}
void VideoRendererImpl::Initialize(DemuxerStream* stream,
+ bool low_delay,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& max_time_cb,
- const NaturalSizeChangedCB& size_changed_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
const TimeDeltaCB& get_duration_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(stream);
DCHECK_EQ(stream->type(), DemuxerStream::VIDEO);
DCHECK(!init_cb.is_null());
DCHECK(!statistics_cb.is_null());
DCHECK(!max_time_cb.is_null());
- DCHECK(!size_changed_cb.is_null());
DCHECK(!ended_cb.is_null());
DCHECK(!get_time_cb.is_null());
DCHECK(!get_duration_cb.is_null());
DCHECK_EQ(kUninitialized, state_);
- weak_this_ = weak_factory_.GetWeakPtr();
+ low_delay_ = low_delay;
+
init_cb_ = init_cb;
statistics_cb_ = statistics_cb;
max_time_cb_ = max_time_cb;
- size_changed_cb_ = size_changed_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
get_time_cb_ = get_time_cb;
@@ -187,14 +176,14 @@ void VideoRendererImpl::Initialize(DemuxerStream* stream,
video_frame_stream_.Initialize(
stream,
+ low_delay,
statistics_cb,
base::Bind(&VideoRendererImpl::OnVideoFrameStreamInitialized,
- weak_this_));
+ weak_factory_.GetWeakPtr()));
}
-void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success,
- bool has_alpha) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
if (state_ == kStopped)
@@ -214,16 +203,8 @@ void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success,
// have not populated any buffers yet.
state_ = kFlushed;
- set_opaque_cb_.Run(!has_alpha);
- set_opaque_cb_.Reset();
-
// Create our video thread.
- if (!base::PlatformThread::Create(0, this, &thread_)) {
- NOTREACHED() << "Video thread creation failed";
- state_ = kError;
- base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_INITIALIZATION_FAILED);
- return;
- }
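+ // Thread creation failure is now fatal rather than reported as an init error.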
+ CHECK(base::PlatformThread::Create(0, this, &thread_));
#if defined(OS_WIN)
// Bump up our priority so our sleeping is more accurate.
@@ -261,12 +242,9 @@ void VideoRendererImpl::ThreadMain() {
// Remain idle until we have the next frame ready for rendering.
if (ready_frames_.empty()) {
- if (received_end_of_stream_) {
- state_ = kEnded;
+ if (received_end_of_stream_ && !rendered_end_of_stream_) {
+ rendered_end_of_stream_ = true;
ended_cb_.Run();
-
- // No need to sleep here as we idle when |state_ != kPlaying|.
- continue;
}
UpdateStatsAndWait_Locked(kIdleTimeDelta);
@@ -297,8 +275,8 @@ void VideoRendererImpl::ThreadMain() {
// the accuracy of our frame timing code. http://crbug.com/149829
if (drop_frames_ && last_timestamp_ != kNoTimestamp()) {
base::TimeDelta now = get_time_cb_.Run();
- base::TimeDelta deadline = ready_frames_.front()->GetTimestamp() +
- (ready_frames_.front()->GetTimestamp() - last_timestamp_) / 2;
+ base::TimeDelta deadline = ready_frames_.front()->timestamp() +
+ (ready_frames_.front()->timestamp() - last_timestamp_) / 2;
if (now > deadline) {
DropNextReadyFrame_Locked();
@@ -321,18 +299,13 @@ void VideoRendererImpl::PaintNextReadyFrame_Locked() {
ready_frames_.pop_front();
frames_decoded_++;
- last_timestamp_ = next_frame->GetTimestamp();
-
- const gfx::Size& natural_size = next_frame->natural_size();
- if (natural_size != last_natural_size_) {
- last_natural_size_ = natural_size;
- size_changed_cb_.Run(natural_size);
- }
+ last_timestamp_ = next_frame->timestamp();
paint_cb_.Run(next_frame);
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoRendererImpl::AttemptRead, weak_this_));
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
}
void VideoRendererImpl::DropNextReadyFrame_Locked() {
@@ -340,13 +313,14 @@ void VideoRendererImpl::DropNextReadyFrame_Locked() {
lock_.AssertAcquired();
- last_timestamp_ = ready_frames_.front()->GetTimestamp();
+ last_timestamp_ = ready_frames_.front()->timestamp();
ready_frames_.pop_front();
frames_decoded_++;
frames_dropped_++;
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoRendererImpl::AttemptRead, weak_this_));
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
}
void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
@@ -376,7 +350,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
// Already-queued VideoFrameStream ReadCB's can fire after various state
// transitions have happened; in that case just drop those frames immediately.
- if (state_ == kStopped || state_ == kError || state_ == kFlushing)
+ if (state_ == kStopped || state_ == kFlushing)
return;
if (!frame.get()) {
@@ -403,7 +377,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
// Maintain the latest frame decoded so the correct frame is displayed after
// prerolling has completed.
if (state_ == kPrerolling && preroll_timestamp_ != kNoTimestamp() &&
- frame->GetTimestamp() <= preroll_timestamp_) {
+ frame->timestamp() <= preroll_timestamp_) {
ready_frames_.clear();
}
@@ -422,7 +396,8 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
bool VideoRendererImpl::ShouldTransitionToPrerolled_Locked() {
return state_ == kPrerolling &&
(!video_frame_stream_.CanReadWithoutStalling() ||
- ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames));
+ ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames) ||
+ (low_delay_ && ready_frames_.size() > 0));
}
void VideoRendererImpl::AddReadyFrame_Locked(
@@ -436,15 +411,15 @@ void VideoRendererImpl::AddReadyFrame_Locked(
// frame rate. Another way for this to happen is for the container to state
// a smaller duration than the largest packet timestamp.
base::TimeDelta duration = get_duration_cb_.Run();
- if (frame->GetTimestamp() > duration) {
- frame->SetTimestamp(duration);
+ if (frame->timestamp() > duration) {
+ frame->set_timestamp(duration);
}
ready_frames_.push_back(frame);
DCHECK_LE(ready_frames_.size(),
static_cast<size_t>(limits::kMaxVideoFrames));
- max_time_cb_.Run(frame->GetTimestamp());
+ max_time_cb_.Run(frame->timestamp());
// Avoid needlessly waking up |thread_| unless playing.
if (state_ == kPlaying)
@@ -457,7 +432,7 @@ void VideoRendererImpl::AttemptRead() {
}
void VideoRendererImpl::AttemptRead_Locked() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
lock_.AssertAcquired();
if (pending_read_ || received_end_of_stream_ ||
@@ -466,22 +441,19 @@ void VideoRendererImpl::AttemptRead_Locked() {
}
switch (state_) {
- case kPaused:
case kPrerolling:
+ case kPrerolled:
case kPlaying:
pending_read_ = true;
video_frame_stream_.Read(base::Bind(&VideoRendererImpl::FrameReady,
- weak_this_));
+ weak_factory_.GetWeakPtr()));
return;
case kUninitialized:
case kInitializing:
- case kPrerolled:
case kFlushing:
case kFlushed:
- case kEnded:
case kStopped:
- case kError:
return;
}
}
@@ -495,6 +467,7 @@ void VideoRendererImpl::OnVideoFrameStreamResetDone() {
DCHECK(!pending_read_);
DCHECK(ready_frames_.empty());
DCHECK(!received_end_of_stream_);
+ DCHECK(!rendered_end_of_stream_);
state_ = kFlushed;
last_timestamp_ = kNoTimestamp();
@@ -506,7 +479,7 @@ base::TimeDelta VideoRendererImpl::CalculateSleepDuration(
float playback_rate) {
// Determine the current and next presentation timestamps.
base::TimeDelta now = get_time_cb_.Run();
- base::TimeDelta next_pts = next_frame->GetTimestamp();
+ base::TimeDelta next_pts = next_frame->timestamp();
// Scale our sleep based on the playback rate.
base::TimeDelta sleep = next_pts - now;
diff --git a/chromium/media/filters/video_renderer_impl.h b/chromium/media/filters/video_renderer_impl.h
index 79c5d824ced..4fef25d55c6 100644
--- a/chromium/media/filters/video_renderer_impl.h
+++ b/chromium/media/filters/video_renderer_impl.h
@@ -19,10 +19,10 @@
#include "media/base/video_decoder.h"
#include "media/base/video_frame.h"
#include "media/base/video_renderer.h"
-#include "media/filters/video_frame_stream.h"
+#include "media/filters/decoder_stream.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -36,44 +36,36 @@ class MEDIA_EXPORT VideoRendererImpl
public base::PlatformThread::Delegate {
public:
typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> PaintCB;
- typedef base::Callback<void(bool)> SetOpaqueCB;
-
- // Maximum duration of the last frame.
- static base::TimeDelta kMaxLastFrameDuration();
// |decoders| contains the VideoDecoders to use when initializing.
//
// |paint_cb| is executed on the video frame timing thread whenever a new
// frame is available for painting.
//
- // |set_opaque_cb| is executed when the renderer is initialized to inform
- // the player whether the decoded output will be opaque or not.
- //
// Implementors should avoid doing any sort of heavy work in this method and
// instead post a task to a common/worker thread to handle rendering. Slowing
// down the video thread may result in losing synchronization with audio.
//
// Setting |drop_frames_| to true causes the renderer to drop expired frames.
- VideoRendererImpl(const scoped_refptr<base::MessageLoopProxy>& message_loop,
- ScopedVector<VideoDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
- const PaintCB& paint_cb,
- const SetOpaqueCB& set_opaque_cb,
- bool drop_frames);
+ VideoRendererImpl(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ ScopedVector<VideoDecoder> decoders,
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const PaintCB& paint_cb,
+ bool drop_frames);
virtual ~VideoRendererImpl();
// VideoRenderer implementation.
virtual void Initialize(DemuxerStream* stream,
+ bool low_delay,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& max_time_cb,
- const NaturalSizeChangedCB& size_changed_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
const TimeDeltaCB& get_duration_cb) OVERRIDE;
virtual void Play(const base::Closure& callback) OVERRIDE;
- virtual void Pause(const base::Closure& callback) OVERRIDE;
virtual void Flush(const base::Closure& callback) OVERRIDE;
virtual void Preroll(base::TimeDelta time,
const PipelineStatusCB& cb) OVERRIDE;
@@ -85,7 +77,7 @@ class MEDIA_EXPORT VideoRendererImpl
private:
// Callback for |video_frame_stream_| initialization.
- void OnVideoFrameStreamInitialized(bool success, bool has_alpha);
+ void OnVideoFrameStreamInitialized(bool success);
// Callback for |video_frame_stream_| to deliver decoded video frames and
// report video decoding status.
@@ -115,9 +107,7 @@ class MEDIA_EXPORT VideoRendererImpl
// Helper function that flushes the buffers when a Stop() or error occurs.
void DoStopOrError_Locked();
- // Runs |paint_cb_| with the next frame from |ready_frames_|, updating
- // |last_natural_size_| and running |size_changed_cb_| if the natural size
- // changes.
+ // Runs |paint_cb_| with the next frame from |ready_frames_|.
//
// A read is scheduled to replace the frame.
void PaintNextReadyFrame_Locked();
@@ -138,9 +128,7 @@ class MEDIA_EXPORT VideoRendererImpl
// |wait_duration|.
void UpdateStatsAndWait_Locked(base::TimeDelta wait_duration);
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<VideoRendererImpl> weak_factory_;
- base::WeakPtr<VideoRendererImpl> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Used for accessing data members.
base::Lock lock_;
@@ -148,19 +136,24 @@ class MEDIA_EXPORT VideoRendererImpl
// Provides video frames to VideoRendererImpl.
VideoFrameStream video_frame_stream_;
+ // Flag indicating low-delay mode.
+ bool low_delay_;
+
// Queue of incoming frames yet to be painted.
typedef std::deque<scoped_refptr<VideoFrame> > VideoFrameQueue;
VideoFrameQueue ready_frames_;
- // Keeps track of whether we received the end of stream buffer.
+ // Keeps track of whether we received the end of stream buffer and finished
+ // rendering.
bool received_end_of_stream_;
+ bool rendered_end_of_stream_;
// Used to signal |thread_| as frames are added to |ready_frames_|. Rule of thumb:
// always check |state_| to see if it was set to STOPPED after waking up!
base::ConditionVariable frame_available_;
// State transition Diagram of this class:
- // [kUninitialized] -------> [kError]
+ // [kUninitialized]
// |
// | Initialize()
// [kInitializing]
@@ -170,33 +163,27 @@ class MEDIA_EXPORT VideoRendererImpl
// | | Preroll() or upon ^
// | V got first frame [kFlushing]
// | [kPrerolling] ^
- // | | | Flush()
+ // | | |
// | V Got enough frames |
- // | [kPrerolled]---------------------->[kPaused]
- // | | Pause() ^
+ // | [kPrerolled]--------------------------|
+ // | | Flush() ^
// | V Play() |
// | [kPlaying]---------------------------|
- // | | Pause() ^
- // | V Receive EOF frame. | Pause()
- // | [kEnded]-----------------------------+
- // | ^
+ // | Flush() ^ Flush()
// | |
// +-----> [kStopped] [Any state other than]
- // [kUninitialized/kError]
+ // [ kUninitialized ]
// Simple state tracking variable.
enum State {
kUninitialized,
kInitializing,
kPrerolled,
- kPaused,
kFlushing,
kFlushed,
kPrerolling,
kPlaying,
- kEnded,
kStopped,
- kError,
};
State state_;
@@ -219,7 +206,6 @@ class MEDIA_EXPORT VideoRendererImpl
PipelineStatusCB init_cb_;
StatisticsCB statistics_cb_;
TimeCB max_time_cb_;
- NaturalSizeChangedCB size_changed_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
TimeDeltaCB get_time_cb_;
@@ -230,16 +216,6 @@ class MEDIA_EXPORT VideoRendererImpl
// Embedder callback for notifying that a new frame is available for painting.
PaintCB paint_cb_;
- // Callback to execute to inform the player if the decoded output is opaque.
- SetOpaqueCB set_opaque_cb_;
-
- // The last natural size |size_changed_cb_| was called with.
- //
- // TODO(scherkus): WebMediaPlayerImpl should track this instead of plumbing
- // this through Pipeline. The one tricky bit might be guaranteeing we deliver
- // the size information before we reach HAVE_METADATA.
- gfx::Size last_natural_size_;
-
// The timestamp of the last frame removed from the |ready_frames_| queue,
// either for calling |paint_cb_| or for dropping. Set to kNoTimestamp()
// during flushing.
@@ -250,6 +226,9 @@ class MEDIA_EXPORT VideoRendererImpl
int frames_decoded_;
int frames_dropped_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<VideoRendererImpl> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(VideoRendererImpl);
};
diff --git a/chromium/media/filters/video_renderer_impl_unittest.cc b/chromium/media/filters/video_renderer_impl_unittest.cc
index 0b07a751d37..355d8751741 100644
--- a/chromium/media/filters/video_renderer_impl_unittest.cc
+++ b/chromium/media/filters/video_renderer_impl_unittest.cc
@@ -10,6 +10,8 @@
#include "base/debug/stack_trace.h"
#include "base/message_loop/message_loop.h"
#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/timer/timer.h"
@@ -24,17 +26,28 @@
using ::testing::_;
using ::testing::AnyNumber;
+using ::testing::AtLeast;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;
+using ::testing::SaveArg;
using ::testing::StrictMock;
namespace media {
-static const int kFrameDurationInMs = 10;
-static const int kVideoDurationInMs = kFrameDurationInMs * 100;
+ACTION_P(RunClosure, closure) {
+ closure.Run();
+}
+
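+// Matches a VideoFrame whose timestamp equals |ms| milliseconds.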
+MATCHER_P(HasTimestamp, ms, "") {
+ *result_listener << "has timestamp " << arg->timestamp().InMilliseconds();
+ return arg->timestamp().InMilliseconds() == ms;
+}
+
+// Arbitrary value. Must be large enough to cover any timestamp used in the tests.
+static const int kVideoDurationInMs = 1000;
class VideoRendererImplTest : public ::testing::Test {
public:
@@ -44,90 +57,74 @@ class VideoRendererImplTest : public ::testing::Test {
ScopedVector<VideoDecoder> decoders;
decoders.push_back(decoder_);
- renderer_.reset(new VideoRendererImpl(
- message_loop_.message_loop_proxy(),
- decoders.Pass(),
- media::SetDecryptorReadyCB(),
- base::Bind(&VideoRendererImplTest::OnPaint, base::Unretained(this)),
- base::Bind(&VideoRendererImplTest::OnSetOpaque, base::Unretained(this)),
- true));
+ renderer_.reset(
+ new VideoRendererImpl(message_loop_.message_loop_proxy(),
+ decoders.Pass(),
+ media::SetDecryptorReadyCB(),
+ base::Bind(&StrictMock<MockDisplayCB>::Display,
+ base::Unretained(&mock_display_cb_)),
+ true));
demuxer_stream_.set_video_decoder_config(TestVideoConfig::Normal());
// We expect these to be called but we don't care how/when.
- EXPECT_CALL(demuxer_stream_, Read(_))
- .WillRepeatedly(RunCallback<0>(DemuxerStream::kOk,
- DecoderBuffer::CreateEOSBuffer()));
- EXPECT_CALL(*decoder_, Stop(_))
+ EXPECT_CALL(demuxer_stream_, Read(_)).WillRepeatedly(
+ RunCallback<0>(DemuxerStream::kOk,
+ scoped_refptr<DecoderBuffer>(new DecoderBuffer(0))));
+ EXPECT_CALL(*decoder_, Stop())
.WillRepeatedly(Invoke(this, &VideoRendererImplTest::StopRequested));
EXPECT_CALL(statistics_cb_object_, OnStatistics(_))
.Times(AnyNumber());
EXPECT_CALL(*this, OnTimeUpdate(_))
.Times(AnyNumber());
- EXPECT_CALL(*this, OnSetOpaque(_))
- .Times(AnyNumber());
}
virtual ~VideoRendererImplTest() {}
- // Callbacks passed into VideoRendererImpl().
- MOCK_CONST_METHOD1(OnSetOpaque, void(bool));
-
// Callbacks passed into Initialize().
MOCK_METHOD1(OnTimeUpdate, void(base::TimeDelta));
- MOCK_METHOD1(OnNaturalSizeChanged, void(const gfx::Size&));
void Initialize() {
- InitializeWithDuration(kVideoDurationInMs);
+ InitializeWithLowDelay(false);
}
- void InitializeWithDuration(int duration_ms) {
- duration_ = base::TimeDelta::FromMilliseconds(duration_ms);
-
+ void InitializeWithLowDelay(bool low_delay) {
// Monitor decodes from the decoder.
EXPECT_CALL(*decoder_, Decode(_, _))
- .WillRepeatedly(Invoke(this, &VideoRendererImplTest::FrameRequested));
+ .WillRepeatedly(Invoke(this, &VideoRendererImplTest::DecodeRequested));
EXPECT_CALL(*decoder_, Reset(_))
.WillRepeatedly(Invoke(this, &VideoRendererImplTest::FlushRequested));
InSequence s;
- EXPECT_CALL(*decoder_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
-
// Set playback rate before anything else happens.
renderer_->SetPlaybackRate(1.0f);
// Initialize; we shouldn't have any reads.
- InitializeRenderer(PIPELINE_OK);
-
- // We expect the video size to be set.
- EXPECT_CALL(*this,
- OnNaturalSizeChanged(TestVideoConfig::NormalCodedSize()));
-
- // Start prerolling.
- QueuePrerollFrames(0);
- Preroll(0, PIPELINE_OK);
+ InitializeRenderer(PIPELINE_OK, low_delay);
}
- void InitializeRenderer(PipelineStatus expected) {
+ void InitializeRenderer(PipelineStatus expected, bool low_delay) {
SCOPED_TRACE(base::StringPrintf("InitializeRenderer(%d)", expected));
WaitableMessageLoopEvent event;
- CallInitialize(event.GetPipelineStatusCB());
+ CallInitialize(event.GetPipelineStatusCB(), low_delay, expected);
event.RunAndWaitForStatus(expected);
}
- void CallInitialize(const PipelineStatusCB& status_cb) {
+ void CallInitialize(const PipelineStatusCB& status_cb,
+ bool low_delay,
+ PipelineStatus decoder_status) {
+ EXPECT_CALL(*decoder_, Initialize(_, _, _, _)).WillOnce(
+ DoAll(SaveArg<3>(&output_cb_), RunCallback<2>(decoder_status)));
renderer_->Initialize(
&demuxer_stream_,
+ low_delay,
status_cb,
base::Bind(&MockStatisticsCB::OnStatistics,
base::Unretained(&statistics_cb_object_)),
base::Bind(&VideoRendererImplTest::OnTimeUpdate,
base::Unretained(this)),
- base::Bind(&VideoRendererImplTest::OnNaturalSizeChanged,
- base::Unretained(this)),
ended_event_.GetClosure(),
error_event_.GetPipelineStatusCB(),
base::Bind(&VideoRendererImplTest::GetTime, base::Unretained(this)),
@@ -151,13 +148,6 @@ class VideoRendererImplTest : public ::testing::Test {
event.RunAndWaitForStatus(expected);
}
- void Pause() {
- SCOPED_TRACE("Pause()");
- WaitableMessageLoopEvent event;
- renderer_->Pause(event.GetClosure());
- event.RunAndWait();
- }
-
void Flush() {
SCOPED_TRACE("Flush()");
WaitableMessageLoopEvent event;
@@ -173,76 +163,58 @@ class VideoRendererImplTest : public ::testing::Test {
}
void Shutdown() {
- Pause();
Flush();
Stop();
}
- // Queues a VideoFrame with |next_frame_timestamp_|.
- void QueueNextFrame() {
- DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- DCHECK_LT(next_frame_timestamp_.InMicroseconds(),
- duration_.InMicroseconds());
-
- gfx::Size natural_size = TestVideoConfig::NormalCodedSize();
- scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
- VideoFrame::YV12, natural_size, gfx::Rect(natural_size), natural_size,
- next_frame_timestamp_);
- decode_results_.push_back(std::make_pair(
- VideoDecoder::kOk, frame));
- next_frame_timestamp_ +=
- base::TimeDelta::FromMilliseconds(kFrameDurationInMs);
- }
-
- void QueueEndOfStream() {
- DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- decode_results_.push_back(std::make_pair(
- VideoDecoder::kOk, VideoFrame::CreateEOSFrame()));
- }
-
- void QueueDecodeError() {
- DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- scoped_refptr<VideoFrame> null_frame;
- decode_results_.push_back(std::make_pair(
- VideoDecoder::kDecodeError, null_frame));
- }
-
- void QueueAbortedRead() {
- DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- scoped_refptr<VideoFrame> null_frame;
- decode_results_.push_back(std::make_pair(
- VideoDecoder::kOk, null_frame));
- }
-
- void QueuePrerollFrames(int timestamp_ms) {
- DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- next_frame_timestamp_ = base::TimeDelta();
- base::TimeDelta timestamp = base::TimeDelta::FromMilliseconds(timestamp_ms);
- while (next_frame_timestamp_ < timestamp) {
- QueueNextFrame();
- }
-
- // Queue the frame at |timestamp| plus additional ones for prerolling.
- for (int i = 0; i < limits::kMaxVideoFrames; ++i) {
- QueueNextFrame();
+ // Parses a string representation of video frames and generates corresponding
+ // VideoFrame objects in |decode_results_|.
+ //
+ // Syntax:
+ // nn - Queue a decoder buffer with timestamp nn * 1000us
+ // abort - Queue an aborted read
+ // error - Queue a decoder error
+ //
+ // Examples:
+ // A clip that is four frames long: "0 10 20 30"
+ // A clip that has a decode error: "60 70 error"
+ void QueueFrames(const std::string& str) {
+ std::vector<std::string> tokens;
+ base::SplitString(str, ' ', &tokens);
+ for (size_t i = 0; i < tokens.size(); ++i) {
+ if (tokens[i] == "abort") {
+ scoped_refptr<VideoFrame> null_frame;
+ decode_results_.push_back(
+ std::make_pair(VideoDecoder::kAborted, null_frame));
+ continue;
+ }
+
+ if (tokens[i] == "error") {
+ scoped_refptr<VideoFrame> null_frame;
+ decode_results_.push_back(
+ std::make_pair(VideoDecoder::kDecodeError, null_frame));
+ continue;
+ }
+
+ int timestamp_in_ms = 0;
+ if (base::StringToInt(tokens[i], &timestamp_in_ms)) {
+ gfx::Size natural_size = TestVideoConfig::NormalCodedSize();
+ scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
+ VideoFrame::YV12,
+ natural_size,
+ gfx::Rect(natural_size),
+ natural_size,
+ base::TimeDelta::FromMilliseconds(timestamp_in_ms));
+ decode_results_.push_back(std::make_pair(VideoDecoder::kOk, frame));
+ continue;
+ }
+
+ CHECK(false) << "Unrecognized decoder buffer token: " << tokens[i];
}
}
- void ResetCurrentFrame() {
- base::AutoLock l(lock_);
- current_frame_ = NULL;
- }
-
- scoped_refptr<VideoFrame> GetCurrentFrame() {
- base::AutoLock l(lock_);
- return current_frame_;
- }
-
- int GetCurrentTimestampInMs() {
- scoped_refptr<VideoFrame> frame = GetCurrentFrame();
- if (!frame.get())
- return -1;
- return frame->GetTimestamp().InMilliseconds();
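+ // Returns true while the fake decoder holds an unsatisfied DecodeCB.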
+ bool IsReadPending() {
+ return !decode_cb_.is_null();
}
void WaitForError(PipelineStatus expected) {
@@ -257,38 +229,58 @@ class VideoRendererImplTest : public ::testing::Test {
void WaitForPendingRead() {
SCOPED_TRACE("WaitForPendingRead()");
- if (!read_cb_.is_null())
+ if (!decode_cb_.is_null())
return;
- DCHECK(wait_for_pending_read_cb_.is_null());
+ DCHECK(wait_for_pending_decode_cb_.is_null());
WaitableMessageLoopEvent event;
- wait_for_pending_read_cb_ = event.GetClosure();
+ wait_for_pending_decode_cb_ = event.GetClosure();
event.RunAndWait();
- DCHECK(!read_cb_.is_null());
- DCHECK(wait_for_pending_read_cb_.is_null());
+ DCHECK(!decode_cb_.is_null());
+ DCHECK(wait_for_pending_decode_cb_.is_null());
}
void SatisfyPendingRead() {
- CHECK(!read_cb_.is_null());
+ CHECK(!decode_cb_.is_null());
CHECK(!decode_results_.empty());
- base::Closure closure = base::Bind(
- read_cb_, decode_results_.front().first,
- decode_results_.front().second);
-
- read_cb_.Reset();
+ // Post tasks for OutputCB and DecodeCB.
+ scoped_refptr<VideoFrame> frame = decode_results_.front().second;
+ if (frame)
+ message_loop_.PostTask(FROM_HERE, base::Bind(output_cb_, frame));
+ message_loop_.PostTask(
+ FROM_HERE, base::Bind(base::ResetAndReturn(&decode_cb_),
+ decode_results_.front().first));
decode_results_.pop_front();
+ }
+
+ void SatisfyPendingReadWithEndOfStream() {
+ DCHECK(!decode_cb_.is_null());
+
+ // Return EOS buffer to trigger EOS frame.
+ EXPECT_CALL(demuxer_stream_, Read(_))
+ .WillOnce(RunCallback<0>(DemuxerStream::kOk,
+ DecoderBuffer::CreateEOSBuffer()));
+
+ // Satisfy pending |decode_cb_| to trigger a new DemuxerStream::Read().
+ message_loop_.PostTask(
+ FROM_HERE,
+ base::Bind(base::ResetAndReturn(&decode_cb_), VideoDecoder::kOk));
- message_loop_.PostTask(FROM_HERE, closure);
+ WaitForPendingRead();
+
+ message_loop_.PostTask(
+ FROM_HERE,
+ base::Bind(base::ResetAndReturn(&decode_cb_), VideoDecoder::kOk));
}
void AdvanceTimeInMs(int time_ms) {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
base::AutoLock l(lock_);
time_ += base::TimeDelta::FromMilliseconds(time_ms);
- DCHECK_LE(time_.InMicroseconds(), duration_.InMicroseconds());
+ DCHECK_LE(time_.InMicroseconds(), GetDuration().InMicroseconds());
}
protected:
@@ -298,6 +290,13 @@ class VideoRendererImplTest : public ::testing::Test {
NiceMock<MockDemuxerStream> demuxer_stream_;
MockStatisticsCB statistics_cb_object_;
+ // Use StrictMock<T> to catch missing/extra display callbacks.
+ class MockDisplayCB {
+ public:
+ MOCK_METHOD1(Display, void(const scoped_refptr<VideoFrame>&));
+ };
+ StrictMock<MockDisplayCB> mock_display_cb_;
+
private:
base::TimeDelta GetTime() {
base::AutoLock l(lock_);
@@ -305,23 +304,18 @@ class VideoRendererImplTest : public ::testing::Test {
}
base::TimeDelta GetDuration() {
- return duration_;
- }
-
- void OnPaint(const scoped_refptr<VideoFrame>& frame) {
- base::AutoLock l(lock_);
- current_frame_ = frame;
+ return base::TimeDelta::FromMilliseconds(kVideoDurationInMs);
}
- void FrameRequested(const scoped_refptr<DecoderBuffer>& buffer,
- const VideoDecoder::DecodeCB& read_cb) {
+ void DecodeRequested(const scoped_refptr<DecoderBuffer>& buffer,
+ const VideoDecoder::DecodeCB& decode_cb) {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- CHECK(read_cb_.is_null());
- read_cb_ = read_cb;
+ CHECK(decode_cb_.is_null());
+ decode_cb_ = decode_cb;
// Wake up WaitForPendingRead() if needed.
- if (!wait_for_pending_read_cb_.is_null())
- base::ResetAndReturn(&wait_for_pending_read_cb_).Run();
+ if (!wait_for_pending_decode_cb_.is_null())
+ base::ResetAndReturn(&wait_for_pending_decode_cb_).Run();
if (decode_results_.empty())
return;
@@ -332,42 +326,39 @@ class VideoRendererImplTest : public ::testing::Test {
void FlushRequested(const base::Closure& callback) {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
decode_results_.clear();
- if (!read_cb_.is_null()) {
- QueueAbortedRead();
+ if (!decode_cb_.is_null()) {
+ QueueFrames("abort");
SatisfyPendingRead();
}
message_loop_.PostTask(FROM_HERE, callback);
}
- void StopRequested(const base::Closure& callback) {
+ void StopRequested() {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
decode_results_.clear();
- if (!read_cb_.is_null()) {
- QueueAbortedRead();
+ if (!decode_cb_.is_null()) {
+ QueueFrames("abort");
SatisfyPendingRead();
}
-
- message_loop_.PostTask(FROM_HERE, callback);
}
base::MessageLoop message_loop_;
- // Used to protect |time_| and |current_frame_|.
+ // Used to protect |time_|.
base::Lock lock_;
base::TimeDelta time_;
- scoped_refptr<VideoFrame> current_frame_;
// Used for satisfying reads.
- VideoDecoder::DecodeCB read_cb_;
+ VideoDecoder::OutputCB output_cb_;
+ VideoDecoder::DecodeCB decode_cb_;
base::TimeDelta next_frame_timestamp_;
- base::TimeDelta duration_;
WaitableMessageLoopEvent error_event_;
WaitableMessageLoopEvent ended_event_;
- // Run during FrameRequested() to unblock WaitForPendingRead().
- base::Closure wait_for_pending_read_cb_;
+ // Run during DecodeRequested() to unblock WaitForPendingRead().
+ base::Closure wait_for_pending_decode_cb_;
std::deque<std::pair<
VideoDecoder::Status, scoped_refptr<VideoFrame> > > decode_results_;
@@ -386,7 +377,14 @@ TEST_F(VideoRendererImplTest, StopWithoutInitialize) {
TEST_F(VideoRendererImplTest, Initialize) {
Initialize();
- EXPECT_EQ(0, GetCurrentTimestampInMs());
+ Shutdown();
+}
+
+TEST_F(VideoRendererImplTest, InitializeAndPreroll) {
+ Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
Shutdown();
}
@@ -396,9 +394,7 @@ static void ExpectNotCalled(PipelineStatus) {
}
TEST_F(VideoRendererImplTest, StopWhileInitializing) {
- EXPECT_CALL(*decoder_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- CallInitialize(base::Bind(&ExpectNotCalled));
+ CallInitialize(base::Bind(&ExpectNotCalled), false, PIPELINE_OK);
Stop();
// ~VideoRendererImpl() will CHECK() if we left anything initialized.
@@ -406,7 +402,6 @@ TEST_F(VideoRendererImplTest, StopWhileInitializing) {
TEST_F(VideoRendererImplTest, StopWhileFlushing) {
Initialize();
- Pause();
renderer_->Flush(base::Bind(&ExpectNotCalled, PIPELINE_OK));
Stop();
@@ -415,47 +410,31 @@ TEST_F(VideoRendererImplTest, StopWhileFlushing) {
TEST_F(VideoRendererImplTest, Play) {
Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
Play();
Shutdown();
}
-TEST_F(VideoRendererImplTest, EndOfStream_DefaultFrameDuration) {
- Initialize();
- Play();
-
- // Verify that the ended callback fires when the default last frame duration
- // has elapsed.
- int end_timestamp = kFrameDurationInMs * limits::kMaxVideoFrames +
- VideoRendererImpl::kMaxLastFrameDuration().InMilliseconds();
- EXPECT_LT(end_timestamp, kVideoDurationInMs);
-
- QueueEndOfStream();
- AdvanceTimeInMs(end_timestamp);
- WaitForEnded();
-
- Shutdown();
-}
-
TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
- int duration = kVideoDurationInMs + kFrameDurationInMs / 2;
- InitializeWithDuration(duration);
+ Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
Play();
- // Render all frames except for the last |limits::kMaxVideoFrames| frames
- // and deliver all the frames between the start and |duration|. The preroll
- // inside Initialize() makes this a little confusing, but |timestamp| is
- // the current render time and QueueNextFrame() delivers a frame with a
- // timestamp that is |timestamp| + limits::kMaxVideoFrames *
- // kFrameDurationInMs.
- int timestamp = kFrameDurationInMs;
- int end_timestamp = duration - limits::kMaxVideoFrames * kFrameDurationInMs;
- for (; timestamp < end_timestamp; timestamp += kFrameDurationInMs) {
- QueueNextFrame();
- }
+ // The next frame has a timestamp well past the duration. Its timestamp will
+ // be adjusted to match the duration of the video.
+ QueueFrames(base::IntToString(kVideoDurationInMs + 1000));
+ SatisfyPendingRead();
+ WaitForPendingRead();
// Queue the end of stream frame and wait for the last frame to be rendered.
- QueueEndOfStream();
- AdvanceTimeInMs(duration);
+ SatisfyPendingReadWithEndOfStream();
+
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(kVideoDurationInMs)));
+ AdvanceTimeInMs(kVideoDurationInMs);
WaitForEnded();
Shutdown();
@@ -463,95 +442,109 @@ TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
Play();
- QueueDecodeError();
- AdvanceTimeInMs(kVideoDurationInMs);
+ QueueFrames("error");
+ SatisfyPendingRead();
WaitForError(PIPELINE_ERROR_DECODE);
Shutdown();
}
TEST_F(VideoRendererImplTest, DecodeError_DuringPreroll) {
Initialize();
- Pause();
- Flush();
-
- QueueDecodeError();
- Preroll(kFrameDurationInMs * 6, PIPELINE_ERROR_DECODE);
+ QueueFrames("error");
+ Preroll(0, PIPELINE_ERROR_DECODE);
Shutdown();
}
TEST_F(VideoRendererImplTest, Preroll_Exact) {
Initialize();
- Pause();
- Flush();
- QueuePrerollFrames(kFrameDurationInMs * 6);
+ QueueFrames("50 60 70 80 90");
- Preroll(kFrameDurationInMs * 6, PIPELINE_OK);
- EXPECT_EQ(kFrameDurationInMs * 6, GetCurrentTimestampInMs());
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(60)));
+ Preroll(60, PIPELINE_OK);
Shutdown();
}
TEST_F(VideoRendererImplTest, Preroll_RightBefore) {
Initialize();
- Pause();
- Flush();
- QueuePrerollFrames(kFrameDurationInMs * 6);
+ QueueFrames("50 60 70 80 90");
- Preroll(kFrameDurationInMs * 6 - 1, PIPELINE_OK);
- EXPECT_EQ(kFrameDurationInMs * 5, GetCurrentTimestampInMs());
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(50)));
+ Preroll(59, PIPELINE_OK);
Shutdown();
}
TEST_F(VideoRendererImplTest, Preroll_RightAfter) {
Initialize();
- Pause();
- Flush();
- QueuePrerollFrames(kFrameDurationInMs * 6);
+ QueueFrames("50 60 70 80 90");
+
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(60)));
+ Preroll(61, PIPELINE_OK);
+ Shutdown();
+}
+
+TEST_F(VideoRendererImplTest, Preroll_LowDelay) {
+ // In low-delay mode only one frame is required to finish preroll.
+ InitializeWithLowDelay(true);
+ QueueFrames("0");
+
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
+ Play();
+
+ QueueFrames("10");
+ SatisfyPendingRead();
+
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(10)))
+ .WillOnce(RunClosure(event.GetClosure()));
+ AdvanceTimeInMs(10);
+ event.RunAndWait();
- Preroll(kFrameDurationInMs * 6 + 1, PIPELINE_OK);
- EXPECT_EQ(kFrameDurationInMs * 6, GetCurrentTimestampInMs());
Shutdown();
}
TEST_F(VideoRendererImplTest, PlayAfterPreroll) {
Initialize();
- Pause();
- Flush();
- QueuePrerollFrames(kFrameDurationInMs * 4);
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
+ Play();
- Preroll(kFrameDurationInMs * 4, PIPELINE_OK);
- EXPECT_EQ(kFrameDurationInMs * 4, GetCurrentTimestampInMs());
+ // Check that there is an outstanding Read() request.
+ EXPECT_TRUE(IsReadPending());
- Play();
- // Advance time past prerolled time to trigger a Read().
- AdvanceTimeInMs(5 * kFrameDurationInMs);
- WaitForPendingRead();
Shutdown();
}
TEST_F(VideoRendererImplTest, Rebuffer) {
Initialize();
-
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
Play();
// Advance time past the prerolled time to drain the ready frame queue.
- AdvanceTimeInMs(5 * kFrameDurationInMs);
+ AdvanceTimeInMs(50);
WaitForPendingRead();
- // Simulate a Pause/Preroll/Play rebuffer sequence.
- Pause();
-
+ // Simulate a Preroll/Play rebuffer sequence.
WaitableMessageLoopEvent event;
renderer_->Preroll(kNoTimestamp(),
event.GetPipelineStatusCB());
// Queue enough frames to satisfy preroll.
- for (int i = 0; i < limits::kMaxVideoFrames; ++i)
- QueueNextFrame();
-
+ QueueFrames("40 50 60 70");
SatisfyPendingRead();
+ // TODO(scherkus): We shouldn't display the next ready frame in a rebuffer
+ // situation, see http://crbug.com/365516
+ EXPECT_CALL(mock_display_cb_, Display(_)).Times(AtLeast(1));
+
event.RunAndWaitForStatus(PIPELINE_OK);
Play();
@@ -561,14 +554,21 @@ TEST_F(VideoRendererImplTest, Rebuffer) {
TEST_F(VideoRendererImplTest, Rebuffer_AlreadyHaveEnoughFrames) {
Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
// Queue an extra frame so that we'll have enough frames to satisfy
// preroll even after the first frame is painted.
- QueueNextFrame();
+ QueueFrames("40");
+ SatisfyPendingRead();
Play();
- // Simulate a Pause/Preroll/Play rebuffer sequence.
- Pause();
+ // Simulate a Preroll/Play rebuffer sequence.
+ //
+ // TODO(scherkus): We shouldn't display the next ready frame in a rebuffer
+ // situation, see http://crbug.com/365516
+ EXPECT_CALL(mock_display_cb_, Display(_)).Times(AtLeast(1));
WaitableMessageLoopEvent event;
renderer_->Preroll(kNoTimestamp(),
@@ -581,144 +581,25 @@ TEST_F(VideoRendererImplTest, Rebuffer_AlreadyHaveEnoughFrames) {
Shutdown();
}
-TEST_F(VideoRendererImplTest, GetCurrentFrame_Initialized) {
- Initialize();
- EXPECT_TRUE(GetCurrentFrame().get()); // Due to prerolling.
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, GetCurrentFrame_Playing) {
- Initialize();
- Play();
- EXPECT_TRUE(GetCurrentFrame().get());
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, GetCurrentFrame_Paused) {
- Initialize();
- Play();
- Pause();
- EXPECT_TRUE(GetCurrentFrame().get());
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, GetCurrentFrame_Flushed) {
- Initialize();
- Play();
- Pause();
-
- // Frame shouldn't be updated.
- ResetCurrentFrame();
- Flush();
- EXPECT_FALSE(GetCurrentFrame().get());
-
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, GetCurrentFrame_EndOfStream) {
- Initialize();
- Play();
- Pause();
- Flush();
-
- // Preroll only end of stream frames.
- QueueEndOfStream();
-
- // Frame shouldn't be updated.
- ResetCurrentFrame();
- Preroll(0, PIPELINE_OK);
- EXPECT_FALSE(GetCurrentFrame().get());
-
- // Start playing, we should immediately get notified of end of stream.
- Play();
- WaitForEnded();
-
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, GetCurrentFrame_Shutdown) {
- Initialize();
-
- // Frame shouldn't be updated.
- ResetCurrentFrame();
- Shutdown();
- EXPECT_FALSE(GetCurrentFrame().get());
-}
-
-// Stop() is called immediately during an error.
-TEST_F(VideoRendererImplTest, GetCurrentFrame_Error) {
- Initialize();
-
- // Frame shouldn't be updated.
- ResetCurrentFrame();
- Stop();
- EXPECT_FALSE(GetCurrentFrame().get());
-}
-
// Verify that a late decoder response doesn't break invariants in the renderer.
TEST_F(VideoRendererImplTest, StopDuringOutstandingRead) {
Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
+ Preroll(0, PIPELINE_OK);
Play();
- // Advance time a bit to trigger a Read().
- AdvanceTimeInMs(kFrameDurationInMs);
- WaitForPendingRead();
+ // Check that there is an outstanding Read() request.
+ EXPECT_TRUE(IsReadPending());
WaitableMessageLoopEvent event;
renderer_->Stop(event.GetClosure());
-
event.RunAndWait();
}
-TEST_F(VideoRendererImplTest, AbortPendingRead_Playing) {
- Initialize();
- Play();
-
- // Advance time a bit to trigger a Read().
- AdvanceTimeInMs(kFrameDurationInMs);
- WaitForPendingRead();
-
- QueueAbortedRead();
- SatisfyPendingRead();
-
- Pause();
- Flush();
- QueuePrerollFrames(kFrameDurationInMs * 6);
- Preroll(kFrameDurationInMs * 6, PIPELINE_OK);
- EXPECT_EQ(kFrameDurationInMs * 6, GetCurrentTimestampInMs());
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, AbortPendingRead_Flush) {
- Initialize();
- Play();
-
- // Advance time a bit to trigger a Read().
- AdvanceTimeInMs(kFrameDurationInMs);
- WaitForPendingRead();
-
- Pause();
- Flush();
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, AbortPendingRead_Preroll) {
- Initialize();
- Pause();
- Flush();
-
- QueueAbortedRead();
- Preroll(kFrameDurationInMs * 6, PIPELINE_OK);
- Shutdown();
-}
-
TEST_F(VideoRendererImplTest, VideoDecoder_InitFailure) {
InSequence s;
-
- EXPECT_CALL(*decoder_, Initialize(_, _))
- .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
- InitializeRenderer(DECODER_ERROR_NOT_SUPPORTED);
-
+ InitializeRenderer(DECODER_ERROR_NOT_SUPPORTED, false);
Stop();
}
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index e270335504a..b3d3dfd555f 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -6,18 +6,21 @@
#include <algorithm>
#include <string>
+#include <vector>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_byteorder.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/video_decoder_config.h"
@@ -30,6 +33,7 @@
#define VPX_CODEC_DISABLE_COMPAT 1
extern "C" {
#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_frame_buffer.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
}
@@ -67,14 +71,131 @@ static int GetThreadCount(const VideoDecoderConfig& config) {
return decode_threads;
}
+class VpxVideoDecoder::MemoryPool
+ : public base::RefCountedThreadSafe<VpxVideoDecoder::MemoryPool> {
+ public:
+ MemoryPool();
+
+ // Callback that will be called by libvpx when it needs a frame buffer.
+ // Parameters:
+ // |user_priv| Private data passed to libvpx (pointer to memory pool).
+ // |min_size| Minimum size needed by libvpx to decompress the next frame.
+ // |fb| Pointer to the frame buffer to update.
+ // Returns 0 on success. Returns < 0 on failure.
+ static int32 GetVP9FrameBuffer(void* user_priv, size_t min_size,
+ vpx_codec_frame_buffer* fb);
+
+ // Callback that will be called by libvpx when the frame buffer is no longer
+ // being used by libvpx. Parameters:
+ // |user_priv| Private data passed to libvpx (pointer to memory pool).
+ // |fb| Pointer to the frame buffer that's being released.
+ static int32 ReleaseVP9FrameBuffer(void *user_priv,
+ vpx_codec_frame_buffer *fb);
+
+ // Generates a "no_longer_needed" closure that holds a reference
+ // to this pool.
+ base::Closure CreateFrameCallback(void* fb_priv_data);
+
+ private:
+ friend class base::RefCountedThreadSafe<VpxVideoDecoder::MemoryPool>;
+ ~MemoryPool();
+
+ // Reference counted frame buffers used for VP9 decoding. Reference counting
+ // is done manually because both Chromium and libvpx have to release a
+ // buffer before it can be re-used.
+ struct VP9FrameBuffer {
+ VP9FrameBuffer() : ref_cnt(0) {}
+ std::vector<uint8> data;
+ uint32 ref_cnt;
+ };
+
+ // Gets the next available frame buffer for use by libvpx.
+ VP9FrameBuffer* GetFreeFrameBuffer(size_t min_size);
+
+ // Method that gets called when a VideoFrame that references this pool gets
+ // destroyed.
+ void OnVideoFrameDestroyed(VP9FrameBuffer* frame_buffer);
+
+ // Frame buffers to be used by libvpx for VP9 decoding.
+ std::vector<VP9FrameBuffer*> frame_buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryPool);
+};
+
+VpxVideoDecoder::MemoryPool::MemoryPool() {}
+
+VpxVideoDecoder::MemoryPool::~MemoryPool() {
+ STLDeleteElements(&frame_buffers_);
+}
+
+VpxVideoDecoder::MemoryPool::VP9FrameBuffer*
+ VpxVideoDecoder::MemoryPool::GetFreeFrameBuffer(size_t min_size) {
+ // Check if a free frame buffer exists.
+ size_t i = 0;
+ for (; i < frame_buffers_.size(); ++i) {
+ if (frame_buffers_[i]->ref_cnt == 0)
+ break;
+ }
+
+ if (i == frame_buffers_.size()) {
+ // Create a new frame buffer.
+ frame_buffers_.push_back(new VP9FrameBuffer());
+ }
+
+ // Resize the frame buffer if necessary.
+ if (frame_buffers_[i]->data.size() < min_size)
+ frame_buffers_[i]->data.resize(min_size);
+ return frame_buffers_[i];
+}
+
+int32 VpxVideoDecoder::MemoryPool::GetVP9FrameBuffer(
+ void* user_priv, size_t min_size, vpx_codec_frame_buffer* fb) {
+ DCHECK(user_priv);
+ DCHECK(fb);
+
+ VpxVideoDecoder::MemoryPool* memory_pool =
+ static_cast<VpxVideoDecoder::MemoryPool*>(user_priv);
+
+ VP9FrameBuffer* fb_to_use = memory_pool->GetFreeFrameBuffer(min_size);
+ if (fb_to_use == NULL)
+ return -1;
+
+ fb->data = &fb_to_use->data[0];
+ fb->size = fb_to_use->data.size();
+ ++fb_to_use->ref_cnt;
+
+ // Set the frame buffer's private data to point at the external frame buffer.
+ fb->priv = static_cast<void*>(fb_to_use);
+ return 0;
+}
+
+int32 VpxVideoDecoder::MemoryPool::ReleaseVP9FrameBuffer(
+ void *user_priv, vpx_codec_frame_buffer *fb) {
+ VP9FrameBuffer* frame_buffer = static_cast<VP9FrameBuffer*>(fb->priv);
+ --frame_buffer->ref_cnt;
+ return 0;
+}
+
+base::Closure VpxVideoDecoder::MemoryPool::CreateFrameCallback(
+ void* fb_priv_data) {
+ VP9FrameBuffer* frame_buffer = static_cast<VP9FrameBuffer*>(fb_priv_data);
+ ++frame_buffer->ref_cnt;
+ return BindToCurrentLoop(
+ base::Bind(&MemoryPool::OnVideoFrameDestroyed, this,
+ frame_buffer));
+}
+
+void VpxVideoDecoder::MemoryPool::OnVideoFrameDestroyed(
+ VP9FrameBuffer* frame_buffer) {
+ --frame_buffer->ref_cnt;
+}
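A minimal sketch of the buffer lifecycle the pool above implements; the call sequence is illustrative, assuming a single decoded frame. Each buffer has two logical owners (libvpx and the wrapping VideoFrame), so ref_cnt must return to zero before GetFreeFrameBuffer() will hand the slot out again:

// Illustrative lifecycle only, not part of this patch:
//   GetVP9FrameBuffer(pool, min_size, &fb);   // libvpx handout:  0 -> 1
//   pool->CreateFrameCallback(fb.priv);       // VideoFrame wrap: 1 -> 2
//   ReleaseVP9FrameBuffer(pool, &fb);         // libvpx done:     2 -> 1
//   OnVideoFrameDestroyed(frame_buffer);      // frame destroyed: 1 -> 0
// Only at ref_cnt == 0 does GetFreeFrameBuffer() treat the slot as free.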
+
VpxVideoDecoder::VpxVideoDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop)
- : message_loop_(message_loop),
- weak_factory_(this),
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner),
state_(kUninitialized),
vpx_codec_(NULL),
- vpx_codec_alpha_(NULL) {
-}
+ vpx_codec_alpha_(NULL) {}
VpxVideoDecoder::~VpxVideoDecoder() {
DCHECK_EQ(kUninitialized, state_);
@@ -82,14 +203,13 @@ VpxVideoDecoder::~VpxVideoDecoder() {
}
void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(config.IsValidConfig());
DCHECK(!config.is_encrypted());
DCHECK(decode_cb_.is_null());
- DCHECK(reset_cb_.is_null());
-
- weak_this_ = weak_factory_.GetWeakPtr();
if (!ConfigureDecoder(config)) {
status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
@@ -99,6 +219,7 @@ void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Success!
config_ = config;
state_ = kNormal;
+ output_cb_ = BindToCurrentLoop(output_cb);
status_cb.Run(PIPELINE_OK);
}
@@ -125,15 +246,12 @@ static vpx_codec_ctx* InitializeVpxContext(vpx_codec_ctx* context,
}
bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- bool can_handle = false;
- if (config.codec() == kCodecVP9)
- can_handle = true;
- if (!cmd_line->HasSwitch(switches::kDisableVp8AlphaPlayback) &&
- config.codec() == kCodecVP8 && config.format() == VideoFrame::YV12A) {
- can_handle = true;
- }
- if (!can_handle)
+ if (config.codec() != kCodecVP8 && config.codec() != kCodecVP9)
+ return false;
+
+ // In VP8 videos, only those with alpha are handled by VpxVideoDecoder. All
+ // other VP8 videos go to FFmpegVideoDecoder.
+ if (config.codec() == kCodecVP8 && config.format() != VideoFrame::YV12A)
return false;
CloseDecoder();
@@ -142,6 +260,19 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
if (!vpx_codec_)
return false;
+ // We use our own buffers for VP9 so that there is no need to copy data after
+ // decoding.
+ if (config.codec() == kCodecVP9) {
+ memory_pool_ = new MemoryPool();
+ if (vpx_codec_set_frame_buffer_functions(vpx_codec_,
+ &MemoryPool::GetVP9FrameBuffer,
+ &MemoryPool::ReleaseVP9FrameBuffer,
+ memory_pool_)) {
+ LOG(ERROR) << "Failed to configure external buffers.";
+ return false;
+ }
+ }
+
if (config.format() == VideoFrame::YV12A) {
vpx_codec_alpha_ = InitializeVpxContext(vpx_codec_alpha_, config);
if (!vpx_codec_alpha_)
@@ -156,6 +287,7 @@ void VpxVideoDecoder::CloseDecoder() {
vpx_codec_destroy(vpx_codec_);
delete vpx_codec_;
vpx_codec_ = NULL;
+ memory_pool_ = NULL;
}
if (vpx_codec_alpha_) {
vpx_codec_destroy(vpx_codec_alpha_);
@@ -166,7 +298,7 @@ void VpxVideoDecoder::CloseDecoder() {
void VpxVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!decode_cb.is_null());
CHECK_NE(state_, kUninitialized);
CHECK(decode_cb_.is_null()) << "Overlapping decodes are not supported.";
@@ -174,13 +306,13 @@ void VpxVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
decode_cb_ = BindToCurrentLoop(decode_cb);
if (state_ == kError) {
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
return;
}
// Signal completion without a frame if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
return;
}
@@ -188,68 +320,45 @@ void VpxVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
}
void VpxVideoDecoder::Reset(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(reset_cb_.is_null());
- reset_cb_ = BindToCurrentLoop(closure);
-
- // Defer the reset if a decode is pending.
- if (!decode_cb_.is_null())
- return;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_cb_.is_null());
- DoReset();
+ state_ = kNormal;
+ task_runner_->PostTask(FROM_HERE, closure);
}
-void VpxVideoDecoder::Stop(const base::Closure& closure) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- base::ScopedClosureRunner runner(BindToCurrentLoop(closure));
-
- if (state_ == kUninitialized)
- return;
-
- if (!decode_cb_.is_null()) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, NULL);
- // Reset is pending only when decode is pending.
- if (!reset_cb_.is_null())
- base::ResetAndReturn(&reset_cb_).Run();
- }
+void VpxVideoDecoder::Stop() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
state_ = kUninitialized;
}
-bool VpxVideoDecoder::HasAlpha() const {
- return vpx_codec_alpha_ != NULL;
-}
-
void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kDecodeFinished);
DCHECK_NE(state_, kError);
- DCHECK(reset_cb_.is_null());
DCHECK(!decode_cb_.is_null());
DCHECK(buffer);
// Transition to kDecodeFinished on the first end of stream buffer.
if (state_ == kNormal && buffer->end_of_stream()) {
state_ = kDecodeFinished;
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
return;
}
scoped_refptr<VideoFrame> video_frame;
if (!VpxDecode(buffer, &video_frame)) {
state_ = kError;
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
+ base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
return;
}
- // If we didn't get a frame we need more data.
- if (!video_frame.get()) {
- base::ResetAndReturn(&decode_cb_).Run(kNotEnoughData, NULL);
- return;
- }
+ base::ResetAndReturn(&decode_cb_).Run(kOk);
- base::ResetAndReturn(&decode_cb_).Run(kOk, video_frame);
+ if (video_frame)
+ output_cb_.Run(video_frame);
}
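This hunk completes the decoupling of decode status from frame delivery: |decode_cb_| now reports only kOk/kDecodeError, while frames reach the client through |output_cb_|. A hedged sketch of the resulting caller-side contract, with hypothetical callback names:

// Hypothetical caller (OnDecodeDone/OnFrame are illustrative names):
//   decoder->Initialize(config, low_delay, status_cb,
//                       base::Bind(&OnFrame));        // frames arrive here
//   decoder->Decode(buffer, base::Bind(&OnDecodeDone));
// OnDecodeDone(kOk) can run before OnFrame(), since both callbacks are
// posted through BindToCurrentLoop() rather than invoked synchronously.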
bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
@@ -321,29 +430,48 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
}
CopyVpxImageTo(vpx_image, vpx_image_alpha, video_frame);
- (*video_frame)->SetTimestamp(base::TimeDelta::FromMicroseconds(timestamp));
+ (*video_frame)->set_timestamp(base::TimeDelta::FromMicroseconds(timestamp));
return true;
}
-void VpxVideoDecoder::DoReset() {
- DCHECK(decode_cb_.is_null());
-
- state_ = kNormal;
- reset_cb_.Run();
- reset_cb_.Reset();
-}
-
void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
const struct vpx_image* vpx_image_alpha,
scoped_refptr<VideoFrame>* video_frame) {
CHECK(vpx_image);
CHECK(vpx_image->fmt == VPX_IMG_FMT_I420 ||
- vpx_image->fmt == VPX_IMG_FMT_YV12);
+ vpx_image->fmt == VPX_IMG_FMT_YV12 ||
+ vpx_image->fmt == VPX_IMG_FMT_I444);
+
+ VideoFrame::Format codec_format = VideoFrame::YV12;
+ int uv_rows = (vpx_image->d_h + 1) / 2;
+
+ if (vpx_image->fmt == VPX_IMG_FMT_I444) {
+ CHECK(!vpx_codec_alpha_);
+ codec_format = VideoFrame::YV24;
+ uv_rows = vpx_image->d_h;
+ } else if (vpx_codec_alpha_) {
+ codec_format = VideoFrame::YV12A;
+ }
gfx::Size size(vpx_image->d_w, vpx_image->d_h);
+ if (!vpx_codec_alpha_ && memory_pool_) {
+ *video_frame = VideoFrame::WrapExternalYuvData(
+ codec_format,
+ size, gfx::Rect(size), config_.natural_size(),
+ vpx_image->stride[VPX_PLANE_Y],
+ vpx_image->stride[VPX_PLANE_U],
+ vpx_image->stride[VPX_PLANE_V],
+ vpx_image->planes[VPX_PLANE_Y],
+ vpx_image->planes[VPX_PLANE_U],
+ vpx_image->planes[VPX_PLANE_V],
+ kNoTimestamp(),
+ memory_pool_->CreateFrameCallback(vpx_image->fb_priv));
+ return;
+ }
+
*video_frame = frame_pool_.CreateFrame(
- vpx_codec_alpha_ ? VideoFrame::YV12A : VideoFrame::YV12,
+ codec_format,
size,
gfx::Rect(size),
config_.natural_size(),
@@ -355,11 +483,11 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
video_frame->get());
CopyUPlane(vpx_image->planes[VPX_PLANE_U],
vpx_image->stride[VPX_PLANE_U],
- (vpx_image->d_h + 1) / 2,
+ uv_rows,
video_frame->get());
CopyVPlane(vpx_image->planes[VPX_PLANE_V],
vpx_image->stride[VPX_PLANE_V],
- (vpx_image->d_h + 1) / 2,
+ uv_rows,
video_frame->get());
if (!vpx_codec_alpha_)
return;
diff --git a/chromium/media/filters/vpx_video_decoder.h b/chromium/media/filters/vpx_video_decoder.h
index cc02e89aad0..22d119bea7e 100644
--- a/chromium/media/filters/vpx_video_decoder.h
+++ b/chromium/media/filters/vpx_video_decoder.h
@@ -6,7 +6,6 @@
#define MEDIA_FILTERS_VPX_VIDEO_DECODER_H_
#include "base/callback.h"
-#include "base/memory/weak_ptr.h"
#include "media/base/demuxer_stream.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
@@ -17,7 +16,7 @@ struct vpx_codec_ctx;
struct vpx_image;
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -29,17 +28,18 @@ namespace media {
class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
public:
explicit VpxVideoDecoder(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
virtual ~VpxVideoDecoder();
// VideoDecoder implementation.
virtual void Initialize(const VideoDecoderConfig& config,
- const PipelineStatusCB& status_cb) OVERRIDE;
+ bool low_delay,
+ const PipelineStatusCB& status_cb,
+ const OutputCB& output_cb) OVERRIDE;
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop(const base::Closure& closure) OVERRIDE;
- virtual bool HasAlpha() const OVERRIDE;
+ virtual void Stop() OVERRIDE;
private:
enum DecoderState {
@@ -60,27 +60,28 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
bool VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
scoped_refptr<VideoFrame>* video_frame);
- // Reset decoder and call |reset_cb_|.
- void DoReset();
-
void CopyVpxImageTo(const vpx_image* vpx_image,
const struct vpx_image* vpx_image_alpha,
scoped_refptr<VideoFrame>* video_frame);
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<VpxVideoDecoder> weak_factory_;
- base::WeakPtr<VpxVideoDecoder> weak_this_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DecoderState state_;
+ OutputCB output_cb_;
+
+ // TODO(xhwang): Merge DecodeBuffer() into Decode() and remove this.
DecodeCB decode_cb_;
- base::Closure reset_cb_;
VideoDecoderConfig config_;
vpx_codec_ctx* vpx_codec_;
vpx_codec_ctx* vpx_codec_alpha_;
+ // Memory pool used for VP9 decoding.
+ class MemoryPool;
+ scoped_refptr<MemoryPool> memory_pool_;
+
VideoFramePool frame_pool_;
DISALLOW_COPY_AND_ASSIGN(VpxVideoDecoder);
diff --git a/chromium/media/filters/wsola_internals.cc b/chromium/media/filters/wsola_internals.cc
index 45cdd8ffad5..9ce1f0d966d 100644
--- a/chromium/media/filters/wsola_internals.cc
+++ b/chromium/media/filters/wsola_internals.cc
@@ -84,19 +84,25 @@ void MultiChannelMovingBlockEnergies(const AudioBus* input,
}
// Fit the curve f(x) = a * x^2 + b * x + c such that
-// f(-1) = |y[0]|
-// f(0) = |y[1]|
-// f(1) = |y[2]|.
-void CubicInterpolation(const float* y_values,
- float* extremum,
- float* extremum_value) {
+// f(-1) = y[0]
+// f(0) = y[1]
+// f(1) = y[2]
+// and return the maximum, assuming that y[0] <= y[1] >= y[2].
+void QuadraticInterpolation(const float* y_values,
+ float* extremum,
+ float* extremum_value) {
float a = 0.5f * (y_values[2] + y_values[0]) - y_values[1];
float b = 0.5f * (y_values[2] - y_values[0]);
float c = y_values[1];
- DCHECK_NE(a, 0);
- *extremum = -b / (2.f * a);
- *extremum_value = a * (*extremum) * (*extremum) + b * (*extremum) + c;
+ if (a == 0.f) {
+ // The three points are collinear (within floating-point error).
+ *extremum = 0;
+ *extremum_value = y_values[1];
+ } else {
+ *extremum = -b / (2.f * a);
+ *extremum_value = a * (*extremum) * (*extremum) + b * (*extremum) + c;
+ }
}
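A quick worked instance of the fit, using sample values chosen purely for illustration: for y = {1, 4, 3} the parabola peaks slightly right of the middle sample, as expected since y[2] > y[0].

// Illustration (arbitrary test data, not from the patch):
//   float y[3] = {1.f, 4.f, 3.f};
//   float extremum, extremum_value;
//   QuadraticInterpolation(y, &extremum, &extremum_value);
// a = 0.5f * (3 + 1) - 4 = -2,  b = 0.5f * (3 - 1) = 1,  c = 4
// extremum       = -b / (2a)          = 0.25  (offset from middle sample)
// extremum_value = a*x^2 + b*x + c    = 4.125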
int DecimatedSearch(int decimation,
@@ -154,8 +160,8 @@ int DecimatedSearch(int decimation,
// estimate of candidate maximum.
float normalized_candidate_index;
float candidate_similarity;
- CubicInterpolation(similarity, &normalized_candidate_index,
- &candidate_similarity);
+ QuadraticInterpolation(similarity, &normalized_candidate_index,
+ &candidate_similarity);
int candidate_index = n - decimation + static_cast<int>(
normalized_candidate_index * decimation + 0.5f);
diff --git a/chromium/media/filters/wsola_internals.h b/chromium/media/filters/wsola_internals.h
index 55fff04d30b..13d2875cb31 100644
--- a/chromium/media/filters/wsola_internals.h
+++ b/chromium/media/filters/wsola_internals.h
@@ -38,19 +38,13 @@ MEDIA_EXPORT void MultiChannelMovingBlockEnergies(const AudioBus* input,
float* energy);
// Fit the curve f(x) = a * x^2 + b * x + c such that
-//
-// f(-1) = |y[0]|
-// f(0) = |y[1]|
-// f(1) = |y[2]|.
-//
-// Then compute the |extremum| point -b / (2*a) and |extremum_value|
-// b^2 / (4*a) - b^2 / (2*a) + c.
-//
-// It is not expected that this function is called with
-// y[0] == y[1] == y[2].
-MEDIA_EXPORT void CubicInterpolation(const float* y_values,
- float* extremum,
- float* extremum_value);
+// f(-1) = y[0]
+// f(0) = y[1]
+// f(1) = y[2]
+// and return the maximum, assuming that y[0] <= y[1] >= y[2].
+MEDIA_EXPORT void QuadraticInterpolation(const float* y_values,
+ float* extremum,
+ float* extremum_value);
// Search a subset of all candidate blocks. The search is performed every
// |decimation| frames. This reduces complexity by a factor of about
diff --git a/chromium/media/mp4/offset_byte_queue.cc b/chromium/media/formats/common/offset_byte_queue.cc
index a530150899b..a2b6994e40c 100644
--- a/chromium/media/mp4/offset_byte_queue.cc
+++ b/chromium/media/formats/common/offset_byte_queue.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/offset_byte_queue.h"
+#include "media/formats/common/offset_byte_queue.h"
#include "base/basictypes.h"
#include "base/logging.h"
diff --git a/chromium/media/mp4/offset_byte_queue.h b/chromium/media/formats/common/offset_byte_queue.h
index 9349b96088f..0996f073463 100644
--- a/chromium/media/mp4/offset_byte_queue.h
+++ b/chromium/media/formats/common/offset_byte_queue.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_OFFSET_BYTE_QUEUE_H_
-#define MEDIA_MP4_OFFSET_BYTE_QUEUE_H_
+#ifndef MEDIA_FORMATS_COMMON_OFFSET_BYTE_QUEUE_H_
+#define MEDIA_FORMATS_COMMON_OFFSET_BYTE_QUEUE_H_
#include "base/basictypes.h"
#include "media/base/byte_queue.h"
@@ -63,4 +63,4 @@ class MEDIA_EXPORT OffsetByteQueue {
} // namespace media
-#endif // MEDIA_MP4_MP4_STREAM_PARSER_H_
+#endif // MEDIA_FORMATS_COMMON_OFFSET_BYTE_QUEUE_H_
diff --git a/chromium/media/mp4/offset_byte_queue_unittest.cc b/chromium/media/formats/common/offset_byte_queue_unittest.cc
index b9afbc8e1bc..43d693a0b17 100644
--- a/chromium/media/mp4/offset_byte_queue_unittest.cc
+++ b/chromium/media/formats/common/offset_byte_queue_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,7 +6,7 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "media/mp4/offset_byte_queue.h"
+#include "media/formats/common/offset_byte_queue.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/formats/common/stream_parser_test_base.cc b/chromium/media/formats/common/stream_parser_test_base.cc
new file mode 100644
index 00000000000..1d5f4b9c0c2
--- /dev/null
+++ b/chromium/media/formats/common/stream_parser_test_base.cc
@@ -0,0 +1,128 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/common/stream_parser_test_base.h"
+
+#include "base/bind.h"
+#include "media/base/test_data_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static std::string BufferQueueToString(
+ const StreamParser::BufferQueue& buffers) {
+ std::stringstream ss;
+
+ ss << "{";
+ for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
+ itr != buffers.end();
+ ++itr) {
+ ss << " " << (*itr)->timestamp().InMilliseconds();
+ if ((*itr)->IsKeyframe())
+ ss << "K";
+ }
+ ss << " }";
+
+ return ss.str();
+}
+
+StreamParserTestBase::StreamParserTestBase(
+ scoped_ptr<StreamParser> stream_parser)
+ : parser_(stream_parser.Pass()) {
+ parser_->Init(
+ base::Bind(&StreamParserTestBase::OnInitDone, base::Unretained(this)),
+ base::Bind(&StreamParserTestBase::OnNewConfig, base::Unretained(this)),
+ base::Bind(&StreamParserTestBase::OnNewBuffers, base::Unretained(this)),
+ true,
+ base::Bind(&StreamParserTestBase::OnKeyNeeded, base::Unretained(this)),
+ base::Bind(&StreamParserTestBase::OnNewSegment, base::Unretained(this)),
+ base::Bind(&StreamParserTestBase::OnEndOfSegment, base::Unretained(this)),
+ LogCB());
+}
+
+StreamParserTestBase::~StreamParserTestBase() {}
+
+std::string StreamParserTestBase::ParseFile(const std::string& filename,
+ int append_bytes) {
+ results_stream_.clear();
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
+ EXPECT_TRUE(
+ AppendDataInPieces(buffer->data(), buffer->data_size(), append_bytes));
+ return results_stream_.str();
+}
+
+std::string StreamParserTestBase::ParseData(const uint8* data, size_t length) {
+ results_stream_.clear();
+ EXPECT_TRUE(AppendDataInPieces(data, length, length));
+ return results_stream_.str();
+}
+
+bool StreamParserTestBase::AppendDataInPieces(const uint8* data,
+ size_t length,
+ size_t piece_size) {
+ const uint8* start = data;
+ const uint8* end = data + length;
+ while (start < end) {
+ size_t append_size = std::min(piece_size, static_cast<size_t>(end - start));
+ if (!parser_->Parse(start, append_size))
+ return false;
+ start += append_size;
+ }
+ return true;
+}
+
+void StreamParserTestBase::OnInitDone(
+ bool success,
+ const StreamParser::InitParameters& params) {
+ EXPECT_TRUE(params.auto_update_timestamp_offset);
+ DVLOG(1) << __FUNCTION__ << "(" << success << ", "
+ << params.duration.InMilliseconds() << ", "
+ << params.auto_update_timestamp_offset << ")";
+}
+
+bool StreamParserTestBase::OnNewConfig(
+ const AudioDecoderConfig& audio_config,
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_config) {
+ DVLOG(1) << __FUNCTION__ << "(" << audio_config.IsValidConfig() << ", "
+ << video_config.IsValidConfig() << ")";
+ EXPECT_TRUE(audio_config.IsValidConfig());
+ EXPECT_FALSE(video_config.IsValidConfig());
+ last_audio_config_ = audio_config;
+ return true;
+}
+
+bool StreamParserTestBase::OnNewBuffers(
+ const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map) {
+ EXPECT_FALSE(audio_buffers.empty());
+ EXPECT_TRUE(video_buffers.empty());
+
+ // TODO(wolenetz/acolwell): Add text track support to more MSE parsers. See
+ // http://crbug.com/336926.
+ EXPECT_TRUE(text_map.empty());
+
+ const std::string buffers_str = BufferQueueToString(audio_buffers);
+ DVLOG(1) << __FUNCTION__ << " : " << buffers_str;
+ results_stream_ << buffers_str;
+ return true;
+}
+
+void StreamParserTestBase::OnKeyNeeded(const std::string& type,
+ const std::vector<uint8>& init_data) {
+ DVLOG(1) << __FUNCTION__ << "(" << type << ", " << init_data.size() << ")";
+}
+
+void StreamParserTestBase::OnNewSegment() {
+ DVLOG(1) << __FUNCTION__;
+ results_stream_ << "NewSegment";
+}
+
+void StreamParserTestBase::OnEndOfSegment() {
+ DVLOG(1) << __FUNCTION__;
+ results_stream_ << "EndOfSegment";
+}
+
+} // namespace media
diff --git a/chromium/media/formats/common/stream_parser_test_base.h b/chromium/media/formats/common/stream_parser_test_base.h
new file mode 100644
index 00000000000..eb31562dd7f
--- /dev/null
+++ b/chromium/media/formats/common/stream_parser_test_base.h
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_COMMON_STREAM_PARSER_TEST_BASE_H_
+#define MEDIA_FORMATS_COMMON_STREAM_PARSER_TEST_BASE_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/stream_parser.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/text_track_config.h"
+#include "media/base/video_decoder_config.h"
+
+namespace media {
+
+// Test helper for verifying StreamParser behavior.
+class StreamParserTestBase {
+ public:
+ explicit StreamParserTestBase(scoped_ptr<StreamParser> stream_parser);
+ virtual ~StreamParserTestBase();
+
+ protected:
+ // Chunks a given parser-appropriate file. Appends |append_bytes| at a time
+ // until the file is exhausted. Returns a coded string representing the
+ // segments and timestamps of the extracted frames.
+ //
+ // The start of each media segment is designated by "NewSegment" and the end
+ // of each segment by "EndOfSegment". Segments end when one or more frames
+ // are parsed from an append. If the append contains a partial frame, the
+ // segment will continue into the next append.
+ //
+ // Parsed frame(s) are represented as "{ xxK yyK zzK }", where xx, yy, and zz
+ // are the timestamps in milliseconds of each parsed frame. For example:
+ //
+ // "NewSegment{ 0K 23K 46K }EndOfSegment"
+ // "NewSegment{ 0K }{ 23K }{ 46K }EndOfSegment"
+ // "NewSegment{ 0K }{ 23K }EndOfSegmentNewSegment{ 46K }EndOfSegment"
+ //
+ std::string ParseFile(const std::string& filename, int append_bytes);
+
+ // Similar to ParseFile() except parses the given |data| in a single append of
+ // size |length|.
+ std::string ParseData(const uint8* data, size_t length);
+
+ // The last AudioDecoderConfig handed to OnNewConfig().
+ const AudioDecoderConfig& last_audio_config() const {
+ return last_audio_config_;
+ }
+
+ private:
+ bool AppendDataInPieces(const uint8* data, size_t length, size_t piece_size);
+ void OnInitDone(bool success, const StreamParser::InitParameters& params);
+ bool OnNewConfig(const AudioDecoderConfig& audio_config,
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_config);
+ bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map);
+ void OnKeyNeeded(const std::string& type,
+ const std::vector<uint8>& init_data);
+ void OnNewSegment();
+ void OnEndOfSegment();
+
+ scoped_ptr<StreamParser> parser_;
+ std::stringstream results_stream_;
+ AudioDecoderConfig last_audio_config_;
+
+ DISALLOW_COPY_AND_ASSIGN(StreamParserTestBase);
+};
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_COMMON_STREAM_PARSER_TEST_BASE_H_
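A minimal sketch of how a parser test is expected to build on this helper; the fixture name, parser type, file name, and expected string below are all illustrative assumptions, not part of the patch:

// Hypothetical fixture (illustrative only):
class MyAdtsParserTest : public StreamParserTestBase, public testing::Test {
 public:
  MyAdtsParserTest()
      : StreamParserTestBase(
            scoped_ptr<StreamParser>(new ADTSStreamParser())) {}
};

TEST_F(MyAdtsParserTest, Parse) {
  // Appends 512 bytes per Parse() call; the expected string is made up.
  EXPECT_EQ("NewSegment{ 0K 23K 46K }EndOfSegment",
            ParseFile("my_audio.adts", 512));
}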
diff --git a/chromium/media/mp2t/es_parser.h b/chromium/media/formats/mp2t/es_parser.h
index da06c5ef673..5297d321332 100644
--- a/chromium/media/mp2t/es_parser.h
+++ b/chromium/media/formats/mp2t/es_parser.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_ES_PARSER_H_
-#define MEDIA_MP2T_ES_PARSER_H_
+#ifndef MEDIA_FORMATS_MP2T_ES_PARSER_H_
+#define MEDIA_FORMATS_MP2T_ES_PARSER_H_
#include "base/basictypes.h"
#include "base/callback.h"
diff --git a/chromium/media/mp2t/es_parser_adts.cc b/chromium/media/formats/mp2t/es_parser_adts.cc
index 85de023e8fa..84ddf785ae9 100644
--- a/chromium/media/mp2t/es_parser_adts.cc
+++ b/chromium/media/formats/mp2t/es_parser_adts.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/es_parser_adts.h"
+#include "media/formats/mp2t/es_parser_adts.h"
#include <list>
@@ -14,44 +14,10 @@
#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
#include "media/base/stream_parser_buffer.h"
-#include "media/mp2t/mp2t_common.h"
-
-// Adts header is at least 7 bytes (can be 9 bytes).
-static const int kAdtsHeaderMinSize = 7;
-
-static const int adts_frequency_table[16] = {
- 96000,
- 88200,
- 64000,
- 48000,
- 44100,
- 32000,
- 24000,
- 22050,
- 16000,
- 12000,
- 11025,
- 8000,
- 7350,
- 0,
- 0,
- 0,
-};
-static const int kMaxSupportedFrequencyIndex = 12;
-
-static media::ChannelLayout adts_channel_layout[8] = {
- media::CHANNEL_LAYOUT_NONE,
- media::CHANNEL_LAYOUT_MONO,
- media::CHANNEL_LAYOUT_STEREO,
- media::CHANNEL_LAYOUT_SURROUND,
- media::CHANNEL_LAYOUT_4_0,
- media::CHANNEL_LAYOUT_5_0_BACK,
- media::CHANNEL_LAYOUT_5_1_BACK,
- media::CHANNEL_LAYOUT_7_1,
-};
-
-// Number of samples per frame.
-static const int kNumberSamplesPerAACFrame = 1024;
+#include "media/formats/mp2t/mp2t_common.h"
+#include "media/formats/mpeg/adts_constants.h"
+
+namespace media {
static int ExtractAdtsFrameSize(const uint8* adts_header) {
return ((static_cast<int>(adts_header[5]) >> 5) |
@@ -59,11 +25,11 @@ static int ExtractAdtsFrameSize(const uint8* adts_header) {
((static_cast<int>(adts_header[3]) & 0x3) << 11));
}
-static int ExtractAdtsFrequencyIndex(const uint8* adts_header) {
+static size_t ExtractAdtsFrequencyIndex(const uint8* adts_header) {
return ((adts_header[2] >> 2) & 0xf);
}
-static int ExtractAdtsChannelConfig(const uint8* adts_header) {
+static size_t ExtractAdtsChannelConfig(const uint8* adts_header) {
return (((adts_header[3] >> 6) & 0x3) |
((adts_header[2] & 0x1) << 2));
}
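For concreteness, a worked example of the bit extraction above, using an illustrative ADTS header for 44.1 kHz stereo AAC-LC (the byte values are an assumption chosen for the example, not taken from the patch):

// adts_header = { 0xFF, 0xF1, 0x50, 0x80, 0x20, 0xE0, ... }
// ExtractAdtsFrameSize:      (0xE0 >> 5) | (0x20 << 3) | ((0x80 & 0x3) << 11)
//                          =  7          |  256        |  0          =  263
// ExtractAdtsFrequencyIndex: (0x50 >> 2) & 0xf = 4          -> 44100 Hz
// ExtractAdtsChannelConfig:  ((0x80 >> 6) & 0x3) | ((0x50 & 0x1) << 2)
//                          =  2                             -> stereo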
@@ -87,7 +53,7 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
DCHECK_GE(pos, 0);
DCHECK_LE(pos, raw_es_size);
- int max_offset = raw_es_size - kAdtsHeaderMinSize;
+ int max_offset = raw_es_size - kADTSHeaderMinSize;
if (pos >= max_offset) {
// Do not change the position if:
// - max_offset < 0: not enough bytes to get a full header
@@ -108,7 +74,7 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
continue;
int frame_size = ExtractAdtsFrameSize(cur_buf);
- if (frame_size < kAdtsHeaderMinSize) {
+ if (frame_size < kADTSHeaderMinSize) {
// Too short to be an ADTS frame.
continue;
}
@@ -130,7 +96,6 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
return false;
}
-namespace media {
namespace mp2t {
EsParserAdts::EsParserAdts(
@@ -172,7 +137,7 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
<< " frame_size=" << frame_size;
DVLOG(LOG_LEVEL_ES)
<< "ADTS header: "
- << base::HexEncode(&raw_es[es_position], kAdtsHeaderMinSize);
+ << base::HexEncode(&raw_es[es_position], kADTSHeaderMinSize);
// Do not process the frame if this one is a partial frame.
int remaining_size = raw_es_size - es_position;
@@ -180,7 +145,7 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
break;
// Update the audio configuration if needed.
- DCHECK_GE(frame_size, kAdtsHeaderMinSize);
+ DCHECK_GE(frame_size, kADTSHeaderMinSize);
if (!UpdateAudioConfiguration(&raw_es[es_position]))
return false;
@@ -193,22 +158,26 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
base::TimeDelta current_pts = audio_timestamp_helper_->GetTimestamp();
base::TimeDelta frame_duration =
- audio_timestamp_helper_->GetFrameDuration(kNumberSamplesPerAACFrame);
+ audio_timestamp_helper_->GetFrameDuration(kSamplesPerAACFrame);
// Emit an audio frame.
bool is_key_frame = true;
+
+ // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
+ // type and allow multiple audio tracks. See https://crbug.com/341581.
scoped_refptr<StreamParserBuffer> stream_parser_buffer =
StreamParserBuffer::CopyFrom(
&raw_es[es_position],
frame_size,
- is_key_frame);
+ is_key_frame,
+ DemuxerStream::AUDIO, 0);
stream_parser_buffer->SetDecodeTimestamp(current_pts);
stream_parser_buffer->set_timestamp(current_pts);
stream_parser_buffer->set_duration(frame_duration);
emit_buffer_cb_.Run(stream_parser_buffer);
// Update the PTS of the next frame.
- audio_timestamp_helper_->AddFrames(kNumberSamplesPerAACFrame);
+ audio_timestamp_helper_->AddFrames(kSamplesPerAACFrame);
// Skip the current frame.
es_position += frame_size;
@@ -230,23 +199,24 @@ void EsParserAdts::Reset() {
}
bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
- int frequency_index = ExtractAdtsFrequencyIndex(adts_header);
- if (frequency_index > kMaxSupportedFrequencyIndex) {
+ size_t frequency_index = ExtractAdtsFrequencyIndex(adts_header);
+ if (frequency_index >= kADTSFrequencyTableSize) {
// Frequency indices 13 and 14 are reserved, while 15 means that the
// frequency is written explicitly in the stream (not supported).
return false;
}
- int channel_configuration = ExtractAdtsChannelConfig(adts_header);
- if (channel_configuration == 0) {
+ size_t channel_configuration = ExtractAdtsChannelConfig(adts_header);
+ if (channel_configuration == 0 ||
+ channel_configuration >= kADTSChannelLayoutTableSize) {
// TODO(damienv): Add support for inband channel configuration.
return false;
}
// TODO(damienv): support HE-AAC frequency doubling (SBR)
// based on the incoming ADTS profile.
- int samples_per_second = adts_frequency_table[frequency_index];
+ int samples_per_second = kADTSFrequencyTable[frequency_index];
int adts_profile = (adts_header[2] >> 6) & 0x3;
// The following code is written according to ISO 14496 Part 3 Table 1.11 and
@@ -257,12 +227,26 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
? std::min(2 * samples_per_second, 48000)
: samples_per_second;
+ // The following code is written according to ISO 14496 Part 3 Table 1.13 -
+ // Syntax of AudioSpecificConfig.
+ uint16 extra_data_int =
+ // Note: adts_profile is in the range [0,3], since the ADTS header only
+ // allows two bits for its value.
+ ((adts_profile + 1) << 11) +
+ (frequency_index << 7) +
+ (channel_configuration << 3);
+ uint8 extra_data[2] = {
+ static_cast<uint8>(extra_data_int >> 8),
+ static_cast<uint8>(extra_data_int & 0xff)
+ };
+
AudioDecoderConfig audio_decoder_config(
kCodecAAC,
kSampleFormatS16,
- adts_channel_layout[channel_configuration],
+ kADTSChannelLayoutTable[channel_configuration],
extended_samples_per_second,
- NULL, 0,
+ extra_data,
+ arraysize(extra_data),
false);
if (!audio_decoder_config.Matches(last_audio_decoder_config_)) {
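A worked instance of the extra-data packing above, assuming AAC-LC (adts_profile = 1, so audio object type 2), frequency_index = 4 (44100 Hz) and channel_configuration = 2 (stereo); these values are illustrative, not taken from the patch:

// extra_data_int = ((1 + 1) << 11) + (4 << 7) + (2 << 3)
//                =  4096           +  512     +  16       = 0x1210
// extra_data     = { 0x12, 0x10 }
// This is the canonical two-byte AudioSpecificConfig for
// AAC-LC / 44.1 kHz / stereo.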
diff --git a/chromium/media/mp2t/es_parser_adts.h b/chromium/media/formats/mp2t/es_parser_adts.h
index 0fc619fdfb8..e55eaf70e12 100644
--- a/chromium/media/mp2t/es_parser_adts.h
+++ b/chromium/media/formats/mp2t/es_parser_adts.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_ES_PARSER_ADTS_H_
-#define MEDIA_MP2T_ES_PARSER_ADTS_H_
+#ifndef MEDIA_FORMATS_MP2T_ES_PARSER_ADTS_H_
+#define MEDIA_FORMATS_MP2T_ES_PARSER_ADTS_H_
#include <list>
#include <utility>
@@ -14,7 +14,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/byte_queue.h"
-#include "media/mp2t/es_parser.h"
+#include "media/formats/mp2t/es_parser.h"
namespace media {
class AudioTimestampHelper;
diff --git a/chromium/media/formats/mp2t/es_parser_h264.cc b/chromium/media/formats/mp2t/es_parser_h264.cc
new file mode 100644
index 00000000000..691678ce81e
--- /dev/null
+++ b/chromium/media/formats/mp2t/es_parser_h264.cc
@@ -0,0 +1,332 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp2t/es_parser_h264.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_frame.h"
+#include "media/filters/h264_parser.h"
+#include "media/formats/common/offset_byte_queue.h"
+#include "media/formats/mp2t/mp2t_common.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+namespace mp2t {
+
+// An AUD NALU is at least 4 bytes:
+// 3 bytes for the start code + 1 byte for the NALU type.
+const int kMinAUDSize = 4;
+
+EsParserH264::EsParserH264(
+ const NewVideoConfigCB& new_video_config_cb,
+ const EmitBufferCB& emit_buffer_cb)
+ : new_video_config_cb_(new_video_config_cb),
+ emit_buffer_cb_(emit_buffer_cb),
+ es_queue_(new media::OffsetByteQueue()),
+ h264_parser_(new H264Parser()),
+ current_access_unit_pos_(0),
+ next_access_unit_pos_(0) {
+}
+
+EsParserH264::~EsParserH264() {
+}
+
+bool EsParserH264::Parse(const uint8* buf, int size,
+ base::TimeDelta pts,
+ base::TimeDelta dts) {
+ // Note: Parse is invoked each time a PES packet has been reassembled.
+ // Unfortunately, a PES packet does not necessarily map to an H264 access
+ // unit; the HLS recommendation is to use one PES per access unit, but some
+ // streams do not comply.
+
+ // HLS recommendation: "In AVC video, you should have both a DTS and a
+ // PTS in each PES header".
+ // However, some streams do not comply with this recommendation.
+ DVLOG_IF(1, pts == kNoTimestamp()) << "Each video PES should have a PTS";
+ if (pts != kNoTimestamp()) {
+ TimingDesc timing_desc;
+ timing_desc.pts = pts;
+ timing_desc.dts = (dts != kNoTimestamp()) ? dts : pts;
+
+ // Link the end of the byte queue with the incoming timing descriptor.
+ timing_desc_list_.push_back(
+ std::pair<int64, TimingDesc>(es_queue_->tail(), timing_desc));
+ }
+
+ // Add the incoming bytes to the ES queue.
+ es_queue_->Push(buf, size);
+ return ParseInternal();
+}
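The |timing_desc_list_| bookkeeping above pairs each PES timestamp with the byte offset at which its payload enters the ES queue; EmitFrame() later pops every entry at or before an access unit's start position and keeps the last one popped. A small illustrative trace, with offsets assumed for the example:

// Push PES #1: pts=0ms  at es_queue tail 0    -> list: {(0, 0ms)}
// Push PES #2: pts=33ms at es_queue tail 1880 -> list: {(0, 0ms), (1880, 33ms)}
// EmitFrame(access_unit_pos = 1500, ...):
//   pops (0, 0ms) only, since 1880 > 1500     -> frame gets pts 0ms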
+
+void EsParserH264::Flush() {
+ DVLOG(1) << "EsParserH264::Flush";
+ if (!FindAUD(&current_access_unit_pos_))
+ return;
+
+ // Simulate an additional AUD to force emitting the last access unit
+ // which is assumed to be complete at this point.
+ uint8 aud[] = { 0x00, 0x00, 0x01, 0x09 };
+ es_queue_->Push(aud, sizeof(aud));
+ ParseInternal();
+}
+
+void EsParserH264::Reset() {
+ DVLOG(1) << "EsParserH264::Reset";
+ es_queue_.reset(new media::OffsetByteQueue());
+ h264_parser_.reset(new H264Parser());
+ current_access_unit_pos_ = 0;
+ next_access_unit_pos_ = 0;
+ timing_desc_list_.clear();
+ last_video_decoder_config_ = VideoDecoderConfig();
+}
+
+bool EsParserH264::FindAUD(int64* stream_pos) {
+ while (true) {
+ const uint8* es;
+ int size;
+ es_queue_->PeekAt(*stream_pos, &es, &size);
+
+ // Find a start code and move the stream to the start code parser position.
+ off_t start_code_offset;
+ off_t start_code_size;
+ bool start_code_found = H264Parser::FindStartCode(
+ es, size, &start_code_offset, &start_code_size);
+ *stream_pos += start_code_offset;
+
+ // No H264 start code found or NALU type not available yet.
+ if (!start_code_found || start_code_offset + start_code_size >= size)
+ return false;
+
+ // Exit the parser loop when an AUD is found.
+ // Note: NALU header for an AUD:
+ // - nal_ref_idc must be 0
+ // - nal_unit_type must be H264NALU::kAUD
+ if (es[start_code_offset + start_code_size] == H264NALU::kAUD)
+ break;
+
+ // The current NALU is not an AUD, skip the start code
+ // and continue parsing the stream.
+ *stream_pos += start_code_size;
+ }
+
+ return true;
+}
+
+bool EsParserH264::ParseInternal() {
+ DCHECK_LE(es_queue_->head(), current_access_unit_pos_);
+ DCHECK_LE(current_access_unit_pos_, next_access_unit_pos_);
+ DCHECK_LE(next_access_unit_pos_, es_queue_->tail());
+
+ // Find the next AUD located at or after |current_access_unit_pos_|. This is
+ // needed since initially |current_access_unit_pos_| might not point to
+ // an AUD.
+ // Discard all the data before the updated |current_access_unit_pos_|
+ // since it won't be used again.
+ bool aud_found = FindAUD(&current_access_unit_pos_);
+ es_queue_->Trim(current_access_unit_pos_);
+ if (next_access_unit_pos_ < current_access_unit_pos_)
+ next_access_unit_pos_ = current_access_unit_pos_;
+
+ // Resume parsing later if no AUD was found.
+ if (!aud_found)
+ return true;
+
+ // Find the next AUD to make sure we have a complete access unit.
+ if (next_access_unit_pos_ < current_access_unit_pos_ + kMinAUDSize) {
+ next_access_unit_pos_ = current_access_unit_pos_ + kMinAUDSize;
+ DCHECK_LE(next_access_unit_pos_, es_queue_->tail());
+ }
+ if (!FindAUD(&next_access_unit_pos_))
+ return true;
+
+ // At this point, we know we have a full access unit.
+ bool is_key_frame = false;
+ int pps_id_for_access_unit = -1;
+
+ const uint8* es;
+ int size;
+ es_queue_->PeekAt(current_access_unit_pos_, &es, &size);
+ int access_unit_size = base::checked_cast<int, int64>(
+ next_access_unit_pos_ - current_access_unit_pos_);
+ DCHECK_LE(access_unit_size, size);
+ h264_parser_->SetStream(es, access_unit_size);
+
+ while (true) {
+ bool is_eos = false;
+ H264NALU nalu;
+ switch (h264_parser_->AdvanceToNextNALU(&nalu)) {
+ case H264Parser::kOk:
+ break;
+ case H264Parser::kInvalidStream:
+ case H264Parser::kUnsupportedStream:
+ return false;
+ case H264Parser::kEOStream:
+ is_eos = true;
+ break;
+ }
+ if (is_eos)
+ break;
+
+ switch (nalu.nal_unit_type) {
+ case H264NALU::kAUD: {
+ DVLOG(LOG_LEVEL_ES) << "NALU: AUD";
+ break;
+ }
+ case H264NALU::kSPS: {
+ DVLOG(LOG_LEVEL_ES) << "NALU: SPS";
+ int sps_id;
+ if (h264_parser_->ParseSPS(&sps_id) != H264Parser::kOk)
+ return false;
+ break;
+ }
+ case H264NALU::kPPS: {
+ DVLOG(LOG_LEVEL_ES) << "NALU: PPS";
+ int pps_id;
+ if (h264_parser_->ParsePPS(&pps_id) != H264Parser::kOk)
+ return false;
+ break;
+ }
+ case H264NALU::kIDRSlice:
+ case H264NALU::kNonIDRSlice: {
+ is_key_frame = (nalu.nal_unit_type == H264NALU::kIDRSlice);
+ DVLOG(LOG_LEVEL_ES) << "NALU: slice IDR=" << is_key_frame;
+ H264SliceHeader shdr;
+ if (h264_parser_->ParseSliceHeader(nalu, &shdr) != H264Parser::kOk) {
+ // Only accept an invalid SPS/PPS at the beginning when the stream
+ // does not necessarily start with an SPS/PPS/IDR.
+ // TODO(damienv): Should be able to differentiate a missing SPS/PPS
+ // from a slice header parsing error.
+ if (last_video_decoder_config_.IsValidConfig())
+ return false;
+ } else {
+ pps_id_for_access_unit = shdr.pic_parameter_set_id;
+ }
+ break;
+ }
+ default: {
+ DVLOG(LOG_LEVEL_ES) << "NALU: " << nalu.nal_unit_type;
+ }
+ }
+ }
+
+ // Emit a frame and move the stream to the next AUD position.
+ RCHECK(EmitFrame(current_access_unit_pos_, access_unit_size,
+ is_key_frame, pps_id_for_access_unit));
+ current_access_unit_pos_ = next_access_unit_pos_;
+ es_queue_->Trim(current_access_unit_pos_);
+
+ return true;
+}
+
+bool EsParserH264::EmitFrame(int64 access_unit_pos, int access_unit_size,
+ bool is_key_frame, int pps_id) {
+ // Get the access unit timing info.
+ TimingDesc current_timing_desc = {kNoTimestamp(), kNoTimestamp()};
+ while (!timing_desc_list_.empty() &&
+ timing_desc_list_.front().first <= access_unit_pos) {
+ current_timing_desc = timing_desc_list_.front().second;
+ timing_desc_list_.pop_front();
+ }
+ if (current_timing_desc.pts == kNoTimestamp())
+ return false;
+
+ // Update the video decoder configuration if needed.
+ const H264PPS* pps = h264_parser_->GetPPS(pps_id);
+ if (!pps) {
+ // Only accept an invalid PPS at the beginning when the stream
+ // does not necessarily start with an SPS/PPS/IDR.
+ // In this case, the initial frames are conveyed to the upper layer with
+ // an invalid VideoDecoderConfig and it's up to the upper layer
+ // to process this kind of frame accordingly.
+ if (last_video_decoder_config_.IsValidConfig())
+ return false;
+ } else {
+ const H264SPS* sps = h264_parser_->GetSPS(pps->seq_parameter_set_id);
+ if (!sps)
+ return false;
+ RCHECK(UpdateVideoDecoderConfig(sps));
+ }
+
+ // Emit a frame.
+ DVLOG(LOG_LEVEL_ES) << "Emit frame: stream_pos=" << current_access_unit_pos_
+ << " size=" << access_unit_size;
+ int es_size;
+ const uint8* es;
+ es_queue_->PeekAt(current_access_unit_pos_, &es, &es_size);
+ CHECK_GE(es_size, access_unit_size);
+
+ // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
+ // type and allow multiple video tracks. See https://crbug.com/341581.
+ scoped_refptr<StreamParserBuffer> stream_parser_buffer =
+ StreamParserBuffer::CopyFrom(
+ es,
+ access_unit_size,
+ is_key_frame,
+ DemuxerStream::VIDEO,
+ 0);
+ stream_parser_buffer->SetDecodeTimestamp(current_timing_desc.dts);
+ stream_parser_buffer->set_timestamp(current_timing_desc.pts);
+ emit_buffer_cb_.Run(stream_parser_buffer);
+ return true;
+}
+
+bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
+ // Set the SAR to 1 when not specified in the H264 stream.
+ int sar_width = (sps->sar_width == 0) ? 1 : sps->sar_width;
+ int sar_height = (sps->sar_height == 0) ? 1 : sps->sar_height;
+
+ // TODO(damienv): a map unit can be either 16 or 32 pixels high,
+ // although it's 16 pixels for progressive non-MBAFF frames.
+ gfx::Size coded_size((sps->pic_width_in_mbs_minus1 + 1) * 16,
+ (sps->pic_height_in_map_units_minus1 + 1) * 16);
+ gfx::Rect visible_rect(
+ sps->frame_crop_left_offset,
+ sps->frame_crop_top_offset,
+ (coded_size.width() - sps->frame_crop_right_offset) -
+ sps->frame_crop_left_offset,
+ (coded_size.height() - sps->frame_crop_bottom_offset) -
+ sps->frame_crop_top_offset);
+ if (visible_rect.width() <= 0 || visible_rect.height() <= 0)
+ return false;
+ gfx::Size natural_size(
+ (visible_rect.width() * sar_width) / sar_height,
+ visible_rect.height());
+ if (natural_size.width() == 0)
+ return false;
+
+ VideoDecoderConfig video_decoder_config(
+ kCodecH264,
+ VIDEO_CODEC_PROFILE_UNKNOWN,
+ VideoFrame::YV12,
+ coded_size,
+ visible_rect,
+ natural_size,
+ NULL, 0,
+ false);
+
+ if (!video_decoder_config.Matches(last_video_decoder_config_)) {
+ DVLOG(1) << "Profile IDC: " << sps->profile_idc;
+ DVLOG(1) << "Level IDC: " << sps->level_idc;
+ DVLOG(1) << "Pic width: " << coded_size.width();
+ DVLOG(1) << "Pic height: " << coded_size.height();
+ DVLOG(1) << "log2_max_frame_num_minus4: "
+ << sps->log2_max_frame_num_minus4;
+ DVLOG(1) << "SAR: width=" << sps->sar_width
+ << " height=" << sps->sar_height;
+ last_video_decoder_config_ = video_decoder_config;
+ new_video_config_cb_.Run(video_decoder_config);
+ }
+
+ return true;
+}
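A worked example of the geometry computation, assuming SPS values for a plain 1080p stream and taking the crop offsets as luma pixels, which is how the code above consumes them:

// pic_width_in_mbs_minus1        = 119 -> coded width  = 120 * 16 = 1920
// pic_height_in_map_units_minus1 = 67  -> coded height =  68 * 16 = 1088
// frame_crop_{left,top,right}_offset = 0, frame_crop_bottom_offset = 8
//   -> visible_rect = (0, 0, 1920, 1088 - 8) = (0, 0, 1920, 1080)
// sar_width = sar_height = 1 (or 0 in the stream, defaulted to 1 above)
//   -> natural_size = 1920 x 1080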
+
+} // namespace mp2t
+} // namespace media
+
diff --git a/chromium/media/mp2t/es_parser_h264.h b/chromium/media/formats/mp2t/es_parser_h264.h
index 5cb247e8961..bf4f4cc1d9c 100644
--- a/chromium/media/mp2t/es_parser_h264.h
+++ b/chromium/media/formats/mp2t/es_parser_h264.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_ES_PARSER_H264_H_
-#define MEDIA_MP2T_ES_PARSER_H264_H_
+#ifndef MEDIA_FORMATS_MP2T_ES_PARSER_H264_H_
+#define MEDIA_FORMATS_MP2T_ES_PARSER_H264_H_
#include <list>
#include <utility>
@@ -11,14 +11,16 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
-#include "media/base/byte_queue.h"
+#include "media/base/media_export.h"
#include "media/base/video_decoder_config.h"
-#include "media/mp2t/es_parser.h"
+#include "media/formats/mp2t/es_parser.h"
namespace media {
-class BitReader;
-class StreamParserBuffer;
+class H264Parser;
+struct H264SPS;
+class OffsetByteQueue;
}
namespace media {
@@ -29,7 +31,7 @@ namespace mp2t {
// Mpeg2 TS spec: "2.14 Carriage of Rec. ITU-T H.264 | ISO/IEC 14496-10 video"
// "Each AVC access unit shall contain an access unit delimiter NAL Unit;"
//
-class EsParserH264 : public EsParser {
+class MEDIA_EXPORT EsParserH264 : NON_EXPORTED_BASE(public EsParser) {
public:
typedef base::Callback<void(const VideoDecoderConfig&)> NewVideoConfigCB;
@@ -50,41 +52,40 @@ class EsParserH264 : public EsParser {
base::TimeDelta pts;
};
- // H264 parser.
- // It resumes parsing from byte position |es_pos_|.
- bool ParseInternal();
-
- // Emit a frame if a frame has been started earlier.
- void EmitFrameIfNeeded(int next_aud_pos);
+ // Find the AUD located at or after |*stream_pos|.
+ // Return true if an AUD is found.
+ // If found, |*stream_pos| corresponds to the position of the AUD start code
+ // in the stream. Otherwise, |*stream_pos| corresponds to the last position
+ // of the start code parser.
+ bool FindAUD(int64* stream_pos);
- // Start a new frame.
- // Note: if aud_pos < 0, clear the current frame.
- void StartFrame(int aud_pos);
+ // Resumes the H264 ES parsing.
+ // Return true if successful.
+ bool ParseInternal();
- // Discard |nbytes| of ES from the ES byte queue.
- void DiscardEs(int nbytes);
+ // Emit a frame whose position in the ES queue starts at |access_unit_pos|.
+ // Returns true if successful, false if no PTS is available for the frame.
+ bool EmitFrame(int64 access_unit_pos, int access_unit_size,
+ bool is_key_frame, int pps_id);
- // Parse a NAL / SPS.
- // Returns true if successful (compliant bitstream).
- bool NalParser(const uint8* buf, int size);
- bool ProcessSPS(const uint8* buf, int size);
+ // Update the video decoder config based on an H264 SPS.
+ // Return true if successful.
+ bool UpdateVideoDecoderConfig(const H264SPS* sps);
// Callbacks to pass the stream configuration and the frames.
NewVideoConfigCB new_video_config_cb_;
EmitBufferCB emit_buffer_cb_;
// Bytes of the ES stream that have not been emitted yet.
- ByteQueue es_byte_queue_;
- std::list<std::pair<int, TimingDesc> > timing_desc_list_;
+ scoped_ptr<media::OffsetByteQueue> es_queue_;
+ std::list<std::pair<int64, TimingDesc> > timing_desc_list_;
// H264 parser state.
- // Note: |current_access_unit_pos_| is pointing to an annexB syncword
- // while |current_nal_pos_| is pointing to the NAL unit
- // (i.e. does not include the annexB syncword).
- int es_pos_;
- int current_nal_pos_;
- int current_access_unit_pos_;
- bool is_key_frame_;
+ // - |current_access_unit_pos_| is pointing to an annexB syncword
+ // representing the first NALU of an H264 access unit.
+ scoped_ptr<H264Parser> h264_parser_;
+ int64 current_access_unit_pos_;
+ int64 next_access_unit_pos_;
// Last video decoder config.
VideoDecoderConfig last_video_decoder_config_;
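The rewritten header drops the byte-offset bookkeeping (|es_pos_|, |current_nal_pos_|) in favor of an OffsetByteQueue plus an explicit AUD search: FindAUD() locates an access unit delimiter, and the bytes between two consecutive AUDs form one access unit handed to EmitFrame(). A minimal standalone sketch of that segmentation idea follows; it is not the actual ParseInternal() body, and FindNextAud() is a hypothetical helper that only recognizes 3-byte start codes:

    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical helper: returns the offset of the next AUD start code
    // (00 00 01 09) at or after |pos|, or -1 if none is found.
    static int64_t FindNextAud(const uint8_t* es, size_t size, int64_t pos) {
      for (int64_t i = pos; i + 4 <= static_cast<int64_t>(size); ++i) {
        if (es[i] == 0x00 && es[i + 1] == 0x00 && es[i + 2] == 0x01 &&
            (es[i + 3] & 0x1f) == 0x09) {
          return i;
        }
      }
      return -1;
    }

    // An access unit spans from one AUD start code to the byte before the
    // next one; the last, still unbounded unit stays in the queue until more
    // data arrives or the stream is flushed.
    template <typename EmitFn>
    static void SegmentAccessUnits(const uint8_t* es, size_t size, EmitFn emit) {
      int64_t aud = FindNextAud(es, size, 0);
      while (aud >= 0) {
        int64_t next_aud = FindNextAud(es, size, aud + 4);
        if (next_aud < 0)
          break;  // Incomplete access unit: wait for the next AUD.
        emit(aud, next_aud - aud);  // Position and size of one access unit.
        aud = next_aud;
      }
    }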
diff --git a/chromium/media/formats/mp2t/es_parser_h264_unittest.cc b/chromium/media/formats/mp2t/es_parser_h264_unittest.cc
new file mode 100644
index 00000000000..2c13df0d853
--- /dev/null
+++ b/chromium/media/formats/mp2t/es_parser_h264_unittest.cc
@@ -0,0 +1,300 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/time/time.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/h264_parser.h"
+#include "media/formats/mp2t/es_parser_h264.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+class VideoDecoderConfig;
+
+namespace mp2t {
+
+namespace {
+
+struct Packet {
+ // Offset in the stream.
+ size_t offset;
+
+ // Size of the packet.
+ size_t size;
+
+ // Timestamp of the packet.
+ base::TimeDelta pts;
+};
+
+// Compute the size of each packet assuming packets are given in stream order
+// and the last packet covers the end of the stream.
+void ComputePacketSize(std::vector<Packet>& packets, size_t stream_size) {
+ for (size_t k = 0; k < packets.size() - 1; k++) {
+ DCHECK_GE(packets[k + 1].offset, packets[k].offset);
+ packets[k].size = packets[k + 1].offset - packets[k].offset;
+ }
+ packets[packets.size() - 1].size =
+ stream_size - packets[packets.size() - 1].offset;
+}
+
+// Get the offset of the start of each access unit.
+// This function assumes there is only one slice per access unit.
+// This is a very simplified access unit segmenter that is good
+// enough for unit tests.
+std::vector<Packet> GetAccessUnits(const uint8* stream, size_t stream_size) {
+ std::vector<Packet> access_units;
+ bool start_access_unit = true;
+
+ // In a first pass, retrieve the offsets of all access units.
+ size_t offset = 0;
+ while (true) {
+ // Find the next start code.
+ off_t relative_offset = 0;
+ off_t start_code_size = 0;
+ bool success = H264Parser::FindStartCode(
+ &stream[offset], stream_size - offset,
+ &relative_offset, &start_code_size);
+ if (!success)
+ break;
+ offset += relative_offset;
+
+ if (start_access_unit) {
+ Packet cur_access_unit;
+ cur_access_unit.offset = offset;
+ access_units.push_back(cur_access_unit);
+ start_access_unit = false;
+ }
+
+ // Get the NALU type.
+ offset += start_code_size;
+ if (offset >= stream_size)
+ break;
+ int nal_unit_type = stream[offset] & 0x1f;
+
+ // We assume there is only one slice per access unit.
+ if (nal_unit_type == H264NALU::kIDRSlice ||
+ nal_unit_type == H264NALU::kNonIDRSlice) {
+ start_access_unit = true;
+ }
+ }
+
+ ComputePacketSize(access_units, stream_size);
+ return access_units;
+}
+
+// Append an AUD NALU at the beginning of each access unit.
+// This is needed for streams which do not already have AUD NALUs.
+void AppendAUD(
+ const uint8* stream, size_t stream_size,
+ const std::vector<Packet>& access_units,
+ std::vector<uint8>& stream_with_aud,
+ std::vector<Packet>& access_units_with_aud) {
+ uint8 aud[] = { 0x00, 0x00, 0x01, 0x09 };
+ stream_with_aud.resize(stream_size + access_units.size() * sizeof(aud));
+ access_units_with_aud.resize(access_units.size());
+
+ size_t offset = 0;
+ for (size_t k = 0; k < access_units.size(); k++) {
+ access_units_with_aud[k].offset = offset;
+ access_units_with_aud[k].size = access_units[k].size + sizeof(aud);
+
+ memcpy(&stream_with_aud[offset], aud, sizeof(aud));
+ offset += sizeof(aud);
+
+ memcpy(&stream_with_aud[offset],
+ &stream[access_units[k].offset], access_units[k].size);
+ offset += access_units[k].size;
+ }
+}
+
+} // namespace
+
+class EsParserH264Test : public testing::Test {
+ public:
+ EsParserH264Test() : buffer_count_(0) {
+ }
+ virtual ~EsParserH264Test() {}
+
+ protected:
+ void LoadStream(const char* filename);
+ void GetPesTimestamps(std::vector<Packet>& pes_packets);
+ void ProcessPesPackets(const std::vector<Packet>& pes_packets,
+ bool force_timing);
+
+ // Stream with AUD NALUs.
+ std::vector<uint8> stream_;
+
+ // Access units of the stream with AUD NALUs.
+ std::vector<Packet> access_units_;
+
+ // Number of buffers generated while parsing the H264 stream.
+ size_t buffer_count_;
+
+ private:
+ void EmitBuffer(scoped_refptr<StreamParserBuffer> buffer);
+
+ void NewVideoConfig(const VideoDecoderConfig& config) {
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(EsParserH264Test);
+};
+
+void EsParserH264Test::LoadStream(const char* filename) {
+ base::FilePath file_path = GetTestDataFilePath(filename);
+
+ base::MemoryMappedFile stream_without_aud;
+ ASSERT_TRUE(stream_without_aud.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ // The input file does not have AUDs.
+ std::vector<Packet> access_units_without_aud = GetAccessUnits(
+ stream_without_aud.data(), stream_without_aud.length());
+ ASSERT_GT(access_units_without_aud.size(), 0u);
+ AppendAUD(stream_without_aud.data(), stream_without_aud.length(),
+ access_units_without_aud,
+ stream_, access_units_);
+
+ // Generate some timestamps based on a 25fps stream.
+ for (size_t k = 0; k < access_units_.size(); k++)
+ access_units_[k].pts = base::TimeDelta::FromMilliseconds(k * 40u);
+}
+
+void EsParserH264Test::GetPesTimestamps(std::vector<Packet>& pes_packets) {
+ // Default: set to a negative timestamp so it can be differentiated from
+ // real timestamps.
+ // Note: kNoTimestamp() is not used here since it already has a special
+ // meaning in EsParserH264. The negative timestamps should ultimately be
+ // discarded by the H264 parser since they are not relevant.
+ for (size_t k = 0; k < pes_packets.size(); k++) {
+ pes_packets[k].pts = base::TimeDelta::FromMilliseconds(-1);
+ }
+
+ // Set a valid timestamp for PES packets which include the start
+ // of an H264 access unit.
+ size_t pes_idx = 0;
+ for (size_t k = 0; k < access_units_.size(); k++) {
+ for (; pes_idx < pes_packets.size(); pes_idx++) {
+ size_t pes_start = pes_packets[pes_idx].offset;
+ size_t pes_end = pes_packets[pes_idx].offset + pes_packets[pes_idx].size;
+ if (pes_start <= access_units_[k].offset &&
+ pes_end > access_units_[k].offset) {
+ pes_packets[pes_idx].pts = access_units_[k].pts;
+ break;
+ }
+ }
+ }
+}
+
+void EsParserH264Test::ProcessPesPackets(
+ const std::vector<Packet>& pes_packets,
+ bool force_timing) {
+ EsParserH264 es_parser(
+ base::Bind(&EsParserH264Test::NewVideoConfig, base::Unretained(this)),
+ base::Bind(&EsParserH264Test::EmitBuffer, base::Unretained(this)));
+
+ for (size_t k = 0; k < pes_packets.size(); k++) {
+ size_t cur_pes_offset = pes_packets[k].offset;
+ size_t cur_pes_size = pes_packets[k].size;
+
+ base::TimeDelta pts = kNoTimestamp();
+ base::TimeDelta dts = kNoTimestamp();
+ if (pes_packets[k].pts >= base::TimeDelta() || force_timing)
+ pts = pes_packets[k].pts;
+
+ ASSERT_TRUE(
+ es_parser.Parse(&stream_[cur_pes_offset], cur_pes_size, pts, dts));
+ }
+ es_parser.Flush();
+}
+
+void EsParserH264Test::EmitBuffer(scoped_refptr<StreamParserBuffer> buffer) {
+ ASSERT_LT(buffer_count_, access_units_.size());
+ EXPECT_EQ(buffer->timestamp(), access_units_[buffer_count_].pts);
+ buffer_count_++;
+}
+
+TEST_F(EsParserH264Test, OneAccessUnitPerPes) {
+ LoadStream("bear.h264");
+
+ // One to one equivalence between PES packets and access units.
+ std::vector<Packet> pes_packets(access_units_);
+ GetPesTimestamps(pes_packets);
+
+ // Process each PES packet.
+ ProcessPesPackets(pes_packets, false);
+ EXPECT_EQ(buffer_count_, access_units_.size());
+}
+
+TEST_F(EsParserH264Test, NonAlignedPesPacket) {
+ LoadStream("bear.h264");
+
+ // Generate the PES packets.
+ std::vector<Packet> pes_packets;
+ Packet cur_pes_packet;
+ cur_pes_packet.offset = 0;
+ for (size_t k = 0; k < access_units_.size(); k++) {
+ pes_packets.push_back(cur_pes_packet);
+
+ // The current PES packet includes the remaining bytes of the previous
+ // access unit and some bytes of the current access unit
+ // (487 bytes in this unit test but no more than the current access unit
+ // size).
+ cur_pes_packet.offset = access_units_[k].offset +
+ std::min<size_t>(487u, access_units_[k].size);
+ }
+ ComputePacketSize(pes_packets, stream_.size());
+ GetPesTimestamps(pes_packets);
+
+ // Process each PES packet.
+ ProcessPesPackets(pes_packets, false);
+ EXPECT_EQ(buffer_count_, access_units_.size());
+}
+
+TEST_F(EsParserH264Test, SeveralPesPerAccessUnit) {
+ LoadStream("bear.h264");
+
+ // Get the minimum size of an access unit.
+ size_t min_access_unit_size = stream_.size();
+ for (size_t k = 0; k < access_units_.size(); k++) {
+ if (min_access_unit_size >= access_units_[k].size)
+ min_access_unit_size = access_units_[k].size;
+ }
+
+ // Use a small PES packet size or the minimum access unit size
+ // if it is even smaller.
+ size_t pes_size = 512;
+ if (min_access_unit_size < pes_size)
+ pes_size = min_access_unit_size;
+
+ std::vector<Packet> pes_packets;
+ Packet cur_pes_packet;
+ cur_pes_packet.offset = 0;
+ while (cur_pes_packet.offset < stream_.size()) {
+ pes_packets.push_back(cur_pes_packet);
+ cur_pes_packet.offset += pes_size;
+ }
+ ComputePacketSize(pes_packets, stream_.size());
+ GetPesTimestamps(pes_packets);
+
+ // Process each PES packet.
+ ProcessPesPackets(pes_packets, false);
+ EXPECT_EQ(buffer_count_, access_units_.size());
+
+ // Process PES packets forcing timings for each PES packet.
+ buffer_count_ = 0;
+ ProcessPesPackets(pes_packets, true);
+ EXPECT_EQ(buffer_count_, access_units_.size());
+}
+
+} // namespace mp2t
+} // namespace media
+
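A quick worked example of the PES timestamping scheme in GetPesTimestamps() above, with hypothetical numbers: only a PES packet that contains the start of an access unit carries a real PTS; every other packet keeps the -1 ms sentinel. A runnable sketch of just that mapping:

    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical access unit start offsets and a fixed 512-byte PES size.
      const std::vector<int> au_offsets = {0, 700, 1400};
      const int kPesSize = 512;
      for (size_t k = 0; k < au_offsets.size(); ++k) {
        // AU starts at 0, 700 and 1400 fall into PES packets 0, 1 and 2.
        std::printf("AU %zu gets its PTS from PES packet %d\n",
                    k, au_offsets[k] / kPesSize);
      }
      return 0;
    }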
diff --git a/chromium/media/mp2t/mp2t_common.h b/chromium/media/formats/mp2t/mp2t_common.h
index 7bc8d7b3247..64446fb454e 100644
--- a/chromium/media/mp2t/mp2t_common.h
+++ b/chromium/media/formats/mp2t/mp2t_common.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_MP2T_COMMON_H_
-#define MEDIA_MP2T_MP2T_COMMON_H_
+#ifndef MEDIA_FORMATS_MP2T_MP2T_COMMON_H_
+#define MEDIA_FORMATS_MP2T_MP2T_COMMON_H_
#define LOG_LEVEL_TS 5
#define LOG_LEVEL_PES 4
diff --git a/chromium/media/mp2t/mp2t_stream_parser.cc b/chromium/media/formats/mp2t/mp2t_stream_parser.cc
index 4a22f37d576..48497559d6a 100644
--- a/chromium/media/mp2t/mp2t_stream_parser.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser.cc
@@ -1,10 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/mp2t_stream_parser.h"
+#include "media/formats/mp2t/mp2t_stream_parser.h"
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/stl_util.h"
#include "media/base/audio_decoder_config.h"
@@ -12,15 +13,15 @@
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
-#include "media/mp2t/es_parser.h"
-#include "media/mp2t/es_parser_adts.h"
-#include "media/mp2t/es_parser_h264.h"
-#include "media/mp2t/mp2t_common.h"
-#include "media/mp2t/ts_packet.h"
-#include "media/mp2t/ts_section.h"
-#include "media/mp2t/ts_section_pat.h"
-#include "media/mp2t/ts_section_pes.h"
-#include "media/mp2t/ts_section_pmt.h"
+#include "media/formats/mp2t/es_parser.h"
+#include "media/formats/mp2t/es_parser_adts.h"
+#include "media/formats/mp2t/es_parser_h264.h"
+#include "media/formats/mp2t/mp2t_common.h"
+#include "media/formats/mp2t/ts_packet.h"
+#include "media/formats/mp2t/ts_section.h"
+#include "media/formats/mp2t/ts_section_pat.h"
+#include "media/formats/mp2t/ts_section_pes.h"
+#include "media/formats/mp2t/ts_section_pmt.h"
namespace media {
namespace mp2t {
@@ -168,7 +169,7 @@ void Mp2tStreamParser::Init(
const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& /* text_cb */ ,
+ bool /* ignore_text_tracks */ ,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -210,6 +211,7 @@ void Mp2tStreamParser::Flush() {
// stream parser already involves the end of the current segment.
segment_started_ = false;
first_video_frame_in_segment_ = true;
+ discarded_frames_dts_.clear();
// Remove any bytes left in the TS buffer.
// (i.e. any partial TS packet => less than 188 bytes).
@@ -484,7 +486,8 @@ bool Mp2tStreamParser::FinishInitializationIfNeeded() {
// For Mpeg2 TS, the duration is not known.
DVLOG(1) << "Mpeg2TS stream parser initialization done";
- init_cb_.Run(true, kInfiniteDuration());
+ base::ResetAndReturn(&init_cb_)
+ .Run(true, InitParameters(kInfiniteDuration()));
is_initialized_ = true;
return true;
@@ -537,23 +540,27 @@ void Mp2tStreamParser::OnEmitVideoBuffer(
stream_parser_buffer->SetDecodeTimestamp(
stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
- // Ignore the incoming buffer if it is not associated with any config.
- if (buffer_queue_chain_.empty()) {
- DVLOG(1) << "Ignoring video buffer with no corresponding video config:"
+ // Discard the incoming buffer:
+ // - if it is not associated with any config,
+ // - or if it is a non-key frame and the new segment has no key frame yet.
+ if (buffer_queue_chain_.empty() ||
+ (first_video_frame_in_segment_ && !stream_parser_buffer->IsKeyframe())) {
+ DVLOG(1) << "Discard video buffer:"
<< " keyframe=" << stream_parser_buffer->IsKeyframe()
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
+ if (discarded_frames_dts_.empty() ||
+ discarded_frames_min_pts_ > stream_parser_buffer->timestamp()) {
+ discarded_frames_min_pts_ = stream_parser_buffer->timestamp();
+ }
+ discarded_frames_dts_.push_back(
+ stream_parser_buffer->GetDecodeTimestamp());
return;
}
- // A segment cannot start with a non key frame.
- // Ignore the frame if that's the case.
- if (first_video_frame_in_segment_ && !stream_parser_buffer->IsKeyframe()) {
- DVLOG(1) << "Ignoring non-key frame:"
- << " dts="
- << stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
- return;
- }
+ // Fill the gap created by frames that have been discarded.
+ if (!discarded_frames_dts_.empty())
+ FillVideoGap(stream_parser_buffer);
first_video_frame_in_segment_ = false;
buffer_queue_chain_.back().video_queue.push_back(stream_parser_buffer);
@@ -575,6 +582,12 @@ bool Mp2tStreamParser::EmitRemainingBuffers() {
VideoDecoderConfig last_video_config =
buffer_queue_chain_.back().video_config;
+ // Not all the configs are available yet: more data is needed.
+ if (selected_audio_pid_ >= 0 && !last_audio_config.IsValidConfig())
+ return true;
+ if (selected_video_pid_ >= 0 && !last_video_config.IsValidConfig())
+ return true;
+
// Buffer emission.
while (!buffer_queue_chain_.empty()) {
// Start a segment if needed.
@@ -595,10 +608,12 @@ bool Mp2tStreamParser::EmitRemainingBuffers() {
}
// Add buffers.
+ TextBufferQueueMap empty_text_map;
if (!queue_with_config.audio_queue.empty() ||
!queue_with_config.video_queue.empty()) {
if (!new_buffers_cb_.Run(queue_with_config.audio_queue,
- queue_with_config.video_queue)) {
+ queue_with_config.video_queue,
+ empty_text_map)) {
return false;
}
}
@@ -615,6 +630,33 @@ bool Mp2tStreamParser::EmitRemainingBuffers() {
return true;
}
+void Mp2tStreamParser::FillVideoGap(
+ const scoped_refptr<StreamParserBuffer>& stream_parser_buffer) {
+ DCHECK(!buffer_queue_chain_.empty());
+ DCHECK(!discarded_frames_dts_.empty());
+ DCHECK(stream_parser_buffer->IsKeyframe());
+
+ // PTS is interpolated between the min PTS of discarded frames
+ // and the PTS of the first valid buffer.
+ base::TimeDelta pts = discarded_frames_min_pts_;
+ base::TimeDelta pts_delta =
+ (stream_parser_buffer->timestamp() - pts) / discarded_frames_dts_.size();
+
+ while (!discarded_frames_dts_.empty()) {
+ scoped_refptr<StreamParserBuffer> frame =
+ StreamParserBuffer::CopyFrom(
+ stream_parser_buffer->data(),
+ stream_parser_buffer->data_size(),
+ stream_parser_buffer->IsKeyframe(),
+ stream_parser_buffer->type(),
+ stream_parser_buffer->track_id());
+ frame->SetDecodeTimestamp(discarded_frames_dts_.front());
+ frame->set_timestamp(pts);
+ buffer_queue_chain_.back().video_queue.push_back(frame);
+ pts += pts_delta;
+ discarded_frames_dts_.pop_front();
+ }
+}
+
} // namespace mp2t
} // namespace media
-
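The PTS interpolation in FillVideoGap() above is easiest to see with concrete numbers (hypothetical values, not taken from the test streams): three discarded frames, a minimum discarded PTS of 0 ms, and the first valid key frame at 120 ms give pts_delta = (120 - 0) / 3 = 40 ms, so the three duplicated key frames are stamped 0 ms, 40 ms and 80 ms while reusing the discarded frames' DTS. A runnable sketch of just that arithmetic:

    #include <cstdint>
    #include <cstdio>
    #include <deque>

    int main() {
      std::deque<int64_t> discarded_dts = {33, 66, 100};  // ms, hypothetical.
      int64_t pts = 0;                     // Min PTS among discarded frames (ms).
      const int64_t key_frame_pts = 120;   // PTS of the first valid key frame.
      const int64_t pts_delta =
          (key_frame_pts - pts) / static_cast<int64_t>(discarded_dts.size());
      while (!discarded_dts.empty()) {
        std::printf("duplicated key frame: dts=%lld ms pts=%lld ms\n",
                    static_cast<long long>(discarded_dts.front()),
                    static_cast<long long>(pts));
        pts += pts_delta;
        discarded_dts.pop_front();
      }
      return 0;
    }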
diff --git a/chromium/media/mp2t/mp2t_stream_parser.h b/chromium/media/formats/mp2t/mp2t_stream_parser.h
index 11e48d19b1b..61f344067ea 100644
--- a/chromium/media/mp2t/mp2t_stream_parser.h
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_MP2T_STREAM_PARSER_H_
-#define MEDIA_MP2T_MP2T_STREAM_PARSER_H_
+#ifndef MEDIA_FORMATS_MP2T_MP2T_STREAM_PARSER_H_
+#define MEDIA_FORMATS_MP2T_MP2T_STREAM_PARSER_H_
#include <list>
#include <map>
@@ -33,7 +33,7 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
virtual void Init(const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ bool ignore_text_tracks,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -92,6 +92,12 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
scoped_refptr<StreamParserBuffer> stream_parser_buffer);
bool EmitRemainingBuffers();
+ // At the beginning of a new segment, some video frames might be discarded.
+ // This function fills the gap by duplicating the first valid key frame
+ // given by |stream_parser_buffer|.
+ void FillVideoGap(
+ const scoped_refptr<StreamParserBuffer>& stream_parser_buffer);
+
// List of callbacks.
InitCB init_cb_;
NewConfigCB config_cb_;
@@ -115,6 +121,11 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
int selected_audio_pid_;
int selected_video_pid_;
+ // DTS of the discarded buffers, and the minimum PTS among them.
+ std::list<base::TimeDelta> discarded_frames_dts_;
+ base::TimeDelta discarded_frames_min_pts_;
+
// Pending audio & video buffers.
std::list<BufferQueueWithConfig> buffer_queue_chain_;
diff --git a/chromium/media/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
index fab0a008102..1f329862859 100644
--- a/chromium/media/mp2t/mp2t_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -16,7 +16,7 @@
#include "media/base/test_data_util.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
-#include "media/mp2t/mp2t_stream_parser.h"
+#include "media/formats/mp2t/mp2t_stream_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -57,9 +57,11 @@ class Mp2tStreamParserTest : public testing::Test {
return true;
}
- void OnInit(bool init_ok, base::TimeDelta duration) {
+ void OnInit(bool init_ok,
+ const StreamParser::InitParameters& params) {
DVLOG(1) << "OnInit: ok=" << init_ok
- << ", dur=" << duration.InMilliseconds();
+ << ", dur=" << params.duration.InMilliseconds()
+ << ", autoTimestampOffset=" << params.auto_update_timestamp_offset;
}
bool OnNewConfig(const AudioDecoderConfig& ac,
@@ -67,6 +69,9 @@ class Mp2tStreamParserTest : public testing::Test {
const StreamParser::TextTrackConfigMap& tc) {
DVLOG(1) << "OnNewConfig: audio=" << ac.IsValidConfig()
<< ", video=" << vc.IsValidConfig();
+ // Test streams have both audio and video, verify the configs are valid.
+ EXPECT_TRUE(ac.IsValidConfig());
+ EXPECT_TRUE(vc.IsValidConfig());
return true;
}
@@ -83,12 +88,18 @@ class Mp2tStreamParserTest : public testing::Test {
}
bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers) {
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map) {
DumpBuffers("audio_buffers", audio_buffers);
DumpBuffers("video_buffers", video_buffers);
audio_frame_count_ += audio_buffers.size();
video_frame_count_ += video_buffers.size();
+ // TODO(wolenetz/acolwell): Add text track support to more MSE parsers. See
+ // http://crbug.com/336926.
+ if (!text_map.empty())
+ return false;
+
if (video_min_dts_ == kNoTimestamp() && !video_buffers.empty())
video_min_dts_ = video_buffers.front()->GetDecodeTimestamp();
if (!video_buffers.empty()) {
@@ -126,7 +137,7 @@ class Mp2tStreamParserTest : public testing::Test {
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewBuffers,
base::Unretained(this)),
- StreamParser::NewTextBuffersCB(),
+ true,
base::Bind(&Mp2tStreamParserTest::OnKeyNeeded,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewSegment,
@@ -137,8 +148,6 @@ class Mp2tStreamParserTest : public testing::Test {
}
bool ParseMpeg2TsFile(const std::string& filename, int append_bytes) {
- InitializeParser();
-
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
EXPECT_TRUE(AppendDataInPieces(buffer->data(),
buffer->data_size(),
@@ -149,6 +158,7 @@ class Mp2tStreamParserTest : public testing::Test {
TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
// Test small, non-segment-aligned appends.
+ InitializeParser();
ParseMpeg2TsFile("bear-1280x720.ts", 17);
EXPECT_EQ(video_frame_count_, 81);
parser_->Flush();
@@ -157,17 +167,28 @@ TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
TEST_F(Mp2tStreamParserTest, UnalignedAppend512) {
// Test small, non-segment-aligned appends.
+ InitializeParser();
ParseMpeg2TsFile("bear-1280x720.ts", 512);
EXPECT_EQ(video_frame_count_, 81);
parser_->Flush();
EXPECT_EQ(video_frame_count_, 82);
}
+TEST_F(Mp2tStreamParserTest, AppendAfterFlush512) {
+ InitializeParser();
+ ParseMpeg2TsFile("bear-1280x720.ts", 512);
+ parser_->Flush();
+
+ ParseMpeg2TsFile("bear-1280x720.ts", 512);
+ parser_->Flush();
+}
+
TEST_F(Mp2tStreamParserTest, TimestampWrapAround) {
// "bear-1280x720_ptswraparound.ts" has been transcoded
// from bear-1280x720.mp4 by applying a time offset of 95442s
// (close to 2^33 / 90000) which results in timestamps wrap around
// in the Mpeg2 TS stream.
+ InitializeParser();
ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512);
EXPECT_EQ(video_frame_count_, 81);
EXPECT_GE(video_min_dts_, base::TimeDelta::FromSeconds(95443 - 10));
diff --git a/chromium/media/mp2t/ts_packet.cc b/chromium/media/formats/mp2t/ts_packet.cc
index 6b41e907501..8463c11e33a 100644
--- a/chromium/media/mp2t/ts_packet.cc
+++ b/chromium/media/formats/mp2t/ts_packet.cc
@@ -1,12 +1,12 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/ts_packet.h"
+#include "media/formats/mp2t/ts_packet.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/bit_reader.h"
-#include "media/mp2t/mp2t_common.h"
+#include "media/formats/mp2t/mp2t_common.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_packet.h b/chromium/media/formats/mp2t/ts_packet.h
index f3537bc8fe2..a232705fbd5 100644
--- a/chromium/media/mp2t/ts_packet.h
+++ b/chromium/media/formats/mp2t/ts_packet.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_TS_PACKET_H_
-#define MEDIA_MP2T_TS_PACKET_H_
+#ifndef MEDIA_FORMATS_MP2T_TS_PACKET_H_
+#define MEDIA_FORMATS_MP2T_TS_PACKET_H_
#include "base/basictypes.h"
diff --git a/chromium/media/mp2t/ts_section.h b/chromium/media/formats/mp2t/ts_section.h
index 1b7453f837d..9273733d357 100644
--- a/chromium/media/mp2t/ts_section.h
+++ b/chromium/media/formats/mp2t/ts_section.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_TS_SECTION_H_
-#define MEDIA_MP2T_TS_SECTION_H_
+#ifndef MEDIA_FORMATS_MP2T_TS_SECTION_H_
+#define MEDIA_FORMATS_MP2T_TS_SECTION_H_
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_section_pat.cc b/chromium/media/formats/mp2t/ts_section_pat.cc
index ef5a21c6f0b..2fcc24bb712 100644
--- a/chromium/media/mp2t/ts_section_pat.cc
+++ b/chromium/media/formats/mp2t/ts_section_pat.cc
@@ -1,14 +1,14 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/ts_section_pat.h"
+#include "media/formats/mp2t/ts_section_pat.h"
#include <vector>
#include "base/logging.h"
#include "media/base/bit_reader.h"
-#include "media/mp2t/mp2t_common.h"
+#include "media/formats/mp2t/mp2t_common.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_section_pat.h b/chromium/media/formats/mp2t/ts_section_pat.h
index 84f33de7e48..f8079adc333 100644
--- a/chromium/media/mp2t/ts_section_pat.h
+++ b/chromium/media/formats/mp2t/ts_section_pat.h
@@ -1,13 +1,13 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_TS_SECTION_PAT_H_
-#define MEDIA_MP2T_TS_SECTION_PAT_H_
+#ifndef MEDIA_FORMATS_MP2T_TS_SECTION_PAT_H_
+#define MEDIA_FORMATS_MP2T_TS_SECTION_PAT_H_
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "media/mp2t/ts_section_psi.h"
+#include "media/formats/mp2t/ts_section_psi.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_section_pes.cc b/chromium/media/formats/mp2t/ts_section_pes.cc
index ff0beaaf2de..de69a32e63b 100644
--- a/chromium/media/mp2t/ts_section_pes.cc
+++ b/chromium/media/formats/mp2t/ts_section_pes.cc
@@ -1,15 +1,15 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/ts_section_pes.h"
+#include "media/formats/mp2t/ts_section_pes.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/bit_reader.h"
#include "media/base/buffers.h"
-#include "media/mp2t/es_parser.h"
-#include "media/mp2t/mp2t_common.h"
+#include "media/formats/mp2t/es_parser.h"
+#include "media/formats/mp2t/mp2t_common.h"
static const int kPesStartCode = 0x000001;
diff --git a/chromium/media/mp2t/ts_section_pes.h b/chromium/media/formats/mp2t/ts_section_pes.h
index b80473a58a5..b442ae491f3 100644
--- a/chromium/media/mp2t/ts_section_pes.h
+++ b/chromium/media/formats/mp2t/ts_section_pes.h
@@ -1,15 +1,15 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_TS_SECTION_PES_H_
-#define MEDIA_MP2T_TS_SECTION_PES_H_
+#ifndef MEDIA_FORMATS_MP2T_TS_SECTION_PES_H_
+#define MEDIA_FORMATS_MP2T_TS_SECTION_PES_H_
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/byte_queue.h"
-#include "media/mp2t/ts_section.h"
+#include "media/formats/mp2t/ts_section.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_section_pmt.cc b/chromium/media/formats/mp2t/ts_section_pmt.cc
index f20e79f9863..72b492aaa43 100644
--- a/chromium/media/mp2t/ts_section_pmt.cc
+++ b/chromium/media/formats/mp2t/ts_section_pmt.cc
@@ -1,14 +1,14 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/ts_section_pmt.h"
+#include "media/formats/mp2t/ts_section_pmt.h"
#include <map>
#include "base/logging.h"
#include "media/base/bit_reader.h"
-#include "media/mp2t/mp2t_common.h"
+#include "media/formats/mp2t/mp2t_common.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_section_pmt.h b/chromium/media/formats/mp2t/ts_section_pmt.h
index ece4d1670bd..c1b3d467cc3 100644
--- a/chromium/media/mp2t/ts_section_pmt.h
+++ b/chromium/media/formats/mp2t/ts_section_pmt.h
@@ -1,13 +1,13 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_TS_SECTION_PMT_H_
-#define MEDIA_MP2T_TS_SECTION_PMT_H_
+#ifndef MEDIA_FORMATS_MP2T_TS_SECTION_PMT_H_
+#define MEDIA_FORMATS_MP2T_TS_SECTION_PMT_H_
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "media/mp2t/ts_section_psi.h"
+#include "media/formats/mp2t/ts_section_psi.h"
namespace media {
namespace mp2t {
diff --git a/chromium/media/mp2t/ts_section_psi.cc b/chromium/media/formats/mp2t/ts_section_psi.cc
index f8a6fc310ce..f9db8805376 100644
--- a/chromium/media/mp2t/ts_section_psi.cc
+++ b/chromium/media/formats/mp2t/ts_section_psi.cc
@@ -1,13 +1,13 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp2t/ts_section_psi.h"
+#include "media/formats/mp2t/ts_section_psi.h"
#include "base/basictypes.h"
#include "base/logging.h"
#include "media/base/bit_reader.h"
-#include "media/mp2t/mp2t_common.h"
+#include "media/formats/mp2t/mp2t_common.h"
static bool IsCrcValid(const uint8* buf, int size) {
uint32 crc = 0xffffffffu;
diff --git a/chromium/media/mp2t/ts_section_psi.h b/chromium/media/formats/mp2t/ts_section_psi.h
index a63144633cc..1b818848545 100644
--- a/chromium/media/mp2t/ts_section_psi.h
+++ b/chromium/media/formats/mp2t/ts_section_psi.h
@@ -1,13 +1,13 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP2T_TS_SECTION_PSI_H_
-#define MEDIA_MP2T_TS_SECTION_PSI_H_
+#ifndef MEDIA_FORMATS_MP2T_TS_SECTION_PSI_H_
+#define MEDIA_FORMATS_MP2T_TS_SECTION_PSI_H_
#include "base/compiler_specific.h"
#include "media/base/byte_queue.h"
-#include "media/mp2t/ts_section.h"
+#include "media/formats/mp2t/ts_section.h"
namespace media {
diff --git a/chromium/media/mp4/aac.cc b/chromium/media/formats/mp4/aac.cc
index 6604c505a0e..71dededf552 100644
--- a/chromium/media/mp4/aac.cc
+++ b/chromium/media/formats/mp4/aac.cc
@@ -1,47 +1,18 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/aac.h"
+#include "media/formats/mp4/aac.h"
#include <algorithm>
#include "base/logging.h"
#include "media/base/bit_reader.h"
-#include "media/mp4/rcheck.h"
-
-// The following conversion table is extracted from ISO 14496 Part 3 -
-// Table 1.16 - Sampling Frequency Index.
-static const int kFrequencyMap[] = {
- 96000, 88200, 64000, 48000, 44100, 32000, 24000,
- 22050, 16000, 12000, 11025, 8000, 7350
-};
+#include "media/base/media_log.h"
+#include "media/formats/mp4/rcheck.h"
+#include "media/formats/mpeg/adts_constants.h"
namespace media {
-
-static ChannelLayout ConvertChannelConfigToLayout(uint8 channel_config) {
- switch (channel_config) {
- case 1:
- return CHANNEL_LAYOUT_MONO;
- case 2:
- return CHANNEL_LAYOUT_STEREO;
- case 3:
- return CHANNEL_LAYOUT_SURROUND;
- case 4:
- return CHANNEL_LAYOUT_4_0;
- case 5:
- return CHANNEL_LAYOUT_5_0;
- case 6:
- return CHANNEL_LAYOUT_5_1;
- case 8:
- return CHANNEL_LAYOUT_7_1;
- default:
- break;
- }
-
- return CHANNEL_LAYOUT_UNSUPPORTED;
-}
-
namespace mp4 {
AAC::AAC()
@@ -52,7 +23,7 @@ AAC::AAC()
AAC::~AAC() {
}
-bool AAC::Parse(const std::vector<uint8>& data) {
+bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
#if defined(OS_ANDROID)
codec_specific_data_ = data;
#endif
@@ -87,6 +58,9 @@ bool AAC::Parse(const std::vector<uint8>& data) {
RCHECK(reader.ReadBits(5, &profile_));
}
+ MEDIA_LOG(log_cb) << "Audio codec: mp4a.40."
+ << std::hex << static_cast<int>(profile_);
+
RCHECK(SkipDecoderGASpecificConfig(&reader));
RCHECK(SkipErrorSpecificConfig());
@@ -122,24 +96,25 @@ bool AAC::Parse(const std::vector<uint8>& data) {
}
if (frequency_ == 0) {
- RCHECK(frequency_index_ < arraysize(kFrequencyMap));
- frequency_ = kFrequencyMap[frequency_index_];
+ RCHECK(frequency_index_ < kADTSFrequencyTableSize);
+ frequency_ = kADTSFrequencyTable[frequency_index_];
}
if (extension_frequency_ == 0 && extension_frequency_index != 0xff) {
- RCHECK(extension_frequency_index < arraysize(kFrequencyMap));
- extension_frequency_ = kFrequencyMap[extension_frequency_index];
+ RCHECK(extension_frequency_index < kADTSFrequencyTableSize);
+ extension_frequency_ = kADTSFrequencyTable[extension_frequency_index];
}
// When Parametric Stereo is on, mono will be played as stereo.
- if (ps_present && channel_config_ == 1)
+ if (ps_present && channel_config_ == 1) {
channel_layout_ = CHANNEL_LAYOUT_STEREO;
- else
- channel_layout_ = ConvertChannelConfigToLayout(channel_config_);
+ } else {
+ RCHECK(channel_config_ < kADTSChannelLayoutTableSize);
+ channel_layout_ = kADTSChannelLayoutTable[channel_config_];
+ }
- return frequency_ != 0 && channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED &&
- profile_ >= 1 && profile_ <= 4 && frequency_index_ != 0xf &&
- channel_config_ <= 7;
+ return frequency_ != 0 && channel_layout_ != CHANNEL_LAYOUT_NONE &&
+ profile_ >= 1 && profile_ <= 4;
}
int AAC::GetOutputSamplesPerSecond(bool sbr_in_mimetype) const {
@@ -168,7 +143,7 @@ ChannelLayout AAC::GetChannelLayout(bool sbr_in_mimetype) const {
}
bool AAC::ConvertEsdsToADTS(std::vector<uint8>* buffer) const {
- size_t size = buffer->size() + kADTSHeaderSize;
+ size_t size = buffer->size() + kADTSHeaderMinSize;
DCHECK(profile_ >= 1 && profile_ <= 4 && frequency_index_ != 0xf &&
channel_config_ <= 7);
@@ -179,7 +154,7 @@ bool AAC::ConvertEsdsToADTS(std::vector<uint8>* buffer) const {
std::vector<uint8>& adts = *buffer;
- adts.insert(buffer->begin(), kADTSHeaderSize, 0);
+ adts.insert(buffer->begin(), kADTSHeaderMinSize, 0);
adts[0] = 0xff;
adts[1] = 0xf1;
adts[2] = ((profile_ - 1) << 6) + (frequency_index_ << 2) +
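ConvertEsdsToADTS() prepends the standard 7-byte ADTS header to each AAC frame. A self-contained sketch of that layout, assuming protection_absent = 1 (no CRC) and a |frame_size| that already includes the 7 header bytes; the helper is illustrative, not the Chromium function:

    #include <cstddef>
    #include <cstdint>

    // Packs a 7-byte ADTS header (syncword 0xFFF, MPEG-4, no CRC).
    static void PackAdtsHeader(uint8_t out[7], int profile, int freq_index,
                               int channel_config, size_t frame_size) {
      out[0] = 0xff;  // Syncword, high byte.
      out[1] = 0xf1;  // Syncword low bits, MPEG-4, layer 0, protection_absent.
      out[2] = static_cast<uint8_t>(((profile - 1) << 6) | (freq_index << 2) |
                                    (channel_config >> 2));
      out[3] = static_cast<uint8_t>(((channel_config & 0x3) << 6) |
                                    ((frame_size >> 11) & 0x3));
      out[4] = static_cast<uint8_t>((frame_size >> 3) & 0xff);
      out[5] = static_cast<uint8_t>(((frame_size & 0x7) << 5) | 0x1f);
      out[6] = 0xfc;  // Buffer fullness 0x7FF (VBR), 0 extra raw data blocks.
    }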
diff --git a/chromium/media/mp4/aac.h b/chromium/media/formats/mp4/aac.h
index 1a546b743f7..67f981e2598 100644
--- a/chromium/media/mp4/aac.h
+++ b/chromium/media/formats/mp4/aac.h
@@ -1,15 +1,16 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_AAC_H_
-#define MEDIA_MP4_AAC_H_
+#ifndef MEDIA_FORMATS_MP4_AAC_H_
+#define MEDIA_FORMATS_MP4_AAC_H_
#include <vector>
#include "base/basictypes.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
+#include "media/base/media_log.h"
namespace media {
@@ -30,7 +31,7 @@ class MEDIA_EXPORT AAC {
// The function will parse the data and get the ElementaryStreamDescriptor,
// then it will parse the ElementaryStreamDescriptor to get audio stream
// configurations.
- bool Parse(const std::vector<uint8>& data);
+ bool Parse(const std::vector<uint8>& data, const LogCB& log_cb);
// Gets the output sample rate for the AAC stream.
// |sbr_in_mimetype| should be set to true if the SBR mode is
@@ -59,9 +60,6 @@ class MEDIA_EXPORT AAC {
}
#endif
- // Size in bytes of the ADTS header added by ConvertEsdsToADTS().
- static const size_t kADTSHeaderSize = 7;
-
private:
bool SkipDecoderGASpecificConfig(BitReader* bit_reader) const;
bool SkipErrorSpecificConfig() const;
@@ -91,4 +89,4 @@ class MEDIA_EXPORT AAC {
} // namespace media
-#endif // MEDIA_MP4_AAC_H_
+#endif // MEDIA_FORMATS_MP4_AAC_H_
diff --git a/chromium/media/formats/mp4/aac_unittest.cc b/chromium/media/formats/mp4/aac_unittest.cc
new file mode 100644
index 00000000000..9d65c31cea5
--- /dev/null
+++ b/chromium/media/formats/mp4/aac_unittest.cc
@@ -0,0 +1,146 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp4/aac.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace mp4 {
+
+class AACTest : public testing::Test {
+ public:
+ bool Parse(const std::vector<uint8>& data) {
+ return aac_.Parse(data, LogCB());
+ }
+
+ AAC aac_;
+};
+
+TEST_F(AACTest, BasicProfileTest) {
+ uint8 buffer[] = {0x12, 0x10};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_TRUE(Parse(data));
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 44100);
+ EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
+}
+
+TEST_F(AACTest, ExtensionTest) {
+ uint8 buffer[] = {0x13, 0x08, 0x56, 0xe5, 0x9d, 0x48, 0x80};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_TRUE(Parse(data));
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 48000);
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
+ EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
+}
+
+// Test implicit SBR with mono channel config.
+// Mono channel layout should only be reported if SBR is not
+// specified. Otherwise stereo should be reported.
+// See ISO-14496-3 Section 1.6.6.1.2 for details about this special casing.
+TEST_F(AACTest, ImplicitSBR_ChannelConfig0) {
+ uint8 buffer[] = {0x13, 0x08};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_TRUE(Parse(data));
+
+ // Test without implicit SBR.
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 24000);
+ EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_MONO);
+
+ // Test implicit SBR.
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
+ EXPECT_EQ(aac_.GetChannelLayout(true), CHANNEL_LAYOUT_STEREO);
+}
+
+// Tests implicit SBR with a stereo channel config.
+TEST_F(AACTest, ImplicitSBR_ChannelConfig1) {
+ uint8 buffer[] = {0x13, 0x10};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_TRUE(Parse(data));
+
+ // Test without implicit SBR.
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 24000);
+ EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
+
+ // Test implicit SBR.
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
+ EXPECT_EQ(aac_.GetChannelLayout(true), CHANNEL_LAYOUT_STEREO);
+}
+
+TEST_F(AACTest, SixChannelTest) {
+ uint8 buffer[] = {0x11, 0xb0};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_TRUE(Parse(data));
+ EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 48000);
+ EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_5_1_BACK);
+}
+
+TEST_F(AACTest, DataTooShortTest) {
+ std::vector<uint8> data;
+
+ EXPECT_FALSE(Parse(data));
+
+ data.push_back(0x12);
+ EXPECT_FALSE(Parse(data));
+}
+
+TEST_F(AACTest, IncorrectProfileTest) {
+ uint8 buffer[] = {0x0, 0x08};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_FALSE(Parse(data));
+
+ data[0] = 0x08;
+ EXPECT_TRUE(Parse(data));
+
+ data[0] = 0x28;
+ EXPECT_FALSE(Parse(data));
+}
+
+TEST_F(AACTest, IncorrectFrequencyTest) {
+ uint8 buffer[] = {0x0f, 0x88};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_FALSE(Parse(data));
+
+ data[0] = 0x0e;
+ data[1] = 0x08;
+ EXPECT_TRUE(Parse(data));
+}
+
+TEST_F(AACTest, IncorrectChannelTest) {
+ uint8 buffer[] = {0x0e, 0x00};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+
+ EXPECT_FALSE(Parse(data));
+
+ data[1] = 0x08;
+ EXPECT_TRUE(Parse(data));
+}
+
+} // namespace mp4
+
+} // namespace media
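The two-byte test vectors above are AudioSpecificConfig payloads: 5 bits of audio object type, then 4 bits of sampling-frequency index, then 4 bits of channel configuration. For instance, the BasicProfileTest bytes 0x12 0x10 decode to object type 2 (AAC-LC), frequency index 4 (44100 Hz) and channel configuration 2 (stereo), matching that test's expectations. A small decoder for just those three fields:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t cfg[2] = {0x12, 0x10};  // BasicProfileTest vector.
      const uint16_t bits = static_cast<uint16_t>((cfg[0] << 8) | cfg[1]);
      const int object_type = (bits >> 11) & 0x1f;   // 2 -> AAC-LC.
      const int freq_index = (bits >> 7) & 0xf;      // 4 -> 44100 Hz.
      const int channel_config = (bits >> 3) & 0xf;  // 2 -> stereo.
      std::printf("object_type=%d freq_index=%d channel_config=%d\n",
                  object_type, freq_index, channel_config);
      return 0;
    }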
diff --git a/chromium/media/formats/mp4/avc.cc b/chromium/media/formats/mp4/avc.cc
new file mode 100644
index 00000000000..6c2bc2a6fca
--- /dev/null
+++ b/chromium/media/formats/mp4/avc.cc
@@ -0,0 +1,310 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp4/avc.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "media/base/decrypt_config.h"
+#include "media/filters/h264_parser.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/box_reader.h"
+
+namespace media {
+namespace mp4 {
+
+static const uint8 kAnnexBStartCode[] = {0, 0, 0, 1};
+static const int kAnnexBStartCodeSize = 4;
+
+static bool ConvertAVCToAnnexBInPlaceForLengthSize4(std::vector<uint8>* buf) {
+ const int kLengthSize = 4;
+ size_t pos = 0;
+ while (pos + kLengthSize < buf->size()) {
+ uint32 nal_size = (*buf)[pos];
+ nal_size = (nal_size << 8) + (*buf)[pos+1];
+ nal_size = (nal_size << 8) + (*buf)[pos+2];
+ nal_size = (nal_size << 8) + (*buf)[pos+3];
+
+ if (nal_size == 0) {
+ DVLOG(1) << "nal_size is 0";
+ return false;
+ }
+
+ std::copy(kAnnexBStartCode, kAnnexBStartCode + kAnnexBStartCodeSize,
+ buf->begin() + pos);
+ pos += kLengthSize + nal_size;
+ }
+ return pos == buf->size();
+}
+
+// static
+bool AVC::ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer) {
+ RCHECK(length_size == 1 || length_size == 2 || length_size == 4);
+
+ if (length_size == 4)
+ return ConvertAVCToAnnexBInPlaceForLengthSize4(buffer);
+
+ std::vector<uint8> temp;
+ temp.swap(*buffer);
+ buffer->reserve(temp.size() + 32);
+
+ size_t pos = 0;
+ while (pos + length_size < temp.size()) {
+ int nal_size = temp[pos];
+ if (length_size == 2) nal_size = (nal_size << 8) + temp[pos+1];
+ pos += length_size;
+
+ if (nal_size == 0) {
+ DVLOG(1) << "nal_size is 0";
+ return false;
+ }
+
+ RCHECK(pos + nal_size <= temp.size());
+ buffer->insert(buffer->end(), kAnnexBStartCode,
+ kAnnexBStartCode + kAnnexBStartCodeSize);
+ buffer->insert(buffer->end(), temp.begin() + pos,
+ temp.begin() + pos + nal_size);
+ pos += nal_size;
+ }
+ return pos == temp.size();
+}
+
+// static
+bool AVC::InsertParamSetsAnnexB(const AVCDecoderConfigurationRecord& avc_config,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples) {
+ DCHECK(AVC::IsValidAnnexB(*buffer));
+
+ scoped_ptr<H264Parser> parser(new H264Parser());
+ const uint8* start = &(*buffer)[0];
+ parser->SetStream(start, buffer->size());
+
+ H264NALU nalu;
+ if (parser->AdvanceToNextNALU(&nalu) != H264Parser::kOk)
+ return false;
+
+ std::vector<uint8>::iterator config_insert_point = buffer->begin();
+ std::vector<SubsampleEntry>::iterator subsamples_insert_point =
+ subsamples->begin();
+
+ if (nalu.nal_unit_type == H264NALU::kAUD) {
+ // Move insert point to just after the AUD.
+ config_insert_point += (nalu.data + nalu.size) - start;
+
+ if (!subsamples->empty()) {
+ int64 first_subsample_size =
+ (*subsamples)[0].clear_bytes + (*subsamples)[0].cypher_bytes;
+
+ if (first_subsample_size != (config_insert_point - buffer->begin()))
+ return false;
+
+ subsamples_insert_point++;
+ }
+
+ }
+
+ // Clear |parser| and |start| since they aren't needed anymore and
+ // will hold stale pointers once the insert happens.
+ parser.reset();
+ start = NULL;
+
+ std::vector<uint8> param_sets;
+ std::vector<SubsampleEntry> config_subsamples;
+ RCHECK(AVC::ConvertConfigToAnnexB(avc_config,
+ &param_sets,
+ &config_subsamples));
+
+ if (!subsamples->empty()) {
+ subsamples->insert(subsamples_insert_point,
+ config_subsamples.begin(),
+ config_subsamples.end());
+ }
+
+ buffer->insert(config_insert_point,
+ param_sets.begin(), param_sets.end());
+
+ DCHECK(AVC::IsValidAnnexB(*buffer));
+ return true;
+}
+
+// static
+bool AVC::ConvertConfigToAnnexB(
+ const AVCDecoderConfigurationRecord& avc_config,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples) {
+ DCHECK(buffer->empty());
+ buffer->clear();
+ int total_size = 0;
+ for (size_t i = 0; i < avc_config.sps_list.size(); i++)
+ total_size += avc_config.sps_list[i].size() + kAnnexBStartCodeSize;
+ for (size_t i = 0; i < avc_config.pps_list.size(); i++)
+ total_size += avc_config.pps_list[i].size() + kAnnexBStartCodeSize;
+ buffer->reserve(total_size);
+
+ for (size_t i = 0; i < avc_config.sps_list.size(); i++) {
+ buffer->insert(buffer->end(), kAnnexBStartCode,
+ kAnnexBStartCode + kAnnexBStartCodeSize);
+ buffer->insert(buffer->end(), avc_config.sps_list[i].begin(),
+ avc_config.sps_list[i].end());
+
+ SubsampleEntry entry;
+ entry.clear_bytes = kAnnexBStartCodeSize + avc_config.sps_list[i].size();
+ entry.cypher_bytes = 0;
+ subsamples->push_back(entry);
+ }
+
+ for (size_t i = 0; i < avc_config.pps_list.size(); i++) {
+ buffer->insert(buffer->end(), kAnnexBStartCode,
+ kAnnexBStartCode + kAnnexBStartCodeSize);
+ buffer->insert(buffer->end(), avc_config.pps_list[i].begin(),
+ avc_config.pps_list[i].end());
+
+ SubsampleEntry entry;
+ entry.clear_bytes = kAnnexBStartCodeSize + avc_config.pps_list[i].size();
+ entry.cypher_bytes = 0;
+ subsamples->push_back(entry);
+ }
+ return true;
+}
+
+// Verifies AnnexB NALU order according to ISO/IEC 14496-10 Section 7.4.1.2.3.
+bool AVC::IsValidAnnexB(const std::vector<uint8>& buffer) {
+ return IsValidAnnexB(&buffer[0], buffer.size());
+}
+
+bool AVC::IsValidAnnexB(const uint8* buffer, size_t size) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(buffer);
+
+ if (size == 0)
+ return true;
+
+ H264Parser parser;
+ parser.SetStream(buffer, size);
+
+ typedef enum {
+ kAUDAllowed,
+ kBeforeFirstVCL, // VCL == nal_unit_types 1-5
+ kAfterFirstVCL,
+ kEOStreamAllowed,
+ kNoMoreDataAllowed,
+ } NALUOrderState;
+
+ H264NALU nalu;
+ NALUOrderState order_state = kAUDAllowed;
+ int last_nalu_type = H264NALU::kUnspecified;
+ bool done = false;
+ while (!done) {
+ switch (parser.AdvanceToNextNALU(&nalu)) {
+ case H264Parser::kOk:
+ DVLOG(1) << "nal_unit_type " << nalu.nal_unit_type;
+
+ switch (nalu.nal_unit_type) {
+ case H264NALU::kAUD:
+ if (order_state > kAUDAllowed) {
+ DVLOG(1) << "Unexpected AUD in order_state " << order_state;
+ return false;
+ }
+ order_state = kBeforeFirstVCL;
+ break;
+
+ case H264NALU::kSEIMessage:
+ case H264NALU::kReserved14:
+ case H264NALU::kReserved15:
+ case H264NALU::kReserved16:
+ case H264NALU::kReserved17:
+ case H264NALU::kReserved18:
+ case H264NALU::kPPS:
+ case H264NALU::kSPS:
+ if (order_state > kBeforeFirstVCL) {
+ DVLOG(1) << "Unexpected NALU type " << nalu.nal_unit_type
+ << " in order_state " << order_state;
+ return false;
+ }
+ order_state = kBeforeFirstVCL;
+ break;
+
+ case H264NALU::kSPSExt:
+ if (last_nalu_type != H264NALU::kSPS) {
+ DVLOG(1) << "SPS extension does not follow an SPS.";
+ return false;
+ }
+ break;
+
+ case H264NALU::kNonIDRSlice:
+ case H264NALU::kSliceDataA:
+ case H264NALU::kSliceDataB:
+ case H264NALU::kSliceDataC:
+ case H264NALU::kIDRSlice:
+ if (order_state > kAfterFirstVCL) {
+ DVLOG(1) << "Unexpected VCL in order_state " << order_state;
+ return false;
+ }
+ order_state = kAfterFirstVCL;
+ break;
+
+ case H264NALU::kCodedSliceAux:
+ if (order_state != kAfterFirstVCL) {
+ DVLOG(1) << "Unexpected extension in order_state " << order_state;
+ return false;
+ }
+ break;
+
+ case H264NALU::kEOSeq:
+ if (order_state != kAfterFirstVCL) {
+ DVLOG(1) << "Unexpected EOSeq in order_state " << order_state;
+ return false;
+ }
+ order_state = kEOStreamAllowed;
+ break;
+
+ case H264NALU::kEOStream:
+ if (order_state < kAfterFirstVCL) {
+ DVLOG(1) << "Unexpected EOStream in order_state " << order_state;
+ return false;
+ }
+ order_state = kNoMoreDataAllowed;
+ break;
+
+ case H264NALU::kFiller:
+ case H264NALU::kUnspecified:
+ if (!(order_state >= kAfterFirstVCL &&
+ order_state < kEOStreamAllowed)) {
+ DVLOG(1) << "Unexpected NALU type " << nalu.nal_unit_type
+ << " in order_state " << order_state;
+ return false;
+ }
+ break;
+
+ default:
+ DCHECK_GE(nalu.nal_unit_type, 20);
+ if (nalu.nal_unit_type >= 20 && nalu.nal_unit_type <= 31 &&
+ order_state != kAfterFirstVCL) {
+ DVLOG(1) << "Unexpected NALU type " << nalu.nal_unit_type
+ << " in order_state " << order_state;
+ return false;
+ }
+ }
+ last_nalu_type = nalu.nal_unit_type;
+ break;
+
+ case H264Parser::kInvalidStream:
+ return false;
+
+ case H264Parser::kUnsupportedStream:
+ NOTREACHED() << "AdvanceToNextNALU() returned kUnsupportedStream!";
+ return false;
+
+ case H264Parser::kEOStream:
+ done = true;
+ }
+ }
+
+ return order_state >= kAfterFirstVCL;
+}
+
+} // namespace mp4
+} // namespace media
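Note the asymmetry in AVC::ConvertFrameToAnnexB() above: a 4-byte length prefix is exactly the size of an Annex B start code, so that case is rewritten in place, while 1- and 2-byte prefixes force a copy into a rebuilt buffer. A minimal usage sketch with hypothetical data (assumes a Chromium checkout for the headers):

    #include <vector>
    #include "base/basictypes.h"
    #include "media/formats/mp4/avc.h"

    void ConvertExample() {
      // One 3-byte NALU prefixed by a 2-byte big-endian length (0x0003).
      std::vector<uint8> frame;
      frame.push_back(0x00); frame.push_back(0x03);
      frame.push_back(0x01); frame.push_back(0x02); frame.push_back(0x03);
      bool ok = media::mp4::AVC::ConvertFrameToAnnexB(2, &frame);
      // ok == true; frame is now 00 00 00 01 01 02 03 (start code + NALU).
      (void)ok;
    }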
diff --git a/chromium/media/formats/mp4/avc.h b/chromium/media/formats/mp4/avc.h
new file mode 100644
index 00000000000..0d84eef9c8c
--- /dev/null
+++ b/chromium/media/formats/mp4/avc.h
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MP4_AVC_H_
+#define MEDIA_FORMATS_MP4_AVC_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+struct SubsampleEntry;
+
+namespace mp4 {
+
+struct AVCDecoderConfigurationRecord;
+
+class MEDIA_EXPORT AVC {
+ public:
+ static bool ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer);
+
+ // Inserts the SPS & PPS data from |avc_config| into |buffer|.
+ // |buffer| is expected to contain AnnexB conformant data.
+ // |subsamples| contains the SubsampleEntry info if |buffer| contains
+ // encrypted data.
+ // Returns true if the param sets were successfully inserted.
+ static bool InsertParamSetsAnnexB(
+ const AVCDecoderConfigurationRecord& avc_config,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples);
+
+ static bool ConvertConfigToAnnexB(
+ const AVCDecoderConfigurationRecord& avc_config,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples);
+
+ // Verifies that the contents of |buffer| conform to
+ // Section 7.4.1.2.3 of ISO/IEC 14496-10.
+ // Returns true if |buffer| contains conformant Annex B data.
+ // TODO(acolwell): Remove the std::vector version when we can use
+ // C++11's std::vector<T>::data() method.
+ static bool IsValidAnnexB(const std::vector<uint8>& buffer);
+ static bool IsValidAnnexB(const uint8* buffer, size_t size);
+};
+
+} // namespace mp4
+} // namespace media
+
+#endif // MEDIA_FORMATS_MP4_AVC_H_
diff --git a/chromium/media/formats/mp4/avc_unittest.cc b/chromium/media/formats/mp4/avc_unittest.cc
new file mode 100644
index 00000000000..d0ddc66a9e6
--- /dev/null
+++ b/chromium/media/formats/mp4/avc_unittest.cc
@@ -0,0 +1,372 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+
+#include "base/basictypes.h"
+#include "base/strings/string_util.h"
+#include "media/base/decrypt_config.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/filters/h264_parser.h"
+#include "media/formats/mp4/avc.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace mp4 {
+
+static const uint8 kNALU1[] = { 0x01, 0x02, 0x03 };
+static const uint8 kNALU2[] = { 0x04, 0x05, 0x06, 0x07 };
+static const uint8 kExpected[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x03,
+ 0x00, 0x00, 0x00, 0x01, 0x04, 0x05, 0x06, 0x07 };
+
+static const uint8 kExpectedParamSets[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x12,
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x34,
+ 0x00, 0x00, 0x00, 0x01, 0x68, 0x56, 0x78};
+
+static H264NALU::Type StringToNALUType(const std::string& name) {
+ if (name == "P")
+ return H264NALU::kNonIDRSlice;
+
+ if (name == "I")
+ return H264NALU::kIDRSlice;
+
+ if (name == "SEI")
+ return H264NALU::kSEIMessage;
+
+ if (name == "SPS")
+ return H264NALU::kSPS;
+
+ if (name == "SPSExt")
+ return H264NALU::kSPSExt;
+
+ if (name == "PPS")
+ return H264NALU::kPPS;
+
+ if (name == "AUD")
+ return H264NALU::kAUD;
+
+ if (name == "EOSeq")
+ return H264NALU::kEOSeq;
+
+ if (name == "EOStr")
+ return H264NALU::kEOStream;
+
+ if (name == "FILL")
+ return H264NALU::kFiller;
+
+ if (name == "R14")
+ return H264NALU::kReserved14;
+
+ CHECK(false) << "Unexpected name: " << name;
+ return H264NALU::kUnspecified;
+}
+
+static std::string NALUTypeToString(int type) {
+ switch (type) {
+ case H264NALU::kNonIDRSlice:
+ return "P";
+ case H264NALU::kSliceDataA:
+ return "SDA";
+ case H264NALU::kSliceDataB:
+ return "SDB";
+ case H264NALU::kSliceDataC:
+ return "SDC";
+ case H264NALU::kIDRSlice:
+ return "I";
+ case H264NALU::kSEIMessage:
+ return "SEI";
+ case H264NALU::kSPS:
+ return "SPS";
+ case H264NALU::kSPSExt:
+ return "SPSExt";
+ case H264NALU::kPPS:
+ return "PPS";
+ case H264NALU::kAUD:
+ return "AUD";
+ case H264NALU::kEOSeq:
+ return "EOSeq";
+ case H264NALU::kEOStream:
+ return "EOStr";
+ case H264NALU::kFiller:
+ return "FILL";
+ case H264NALU::kReserved14:
+ return "R14";
+
+ case H264NALU::kUnspecified:
+ case H264NALU::kReserved15:
+ case H264NALU::kReserved16:
+ case H264NALU::kReserved17:
+ case H264NALU::kReserved18:
+ case H264NALU::kCodedSliceAux:
+ case H264NALU::kCodedSliceExtension:
+ CHECK(false) << "Unexpected type: " << type;
+ break;
+ };
+
+ return "UnsupportedType";
+}
+
+void StringToAnnexB(const std::string& str, std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples) {
+ DCHECK(!str.empty());
+
+ std::vector<std::string> tokens;
+ EXPECT_GT(Tokenize(str, " ", &tokens), 0u);
+
+ buffer->clear();
+ for (size_t i = 0; i < tokens.size(); ++i) {
+ SubsampleEntry entry;
+ size_t start = buffer->size();
+
+ // Write the start code.
+ buffer->push_back(0x00);
+ buffer->push_back(0x00);
+ buffer->push_back(0x00);
+ buffer->push_back(0x01);
+
+ // Write NALU type.
+ buffer->push_back(StringToNALUType(tokens[i]));
+
+ entry.clear_bytes = buffer->size() - start;
+
+ // Write junk for the payload since the current code doesn't
+ // actually look at it.
+ buffer->push_back(0x32);
+ buffer->push_back(0x12);
+ buffer->push_back(0x67);
+
+ entry.cypher_bytes = buffer->size() - start - entry.clear_bytes;
+
+ if (subsamples) {
+ subsamples->push_back(entry);
+ }
+ }
+}
+
+std::string AnnexBToString(const std::vector<uint8>& buffer) {
+ std::stringstream ss;
+
+ H264Parser parser;
+ parser.SetStream(&buffer[0], buffer.size());
+
+ H264NALU nalu;
+ bool first = true;
+ while (parser.AdvanceToNextNALU(&nalu) == H264Parser::kOk) {
+ if (!first)
+ ss << " ";
+ else
+ first = false;
+
+ ss << NALUTypeToString(nalu.nal_unit_type);
+ }
+ return ss.str();
+}
+
+class AVCConversionTest : public testing::TestWithParam<int> {
+ protected:
+ void WriteLength(int length_size, int length, std::vector<uint8>* buf) {
+ DCHECK_GE(length, 0);
+ DCHECK_LE(length, 255);
+
+ for (int i = 1; i < length_size; i++)
+ buf->push_back(0);
+ buf->push_back(length);
+ }
+
+ void MakeInputForLength(int length_size, std::vector<uint8>* buf) {
+ buf->clear();
+
+ WriteLength(length_size, sizeof(kNALU1), buf);
+ buf->insert(buf->end(), kNALU1, kNALU1 + sizeof(kNALU1));
+
+ WriteLength(length_size, sizeof(kNALU2), buf);
+ buf->insert(buf->end(), kNALU2, kNALU2 + sizeof(kNALU2));
+ }
+
+};
+
+TEST_P(AVCConversionTest, ParseCorrectly) {
+ std::vector<uint8> buf;
+ MakeInputForLength(GetParam(), &buf);
+ EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_TRUE(AVC::IsValidAnnexB(buf));
+ EXPECT_EQ(buf.size(), sizeof(kExpected));
+ EXPECT_EQ(0, memcmp(kExpected, &buf[0], sizeof(kExpected)));
+ EXPECT_EQ("P SDC", AnnexBToString(buf));
+}
+
+// Intentionally write a NALU size that is larger than the remaining buffer.
+TEST_P(AVCConversionTest, NALUSizeTooLarge) {
+ std::vector<uint8> buf;
+ WriteLength(GetParam(), 10 * sizeof(kNALU1), &buf);
+ buf.insert(buf.end(), kNALU1, kNALU1 + sizeof(kNALU1));
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+}
+
+TEST_P(AVCConversionTest, NALUSizeIsZero) {
+ std::vector<uint8> buf;
+ WriteLength(GetParam(), 0, &buf);
+
+ WriteLength(GetParam(), sizeof(kNALU1), &buf);
+ buf.insert(buf.end(), kNALU1, kNALU1 + sizeof(kNALU1));
+
+ WriteLength(GetParam(), 0, &buf);
+
+ WriteLength(GetParam(), sizeof(kNALU2), &buf);
+ buf.insert(buf.end(), kNALU2, kNALU2 + sizeof(kNALU2));
+
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+}
+
+TEST_P(AVCConversionTest, ParsePartial) {
+ std::vector<uint8> buf;
+ MakeInputForLength(GetParam(), &buf);
+ buf.pop_back();
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ // This tests a buffer ending in the middle of a NAL length. For length size
+ // of one, this can't happen, so we skip that case.
+ if (GetParam() != 1) {
+ MakeInputForLength(GetParam(), &buf);
+ buf.erase(buf.end() - (sizeof(kNALU2) + 1), buf.end());
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ }
+}
+
+TEST_P(AVCConversionTest, ParseEmpty) {
+ std::vector<uint8> buf;
+ EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_EQ(0u, buf.size());
+}
+
+INSTANTIATE_TEST_CASE_P(AVCConversionTestValues,
+ AVCConversionTest,
+ ::testing::Values(1, 2, 4));
+
+TEST_F(AVCConversionTest, ConvertConfigToAnnexB) {
+ AVCDecoderConfigurationRecord avc_config;
+ avc_config.sps_list.resize(2);
+ avc_config.sps_list[0].push_back(0x67);
+ avc_config.sps_list[0].push_back(0x12);
+ avc_config.sps_list[1].push_back(0x67);
+ avc_config.sps_list[1].push_back(0x34);
+ avc_config.pps_list.resize(1);
+ avc_config.pps_list[0].push_back(0x68);
+ avc_config.pps_list[0].push_back(0x56);
+ avc_config.pps_list[0].push_back(0x78);
+
+ std::vector<uint8> buf;
+ std::vector<SubsampleEntry> subsamples;
+ EXPECT_TRUE(AVC::ConvertConfigToAnnexB(avc_config, &buf, &subsamples));
+ EXPECT_EQ(0, memcmp(kExpectedParamSets, &buf[0],
+ sizeof(kExpectedParamSets)));
+ EXPECT_EQ("SPS SPS PPS", AnnexBToString(buf));
+}
+
+// Verify that we can round trip string -> Annex B -> string.
+TEST_F(AVCConversionTest, StringConversionFunctions) {
+ std::string str =
+ "AUD SPS SPSExt SPS PPS SEI SEI R14 I P FILL EOSeq EOStr";
+ std::vector<uint8> buf;
+ StringToAnnexB(str, &buf, NULL);
+
+ EXPECT_TRUE(AVC::IsValidAnnexB(buf));
+
+ EXPECT_EQ(str, AnnexBToString(buf));
+}
+
+TEST_F(AVCConversionTest, ValidAnnexBConstructs) {
+ const char* test_cases[] = {
+ "I",
+ "I I I I",
+ "AUD I",
+ "AUD SPS PPS I",
+ "I EOSeq",
+ "I EOSeq EOStr",
+ "I EOStr",
+ "P",
+ "P P P P",
+ "AUD SPS PPS P",
+ "SEI SEI I",
+ "SEI SEI R14 I",
+ "SPS SPSExt SPS PPS I P",
+ "R14 SEI I",
+ };
+
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ std::vector<uint8> buf;
+ StringToAnnexB(test_cases[i], &buf, NULL);
+ EXPECT_TRUE(AVC::IsValidAnnexB(buf)) << "'" << test_cases[i] << "' failed";
+ }
+}
+
+TEST_F(AVCConversionTest, InvalidAnnexBConstructs) {
+ static const char* test_cases[] = {
+ "AUD", // No VCL present.
+ "SPS PPS", // No VCL present.
+ "SPS PPS AUD I", // Parameter sets must come after AUD.
+ "SPSExt SPS P", // SPS must come before SPSExt.
+ "SPS PPS SPSExt P", // SPSExt must follow an SPS.
+ "EOSeq", // EOSeq must come after a VCL.
+ "EOStr", // EOStr must come after a VCL.
+ "I EOStr EOSeq", // EOSeq must come before EOStr.
+ "I R14", // Reserved14-18 must come before first VCL.
+ "I SEI", // SEI must come before first VCL.
+ "P SPS P", // SPS after first VCL would indicate a new access unit.
+ };
+
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ std::vector<uint8> buf;
+ StringToAnnexB(test_cases[i], &buf, NULL);
+ EXPECT_FALSE(AVC::IsValidAnnexB(buf)) << "'" << test_cases[i] << "' failed";
+ }
+}
+
+struct InsertTestCases {
+  const char* input;
+  const char* expected;
+};
+
+TEST_F(AVCConversionTest, InsertParamSetsAnnexB) {
+ static const InsertTestCases test_cases[] = {
+ { "I", "SPS SPS PPS I" },
+ { "AUD I", "AUD SPS SPS PPS I" },
+
+ // Cases where param sets in |avc_config| are placed before
+ // the existing ones.
+ { "SPS PPS I", "SPS SPS PPS SPS PPS I" },
+ { "AUD SPS PPS I", "AUD SPS SPS PPS SPS PPS I" }, // Note: params placed
+ // after AUD.
+ };
+
+ AVCDecoderConfigurationRecord avc_config;
+ avc_config.sps_list.resize(2);
+ avc_config.sps_list[0].push_back(0x67);
+ avc_config.sps_list[0].push_back(0x12);
+ avc_config.sps_list[1].push_back(0x67);
+ avc_config.sps_list[1].push_back(0x34);
+ avc_config.pps_list.resize(1);
+ avc_config.pps_list[0].push_back(0x68);
+ avc_config.pps_list[0].push_back(0x56);
+ avc_config.pps_list[0].push_back(0x78);
+
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ std::vector<uint8> buf;
+ std::vector<SubsampleEntry> subsamples;
+
+ StringToAnnexB(test_cases[i].input, &buf, &subsamples);
+
+ EXPECT_TRUE(AVC::InsertParamSetsAnnexB(avc_config, &buf, &subsamples))
+ << "'" << test_cases[i].input << "' insert failed.";
+ EXPECT_TRUE(AVC::IsValidAnnexB(buf))
+ << "'" << test_cases[i].input << "' created invalid AnnexB.";
+ EXPECT_EQ(test_cases[i].expected, AnnexBToString(buf))
+ << "'" << test_cases[i].input << "' generated unexpected output.";
+ }
+}
+
+} // namespace mp4
+} // namespace media
diff --git a/chromium/media/mp4/box_definitions.cc b/chromium/media/formats/mp4/box_definitions.cc
index 74d216f300e..b3060ebe9ff 100644
--- a/chromium/media/mp4/box_definitions.cc
+++ b/chromium/media/formats/mp4/box_definitions.cc
@@ -1,12 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/box_definitions.h"
+#include "media/formats/mp4/box_definitions.h"
#include "base/logging.h"
-#include "media/mp4/es_descriptor.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/es_descriptor.h"
+#include "media/formats/mp4/rcheck.h"
namespace media {
namespace mp4 {
@@ -245,13 +245,55 @@ bool SampleDescription::Parse(BoxReader* reader) {
return true;
}
+SyncSample::SyncSample() : is_present(false) {}
+SyncSample::~SyncSample() {}
+FourCC SyncSample::BoxType() const { return FOURCC_STSS; }
+
+bool SyncSample::Parse(BoxReader* reader) {
+ uint32 entry_count;
+ RCHECK(reader->ReadFullBoxHeader() &&
+ reader->Read4(&entry_count));
+
+ is_present = true;
+
+ entries.resize(entry_count);
+
+ if (entry_count == 0)
+ return true;
+
+ for (size_t i = 0; i < entry_count; ++i)
+ RCHECK(reader->Read4(&entries[i]));
+
+ return true;
+}
+
+bool SyncSample::IsSyncSample(size_t k) const {
+ // ISO/IEC 14496-12 Section 8.6.2.1 : If the sync sample box is not present,
+ // every sample is a sync sample.
+ if (!is_present)
+ return true;
+
+ // ISO/IEC 14496-12 Section 8.6.2.3 : If entry_count is zero, there are no
+ // sync samples within the stream.
+ if (entries.size() == 0u)
+ return false;
+
+ for (size_t i = 0; i < entries.size(); ++i) {
+ if (entries[i] == k)
+ return true;
+ }
+
+ return false;
+}
+
SampleTable::SampleTable() {}
SampleTable::~SampleTable() {}
FourCC SampleTable::BoxType() const { return FOURCC_STBL; }
bool SampleTable::Parse(BoxReader* reader) {
return reader->ScanChildren() &&
- reader->ReadChild(&description);
+ reader->ReadChild(&description) &&
+ reader->MaybeReadChild(&sync_sample);
}
EditList::EditList() {}
@@ -321,18 +363,29 @@ AVCDecoderConfigurationRecord::~AVCDecoderConfigurationRecord() {}
FourCC AVCDecoderConfigurationRecord::BoxType() const { return FOURCC_AVCC; }
bool AVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
+ return ParseInternal(reader, reader->log_cb());
+}
+
+bool AVCDecoderConfigurationRecord::Parse(const uint8* data, int data_size) {
+ BufferReader reader(data, data_size);
+ return ParseInternal(&reader, LogCB());
+}
+
+bool AVCDecoderConfigurationRecord::ParseInternal(BufferReader* reader,
+ const LogCB& log_cb) {
RCHECK(reader->Read1(&version) && version == 1 &&
reader->Read1(&profile_indication) &&
reader->Read1(&profile_compatibility) &&
reader->Read1(&avc_level));
uint8 length_size_minus_one;
- RCHECK(reader->Read1(&length_size_minus_one) &&
- (length_size_minus_one & 0xfc) == 0xfc);
+ RCHECK(reader->Read1(&length_size_minus_one));
length_size = (length_size_minus_one & 0x3) + 1;
+ RCHECK(length_size != 3); // Only values of 1, 2, and 4 are valid.
+
uint8 num_sps;
- RCHECK(reader->Read1(&num_sps) && (num_sps & 0xe0) == 0xe0);
+ RCHECK(reader->Read1(&num_sps));
num_sps &= 0x1f;
sps_list.resize(num_sps);
@@ -340,6 +393,14 @@ bool AVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
uint16 sps_length;
RCHECK(reader->Read2(&sps_length) &&
reader->ReadVec(&sps_list[i], sps_length));
+ RCHECK(sps_list[i].size() > 4);
+
+ if (!log_cb.is_null()) {
+ MEDIA_LOG(log_cb) << "Video codec: avc1." << std::hex
+ << static_cast<int>(sps_list[i][1])
+ << static_cast<int>(sps_list[i][2])
+ << static_cast<int>(sps_list[i][3]);
+ }
}
uint8 num_pps;
@@ -430,7 +491,13 @@ bool ElementaryStreamDescriptor::Parse(BoxReader* reader) {
object_type = es_desc.object_type();
- RCHECK(aac.Parse(es_desc.decoder_specific_info()));
+ if (object_type != 0x40) {
+ MEDIA_LOG(reader->log_cb()) << "Audio codec: mp4a."
+ << std::hex << static_cast<int>(object_type);
+ }
+
+ if (es_desc.IsAAC(object_type))
+ RCHECK(aac.Parse(es_desc.decoder_specific_info(), reader->log_cb()));
return true;
}
@@ -732,19 +799,116 @@ bool TrackFragmentRun::Parse(BoxReader* reader) {
return true;
}
+SampleToGroup::SampleToGroup() : grouping_type(0), grouping_type_parameter(0) {}
+SampleToGroup::~SampleToGroup() {}
+FourCC SampleToGroup::BoxType() const { return FOURCC_SBGP; }
+
+bool SampleToGroup::Parse(BoxReader* reader) {
+ RCHECK(reader->ReadFullBoxHeader() &&
+ reader->Read4(&grouping_type));
+
+ if (reader->version() == 1)
+ RCHECK(reader->Read4(&grouping_type_parameter));
+
+ if (grouping_type != FOURCC_SEIG) {
+ DLOG(WARNING) << "SampleToGroup box with grouping_type '" << grouping_type
+ << "' is not supported.";
+ return true;
+ }
+
+ uint32 count;
+ RCHECK(reader->Read4(&count));
+ entries.resize(count);
+ for (uint32 i = 0; i < count; ++i) {
+ RCHECK(reader->Read4(&entries[i].sample_count) &&
+ reader->Read4(&entries[i].group_description_index));
+ }
+ return true;
+}
+
+CencSampleEncryptionInfoEntry::CencSampleEncryptionInfoEntry()
+ : is_encrypted(false), iv_size(0) {}
+CencSampleEncryptionInfoEntry::~CencSampleEncryptionInfoEntry() {}
+
+SampleGroupDescription::SampleGroupDescription() : grouping_type(0) {}
+SampleGroupDescription::~SampleGroupDescription() {}
+FourCC SampleGroupDescription::BoxType() const { return FOURCC_SGPD; }
+
+bool SampleGroupDescription::Parse(BoxReader* reader) {
+ RCHECK(reader->ReadFullBoxHeader() &&
+ reader->Read4(&grouping_type));
+
+ if (grouping_type != FOURCC_SEIG) {
+ DLOG(WARNING) << "SampleGroupDescription box with grouping_type '"
+ << grouping_type << "' is not supported.";
+ return true;
+ }
+
+ const uint8 version = reader->version();
+
+ const size_t kKeyIdSize = 16;
+ const size_t kEntrySize = sizeof(uint32) + kKeyIdSize;
+ uint32 default_length = 0;
+ if (version == 1) {
+ RCHECK(reader->Read4(&default_length));
+ RCHECK(default_length == 0 || default_length >= kEntrySize);
+ }
+
+ uint32 count;
+ RCHECK(reader->Read4(&count));
+ entries.resize(count);
+ for (uint32 i = 0; i < count; ++i) {
+ if (version == 1) {
+ if (default_length == 0) {
+ uint32 description_length = 0;
+ RCHECK(reader->Read4(&description_length));
+ RCHECK(description_length >= kEntrySize);
+ }
+ }
+
+ uint8 flag;
+ RCHECK(reader->SkipBytes(2) && // reserved.
+ reader->Read1(&flag) &&
+ reader->Read1(&entries[i].iv_size) &&
+ reader->ReadVec(&entries[i].key_id, kKeyIdSize));
+
+ entries[i].is_encrypted = (flag != 0);
+ if (entries[i].is_encrypted) {
+ RCHECK(entries[i].iv_size == 8 || entries[i].iv_size == 16);
+ } else {
+ RCHECK(entries[i].iv_size == 0);
+ }
+ }
+ return true;
+}
+
TrackFragment::TrackFragment() {}
TrackFragment::~TrackFragment() {}
FourCC TrackFragment::BoxType() const { return FOURCC_TRAF; }
bool TrackFragment::Parse(BoxReader* reader) {
- return reader->ScanChildren() &&
+ RCHECK(reader->ScanChildren() &&
reader->ReadChild(&header) &&
// Media Source specific: 'tfdt' required
reader->ReadChild(&decode_time) &&
reader->MaybeReadChildren(&runs) &&
reader->MaybeReadChild(&auxiliary_offset) &&
reader->MaybeReadChild(&auxiliary_size) &&
- reader->MaybeReadChild(&sdtp);
+ reader->MaybeReadChild(&sdtp));
+
+ // There could be multiple SampleGroupDescription and SampleToGroup boxes with
+ // different grouping types. For common encryption, the relevant grouping type
+ // is 'seig'. Continue reading until 'seig' is found, or until running out of
+ // child boxes.
+ while (sample_group_description.grouping_type != FOURCC_SEIG &&
+ reader->HasChild(&sample_group_description)) {
+ RCHECK(reader->ReadChild(&sample_group_description));
+ }
+ while (sample_to_group.grouping_type != FOURCC_SEIG &&
+ reader->HasChild(&sample_to_group)) {
+ RCHECK(reader->ReadChild(&sample_to_group));
+ }
+ return true;
}
MovieFragment::MovieFragment() {}
@@ -773,7 +937,6 @@ bool IndependentAndDisposableSamples::Parse(BoxReader* reader) {
for (int i = 0; i < sample_count; ++i) {
uint8 sample_info;
RCHECK(reader->Read1(&sample_info));
- RCHECK((sample_info >> 6) == 0); // reserved.
sample_depends_on_[i] =
static_cast<SampleDependsOn>((sample_info >> 4) & 0x3);
diff --git a/chromium/media/mp4/box_definitions.h b/chromium/media/formats/mp4/box_definitions.h
index 74999612cef..f823fee9662 100644
--- a/chromium/media/mp4/box_definitions.h
+++ b/chromium/media/formats/mp4/box_definitions.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_BOX_DEFINITIONS_H_
-#define MEDIA_MP4_BOX_DEFINITIONS_H_
+#ifndef MEDIA_FORMATS_MP4_BOX_DEFINITIONS_H_
+#define MEDIA_FORMATS_MP4_BOX_DEFINITIONS_H_
#include <string>
#include <vector>
@@ -11,10 +11,10 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/base/media_export.h"
-#include "media/mp4/aac.h"
-#include "media/mp4/avc.h"
-#include "media/mp4/box_reader.h"
-#include "media/mp4/fourccs.h"
+#include "media/formats/mp4/aac.h"
+#include "media/formats/mp4/avc.h"
+#include "media/formats/mp4/box_reader.h"
+#include "media/formats/mp4/fourccs.h"
namespace media {
namespace mp4 {
@@ -26,6 +26,10 @@ enum TrackType {
kHint
};
+enum SampleFlags {
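+  // Bit 16 (sample_is_non_sync_sample) of the ISO/IEC 14496-12 sample flags.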
+ kSampleIsNonSyncSample = 0x10000
+};
+
#define DECLARE_BOX_METHODS(T) \
T(); \
virtual ~T(); \
@@ -150,6 +154,13 @@ struct MEDIA_EXPORT HandlerReference : Box {
struct MEDIA_EXPORT AVCDecoderConfigurationRecord : Box {
DECLARE_BOX_METHODS(AVCDecoderConfigurationRecord);
+ // Parses AVCDecoderConfigurationRecord data encoded in |data|.
+ // Note: This method is intended to parse data outside the MP4StreamParser
+ // context and therefore the box header is not expected to be present
+ // in |data|.
+ // Returns true if |data| was successfully parsed.
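+  //
+  // Usage sketch (|avcc_payload| and |avcc_size| are caller-supplied):
+  //   AVCDecoderConfigurationRecord record;
+  //   bool ok = record.Parse(avcc_payload, avcc_size);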
+ bool Parse(const uint8* data, int data_size);
+
uint8 version;
uint8 profile_indication;
uint8 profile_compatibility;
@@ -161,6 +172,9 @@ struct MEDIA_EXPORT AVCDecoderConfigurationRecord : Box {
std::vector<SPS> sps_list;
std::vector<PPS> pps_list;
+
+ private:
+ bool ParseInternal(BufferReader* reader, const LogCB& log_cb);
};
struct MEDIA_EXPORT PixelAspectRatioBox : Box {
@@ -215,6 +229,17 @@ struct MEDIA_EXPORT SampleDescription : Box {
std::vector<AudioSampleEntry> audio_entries;
};
+struct MEDIA_EXPORT SyncSample : Box {
+ DECLARE_BOX_METHODS(SyncSample);
+
+  // Returns true if the |k|th sample is a sync sample (i.e., a random
+  // access point), and false otherwise.
+ bool IsSyncSample(size_t k) const;
+
+ bool is_present;
+ std::vector<uint32> entries;
+};
+
struct MEDIA_EXPORT SampleTable : Box {
DECLARE_BOX_METHODS(SampleTable);
@@ -223,6 +248,7 @@ struct MEDIA_EXPORT SampleTable : Box {
// includes the 'stts', 'stsc', and 'stco' boxes, which must contain no
// samples in order to be compliant files.
SampleDescription description;
+ SyncSample sync_sample;
};
struct MEDIA_EXPORT MediaHeader : Box {
@@ -348,6 +374,40 @@ class MEDIA_EXPORT IndependentAndDisposableSamples : public Box {
std::vector<SampleDependsOn> sample_depends_on_;
};
+struct MEDIA_EXPORT CencSampleEncryptionInfoEntry {
+ CencSampleEncryptionInfoEntry();
+ ~CencSampleEncryptionInfoEntry();
+
+ bool is_encrypted;
+ uint8 iv_size;
+ std::vector<uint8> key_id;
+};
+
+struct MEDIA_EXPORT SampleGroupDescription : Box { // 'sgpd'.
+ DECLARE_BOX_METHODS(SampleGroupDescription);
+
+ uint32 grouping_type;
+ std::vector<CencSampleEncryptionInfoEntry> entries;
+};
+
+struct MEDIA_EXPORT SampleToGroupEntry {
+ enum GroupDescriptionIndexBase {
+ kTrackGroupDescriptionIndexBase = 0,
+ kFragmentGroupDescriptionIndexBase = 0x10000,
+ };
+
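+  // Per ISO/IEC 14496-12, a group_description_index value above
+  // kFragmentGroupDescriptionIndexBase refers to the 'sgpd' table carried in
+  // the current movie fragment; smaller nonzero values refer to the table in
+  // the track's sample table.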
+ uint32 sample_count;
+ uint32 group_description_index;
+};
+
+struct MEDIA_EXPORT SampleToGroup : Box { // 'sbgp'.
+ DECLARE_BOX_METHODS(SampleToGroup);
+
+ uint32 grouping_type;
+ uint32 grouping_type_parameter; // Version 1 only.
+ std::vector<SampleToGroupEntry> entries;
+};
+
struct MEDIA_EXPORT TrackFragment : Box {
DECLARE_BOX_METHODS(TrackFragment);
@@ -357,6 +417,8 @@ struct MEDIA_EXPORT TrackFragment : Box {
SampleAuxiliaryInformationOffset auxiliary_offset;
SampleAuxiliaryInformationSize auxiliary_size;
IndependentAndDisposableSamples sdtp;
+ SampleGroupDescription sample_group_description;
+ SampleToGroup sample_to_group;
};
struct MEDIA_EXPORT MovieFragment : Box {
@@ -372,4 +434,4 @@ struct MEDIA_EXPORT MovieFragment : Box {
} // namespace mp4
} // namespace media
-#endif // MEDIA_MP4_BOX_DEFINITIONS_H_
+#endif // MEDIA_FORMATS_MP4_BOX_DEFINITIONS_H_
diff --git a/chromium/media/mp4/box_reader.cc b/chromium/media/formats/mp4/box_reader.cc
index c788772035f..fd81d137511 100644
--- a/chromium/media/mp4/box_reader.cc
+++ b/chromium/media/formats/mp4/box_reader.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/box_reader.h"
+#include "media/formats/mp4/box_reader.h"
#include <string.h>
#include <algorithm>
@@ -11,8 +11,8 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "media/mp4/box_definitions.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/rcheck.h"
namespace media {
namespace mp4 {
@@ -155,11 +155,13 @@ bool BoxReader::IsValidTopLevelBox(const FourCC& type,
case FOURCC_SIDX:
case FOURCC_SSIX:
case FOURCC_PRFT:
+ case FOURCC_UUID:
+ case FOURCC_EMSG:
return true;
default:
// Hex is used to show nonprintable characters and aid in debugging
- MEDIA_LOG(log_cb) << "Unrecognized top-level box type 0x"
- << std::hex << type;
+ MEDIA_LOG(log_cb) << "Unrecognized top-level box type "
+ << FourCCToString(type);
return false;
}
}
@@ -181,6 +183,12 @@ bool BoxReader::ScanChildren() {
return !err && pos() == size();
}
+bool BoxReader::HasChild(Box* child) {
+ DCHECK(scanned_);
+ DCHECK(child);
+ return children_.count(child->BoxType()) > 0;
+}
+
bool BoxReader::ReadChild(Box* child) {
DCHECK(scanned_);
FourCC child_type = child->BoxType();
diff --git a/chromium/media/mp4/box_reader.h b/chromium/media/formats/mp4/box_reader.h
index 43f11d56fe6..3360204ed54 100644
--- a/chromium/media/mp4/box_reader.h
+++ b/chromium/media/formats/mp4/box_reader.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_BOX_READER_H_
-#define MEDIA_MP4_BOX_READER_H_
+#ifndef MEDIA_FORMATS_MP4_BOX_READER_H_
+#define MEDIA_FORMATS_MP4_BOX_READER_H_
#include <map>
#include <vector>
@@ -12,8 +12,8 @@
#include "base/logging.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
-#include "media/mp4/fourccs.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/fourccs.h"
+#include "media/formats/mp4/rcheck.h"
namespace media {
namespace mp4 {
@@ -29,7 +29,10 @@ struct MEDIA_EXPORT Box {
class MEDIA_EXPORT BufferReader {
public:
BufferReader(const uint8* buf, const int size)
- : buf_(buf), size_(size), pos_(0) {}
+ : buf_(buf), size_(size), pos_(0) {
+ CHECK(buf);
+ CHECK_GE(size, 0);
+ }
bool HasBytes(int count) { return (pos() + count <= size()); }
@@ -105,6 +108,9 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
// buffer position. Must be called before any of the *Child functions work.
bool ScanChildren() WARN_UNUSED_RESULT;
+  // Returns true if a child with type |child->BoxType()| exists.
+ bool HasChild(Box* child) WARN_UNUSED_RESULT;
+
// Read exactly one child box from the set of children. The type of the child
// will be determined by the BoxType() method of |child|.
bool ReadChild(Box* child) WARN_UNUSED_RESULT;
@@ -136,6 +142,8 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
uint8 version() const { return version_; }
uint32 flags() const { return flags_; }
+ const LogCB& log_cb() const { return log_cb_; }
+
private:
BoxReader(const uint8* buf, const int size, const LogCB& log_cb);
@@ -211,4 +219,4 @@ bool BoxReader::ReadAllChildren(std::vector<T>* children) {
} // namespace mp4
} // namespace media
-#endif // MEDIA_MP4_BOX_READER_H_
+#endif // MEDIA_FORMATS_MP4_BOX_READER_H_
diff --git a/chromium/media/mp4/box_reader_unittest.cc b/chromium/media/formats/mp4/box_reader_unittest.cc
index 99d9975fd2d..77729b61be7 100644
--- a/chromium/media/mp4/box_reader_unittest.cc
+++ b/chromium/media/formats/mp4/box_reader_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,8 +7,8 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "media/mp4/box_reader.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/box_reader.h"
+#include "media/formats/mp4/rcheck.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -181,12 +181,9 @@ TEST_F(BoxReaderTest, ReadAllChildrenTest) {
EXPECT_EQ(kids[0].val, 0xdeadbeef); // Ensure order is preserved
}
-TEST_F(BoxReaderTest, SkippingBloc) {
- static const uint8 kData[] = {
- 0x00, 0x00, 0x00, 0x09, 'b', 'l', 'o', 'c', 0x00
- };
+static void TestTopLevelBox(const uint8* data, int size, uint32 fourCC) {
- std::vector<uint8> buf(kData, kData + sizeof(kData));
+ std::vector<uint8> buf(data, data + size);
bool err;
scoped_ptr<BoxReader> reader(
@@ -194,7 +191,44 @@ TEST_F(BoxReaderTest, SkippingBloc) {
EXPECT_FALSE(err);
EXPECT_TRUE(reader);
- EXPECT_EQ(FOURCC_BLOC, reader->type());
+ EXPECT_EQ(fourCC, reader->type());
+ EXPECT_EQ(reader->size(), size);
+}
+
+TEST_F(BoxReaderTest, SkippingBloc) {
+ static const uint8 kData[] = {
+ 0x00, 0x00, 0x00, 0x09, 'b', 'l', 'o', 'c', 0x00
+ };
+
+ TestTopLevelBox(kData, sizeof(kData), FOURCC_BLOC);
+}
+
+TEST_F(BoxReaderTest, SkippingEmsg) {
+ static const uint8 kData[] = {
+ 0x00, 0x00, 0x00, 0x24, 'e', 'm', 's', 'g',
+ 0x00, // version = 0
+ 0x00, 0x00, 0x00, // flags = 0
+ 0x61, 0x00, // scheme_id_uri = "a"
+ 0x61, 0x00, // value = "a"
+ 0x00, 0x00, 0x00, 0x01, // timescale = 1
+ 0x00, 0x00, 0x00, 0x02, // presentation_time_delta = 2
+ 0x00, 0x00, 0x00, 0x03, // event_duration = 3
+ 0x00, 0x00, 0x00, 0x04, // id = 4
+ 0x05, 0x06, 0x07, 0x08, // message_data[4] = 0x05060708
+ };
+
+ TestTopLevelBox(kData, sizeof(kData), FOURCC_EMSG);
+}
+
+TEST_F(BoxReaderTest, SkippingUuid) {
+ static const uint8 kData[] = {
+ 0x00, 0x00, 0x00, 0x19, 'u', 'u', 'i', 'd',
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, // usertype
+ 0x00,
+ };
+
+ TestTopLevelBox(kData, sizeof(kData), FOURCC_UUID);
}
} // namespace mp4
diff --git a/chromium/media/mp4/cenc.cc b/chromium/media/formats/mp4/cenc.cc
index 10f3a2a696a..001b6d88161 100644
--- a/chromium/media/mp4/cenc.cc
+++ b/chromium/media/formats/mp4/cenc.cc
@@ -1,13 +1,13 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/cenc.h"
+#include "media/formats/mp4/cenc.h"
#include <cstring>
-#include "media/mp4/box_reader.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/box_reader.h"
+#include "media/formats/mp4/rcheck.h"
namespace media {
namespace mp4 {
diff --git a/chromium/media/mp4/cenc.h b/chromium/media/formats/mp4/cenc.h
index e42709149f2..9eb3358a175 100644
--- a/chromium/media/mp4/cenc.h
+++ b/chromium/media/formats/mp4/cenc.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_CENC_H_
-#define MEDIA_MP4_CENC_H_
+#ifndef MEDIA_FORMATS_MP4_CENC_H_
+#define MEDIA_FORMATS_MP4_CENC_H_
#include <vector>
@@ -29,4 +29,4 @@ struct FrameCENCInfo {
} // namespace mp4
} // namespace media
-#endif // MEDIA_MP4_CENC_H_
+#endif // MEDIA_FORMATS_MP4_CENC_H_
diff --git a/chromium/media/mp4/es_descriptor.cc b/chromium/media/formats/mp4/es_descriptor.cc
index 8517b82cbda..e1da28a34e5 100644
--- a/chromium/media/mp4/es_descriptor.cc
+++ b/chromium/media/formats/mp4/es_descriptor.cc
@@ -1,11 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/es_descriptor.h"
+#include "media/formats/mp4/es_descriptor.h"
#include "media/base/bit_reader.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/rcheck.h"
// The elementary stream size is specified by up to 4 bytes.
// The MSB of a byte indicates if there are more bytes for the size.
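// For example, the bytes 0x81 0x02 encode a size of (0x01 << 7) | 0x02 = 130.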
diff --git a/chromium/media/mp4/es_descriptor.h b/chromium/media/formats/mp4/es_descriptor.h
index 36e1bf2a3b1..1df452682ed 100644
--- a/chromium/media/mp4/es_descriptor.h
+++ b/chromium/media/formats/mp4/es_descriptor.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_ES_DESCRIPTOR_H_
-#define MEDIA_MP4_ES_DESCRIPTOR_H_
+#ifndef MEDIA_FORMATS_MP4_ES_DESCRIPTOR_H_
+#define MEDIA_FORMATS_MP4_ES_DESCRIPTOR_H_
#include <vector>
@@ -21,8 +21,7 @@ namespace mp4 {
enum ObjectType {
kForbidden = 0,
kISO_14496_3 = 0x40, // MPEG4 AAC
- kISO_13818_7_AAC_LC = 0x67, // MPEG2 AAC-LC
- kEAC3 = 0xa6 // Dolby Digital Plus
+ kISO_13818_7_AAC_LC = 0x67 // MPEG2 AAC-LC
};
// This class parses the object type and decoder specific information from an
@@ -59,4 +58,4 @@ class MEDIA_EXPORT ESDescriptor {
} // namespace media
-#endif // MEDIA_MP4_ES_DESCRIPTOR_H_
+#endif // MEDIA_FORMATS_MP4_ES_DESCRIPTOR_H_
diff --git a/chromium/media/mp4/es_descriptor_unittest.cc b/chromium/media/formats/mp4/es_descriptor_unittest.cc
index c3a39fbefc3..6334f5bd1c5 100644
--- a/chromium/media/mp4/es_descriptor_unittest.cc
+++ b/chromium/media/formats/mp4/es_descriptor_unittest.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/es_descriptor.h"
+#include "media/formats/mp4/es_descriptor.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/mp4/fourccs.h b/chromium/media/formats/mp4/fourccs.h
index 01cce2bcf1b..d9086fa8d74 100644
--- a/chromium/media/mp4/fourccs.h
+++ b/chromium/media/formats/mp4/fourccs.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_FOURCCS_H_
-#define MEDIA_MP4_FOURCCS_H_
+#ifndef MEDIA_FORMATS_MP4_FOURCCS_H_
+#define MEDIA_FORMATS_MP4_FOURCCS_H_
#include <string>
@@ -20,8 +20,8 @@ enum FourCC {
FOURCC_CO64 = 0x636f3634,
FOURCC_CTTS = 0x63747473,
FOURCC_DINF = 0x64696e66,
- FOURCC_EAC3 = 0x65632d33,
FOURCC_EDTS = 0x65647473,
+ FOURCC_EMSG = 0x656d7367,
FOURCC_ELST = 0x656c7374,
FOURCC_ENCA = 0x656e6361,
FOURCC_ENCV = 0x656e6376,
@@ -53,9 +53,12 @@ enum FourCC {
FOURCC_PSSH = 0x70737368,
FOURCC_SAIO = 0x7361696f,
FOURCC_SAIZ = 0x7361697a,
+ FOURCC_SBGP = 0x73626770,
FOURCC_SCHI = 0x73636869,
FOURCC_SCHM = 0x7363686d,
FOURCC_SDTP = 0x73647470,
+ FOURCC_SEIG = 0x73656967,
+ FOURCC_SGPD = 0x73677064,
FOURCC_SIDX = 0x73696478,
FOURCC_SINF = 0x73696e66,
FOURCC_SKIP = 0x736b6970,
@@ -98,4 +101,4 @@ const inline std::string FourCCToString(FourCC fourcc) {
} // namespace mp4
} // namespace media
-#endif // MEDIA_MP4_FOURCCS_H_
+#endif // MEDIA_FORMATS_MP4_FOURCCS_H_
diff --git a/chromium/media/mp4/mp4_stream_parser.cc b/chromium/media/formats/mp4/mp4_stream_parser.cc
index db1b59b4572..0eb15d9f2ce 100644
--- a/chromium/media/mp4/mp4_stream_parser.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/mp4_stream_parser.h"
+#include "media/formats/mp4/mp4_stream_parser.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
@@ -13,10 +13,11 @@
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
-#include "media/mp4/box_definitions.h"
-#include "media/mp4/box_reader.h"
-#include "media/mp4/es_descriptor.h"
-#include "media/mp4/rcheck.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/box_reader.h"
+#include "media/formats/mp4/es_descriptor.h"
+#include "media/formats/mp4/rcheck.h"
+#include "media/formats/mpeg/adts_constants.h"
namespace media {
namespace mp4 {
@@ -44,7 +45,7 @@ MP4StreamParser::~MP4StreamParser() {}
void MP4StreamParser::Init(const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& /* text_cb */ ,
+ bool /* ignore_text_tracks */ ,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -91,18 +92,33 @@ bool MP4StreamParser::Parse(const uint8* buf, int size) {
BufferQueue audio_buffers;
BufferQueue video_buffers;
- bool result, err = false;
+ bool result = false;
+ bool err = false;
do {
- if (state_ == kParsingBoxes) {
- result = ParseBox(&err);
- } else {
- DCHECK_EQ(kEmittingSamples, state_);
- result = EnqueueSample(&audio_buffers, &video_buffers, &err);
- if (result) {
- int64 max_clear = runs_->GetMaxClearOffset() + moof_head_;
- err = !ReadAndDiscardMDATsUntil(max_clear);
- }
+ switch (state_) {
+ case kWaitingForInit:
+ case kError:
+ NOTREACHED();
+ return false;
+
+ case kParsingBoxes:
+ result = ParseBox(&err);
+ break;
+
+ case kWaitingForSampleData:
+ result = HaveEnoughDataToEnqueueSamples();
+ if (result)
+ ChangeState(kEmittingSamples);
+ break;
+
+ case kEmittingSamples:
+ result = EnqueueSample(&audio_buffers, &video_buffers, &err);
+ if (result) {
+ int64 max_clear = runs_->GetMaxClearOffset() + moof_head_;
+ err = !ReadAndDiscardMDATsUntil(max_clear);
+ }
+ break;
}
} while (result && !err);
@@ -197,7 +213,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
const AudioSampleEntry& entry = samp_descr.audio_entries[desc_idx];
const AAC& aac = entry.esds.aac;
- if (!(entry.format == FOURCC_MP4A || entry.format == FOURCC_EAC3 ||
+ if (!(entry.format == FOURCC_MP4A ||
(entry.format == FOURCC_ENCA &&
entry.sinf.format.format == FOURCC_MP4A))) {
MEDIA_LOG(log_cb_) << "Unsupported audio format 0x"
@@ -206,10 +222,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
}
uint8 audio_type = entry.esds.object_type;
- DVLOG(1) << "audio_type " << std::hex << audio_type;
- if (audio_type == kForbidden && entry.format == FOURCC_EAC3) {
- audio_type = kEAC3;
- }
+ DVLOG(1) << "audio_type " << std::hex << static_cast<int>(audio_type);
if (audio_object_types_.find(audio_type) == audio_object_types_.end()) {
MEDIA_LOG(log_cb_) << "audio object type 0x" << std::hex << audio_type
<< " does not match what is specified in the"
@@ -230,10 +243,6 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
#if defined(OS_ANDROID)
extra_data = aac.codec_specific_data();
#endif
- } else if (audio_type == kEAC3) {
- codec = kCodecEAC3;
- channel_layout = GuessChannelLayout(entry.channelcount);
- sample_per_second = entry.samplerate;
} else {
MEDIA_LOG(log_cb_) << "Unsupported audio object type 0x" << std::hex
<< audio_type << " in esds.";
@@ -258,7 +267,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
codec, sample_format, channel_layout, sample_per_second,
extra_data.size() ? &extra_data[0] : NULL, extra_data.size(),
is_audio_track_encrypted_, false, base::TimeDelta(),
- base::TimeDelta());
+ 0);
has_audio_ = true;
audio_track_id_ = track->header.track_id;
}
@@ -286,7 +295,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
coded_size, visible_rect, natural_size,
// No decoder-specific buffer needed for AVC;
// SPS/PPS are embedded in the video stream
- NULL, 0, is_video_track_encrypted_, true);
+ NULL, 0, is_video_track_encrypted_, false);
has_video_ = true;
video_track_id_ = track->header.track_id;
}
@@ -294,20 +303,18 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
RCHECK(config_cb_.Run(audio_config, video_config, TextTrackConfigMap()));
- base::TimeDelta duration;
+ StreamParser::InitParameters params(kInfiniteDuration());
if (moov_->extends.header.fragment_duration > 0) {
- duration = TimeDeltaFromRational(moov_->extends.header.fragment_duration,
- moov_->header.timescale);
+ params.duration = TimeDeltaFromRational(
+ moov_->extends.header.fragment_duration, moov_->header.timescale);
} else if (moov_->header.duration > 0 &&
moov_->header.duration != kuint64max) {
- duration = TimeDeltaFromRational(moov_->header.duration,
- moov_->header.timescale);
- } else {
- duration = kInfiniteDuration();
+ params.duration =
+ TimeDeltaFromRational(moov_->header.duration, moov_->header.timescale);
}
if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(true, duration);
+ base::ResetAndReturn(&init_cb_).Run(true, params);
EmitNeedKeyIfNecessary(moov_->pssh);
return true;
@@ -320,9 +327,10 @@ bool MP4StreamParser::ParseMoof(BoxReader* reader) {
if (!runs_)
runs_.reset(new TrackRunIterator(moov_.get(), log_cb_));
RCHECK(runs_->Init(moof));
+ RCHECK(ComputeHighestEndOffset(moof));
EmitNeedKeyIfNecessary(moof.pssh);
new_segment_cb_.Run();
- ChangeState(kEmittingSamples);
+ ChangeState(kWaitingForSampleData);
return true;
}
@@ -371,13 +379,11 @@ bool MP4StreamParser::PrepareAVCBuffer(
// If this is a keyframe, we (re-)inject SPS and PPS headers at the start of
// a frame. If subsample info is present, we also update the clear byte
// count for that first subsample.
- std::vector<uint8> param_sets;
- RCHECK(AVC::ConvertConfigToAnnexB(avc_config, &param_sets));
- frame_buf->insert(frame_buf->begin(),
- param_sets.begin(), param_sets.end());
- if (!subsamples->empty())
- (*subsamples)[0].clear_bytes += param_sets.size();
+ RCHECK(AVC::InsertParamSetsAnnexB(avc_config, frame_buf, subsamples));
}
+
+ // TODO(acolwell): Improve IsValidAnnexB() so it can handle encrypted content.
+ DCHECK(runs_->is_encrypted() || AVC::IsValidAnnexB(*frame_buf));
return true;
}
@@ -391,11 +397,11 @@ bool MP4StreamParser::PrepareAACBuffer(
// not required to use subsample encryption, so we may need to add an entry.
if (subsamples->empty()) {
SubsampleEntry entry;
- entry.clear_bytes = AAC::kADTSHeaderSize;
- entry.cypher_bytes = frame_buf->size() - AAC::kADTSHeaderSize;
+ entry.clear_bytes = kADTSHeaderMinSize;
+ entry.cypher_bytes = frame_buf->size() - kADTSHeaderMinSize;
subsamples->push_back(entry);
} else {
- (*subsamples)[0].clear_bytes += AAC::kADTSHeaderSize;
+ (*subsamples)[0].clear_bytes += kADTSHeaderMinSize;
}
return true;
}
@@ -403,6 +409,8 @@ bool MP4StreamParser::PrepareAACBuffer(
bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
BufferQueue* video_buffers,
bool* err) {
+ DCHECK_EQ(state_, kEmittingSamples);
+
if (!runs_->IsRunValid()) {
// Flush any buffers we've gotten in this chunk so that buffers don't
// cross NewSegment() calls
@@ -410,7 +418,7 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
if (*err)
return false;
- // Remain in kEnqueueingSamples state, discarding data, until the end of
+ // Remain in kEmittingSamples state, discarding data, until the end of
// the current 'mdat' box has been appended to the queue.
if (!queue_.Trim(mdat_tail_))
return false;
@@ -436,8 +444,10 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
bool video = has_video_ && video_track_id_ == runs_->track_id();
// Skip this entire track if it's not one we're interested in
- if (!audio && !video)
+ if (!audio && !video) {
runs_->AdvanceRun();
+ return true;
+ }
// Attempt to cache the auxiliary information first. Aux info is usually
// placed in a contiguous block before the sample data, rather than being
@@ -493,7 +503,6 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
decrypt_config.reset(new DecryptConfig(
decrypt_config->key_id(),
decrypt_config->iv(),
- decrypt_config->data_offset(),
subsamples));
}
// else, use the existing config.
@@ -502,12 +511,22 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
// The media pipeline requires a DecryptConfig with an empty |iv|.
// TODO(ddorwin): Refactor so we do not need a fake key ID ("1");
decrypt_config.reset(
- new DecryptConfig("1", "", 0, std::vector<SubsampleEntry>()));
+ new DecryptConfig("1", "", std::vector<SubsampleEntry>()));
}
+ StreamParserBuffer::Type buffer_type = audio ? DemuxerStream::AUDIO :
+ DemuxerStream::VIDEO;
+
+ // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
+ // type and allow multiple tracks for same media type, if applicable. See
+ // https://crbug.com/341581.
+ //
+ // NOTE: MPEG's "random access point" concept is equivalent to the
+ // downstream code's "is keyframe" concept.
scoped_refptr<StreamParserBuffer> stream_buf =
- StreamParserBuffer::CopyFrom(&frame_buf[0], frame_buf.size(),
- runs_->is_keyframe());
+ StreamParserBuffer::CopyFrom(&frame_buf[0], frame_buf.size(),
+ runs_->is_random_access_point(),
+ buffer_type, 0);
if (decrypt_config)
stream_buf->set_decrypt_config(decrypt_config.Pass());
@@ -518,6 +537,7 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
DVLOG(3) << "Pushing frame: aud=" << audio
<< ", key=" << runs_->is_keyframe()
+ << ", rap=" << runs_->is_random_access_point()
<< ", dur=" << runs_->duration().InMilliseconds()
<< ", dts=" << runs_->dts().InMilliseconds()
<< ", cts=" << runs_->cts().InMilliseconds()
@@ -538,17 +558,21 @@ bool MP4StreamParser::SendAndFlushSamples(BufferQueue* audio_buffers,
if (audio_buffers->empty() && video_buffers->empty())
return true;
- bool success = new_buffers_cb_.Run(*audio_buffers, *video_buffers);
+ TextBufferQueueMap empty_text_map;
+ bool success = new_buffers_cb_.Run(*audio_buffers,
+ *video_buffers,
+ empty_text_map);
audio_buffers->clear();
video_buffers->clear();
return success;
}
-bool MP4StreamParser::ReadAndDiscardMDATsUntil(const int64 offset) {
+bool MP4StreamParser::ReadAndDiscardMDATsUntil(int64 max_clear_offset) {
bool err = false;
- while (mdat_tail_ < offset) {
- const uint8* buf;
- int size;
+ int64 upper_bound = std::min(max_clear_offset, queue_.tail());
+ while (mdat_tail_ < upper_bound) {
+ const uint8* buf = NULL;
+ int size = 0;
queue_.PeekAt(mdat_tail_, &buf, &size);
FourCC type;
@@ -563,7 +587,7 @@ bool MP4StreamParser::ReadAndDiscardMDATsUntil(const int64 offset) {
}
mdat_tail_ += box_sz;
}
- queue_.Trim(std::min(mdat_tail_, offset));
+ queue_.Trim(std::min(mdat_tail_, upper_bound));
return !err;
}
@@ -572,5 +596,40 @@ void MP4StreamParser::ChangeState(State new_state) {
state_ = new_state;
}
+bool MP4StreamParser::HaveEnoughDataToEnqueueSamples() {
+ DCHECK_EQ(state_, kWaitingForSampleData);
+ // For muxed content, make sure we have data up to |highest_end_offset_|
+ // so we can ensure proper enqueuing behavior. Otherwise assume we have enough
+  // so we can ensure proper enqueuing behavior. Otherwise, assume we have
+  // enough data and let per-sample offset checks meter sample enqueuing.
+ // muxed content.
+ return !(has_audio_ && has_video_ &&
+ queue_.tail() < highest_end_offset_ + moof_head_);
+}
+
+bool MP4StreamParser::ComputeHighestEndOffset(const MovieFragment& moof) {
+ highest_end_offset_ = 0;
+
+ TrackRunIterator runs(moov_.get(), log_cb_);
+ RCHECK(runs.Init(moof));
+
+ while (runs.IsRunValid()) {
+ int64 aux_info_end_offset = runs.aux_info_offset() + runs.aux_info_size();
+ if (aux_info_end_offset > highest_end_offset_)
+ highest_end_offset_ = aux_info_end_offset;
+
+ while (runs.IsSampleValid()) {
+ int64 sample_end_offset = runs.sample_offset() + runs.sample_size();
+ if (sample_end_offset > highest_end_offset_)
+ highest_end_offset_ = sample_end_offset;
+
+ runs.AdvanceSample();
+ }
+ runs.AdvanceRun();
+ }
+
+ return true;
+}
+
} // namespace mp4
} // namespace media
diff --git a/chromium/media/mp4/mp4_stream_parser.h b/chromium/media/formats/mp4/mp4_stream_parser.h
index 946513f0445..bf7bfa01284 100644
--- a/chromium/media/mp4/mp4_stream_parser.h
+++ b/chromium/media/formats/mp4/mp4_stream_parser.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_MP4_STREAM_PARSER_H_
-#define MEDIA_MP4_MP4_STREAM_PARSER_H_
+#ifndef MEDIA_FORMATS_MP4_MP4_STREAM_PARSER_H_
+#define MEDIA_FORMATS_MP4_MP4_STREAM_PARSER_H_
#include <set>
#include <vector>
@@ -14,8 +14,8 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/stream_parser.h"
-#include "media/mp4/offset_byte_queue.h"
-#include "media/mp4/track_run_iterator.h"
+#include "media/formats/common/offset_byte_queue.h"
+#include "media/formats/mp4/track_run_iterator.h"
namespace media {
namespace mp4 {
@@ -30,7 +30,7 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
virtual void Init(const InitCB& init_cb, const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ bool ignore_text_tracks,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -42,6 +42,7 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
enum State {
kWaitingForInit,
kParsingBoxes,
+ kWaitingForSampleData,
kEmittingSamples,
kError
};
@@ -56,10 +57,12 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
// To retain proper framing, each 'mdat' atom must be read; to limit memory
// usage, the atom's data needs to be discarded incrementally as frames are
// extracted from the stream. This function discards data from the stream up
- // to |offset|, updating the |mdat_tail_| value so that framing can be
- // retained after all 'mdat' information has been read.
+ // to |max_clear_offset|, updating the |mdat_tail_| value so that framing can
+ // be retained after all 'mdat' information has been read. |max_clear_offset|
+ // is the upper bound on what can be removed from |queue_|. Anything below
+ // this offset is no longer needed by the parser.
// Returns 'true' on success, 'false' if there was an error.
- bool ReadAndDiscardMDATsUntil(const int64 offset);
+ bool ReadAndDiscardMDATsUntil(int64 max_clear_offset);
void ChangeState(State new_state);
@@ -78,6 +81,15 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
void Reset();
+ // Checks to see if we have enough data in |queue_| to transition to
+ // kEmittingSamples and start enqueuing samples.
+ bool HaveEnoughDataToEnqueueSamples();
+
+ // Sets |highest_end_offset_| based on the data in |moov_|
+ // and |moof|. Returns true if |highest_end_offset_| was successfully
+ // computed.
+ bool ComputeHighestEndOffset(const MovieFragment& moof);
+
State state_;
InitCB init_cb_;
NewConfigCB config_cb_;
@@ -99,6 +111,11 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
// Valid iff it is greater than the head of the queue.
int64 mdat_tail_;
+ // The highest end offset in the current moof. This offset is
+ // relative to |moof_head_|. This value is used to make sure we have collected
+ // enough bytes to parse all samples and aux_info in the current moof.
+ int64 highest_end_offset_;
+
scoped_ptr<mp4::Movie> moov_;
scoped_ptr<mp4::TrackRunIterator> runs_;
@@ -118,4 +135,4 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
} // namespace mp4
} // namespace media
-#endif // MEDIA_MP4_MP4_STREAM_PARSER_H_
+#endif // MEDIA_FORMATS_MP4_MP4_STREAM_PARSER_H_
diff --git a/chromium/media/mp4/mp4_stream_parser_unittest.cc b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
index dd394c4f17e..8805c05c3a7 100644
--- a/chromium/media/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -16,8 +16,8 @@
#include "media/base/test_data_util.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
-#include "media/mp4/es_descriptor.h"
-#include "media/mp4/mp4_stream_parser.h"
+#include "media/formats/mp4/es_descriptor.h"
+#include "media/formats/mp4/mp4_stream_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
using base::TimeDelta;
@@ -31,7 +31,8 @@ static const char kMp4InitDataType[] = "video/mp4";
class MP4StreamParserTest : public testing::Test {
public:
MP4StreamParserTest()
- : configs_received_(false) {
+ : configs_received_(false),
+ lower_bound_(base::TimeDelta::Max()) {
std::set<int> audio_object_types;
audio_object_types.insert(kISO_14496_3);
parser_.reset(new MP4StreamParser(audio_object_types, false));
@@ -40,6 +41,7 @@ class MP4StreamParserTest : public testing::Test {
protected:
scoped_ptr<MP4StreamParser> parser_;
bool configs_received_;
+ base::TimeDelta lower_bound_;
bool AppendData(const uint8* data, size_t length) {
return parser_->Parse(data, length);
@@ -58,9 +60,10 @@ class MP4StreamParserTest : public testing::Test {
return true;
}
- void InitF(bool init_ok, base::TimeDelta duration) {
+ void InitF(bool init_ok, const StreamParser::InitParameters& params) {
DVLOG(1) << "InitF: ok=" << init_ok
- << ", dur=" << duration.InMilliseconds();
+ << ", dur=" << params.duration.InMilliseconds()
+ << ", autoTimestampOffset=" << params.auto_update_timestamp_offset;
}
bool NewConfigF(const AudioDecoderConfig& ac,
@@ -72,7 +75,6 @@ class MP4StreamParserTest : public testing::Test {
return true;
}
-
void DumpBuffers(const std::string& label,
const StreamParser::BufferQueue& buffers) {
DVLOG(2) << "DumpBuffers: " << label << " size " << buffers.size();
@@ -85,9 +87,34 @@ class MP4StreamParserTest : public testing::Test {
}
bool NewBuffersF(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers) {
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map) {
DumpBuffers("audio_buffers", audio_buffers);
DumpBuffers("video_buffers", video_buffers);
+
+ // TODO(wolenetz/acolwell): Add text track support to more MSE parsers. See
+ // http://crbug.com/336926.
+ if (!text_map.empty())
+ return false;
+
+    // Find the second-highest timestamp so that we know what the timestamps
+    // on the next set of buffers must be greater than or equal to.
+ base::TimeDelta audio = !audio_buffers.empty() ?
+ audio_buffers.back()->GetDecodeTimestamp() : kNoTimestamp();
+ base::TimeDelta video = !video_buffers.empty() ?
+ video_buffers.back()->GetDecodeTimestamp() : kNoTimestamp();
+ base::TimeDelta second_highest_timestamp =
+ (audio == kNoTimestamp() ||
+ (video != kNoTimestamp() && audio > video)) ? video : audio;
+
+ DCHECK(second_highest_timestamp != kNoTimestamp());
+
+ if (lower_bound_ != kNoTimestamp() &&
+ second_highest_timestamp < lower_bound_) {
+ return false;
+ }
+
+ lower_bound_ = second_highest_timestamp;
return true;
}
@@ -100,10 +127,12 @@ class MP4StreamParserTest : public testing::Test {
void NewSegmentF() {
DVLOG(1) << "NewSegmentF";
+ lower_bound_ = kNoTimestamp();
}
void EndOfSegmentF() {
DVLOG(1) << "EndOfSegmentF()";
+ lower_bound_ = base::TimeDelta::Max();
}
void InitializeParser() {
@@ -111,7 +140,7 @@ class MP4StreamParserTest : public testing::Test {
base::Bind(&MP4StreamParserTest::InitF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewConfigF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewBuffersF, base::Unretained(this)),
- StreamParser::NewTextBuffersCB(),
+ true,
base::Bind(&MP4StreamParserTest::KeyNeededF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewSegmentF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::EndOfSegmentF,
@@ -197,6 +226,23 @@ TEST_F(MP4StreamParserTest, NoMoovAfterFlush) {
512));
}
+// Test an invalid file where there are encrypted samples, but
+// SampleAuxiliaryInformation{Sizes|Offsets}Box (saiz|saio) are missing.
+// The parser should fail instead of crash. See http://crbug.com/361347
+TEST_F(MP4StreamParserTest, MissingSampleAuxInfo) {
+ InitializeParser();
+
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4");
+ EXPECT_FALSE(AppendDataInPieces(buffer->data(), buffer->data_size(), 512));
+}
+
+// Test a file where all video samples start with an Access Unit
+// Delimiter (AUD) NALU.
+TEST_F(MP4StreamParserTest, VideoSamplesStartWithAUDs) {
+ ParseMP4File("bear-1280x720-av_with-aud-nalus_frag.mp4", 512);
+}
+
// TODO(strobe): Create and test media which uses CENC auxiliary info stored
// inside a private box
diff --git a/chromium/media/mp4/rcheck.h b/chromium/media/formats/mp4/rcheck.h
index 81650560678..fb0f8f27d4e 100644
--- a/chromium/media/mp4/rcheck.h
+++ b/chromium/media/formats/mp4/rcheck.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_RCHECK_H_
-#define MEDIA_MP4_RCHECK_H_
+#ifndef MEDIA_FORMATS_MP4_RCHECK_H_
+#define MEDIA_FORMATS_MP4_RCHECK_H_
#include "base/logging.h"
@@ -15,4 +15,4 @@
} \
} while (0)
-#endif // MEDIA_MP4_RCHECK_H_
+#endif // MEDIA_FORMATS_MP4_RCHECK_H_
diff --git a/chromium/media/formats/mp4/sample_to_group_iterator.cc b/chromium/media/formats/mp4/sample_to_group_iterator.cc
new file mode 100644
index 00000000000..01c707292d7
--- /dev/null
+++ b/chromium/media/formats/mp4/sample_to_group_iterator.cc
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp4/sample_to_group_iterator.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace mp4 {
+
+SampleToGroupIterator::SampleToGroupIterator(
+ const SampleToGroup& sample_to_group)
+ : remaining_samples_(0),
+ sample_to_group_table_(sample_to_group.entries),
+ iterator_(sample_to_group_table_.begin()) {
+ // Handle the case that the table contains an entry with sample count 0.
+ while (iterator_ != sample_to_group_table_.end()) {
+ remaining_samples_ = iterator_->sample_count;
+ if (remaining_samples_ > 0)
+ break;
+ ++iterator_;
+ }
+}
+
+SampleToGroupIterator::~SampleToGroupIterator() {}
+
+bool SampleToGroupIterator::Advance() {
+ DCHECK(IsValid());
+
+ --remaining_samples_;
+ // Handle the case that the table contains an entry with sample count 0.
+ while (remaining_samples_ == 0) {
+ ++iterator_;
+ if (iterator_ == sample_to_group_table_.end())
+ return false;
+ remaining_samples_ = iterator_->sample_count;
+ }
+ return true;
+}
+
+bool SampleToGroupIterator::IsValid() const {
+ return remaining_samples_ > 0;
+}
+
+} // namespace mp4
+} // namespace media
diff --git a/chromium/media/formats/mp4/sample_to_group_iterator.h b/chromium/media/formats/mp4/sample_to_group_iterator.h
new file mode 100644
index 00000000000..c2ea60f827e
--- /dev/null
+++ b/chromium/media/formats/mp4/sample_to_group_iterator.h
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MP4_SAMPLE_TO_GROUP_ITERATOR_H_
+#define MEDIA_FORMATS_MP4_SAMPLE_TO_GROUP_ITERATOR_H_
+
+#include <vector>
+
+#include "media/formats/mp4/box_definitions.h"
+
+namespace media {
+namespace mp4 {
+
+// Sample To Group Box ('sbgp') can be used to find the group that a sample
+// belongs to and the associated description of that sample group. The table
+// is compactly coded, however: each entry describes a run of consecutive
+// samples. This class implements an iterator that walks the compressed table
+// and yields the sample group description index for each sample in turn.
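+//
+// For example, a table of {{10, 8}, {9, 5}} assigns the first 10 samples the
+// group description at index 8 and the next 9 samples the description at
+// index 5.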
+class MEDIA_EXPORT SampleToGroupIterator {
+ public:
+ explicit SampleToGroupIterator(const SampleToGroup& sample_to_group);
+ ~SampleToGroupIterator();
+
+  // Advances the iterator to refer to the next sample. Returns true if the
+  // new position is still valid, false if the table has been exhausted.
+ bool Advance();
+
+ // Returns whether the current sample is valid.
+ bool IsValid() const;
+
+ // Returns group description index for current sample.
+ uint32 group_description_index() const {
+ return iterator_->group_description_index;
+ }
+
+ private:
+  // Tracks how many samples remain in the current table entry.
+ uint32 remaining_samples_;
+ const std::vector<SampleToGroupEntry>& sample_to_group_table_;
+ std::vector<SampleToGroupEntry>::const_iterator iterator_;
+
+ DISALLOW_COPY_AND_ASSIGN(SampleToGroupIterator);
+};
+
+} // namespace mp4
+} // namespace media
+
+#endif // MEDIA_FORMATS_MP4_SAMPLE_TO_GROUP_ITERATOR_H_
diff --git a/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc b/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc
new file mode 100644
index 00000000000..3e8148c127e
--- /dev/null
+++ b/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp4/sample_to_group_iterator.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace mp4 {
+
+namespace {
+const SampleToGroupEntry kCompactSampleToGroupTable[] =
+ {{10, 8}, {9, 5}, {25, 7}, {48, 63}, {8, 2}};
+} // namespace
+
+class SampleToGroupIteratorTest : public testing::Test {
+ public:
+ SampleToGroupIteratorTest() {
+    // Build the sample group description index table from
+    // kCompactSampleToGroupTable.
+ for (size_t i = 0; i < arraysize(kCompactSampleToGroupTable); ++i) {
+ for (uint32 j = 0; j < kCompactSampleToGroupTable[i].sample_count; ++j) {
+ sample_to_group_table_.push_back(
+ kCompactSampleToGroupTable[i].group_description_index);
+ }
+ }
+
+ sample_to_group_.entries.assign(
+ kCompactSampleToGroupTable,
+ kCompactSampleToGroupTable + arraysize(kCompactSampleToGroupTable));
+ sample_to_group_iterator_.reset(
+ new SampleToGroupIterator(sample_to_group_));
+ }
+
+ protected:
+ std::vector<uint32> sample_to_group_table_;
+ SampleToGroup sample_to_group_;
+ scoped_ptr<SampleToGroupIterator> sample_to_group_iterator_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SampleToGroupIteratorTest);
+};
+
+TEST_F(SampleToGroupIteratorTest, EmptyTable) {
+ SampleToGroup sample_to_group;
+ SampleToGroupIterator iterator(sample_to_group);
+ EXPECT_FALSE(iterator.IsValid());
+}
+
+TEST_F(SampleToGroupIteratorTest, Advance) {
+ ASSERT_EQ(sample_to_group_table_[0],
+ sample_to_group_iterator_->group_description_index());
+ for (uint32 sample = 1; sample < sample_to_group_table_.size(); ++sample) {
+ ASSERT_TRUE(sample_to_group_iterator_->Advance());
+ ASSERT_EQ(sample_to_group_table_[sample],
+ sample_to_group_iterator_->group_description_index());
+ ASSERT_TRUE(sample_to_group_iterator_->IsValid());
+ }
+ ASSERT_FALSE(sample_to_group_iterator_->Advance());
+ ASSERT_FALSE(sample_to_group_iterator_->IsValid());
+}
+
+} // namespace mp4
+} // namespace media
diff --git a/chromium/media/mp4/track_run_iterator.cc b/chromium/media/formats/mp4/track_run_iterator.cc
index 4dbd14f9f3f..fefc768452d 100644
--- a/chromium/media/mp4/track_run_iterator.cc
+++ b/chromium/media/formats/mp4/track_run_iterator.cc
@@ -1,18 +1,15 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mp4/track_run_iterator.h"
+#include "media/formats/mp4/track_run_iterator.h"
#include <algorithm>
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
-#include "media/mp4/rcheck.h"
-
-namespace {
-static const uint32 kSampleIsDifferenceSampleFlagMask = 0x10000;
-}
+#include "media/formats/mp4/rcheck.h"
+#include "media/formats/mp4/sample_to_group_iterator.h"
namespace media {
namespace mp4 {
@@ -22,6 +19,8 @@ struct SampleInfo {
int duration;
int cts_offset;
bool is_keyframe;
+ bool is_random_access_point;
+ uint32 cenc_group_description_index;
};
struct TrackRunInfo {
@@ -40,6 +39,8 @@ struct TrackRunInfo {
std::vector<uint8> aux_info_sizes; // Populated if default_size == 0.
int aux_info_total_size;
+ std::vector<CencSampleEncryptionInfoEntry> sample_encryption_info;
+
TrackRunInfo();
~TrackRunInfo();
};
@@ -57,10 +58,22 @@ TrackRunInfo::TrackRunInfo()
TrackRunInfo::~TrackRunInfo() {}
TimeDelta TimeDeltaFromRational(int64 numer, int64 denom) {
- DCHECK_LT((numer > 0 ? numer : -numer),
- kint64max / base::Time::kMicrosecondsPerSecond);
- return TimeDelta::FromMicroseconds(
- base::Time::kMicrosecondsPerSecond * numer / denom);
+ // To avoid overflow, split the following calculation:
+ // (numer * base::Time::kMicrosecondsPerSecond) / denom
+ // into:
+ // (numer / denom) * base::Time::kMicrosecondsPerSecond +
+ // ((numer % denom) * base::Time::kMicrosecondsPerSecond) / denom
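+ // Worked example (illustrative): numer = 10^13, denom = 3. The direct
+ // product 10^13 * 1000000 would overflow int64, but the split form yields
+ // a = 3333333333333 and b = 1, both of which stay comfortably in range.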
+ int64 a = numer / denom;
+ DCHECK_LE((a > 0 ? a : -a), kint64max / base::Time::kMicrosecondsPerSecond);
+ int64 timea_in_us = a * base::Time::kMicrosecondsPerSecond;
+
+ int64 b = numer % denom;
+ DCHECK_LE((b > 0 ? b : -b), kint64max / base::Time::kMicrosecondsPerSecond);
+ int64 timeb_in_us = (b * base::Time::kMicrosecondsPerSecond) / denom;
+
+ DCHECK((timeb_in_us < 0) || (timea_in_us <= kint64max - timeb_in_us));
+ DCHECK((timeb_in_us > 0) || (timea_in_us >= kint64min - timeb_in_us));
+ return TimeDelta::FromMicroseconds(timea_in_us + timeb_in_us);
}
TrackRunIterator::TrackRunIterator(const Movie* moov,
@@ -77,7 +90,8 @@ static void PopulateSampleInfo(const TrackExtends& trex,
const int64 edit_list_offset,
const uint32 i,
SampleInfo* sample_info,
- const SampleDependsOn sample_depends_on) {
+ const SampleDependsOn sdtp_sample_depends_on,
+ bool is_sync_sample) {
if (i < trun.sample_sizes.size()) {
sample_info->size = trun.sample_sizes[i];
} else if (tfhd.default_sample_size > 0) {
@@ -110,9 +124,24 @@ static void PopulateSampleInfo(const TrackExtends& trex,
flags = trex.default_sample_flags;
}
+ SampleDependsOn sample_depends_on =
+ static_cast<SampleDependsOn>((flags >> 24) & 0x3);
+
+ if (sample_depends_on == kSampleDependsOnUnknown)
+ sample_depends_on = sdtp_sample_depends_on;
+
+ // ISO/IEC 14496-12 Section 8.8.3.1 : The negation of |sample_is_sync_sample|
+ // provides the same information as the sync sample table [8.6.2]. When
+ // |sample_is_sync_sample| is true for a sample, it is the same as if the
+ // sample were not in a movie fragment and marked with an entry in the sync
+ // sample table (or, if all samples are sync samples, the sync sample table
+ // were absent).
+ bool sample_is_sync_sample = !(flags & kSampleIsNonSyncSample);
+ sample_info->is_random_access_point = sample_is_sync_sample;
+
switch (sample_depends_on) {
case kSampleDependsOnUnknown:
- sample_info->is_keyframe = !(flags & kSampleIsDifferenceSampleFlagMask);
+ sample_info->is_keyframe = sample_is_sync_sample;
break;
case kSampleDependsOnOthers:
@@ -203,9 +232,13 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
}
}
+ SampleToGroupIterator sample_to_group_itr(traf.sample_to_group);
+ bool is_sample_to_group_valid = sample_to_group_itr.IsValid();
+
int64 run_start_dts = traf.decode_time.decode_time;
int sample_count_sum = 0;
-
+ const SyncSample& sync_sample =
+ trak->media.information.sample_table.sync_sample;
for (size_t j = 0; j < traf.runs.size(); j++) {
const TrackFragmentRun& trun = traf.runs[j];
TrackRunInfo tri;
@@ -213,6 +246,7 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
tri.timescale = trak->media.header.timescale;
tri.start_dts = run_start_dts;
tri.sample_start_offset = trun.data_offset;
+ tri.sample_encryption_info = traf.sample_group_description.entries;
tri.is_audio = (stsd.type == kAudio);
if (tri.is_audio) {
@@ -266,12 +300,44 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
tri.samples.resize(trun.sample_count);
for (size_t k = 0; k < trun.sample_count; k++) {
PopulateSampleInfo(*trex, traf.header, trun, edit_list_offset,
- k, &tri.samples[k], traf.sdtp.sample_depends_on(k));
+ k, &tri.samples[k], traf.sdtp.sample_depends_on(k),
+ sync_sample.IsSyncSample(k));
run_start_dts += tri.samples[k].duration;
+
+ if (!is_sample_to_group_valid) {
+ // Set group description index to 0 to read encryption information
+ // from TrackEncryption Box.
+ tri.samples[k].cenc_group_description_index = 0;
+ continue;
+ }
+
+ // ISO-14496-12 Section 8.9.2.3 and 8.9.4 : group description index
+ // (1) ranges from 1 to the number of sample group entries in the track
+ // level SampleGroupDescription Box, or (2) takes the value 0 to
+ // indicate that this sample is a member of no group, in which case the
+ // sample is associated with the default values specified in the
+ // TrackEncryption Box, or (3) starts at 0x10001, i.e. the index value
+ // 1 with the value 1 in the top 16 bits, to reference the fragment-local
+ // SampleGroupDescription Box.
+ // Case (1) is not currently supported. We might not need it either, as
+ // the same functionality can be achieved more simply with (2).
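+ // For example (illustrative): a raw index of 0x10002 falls in case (3)
+ // and selects the second fragment-local entry, i.e.
+ // sample_encryption_info[1].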
+ uint32 index = sample_to_group_itr.group_description_index();
+ if (index >= SampleToGroupEntry::kFragmentGroupDescriptionIndexBase) {
+ index -= SampleToGroupEntry::kFragmentGroupDescriptionIndexBase;
+ RCHECK(index != 0 && index <= tri.sample_encryption_info.size());
+ } else if (index != 0) {
+ NOTIMPLEMENTED() << "'sgpd' box in 'moov' is not supported.";
+ return false;
+ }
+ tri.samples[k].cenc_group_description_index = index;
+ is_sample_to_group_valid = sample_to_group_itr.Advance();
}
runs_.push_back(tri);
sample_count_sum += trun.sample_count;
}
+
+ // We should have iterated through all samples in SampleToGroup Box.
+ RCHECK(!sample_to_group_itr.IsValid());
}
std::sort(runs_.begin(), runs_.end(), CompareMinTrackRunDataOffset());
@@ -304,7 +370,7 @@ void TrackRunIterator::AdvanceSample() {
// info is available in the stream.
bool TrackRunIterator::AuxInfoNeedsToBeCached() {
DCHECK(IsRunValid());
- return is_encrypted() && aux_info_size() > 0 && cenc_info_.size() == 0;
+ return aux_info_size() > 0 && cenc_info_.size() == 0;
}
// This implementation currently only caches CENC auxiliary info.
@@ -318,8 +384,10 @@ bool TrackRunIterator::CacheAuxInfo(const uint8* buf, int buf_size) {
if (!info_size)
info_size = run_itr_->aux_info_sizes[i];
- BufferReader reader(buf + pos, info_size);
- RCHECK(cenc_info_[i].Parse(track_encryption().default_iv_size, &reader));
+ if (IsSampleEncrypted(i)) {
+ BufferReader reader(buf + pos, info_size);
+ RCHECK(cenc_info_[i].Parse(GetIvSize(i), &reader));
+ }
pos += info_size;
}
@@ -366,8 +434,8 @@ uint32 TrackRunIterator::track_id() const {
}
bool TrackRunIterator::is_encrypted() const {
- DCHECK(IsRunValid());
- return track_encryption().is_encrypted;
+ DCHECK(IsSampleValid());
+ return IsSampleEncrypted(sample_itr_ - run_itr_->samples.begin());
}
int64 TrackRunIterator::aux_info_offset() const {
@@ -426,6 +494,11 @@ bool TrackRunIterator::is_keyframe() const {
return sample_itr_->is_keyframe;
}
+bool TrackRunIterator::is_random_access_point() const {
+ DCHECK(IsSampleValid());
+ return sample_itr_->is_random_access_point;
+}
+
const TrackEncryption& TrackRunIterator::track_encryption() const {
if (is_audio())
return audio_description().sinf.info.track_encryption;
@@ -433,10 +506,17 @@ const TrackEncryption& TrackRunIterator::track_encryption() const {
}
scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
+ DCHECK(is_encrypted());
+
+ if (cenc_info_.empty()) {
+ DCHECK_EQ(0, aux_info_size());
+ MEDIA_LOG(log_cb_) << "Aux Info is not available.";
+ return scoped_ptr<DecryptConfig>();
+ }
+
size_t sample_idx = sample_itr_ - run_itr_->samples.begin();
- DCHECK(sample_idx < cenc_info_.size());
+ DCHECK_LT(sample_idx, cenc_info_.size());
const FrameCENCInfo& cenc_info = cenc_info_[sample_idx];
- DCHECK(is_encrypted() && !AuxInfoNeedsToBeCached());
size_t total_size = 0;
if (!cenc_info.subsamples.empty() &&
@@ -446,14 +526,48 @@ scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
return scoped_ptr<DecryptConfig>();
}
- const std::vector<uint8>& kid = track_encryption().default_kid;
+ const std::vector<uint8>& kid = GetKeyId(sample_idx);
return scoped_ptr<DecryptConfig>(new DecryptConfig(
std::string(reinterpret_cast<const char*>(&kid[0]), kid.size()),
std::string(reinterpret_cast<const char*>(cenc_info.iv),
arraysize(cenc_info.iv)),
- 0, // No offset to start of media data in MP4 using CENC.
cenc_info.subsamples));
}
+uint32 TrackRunIterator::GetGroupDescriptionIndex(uint32 sample_index) const {
+ DCHECK(IsRunValid());
+ DCHECK_LT(sample_index, run_itr_->samples.size());
+ return run_itr_->samples[sample_index].cenc_group_description_index;
+}
+
+const CencSampleEncryptionInfoEntry&
+TrackRunIterator::GetSampleEncryptionInfoEntry(
+ uint32 group_description_index) const {
+ DCHECK(IsRunValid());
+ DCHECK_NE(group_description_index, 0u);
+ DCHECK_LE(group_description_index, run_itr_->sample_encryption_info.size());
+ // |group_description_index| is 1-based. Subtract 1 to index the vector.
+ return run_itr_->sample_encryption_info[group_description_index - 1];
+}
+
+bool TrackRunIterator::IsSampleEncrypted(size_t sample_index) const {
+ uint32 index = GetGroupDescriptionIndex(sample_index);
+ return (index == 0) ? track_encryption().is_encrypted
+ : GetSampleEncryptionInfoEntry(index).is_encrypted;
+}
+
+const std::vector<uint8>& TrackRunIterator::GetKeyId(
+ size_t sample_index) const {
+ uint32 index = GetGroupDescriptionIndex(sample_index);
+ return (index == 0) ? track_encryption().default_kid
+ : GetSampleEncryptionInfoEntry(index).key_id;
+}
+
+uint8 TrackRunIterator::GetIvSize(size_t sample_index) const {
+ uint32 index = GetGroupDescriptionIndex(sample_index);
+ return (index == 0) ? track_encryption().default_iv_size
+ : GetSampleEncryptionInfoEntry(index).iv_size;
+}
+
} // namespace mp4
} // namespace media
diff --git a/chromium/media/mp4/track_run_iterator.h b/chromium/media/formats/mp4/track_run_iterator.h
index a21c5ba0c2d..fb53927c9ce 100644
--- a/chromium/media/mp4/track_run_iterator.h
+++ b/chromium/media/formats/mp4/track_run_iterator.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP4_TRACK_RUN_ITERATOR_H_
-#define MEDIA_MP4_TRACK_RUN_ITERATOR_H_
+#ifndef MEDIA_FORMATS_MP4_TRACK_RUN_ITERATOR_H_
+#define MEDIA_FORMATS_MP4_TRACK_RUN_ITERATOR_H_
#include <vector>
@@ -11,8 +11,8 @@
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
-#include "media/mp4/box_definitions.h"
-#include "media/mp4/cenc.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/cenc.h"
namespace media {
@@ -78,6 +78,7 @@ class MEDIA_EXPORT TrackRunIterator {
TimeDelta cts() const;
TimeDelta duration() const;
bool is_keyframe() const;
+ bool is_random_access_point() const;
// Only call when is_encrypted() is true and AuxInfoNeedsToBeCached() is
// false. Result is owned by caller.
@@ -87,6 +88,15 @@ class MEDIA_EXPORT TrackRunIterator {
void ResetRun();
const TrackEncryption& track_encryption() const;
+ uint32 GetGroupDescriptionIndex(uint32 sample_index) const;
+ const CencSampleEncryptionInfoEntry& GetSampleEncryptionInfoEntry(
+ uint32 group_description_index) const;
+
+ // Sample encryption information.
+ bool IsSampleEncrypted(size_t sample_index) const;
+ uint8 GetIvSize(size_t sample_index) const;
+ const std::vector<uint8>& GetKeyId(size_t sample_index) const;
+
const Movie* moov_;
LogCB log_cb_;
@@ -105,4 +115,4 @@ class MEDIA_EXPORT TrackRunIterator {
} // namespace mp4
} // namespace media
-#endif // MEDIA_MP4_TRACK_RUN_ITERATOR_H_
+#endif // MEDIA_FORMATS_MP4_TRACK_RUN_ITERATOR_H_
diff --git a/chromium/media/mp4/track_run_iterator_unittest.cc b/chromium/media/formats/mp4/track_run_iterator_unittest.cc
index 499a2e1a697..baaa010de72 100644
--- a/chromium/media/mp4/track_run_iterator_unittest.cc
+++ b/chromium/media/formats/mp4/track_run_iterator_unittest.cc
@@ -1,13 +1,14 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "media/mp4/box_definitions.h"
-#include "media/mp4/rcheck.h"
-#include "media/mp4/track_run_iterator.h"
+#include "base/strings/string_split.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/rcheck.h"
+#include "media/formats/mp4/track_run_iterator.h"
#include "testing/gtest/include/gtest/gtest.h"
// The sum of the elements in a vector initialized with SumAscending,
@@ -17,8 +18,6 @@ static const int kSumAscending1 = 45;
static const int kAudioScale = 48000;
static const int kVideoScale = 25;
-static const uint32 kSampleIsDifferenceSampleFlagMask = 0x10000;
-
static const uint8 kAuxInfo[] = {
0x41, 0x54, 0x65, 0x73, 0x74, 0x49, 0x76, 0x31,
0x41, 0x54, 0x65, 0x73, 0x74, 0x49, 0x76, 0x32,
@@ -37,6 +36,11 @@ static const uint8 kKeyId[] = {
0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x44
};
+static const uint8 kCencSampleGroupKeyId[] = {
+ 0x46, 0x72, 0x61, 0x67, 0x53, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b
+};
+
namespace media {
namespace mp4 {
@@ -66,7 +70,8 @@ class TrackRunIteratorTest : public testing::Test {
desc1.audio_entries.push_back(aud_desc);
moov_.extends.tracks[0].track_id = 1;
moov_.extends.tracks[0].default_sample_description_index = 1;
-
+ moov_.tracks[0].media.information.sample_table.sync_sample.is_present =
+ false;
moov_.tracks[1].header.track_id = 2;
moov_.tracks[1].media.header.timescale = kVideoScale;
SampleDescription& desc2 =
@@ -78,17 +83,102 @@ class TrackRunIteratorTest : public testing::Test {
desc2.video_entries.push_back(vid_desc);
moov_.extends.tracks[1].track_id = 2;
moov_.extends.tracks[1].default_sample_description_index = 1;
+ SyncSample& video_sync_sample =
+ moov_.tracks[1].media.information.sample_table.sync_sample;
+ video_sync_sample.is_present = true;
+ video_sync_sample.entries.resize(1);
+ video_sync_sample.entries[0] = 0;
moov_.tracks[2].header.track_id = 3;
moov_.tracks[2].media.information.sample_table.description.type = kHint;
}
+ uint32 ToSampleFlags(const std::string& str) {
+ CHECK_EQ(str.length(), 2u);
+
+ SampleDependsOn sample_depends_on = kSampleDependsOnReserved;
+ bool is_non_sync_sample = false;
+ switch(str[0]) {
+ case 'U':
+ sample_depends_on = kSampleDependsOnUnknown;
+ break;
+ case 'O':
+ sample_depends_on = kSampleDependsOnOthers;
+ break;
+ case 'N':
+ sample_depends_on = kSampleDependsOnNoOther;
+ break;
+ default:
+ CHECK(false) << "Invalid sample dependency character '"
+ << str[0] << "'";
+ break;
+ }
+
+ switch(str[1]) {
+ case 'S':
+ is_non_sync_sample = false;
+ break;
+ case 'N':
+ is_non_sync_sample = true;
+ break;
+ default:
+ CHECK(false) << "Invalid sync sample character '"
+ << str[1] << "'";
+ break;
+ }
+ uint32 flags = static_cast<uint32>(sample_depends_on) << 24;
+ if (is_non_sync_sample)
+ flags |= kSampleIsNonSyncSample;
+ return flags;
+ }
+
+ void SetFlagsOnSamples(const std::string& sample_info,
+ TrackFragmentRun* trun) {
+ // US - SampleDependsOnUnknown & IsSyncSample
+ // UN - SampleDependsOnUnknown & IsNonSyncSample
+ // OS - SampleDependsOnOthers & IsSyncSample
+ // ON - SampleDependsOnOthers & IsNonSyncSample
+ // NS - SampleDependsOnNoOther & IsSyncSample
+ // NN - SampleDependsOnNoOther & IsNonSyncSample
+ std::vector<std::string> flags_data;
+ base::SplitString(sample_info, ' ', &flags_data);
+
+ if (flags_data.size() == 1u) {
+ // Simulates the first_sample_flags_present set scenario,
+ // where only one sample_flag value is set and the default
+ // flags are used for everything else.
+ ASSERT_GE(trun->sample_count, flags_data.size());
+ } else {
+ ASSERT_EQ(trun->sample_count, flags_data.size());
+ }
+
+ trun->sample_flags.resize(flags_data.size());
+ for (size_t i = 0; i < flags_data.size(); i++)
+ trun->sample_flags[i] = ToSampleFlags(flags_data[i]);
+ }
+
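+ // Returns the track id followed by one token per sample: "K" for a
+ // keyframe, "P" otherwise, with "R" appended when the sample is a random
+ // access point.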
+ std::string KeyframeAndRAPInfo(TrackRunIterator* iter) {
+ CHECK(iter->IsRunValid());
+ std::stringstream ss;
+ ss << iter->track_id();
+
+ while (iter->IsSampleValid()) {
+ ss << " " << (iter->is_keyframe() ? "K" : "P");
+ if (iter->is_random_access_point())
+ ss << "R";
+ iter->AdvanceSample();
+ }
+
+ return ss.str();
+ }
+
MovieFragment CreateFragment() {
MovieFragment moof;
moof.tracks.resize(2);
moof.tracks[0].decode_time.decode_time = 0;
moof.tracks[0].header.track_id = 1;
moof.tracks[0].header.has_default_sample_flags = true;
+ moof.tracks[0].header.default_sample_flags = ToSampleFlags("US");
moof.tracks[0].header.default_sample_duration = 1024;
moof.tracks[0].header.default_sample_size = 4;
moof.tracks[0].runs.resize(2);
@@ -107,11 +197,7 @@ class TrackRunIteratorTest : public testing::Test {
moof.tracks[1].runs[0].data_offset = 200;
SetAscending(&moof.tracks[1].runs[0].sample_sizes);
SetAscending(&moof.tracks[1].runs[0].sample_durations);
- moof.tracks[1].runs[0].sample_flags.resize(10);
- for (size_t i = 1; i < moof.tracks[1].runs[0].sample_flags.size(); i++) {
- moof.tracks[1].runs[0].sample_flags[i] =
- kSampleIsDifferenceSampleFlagMask;
- }
+ SetFlagsOnSamples("US UN UN UN UN UN UN UN UN UN", &moof.tracks[1].runs[0]);
return moof;
}
@@ -130,9 +216,28 @@ class TrackRunIteratorTest : public testing::Test {
sinf->type.type = FOURCC_CENC;
sinf->info.track_encryption.is_encrypted = true;
sinf->info.track_encryption.default_iv_size = 8;
- sinf->info.track_encryption.default_kid.insert(
- sinf->info.track_encryption.default_kid.begin(),
- kKeyId, kKeyId + arraysize(kKeyId));
+ sinf->info.track_encryption.default_kid.assign(kKeyId,
+ kKeyId + arraysize(kKeyId));
+ }
+
+ // Add SampleGroupDescription Box with two entries (an unencrypted entry and
+ // an encrypted entry). Populate SampleToGroup Box from input array.
+ void AddCencSampleGroup(TrackFragment* frag,
+ const SampleToGroupEntry* sample_to_group_entries,
+ size_t num_entries) {
+ frag->sample_group_description.grouping_type = FOURCC_SEIG;
+ frag->sample_group_description.entries.resize(2);
+ frag->sample_group_description.entries[0].is_encrypted = false;
+ frag->sample_group_description.entries[0].iv_size = 0;
+ frag->sample_group_description.entries[1].is_encrypted = true;
+ frag->sample_group_description.entries[1].iv_size = 8;
+ frag->sample_group_description.entries[1].key_id.assign(
+ kCencSampleGroupKeyId,
+ kCencSampleGroupKeyId + arraysize(kCencSampleGroupKeyId));
+
+ frag->sample_to_group.grouping_type = FOURCC_SEIG;
+ frag->sample_to_group.entries.assign(sample_to_group_entries,
+ sample_to_group_entries + num_entries);
}
// Add aux info covering the first track run to a TrackFragment, and update
@@ -146,6 +251,20 @@ class TrackRunIteratorTest : public testing::Test {
frag->runs[0].sample_sizes[1] = 10;
}
+ bool InitMoofWithArbitraryAuxInfo(MovieFragment* moof) {
+ // Add aux info header (equal sized aux info for every sample).
+ for (uint32 i = 0; i < moof->tracks.size(); ++i) {
+ moof->tracks[i].auxiliary_offset.offsets.push_back(50);
+ moof->tracks[i].auxiliary_size.sample_count = 10;
+ moof->tracks[i].auxiliary_size.default_sample_info_size = 8;
+ }
+
+ // We don't care about the actual data in aux.
+ std::vector<uint8> aux_info(1000);
+ return iter_->Init(*moof) &&
+ iter_->CacheAuxInfo(&aux_info[0], aux_info.size());
+ }
+
void SetAscending(std::vector<uint32>* vec) {
vec->resize(10);
for (size_t i = 0; i < vec->size(); i++)
@@ -217,8 +336,7 @@ TEST_F(TrackRunIteratorTest, BasicOperationTest) {
TEST_F(TrackRunIteratorTest, TrackExtendsDefaultsTest) {
moov_.extends.tracks[0].default_sample_duration = 50;
moov_.extends.tracks[0].default_sample_size = 3;
- moov_.extends.tracks[0].default_sample_flags =
- kSampleIsDifferenceSampleFlagMask;
+ moov_.extends.tracks[0].default_sample_flags = ToSampleFlags("UN");
iter_.reset(new TrackRunIterator(&moov_, log_cb_));
MovieFragment moof = CreateFragment();
moof.tracks[0].header.has_default_sample_flags = false;
@@ -241,14 +359,14 @@ TEST_F(TrackRunIteratorTest, FirstSampleFlagTest) {
iter_.reset(new TrackRunIterator(&moov_, log_cb_));
MovieFragment moof = CreateFragment();
moof.tracks[1].header.has_default_sample_flags = true;
- moof.tracks[1].header.default_sample_flags =
- kSampleIsDifferenceSampleFlagMask;
- moof.tracks[1].runs[0].sample_flags.resize(1);
+ moof.tracks[1].header.default_sample_flags = ToSampleFlags("UN");
+ SetFlagsOnSamples("US", &moof.tracks[1].runs[0]);
+
ASSERT_TRUE(iter_->Init(moof));
+ EXPECT_EQ("1 KR KR KR KR KR KR KR KR KR KR", KeyframeAndRAPInfo(iter_.get()));
+
iter_->AdvanceRun();
- EXPECT_TRUE(iter_->is_keyframe());
- iter_->AdvanceSample();
- EXPECT_FALSE(iter_->is_keyframe());
+ EXPECT_EQ("2 KR P P P P P P P P P", KeyframeAndRAPInfo(iter_.get()));
}
TEST_F(TrackRunIteratorTest, ReorderingTest) {
@@ -353,6 +471,77 @@ TEST_F(TrackRunIteratorTest, DecryptConfigTest) {
EXPECT_EQ(config->subsamples()[1].cypher_bytes, 4u);
}
+TEST_F(TrackRunIteratorTest, CencSampleGroupTest) {
+ MovieFragment moof = CreateFragment();
+
+ const SampleToGroupEntry kSampleToGroupTable[] = {
+ // Associated with the second entry in SampleGroupDescription Box.
+ {1, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 2},
+ // Associated with the first entry in SampleGroupDescription Box.
+ {1, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 1}};
+ AddCencSampleGroup(
+ &moof.tracks[0], kSampleToGroupTable, arraysize(kSampleToGroupTable));
+
+ iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ ASSERT_TRUE(InitMoofWithArbitraryAuxInfo(&moof));
+
+ std::string cenc_sample_group_key_id(
+ kCencSampleGroupKeyId,
+ kCencSampleGroupKeyId + arraysize(kCencSampleGroupKeyId));
+ // The first sample is encrypted and the second sample is unencrypted.
+ EXPECT_TRUE(iter_->is_encrypted());
+ EXPECT_EQ(cenc_sample_group_key_id, iter_->GetDecryptConfig()->key_id());
+ iter_->AdvanceSample();
+ EXPECT_FALSE(iter_->is_encrypted());
+}
+
+TEST_F(TrackRunIteratorTest, CencSampleGroupWithTrackEncryptionBoxTest) {
+ // Add TrackEncryption Box.
+ AddEncryption(&moov_.tracks[0]);
+
+ MovieFragment moof = CreateFragment();
+
+ const SampleToGroupEntry kSampleToGroupTable[] = {
+ // Associated with the second entry in SampleGroupDescription Box.
+ {2, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 2},
+ // Associated with the default values specified in TrackEncryption Box.
+ {4, 0},
+ // Associated with the first entry in SampleGroupDescription Box.
+ {3, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 1}};
+ AddCencSampleGroup(
+ &moof.tracks[0], kSampleToGroupTable, arraysize(kSampleToGroupTable));
+
+ iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ ASSERT_TRUE(InitMoofWithArbitraryAuxInfo(&moof));
+
+ std::string track_encryption_key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::string cenc_sample_group_key_id(
+ kCencSampleGroupKeyId,
+ kCencSampleGroupKeyId + arraysize(kCencSampleGroupKeyId));
+
+ for (size_t i = 0; i < kSampleToGroupTable[0].sample_count; ++i) {
+ EXPECT_TRUE(iter_->is_encrypted());
+ EXPECT_EQ(cenc_sample_group_key_id, iter_->GetDecryptConfig()->key_id());
+ iter_->AdvanceSample();
+ }
+
+ for (size_t i = 0; i < kSampleToGroupTable[1].sample_count; ++i) {
+ EXPECT_TRUE(iter_->is_encrypted());
+ EXPECT_EQ(track_encryption_key_id, iter_->GetDecryptConfig()->key_id());
+ iter_->AdvanceSample();
+ }
+
+ for (size_t i = 0; i < kSampleToGroupTable[2].sample_count; ++i) {
+ EXPECT_FALSE(iter_->is_encrypted());
+ iter_->AdvanceSample();
+ }
+
+ // The remaining samples should be associated with the default values
+ // specified in TrackEncryption Box.
+ EXPECT_TRUE(iter_->is_encrypted());
+ EXPECT_EQ(track_encryption_key_id, iter_->GetDecryptConfig()->key_id());
+}
+
// It is legal for aux info blocks to be shared among multiple formats.
TEST_F(TrackRunIteratorTest, SharedAuxInfoTest) {
AddEncryption(&moov_.tracks[0]);
@@ -433,5 +622,45 @@ TEST_F(TrackRunIteratorTest, UnexpectedOrderingTest) {
EXPECT_EQ(iter_->GetMaxClearOffset(), 10000);
}
+TEST_F(TrackRunIteratorTest, MissingAndEmptyStss) {
+ MovieFragment moof = CreateFragment();
+
+ // Setup track 0 to not have an stss box, which means that all samples should
+ // be marked as random access points unless the kSampleIsNonSyncSample flag is
+ // set in the sample flags.
+ moov_.tracks[0].media.information.sample_table.sync_sample.is_present = false;
+ moov_.tracks[0].media.information.sample_table.sync_sample.entries.resize(0);
+ moof.tracks[0].runs.resize(1);
+ moof.tracks[0].runs[0].sample_count = 6;
+ moof.tracks[0].runs[0].data_offset = 100;
+ SetFlagsOnSamples("US UN OS ON NS NN", &moof.tracks[0].runs[0]);
+
+ // Setup track 1 to have an stss box with no entries, which normally means
+ // that none of the samples should be random access points. If the
+ // kSampleIsNonSyncSample flag is NOT set though, the sample should be
+ // considered a random access point.
+ moov_.tracks[1].media.information.sample_table.sync_sample.is_present = true;
+ moov_.tracks[1].media.information.sample_table.sync_sample.entries.resize(0);
+ moof.tracks[1].runs.resize(1);
+ moof.tracks[1].runs[0].sample_count = 6;
+ moof.tracks[1].runs[0].data_offset = 200;
+ SetFlagsOnSamples("US UN OS ON NS NN", &moof.tracks[1].runs[0]);
+
+ iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+
+ ASSERT_TRUE(iter_->Init(moof));
+ EXPECT_TRUE(iter_->IsRunValid());
+
+ // Verify that all samples except for the ones that have the
+ // kSampleIsNonSyncSample flag set are marked as random access points.
+ EXPECT_EQ("1 KR P PR P KR K", KeyframeAndRAPInfo(iter_.get()));
+
+ iter_->AdvanceRun();
+
+ // Verify that, as for track 1, all samples except those with the
+ // kSampleIsNonSyncSample flag set are marked as random access points,
+ // despite the empty stss box.
+ EXPECT_EQ("2 KR P PR P KR K", KeyframeAndRAPInfo(iter_.get()));
+}
+
} // namespace mp4
} // namespace media
diff --git a/chromium/media/formats/mpeg/adts_constants.cc b/chromium/media/formats/mpeg/adts_constants.cc
new file mode 100644
index 00000000000..bc898908fa3
--- /dev/null
+++ b/chromium/media/formats/mpeg/adts_constants.cc
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mpeg/adts_constants.h"
+
+#include "base/macros.h"
+
+namespace media {
+
+// The following conversion table is extracted from ISO 14496 Part 3 -
+// Table 1.16 - Sampling Frequency Index.
+const int kADTSFrequencyTable[] = {96000, 88200, 64000, 48000, 44100,
+ 32000, 24000, 22050, 16000, 12000,
+ 11025, 8000, 7350};
+const size_t kADTSFrequencyTableSize = arraysize(kADTSFrequencyTable);
+
+// The following conversion table is extracted from ISO 14496 Part 3 -
+// Table 1.17 - Channel Configuration.
+const media::ChannelLayout kADTSChannelLayoutTable[] = {
+ media::CHANNEL_LAYOUT_NONE, media::CHANNEL_LAYOUT_MONO,
+ media::CHANNEL_LAYOUT_STEREO, media::CHANNEL_LAYOUT_SURROUND,
+ media::CHANNEL_LAYOUT_4_0, media::CHANNEL_LAYOUT_5_0_BACK,
+ media::CHANNEL_LAYOUT_5_1_BACK, media::CHANNEL_LAYOUT_7_1};
+const size_t kADTSChannelLayoutTableSize = arraysize(kADTSChannelLayoutTable);
+
+} // namespace media
diff --git a/chromium/media/formats/mpeg/adts_constants.h b/chromium/media/formats/mpeg/adts_constants.h
new file mode 100644
index 00000000000..aa8ea2015c8
--- /dev/null
+++ b/chromium/media/formats/mpeg/adts_constants.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MPEG_ADTS_CONSTANTS_H_
+#define MEDIA_FORMATS_MPEG_ADTS_CONSTANTS_H_
+
+#include <stddef.h>
+
+#include "media/base/channel_layout.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+enum {
+ kADTSHeaderMinSize = 7,
+ kSamplesPerAACFrame = 1024,
+};
+
+MEDIA_EXPORT extern const int kADTSFrequencyTable[];
+MEDIA_EXPORT extern const size_t kADTSFrequencyTableSize;
+
+MEDIA_EXPORT extern const media::ChannelLayout kADTSChannelLayoutTable[];
+MEDIA_EXPORT extern const size_t kADTSChannelLayoutTableSize;
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_MPEG_ADTS_CONSTANTS_H_
diff --git a/chromium/media/formats/mpeg/adts_stream_parser.cc b/chromium/media/formats/mpeg/adts_stream_parser.cc
new file mode 100644
index 00000000000..beb94350c9c
--- /dev/null
+++ b/chromium/media/formats/mpeg/adts_stream_parser.cc
@@ -0,0 +1,99 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mpeg/adts_stream_parser.h"
+
+#include "media/formats/mpeg/adts_constants.h"
+
+namespace media {
+
+static const uint32 kADTSStartCodeMask = 0xfff00000;
+
+ADTSStreamParser::ADTSStreamParser()
+ : MPEGAudioStreamParserBase(kADTSStartCodeMask, kCodecAAC, 0) {}
+
+ADTSStreamParser::~ADTSStreamParser() {}
+
+int ADTSStreamParser::ParseFrameHeader(const uint8* data,
+ int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count,
+ bool* metadata_frame) const {
+ DCHECK(data);
+ DCHECK_GE(size, 0);
+ DCHECK(frame_size);
+
+ if (size < 8)
+ return 0;
+
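+ // ADTS header layout (ISO 14496 Part 3): 12-bit syncword (all ones),
+ // 1-bit MPEG version, 2-bit layer (always 0), 1-bit protection_absent,
+ // 2-bit profile, 4-bit sampling frequency index, 1 private bit, 3-bit
+ // channel configuration, 4 misc bits, 13-bit frame length, 11-bit buffer
+ // fullness, 2-bit raw data block count minus one, and a 16-bit CRC when
+ // protection_absent is 0.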
+ BitReader reader(data, size);
+ int sync;
+ int version;
+ int layer;
+ int protection_absent;
+ int profile;
+ size_t sample_rate_index;
+ size_t channel_layout_index;
+ int frame_length;
+ size_t num_data_blocks;
+ int unused;
+
+ if (!reader.ReadBits(12, &sync) ||
+ !reader.ReadBits(1, &version) ||
+ !reader.ReadBits(2, &layer) ||
+ !reader.ReadBits(1, &protection_absent) ||
+ !reader.ReadBits(2, &profile) ||
+ !reader.ReadBits(4, &sample_rate_index) ||
+ !reader.ReadBits(1, &unused) ||
+ !reader.ReadBits(3, &channel_layout_index) ||
+ !reader.ReadBits(4, &unused) ||
+ !reader.ReadBits(13, &frame_length) ||
+ !reader.ReadBits(11, &unused) ||
+ !reader.ReadBits(2, &num_data_blocks) ||
+ (!protection_absent && !reader.ReadBits(16, &unused))) {
+ return -1;
+ }
+
+ DVLOG(2) << "Header data :" << std::hex
+ << " sync 0x" << sync
+ << " version 0x" << version
+ << " layer 0x" << layer
+ << " profile 0x" << profile
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_layout_index 0x" << channel_layout_index;
+
+ const int bytes_read = reader.bits_read() / 8;
+ if (sync != 0xfff || layer != 0 || frame_length < bytes_read ||
+ sample_rate_index >= kADTSFrequencyTableSize ||
+ channel_layout_index >= kADTSChannelLayoutTableSize) {
+ MEDIA_LOG(log_cb()) << "Invalid header data :" << std::hex
+ << " sync 0x" << sync
+ << " version 0x" << version
+ << " layer 0x" << layer
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_layout_index 0x" << channel_layout_index;
+ return -1;
+ }
+
+ if (sample_rate)
+ *sample_rate = kADTSFrequencyTable[sample_rate_index];
+
+ if (frame_size)
+ *frame_size = frame_length;
+
+ if (sample_count)
+ *sample_count = (num_data_blocks + 1) * kSamplesPerAACFrame;
+
+ if (channel_layout)
+ *channel_layout = kADTSChannelLayoutTable[channel_layout_index];
+
+ if (metadata_frame)
+ *metadata_frame = false;
+
+ return bytes_read;
+}
+
+} // namespace media
diff --git a/chromium/media/formats/mpeg/adts_stream_parser.h b/chromium/media/formats/mpeg/adts_stream_parser.h
new file mode 100644
index 00000000000..e036d8d4671
--- /dev/null
+++ b/chromium/media/formats/mpeg/adts_stream_parser.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MPEG_ADTS_STREAM_PARSER_H_
+#define MEDIA_FORMATS_MPEG_ADTS_STREAM_PARSER_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/formats/mpeg/mpeg_audio_stream_parser_base.h"
+
+namespace media {
+
+class MEDIA_EXPORT ADTSStreamParser : public MPEGAudioStreamParserBase {
+ public:
+ ADTSStreamParser();
+ virtual ~ADTSStreamParser();
+
+ private:
+ // MPEGAudioStreamParserBase overrides.
+ virtual int ParseFrameHeader(const uint8* data,
+ int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count,
+ bool* metadata_frame) const OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(ADTSStreamParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_MPEG_ADTS_STREAM_PARSER_H_
diff --git a/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc b/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc
new file mode 100644
index 00000000000..b9eb0d9c6c5
--- /dev/null
+++ b/chromium/media/formats/mpeg/adts_stream_parser_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/common/stream_parser_test_base.h"
+#include "media/formats/mpeg/adts_stream_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class ADTSStreamParserTest : public StreamParserTestBase, public testing::Test {
+ public:
+ ADTSStreamParserTest()
+ : StreamParserTestBase(
+ scoped_ptr<StreamParser>(new ADTSStreamParser()).Pass()) {}
+ virtual ~ADTSStreamParserTest() {}
+};
+
+// Test parsing with small prime sized chunks to smoke out "power of
+// 2" field size assumptions.
+TEST_F(ADTSStreamParserTest, UnalignedAppend) {
+ const std::string expected =
+ "NewSegment"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "EndOfSegment";
+ EXPECT_EQ(expected, ParseFile("sfx.adts", 17));
+}
+
+// Test parsing with a larger piece size to verify that multiple buffers
+// are passed to |new_buffer_cb_|.
+TEST_F(ADTSStreamParserTest, UnalignedAppend512) {
+ const std::string expected =
+ "NewSegment"
+ "{ 0K 23K 46K }"
+ "{ 0K 23K 46K 69K 92K }"
+ "{ 0K 23K 46K 69K 92K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 0K }"
+ "EndOfSegment";
+ EXPECT_EQ(expected, ParseFile("sfx.adts", 512));
+}
+
+} // namespace media
diff --git a/chromium/media/formats/mpeg/mp3_stream_parser.cc b/chromium/media/formats/mpeg/mp3_stream_parser.cc
new file mode 100644
index 00000000000..f5b74382423
--- /dev/null
+++ b/chromium/media/formats/mpeg/mp3_stream_parser.cc
@@ -0,0 +1,280 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mpeg/mp3_stream_parser.h"
+
+namespace media {
+
+static const uint32 kMP3StartCodeMask = 0xffe00000;
+
+// Map that determines which bitrate_index & channel_mode combinations
+// are allowed.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const bool kIsAllowed[17][4] = {
+ { true, true, true, true }, // free
+ { true, false, false, false }, // 32
+ { true, false, false, false }, // 48
+ { true, false, false, false }, // 56
+ { true, true, true, true }, // 64
+ { true, false, false, false }, // 80
+ { true, true, true, true }, // 96
+ { true, true, true, true }, // 112
+ { true, true, true, true }, // 128
+ { true, true, true, true }, // 160
+ { true, true, true, true }, // 192
+ { false, true, true, true }, // 224
+ { false, true, true, true }, // 256
+ { false, true, true, true }, // 320
+ { false, true, true, true }, // 384
+ { false, false, false, false } // bad
+};
+
+// Maps version and layer information in the frame header
+// into an index for the |kBitrateMap|.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kVersionLayerMap[4][4] = {
+ // { reserved, L3, L2, L1 }
+ { 5, 4, 4, 3 }, // MPEG 2.5
+ { 5, 5, 5, 5 }, // reserved
+ { 5, 4, 4, 3 }, // MPEG 2
+ { 5, 2, 1, 0 } // MPEG 1
+};
+
+// Maps the bitrate index field in the header and an index
+// from |kVersionLayerMap| to a frame bitrate.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
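+// For example (illustrative): an MPEG1 Layer 3 header with bitrate_index 9
+// maps through kVersionLayerMap[3][1] == 2 to kBitrateMap[9][2] == 128 kbps.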
+static const int kBitrateMap[16][6] = {
+ // { V1L1, V1L2, V1L3, V2L1, V2L2 & V2L3, reserved }
+ { 0, 0, 0, 0, 0, 0 },
+ { 32, 32, 32, 32, 8, 0 },
+ { 64, 48, 40, 48, 16, 0 },
+ { 96, 56, 48, 56, 24, 0 },
+ { 128, 64, 56, 64, 32, 0 },
+ { 160, 80, 64, 80, 40, 0 },
+ { 192, 96, 80, 96, 48, 0 },
+ { 224, 112, 96, 112, 56, 0 },
+ { 256, 128, 112, 128, 64, 0 },
+ { 288, 160, 128, 144, 80, 0 },
+ { 320, 192, 160, 160, 96, 0 },
+ { 352, 224, 192, 176, 112, 0 },
+ { 384, 256, 224, 192, 128, 0 },
+ { 416, 320, 256, 224, 144, 0 },
+ { 448, 384, 320, 256, 160, 0 },
+ { 0, 0, 0, 0, 0, 0 }
+};
+
+// Maps the sample rate index and version fields from the frame header
+// to a sample rate.
+// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
+static const int kSampleRateMap[4][4] = {
+ // { V2.5, reserved, V2, V1 }
+ { 11025, 0, 22050, 44100 },
+ { 12000, 0, 24000, 48000 },
+ { 8000, 0, 16000, 32000 },
+ { 0, 0, 0, 0 }
+};
+
+// Offset in bytes from the end of the MP3 header to the "Xing" or "Info" tag
+// that marks a frame as a silent metadata frame. Values taken from FFmpeg.
+static const int kXingHeaderMap[2][2] = {{32, 17}, {17, 9}};
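+// For example (illustrative): an MPEG1 stereo frame looks for the tag
+// kXingHeaderMap[0][0] == 32 bytes past the header, while an MPEG2 mono
+// frame uses kXingHeaderMap[1][1] == 9 bytes.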
+
+// Frame header field constants.
+static const int kVersion2 = 2;
+static const int kVersionReserved = 1;
+static const int kVersion2_5 = 0;
+static const int kLayerReserved = 0;
+static const int kLayer1 = 3;
+static const int kLayer2 = 2;
+static const int kLayer3 = 1;
+static const int kBitrateFree = 0;
+static const int kBitrateBad = 0xf;
+static const int kSampleRateReserved = 3;
+static const int kCodecDelay = 529;
+
+MP3StreamParser::MP3StreamParser()
+ : MPEGAudioStreamParserBase(kMP3StartCodeMask, kCodecMP3, kCodecDelay) {}
+
+MP3StreamParser::~MP3StreamParser() {}
+
+int MP3StreamParser::ParseFrameHeader(const uint8* data,
+ int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count,
+ bool* metadata_frame) const {
+ DCHECK(data);
+ DCHECK_GE(size, 0);
+ DCHECK(frame_size);
+
+ if (size < 4)
+ return 0;
+
+ BitReader reader(data, size);
+ int sync;
+ int version;
+ int layer;
+ int is_protected;
+ int bitrate_index;
+ int sample_rate_index;
+ int has_padding;
+ int is_private;
+ int channel_mode;
+ int other_flags;
+
+ if (!reader.ReadBits(11, &sync) ||
+ !reader.ReadBits(2, &version) ||
+ !reader.ReadBits(2, &layer) ||
+ !reader.ReadBits(1, &is_protected) ||
+ !reader.ReadBits(4, &bitrate_index) ||
+ !reader.ReadBits(2, &sample_rate_index) ||
+ !reader.ReadBits(1, &has_padding) ||
+ !reader.ReadBits(1, &is_private) ||
+ !reader.ReadBits(2, &channel_mode) ||
+ !reader.ReadBits(6, &other_flags)) {
+ return -1;
+ }
+
+ DVLOG(2) << "Header data :" << std::hex
+ << " sync 0x" << sync
+ << " version 0x" << version
+ << " layer 0x" << layer
+ << " bitrate_index 0x" << bitrate_index
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_mode 0x" << channel_mode;
+
+ if (sync != 0x7ff ||
+ version == kVersionReserved ||
+ layer == kLayerReserved ||
+ bitrate_index == kBitrateFree || bitrate_index == kBitrateBad ||
+ sample_rate_index == kSampleRateReserved) {
+ MEDIA_LOG(log_cb()) << "Invalid header data :" << std::hex
+ << " sync 0x" << sync
+ << " version 0x" << version
+ << " layer 0x" << layer
+ << " bitrate_index 0x" << bitrate_index
+ << " sample_rate_index 0x" << sample_rate_index
+ << " channel_mode 0x" << channel_mode;
+ return -1;
+ }
+
+ if (layer == kLayer2 && !kIsAllowed[bitrate_index][channel_mode]) {
+ MEDIA_LOG(log_cb()) << "Invalid (bitrate_index, channel_mode) combination :"
+ << std::hex
+ << " bitrate_index " << bitrate_index
+ << " channel_mode " << channel_mode;
+ return -1;
+ }
+
+ int bitrate = kBitrateMap[bitrate_index][kVersionLayerMap[version][layer]];
+
+ if (bitrate == 0) {
+ MEDIA_LOG(log_cb()) << "Invalid bitrate :" << std::hex
+ << " version " << version
+ << " layer " << layer
+ << " bitrate_index " << bitrate_index;
+ return -1;
+ }
+
+ DVLOG(2) << " bitrate " << bitrate;
+
+ int frame_sample_rate = kSampleRateMap[sample_rate_index][version];
+ if (frame_sample_rate == 0) {
+ MEDIA_LOG(log_cb()) << "Invalid sample rate :" << std::hex
+ << " version " << version
+ << " sample_rate_index " << sample_rate_index;
+ return -1;
+ }
+
+ if (sample_rate)
+ *sample_rate = frame_sample_rate;
+
+ // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
+ // Table 2.1.5
+ int samples_per_frame;
+ switch (layer) {
+ case kLayer1:
+ samples_per_frame = 384;
+ break;
+
+ case kLayer2:
+ samples_per_frame = 1152;
+ break;
+
+ case kLayer3:
+ if (version == kVersion2 || version == kVersion2_5)
+ samples_per_frame = 576;
+ else
+ samples_per_frame = 1152;
+ break;
+
+ default:
+ return -1;
+ }
+
+ if (sample_count)
+ *sample_count = samples_per_frame;
+
+ // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
+ // Text just below Table 2.1.5.
+ if (layer == kLayer1) {
+ // This formulation is a slight variation on the equation below,
+ // but has slightly different truncation characteristics to deal
+ // with the fact that Layer 1 has 4 byte "slots" instead of single
+ // byte ones.
+ *frame_size = 4 * (12 * bitrate * 1000 / frame_sample_rate);
+ } else {
+ *frame_size =
+ ((samples_per_frame / 8) * bitrate * 1000) / frame_sample_rate;
+ }
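+
+ // Worked example (illustrative): a 44.1 kHz, 128 kbps MPEG1 Layer 3 frame
+ // computes to ((1152 / 8) * 128 * 1000) / 44100 = 417 bytes before padding.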
+
+ if (has_padding)
+ *frame_size += (layer == kLayer1) ? 4 : 1;
+
+ if (channel_layout) {
+ // Map Stereo(0), Joint Stereo(1), and Dual Channel (2) to
+ // CHANNEL_LAYOUT_STEREO and Single Channel (3) to CHANNEL_LAYOUT_MONO.
+ *channel_layout =
+ (channel_mode == 3) ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ }
+
+ if (metadata_frame)
+ *metadata_frame = false;
+
+ const int header_bytes_read = reader.bits_read() / 8;
+ if (layer != kLayer3)
+ return header_bytes_read;
+
+ // Check if this is a XING frame and tell the base parser to skip it if so.
+ const int xing_header_index =
+ kXingHeaderMap[version == kVersion2 ||
+ version == kVersion2_5][channel_mode == 3];
+ uint32_t tag = 0;
+
+ // It's not a XING frame if the frame isn't big enough to be one.
+ if (*frame_size <
+ header_bytes_read + xing_header_index + static_cast<int>(sizeof(tag))) {
+ return header_bytes_read;
+ }
+
+ // If we don't have enough data available to check, return 0 so frame parsing
+ // will be retried once more data is available.
+ if (!reader.SkipBits(xing_header_index * 8) ||
+ !reader.ReadBits(sizeof(tag) * 8, &tag)) {
+ return 0;
+ }
+
+ // Check to see if the tag contains 'Xing' or 'Info'.
+ if (tag == 0x496e666f || tag == 0x58696e67) {
+ MEDIA_LOG(log_cb()) << "Skipping XING header.";
+ if (metadata_frame)
+ *metadata_frame = true;
+ return reader.bits_read() / 8;
+ }
+
+ // If it wasn't a XING frame, just return the number of consumed bytes.
+ return header_bytes_read;
+}
+
+} // namespace media
diff --git a/chromium/media/formats/mpeg/mp3_stream_parser.h b/chromium/media/formats/mpeg/mp3_stream_parser.h
new file mode 100644
index 00000000000..b5271d848be
--- /dev/null
+++ b/chromium/media/formats/mpeg/mp3_stream_parser.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MPEG_MP3_STREAM_PARSER_H_
+#define MEDIA_FORMATS_MPEG_MP3_STREAM_PARSER_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/formats/mpeg/mpeg_audio_stream_parser_base.h"
+
+namespace media {
+
+class MEDIA_EXPORT MP3StreamParser : public MPEGAudioStreamParserBase {
+ public:
+ MP3StreamParser();
+ virtual ~MP3StreamParser();
+
+ private:
+ // MPEGAudioStreamParserBase overrides.
+ virtual int ParseFrameHeader(const uint8* data,
+ int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count,
+ bool* metadata_frame) const OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(MP3StreamParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_MPEG_MP3_STREAM_PARSER_H_
diff --git a/chromium/media/formats/mpeg/mp3_stream_parser_unittest.cc b/chromium/media/formats/mpeg/mp3_stream_parser_unittest.cc
new file mode 100644
index 00000000000..16387527159
--- /dev/null
+++ b/chromium/media/formats/mpeg/mp3_stream_parser_unittest.cc
@@ -0,0 +1,95 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/test_data_util.h"
+#include "media/formats/common/stream_parser_test_base.h"
+#include "media/formats/mpeg/mp3_stream_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class MP3StreamParserTest : public StreamParserTestBase, public testing::Test {
+ public:
+ MP3StreamParserTest()
+ : StreamParserTestBase(
+ scoped_ptr<StreamParser>(new MP3StreamParser()).Pass()) {}
+ virtual ~MP3StreamParserTest() {}
+};
+
+// Test parsing with small prime sized chunks to smoke out "power of
+// 2" field size assumptions.
+TEST_F(MP3StreamParserTest, UnalignedAppend) {
+ const std::string expected =
+ "NewSegment"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 0K }"
+ "{ 0K }"
+ "{ 0K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 0K }"
+ "{ 0K }"
+ "EndOfSegment";
+ EXPECT_EQ(expected, ParseFile("sfx.mp3", 17));
+ EXPECT_GT(last_audio_config().codec_delay(), 0);
+}
+
+// Test parsing with a larger piece size to verify that multiple buffers
+// are passed to |new_buffer_cb_|.
+TEST_F(MP3StreamParserTest, UnalignedAppend512) {
+ const std::string expected =
+ "NewSegment"
+ "{ 0K 26K 52K 78K }"
+ "EndOfSegment"
+ "NewSegment"
+ "{ 0K 26K 52K }"
+ "{ 0K 26K 52K 78K }"
+ "{ 0K }"
+ "EndOfSegment";
+ EXPECT_EQ(expected, ParseFile("sfx.mp3", 512));
+ EXPECT_GT(last_audio_config().codec_delay(), 0);
+}
+
+TEST_F(MP3StreamParserTest, MetadataParsing) {
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("sfx.mp3");
+ const uint8_t* buffer_ptr = buffer->data();
+
+ // The first 32 bytes of sfx.mp3 are an ID3 tag, so no segments should be
+ // extracted after appending those bytes.
+ const int kId3TagSize = 32;
+ EXPECT_EQ("", ParseData(buffer_ptr, kId3TagSize));
+ EXPECT_FALSE(last_audio_config().IsValidConfig());
+ buffer_ptr += kId3TagSize;
+
+ // The next 417 bytes are a Xing frame, with the identifier 21 bytes into
+ // the frame. Appending less than 21 bytes should result in no segments
+ // and no AudioDecoderConfig being created.
+ const int kXingTagPosition = 21;
+ EXPECT_EQ("", ParseData(buffer_ptr, kXingTagPosition));
+ EXPECT_FALSE(last_audio_config().IsValidConfig());
+ buffer_ptr += kXingTagPosition;
+
+ // Appending the rest of the Xing frame should result in no segments, but
+ // should generate a valid AudioDecoderConfig.
+ const int kXingRemainingSize = 417 - kXingTagPosition;
+ EXPECT_EQ("", ParseData(buffer_ptr, kXingRemainingSize));
+ EXPECT_TRUE(last_audio_config().IsValidConfig());
+ buffer_ptr += kXingRemainingSize;
+
+ // Append the first real frame and ensure we get a segment.
+ const int kFirstRealFrameSize = 182;
+ EXPECT_EQ("NewSegment{ 0K }EndOfSegment",
+ ParseData(buffer_ptr, kFirstRealFrameSize));
+ EXPECT_TRUE(last_audio_config().IsValidConfig());
+}
+
+} // namespace media
diff --git a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
new file mode 100644
index 00000000000..89c1b622861
--- /dev/null
+++ b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
@@ -0,0 +1,421 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mpeg/mpeg_audio_stream_parser_base.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/text_track_config.h"
+#include "media/base/video_decoder_config.h"
+
+namespace media {
+
+static const uint32 kICYStartCode = 0x49435920; // 'ICY '
+
+// Arbitrary upper bound on the size of an IceCast header before it
+// triggers an error.
+static const int kMaxIcecastHeaderSize = 4096;
+
+static const uint32 kID3StartCodeMask = 0xffffff00;
+static const uint32 kID3v1StartCode = 0x54414700; // 'TAG\0'
+static const int kID3v1Size = 128;
+static const int kID3v1ExtendedSize = 227;
+static const uint32 kID3v2StartCode = 0x49443300; // 'ID3\0'
+
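+// Scans |buf| starting at |i| for the first empty line ("\n\n", tolerating
+// intervening CRs) that terminates an ICY header block. Returns the offset
+// just past it, or -1 if no terminator is found within |buf_len| bytes.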
+static int LocateEndOfHeaders(const uint8_t* buf, int buf_len, int i) {
+ bool was_lf = false;
+ char last_c = '\0';
+ for (; i < buf_len; ++i) {
+ char c = buf[i];
+ if (c == '\n') {
+ if (was_lf)
+ return i + 1;
+ was_lf = true;
+ } else if (c != '\r' || last_c != '\n') {
+ was_lf = false;
+ }
+ last_c = c;
+ }
+ return -1;
+}
+
+MPEGAudioStreamParserBase::MPEGAudioStreamParserBase(uint32 start_code_mask,
+ AudioCodec audio_codec,
+ int codec_delay)
+ : state_(UNINITIALIZED),
+ in_media_segment_(false),
+ start_code_mask_(start_code_mask),
+ audio_codec_(audio_codec),
+ codec_delay_(codec_delay) {}
+
+MPEGAudioStreamParserBase::~MPEGAudioStreamParserBase() {}
+
+void MPEGAudioStreamParserBase::Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const NeedKeyCB& need_key_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, UNINITIALIZED);
+ init_cb_ = init_cb;
+ config_cb_ = config_cb;
+ new_buffers_cb_ = new_buffers_cb;
+ new_segment_cb_ = new_segment_cb;
+ end_of_segment_cb_ = end_of_segment_cb;
+ log_cb_ = log_cb;
+
+ ChangeState(INITIALIZED);
+}
+
+void MPEGAudioStreamParserBase::Flush() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK_NE(state_, UNINITIALIZED);
+ queue_.Reset();
+ timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
+ in_media_segment_ = false;
+}
+
+bool MPEGAudioStreamParserBase::Parse(const uint8* buf, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+ DCHECK(buf);
+ DCHECK_GT(size, 0);
+ DCHECK_NE(state_, UNINITIALIZED);
+
+ if (state_ == PARSE_ERROR)
+ return false;
+
+ DCHECK_EQ(state_, INITIALIZED);
+
+ queue_.Push(buf, size);
+
+ bool end_of_segment = true;
+ BufferQueue buffers;
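+ // Dispatch on the 32-bit start code at the head of the queue: an audio
+ // frame sync word, an ICY header, an ID3v1/ID3v2 tag, or garbage to skip
+ // until the next run of valid frames.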
+ for (;;) {
+ const uint8* data;
+ int data_size;
+ queue_.Peek(&data, &data_size);
+
+ if (data_size < 4)
+ break;
+
+ uint32 start_code = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
+ int bytes_read = 0;
+ bool parsed_metadata = true;
+ if ((start_code & start_code_mask_) == start_code_mask_) {
+ bytes_read = ParseFrame(data, data_size, &buffers);
+
+ // Only allow the current segment to end if a full frame has been parsed.
+ end_of_segment = bytes_read > 0;
+ parsed_metadata = false;
+ } else if (start_code == kICYStartCode) {
+ bytes_read = ParseIcecastHeader(data, data_size);
+ } else if ((start_code & kID3StartCodeMask) == kID3v1StartCode) {
+ bytes_read = ParseID3v1(data, data_size);
+ } else if ((start_code & kID3StartCodeMask) == kID3v2StartCode) {
+ bytes_read = ParseID3v2(data, data_size);
+ } else {
+ bytes_read = FindNextValidStartCode(data, data_size);
+
+ if (bytes_read > 0) {
+ DVLOG(1) << "Unexpected start code 0x" << std::hex << start_code;
+ DVLOG(1) << "SKIPPING " << bytes_read << " bytes of garbage.";
+ }
+ }
+
+ CHECK_LE(bytes_read, data_size);
+
+ if (bytes_read < 0) {
+ ChangeState(PARSE_ERROR);
+ return false;
+ } else if (bytes_read == 0) {
+ // Need more data.
+ break;
+ }
+
+ // Send pending buffers if we have encountered metadata.
+ if (parsed_metadata && !buffers.empty() && !SendBuffers(&buffers, true))
+ return false;
+
+ queue_.Pop(bytes_read);
+ end_of_segment = true;
+ }
+
+ if (buffers.empty())
+ return true;
+
+ // Send buffers collected in this append that haven't been sent yet.
+ return SendBuffers(&buffers, end_of_segment);
+}
+
+void MPEGAudioStreamParserBase::ChangeState(State state) {
+ DVLOG(1) << __FUNCTION__ << "() : " << state_ << " -> " << state;
+ state_ = state;
+}
+
+int MPEGAudioStreamParserBase::ParseFrame(const uint8* data,
+ int size,
+ BufferQueue* buffers) {
+ DVLOG(2) << __FUNCTION__ << "(" << size << ")";
+
+ int sample_rate;
+ ChannelLayout channel_layout;
+ int frame_size;
+ int sample_count;
+ bool metadata_frame = false;
+ int bytes_read = ParseFrameHeader(data,
+ size,
+ &frame_size,
+ &sample_rate,
+ &channel_layout,
+ &sample_count,
+ &metadata_frame);
+
+ if (bytes_read <= 0)
+ return bytes_read;
+
+ // Make sure data contains the entire frame.
+ if (size < frame_size)
+ return 0;
+
+ DVLOG(2) << " sample_rate " << sample_rate
+ << " channel_layout " << channel_layout
+ << " frame_size " << frame_size
+ << " sample_count " << sample_count;
+
+ if (config_.IsValidConfig() &&
+ (config_.samples_per_second() != sample_rate ||
+ config_.channel_layout() != channel_layout)) {
+ // Clear config data so that a config change is initiated.
+ config_ = AudioDecoderConfig();
+
+ // Send all buffers associated with the previous config.
+ if (!buffers->empty() && !SendBuffers(buffers, true))
+ return -1;
+ }
+
+ if (!config_.IsValidConfig()) {
+ config_.Initialize(audio_codec_,
+ kSampleFormatF32,
+ channel_layout,
+ sample_rate,
+ NULL,
+ 0,
+ false,
+ false,
+ base::TimeDelta(),
+ codec_delay_);
+
+ base::TimeDelta base_timestamp;
+ if (timestamp_helper_)
+ base_timestamp = timestamp_helper_->GetTimestamp();
+
+ timestamp_helper_.reset(new AudioTimestampHelper(sample_rate));
+ timestamp_helper_->SetBaseTimestamp(base_timestamp);
+
+ VideoDecoderConfig video_config;
+ bool success = config_cb_.Run(config_, video_config, TextTrackConfigMap());
+
+ if (!init_cb_.is_null()) {
+ InitParameters params(kInfiniteDuration());
+ params.auto_update_timestamp_offset = true;
+ base::ResetAndReturn(&init_cb_).Run(success, params);
+ }
+
+ if (!success)
+ return -1;
+ }
+
+ if (metadata_frame)
+ return frame_size;
+
+ // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
+ // type and allow multiple audio tracks, if applicable. See
+ // https://crbug.com/341581.
+ scoped_refptr<StreamParserBuffer> buffer =
+ StreamParserBuffer::CopyFrom(data, frame_size, true,
+ DemuxerStream::AUDIO, 0);
+ buffer->set_timestamp(timestamp_helper_->GetTimestamp());
+ buffer->set_duration(timestamp_helper_->GetFrameDuration(sample_count));
+ buffers->push_back(buffer);
+
+ timestamp_helper_->AddFrames(sample_count);
+
+ return frame_size;
+}
+
+int MPEGAudioStreamParserBase::ParseIcecastHeader(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < 4)
+ return 0;
+
+ if (memcmp("ICY ", data, 4))
+ return -1;
+
+ int locate_size = std::min(size, kMaxIcecastHeaderSize);
+ int offset = LocateEndOfHeaders(data, locate_size, 4);
+ if (offset < 0) {
+ if (locate_size == kMaxIcecastHeaderSize) {
+ MEDIA_LOG(log_cb_) << "Icecast header is too large.";
+ return -1;
+ }
+
+ return 0;
+ }
+
+ return offset;
+}
+
+int MPEGAudioStreamParserBase::ParseID3v1(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < kID3v1Size)
+ return 0;
+
+ // TODO(acolwell): Add code to actually validate ID3v1 data and
+ // expose it as a metadata text track.
+ return !memcmp(data, "TAG+", 4) ? kID3v1ExtendedSize : kID3v1Size;
+}
+
+int MPEGAudioStreamParserBase::ParseID3v2(const uint8* data, int size) {
+ DVLOG(1) << __FUNCTION__ << "(" << size << ")";
+
+ if (size < 10)
+ return 0;
+
+ BitReader reader(data, size);
+ int32 id;
+ int version;
+ uint8 flags;
+ int32 id3_size;
+
+ if (!reader.ReadBits(24, &id) ||
+ !reader.ReadBits(16, &version) ||
+ !reader.ReadBits(8, &flags) ||
+ !ParseSyncSafeInt(&reader, &id3_size)) {
+ return -1;
+ }
+
+ int32 actual_tag_size = 10 + id3_size;
+
+ // Increment size if 'Footer present' flag is set.
+ if (flags & 0x10)
+ actual_tag_size += 10;
+
+ // Make sure we have the entire tag.
+ if (size < actual_tag_size)
+ return 0;
+
+ // TODO(acolwell): Add code to actually validate ID3v2 data and
+ // expose it as a metadata text track.
+ return actual_tag_size;
+}
+
+bool MPEGAudioStreamParserBase::ParseSyncSafeInt(BitReader* reader,
+ int32* value) {
+ *value = 0;
+ for (int i = 0; i < 4; ++i) {
+ uint8 tmp;
+ if (!reader->ReadBits(1, &tmp) || tmp != 0) {
+ MEDIA_LOG(log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
+ return false;
+ }
+
+ if (!reader->ReadBits(7, &tmp))
+ return false;
+
+ *value <<= 7;
+ *value += tmp;
+ }
+
+ return true;
+}
+
+int MPEGAudioStreamParserBase::FindNextValidStartCode(const uint8* data,
+ int size) const {
+ const uint8* start = data;
+ const uint8* end = data + size;
+
+ while (start < end) {
+ int bytes_left = end - start;
+ const uint8* candidate_start_code =
+ static_cast<const uint8*>(memchr(start, 0xff, bytes_left));
+
+ if (!candidate_start_code)
+ return 0;
+
+ bool parse_header_failed = false;
+ const uint8* sync = candidate_start_code;
+ // Try to find 3 valid frames in a row. 3 was selected to decrease
+ // the probability of false positives.
+ for (int i = 0; i < 3; ++i) {
+ int sync_size = end - sync;
+ int frame_size;
+ int sync_bytes = ParseFrameHeader(
+ sync, sync_size, &frame_size, NULL, NULL, NULL, NULL);
+
+ if (sync_bytes == 0)
+ return 0;
+
+ if (sync_bytes > 0) {
+ DCHECK_LT(sync_bytes, sync_size);
+
+ // Skip over this frame so we can check the next one.
+ sync += frame_size;
+
+ // Make sure the next frame starts inside the buffer.
+ if (sync >= end)
+ return 0;
+ } else {
+ DVLOG(1) << "ParseFrameHeader() " << i << " failed @" << (sync - data);
+ parse_header_failed = true;
+ break;
+ }
+ }
+
+ if (parse_header_failed) {
+      // One of the frame header parses failed, so |candidate_start_code|
+ // did not point to the start of a real frame. Move |start| forward
+ // so we can find the next candidate.
+ start = candidate_start_code + 1;
+ continue;
+ }
+
+ return candidate_start_code - data;
+ }
+
+ return 0;
+}
+
+bool MPEGAudioStreamParserBase::SendBuffers(BufferQueue* buffers,
+ bool end_of_segment) {
+ DCHECK(!buffers->empty());
+
+ if (!in_media_segment_) {
+ in_media_segment_ = true;
+ new_segment_cb_.Run();
+ }
+
+ BufferQueue empty_video_buffers;
+ TextBufferQueueMap empty_text_map;
+ if (!new_buffers_cb_.Run(*buffers, empty_video_buffers, empty_text_map))
+ return false;
+ buffers->clear();
+
+ if (end_of_segment) {
+ in_media_segment_ = false;
+ end_of_segment_cb_.Run();
+ }
+
+ timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
+ return true;
+}
+
+} // namespace media
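
Note: the ParseSyncSafeInt() routine above decodes ID3v2's 28-bit "syncsafe" length: four bytes, each carrying 7 payload bits with the most-significant bit forced to 0 so the value can never alias an MPEG sync pattern. A standalone sketch of the same decoding with a worked example follows; the helper name is invented for illustration and is not part of this patch.

    #include <stdint.h>

    // Decodes a 4-byte ID3v2 syncsafe integer, MSB-first, 7 bits per byte.
    // Returns false if any byte has its high bit set (invalid syncsafe data).
    static bool DecodeSyncSafeInt(const uint8_t bytes[4], int32_t* value) {
      *value = 0;
      for (int i = 0; i < 4; ++i) {
        if (bytes[i] & 0x80)
          return false;
        *value = (*value << 7) | bytes[i];
      }
      return true;
    }

    // Example: {0x00, 0x00, 0x02, 0x01} decodes to (2 << 7) | 1 == 257, so
    // the tag body is 257 bytes and the full ID3v2 tag spans 10 + 257 bytes
    // (plus 10 more if the footer flag is set, as handled in ParseID3v2()).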
diff --git a/chromium/media/mp3/mp3_stream_parser.h b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
index 1e2e8c6f5b3..31a1b7c3837 100644
--- a/chromium/media/mp3/mp3_stream_parser.h
+++ b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_MP3_MP3_STREAM_PARSER_H_
-#define MEDIA_MP3_MP3_STREAM_PARSER_H_
+#ifndef MEDIA_FORMATS_MPEG_MPEG_AUDIO_STREAM_PARSER_BASE_H_
+#define MEDIA_FORMATS_MPEG_MPEG_AUDIO_STREAM_PARSER_BASE_H_
#include <set>
#include <vector>
@@ -12,23 +12,29 @@
#include "base/callback.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
+#include "media/base/bit_reader.h"
#include "media/base/byte_queue.h"
#include "media/base/media_export.h"
#include "media/base/stream_parser.h"
namespace media {
-class BitReader;
-
-class MEDIA_EXPORT MP3StreamParser : public StreamParser {
+class MEDIA_EXPORT MPEGAudioStreamParserBase : public StreamParser {
public:
- MP3StreamParser();
- virtual ~MP3StreamParser();
+ // |start_code_mask| is used to find the start of each frame header. Also
+ // referred to as the sync code in the MP3 and ADTS header specifications.
+ // |codec_delay| is the number of samples the decoder will output before the
+ // first real frame.
+ MPEGAudioStreamParserBase(uint32 start_code_mask,
+ AudioCodec audio_codec,
+ int codec_delay);
+ virtual ~MPEGAudioStreamParserBase();
// StreamParser implementation.
- virtual void Init(const InitCB& init_cb, const NewConfigCB& config_cb,
+ virtual void Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ bool ignore_text_tracks,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -36,35 +42,13 @@ class MEDIA_EXPORT MP3StreamParser : public StreamParser {
virtual void Flush() OVERRIDE;
virtual bool Parse(const uint8* buf, int size) OVERRIDE;
- private:
- enum State {
- UNINITIALIZED,
- INITIALIZED,
- PARSE_ERROR
- };
-
- State state_;
-
- InitCB init_cb_;
- NewConfigCB config_cb_;
- NewBuffersCB new_buffers_cb_;
- NewMediaSegmentCB new_segment_cb_;
- base::Closure end_of_segment_cb_;
- LogCB log_cb_;
-
- ByteQueue queue_;
-
- AudioDecoderConfig config_;
- scoped_ptr<AudioTimestampHelper> timestamp_helper_;
- bool in_media_segment_;
-
- void ChangeState(State state);
-
- // Parsing functions for various byte stream elements.
+ protected:
+ // Subclasses implement this method to parse format specific frame headers.
// |data| & |size| describe the data available for parsing.
- // These functions are expected to consume an entire frame/header.
- // It should only return a value greater than 0 when |data| has
- // enough bytes to successfully parse & consume the entire element.
+ //
+  // Implementations are expected to consume an entire frame header, and
+  // should only return a value greater than 0 when |data| has enough bytes to
+  // successfully parse & consume the entire frame header.
//
// |frame_size| - Required parameter that is set to the size of the frame, in
// bytes, including the frame header if the function returns a value > 0.
@@ -74,20 +58,48 @@ class MEDIA_EXPORT MP3StreamParser : public StreamParser {
// of the frame if this function returns a value > 0.
// |sample_count| - Optional parameter that is set to the number of samples
// in the frame if this function returns a value > 0.
+ // |metadata_frame| - Optional parameter that is set to true if the frame has
+ // valid values for the above parameters, but no usable encoded data; only set
+ // to true if this function returns a value > 0.
//
- // |sample_rate|, |channel_layout|, |sample_count| may be NULL if the caller
- // is not interested in receiving these values from the frame header.
+ // |sample_rate|, |channel_layout|, |sample_count|, |metadata_frame| may be
+ // NULL if the caller is not interested in receiving these values from the
+ // frame header.
+ //
+ // If |metadata_frame| is true, the MPEGAudioStreamParserBase will discard the
+ // frame after consuming the metadata values above.
+ //
+ // Returns:
+ // > 0 : The number of bytes parsed.
+ // 0 : If more data is needed to parse the entire frame header.
+ // < 0 : An error was encountered during parsing.
+ virtual int ParseFrameHeader(const uint8* data,
+ int size,
+ int* frame_size,
+ int* sample_rate,
+ ChannelLayout* channel_layout,
+ int* sample_count,
+ bool* metadata_frame) const = 0;
+
+ const LogCB& log_cb() const { return log_cb_; }
+
+ private:
+ enum State {
+ UNINITIALIZED,
+ INITIALIZED,
+ PARSE_ERROR
+ };
+
+ void ChangeState(State state);
+
+ // Parsing functions for various byte stream elements. |data| & |size|
+ // describe the data available for parsing.
//
// Returns:
// > 0 : The number of bytes parsed.
// 0 : If more data is needed to parse the entire element.
// < 0 : An error was encountered during parsing.
- int ParseFrameHeader(const uint8* data, int size,
- int* frame_size,
- int* sample_rate,
- ChannelLayout* channel_layout,
- int* sample_count) const;
- int ParseMP3Frame(const uint8* data, int size, BufferQueue* buffers);
+ int ParseFrame(const uint8* data, int size, BufferQueue* buffers);
int ParseIcecastHeader(const uint8* data, int size);
int ParseID3v1(const uint8* data, int size);
int ParseID3v2(const uint8* data, int size);
@@ -118,9 +130,27 @@ class MEDIA_EXPORT MP3StreamParser : public StreamParser {
// Returns true if the buffers are sent successfully.
bool SendBuffers(BufferQueue* buffers, bool end_of_segment);
- DISALLOW_COPY_AND_ASSIGN(MP3StreamParser);
+ State state_;
+
+ InitCB init_cb_;
+ NewConfigCB config_cb_;
+ NewBuffersCB new_buffers_cb_;
+ NewMediaSegmentCB new_segment_cb_;
+ base::Closure end_of_segment_cb_;
+ LogCB log_cb_;
+
+ ByteQueue queue_;
+
+ AudioDecoderConfig config_;
+ scoped_ptr<AudioTimestampHelper> timestamp_helper_;
+ bool in_media_segment_;
+ const uint32 start_code_mask_;
+ const AudioCodec audio_codec_;
+ const int codec_delay_;
+
+ DISALLOW_COPY_AND_ASSIGN(MPEGAudioStreamParserBase);
};
} // namespace media
-#endif // MEDIA_MP3_MP3_STREAM_PARSER_H_
+#endif // MEDIA_FORMATS_MPEG_MPEG_AUDIO_STREAM_PARSER_BASE_H_
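
Note: the three-way return contract documented for ParseFrameHeader() above (> 0 bytes consumed, 0 means more data is needed, < 0 signals an error) is easiest to see in a subclass. The sketch below is purely illustrative — the class name, sync-bit check, and fixed frame geometry are invented and not part of this patch.

    // Hypothetical subclass honoring the ParseFrameHeader() contract.
    class FakeAudioStreamParser : public MPEGAudioStreamParserBase {
     public:
      FakeAudioStreamParser()
          : MPEGAudioStreamParserBase(0xffe00000, kCodecMP3, 0) {}

     private:
      virtual int ParseFrameHeader(const uint8* data,
                                   int size,
                                   int* frame_size,
                                   int* sample_rate,
                                   ChannelLayout* channel_layout,
                                   int* sample_count,
                                   bool* metadata_frame) const OVERRIDE {
        if (size < 4)
          return 0;   // Not enough data; the caller will retry with more.
        if (data[0] != 0xff || (data[1] & 0xe0) != 0xe0)
          return -1;  // Sync bits missing: parse error at this offset.
        *frame_size = 417;  // Pretend every frame is 417 bytes, header included.
        if (sample_rate)
          *sample_rate = 44100;
        if (channel_layout)
          *channel_layout = CHANNEL_LAYOUT_STEREO;
        if (sample_count)
          *sample_count = 1152;
        if (metadata_frame)
          *metadata_frame = false;  // Real, decodable audio data.
        return 4;                   // Header bytes consumed.
      }
    };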
diff --git a/chromium/media/webm/chromeos/DEPS b/chromium/media/formats/webm/chromeos/DEPS
index a4378dca94a..a4378dca94a 100644
--- a/chromium/media/webm/chromeos/DEPS
+++ b/chromium/media/formats/webm/chromeos/DEPS
diff --git a/chromium/media/webm/chromeos/ebml_writer.cc b/chromium/media/formats/webm/chromeos/ebml_writer.cc
index 84a9760769f..c00063f2b28 100644
--- a/chromium/media/webm/chromeos/ebml_writer.cc
+++ b/chromium/media/formats/webm/chromeos/ebml_writer.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/chromeos/ebml_writer.h"
+#include "media/formats/webm/chromeos/ebml_writer.h"
#include "media/base/media_export.h"
diff --git a/chromium/media/webm/chromeos/ebml_writer.h b/chromium/media/formats/webm/chromeos/ebml_writer.h
index 0714ebf1641..3c1faa04297 100644
--- a/chromium/media/webm/chromeos/ebml_writer.h
+++ b/chromium/media/formats/webm/chromeos/ebml_writer.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_CHROMEOS_EBML_WRITER_H_
-#define MEDIA_WEBM_CHROMEOS_EBML_WRITER_H_
+#ifndef MEDIA_FORMATS_WEBM_CHROMEOS_EBML_WRITER_H_
+#define MEDIA_FORMATS_WEBM_CHROMEOS_EBML_WRITER_H_
#include "base/callback.h"
@@ -18,4 +18,4 @@ struct EbmlGlobal {
serialize_cb;
};
-#endif // MEDIA_WEBM_CHROMEOS_EBML_WRITER_H_
+#endif // MEDIA_FORMATS_WEBM_CHROMEOS_EBML_WRITER_H_
diff --git a/chromium/media/webm/chromeos/webm_encoder.cc b/chromium/media/formats/webm/chromeos/webm_encoder.cc
index 059f9c6fef6..4b5c782452d 100644
--- a/chromium/media/webm/chromeos/webm_encoder.cc
+++ b/chromium/media/formats/webm/chromeos/webm_encoder.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/chromeos/webm_encoder.h"
+#include "media/formats/webm/chromeos/webm_encoder.h"
#include "base/bind.h"
#include "base/file_util.h"
diff --git a/chromium/media/webm/chromeos/webm_encoder.h b/chromium/media/formats/webm/chromeos/webm_encoder.h
index 126c0d20758..fd0fc7582c6 100644
--- a/chromium/media/webm/chromeos/webm_encoder.h
+++ b/chromium/media/formats/webm/chromeos/webm_encoder.h
@@ -1,16 +1,16 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_CHROMEOS_WEBM_ENCODER_H_
-#define MEDIA_WEBM_CHROMEOS_WEBM_ENCODER_H_
+#ifndef MEDIA_FORMATS_WEBM_CHROMEOS_WEBM_ENCODER_H_
+#define MEDIA_FORMATS_WEBM_CHROMEOS_WEBM_ENCODER_H_
#include <stdio.h>
#include <stack>
#include "base/files/file_path.h"
#include "media/base/media_export.h"
-#include "media/webm/chromeos/ebml_writer.h"
+#include "media/formats/webm/chromeos/ebml_writer.h"
extern "C" {
#define VPX_CODEC_DISABLE_COMPAT 1
@@ -103,4 +103,4 @@ class MEDIA_EXPORT WebmEncoder {
} // namespace media
-#endif // MEDIA_WEBM_CHROMEOS_WEBM_ENCODER_H_
+#endif // MEDIA_FORMATS_WEBM_CHROMEOS_WEBM_ENCODER_H_
diff --git a/chromium/media/webm/cluster_builder.cc b/chromium/media/formats/webm/cluster_builder.cc
index e320cbb6653..1a3b358ef99 100644
--- a/chromium/media/webm/cluster_builder.cc
+++ b/chromium/media/formats/webm/cluster_builder.cc
@@ -1,11 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/cluster_builder.h"
+#include "media/formats/webm/cluster_builder.h"
#include "base/logging.h"
#include "media/base/data_buffer.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
@@ -32,6 +33,13 @@ static const uint8 kBlockGroupHeader[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Block(size = 0)
};
+static const uint8 kBlockGroupHeaderWithoutBlockDuration[] = {
+ 0xA0, // BlockGroup ID
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // BlockGroup(size = 0)
+ 0xA1, // Block ID
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Block(size = 0)
+};
+
enum {
kClusterSizeOffset = 4,
kClusterTimecodeOffset = 14,
@@ -39,6 +47,7 @@ enum {
kSimpleBlockSizeOffset = 1,
kBlockGroupSizeOffset = 1,
+ kBlockGroupWithoutBlockDurationBlockSizeOffset = 10,
kBlockGroupDurationOffset = 11,
kBlockGroupBlockSizeOffset = 20,
@@ -85,8 +94,30 @@ void ClusterBuilder::AddSimpleBlock(int track_num, int64 timecode, int flags,
void ClusterBuilder::AddBlockGroup(int track_num, int64 timecode, int duration,
int flags, const uint8* data, int size) {
+ AddBlockGroupInternal(track_num, timecode, true, duration, flags, data, size);
+}
+
+void ClusterBuilder::AddBlockGroupWithoutBlockDuration(int track_num,
+ int64 timecode,
+ int flags,
+ const uint8* data,
+ int size) {
+ AddBlockGroupInternal(track_num, timecode, false, 0, flags, data, size);
+}
+
+
+void ClusterBuilder::AddBlockGroupInternal(int track_num, int64 timecode,
+ bool include_block_duration,
+ int duration, int flags,
+ const uint8* data, int size) {
int block_size = size + 4;
- int bytes_needed = sizeof(kBlockGroupHeader) + block_size;
+ int bytes_needed = block_size;
+ if (include_block_duration) {
+ bytes_needed += sizeof(kBlockGroupHeader);
+ } else {
+ bytes_needed += sizeof(kBlockGroupHeaderWithoutBlockDuration);
+ }
+
int block_group_size = bytes_needed - 9;
if (bytes_needed > (buffer_size_ - bytes_used_))
@@ -94,11 +125,21 @@ void ClusterBuilder::AddBlockGroup(int track_num, int64 timecode, int duration,
uint8* buf = buffer_.get() + bytes_used_;
int block_group_offset = bytes_used_;
- memcpy(buf, kBlockGroupHeader, sizeof(kBlockGroupHeader));
+ if (include_block_duration) {
+ memcpy(buf, kBlockGroupHeader, sizeof(kBlockGroupHeader));
+ UpdateUInt64(block_group_offset + kBlockGroupDurationOffset, duration);
+ UpdateUInt64(block_group_offset + kBlockGroupBlockSizeOffset, block_size);
+ buf += sizeof(kBlockGroupHeader);
+ } else {
+ memcpy(buf, kBlockGroupHeaderWithoutBlockDuration,
+ sizeof(kBlockGroupHeaderWithoutBlockDuration));
+ UpdateUInt64(
+ block_group_offset + kBlockGroupWithoutBlockDurationBlockSizeOffset,
+ block_size);
+ buf += sizeof(kBlockGroupHeaderWithoutBlockDuration);
+ }
+
UpdateUInt64(block_group_offset + kBlockGroupSizeOffset, block_group_size);
- UpdateUInt64(block_group_offset + kBlockGroupDurationOffset, duration);
- UpdateUInt64(block_group_offset + kBlockGroupBlockSizeOffset, block_size);
- buf += sizeof(kBlockGroupHeader);
// Make sure the 4 most-significant bits are 0.
// http://www.matroska.org/technical/specs/index.html#block_structure
@@ -140,6 +181,16 @@ scoped_ptr<Cluster> ClusterBuilder::Finish() {
return ret.Pass();
}
+scoped_ptr<Cluster> ClusterBuilder::FinishWithUnknownSize() {
+ DCHECK_NE(cluster_timecode_, -1);
+
+ UpdateUInt64(kClusterSizeOffset, kWebMUnknownSize);
+
+ scoped_ptr<Cluster> ret(new Cluster(buffer_.Pass(), bytes_used_));
+ Reset();
+ return ret.Pass();
+}
+
void ClusterBuilder::Reset() {
buffer_size_ = kInitialBufferSize;
buffer_.reset(new uint8[buffer_size_]);
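
Note: the new kBlockGroupWithoutBlockDurationBlockSizeOffset value of 10 follows directly from the header bytes defined earlier in this file. Spelled out for reference (derived from the constants above, no new behavior):

    // kBlockGroupHeaderWithoutBlockDuration byte layout:
    //   offset 0     : 0xA0            BlockGroup element ID
    //   offsets 1-8  : 0x01 00 ... 00  8-byte BlockGroup size, patched via
    //                                  kBlockGroupSizeOffset (== 1)
    //   offset 9     : 0xA1            Block element ID
    //   offsets 10-17: 0x01 00 ... 00  8-byte Block size, patched via
    //                                  kBlockGroupWithoutBlockDurationBlockSizeOffset (== 10)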
diff --git a/chromium/media/webm/cluster_builder.h b/chromium/media/formats/webm/cluster_builder.h
index 3482cfbb90a..ab5797cd34a 100644
--- a/chromium/media/webm/cluster_builder.h
+++ b/chromium/media/formats/webm/cluster_builder.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_CLUSTER_BUILDER_H_
-#define MEDIA_WEBM_CLUSTER_BUILDER_H_
+#ifndef MEDIA_FORMATS_WEBM_CLUSTER_BUILDER_H_
+#define MEDIA_FORMATS_WEBM_CLUSTER_BUILDER_H_
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
@@ -36,10 +36,16 @@ class ClusterBuilder {
const uint8* data, int size);
void AddBlockGroup(int track_num, int64 timecode, int duration, int flags,
const uint8* data, int size);
+ void AddBlockGroupWithoutBlockDuration(int track_num, int64 timecode,
+ int flags, const uint8* data, int size);
scoped_ptr<Cluster> Finish();
+ scoped_ptr<Cluster> FinishWithUnknownSize();
private:
+ void AddBlockGroupInternal(int track_num, int64 timecode,
+ bool include_block_duration, int duration,
+ int flags, const uint8* data, int size);
void Reset();
void ExtendBuffer(int bytes_needed);
void UpdateUInt64(int offset, int64 value);
@@ -56,4 +62,4 @@ class ClusterBuilder {
} // namespace media
-#endif // MEDIA_WEBM_CLUSTER_BUILDER_H_
+#endif // MEDIA_FORMATS_WEBM_CLUSTER_BUILDER_H_
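
Note: a hypothetical test snippet showing the new ClusterBuilder entry points together. All track numbers, timecodes, and payload bytes are invented, and the cluster timecode is assumed to be set via the builder's setter, which lies outside the hunks shown here.

    // Illustrative only -- every value is made up for the example.
    ClusterBuilder cb;
    cb.SetClusterTimecode(0);  // Assumed setter; not visible in this hunk.
    uint8 frame[] = {0x00, 0x01, 0x02, 0x03};
    cb.AddSimpleBlock(1, 0, 0x80, frame, sizeof(frame));  // 0x80 == keyframe.
    cb.AddBlockGroupWithoutBlockDuration(1, 33, 0, frame, sizeof(frame));
    scoped_ptr<Cluster> cluster = cb.FinishWithUnknownSize();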
diff --git a/chromium/media/formats/webm/tracks_builder.cc b/chromium/media/formats/webm/tracks_builder.cc
new file mode 100644
index 00000000000..fb402c4729c
--- /dev/null
+++ b/chromium/media/formats/webm/tracks_builder.cc
@@ -0,0 +1,384 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/webm/tracks_builder.h"
+
+#include "base/logging.h"
+#include "media/formats/webm/webm_constants.h"
+
+namespace media {
+
+// Returns the size, in bytes, of an integer formatted using Matroska vint
+// serialization.
+static int GetUIntMkvSize(uint64 value) {
+ if (value < 0x07FULL)
+ return 1;
+ if (value < 0x03FFFULL)
+ return 2;
+ if (value < 0x01FFFFFULL)
+ return 3;
+ if (value < 0x0FFFFFFFULL)
+ return 4;
+ if (value < 0x07FFFFFFFFULL)
+ return 5;
+ if (value < 0x03FFFFFFFFFFULL)
+ return 6;
+ if (value < 0x01FFFFFFFFFFFFULL)
+ return 7;
+ return 8;
+}
+
+// Returns the minimum size, in bytes, required to serialize an integer value.
+static int GetUIntSize(uint64 value) {
+ if (value < 0x0100ULL)
+ return 1;
+ if (value < 0x010000ULL)
+ return 2;
+ if (value < 0x01000000ULL)
+ return 3;
+ if (value < 0x0100000000ULL)
+ return 4;
+ if (value < 0x010000000000ULL)
+ return 5;
+ if (value < 0x01000000000000ULL)
+ return 6;
+ if (value < 0x0100000000000000ULL)
+ return 7;
+ return 8;
+}
+
+static int MasterElementSize(int element_id, int payload_size) {
+ return GetUIntSize(element_id) + GetUIntMkvSize(payload_size) + payload_size;
+}
+
+static int IntElementSize(int element_id, int value) {
+ return GetUIntSize(element_id) + 1 + GetUIntSize(value);
+}
+
+static int DoubleElementSize(int element_id) {
+ return GetUIntSize(element_id) + 1 + 8;
+}
+
+static int StringElementSize(int element_id, const std::string& value) {
+ return GetUIntSize(element_id) +
+ GetUIntMkvSize(value.length()) +
+ value.length();
+}
+
+static void SerializeInt(uint8** buf_ptr, int* buf_size_ptr,
+ int64 value, int size) {
+ uint8*& buf = *buf_ptr;
+ int& buf_size = *buf_size_ptr;
+
+ for (int idx = 1; idx <= size; ++idx) {
+ *buf++ = static_cast<uint8>(value >> ((size - idx) * 8));
+ --buf_size;
+ }
+}
+
+static void SerializeDouble(uint8** buf_ptr, int* buf_size_ptr,
+ double value) {
+  // Use a union to convert |value| to a native-endian integer bit pattern.
+ union {
+ double src;
+ int64 dst;
+ } tmp;
+ tmp.src = value;
+
+ // Write the bytes from native endian |tmp.dst| to big-endian form in |buf|.
+ SerializeInt(buf_ptr, buf_size_ptr, tmp.dst, 8);
+}
+
+static void WriteElementId(uint8** buf, int* buf_size, int element_id) {
+ SerializeInt(buf, buf_size, element_id, GetUIntSize(element_id));
+}
+
+static void WriteUInt(uint8** buf, int* buf_size, uint64 value) {
+ const int size = GetUIntMkvSize(value);
+  value |= (1ULL << (size * 7)); // Matroska vint length marker bit.
+ SerializeInt(buf, buf_size, value, size);
+}
+
+static void WriteMasterElement(uint8** buf, int* buf_size,
+ int element_id, int payload_size) {
+ WriteElementId(buf, buf_size, element_id);
+ WriteUInt(buf, buf_size, payload_size);
+}
+
+static void WriteIntElement(uint8** buf, int* buf_size,
+ int element_id, int value) {
+ WriteElementId(buf, buf_size, element_id);
+
+ const int size = GetUIntSize(value);
+ WriteUInt(buf, buf_size, size);
+
+ SerializeInt(buf, buf_size, value, size);
+}
+
+static void WriteDoubleElement(uint8** buf, int* buf_size,
+ int element_id, double value) {
+ WriteElementId(buf, buf_size, element_id);
+ WriteUInt(buf, buf_size, 8);
+ SerializeDouble(buf, buf_size, value);
+}
+
+static void WriteStringElement(uint8** buf_ptr, int* buf_size_ptr,
+ int element_id, const std::string& value) {
+ uint8*& buf = *buf_ptr;
+ int& buf_size = *buf_size_ptr;
+
+ WriteElementId(&buf, &buf_size, element_id);
+
+ const uint64 size = value.length();
+ WriteUInt(&buf, &buf_size, size);
+
+ memcpy(buf, value.data(), size);
+ buf += size;
+ buf_size -= size;
+}
+
+TracksBuilder::TracksBuilder(bool allow_invalid_values)
+ : allow_invalid_values_(allow_invalid_values) {}
+TracksBuilder::TracksBuilder()
+ : allow_invalid_values_(false) {}
+TracksBuilder::~TracksBuilder() {}
+
+void TracksBuilder::AddVideoTrack(
+ int track_num,
+ int track_uid,
+ const std::string& codec_id,
+ const std::string& name,
+ const std::string& language,
+ int default_duration,
+ int video_pixel_width,
+ int video_pixel_height) {
+ AddTrackInternal(track_num, kWebMTrackTypeVideo, track_uid, codec_id, name,
+ language, default_duration, video_pixel_width,
+ video_pixel_height, -1, -1);
+}
+
+void TracksBuilder::AddAudioTrack(
+ int track_num,
+ int track_uid,
+ const std::string& codec_id,
+ const std::string& name,
+ const std::string& language,
+ int default_duration,
+ int audio_channels,
+ double audio_sampling_frequency) {
+ AddTrackInternal(track_num, kWebMTrackTypeAudio, track_uid, codec_id, name,
+ language, default_duration, -1, -1, audio_channels,
+ audio_sampling_frequency);
+}
+
+void TracksBuilder::AddTextTrack(
+ int track_num,
+ int track_uid,
+ const std::string& codec_id,
+ const std::string& name,
+ const std::string& language) {
+ AddTrackInternal(track_num, kWebMTrackTypeSubtitlesOrCaptions, track_uid,
+ codec_id, name, language, -1, -1, -1, -1, -1);
+}
+
+std::vector<uint8> TracksBuilder::Finish() {
+ // Allocate the storage
+ std::vector<uint8> buffer;
+ buffer.resize(GetTracksSize());
+
+ // Populate the storage with a tracks header
+ WriteTracks(&buffer[0], buffer.size());
+
+ return buffer;
+}
+
+void TracksBuilder::AddTrackInternal(
+ int track_num,
+ int track_type,
+ int track_uid,
+ const std::string& codec_id,
+ const std::string& name,
+ const std::string& language,
+ int default_duration,
+ int video_pixel_width,
+ int video_pixel_height,
+ int audio_channels,
+ double audio_sampling_frequency) {
+ tracks_.push_back(Track(track_num, track_type, track_uid, codec_id, name,
+ language, default_duration, video_pixel_width,
+ video_pixel_height, audio_channels,
+ audio_sampling_frequency, allow_invalid_values_));
+}
+
+int TracksBuilder::GetTracksSize() const {
+ return MasterElementSize(kWebMIdTracks, GetTracksPayloadSize());
+}
+
+int TracksBuilder::GetTracksPayloadSize() const {
+ int payload_size = 0;
+
+ for (TrackList::const_iterator itr = tracks_.begin();
+ itr != tracks_.end(); ++itr) {
+ payload_size += itr->GetSize();
+ }
+
+ return payload_size;
+}
+
+void TracksBuilder::WriteTracks(uint8* buf, int buf_size) const {
+ WriteMasterElement(&buf, &buf_size, kWebMIdTracks, GetTracksPayloadSize());
+
+ for (TrackList::const_iterator itr = tracks_.begin();
+ itr != tracks_.end(); ++itr) {
+ itr->Write(&buf, &buf_size);
+ }
+}
+
+TracksBuilder::Track::Track(int track_num, int track_type, int track_uid,
+ const std::string& codec_id,
+ const std::string& name,
+ const std::string& language,
+ int default_duration,
+ int video_pixel_width, int video_pixel_height,
+ int audio_channels, double audio_sampling_frequency,
+ bool allow_invalid_values)
+ : track_num_(track_num),
+ track_type_(track_type),
+ track_uid_(track_uid),
+ codec_id_(codec_id),
+ name_(name),
+ language_(language),
+ default_duration_(default_duration),
+ video_pixel_width_(video_pixel_width),
+ video_pixel_height_(video_pixel_height),
+ audio_channels_(audio_channels),
+ audio_sampling_frequency_(audio_sampling_frequency) {
+ if (!allow_invalid_values) {
+ CHECK_GT(track_num_, 0);
+ CHECK_GT(track_type_, 0);
+ CHECK_LT(track_type_, 255);
+ CHECK_GT(track_uid_, 0);
+ if (track_type != kWebMTrackTypeVideo &&
+ track_type != kWebMTrackTypeAudio) {
+ CHECK_EQ(default_duration_, -1);
+ } else {
+ CHECK(default_duration_ == -1 || default_duration_ > 0);
+ }
+
+ if (track_type == kWebMTrackTypeVideo) {
+ CHECK_GT(video_pixel_width_, 0);
+ CHECK_GT(video_pixel_height_, 0);
+ } else {
+ CHECK_EQ(video_pixel_width_, -1);
+ CHECK_EQ(video_pixel_height_, -1);
+ }
+
+ if (track_type == kWebMTrackTypeAudio) {
+ CHECK_GT(audio_channels_, 0);
+ CHECK_GT(audio_sampling_frequency_, 0.0);
+ } else {
+ CHECK_EQ(audio_channels_, -1);
+ CHECK_EQ(audio_sampling_frequency_, -1.0);
+ }
+ }
+}
+
+int TracksBuilder::Track::GetSize() const {
+ return MasterElementSize(kWebMIdTrackEntry, GetPayloadSize());
+}
+
+int TracksBuilder::Track::GetVideoPayloadSize() const {
+ int payload_size = 0;
+
+ if (video_pixel_width_ >= 0)
+ payload_size += IntElementSize(kWebMIdPixelWidth, video_pixel_width_);
+ if (video_pixel_height_ >= 0)
+ payload_size += IntElementSize(kWebMIdPixelHeight, video_pixel_height_);
+
+ return payload_size;
+}
+
+int TracksBuilder::Track::GetAudioPayloadSize() const {
+ int payload_size = 0;
+
+ if (audio_channels_ >= 0)
+ payload_size += IntElementSize(kWebMIdChannels, audio_channels_);
+ if (audio_sampling_frequency_ >= 0)
+ payload_size += DoubleElementSize(kWebMIdSamplingFrequency);
+
+ return payload_size;
+}
+
+int TracksBuilder::Track::GetPayloadSize() const {
+ int size = 0;
+
+ size += IntElementSize(kWebMIdTrackNumber, track_num_);
+ size += IntElementSize(kWebMIdTrackType, track_type_);
+ size += IntElementSize(kWebMIdTrackUID, track_uid_);
+
+ if (default_duration_ >= 0)
+ size += IntElementSize(kWebMIdDefaultDuration, default_duration_);
+
+ if (!codec_id_.empty())
+ size += StringElementSize(kWebMIdCodecID, codec_id_);
+
+ if (!name_.empty())
+ size += StringElementSize(kWebMIdName, name_);
+
+ if (!language_.empty())
+ size += StringElementSize(kWebMIdLanguage, language_);
+
+ if (GetVideoPayloadSize() > 0) {
+ size += MasterElementSize(kWebMIdVideo, GetVideoPayloadSize());
+ }
+
+ if (GetAudioPayloadSize() > 0) {
+ size += MasterElementSize(kWebMIdAudio, GetAudioPayloadSize());
+ }
+
+ return size;
+}
+
+void TracksBuilder::Track::Write(uint8** buf, int* buf_size) const {
+ WriteMasterElement(buf, buf_size, kWebMIdTrackEntry, GetPayloadSize());
+
+ WriteIntElement(buf, buf_size, kWebMIdTrackNumber, track_num_);
+ WriteIntElement(buf, buf_size, kWebMIdTrackType, track_type_);
+ WriteIntElement(buf, buf_size, kWebMIdTrackUID, track_uid_);
+
+ if (default_duration_ >= 0)
+ WriteIntElement(buf, buf_size, kWebMIdDefaultDuration, default_duration_);
+
+ if (!codec_id_.empty())
+ WriteStringElement(buf, buf_size, kWebMIdCodecID, codec_id_);
+
+ if (!name_.empty())
+ WriteStringElement(buf, buf_size, kWebMIdName, name_);
+
+ if (!language_.empty())
+ WriteStringElement(buf, buf_size, kWebMIdLanguage, language_);
+
+ if (GetVideoPayloadSize() > 0) {
+ WriteMasterElement(buf, buf_size, kWebMIdVideo, GetVideoPayloadSize());
+
+ if (video_pixel_width_ >= 0)
+ WriteIntElement(buf, buf_size, kWebMIdPixelWidth, video_pixel_width_);
+
+ if (video_pixel_height_ >= 0)
+ WriteIntElement(buf, buf_size, kWebMIdPixelHeight, video_pixel_height_);
+ }
+
+ if (GetAudioPayloadSize() > 0) {
+ WriteMasterElement(buf, buf_size, kWebMIdAudio, GetAudioPayloadSize());
+
+ if (audio_channels_ >= 0)
+ WriteIntElement(buf, buf_size, kWebMIdChannels, audio_channels_);
+
+ if (audio_sampling_frequency_ >= 0) {
+ WriteDoubleElement(buf, buf_size, kWebMIdSamplingFrequency,
+ audio_sampling_frequency_);
+ }
+ }
+}
+
+} // namespace media
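
Note: WriteUInt() above ORs in a marker bit whose position encodes the field length — the Matroska vint scheme that GetUIntMkvSize() sizes for. A standalone sketch with a worked example; the helper name is invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    // Encodes |value| as a Matroska vint into |out| and returns the byte
    // count. The leading bits encode the length: 1xxxxxxx is 1 byte,
    // 01xxxxxx xxxxxxxx is 2 bytes, and so on.
    static int EncodeMkvUInt(uint64_t value, uint8_t out[8]) {
      int size = 1;
      while (size < 8 && value >= (1ULL << (7 * size)) - 1)
        ++size;  // All-ones payloads are reserved, hence >= (2^(7n) - 1).
      value |= 1ULL << (7 * size);  // The length marker bit.
      for (int i = 0; i < size; ++i)
        out[i] = static_cast<uint8_t>(value >> (8 * (size - 1 - i)));
      return size;
    }

    int main() {
      uint8_t buf[8];
      int n = EncodeMkvUInt(500, buf);
      for (int i = 0; i < n; ++i)
        printf("%02X ", buf[i]);  // Prints "41 F4": 500 | 0x4000, 2 bytes.
      printf("\n");
      return 0;
    }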
diff --git a/chromium/media/formats/webm/tracks_builder.h b/chromium/media/formats/webm/tracks_builder.h
new file mode 100644
index 00000000000..35f8955c6b0
--- /dev/null
+++ b/chromium/media/formats/webm/tracks_builder.h
@@ -0,0 +1,91 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_WEBM_TRACKS_BUILDER_H_
+#define MEDIA_FORMATS_WEBM_TRACKS_BUILDER_H_
+
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+
+namespace media {
+
+class TracksBuilder {
+ public:
+  // If |allow_invalid_values| is false, some AddTrack() parameters are
+  // range-checked and will CHECK-fail if outside their valid range.
+  // |codec_id|, |name|, |language| and any device-specific constraints are
+  // not checked.
+ explicit TracksBuilder(bool allow_invalid_values);
+ TracksBuilder(); // Sets |allow_invalid_values| to false.
+ ~TracksBuilder();
+
+  // Only a non-negative |default_duration| will result in a serialized
+  // kWebMIdDefaultDuration element. Note that 0 is allowed here for testing
+  // only if |allow_invalid_values_| is true, since it is an illegal value for
+  // DefaultDuration. The same applies to |audio_channels|,
+  // |audio_sampling_frequency|, |video_pixel_width| and |video_pixel_height|.
+ void AddVideoTrack(int track_num, int track_uid, const std::string& codec_id,
+ const std::string& name, const std::string& language,
+ int default_duration, int video_pixel_width,
+ int video_pixel_height);
+ void AddAudioTrack(int track_num, int track_uid, const std::string& codec_id,
+ const std::string& name, const std::string& language,
+ int default_duration, int audio_channels,
+ double audio_sampling_frequency);
+ void AddTextTrack(int track_num, int track_uid, const std::string& codec_id,
+ const std::string& name, const std::string& language);
+
+ std::vector<uint8> Finish();
+
+ private:
+ void AddTrackInternal(int track_num, int track_type, int track_uid,
+ const std::string& codec_id, const std::string& name,
+ const std::string& language, int default_duration,
+ int video_pixel_width, int video_pixel_height,
+ int audio_channels, double audio_sampling_frequency);
+ int GetTracksSize() const;
+ int GetTracksPayloadSize() const;
+ void WriteTracks(uint8* buffer, int buffer_size) const;
+
+ class Track {
+ public:
+ Track(int track_num, int track_type, int track_uid,
+ const std::string& codec_id, const std::string& name,
+ const std::string& language, int default_duration,
+ int video_pixel_width, int video_pixel_height,
+ int audio_channels, double audio_sampling_frequency,
+ bool allow_invalid_values);
+
+ int GetSize() const;
+ void Write(uint8** buf, int* buf_size) const;
+ private:
+ int GetPayloadSize() const;
+ int GetVideoPayloadSize() const;
+ int GetAudioPayloadSize() const;
+
+ int track_num_;
+ int track_type_;
+ int track_uid_;
+ std::string codec_id_;
+ std::string name_;
+ std::string language_;
+ int default_duration_;
+ int video_pixel_width_;
+ int video_pixel_height_;
+ int audio_channels_;
+ double audio_sampling_frequency_;
+ };
+
+ typedef std::list<Track> TrackList;
+ TrackList tracks_;
+ bool allow_invalid_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(TracksBuilder);
+};
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_WEBM_TRACKS_BUILDER_H_
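
Note: typical use of the builder in a parser test might look like the following. The codec IDs are real WebM identifiers; every numeric value is invented for the example.

    TracksBuilder tb;
    tb.AddVideoTrack(1, 1, "V_VP8", "video", "und",
                     33000000 /* DefaultDuration in ns */, 320, 240);
    tb.AddAudioTrack(2, 2, "A_VORBIS", "audio", "und",
                     -1 /* omit DefaultDuration */, 2, 44100.0);
    std::vector<uint8> tracks = tb.Finish();
    // |tracks| now holds a serialized kWebMIdTracks element ready to feed to
    // the parser under test.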
diff --git a/chromium/media/webm/webm_audio_client.cc b/chromium/media/formats/webm/webm_audio_client.cc
index 1ef640c0dc4..6fe9a8434f2 100644
--- a/chromium/media/webm/webm_audio_client.cc
+++ b/chromium/media/formats/webm/webm_audio_client.cc
@@ -1,12 +1,12 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_audio_client.h"
+#include "media/formats/webm/webm_audio_client.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/channel_layout.h"
-#include "media/webm/webm_constants.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
@@ -65,15 +65,27 @@ bool WebMAudioClient::InitializeConfig(
extra_data_size = codec_private.size();
}
+ // Convert |codec_delay| from nanoseconds into frames.
+ int codec_delay_in_frames = 0;
+ if (codec_delay != -1) {
+ codec_delay_in_frames =
+ 0.5 +
+ samples_per_second * (static_cast<double>(codec_delay) /
+ base::Time::kNanosecondsPerSecond);
+ }
+
config->Initialize(
audio_codec,
(audio_codec == kCodecOpus) ? kSampleFormatS16 : kSampleFormatPlanarF32,
channel_layout,
- samples_per_second, extra_data, extra_data_size, is_encrypted, true,
+ samples_per_second,
+ extra_data,
+ extra_data_size,
+ is_encrypted,
+ true,
base::TimeDelta::FromMicroseconds(
(seek_preroll != -1 ? seek_preroll : 0) / 1000),
- base::TimeDelta::FromMicroseconds(
- (codec_delay != -1 ? codec_delay : 0) / 1000));
+ codec_delay_in_frames);
return config->IsValidConfig();
}
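
Note: to see the rounding in the nanoseconds-to-frames conversion above, take an assumed 48000 Hz stream with an assumed Opus pre-skip of 6,500,000 ns: 48000 * 0.0065 = 312 frames, and the added 0.5 makes the double-to-int truncation round to nearest.

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const int samples_per_second = 48000;    // Assumed sample rate.
      const int64_t codec_delay_ns = 6500000;  // Assumed pre-skip, in ns.
      const int codec_delay_in_frames = static_cast<int>(
          0.5 +
          samples_per_second * (static_cast<double>(codec_delay_ns) / 1e9));
      printf("%d\n", codec_delay_in_frames);  // Prints 312.
      return 0;
    }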
diff --git a/chromium/media/webm/webm_audio_client.h b/chromium/media/formats/webm/webm_audio_client.h
index 7874cec4bea..a723b0d1d48 100644
--- a/chromium/media/webm/webm_audio_client.h
+++ b/chromium/media/formats/webm/webm_audio_client.h
@@ -1,15 +1,15 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_AUDIO_CLIENT_H_
-#define MEDIA_WEBM_WEBM_AUDIO_CLIENT_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_AUDIO_CLIENT_H_
+#define MEDIA_FORMATS_WEBM_WEBM_AUDIO_CLIENT_H_
#include <string>
#include <vector>
#include "media/base/media_log.h"
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/webm_parser.h"
namespace media {
class AudioDecoderConfig;
@@ -51,4 +51,4 @@ class WebMAudioClient : public WebMParserClient {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_AUDIO_CLIENT_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_AUDIO_CLIENT_H_
diff --git a/chromium/media/formats/webm/webm_cluster_parser.cc b/chromium/media/formats/webm/webm_cluster_parser.cc
new file mode 100644
index 00000000000..172eafafcf2
--- /dev/null
+++ b/chromium/media/formats/webm/webm_cluster_parser.cc
@@ -0,0 +1,686 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/webm/webm_cluster_parser.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/sys_byteorder.h"
+#include "media/base/buffers.h"
+#include "media/base/decrypt_config.h"
+#include "media/filters/webvtt_util.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_crypto_helpers.h"
+#include "media/formats/webm/webm_webvtt_parser.h"
+
+namespace media {
+
+WebMClusterParser::WebMClusterParser(
+ int64 timecode_scale,
+ int audio_track_num,
+ base::TimeDelta audio_default_duration,
+ int video_track_num,
+ base::TimeDelta video_default_duration,
+ const WebMTracksParser::TextTracks& text_tracks,
+ const std::set<int64>& ignored_tracks,
+ const std::string& audio_encryption_key_id,
+ const std::string& video_encryption_key_id,
+ const LogCB& log_cb)
+ : timecode_multiplier_(timecode_scale / 1000.0),
+ ignored_tracks_(ignored_tracks),
+ audio_encryption_key_id_(audio_encryption_key_id),
+ video_encryption_key_id_(video_encryption_key_id),
+ parser_(kWebMIdCluster, this),
+ last_block_timecode_(-1),
+ block_data_size_(-1),
+ block_duration_(-1),
+ block_add_id_(-1),
+ block_additional_data_size_(-1),
+ discard_padding_(-1),
+ cluster_timecode_(-1),
+ cluster_start_time_(kNoTimestamp()),
+ cluster_ended_(false),
+ audio_(audio_track_num, false, audio_default_duration, log_cb),
+ video_(video_track_num, true, video_default_duration, log_cb),
+ ready_buffer_upper_bound_(kNoTimestamp()),
+ log_cb_(log_cb) {
+ for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
+ it != text_tracks.end();
+ ++it) {
+ text_track_map_.insert(std::make_pair(
+ it->first, Track(it->first, false, kNoTimestamp(), log_cb_)));
+ }
+}
+
+WebMClusterParser::~WebMClusterParser() {}
+
+void WebMClusterParser::Reset() {
+ last_block_timecode_ = -1;
+ cluster_timecode_ = -1;
+ cluster_start_time_ = kNoTimestamp();
+ cluster_ended_ = false;
+ parser_.Reset();
+ audio_.Reset();
+ video_.Reset();
+ ResetTextTracks();
+ ready_buffer_upper_bound_ = kNoTimestamp();
+}
+
+int WebMClusterParser::Parse(const uint8* buf, int size) {
+ audio_.ClearReadyBuffers();
+ video_.ClearReadyBuffers();
+ ClearTextTrackReadyBuffers();
+ ready_buffer_upper_bound_ = kNoTimestamp();
+
+ int result = parser_.Parse(buf, size);
+
+ if (result < 0) {
+ cluster_ended_ = false;
+ return result;
+ }
+
+ cluster_ended_ = parser_.IsParsingComplete();
+ if (cluster_ended_) {
+ // If there were no buffers in this cluster, set the cluster start time to
+ // be the |cluster_timecode_|.
+ if (cluster_start_time_ == kNoTimestamp()) {
+ // If the cluster did not even have a |cluster_timecode_|, signal parse
+ // error.
+ if (cluster_timecode_ < 0)
+ return -1;
+
+ cluster_start_time_ = base::TimeDelta::FromMicroseconds(
+ cluster_timecode_ * timecode_multiplier_);
+ }
+
+    // Reset the parser if we're done parsing so that it is ready to accept
+    // another cluster on the next call.
+ parser_.Reset();
+
+ last_block_timecode_ = -1;
+ cluster_timecode_ = -1;
+ }
+
+ return result;
+}
+
+const WebMClusterParser::BufferQueue& WebMClusterParser::GetAudioBuffers() {
+ if (ready_buffer_upper_bound_ == kNoTimestamp())
+ UpdateReadyBuffers();
+
+ return audio_.ready_buffers();
+}
+
+const WebMClusterParser::BufferQueue& WebMClusterParser::GetVideoBuffers() {
+ if (ready_buffer_upper_bound_ == kNoTimestamp())
+ UpdateReadyBuffers();
+
+ return video_.ready_buffers();
+}
+
+const WebMClusterParser::TextBufferQueueMap&
+WebMClusterParser::GetTextBuffers() {
+ if (ready_buffer_upper_bound_ == kNoTimestamp())
+ UpdateReadyBuffers();
+
+  // Translate our |text_track_map_| into |text_buffers_map_|, inserting rows
+  // in the output only for non-empty ready_buffers() queues in
+  // |text_track_map_|.
+ text_buffers_map_.clear();
+ for (TextTrackMap::const_iterator itr = text_track_map_.begin();
+ itr != text_track_map_.end();
+ ++itr) {
+ const BufferQueue& text_buffers = itr->second.ready_buffers();
+ if (!text_buffers.empty())
+ text_buffers_map_.insert(std::make_pair(itr->first, text_buffers));
+ }
+
+ return text_buffers_map_;
+}
+
+WebMParserClient* WebMClusterParser::OnListStart(int id) {
+ if (id == kWebMIdCluster) {
+ cluster_timecode_ = -1;
+ cluster_start_time_ = kNoTimestamp();
+ } else if (id == kWebMIdBlockGroup) {
+ block_data_.reset();
+ block_data_size_ = -1;
+ block_duration_ = -1;
+ discard_padding_ = -1;
+ discard_padding_set_ = false;
+ } else if (id == kWebMIdBlockAdditions) {
+ block_add_id_ = -1;
+ block_additional_data_.reset();
+ block_additional_data_size_ = -1;
+ }
+
+ return this;
+}
+
+bool WebMClusterParser::OnListEnd(int id) {
+ if (id != kWebMIdBlockGroup)
+ return true;
+
+ // Make sure the BlockGroup actually had a Block.
+ if (block_data_size_ == -1) {
+ MEDIA_LOG(log_cb_) << "Block missing from BlockGroup.";
+ return false;
+ }
+
+ bool result = ParseBlock(false, block_data_.get(), block_data_size_,
+ block_additional_data_.get(),
+ block_additional_data_size_, block_duration_,
+ discard_padding_set_ ? discard_padding_ : 0);
+ block_data_.reset();
+ block_data_size_ = -1;
+ block_duration_ = -1;
+ block_add_id_ = -1;
+ block_additional_data_.reset();
+ block_additional_data_size_ = -1;
+ discard_padding_ = -1;
+ discard_padding_set_ = false;
+ return result;
+}
+
+bool WebMClusterParser::OnUInt(int id, int64 val) {
+ int64* dst;
+ switch (id) {
+ case kWebMIdTimecode:
+ dst = &cluster_timecode_;
+ break;
+ case kWebMIdBlockDuration:
+ dst = &block_duration_;
+ break;
+ case kWebMIdBlockAddID:
+ dst = &block_add_id_;
+ break;
+ default:
+ return true;
+ }
+ if (*dst != -1)
+ return false;
+ *dst = val;
+ return true;
+}
+
+bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
+ int size, const uint8* additional,
+ int additional_size, int duration,
+ int64 discard_padding) {
+ if (size < 4)
+ return false;
+
+  // Return an error if the track number is > 127. We just aren't
+  // going to support large track numbers right now.
+ if (!(buf[0] & 0x80)) {
+ MEDIA_LOG(log_cb_) << "TrackNumber over 127 not supported";
+ return false;
+ }
+
+ int track_num = buf[0] & 0x7f;
+ int timecode = buf[1] << 8 | buf[2];
+ int flags = buf[3] & 0xff;
+ int lacing = (flags >> 1) & 0x3;
+
+ if (lacing) {
+ MEDIA_LOG(log_cb_) << "Lacing " << lacing << " is not supported yet.";
+ return false;
+ }
+
+ // Sign extend negative timecode offsets.
+ if (timecode & 0x8000)
+ timecode |= ~0xffff;
+
+ const uint8* frame_data = buf + 4;
+ int frame_size = size - (frame_data - buf);
+ return OnBlock(is_simple_block, track_num, timecode, duration, flags,
+ frame_data, frame_size, additional, additional_size,
+ discard_padding);
+}
+
+bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
+ switch (id) {
+ case kWebMIdSimpleBlock:
+ return ParseBlock(true, data, size, NULL, -1, -1, 0);
+
+ case kWebMIdBlock:
+ if (block_data_) {
+ MEDIA_LOG(log_cb_) << "More than 1 Block in a BlockGroup is not "
+ "supported.";
+ return false;
+ }
+ block_data_.reset(new uint8[size]);
+ memcpy(block_data_.get(), data, size);
+ block_data_size_ = size;
+ return true;
+
+ case kWebMIdBlockAdditional: {
+ uint64 block_add_id = base::HostToNet64(block_add_id_);
+ if (block_additional_data_) {
+ // TODO(vigneshv): Technically, more than 1 BlockAdditional is allowed
+ // as per matroska spec. But for now we don't have a use case to
+ // support parsing of such files. Take a look at this again when such a
+ // case arises.
+ MEDIA_LOG(log_cb_) << "More than 1 BlockAdditional in a BlockGroup is "
+ "not supported.";
+ return false;
+ }
+      // The first 8 bytes of side_data in DecoderBuffer are the BlockAddID
+      // element's value in big-endian format. This is done to mimic the
+      // FFmpeg demuxer's behavior.
+ block_additional_data_size_ = size + sizeof(block_add_id);
+ block_additional_data_.reset(new uint8[block_additional_data_size_]);
+ memcpy(block_additional_data_.get(), &block_add_id,
+ sizeof(block_add_id));
+ memcpy(block_additional_data_.get() + 8, data, size);
+ return true;
+ }
+ case kWebMIdDiscardPadding: {
+ if (discard_padding_set_ || size <= 0 || size > 8)
+ return false;
+ discard_padding_set_ = true;
+
+ // Read in the big-endian integer.
+ discard_padding_ = static_cast<int8>(data[0]);
+ for (int i = 1; i < size; ++i)
+ discard_padding_ = (discard_padding_ << 8) | data[i];
+
+ return true;
+ }
+ default:
+ return true;
+ }
+}
+
+bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
+ int timecode,
+ int block_duration,
+ int flags,
+ const uint8* data, int size,
+ const uint8* additional, int additional_size,
+ int64 discard_padding) {
+ DCHECK_GE(size, 0);
+ if (cluster_timecode_ == -1) {
+ MEDIA_LOG(log_cb_) << "Got a block before cluster timecode.";
+ return false;
+ }
+
+ // TODO(acolwell): Should relative negative timecode offsets be rejected? Or
+ // only when the absolute timecode is negative? See http://crbug.com/271794
+ if (timecode < 0) {
+ MEDIA_LOG(log_cb_) << "Got a block with negative timecode offset "
+ << timecode;
+ return false;
+ }
+
+ if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
+ MEDIA_LOG(log_cb_)
+ << "Got a block with a timecode before the previous block.";
+ return false;
+ }
+
+ Track* track = NULL;
+ StreamParserBuffer::Type buffer_type = DemuxerStream::AUDIO;
+ std::string encryption_key_id;
+ if (track_num == audio_.track_num()) {
+ track = &audio_;
+ encryption_key_id = audio_encryption_key_id_;
+ } else if (track_num == video_.track_num()) {
+ track = &video_;
+ encryption_key_id = video_encryption_key_id_;
+ buffer_type = DemuxerStream::VIDEO;
+ } else if (ignored_tracks_.find(track_num) != ignored_tracks_.end()) {
+ return true;
+ } else if (Track* const text_track = FindTextTrack(track_num)) {
+ if (is_simple_block) // BlockGroup is required for WebVTT cues
+ return false;
+ if (block_duration < 0) // not specified
+ return false;
+ track = text_track;
+ buffer_type = DemuxerStream::TEXT;
+ } else {
+ MEDIA_LOG(log_cb_) << "Unexpected track number " << track_num;
+ return false;
+ }
+
+ last_block_timecode_ = timecode;
+
+ base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(
+ (cluster_timecode_ + timecode) * timecode_multiplier_);
+
+ scoped_refptr<StreamParserBuffer> buffer;
+ if (buffer_type != DemuxerStream::TEXT) {
+ // The first bit of the flags is set when a SimpleBlock contains only
+ // keyframes. If this is a Block, then inspection of the payload is
+ // necessary to determine whether it contains a keyframe or not.
+ // http://www.matroska.org/technical/specs/index.html
+ bool is_keyframe =
+ is_simple_block ? (flags & 0x80) != 0 : track->IsKeyframe(data, size);
+
+    // Every encrypted Block has a signal byte and IV prepended to it. The
+    // current encrypted WebM specification draft is at
+    // http://wiki.webmproject.org/encryption/webm-encryption-rfc
+ scoped_ptr<DecryptConfig> decrypt_config;
+ int data_offset = 0;
+ if (!encryption_key_id.empty() &&
+ !WebMCreateDecryptConfig(
+ data, size,
+ reinterpret_cast<const uint8*>(encryption_key_id.data()),
+ encryption_key_id.size(),
+ &decrypt_config, &data_offset)) {
+ return false;
+ }
+
+ // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
+ // type with remapped bytestream track numbers and allow multiple tracks as
+ // applicable. See https://crbug.com/341581.
+ buffer = StreamParserBuffer::CopyFrom(
+ data + data_offset, size - data_offset,
+ additional, additional_size,
+ is_keyframe, buffer_type, track_num);
+
+ if (decrypt_config)
+ buffer->set_decrypt_config(decrypt_config.Pass());
+ } else {
+ std::string id, settings, content;
+ WebMWebVTTParser::Parse(data, size, &id, &settings, &content);
+
+ std::vector<uint8> side_data;
+ MakeSideData(id.begin(), id.end(),
+ settings.begin(), settings.end(),
+ &side_data);
+
+ // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
+ // type with remapped bytestream track numbers and allow multiple tracks as
+ // applicable. See https://crbug.com/341581.
+ buffer = StreamParserBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(content.data()),
+ content.length(),
+ &side_data[0],
+ side_data.size(),
+ true, buffer_type, track_num);
+ }
+
+ buffer->set_timestamp(timestamp);
+ if (cluster_start_time_ == kNoTimestamp())
+ cluster_start_time_ = timestamp;
+
+ if (block_duration >= 0) {
+ buffer->set_duration(base::TimeDelta::FromMicroseconds(
+ block_duration * timecode_multiplier_));
+ } else {
+ DCHECK_NE(buffer_type, DemuxerStream::TEXT);
+ buffer->set_duration(track->default_duration());
+ }
+
+ if (discard_padding != 0) {
+ buffer->set_discard_padding(std::make_pair(
+ base::TimeDelta(),
+ base::TimeDelta::FromMicroseconds(discard_padding / 1000)));
+ }
+
+ return track->AddBuffer(buffer);
+}
+
+WebMClusterParser::Track::Track(int track_num,
+ bool is_video,
+ base::TimeDelta default_duration,
+ const LogCB& log_cb)
+ : track_num_(track_num),
+ is_video_(is_video),
+ default_duration_(default_duration),
+ estimated_next_frame_duration_(kNoTimestamp()),
+ log_cb_(log_cb) {
+ DCHECK(default_duration_ == kNoTimestamp() ||
+ default_duration_ > base::TimeDelta());
+}
+
+WebMClusterParser::Track::~Track() {}
+
+base::TimeDelta WebMClusterParser::Track::GetReadyUpperBound() {
+ DCHECK(ready_buffers_.empty());
+ if (last_added_buffer_missing_duration_)
+ return last_added_buffer_missing_duration_->GetDecodeTimestamp();
+
+ return kInfiniteDuration();
+}
+
+void WebMClusterParser::Track::ExtractReadyBuffers(
+ const base::TimeDelta before_timestamp) {
+ DCHECK(ready_buffers_.empty());
+ DCHECK(base::TimeDelta() <= before_timestamp);
+ DCHECK(kNoTimestamp() != before_timestamp);
+
+ if (buffers_.empty())
+ return;
+
+ if (buffers_.back()->GetDecodeTimestamp() < before_timestamp) {
+ // All of |buffers_| are ready.
+ ready_buffers_.swap(buffers_);
+ DVLOG(3) << __FUNCTION__ << " : " << track_num_ << " All "
+ << ready_buffers_.size() << " are ready: before upper bound ts "
+ << before_timestamp.InSecondsF();
+ return;
+ }
+
+ // Not all of |buffers_| are ready yet. Move any that are ready to
+ // |ready_buffers_|.
+ while (true) {
+ const scoped_refptr<StreamParserBuffer>& buffer = buffers_.front();
+ if (buffer->GetDecodeTimestamp() >= before_timestamp)
+ break;
+ ready_buffers_.push_back(buffer);
+ buffers_.pop_front();
+ DCHECK(!buffers_.empty());
+ }
+
+ DVLOG(3) << __FUNCTION__ << " : " << track_num_ << " Only "
+ << ready_buffers_.size() << " ready, " << buffers_.size()
+ << " at or after upper bound ts " << before_timestamp.InSecondsF();
+}
+
+bool WebMClusterParser::Track::AddBuffer(
+ const scoped_refptr<StreamParserBuffer>& buffer) {
+ DVLOG(2) << "AddBuffer() : " << track_num_
+ << " ts " << buffer->timestamp().InSecondsF()
+ << " dur " << buffer->duration().InSecondsF()
+ << " kf " << buffer->IsKeyframe()
+ << " size " << buffer->data_size();
+
+ if (last_added_buffer_missing_duration_) {
+ base::TimeDelta derived_duration =
+ buffer->timestamp() - last_added_buffer_missing_duration_->timestamp();
+ last_added_buffer_missing_duration_->set_duration(derived_duration);
+
+ DVLOG(2) << "AddBuffer() : applied derived duration to held-back buffer : "
+ << " ts "
+ << last_added_buffer_missing_duration_->timestamp().InSecondsF()
+ << " dur "
+ << last_added_buffer_missing_duration_->duration().InSecondsF()
+ << " kf " << last_added_buffer_missing_duration_->IsKeyframe()
+ << " size " << last_added_buffer_missing_duration_->data_size();
+ scoped_refptr<StreamParserBuffer> updated_buffer =
+ last_added_buffer_missing_duration_;
+ last_added_buffer_missing_duration_ = NULL;
+ if (!QueueBuffer(updated_buffer))
+ return false;
+ }
+
+ if (buffer->duration() == kNoTimestamp()) {
+ last_added_buffer_missing_duration_ = buffer;
+ DVLOG(2) << "AddBuffer() : holding back buffer that is missing duration";
+ return true;
+ }
+
+ return QueueBuffer(buffer);
+}
+
+void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
+ if (!last_added_buffer_missing_duration_)
+ return;
+
+ last_added_buffer_missing_duration_->set_duration(GetDurationEstimate());
+
+ DVLOG(2) << "ApplyDurationEstimateIfNeeded() : new dur : "
+ << " ts "
+ << last_added_buffer_missing_duration_->timestamp().InSecondsF()
+ << " dur "
+ << last_added_buffer_missing_duration_->duration().InSecondsF()
+ << " kf " << last_added_buffer_missing_duration_->IsKeyframe()
+ << " size " << last_added_buffer_missing_duration_->data_size();
+
+  // Don't use the applied duration as a future estimate (i.e., don't use
+  // QueueBuffer() here).
+ buffers_.push_back(last_added_buffer_missing_duration_);
+ last_added_buffer_missing_duration_ = NULL;
+}
+
+void WebMClusterParser::Track::ClearReadyBuffers() {
+ // Note that |buffers_| are kept and |estimated_next_frame_duration_| is not
+ // reset here.
+ ready_buffers_.clear();
+}
+
+void WebMClusterParser::Track::Reset() {
+ ClearReadyBuffers();
+ buffers_.clear();
+ last_added_buffer_missing_duration_ = NULL;
+}
+
+bool WebMClusterParser::Track::IsKeyframe(const uint8* data, int size) const {
+ // For now, assume that all blocks are keyframes for datatypes other than
+ // video. This is a valid assumption for Vorbis, WebVTT, & Opus.
+ if (!is_video_)
+ return true;
+
+ // Make sure the block is big enough for the minimal keyframe header size.
+ if (size < 7)
+ return false;
+
+ // The LSb of the first byte must be a 0 for a keyframe.
+ // http://tools.ietf.org/html/rfc6386 Section 19.1
+ if ((data[0] & 0x01) != 0)
+ return false;
+
+ // Verify VP8 keyframe startcode.
+ // http://tools.ietf.org/html/rfc6386 Section 19.1
+ if (data[3] != 0x9d || data[4] != 0x01 || data[5] != 0x2a)
+ return false;
+
+ return true;
+}
+
+bool WebMClusterParser::Track::QueueBuffer(
+ const scoped_refptr<StreamParserBuffer>& buffer) {
+ DCHECK(!last_added_buffer_missing_duration_);
+
+  // WebMClusterParser::OnBlock() emits a MEDIA_LOG and signals a parse error
+  // when it detects a decreasing block timecode within a cluster. Therefore,
+  // we should never see one here.
+ base::TimeDelta previous_buffers_timestamp = buffers_.empty() ?
+ base::TimeDelta() : buffers_.back()->GetDecodeTimestamp();
+ CHECK(previous_buffers_timestamp <= buffer->GetDecodeTimestamp());
+
+ base::TimeDelta duration = buffer->duration();
+ if (duration < base::TimeDelta() || duration == kNoTimestamp()) {
+ MEDIA_LOG(log_cb_) << "Invalid buffer duration: " << duration.InSecondsF();
+ return false;
+ }
+
+ // The estimated frame duration is the minimum non-zero duration since the
+ // last initialization segment. The minimum is used to ensure frame durations
+ // aren't overestimated.
+ if (duration > base::TimeDelta()) {
+ if (estimated_next_frame_duration_ == kNoTimestamp()) {
+ estimated_next_frame_duration_ = duration;
+ } else {
+ estimated_next_frame_duration_ =
+ std::min(duration, estimated_next_frame_duration_);
+ }
+ }
+
+ buffers_.push_back(buffer);
+ return true;
+}
+
+base::TimeDelta WebMClusterParser::Track::GetDurationEstimate() {
+ base::TimeDelta duration = estimated_next_frame_duration_;
+ if (duration != kNoTimestamp()) {
+ DVLOG(3) << __FUNCTION__ << " : using estimated duration";
+ } else {
+ DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration";
+ if (is_video_) {
+ duration = base::TimeDelta::FromMilliseconds(
+ kDefaultVideoBufferDurationInMs);
+ } else {
+ duration = base::TimeDelta::FromMilliseconds(
+ kDefaultAudioBufferDurationInMs);
+ }
+ }
+
+ DCHECK(duration > base::TimeDelta());
+ DCHECK(duration != kNoTimestamp());
+ return duration;
+}
+
+void WebMClusterParser::ClearTextTrackReadyBuffers() {
+ text_buffers_map_.clear();
+ for (TextTrackMap::iterator it = text_track_map_.begin();
+ it != text_track_map_.end();
+ ++it) {
+ it->second.ClearReadyBuffers();
+ }
+}
+
+void WebMClusterParser::ResetTextTracks() {
+ ClearTextTrackReadyBuffers();
+ for (TextTrackMap::iterator it = text_track_map_.begin();
+ it != text_track_map_.end();
+ ++it) {
+ it->second.Reset();
+ }
+}
+
+void WebMClusterParser::UpdateReadyBuffers() {
+ DCHECK(ready_buffer_upper_bound_ == kNoTimestamp());
+ DCHECK(text_buffers_map_.empty());
+
+ if (cluster_ended_) {
+ audio_.ApplyDurationEstimateIfNeeded();
+ video_.ApplyDurationEstimateIfNeeded();
+ // Per OnBlock(), all text buffers should already have valid durations, so
+ // there is no need to call ApplyDurationEstimateIfNeeded() on text tracks
+ // here.
+ ready_buffer_upper_bound_ = kInfiniteDuration();
+ DCHECK(ready_buffer_upper_bound_ == audio_.GetReadyUpperBound());
+ DCHECK(ready_buffer_upper_bound_ == video_.GetReadyUpperBound());
+ } else {
+ ready_buffer_upper_bound_ = std::min(audio_.GetReadyUpperBound(),
+ video_.GetReadyUpperBound());
+ DCHECK(base::TimeDelta() <= ready_buffer_upper_bound_);
+ DCHECK(kNoTimestamp() != ready_buffer_upper_bound_);
+ }
+
+ // Prepare each track's ready buffers for retrieval.
+ audio_.ExtractReadyBuffers(ready_buffer_upper_bound_);
+ video_.ExtractReadyBuffers(ready_buffer_upper_bound_);
+ for (TextTrackMap::iterator itr = text_track_map_.begin();
+ itr != text_track_map_.end();
+ ++itr) {
+ itr->second.ExtractReadyBuffers(ready_buffer_upper_bound_);
+ }
+}
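+
+// Illustrative sketch (an annotation, not part of the original patch): with
+// the cluster not yet ended, suppose |audio_| holds aside a buffer missing
+// its duration at decode timestamp 70ms, while |video_| holds none (its
+// GetReadyUpperBound() returns kInfiniteDuration()). Then:
+//
+//   ready_buffer_upper_bound_ =
+//       std::min(base::TimeDelta::FromMilliseconds(70), kInfiniteDuration());
+//
+// yields 70ms, so ExtractReadyBuffers() moves only buffers with decode
+// timestamp below 70ms into each track's |ready_buffers_|, keeping the
+// emitted timestamps monotonically non-decreasing across tracks.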
+
+WebMClusterParser::Track*
+WebMClusterParser::FindTextTrack(int track_num) {
+ const TextTrackMap::iterator it = text_track_map_.find(track_num);
+
+ if (it == text_track_map_.end())
+ return NULL;
+
+ return &it->second;
+}
+
+} // namespace media
diff --git a/chromium/media/formats/webm/webm_cluster_parser.h b/chromium/media/formats/webm/webm_cluster_parser.h
new file mode 100644
index 00000000000..ab1d4a11fb1
--- /dev/null
+++ b/chromium/media/formats/webm/webm_cluster_parser.h
@@ -0,0 +1,275 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_WEBM_WEBM_CLUSTER_PARSER_H_
+#define MEDIA_FORMATS_WEBM_WEBM_CLUSTER_PARSER_H_
+
+#include <deque>
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+#include "media/base/media_log.h"
+#include "media/base/stream_parser.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/formats/webm/webm_parser.h"
+#include "media/formats/webm/webm_tracks_parser.h"
+
+namespace media {
+
+class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
+ public:
+ typedef StreamParser::TrackId TrackId;
+ typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+ typedef std::map<TrackId, const BufferQueue> TextBufferQueueMap;
+
+  // Arbitrarily chosen numbers to estimate the duration of a buffer if none
+  // is set and there is not enough information to get a better estimate.
+ // TODO(wolenetz/acolwell): Parse audio codebook to determine missing audio
+ // frame durations. See http://crbug.com/351166.
+ enum {
+ kDefaultAudioBufferDurationInMs = 23, // Common 1k samples @44.1kHz
+ kDefaultVideoBufferDurationInMs = 42 // Low 24fps to reduce stalls
+ };
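+
+  // Illustrative arithmetic behind the defaults above (an annotation, not
+  // part of the original patch): a common Vorbis/Opus packet of 1024 samples
+  // at 44.1 kHz lasts 1024 / 44100 s ~= 23.2 ms, and one 24 fps video frame
+  // lasts 1000 / 24 ms ~= 41.7 ms, hence the 23 ms and 42 ms values used
+  // here.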
+
+ private:
+ // Helper class that manages per-track state.
+ class Track {
+ public:
+ Track(int track_num,
+ bool is_video,
+ base::TimeDelta default_duration,
+ const LogCB& log_cb);
+ ~Track();
+
+ int track_num() const { return track_num_; }
+
+ // If a buffer is currently held aside pending duration calculation, returns
+ // its decode timestamp. Otherwise, returns kInfiniteDuration().
+ base::TimeDelta GetReadyUpperBound();
+
+ // Prepares |ready_buffers_| for retrieval. Prior to calling,
+ // |ready_buffers_| must be empty. Moves all |buffers_| with timestamp
+ // before |before_timestamp| to |ready_buffers_|, preserving their order.
+ void ExtractReadyBuffers(const base::TimeDelta before_timestamp);
+
+ const BufferQueue& ready_buffers() const { return ready_buffers_; }
+
+    // If |last_added_buffer_missing_duration_| is set, updates its duration
+    // relative to |buffer|'s timestamp, adds it to |buffers_|, and unsets
+    // |last_added_buffer_missing_duration_|. Then, if |buffer| is missing its
+    // duration, saves |buffer| into |last_added_buffer_missing_duration_|;
+    // otherwise, adds |buffer| to |buffers_|.
+ bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
+
+    // If |last_added_buffer_missing_duration_| is set, updates its duration
+    // to the non-kNoTimestamp() value of |estimated_next_frame_duration_|, or
+    // to an arbitrary default, then adds it to |buffers_| and unsets
+    // |last_added_buffer_missing_duration_|. (This method helps the stream
+    // parser emit all buffers in a media segment before signaling the end of
+    // the segment.)
+ void ApplyDurationEstimateIfNeeded();
+
+    // Clears |ready_buffers_| (use ExtractReadyBuffers() to fill it again).
+    // Leaves |buffers_| and any held-aside buffer that is missing its
+    // duration as-is.
+ void ClearReadyBuffers();
+
+ // Clears all buffer state, including any possibly held-aside buffer that
+ // was missing duration, and all contents of |buffers_| and
+ // |ready_buffers_|.
+ void Reset();
+
+ // Helper function used to inspect block data to determine if the
+ // block is a keyframe.
+ // |data| contains the bytes in the block.
+ // |size| indicates the number of bytes in |data|.
+ bool IsKeyframe(const uint8* data, int size) const;
+
+ base::TimeDelta default_duration() const { return default_duration_; }
+
+ private:
+ // Helper that sanity-checks |buffer| duration, updates
+ // |estimated_next_frame_duration_|, and adds |buffer| to |buffers_|.
+ // Returns false if |buffer| failed sanity check and therefore was not added
+ // to |buffers_|. Returns true otherwise.
+ bool QueueBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
+
+ // Helper that calculates the buffer duration to use in
+ // ApplyDurationEstimateIfNeeded().
+ base::TimeDelta GetDurationEstimate();
+
+ int track_num_;
+ bool is_video_;
+
+ // Parsed track buffers, each with duration and in (decode) timestamp order,
+ // that have not yet been extracted into |ready_buffers_|. Note that up to
+ // one additional buffer missing duration may be tracked by
+ // |last_added_buffer_missing_duration_|.
+ BufferQueue buffers_;
+ scoped_refptr<StreamParserBuffer> last_added_buffer_missing_duration_;
+
+ // Buffers in (decode) timestamp order that were previously parsed into and
+ // extracted from |buffers_|. Buffers are moved from |buffers_| to
+ // |ready_buffers_| by ExtractReadyBuffers() if they are below a specified
+ // upper bound timestamp. Track users can therefore extract only those
+ // parsed buffers which are "ready" for emission (all before some maximum
+ // timestamp).
+ BufferQueue ready_buffers_;
+
+ // If kNoTimestamp(), then |estimated_next_frame_duration_| will be used.
+ base::TimeDelta default_duration_;
+
+    // If kNoTimestamp(), then a default value will be used. This estimate is
+    // the minimum non-zero duration seen or derived so far for this track,
+    // and is valid only if |default_duration_| is kNoTimestamp().
+ base::TimeDelta estimated_next_frame_duration_;
+
+ LogCB log_cb_;
+ };
+
+ typedef std::map<int, Track> TextTrackMap;
+
+ public:
+ WebMClusterParser(int64 timecode_scale,
+ int audio_track_num,
+ base::TimeDelta audio_default_duration,
+ int video_track_num,
+ base::TimeDelta video_default_duration,
+ const WebMTracksParser::TextTracks& text_tracks,
+ const std::set<int64>& ignored_tracks,
+ const std::string& audio_encryption_key_id,
+ const std::string& video_encryption_key_id,
+ const LogCB& log_cb);
+ virtual ~WebMClusterParser();
+
+ // Resets the parser state so it can accept a new cluster.
+ void Reset();
+
+ // Parses a WebM cluster element in |buf|.
+ //
+ // Returns -1 if the parse fails.
+ // Returns 0 if more data is needed.
+ // Returns the number of bytes parsed on success.
+ int Parse(const uint8* buf, int size);
+
+ base::TimeDelta cluster_start_time() const { return cluster_start_time_; }
+
+ // Get the current ready buffers resulting from Parse().
+ // If the parse reached the end of cluster and the last buffer was held aside
+ // due to missing duration, the buffer is given an estimated duration and
+ // included in the result.
+  // Otherwise, if there is a buffer held aside due to missing duration for
+  // any of the tracks, no buffers with the same or greater (decode) timestamp
+  // will be included in the result.
+ // The returned deques are cleared by Parse() or Reset() and updated by the
+ // next calls to Get{Audio,Video}Buffers().
+ // If no Parse() or Reset() has occurred since the last call to Get{Audio,
+ // Video,Text}Buffers(), then the previous BufferQueue& is returned again
+ // without any recalculation.
+ const BufferQueue& GetAudioBuffers();
+ const BufferQueue& GetVideoBuffers();
+
+ // Constructs and returns a subset of |text_track_map_| containing only
+ // tracks with non-empty buffer queues produced by the last Parse() and
+ // filtered to exclude any buffers that have (decode) timestamp same or
+ // greater than the lowest (decode) timestamp across all tracks of any buffer
+ // held aside due to missing duration (unless the end of cluster has been
+ // reached).
+ // The returned map is cleared by Parse() or Reset() and updated by the next
+ // call to GetTextBuffers().
+ // If no Parse() or Reset() has occurred since the last call to
+ // GetTextBuffers(), then the previous TextBufferQueueMap& is returned again
+ // without any recalculation.
+ const TextBufferQueueMap& GetTextBuffers();
+
+ // Returns true if the last Parse() call stopped at the end of a cluster.
+ bool cluster_ended() const { return cluster_ended_; }
+
+ private:
+ // WebMParserClient methods.
+ virtual WebMParserClient* OnListStart(int id) OVERRIDE;
+ virtual bool OnListEnd(int id) OVERRIDE;
+ virtual bool OnUInt(int id, int64 val) OVERRIDE;
+ virtual bool OnBinary(int id, const uint8* data, int size) OVERRIDE;
+
+ bool ParseBlock(bool is_simple_block, const uint8* buf, int size,
+ const uint8* additional, int additional_size, int duration,
+ int64 discard_padding);
+ bool OnBlock(bool is_simple_block, int track_num, int timecode, int duration,
+ int flags, const uint8* data, int size,
+ const uint8* additional, int additional_size,
+ int64 discard_padding);
+
+ // Resets the Track objects associated with each text track.
+ void ResetTextTracks();
+
+  // Clears the ready buffers associated with each text track.
+ void ClearTextTrackReadyBuffers();
+
+ // Helper method for Get{Audio,Video,Text}Buffers() that recomputes
+ // |ready_buffer_upper_bound_| and calls ExtractReadyBuffers() on each track.
+ // If |cluster_ended_| is true, first applies duration estimate if needed for
+ // |audio_| and |video_| and sets |ready_buffer_upper_bound_| to
+ // kInfiniteDuration(). Otherwise, sets |ready_buffer_upper_bound_| to the
+  // minimum upper bound across |audio_| and |video_|. (Text tracks cannot
+  // have buffers missing duration, so they are not involved in calculating
+  // the upper bound.)
+ // Parse() or Reset() must be called between calls to UpdateReadyBuffers() to
+ // clear each track's ready buffers and to reset |ready_buffer_upper_bound_|
+ // to kNoTimestamp().
+ void UpdateReadyBuffers();
+
+ // Search for the indicated track_num among the text tracks. Returns NULL
+ // if that track num is not a text track.
+ Track* FindTextTrack(int track_num);
+
+ double timecode_multiplier_; // Multiplier used to convert timecodes into
+ // microseconds.
+ std::set<int64> ignored_tracks_;
+ std::string audio_encryption_key_id_;
+ std::string video_encryption_key_id_;
+
+ WebMListParser parser_;
+
+ int64 last_block_timecode_;
+ scoped_ptr<uint8[]> block_data_;
+ int block_data_size_;
+ int64 block_duration_;
+ int64 block_add_id_;
+ scoped_ptr<uint8[]> block_additional_data_;
+ int block_additional_data_size_;
+ int64 discard_padding_;
+ bool discard_padding_set_;
+
+ int64 cluster_timecode_;
+ base::TimeDelta cluster_start_time_;
+ bool cluster_ended_;
+
+ Track audio_;
+ Track video_;
+ TextTrackMap text_track_map_;
+
+ // Subset of |text_track_map_| maintained by GetTextBuffers(), and cleared by
+ // ClearTextTrackReadyBuffers(). Callers of GetTextBuffers() get a const-ref
+ // to this member.
+ TextBufferQueueMap text_buffers_map_;
+
+ // Limits the range of buffers returned by Get{Audio,Video,Text}Buffers() to
+ // this exclusive upper bound. Set to kNoTimestamp(), meaning not yet
+ // calculated, by Reset() and Parse(). If kNoTimestamp(), then
+ // Get{Audio,Video,Text}Buffers() will calculate it to be the minimum (decode)
+  // timestamp across all tracks' |last_added_buffer_missing_duration_|, or
+ // kInfiniteDuration() if no buffers are currently missing duration.
+ base::TimeDelta ready_buffer_upper_bound_;
+
+ LogCB log_cb_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WebMClusterParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FORMATS_WEBM_WEBM_CLUSTER_PARSER_H_
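+
+// Illustrative usage sketch (an annotation, not part of the original patch):
+// a minimal caller driving the parser over one cluster. The track numbers,
+// timecode scale, and buffer variables are hypothetical; real callers obtain
+// them from WebMTracksParser and the byte stream.
+//
+//   WebMClusterParser parser(1000000,            // timecode scale (1 ms)
+//                            1, kNoTimestamp(),  // audio track, no default
+//                            2, kNoTimestamp(),  // video track, no default
+//                            WebMTracksParser::TextTracks(),
+//                            std::set<int64>(), std::string(), std::string(),
+//                            LogCB());
+//   int result = parser.Parse(cluster_data, cluster_size);
+//   if (result > 0) {
+//     const WebMClusterParser::BufferQueue& audio = parser.GetAudioBuffers();
+//     const WebMClusterParser::BufferQueue& video = parser.GetVideoBuffers();
+//     // Buffers at or after any held-aside (missing-duration) buffer's
+//     // timestamp are withheld until the parser sees the cluster end.
+//   }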
diff --git a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
new file mode 100644
index 00000000000..55dc791c2ce
--- /dev/null
+++ b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
@@ -0,0 +1,957 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <cstdlib>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "media/base/decrypt_config.h"
+#include "media/formats/webm/cluster_builder.h"
+#include "media/formats/webm/webm_cluster_parser.h"
+#include "media/formats/webm/webm_constants.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::_;
+
+namespace media {
+
+typedef WebMTracksParser::TextTracks TextTracks;
+
+enum {
+ kTimecodeScale = 1000000, // Timecode scale for millisecond timestamps.
+ kAudioTrackNum = 1,
+ kVideoTrackNum = 2,
+ kTextTrackNum = 3,
+ kTestAudioFrameDefaultDurationInMs = 13,
+ kTestVideoFrameDefaultDurationInMs = 17
+};
+
+COMPILE_ASSERT(
+ static_cast<int>(kTestAudioFrameDefaultDurationInMs) !=
+ static_cast<int>(WebMClusterParser::kDefaultAudioBufferDurationInMs),
+ test_default_is_same_as_estimation_fallback_audio_duration);
+COMPILE_ASSERT(
+ static_cast<int>(kTestVideoFrameDefaultDurationInMs) !=
+ static_cast<int>(WebMClusterParser::kDefaultVideoBufferDurationInMs),
+ test_default_is_same_as_estimation_fallback_video_duration);
+
+struct BlockInfo {
+ int track_num;
+ int timestamp;
+
+ // Negative value is allowed only for block groups (not simple blocks) and
+ // directs CreateCluster() to exclude BlockDuration entry from the cluster for
+ // this BlockGroup. The absolute value is used for parser verification.
+ // For simple blocks, this value must be non-negative, and is used only for
+ // parser verification.
+ int duration;
+ bool use_simple_block;
+};
+
+static const BlockInfo kDefaultBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 23, true },
+ { kVideoTrackNum, 33, 34, true }, // Assumes not using DefaultDuration
+ { kAudioTrackNum, 46, 23, true },
+ { kVideoTrackNum, 67, 33, false },
+ { kAudioTrackNum, 69, 23, false },
+ { kVideoTrackNum, 100, 33, false },
+};
+
+static const uint8 kEncryptedFrame[] = {
+ 0x01, // Block is encrypted
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 // IV
+};
+
+static scoped_ptr<Cluster> CreateCluster(int timecode,
+ const BlockInfo* block_info,
+ int block_count) {
+ ClusterBuilder cb;
+  cb.SetClusterTimecode(timecode);
+
+ for (int i = 0; i < block_count; i++) {
+ uint8 data[] = { 0x00 };
+ if (block_info[i].use_simple_block) {
+ CHECK_GE(block_info[i].duration, 0);
+ cb.AddSimpleBlock(block_info[i].track_num,
+ block_info[i].timestamp,
+ 0, data, sizeof(data));
+ continue;
+ }
+
+ if (block_info[i].duration < 0) {
+ cb.AddBlockGroupWithoutBlockDuration(block_info[i].track_num,
+ block_info[i].timestamp,
+ 0, data, sizeof(data));
+ continue;
+ }
+
+ cb.AddBlockGroup(block_info[i].track_num,
+ block_info[i].timestamp,
+ block_info[i].duration,
+ 0, data, sizeof(data));
+ }
+
+ return cb.Finish();
+}
+
+// Creates a Cluster with one encrypted Block. |bytes_to_write| is the number
+// of bytes of the encrypted frame to write.
+static scoped_ptr<Cluster> CreateEncryptedCluster(int bytes_to_write) {
+ CHECK_GT(bytes_to_write, 0);
+ CHECK_LE(bytes_to_write, static_cast<int>(sizeof(kEncryptedFrame)));
+
+ ClusterBuilder cb;
+ cb.SetClusterTimecode(0);
+ cb.AddSimpleBlock(kVideoTrackNum, 0, 0, kEncryptedFrame, bytes_to_write);
+ return cb.Finish();
+}
+
+static bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
+ const WebMClusterParser::BufferQueue& video_buffers,
+ const WebMClusterParser::BufferQueue& text_buffers,
+ const BlockInfo* block_info,
+ int block_count) {
+ int buffer_count = audio_buffers.size() + video_buffers.size() +
+ text_buffers.size();
+ if (block_count != buffer_count) {
+ DVLOG(1) << __FUNCTION__ << " : block_count (" << block_count
+ << ") mismatches buffer_count (" << buffer_count << ")";
+ return false;
+ }
+
+ size_t audio_offset = 0;
+ size_t video_offset = 0;
+ size_t text_offset = 0;
+ for (int i = 0; i < block_count; i++) {
+ const WebMClusterParser::BufferQueue* buffers = NULL;
+    size_t* offset = NULL;
+ StreamParserBuffer::Type expected_type = DemuxerStream::UNKNOWN;
+
+ if (block_info[i].track_num == kAudioTrackNum) {
+ buffers = &audio_buffers;
+ offset = &audio_offset;
+ expected_type = DemuxerStream::AUDIO;
+ } else if (block_info[i].track_num == kVideoTrackNum) {
+ buffers = &video_buffers;
+ offset = &video_offset;
+ expected_type = DemuxerStream::VIDEO;
+ } else if (block_info[i].track_num == kTextTrackNum) {
+ buffers = &text_buffers;
+ offset = &text_offset;
+ expected_type = DemuxerStream::TEXT;
+ } else {
+ LOG(ERROR) << "Unexpected track number " << block_info[i].track_num;
+ return false;
+ }
+
+ if (*offset >= buffers->size()) {
+ DVLOG(1) << __FUNCTION__ << " : Too few buffers (" << buffers->size()
+ << ") for track_num (" << block_info[i].track_num
+ << "), expected at least " << *offset + 1 << " buffers";
+ return false;
+ }
+
+ scoped_refptr<StreamParserBuffer> buffer = (*buffers)[(*offset)++];
+
+ EXPECT_EQ(block_info[i].timestamp, buffer->timestamp().InMilliseconds());
+ EXPECT_EQ(std::abs(block_info[i].duration),
+ buffer->duration().InMilliseconds());
+ EXPECT_EQ(expected_type, buffer->type());
+ EXPECT_EQ(block_info[i].track_num, buffer->track_id());
+ }
+
+ return true;
+}
+
+static bool VerifyBuffers(const scoped_ptr<WebMClusterParser>& parser,
+ const BlockInfo* block_info,
+ int block_count) {
+ const WebMClusterParser::TextBufferQueueMap& text_map =
+ parser->GetTextBuffers();
+ const WebMClusterParser::BufferQueue* text_buffers;
+ const WebMClusterParser::BufferQueue no_text_buffers;
+ if (!text_map.empty())
+ text_buffers = &(text_map.rbegin()->second);
+ else
+ text_buffers = &no_text_buffers;
+
+ return VerifyBuffers(parser->GetAudioBuffers(),
+ parser->GetVideoBuffers(),
+ *text_buffers,
+ block_info,
+ block_count);
+}
+
+static bool VerifyTextBuffers(
+ const scoped_ptr<WebMClusterParser>& parser,
+ const BlockInfo* block_info_ptr,
+ int block_count,
+ int text_track_num,
+ const WebMClusterParser::BufferQueue& text_buffers) {
+ const BlockInfo* const block_info_end = block_info_ptr + block_count;
+
+ typedef WebMClusterParser::BufferQueue::const_iterator TextBufferIter;
+ TextBufferIter buffer_iter = text_buffers.begin();
+ const TextBufferIter buffer_end = text_buffers.end();
+
+ while (block_info_ptr != block_info_end) {
+ const BlockInfo& block_info = *block_info_ptr++;
+
+ if (block_info.track_num != text_track_num)
+ continue;
+
+ EXPECT_FALSE(block_info.use_simple_block);
+ EXPECT_FALSE(buffer_iter == buffer_end);
+
+ const scoped_refptr<StreamParserBuffer> buffer = *buffer_iter++;
+ EXPECT_EQ(block_info.timestamp, buffer->timestamp().InMilliseconds());
+ EXPECT_EQ(std::abs(block_info.duration),
+ buffer->duration().InMilliseconds());
+ EXPECT_EQ(DemuxerStream::TEXT, buffer->type());
+ EXPECT_EQ(text_track_num, buffer->track_id());
+ }
+
+ EXPECT_TRUE(buffer_iter == buffer_end);
+ return true;
+}
+
+static void VerifyEncryptedBuffer(
+ scoped_refptr<StreamParserBuffer> buffer) {
+ EXPECT_TRUE(buffer->decrypt_config());
+ EXPECT_EQ(static_cast<unsigned long>(DecryptConfig::kDecryptionKeySize),
+ buffer->decrypt_config()->iv().length());
+}
+
+static void AppendToEnd(const WebMClusterParser::BufferQueue& src,
+ WebMClusterParser::BufferQueue* dest) {
+ for (WebMClusterParser::BufferQueue::const_iterator itr = src.begin();
+ itr != src.end(); ++itr) {
+ dest->push_back(*itr);
+ }
+}
+
+class WebMClusterParserTest : public testing::Test {
+ public:
+ WebMClusterParserTest()
+ : parser_(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ TextTracks(),
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB())) {}
+
+ protected:
+ void ResetParserToHaveDefaultDurations() {
+ base::TimeDelta default_audio_duration = base::TimeDelta::FromMilliseconds(
+ kTestAudioFrameDefaultDurationInMs);
+ base::TimeDelta default_video_duration = base::TimeDelta::FromMilliseconds(
+ kTestVideoFrameDefaultDurationInMs);
+ ASSERT_GE(default_audio_duration, base::TimeDelta());
+ ASSERT_GE(default_video_duration, base::TimeDelta());
+ ASSERT_NE(kNoTimestamp(), default_audio_duration);
+ ASSERT_NE(kNoTimestamp(), default_video_duration);
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ default_audio_duration,
+ kVideoTrackNum,
+ default_video_duration,
+ TextTracks(),
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB()));
+ }
+
+ scoped_ptr<WebMClusterParser> parser_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WebMClusterParserTest);
+};
+
+TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
+  // If a buffer is missing its duration and is being held back, then all
+  // other tracks' buffers that have the same or higher (decode) timestamp
+  // should be held back too, to keep the timestamps emitted for a cluster
+  // monotonically non-decreasing and in the same order as parsed.
+ InSequence s;
+
+ // Reset the parser to have 3 tracks: text, video (no default frame duration),
+ // and audio (with a default frame duration).
+ TextTracks text_tracks;
+ text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
+ base::TimeDelta default_audio_duration =
+ base::TimeDelta::FromMilliseconds(kTestAudioFrameDefaultDurationInMs);
+ ASSERT_GE(default_audio_duration, base::TimeDelta());
+ ASSERT_NE(kNoTimestamp(), default_audio_duration);
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ default_audio_duration,
+ kVideoTrackNum,
+ kNoTimestamp(),
+ text_tracks,
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB()));
+
+ const BlockInfo kBlockInfo[] = {
+ { kVideoTrackNum, 0, 33, true },
+ { kAudioTrackNum, 0, 23, false },
+ { kTextTrackNum, 10, 42, false },
+ { kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true },
+ { kVideoTrackNum, 33, 33, true },
+ { kAudioTrackNum, 36, kTestAudioFrameDefaultDurationInMs, true },
+ { kVideoTrackNum, 66, 33, true },
+ { kAudioTrackNum, 70, kTestAudioFrameDefaultDurationInMs, true },
+ { kAudioTrackNum, 83, kTestAudioFrameDefaultDurationInMs, true },
+ };
+
+ const int kExpectedBuffersOnPartialCluster[] = {
+ 0, // Video simple block without DefaultDuration should be held back
+ 0, // Audio buffer ready, but not emitted because its TS >= held back video
+ 0, // Text buffer ready, but not emitted because its TS >= held back video
+ 0, // 2nd audio buffer ready, also not emitted for same reason as first
+ 4, // All previous buffers emitted, 2nd video held back with no duration
+ 4, // 2nd video still has no duration, 3rd audio ready but not emitted
+ 6, // All previous buffers emitted, 3rd video held back with no duration
+ 6, // 3rd video still has no duration, 4th audio ready but not emitted
+ 9, // Cluster end emits all buffers and 3rd video's duration is estimated
+ };
+
+ ASSERT_EQ(arraysize(kBlockInfo), arraysize(kExpectedBuffersOnPartialCluster));
+ int block_count = arraysize(kBlockInfo);
+
+ // Iteratively create a cluster containing the first N+1 blocks and parse all
+ // but the last byte of the cluster (except when N==|block_count|, just parse
+ // the whole cluster). Verify that the corresponding entry in
+ // |kExpectedBuffersOnPartialCluster| identifies the exact subset of
+ // |kBlockInfo| returned by the parser.
+ for (int i = 0; i < block_count; ++i) {
+ if (i > 0)
+ parser_->Reset();
+    // Since we don't know the exact offsets of each block in the full
+    // cluster, build a cluster with exactly one additional block so that
+    // parsing all but one byte deterministically parses all but the last
+    // full block. Don't exceed |block_count| blocks, though.
+ int blocks_in_cluster = std::min(i + 2, block_count);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo,
+ blocks_in_cluster));
+ // Parse all but the last byte unless we need to parse the full cluster.
+ bool parse_full_cluster = i == (block_count - 1);
+ int result = parser_->Parse(cluster->data(), parse_full_cluster ?
+ cluster->size() : cluster->size() - 1);
+ if (parse_full_cluster) {
+ DVLOG(1) << "Verifying parse result of full cluster of "
+ << blocks_in_cluster << " blocks";
+ EXPECT_EQ(cluster->size(), result);
+ } else {
+ DVLOG(1) << "Verifying parse result of cluster of "
+ << blocks_in_cluster << " blocks with last block incomplete";
+ EXPECT_GT(cluster->size(), result);
+ EXPECT_LT(0, result);
+ }
+
+ EXPECT_TRUE(VerifyBuffers(parser_, kBlockInfo,
+ kExpectedBuffersOnPartialCluster[i]));
+ }
+}
+
+TEST_F(WebMClusterParserTest, Reset) {
+ InSequence s;
+
+ int block_count = arraysize(kDefaultBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kDefaultBlockInfo, block_count));
+
+ // Send slightly less than the full cluster so all but the last block is
+ // parsed.
+ int result = parser_->Parse(cluster->data(), cluster->size() - 1);
+ EXPECT_GT(result, 0);
+ EXPECT_LT(result, cluster->size());
+
+ ASSERT_TRUE(VerifyBuffers(parser_, kDefaultBlockInfo, block_count - 1));
+ parser_->Reset();
+
+ // Now parse a whole cluster to verify that all the blocks will get parsed.
+ result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kDefaultBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest, ParseClusterWithSingleCall) {
+ int block_count = arraysize(kDefaultBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kDefaultBlockInfo, block_count));
+
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kDefaultBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest, ParseClusterWithMultipleCalls) {
+ int block_count = arraysize(kDefaultBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kDefaultBlockInfo, block_count));
+
+ WebMClusterParser::BufferQueue audio_buffers;
+ WebMClusterParser::BufferQueue video_buffers;
+ const WebMClusterParser::BufferQueue no_text_buffers;
+
+ const uint8* data = cluster->data();
+ int size = cluster->size();
+ int default_parse_size = 3;
+ int parse_size = std::min(default_parse_size, size);
+
+ while (size > 0) {
+ int result = parser_->Parse(data, parse_size);
+ ASSERT_GE(result, 0);
+ ASSERT_LE(result, parse_size);
+
+ if (result == 0) {
+ // The parser needs more data so increase the parse_size a little.
+ parse_size += default_parse_size;
+ parse_size = std::min(parse_size, size);
+ continue;
+ }
+
+ AppendToEnd(parser_->GetAudioBuffers(), &audio_buffers);
+ AppendToEnd(parser_->GetVideoBuffers(), &video_buffers);
+
+ parse_size = default_parse_size;
+
+ data += result;
+ size -= result;
+ }
+ ASSERT_TRUE(VerifyBuffers(audio_buffers, video_buffers,
+ no_text_buffers, kDefaultBlockInfo,
+ block_count));
+}
+
+// Verify that both BlockGroups with the BlockDuration before the Block
+// and BlockGroups with the BlockDuration after the Block are supported
+// correctly.
+// Note: Raw bytes are used here because ClusterBuilder only generates
+// one of these scenarios.
+TEST_F(WebMClusterParserTest, ParseBlockGroup) {
+ const BlockInfo kBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, false },
+ { kVideoTrackNum, 33, 34, false },
+ };
+ int block_count = arraysize(kBlockInfo);
+
+ const uint8 kClusterData[] = {
+ 0x1F, 0x43, 0xB6, 0x75, 0x9B, // Cluster(size=27)
+ 0xE7, 0x81, 0x00, // Timecode(size=1, value=0)
+ // BlockGroup with BlockDuration before Block.
+ 0xA0, 0x8A, // BlockGroup(size=10)
+ 0x9B, 0x81, 0x17, // BlockDuration(size=1, value=23)
+ 0xA1, 0x85, 0x81, 0x00, 0x00, 0x00, 0xaa, // Block(size=5, track=1, ts=0)
+ // BlockGroup with BlockDuration after Block.
+ 0xA0, 0x8A, // BlockGroup(size=10)
+ 0xA1, 0x85, 0x82, 0x00, 0x21, 0x00, 0x55, // Block(size=5, track=2, ts=33)
+ 0x9B, 0x81, 0x22, // BlockDuration(size=1, value=34)
+ };
+ const int kClusterSize = sizeof(kClusterData);
+
+ int result = parser_->Parse(kClusterData, kClusterSize);
+ EXPECT_EQ(kClusterSize, result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest, ParseSimpleBlockAndBlockGroupMixture) {
+ const BlockInfo kBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 23, false },
+ { kVideoTrackNum, 33, 34, true },
+ { kAudioTrackNum, 46, 23, false },
+ { kVideoTrackNum, 67, 33, false },
+ };
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest, IgnoredTracks) {
+ std::set<int64> ignored_tracks;
+ ignored_tracks.insert(kTextTrackNum);
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ TextTracks(),
+ ignored_tracks,
+ std::string(),
+ std::string(),
+ LogCB()));
+
+ const BlockInfo kInputBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 23, true },
+ { kVideoTrackNum, 33, 34, true },
+ { kTextTrackNum, 33, 99, true },
+ { kAudioTrackNum, 46, 23, true },
+ { kVideoTrackNum, 67, 34, true },
+ };
+ int input_block_count = arraysize(kInputBlockInfo);
+
+ const BlockInfo kOutputBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 23, true },
+ { kVideoTrackNum, 33, 34, true },
+ { kAudioTrackNum, 46, 23, true },
+ { kVideoTrackNum, 67, 34, true },
+ };
+ int output_block_count = arraysize(kOutputBlockInfo);
+
+ scoped_ptr<Cluster> cluster(
+ CreateCluster(0, kInputBlockInfo, input_block_count));
+
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kOutputBlockInfo, output_block_count));
+}
+
+TEST_F(WebMClusterParserTest, ParseTextTracks) {
+ TextTracks text_tracks;
+
+ text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ text_tracks,
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB()));
+
+ const BlockInfo kInputBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 23, true },
+ { kVideoTrackNum, 33, 34, true },
+ { kTextTrackNum, 33, 42, false },
+ { kAudioTrackNum, 46, 23, true },
+ { kTextTrackNum, 55, 44, false },
+ { kVideoTrackNum, 67, 34, true },
+ };
+ int input_block_count = arraysize(kInputBlockInfo);
+
+ scoped_ptr<Cluster> cluster(
+ CreateCluster(0, kInputBlockInfo, input_block_count));
+
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kInputBlockInfo, input_block_count));
+}
+
+TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
+ TextTracks text_tracks;
+
+ text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ text_tracks,
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB()));
+
+ const BlockInfo kInputBlockInfo[] = {
+ { kTextTrackNum, 33, 42, true },
+ };
+ int input_block_count = arraysize(kInputBlockInfo);
+
+ scoped_ptr<Cluster> cluster(
+ CreateCluster(0, kInputBlockInfo, input_block_count));
+
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_LT(result, 0);
+}
+
+TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
+ TextTracks text_tracks;
+
+ const int kSubtitleTextTrackNum = kTextTrackNum;
+ const int kCaptionTextTrackNum = kTextTrackNum + 1;
+
+ text_tracks.insert(std::make_pair(TextTracks::key_type(kSubtitleTextTrackNum),
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
+
+ text_tracks.insert(std::make_pair(TextTracks::key_type(kCaptionTextTrackNum),
+ TextTrackConfig(kTextCaptions, "", "",
+ "")));
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ text_tracks,
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB()));
+
+ const BlockInfo kInputBlockInfo[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 23, true },
+ { kVideoTrackNum, 33, 34, true },
+ { kSubtitleTextTrackNum, 33, 42, false },
+ { kAudioTrackNum, 46, 23, true },
+ { kCaptionTextTrackNum, 55, 44, false },
+ { kVideoTrackNum, 67, 34, true },
+ { kSubtitleTextTrackNum, 67, 33, false },
+ };
+ int input_block_count = arraysize(kInputBlockInfo);
+
+ scoped_ptr<Cluster> cluster(
+ CreateCluster(0, kInputBlockInfo, input_block_count));
+
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+
+ const WebMClusterParser::TextBufferQueueMap& text_map =
+ parser_->GetTextBuffers();
+ for (WebMClusterParser::TextBufferQueueMap::const_iterator itr =
+ text_map.begin();
+ itr != text_map.end();
+ ++itr) {
+ const TextTracks::const_iterator find_result =
+ text_tracks.find(itr->first);
+ ASSERT_TRUE(find_result != text_tracks.end());
+ ASSERT_TRUE(VerifyTextBuffers(parser_, kInputBlockInfo, input_block_count,
+ itr->first, itr->second));
+ }
+}
+
+TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
+ scoped_ptr<Cluster> cluster(CreateEncryptedCluster(sizeof(kEncryptedFrame)));
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ TextTracks(),
+ std::set<int64>(),
+ std::string(),
+ "video_key_id",
+ LogCB()));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_EQ(1UL, parser_->GetVideoBuffers().size());
+ scoped_refptr<StreamParserBuffer> buffer = parser_->GetVideoBuffers()[0];
+ VerifyEncryptedBuffer(buffer);
+}
+
+TEST_F(WebMClusterParserTest, ParseBadEncryptedBlock) {
+ scoped_ptr<Cluster> cluster(
+ CreateEncryptedCluster(sizeof(kEncryptedFrame) - 1));
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ TextTracks(),
+ std::set<int64>(),
+ std::string(),
+ "video_key_id",
+ LogCB()));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(-1, result);
+}
+
+TEST_F(WebMClusterParserTest, ParseInvalidZeroSizedCluster) {
+ const uint8 kBuffer[] = {
+ 0x1F, 0x43, 0xB6, 0x75, 0x80, // CLUSTER (size = 0)
+ };
+
+ EXPECT_EQ(-1, parser_->Parse(kBuffer, sizeof(kBuffer)));
+}
+
+TEST_F(WebMClusterParserTest, ParseInvalidUnknownButActuallyZeroSizedCluster) {
+ const uint8 kBuffer[] = {
+ 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = "unknown")
+ 0x1F, 0x43, 0xB6, 0x75, 0x85, // CLUSTER (size = 5)
+ };
+
+ EXPECT_EQ(-1, parser_->Parse(kBuffer, sizeof(kBuffer)));
+}
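+
+// Illustrative note on the raw bytes above (an annotation, not part of the
+// original patch): the fifth byte of each element header is an EBML
+// variable-length size. In a 1-byte VINT the high bit is the length marker
+// and the low 7 bits carry the value:
+//
+//   0x80  // 1000 0000 -> size 0
+//   0x85  // 1000 0101 -> size 5
+//   0xFF  // 1111 1111 -> the reserved "unknown size" value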
+
+TEST_F(WebMClusterParserTest, ParseInvalidTextBlockGroupWithoutDuration) {
+ // Text track frames must have explicitly specified BlockGroup BlockDurations.
+ TextTracks text_tracks;
+
+ text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
+
+ parser_.reset(new WebMClusterParser(kTimecodeScale,
+ kAudioTrackNum,
+ kNoTimestamp(),
+ kVideoTrackNum,
+ kNoTimestamp(),
+ text_tracks,
+ std::set<int64>(),
+ std::string(),
+ std::string(),
+ LogCB()));
+
+ const BlockInfo kBlockInfo[] = {
+ { kTextTrackNum, 33, -42, false },
+ };
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_LT(result, 0);
+}
+
+TEST_F(WebMClusterParserTest, ParseWithDefaultDurationsSimpleBlocks) {
+ InSequence s;
+ ResetParserToHaveDefaultDurations();
+
+ EXPECT_LT(kTestAudioFrameDefaultDurationInMs, 23);
+ EXPECT_LT(kTestVideoFrameDefaultDurationInMs, 33);
+
+ const BlockInfo kBlockInfo[] = {
+ { kAudioTrackNum, 0, kTestAudioFrameDefaultDurationInMs, true },
+ { kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true },
+ { kVideoTrackNum, 33, kTestVideoFrameDefaultDurationInMs, true },
+ { kAudioTrackNum, 46, kTestAudioFrameDefaultDurationInMs, true },
+ { kVideoTrackNum, 67, kTestVideoFrameDefaultDurationInMs, true },
+ { kAudioTrackNum, 69, kTestAudioFrameDefaultDurationInMs, true },
+ { kVideoTrackNum, 100, kTestVideoFrameDefaultDurationInMs, true },
+ };
+
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+
+ // Send slightly less than the full cluster so all but the last block is
+ // parsed. Though all the blocks are simple blocks, none should be held aside
+ // for duration estimation prior to end of cluster detection because all the
+ // tracks have DefaultDurations.
+ int result = parser_->Parse(cluster->data(), cluster->size() - 1);
+ EXPECT_GT(result, 0);
+ EXPECT_LT(result, cluster->size());
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count - 1));
+
+ parser_->Reset();
+
+ // Now parse a whole cluster to verify that all the blocks will get parsed.
+ result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
+ InSequence s;
+
+  // Absent DefaultDuration information, a SimpleBlock's duration is derived
+  // from the timestamp delta to the track's next buffer within the cluster,
+  // and is estimated as the lowest non-zero duration seen so far when it is
+  // the track's last buffer in the cluster (independently for each track).
+ const BlockInfo kBlockInfo1[] = {
+ { kAudioTrackNum, 0, 23, true },
+ { kAudioTrackNum, 23, 22, true },
+ { kVideoTrackNum, 33, 33, true },
+ { kAudioTrackNum, 45, 23, true },
+ { kVideoTrackNum, 66, 34, true },
+ { kAudioTrackNum, 68, 22, true }, // Estimated from minimum audio dur
+ { kVideoTrackNum, 100, 33, true }, // Estimated from minimum video dur
+ };
+
+ int block_count1 = arraysize(kBlockInfo1);
+ scoped_ptr<Cluster> cluster1(CreateCluster(0, kBlockInfo1, block_count1));
+
+ // Send slightly less than the first full cluster so all but the last video
+ // block is parsed. Verify the last fully parsed audio and video buffer are
+ // both missing from the result (parser should hold them aside for duration
+ // estimation prior to end of cluster detection in the absence of
+ // DefaultDurations.)
+ int result = parser_->Parse(cluster1->data(), cluster1->size() - 1);
+ EXPECT_GT(result, 0);
+ EXPECT_LT(result, cluster1->size());
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo1, block_count1 - 3));
+ EXPECT_EQ(3UL, parser_->GetAudioBuffers().size());
+ EXPECT_EQ(1UL, parser_->GetVideoBuffers().size());
+
+ parser_->Reset();
+
+ // Now parse the full first cluster and verify all the blocks are parsed.
+ result = parser_->Parse(cluster1->data(), cluster1->size());
+ EXPECT_EQ(cluster1->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo1, block_count1));
+
+ // Verify that the estimated frame duration is tracked across clusters for
+ // each track.
+ const BlockInfo kBlockInfo2[] = {
+ { kAudioTrackNum, 200, 22, true }, // Estimate carries over across clusters
+ { kVideoTrackNum, 201, 33, true }, // Estimate carries over across clusters
+ };
+
+ int block_count2 = arraysize(kBlockInfo2);
+ scoped_ptr<Cluster> cluster2(CreateCluster(0, kBlockInfo2, block_count2));
+ result = parser_->Parse(cluster2->data(), cluster2->size());
+ EXPECT_EQ(cluster2->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo2, block_count2));
+}
+
+TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
+ InSequence s;
+
+  // Absent DefaultDuration and BlockDuration information, a BlockGroup
+  // block's duration is derived from the timestamp delta to the track's next
+  // buffer within the cluster, and is estimated as the lowest non-zero
+  // duration seen so far when it is the track's last buffer in the cluster
+  // (independently for each track).
+ const BlockInfo kBlockInfo1[] = {
+ { kAudioTrackNum, 0, -23, false },
+ { kAudioTrackNum, 23, -22, false },
+ { kVideoTrackNum, 33, -33, false },
+ { kAudioTrackNum, 45, -23, false },
+ { kVideoTrackNum, 66, -34, false },
+ { kAudioTrackNum, 68, -22, false }, // Estimated from minimum audio dur
+ { kVideoTrackNum, 100, -33, false }, // Estimated from minimum video dur
+ };
+
+ int block_count1 = arraysize(kBlockInfo1);
+ scoped_ptr<Cluster> cluster1(CreateCluster(0, kBlockInfo1, block_count1));
+
+ // Send slightly less than the first full cluster so all but the last video
+ // block is parsed. Verify the last fully parsed audio and video buffer are
+ // both missing from the result (parser should hold them aside for duration
+ // estimation prior to end of cluster detection in the absence of
+ // DefaultDurations.)
+ int result = parser_->Parse(cluster1->data(), cluster1->size() - 1);
+ EXPECT_GT(result, 0);
+ EXPECT_LT(result, cluster1->size());
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo1, block_count1 - 3));
+ EXPECT_EQ(3UL, parser_->GetAudioBuffers().size());
+ EXPECT_EQ(1UL, parser_->GetVideoBuffers().size());
+
+ parser_->Reset();
+
+ // Now parse the full first cluster and verify all the blocks are parsed.
+ result = parser_->Parse(cluster1->data(), cluster1->size());
+ EXPECT_EQ(cluster1->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo1, block_count1));
+
+ // Verify that the estimated frame duration is tracked across clusters for
+ // each track.
+ const BlockInfo kBlockInfo2[] = {
+ { kAudioTrackNum, 200, -22, false },
+ { kVideoTrackNum, 201, -33, false },
+ };
+
+ int block_count2 = arraysize(kBlockInfo2);
+ scoped_ptr<Cluster> cluster2(CreateCluster(0, kBlockInfo2, block_count2));
+ result = parser_->Parse(cluster2->data(), cluster2->size());
+ EXPECT_EQ(cluster2->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo2, block_count2));
+}
+
+// TODO(wolenetz): Is parser behavior correct? See http://crbug.com/363433.
+TEST_F(WebMClusterParserTest,
+ ParseWithDefaultDurationsBlockGroupsWithoutDurations) {
+ InSequence s;
+ ResetParserToHaveDefaultDurations();
+
+ EXPECT_LT(kTestAudioFrameDefaultDurationInMs, 23);
+ EXPECT_LT(kTestVideoFrameDefaultDurationInMs, 33);
+
+ const BlockInfo kBlockInfo[] = {
+ { kAudioTrackNum, 0, -kTestAudioFrameDefaultDurationInMs, false },
+ { kAudioTrackNum, 23, -kTestAudioFrameDefaultDurationInMs, false },
+ { kVideoTrackNum, 33, -kTestVideoFrameDefaultDurationInMs, false },
+ { kAudioTrackNum, 46, -kTestAudioFrameDefaultDurationInMs, false },
+ { kVideoTrackNum, 67, -kTestVideoFrameDefaultDurationInMs, false },
+ { kAudioTrackNum, 69, -kTestAudioFrameDefaultDurationInMs, false },
+ { kVideoTrackNum, 100, -kTestVideoFrameDefaultDurationInMs, false },
+ };
+
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+
+ // Send slightly less than the full cluster so all but the last block is
+ // parsed. None should be held aside for duration estimation prior to end of
+ // cluster detection because all the tracks have DefaultDurations.
+ int result = parser_->Parse(cluster->data(), cluster->size() - 1);
+ EXPECT_GT(result, 0);
+ EXPECT_LT(result, cluster->size());
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count - 1));
+
+ parser_->Reset();
+
+ // Now parse a whole cluster to verify that all the blocks will get parsed.
+ result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest,
+ ParseDegenerateClusterYieldsHardcodedEstimatedDurations) {
+ const BlockInfo kBlockInfo[] = {
+ {
+ kAudioTrackNum,
+ 0,
+ WebMClusterParser::kDefaultAudioBufferDurationInMs,
+ true
+ }, {
+ kVideoTrackNum,
+ 0,
+ WebMClusterParser::kDefaultVideoBufferDurationInMs,
+ true
+ },
+ };
+
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
+TEST_F(WebMClusterParserTest,
+ ParseDegenerateClusterWithDefaultDurationsYieldsDefaultDurations) {
+ ResetParserToHaveDefaultDurations();
+
+ const BlockInfo kBlockInfo[] = {
+ { kAudioTrackNum, 0, kTestAudioFrameDefaultDurationInMs, true },
+ { kVideoTrackNum, 0, kTestVideoFrameDefaultDurationInMs, true },
+ };
+
+ int block_count = arraysize(kBlockInfo);
+ scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ int result = parser_->Parse(cluster->data(), cluster->size());
+ EXPECT_EQ(cluster->size(), result);
+ ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+}
+
+} // namespace media
diff --git a/chromium/media/webm/webm_constants.cc b/chromium/media/formats/webm/webm_constants.cc
index 13ae086eca0..d6c5536fa3e 100644
--- a/chromium/media/webm/webm_constants.cc
+++ b/chromium/media/formats/webm/webm_constants.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_constants.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
diff --git a/chromium/media/webm/webm_constants.h b/chromium/media/formats/webm/webm_constants.h
index 3a35dbab377..8a0b8a7185e 100644
--- a/chromium/media/webm/webm_constants.h
+++ b/chromium/media/formats/webm/webm_constants.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_CONSTANTS_H_
-#define MEDIA_WEBM_WEBM_CONSTANTS_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_CONSTANTS_H_
+#define MEDIA_FORMATS_WEBM_WEBM_CONSTANTS_H_
#include "base/basictypes.h"
#include "media/base/media_export.h"
@@ -201,7 +201,7 @@ const int kWebMIdVoid = 0xEC;
const int kWebMIdWritingApp = 0x5741;
const int64 kWebMReservedId = 0x1FFFFFFF;
-const int64 kWebMUnknownSize = GG_LONGLONG(0x00FFFFFFFFFFFFFF);
+const int64 kWebMUnknownSize = 0x00FFFFFFFFFFFFFFLL;
const uint8 kWebMFlagKeyframe = 0x80;
@@ -226,4 +226,4 @@ MEDIA_EXPORT extern const char kWebMCodecMetadata[];
} // namespace media
-#endif // MEDIA_WEBM_WEBM_CONSTANTS_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_CONSTANTS_H_
diff --git a/chromium/media/webm/webm_content_encodings.cc b/chromium/media/formats/webm/webm_content_encodings.cc
index 9789c0f3028..157c6ac4311 100644
--- a/chromium/media/webm/webm_content_encodings.cc
+++ b/chromium/media/formats/webm/webm_content_encodings.cc
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/logging.h"
-#include "media/webm/webm_content_encodings.h"
+#include "media/formats/webm/webm_content_encodings.h"
namespace media {
diff --git a/chromium/media/webm/webm_content_encodings.h b/chromium/media/formats/webm/webm_content_encodings.h
index 2866f253f06..5890ecf04f3 100644
--- a/chromium/media/webm/webm_content_encodings.h
+++ b/chromium/media/formats/webm/webm_content_encodings.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_CONTENT_ENCODINGS_H_
-#define MEDIA_WEBM_WEBM_CONTENT_ENCODINGS_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_H_
+#define MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_H_
#include <string>
@@ -85,4 +85,4 @@ class MEDIA_EXPORT ContentEncoding {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_CONTENT_ENCODINGS_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_H_
diff --git a/chromium/media/webm/webm_content_encodings_client.cc b/chromium/media/formats/webm/webm_content_encodings_client.cc
index bcf964ed319..f2294de7024 100644
--- a/chromium/media/webm/webm_content_encodings_client.cc
+++ b/chromium/media/formats/webm/webm_content_encodings_client.cc
@@ -1,12 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_content_encodings_client.h"
+#include "media/formats/webm/webm_content_encodings_client.h"
#include "base/logging.h"
#include "base/stl_util.h"
-#include "media/webm/webm_constants.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
diff --git a/chromium/media/webm/webm_content_encodings_client.h b/chromium/media/formats/webm/webm_content_encodings_client.h
index e477fcf3809..d00281ec569 100644
--- a/chromium/media/webm/webm_content_encodings_client.h
+++ b/chromium/media/formats/webm/webm_content_encodings_client.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
-#define MEDIA_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
+#define MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
#include <vector>
@@ -12,8 +12,8 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
-#include "media/webm/webm_content_encodings.h"
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/webm_content_encodings.h"
+#include "media/formats/webm/webm_parser.h"
namespace media {
@@ -47,4 +47,4 @@ class MEDIA_EXPORT WebMContentEncodingsClient : public WebMParserClient {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
diff --git a/chromium/media/webm/webm_content_encodings_client_unittest.cc b/chromium/media/formats/webm/webm_content_encodings_client_unittest.cc
index bb9e6943126..e124f2d883a 100644
--- a/chromium/media/webm/webm_content_encodings_client_unittest.cc
+++ b/chromium/media/formats/webm/webm_content_encodings_client_unittest.cc
@@ -1,11 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/bind.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_content_encodings_client.h"
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_content_encodings_client.h"
+#include "media/formats/webm/webm_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/webm/webm_crypto_helpers.cc b/chromium/media/formats/webm/webm_crypto_helpers.cc
index a663f3cd6a6..bd473bc23ed 100644
--- a/chromium/media/webm/webm_crypto_helpers.cc
+++ b/chromium/media/formats/webm/webm_crypto_helpers.cc
@@ -1,13 +1,13 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_crypto_helpers.h"
+#include "media/formats/webm/webm_crypto_helpers.h"
#include "base/logging.h"
#include "base/sys_byteorder.h"
#include "media/base/decrypt_config.h"
-#include "media/webm/webm_constants.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
namespace {
@@ -24,12 +24,13 @@ std::string GenerateWebMCounterBlock(const uint8* iv, int iv_size) {
} // namespace anonymous
-scoped_ptr<DecryptConfig> WebMCreateDecryptConfig(
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size) {
+bool WebMCreateDecryptConfig(const uint8* data, int data_size,
+ const uint8* key_id, int key_id_size,
+ scoped_ptr<DecryptConfig>* decrypt_config,
+ int* data_offset) {
if (data_size < kWebMSignalByteSize) {
DVLOG(1) << "Got a block from an encrypted stream with no data.";
- return scoped_ptr<DecryptConfig>();
+ return false;
}
uint8 signal_byte = data[0];
@@ -43,18 +44,19 @@ scoped_ptr<DecryptConfig> WebMCreateDecryptConfig(
if (signal_byte & kWebMFlagEncryptedFrame) {
if (data_size < kWebMSignalByteSize + kWebMIvSize) {
DVLOG(1) << "Got an encrypted block with not enough data " << data_size;
- return scoped_ptr<DecryptConfig>();
+ return false;
}
counter_block = GenerateWebMCounterBlock(data + frame_offset, kWebMIvSize);
frame_offset += kWebMIvSize;
}
- scoped_ptr<DecryptConfig> config(new DecryptConfig(
+ decrypt_config->reset(new DecryptConfig(
std::string(reinterpret_cast<const char*>(key_id), key_id_size),
counter_block,
- frame_offset,
std::vector<SubsampleEntry>()));
- return config.Pass();
+ *data_offset = frame_offset;
+
+ return true;
}
} // namespace media
diff --git a/chromium/media/formats/webm/webm_crypto_helpers.h b/chromium/media/formats/webm/webm_crypto_helpers.h
new file mode 100644
index 00000000000..23095f31d3d
--- /dev/null
+++ b/chromium/media/formats/webm/webm_crypto_helpers.h
@@ -0,0 +1,33 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_WEBM_WEBM_CRYPTO_HELPERS_H_
+#define MEDIA_FORMATS_WEBM_WEBM_CRYPTO_HELPERS_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/decoder_buffer.h"
+
+namespace media {
+
+// TODO(xhwang): Figure out the init data type appropriately once it's spec'ed.
+// See https://www.w3.org/Bugs/Public/show_bug.cgi?id=19096 for more
+// information.
+const char kWebMEncryptInitDataType[] = "video/webm";
+
+// Fills |decrypt_config| with an initialized DecryptConfig, which can be sent
+// to the Decryptor if the stream has potentially encrypted frames. Also sets
+// |data_offset|, which indicates where the encrypted data starts. Leaving the
+// IV empty tells the decryptor that the frame is unencrypted. Returns true if
+// |data| is valid, false otherwise, in which case |decrypt_config| and
+// |data_offset| are left unchanged. The current encrypted WebM
+// request-for-comments specification is here:
+// http://wiki.webmproject.org/encryption/webm-encryption-rfc
+bool WebMCreateDecryptConfig(const uint8* data, int data_size,
+ const uint8* key_id, int key_id_size,
+ scoped_ptr<DecryptConfig>* decrypt_config,
+ int* data_offset);
+
+} // namespace media
+
+#endif  // MEDIA_FORMATS_WEBM_WEBM_CRYPTO_HELPERS_H_
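+
+// Illustrative caller sketch (an annotation, not part of the original patch)
+// showing the new out-parameter style that replaces the old scoped_ptr return
+// value; |data|, |data_size|, |key_id|, and |key_id_size| are hypothetical:
+//
+//   scoped_ptr<DecryptConfig> decrypt_config;
+//   int data_offset = 0;
+//   if (!WebMCreateDecryptConfig(data, data_size, key_id, key_id_size,
+//                                &decrypt_config, &data_offset)) {
+//     return false;  // Malformed encrypted block.
+//   }
+//   // The frame payload starts at data + data_offset; an empty IV in
+//   // |decrypt_config| marks the frame as clear (unencrypted).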
diff --git a/chromium/media/webm/webm_info_parser.cc b/chromium/media/formats/webm/webm_info_parser.cc
index 6df1690bf89..6309c21e9db 100644
--- a/chromium/media/webm/webm_info_parser.cc
+++ b/chromium/media/formats/webm/webm_info_parser.cc
@@ -1,11 +1,11 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_info_parser.h"
+#include "media/formats/webm/webm_info_parser.h"
#include "base/logging.h"
-#include "media/webm/webm_constants.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
@@ -74,6 +74,25 @@ bool WebMInfoParser::OnFloat(int id, double val) {
}
bool WebMInfoParser::OnBinary(int id, const uint8* data, int size) {
+ if (id == kWebMIdDateUTC) {
+ if (size != 8)
+ return false;
+
+ int64 date_in_nanoseconds = 0;
+ for (int i = 0; i < size; ++i)
+ date_in_nanoseconds = (date_in_nanoseconds << 8) | data[i];
+
+ base::Time::Exploded exploded_epoch;
+ exploded_epoch.year = 2001;
+ exploded_epoch.month = 1;
+ exploded_epoch.day_of_month = 1;
+ exploded_epoch.hour = 0;
+ exploded_epoch.minute = 0;
+ exploded_epoch.second = 0;
+ exploded_epoch.millisecond = 0;
+ date_utc_ = base::Time::FromUTCExploded(exploded_epoch) +
+ base::TimeDelta::FromMicroseconds(date_in_nanoseconds / 1000);
+ }
return true;
}
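DateUTC stores a signed, big-endian count of nanoseconds since the Matroska
epoch of 2001-01-01T00:00:00 UTC, which is what the block above rebuilds. The
same conversion as a standalone sketch, assuming an 8-byte |data| buffer:

    // Rebuild the big-endian nanosecond count, then offset from 2001-01-01.
    int64 ns = 0;
    for (int i = 0; i < 8; ++i)
      ns = (ns << 8) | data[i];
    base::Time::Exploded epoch = {0};  // Zeroes hour/minute/second/ms.
    epoch.year = 2001;
    epoch.month = 1;
    epoch.day_of_month = 1;
    base::Time date_utc = base::Time::FromUTCExploded(epoch) +
                          base::TimeDelta::FromMicroseconds(ns / 1000);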
diff --git a/chromium/media/webm/webm_info_parser.h b/chromium/media/formats/webm/webm_info_parser.h
index ab5de43b1df..36aac928f90 100644
--- a/chromium/media/webm/webm_info_parser.h
+++ b/chromium/media/formats/webm/webm_info_parser.h
@@ -1,13 +1,14 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_INFO_PARSER_H_
-#define MEDIA_WEBM_WEBM_INFO_PARSER_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_INFO_PARSER_H_
+#define MEDIA_FORMATS_WEBM_WEBM_INFO_PARSER_H_
#include "base/compiler_specific.h"
+#include "base/time/time.h"
#include "media/base/media_export.h"
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/webm_parser.h"
namespace media {
@@ -26,6 +27,7 @@ class MEDIA_EXPORT WebMInfoParser : public WebMParserClient {
int64 timecode_scale() const { return timecode_scale_; }
double duration() const { return duration_; }
+ base::Time date_utc() const { return date_utc_; }
private:
// WebMParserClient methods
@@ -38,10 +40,11 @@ class MEDIA_EXPORT WebMInfoParser : public WebMParserClient {
int64 timecode_scale_;
double duration_;
+ base::Time date_utc_;
DISALLOW_COPY_AND_ASSIGN(WebMInfoParser);
};
} // namespace media
-#endif // MEDIA_WEBM_WEBM_INFO_PARSER_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_INFO_PARSER_H_
diff --git a/chromium/media/webm/webm_parser.cc b/chromium/media/formats/webm/webm_parser.cc
index f1509abb830..1baf12b4a59 100644
--- a/chromium/media/webm/webm_parser.cc
+++ b/chromium/media/formats/webm/webm_parser.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/webm_parser.h"
// This file contains code to parse WebM file elements. It was created
// from information in the Matroska spec.
@@ -14,7 +14,8 @@
#include <iomanip>
#include "base/logging.h"
-#include "media/webm/webm_constants.h"
+#include "base/numerics/safe_conversions.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
@@ -118,7 +119,7 @@ static const ElementIdInfo kBlockGroupIds[] = {
{UINT, kWebMIdReferencePriority},
{BINARY, kWebMIdReferenceBlock},
{BINARY, kWebMIdCodecState},
- {UINT, kWebMIdDiscardPadding},
+ {BINARY, kWebMIdDiscardPadding},
{LIST, kWebMIdSlices},
};
@@ -548,10 +549,15 @@ static int ParseUInt(const uint8* buf, int size, int id,
return -1;
// Read in the big-endian integer.
- int64 value = 0;
+ uint64 value = 0;
for (int i = 0; i < size; ++i)
value = (value << 8) | buf[i];
+ // We use int64 in place of uint64 everywhere for convenience. See this bug
+ // for more details: http://crbug.com/366750#c3
+ if (!base::IsValueInRangeForNumericType<int64>(value))
+ return -1;
+
if (!client->OnUInt(id, value))
return -1;
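The signed-to-unsigned change above matters for 8-byte UInt elements, which
can encode values up to 2^64 - 1. A sketch of the overflow case the new guard
rejects:

    // An all-ones 8-byte EBML UInt does not fit in the int64 passed to
    // OnUInt(), so the parser now fails instead of truncating it negative.
    const uint8 buf[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
    uint64 value = 0;
    for (int i = 0; i < 8; ++i)
      value = (value << 8) | buf[i];
    // value == 0xFFFFFFFFFFFFFFFF; IsValueInRangeForNumericType<int64>()
    // returns false, and ParseUInt() returns -1.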
diff --git a/chromium/media/webm/webm_parser.h b/chromium/media/formats/webm/webm_parser.h
index 68611a85bd4..854db685f88 100644
--- a/chromium/media/webm/webm_parser.h
+++ b/chromium/media/formats/webm/webm_parser.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_PARSER_H_
-#define MEDIA_WEBM_WEBM_PARSER_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_PARSER_H_
+#define MEDIA_FORMATS_WEBM_WEBM_PARSER_H_
#include <string>
#include <vector>
@@ -155,4 +155,4 @@ int MEDIA_EXPORT WebMParseElementHeader(const uint8* buf, int size,
} // namespace media
-#endif // MEDIA_WEBM_WEBM_PARSER_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_PARSER_H_
diff --git a/chromium/media/webm/webm_parser_unittest.cc b/chromium/media/formats/webm/webm_parser_unittest.cc
index cb71fe98bda..a1249e89c42 100644
--- a/chromium/media/webm/webm_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_parser_unittest.cc
@@ -1,10 +1,10 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/cluster_builder.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/cluster_builder.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -77,8 +77,7 @@ TEST_F(WebMParserTest, EmptyCluster) {
EXPECT_CALL(client_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
WebMListParser parser(kWebMIdCluster, &client_);
- int result = parser.Parse(kEmptyCluster, size);
- EXPECT_EQ(size, result);
+ EXPECT_EQ(size, parser.Parse(kEmptyCluster, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
@@ -96,8 +95,7 @@ TEST_F(WebMParserTest, EmptyClusterInSegment) {
EXPECT_CALL(client_, OnListEnd(kWebMIdSegment)).WillOnce(Return(true));
WebMListParser parser(kWebMIdSegment, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(size, result);
+ EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
@@ -108,14 +106,12 @@ TEST_F(WebMParserTest, ChildNonListLargerThanParent) {
0x1F, 0x43, 0xB6, 0x75, 0x81, // CLUSTER (size = 1)
0xE7, 0x81, 0x01, // Timecode (size=1, value=1)
};
- int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdCluster, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(-1, result);
+ EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
@@ -126,14 +122,12 @@ TEST_F(WebMParserTest, ChildListLargerThanParent) {
0x18, 0x53, 0x80, 0x67, 0x85, // SEGMENT (size = 5)
0x1F, 0x43, 0xB6, 0x75, 0x81, 0x11 // CLUSTER (size = 1)
};
- int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdSegment, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(-1, result);
+ EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
@@ -142,11 +136,9 @@ TEST_F(WebMParserTest, ListIdDoesNotMatch) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x80, // SEGMENT (size = 0)
};
- int size = sizeof(kBuffer);
WebMListParser parser(kWebMIdCluster, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(-1, result);
+ EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
@@ -155,17 +147,49 @@ TEST_F(WebMParserTest, InvalidElementInList) {
0x18, 0x53, 0x80, 0x67, 0x82, // SEGMENT (size = 2)
0xAE, 0x80, // TrackEntry (size = 0)
};
- int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdSegment, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(-1, result);
+ EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
+ EXPECT_FALSE(parser.IsParsingComplete());
+}
+
+// Tests a specific case of InvalidElementInList: an EBMLHEADER within a
+// known-size cluster must cause a parse error.
+TEST_F(WebMParserTest, InvalidEBMLHeaderInCluster) {
+ const uint8 kBuffer[] = {
+ 0x1F, 0x43, 0xB6, 0x75, 0x85, // CLUSTER (size = 5)
+ 0x1A, 0x45, 0xDF, 0xA3, 0x80, // EBMLHEADER (size = 0)
+ };
+
+ InSequence s;
+ EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
+
+ WebMListParser parser(kWebMIdCluster, &client_);
+ EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
+// Verify that EBMLHEADER ends a preceding "unknown"-sized CLUSTER.
+TEST_F(WebMParserTest, UnknownSizeClusterFollowedByEBMLHeader) {
+ const uint8 kBuffer[] = {
+ 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 0 due to:)
+ 0x1A, 0x45, 0xDF, 0xA3, 0x80, // EBMLHEADER (size = 0)
+ };
+
+ InSequence s;
+ EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
+ EXPECT_CALL(client_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
+
+ WebMListParser parser(kWebMIdCluster, &client_);
+
+ // List parse should consume the CLUSTER but not the EBMLHEADER.
+ EXPECT_EQ(5, parser.Parse(kBuffer, sizeof(kBuffer)));
+ EXPECT_TRUE(parser.IsParsingComplete());
+}
+
TEST_F(WebMParserTest, VoidAndCRC32InList) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x99, // SEGMENT (size = 25)
@@ -184,8 +208,7 @@ TEST_F(WebMParserTest, VoidAndCRC32InList) {
EXPECT_CALL(client_, OnListEnd(kWebMIdSegment)).WillOnce(Return(true));
WebMListParser parser(kWebMIdSegment, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(size, result);
+ EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
@@ -195,8 +218,7 @@ TEST_F(WebMParserTest, ParseListElementWithSingleCall) {
CreateClusterExpectations(kBlockCount, true, &client_);
WebMListParser parser(kWebMIdCluster, &client_);
- int result = parser.Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
+ EXPECT_EQ(cluster->size(), parser.Parse(cluster->data(), cluster->size()));
EXPECT_TRUE(parser.IsParsingComplete());
}
@@ -255,8 +277,7 @@ TEST_F(WebMParserTest, Reset) {
parser.Reset();
// Now parse a whole cluster to verify that all the blocks will get parsed.
- result = parser.Parse(cluster->data(), cluster->size());
- EXPECT_EQ(result, cluster->size());
+ EXPECT_EQ(cluster->size(), parser.Parse(cluster->data(), cluster->size()));
EXPECT_TRUE(parser.IsParsingComplete());
}
@@ -288,8 +309,7 @@ TEST_F(WebMParserTest, MultipleClients) {
EXPECT_CALL(client_, OnListEnd(kWebMIdSegment)).WillOnce(Return(true));
WebMListParser parser(kWebMIdSegment, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(size, result);
+ EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
@@ -299,14 +319,12 @@ TEST_F(WebMParserTest, InvalidClient) {
0x18, 0x53, 0x80, 0x67, 0x85, // SEGMENT (size = 5)
0x16, 0x54, 0xAE, 0x6B, 0x80, // TRACKS (size = 0)
};
- int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(ReturnNull());
WebMListParser parser(kWebMIdSegment, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(-1, result);
+ EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
@@ -387,8 +405,7 @@ TEST_F(WebMParserTest, ZeroPaddedStrings) {
EXPECT_CALL(client_, OnListEnd(kWebMIdEBMLHeader)).WillOnce(Return(true));
WebMListParser parser(kWebMIdEBMLHeader, &client_);
- int result = parser.Parse(kBuffer, size);
- EXPECT_EQ(size, result);
+ EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
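The size annotations in these tests follow EBML's one-byte length encoding:
the high bit is a marker, the low seven bits are the length, and the all-ones
byte 0xFF is reserved for "unknown size". A small decoding sketch
(hypothetical helper, not part of the patch):

    // Decode a one-byte EBML size field as used in the test buffers above.
    // 0x85 -> 5, 0x80 -> 0, 0x99 -> 25, 0xFF -> unknown (returned as -1).
    int DecodeOneByteEbmlSize(uint8 b) {
      DCHECK(b & 0x80);  // One-byte sizes always carry the marker bit.
      return (b == 0xFF) ? -1 : (b & 0x7F);
    }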
diff --git a/chromium/media/webm/webm_stream_parser.cc b/chromium/media/formats/webm/webm_stream_parser.cc
index 8e7d055e68c..dd200d2170f 100644
--- a/chromium/media/webm/webm_stream_parser.cc
+++ b/chromium/media/formats/webm/webm_stream_parser.cc
@@ -1,25 +1,26 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_stream_parser.h"
+#include "media/formats/webm/webm_stream_parser.h"
#include <string>
#include "base/callback.h"
+#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "media/webm/webm_cluster_parser.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_content_encodings.h"
-#include "media/webm/webm_crypto_helpers.h"
-#include "media/webm/webm_info_parser.h"
-#include "media/webm/webm_tracks_parser.h"
+#include "media/formats/webm/webm_cluster_parser.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_content_encodings.h"
+#include "media/formats/webm/webm_crypto_helpers.h"
+#include "media/formats/webm/webm_info_parser.h"
+#include "media/formats/webm/webm_tracks_parser.h"
namespace media {
WebMStreamParser::WebMStreamParser()
: state_(kWaitingForInit),
- waiting_for_buffers_(false) {
+ unknown_segment_size_(false) {
}
WebMStreamParser::~WebMStreamParser() {
@@ -28,7 +29,7 @@ WebMStreamParser::~WebMStreamParser() {
void WebMStreamParser::Init(const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ bool ignore_text_tracks,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -46,7 +47,7 @@ void WebMStreamParser::Init(const InitCB& init_cb,
init_cb_ = init_cb;
config_cb_ = config_cb;
new_buffers_cb_ = new_buffers_cb;
- text_cb_ = text_cb;
+ ignore_text_tracks_ = ignore_text_tracks;
need_key_cb_ = need_key_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
@@ -57,11 +58,12 @@ void WebMStreamParser::Flush() {
DCHECK_NE(state_, kWaitingForInit);
byte_queue_.Reset();
-
- if (state_ != kParsingClusters)
- return;
-
- cluster_parser_->Reset();
+ if (cluster_parser_)
+ cluster_parser_->Reset();
+ if (state_ == kParsingClusters) {
+ ChangeState(kParsingHeaders);
+ end_of_segment_cb_.Run();
+ }
}
bool WebMStreamParser::Parse(const uint8* buf, int size) {
@@ -140,6 +142,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
case kWebMIdCRC32:
case kWebMIdCues:
case kWebMIdChapters:
+ // TODO(matthewjheaney): Implement support for chapters.
if (cur_size < (result + element_size)) {
// We don't have the whole element yet. Signal we need more data.
return 0;
@@ -147,7 +150,19 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
// Skip the element.
return result + element_size;
break;
+ case kWebMIdCluster:
+ if (!cluster_parser_) {
+ MEDIA_LOG(log_cb_) << "Found Cluster element before Info.";
+ return -1;
+ }
+ ChangeState(kParsingClusters);
+ new_segment_cb_.Run();
+ return 0;
+ break;
case kWebMIdSegment:
+ // A Segment of unknown size indicates a live stream.
+ if (element_size == kWebMUnknownSize)
+ unknown_segment_size_ = true;
// Just consume the segment header.
return result;
break;
@@ -170,7 +185,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
cur_size -= result;
bytes_parsed += result;
- WebMTracksParser tracks_parser(log_cb_, text_cb_.is_null());
+ WebMTracksParser tracks_parser(log_cb_, ignore_text_tracks_);
result = tracks_parser.Parse(cur, cur_size);
if (result <= 0)
@@ -178,12 +193,23 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
bytes_parsed += result;
- base::TimeDelta duration = kInfiniteDuration();
+ double timecode_scale_in_us = info_parser.timecode_scale() / 1000.0;
+ InitParameters params(kInfiniteDuration());
if (info_parser.duration() > 0) {
- double mult = info_parser.timecode_scale() / 1000.0;
- int64 duration_in_us = info_parser.duration() * mult;
- duration = base::TimeDelta::FromMicroseconds(duration_in_us);
+ int64 duration_in_us = info_parser.duration() * timecode_scale_in_us;
+ params.duration = base::TimeDelta::FromMicroseconds(duration_in_us);
+ }
+
+ params.timeline_offset = info_parser.date_utc();
+
+ if (unknown_segment_size_ && (info_parser.duration() <= 0) &&
+ !info_parser.date_utc().is_null()) {
+ params.liveness = Demuxer::LIVENESS_LIVE;
+ } else if (info_parser.duration() >= 0) {
+ params.liveness = Demuxer::LIVENESS_RECORDED;
+ } else {
+ params.liveness = Demuxer::LIVENESS_UNKNOWN;
}
const AudioDecoderConfig& audio_config = tracks_parser.audio_decoder_config();
@@ -204,19 +230,17 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
cluster_parser_.reset(new WebMClusterParser(
info_parser.timecode_scale(),
tracks_parser.audio_track_num(),
+ tracks_parser.GetAudioDefaultDuration(timecode_scale_in_us),
tracks_parser.video_track_num(),
+ tracks_parser.GetVideoDefaultDuration(timecode_scale_in_us),
tracks_parser.text_tracks(),
tracks_parser.ignored_tracks(),
tracks_parser.audio_encryption_key_id(),
tracks_parser.video_encryption_key_id(),
log_cb_));
- ChangeState(kParsingClusters);
-
- if (!init_cb_.is_null()) {
- init_cb_.Run(true, duration);
- init_cb_.Reset();
- }
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(true, params);
return bytes_parsed;
}
@@ -225,64 +249,26 @@ int WebMStreamParser::ParseCluster(const uint8* data, int size) {
if (!cluster_parser_)
return -1;
- int id;
- int64 element_size;
- int result = WebMParseElementHeader(data, size, &id, &element_size);
-
- if (result <= 0)
- return result;
-
- if (id == kWebMIdCluster)
- waiting_for_buffers_ = true;
-
- // TODO(matthewjheaney): implement support for chapters
- if (id == kWebMIdCues || id == kWebMIdChapters) {
- if (size < (result + element_size)) {
- // We don't have the whole element yet. Signal we need more data.
- return 0;
- }
- // Skip the element.
- return result + element_size;
- }
-
- if (id == kWebMIdEBMLHeader) {
- ChangeState(kParsingHeaders);
- return 0;
- }
-
int bytes_parsed = cluster_parser_->Parse(data, size);
-
- if (bytes_parsed <= 0)
+ if (bytes_parsed < 0)
return bytes_parsed;
- const BufferQueue& audio_buffers = cluster_parser_->audio_buffers();
- const BufferQueue& video_buffers = cluster_parser_->video_buffers();
- bool cluster_ended = cluster_parser_->cluster_ended();
+ const BufferQueue& audio_buffers = cluster_parser_->GetAudioBuffers();
+ const BufferQueue& video_buffers = cluster_parser_->GetVideoBuffers();
+ const TextBufferQueueMap& text_map = cluster_parser_->GetTextBuffers();
- if (waiting_for_buffers_ &&
- cluster_parser_->cluster_start_time() != kNoTimestamp()) {
- new_segment_cb_.Run();
- waiting_for_buffers_ = false;
- }
+ bool cluster_ended = cluster_parser_->cluster_ended();
- if ((!audio_buffers.empty() || !video_buffers.empty()) &&
- !new_buffers_cb_.Run(audio_buffers, video_buffers)) {
+ if ((!audio_buffers.empty() || !video_buffers.empty() ||
+ !text_map.empty()) &&
+ !new_buffers_cb_.Run(audio_buffers, video_buffers, text_map)) {
return -1;
}
- WebMClusterParser::TextTrackIterator text_track_iter =
- cluster_parser_->CreateTextTrackIterator();
-
- int text_track_num;
- const BufferQueue* text_buffers;
-
- while (text_track_iter(&text_track_num, &text_buffers)) {
- if (!text_buffers->empty() && !text_cb_.Run(text_track_num, *text_buffers))
- return -1;
- }
-
- if (cluster_ended)
+ if (cluster_ended) {
+ ChangeState(kParsingHeaders);
end_of_segment_cb_.Run();
+ }
return bytes_parsed;
}
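The liveness selection in ParseInfoAndTracks() above reduces to a small
decision table. The same rule as a standalone sketch (hypothetical helper,
mirroring the patch logic):

    // Live: unknown-size Segment, no declared duration, but a DateUTC.
    // Recorded: any non-negative duration. Otherwise: unknown.
    Demuxer::Liveness ComputeLiveness(bool unknown_segment_size,
                                      double duration,
                                      base::Time date_utc) {
      if (unknown_segment_size && duration <= 0 && !date_utc.is_null())
        return Demuxer::LIVENESS_LIVE;
      if (duration >= 0)
        return Demuxer::LIVENESS_RECORDED;
      return Demuxer::LIVENESS_UNKNOWN;
    }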
diff --git a/chromium/media/webm/webm_stream_parser.h b/chromium/media/formats/webm/webm_stream_parser.h
index aec484b580e..8a41f3744ca 100644
--- a/chromium/media/webm/webm_stream_parser.h
+++ b/chromium/media/formats/webm/webm_stream_parser.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_STREAM_PARSER_H_
-#define MEDIA_WEBM_WEBM_STREAM_PARSER_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_STREAM_PARSER_H_
+#define MEDIA_FORMATS_WEBM_WEBM_STREAM_PARSER_H_
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
@@ -25,7 +25,7 @@ class WebMStreamParser : public StreamParser {
// StreamParser implementation.
virtual void Init(const InitCB& init_cb, const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ bool ignore_text_tracks,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
@@ -69,16 +69,14 @@ class WebMStreamParser : public StreamParser {
InitCB init_cb_;
NewConfigCB config_cb_;
NewBuffersCB new_buffers_cb_;
- NewTextBuffersCB text_cb_;
+ bool ignore_text_tracks_;
NeedKeyCB need_key_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
LogCB log_cb_;
- // True if a new cluster id has been seen, but no audio or video buffers have
- // been parsed yet.
- bool waiting_for_buffers_;
+ bool unknown_segment_size_;
scoped_ptr<WebMClusterParser> cluster_parser_;
ByteQueue byte_queue_;
@@ -88,4 +86,4 @@ class WebMStreamParser : public StreamParser {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_STREAM_PARSER_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_STREAM_PARSER_H_
diff --git a/chromium/media/webm/webm_tracks_parser.cc b/chromium/media/formats/webm/webm_tracks_parser.cc
index 771480fdf55..ed7fd521be1 100644
--- a/chromium/media/webm/webm_tracks_parser.cc
+++ b/chromium/media/formats/webm/webm_tracks_parser.cc
@@ -1,15 +1,15 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_tracks_parser.h"
+#include "media/formats/webm/webm_tracks_parser.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "media/base/buffers.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_content_encodings.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_content_encodings.h"
namespace media {
@@ -29,14 +29,31 @@ static TextKind CodecIdToTextKind(const std::string& codec_id) {
return kTextNone;
}
+static base::TimeDelta PrecisionCappedDefaultDuration(
+ const double timecode_scale_in_us, const int64 duration_in_ns) {
+ if (duration_in_ns <= 0)
+ return kNoTimestamp();
+
+ int64 mult = duration_in_ns / 1000;
+ mult /= timecode_scale_in_us;
+ if (mult == 0)
+ return kNoTimestamp();
+
+ mult = static_cast<double>(mult) * timecode_scale_in_us;
+ return base::TimeDelta::FromMicroseconds(mult);
+}
+
WebMTracksParser::WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks)
: track_type_(-1),
track_num_(-1),
track_uid_(-1),
seek_preroll_(-1),
codec_delay_(-1),
+ default_duration_(-1),
audio_track_num_(-1),
+ audio_default_duration_(-1),
video_track_num_(-1),
+ video_default_duration_(-1),
ignore_text_tracks_(ignore_text_tracks),
log_cb_(log_cb),
audio_client_(log_cb),
@@ -49,11 +66,14 @@ int WebMTracksParser::Parse(const uint8* buf, int size) {
track_type_ = -1;
track_num_ = -1;
track_uid_ = -1;
+ default_duration_ = -1;
track_name_.clear();
track_language_.clear();
audio_track_num_ = -1;
+ audio_default_duration_ = -1;
audio_decoder_config_ = AudioDecoderConfig();
video_track_num_ = -1;
+ video_default_duration_ = -1;
video_decoder_config_ = VideoDecoderConfig();
text_tracks_.clear();
ignored_tracks_.clear();
@@ -68,6 +88,18 @@ int WebMTracksParser::Parse(const uint8* buf, int size) {
return parser.IsParsingComplete() ? result : 0;
}
+base::TimeDelta WebMTracksParser::GetAudioDefaultDuration(
+ const double timecode_scale_in_us) const {
+ return PrecisionCappedDefaultDuration(timecode_scale_in_us,
+ audio_default_duration_);
+}
+
+base::TimeDelta WebMTracksParser::GetVideoDefaultDuration(
+ const double timecode_scale_in_us) const {
+ return PrecisionCappedDefaultDuration(timecode_scale_in_us,
+ video_default_duration_);
+}
+
WebMParserClient* WebMTracksParser::OnListStart(int id) {
if (id == kWebMIdContentEncodings) {
DCHECK(!track_content_encodings_client_.get());
@@ -79,6 +111,7 @@ WebMParserClient* WebMTracksParser::OnListStart(int id) {
if (id == kWebMIdTrackEntry) {
track_type_ = -1;
track_num_ = -1;
+ default_duration_ = -1;
track_name_.clear();
track_language_.clear();
codec_id_ = "";
@@ -165,6 +198,12 @@ bool WebMTracksParser::OnListEnd(int id) {
audio_track_num_ = track_num_;
audio_encryption_key_id_ = encryption_key_id;
+ if (default_duration_ == 0) {
+ MEDIA_LOG(log_cb_) << "Illegal 0ns audio TrackEntry DefaultDuration";
+ return false;
+ }
+ audio_default_duration_ = default_duration_;
+
DCHECK(!audio_decoder_config_.IsValidConfig());
if (!audio_client_.InitializeConfig(
codec_id_, codec_private_, seek_preroll_, codec_delay_,
@@ -180,6 +219,12 @@ bool WebMTracksParser::OnListEnd(int id) {
video_track_num_ = track_num_;
video_encryption_key_id_ = encryption_key_id;
+ if (default_duration_ == 0) {
+ MEDIA_LOG(log_cb_) << "Illegal 0ns video TrackEntry DefaultDuration";
+ return false;
+ }
+ video_default_duration_ = default_duration_;
+
DCHECK(!video_decoder_config_.IsValidConfig());
if (!video_client_.InitializeConfig(
codec_id_, codec_private_, !video_encryption_key_id_.empty(),
@@ -210,6 +255,7 @@ bool WebMTracksParser::OnListEnd(int id) {
track_type_ = -1;
track_num_ = -1;
track_uid_ = -1;
+ default_duration_ = -1;
track_name_.clear();
track_language_.clear();
codec_id_ = "";
@@ -243,6 +289,9 @@ bool WebMTracksParser::OnUInt(int id, int64 val) {
case kWebMIdCodecDelay:
dst = &codec_delay_;
break;
+ case kWebMIdDefaultDuration:
+ dst = &default_duration_;
+ break;
default:
return true;
}
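PrecisionCappedDefaultDuration() above truncates the nanosecond
DefaultDuration to whole timecode-scale units before converting to
microseconds, which is what the 12345678 ns cases in the new unit tests
exercise. The arithmetic, step by step, for a 1000 us timecode scale:

    // duration_in_ns = 12345678, timecode_scale_in_us = 1000.0
    int64 mult = 12345678 / 1000;  // 12345 us
    mult /= 1000.0;                // 12 whole timecode units (12.345 truncated)
    // 12 * 1000.0 = 12000 -> base::TimeDelta::FromMicroseconds(12000),
    // matching the GetAudioDefaultDuration() expectation in the tests.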
diff --git a/chromium/media/webm/webm_tracks_parser.h b/chromium/media/formats/webm/webm_tracks_parser.h
index d489235d08a..61d79af84fa 100644
--- a/chromium/media/webm/webm_tracks_parser.h
+++ b/chromium/media/formats/webm/webm_tracks_parser.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_TRACKS_PARSER_H_
-#define MEDIA_WEBM_WEBM_TRACKS_PARSER_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_TRACKS_PARSER_H_
+#define MEDIA_FORMATS_WEBM_WEBM_TRACKS_PARSER_H_
#include <map>
#include <set>
@@ -12,14 +12,15 @@
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_log.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
-#include "media/webm/webm_audio_client.h"
-#include "media/webm/webm_content_encodings_client.h"
-#include "media/webm/webm_parser.h"
-#include "media/webm/webm_video_client.h"
+#include "media/formats/webm/webm_audio_client.h"
+#include "media/formats/webm/webm_content_encodings_client.h"
+#include "media/formats/webm/webm_parser.h"
+#include "media/formats/webm/webm_video_client.h"
namespace media {
@@ -38,6 +39,16 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
int64 audio_track_num() const { return audio_track_num_; }
int64 video_track_num() const { return video_track_num_; }
+
+ // If the TrackEntry DefaultDuration field was present for the associated
+ // audio or video track, returns that value converted from ns to
+ // base::TimeDelta with precision no finer than |timecode_scale_in_us|.
+ // Otherwise returns kNoTimestamp().
+ base::TimeDelta GetAudioDefaultDuration(
+ const double timecode_scale_in_us) const;
+ base::TimeDelta GetVideoDefaultDuration(
+ const double timecode_scale_in_us) const;
+
const std::set<int64>& ignored_tracks() const { return ignored_tracks_; }
const std::string& audio_encryption_key_id() const {
@@ -80,10 +91,13 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
std::vector<uint8> codec_private_;
int64 seek_preroll_;
int64 codec_delay_;
+ int64 default_duration_;
scoped_ptr<WebMContentEncodingsClient> track_content_encodings_client_;
int64 audio_track_num_;
+ int64 audio_default_duration_;
int64 video_track_num_;
+ int64 video_default_duration_;
bool ignore_text_tracks_;
TextTracks text_tracks_;
std::set<int64> ignored_tracks_;
@@ -102,4 +116,4 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_TRACKS_PARSER_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_TRACKS_PARSER_H_
diff --git a/chromium/media/formats/webm/webm_tracks_parser_unittest.cc b/chromium/media/formats/webm/webm_tracks_parser_unittest.cc
new file mode 100644
index 00000000000..b81aa2173f0
--- /dev/null
+++ b/chromium/media/formats/webm/webm_tracks_parser_unittest.cc
@@ -0,0 +1,185 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "media/base/channel_layout.h"
+#include "media/formats/webm/tracks_builder.h"
+#include "media/formats/webm/webm_constants.h"
+#include "media/formats/webm/webm_tracks_parser.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::_;
+
+namespace media {
+
+static const double kDefaultTimecodeScaleInUs = 1000.0; // 1 ms resolution
+
+class WebMTracksParserTest : public testing::Test {
+ public:
+ WebMTracksParserTest() {}
+};
+
+static void VerifyTextTrackInfo(const uint8* buffer,
+ int buffer_size,
+ TextKind text_kind,
+ const std::string& name,
+ const std::string& language) {
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), false));
+
+ int result = parser->Parse(buffer, buffer_size);
+ EXPECT_GT(result, 0);
+ EXPECT_EQ(result, buffer_size);
+
+ const WebMTracksParser::TextTracks& text_tracks = parser->text_tracks();
+ EXPECT_EQ(text_tracks.size(), WebMTracksParser::TextTracks::size_type(1));
+
+ const WebMTracksParser::TextTracks::const_iterator itr = text_tracks.begin();
+ EXPECT_EQ(itr->first, 1); // track num
+
+ const TextTrackConfig& config = itr->second;
+ EXPECT_EQ(config.kind(), text_kind);
+ EXPECT_TRUE(config.label() == name);
+ EXPECT_TRUE(config.language() == language);
+}
+
+TEST_F(WebMTracksParserTest, SubtitleNoNameNoLang) {
+ InSequence s;
+
+ TracksBuilder tb;
+ tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "", "");
+
+ const std::vector<uint8> buf = tb.Finish();
+ VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "");
+}
+
+TEST_F(WebMTracksParserTest, SubtitleYesNameNoLang) {
+ InSequence s;
+
+ TracksBuilder tb;
+ tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "Spock", "");
+
+ const std::vector<uint8> buf = tb.Finish();
+ VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Spock", "");
+}
+
+TEST_F(WebMTracksParserTest, SubtitleNoNameYesLang) {
+ InSequence s;
+
+ TracksBuilder tb;
+ tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "", "eng");
+
+ const std::vector<uint8> buf = tb.Finish();
+ VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "eng");
+}
+
+TEST_F(WebMTracksParserTest, SubtitleYesNameYesLang) {
+ InSequence s;
+
+ TracksBuilder tb;
+ tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "Picard", "fre");
+
+ const std::vector<uint8> buf = tb.Finish();
+ VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Picard", "fre");
+}
+
+TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
+ InSequence s;
+
+ TracksBuilder tb;
+ tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "Subtitles", "fre");
+ tb.AddTextTrack(2, 2, kWebMCodecSubtitles, "Commentary", "fre");
+
+ const std::vector<uint8> buf = tb.Finish();
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+
+ int result = parser->Parse(&buf[0], buf.size());
+ EXPECT_GT(result, 0);
+ EXPECT_EQ(result, static_cast<int>(buf.size()));
+
+ EXPECT_EQ(parser->text_tracks().size(), 0u);
+
+ const std::set<int64>& ignored_tracks = parser->ignored_tracks();
+ EXPECT_TRUE(ignored_tracks.find(1) != ignored_tracks.end());
+ EXPECT_TRUE(ignored_tracks.find(2) != ignored_tracks.end());
+
+ // Test again w/o ignoring the text tracks.
+ parser.reset(new WebMTracksParser(LogCB(), false));
+
+ result = parser->Parse(&buf[0], buf.size());
+ EXPECT_GT(result, 0);
+
+ EXPECT_EQ(parser->ignored_tracks().size(), 0u);
+ EXPECT_EQ(parser->text_tracks().size(), 2u);
+}
+
+TEST_F(WebMTracksParserTest, AudioVideoDefaultDurationUnset) {
+ // The test audio/video TrackEntry configurations must include other decoder
+ // config fields, but this test verifies their inclusion and parsing only
+ // minimally. The goal is to confirm that TrackEntry DefaultDuration
+ // defaults to -1 when it is absent from an audio or video TrackEntry.
+ TracksBuilder tb;
+ tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", -1, 2, 8000);
+ tb.AddVideoTrack(2, 2, "V_VP8", "video", "", -1, 320, 240);
+ const std::vector<uint8> buf = tb.Finish();
+
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ int result = parser->Parse(&buf[0], buf.size());
+ EXPECT_LE(0, result);
+ EXPECT_EQ(static_cast<int>(buf.size()), result);
+
+ EXPECT_EQ(kNoTimestamp(),
+ parser->GetAudioDefaultDuration(kDefaultTimecodeScaleInUs));
+ EXPECT_EQ(kNoTimestamp(),
+ parser->GetVideoDefaultDuration(kDefaultTimecodeScaleInUs));
+
+ const VideoDecoderConfig& video_config = parser->video_decoder_config();
+ EXPECT_TRUE(video_config.IsValidConfig());
+ EXPECT_EQ(320, video_config.coded_size().width());
+ EXPECT_EQ(240, video_config.coded_size().height());
+
+ const AudioDecoderConfig& audio_config = parser->audio_decoder_config();
+ EXPECT_TRUE(audio_config.IsValidConfig());
+ EXPECT_EQ(CHANNEL_LAYOUT_STEREO, audio_config.channel_layout());
+ EXPECT_EQ(8000, audio_config.samples_per_second());
+}
+
+TEST_F(WebMTracksParserTest, AudioVideoDefaultDurationSet) {
+ // Confirm audio or video TrackEntry DefaultDuration values are parsed, if
+ // present.
+ TracksBuilder tb;
+ tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", 12345678, 2, 8000);
+ tb.AddVideoTrack(2, 2, "V_VP8", "video", "", 987654321, 320, 240);
+ const std::vector<uint8> buf = tb.Finish();
+
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ int result = parser->Parse(&buf[0], buf.size());
+ EXPECT_LE(0, result);
+ EXPECT_EQ(static_cast<int>(buf.size()), result);
+
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(12000),
+ parser->GetAudioDefaultDuration(kDefaultTimecodeScaleInUs));
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(985000),
+ parser->GetVideoDefaultDuration(5000.0)); // 5 ms resolution
+ EXPECT_EQ(kNoTimestamp(), parser->GetAudioDefaultDuration(12346.0));
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(12345),
+ parser->GetAudioDefaultDuration(12345.0));
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(12003),
+ parser->GetAudioDefaultDuration(1000.3)); // 1.0003 ms resolution
+}
+
+TEST_F(WebMTracksParserTest, InvalidZeroDefaultDurationSet) {
+ // Confirm parse error if TrackEntry DefaultDuration is present, but is 0ns.
+ TracksBuilder tb(true);
+ tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", 0, 2, 8000);
+ const std::vector<uint8> buf = tb.Finish();
+
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ EXPECT_EQ(-1, parser->Parse(&buf[0], buf.size()));
+}
+
+} // namespace media
diff --git a/chromium/media/webm/webm_video_client.cc b/chromium/media/formats/webm/webm_video_client.cc
index 1d0cbcb2ac4..bda78efafac 100644
--- a/chromium/media/webm/webm_video_client.cc
+++ b/chromium/media/formats/webm/webm_video_client.cc
@@ -1,11 +1,11 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_video_client.h"
+#include "media/formats/webm/webm_video_client.h"
#include "media/base/video_decoder_config.h"
-#include "media/webm/webm_constants.h"
+#include "media/formats/webm/webm_constants.h"
namespace media {
@@ -74,21 +74,19 @@ bool WebMVideoClient::InitializeConfig(
gfx::Rect visible_rect(crop_top_, crop_left_,
pixel_width_ - (crop_left_ + crop_right_),
pixel_height_ - (crop_top_ + crop_bottom_));
- gfx::Size natural_size = coded_size;
if (display_unit_ == 0) {
if (display_width_ <= 0)
- display_width_ = pixel_width_;
+ display_width_ = visible_rect.width();
if (display_height_ <= 0)
- display_height_ = pixel_height_;
- natural_size = gfx::Size(display_width_, display_height_);
+ display_height_ = visible_rect.height();
} else if (display_unit_ == 3) {
if (display_width_ <= 0 || display_height_ <= 0)
return false;
- natural_size = gfx::Size(display_width_, display_height_);
} else {
MEDIA_LOG(log_cb_) << "Unsupported display unit type " << display_unit_;
return false;
}
+ gfx::Size natural_size = gfx::Size(display_width_, display_height_);
const uint8* extra_data = NULL;
size_t extra_data_size = 0;
if (codec_private.size() > 0) {
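With the change above, a missing DisplayWidth/DisplayHeight now defaults to
the cropped visible size rather than the coded size. A worked sketch with
hypothetical values (assuming the same member semantics as the patch):

    // Coded 1920x1088 with crop_bottom_ = 8 -> visible rect 1920x1080.
    // With display_unit_ == 0 and no explicit display size, the natural
    // size is now the visible 1920x1080 instead of the coded 1920x1088.
    gfx::Rect visible_rect(0, 0, 1920, 1088 - 8);
    gfx::Size natural_size(visible_rect.width(), visible_rect.height());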
diff --git a/chromium/media/webm/webm_video_client.h b/chromium/media/formats/webm/webm_video_client.h
index d1872baebbe..5545e0a203e 100644
--- a/chromium/media/webm/webm_video_client.h
+++ b/chromium/media/formats/webm/webm_video_client.h
@@ -1,15 +1,15 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_VIDEO_CLIENT_H_
-#define MEDIA_WEBM_WEBM_VIDEO_CLIENT_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_VIDEO_CLIENT_H_
+#define MEDIA_FORMATS_WEBM_WEBM_VIDEO_CLIENT_H_
#include <string>
#include <vector>
#include "media/base/media_log.h"
-#include "media/webm/webm_parser.h"
+#include "media/formats/webm/webm_parser.h"
namespace media {
class VideoDecoderConfig;
@@ -58,4 +58,4 @@ class WebMVideoClient : public WebMParserClient {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_VIDEO_CLIENT_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_VIDEO_CLIENT_H_
diff --git a/chromium/media/webm/webm_webvtt_parser.cc b/chromium/media/formats/webm/webm_webvtt_parser.cc
index d77bfbcfd88..64de1ef4434 100644
--- a/chromium/media/webm/webm_webvtt_parser.cc
+++ b/chromium/media/formats/webm/webm_webvtt_parser.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_webvtt_parser.h"
+#include "media/formats/webm/webm_webvtt_parser.h"
namespace media {
diff --git a/chromium/media/webm/webm_webvtt_parser.h b/chromium/media/formats/webm/webm_webvtt_parser.h
index a6aa316d5eb..12bbbd4dd29 100644
--- a/chromium/media/webm/webm_webvtt_parser.h
+++ b/chromium/media/formats/webm/webm_webvtt_parser.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_WEBM_WEBM_WEBVTT_PARSER_H_
-#define MEDIA_WEBM_WEBM_WEBVTT_PARSER_H_
+#ifndef MEDIA_FORMATS_WEBM_WEBM_WEBVTT_PARSER_H_
+#define MEDIA_FORMATS_WEBM_WEBM_WEBVTT_PARSER_H_
#include <string>
@@ -46,4 +46,4 @@ class MEDIA_EXPORT WebMWebVTTParser {
} // namespace media
-#endif // MEDIA_WEBM_WEBM_WEBVTT_PARSER_H_
+#endif // MEDIA_FORMATS_WEBM_WEBM_WEBVTT_PARSER_H_
diff --git a/chromium/media/webm/webm_webvtt_parser_unittest.cc b/chromium/media/formats/webm/webm_webvtt_parser_unittest.cc
index db514a1247c..ecdabd42975 100644
--- a/chromium/media/webm/webm_webvtt_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_webvtt_parser_unittest.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/webm/webm_webvtt_parser.h"
+#include "media/formats/webm/webm_webvtt_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/media.gyp b/chromium/media/media.gyp
index 97e2f5dfba9..71099e93475 100644
--- a/chromium/media/media.gyp
+++ b/chromium/media/media.gyp
@@ -21,16 +21,25 @@
'media_use_ffmpeg%': 1,
'media_use_libvpx%': 1,
}],
- # ALSA usage.
+ # Enable ALSA and Pulse for runtime selection.
['(OS=="linux" or OS=="freebsd" or OS=="solaris") and embedded!=1', {
+ # ALSA is always needed for Web MIDI even when cras is enabled.
'use_alsa%': 1,
+ 'conditions': [
+ ['use_cras==1', {
+ 'use_pulseaudio%': 0,
+ }, {
+ 'use_pulseaudio%': 1,
+ }],
+ ],
}, {
'use_alsa%': 0,
+ 'use_pulseaudio%': 0,
}],
- ['os_posix==1 and OS!="mac" and OS!="android" and chromeos!=1 and embedded!=1', {
- 'use_pulseaudio%': 1,
+ ['sysroot!=""', {
+ 'pkg-config': '../build/linux/pkg-config-wrapper "<(sysroot)" "<(target_arch)" "<(system_libdir)"',
}, {
- 'use_pulseaudio%': 0,
+ 'pkg-config': 'pkg-config'
}],
],
},
@@ -43,14 +52,15 @@
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../crypto/crypto.gyp:crypto',
- '../net/net.gyp:net',
'../gpu/gpu.gyp:command_buffer_common',
'../skia/skia.gyp:skia',
'../third_party/opus/opus.gyp:opus',
'../ui/events/events.gyp:events_base',
'../ui/gfx/gfx.gyp:gfx',
+ '../ui/gfx/gfx.gyp:gfx_geometry',
'../url/url.gyp:url_lib',
'shared_memory_support',
],
@@ -134,8 +144,6 @@
'audio/fake_audio_output_stream.cc',
'audio/fake_audio_output_stream.h',
'audio/linux/audio_manager_linux.cc',
- 'audio/mac/aggregate_device_manager.cc',
- 'audio/mac/aggregate_device_manager.h',
'audio/mac/audio_auhal_mac.cc',
'audio/mac/audio_auhal_mac.h',
'audio/mac/audio_device_listener_mac.cc',
@@ -144,14 +152,8 @@
'audio/mac/audio_input_mac.h',
'audio/mac/audio_low_latency_input_mac.cc',
'audio/mac/audio_low_latency_input_mac.h',
- 'audio/mac/audio_low_latency_output_mac.cc',
- 'audio/mac/audio_low_latency_output_mac.h',
'audio/mac/audio_manager_mac.cc',
'audio/mac/audio_manager_mac.h',
- 'audio/mac/audio_synchronized_mac.cc',
- 'audio/mac/audio_synchronized_mac.h',
- 'audio/mac/audio_unified_mac.cc',
- 'audio/mac/audio_unified_mac.h',
'audio/null_audio_sink.cc',
'audio/null_audio_sink.h',
'audio/openbsd/audio_manager_openbsd.cc',
@@ -162,14 +164,12 @@
'audio/pulse/pulse_input.h',
'audio/pulse/pulse_output.cc',
'audio/pulse/pulse_output.h',
- 'audio/pulse/pulse_unified.cc',
- 'audio/pulse/pulse_unified.h',
'audio/pulse/pulse_util.cc',
'audio/pulse/pulse_util.h',
'audio/sample_rates.cc',
'audio/sample_rates.h',
- 'audio/scoped_loop_observer.cc',
- 'audio/scoped_loop_observer.h',
+ 'audio/scoped_task_runner_observer.cc',
+ 'audio/scoped_task_runner_observer.h',
'audio/simple_sources.cc',
'audio/simple_sources.h',
'audio/sounds/audio_stream_handler.cc',
@@ -190,8 +190,6 @@
'audio/win/audio_low_latency_output_win.h',
'audio/win/audio_manager_win.cc',
'audio/win/audio_manager_win.h',
- 'audio/win/audio_unified_win.cc',
- 'audio/win/audio_unified_win.h',
'audio/win/avrt_wrapper_win.cc',
'audio/win/avrt_wrapper_win.h',
'audio/win/core_audio_util_win.cc',
@@ -213,12 +211,16 @@
'base/audio_buffer_queue.cc',
'base/audio_buffer_queue.h',
'base/audio_capturer_source.h',
+ 'base/audio_buffer_converter.cc',
+ 'base/audio_buffer_converter.h',
'base/audio_converter.cc',
'base/audio_converter.h',
'base/audio_decoder.cc',
'base/audio_decoder.h',
'base/audio_decoder_config.cc',
'base/audio_decoder_config.h',
+ 'base/audio_discard_helper.cc',
+ 'base/audio_discard_helper.h',
'base/audio_fifo.cc',
'base/audio_fifo.h',
'base/audio_hardware_config.cc',
@@ -238,13 +240,20 @@
'base/audio_splicer.h',
'base/audio_timestamp_helper.cc',
'base/audio_timestamp_helper.h',
- 'base/bind_to_loop.h',
+ 'base/audio_video_metadata_extractor.cc',
+ 'base/audio_video_metadata_extractor.h',
+ 'base/bind_to_current_loop.h',
'base/bit_reader.cc',
'base/bit_reader.h',
+ 'base/bit_reader_core.cc',
+ 'base/bit_reader_core.h',
'base/bitstream_buffer.h',
+ 'base/buffering_state.h',
'base/buffers.h',
'base/byte_queue.cc',
'base/byte_queue.h',
+ 'base/cdm_promise.cc',
+ 'base/cdm_promise.h',
'base/channel_mixer.cc',
'base/channel_mixer.h',
'base/clock.cc',
@@ -291,6 +300,8 @@
'base/pipeline.cc',
'base/pipeline.h',
'base/pipeline_status.h',
+ 'base/player_tracker.cc',
+ 'base/player_tracker.h',
'base/ranges.cc',
'base/ranges.h',
'base/sample_format.cc',
@@ -316,6 +327,8 @@
'base/stream_parser_buffer.h',
'base/text_cue.cc',
'base/text_cue.h',
+ 'base/text_ranges.cc',
+ 'base/text_ranges.h',
'base/text_renderer.cc',
'base/text_renderer.h',
'base/text_track.h',
@@ -346,10 +359,13 @@
'cdm/json_web_key.h',
'cdm/key_system_names.cc',
'cdm/key_system_names.h',
+ 'cdm/player_tracker_impl.cc',
+ 'cdm/player_tracker_impl.h',
'ffmpeg/ffmpeg_common.cc',
'ffmpeg/ffmpeg_common.h',
- 'filters/audio_decoder_selector.cc',
- 'filters/audio_decoder_selector.h',
+ 'ffmpeg/ffmpeg_deleters.h',
+ 'filters/audio_clock.cc',
+ 'filters/audio_clock.h',
'filters/audio_file_reader.cc',
'filters/audio_file_reader.h',
'filters/audio_renderer_algorithm.cc',
@@ -360,6 +376,12 @@
'filters/blocking_url_protocol.h',
'filters/chunk_demuxer.cc',
'filters/chunk_demuxer.h',
+ 'filters/decoder_selector.cc',
+ 'filters/decoder_selector.h',
+ 'filters/decoder_stream.cc',
+ 'filters/decoder_stream.h',
+ 'filters/decoder_stream_traits.cc',
+ 'filters/decoder_stream_traits.h',
'filters/decrypting_audio_decoder.cc',
'filters/decrypting_audio_decoder.h',
'filters/decrypting_demuxer_stream.cc',
@@ -372,18 +394,22 @@
'filters/ffmpeg_demuxer.h',
'filters/ffmpeg_glue.cc',
'filters/ffmpeg_glue.h',
- 'filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc',
- 'filters/ffmpeg_h264_to_annex_b_bitstream_converter.h',
'filters/ffmpeg_video_decoder.cc',
'filters/ffmpeg_video_decoder.h',
'filters/file_data_source.cc',
'filters/file_data_source.h',
+ 'filters/frame_processor.cc',
+ 'filters/frame_processor.h',
+ 'filters/frame_processor_base.cc',
+ 'filters/frame_processor_base.h',
'filters/gpu_video_accelerator_factories.cc',
'filters/gpu_video_accelerator_factories.h',
'filters/gpu_video_decoder.cc',
'filters/gpu_video_decoder.h',
- 'filters/h264_to_annex_b_bitstream_converter.cc',
- 'filters/h264_to_annex_b_bitstream_converter.h',
+ 'filters/h264_bit_reader.cc',
+ 'filters/h264_bit_reader.h',
+ 'filters/h264_parser.cc',
+ 'filters/h264_parser.h',
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
'filters/opus_audio_decoder.cc',
@@ -394,10 +420,11 @@
'filters/source_buffer_stream.h',
'filters/stream_parser_factory.cc',
'filters/stream_parser_factory.h',
- 'filters/video_decoder_selector.cc',
- 'filters/video_decoder_selector.h',
- 'filters/video_frame_stream.cc',
- 'filters/video_frame_stream.h',
+ 'filters/video_frame_scheduler.h',
+ 'filters/video_frame_scheduler_impl.cc',
+ 'filters/video_frame_scheduler_impl.h',
+ 'filters/video_frame_scheduler_proxy.cc',
+ 'filters/video_frame_scheduler_proxy.h',
'filters/video_renderer_impl.cc',
'filters/video_renderer_impl.h',
'filters/vpx_video_decoder.cc',
@@ -407,24 +434,53 @@
'filters/wsola_internals.h',
'midi/midi_manager.cc',
'midi/midi_manager.h',
+ 'midi/midi_manager_alsa.cc',
+ 'midi/midi_manager_alsa.h',
+ 'midi/midi_manager_android.cc',
'midi/midi_manager_mac.cc',
'midi/midi_manager_mac.h',
+ 'midi/midi_manager_usb.cc',
+ 'midi/midi_manager_usb.h',
+ 'midi/midi_manager_win.cc',
+ 'midi/midi_manager_win.h',
'midi/midi_message_queue.cc',
'midi/midi_message_queue.h',
'midi/midi_message_util.cc',
'midi/midi_message_util.h',
- 'midi/midi_manager_win.cc',
- 'midi/midi_manager_win.h',
'midi/midi_port_info.cc',
'midi/midi_port_info.h',
+ 'midi/usb_midi_descriptor_parser.cc',
+ 'midi/usb_midi_descriptor_parser.h',
+ 'midi/usb_midi_device.h',
+ 'midi/usb_midi_device_android.cc',
+ 'midi/usb_midi_device_android.h',
+ 'midi/usb_midi_device_factory_android.cc',
+ 'midi/usb_midi_device_factory_android.h',
+ 'midi/usb_midi_input_stream.cc',
+ 'midi/usb_midi_input_stream.h',
+ 'midi/usb_midi_jack.h',
+ 'midi/usb_midi_output_stream.cc',
+ 'midi/usb_midi_output_stream.h',
+ 'ozone/media_ozone_platform.cc',
+ 'ozone/media_ozone_platform.h',
'video/capture/android/video_capture_device_android.cc',
'video/capture/android/video_capture_device_android.h',
+ 'video/capture/android/video_capture_device_factory_android.cc',
+ 'video/capture/android/video_capture_device_factory_android.h',
'video/capture/fake_video_capture_device.cc',
'video/capture/fake_video_capture_device.h',
+ 'video/capture/fake_video_capture_device_factory.h',
+ 'video/capture/fake_video_capture_device_factory.cc',
'video/capture/file_video_capture_device.cc',
'video/capture/file_video_capture_device.h',
+ 'video/capture/file_video_capture_device_factory.h',
+ 'video/capture/file_video_capture_device_factory.cc',
+ 'video/capture/linux/video_capture_device_factory_linux.cc',
+ 'video/capture/linux/video_capture_device_factory_linux.h',
'video/capture/linux/video_capture_device_linux.cc',
'video/capture/linux/video_capture_device_linux.h',
+ 'video/capture/linux/video_capture_device_chromeos.cc',
+ 'video/capture/linux/video_capture_device_chromeos.h',
'video/capture/mac/avfoundation_glue.h',
'video/capture/mac/avfoundation_glue.mm',
'video/capture/mac/coremedia_glue.h',
@@ -432,15 +488,16 @@
'video/capture/mac/platform_video_capturing_mac.h',
'video/capture/mac/video_capture_device_avfoundation_mac.h',
'video/capture/mac/video_capture_device_avfoundation_mac.mm',
+ 'video/capture/mac/video_capture_device_factory_mac.h',
+ 'video/capture/mac/video_capture_device_factory_mac.mm',
'video/capture/mac/video_capture_device_mac.h',
'video/capture/mac/video_capture_device_mac.mm',
'video/capture/mac/video_capture_device_qtkit_mac.h',
'video/capture/mac/video_capture_device_qtkit_mac.mm',
- 'video/capture/video_capture.h',
'video/capture/video_capture_device.cc',
'video/capture/video_capture_device.h',
- 'video/capture/video_capture_proxy.cc',
- 'video/capture/video_capture_proxy.h',
+ 'video/capture/video_capture_device_factory.cc',
+ 'video/capture/video_capture_device_factory.h',
'video/capture/video_capture_types.cc',
'video/capture/video_capture_types.h',
'video/capture/win/capability_list_win.cc',
@@ -454,6 +511,8 @@
'video/capture/win/sink_filter_win.h',
'video/capture/win/sink_input_pin_win.cc',
'video/capture/win/sink_input_pin_win.h',
+ 'video/capture/win/video_capture_device_factory_win.cc',
+ 'video/capture/win/video_capture_device_factory_win.h',
'video/capture/win/video_capture_device_mf_win.cc',
'video/capture/win/video_capture_device_mf_win.h',
'video/capture/win/video_capture_device_win.cc',
@@ -464,30 +523,32 @@
'video/video_decode_accelerator.h',
'video/video_encode_accelerator.cc',
'video/video_encode_accelerator.h',
- 'webm/webm_audio_client.cc',
- 'webm/webm_audio_client.h',
- 'webm/webm_cluster_parser.cc',
- 'webm/webm_cluster_parser.h',
- 'webm/webm_constants.cc',
- 'webm/webm_constants.h',
- 'webm/webm_content_encodings.cc',
- 'webm/webm_content_encodings.h',
- 'webm/webm_content_encodings_client.cc',
- 'webm/webm_content_encodings_client.h',
- 'webm/webm_crypto_helpers.cc',
- 'webm/webm_crypto_helpers.h',
- 'webm/webm_info_parser.cc',
- 'webm/webm_info_parser.h',
- 'webm/webm_parser.cc',
- 'webm/webm_parser.h',
- 'webm/webm_stream_parser.cc',
- 'webm/webm_stream_parser.h',
- 'webm/webm_tracks_parser.cc',
- 'webm/webm_tracks_parser.h',
- 'webm/webm_video_client.cc',
- 'webm/webm_video_client.h',
- 'webm/webm_webvtt_parser.cc',
- 'webm/webm_webvtt_parser.h'
+ 'formats/common/offset_byte_queue.cc',
+ 'formats/common/offset_byte_queue.h',
+ 'formats/webm/webm_audio_client.cc',
+ 'formats/webm/webm_audio_client.h',
+ 'formats/webm/webm_cluster_parser.cc',
+ 'formats/webm/webm_cluster_parser.h',
+ 'formats/webm/webm_constants.cc',
+ 'formats/webm/webm_constants.h',
+ 'formats/webm/webm_content_encodings.cc',
+ 'formats/webm/webm_content_encodings.h',
+ 'formats/webm/webm_content_encodings_client.cc',
+ 'formats/webm/webm_content_encodings_client.h',
+ 'formats/webm/webm_crypto_helpers.cc',
+ 'formats/webm/webm_crypto_helpers.h',
+ 'formats/webm/webm_info_parser.cc',
+ 'formats/webm/webm_info_parser.h',
+ 'formats/webm/webm_parser.cc',
+ 'formats/webm/webm_parser.h',
+ 'formats/webm/webm_stream_parser.cc',
+ 'formats/webm/webm_stream_parser.h',
+ 'formats/webm/webm_tracks_parser.cc',
+ 'formats/webm/webm_tracks_parser.h',
+ 'formats/webm/webm_video_client.cc',
+ 'formats/webm/webm_video_client.h',
+ 'formats/webm/webm_webvtt_parser.cc',
+ 'formats/webm/webm_webvtt_parser.h'
],
'direct_dependent_settings': {
'include_dirs': [
@@ -507,6 +568,8 @@
}, { # media_use_ffmpeg==0
# Exclude the sources that depend on ffmpeg.
'sources!': [
+ 'base/audio_video_metadata_extractor.cc',
+ 'base/audio_video_metadata_extractor.h',
'base/container_names.cc',
'base/container_names.h',
'base/media_file_checker.cc',
@@ -528,6 +591,8 @@
'filters/ffmpeg_h264_to_annex_b_bitstream_converter.h',
'filters/ffmpeg_video_decoder.cc',
'filters/ffmpeg_video_decoder.h',
+ 'filters/in_memory_url_protocol.cc',
+ 'filters/in_memory_url_protocol.h',
],
}],
['media_use_libvpx==1', {
@@ -546,10 +611,14 @@
'filters/vpx_video_decoder.h',
],
}],
- ['OS=="android"', {
- 'include_dirs': [
- '<(SHARED_INTERMEDIATE_DIR)/media',
+ ['enable_browser_cdms==1', {
+ 'sources': [
+ 'base/browser_cdm.cc',
+ 'base/browser_cdm.h',
+ 'base/browser_cdm_factory.h',
],
+ }],
+ ['OS=="android"', {
'dependencies': [
'media_android_jni_headers',
'player_android',
@@ -582,10 +651,15 @@
'../third_party/libyuv/libyuv.gyp:libyuv',
],
'sources': [
- 'webm/chromeos/ebml_writer.cc',
- 'webm/chromeos/ebml_writer.h',
- 'webm/chromeos/webm_encoder.cc',
- 'webm/chromeos/webm_encoder.h',
+ 'formats/webm/chromeos/ebml_writer.cc',
+ 'formats/webm/chromeos/ebml_writer.h',
+ 'formats/webm/chromeos/webm_encoder.cc',
+ 'formats/webm/chromeos/webm_encoder.h',
+ ],
+ }],
+ ['OS!="ios"', {
+ 'dependencies': [
+ '../third_party/libyuv/libyuv.gyp:libyuv',
],
}],
['use_alsa==1', {
@@ -598,7 +672,10 @@
'USE_ALSA',
],
}, { # use_alsa==0
- 'sources/': [ ['exclude', '(^|/)alsa/'], ],
+ 'sources/': [
+ ['exclude', '(^|/)alsa/'],
+ ['exclude', '_alsa\\.(h|cc)$'],
+ ],
}],
['OS!="openbsd"', {
'sources!': [
@@ -607,26 +684,15 @@
],
}],
['OS=="linux"', {
- 'variables': {
- 'conditions': [
- ['sysroot!=""', {
- 'pkg-config': '../build/linux/pkg-config-wrapper "<(sysroot)" "<(target_arch)"',
- }, {
- 'pkg-config': 'pkg-config'
- }],
- ],
- },
'conditions': [
['use_x11==1', {
- 'link_settings': {
- 'libraries': [
- '-lX11',
- '-lXdamage',
- '-lXext',
- '-lXfixes',
- '-lXtst',
- ],
- },
+ 'dependencies': [
+ '../build/linux/system.gyp:x11',
+ '../build/linux/system.gyp:xdamage',
+ '../build/linux/system.gyp:xext',
+ '../build/linux/system.gyp:xfixes',
+ '../build/linux/system.gyp:xtst',
+ ],
}, { # else: use_x11==0
'sources!': [
'base/user_input_monitor_linux.cc',
@@ -659,6 +725,49 @@
}],
],
}],
+ ['use_ozone==1', {
+ 'variables': {
+ 'platform_list_txt_file': '<(SHARED_INTERMEDIATE_DIR)/ui/ozone/platform_list.txt',
+ 'constructor_list_cc_file': '<(INTERMEDIATE_DIR)/media/ozone/constructor_list.cc',
+ },
+ 'include_dirs': [
+ # Used for the generated listing header (ui/ozone/platform_list.h)
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'sources': [
+ '<(constructor_list_cc_file)',
+ ],
+ 'dependencies': [
+ '../ui/ozone/ozone.gyp:ozone',
+ ],
+ 'actions': [
+ {
+ # Ozone platform objects are auto-generated using similar
+ # patterns for naming and class constructors. Here we build the
+ # MediaOzonePlatform object.
+ 'action_name': 'generate_constructor_list',
+ 'variables': {
+ 'generator_path': '../ui/ozone/generate_constructor_list.py',
+ },
+ 'inputs': [
+ '<(generator_path)',
+ '<(platform_list_txt_file)',
+ ],
+ 'outputs': [
+ '<(constructor_list_cc_file)',
+ ],
+ 'action': [
+ 'python',
+ '<(generator_path)',
+ '--platform_list=<(platform_list_txt_file)',
+ '--output_cc=<(constructor_list_cc_file)',
+ '--namespace=media',
+ '--typename=MediaOzonePlatform',
+ '--include="media/ozone/media_ozone_platform.h"'
+ ],
+ },
+ ]
+ }],
['OS!="linux"', {
'sources!': [
'audio/cras/audio_manager_cras.cc',
@@ -671,7 +780,7 @@
}],
['use_pulseaudio==1', {
'cflags': [
- '<!@(pkg-config --cflags libpulse)',
+ '<!@(<(pkg-config) --cflags libpulse)',
],
'defines': [
'USE_PULSEAUDIO',
@@ -733,10 +842,10 @@
}, { # else: linux_link_pulseaudio==0
'link_settings': {
'ldflags': [
- '<!@(pkg-config --libs-only-L --libs-only-other libpulse)',
+ '<!@(<(pkg-config) --libs-only-L --libs-only-other libpulse)',
],
'libraries': [
- '<!@(pkg-config --libs-only-l libpulse)',
+ '<!@(<(pkg-config) --libs-only-l libpulse)',
],
},
}],
@@ -749,8 +858,6 @@
'audio/pulse/pulse_input.h',
'audio/pulse/pulse_output.cc',
'audio/pulse/pulse_output.h',
- 'audio/pulse/pulse_unified.cc',
- 'audio/pulse/pulse_unified.h',
'audio/pulse/pulse_util.cc',
'audio/pulse/pulse_util.h',
],
@@ -809,68 +916,74 @@
}],
['proprietary_codecs==1', {
'sources': [
- 'mp2t/es_parser.h',
- 'mp2t/es_parser_adts.cc',
- 'mp2t/es_parser_adts.h',
- 'mp2t/es_parser_h264.cc',
- 'mp2t/es_parser_h264.h',
- 'mp2t/mp2t_common.h',
- 'mp2t/mp2t_stream_parser.cc',
- 'mp2t/mp2t_stream_parser.h',
- 'mp2t/ts_packet.cc',
- 'mp2t/ts_packet.h',
- 'mp2t/ts_section.h',
- 'mp2t/ts_section_pat.cc',
- 'mp2t/ts_section_pat.h',
- 'mp2t/ts_section_pes.cc',
- 'mp2t/ts_section_pes.h',
- 'mp2t/ts_section_pmt.cc',
- 'mp2t/ts_section_pmt.h',
- 'mp2t/ts_section_psi.cc',
- 'mp2t/ts_section_psi.h',
- 'mp3/mp3_stream_parser.cc',
- 'mp3/mp3_stream_parser.h',
- 'mp4/aac.cc',
- 'mp4/aac.h',
- 'mp4/avc.cc',
- 'mp4/avc.h',
- 'mp4/box_definitions.cc',
- 'mp4/box_definitions.h',
- 'mp4/box_reader.cc',
- 'mp4/box_reader.h',
- 'mp4/cenc.cc',
- 'mp4/cenc.h',
- 'mp4/es_descriptor.cc',
- 'mp4/es_descriptor.h',
- 'mp4/mp4_stream_parser.cc',
- 'mp4/mp4_stream_parser.h',
- 'mp4/offset_byte_queue.cc',
- 'mp4/offset_byte_queue.h',
- 'mp4/track_run_iterator.cc',
- 'mp4/track_run_iterator.h',
+ 'filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc',
+ 'filters/ffmpeg_h264_to_annex_b_bitstream_converter.h',
+ 'filters/h264_to_annex_b_bitstream_converter.cc',
+ 'filters/h264_to_annex_b_bitstream_converter.h',
+ 'formats/mp2t/es_parser.h',
+ 'formats/mp2t/es_parser_adts.cc',
+ 'formats/mp2t/es_parser_adts.h',
+ 'formats/mp2t/es_parser_h264.cc',
+ 'formats/mp2t/es_parser_h264.h',
+ 'formats/mp2t/mp2t_common.h',
+ 'formats/mp2t/mp2t_stream_parser.cc',
+ 'formats/mp2t/mp2t_stream_parser.h',
+ 'formats/mp2t/ts_packet.cc',
+ 'formats/mp2t/ts_packet.h',
+ 'formats/mp2t/ts_section.h',
+ 'formats/mp2t/ts_section_pat.cc',
+ 'formats/mp2t/ts_section_pat.h',
+ 'formats/mp2t/ts_section_pes.cc',
+ 'formats/mp2t/ts_section_pes.h',
+ 'formats/mp2t/ts_section_pmt.cc',
+ 'formats/mp2t/ts_section_pmt.h',
+ 'formats/mp2t/ts_section_psi.cc',
+ 'formats/mp2t/ts_section_psi.h',
+ 'formats/mp4/aac.cc',
+ 'formats/mp4/aac.h',
+ 'formats/mp4/avc.cc',
+ 'formats/mp4/avc.h',
+ 'formats/mp4/box_definitions.cc',
+ 'formats/mp4/box_definitions.h',
+ 'formats/mp4/box_reader.cc',
+ 'formats/mp4/box_reader.h',
+ 'formats/mp4/cenc.cc',
+ 'formats/mp4/cenc.h',
+ 'formats/mp4/es_descriptor.cc',
+ 'formats/mp4/es_descriptor.h',
+ 'formats/mp4/mp4_stream_parser.cc',
+ 'formats/mp4/mp4_stream_parser.h',
+ 'formats/mp4/sample_to_group_iterator.cc',
+ 'formats/mp4/sample_to_group_iterator.h',
+ 'formats/mp4/track_run_iterator.cc',
+ 'formats/mp4/track_run_iterator.h',
+ 'formats/mpeg/adts_constants.cc',
+ 'formats/mpeg/adts_constants.h',
+ 'formats/mpeg/adts_stream_parser.cc',
+ 'formats/mpeg/adts_stream_parser.h',
+ 'formats/mpeg/mp3_stream_parser.cc',
+ 'formats/mpeg/mp3_stream_parser.h',
+ 'formats/mpeg/mpeg_audio_stream_parser_base.cc',
+ 'formats/mpeg/mpeg_audio_stream_parser_base.h',
],
- }],
- ['toolkit_uses_gtk==1', {
- 'dependencies': [
- '../build/linux/system.gyp:gtk',
+ 'conditions': [
+ ['enable_mpeg2ts_stream_parser==1', {
+ 'defines': [
+ 'ENABLE_MPEG2TS_STREAM_PARSER',
+ ],
+ }],
],
}],
['target_arch=="ia32" or target_arch=="x64"', {
'dependencies': [
'media_asm',
'media_mmx',
- 'media_sse',
'media_sse2',
],
'sources': [
'base/simd/convert_yuv_to_rgb_x86.cc',
],
}],
- ['google_tv==1', {
- 'defines': [
- 'ENABLE_EAC3_PLAYBACK',
- ],
- }],
['OS!="linux" and OS!="win"', {
'sources!': [
'base/keyboard_event_counter.cc',
@@ -893,8 +1006,11 @@
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
+ '../third_party/widevine/cdm/widevine_cdm.gyp:widevine_cdm_version_h',
+ '../ui/base/ui_base.gyp:ui_base',
'../ui/gfx/gfx.gyp:gfx',
- '../ui/ui.gyp:ui',
+ '../ui/gfx/gfx.gyp:gfx_geometry',
+ '../url/url.gyp:url_lib',
],
'sources': [
'audio/android/audio_android_unittest.cc',
@@ -925,14 +1041,16 @@
'audio/win/audio_low_latency_input_win_unittest.cc',
'audio/win/audio_low_latency_output_win_unittest.cc',
'audio/win/audio_output_win_unittest.cc',
- 'audio/win/audio_unified_win_unittest.cc',
'audio/win/core_audio_util_win_unittest.cc',
'base/android/media_codec_bridge_unittest.cc',
+ 'base/android/media_drm_bridge_unittest.cc',
'base/android/media_source_player_unittest.cc',
+ 'base/audio_buffer_converter_unittest.cc',
'base/audio_buffer_unittest.cc',
'base/audio_buffer_queue_unittest.cc',
'base/audio_bus_unittest.cc',
'base/audio_converter_unittest.cc',
+ 'base/audio_discard_helper_unittest.cc',
'base/audio_fifo_unittest.cc',
'base/audio_hardware_config_unittest.cc',
'base/audio_hash_unittest.cc',
@@ -941,7 +1059,8 @@
'base/audio_renderer_mixer_unittest.cc',
'base/audio_splicer_unittest.cc',
'base/audio_timestamp_helper_unittest.cc',
- 'base/bind_to_loop_unittest.cc',
+ 'base/audio_video_metadata_extractor_unittest.cc',
+ 'base/bind_to_current_loop_unittest.cc',
'base/bit_reader_unittest.cc',
'base/callback_holder.h',
'base/callback_holder_unittest.cc',
@@ -962,8 +1081,8 @@
'base/serial_runner_unittest.cc',
'base/seekable_buffer_unittest.cc',
'base/sinc_resampler_unittest.cc',
- 'base/test_data_util.cc',
- 'base/test_data_util.h',
+ 'base/stream_parser_unittest.cc',
+ 'base/text_ranges_unittest.cc',
'base/text_renderer_unittest.cc',
'base/user_input_monitor_unittest.cc',
'base/vector_math_testing.h',
@@ -975,6 +1094,7 @@
'cdm/aes_decryptor_unittest.cc',
'cdm/json_web_key_unittest.cc',
'ffmpeg/ffmpeg_common_unittest.cc',
+ 'filters/audio_clock_unittest.cc',
'filters/audio_decoder_selector_unittest.cc',
'filters/audio_file_reader_unittest.cc',
'filters/audio_renderer_algorithm_unittest.cc',
@@ -993,29 +1113,45 @@
'filters/ffmpeg_audio_decoder_unittest.cc',
'filters/ffmpeg_demuxer_unittest.cc',
'filters/ffmpeg_glue_unittest.cc',
- 'filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc',
'filters/ffmpeg_video_decoder_unittest.cc',
'filters/file_data_source_unittest.cc',
- 'filters/h264_to_annex_b_bitstream_converter_unittest.cc',
+ 'filters/frame_processor_unittest.cc',
+ 'filters/h264_bit_reader_unittest.cc',
+ 'filters/h264_parser_unittest.cc',
+ 'filters/in_memory_url_protocol_unittest.cc',
+ 'filters/opus_audio_decoder_unittest.cc',
'filters/pipeline_integration_test.cc',
'filters/pipeline_integration_test_base.cc',
'filters/skcanvas_video_renderer_unittest.cc',
'filters/source_buffer_stream_unittest.cc',
'filters/video_decoder_selector_unittest.cc',
+ 'filters/video_frame_scheduler_impl_unittest.cc',
+ 'filters/video_frame_scheduler_unittest.cc',
'filters/video_frame_stream_unittest.cc',
'filters/video_renderer_impl_unittest.cc',
+ 'midi/midi_manager_unittest.cc',
+ 'midi/midi_manager_usb_unittest.cc',
'midi/midi_message_queue_unittest.cc',
'midi/midi_message_util_unittest.cc',
+ 'midi/usb_midi_descriptor_parser_unittest.cc',
+ 'midi/usb_midi_input_stream_unittest.cc',
+ 'midi/usb_midi_output_stream_unittest.cc',
+ 'video/capture/fake_video_capture_device_unittest.cc',
'video/capture/video_capture_device_unittest.cc',
- 'webm/cluster_builder.cc',
- 'webm/cluster_builder.h',
- 'webm/tracks_builder.cc',
- 'webm/tracks_builder.h',
- 'webm/webm_cluster_parser_unittest.cc',
- 'webm/webm_content_encodings_client_unittest.cc',
- 'webm/webm_parser_unittest.cc',
- 'webm/webm_tracks_parser_unittest.cc',
- 'webm/webm_webvtt_parser_unittest.cc',
+ 'formats/common/offset_byte_queue_unittest.cc',
+ 'formats/webm/cluster_builder.cc',
+ 'formats/webm/cluster_builder.h',
+ 'formats/webm/tracks_builder.cc',
+ 'formats/webm/tracks_builder.h',
+ 'formats/webm/webm_cluster_parser_unittest.cc',
+ 'formats/webm/webm_content_encodings_client_unittest.cc',
+ 'formats/webm/webm_parser_unittest.cc',
+ 'formats/webm/webm_tracks_parser_unittest.cc',
+ 'formats/webm/webm_webvtt_parser_unittest.cc',
+ ],
+ 'include_dirs': [
+ # Needed by media_drm_bridge.cc.
+ '<(SHARED_INTERMEDIATE_DIR)',
],
'conditions': [
['arm_neon==1', {
@@ -1029,6 +1165,7 @@
],
}, { # media_use_ffmpeg==0
'sources!': [
+ 'base/audio_video_metadata_extractor_unittest.cc',
'base/media_file_checker_unittest.cc',
],
}],
@@ -1044,7 +1181,7 @@
}],
['os_posix==1 and OS!="mac"', {
'conditions': [
- ['linux_use_tcmalloc==1', {
+ ['use_allocator!="none"', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
@@ -1063,16 +1200,14 @@
'filters/ffmpeg_glue_unittest.cc',
'filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc',
'filters/ffmpeg_video_decoder_unittest.cc',
+ 'filters/in_memory_url_protocol_unittest.cc',
+ 'filters/opus_audio_decoder_unittest.cc',
'filters/pipeline_integration_test.cc',
'filters/pipeline_integration_test_base.cc',
],
- 'conditions': [
- ['gtest_target_type=="shared_library"', {
- 'dependencies': [
- '../testing/android/native_test.gyp:native_test_native_code',
- 'player_android',
- ],
- }],
+ 'dependencies': [
+ '../testing/android/native_test.gyp:native_test_native_code',
+ 'player_android',
],
}],
['OS=="linux"', {
@@ -1101,15 +1236,26 @@
}],
['proprietary_codecs==1', {
'sources': [
- 'mp2t/mp2t_stream_parser_unittest.cc',
- 'mp3/mp3_stream_parser_unittest.cc',
- 'mp4/aac_unittest.cc',
- 'mp4/avc_unittest.cc',
- 'mp4/box_reader_unittest.cc',
- 'mp4/es_descriptor_unittest.cc',
- 'mp4/mp4_stream_parser_unittest.cc',
- 'mp4/offset_byte_queue_unittest.cc',
- 'mp4/track_run_iterator_unittest.cc',
+ 'filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc',
+ 'filters/h264_to_annex_b_bitstream_converter_unittest.cc',
+ 'formats/common/stream_parser_test_base.cc',
+ 'formats/common/stream_parser_test_base.h',
+ 'formats/mp2t/es_parser_h264_unittest.cc',
+ 'formats/mp2t/mp2t_stream_parser_unittest.cc',
+ 'formats/mp4/aac_unittest.cc',
+ 'formats/mp4/avc_unittest.cc',
+ 'formats/mp4/box_reader_unittest.cc',
+ 'formats/mp4/es_descriptor_unittest.cc',
+ 'formats/mp4/mp4_stream_parser_unittest.cc',
+ 'formats/mp4/sample_to_group_iterator_unittest.cc',
+ 'formats/mp4/track_run_iterator_unittest.cc',
+ 'formats/mpeg/adts_stream_parser_unittest.cc',
+ 'formats/mpeg/mp3_stream_parser_unittest.cc',
+ ],
+ }],
+ ['enable_mpeg2ts_stream_parser==1', {
+ 'defines': [
+ 'ENABLE_MPEG2TS_STREAM_PARSER',
],
}],
# TODO(wolenetz): Fix size_t to int truncations in win64. See
@@ -1117,31 +1263,37 @@
['OS=="win" and target_arch=="x64"', {
'msvs_disabled_warnings': [ 4267, ],
}],
+ ['OS=="mac"', {
+ 'sources': [
+ 'video/capture/mac/video_capture_device_factory_mac_unittest.mm',
+ ]
+ }],
],
},
{
'target_name': 'media_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
- 'media',
- 'media_test_support',
- 'shared_memory_support',
'../base/base.gyp:test_support_base',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../testing/perf/perf_test.gyp:perf_test',
+ '../ui/base/ui_base.gyp:ui_base',
'../ui/gfx/gfx.gyp:gfx',
+ '../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
- '../ui/ui.gyp:ui',
+ 'media',
+ 'media_test_support',
+ 'shared_memory_support',
],
'sources': [
'base/audio_bus_perftest.cc',
'base/audio_converter_perftest.cc',
'base/demuxer_perftest.cc',
- 'base/run_all_unittests.cc',
+ 'base/run_all_perftests.cc',
'base/sinc_resampler_perftest.cc',
- 'base/test_data_util.cc',
'base/vector_math_perftest.cc',
+ 'base/yuv_convert_perftest.cc',
'filters/pipeline_integration_perftest.cc',
'filters/pipeline_integration_test_base.cc',
],
@@ -1152,13 +1304,9 @@
],
}],
['OS=="android"', {
- 'conditions': [
- ['gtest_target_type=="shared_library"', {
- 'dependencies': [
- '../testing/android/native_test.gyp:native_test_native_code',
- ],
- }],
- ],
+ 'dependencies': [
+ '../testing/android/native_test.gyp:native_test_native_code',
+ ],
}],
['media_use_ffmpeg==1', {
'dependencies': [
@@ -1187,6 +1335,8 @@
'sources': [
'audio/mock_audio_manager.cc',
'audio/mock_audio_manager.h',
+ 'audio/mock_audio_source_callback.cc',
+ 'audio/mock_audio_source_callback.h',
'audio/test_audio_input_controller_factory.cc',
'audio/test_audio_input_controller_factory.h',
'base/fake_audio_render_callback.cc',
@@ -1198,16 +1348,20 @@
'base/gmock_callback_support.h',
'base/mock_audio_renderer_sink.cc',
'base/mock_audio_renderer_sink.h',
- 'base/mock_data_source_host.cc',
- 'base/mock_data_source_host.h',
'base/mock_demuxer_host.cc',
'base/mock_demuxer_host.h',
'base/mock_filters.cc',
'base/mock_filters.h',
+ 'base/test_data_util.cc',
+ 'base/test_data_util.h',
'base/test_helpers.cc',
'base/test_helpers.h',
+ 'filters/clockless_video_frame_scheduler.cc',
+ 'filters/clockless_video_frame_scheduler.h',
'filters/mock_gpu_video_accelerator_factories.cc',
'filters/mock_gpu_video_accelerator_factories.h',
+ 'filters/test_video_frame_scheduler.cc',
+ 'filters/test_video_frame_scheduler.h',
'video/mock_video_decode_accelerator.cc',
'video/mock_video_decode_accelerator.h',
],
@@ -1239,11 +1393,6 @@
'USE_NEON'
],
}],
- ['target_arch=="ia32" or target_arch=="x64"', {
- 'dependencies': [
- 'shared_memory_support_sse'
- ],
- }],
],
},
],
@@ -1299,11 +1448,12 @@
'conditions': [
['target_arch=="ia32"', {
'yasm_flags': [
- '-DX86_32',
+ '-DARCH_X86_32',
'-DELF',
],
- }, {
+ }, { # target_arch=="x64"
'yasm_flags': [
+ '-DARCH_X86_64',
'-DELF',
'-DPIC',
],
@@ -1352,22 +1502,6 @@
],
},
{
- 'target_name': 'media_sse',
- 'type': 'static_library',
- 'cflags': [
- '-msse',
- ],
- 'defines': [
- 'MEDIA_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'sources': [
- 'base/simd/sinc_resampler_sse.cc',
- ],
- },
- {
'target_name': 'media_sse2',
'type': 'static_library',
'cflags': [
@@ -1385,22 +1519,6 @@
'base/simd/filter_yuv_sse2.cc',
],
},
- {
- 'target_name': 'shared_memory_support_sse',
- 'type': 'static_library',
- 'cflags': [
- '-msse',
- ],
- 'defines': [
- 'MEDIA_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'sources': [
- 'base/simd/vector_math_sse.cc',
- ],
- },
], # targets
}],
['use_x11==1', {
@@ -1410,17 +1528,15 @@
'type': 'executable',
'dependencies': [
'media',
+ 'shared_memory_support',
'../base/base.gyp:base',
'../ui/gl/gl.gyp:gl',
'../ui/gfx/gfx.gyp:gfx',
+ '../ui/gfx/gfx.gyp:gfx_geometry',
+ '../build/linux/system.gyp:x11',
+ '../build/linux/system.gyp:xext',
+ '../build/linux/system.gyp:xrender',
],
- 'link_settings': {
- 'libraries': [
- '-lX11',
- '-lXrender',
- '-lXext',
- ],
- },
'conditions': [
# Linux/Solaris need libdl for dlopen() and friends.
['OS=="linux" or OS=="solaris"', {
@@ -1443,9 +1559,7 @@
},
],
}],
- # Special target to wrap a gtest_target_type==shared_library
- # media_unittests into an android apk for execution.
- ['OS=="android" and gtest_target_type=="shared_library"', {
+ ['OS=="android"', {
'targets': [
{
'target_name': 'media_unittests_apk',
@@ -1456,7 +1570,6 @@
],
'variables': {
'test_suite_name': 'media_unittests',
- 'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)media_unittests<(SHARED_LIB_SUFFIX)',
},
'includes': ['../build/apk_test.gypi'],
},
@@ -1469,7 +1582,6 @@
],
'variables': {
'test_suite_name': 'media_perftests',
- 'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)media_perftests<(SHARED_LIB_SUFFIX)',
},
'includes': ['../build/apk_test.gypi'],
},
@@ -1487,11 +1599,12 @@
'base/android/java/src/org/chromium/media/MediaDrmBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerListener.java',
+ 'base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java',
+ 'base/android/java/src/org/chromium/media/UsbMidiDeviceFactoryAndroid.java',
'base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java',
],
'variables': {
'jni_gen_package': 'media',
- 'jni_generator_ptr_type': 'long',
},
'includes': ['../build/jni_generator.gypi'],
},
@@ -1500,10 +1613,10 @@
'type': 'none',
'sources': [
'base/android/java/src/org/chromium/media/VideoCapture.java',
+ 'base/android/java/src/org/chromium/media/VideoCaptureFactory.java',
],
'variables': {
'jni_gen_package': 'media',
- 'jni_generator_ptr_type': 'long',
},
'includes': ['../build/jni_generator.gypi'],
},
@@ -1513,6 +1626,7 @@
'sources': [
'base/android/audio_decoder_job.cc',
'base/android/audio_decoder_job.h',
+ 'base/android/browser_cdm_factory_android.cc',
'base/android/media_codec_bridge.cc',
'base/android/media_codec_bridge.h',
'base/android/media_decoder_job.cc',
@@ -1537,16 +1651,18 @@
],
'dependencies': [
'../base/base.gyp:base',
+ '../third_party/widevine/cdm/widevine_cdm.gyp:widevine_cdm_version_h',
'../ui/gl/gl.gyp:gl',
'../url/url.gyp:url_lib',
'media_android_jni_headers',
],
+ 'include_dirs': [
+ # Needed by media_drm_bridge.cc.
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
'defines': [
'MEDIA_IMPLEMENTATION',
],
- 'include_dirs': [
- '<(SHARED_INTERMEDIATE_DIR)/media',
- ],
},
{
'target_name': 'media_java',
@@ -1595,24 +1711,6 @@
'sources': [
'ffmpeg/ffmpeg_unittest.cc',
],
- 'conditions': [
- ['toolkit_uses_gtk==1', {
- 'dependencies': [
- # Needed for the following #include chain:
- # base/run_all_unittests.cc
- # ../base/test_suite.h
- # gtk/gtk.h
- '../build/linux/system.gyp:gtk',
- ],
- 'conditions': [
- ['linux_use_tcmalloc==1', {
- 'dependencies': [
- '../base/allocator/allocator.gyp:allocator',
- ],
- }],
- ],
- }],
- ],
},
{
'target_name': 'ffmpeg_regression_tests',
@@ -1622,19 +1720,19 @@
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ '../ui/gfx/gfx.gyp:gfx_geometry',
'media',
'media_test_support',
],
'sources': [
'base/run_all_unittests.cc',
- 'base/test_data_util.cc',
'ffmpeg/ffmpeg_regression_tests.cc',
'filters/pipeline_integration_test_base.cc',
],
'conditions': [
['os_posix==1 and OS!="mac"', {
'conditions': [
- ['linux_use_tcmalloc==1', {
+ ['use_allocator!="none"', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
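A note on the use_ozone action added to media.gyp above: constructor_list.cc is not checked in; generate_constructor_list.py emits it at build time from platform_list.txt. A hypothetical sketch of what the generated file plausibly looks like, with placeholder platform names (the real list depends on which Ozone platforms are configured in):

// Hypothetical sketch of the generated constructor_list.cc. Platform names
// are placeholders; generate_constructor_list.py derives the real list from
// ui/ozone's platform_list.txt.
#include "media/ozone/media_ozone_platform.h"

namespace media {

// One forward declaration per platform in platform_list.txt.
MediaOzonePlatform* CreateMediaOzonePlatformCaca();
MediaOzonePlatform* CreateMediaOzonePlatformTest();

typedef MediaOzonePlatform* (*MediaOzonePlatformConstructor)();

// Indexed in platform_list.txt order, so the id of the active Ozone
// platform selects the matching factory.
const MediaOzonePlatformConstructor kMediaOzonePlatformConstructors[] = {
  &CreateMediaOzonePlatformCaca,
  &CreateMediaOzonePlatformTest,
};

}  // namespace media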
diff --git a/chromium/media/media_cdm.gypi b/chromium/media/media_cdm.gypi
index d495e3437d4..6f84ed5d704 100644
--- a/chromium/media/media_cdm.gypi
+++ b/chromium/media/media_cdm.gypi
@@ -18,127 +18,103 @@
# Set |use_libvpx| to 1 to use libvpx for VP8 decoding in |clearkeycdm|.
'use_libvpx%': 0,
},
- 'targets': [
- {
- 'target_name': 'clearkeycdm',
- 'type': 'none',
- # TODO(tomfinegan): Simplify this by unconditionally including all the
- # decoders, and changing clearkeycdm to select which decoder to use
- # based on environment variables.
- 'conditions': [
- ['use_fake_video_decoder == 1' , {
- 'defines': ['CLEAR_KEY_CDM_USE_FAKE_VIDEO_DECODER'],
- 'sources': [
- 'cdm/ppapi/fake_cdm_video_decoder.cc',
- 'cdm/ppapi/fake_cdm_video_decoder.h',
+ 'conditions': [
+ ['enable_pepper_cdms==1', {
+ 'targets': [
+ {
+ 'target_name': 'clearkeycdm',
+ 'type': 'none',
+ # TODO(tomfinegan): Simplify this by unconditionally including all the
+ # decoders, and changing clearkeycdm to select which decoder to use
+ # based on environment variables.
+ 'conditions': [
+ ['use_fake_video_decoder == 1' , {
+ 'defines': ['CLEAR_KEY_CDM_USE_FAKE_VIDEO_DECODER'],
+ 'sources': [
+ 'cdm/ppapi/external_clear_key/fake_cdm_video_decoder.cc',
+ 'cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h',
+ ],
+ }],
+ ['use_ffmpeg == 1' , {
+ 'defines': ['CLEAR_KEY_CDM_USE_FFMPEG_DECODER'],
+ 'dependencies': [
+ '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc',
+ 'cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h',
+ ],
+ }],
+ ['use_ffmpeg == 1 and use_fake_video_decoder == 0' , {
+ 'sources': [
+ 'cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc',
+ 'cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h',
+ ],
+ }],
+ ['use_libvpx == 1 and use_fake_video_decoder == 0' , {
+ 'defines': ['CLEAR_KEY_CDM_USE_LIBVPX_DECODER'],
+ 'dependencies': [
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ ],
+ 'sources': [
+ 'cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc',
+ 'cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.h',
+ ],
+ }],
+ ['os_posix == 1 and OS != "mac" and enable_pepper_cdms==1', {
+ 'type': 'loadable_module', # Must be in PRODUCT_DIR for ASAN bot.
+ }],
+ ['(OS == "mac" or OS == "win") and enable_pepper_cdms==1', {
+ 'type': 'shared_library',
+ }],
+ ['OS == "mac"', {
+ 'xcode_settings': {
+ 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
+ },
+ }]
],
- }],
- ['use_ffmpeg == 1' , {
- 'defines': ['CLEAR_KEY_CDM_USE_FFMPEG_DECODER'],
+ 'defines': ['CDM_IMPLEMENTATION'],
'dependencies': [
- '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
- ],
- 'sources': [
- 'cdm/ppapi/ffmpeg_cdm_audio_decoder.cc',
- 'cdm/ppapi/ffmpeg_cdm_audio_decoder.h',
+ 'media',
+ '../url/url.gyp:url_lib',
+ # Include the following for media::AudioBus.
+ 'shared_memory_support',
+ '<(DEPTH)/base/base.gyp:base',
],
- }],
- ['use_ffmpeg == 1 and use_fake_video_decoder == 0' , {
'sources': [
- 'cdm/ppapi/ffmpeg_cdm_video_decoder.cc',
- 'cdm/ppapi/ffmpeg_cdm_video_decoder.h',
+ 'cdm/ppapi/cdm_file_io_test.cc',
+ 'cdm/ppapi/cdm_file_io_test.h',
+ 'cdm/ppapi/external_clear_key/cdm_video_decoder.cc',
+ 'cdm/ppapi/external_clear_key/cdm_video_decoder.h',
+ 'cdm/ppapi/external_clear_key/clear_key_cdm.cc',
+ 'cdm/ppapi/external_clear_key/clear_key_cdm.h',
+ 'cdm/ppapi/external_clear_key/clear_key_cdm_common.h',
],
- }],
- ['use_libvpx == 1 and use_fake_video_decoder == 0' , {
- 'defines': ['CLEAR_KEY_CDM_USE_LIBVPX_DECODER'],
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [ 4267, ],
+ },
+ {
+ 'target_name': 'clearkeycdmadapter',
+ 'type': 'none',
+ # Check whether the plugin's origin URL is valid.
+ 'defines': ['CHECK_DOCUMENT_URL'],
'dependencies': [
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
- ],
- 'sources': [
- 'cdm/ppapi/libvpx_cdm_video_decoder.cc',
- 'cdm/ppapi/libvpx_cdm_video_decoder.h',
+ '<(DEPTH)/ppapi/ppapi.gyp:ppapi_cpp',
+ 'media_cdm_adapter.gyp:cdmadapter',
+ 'clearkeycdm',
],
- }],
- ['os_posix == 1 and OS != "mac" and enable_pepper_cdms==1', {
- 'type': 'loadable_module', # Must be in PRODUCT_DIR for ASAN bots.
- }],
- ['(OS == "mac" or OS == "win") and enable_pepper_cdms==1', {
- 'type': 'shared_library',
- }],
- ['OS == "mac"', {
- 'xcode_settings': {
- 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
- },
- }]
- ],
- 'defines': ['CDM_IMPLEMENTATION'],
- 'dependencies': [
- 'media',
- # Include the following for media::AudioBus.
- 'shared_memory_support',
- '<(DEPTH)/base/base.gyp:base',
- ],
- 'sources': [
- 'cdm/ppapi/cdm_video_decoder.cc',
- 'cdm/ppapi/cdm_video_decoder.h',
- 'cdm/ppapi/clear_key_cdm.cc',
- 'cdm/ppapi/clear_key_cdm.h',
- 'cdm/ppapi/clear_key_cdm_common.h',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
- {
- 'target_name': 'clearkeycdmadapter',
- 'type': 'none',
- # Check whether the plugin's origin URL is valid.
- 'defines': ['CHECK_DOCUMENT_URL'],
- 'dependencies': [
- '<(DEPTH)/ppapi/ppapi.gyp:ppapi_cpp',
- 'clearkeycdm',
- ],
- 'sources': [
- 'cdm/ppapi/api/content_decryption_module.h',
- 'cdm/ppapi/cdm_adapter.cc',
- 'cdm/ppapi/cdm_adapter.h',
- 'cdm/ppapi/cdm_helpers.cc',
- 'cdm/ppapi/cdm_helpers.h',
- 'cdm/ppapi/cdm_logging.cc',
- 'cdm/ppapi/cdm_logging.h',
- 'cdm/ppapi/cdm_wrapper.h',
- 'cdm/ppapi/linked_ptr.h',
- 'cdm/ppapi/supported_cdm_versions.h',
- ],
- 'conditions': [
- ['os_posix == 1 and OS != "mac" and enable_pepper_cdms==1', {
- 'cflags': ['-fvisibility=hidden'],
- 'type': 'loadable_module',
- # Allow the plugin adapter to find the CDM in the same directory.
- 'ldflags': ['-Wl,-rpath=\$$ORIGIN'],
- 'libraries': [
- # Built by clearkeycdm.
- '<(PRODUCT_DIR)/libclearkeycdm.so',
+ 'conditions': [
+ ['os_posix == 1 and OS != "mac" and enable_pepper_cdms==1', {
+ # Because clearkeycdm has type 'loadable_module' (see comments),
+ # we must explicitly specify this dependency.
+ 'libraries': [
+ # Built by clearkeycdm.
+ '<(PRODUCT_DIR)/libclearkeycdm.so',
+ ],
+ }],
],
- }],
- ['OS == "win" and enable_pepper_cdms==1', {
- 'type': 'shared_library',
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- }],
- ['OS == "mac" and enable_pepper_cdms==1', {
- 'type': 'loadable_module',
- 'product_extension': 'plugin',
- 'xcode_settings': {
- 'OTHER_LDFLAGS': [
- # Not to strip important symbols by -Wl,-dead_strip.
- '-Wl,-exported_symbol,_PPP_GetInterface',
- '-Wl,-exported_symbol,_PPP_InitializeModule',
- '-Wl,-exported_symbol,_PPP_ShutdownModule'
- ],
- 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
- },
- }],
+ },
],
- }
+ }],
],
}
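The CLEAR_KEY_CDM_USE_* defines set in the hunks above select a decoder for clearkeycdm at compile time. A self-contained illustration of that selection pattern follows; the class names are placeholders, not the real clearkeycdm types (the actual factory lives in cdm/ppapi/external_clear_key/cdm_video_decoder.cc):

// Illustrative compile-time decoder selection via the CLEAR_KEY_CDM_USE_*
// defines. All types below are placeholders for the sketch.
#include <memory>

struct CdmVideoDecoder { virtual ~CdmVideoDecoder() {} };
struct FakeVideoDecoder : CdmVideoDecoder {};
struct LibvpxVideoDecoder : CdmVideoDecoder {};
struct FfmpegVideoDecoder : CdmVideoDecoder {};

std::unique_ptr<CdmVideoDecoder> CreateVideoDecoder() {
#if defined(CLEAR_KEY_CDM_USE_FAKE_VIDEO_DECODER)
  return std::unique_ptr<CdmVideoDecoder>(new FakeVideoDecoder);
#elif defined(CLEAR_KEY_CDM_USE_LIBVPX_DECODER)
  return std::unique_ptr<CdmVideoDecoder>(new LibvpxVideoDecoder);
#elif defined(CLEAR_KEY_CDM_USE_FFMPEG_DECODER)
  return std::unique_ptr<CdmVideoDecoder>(new FfmpegVideoDecoder);
#else
  return std::unique_ptr<CdmVideoDecoder>();  // No decoder compiled in.
#endif
}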
diff --git a/chromium/media/media_cdm_adapter.gyp b/chromium/media/media_cdm_adapter.gyp
new file mode 100644
index 00000000000..e71efcc265d
--- /dev/null
+++ b/chromium/media/media_cdm_adapter.gyp
@@ -0,0 +1,67 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file defines a common base target for all CDM adapter implementations.
+# We use 'direct_dependent_settings' to override target type and settings so
+# that all CDM adapter implementations have the correct type and settings
+# automatically.
+#
+# WARNING: Keep the 'cdmadapter' target out of media.gyp. /build/all.gyp:All
+# depends directly on media.gyp:*. If 'cdmadapter' were defined in media.gyp,
+# its 'direct_dependent_settings' would be applied to the 'All' target and bad
+# things would happen, e.g. the type of the 'All' target would become a plugin
+# on Mac.
+{
+ 'conditions': [
+ ['enable_pepper_cdms==1', {
+ 'targets': [
+ {
+ 'target_name': 'cdmadapter',
+ 'type': 'none',
+ 'direct_dependent_settings': {
+ 'sources': [
+ 'cdm/ppapi/api/content_decryption_module.h',
+ 'cdm/ppapi/cdm_adapter.cc',
+ 'cdm/ppapi/cdm_adapter.h',
+ 'cdm/ppapi/cdm_file_io_impl.cc',
+ 'cdm/ppapi/cdm_file_io_impl.h',
+ 'cdm/ppapi/cdm_helpers.cc',
+ 'cdm/ppapi/cdm_helpers.h',
+ 'cdm/ppapi/cdm_logging.cc',
+ 'cdm/ppapi/cdm_logging.h',
+ 'cdm/ppapi/cdm_wrapper.h',
+ 'cdm/ppapi/linked_ptr.h',
+ 'cdm/ppapi/supported_cdm_versions.h',
+ ],
+ 'conditions': [
+ ['os_posix == 1 and OS != "mac"', {
+ 'cflags': ['-fvisibility=hidden'],
+ 'type': 'loadable_module',
+ # Allow the adapter to find the CDM in the same directory.
+ 'ldflags': ['-Wl,-rpath=\$$ORIGIN'],
+ }],
+ ['OS == "win"', {
+ 'type': 'shared_library',
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [ 4267, ],
+ }],
+ ['OS == "mac"', {
+ 'type': 'loadable_module',
+ 'product_extension': 'plugin',
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ # Keep -Wl,-dead_strip from stripping these required symbols.
+ '-Wl,-exported_symbol,_PPP_GetInterface',
+ '-Wl,-exported_symbol,_PPP_InitializeModule',
+ '-Wl,-exported_symbol,_PPP_ShutdownModule'
+ ],
+ 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
+ },
+ }],
+ ],
+ },
+ },
+ ],
+ }],
+ ],
+}
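Regarding the -Wl,-exported_symbol flags carried into the shared cdmadapter target above: a Pepper plugin is loaded through three C entry points declared in ppapi/c/ppp.h, and dead-stripping them would make the adapter unloadable. A minimal stub showing those signatures; a real adapter forwards them into cdm_adapter.cc rather than stubbing them out like this:

// Stub sketch of the three entry points a Pepper plugin must export.
// Signatures follow ppapi/c/ppp.h; the bodies here are placeholders.
#include "ppapi/c/pp_errors.h"
#include "ppapi/c/pp_module.h"
#include "ppapi/c/ppb.h"

extern "C" {

int32_t PPP_InitializeModule(PP_Module module,
                             PPB_GetInterface get_browser_interface) {
  // A real adapter caches |get_browser_interface| for later PPB lookups.
  return PP_OK;
}

const void* PPP_GetInterface(const char* interface_name) {
  // A real adapter returns its PPP interface tables here.
  return NULL;
}

void PPP_ShutdownModule() {}

}  // extern "C"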
diff --git a/chromium/media/media_nacl.gyp b/chromium/media/media_nacl.gyp
new file mode 100644
index 00000000000..2dd4bf9863e
--- /dev/null
+++ b/chromium/media/media_nacl.gyp
@@ -0,0 +1,73 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ '../native_client/build/untrusted.gypi',
+ ],
+ 'conditions': [
+ ['disable_nacl==0 and disable_nacl_untrusted==0', {
+ 'targets': [
+ {
+ 'target_name': 'shared_memory_support_nacl',
+ 'type': 'none',
+ 'variables': {
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libshared_memory_support_nacl.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 1,
+ },
+ 'dependencies': [
+ '../native_client/tools.gyp:prep_toolchain',
+ '../base/base_nacl.gyp:base_nacl',
+ ],
+ 'defines': [
+ 'MEDIA_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'includes': [
+ 'shared_memory_support.gypi',
+ ],
+ 'sources': [
+ '<@(shared_memory_support_sources)',
+ ],
+ }, # end of target 'shared_memory_support_nacl'
+ {
+ 'target_name': 'media_yuv_nacl',
+ 'type': 'none',
+ 'variables': {
+ 'nlib_target': 'libmedia_yuv_nacl.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_pnacl_newlib': 1,
+ },
+ 'dependencies': [
+ '../native_client/tools.gyp:prep_toolchain',
+ ],
+ 'sources': [
+ 'base/media.cc',
+ 'base/media.h',
+ 'base/media_stub.cc',
+ 'base/simd/convert_rgb_to_yuv.h',
+ 'base/simd/convert_rgb_to_yuv_c.cc',
+ 'base/simd/convert_yuv_to_rgb.h',
+ 'base/simd/convert_yuv_to_rgb_c.cc',
+ 'base/simd/filter_yuv.h',
+ 'base/simd/filter_yuv_c.cc',
+ 'base/simd/yuv_to_rgb_table.cc',
+ 'base/simd/yuv_to_rgb_table.h',
+ 'base/yuv_convert.cc',
+ 'base/yuv_convert.h',
+ ],
+ }, # end of target 'media_yuv_nacl'
+ ],
+ }],
+ ],
+}
diff --git a/chromium/media/media_untrusted.gyp b/chromium/media/media_untrusted.gyp
deleted file mode 100644
index 638d401c3ba..00000000000
--- a/chromium/media/media_untrusted.gyp
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'chromium_code': 1,
- },
- 'includes': [
- '../native_client/build/untrusted.gypi',
- ],
- 'conditions': [
- ['disable_nacl==0 and disable_nacl_untrusted==0', {
- 'targets': [
- {
- 'target_name': 'shared_memory_support_untrusted',
- 'type': 'none',
- 'variables': {
- 'nacl_untrusted_build': 1,
- 'nlib_target': 'libshared_memory_support_untrusted.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_irt': 1,
- },
- 'dependencies': [
- '../native_client/tools.gyp:prep_toolchain',
- '../base/base_untrusted.gyp:base_untrusted',
- ],
- 'defines': [
- 'MEDIA_IMPLEMENTATION',
- ],
- 'include_dirs': [
- '..',
- ],
- 'includes': [
- 'shared_memory_support.gypi',
- ],
- 'sources': [
- '<@(shared_memory_support_sources)',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/chromium/media/midi/OWNERS b/chromium/media/midi/OWNERS
new file mode 100644
index 00000000000..278892b25e8
--- /dev/null
+++ b/chromium/media/midi/OWNERS
@@ -0,0 +1,5 @@
+# For the Android port and USB support, yhirano@ is the best reviewer.
+# For the Win32 port, yukawa@ is the best reviewer.
+toyoshim@chromium.org
+yhirano@chromium.org
+yukawa@chromium.org
diff --git a/chromium/media/midi/midi_manager.cc b/chromium/media/midi/midi_manager.cc
index 6d3f1d30b95..c53eef4112c 100644
--- a/chromium/media/midi/midi_manager.cc
+++ b/chromium/media/midi/midi_manager.cc
@@ -5,61 +5,143 @@
#include "media/midi/midi_manager.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/debug/trace_event.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
namespace media {
-#if !defined(OS_MACOSX) && !defined(OS_WIN)
-// TODO(crogers): implement MIDIManager for other platforms.
-MIDIManager* MIDIManager::Create() {
- return NULL;
+MidiManager::MidiManager()
+ : initialized_(false),
+ result_(MIDI_NOT_SUPPORTED) {
}
-#endif
-MIDIManager::MIDIManager()
- : initialized_(false) {
+MidiManager::~MidiManager() {
}
-MIDIManager::~MIDIManager() {
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(USE_ALSA) && \
+ !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+MidiManager* MidiManager::Create() {
+ return new MidiManager;
}
+#endif
+
+void MidiManager::StartSession(MidiManagerClient* client, int client_id) {
+ bool session_is_ready;
+ bool session_needs_initialization = false;
+ bool too_many_pending_clients_exist = false;
+
+ {
+ base::AutoLock auto_lock(lock_);
+ session_is_ready = initialized_;
+ if (!session_is_ready) {
+ // Do not accept a new request if the pending client list contains too
+ // many clients.
+ too_many_pending_clients_exist =
+ pending_clients_.size() >= kMaxPendingClientCount;
+
+ if (!too_many_pending_clients_exist) {
+ // Call StartInitialization() only for the first request.
+ session_needs_initialization = pending_clients_.empty();
+ pending_clients_.insert(std::make_pair(client, client_id));
+ }
+ }
+ }
-bool MIDIManager::StartSession(MIDIManagerClient* client) {
// Lazily initialize the MIDI back-end.
- if (!initialized_)
- initialized_ = Initialize();
+ if (!session_is_ready) {
+ if (session_needs_initialization) {
+ TRACE_EVENT0("midi", "MidiManager::StartInitialization");
+ session_thread_runner_ =
+ base::MessageLoop::current()->message_loop_proxy();
+ StartInitialization();
+ }
+ if (too_many_pending_clients_exist) {
+ // Return an error immediately if there are too many requests.
+ client->CompleteStartSession(client_id, MIDI_INITIALIZATION_ERROR);
+ return;
+ }
+ // CompleteInitialization() will be called asynchronously when platform
+ // dependent initialization is finished.
+ return;
+ }
- if (initialized_) {
- base::AutoLock auto_lock(clients_lock_);
- clients_.insert(client);
+ // Platform dependent initialization was already finished for previously
+ // initialized clients.
+ MidiResult result;
+ {
+ base::AutoLock auto_lock(lock_);
+ if (result_ == MIDI_OK)
+ clients_.insert(client);
+ result = result_;
}
+ client->CompleteStartSession(client_id, result);
+}
- return initialized_;
+void MidiManager::EndSession(MidiManagerClient* client) {
+ base::AutoLock auto_lock(lock_);
+ clients_.erase(client);
+ pending_clients_.erase(client);
}
-void MIDIManager::EndSession(MIDIManagerClient* client) {
- base::AutoLock auto_lock(clients_lock_);
- ClientList::iterator i = clients_.find(client);
- if (i != clients_.end())
- clients_.erase(i);
+void MidiManager::DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ NOTREACHED();
}
-void MIDIManager::AddInputPort(const MIDIPortInfo& info) {
+void MidiManager::StartInitialization() {
+ CompleteInitialization(MIDI_NOT_SUPPORTED);
+}
+
+void MidiManager::CompleteInitialization(MidiResult result) {
+ DCHECK(session_thread_runner_.get());
+ // It is safe to post a task to the IO thread from here because the IO thread
+ // should have stopped if the MidiManager is going to be destructed.
+ session_thread_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManager::CompleteInitializationInternal,
+ base::Unretained(this),
+ result));
+}
+
+void MidiManager::AddInputPort(const MidiPortInfo& info) {
input_ports_.push_back(info);
}
-void MIDIManager::AddOutputPort(const MIDIPortInfo& info) {
+void MidiManager::AddOutputPort(const MidiPortInfo& info) {
output_ports_.push_back(info);
}
-void MIDIManager::ReceiveMIDIData(
+void MidiManager::ReceiveMidiData(
uint32 port_index,
const uint8* data,
size_t length,
double timestamp) {
- base::AutoLock auto_lock(clients_lock_);
+ base::AutoLock auto_lock(lock_);
for (ClientList::iterator i = clients_.begin(); i != clients_.end(); ++i)
- (*i)->ReceiveMIDIData(port_index, data, length, timestamp);
+ (*i)->ReceiveMidiData(port_index, data, length, timestamp);
+}
+
+void MidiManager::CompleteInitializationInternal(MidiResult result) {
+ TRACE_EVENT0("midi", "MidiManager::CompleteInitialization");
+
+ base::AutoLock auto_lock(lock_);
+ DCHECK(clients_.empty());
+ DCHECK(!initialized_);
+ initialized_ = true;
+ result_ = result;
+
+ for (PendingClientMap::iterator it = pending_clients_.begin();
+ it != pending_clients_.end();
+ ++it) {
+ if (result_ == MIDI_OK)
+ clients_.insert(it->first);
+ it->first->CompleteStartSession(it->second, result_);
+ }
+ pending_clients_.clear();
}
} // namespace media
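To make the new session handshake concrete: StartSession() no longer returns a bool; a client passes a client_id and is notified through CompleteStartSession(), synchronously when the back-end is already initialized and asynchronously otherwise. A minimal hypothetical client, not taken from the tree:

// Hypothetical MidiManagerClient; the real consumers live in content/.
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "media/midi/midi_manager.h"

class LoggingMidiClient : public media::MidiManagerClient {
 public:
  virtual ~LoggingMidiClient() {}

  // Invoked on the thread that called StartSession(), immediately if the
  // platform back-end is already initialized, asynchronously otherwise.
  virtual void CompleteStartSession(int client_id,
                                    media::MidiResult result) OVERRIDE {
    VLOG(1) << "session " << client_id << " completed with " << result;
  }

  virtual void ReceiveMidiData(uint32 port_index,
                               const uint8* data,
                               size_t length,
                               double timestamp) OVERRIDE {
    VLOG(1) << length << " byte(s) on input port " << port_index;
  }

  virtual void AccumulateMidiBytesSent(size_t n) OVERRIDE {}
};

// Usage sketch:
//   manager->StartSession(&client, 1);  // CompleteStartSession() follows.
//   manager->EndSession(&client);       // Safe whether the session was
//                                       // still pending or fully started.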
diff --git a/chromium/media/midi/midi_manager.h b/chromium/media/midi/midi_manager.h
index f42a40de769..9fd7a21ed38 100644
--- a/chromium/media/midi/midi_manager.h
+++ b/chromium/media/midi/midi_manager.h
@@ -5,59 +5,78 @@
#ifndef MEDIA_MIDI_MIDI_MANAGER_H_
#define MEDIA_MIDI_MIDI_MANAGER_H_
+#include <map>
#include <set>
#include <vector>
#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
+#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/midi/midi_port_info.h"
+#include "media/midi/midi_result.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
namespace media {
-// A MIDIManagerClient registers with the MIDIManager to receive MIDI data.
-// See MIDIManager::RequestAccess() and MIDIManager::ReleaseAccess()
+// A MidiManagerClient registers with the MidiManager to receive MIDI data.
+// See MidiManager::RequestAccess() and MidiManager::ReleaseAccess()
// for details.
-class MEDIA_EXPORT MIDIManagerClient {
+class MEDIA_EXPORT MidiManagerClient {
public:
- virtual ~MIDIManagerClient() {}
+ virtual ~MidiManagerClient() {}
+
+ // CompleteStartSession() is called when platform dependent preparation is
+ // finished.
+ virtual void CompleteStartSession(int client_id, MidiResult result) = 0;
- // ReceiveMIDIData() is called when MIDI data has been received from the
+ // ReceiveMidiData() is called when MIDI data has been received from the
// MIDI system.
// |port_index| represents the specific input port from input_ports().
// |data| represents a series of bytes encoding one or more MIDI messages.
// |length| is the number of bytes in |data|.
// |timestamp| is the time the data was received, in seconds.
- virtual void ReceiveMIDIData(uint32 port_index,
+ virtual void ReceiveMidiData(uint32 port_index,
const uint8* data,
size_t length,
double timestamp) = 0;
- // AccumulateMIDIBytesSent() is called to acknowledge when bytes have
+ // AccumulateMidiBytesSent() is called to acknowledge when bytes have
// successfully been sent to the hardware.
// This happens as a result of the client having previously called
- // MIDIManager::DispatchSendMIDIData().
- virtual void AccumulateMIDIBytesSent(size_t n) = 0;
+ // MidiManager::DispatchSendMidiData().
+ virtual void AccumulateMidiBytesSent(size_t n) = 0;
};
// Manages access to all MIDI hardware.
-class MEDIA_EXPORT MIDIManager {
+class MEDIA_EXPORT MidiManager {
public:
- static MIDIManager* Create();
+ static const size_t kMaxPendingClientCount = 128;
- MIDIManager();
- virtual ~MIDIManager();
+ MidiManager();
+ virtual ~MidiManager();
+
+ // The constructor and the destructor will be called on the CrBrowserMain
+ // thread.
+ static MidiManager* Create();
// A client calls StartSession() to receive and send MIDI data.
// If the session is ready to start, the MIDI system is lazily initialized
// and the client is registered to receive MIDI data.
- // Returns |true| if the session succeeds to start.
- bool StartSession(MIDIManagerClient* client);
+ // CompleteStartSession() is called with MIDI_OK if the session is started.
+ // Otherwise CompleteStartSession() is called with a proper MidiResult code.
+ // StartSession() and EndSession() can be called on the Chrome_IOThread.
+ // CompleteStartSession() will be invoked on the same Chrome_IOThread.
+ void StartSession(MidiManagerClient* client, int client_id);
- // A client calls ReleaseSession() to stop receiving MIDI data.
- void EndSession(MIDIManagerClient* client);
+ // A client calls EndSession() to stop receiving MIDI data.
+ void EndSession(MidiManagerClient* client);
- // DispatchSendMIDIData() is called when MIDI data should be sent to the MIDI
+ // DispatchSendMidiData() is called when MIDI data should be sent to the MIDI
// system.
// This method is supposed to return immediately and should not block.
// |port_index| represents the specific output port from output_ports().
@@ -65,47 +84,96 @@ class MEDIA_EXPORT MIDIManager {
// |length| is the number of bytes in |data|.
// |timestamp| is the time to send the data, in seconds. A value of 0
// means send "now" or as soon as possible.
- virtual void DispatchSendMIDIData(MIDIManagerClient* client,
+ // The default implementation is for unsupported platforms.
+ virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
- double timestamp) = 0;
+ double timestamp);
// input_ports() is a list of MIDI ports for receiving MIDI data.
// Each individual port in this list can be identified by its
// integer index into this list.
- const MIDIPortInfoList& input_ports() { return input_ports_; }
+ const MidiPortInfoList& input_ports() const { return input_ports_; }
// output_ports() is a list of MIDI ports for sending MIDI data.
// Each individual port in this list can be identified by its
// integer index into this list.
- const MIDIPortInfoList& output_ports() { return output_ports_; }
+ const MidiPortInfoList& output_ports() const { return output_ports_; }
protected:
- // Initializes the MIDI system, returning |true| on success.
- virtual bool Initialize() = 0;
-
- void AddInputPort(const MIDIPortInfo& info);
- void AddOutputPort(const MIDIPortInfo& info);
+ friend class MidiManagerUsb;
+
+ // Initializes the platform dependent MIDI system. The MidiManager class
+ // has a default implementation that synchronously calls
+ // CompleteInitialization() with MIDI_NOT_SUPPORTED on the caller thread.
+ // A derived class for a specific platform should override this method.
+ // This method is called on Chrome_IOThread thread inside StartSession().
+ // Platform dependent initialization can be processed synchronously or
+ // asynchronously. When the initialization is completed,
+ // CompleteInitialization() should be called with |result|.
+ // |result| should be MIDI_OK on success, otherwise a proper MidiResult.
+ virtual void StartInitialization();
+
+ // Called from a platform dependent implementation of StartInitialization().
+ // It invokes CompleteInitializationInternal() on the thread that calls
+ // StartSession() and distributes |result| to MidiManagerClient objects in
+ // |pending_clients_|.
+ void CompleteInitialization(MidiResult result);
+
+ void AddInputPort(const MidiPortInfo& info);
+ void AddOutputPort(const MidiPortInfo& info);
// Dispatches to all clients.
- void ReceiveMIDIData(uint32 port_index,
+ // TODO(toyoshim): Fix the mac implementation to use
+ // |ReceiveMidiData(..., base::TimeTicks)|.
+ void ReceiveMidiData(uint32 port_index,
const uint8* data,
size_t length,
double timestamp);
- bool initialized_;
+ void ReceiveMidiData(uint32 port_index,
+ const uint8* data,
+ size_t length,
+ base::TimeTicks time) {
+ ReceiveMidiData(port_index, data, length,
+ (time - base::TimeTicks()).InSecondsF());
+ }
+
+ size_t clients_size_for_testing() const { return clients_.size(); }
+ size_t pending_clients_size_for_testing() const {
+ return pending_clients_.size();
+ }
+
+ private:
+ void CompleteInitializationInternal(MidiResult result);
// Keeps track of all clients who wish to receive MIDI data.
- typedef std::set<MIDIManagerClient*> ClientList;
+ typedef std::set<MidiManagerClient*> ClientList;
ClientList clients_;
- // Protects access to our clients.
- base::Lock clients_lock_;
+ // Keeps track of all clients who are waiting for CompleteStartSession().
+ typedef std::multimap<MidiManagerClient*, int> PendingClientMap;
+ PendingClientMap pending_clients_;
+
+ // Keeps a SingleThreadTaskRunner of the thread that calls StartSession in
+ // order to invoke CompleteStartSession() on the thread.
+ scoped_refptr<base::SingleThreadTaskRunner> session_thread_runner_;
+
+ // True once platform dependent initialization has completed.
+ bool initialized_;
+
+ // Holds the platform dependent initialization result once initialization
+ // completes; until then it holds MIDI_NOT_SUPPORTED.
+ MidiResult result_;
+
+ // Protects access to |clients_|, |pending_clients_|, |initialized_|, and
+ // |result_|.
+ base::Lock lock_;
- MIDIPortInfoList input_ports_;
- MIDIPortInfoList output_ports_;
+ MidiPortInfoList input_ports_;
+ MidiPortInfoList output_ports_;
- DISALLOW_COPY_AND_ASSIGN(MIDIManager);
+ DISALLOW_COPY_AND_ASSIGN(MidiManager);
};
} // namespace media
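The new ReceiveMidiData(..., base::TimeTicks) overload in the header above converts a monotonic tick value to the interface's double-seconds convention by subtracting the TimeTicks epoch. The same conversion expressed with std::chrono, as a standalone sketch with steady_clock standing in for base::TimeTicks:

#include <chrono>

// Standalone equivalent of (time - base::TimeTicks()).InSecondsF():
// seconds elapsed since the clock's epoch, as a double.
double TicksToSeconds(std::chrono::steady_clock::time_point time) {
  return std::chrono::duration<double>(time.time_since_epoch()).count();
}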
diff --git a/chromium/media/midi/midi_manager_alsa.cc b/chromium/media/midi/midi_manager_alsa.cc
new file mode 100644
index 00000000000..a21f8919d37
--- /dev/null
+++ b/chromium/media/midi/midi_manager_alsa.cc
@@ -0,0 +1,452 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager_alsa.h"
+
+#include <alsa/asoundlib.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <string>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "media/midi/midi_port_info.h"
+
+namespace media {
+
+namespace {
+
+// Per-output buffer. This can be smaller, but then large sysex messages
+// will be (harmlessly) split across multiple seq events. This should
+// not have any real practical effect, except perhaps to slightly reorder
+// realtime messages with respect to sysex.
+const size_t kSendBufferSize = 256;
+
+// Constants for the capabilities we search for in inputs and outputs.
+// See http://www.alsa-project.org/alsa-doc/alsa-lib/seq.html.
+const unsigned int kRequiredInputPortCaps =
+ SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_SUBS_READ;
+const unsigned int kRequiredOutputPortCaps =
+ SND_SEQ_PORT_CAP_WRITE | SND_SEQ_PORT_CAP_SUBS_WRITE;
+
+int AddrToInt(const snd_seq_addr_t* addr) {
+ return (addr->client << 8) | addr->port;
+}
+
+class CardInfo {
+ public:
+ CardInfo(const std::string name, const std::string manufacturer,
+ const std::string driver)
+ : name_(name), manufacturer_(manufacturer), driver_(driver) {
+ }
+ const std::string name_;
+ const std::string manufacturer_;
+ const std::string driver_;
+};
+
+} // namespace
+
+MidiManagerAlsa::MidiManagerAlsa()
+ : in_client_(NULL),
+ out_client_(NULL),
+ out_client_id_(-1),
+ in_port_(-1),
+ decoder_(NULL),
+ send_thread_("MidiSendThread"),
+ event_thread_("MidiEventThread"),
+ event_thread_shutdown_(false) {
+ // Initialize decoder.
+ snd_midi_event_new(0, &decoder_);
+ snd_midi_event_no_status(decoder_, 1);
+}
+
+void MidiManagerAlsa::StartInitialization() {
+ // TODO(agoode): Move off I/O thread. See http://crbug.com/374341.
+
+ // Create client handles.
+ int err = snd_seq_open(&in_client_, "hw", SND_SEQ_OPEN_INPUT, 0);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_open fails: " << snd_strerror(err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+ int in_client_id = snd_seq_client_id(in_client_);
+ err = snd_seq_open(&out_client_, "hw", SND_SEQ_OPEN_OUTPUT, 0);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_open fails: " << snd_strerror(err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+ out_client_id_ = snd_seq_client_id(out_client_);
+
+ // Name the clients.
+ err = snd_seq_set_client_name(in_client_, "Chrome (input)");
+ if (err != 0) {
+ VLOG(1) << "snd_seq_set_client_name fails: " << snd_strerror(err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+ err = snd_seq_set_client_name(out_client_, "Chrome (output)");
+ if (err != 0) {
+ VLOG(1) << "snd_seq_set_client_name fails: " << snd_strerror(err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+
+ // Create input port.
+ in_port_ = snd_seq_create_simple_port(in_client_, NULL,
+ SND_SEQ_PORT_CAP_WRITE |
+ SND_SEQ_PORT_CAP_NO_EXPORT,
+ SND_SEQ_PORT_TYPE_MIDI_GENERIC |
+ SND_SEQ_PORT_TYPE_APPLICATION);
+ if (in_port_ < 0) {
+ VLOG(1) << "snd_seq_create_simple_port fails: " << snd_strerror(in_port_);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+
+ // Subscribe to the announce port.
+ snd_seq_port_subscribe_t* subs;
+ snd_seq_port_subscribe_alloca(&subs);
+ snd_seq_addr_t announce_sender;
+ snd_seq_addr_t announce_dest;
+ announce_sender.client = SND_SEQ_CLIENT_SYSTEM;
+ announce_sender.port = SND_SEQ_PORT_SYSTEM_ANNOUNCE;
+ announce_dest.client = in_client_id;
+ announce_dest.port = in_port_;
+ snd_seq_port_subscribe_set_sender(subs, &announce_sender);
+ snd_seq_port_subscribe_set_dest(subs, &announce_dest);
+ err = snd_seq_subscribe_port(in_client_, subs);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_subscribe_port on the announce port fails: "
+ << snd_strerror(err);
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ }
+
+ // Use a heuristic to extract the list of manufacturers for the hardware MIDI
+ // devices. This won't work for all devices. It is also brittle until
+ // hotplug is implemented. (See http://crbug.com/279097.)
+ // TODO(agoode): Make manufacturer extraction simple and reliable.
+ // http://crbug.com/377250.
+ ScopedVector<CardInfo> cards;
+ snd_ctl_card_info_t* card;
+ snd_rawmidi_info_t* midi_out;
+ snd_rawmidi_info_t* midi_in;
+ snd_ctl_card_info_alloca(&card);
+ snd_rawmidi_info_alloca(&midi_out);
+ snd_rawmidi_info_alloca(&midi_in);
+ for (int index = -1; !snd_card_next(&index) && index >= 0; ) {
+ const std::string id = base::StringPrintf("hw:CARD=%i", index);
+ snd_ctl_t* handle;
+ int err = snd_ctl_open(&handle, id.c_str(), 0);
+ if (err != 0) {
+ VLOG(1) << "snd_ctl_open fails: " << snd_strerror(err);
+ continue;
+ }
+ err = snd_ctl_card_info(handle, card);
+ if (err != 0) {
+ VLOG(1) << "snd_ctl_card_info fails: " << snd_strerror(err);
+ snd_ctl_close(handle);
+ continue;
+ }
+ // Enumerate any rawmidi devices (not subdevices) and extract CardInfo.
+ for (int device = -1;
+ !snd_ctl_rawmidi_next_device(handle, &device) && device >= 0; ) {
+ bool output;
+ bool input;
+ snd_rawmidi_info_set_device(midi_out, device);
+ snd_rawmidi_info_set_subdevice(midi_out, 0);
+ snd_rawmidi_info_set_stream(midi_out, SND_RAWMIDI_STREAM_OUTPUT);
+ output = snd_ctl_rawmidi_info(handle, midi_out) == 0;
+ snd_rawmidi_info_set_device(midi_in, device);
+ snd_rawmidi_info_set_subdevice(midi_in, 0);
+ snd_rawmidi_info_set_stream(midi_in, SND_RAWMIDI_STREAM_INPUT);
+ input = snd_ctl_rawmidi_info(handle, midi_in) == 0;
+ if (!output && !input)
+ continue;
+
+ snd_rawmidi_info_t* midi = output ? midi_out : midi_in;
+ const std::string name = snd_rawmidi_info_get_name(midi);
+ // We assume that the card longname is in the format
+ // "<manufacturer> <name> at <bus>". Otherwise, we give up on detecting
+ // a manufacturer name here.
+ std::string manufacturer;
+ const std::string card_name = snd_ctl_card_info_get_longname(card);
+ size_t at_index = card_name.rfind(" at ");
+ if (std::string::npos != at_index) {
+ size_t name_index = card_name.rfind(name, at_index - 1);
+ if (std::string::npos != name_index)
+ manufacturer = card_name.substr(0, name_index - 1);
+ }
+ const std::string driver = snd_ctl_card_info_get_driver(card);
+ cards.push_back(new CardInfo(name, manufacturer, driver));
+ }
+ }
+
+ // Enumerate all ports in all clients.
+ snd_seq_client_info_t* client_info;
+ snd_seq_client_info_alloca(&client_info);
+ snd_seq_port_info_t* port_info;
+ snd_seq_port_info_alloca(&port_info);
+
+ snd_seq_client_info_set_client(client_info, -1);
+ // Enumerate clients.
+ uint32 current_input = 0;
+ unsigned int current_card = 0;
+ while (!snd_seq_query_next_client(in_client_, client_info)) {
+ int client_id = snd_seq_client_info_get_client(client_info);
+ if ((client_id == in_client_id) || (client_id == out_client_id_)) {
+ // Skip our own clients.
+ continue;
+ }
+ const std::string client_name = snd_seq_client_info_get_name(client_info);
+ snd_seq_port_info_set_client(port_info, client_id);
+ snd_seq_port_info_set_port(port_info, -1);
+
+ std::string manufacturer;
+ std::string driver;
+ // In the current Alsa kernel implementation, hardware clients match the
+ // cards in the same order.
+ if ((snd_seq_client_info_get_type(client_info) == SND_SEQ_KERNEL_CLIENT) &&
+ (current_card < cards.size())) {
+ const CardInfo* info = cards[current_card];
+ if (info->name_ == client_name) {
+ manufacturer = info->manufacturer_;
+ driver = info->driver_;
+ current_card++;
+ }
+ }
+ // Enumerate ports.
+ while (!snd_seq_query_next_port(in_client_, port_info)) {
+ unsigned int port_type = snd_seq_port_info_get_type(port_info);
+ if (port_type & SND_SEQ_PORT_TYPE_MIDI_GENERIC) {
+ const snd_seq_addr_t* addr = snd_seq_port_info_get_addr(port_info);
+ const std::string name = snd_seq_port_info_get_name(port_info);
+ const std::string id = base::StringPrintf("%d:%d %s",
+ addr->client,
+ addr->port,
+ name.c_str());
+ std::string version;
+ if (driver != "") {
+ version = driver + " / ";
+ }
+ version += base::StringPrintf("ALSA library version %d.%d.%d",
+ SND_LIB_MAJOR,
+ SND_LIB_MINOR,
+ SND_LIB_SUBMINOR);
+ unsigned int caps = snd_seq_port_info_get_capability(port_info);
+ if ((caps & kRequiredInputPortCaps) == kRequiredInputPortCaps) {
+ // Subscribe to this port.
+ const snd_seq_addr_t* sender = snd_seq_port_info_get_addr(port_info);
+ snd_seq_addr_t dest;
+ dest.client = snd_seq_client_id(in_client_);
+ dest.port = in_port_;
+ snd_seq_port_subscribe_set_sender(subs, sender);
+ snd_seq_port_subscribe_set_dest(subs, &dest);
+ err = snd_seq_subscribe_port(in_client_, subs);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_subscribe_port fails: " << snd_strerror(err);
+ } else {
+ source_map_[AddrToInt(sender)] = current_input++;
+ AddInputPort(MidiPortInfo(id, manufacturer, name, version));
+ }
+ }
+ if ((caps & kRequiredOutputPortCaps) == kRequiredOutputPortCaps) {
+ // Create a port for us to send on.
+ int out_port =
+ snd_seq_create_simple_port(out_client_, NULL,
+ SND_SEQ_PORT_CAP_READ |
+ SND_SEQ_PORT_CAP_NO_EXPORT,
+ SND_SEQ_PORT_TYPE_MIDI_GENERIC |
+ SND_SEQ_PORT_TYPE_APPLICATION);
+ if (out_port < 0) {
+ VLOG(1) << "snd_seq_create_simple_port fails: "
+ << snd_strerror(out_port);
+ // Skip this output port for now.
+ continue;
+ }
+
+ // Activate port subscription.
+ snd_seq_addr_t sender;
+ const snd_seq_addr_t* dest = snd_seq_port_info_get_addr(port_info);
+ sender.client = snd_seq_client_id(out_client_);
+ sender.port = out_port;
+ snd_seq_port_subscribe_set_sender(subs, &sender);
+ snd_seq_port_subscribe_set_dest(subs, dest);
+ err = snd_seq_subscribe_port(out_client_, subs);
+ if (err != 0) {
+ VLOG(1) << "snd_seq_subscribe_port fails: " << snd_strerror(err);
+ snd_seq_delete_simple_port(out_client_, out_port);
+ } else {
+ snd_midi_event_t* encoder;
+ snd_midi_event_new(kSendBufferSize, &encoder);
+ encoders_.push_back(encoder);
+ out_ports_.push_back(out_port);
+ AddOutputPort(MidiPortInfo(id, manufacturer, name, version));
+ }
+ }
+ }
+ }
+ }
+
+ event_thread_.Start();
+ event_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerAlsa::EventReset, base::Unretained(this)));
+
+ CompleteInitialization(MIDI_OK);
+}
+
+MidiManagerAlsa::~MidiManagerAlsa() {
+ // Tell the event thread it will soon be time to shut down. This gives
+ // us assurance the thread will stop in case the SND_SEQ_EVENT_CLIENT_EXIT
+ // message is lost.
+ {
+ base::AutoLock lock(shutdown_lock_);
+ event_thread_shutdown_ = true;
+ }
+
+ // Stop the send thread.
+ send_thread_.Stop();
+
+ // Close the out client. This will trigger the event thread to stop,
+ // because of SND_SEQ_EVENT_CLIENT_EXIT.
+ if (out_client_)
+ snd_seq_close(out_client_);
+
+ // Wait for the event thread to stop.
+ event_thread_.Stop();
+
+ // Close the in client.
+ if (in_client_)
+ snd_seq_close(in_client_);
+
+ // Free the decoder.
+ snd_midi_event_free(decoder_);
+
+ // Free the encoders.
+ for (EncoderList::iterator i = encoders_.begin(); i != encoders_.end(); ++i)
+ snd_midi_event_free(*i);
+}
+
+void MidiManagerAlsa::SendMidiData(uint32 port_index,
+ const std::vector<uint8>& data) {
+ DCHECK(send_thread_.message_loop_proxy()->BelongsToCurrentThread());
+
+ snd_midi_event_t* encoder = encoders_[port_index];
+ for (unsigned int i = 0; i < data.size(); i++) {
+ snd_seq_event_t event;
+ int result = snd_midi_event_encode_byte(encoder, data[i], &event);
+ if (result == 1) {
+ // Full event, send it.
+ snd_seq_ev_set_source(&event, out_ports_[port_index]);
+ snd_seq_ev_set_subs(&event);
+ snd_seq_ev_set_direct(&event);
+ snd_seq_event_output_direct(out_client_, &event);
+ }
+ }
+}
+
+void MidiManagerAlsa::DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ if (out_ports_.size() <= port_index)
+ return;
+
+ // Starting the send thread lazily here is not correct yet. http://crbug.com/374341.
+ if (!send_thread_.IsRunning())
+ send_thread_.Start();
+
+ base::TimeDelta delay;
+ if (timestamp != 0.0) {
+ base::TimeTicks time_to_send =
+ base::TimeTicks() + base::TimeDelta::FromMicroseconds(
+ timestamp * base::Time::kMicrosecondsPerSecond);
+ delay = std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
+ }
+
+ send_thread_.message_loop()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerAlsa::SendMidiData, base::Unretained(this),
+ port_index, data), delay);
+
+ // Acknowledge send.
+ send_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerClient::AccumulateMidiBytesSent,
+ base::Unretained(client), data.size()));
+}
+
+void MidiManagerAlsa::EventReset() {
+ event_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerAlsa::EventLoop, base::Unretained(this)));
+}
+
+void MidiManagerAlsa::EventLoop() {
+ // Read available incoming MIDI data.
+ snd_seq_event_t* event;
+ int err = snd_seq_event_input(in_client_, &event);
+ double timestamp =
+ (base::TimeTicks::HighResNow() - base::TimeTicks()).InSecondsF();
+ if (err == -ENOSPC) {
+ VLOG(1) << "snd_seq_event_input detected buffer overrun";
+
+ // We've lost events: check another way to see if we need to shut down.
+ base::AutoLock lock(shutdown_lock_);
+ if (event_thread_shutdown_) {
+ return;
+ }
+ } else if (err < 0) {
+ VLOG(1) << "snd_seq_event_input fails: " << snd_strerror(err);
+ return;
+ } else {
+ // Disconnection of the out client signals that it is time to shut down.
+ if (event->source.client == SND_SEQ_CLIENT_SYSTEM &&
+ event->source.port == SND_SEQ_PORT_SYSTEM_ANNOUNCE &&
+ event->type == SND_SEQ_EVENT_CLIENT_EXIT &&
+ event->data.addr.client == out_client_id_) {
+ return;
+ }
+
+ std::map<int, uint32>::iterator source_it =
+ source_map_.find(AddrToInt(&event->source));
+ if (source_it != source_map_.end()) {
+ uint32 source = source_it->second;
+ if (event->type == SND_SEQ_EVENT_SYSEX) {
+ // Sysex is special: its payload is variable length and stored out of line.
+ ReceiveMidiData(source, static_cast<const uint8*>(event->data.ext.ptr),
+ event->data.ext.len,
+ timestamp);
+ } else {
+ // Otherwise, decode the event and forward the resulting bytes.
+ unsigned char buf[12];
+ long count = snd_midi_event_decode(decoder_, buf, sizeof(buf), event);
+ if (count <= 0) {
+ if (count != -ENOENT) {
+ // ENOENT means that it's not a MIDI message, which is not an
+ // error, but other negative values are errors for us.
+ VLOG(1) << "snd_midi_event_decoder fails " << snd_strerror(count);
+ }
+ } else {
+ ReceiveMidiData(source, buf, count, timestamp);
+ }
+ }
+ }
+ }
+
+ // Schedule the next poll.
+ event_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MidiManagerAlsa::EventLoop, base::Unretained(this)));
+}
+
+MidiManager* MidiManager::Create() {
+ return new MidiManagerAlsa();
+}
+
+} // namespace media
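
For reference, the encoder round trip performed by SendMidiData() above can be
exercised in isolation. The following is a minimal sketch, not part of the
patch: it assumes an already-configured snd_seq_t* handle and source port, and
the 256-byte buffer size is an arbitrary example value.

    // Encode raw MIDI bytes into ALSA sequencer events and send them directly.
    #include <alsa/asoundlib.h>

    void EncodeAndSendMidi(snd_seq_t* seq, int src_port,
                           const unsigned char* data, size_t size) {
      snd_midi_event_t* encoder = NULL;
      if (snd_midi_event_new(256, &encoder) < 0)
        return;
      for (size_t i = 0; i < size; ++i) {
        snd_seq_event_t event;
        // Returns 1 once a complete event has been assembled.
        if (snd_midi_event_encode_byte(encoder, data[i], &event) == 1) {
          snd_seq_ev_set_source(&event, src_port);
          snd_seq_ev_set_subs(&event);    // Deliver to all subscribers.
          snd_seq_ev_set_direct(&event);  // Skip the sequencer queue.
          snd_seq_event_output_direct(seq, &event);
        }
      }
      snd_midi_event_free(encoder);
    }
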
diff --git a/chromium/media/midi/midi_manager_alsa.h b/chromium/media/midi/midi_manager_alsa.h
new file mode 100644
index 00000000000..c523bb91f54
--- /dev/null
+++ b/chromium/media/midi/midi_manager_alsa.h
@@ -0,0 +1,69 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_MANAGER_ALSA_H_
+#define MEDIA_MIDI_MIDI_MANAGER_ALSA_H_
+
+#include <alsa/asoundlib.h>
+#include <map>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "media/midi/midi_manager.h"
+
+namespace media {
+
+class MidiManagerAlsa : public MidiManager {
+ public:
+ MidiManagerAlsa();
+ virtual ~MidiManagerAlsa();
+
+ // MidiManager implementation.
+ virtual void StartInitialization() OVERRIDE;
+ virtual void DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) OVERRIDE;
+
+ private:
+ // An internal callback that runs on MidiSendThread.
+ void SendMidiData(uint32 port_index,
+ const std::vector<uint8>& data);
+
+ void EventReset();
+ void EventLoop();
+
+ // Alsa seq handles.
+ snd_seq_t* in_client_;
+ snd_seq_t* out_client_;
+ int out_client_id_;
+
+ // One input port, many output ports.
+ int in_port_;
+ std::vector<int> out_ports_;
+
+ // Mapping from Alsa client:port to our index.
+ typedef std::map<int, uint32> SourceMap;
+ SourceMap source_map_;
+
+ // Alsa event <-> MIDI coders.
+ snd_midi_event_t* decoder_;
+ typedef std::vector<snd_midi_event_t*> EncoderList;
+ EncoderList encoders_;
+
+ base::Thread send_thread_;
+ base::Thread event_thread_;
+
+ bool event_thread_shutdown_; // guarded by shutdown_lock_
+ base::Lock shutdown_lock_; // guards event_thread_shutdown_
+
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerAlsa);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_MANAGER_ALSA_H_
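
The timestamp handling in DispatchSendMidiData() above converts a Web MIDI
timestamp (seconds on the base::TimeTicks timeline) into a posting delay. A
standalone sketch of that arithmetic, assuming only base/time and <algorithm>:

    #include <algorithm>
    #include "base/time/time.h"

    // A timestamp of 0.0 conventionally means "send immediately".
    base::TimeDelta DelayFromTimestamp(double timestamp_seconds) {
      if (timestamp_seconds == 0.0)
        return base::TimeDelta();
      base::TimeTicks time_to_send =
          base::TimeTicks() + base::TimeDelta::FromMicroseconds(
              timestamp_seconds * base::Time::kMicrosecondsPerSecond);
      // Clamp to zero so timestamps already in the past are sent right away.
      return std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
    }
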
diff --git a/chromium/media/midi/midi_manager_android.cc b/chromium/media/midi/midi_manager_android.cc
new file mode 100644
index 00000000000..b8385fb0f48
--- /dev/null
+++ b/chromium/media/midi/midi_manager_android.cc
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/midi/midi_manager_usb.h"
+#include "media/midi/usb_midi_device_factory_android.h"
+
+namespace media {
+
+MidiManager* MidiManager::Create() {
+ return new MidiManagerUsb(
+ scoped_ptr<UsbMidiDevice::Factory>(new UsbMidiDeviceFactoryAndroid));
+}
+
+} // namespace media
diff --git a/chromium/media/midi/midi_manager_mac.cc b/chromium/media/midi/midi_manager_mac.cc
index a36d1debe13..c1302e6c802 100644
--- a/chromium/media/midi/midi_manager_mac.cc
+++ b/chromium/media/midi/midi_manager_mac.cc
@@ -6,7 +6,7 @@
#include <string>
-#include "base/debug/trace_event.h"
+#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/sys_string_conversions.h"
@@ -23,32 +23,27 @@ using std::string;
namespace media {
-MIDIManager* MIDIManager::Create() {
- return new MIDIManagerMac();
+MidiManager* MidiManager::Create() {
+ return new MidiManagerMac();
}
-MIDIManagerMac::MIDIManagerMac()
+MidiManagerMac::MidiManagerMac()
: midi_client_(0),
coremidi_input_(0),
coremidi_output_(0),
packet_list_(NULL),
midi_packet_(NULL),
- send_thread_("MIDISendThread") {
+ send_thread_("MidiSendThread") {
}
-bool MIDIManagerMac::Initialize() {
- TRACE_EVENT0("midi", "MIDIManagerMac::Initialize");
-
+void MidiManagerMac::StartInitialization() {
// CoreMIDI registration.
midi_client_ = 0;
- OSStatus result = MIDIClientCreate(
- CFSTR("Google Chrome"),
- NULL,
- NULL,
- &midi_client_);
+ OSStatus result =
+ MIDIClientCreate(CFSTR("Chrome"), NULL, NULL, &midi_client_);
if (result != noErr)
- return false;
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
coremidi_input_ = 0;
@@ -56,18 +51,18 @@ bool MIDIManagerMac::Initialize() {
result = MIDIInputPortCreate(
midi_client_,
CFSTR("MIDI Input"),
- ReadMIDIDispatch,
+ ReadMidiDispatch,
this,
&coremidi_input_);
if (result != noErr)
- return false;
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
result = MIDIOutputPortCreate(
midi_client_,
CFSTR("MIDI Output"),
&coremidi_output_);
if (result != noErr)
- return false;
+ return CompleteInitialization(MIDI_INITIALIZATION_ERROR);
uint32 destination_count = MIDIGetNumberOfDestinations();
destinations_.resize(destination_count);
@@ -79,7 +74,7 @@ bool MIDIManagerMac::Initialize() {
// Cache to avoid any possible overhead in calling MIDIGetDestination().
destinations_[i] = destination;
- MIDIPortInfo info = GetPortInfoFromEndpoint(destination);
+ MidiPortInfo info = GetPortInfoFromEndpoint(destination);
AddOutputPort(info);
}
@@ -94,18 +89,17 @@ bool MIDIManagerMac::Initialize() {
// Keep track of all sources (known as inputs in Web MIDI API terminology).
source_map_[src] = i;
- MIDIPortInfo info = GetPortInfoFromEndpoint(src);
+ MidiPortInfo info = GetPortInfoFromEndpoint(src);
AddInputPort(info);
}
- // TODO(crogers): Fix the memory management here!
packet_list_ = reinterpret_cast<MIDIPacketList*>(midi_buffer_);
midi_packet_ = MIDIPacketListInit(packet_list_);
- return true;
+ CompleteInitialization(MIDI_OK);
}
-void MIDIManagerMac::DispatchSendMIDIData(MIDIManagerClient* client,
+void MidiManagerMac::DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) {
@@ -115,11 +109,11 @@ void MIDIManagerMac::DispatchSendMIDIData(MIDIManagerClient* client,
// OK to use base::Unretained(this) since we join to thread in dtor().
send_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&MIDIManagerMac::SendMIDIData, base::Unretained(this),
+ base::Bind(&MidiManagerMac::SendMidiData, base::Unretained(this),
client, port_index, data, timestamp));
}
-MIDIManagerMac::~MIDIManagerMac() {
+MidiManagerMac::~MidiManagerMac() {
// Wait for the termination of |send_thread_| before disposing MIDI ports.
send_thread_.Stop();
@@ -130,10 +124,10 @@ MIDIManagerMac::~MIDIManagerMac() {
}
// static
-void MIDIManagerMac::ReadMIDIDispatch(const MIDIPacketList* packet_list,
+void MidiManagerMac::ReadMidiDispatch(const MIDIPacketList* packet_list,
void* read_proc_refcon,
void* src_conn_refcon) {
- MIDIManagerMac* manager = static_cast<MIDIManagerMac*>(read_proc_refcon);
+ MidiManagerMac* manager = static_cast<MidiManagerMac*>(read_proc_refcon);
#if __LP64__
MIDIEndpointRef source = reinterpret_cast<uintptr_t>(src_conn_refcon);
#else
@@ -141,10 +135,10 @@ void MIDIManagerMac::ReadMIDIDispatch(const MIDIPacketList* packet_list,
#endif
// Dispatch to class method.
- manager->ReadMIDI(source, packet_list);
+ manager->ReadMidi(source, packet_list);
}
-void MIDIManagerMac::ReadMIDI(MIDIEndpointRef source,
+void MidiManagerMac::ReadMidi(MIDIEndpointRef source,
const MIDIPacketList* packet_list) {
// Lookup the port index based on the source.
SourceMap::iterator j = source_map_.find(source);
@@ -158,7 +152,7 @@ void MIDIManagerMac::ReadMIDI(MIDIEndpointRef source,
const MIDIPacket &packet = packet_list->packet[i];
double timestamp_seconds = MIDITimeStampToSeconds(packet.timeStamp);
- ReceiveMIDIData(
+ ReceiveMidiData(
port_index,
packet.data,
packet.length,
@@ -166,7 +160,7 @@ void MIDIManagerMac::ReadMIDI(MIDIEndpointRef source,
}
}
-void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
+void MidiManagerMac::SendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) {
@@ -194,11 +188,11 @@ void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
// Re-initialize for next time.
midi_packet_ = MIDIPacketListInit(packet_list_);
- client->AccumulateMIDIBytesSent(data.size());
+ client->AccumulateMidiBytesSent(data.size());
}
// static
-MIDIPortInfo MIDIManagerMac::GetPortInfoFromEndpoint(
+MidiPortInfo MidiManagerMac::GetPortInfoFromEndpoint(
MIDIEndpointRef endpoint) {
SInt32 id_number = 0;
MIDIObjectGetIntegerProperty(endpoint, kMIDIPropertyUniqueID, &id_number);
@@ -238,17 +232,17 @@ MIDIPortInfo MIDIManagerMac::GetPortInfoFromEndpoint(
<< result;
}
- return MIDIPortInfo(id, manufacturer, name, version);
+ return MidiPortInfo(id, manufacturer, name, version);
}
// static
-double MIDIManagerMac::MIDITimeStampToSeconds(MIDITimeStamp timestamp) {
+double MidiManagerMac::MIDITimeStampToSeconds(MIDITimeStamp timestamp) {
UInt64 nanoseconds = AudioConvertHostTimeToNanos(timestamp);
return static_cast<double>(nanoseconds) / 1.0e9;
}
// static
-MIDITimeStamp MIDIManagerMac::SecondsToMIDITimeStamp(double seconds) {
+MIDITimeStamp MidiManagerMac::SecondsToMIDITimeStamp(double seconds) {
UInt64 nanos = UInt64(seconds * 1.0e9);
return AudioConvertNanosToHostTime(nanos);
}
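
The two static helpers above convert between CoreMIDI host time and
floating-point seconds. A hedged usage sketch (AudioGetCurrentHostTime() is the
standard CoreAudio call; the three-second offset is an invented example value):

    #include <CoreAudio/HostTime.h>
    #include <CoreMIDI/CoreMIDI.h>

    // Compute a MIDITimeStamp three seconds in the future, mirroring
    // SecondsToMIDITimeStamp()'s nanosecond round trip.
    MIDITimeStamp TimeStampThreeSecondsFromNow() {
      UInt64 now_nanos = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
      UInt64 target_nanos = now_nanos + UInt64(3.0 * 1.0e9);
      return AudioConvertNanosToHostTime(target_nanos);
    }
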
diff --git a/chromium/media/midi/midi_manager_mac.h b/chromium/media/midi/midi_manager_mac.h
index cc8bf74a3c5..5c514ffdd02 100644
--- a/chromium/media/midi/midi_manager_mac.h
+++ b/chromium/media/midi/midi_manager_mac.h
@@ -18,14 +18,14 @@
namespace media {
-class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
+class MEDIA_EXPORT MidiManagerMac : public MidiManager {
public:
- MIDIManagerMac();
- virtual ~MIDIManagerMac();
+ MidiManagerMac();
+ virtual ~MidiManagerMac();
- // MIDIManager implementation.
- virtual bool Initialize() OVERRIDE;
- virtual void DispatchSendMIDIData(MIDIManagerClient* client,
+ // MidiManager implementation.
+ virtual void StartInitialization() OVERRIDE;
+ virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) OVERRIDE;
@@ -34,20 +34,20 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
// CoreMIDI callback for MIDI data.
// Each callback can contain multiple packets, each of which can contain
// multiple MIDI messages.
- static void ReadMIDIDispatch(
+ static void ReadMidiDispatch(
const MIDIPacketList *pktlist,
void *read_proc_refcon,
void *src_conn_refcon);
- virtual void ReadMIDI(MIDIEndpointRef source, const MIDIPacketList *pktlist);
+ virtual void ReadMidi(MIDIEndpointRef source, const MIDIPacketList *pktlist);
- // An internal callback that runs on MIDISendThread.
- void SendMIDIData(MIDIManagerClient* client,
+ // An internal callback that runs on MidiSendThread.
+ void SendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp);
// Helper
- static media::MIDIPortInfo GetPortInfoFromEndpoint(MIDIEndpointRef endpoint);
+ static media::MidiPortInfo GetPortInfoFromEndpoint(MIDIEndpointRef endpoint);
static double MIDITimeStampToSeconds(MIDITimeStamp timestamp);
static MIDITimeStamp SecondsToMIDITimeStamp(double seconds);
@@ -72,7 +72,7 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
// |send_thread_| is used to send MIDI data.
base::Thread send_thread_;
- DISALLOW_COPY_AND_ASSIGN(MIDIManagerMac);
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerMac);
};
} // namespace media
diff --git a/chromium/media/midi/midi_manager_unittest.cc b/chromium/media/midi/midi_manager_unittest.cc
new file mode 100644
index 00000000000..03508139db1
--- /dev/null
+++ b/chromium/media/midi/midi_manager_unittest.cc
@@ -0,0 +1,267 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+class FakeMidiManager : public MidiManager {
+ public:
+ FakeMidiManager() : start_initialization_is_called_(false) {}
+ virtual ~FakeMidiManager() {}
+
+ // MidiManager implementation.
+ virtual void StartInitialization() OVERRIDE {
+ start_initialization_is_called_ = true;
+ }
+
+ virtual void DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) OVERRIDE {}
+
+ // Utility functions for testing.
+ void CallCompleteInitialization(MidiResult result) {
+ CompleteInitialization(result);
+ }
+
+ size_t GetClientCount() const {
+ return clients_size_for_testing();
+ }
+
+ size_t GetPendingClientCount() const {
+ return pending_clients_size_for_testing();
+ }
+
+ bool start_initialization_is_called_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeMidiManager);
+};
+
+class FakeMidiManagerClient : public MidiManagerClient {
+ public:
+ explicit FakeMidiManagerClient(int client_id)
+ : client_id_(client_id),
+ result_(MIDI_NOT_SUPPORTED),
+ wait_for_result_(true) {}
+ virtual ~FakeMidiManagerClient() {}
+
+ // MidiManagerClient implementation.
+ virtual void CompleteStartSession(int client_id, MidiResult result) OVERRIDE {
+ EXPECT_TRUE(wait_for_result_);
+ CHECK_EQ(client_id_, client_id);
+ result_ = result;
+ wait_for_result_ = false;
+ }
+
+ virtual void ReceiveMidiData(uint32 port_index, const uint8* data,
+ size_t size, double timestamp) OVERRIDE {}
+ virtual void AccumulateMidiBytesSent(size_t size) OVERRIDE {}
+
+ int client_id() const { return client_id_; }
+ MidiResult result() const { return result_; }
+
+ MidiResult WaitForResult() {
+ base::RunLoop run_loop;
+ // CompleteStartSession() is called inside the message loop on the same
+ // thread. Protection for |wait_for_result_| is not needed.
+ while (wait_for_result_)
+ run_loop.RunUntilIdle();
+ return result();
+ }
+
+ private:
+ int client_id_;
+ MidiResult result_;
+ bool wait_for_result_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeMidiManagerClient);
+};
+
+class MidiManagerTest : public ::testing::Test {
+ public:
+ MidiManagerTest()
+ : manager_(new FakeMidiManager),
+ message_loop_(new base::MessageLoop) {}
+ virtual ~MidiManagerTest() {}
+
+ protected:
+ void StartTheFirstSession(FakeMidiManagerClient* client) {
+ EXPECT_FALSE(manager_->start_initialization_is_called_);
+ EXPECT_EQ(0U, manager_->GetClientCount());
+ EXPECT_EQ(0U, manager_->GetPendingClientCount());
+ manager_->StartSession(client, client->client_id());
+ EXPECT_EQ(0U, manager_->GetClientCount());
+ EXPECT_EQ(1U, manager_->GetPendingClientCount());
+ EXPECT_TRUE(manager_->start_initialization_is_called_);
+ }
+
+ void StartTheNthSession(FakeMidiManagerClient* client, size_t nth) {
+ EXPECT_EQ(nth != 1, manager_->start_initialization_is_called_);
+ EXPECT_EQ(0U, manager_->GetClientCount());
+ EXPECT_EQ(nth - 1, manager_->GetPendingClientCount());
+
+ // StartInitialization() should not be called for the second and later
+ // sessions.
+ manager_->start_initialization_is_called_ = false;
+ manager_->StartSession(client, client->client_id());
+ EXPECT_EQ(nth == 1, manager_->start_initialization_is_called_);
+ manager_->start_initialization_is_called_ = true;
+ }
+
+ void EndSession(FakeMidiManagerClient* client, size_t before, size_t after) {
+ EXPECT_EQ(before, manager_->GetClientCount());
+ manager_->EndSession(client);
+ EXPECT_EQ(after, manager_->GetClientCount());
+ }
+
+ void CompleteInitialization(MidiResult result) {
+ manager_->CallCompleteInitialization(result);
+ }
+
+ void RunLoopUntilIdle() {
+ base::RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+
+ protected:
+ scoped_ptr<FakeMidiManager> manager_;
+
+ private:
+ scoped_ptr<base::MessageLoop> message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerTest);
+};
+
+TEST_F(MidiManagerTest, StartAndEndSession) {
+ scoped_ptr<FakeMidiManagerClient> client;
+ client.reset(new FakeMidiManagerClient(0));
+
+ StartTheFirstSession(client.get());
+ CompleteInitialization(MIDI_OK);
+ EXPECT_EQ(MIDI_OK, client->WaitForResult());
+ EndSession(client.get(), 1U, 0U);
+}
+
+TEST_F(MidiManagerTest, StartAndEndSessionWithError) {
+ scoped_ptr<FakeMidiManagerClient> client;
+ client.reset(new FakeMidiManagerClient(1));
+
+ StartTheFirstSession(client.get());
+ CompleteInitialization(MIDI_INITIALIZATION_ERROR);
+ EXPECT_EQ(MIDI_INITIALIZATION_ERROR, client->WaitForResult());
+ EndSession(client.get(), 0U, 0U);
+}
+
+TEST_F(MidiManagerTest, StartMultipleSessions) {
+ scoped_ptr<FakeMidiManagerClient> client1;
+ scoped_ptr<FakeMidiManagerClient> client2;
+ scoped_ptr<FakeMidiManagerClient> client3;
+ client1.reset(new FakeMidiManagerClient(0));
+ client2.reset(new FakeMidiManagerClient(1));
+ client3.reset(new FakeMidiManagerClient(1));
+
+ StartTheFirstSession(client1.get());
+ StartTheNthSession(client2.get(), 2);
+ StartTheNthSession(client3.get(), 3);
+ CompleteInitialization(MIDI_OK);
+ EXPECT_EQ(MIDI_OK, client1->WaitForResult());
+ EXPECT_EQ(MIDI_OK, client2->WaitForResult());
+ EXPECT_EQ(MIDI_OK, client3->WaitForResult());
+ EndSession(client1.get(), 3U, 2U);
+ EndSession(client2.get(), 2U, 1U);
+ EndSession(client3.get(), 1U, 0U);
+}
+
+// TODO(toyoshim): Add a test for a MidiManagerClient that has multiple
+// sessions with multiple client_ids.
+
+TEST_F(MidiManagerTest, TooManyPendingSessions) {
+ // Push as many client requests for starting session as possible.
+ ScopedVector<FakeMidiManagerClient> many_existing_clients;
+ many_existing_clients.resize(MidiManager::kMaxPendingClientCount);
+ for (size_t i = 0; i < MidiManager::kMaxPendingClientCount; ++i) {
+ many_existing_clients[i] = new FakeMidiManagerClient(i);
+ StartTheNthSession(many_existing_clients[i], i + 1);
+ }
+
+ // Push one more client; it should be rejected because too many requests
+ // are already pending.
+ scoped_ptr<FakeMidiManagerClient> additional_client(
+ new FakeMidiManagerClient(MidiManager::kMaxPendingClientCount));
+ manager_->start_initialization_is_called_ = false;
+ manager_->StartSession(additional_client.get(),
+ additional_client->client_id());
+ EXPECT_FALSE(manager_->start_initialization_is_called_);
+ EXPECT_EQ(MIDI_INITIALIZATION_ERROR, additional_client->result());
+
+ // Other clients still should not receive a result.
+ RunLoopUntilIdle();
+ for (size_t i = 0; i < many_existing_clients.size(); ++i)
+ EXPECT_EQ(MIDI_NOT_SUPPORTED, many_existing_clients[i]->result());
+
+ // The result MIDI_OK should be distributed to other clients.
+ CompleteInitialization(MIDI_OK);
+ for (size_t i = 0; i < many_existing_clients.size(); ++i)
+ EXPECT_EQ(MIDI_OK, many_existing_clients[i]->WaitForResult());
+
+ // Close all successful sessions in FIFO order.
+ size_t sessions = many_existing_clients.size();
+ for (size_t i = 0; i < many_existing_clients.size(); ++i, --sessions)
+ EndSession(many_existing_clients[i], sessions, sessions - 1);
+}
+
+TEST_F(MidiManagerTest, AbortSession) {
+ // A client starting a session can be destructed while an asynchronous
+ // initialization is performed.
+ scoped_ptr<FakeMidiManagerClient> client;
+ client.reset(new FakeMidiManagerClient(0));
+
+ StartTheFirstSession(client.get());
+ EndSession(client.get(), 0, 0);
+ client.reset();
+
+ // The following call must not reach the already-destructed |client|.
+ CompleteInitialization(MIDI_OK);
+ base::RunLoop run_loop;
+ run_loop.RunUntilIdle();
+}
+
+TEST_F(MidiManagerTest, CreateMidiManager) {
+ scoped_ptr<FakeMidiManagerClient> client;
+ client.reset(new FakeMidiManagerClient(0));
+
+ scoped_ptr<MidiManager> manager(MidiManager::Create());
+ manager->StartSession(client.get(), client->client_id());
+
+ MidiResult result = client->WaitForResult();
+ // This #ifdef needs to be identical to the one in media/midi/midi_manager.cc.
+ // Do not change the condition for disabling this test.
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(USE_ALSA) && \
+ !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+ EXPECT_EQ(MIDI_NOT_SUPPORTED, result);
+#elif defined(USE_ALSA)
+ // Temporary until http://crbug.com/371230 is resolved.
+ EXPECT_TRUE((result == MIDI_OK) || (result == MIDI_INITIALIZATION_ERROR));
+#else
+ EXPECT_EQ(MIDI_OK, result);
+#endif
+}
+
+} // namespace
+
+} // namespace media
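
Taken together, these tests pin down the session contract: StartSession()
queues the client, only the first pending client triggers StartInitialization(),
and CompleteInitialization() fans the result out to everyone waiting. A
condensed sketch of one session using the fakes defined in this file (not an
additional test, just the call sequence):

    base::MessageLoop message_loop;
    FakeMidiManager manager;
    FakeMidiManagerClient client(0);

    manager.StartSession(&client, client.client_id());  // Queues the client.
    manager.CallCompleteInitialization(MIDI_OK);        // Platform init done.
    MidiResult result = client.WaitForResult();         // Pumps the run loop.
    manager.EndSession(&client);
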
diff --git a/chromium/media/midi/midi_manager_usb.cc b/chromium/media/midi/midi_manager_usb.cc
new file mode 100644
index 00000000000..f2cf866aa0c
--- /dev/null
+++ b/chromium/media/midi/midi_manager_usb.cc
@@ -0,0 +1,110 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager_usb.h"
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "media/midi/usb_midi_descriptor_parser.h"
+#include "media/midi/usb_midi_device.h"
+#include "media/midi/usb_midi_input_stream.h"
+#include "media/midi/usb_midi_jack.h"
+#include "media/midi/usb_midi_output_stream.h"
+
+namespace media {
+
+MidiManagerUsb::MidiManagerUsb(scoped_ptr<UsbMidiDevice::Factory> factory)
+ : device_factory_(factory.Pass()) {
+}
+
+MidiManagerUsb::~MidiManagerUsb() {
+}
+
+void MidiManagerUsb::StartInitialization() {
+ Initialize(
+ base::Bind(&MidiManager::CompleteInitialization, base::Unretained(this)));
+}
+
+void MidiManagerUsb::Initialize(
+ base::Callback<void(MidiResult result)> callback) {
+ initialize_callback_ = callback;
+ // This is safe because EnumerateDevices cancels the operation on destruction.
+ device_factory_->EnumerateDevices(
+ this,
+ base::Bind(&MidiManagerUsb::OnEnumerateDevicesDone,
+ base::Unretained(this)));
+}
+
+void MidiManagerUsb::DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ DCHECK_LT(port_index, output_streams_.size());
+ output_streams_[port_index]->Send(data);
+ client->AccumulateMidiBytesSent(data.size());
+}
+
+void MidiManagerUsb::ReceiveUsbMidiData(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) {
+ if (!input_stream_)
+ return;
+ input_stream_->OnReceivedData(device,
+ endpoint_number,
+ data,
+ size,
+ time);
+}
+
+void MidiManagerUsb::OnReceivedData(size_t jack_index,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) {
+ ReceiveMidiData(jack_index, data, size, time);
+}
+
+void MidiManagerUsb::OnEnumerateDevicesDone(bool result,
+ UsbMidiDevice::Devices* devices) {
+ if (!result) {
+ initialize_callback_.Run(MIDI_INITIALIZATION_ERROR);
+ return;
+ }
+ devices->swap(devices_);
+ for (size_t i = 0; i < devices_.size(); ++i) {
+ UsbMidiDescriptorParser parser;
+ std::vector<uint8> descriptor = devices_[i]->GetDescriptor();
+ const uint8* data = descriptor.size() > 0 ? &descriptor[0] : NULL;
+ std::vector<UsbMidiJack> jacks;
+ bool parse_result = parser.Parse(devices_[i],
+ data,
+ descriptor.size(),
+ &jacks);
+ if (!parse_result) {
+ initialize_callback_.Run(MIDI_INITIALIZATION_ERROR);
+ return;
+ }
+ std::vector<UsbMidiJack> input_jacks;
+ for (size_t j = 0; j < jacks.size(); ++j) {
+ if (jacks[j].direction() == UsbMidiJack::DIRECTION_OUT) {
+ output_streams_.push_back(new UsbMidiOutputStream(jacks[j]));
+ // TODO(yhirano): Set appropriate properties.
+ AddOutputPort(MidiPortInfo());
+ } else {
+ DCHECK_EQ(jacks[j].direction(), UsbMidiJack::DIRECTION_IN);
+ input_jacks.push_back(jacks[j]);
+ // TODO(yhirano): Set appropriate properties.
+ AddInputPort(MidiPortInfo());
+ }
+ }
+ input_stream_.reset(new UsbMidiInputStream(input_jacks, this));
+ }
+ initialize_callback_.Run(MIDI_OK);
+}
+
+} // namespace media
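
OnEnumerateDevicesDone() above delegates all topology discovery to
UsbMidiDescriptorParser. The same step in isolation, as a sketch (the helper
name is invented; the Parse() call matches the usage in this file):

    // Extract the MIDI jacks described by a raw USB configuration descriptor.
    std::vector<UsbMidiJack> ParseJacks(UsbMidiDevice* device,
                                        const std::vector<uint8>& descriptor) {
      UsbMidiDescriptorParser parser;
      std::vector<UsbMidiJack> jacks;
      const uint8* data = descriptor.empty() ? NULL : &descriptor[0];
      if (!parser.Parse(device, data, descriptor.size(), &jacks))
        jacks.clear();  // Treat a malformed descriptor as "no jacks".
      return jacks;
    }
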
diff --git a/chromium/media/midi/midi_manager_usb.h b/chromium/media/midi/midi_manager_usb.h
new file mode 100644
index 00000000000..694a0516a4e
--- /dev/null
+++ b/chromium/media/midi/midi_manager_usb.h
@@ -0,0 +1,86 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_MANAGER_USB_H_
+#define MEDIA_MIDI_MIDI_MANAGER_USB_H_
+
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/midi/midi_manager.h"
+#include "media/midi/usb_midi_device.h"
+#include "media/midi/usb_midi_input_stream.h"
+#include "media/midi/usb_midi_jack.h"
+#include "media/midi/usb_midi_output_stream.h"
+
+namespace media {
+
+// MidiManager for USB-MIDI.
+class MEDIA_EXPORT MidiManagerUsb : public MidiManager,
+ public UsbMidiDeviceDelegate,
+ public UsbMidiInputStream::Delegate {
+ public:
+ explicit MidiManagerUsb(scoped_ptr<UsbMidiDevice::Factory> device_factory);
+ virtual ~MidiManagerUsb();
+
+ // MidiManager implementation.
+ virtual void StartInitialization() OVERRIDE;
+ virtual void DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) OVERRIDE;
+
+ // UsbMidiDeviceDelegate implementation.
+ virtual void ReceiveUsbMidiData(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) OVERRIDE;
+
+ // UsbMidiInputStream::Delegate implementation.
+ virtual void OnReceivedData(size_t jack_index,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) OVERRIDE;
+
+ const ScopedVector<UsbMidiOutputStream>& output_streams() const {
+ return output_streams_;
+ }
+ const UsbMidiInputStream* input_stream() const { return input_stream_.get(); }
+
+ // Initializes this object.
+ // When the initialization finishes, |callback| will be called with the
+ // result.
+ // When this factory is destroyed during the operation, the operation
+ // will be canceled silently (i.e. |callback| will not be called).
+ // This function is public only for unit tests; do not call it outside of
+ // test code.
+ void Initialize(base::Callback<void(MidiResult result)> callback);
+
+ private:
+ void OnEnumerateDevicesDone(bool result, UsbMidiDevice::Devices* devices);
+
+ scoped_ptr<UsbMidiDevice::Factory> device_factory_;
+ ScopedVector<UsbMidiDevice> devices_;
+ ScopedVector<UsbMidiOutputStream> output_streams_;
+ scoped_ptr<UsbMidiInputStream> input_stream_;
+
+ base::Callback<void(MidiResult result)> initialize_callback_;
+
+ // A map from <endpoint_number, cable_number> to the index of the input jack.
+ base::hash_map<std::pair<int, int>, size_t> input_jack_dictionary_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerUsb);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_MANAGER_USB_H_
diff --git a/chromium/media/midi/midi_manager_usb_unittest.cc b/chromium/media/midi/midi_manager_usb_unittest.cc
new file mode 100644
index 00000000000..88bfd939567
--- /dev/null
+++ b/chromium/media/midi/midi_manager_usb_unittest.cc
@@ -0,0 +1,357 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager_usb.h"
+
+#include <string>
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "media/midi/usb_midi_device.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+template<typename T, size_t N>
+std::vector<T> ToVector(const T (&array)[N]) {
+ return std::vector<T>(array, array + N);
+}
+
+class Logger {
+ public:
+ Logger() {}
+ ~Logger() {}
+
+ void AddLog(const std::string& message) { log_ += message; }
+ std::string TakeLog() {
+ std::string result;
+ result.swap(log_);
+ return result;
+ }
+
+ private:
+ std::string log_;
+
+ DISALLOW_COPY_AND_ASSIGN(Logger);
+};
+
+class FakeUsbMidiDevice : public UsbMidiDevice {
+ public:
+ explicit FakeUsbMidiDevice(Logger* logger) : logger_(logger) {}
+ virtual ~FakeUsbMidiDevice() {}
+
+ virtual std::vector<uint8> GetDescriptor() OVERRIDE {
+ logger_->AddLog("UsbMidiDevice::GetDescriptor\n");
+ return descriptor_;
+ }
+
+ virtual void Send(int endpoint_number,
+ const std::vector<uint8>& data) OVERRIDE {
+ logger_->AddLog("UsbMidiDevice::Send ");
+ logger_->AddLog(base::StringPrintf("endpoint = %d data =",
+ endpoint_number));
+ for (size_t i = 0; i < data.size(); ++i)
+ logger_->AddLog(base::StringPrintf(" 0x%02x", data[i]));
+ logger_->AddLog("\n");
+ }
+
+ void SetDescriptor(const std::vector<uint8>& descriptor) {
+ descriptor_ = descriptor;
+ }
+
+ private:
+ std::vector<uint8> descriptor_;
+ Logger* logger_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeUsbMidiDevice);
+};
+
+class FakeMidiManagerClient : public MidiManagerClient {
+ public:
+ explicit FakeMidiManagerClient(Logger* logger)
+ : complete_start_session_(false),
+ result_(MIDI_NOT_SUPPORTED),
+ logger_(logger) {}
+ virtual ~FakeMidiManagerClient() {}
+
+ virtual void CompleteStartSession(int client_id, MidiResult result) OVERRIDE {
+ complete_start_session_ = true;
+ result_ = result;
+ }
+
+ virtual void ReceiveMidiData(uint32 port_index,
+ const uint8* data,
+ size_t size,
+ double timestamp) OVERRIDE {
+ logger_->AddLog("MidiManagerClient::ReceiveMidiData ");
+ logger_->AddLog(base::StringPrintf("port_index = %d data =", port_index));
+ for (size_t i = 0; i < size; ++i)
+ logger_->AddLog(base::StringPrintf(" 0x%02x", data[i]));
+ logger_->AddLog("\n");
+ }
+
+ virtual void AccumulateMidiBytesSent(size_t size) OVERRIDE {
+ logger_->AddLog("MidiManagerClient::AccumulateMidiBytesSent ");
+ // Windows has no "%zu".
+ logger_->AddLog(base::StringPrintf("size = %u\n",
+ static_cast<unsigned>(size)));
+ }
+
+ bool complete_start_session_;
+ MidiResult result_;
+
+ private:
+ Logger* logger_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeMidiManagerClient);
+};
+
+class TestUsbMidiDeviceFactory : public UsbMidiDevice::Factory {
+ public:
+ TestUsbMidiDeviceFactory() {}
+ virtual ~TestUsbMidiDeviceFactory() {}
+ virtual void EnumerateDevices(UsbMidiDeviceDelegate* device,
+ Callback callback) OVERRIDE {
+ callback_ = callback;
+ }
+
+ Callback callback_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestUsbMidiDeviceFactory);
+};
+
+class MidiManagerUsbForTesting : public MidiManagerUsb {
+ public:
+ explicit MidiManagerUsbForTesting(
+ scoped_ptr<UsbMidiDevice::Factory> device_factory)
+ : MidiManagerUsb(device_factory.PassAs<UsbMidiDevice::Factory>()) {}
+ virtual ~MidiManagerUsbForTesting() {}
+
+ void CallCompleteInitialization(MidiResult result) {
+ CompleteInitialization(result);
+ base::RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerUsbForTesting);
+};
+
+class MidiManagerUsbTest : public ::testing::Test {
+ public:
+ MidiManagerUsbTest() : message_loop_(new base::MessageLoop) {
+ scoped_ptr<TestUsbMidiDeviceFactory> factory(new TestUsbMidiDeviceFactory);
+ factory_ = factory.get();
+ manager_.reset(
+ new MidiManagerUsbForTesting(factory.PassAs<UsbMidiDevice::Factory>()));
+ }
+ virtual ~MidiManagerUsbTest() {
+ std::string leftover_logs = logger_.TakeLog();
+ if (!leftover_logs.empty()) {
+ ADD_FAILURE() << "Log should be empty: " << leftover_logs;
+ }
+ }
+
+ protected:
+ void Initialize() {
+ client_.reset(new FakeMidiManagerClient(&logger_));
+ manager_->StartSession(client_.get(), 0);
+ }
+
+ void Finalize() {
+ manager_->EndSession(client_.get());
+ }
+
+ bool IsInitializationCallbackInvoked() {
+ return client_->complete_start_session_;
+ }
+
+ MidiResult GetInitializationResult() {
+ return client_->result_;
+ }
+
+ void RunCallbackUntilCallbackInvoked(
+ bool result, UsbMidiDevice::Devices* devices) {
+ factory_->callback_.Run(result, devices);
+ base::RunLoop run_loop;
+ while (!client_->complete_start_session_)
+ run_loop.RunUntilIdle();
+ }
+
+ scoped_ptr<MidiManagerUsbForTesting> manager_;
+ scoped_ptr<FakeMidiManagerClient> client_;
+ // Owned by manager_.
+ TestUsbMidiDeviceFactory* factory_;
+ Logger logger_;
+
+ private:
+ scoped_ptr<base::MessageLoop> message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerUsbTest);
+};
+
+
+TEST_F(MidiManagerUsbTest, Initialize) {
+ scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
+ uint8 descriptor[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
+ 0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
+ 0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x09, 0x24, 0x01, 0x00,
+ 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01, 0x00, 0x02,
+ 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02,
+ 0x01, 0x03, 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09,
+ 0x24, 0x03, 0x01, 0x07, 0x01, 0x06, 0x01, 0x00, 0x09, 0x24,
+ 0x03, 0x02, 0x04, 0x01, 0x02, 0x01, 0x00, 0x09, 0x24, 0x03,
+ 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x25, 0x01, 0x01, 0x07,
+ };
+ device->SetDescriptor(ToVector(descriptor));
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ devices.push_back(device.release());
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_OK, GetInitializationResult());
+
+ ASSERT_EQ(1u, manager_->input_ports().size());
+ ASSERT_EQ(2u, manager_->output_ports().size());
+ ASSERT_TRUE(manager_->input_stream());
+ std::vector<UsbMidiInputStream::JackUniqueKey> keys =
+ manager_->input_stream()->RegisteredJackKeysForTesting();
+ ASSERT_EQ(2u, manager_->output_streams().size());
+ EXPECT_EQ(2u, manager_->output_streams()[0]->jack().jack_id);
+ EXPECT_EQ(3u, manager_->output_streams()[1]->jack().jack_id);
+ ASSERT_EQ(1u, keys.size());
+ EXPECT_EQ(2, keys[0].endpoint_number);
+
+ EXPECT_EQ("UsbMidiDevice::GetDescriptor\n", logger_.TakeLog());
+}
+
+TEST_F(MidiManagerUsbTest, InitializeFail) {
+ Initialize();
+
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(false, NULL);
+ EXPECT_EQ(MIDI_INITIALIZATION_ERROR, GetInitializationResult());
+}
+
+TEST_F(MidiManagerUsbTest, InitializeFailBecauseOfInvalidDescriptor) {
+ scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
+ uint8 descriptor[] = {0x04};
+ device->SetDescriptor(ToVector(descriptor));
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ devices.push_back(device.release());
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_INITIALIZATION_ERROR, GetInitializationResult());
+ EXPECT_EQ("UsbMidiDevice::GetDescriptor\n", logger_.TakeLog());
+}
+
+TEST_F(MidiManagerUsbTest, Send) {
+ scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
+ FakeMidiManagerClient client(&logger_);
+ uint8 descriptor[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
+ 0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
+ 0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x09, 0x24, 0x01, 0x00,
+ 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01, 0x00, 0x02,
+ 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02,
+ 0x01, 0x03, 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09,
+ 0x24, 0x03, 0x01, 0x07, 0x01, 0x06, 0x01, 0x00, 0x09, 0x24,
+ 0x03, 0x02, 0x04, 0x01, 0x02, 0x01, 0x00, 0x09, 0x24, 0x03,
+ 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x25, 0x01, 0x01, 0x07,
+ };
+
+ device->SetDescriptor(ToVector(descriptor));
+ uint8 data[] = {
+ 0x90, 0x45, 0x7f,
+ 0xf0, 0x00, 0x01, 0xf7,
+ };
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ devices.push_back(device.release());
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_OK, GetInitializationResult());
+ ASSERT_EQ(2u, manager_->output_streams().size());
+
+ manager_->DispatchSendMidiData(&client, 1, ToVector(data), 0);
+ EXPECT_EQ("UsbMidiDevice::GetDescriptor\n"
+ "UsbMidiDevice::Send endpoint = 2 data = "
+ "0x19 0x90 0x45 0x7f "
+ "0x14 0xf0 0x00 0x01 "
+ "0x15 0xf7 0x00 0x00\n"
+ "MidiManagerClient::AccumulateMidiBytesSent size = 7\n",
+ logger_.TakeLog());
+}
+
+TEST_F(MidiManagerUsbTest, Receive) {
+ scoped_ptr<FakeUsbMidiDevice> device(new FakeUsbMidiDevice(&logger_));
+ uint8 descriptor[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
+ 0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
+ 0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x09, 0x24, 0x01, 0x00,
+ 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01, 0x00, 0x02,
+ 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02,
+ 0x01, 0x03, 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09,
+ 0x24, 0x03, 0x01, 0x07, 0x01, 0x06, 0x01, 0x00, 0x09, 0x24,
+ 0x03, 0x02, 0x04, 0x01, 0x02, 0x01, 0x00, 0x09, 0x24, 0x03,
+ 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x25, 0x01, 0x01, 0x07,
+ };
+
+ device->SetDescriptor(ToVector(descriptor));
+ uint8 data[] = {
+ 0x09, 0x90, 0x45, 0x7f,
+ 0x04, 0xf0, 0x00, 0x01,
+ 0x49, 0x90, 0x88, 0x99, // This data should be ignored (CN = 4).
+ 0x05, 0xf7, 0x00, 0x00,
+ };
+
+ Initialize();
+ ScopedVector<UsbMidiDevice> devices;
+ UsbMidiDevice* device_raw = device.get();
+ devices.push_back(device.release());
+ EXPECT_FALSE(IsInitializationCallbackInvoked());
+ RunCallbackUntilCallbackInvoked(true, &devices);
+ EXPECT_EQ(MIDI_OK, GetInitializationResult());
+
+ manager_->ReceiveUsbMidiData(device_raw, 2, data, arraysize(data),
+ base::TimeTicks());
+ Finalize();
+
+ EXPECT_EQ("UsbMidiDevice::GetDescriptor\n"
+ "MidiManagerClient::ReceiveMidiData port_index = 0 "
+ "data = 0x90 0x45 0x7f\n"
+ "MidiManagerClient::ReceiveMidiData port_index = 0 "
+ "data = 0xf0 0x00 0x01\n"
+ "MidiManagerClient::ReceiveMidiData port_index = 0 data = 0xf7\n",
+ logger_.TakeLog());
+}
+
+} // namespace
+
+} // namespace media
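
The byte patterns asserted in the Send and Receive tests follow the USB-MIDI
event packet format: every packet is four bytes, the header's high nibble is
the cable number and its low nibble the code index number (CIN), and up to
three MIDI bytes follow. A sketch of the packing for a complete three-byte
channel message (cable number and message bytes are example values):

    #include <stdint.h>

    // For channel messages the CIN equals the status high nibble, so cable 1
    // plus Note On (0x90) produces the 0x19 header seen in the Send test.
    void PackChannelMessage(uint8_t cable_number, const uint8_t midi[3],
                            uint8_t packet[4]) {
      uint8_t cin = midi[0] >> 4;             // 0x9 for a Note On status byte.
      packet[0] = (cable_number << 4) | cin;  // Header byte, e.g. 0x19.
      packet[1] = midi[0];
      packet[2] = midi[1];
      packet[3] = midi[2];
    }
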
diff --git a/chromium/media/midi/midi_manager_win.cc b/chromium/media/midi/midi_manager_win.cc
index d250e6aefff..54a1db8733a 100644
--- a/chromium/media/midi/midi_manager_win.cc
+++ b/chromium/media/midi/midi_manager_win.cc
@@ -18,6 +18,8 @@
#define MMNOMMIO
#include <mmsystem.h>
+#include <algorithm>
+#include <string>
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
@@ -39,7 +41,7 @@ std::string GetInErrorMessage(MMRESULT result) {
<< " midiInGetErrorText error: " << get_result;
return std::string();
}
- return WideToUTF8(text);
+ return base::WideToUTF8(text);
}
std::string GetOutErrorMessage(MMRESULT result) {
@@ -51,7 +53,7 @@ std::string GetOutErrorMessage(MMRESULT result) {
<< " midiOutGetErrorText error: " << get_result;
return std::string();
}
- return WideToUTF8(text);
+ return base::WideToUTF8(text);
}
class MIDIHDRDeleter {
@@ -76,7 +78,7 @@ ScopedMIDIHDR CreateMIDIHDR(size_t size) {
return header.Pass();
}
-void SendShortMIDIMessageInternal(HMIDIOUT midi_out_handle,
+void SendShortMidiMessageInternal(HMIDIOUT midi_out_handle,
const std::vector<uint8>& message) {
if (message.size() >= 4)
return;
@@ -89,7 +91,7 @@ void SendShortMIDIMessageInternal(HMIDIOUT midi_out_handle,
<< "Failed to output short message: " << GetOutErrorMessage(result);
}
-void SendLongMIDIMessageInternal(HMIDIOUT midi_out_handle,
+void SendLongMidiMessageInternal(HMIDIOUT midi_out_handle,
const std::vector<uint8>& message) {
// Implementation note:
// Sending long MIDI message can be performed synchronously or asynchronously
@@ -147,7 +149,7 @@ void SendLongMIDIMessageInternal(HMIDIOUT midi_out_handle,
} // namespace
-class MIDIManagerWin::InDeviceInfo {
+class MidiManagerWin::InDeviceInfo {
public:
~InDeviceInfo() {
Uninitialize();
@@ -164,12 +166,9 @@ class MIDIManagerWin::InDeviceInfo {
HMIDIIN midi_handle() const {
return midi_handle_;
}
- const base::TimeDelta& start_time_offset() const {
- return start_time_offset_;
- }
- static scoped_ptr<InDeviceInfo> Create(MIDIManagerWin* manager,
- UINT device_id) {
+ static scoped_ptr<InDeviceInfo> Create(MidiManagerWin* manager,
+ UINT device_id) {
scoped_ptr<InDeviceInfo> obj(new InDeviceInfo(manager));
if (!obj->Initialize(device_id))
obj.reset();
@@ -180,7 +179,7 @@ class MIDIManagerWin::InDeviceInfo {
static const int kInvalidPortIndex = -1;
static const size_t kBufferLength = 32 * 1024;
- explicit InDeviceInfo(MIDIManagerWin* manager)
+ explicit InDeviceInfo(MidiManagerWin* manager)
: manager_(manager),
port_index_(kInvalidPortIndex),
midi_handle_(NULL),
@@ -235,7 +234,7 @@ class MIDIManagerWin::InDeviceInfo {
return false;
}
started_ = true;
- start_time_offset_ = base::TimeTicks::Now() - base::TimeTicks();
+ start_time_ = base::TimeTicks::Now();
return true;
}
@@ -246,7 +245,7 @@ class MIDIManagerWin::InDeviceInfo {
DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
<< "Failed to stop input port: " << GetInErrorMessage(result);
started_ = false;
- start_time_offset_ = base::TimeDelta();
+ start_time_ = base::TimeTicks();
}
if (midi_handle_) {
// midiInReset flushes pending messages. We ignore these messages.
@@ -282,11 +281,11 @@ class MIDIManagerWin::InDeviceInfo {
self->OnShortMessageReceived(static_cast<uint8>(param1 & 0xff),
static_cast<uint8>((param1 >> 8) & 0xff),
static_cast<uint8>((param1 >> 16) & 0xff),
- self->TickToTimeDelta(param2));
+ param2);
return;
case MIM_LONGDATA:
self->OnLongMessageReceived(reinterpret_cast<MIDIHDR*>(param1),
- self->TickToTimeDelta(param2));
+ param2);
return;
case MIM_CLOSE:
// TODO(yukawa): Implement crbug.com/279097.
@@ -297,18 +296,18 @@ class MIDIManagerWin::InDeviceInfo {
void OnShortMessageReceived(uint8 status_byte,
uint8 first_data_byte,
uint8 second_data_byte,
- base::TimeDelta timestamp) {
+ DWORD elapsed_ms) {
if (device_to_be_closed())
return;
- const size_t len = GetMIDIMessageLength(status_byte);
+ const size_t len = GetMidiMessageLength(status_byte);
if (len == 0 || port_index() == kInvalidPortIndex)
return;
const uint8 kData[] = { status_byte, first_data_byte, second_data_byte };
DCHECK_LE(len, arraysize(kData));
- manager_->ReceiveMIDIData(port_index(), kData, len, timestamp.InSecondsF());
+ OnMessageReceived(kData, len, elapsed_ms);
}
- void OnLongMessageReceived(MIDIHDR* header, base::TimeDelta timestamp) {
+ void OnLongMessageReceived(MIDIHDR* header, DWORD elapsed_ms) {
if (header != midi_header_.get())
return;
MMRESULT result = MMSYSERR_NOERROR;
@@ -324,33 +323,36 @@ class MIDIManagerWin::InDeviceInfo {
return;
}
if (header->dwBytesRecorded > 0 && port_index() != kInvalidPortIndex) {
- manager_->ReceiveMIDIData(port_index_,
- reinterpret_cast<const uint8*>(header->lpData),
- header->dwBytesRecorded,
- timestamp.InSecondsF());
+ OnMessageReceived(reinterpret_cast<const uint8*>(header->lpData),
+ header->dwBytesRecorded,
+ elapsed_ms);
}
- result = midiInAddBuffer(midi_handle(), header, sizeof(*header));
+ result = midiInAddBuffer(midi_handle_, header, sizeof(*header));
DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
<< "Failed to attach input port: " << GetInErrorMessage(result);
}
- base::TimeDelta TickToTimeDelta(DWORD tick) const {
- const base::TimeDelta delta =
- base::TimeDelta::FromMicroseconds(static_cast<uint32>(tick));
- return start_time_offset_ + delta;
+ void OnMessageReceived(const uint8* data, size_t length, DWORD elapsed_ms) {
+ // MIM_DATA/MIM_LONGDATA message treats the time when midiInStart() is
+ // called as the origin of |elapsed_ms|.
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757284.aspx
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd757286.aspx
+ const base::TimeTicks event_time =
+ start_time_ + base::TimeDelta::FromMilliseconds(elapsed_ms);
+ manager_->ReceiveMidiData(port_index_, data, length, event_time);
}
- MIDIManagerWin* manager_;
+ MidiManagerWin* manager_;
int port_index_;
HMIDIIN midi_handle_;
ScopedMIDIHDR midi_header_;
- base::TimeDelta start_time_offset_;
+ base::TimeTicks start_time_;
bool started_;
bool device_to_be_closed_;
- DISALLOW_COPY_AND_ASSIGN(MIDIManagerWin::InDeviceInfo);
+ DISALLOW_COPY_AND_ASSIGN(InDeviceInfo);
};
-class MIDIManagerWin::OutDeviceInfo {
+class MidiManagerWin::OutDeviceInfo {
public:
~OutDeviceInfo() {
Uninitialize();
@@ -383,18 +385,18 @@ class MIDIManagerWin::OutDeviceInfo {
return;
// MIDI Running status must be filtered out.
- MIDIMessageQueue message_queue(false);
+ MidiMessageQueue message_queue(false);
message_queue.Add(data);
std::vector<uint8> message;
while (!quitting_) {
message_queue.Get(&message);
if (message.empty())
break;
- // SendShortMIDIMessageInternal can send a MIDI message up to 3 bytes.
+ // SendShortMidiMessageInternal can send a MIDI message up to 3 bytes.
if (message.size() <= 3)
- SendShortMIDIMessageInternal(midi_handle_, message);
+ SendShortMidiMessageInternal(midi_handle_, message);
else
- SendLongMIDIMessageInternal(midi_handle_, message);
+ SendLongMidiMessageInternal(midi_handle_, message);
}
}
@@ -408,7 +410,7 @@ class MIDIManagerWin::OutDeviceInfo {
Uninitialize();
// Here we use |CALLBACK_FUNCTION| to subscribe MOM_DONE and MOM_CLOSE
// events.
- // - MOM_DONE: SendLongMIDIMessageInternal() relies on this event to clean
+ // - MOM_DONE: SendLongMidiMessageInternal() relies on this event to clean
// up the backing store where a long MIDI message is stored.
// - MOM_CLOSE: This event is sent when 1) midiOutClose() is called, or 2)
// the MIDI device becomes unavailable for some reasons, e.g., the cable
@@ -484,17 +486,17 @@ class MIDIManagerWin::OutDeviceInfo {
// True if the device is already closed.
volatile bool closed_;
- // True if the MIDIManagerWin is trying to stop the sender thread.
+ // True if the MidiManagerWin is trying to stop the sender thread.
volatile bool quitting_;
- DISALLOW_COPY_AND_ASSIGN(MIDIManagerWin::OutDeviceInfo);
+ DISALLOW_COPY_AND_ASSIGN(OutDeviceInfo);
};
-MIDIManagerWin::MIDIManagerWin()
- : send_thread_("MIDISendThread") {
+MidiManagerWin::MidiManagerWin()
+ : send_thread_("MidiSendThread") {
}
-bool MIDIManagerWin::Initialize() {
+void MidiManagerWin::StartInitialization() {
const UINT num_in_devices = midiInGetNumDevs();
in_devices_.reserve(num_in_devices);
for (UINT device_id = 0; device_id < num_in_devices; ++device_id) {
@@ -508,13 +510,13 @@ bool MIDIManagerWin::Initialize() {
scoped_ptr<InDeviceInfo> in_device(InDeviceInfo::Create(this, device_id));
if (!in_device)
continue;
- MIDIPortInfo info(
+ MidiPortInfo info(
base::IntToString(static_cast<int>(device_id)),
"",
base::WideToUTF8(caps.szPname),
base::IntToString(static_cast<int>(caps.vDriverVersion)));
AddInputPort(info);
- in_device->set_port_index(input_ports_.size() - 1);
+ in_device->set_port_index(input_ports().size() - 1);
in_devices_.push_back(in_device.Pass());
}
@@ -531,7 +533,7 @@ bool MIDIManagerWin::Initialize() {
scoped_ptr<OutDeviceInfo> out_port(OutDeviceInfo::Create(device_id));
if (!out_port)
continue;
- MIDIPortInfo info(
+ MidiPortInfo info(
base::IntToString(static_cast<int>(device_id)),
"",
base::WideToUTF8(caps.szPname),
@@ -540,23 +542,21 @@ bool MIDIManagerWin::Initialize() {
out_devices_.push_back(out_port.Pass());
}
- return true;
+ CompleteInitialization(MIDI_OK);
}
-MIDIManagerWin::~MIDIManagerWin() {
+MidiManagerWin::~MidiManagerWin() {
// Cleanup order is important. |send_thread_| must be stopped before
// |out_devices_| is cleared.
- for (size_t i = 0; i < output_ports_.size(); ++i)
+ for (size_t i = 0; i < output_ports().size(); ++i)
out_devices_[i]->Quit();
send_thread_.Stop();
out_devices_.clear();
- output_ports_.clear();
in_devices_.clear();
- input_ports_.clear();
}
-void MIDIManagerWin::DispatchSendMIDIData(MIDIManagerClient* client,
+void MidiManagerWin::DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) {
@@ -580,18 +580,18 @@ void MIDIManagerWin::DispatchSendMIDIData(MIDIManagerClient* client,
base::Bind(&OutDeviceInfo::Send, base::Unretained(out_port), data),
delay);
- // Call back AccumulateMIDIBytesSent() on |send_thread_| to emulate the
- // behavior of MIDIManagerMac::SendMIDIData.
+ // Call back AccumulateMidiBytesSent() on |send_thread_| to emulate the
+ // behavior of MidiManagerMac::SendMidiData.
// TODO(yukawa): Do this task in a platform-independent way if possible.
// See crbug.com/325810.
send_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&MIDIManagerClient::AccumulateMIDIBytesSent,
+ base::Bind(&MidiManagerClient::AccumulateMidiBytesSent,
base::Unretained(client), data.size()));
}
-MIDIManager* MIDIManager::Create() {
- return new MIDIManagerWin();
+MidiManager* MidiManager::Create() {
+ return new MidiManagerWin();
}
} // namespace media
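
The key behavioral change in this file is the timestamp plumbing: the old code
pre-converted the MIM_DATA/MIM_LONGDATA tick into a TimeDelta, while the new
code stores the absolute midiInStart() time and reconstructs an event's
absolute time on receipt. The arithmetic, as a standalone sketch:

    #include <windows.h>
    #include "base/time/time.h"

    // |start_time| is captured when midiInStart() succeeds; |elapsed_ms| is
    // the DWORD the driver passes to the MIM_DATA/MIM_LONGDATA callback.
    base::TimeTicks MidiEventTime(base::TimeTicks start_time, DWORD elapsed_ms) {
      return start_time + base::TimeDelta::FromMilliseconds(elapsed_ms);
    }
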
diff --git a/chromium/media/midi/midi_manager_win.h b/chromium/media/midi/midi_manager_win.h
index eef8b4e5680..8a951f8e824 100644
--- a/chromium/media/midi/midi_manager_win.h
+++ b/chromium/media/midi/midi_manager_win.h
@@ -14,14 +14,14 @@
namespace media {
-class MIDIManagerWin : public MIDIManager {
+class MidiManagerWin : public MidiManager {
public:
- MIDIManagerWin();
- virtual ~MIDIManagerWin();
+ MidiManagerWin();
+ virtual ~MidiManagerWin();
- // MIDIManager implementation.
- virtual bool Initialize() OVERRIDE;
- virtual void DispatchSendMIDIData(MIDIManagerClient* client,
+ // MidiManager implementation.
+ virtual void StartInitialization() OVERRIDE;
+ virtual void DispatchSendMidiData(MidiManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) OVERRIDE;
@@ -32,7 +32,7 @@ class MIDIManagerWin : public MIDIManager {
std::vector<scoped_ptr<InDeviceInfo> > in_devices_;
std::vector<scoped_ptr<OutDeviceInfo> > out_devices_;
base::Thread send_thread_;
- DISALLOW_COPY_AND_ASSIGN(MIDIManagerWin);
+ DISALLOW_COPY_AND_ASSIGN(MidiManagerWin);
};
} // namespace media
diff --git a/chromium/media/midi/midi_message_queue.cc b/chromium/media/midi/midi_message_queue.cc
index 3452e80be9d..af403535869 100644
--- a/chromium/media/midi/midi_message_queue.cc
+++ b/chromium/media/midi/midi_message_queue.cc
@@ -29,20 +29,20 @@ bool IsSystemRealTimeMessage(uint8 data) {
} // namespace
-MIDIMessageQueue::MIDIMessageQueue(bool allow_running_status)
+MidiMessageQueue::MidiMessageQueue(bool allow_running_status)
: allow_running_status_(allow_running_status) {}
-MIDIMessageQueue::~MIDIMessageQueue() {}
+MidiMessageQueue::~MidiMessageQueue() {}
-void MIDIMessageQueue::Add(const std::vector<uint8>& data) {
+void MidiMessageQueue::Add(const std::vector<uint8>& data) {
queue_.insert(queue_.end(), data.begin(), data.end());
}
-void MIDIMessageQueue::Add(const uint8* data, size_t length) {
+void MidiMessageQueue::Add(const uint8* data, size_t length) {
queue_.insert(queue_.end(), data, data + length);
}
-void MIDIMessageQueue::Get(std::vector<uint8>* message) {
+void MidiMessageQueue::Get(std::vector<uint8>* message) {
message->clear();
while (true) {
@@ -98,7 +98,7 @@ void MIDIMessageQueue::Get(std::vector<uint8>* message) {
DCHECK(IsDataByte(next));
DCHECK_NE(kSysEx, status_byte);
- const size_t target_len = GetMIDIMessageLength(status_byte);
+ const size_t target_len = GetMidiMessageLength(status_byte);
if (next_message_.size() < target_len)
continue;
if (next_message_.size() == target_len) {
diff --git a/chromium/media/midi/midi_message_queue.h b/chromium/media/midi/midi_message_queue.h
index 06f0f4787fb..f565f188293 100644
--- a/chromium/media/midi/midi_message_queue.h
+++ b/chromium/media/midi/midi_message_queue.h
@@ -24,7 +24,7 @@ namespace media {
// MIDI status byte is abbreviated (a.k.a. "running status").
//
// Example (pseudo message loop):
-// MIDIMessageQueue queue(true); // true to support "running status"
+// MidiMessageQueue queue(true); // true to support "running status"
// while (true) {
// if (is_incoming_midi_data_available()) {
// std::vector<uint8> incoming_data;
@@ -38,12 +38,12 @@ namespace media {
// dispatch(next_message);
// }
// }
-class MEDIA_EXPORT MIDIMessageQueue {
+class MEDIA_EXPORT MidiMessageQueue {
public:
// Initializes the queue. Set true to |allow_running_status| to enable
// "MIDI running status" reconstruction.
- explicit MIDIMessageQueue(bool allow_running_status);
- ~MIDIMessageQueue();
+ explicit MidiMessageQueue(bool allow_running_status);
+ ~MidiMessageQueue();
// Enqueues |data| to the internal buffer.
void Add(const std::vector<uint8>& data);
@@ -64,7 +64,7 @@ class MEDIA_EXPORT MIDIMessageQueue {
std::deque<uint8> queue_;
std::vector<uint8> next_message_;
const bool allow_running_status_;
- DISALLOW_COPY_AND_ASSIGN(MIDIMessageQueue);
+ DISALLOW_COPY_AND_ASSIGN(MidiMessageQueue);
};
} // namespace media
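
Concretely, the running-status reconstruction this class performs turns an
abbreviated byte stream back into complete messages. A usage sketch with
example Note On data (the second status byte is omitted on the wire):

    MidiMessageQueue queue(true);  // true: reconstruct running status.
    const uint8 stream[] = { 0x90, 0x3c, 0x7f, 0x3e, 0x7f };
    queue.Add(stream, arraysize(stream));

    std::vector<uint8> message;
    queue.Get(&message);  // {0x90, 0x3c, 0x7f}
    queue.Get(&message);  // {0x90, 0x3e, 0x7f} -- status byte reinserted.
    queue.Get(&message);  // Empty: no complete message is left.
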
diff --git a/chromium/media/midi/midi_message_queue_unittest.cc b/chromium/media/midi/midi_message_queue_unittest.cc
index a00eea6b9e4..3c7122654e7 100644
--- a/chromium/media/midi/midi_message_queue_unittest.cc
+++ b/chromium/media/midi/midi_message_queue_unittest.cc
@@ -28,7 +28,7 @@ const uint8 kBrokenData3[] = { 0xf2, 0x00 };
const uint8 kDataByte0[] = { 0x00 };
template <typename T, size_t N>
-void Add(MIDIMessageQueue* queue, const T(&array)[N]) {
+void Add(MidiMessageQueue* queue, const T(&array)[N]) {
queue->Add(array, N);
}
@@ -54,15 +54,15 @@ template <typename T, size_t N>
#define EXPECT_MESSAGE(expected, actual) \
EXPECT_PRED_FORMAT2(ExpectEqualSequence, expected, actual)
-TEST(MIDIMessageQueueTest, EmptyData) {
- MIDIMessageQueue queue(false);
+TEST(MidiMessageQueueTest, EmptyData) {
+ MidiMessageQueue queue(false);
std::vector<uint8> message;
queue.Get(&message);
EXPECT_TRUE(message.empty());
}
-TEST(MIDIMessageQueueTest, RunningStatusDisabled) {
- MIDIMessageQueue queue(false);
+TEST(MidiMessageQueueTest, RunningStatusDisabled) {
+ MidiMessageQueue queue(false);
Add(&queue, kGMOn);
Add(&queue, kBrokenData1);
Add(&queue, kNoteOnWithRunningStatus);
@@ -94,8 +94,8 @@ TEST(MIDIMessageQueueTest, RunningStatusDisabled) {
EXPECT_TRUE(message.empty());
}
-TEST(MIDIMessageQueueTest, RunningStatusEnabled) {
- MIDIMessageQueue queue(true);
+TEST(MidiMessageQueueTest, RunningStatusEnabled) {
+ MidiMessageQueue queue(true);
Add(&queue, kGMOn);
Add(&queue, kBrokenData1);
Add(&queue, kNoteOnWithRunningStatus);
@@ -139,8 +139,8 @@ TEST(MIDIMessageQueueTest, RunningStatusEnabled) {
<< "Running status must not be applied to real time messages";
}
-TEST(MIDIMessageQueueTest, RunningStatusEnabledWithRealTimeEvent) {
- MIDIMessageQueue queue(true);
+TEST(MidiMessageQueueTest, RunningStatusEnabledWithRealTimeEvent) {
+ MidiMessageQueue queue(true);
const uint8 kNoteOnWithRunningStatusWithkTimingClock[] = {
0x90, 0xf8, 0x3c, 0xf8, 0x7f, 0xf8, 0x3c, 0xf8, 0x7f, 0xf8, 0x3c, 0xf8,
0x7f,
diff --git a/chromium/media/midi/midi_message_util.cc b/chromium/media/midi/midi_message_util.cc
index 83d3cc071d9..9e913d70c06 100644
--- a/chromium/media/midi/midi_message_util.cc
+++ b/chromium/media/midi/midi_message_util.cc
@@ -6,7 +6,7 @@
namespace media {
-size_t GetMIDIMessageLength(uint8 status_byte) {
+size_t GetMidiMessageLength(uint8 status_byte) {
if (status_byte < 0x80)
return 0;
if (0x80 <= status_byte && status_byte <= 0xbf)
diff --git a/chromium/media/midi/midi_message_util.h b/chromium/media/midi/midi_message_util.h
index 1dc6d3cba78..faaff178373 100644
--- a/chromium/media/midi/midi_message_util.h
+++ b/chromium/media/midi/midi_message_util.h
@@ -18,7 +18,15 @@ namespace media {
// - not a valid status byte, namely data byte.
// - the MIDI System Exclusive message.
// - the End of System Exclusive message.
-MEDIA_EXPORT size_t GetMIDIMessageLength(uint8 status_byte);
+MEDIA_EXPORT size_t GetMidiMessageLength(uint8 status_byte);
+
+const uint8 kSysExByte = 0xf0;
+const uint8 kEndOfSysExByte = 0xf7;
+
+const uint8 kSysMessageBitMask = 0xf0;
+const uint8 kSysMessageBitPattern = 0xf0;
+const uint8 kSysRTMessageBitMask = 0xf8;
+const uint8 kSysRTMessageBitPattern = 0xf8;
} // namespace media
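A brief sketch, assuming the Chromium tree, of how the mask/pattern constants above classify a status byte; usb_midi_output_stream.cc later in this change performs exactly these tests inline.

    #include "media/midi/midi_message_util.h"

    namespace media {

    // 0xf0..0xff: any System message (Exclusive, Common or Real-Time).
    bool IsSystemMessage(uint8 byte) {
      return (byte & kSysMessageBitMask) == kSysMessageBitPattern;
    }

    // 0xf8..0xff: System Real-Time messages only.
    bool IsSystemRealTimeMessage(uint8 byte) {
      return (byte & kSysRTMessageBitMask) == kSysRTMessageBitPattern;
    }

    }  // namespace media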
diff --git a/chromium/media/midi/midi_message_util_unittest.cc b/chromium/media/midi/midi_message_util_unittest.cc
index af3679c2987..529efbf9586 100644
--- a/chromium/media/midi/midi_message_util_unittest.cc
+++ b/chromium/media/midi/midi_message_util_unittest.cc
@@ -14,20 +14,20 @@ const uint8 kNoteOn[] = { 0x90, 0x3c, 0x7f };
const uint8 kChannelPressure[] = { 0xd0, 0x01 };
const uint8 kTimingClock[] = { 0xf8 };
-TEST(GetMIDIMessageLengthTest, BasicTest) {
+TEST(GetMidiMessageLengthTest, BasicTest) {
// Check basic functionality
- EXPECT_EQ(arraysize(kNoteOn), GetMIDIMessageLength(kNoteOn[0]));
+ EXPECT_EQ(arraysize(kNoteOn), GetMidiMessageLength(kNoteOn[0]));
EXPECT_EQ(arraysize(kChannelPressure),
- GetMIDIMessageLength(kChannelPressure[0]));
- EXPECT_EQ(arraysize(kTimingClock), GetMIDIMessageLength(kTimingClock[0]));
+ GetMidiMessageLength(kChannelPressure[0]));
+ EXPECT_EQ(arraysize(kTimingClock), GetMidiMessageLength(kTimingClock[0]));
// SysEx message should be mapped to 0-length
- EXPECT_EQ(0u, GetMIDIMessageLength(kGMOn[0]));
+ EXPECT_EQ(0u, GetMidiMessageLength(kGMOn[0]));
// Any data byte should be mapped to 0-length
- EXPECT_EQ(0u, GetMIDIMessageLength(kGMOn[1]));
- EXPECT_EQ(0u, GetMIDIMessageLength(kNoteOn[1]));
- EXPECT_EQ(0u, GetMIDIMessageLength(kChannelPressure[1]));
+ EXPECT_EQ(0u, GetMidiMessageLength(kGMOn[1]));
+ EXPECT_EQ(0u, GetMidiMessageLength(kNoteOn[1]));
+ EXPECT_EQ(0u, GetMidiMessageLength(kChannelPressure[1]));
}
} // namespace
diff --git a/chromium/media/midi/midi_port_info.cc b/chromium/media/midi/midi_port_info.cc
index 3be70070b7c..02b4aa9ba7d 100644
--- a/chromium/media/midi/midi_port_info.cc
+++ b/chromium/media/midi/midi_port_info.cc
@@ -6,9 +6,9 @@
namespace media {
-MIDIPortInfo::MIDIPortInfo() {}
+MidiPortInfo::MidiPortInfo() {}
-MIDIPortInfo::MIDIPortInfo(const std::string& in_id,
+MidiPortInfo::MidiPortInfo(const std::string& in_id,
const std::string& in_manufacturer,
const std::string& in_name,
const std::string& in_version)
@@ -17,9 +17,9 @@ MIDIPortInfo::MIDIPortInfo(const std::string& in_id,
name(in_name),
version(in_version) {}
-MIDIPortInfo::~MIDIPortInfo() {}
+MidiPortInfo::~MidiPortInfo() {}
-MIDIPortInfo::MIDIPortInfo(const MIDIPortInfo& info)
+MidiPortInfo::MidiPortInfo(const MidiPortInfo& info)
: id(info.id),
manufacturer(info.manufacturer),
name(info.name),
diff --git a/chromium/media/midi/midi_port_info.h b/chromium/media/midi/midi_port_info.h
index f4afb49a4f5..1fe3bcaf0fc 100644
--- a/chromium/media/midi/midi_port_info.h
+++ b/chromium/media/midi/midi_port_info.h
@@ -13,15 +13,15 @@
namespace media {
-struct MEDIA_EXPORT MIDIPortInfo {
- MIDIPortInfo();
- MIDIPortInfo(const std::string& in_id,
+struct MEDIA_EXPORT MidiPortInfo {
+ MidiPortInfo();
+ MidiPortInfo(const std::string& in_id,
const std::string& in_manufacturer,
const std::string& in_name,
const std::string& in_version);
- MIDIPortInfo(const MIDIPortInfo& info);
- ~MIDIPortInfo();
+ MidiPortInfo(const MidiPortInfo& info);
+ ~MidiPortInfo();
std::string id;
std::string manufacturer;
@@ -29,7 +29,7 @@ struct MEDIA_EXPORT MIDIPortInfo {
std::string version;
};
-typedef std::vector<MIDIPortInfo> MIDIPortInfoList;
+typedef std::vector<MidiPortInfo> MidiPortInfoList;
} // namespace media
diff --git a/chromium/media/midi/midi_result.h b/chromium/media/midi/midi_result.h
new file mode 100644
index 00000000000..1e104016143
--- /dev/null
+++ b/chromium/media/midi/midi_result.h
@@ -0,0 +1,24 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_RESULT_H_
+#define MEDIA_MIDI_MIDI_RESULT_H_
+
+namespace media {
+
+// Result codes for MIDI.
+enum MidiResult {
+ MIDI_OK,
+ MIDI_NOT_SUPPORTED,
+ MIDI_INITIALIZATION_ERROR,
+
+  // |MIDI_RESULT_LAST| is used in content/common/media/midi_messages.h with
+  // the IPC_ENUM_TRAITS_MAX_VALUE macro. Keep the value up to date; otherwise
+  // a new value cannot be passed to the renderer.
+ MIDI_RESULT_LAST = MIDI_INITIALIZATION_ERROR,
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_RESULT_H_
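The comment on |MIDI_RESULT_LAST| refers to the IPC registration; presumably the corresponding line in content/common/media/midi_messages.h (not part of this diff) reads:

    IPC_ENUM_TRAITS_MAX_VALUE(media::MidiResult, media::MIDI_RESULT_LAST)

The macro rejects any value above the stated maximum during deserialization, which is why the sentinel must track the last real enumerator.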
diff --git a/chromium/media/midi/usb_midi_descriptor_parser.cc b/chromium/media/midi/usb_midi_descriptor_parser.cc
new file mode 100644
index 00000000000..d454ff94691
--- /dev/null
+++ b/chromium/media/midi/usb_midi_descriptor_parser.cc
@@ -0,0 +1,235 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_descriptor_parser.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+
+namespace media {
+
+namespace {
+
+// The constants below are specified in the USB spec, the USB audio spec,
+// and the USB-MIDI spec.
+
+enum DescriptorType {
+ TYPE_DEVICE = 1,
+ TYPE_CONFIGURATION = 2,
+ TYPE_STRING = 3,
+ TYPE_INTERFACE = 4,
+ TYPE_ENDPOINT = 5,
+ TYPE_DEVICE_QUALIFIER = 6,
+ TYPE_OTHER_SPEED_CONFIGURATION = 7,
+ TYPE_INTERFACE_POWER = 8,
+
+ TYPE_CS_INTERFACE = 36,
+ TYPE_CS_ENDPOINT = 37,
+};
+
+enum DescriptorSubType {
+ SUBTYPE_MS_DESCRIPTOR_UNDEFINED = 0,
+ SUBTYPE_MS_HEADER = 1,
+ SUBTYPE_MIDI_IN_JACK = 2,
+ SUBTYPE_MIDI_OUT_JACK = 3,
+ SUBTYPE_ELEMENT = 4,
+};
+
+enum JackType {
+ JACK_TYPE_UNDEFINED = 0,
+ JACK_TYPE_EMBEDDED = 1,
+ JACK_TYPE_EXTERNAL = 2,
+};
+
+const uint8 kAudioInterfaceClass = 1;
+const uint8 kAudioMidiInterfaceSubclass = 3;
+
+class JackMatcher {
+ public:
+ explicit JackMatcher(uint8 id) : id_(id) {}
+
+ bool operator() (const UsbMidiJack& jack) const {
+ return jack.jack_id == id_;
+ }
+
+ private:
+ uint8 id_;
+};
+
+} // namespace
+
+UsbMidiDescriptorParser::UsbMidiDescriptorParser()
+ : is_parsing_usb_midi_interface_(false),
+ current_endpoint_address_(0),
+ current_cable_number_(0) {}
+
+UsbMidiDescriptorParser::~UsbMidiDescriptorParser() {}
+
+bool UsbMidiDescriptorParser::Parse(UsbMidiDevice* device,
+ const uint8* data,
+ size_t size,
+ std::vector<UsbMidiJack>* jacks) {
+ jacks->clear();
+ bool result = ParseInternal(device, data, size, jacks);
+ if (!result)
+ jacks->clear();
+ Clear();
+ return result;
+}
+
+bool UsbMidiDescriptorParser::ParseInternal(UsbMidiDevice* device,
+ const uint8* data,
+ size_t size,
+ std::vector<UsbMidiJack>* jacks) {
+ for (const uint8* current = data;
+ current < data + size;
+ current += current[0]) {
+ uint8 length = current[0];
+ if (length < 2) {
+ DVLOG(1) << "Descriptor Type is not accessible.";
+ return false;
+ }
+ if (current + length > data + size) {
+ DVLOG(1) << "The header size is incorrect.";
+ return false;
+ }
+ DescriptorType descriptor_type = static_cast<DescriptorType>(current[1]);
+ if (descriptor_type != TYPE_INTERFACE && !is_parsing_usb_midi_interface_)
+ continue;
+
+ switch (descriptor_type) {
+ case TYPE_INTERFACE:
+ if (!ParseInterface(current, length))
+ return false;
+ break;
+ case TYPE_CS_INTERFACE:
+ // We are assuming that the corresponding INTERFACE precedes
+ // the CS_INTERFACE descriptor, as specified.
+ if (!ParseCSInterface(device, current, length))
+ return false;
+ break;
+ case TYPE_ENDPOINT:
+ // We are assuming that endpoints are contained in an interface.
+ if (!ParseEndpoint(current, length))
+ return false;
+ break;
+ case TYPE_CS_ENDPOINT:
+ // We are assuming that the corresponding ENDPOINT precedes
+ // the CS_ENDPOINT descriptor, as specified.
+ if (!ParseCSEndpoint(current, length, jacks))
+ return false;
+ break;
+ default:
+ // Ignore uninteresting types.
+ break;
+ }
+ }
+ return true;
+}
+
+bool UsbMidiDescriptorParser::ParseInterface(const uint8* data, size_t size) {
+ if (size != 9) {
+ DVLOG(1) << "INTERFACE header size is incorrect.";
+ return false;
+ }
+ incomplete_jacks_.clear();
+
+ uint8 interface_class = data[5];
+ uint8 interface_subclass = data[6];
+
+ // All descriptors of endpoints contained in this interface
+ // precede the next INTERFACE descriptor.
+ is_parsing_usb_midi_interface_ =
+ interface_class == kAudioInterfaceClass &&
+ interface_subclass == kAudioMidiInterfaceSubclass;
+ return true;
+}
+
+bool UsbMidiDescriptorParser::ParseCSInterface(UsbMidiDevice* device,
+ const uint8* data,
+ size_t size) {
+ // Descriptor Type and Descriptor Subtype should be accessible.
+ if (size < 3) {
+ DVLOG(1) << "CS_INTERFACE header size is incorrect.";
+ return false;
+ }
+
+ DescriptorSubType subtype = static_cast<DescriptorSubType>(data[2]);
+
+ if (subtype != SUBTYPE_MIDI_OUT_JACK &&
+ subtype != SUBTYPE_MIDI_IN_JACK)
+ return true;
+
+ if (size < 6) {
+ DVLOG(1) << "CS_INTERFACE (MIDI JACK) header size is incorrect.";
+ return false;
+ }
+ uint8 jack_type = data[3];
+ uint8 id = data[4];
+ if (jack_type == JACK_TYPE_EMBEDDED) {
+ // We can't determine the associated endpoint now.
+ incomplete_jacks_.push_back(UsbMidiJack(device, id, 0, 0));
+ }
+ return true;
+}
+
+bool UsbMidiDescriptorParser::ParseEndpoint(const uint8* data, size_t size) {
+ if (size < 4) {
+ DVLOG(1) << "ENDPOINT header size is incorrect.";
+ return false;
+ }
+ current_endpoint_address_ = data[2];
+ current_cable_number_ = 0;
+ return true;
+}
+
+bool UsbMidiDescriptorParser::ParseCSEndpoint(const uint8* data,
+ size_t size,
+ std::vector<UsbMidiJack>* jacks) {
+ const size_t kSizeForEmptyJacks = 4;
+ // CS_ENDPOINT must be of size 4 + n where n is the number of associated
+ // jacks.
+ if (size < kSizeForEmptyJacks) {
+ DVLOG(1) << "CS_ENDPOINT header size is incorrect.";
+ return false;
+ }
+ uint8 num_jacks = data[3];
+ if (size != kSizeForEmptyJacks + num_jacks) {
+ DVLOG(1) << "CS_ENDPOINT header size is incorrect.";
+ return false;
+ }
+
+ for (size_t i = 0; i < num_jacks; ++i) {
+ uint8 jack = data[kSizeForEmptyJacks + i];
+ std::vector<UsbMidiJack>::iterator it =
+ std::find_if(incomplete_jacks_.begin(),
+ incomplete_jacks_.end(),
+ JackMatcher(jack));
+ if (it == incomplete_jacks_.end()) {
+ DVLOG(1) << "A non-existing MIDI jack is associated.";
+ return false;
+ }
+ if (current_cable_number_ > 0xf) {
+ DVLOG(1) << "Cable number should range from 0x0 to 0xf.";
+ return false;
+ }
+ // CS_ENDPOINT follows ENDPOINT and hence we can use the following
+ // member variables.
+ it->cable_number = current_cable_number_++;
+ it->endpoint_address = current_endpoint_address_;
+ jacks->push_back(*it);
+ incomplete_jacks_.erase(it);
+ }
+ return true;
+}
+
+void UsbMidiDescriptorParser::Clear() {
+ is_parsing_usb_midi_interface_ = false;
+ current_endpoint_address_ = 0;
+ current_cable_number_ = 0;
+ incomplete_jacks_.clear();
+}
+
+} // namespace media
diff --git a/chromium/media/midi/usb_midi_descriptor_parser.h b/chromium/media/midi/usb_midi_descriptor_parser.h
new file mode 100644
index 00000000000..826a449ef6e
--- /dev/null
+++ b/chromium/media/midi/usb_midi_descriptor_parser.h
@@ -0,0 +1,60 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_DESCRIPTOR_PARSER_H_
+#define MEDIA_MIDI_USB_MIDI_DESCRIPTOR_PARSER_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/midi/usb_midi_jack.h"
+
+namespace media {
+
+class UsbMidiDevice;
+
+// UsbMidiDescriptorParser parses USB descriptors and
+// generates input/output lists of MidiPortInfo.
+// This is not a generic USB descriptor parser: this parser is designed
+// for collecting USB-MIDI jack information from the descriptor.
+class MEDIA_EXPORT UsbMidiDescriptorParser {
+ public:
+ UsbMidiDescriptorParser();
+ ~UsbMidiDescriptorParser();
+
+ // Returns true if the operation succeeds.
+ // When an incorrect input is given, this method may return true but
+ // never crashes.
+ bool Parse(UsbMidiDevice* device,
+ const uint8* data,
+ size_t size,
+ std::vector<UsbMidiJack>* jacks);
+
+ private:
+ bool ParseInternal(UsbMidiDevice* device,
+ const uint8* data,
+ size_t size,
+ std::vector<UsbMidiJack>* jacks);
+ bool ParseInterface(const uint8* data, size_t size);
+ bool ParseCSInterface(UsbMidiDevice* device, const uint8* data, size_t size);
+ bool ParseEndpoint(const uint8* data, size_t size);
+ bool ParseCSEndpoint(const uint8* data,
+ size_t size,
+ std::vector<UsbMidiJack>* jacks);
+ void Clear();
+
+ bool is_parsing_usb_midi_interface_;
+ uint8 current_endpoint_address_;
+ uint8 current_cable_number_;
+
+ std::vector<UsbMidiJack> incomplete_jacks_;
+
+ DISALLOW_COPY_AND_ASSIGN(UsbMidiDescriptorParser);
+};
+
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_DESCRIPTOR_PARSER_H_
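A minimal usage sketch for the parser, mirroring the unit tests that follow; CollectJacks is a hypothetical wrapper and the Chromium tree is assumed.

    #include <vector>

    #include "media/midi/usb_midi_descriptor_parser.h"

    bool CollectJacks(media::UsbMidiDevice* device,
                      const std::vector<uint8>& descriptor,
                      std::vector<media::UsbMidiJack>* jacks) {
      media::UsbMidiDescriptorParser parser;
      const uint8* data = descriptor.empty() ? NULL : &descriptor[0];
      // On malformed input Parse() returns false and leaves |jacks| empty;
      // it never crashes.
      return parser.Parse(device, data, descriptor.size(), jacks);
    }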
diff --git a/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc b/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
new file mode 100644
index 00000000000..4e243d15b72
--- /dev/null
+++ b/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_descriptor_parser.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+TEST(UsbMidiDescriptorParserTest, ParseEmpty) {
+ UsbMidiDescriptorParser parser;
+ std::vector<UsbMidiJack> jacks;
+ EXPECT_TRUE(parser.Parse(NULL, NULL, 0, &jacks));
+ EXPECT_TRUE(jacks.empty());
+}
+
+TEST(UsbMidiDescriptorParserTest, InvalidSize) {
+ UsbMidiDescriptorParser parser;
+ std::vector<UsbMidiJack> jacks;
+ uint8 data[] = {0x04};
+ EXPECT_FALSE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_TRUE(jacks.empty());
+}
+
+TEST(UsbMidiDescriptorParserTest, NonExistingJackIsAssociated) {
+ UsbMidiDescriptorParser parser;
+ std::vector<UsbMidiJack> jacks;
+ // Jack id=1 is found in a CS_ENDPOINT descriptor, but there is no definition
+ // for the jack.
+ uint8 data[] = {
+ 0x09, 0x04, 0x01, 0x00, 0x02, 0x01, 0x03, 0x00, 0x00, 0x07,
+ 0x24, 0x01, 0x00, 0x01, 0x07, 0x00, 0x05, 0x25, 0x01, 0x01,
+ 0x01,
+ };
+ EXPECT_FALSE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_TRUE(jacks.empty());
+}
+
+TEST(UsbMidiDescriptorParserTest,
+ JacksShouldBeIgnoredWhenParserIsNotParsingMidiInterface) {
+ UsbMidiDescriptorParser parser;
+ std::vector<UsbMidiJack> jacks;
+  // A NON-MIDI INTERFACE descriptor followed by ENDPOINT and CS_ENDPOINT
+  // descriptors (compare with the previous test case).
+ uint8 data[] = {
+ 0x09, 0x04, 0x01, 0x00, 0x02, 0x01, 0x02, 0x00, 0x00, 0x07,
+ 0x24, 0x01, 0x00, 0x01, 0x07, 0x00, 0x05, 0x25, 0x01, 0x01,
+ 0x01,
+ };
+ EXPECT_TRUE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ EXPECT_TRUE(jacks.empty());
+}
+
+TEST(UsbMidiDescriptorParserTest, Parse) {
+ UsbMidiDescriptorParser parser;
+ std::vector<UsbMidiJack> jacks;
+ // A complete device descriptor.
+ uint8 data[] = {
+ 0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x86, 0x1a,
+ 0x2d, 0x75, 0x54, 0x02, 0x00, 0x02, 0x00, 0x01, 0x09, 0x02,
+ 0x75, 0x00, 0x02, 0x01, 0x00, 0x80, 0x30, 0x09, 0x04, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x09, 0x24, 0x01, 0x00,
+ 0x01, 0x09, 0x00, 0x01, 0x01, 0x09, 0x04, 0x01, 0x00, 0x02,
+ 0x01, 0x03, 0x00, 0x00, 0x07, 0x24, 0x01, 0x00, 0x01, 0x51,
+ 0x00, 0x06, 0x24, 0x02, 0x01, 0x02, 0x00, 0x06, 0x24, 0x02,
+ 0x01, 0x03, 0x00, 0x06, 0x24, 0x02, 0x02, 0x06, 0x00, 0x09,
+ 0x24, 0x03, 0x01, 0x07, 0x01, 0x06, 0x01, 0x00, 0x09, 0x24,
+ 0x03, 0x02, 0x04, 0x01, 0x02, 0x01, 0x00, 0x09, 0x24, 0x03,
+ 0x02, 0x05, 0x01, 0x03, 0x01, 0x00, 0x09, 0x05, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x06, 0x25, 0x01, 0x02, 0x02,
+ 0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x25, 0x01, 0x01, 0x07,
+ };
+ EXPECT_TRUE(parser.Parse(NULL, data, arraysize(data), &jacks));
+ ASSERT_EQ(3u, jacks.size());
+
+ EXPECT_EQ(2u, jacks[0].jack_id);
+ EXPECT_EQ(0u, jacks[0].cable_number);
+ EXPECT_EQ(2u, jacks[0].endpoint_number());
+ EXPECT_EQ(UsbMidiJack::DIRECTION_OUT, jacks[0].direction());
+ EXPECT_EQ(NULL, jacks[0].device);
+
+ EXPECT_EQ(3u, jacks[1].jack_id);
+ EXPECT_EQ(1u, jacks[1].cable_number);
+ EXPECT_EQ(2u, jacks[1].endpoint_number());
+ EXPECT_EQ(UsbMidiJack::DIRECTION_OUT, jacks[1].direction());
+ EXPECT_EQ(NULL, jacks[1].device);
+
+ EXPECT_EQ(7u, jacks[2].jack_id);
+ EXPECT_EQ(0u, jacks[2].cable_number);
+ EXPECT_EQ(2u, jacks[2].endpoint_number());
+ EXPECT_EQ(UsbMidiJack::DIRECTION_IN, jacks[2].direction());
+ EXPECT_EQ(NULL, jacks[2].device);
+}
+
+} // namespace
+
+} // namespace media
diff --git a/chromium/media/midi/usb_midi_device.h b/chromium/media/midi/usb_midi_device.h
new file mode 100644
index 00000000000..6af92b6ef6b
--- /dev/null
+++ b/chromium/media/midi/usb_midi_device.h
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_DEVICE_H_
+#define MEDIA_MIDI_USB_MIDI_DEVICE_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/scoped_vector.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class UsbMidiDevice;
+
+// Delegate class for UsbMidiDevice.
+// Each method is called when a corresponding event arrives at the device.
+class MEDIA_EXPORT UsbMidiDeviceDelegate {
+ public:
+ virtual ~UsbMidiDeviceDelegate() {}
+
+ // Called when USB-MIDI data arrives at |device|.
+ virtual void ReceiveUsbMidiData(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) = 0;
+};
+
+// UsbMidiDevice represents a USB-MIDI device.
+// This is an interface class; each platform-dependent implementation
+// derives from it.
+class MEDIA_EXPORT UsbMidiDevice {
+ public:
+ typedef ScopedVector<UsbMidiDevice> Devices;
+
+ // Factory class for USB-MIDI devices.
+  // Each concrete implementation will find and create devices
+  // in a platform-dependent way.
+ class Factory {
+ public:
+ typedef base::Callback<void(bool result, Devices* devices)> Callback;
+ virtual ~Factory() {}
+ // Enumerates devices.
+ // Devices that have no USB-MIDI interfaces can be omitted.
+ // When the operation succeeds, |callback| will be called with |true| and
+ // devices.
+ // Otherwise |callback| will be called with |false| and empty devices.
+ // When this factory is destroyed during the operation, the operation
+ // will be canceled silently (i.e. |callback| will not be called).
+ // This function can be called at most once per instance.
+ virtual void EnumerateDevices(UsbMidiDeviceDelegate* delegate,
+ Callback callback) = 0;
+ };
+
+ virtual ~UsbMidiDevice() {}
+
+ // Returns the descriptor of this device.
+ virtual std::vector<uint8> GetDescriptor() = 0;
+
+ // Sends |data| to the given USB endpoint of this device.
+ virtual void Send(int endpoint_number, const std::vector<uint8>& data) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_DEVICE_H_
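A sketch of a trivial delegate; LoggingDelegate is hypothetical, and base/logging.h plus the OVERRIDE macro come from the Chromium tree.

    #include "base/logging.h"
    #include "media/midi/usb_midi_device.h"

    class LoggingDelegate : public media::UsbMidiDeviceDelegate {
     public:
      virtual ~LoggingDelegate() {}

      // Logs every packet the device hands us.
      virtual void ReceiveUsbMidiData(media::UsbMidiDevice* device,
                                      int endpoint_number,
                                      const uint8* data,
                                      size_t size,
                                      base::TimeTicks time) OVERRIDE {
        DVLOG(1) << size << " bytes arrived on endpoint " << endpoint_number;
      }
    };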
diff --git a/chromium/media/midi/usb_midi_device_android.cc b/chromium/media/midi/usb_midi_device_android.cc
new file mode 100644
index 00000000000..8f93c3d47a0
--- /dev/null
+++ b/chromium/media/midi/usb_midi_device_android.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_device_android.h"
+
+#include <jni.h>
+#include <vector>
+
+#include "base/android/jni_array.h"
+#include "base/time/time.h"
+#include "jni/UsbMidiDeviceAndroid_jni.h"
+
+namespace media {
+
+UsbMidiDeviceAndroid::UsbMidiDeviceAndroid(ObjectRef raw_device,
+ UsbMidiDeviceDelegate* delegate)
+ : raw_device_(raw_device), delegate_(delegate) {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_UsbMidiDeviceAndroid_registerSelf(
+ env, raw_device_.obj(), reinterpret_cast<jlong>(this));
+}
+
+UsbMidiDeviceAndroid::~UsbMidiDeviceAndroid() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_UsbMidiDeviceAndroid_close(env, raw_device_.obj());
+}
+
+std::vector<uint8> UsbMidiDeviceAndroid::GetDescriptor() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ base::android::ScopedJavaLocalRef<jbyteArray> descriptors =
+ Java_UsbMidiDeviceAndroid_getDescriptors(env, raw_device_.obj());
+
+ std::vector<uint8> ret;
+ base::android::JavaByteArrayToByteVector(env, descriptors.obj(), &ret);
+ return ret;
+}
+
+void UsbMidiDeviceAndroid::Send(int endpoint_number,
+ const std::vector<uint8>& data) {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ const uint8* head = data.size() ? &data[0] : NULL;
+ ScopedJavaLocalRef<jbyteArray> data_to_pass =
+ base::android::ToJavaByteArray(env, head, data.size());
+
+ Java_UsbMidiDeviceAndroid_send(
+ env, raw_device_.obj(), endpoint_number, data_to_pass.obj());
+}
+
+void UsbMidiDeviceAndroid::OnData(JNIEnv* env,
+ jobject caller,
+ jint endpoint_number,
+ jbyteArray data) {
+ std::vector<uint8> bytes;
+ base::android::JavaByteArrayToByteVector(env, data, &bytes);
+
+ const uint8* head = bytes.size() ? &bytes[0] : NULL;
+ delegate_->ReceiveUsbMidiData(this, endpoint_number, head, bytes.size(),
+ base::TimeTicks::HighResNow());
+}
+
+bool UsbMidiDeviceAndroid::RegisterUsbMidiDevice(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace media
diff --git a/chromium/media/midi/usb_midi_device_android.h b/chromium/media/midi/usb_midi_device_android.h
new file mode 100644
index 00000000000..70d1855fd2f
--- /dev/null
+++ b/chromium/media/midi/usb_midi_device_android.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_DEVICE_ANDROID_H_
+#define MEDIA_MIDI_USB_MIDI_DEVICE_ANDROID_H_
+
+#include <jni.h>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "media/base/media_export.h"
+#include "media/midi/usb_midi_device.h"
+
+namespace media {
+
+class MEDIA_EXPORT UsbMidiDeviceAndroid : public UsbMidiDevice {
+ public:
+ typedef base::android::ScopedJavaLocalRef<jobject> ObjectRef;
+
+ static scoped_ptr<Factory> CreateFactory();
+
+ UsbMidiDeviceAndroid(ObjectRef raw_device, UsbMidiDeviceDelegate* delegate);
+ virtual ~UsbMidiDeviceAndroid();
+
+ // UsbMidiDevice implementation.
+ virtual std::vector<uint8> GetDescriptor() OVERRIDE;
+ virtual void Send(int endpoint_number,
+ const std::vector<uint8>& data) OVERRIDE;
+
+ // Called by the Java world.
+ void OnData(JNIEnv* env,
+ jobject caller,
+ jint endpoint_number,
+ jbyteArray data);
+
+ static bool RegisterUsbMidiDevice(JNIEnv* env);
+
+ private:
+ // The actual device object.
+ base::android::ScopedJavaGlobalRef<jobject> raw_device_;
+ UsbMidiDeviceDelegate* delegate_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(UsbMidiDeviceAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_DEVICE_ANDROID_H_
diff --git a/chromium/media/midi/usb_midi_device_factory_android.cc b/chromium/media/midi/usb_midi_device_factory_android.cc
new file mode 100644
index 00000000000..081e6b2c2d2
--- /dev/null
+++ b/chromium/media/midi/usb_midi_device_factory_android.cc
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_device_factory_android.h"
+
+#include <jni.h>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/bind.h"
+#include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
+#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/lock.h"
+#include "jni/UsbMidiDeviceFactoryAndroid_jni.h"
+#include "media/midi/usb_midi_device_android.h"
+
+namespace media {
+
+namespace {
+
+typedef UsbMidiDevice::Factory::Callback Callback;
+
+} // namespace
+
+UsbMidiDeviceFactoryAndroid::UsbMidiDeviceFactoryAndroid() : delegate_(NULL) {}
+
+UsbMidiDeviceFactoryAndroid::~UsbMidiDeviceFactoryAndroid() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ if (!raw_factory_.is_null())
+ Java_UsbMidiDeviceFactoryAndroid_close(env, raw_factory_.obj());
+}
+
+void UsbMidiDeviceFactoryAndroid::EnumerateDevices(
+ UsbMidiDeviceDelegate* delegate,
+ Callback callback) {
+ DCHECK(!delegate_);
+ JNIEnv* env = base::android::AttachCurrentThread();
+ uintptr_t pointer = reinterpret_cast<uintptr_t>(this);
+ raw_factory_.Reset(Java_UsbMidiDeviceFactoryAndroid_create(env, pointer));
+
+ delegate_ = delegate;
+ callback_ = callback;
+
+ if (Java_UsbMidiDeviceFactoryAndroid_enumerateDevices(
+ env, raw_factory_.obj(), base::android::GetApplicationContext())) {
+ // Asynchronous operation.
+ return;
+ }
+ // No devices are found.
+ ScopedVector<UsbMidiDevice> devices;
+ callback.Run(true, &devices);
+}
+
+// Called from the Java world.
+void UsbMidiDeviceFactoryAndroid::OnUsbMidiDeviceRequestDone(
+ JNIEnv* env,
+ jobject caller,
+ jobjectArray devices) {
+ size_t size = env->GetArrayLength(devices);
+ ScopedVector<UsbMidiDevice> devices_to_pass;
+ for (size_t i = 0; i < size; ++i) {
+ UsbMidiDeviceAndroid::ObjectRef raw_device(
+ env, env->GetObjectArrayElement(devices, i));
+ devices_to_pass.push_back(new UsbMidiDeviceAndroid(raw_device, delegate_));
+ }
+
+ callback_.Run(true, &devices_to_pass);
+}
+
+bool UsbMidiDeviceFactoryAndroid::RegisterUsbMidiDeviceFactory(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace media
diff --git a/chromium/media/midi/usb_midi_device_factory_android.h b/chromium/media/midi/usb_midi_device_factory_android.h
new file mode 100644
index 00000000000..63a9eb32f68
--- /dev/null
+++ b/chromium/media/midi/usb_midi_device_factory_android.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_DEVICE_FACTORY_ANDROID_H_
+#define MEDIA_MIDI_USB_MIDI_DEVICE_FACTORY_ANDROID_H_
+
+#include <jni.h>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/scoped_vector.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/media_export.h"
+#include "media/midi/usb_midi_device.h"
+
+namespace media {
+
+// This class enumerates UsbMidiDevices.
+class MEDIA_EXPORT UsbMidiDeviceFactoryAndroid : public UsbMidiDevice::Factory {
+ public:
+ UsbMidiDeviceFactoryAndroid();
+ virtual ~UsbMidiDeviceFactoryAndroid();
+
+ // UsbMidiDevice::Factory implementation.
+ virtual void EnumerateDevices(UsbMidiDeviceDelegate* delegate,
+ Callback callback) OVERRIDE;
+
+ void OnUsbMidiDeviceRequestDone(JNIEnv* env,
+ jobject caller,
+ jobjectArray devices);
+
+ static bool RegisterUsbMidiDeviceFactory(JNIEnv* env);
+
+ private:
+ base::android::ScopedJavaGlobalRef<jobject> raw_factory_;
+ // Not owned.
+ UsbMidiDeviceDelegate* delegate_;
+ Callback callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(UsbMidiDeviceFactoryAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_DEVICE_FACTORY_ANDROID_H_
diff --git a/chromium/media/midi/usb_midi_input_stream.cc b/chromium/media/midi/usb_midi_input_stream.cc
new file mode 100644
index 00000000000..a46596d0a86
--- /dev/null
+++ b/chromium/media/midi/usb_midi_input_stream.cc
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_input_stream.h"
+
+#include <string.h>
+#include <map>
+#include <vector>
+
+#include "base/logging.h"
+#include "media/midi/usb_midi_device.h"
+#include "media/midi/usb_midi_jack.h"
+
+namespace media {
+
+UsbMidiInputStream::JackUniqueKey::JackUniqueKey(UsbMidiDevice* device,
+ int endpoint_number,
+ int cable_number)
+ : device(device),
+ endpoint_number(endpoint_number),
+ cable_number(cable_number) {}
+
+bool UsbMidiInputStream::JackUniqueKey::operator==(
+ const JackUniqueKey& that) const {
+ return device == that.device &&
+ endpoint_number == that.endpoint_number &&
+ cable_number == that.cable_number;
+}
+
+bool UsbMidiInputStream::JackUniqueKey::operator<(
+ const JackUniqueKey& that) const {
+ if (device != that.device)
+ return device < that.device;
+ if (endpoint_number != that.endpoint_number)
+ return endpoint_number < that.endpoint_number;
+ return cable_number < that.cable_number;
+}
+
+UsbMidiInputStream::UsbMidiInputStream(const std::vector<UsbMidiJack>& jacks,
+ Delegate* delegate)
+ : delegate_(delegate) {
+ for (size_t i = 0; i < jacks.size(); ++i) {
+ jack_dictionary_.insert(
+ std::make_pair(JackUniqueKey(jacks[i].device,
+ jacks[i].endpoint_number(),
+ jacks[i].cable_number),
+ i));
+ }
+}
+
+UsbMidiInputStream::~UsbMidiInputStream() {}
+
+void UsbMidiInputStream::OnReceivedData(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) {
+ DCHECK_EQ(0u, size % kPacketSize);
+ size_t current = 0;
+ while (current + kPacketSize <= size) {
+ ProcessOnePacket(device, endpoint_number, &data[current], time);
+ current += kPacketSize;
+ }
+}
+
+void UsbMidiInputStream::ProcessOnePacket(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* packet,
+ base::TimeTicks time) {
+  // The first 4 bytes of the packet are accessible here.
+ uint8 code_index = packet[0] & 0x0f;
+ uint8 cable_number = packet[0] >> 4;
+ const size_t packet_size_table[16] = {
+ 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1,
+ };
+ size_t packet_size = packet_size_table[code_index];
+ if (packet_size == 0) {
+ // These CINs are reserved. Ignore them.
+ DVLOG(1) << "code index number (" << code_index << ") arrives "
+ << "but it is reserved.";
+ return;
+ }
+ std::map<JackUniqueKey, size_t>::const_iterator it =
+ jack_dictionary_.find(JackUniqueKey(device,
+ endpoint_number,
+ cable_number));
+ if (it != jack_dictionary_.end())
+ delegate_->OnReceivedData(it->second, &packet[1], packet_size, time);
+}
+
+std::vector<UsbMidiInputStream::JackUniqueKey>
+UsbMidiInputStream::RegisteredJackKeysForTesting() const {
+ std::vector<JackUniqueKey> result(jack_dictionary_.size(),
+ JackUniqueKey(0, 0, 0));
+ for (std::map<JackUniqueKey, size_t>::const_iterator it =
+ jack_dictionary_.begin();
+ it != jack_dictionary_.end(); ++it) {
+ DCHECK_LT(it->second, result.size());
+ result[it->second] = it->first;
+ }
+ return result;
+}
+
+} // namespace media
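To make the packet layout concrete: each 4-byte USB-MIDI event packet carries the cable number in the high nibble of byte 0, the code index number (CIN) in the low nibble, and up to three MIDI bytes after that. A small sketch under those rules (DecodeHeader is hypothetical):

    #include "base/basictypes.h"

    struct PacketHeader {
      uint8 cable_number;  // High nibble of byte 0.
      uint8 code_index;    // Low nibble of byte 0 (the CIN).
    };

    // E.g. the packet {0x29, 0x90, 0x45, 0x7f} decodes to cable 2,
    // CIN 9 (Note-on, 3 MIDI bytes), i.e. MIDI message 0x90 0x45 0x7f.
    PacketHeader DecodeHeader(const uint8* packet) {
      PacketHeader header = { static_cast<uint8>(packet[0] >> 4),
                              static_cast<uint8>(packet[0] & 0x0f) };
      return header;
    }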
diff --git a/chromium/media/midi/usb_midi_input_stream.h b/chromium/media/midi/usb_midi_input_stream.h
new file mode 100644
index 00000000000..0841751f39c
--- /dev/null
+++ b/chromium/media/midi/usb_midi_input_stream.h
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_INPUT_STREAM_H_
+#define MEDIA_MIDI_USB_MIDI_INPUT_STREAM_H_
+
+#include <map>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/midi/usb_midi_jack.h"
+
+namespace media {
+
+class UsbMidiDevice;
+
+// UsbMidiInputStream converts USB-MIDI data to MIDI data.
+// See "USB Device Class Definition for MIDI Devices" Release 1.0,
+// Section 4 "USB-MIDI Event Packets" for details.
+class MEDIA_EXPORT UsbMidiInputStream {
+ public:
+ class MEDIA_EXPORT Delegate {
+ public:
+ virtual ~Delegate() {}
+    // This function is called when some data arrives at a USB-MIDI jack.
+    // An input USB-MIDI jack corresponds to an input MidiPortInfo.
+ virtual void OnReceivedData(size_t jack_index,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) = 0;
+ };
+
+ // This is public for testing.
+ struct JackUniqueKey {
+ JackUniqueKey(UsbMidiDevice* device, int endpoint_number, int cable_number);
+ bool operator==(const JackUniqueKey& that) const;
+ bool operator<(const JackUniqueKey& that) const;
+
+ UsbMidiDevice* device;
+ int endpoint_number;
+ int cable_number;
+ };
+
+ UsbMidiInputStream(const std::vector<UsbMidiJack>& jacks,
+ Delegate* delegate);
+ ~UsbMidiInputStream();
+
+  // This function should be called when some data arrives at a USB-MIDI
+  // endpoint. It converts the data to MIDI data and calls
+  // |delegate->OnReceivedData| with it.
+ // |size| must be a multiple of |kPacketSize|.
+ void OnReceivedData(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time);
+
+ std::vector<JackUniqueKey> RegisteredJackKeysForTesting() const;
+
+ private:
+ static const size_t kPacketSize = 4;
+ // Processes a USB-MIDI Event Packet.
+ // The first |kPacketSize| bytes of |packet| must be accessible.
+ void ProcessOnePacket(UsbMidiDevice* device,
+ int endpoint_number,
+ const uint8* packet,
+ base::TimeTicks time);
+
+  // Maps a jack's unique key to its index in the constructor's |jacks|.
+ std::map<JackUniqueKey, size_t> jack_dictionary_;
+
+ // Not owned
+ Delegate* delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(UsbMidiInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_INPUT_STREAM_H_
diff --git a/chromium/media/midi/usb_midi_input_stream_unittest.cc b/chromium/media/midi/usb_midi_input_stream_unittest.cc
new file mode 100644
index 00000000000..7dadba21015
--- /dev/null
+++ b/chromium/media/midi/usb_midi_input_stream_unittest.cc
@@ -0,0 +1,178 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_input_stream.h"
+
+#include <string>
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "media/midi/usb_midi_device.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeTicks;
+
+namespace media {
+
+namespace {
+
+class TestUsbMidiDevice : public UsbMidiDevice {
+ public:
+ TestUsbMidiDevice() {}
+ virtual ~TestUsbMidiDevice() {}
+ virtual std::vector<uint8> GetDescriptor() OVERRIDE {
+ return std::vector<uint8>();
+ }
+ virtual void Send(int endpoint_number,
+ const std::vector<uint8>& data) OVERRIDE {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestUsbMidiDevice);
+};
+
+class MockDelegate : public UsbMidiInputStream::Delegate {
+ public:
+ MockDelegate() {}
+ virtual ~MockDelegate() {}
+ virtual void OnReceivedData(size_t jack_index,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) OVERRIDE {
+ for (size_t i = 0; i < size; ++i)
+ received_data_ += base::StringPrintf("0x%02x ", data[i]);
+ received_data_ += "\n";
+ }
+
+ const std::string& received_data() const { return received_data_; }
+
+ private:
+ std::string received_data_;
+ DISALLOW_COPY_AND_ASSIGN(MockDelegate);
+};
+
+class UsbMidiInputStreamTest : public ::testing::Test {
+ protected:
+ UsbMidiInputStreamTest() {
+ std::vector<UsbMidiJack> jacks;
+
+ jacks.push_back(UsbMidiJack(&device1_,
+ 84, // jack_id
+ 4, // cable_number
+ 135)); // endpoint_address
+ jacks.push_back(UsbMidiJack(&device2_,
+ 85,
+ 5,
+ 137));
+ jacks.push_back(UsbMidiJack(&device2_,
+ 84,
+ 4,
+ 135));
+ jacks.push_back(UsbMidiJack(&device1_,
+ 85,
+ 5,
+ 135));
+
+ stream_.reset(new UsbMidiInputStream(jacks, &delegate_));
+ }
+
+ TestUsbMidiDevice device1_;
+ TestUsbMidiDevice device2_;
+ MockDelegate delegate_;
+ scoped_ptr<UsbMidiInputStream> stream_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(UsbMidiInputStreamTest);
+};
+
+TEST_F(UsbMidiInputStreamTest, UnknownMessage) {
+ uint8 data[] = {
+ 0x40, 0xff, 0xff, 0xff,
+ 0x41, 0xff, 0xff, 0xff,
+ };
+
+ stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("", delegate_.received_data());
+}
+
+TEST_F(UsbMidiInputStreamTest, SystemCommonMessage) {
+ uint8 data[] = {
+ 0x45, 0xf8, 0x00, 0x00,
+ 0x42, 0xf3, 0x22, 0x00,
+ 0x43, 0xf2, 0x33, 0x44,
+ };
+
+ stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("0xf8 \n"
+ "0xf3 0x22 \n"
+ "0xf2 0x33 0x44 \n", delegate_.received_data());
+}
+
+TEST_F(UsbMidiInputStreamTest, SystemExclusiveMessage) {
+ uint8 data[] = {
+ 0x44, 0xf0, 0x11, 0x22,
+ 0x45, 0xf7, 0x00, 0x00,
+ 0x46, 0xf0, 0xf7, 0x00,
+ 0x47, 0xf0, 0x33, 0xf7,
+ };
+
+ stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("0xf0 0x11 0x22 \n"
+ "0xf7 \n"
+ "0xf0 0xf7 \n"
+ "0xf0 0x33 0xf7 \n", delegate_.received_data());
+}
+
+TEST_F(UsbMidiInputStreamTest, ChannelMessage) {
+ uint8 data[] = {
+ 0x48, 0x80, 0x11, 0x22,
+ 0x49, 0x90, 0x33, 0x44,
+ 0x4a, 0xa0, 0x55, 0x66,
+ 0x4b, 0xb0, 0x77, 0x88,
+ 0x4c, 0xc0, 0x99, 0x00,
+ 0x4d, 0xd0, 0xaa, 0x00,
+ 0x4e, 0xe0, 0xbb, 0xcc,
+ };
+
+ stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("0x80 0x11 0x22 \n"
+ "0x90 0x33 0x44 \n"
+ "0xa0 0x55 0x66 \n"
+ "0xb0 0x77 0x88 \n"
+ "0xc0 0x99 \n"
+ "0xd0 0xaa \n"
+ "0xe0 0xbb 0xcc \n", delegate_.received_data());
+}
+
+TEST_F(UsbMidiInputStreamTest, SingleByteMessage) {
+ uint8 data[] = {
+ 0x4f, 0xf8, 0x00, 0x00,
+ };
+
+ stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("0xf8 \n", delegate_.received_data());
+}
+
+TEST_F(UsbMidiInputStreamTest, DispatchForMultipleCables) {
+ uint8 data[] = {
+ 0x4f, 0xf8, 0x00, 0x00,
+ 0x5f, 0xfa, 0x00, 0x00,
+ 0x6f, 0xfb, 0x00, 0x00,
+ };
+
+ stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("0xf8 \n0xfa \n", delegate_.received_data());
+}
+
+TEST_F(UsbMidiInputStreamTest, DispatchForDevice2) {
+ uint8 data[] = { 0x4f, 0xf8, 0x00, 0x00 };
+
+ stream_->OnReceivedData(&device2_, 7, data, arraysize(data), TimeTicks());
+ EXPECT_EQ("0xf8 \n", delegate_.received_data());
+}
+
+} // namespace
+
+} // namespace media
diff --git a/chromium/media/midi/usb_midi_jack.h b/chromium/media/midi/usb_midi_jack.h
new file mode 100644
index 00000000000..271cad88e72
--- /dev/null
+++ b/chromium/media/midi/usb_midi_jack.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_JACK_H_
+#define MEDIA_MIDI_USB_MIDI_JACK_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class UsbMidiDevice;
+
+// UsbMidiJack represents an EMBEDDED MIDI jack.
+struct MEDIA_EXPORT UsbMidiJack {
+ // The direction of the endpoint associated with an EMBEDDED MIDI jack.
+ // Note that an IN MIDI jack associated with an OUT endpoint has
+ // ***DIRECTION_OUT*** direction.
+ enum Direction {
+ DIRECTION_IN,
+ DIRECTION_OUT,
+ };
+ UsbMidiJack(UsbMidiDevice* device,
+ uint8 jack_id,
+ uint8 cable_number,
+ uint8 endpoint_address)
+ : device(device),
+ jack_id(jack_id),
+ cable_number(cable_number),
+ endpoint_address(endpoint_address) {}
+ // Not owned
+ UsbMidiDevice* device;
+  // The id of this jack, unique within the interface.
+ uint8 jack_id;
+ // The cable number of this jack in the associated endpoint.
+ uint8 cable_number;
+ // The address of the endpoint that this jack is associated with.
+ uint8 endpoint_address;
+
+ Direction direction() const {
+ return (endpoint_address & 0x80) ? DIRECTION_IN : DIRECTION_OUT;
+ }
+ uint8 endpoint_number() const {
+ return (endpoint_address & 0xf);
+ }
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_JACK_H_
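A worked example for the accessors above, using the USB convention that bit 7 of an endpoint address marks an IN endpoint:

    #include "media/midi/usb_midi_jack.h"

    void JackExample(media::UsbMidiDevice* device) {
      // Endpoint address 0x82: bit 7 set, so an IN endpoint, number 2.
      media::UsbMidiJack in_jack(device, 7 /* jack_id */, 0 /* cable */, 0x82);
      // in_jack.direction() == DIRECTION_IN, in_jack.endpoint_number() == 2.

      // Endpoint address 0x02: the matching OUT endpoint.
      media::UsbMidiJack out_jack(device, 2, 0, 0x02);
      // out_jack.direction() == DIRECTION_OUT, out_jack.endpoint_number() == 2.
    }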
diff --git a/chromium/media/midi/usb_midi_output_stream.cc b/chromium/media/midi/usb_midi_output_stream.cc
new file mode 100644
index 00000000000..1aef2824676
--- /dev/null
+++ b/chromium/media/midi/usb_midi_output_stream.cc
@@ -0,0 +1,187 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_output_stream.h"
+
+#include "base/logging.h"
+#include "media/midi/midi_message_util.h"
+#include "media/midi/usb_midi_device.h"
+
+namespace media {
+
+UsbMidiOutputStream::UsbMidiOutputStream(const UsbMidiJack& jack)
+ : jack_(jack), pending_size_(0), is_sending_sysex_(false) {}
+
+void UsbMidiOutputStream::Send(const std::vector<uint8>& data) {
+ // To prevent link errors caused by DCHECK_*.
+ const size_t kPacketContentSize = UsbMidiOutputStream::kPacketContentSize;
+ DCHECK_LT(jack_.cable_number, 16u);
+
+ std::vector<uint8> data_to_send;
+ size_t current = 0;
+ size_t size = GetSize(data);
+ while (current < size) {
+ uint8 first_byte = Get(data, current);
+ if (first_byte == kSysExByte || is_sending_sysex_) {
+ // System Exclusive messages
+ if (!PushSysExMessage(data, &current, &data_to_send))
+ break;
+ } else if ((first_byte & kSysMessageBitMask) == kSysMessageBitPattern) {
+ if (first_byte & 0x08) {
+ // System Real-Time messages
+ PushSysRTMessage(data, &current, &data_to_send);
+ } else {
+ // System Common messages
+ if (!PushSysCommonMessage(data, &current, &data_to_send))
+ break;
+ }
+ } else if (first_byte & 0x80) {
+ if (!PushChannelMessage(data, &current, &data_to_send))
+ break;
+ } else {
+ // Unknown messages
+ DVLOG(1) << "Unknown byte: " << static_cast<unsigned int>(first_byte);
+ ++current;
+ }
+ }
+
+ if (data_to_send.size() > 0)
+ jack_.device->Send(jack_.endpoint_number(), data_to_send);
+
+ DCHECK_LE(current, size);
+ DCHECK_LE(size - current, kPacketContentSize);
+  // Note that this can be a self-copy, so the iteration order is important.
+ for (size_t i = current; i < size; ++i)
+ pending_data_[i - current] = Get(data, i);
+ pending_size_ = size - current;
+}
+
+size_t UsbMidiOutputStream::GetSize(const std::vector<uint8>& data) const {
+ return data.size() + pending_size_;
+}
+
+uint8 UsbMidiOutputStream::Get(const std::vector<uint8>& data,
+ size_t index) const {
+ DCHECK_LT(index, GetSize(data));
+ if (index < pending_size_)
+ return pending_data_[index];
+ return data[index - pending_size_];
+}
+
+bool UsbMidiOutputStream::PushSysExMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send) {
+ size_t index = *current;
+ size_t message_size = 0;
+ const size_t kMessageSizeMax = 3;
+ uint8 message[kMessageSizeMax] = {};
+
+ while (index < GetSize(data)) {
+ if (message_size == kMessageSizeMax) {
+ // We can't find the end-of-message mark in the three bytes.
+ *current = index;
+ data_to_send->push_back((jack_.cable_number << 4) | 0x4);
+ data_to_send->insert(data_to_send->end(),
+ message,
+ message + arraysize(message));
+ is_sending_sysex_ = true;
+ return true;
+ }
+ uint8 byte = Get(data, index);
+ if ((byte & kSysRTMessageBitMask) == kSysRTMessageBitPattern) {
+ // System Real-Time messages interleaved in a SysEx message
+ PushSysRTMessage(data, &index, data_to_send);
+ continue;
+ }
+
+ message[message_size] = byte;
+ ++message_size;
+ if (byte == kEndOfSysExByte) {
+ uint8 code_index = message_size + 0x4;
+ DCHECK(code_index == 0x5 || code_index == 0x6 || code_index == 0x7);
+ data_to_send->push_back((jack_.cable_number << 4) | code_index);
+ data_to_send->insert(data_to_send->end(),
+ message,
+ message + arraysize(message));
+ *current = index + 1;
+ is_sending_sysex_ = false;
+ return true;
+ }
+ ++index;
+ }
+ return false;
+}
+
+bool UsbMidiOutputStream::PushSysCommonMessage(
+ const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send) {
+ size_t index = *current;
+ uint8 first_byte = Get(data, index);
+ DCHECK_LE(0xf1, first_byte);
+ DCHECK_LE(first_byte, 0xf7);
+ const size_t message_size_table[8] = {
+ 0, 2, 3, 2, 1, 1, 1, 0,
+ };
+ size_t message_size = message_size_table[first_byte & 0x0f];
+ DCHECK_NE(0u, message_size);
+ DCHECK_LE(message_size, 3u);
+
+ if (GetSize(data) < index + message_size) {
+ // The message is incomplete.
+ return false;
+ }
+
+ uint8 code_index = message_size == 1 ? 0x5 : static_cast<uint8>(message_size);
+ data_to_send->push_back((jack_.cable_number << 4) | code_index);
+ for (size_t i = index; i < index + 3; ++i)
+ data_to_send->push_back(i < index + message_size ? Get(data, i) : 0);
+ *current += message_size;
+ return true;
+}
+
+void UsbMidiOutputStream::PushSysRTMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send) {
+ size_t index = *current;
+ uint8 first_byte = Get(data, index);
+ DCHECK_LE(0xf8, first_byte);
+ DCHECK_LE(first_byte, 0xff);
+
+ data_to_send->push_back((jack_.cable_number << 4) | 0x5);
+ data_to_send->push_back(first_byte);
+ data_to_send->push_back(0);
+ data_to_send->push_back(0);
+ *current += 1;
+}
+
+bool UsbMidiOutputStream::PushChannelMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send) {
+ size_t index = *current;
+ uint8 first_byte = Get(data, index);
+ DCHECK_LE(0x80, (first_byte & 0xf0));
+ DCHECK_LE((first_byte & 0xf0), 0xe0);
+
+ const size_t message_size_table[8] = {
+ 3, 3, 3, 3, 2, 3, 3, 0,
+ };
+ uint8 code_index = first_byte >> 4;
+ size_t message_size = message_size_table[code_index & 0x7];
+ DCHECK_NE(0u, message_size);
+ DCHECK_LE(message_size, 3u);
+
+ if (GetSize(data) < index + message_size) {
+ // The message is incomplete.
+ return false;
+ }
+
+ data_to_send->push_back((jack_.cable_number << 4) | code_index);
+ for (size_t i = index; i < index + 3; ++i)
+ data_to_send->push_back(i < index + message_size ? Get(data, i) : 0);
+ *current += message_size;
+ return true;
+}
+
+} // namespace media
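To make the code-index arithmetic in PushSysExMessage concrete, here is how a five-byte SysEx message is packetized on cable 2; the byte sequence matches the SendFiveByteSysEx unit test below.

    // MIDI input: f0 00 01 02 f7
    //
    //   0x24 0xf0 0x00 0x01   CIN 0x4: SysEx starts or continues (3 bytes)
    //   0x26 0x02 0xf7 0x00   CIN 0x6: SysEx ends with 2 bytes
    //
    // CIN 0x5 and 0x7 likewise mark a SysEx ending with 1 or 3 bytes,
    // which is why code_index is computed as message_size + 0x4.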
diff --git a/chromium/media/midi/usb_midi_output_stream.h b/chromium/media/midi/usb_midi_output_stream.h
new file mode 100644
index 00000000000..1d98d584fd0
--- /dev/null
+++ b/chromium/media/midi/usb_midi_output_stream.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_USB_MIDI_OUTPUT_STREAM_H_
+#define MEDIA_MIDI_USB_MIDI_OUTPUT_STREAM_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/midi/usb_midi_jack.h"
+
+namespace media {
+
+// UsbMidiOutputStream converts MIDI data to USB-MIDI data.
+// See "USB Device Class Definition for MIDI Devices" Release 1.0,
+// Section 4 "USB-MIDI Event Packets" for details.
+class MEDIA_EXPORT UsbMidiOutputStream {
+ public:
+ explicit UsbMidiOutputStream(const UsbMidiJack& jack);
+
+  // Converts |data| to USB-MIDI data and sends it to the jack.
+ void Send(const std::vector<uint8>& data);
+
+ const UsbMidiJack& jack() const { return jack_; }
+
+ private:
+ size_t GetSize(const std::vector<uint8>& data) const;
+  uint8 Get(const std::vector<uint8>& data, size_t index) const;
+
+ bool PushSysExMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send);
+ bool PushSysCommonMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send);
+ void PushSysRTMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send);
+ bool PushChannelMessage(const std::vector<uint8>& data,
+ size_t* current,
+ std::vector<uint8>* data_to_send);
+
+ static const size_t kPacketContentSize = 3;
+
+ UsbMidiJack jack_;
+ size_t pending_size_;
+ uint8 pending_data_[kPacketContentSize];
+ bool is_sending_sysex_;
+
+ DISALLOW_COPY_AND_ASSIGN(UsbMidiOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_USB_MIDI_OUTPUT_STREAM_H_
diff --git a/chromium/media/midi/usb_midi_output_stream_unittest.cc b/chromium/media/midi/usb_midi_output_stream_unittest.cc
new file mode 100644
index 00000000000..661d611957d
--- /dev/null
+++ b/chromium/media/midi/usb_midi_output_stream_unittest.cc
@@ -0,0 +1,276 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/usb_midi_output_stream.h"
+
+#include <string>
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "media/midi/usb_midi_device.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+template<typename T, size_t N>
+std::vector<T> ToVector(const T((&array)[N])) {
+ return std::vector<T>(array, array + N);
+}
+
+class MockUsbMidiDevice : public UsbMidiDevice {
+ public:
+ MockUsbMidiDevice() {}
+ virtual ~MockUsbMidiDevice() {}
+
+ virtual std::vector<uint8> GetDescriptor() OVERRIDE {
+ return std::vector<uint8>();
+ }
+
+ virtual void Send(int endpoint_number, const std::vector<uint8>& data)
+ OVERRIDE {
+ for (size_t i = 0; i < data.size(); ++i) {
+ log_ += base::StringPrintf("0x%02x ", data[i]);
+ }
+ log_ += base::StringPrintf("(endpoint = %d)\n", endpoint_number);
+ }
+
+ const std::string& log() const { return log_; }
+
+ void ClearLog() { log_ = ""; }
+
+ private:
+ std::string log_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockUsbMidiDevice);
+};
+
+class UsbMidiOutputStreamTest : public ::testing::Test {
+ protected:
+ UsbMidiOutputStreamTest() {
+ UsbMidiJack jack(&device_, 1, 2, 4);
+ stream_.reset(new UsbMidiOutputStream(jack));
+ }
+
+ MockUsbMidiDevice device_;
+ scoped_ptr<UsbMidiOutputStream> stream_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(UsbMidiOutputStreamTest);
+};
+
+TEST_F(UsbMidiOutputStreamTest, SendEmpty) {
+ stream_->Send(std::vector<uint8>());
+
+ EXPECT_EQ("", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendNoteOn) {
+ uint8 data[] = { 0x90, 0x45, 0x7f};
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x29 0x90 0x45 0x7f (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendNoteOnPending) {
+ stream_->Send(std::vector<uint8>(1, 0x90));
+ stream_->Send(std::vector<uint8>(1, 0x45));
+ EXPECT_EQ("", device_.log());
+
+ stream_->Send(std::vector<uint8>(1, 0x7f));
+ EXPECT_EQ("0x29 0x90 0x45 0x7f (endpoint = 4)\n", device_.log());
+ device_.ClearLog();
+
+ stream_->Send(std::vector<uint8>(1, 0x90));
+ stream_->Send(std::vector<uint8>(1, 0x45));
+ EXPECT_EQ("", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendNoteOnBurst) {
+ uint8 data1[] = { 0x90, };
+ uint8 data2[] = { 0x45, 0x7f, 0x90, 0x45, 0x71, 0x90, 0x45, 0x72, 0x90, };
+
+ stream_->Send(ToVector(data1));
+ stream_->Send(ToVector(data2));
+ EXPECT_EQ("0x29 0x90 0x45 0x7f "
+ "0x29 0x90 0x45 0x71 "
+ "0x29 0x90 0x45 0x72 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendNoteOff) {
+ uint8 data[] = { 0x80, 0x33, 0x44, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x28 0x80 0x33 0x44 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendPolyphonicKeyPress) {
+ uint8 data[] = { 0xa0, 0x33, 0x44, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x2a 0xa0 0x33 0x44 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendControlChange) {
+ uint8 data[] = { 0xb7, 0x33, 0x44, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x2b 0xb7 0x33 0x44 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendProgramChange) {
+ uint8 data[] = { 0xc2, 0x33, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x2c 0xc2 0x33 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendChannelPressure) {
+ uint8 data[] = { 0xd1, 0x33, 0x44, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x2d 0xd1 0x33 0x44 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendPitchWheelChange) {
+ uint8 data[] = { 0xe4, 0x33, 0x44, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x2e 0xe4 0x33 0x44 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendTwoByteSysEx) {
+ uint8 data[] = { 0xf0, 0xf7, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x26 0xf0 0xf7 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendThreeByteSysEx) {
+ uint8 data[] = { 0xf0, 0x4f, 0xf7, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x27 0xf0 0x4f 0xf7 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendFourByteSysEx) {
+ uint8 data[] = { 0xf0, 0x00, 0x01, 0xf7, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x24 0xf0 0x00 0x01 "
+ "0x25 0xf7 0x00 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendFiveByteSysEx) {
+ uint8 data[] = { 0xf0, 0x00, 0x01, 0x02, 0xf7, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x24 0xf0 0x00 0x01 "
+ "0x26 0x02 0xf7 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendSixByteSysEx) {
+ uint8 data[] = { 0xf0, 0x00, 0x01, 0x02, 0x03, 0xf7, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x24 0xf0 0x00 0x01 "
+ "0x27 0x02 0x03 0xf7 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendPendingSysEx) {
+ uint8 data1[] = { 0xf0, 0x33, };
+ uint8 data2[] = { 0x44, 0x55, 0x66, };
+ uint8 data3[] = { 0x77, 0x88, 0x99, 0xf7, };
+
+ stream_->Send(ToVector(data1));
+ EXPECT_EQ("", device_.log());
+
+ stream_->Send(ToVector(data2));
+ EXPECT_EQ("0x24 0xf0 0x33 0x44 (endpoint = 4)\n", device_.log());
+ device_.ClearLog();
+
+ stream_->Send(ToVector(data3));
+ EXPECT_EQ("0x24 0x55 0x66 0x77 0x27 0x88 0x99 0xf7 (endpoint = 4)\n",
+ device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendNoteOnAfterSysEx) {
+ uint8 data[] = { 0xf0, 0x00, 0x01, 0x02, 0x03, 0xf7, 0x90, 0x44, 0x33, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x24 0xf0 0x00 0x01 "
+ "0x27 0x02 0x03 0xf7 "
+ "0x29 0x90 0x44 0x33 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendTimeCodeQuarterFrame) {
+ uint8 data[] = { 0xf1, 0x22, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x22 0xf1 0x22 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendSongPositionPointer) {
+ uint8 data[] = { 0xf2, 0x22, 0x33, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x23 0xf2 0x22 0x33 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendSongSelect) {
+ uint8 data[] = { 0xf3, 0x22, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x22 0xf3 0x22 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, TuneRequest) {
+ uint8 data[] = { 0xf6, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x25 0xf6 0x00 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendSongPositionPointerPending) {
+ uint8 data1[] = { 0xf2, 0x22, };
+ uint8 data2[] = { 0x33, };
+
+ stream_->Send(ToVector(data1));
+ EXPECT_EQ("", device_.log());
+
+ stream_->Send(ToVector(data2));
+ EXPECT_EQ("0x23 0xf2 0x22 0x33 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendRealTimeMessages) {
+ uint8 data[] = { 0xf8, 0xfa, 0xfb, 0xfc, 0xfe, 0xff, };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x25 0xf8 0x00 0x00 "
+ "0x25 0xfa 0x00 0x00 "
+ "0x25 0xfb 0x00 0x00 "
+ "0x25 0xfc 0x00 0x00 "
+ "0x25 0xfe 0x00 0x00 "
+ "0x25 0xff 0x00 0x00 (endpoint = 4)\n", device_.log());
+}
+
+TEST_F(UsbMidiOutputStreamTest, SendRealTimeInSysExMessage) {
+ uint8 data[] = {
+ 0xf0, 0x00, 0x01, 0x02,
+ 0xf8, 0xfa,
+ 0x03, 0xf7,
+ };
+
+ stream_->Send(ToVector(data));
+ EXPECT_EQ("0x24 0xf0 0x00 0x01 "
+ "0x25 0xf8 0x00 0x00 "
+ "0x25 0xfa 0x00 0x00 "
+ "0x27 0x02 0x03 0xf7 (endpoint = 4)\n", device_.log());
+}
+
+} // namespace
+
+} // namespace media
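The expectations above encode the USB-MIDI 1.0 event packet layout: every 32-bit packet begins with a header byte whose high nibble is the virtual cable number and whose low nibble is the Code Index Number (CIN), followed by up to three MIDI bytes, zero-padded. A minimal sketch of that header computation, consistent with the logged values (cable 2; CIN 0x9 for note-on, 0x4 for SysEx start/continue, 0x5-0x7 for SysEx termination):

uint8 PacketHeader(uint8 cable_number, uint8 cin) {
  // High nibble: virtual cable; low nibble: Code Index Number.
  return static_cast<uint8>((cable_number << 4) | (cin & 0x0f));
}
// PacketHeader(2, 0x9) == 0x29 (note-on), PacketHeader(2, 0x4) == 0x24
// (SysEx start/continue), PacketHeader(2, 0x7) == 0x27 (SysEx ending with
// three bytes), matching the device logs checked above.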
diff --git a/chromium/media/mp2t/es_parser_h264.cc b/chromium/media/mp2t/es_parser_h264.cc
deleted file mode 100644
index 30764c91e6d..00000000000
--- a/chromium/media/mp2t/es_parser_h264.cc
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mp2t/es_parser_h264.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
-#include "media/base/stream_parser_buffer.h"
-#include "media/base/video_frame.h"
-#include "media/mp2t/mp2t_common.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
-
-static const int kExtendedSar = 255;
-
-// ISO 14496 part 10
-// VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
-static const int kTableSarWidth[14] = {
- 0, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160
-};
-
-static const int kTableSarHeight[14] = {
- 0, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99
-};
-
-// Remove the start code emulation prevention byte sequence (0x00 0x00 0x03)
-// and return the size of the converted buffer.
-// Note: Size of |buf_rbsp| should be at least |size| to accommodate
-// the worst case.
-static int ConvertToRbsp(const uint8* buf, int size, uint8* buf_rbsp) {
- int rbsp_size = 0;
- int zero_count = 0;
- for (int k = 0; k < size; k++) {
- if (buf[k] == 0x3 && zero_count >= 2) {
- zero_count = 0;
- continue;
- }
- if (buf[k] == 0)
- zero_count++;
- else
- zero_count = 0;
- buf_rbsp[rbsp_size++] = buf[k];
- }
- return rbsp_size;
-}
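A quick trace of ConvertToRbsp() above: in the escaped sequence 00 00 03 01, the 0x03 that follows two zero bytes is an emulation prevention byte and is dropped.

uint8 ebsp[] = { 0x00, 0x00, 0x03, 0x01 };
uint8 rbsp[sizeof(ebsp)];
int rbsp_size = ConvertToRbsp(ebsp, sizeof(ebsp), rbsp);
// rbsp_size == 3; rbsp now holds { 0x00, 0x00, 0x01 }.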
-
-namespace media {
-namespace mp2t {
-
-// ISO 14496 - Part 10: Table 7-1 "NAL unit type codes"
-enum NalUnitType {
- kNalUnitTypeNonIdrSlice = 1,
- kNalUnitTypeIdrSlice = 5,
- kNalUnitTypeSPS = 7,
- kNalUnitTypePPS = 8,
- kNalUnitTypeAUD = 9,
-};
-
-class BitReaderH264 : public BitReader {
- public:
- BitReaderH264(const uint8* data, off_t size)
- : BitReader(data, size) { }
-
- // Read an unsigned exp-golomb value.
- // Return true if successful.
- bool ReadBitsExpGolomb(uint32* exp_golomb_value);
-};
-
-bool BitReaderH264::ReadBitsExpGolomb(uint32* exp_golomb_value) {
- // Get the number of leading zeros.
- int zero_count = 0;
- while (true) {
- int one_bit;
- RCHECK(ReadBits(1, &one_bit));
- if (one_bit != 0)
- break;
- zero_count++;
- }
-
- // If zero_count is greater than 31, the calculated value will overflow.
- if (zero_count > 31) {
- SkipBits(zero_count);
- return false;
- }
-
- // Read the actual value.
- uint32 base = (1 << zero_count) - 1;
- uint32 offset;
- RCHECK(ReadBits(zero_count, &offset));
- *exp_golomb_value = base + offset;
-
- return true;
-}
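A worked example of the decoding above:

uint8 data[] = { 0x28 };  // bits: 0010 1000
BitReaderH264 reader(data, sizeof(data));
uint32 value;
reader.ReadBitsExpGolomb(&value);
// Two leading zeros give base = (1 << 2) - 1 = 3; the two bits after the
// terminating 1 are 01, so offset = 1 and value == 4.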
-
-EsParserH264::EsParserH264(
- const NewVideoConfigCB& new_video_config_cb,
- const EmitBufferCB& emit_buffer_cb)
- : new_video_config_cb_(new_video_config_cb),
- emit_buffer_cb_(emit_buffer_cb),
- es_pos_(0),
- current_nal_pos_(-1),
- current_access_unit_pos_(-1),
- is_key_frame_(false) {
-}
-
-EsParserH264::~EsParserH264() {
-}
-
-bool EsParserH264::Parse(const uint8* buf, int size,
- base::TimeDelta pts,
- base::TimeDelta dts) {
- // Note: Parse is invoked each time a PES packet has been reassembled.
- // Unfortunately, a PES packet does not necessarily map
- // to an H.264 access unit, although the HLS recommendation is to use one
- // PES per access unit (some streams do not comply with that
- // recommendation).
-
- // Link position |raw_es_size| in the ES stream with a timing descriptor.
- // HLS recommendation: "In AVC video, you should have both a DTS and a
- // PTS in each PES header".
- if (dts == kNoTimestamp() && pts == kNoTimestamp()) {
- DVLOG(1) << "A timestamp must be provided for each reassembled PES";
- return false;
- }
- TimingDesc timing_desc;
- timing_desc.pts = pts;
- timing_desc.dts = (dts != kNoTimestamp()) ? dts : pts;
-
- int raw_es_size;
- const uint8* raw_es;
- es_byte_queue_.Peek(&raw_es, &raw_es_size);
- timing_desc_list_.push_back(
- std::pair<int, TimingDesc>(raw_es_size, timing_desc));
-
- // Add the incoming bytes to the ES queue.
- es_byte_queue_.Push(buf, size);
-
- // Add NALs from the incoming buffer.
- if (!ParseInternal())
- return false;
-
- // Discard emitted frames
- // or every byte that was parsed so far if there is no current frame.
- int skip_count =
- (current_access_unit_pos_ >= 0) ? current_access_unit_pos_ : es_pos_;
- DiscardEs(skip_count);
-
- return true;
-}
-
-void EsParserH264::Flush() {
- if (current_access_unit_pos_ < 0)
- return;
-
- // Force emitting the last access unit.
- int next_aud_pos;
- const uint8* raw_es;
- es_byte_queue_.Peek(&raw_es, &next_aud_pos);
- EmitFrameIfNeeded(next_aud_pos);
- current_nal_pos_ = -1;
- StartFrame(-1);
-
- // Discard the emitted frame.
- DiscardEs(next_aud_pos);
-}
-
-void EsParserH264::Reset() {
- DVLOG(1) << "EsParserH264::Reset";
- es_byte_queue_.Reset();
- timing_desc_list_.clear();
- es_pos_ = 0;
- current_nal_pos_ = -1;
- StartFrame(-1);
- last_video_decoder_config_ = VideoDecoderConfig();
-}
-
-bool EsParserH264::ParseInternal() {
- int raw_es_size;
- const uint8* raw_es;
- es_byte_queue_.Peek(&raw_es, &raw_es_size);
-
- DCHECK_GE(es_pos_, 0);
- DCHECK_LT(es_pos_, raw_es_size);
-
- // Resume H.264 ES parsing where it left off.
- for ( ; es_pos_ < raw_es_size - 4; es_pos_++) {
- // Make sure the syncword is either 00 00 00 01 or 00 00 01
- if (raw_es[es_pos_ + 0] != 0 || raw_es[es_pos_ + 1] != 0)
- continue;
- int syncword_length = 0;
- if (raw_es[es_pos_ + 2] == 0 && raw_es[es_pos_ + 3] == 1)
- syncword_length = 4;
- else if (raw_es[es_pos_ + 2] == 1)
- syncword_length = 3;
- else
- continue;
-
- // Parse the current NAL (and the new NAL then becomes the current one).
- if (current_nal_pos_ >= 0) {
- int nal_size = es_pos_ - current_nal_pos_;
- DCHECK_GT(nal_size, 0);
- RCHECK(NalParser(&raw_es[current_nal_pos_], nal_size));
- }
- current_nal_pos_ = es_pos_ + syncword_length;
-
- // Retrieve the NAL type.
- int nal_header = raw_es[current_nal_pos_];
- int forbidden_zero_bit = (nal_header >> 7) & 0x1;
- RCHECK(forbidden_zero_bit == 0);
- NalUnitType nal_unit_type = static_cast<NalUnitType>(nal_header & 0x1f);
- DVLOG(LOG_LEVEL_ES) << "nal: offset=" << es_pos_
- << " type=" << nal_unit_type;
-
- // Emit a frame if needed.
- if (nal_unit_type == kNalUnitTypeAUD)
- EmitFrameIfNeeded(es_pos_);
-
- // Skip the syncword.
- es_pos_ += syncword_length;
- }
-
- return true;
-}
-
-void EsParserH264::EmitFrameIfNeeded(int next_aud_pos) {
- // There is no current frame: start a new frame.
- if (current_access_unit_pos_ < 0) {
- StartFrame(next_aud_pos);
- return;
- }
-
- // Get the access unit timing info.
- TimingDesc current_timing_desc;
- while (!timing_desc_list_.empty() &&
- timing_desc_list_.front().first <= current_access_unit_pos_) {
- current_timing_desc = timing_desc_list_.front().second;
- timing_desc_list_.pop_front();
- }
-
- // Emit a frame.
- int raw_es_size;
- const uint8* raw_es;
- es_byte_queue_.Peek(&raw_es, &raw_es_size);
- int access_unit_size = next_aud_pos - current_access_unit_pos_;
- scoped_refptr<StreamParserBuffer> stream_parser_buffer =
- StreamParserBuffer::CopyFrom(
- &raw_es[current_access_unit_pos_],
- access_unit_size,
- is_key_frame_);
- stream_parser_buffer->SetDecodeTimestamp(current_timing_desc.dts);
- stream_parser_buffer->set_timestamp(current_timing_desc.pts);
- emit_buffer_cb_.Run(stream_parser_buffer);
-
- // Set the current frame position to the next AUD position.
- StartFrame(next_aud_pos);
-}
-
-void EsParserH264::StartFrame(int aud_pos) {
- // Two cases:
- // - if aud_pos < 0, clear the current frame and set |is_key_frame| to a
- // default value (false).
- // - if aud_pos >= 0, start a new frame and set |is_key_frame| to true.
- // |is_key_frame_| will be updated while parsing the NALs of that frame.
- // If any NAL is a non-IDR NAL, it will be set to false.
- current_access_unit_pos_ = aud_pos;
- is_key_frame_ = (aud_pos >= 0);
-}
-
-void EsParserH264::DiscardEs(int nbytes) {
- DCHECK_GE(nbytes, 0);
- if (nbytes == 0)
- return;
-
- // Update the position of
- // - the parser,
- // - the current NAL,
- // - the current access unit.
- es_pos_ -= nbytes;
- if (es_pos_ < 0)
- es_pos_ = 0;
-
- if (current_nal_pos_ >= 0) {
- DCHECK_GE(current_nal_pos_, nbytes);
- current_nal_pos_ -= nbytes;
- }
- if (current_access_unit_pos_ >= 0) {
- DCHECK_GE(current_access_unit_pos_, nbytes);
- current_access_unit_pos_ -= nbytes;
- }
-
- // Update the timing information accordingly.
- std::list<std::pair<int, TimingDesc> >::iterator timing_it
- = timing_desc_list_.begin();
- for (; timing_it != timing_desc_list_.end(); ++timing_it)
- timing_it->first -= nbytes;
-
- // Discard |nbytes| of ES.
- es_byte_queue_.Pop(nbytes);
-}
-
-bool EsParserH264::NalParser(const uint8* buf, int size) {
- // Get the NAL header.
- if (size < 1) {
- DVLOG(1) << "NalParser: incomplete NAL";
- return false;
- }
- int nal_header = buf[0];
- buf += 1;
- size -= 1;
-
- int forbidden_zero_bit = (nal_header >> 7) & 0x1;
- if (forbidden_zero_bit != 0)
- return false;
- int nal_ref_idc = (nal_header >> 5) & 0x3;
- int nal_unit_type = nal_header & 0x1f;
-
- // Process the NAL content.
- switch (nal_unit_type) {
- case kNalUnitTypeSPS:
- DVLOG(LOG_LEVEL_ES) << "NAL: SPS";
- // |nal_ref_idc| should not be 0 for a SPS.
- if (nal_ref_idc == 0)
- return false;
- return ProcessSPS(buf, size);
- case kNalUnitTypeIdrSlice:
- DVLOG(LOG_LEVEL_ES) << "NAL: IDR slice";
- return true;
- case kNalUnitTypeNonIdrSlice:
- DVLOG(LOG_LEVEL_ES) << "NAL: Non IDR slice";
- is_key_frame_ = false;
- return true;
- case kNalUnitTypePPS:
- DVLOG(LOG_LEVEL_ES) << "NAL: PPS";
- return true;
- case kNalUnitTypeAUD:
- DVLOG(LOG_LEVEL_ES) << "NAL: AUD";
- return true;
- default:
- DVLOG(LOG_LEVEL_ES) << "NAL: " << nal_unit_type;
- return true;
- }
-
- NOTREACHED();
- return false;
-}
-
-bool EsParserH264::ProcessSPS(const uint8* buf, int size) {
- if (size <= 0)
- return false;
-
- // Removes start code emulation prevention.
- // TODO(damienv): refactoring in media/base
- // so as to have a unique H264 bit reader in Chrome.
- scoped_ptr<uint8[]> buf_rbsp(new uint8[size]);
- int rbsp_size = ConvertToRbsp(buf, size, buf_rbsp.get());
-
- BitReaderH264 bit_reader(buf_rbsp.get(), rbsp_size);
-
- int profile_idc;
- int constraint_setX_flag;
- int level_idc;
- uint32 seq_parameter_set_id;
- uint32 log2_max_frame_num_minus4;
- uint32 pic_order_cnt_type;
- RCHECK(bit_reader.ReadBits(8, &profile_idc));
- RCHECK(bit_reader.ReadBits(8, &constraint_setX_flag));
- RCHECK(bit_reader.ReadBits(8, &level_idc));
- RCHECK(bit_reader.ReadBitsExpGolomb(&seq_parameter_set_id));
- RCHECK(bit_reader.ReadBitsExpGolomb(&log2_max_frame_num_minus4));
- RCHECK(bit_reader.ReadBitsExpGolomb(&pic_order_cnt_type));
-
- // |pic_order_cnt_type| shall be in the range of 0 to 2.
- RCHECK(pic_order_cnt_type <= 2);
- if (pic_order_cnt_type == 0) {
- uint32 log2_max_pic_order_cnt_lsb_minus4;
- RCHECK(bit_reader.ReadBitsExpGolomb(&log2_max_pic_order_cnt_lsb_minus4));
- } else if (pic_order_cnt_type == 1) {
- // Note: |offset_for_non_ref_pic| and |offset_for_top_to_bottom_field|
- // correspond to their codenums, not to their actual values.
- int delta_pic_order_always_zero_flag;
- uint32 offset_for_non_ref_pic;
- uint32 offset_for_top_to_bottom_field;
- uint32 num_ref_frames_in_pic_order_cnt_cycle;
- RCHECK(bit_reader.ReadBits(1, &delta_pic_order_always_zero_flag));
- RCHECK(bit_reader.ReadBitsExpGolomb(&offset_for_non_ref_pic));
- RCHECK(bit_reader.ReadBitsExpGolomb(&offset_for_top_to_bottom_field));
- RCHECK(
- bit_reader.ReadBitsExpGolomb(&num_ref_frames_in_pic_order_cnt_cycle));
- for (uint32 i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
- uint32 offset_for_ref_frame_codenum;
- RCHECK(bit_reader.ReadBitsExpGolomb(&offset_for_ref_frame_codenum));
- }
- }
-
- uint32 num_ref_frames;
- int gaps_in_frame_num_value_allowed_flag;
- uint32 pic_width_in_mbs_minus1;
- uint32 pic_height_in_map_units_minus1;
- RCHECK(bit_reader.ReadBitsExpGolomb(&num_ref_frames));
- RCHECK(bit_reader.ReadBits(1, &gaps_in_frame_num_value_allowed_flag));
- RCHECK(bit_reader.ReadBitsExpGolomb(&pic_width_in_mbs_minus1));
- RCHECK(bit_reader.ReadBitsExpGolomb(&pic_height_in_map_units_minus1));
-
- int frame_mbs_only_flag;
- RCHECK(bit_reader.ReadBits(1, &frame_mbs_only_flag));
- if (!frame_mbs_only_flag) {
- int mb_adaptive_frame_field_flag;
- RCHECK(bit_reader.ReadBits(1, &mb_adaptive_frame_field_flag));
- }
-
- int direct_8x8_inference_flag;
- RCHECK(bit_reader.ReadBits(1, &direct_8x8_inference_flag));
-
- int frame_cropping_flag;
- uint32 frame_crop_left_offset = 0;
- uint32 frame_crop_right_offset = 0;
- uint32 frame_crop_top_offset = 0;
- uint32 frame_crop_bottom_offset = 0;
- RCHECK(bit_reader.ReadBits(1, &frame_cropping_flag));
- if (frame_cropping_flag) {
- RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_left_offset));
- RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_right_offset));
- RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_top_offset));
- RCHECK(bit_reader.ReadBitsExpGolomb(&frame_crop_bottom_offset));
- }
-
- int vui_parameters_present_flag;
- RCHECK(bit_reader.ReadBits(1, &vui_parameters_present_flag));
- int sar_width = 1;
- int sar_height = 1;
- if (vui_parameters_present_flag) {
- // Read only the aspect ratio information from the VUI section.
- // TODO(damienv): check whether other VUI info are useful.
- int aspect_ratio_info_present_flag;
- RCHECK(bit_reader.ReadBits(1, &aspect_ratio_info_present_flag));
- if (aspect_ratio_info_present_flag) {
- int aspect_ratio_idc;
- RCHECK(bit_reader.ReadBits(8, &aspect_ratio_idc));
- if (aspect_ratio_idc == kExtendedSar) {
- RCHECK(bit_reader.ReadBits(16, &sar_width));
- RCHECK(bit_reader.ReadBits(16, &sar_height));
- } else if (aspect_ratio_idc < 14) {
- sar_width = kTableSarWidth[aspect_ratio_idc];
- sar_height = kTableSarHeight[aspect_ratio_idc];
- }
- }
- }
-
- if (sar_width == 0 || sar_height == 0) {
- DVLOG(1) << "Unspecified SAR not supported";
- return false;
- }
-
- // TODO(damienv): a MAP unit can be either 16 or 32 pixels,
- // although it's 16 pixels for progressive non-MBAFF frames.
- gfx::Size coded_size((pic_width_in_mbs_minus1 + 1) * 16,
- (pic_height_in_map_units_minus1 + 1) * 16);
- gfx::Rect visible_rect(
- frame_crop_left_offset,
- frame_crop_top_offset,
- (coded_size.width() - frame_crop_right_offset) - frame_crop_left_offset,
- (coded_size.height() - frame_crop_bottom_offset) - frame_crop_top_offset);
- if (visible_rect.width() <= 0 || visible_rect.height() <= 0)
- return false;
- gfx::Size natural_size((visible_rect.width() * sar_width) / sar_height,
- visible_rect.height());
- if (natural_size.width() == 0)
- return false;
-
- // TODO(damienv):
- // Assuming the SPS is used right away by the PPS and the slice headers
- // is a strong assumption. In theory, we should process the SPS and PPS,
- // and change the video decoder config only when a slice header switches
- // the PPS id.
- VideoDecoderConfig video_decoder_config(
- kCodecH264,
- VIDEO_CODEC_PROFILE_UNKNOWN, // TODO(damienv)
- VideoFrame::YV12,
- coded_size,
- visible_rect,
- natural_size,
- NULL, 0,
- false);
-
- if (!video_decoder_config.Matches(last_video_decoder_config_)) {
- DVLOG(1) << "Profile IDC: " << profile_idc;
- DVLOG(1) << "Level IDC: " << level_idc;
- DVLOG(1) << "Pic width: " << (pic_width_in_mbs_minus1 + 1) * 16;
- DVLOG(1) << "Pic height: " << (pic_height_in_map_units_minus1 + 1) * 16;
- DVLOG(1) << "log2_max_frame_num_minus4: " << log2_max_frame_num_minus4;
- DVLOG(1) << "SAR: width=" << sar_width << " height=" << sar_height;
- last_video_decoder_config_ = video_decoder_config;
- new_video_config_cb_.Run(video_decoder_config);
- }
-
- return true;
-}
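A worked example of the geometry math above, using typical 1080p SPS values: pic_width_in_mbs_minus1 = 119 and pic_height_in_map_units_minus1 = 67 give a coded size of 120 * 16 x 68 * 16 = 1920x1088. With frame_crop_bottom_offset = 8 as this parser counts it (raw pixels; the H.264 spec actually scales crop offsets by a chroma-dependent crop unit, which this code does not apply), the visible rect is 1920x1080, and with a 1:1 SAR the natural size is 1920x1080 as well.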
-
-} // namespace mp2t
-} // namespace media
-
diff --git a/chromium/media/mp3/mp3_stream_parser.cc b/chromium/media/mp3/mp3_stream_parser.cc
deleted file mode 100644
index b20756cd228..00000000000
--- a/chromium/media/mp3/mp3_stream_parser.cc
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mp3/mp3_stream_parser.h"
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/message_loop/message_loop.h"
-#include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
-#include "media/base/stream_parser_buffer.h"
-#include "media/base/text_track_config.h"
-#include "media/base/video_decoder_config.h"
-#include "net/http/http_util.h"
-
-namespace media {
-
-static const uint32 kMP3StartCodeMask = 0xffe00000;
-static const uint32 kICYStartCode = 0x49435920; // 'ICY '
-
-// Arbitrary upper bound on the size of an IceCast header before it
-// triggers an error.
-static const int kMaxIcecastHeaderSize = 4096;
-
-static const uint32 kID3StartCodeMask = 0xffffff00;
-static const uint32 kID3v1StartCode = 0x54414700; // 'TAG\0'
-static const int kID3v1Size = 128;
-static const int kID3v1ExtendedSize = 227;
-static const uint32 kID3v2StartCode = 0x49443300; // 'ID3\0'
-
-// Map that determines which bitrate_index & channel_mode combinations
-// are allowed.
-// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
-static const bool kIsAllowed[17][4] = {
- { true, true, true, true }, // free
- { true, false, false, false }, // 32
- { true, false, false, false }, // 48
- { true, false, false, false }, // 56
- { true, true, true, true }, // 64
- { true, false, false, false }, // 80
- { true, true, true, true }, // 96
- { true, true, true, true }, // 112
- { true, true, true, true }, // 128
- { true, true, true, true }, // 160
- { true, true, true, true }, // 192
- { false, true, true, true }, // 224
- { false, true, true, true }, // 256
- { false, true, true, true }, // 320
- { false, true, true, true }, // 384
- { false, false, false, false } // bad
-};
-
-// Maps version and layer information in the frame header
-// into an index for the |kBitrateMap|.
-// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
-static const int kVersionLayerMap[4][4] = {
- // { reserved, L3, L2, L1 }
- { 5, 4, 4, 3 }, // MPEG 2.5
- { 5, 5, 5, 5 }, // reserved
- { 5, 4, 4, 3 }, // MPEG 2
- { 5, 2, 1, 0 } // MPEG 1
-};
-
-// Maps the bitrate index field in the header and an index
-// from |kVersionLayerMap| to a frame bitrate.
-// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
-static const int kBitrateMap[16][6] = {
- // { V1L1, V1L2, V1L3, V2L1, V2L2 & V2L3, reserved }
- { 0, 0, 0, 0, 0, 0 },
- { 32, 32, 32, 32, 8, 0 },
- { 64, 48, 40, 48, 16, 0 },
- { 96, 56, 48, 56, 24, 0 },
- { 128, 64, 56, 64, 32, 0 },
- { 160, 80, 64, 80, 40, 0 },
- { 192, 96, 80, 96, 48, 0 },
- { 224, 112, 96, 112, 56, 0 },
- { 256, 128, 112, 128, 64, 0 },
- { 288, 160, 128, 144, 80, 0 },
- { 320, 192, 160, 160, 96, 0 },
- { 352, 224, 192, 176, 112, 0 },
- { 384, 256, 224, 192, 128, 0 },
- { 416, 320, 256, 224, 144, 0 },
- { 448, 384, 320, 256, 160, 0 },
- { 0, 0, 0, 0, 0, 0 }
-};
-
-// Maps the sample rate index and version fields from the frame header
-// to a sample rate.
-// Derived from: http://mpgedit.org/mpgedit/mpeg_format/MP3Format.html
-static const int kSampleRateMap[4][4] = {
- // { V2.5, reserved, V2, V1 }
- { 11025, 0, 22050, 44100 },
- { 12000, 0, 24000, 48000 },
- { 8000, 0, 16000, 32000 },
- { 0, 0, 0, 0 }
-};
-
-// Frame header field constants.
-static const int kVersion2 = 2;
-static const int kVersionReserved = 1;
-static const int kVersion2_5 = 0;
-static const int kLayerReserved = 0;
-static const int kLayer1 = 3;
-static const int kLayer2 = 2;
-static const int kLayer3 = 1;
-static const int kBitrateFree = 0;
-static const int kBitrateBad = 0xf;
-static const int kSampleRateReserved = 3;
-
-MP3StreamParser::MP3StreamParser()
- : state_(UNINITIALIZED),
- in_media_segment_(false) {
-}
-
-MP3StreamParser::~MP3StreamParser() {}
-
-void MP3StreamParser::Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
- DVLOG(1) << __FUNCTION__;
- DCHECK_EQ(state_, UNINITIALIZED);
- init_cb_ = init_cb;
- config_cb_ = config_cb;
- new_buffers_cb_ = new_buffers_cb;
- new_segment_cb_ = new_segment_cb;
- end_of_segment_cb_ = end_of_segment_cb;
- log_cb_ = log_cb;
-
- ChangeState(INITIALIZED);
-}
-
-void MP3StreamParser::Flush() {
- DVLOG(1) << __FUNCTION__;
- DCHECK_NE(state_, UNINITIALIZED);
- queue_.Reset();
- timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
- in_media_segment_ = false;
-}
-
-bool MP3StreamParser::Parse(const uint8* buf, int size) {
- DVLOG(1) << __FUNCTION__ << "(" << size << ")";
- DCHECK(buf);
- DCHECK_GT(size, 0);
- DCHECK_NE(state_, UNINITIALIZED);
-
- if (state_ == PARSE_ERROR)
- return false;
-
- DCHECK_EQ(state_, INITIALIZED);
-
- queue_.Push(buf, size);
-
- bool end_of_segment = true;
- BufferQueue buffers;
- for (;;) {
- const uint8* data;
- int data_size;
- queue_.Peek(&data, &data_size);
-
- if (data_size < 4)
- break;
-
- uint32 start_code = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
- int bytes_read = 0;
- bool parsed_metadata = true;
- if ((start_code & kMP3StartCodeMask) == kMP3StartCodeMask) {
- bytes_read = ParseMP3Frame(data, data_size, &buffers);
-
- // Only allow the current segment to end if a full frame has been parsed.
- end_of_segment = bytes_read > 0;
- parsed_metadata = false;
- } else if (start_code == kICYStartCode) {
- bytes_read = ParseIcecastHeader(data, data_size);
- } else if ((start_code & kID3StartCodeMask) == kID3v1StartCode) {
- bytes_read = ParseID3v1(data, data_size);
- } else if ((start_code & kID3StartCodeMask) == kID3v2StartCode) {
- bytes_read = ParseID3v2(data, data_size);
- } else {
- bytes_read = FindNextValidStartCode(data, data_size);
-
- if (bytes_read > 0) {
- DVLOG(1) << "Unexpected start code 0x" << std::hex << start_code;
- DVLOG(1) << "SKIPPING " << bytes_read << " bytes of garbage.";
- }
- }
-
- CHECK_LE(bytes_read, data_size);
-
- if (bytes_read < 0) {
- ChangeState(PARSE_ERROR);
- return false;
- } else if (bytes_read == 0) {
- // Need more data.
- break;
- }
-
- // Send pending buffers if we have encountered metadata.
- if (parsed_metadata && !buffers.empty() && !SendBuffers(&buffers, true))
- return false;
-
- queue_.Pop(bytes_read);
- end_of_segment = true;
- }
-
- if (buffers.empty())
- return true;
-
- // Send buffers collected in this append that haven't been sent yet.
- return SendBuffers(&buffers, end_of_segment);
-}
-
-void MP3StreamParser::ChangeState(State state) {
- DVLOG(1) << __FUNCTION__ << "() : " << state_ << " -> " << state;
- state_ = state;
-}
-
-int MP3StreamParser::ParseFrameHeader(const uint8* data, int size,
- int* frame_size,
- int* sample_rate,
- ChannelLayout* channel_layout,
- int* sample_count) const {
- DCHECK(data);
- DCHECK_GE(size, 0);
- DCHECK(frame_size);
-
- if (size < 4)
- return 0;
-
- BitReader reader(data, size);
- int sync;
- int version;
- int layer;
- int is_protected;
- int bitrate_index;
- int sample_rate_index;
- int has_padding;
- int is_private;
- int channel_mode;
- int other_flags;
-
- if (!reader.ReadBits(11, &sync) ||
- !reader.ReadBits(2, &version) ||
- !reader.ReadBits(2, &layer) ||
- !reader.ReadBits(1, &is_protected) ||
- !reader.ReadBits(4, &bitrate_index) ||
- !reader.ReadBits(2, &sample_rate_index) ||
- !reader.ReadBits(1, &has_padding) ||
- !reader.ReadBits(1, &is_private) ||
- !reader.ReadBits(2, &channel_mode) ||
- !reader.ReadBits(6, &other_flags)) {
- return -1;
- }
-
- DVLOG(2) << "Header data :" << std::hex
- << " sync 0x" << sync
- << " version 0x" << version
- << " layer 0x" << layer
- << " bitrate_index 0x" << bitrate_index
- << " sample_rate_index 0x" << sample_rate_index
- << " channel_mode 0x" << channel_mode;
-
- if (sync != 0x7ff ||
- version == kVersionReserved ||
- layer == kLayerReserved ||
- bitrate_index == kBitrateFree || bitrate_index == kBitrateBad ||
- sample_rate_index == kSampleRateReserved) {
- MEDIA_LOG(log_cb_) << "Invalid header data :" << std::hex
- << " sync 0x" << sync
- << " version 0x" << version
- << " layer 0x" << layer
- << " bitrate_index 0x" << bitrate_index
- << " sample_rate_index 0x" << sample_rate_index
- << " channel_mode 0x" << channel_mode;
- return -1;
- }
-
- if (layer == kLayer2 && !kIsAllowed[bitrate_index][channel_mode]) {
- MEDIA_LOG(log_cb_) << "Invalid (bitrate_index, channel_mode) combination :"
- << std::hex
- << " bitrate_index " << bitrate_index
- << " channel_mode " << channel_mode;
- return -1;
- }
-
- int bitrate = kBitrateMap[bitrate_index][kVersionLayerMap[version][layer]];
-
- if (bitrate == 0) {
- MEDIA_LOG(log_cb_) << "Invalid bitrate :" << std::hex
- << " version " << version
- << " layer " << layer
- << " bitrate_index " << bitrate_index;
- return -1;
- }
-
- DVLOG(2) << " bitrate " << bitrate;
-
- int frame_sample_rate = kSampleRateMap[sample_rate_index][version];
- if (frame_sample_rate == 0) {
- MEDIA_LOG(log_cb_) << "Invalid sample rate :" << std::hex
- << " version " << version
- << " sample_rate_index " << sample_rate_index;
- return -1;
- }
-
- if (sample_rate)
- *sample_rate = frame_sample_rate;
-
- // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
- // Table 2.1.5
- int samples_per_frame;
- switch (layer) {
- case kLayer1:
- samples_per_frame = 384;
- break;
-
- case kLayer2:
- samples_per_frame = 1152;
- break;
-
- case kLayer3:
- if (version == kVersion2 || version == kVersion2_5)
- samples_per_frame = 576;
- else
- samples_per_frame = 1152;
- break;
-
- default:
- return -1;
- }
-
- if (sample_count)
- *sample_count = samples_per_frame;
-
- // http://teslabs.com/openplayer/docs/docs/specs/mp3_structure2.pdf
- // Text just below Table 2.1.5.
- if (layer == kLayer1) {
- // This formulation is a slight variation on the equation below,
- // but has slightly different truncation characteristics to deal
- // with the fact that Layer 1 has 4 byte "slots" instead of single
- // byte ones.
- *frame_size = 4 * (12 * bitrate * 1000 / frame_sample_rate);
- } else {
- *frame_size =
- ((samples_per_frame / 8) * bitrate * 1000) / frame_sample_rate;
- }
-
- if (has_padding)
- *frame_size += (layer == kLayer1) ? 4 : 1;
-
- if (channel_layout) {
- // Map Stereo(0), Joint Stereo(1), and Dual Channel (2) to
- // CHANNEL_LAYOUT_STEREO and Single Channel (3) to CHANNEL_LAYOUT_MONO.
- *channel_layout =
- (channel_mode == 3) ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
- }
-
- return 4;
-}
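A worked example of the frame size formulas above: an MPEG-1 Layer 3 frame at 128 kbps and 44100 Hz gives (1152 / 8) * 128 * 1000 / 44100 = 417 bytes after integer truncation, or 418 with the padding bit set; a Layer 1 frame at 448 kbps and 32000 Hz gives 4 * (12 * 448 * 1000 / 32000) = 672 bytes.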
-
-int MP3StreamParser::ParseMP3Frame(const uint8* data,
- int size,
- BufferQueue* buffers) {
- DVLOG(2) << __FUNCTION__ << "(" << size << ")";
-
- int sample_rate;
- ChannelLayout channel_layout;
- int frame_size;
- int sample_count;
- int bytes_read = ParseFrameHeader(
- data, size, &frame_size, &sample_rate, &channel_layout, &sample_count);
-
- if (bytes_read <= 0)
- return bytes_read;
-
- // Make sure data contains the entire frame.
- if (size < frame_size)
- return 0;
-
- DVLOG(2) << " sample_rate " << sample_rate
- << " channel_layout " << channel_layout
- << " frame_size " << frame_size;
-
- if (config_.IsValidConfig() &&
- (config_.samples_per_second() != sample_rate ||
- config_.channel_layout() != channel_layout)) {
- // Clear config data so that a config change is initiated.
- config_ = AudioDecoderConfig();
-
- // Send all buffers associated with the previous config.
- if (!buffers->empty() && !SendBuffers(buffers, true))
- return -1;
- }
-
- if (!config_.IsValidConfig()) {
- config_.Initialize(kCodecMP3, kSampleFormatF32, channel_layout,
- sample_rate, NULL, 0, false, false,
- base::TimeDelta(), base::TimeDelta());
-
- base::TimeDelta base_timestamp;
- if (timestamp_helper_)
- base_timestamp = timestamp_helper_->GetTimestamp();
-
- timestamp_helper_.reset(new AudioTimestampHelper(sample_rate));
- timestamp_helper_->SetBaseTimestamp(base_timestamp);
-
- VideoDecoderConfig video_config;
- bool success = config_cb_.Run(config_, video_config, TextTrackConfigMap());
-
- if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(success, kInfiniteDuration());
-
- if (!success)
- return -1;
- }
-
- scoped_refptr<StreamParserBuffer> buffer =
- StreamParserBuffer::CopyFrom(data, frame_size, true);
- buffer->set_timestamp(timestamp_helper_->GetTimestamp());
- buffer->set_duration(timestamp_helper_->GetFrameDuration(sample_count));
- buffers->push_back(buffer);
-
- timestamp_helper_->AddFrames(sample_count);
-
- return frame_size;
-}
-
-int MP3StreamParser::ParseIcecastHeader(const uint8* data, int size) {
- DVLOG(1) << __FUNCTION__ << "(" << size << ")";
-
- if (size < 4)
- return 0;
-
- if (memcmp("ICY ", data, 4))
- return -1;
-
- int locate_size = std::min(size, kMaxIcecastHeaderSize);
- int offset = net::HttpUtil::LocateEndOfHeaders(
- reinterpret_cast<const char*>(data), locate_size, 4);
- if (offset < 0) {
- if (locate_size == kMaxIcecastHeaderSize) {
- MEDIA_LOG(log_cb_) << "Icecast header is too large.";
- return -1;
- }
-
- return 0;
- }
-
- return offset;
-}
-
-int MP3StreamParser::ParseID3v1(const uint8* data, int size) {
- DVLOG(1) << __FUNCTION__ << "(" << size << ")";
-
- if (size < kID3v1Size)
- return 0;
-
- // TODO(acolwell): Add code to actually validate ID3v1 data and
- // expose it as a metadata text track.
- return !memcmp(data, "TAG+", 4) ? kID3v1ExtendedSize : kID3v1Size;
-}
-
-int MP3StreamParser::ParseID3v2(const uint8* data, int size) {
- DVLOG(1) << __FUNCTION__ << "(" << size << ")";
-
- if (size < 10)
- return 0;
-
- BitReader reader(data, size);
- int32 id;
- int version;
- uint8 flags;
- int32 id3_size;
-
- if (!reader.ReadBits(24, &id) ||
- !reader.ReadBits(16, &version) ||
- !reader.ReadBits(8, &flags) ||
- !ParseSyncSafeInt(&reader, &id3_size)) {
- return -1;
- }
-
- int32 actual_tag_size = 10 + id3_size;
-
- // Increment size if 'Footer present' flag is set.
- if (flags & 0x10)
- actual_tag_size += 10;
-
- // Make sure we have the entire tag.
- if (size < actual_tag_size)
- return 0;
-
- // TODO(acolwell): Add code to actually validate ID3v2 data and
- // expose it as a metadata text track.
- return actual_tag_size;
-}
-
-bool MP3StreamParser::ParseSyncSafeInt(BitReader* reader, int32* value) {
- *value = 0;
- for (int i = 0; i < 4; ++i) {
- uint8 tmp;
- if (!reader->ReadBits(1, &tmp) || tmp != 0) {
- MEDIA_LOG(log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
- return false;
- }
-
- if (!reader->ReadBits(7, &tmp))
- return false;
-
- *value <<= 7;
- *value += tmp;
- }
-
- return true;
-}
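A worked example of the syncsafe decoding above: size bytes 0x00 0x00 0x02 0x01 decode to (0x02 << 7) | 0x01 = 257, since each byte contributes only its low 7 bits. In ParseID3v2() above, a tag with that size and the footer flag set would then span 10 + 257 + 10 = 277 bytes.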
-
-int MP3StreamParser::FindNextValidStartCode(const uint8* data, int size) const {
- const uint8* start = data;
- const uint8* end = data + size;
-
- while (start < end) {
- int bytes_left = end - start;
- const uint8* candidate_start_code =
- static_cast<const uint8*>(memchr(start, 0xff, bytes_left));
-
- if (!candidate_start_code)
- return 0;
-
- bool parse_header_failed = false;
- const uint8* sync = candidate_start_code;
- // Try to find 3 valid frames in a row. 3 was selected to decrease
- // the probability of false positives.
- for (int i = 0; i < 3; ++i) {
- int sync_size = end - sync;
- int frame_size;
- int sync_bytes = ParseFrameHeader(
- sync, sync_size, &frame_size, NULL, NULL, NULL);
-
- if (sync_bytes == 0)
- return 0;
-
- if (sync_bytes > 0) {
- DCHECK_LT(sync_bytes, sync_size);
-
- // Skip over this frame so we can check the next one.
- sync += frame_size;
-
- // Make sure the next frame starts inside the buffer.
- if (sync >= end)
- return 0;
- } else {
- DVLOG(1) << "ParseFrameHeader() " << i << " failed @" << (sync - data);
- parse_header_failed = true;
- break;
- }
- }
-
- if (parse_header_failed) {
- // One of the frame header parses failed so |candidate_start_code|
- // did not point to the start of a real frame. Move |start| forward
- // so we can find the next candidate.
- start = candidate_start_code + 1;
- continue;
- }
-
- return candidate_start_code - data;
- }
-
- return 0;
-}
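To make the heuristic concrete: if the 0xff at offset N parses as a valid header for, say, a 417-byte frame, the headers chained at N + 417 and one frame further must also parse and start inside the buffer before N is returned; a stray 0xff inside audio payload will almost always fail one of the three parses, and the scan resumes at the following byte.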
-
-bool MP3StreamParser::SendBuffers(BufferQueue* buffers, bool end_of_segment) {
- DCHECK(!buffers->empty());
-
- if (!in_media_segment_) {
- in_media_segment_ = true;
- new_segment_cb_.Run();
- }
-
- BufferQueue empty_video_buffers;
- if (!new_buffers_cb_.Run(*buffers, empty_video_buffers))
- return false;
- buffers->clear();
-
- if (end_of_segment) {
- in_media_segment_ = false;
- end_of_segment_cb_.Run();
- }
-
- return true;
-}
-
-} // namespace media
diff --git a/chromium/media/mp3/mp3_stream_parser_unittest.cc b/chromium/media/mp3/mp3_stream_parser_unittest.cc
deleted file mode 100644
index f565093cd5b..00000000000
--- a/chromium/media/mp3/mp3_stream_parser_unittest.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/stream_parser_buffer.h"
-#include "media/base/test_data_util.h"
-#include "media/base/text_track_config.h"
-#include "media/base/video_decoder_config.h"
-#include "media/mp3/mp3_stream_parser.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class MP3StreamParserTest : public testing::Test {
- public:
- MP3StreamParserTest() {}
-
- protected:
- MP3StreamParser parser_;
- std::stringstream results_stream_;
-
- bool AppendData(const uint8* data, size_t length) {
- return parser_.Parse(data, length);
- }
-
- bool AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
- const uint8* start = data;
- const uint8* end = data + length;
- while (start < end) {
- size_t append_size =
- std::min(piece_size, static_cast<size_t>(end - start));
- if (!AppendData(start, append_size))
- return false;
- start += append_size;
- }
- return true;
- }
-
- void OnInitDone(bool success, base::TimeDelta duration) {
- DVLOG(1) << __FUNCTION__ << "(" << success << ", "
- << duration.InMilliseconds() << ")";
- }
-
- bool OnNewConfig(const AudioDecoderConfig& audio_config,
- const VideoDecoderConfig& video_config,
- const StreamParser::TextTrackConfigMap& text_config) {
- DVLOG(1) << __FUNCTION__ << "(" << audio_config.IsValidConfig() << ", "
- << video_config.IsValidConfig() << ")";
- EXPECT_TRUE(audio_config.IsValidConfig());
- EXPECT_FALSE(video_config.IsValidConfig());
- return true;
- }
-
- std::string BufferQueueToString(const StreamParser::BufferQueue& buffers) {
- std::stringstream ss;
-
- ss << "{";
- for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
- itr != buffers.end();
- ++itr) {
- ss << " " << (*itr)->timestamp().InMilliseconds();
- if ((*itr)->IsKeyframe())
- ss << "K";
- }
- ss << " }";
-
- return ss.str();
- }
-
- bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers) {
- EXPECT_FALSE(audio_buffers.empty());
- EXPECT_TRUE(video_buffers.empty());
-
- std::string buffers_str = BufferQueueToString(audio_buffers);
- DVLOG(1) << __FUNCTION__ << " : " << buffers_str;
- results_stream_ << buffers_str;
- return true;
- }
-
- void OnKeyNeeded(const std::string& type,
- const std::vector<uint8>& init_data) {
- DVLOG(1) << __FUNCTION__ << "(" << type << ", " << init_data.size() << ")";
- }
-
- void OnNewSegment() {
- DVLOG(1) << __FUNCTION__;
- results_stream_ << "NewSegment";
- }
-
- void OnEndOfSegment() {
- DVLOG(1) << __FUNCTION__;
- results_stream_ << "EndOfSegment";
- }
-
- void InitializeParser() {
- parser_.Init(
- base::Bind(&MP3StreamParserTest::OnInitDone, base::Unretained(this)),
- base::Bind(&MP3StreamParserTest::OnNewConfig, base::Unretained(this)),
- base::Bind(&MP3StreamParserTest::OnNewBuffers, base::Unretained(this)),
- StreamParser::NewTextBuffersCB(),
- base::Bind(&MP3StreamParserTest::OnKeyNeeded, base::Unretained(this)),
- base::Bind(&MP3StreamParserTest::OnNewSegment, base::Unretained(this)),
- base::Bind(&MP3StreamParserTest::OnEndOfSegment,
- base::Unretained(this)),
- LogCB());
- }
-
- std::string ParseFile(const std::string& filename, int append_bytes) {
- results_stream_.str("");
- InitializeParser();
-
- scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
- EXPECT_TRUE(
- AppendDataInPieces(buffer->data(), buffer->data_size(), append_bytes));
- return results_stream_.str();
- }
-};
-
-// Test parsing with small prime sized chunks to smoke out "power of
-// 2" field size assumptions.
-TEST_F(MP3StreamParserTest, UnalignedAppend) {
- std::string expected =
- "NewSegment"
- "{ 0K }"
- "{ 26K }"
- "{ 52K }"
- "{ 78K }"
- "{ 104K }"
- "{ 130K }"
- "{ 156K }"
- "{ 182K }"
- "EndOfSegment"
- "NewSegment"
- "{ 208K }"
- "{ 235K }"
- "{ 261K }"
- "EndOfSegment"
- "NewSegment"
- "{ 287K }"
- "{ 313K }"
- "EndOfSegment";
- EXPECT_EQ(expected, ParseFile("sfx.mp3", 17));
-}
-
-// Test parsing with a larger piece size to verify that multiple buffers
-// are passed to |new_buffer_cb_|.
-TEST_F(MP3StreamParserTest, UnalignedAppend512) {
- std::string expected =
- "NewSegment"
- "{ 0K }"
- "{ 26K 52K 78K 104K }"
- "EndOfSegment"
- "NewSegment"
- "{ 130K 156K 182K }"
- "{ 208K 235K 261K 287K }"
- "{ 313K }"
- "EndOfSegment";
- EXPECT_EQ(expected, ParseFile("sfx.mp3", 512));
-}
-
-} // namespace media
diff --git a/chromium/media/mp4/aac_unittest.cc b/chromium/media/mp4/aac_unittest.cc
deleted file mode 100644
index d9ce22db3fe..00000000000
--- a/chromium/media/mp4/aac_unittest.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mp4/aac.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-namespace mp4 {
-
-TEST(AACTest, BasicProfileTest) {
- AAC aac;
- uint8 buffer[] = {0x12, 0x10};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_TRUE(aac.Parse(data));
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(false), 44100);
- EXPECT_EQ(aac.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
-}
-
-TEST(AACTest, ExtensionTest) {
- AAC aac;
- uint8 buffer[] = {0x13, 0x08, 0x56, 0xe5, 0x9d, 0x48, 0x80};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_TRUE(aac.Parse(data));
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(false), 48000);
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(true), 48000);
- EXPECT_EQ(aac.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
-}
-
-// Test implicit SBR with mono channel config.
-// Mono channel layout should only be reported if SBR is not
-// specified. Otherwise stereo should be reported.
-// See ISO-14496-3 Section 1.6.6.1.2 for details about this special casing.
-TEST(AACTest, ImplicitSBR_ChannelConfig0) {
- AAC aac;
- uint8 buffer[] = {0x13, 0x08};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_TRUE(aac.Parse(data));
-
- // Test w/o implicit SBR.
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(false), 24000);
- EXPECT_EQ(aac.GetChannelLayout(false), CHANNEL_LAYOUT_MONO);
-
- // Test implicit SBR.
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(true), 48000);
- EXPECT_EQ(aac.GetChannelLayout(true), CHANNEL_LAYOUT_STEREO);
-}
-
-// Tests implicit SBR with a stereo channel config.
-TEST(AACTest, ImplicitSBR_ChannelConfig1) {
- AAC aac;
- uint8 buffer[] = {0x13, 0x10};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_TRUE(aac.Parse(data));
-
- // Test w/o implicit SBR.
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(false), 24000);
- EXPECT_EQ(aac.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
-
- // Test implicit SBR.
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(true), 48000);
- EXPECT_EQ(aac.GetChannelLayout(true), CHANNEL_LAYOUT_STEREO);
-}
-
-TEST(AACTest, SixChannelTest) {
- AAC aac;
- uint8 buffer[] = {0x11, 0xb0};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_TRUE(aac.Parse(data));
- EXPECT_EQ(aac.GetOutputSamplesPerSecond(false), 48000);
- EXPECT_EQ(aac.GetChannelLayout(false), CHANNEL_LAYOUT_5_1);
-}
-
-TEST(AACTest, DataTooShortTest) {
- AAC aac;
- std::vector<uint8> data;
-
- EXPECT_FALSE(aac.Parse(data));
-
- data.push_back(0x12);
- EXPECT_FALSE(aac.Parse(data));
-}
-
-TEST(AACTest, IncorrectProfileTest) {
- AAC aac;
- uint8 buffer[] = {0x0, 0x08};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_FALSE(aac.Parse(data));
-
- data[0] = 0x08;
- EXPECT_TRUE(aac.Parse(data));
-
- data[0] = 0x28;
- EXPECT_FALSE(aac.Parse(data));
-}
-
-TEST(AACTest, IncorrectFrequencyTest) {
- AAC aac;
- uint8 buffer[] = {0x0f, 0x88};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_FALSE(aac.Parse(data));
-
- data[0] = 0x0e;
- data[1] = 0x08;
- EXPECT_TRUE(aac.Parse(data));
-}
-
-TEST(AACTest, IncorrectChannelTest) {
- AAC aac;
- uint8 buffer[] = {0x0e, 0x00};
- std::vector<uint8> data;
-
- data.assign(buffer, buffer + sizeof(buffer));
-
- EXPECT_FALSE(aac.Parse(data));
-
- data[1] = 0x08;
- EXPECT_TRUE(aac.Parse(data));
-}
-
-} // namespace mp4
-
-} // namespace media
diff --git a/chromium/media/mp4/avc.cc b/chromium/media/mp4/avc.cc
deleted file mode 100644
index ae28ffd256e..00000000000
--- a/chromium/media/mp4/avc.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mp4/avc.h"
-
-#include <algorithm>
-#include <vector>
-
-#include "media/mp4/box_definitions.h"
-#include "media/mp4/box_reader.h"
-
-namespace media {
-namespace mp4 {
-
-static const uint8 kAnnexBStartCode[] = {0, 0, 0, 1};
-static const int kAnnexBStartCodeSize = 4;
-
-static bool ConvertAVCToAnnexBInPlaceForLengthSize4(std::vector<uint8>* buf) {
- const int kLengthSize = 4;
- size_t pos = 0;
- while (pos + kLengthSize < buf->size()) {
- int nal_size = (*buf)[pos];
- nal_size = (nal_size << 8) + (*buf)[pos+1];
- nal_size = (nal_size << 8) + (*buf)[pos+2];
- nal_size = (nal_size << 8) + (*buf)[pos+3];
- std::copy(kAnnexBStartCode, kAnnexBStartCode + kAnnexBStartCodeSize,
- buf->begin() + pos);
- pos += kLengthSize + nal_size;
- }
- return pos == buf->size();
-}
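The in-place rewrite works only because the 4-byte length prefix and the 4-byte Annex B start code are the same size, so each length field can be overwritten without shifting the payload:

// { 0x00, 0x00, 0x00, 0x03, 0xAA, 0xBB, 0xCC }  // 4-byte length (3) + NAL
// becomes, in place:
// { 0x00, 0x00, 0x00, 0x01, 0xAA, 0xBB, 0xCC }  // start code + NAL
For 1- and 2-byte length prefixes, ConvertFrameToAnnexB() below copies the NALs into a fresh buffer instead.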
-
-// static
-bool AVC::ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer) {
- RCHECK(length_size == 1 || length_size == 2 || length_size == 4);
-
- if (length_size == 4)
- return ConvertAVCToAnnexBInPlaceForLengthSize4(buffer);
-
- std::vector<uint8> temp;
- temp.swap(*buffer);
- buffer->reserve(temp.size() + 32);
-
- size_t pos = 0;
- while (pos + length_size < temp.size()) {
- int nal_size = temp[pos];
- if (length_size == 2) nal_size = (nal_size << 8) + temp[pos+1];
- pos += length_size;
-
- RCHECK(pos + nal_size <= temp.size());
- buffer->insert(buffer->end(), kAnnexBStartCode,
- kAnnexBStartCode + kAnnexBStartCodeSize);
- buffer->insert(buffer->end(), temp.begin() + pos,
- temp.begin() + pos + nal_size);
- pos += nal_size;
- }
- return pos == temp.size();
-}
-
-// static
-bool AVC::ConvertConfigToAnnexB(
- const AVCDecoderConfigurationRecord& avc_config,
- std::vector<uint8>* buffer) {
- DCHECK(buffer->empty());
- buffer->clear();
- int total_size = 0;
- for (size_t i = 0; i < avc_config.sps_list.size(); i++)
- total_size += avc_config.sps_list[i].size() + kAnnexBStartCodeSize;
- for (size_t i = 0; i < avc_config.pps_list.size(); i++)
- total_size += avc_config.pps_list[i].size() + kAnnexBStartCodeSize;
- buffer->reserve(total_size);
-
- for (size_t i = 0; i < avc_config.sps_list.size(); i++) {
- buffer->insert(buffer->end(), kAnnexBStartCode,
- kAnnexBStartCode + kAnnexBStartCodeSize);
- buffer->insert(buffer->end(), avc_config.sps_list[i].begin(),
- avc_config.sps_list[i].end());
- }
-
- for (size_t i = 0; i < avc_config.pps_list.size(); i++) {
- buffer->insert(buffer->end(), kAnnexBStartCode,
- kAnnexBStartCode + kAnnexBStartCodeSize);
- buffer->insert(buffer->end(), avc_config.pps_list[i].begin(),
- avc_config.pps_list[i].end());
- }
- return true;
-}
-
-} // namespace mp4
-} // namespace media
diff --git a/chromium/media/mp4/avc.h b/chromium/media/mp4/avc.h
deleted file mode 100644
index 3d815a1739d..00000000000
--- a/chromium/media/mp4/avc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_MP4_AVC_H_
-#define MEDIA_MP4_AVC_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "media/base/media_export.h"
-
-namespace media {
-namespace mp4 {
-
-struct AVCDecoderConfigurationRecord;
-
-class MEDIA_EXPORT AVC {
- public:
- static bool ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer);
-
- static bool ConvertConfigToAnnexB(
- const AVCDecoderConfigurationRecord& avc_config,
- std::vector<uint8>* buffer);
-};
-
-} // namespace mp4
-} // namespace media
-
-#endif // MEDIA_MP4_AVC_H_
diff --git a/chromium/media/mp4/avc_unittest.cc b/chromium/media/mp4/avc_unittest.cc
deleted file mode 100644
index 766a9791967..00000000000
--- a/chromium/media/mp4/avc_unittest.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string.h>
-
-#include "base/basictypes.h"
-#include "media/base/stream_parser_buffer.h"
-#include "media/mp4/avc.h"
-#include "media/mp4/box_definitions.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/gtest/include/gtest/gtest-param-test.h"
-
-namespace media {
-namespace mp4 {
-
-static const uint8 kNALU1[] = { 0x01, 0x02, 0x03 };
-static const uint8 kNALU2[] = { 0x04, 0x05, 0x06, 0x07 };
-static const uint8 kExpected[] = {
- 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x03,
- 0x00, 0x00, 0x00, 0x01, 0x04, 0x05, 0x06, 0x07 };
-
-static const uint8 kExpectedParamSets[] = {
- 0x00, 0x00, 0x00, 0x01, 0x67, 0x12,
- 0x00, 0x00, 0x00, 0x01, 0x67, 0x34,
- 0x00, 0x00, 0x00, 0x01, 0x68, 0x56, 0x78};
-
-class AVCConversionTest : public testing::TestWithParam<int> {
- protected:
- void MakeInputForLength(int length_size, std::vector<uint8>* buf) {
- buf->clear();
- for (int i = 1; i < length_size; i++)
- buf->push_back(0);
- buf->push_back(sizeof(kNALU1));
- buf->insert(buf->end(), kNALU1, kNALU1 + sizeof(kNALU1));
-
- for (int i = 1; i < length_size; i++)
- buf->push_back(0);
- buf->push_back(sizeof(kNALU2));
- buf->insert(buf->end(), kNALU2, kNALU2 + sizeof(kNALU2));
- }
-};
-
-TEST_P(AVCConversionTest, ParseCorrectly) {
- std::vector<uint8> buf;
- MakeInputForLength(GetParam(), &buf);
- EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
- EXPECT_EQ(buf.size(), sizeof(kExpected));
- EXPECT_EQ(0, memcmp(kExpected, &buf[0], sizeof(kExpected)));
-}
-
-TEST_P(AVCConversionTest, ParsePartial) {
- std::vector<uint8> buf;
- MakeInputForLength(GetParam(), &buf);
- buf.pop_back();
- EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
- // This tests a buffer ending in the middle of a NAL length. For length size
- // of one, this can't happen, so we skip that case.
- if (GetParam() != 1) {
- MakeInputForLength(GetParam(), &buf);
- buf.erase(buf.end() - (sizeof(kNALU2) + 1), buf.end());
- EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
- }
-}
-
-TEST_P(AVCConversionTest, ParseEmpty) {
- std::vector<uint8> buf;
- EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
- EXPECT_EQ(0u, buf.size());
-}
-
-INSTANTIATE_TEST_CASE_P(AVCConversionTestValues,
- AVCConversionTest,
- ::testing::Values(1, 2, 4));
-
-TEST_F(AVCConversionTest, ConvertConfigToAnnexB) {
- AVCDecoderConfigurationRecord avc_config;
- avc_config.sps_list.resize(2);
- avc_config.sps_list[0].push_back(0x67);
- avc_config.sps_list[0].push_back(0x12);
- avc_config.sps_list[1].push_back(0x67);
- avc_config.sps_list[1].push_back(0x34);
- avc_config.pps_list.resize(1);
- avc_config.pps_list[0].push_back(0x68);
- avc_config.pps_list[0].push_back(0x56);
- avc_config.pps_list[0].push_back(0x78);
-
- std::vector<uint8> buf;
- EXPECT_TRUE(AVC::ConvertConfigToAnnexB(avc_config, &buf));
- EXPECT_EQ(0, memcmp(kExpectedParamSets, &buf[0],
- sizeof(kExpectedParamSets)));
-}
-
-} // namespace mp4
-} // namespace media
diff --git a/chromium/media/ozone/media_ozone_platform.cc b/chromium/media/ozone/media_ozone_platform.cc
new file mode 100644
index 00000000000..804de375802
--- /dev/null
+++ b/chromium/media/ozone/media_ozone_platform.cc
@@ -0,0 +1,93 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/ozone/media_ozone_platform.h"
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "ui/ozone/platform_object.h"
+#include "ui/ozone/platform_selection.h"
+
+namespace media {
+
+namespace {
+
+class MediaOzonePlatformStub : public MediaOzonePlatform {
+ public:
+ MediaOzonePlatformStub() {}
+
+ virtual ~MediaOzonePlatformStub() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MediaOzonePlatformStub);
+};
+
+} // namespace
+
+// The following statics are just convenient stubs, declared by the
+// generate_constructor_list.py script. They should be removed once the
+// internal Ozone platforms decide to actually implement their media specifics.
+MediaOzonePlatform* CreateMediaOzonePlatformCaca() {
+ return new MediaOzonePlatformStub;
+}
+
+MediaOzonePlatform* CreateMediaOzonePlatformDri() {
+ return new MediaOzonePlatformStub;
+}
+
+MediaOzonePlatform* CreateMediaOzonePlatformEgltest() {
+ return new MediaOzonePlatformStub;
+}
+
+MediaOzonePlatform* CreateMediaOzonePlatformGbm() {
+ return new MediaOzonePlatformStub;
+}
+
+MediaOzonePlatform* CreateMediaOzonePlatformTest() {
+ return new MediaOzonePlatformStub;
+}
+
+MediaOzonePlatform::MediaOzonePlatform() {
+ CHECK(!instance_) << "There should only be a single MediaOzonePlatform.";
+ instance_ = this;
+}
+
+MediaOzonePlatform::~MediaOzonePlatform() {
+ CHECK_EQ(instance_, this);
+ instance_ = NULL;
+}
+
+// static
+MediaOzonePlatform* MediaOzonePlatform::GetInstance() {
+ if (!instance_)
+ CreateInstance();
+ return instance_;
+}
+
+VideoDecodeAccelerator* MediaOzonePlatform::CreateVideoDecodeAccelerator(
+ const base::Callback<bool(void)>& make_context_current) {
+ NOTIMPLEMENTED();
+ return NULL;
+}
+
+// static
+void MediaOzonePlatform::CreateInstance() {
+ if (instance_)
+ return;
+
+ TRACE_EVENT1("ozone",
+ "MediaOzonePlatform::Initialize",
+ "platform",
+ ui::GetOzonePlatformName());
+ scoped_ptr<MediaOzonePlatform> platform =
+ ui::PlatformObject<MediaOzonePlatform>::Create();
+
+ // TODO(spang): Currently need to leak this object.
+ CHECK_EQ(instance_, platform.release());
+}
+
+// static
+MediaOzonePlatform* MediaOzonePlatform::instance_;
+
+} // namespace media
diff --git a/chromium/media/ozone/media_ozone_platform.h b/chromium/media/ozone/media_ozone_platform.h
new file mode 100644
index 00000000000..c59775dc32b
--- /dev/null
+++ b/chromium/media/ozone/media_ozone_platform.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_OZONE_MEDIA_OZONE_PLATFORM_H_
+#define MEDIA_OZONE_MEDIA_OZONE_PLATFORM_H_
+
+#include "base/callback.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class VideoDecodeAccelerator;
+
+// Class for Ozone platform media implementations. Note that the base class for
+// Ozone platform is at ui/ozone.
+//
+// Ozone platforms must override this class and implement the virtual
+// GetFooFactoryOzone() methods to provide implementations of the
+// various ozone interfaces.
+class MEDIA_EXPORT MediaOzonePlatform {
+ public:
+ MediaOzonePlatform();
+ virtual ~MediaOzonePlatform();
+
+ // In addition to returning the global instance, this also initializes the
+ // subsystems/resources necessary for media.
+ static MediaOzonePlatform* GetInstance();
+
+ // Factory getters to override in subclasses. The returned objects will be
+ // injected into the appropriate layer at startup. Subclasses should not
+ // inject these objects themselves. Ownership is retained by
+ // MediaOzonePlatform.
+ virtual VideoDecodeAccelerator* CreateVideoDecodeAccelerator(
+ const base::Callback<bool(void)>& make_context_current);
+
+ private:
+ static void CreateInstance();
+
+ static MediaOzonePlatform* instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaOzonePlatform);
+};
+
+} // namespace media
+
+#endif // MEDIA_OZONE_MEDIA_OZONE_PLATFORM_H_
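As a sketch of the override contract described above (all names here are hypothetical, not part of this patch), a platform might plug in its decoder like so:

class MediaOzonePlatformFoo : public MediaOzonePlatform {
 public:
  MediaOzonePlatformFoo() {}
  virtual ~MediaOzonePlatformFoo() {}

  virtual VideoDecodeAccelerator* CreateVideoDecodeAccelerator(
      const base::Callback<bool(void)>& make_context_current) OVERRIDE {
    // FooVideoDecodeAccelerator is an assumed platform-side class.
    return new FooVideoDecodeAccelerator(make_context_current);
  }
};

MediaOzonePlatform* CreateMediaOzonePlatformFoo() {
  return new MediaOzonePlatformFoo;
}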
diff --git a/chromium/media/tools/layout_tests/layouttest_analyzer.py b/chromium/media/tools/layout_tests/layouttest_analyzer.py
index 7459a977891..56687674eec 100755
--- a/chromium/media/tools/layout_tests/layouttest_analyzer.py
+++ b/chromium/media/tools/layout_tests/layouttest_analyzer.py
@@ -416,8 +416,8 @@ def UpdateDashboard(dashboard_file_location, test_group_name, data_map,
'skip_tests_count': len(data_map['skip'][0]),
'nonskip_tests_path': escaped_tg_name + '_nonskip.html',
'nonskip_tests_count': len(data_map['nonskip'][0]),
- 'fail_rate': 100 - int(data_map['passingrate'][0]),
- 'passing_rate': int(data_map['passingrate'][0]),
+ 'fail_rate': 100 - float(data_map['passingrate'][0]),
+ 'passing_rate': float(data_map['passingrate'][0]),
'rev_url': DEFAULT_REVISION_VIEW_URL % rev,
'rev': rev,
'rev_date': rev_date,
diff --git a/chromium/media/tools/layout_tests/test_expectations.py b/chromium/media/tools/layout_tests/test_expectations.py
index 58b21de6883..b68bc092987 100644
--- a/chromium/media/tools/layout_tests/test_expectations.py
+++ b/chromium/media/tools/layout_tests/test_expectations.py
@@ -97,27 +97,11 @@ class TestExpectations(object):
return None, None
test_expectation_info['Comments'] = parsed.comment or ''
-
- # Split the modifiers dictionary into the format we want.
- remaining_modifiers = list(parsed.modifiers)
- test_expectation_info['Bugs'] = []
- for m in parsed.modifiers:
- if (m.startswith(WEBKIT_BUG_PREFIX) or
- m.startswith(CHROMIUM_BUG_PREFIX) or
- m.startswith(V8_BUG_PREFIX) or
- m.startswith(NAMED_BUG_PREFIX)):
- test_expectation_info['Bugs'].append(m)
- remaining_modifiers.remove(m)
- elif m in KNOWN_TE_KEYWORDS:
- test_expectation_info[m] = True
- remaining_modifiers.remove(m)
-
- # The modifiers left over should all be platform names.
- test_expectation_info['Platforms'] = list(remaining_modifiers)
-
+ test_expectation_info['Bugs'] = parsed.bugs or []
+ test_expectation_info['Platforms'] = parsed.specifiers or []
# Shovel the expectations and modifiers in as "<key>: True" entries. Ugly,
# but required by the rest of the pipeline for parsing.
- for m in parsed.expectations + remaining_modifiers:
+ for m in parsed.expectations:
test_expectation_info[m] = True
return parsed.name, test_expectation_info
diff --git a/chromium/media/tools/player_x11/data_source_logger.cc b/chromium/media/tools/player_x11/data_source_logger.cc
index 0bc5ded9775..204d8b30b29 100644
--- a/chromium/media/tools/player_x11/data_source_logger.cc
+++ b/chromium/media/tools/player_x11/data_source_logger.cc
@@ -25,11 +25,6 @@ DataSourceLogger::DataSourceLogger(
streaming_(streaming) {
}
-void DataSourceLogger::set_host(media::DataSourceHost* host) {
- VLOG(1) << "set_host(" << host << ")";
- data_source_->set_host(host);
-}
-
void DataSourceLogger::Stop(const base::Closure& closure) {
VLOG(1) << "Stop() started";
data_source_->Stop(base::Bind(&LogAndRunStopClosure, closure));
diff --git a/chromium/media/tools/player_x11/data_source_logger.h b/chromium/media/tools/player_x11/data_source_logger.h
index c48d413d5ea..5fdd9d41162 100644
--- a/chromium/media/tools/player_x11/data_source_logger.h
+++ b/chromium/media/tools/player_x11/data_source_logger.h
@@ -22,7 +22,6 @@ class DataSourceLogger : public media::DataSource {
virtual ~DataSourceLogger();
// media::DataSource implementation.
- virtual void set_host(media::DataSourceHost* host) OVERRIDE;
virtual void Stop(const base::Closure& closure) OVERRIDE;
virtual void Read(
int64 position, int size, uint8* data,
diff --git a/chromium/media/tools/player_x11/gl_video_renderer.cc b/chromium/media/tools/player_x11/gl_video_renderer.cc
index 3966d81afaf..5f233c47a9d 100644
--- a/chromium/media/tools/player_x11/gl_video_renderer.cc
+++ b/chromium/media/tools/player_x11/gl_video_renderer.cc
@@ -11,7 +11,7 @@
#include "media/base/buffers.h"
#include "media/base/video_frame.h"
#include "media/base/yuv_convert.h"
-#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_surface.h"
enum { kNumYUVPlanes = 3 };
@@ -20,8 +20,8 @@ static GLXContext InitGLContext(Display* display, Window window) {
// dlopen/dlsym, and so linking it into chrome breaks it. So we dynamically
// load it, and use glew to dynamically resolve symbols.
// See http://code.google.com/p/chromium/issues/detail?id=16800
- if (!InitializeGLBindings(gfx::kGLImplementationDesktopGL)) {
- LOG(ERROR) << "InitializeGLBindings failed";
+ if (!gfx::GLSurface::InitializeOneOff()) {
+ LOG(ERROR) << "GLSurface::InitializeOneOff failed";
return NULL;
}
@@ -111,12 +111,14 @@ GlVideoRenderer::~GlVideoRenderer() {
glXDestroyContext(display_, gl_context_);
}
-void GlVideoRenderer::Paint(media::VideoFrame* video_frame) {
+void GlVideoRenderer::Paint(
+ const scoped_refptr<media::VideoFrame>& video_frame) {
if (!gl_context_)
Initialize(video_frame->coded_size(), video_frame->visible_rect());
// Convert YUV frame to RGB.
DCHECK(video_frame->format() == media::VideoFrame::YV12 ||
+ video_frame->format() == media::VideoFrame::I420 ||
video_frame->format() == media::VideoFrame::YV16);
DCHECK(video_frame->stride(media::VideoFrame::kUPlane) ==
video_frame->stride(media::VideoFrame::kVPlane));
diff --git a/chromium/media/tools/player_x11/gl_video_renderer.h b/chromium/media/tools/player_x11/gl_video_renderer.h
index 986a51c4799..c9f68efdeca 100644
--- a/chromium/media/tools/player_x11/gl_video_renderer.h
+++ b/chromium/media/tools/player_x11/gl_video_renderer.h
@@ -19,7 +19,7 @@ class GlVideoRenderer : public base::RefCountedThreadSafe<GlVideoRenderer> {
public:
GlVideoRenderer(Display* display, Window window);
- void Paint(media::VideoFrame* video_frame);
+ void Paint(const scoped_refptr<media::VideoFrame>& video_frame);
private:
friend class base::RefCountedThreadSafe<GlVideoRenderer>;
diff --git a/chromium/media/tools/player_x11/player_x11.cc b/chromium/media/tools/player_x11/player_x11.cc
index fe3beec5a8b..b691743d908 100644
--- a/chromium/media/tools/player_x11/player_x11.cc
+++ b/chromium/media/tools/player_x11/player_x11.cc
@@ -16,6 +16,8 @@
#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
#include "media/audio/null_audio_sink.h"
+#include "media/base/audio_hardware_config.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/decryptor.h"
#include "media/base/filter_collection.h"
#include "media/base/media.h"
@@ -78,22 +80,11 @@ bool InitX11() {
return true;
}
-void SetOpaque(bool /*opaque*/) {
-}
+static void DoNothing() {}
-typedef base::Callback<void(media::VideoFrame*)> PaintCB;
-void Paint(base::MessageLoop* message_loop, const PaintCB& paint_cb,
- const scoped_refptr<media::VideoFrame>& video_frame) {
- if (message_loop != base::MessageLoop::current()) {
- message_loop->PostTask(FROM_HERE, base::Bind(
- &Paint, message_loop, paint_cb, video_frame));
- return;
- }
+static void OnStatus(media::PipelineStatus status) {}
- paint_cb.Run(video_frame.get());
-}
-
-static void OnBufferingState(media::Pipeline::BufferingState buffering_state) {}
+static void OnMetadata(media::PipelineMetadata metadata) {}
static void NeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
@@ -108,44 +99,55 @@ static void SaveStatusAndSignal(base::WaitableEvent* event,
}
// TODO(vrk): Re-enable audio. (crbug.com/112159)
-void InitPipeline(media::Pipeline* pipeline,
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- media::Demuxer* demuxer,
- const PaintCB& paint_cb,
- bool /* enable_audio */,
- base::MessageLoop* paint_message_loop) {
+void InitPipeline(
+ media::Pipeline* pipeline,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ media::Demuxer* demuxer,
+ const media::VideoRendererImpl::PaintCB& paint_cb,
+ bool /* enable_audio */) {
// Create our filter factories.
scoped_ptr<media::FilterCollection> collection(
new media::FilterCollection());
collection->SetDemuxer(demuxer);
ScopedVector<media::VideoDecoder> video_decoders;
- video_decoders.push_back(new media::FFmpegVideoDecoder(message_loop));
+ video_decoders.push_back(new media::FFmpegVideoDecoder(task_runner));
scoped_ptr<media::VideoRenderer> video_renderer(new media::VideoRendererImpl(
- message_loop,
+ task_runner,
video_decoders.Pass(),
media::SetDecryptorReadyCB(),
- base::Bind(&Paint, paint_message_loop, paint_cb),
- base::Bind(&SetOpaque),
+ paint_cb,
true));
collection->SetVideoRenderer(video_renderer.Pass());
ScopedVector<media::AudioDecoder> audio_decoders;
- audio_decoders.push_back(new media::FFmpegAudioDecoder(message_loop));
- scoped_ptr<media::AudioRenderer> audio_renderer(new media::AudioRendererImpl(
- message_loop,
- new media::NullAudioSink(message_loop),
- audio_decoders.Pass(),
- media::SetDecryptorReadyCB()));
+ audio_decoders.push_back(new media::FFmpegAudioDecoder(task_runner,
+ media::LogCB()));
+ media::AudioParameters out_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO,
+ 44100,
+ 16,
+ 512);
+ media::AudioHardwareConfig hardware_config(out_params, out_params);
+
+ scoped_ptr<media::AudioRenderer> audio_renderer(
+ new media::AudioRendererImpl(task_runner,
+ new media::NullAudioSink(task_runner),
+ audio_decoders.Pass(),
+ media::SetDecryptorReadyCB(),
+ &hardware_config));
+
collection->SetAudioRenderer(audio_renderer.Pass());
base::WaitableEvent event(true, false);
media::PipelineStatus status;
pipeline->Start(
- collection.Pass(), base::Closure(), media::PipelineStatusCB(),
+ collection.Pass(), base::Bind(&DoNothing), base::Bind(&OnStatus),
base::Bind(&SaveStatusAndSignal, &event, &status),
- base::Bind(&OnBufferingState), base::Closure());
+ base::Bind(&OnMetadata), base::Bind(&DoNothing),
+ base::Bind(&DoNothing));
// Wait until the pipeline is fully initialized.
event.Wait();
@@ -161,8 +163,7 @@ void TerminateHandler(int signal) {
void PeriodicalUpdate(
media::Pipeline* pipeline,
- base::MessageLoop* message_loop,
- bool audio_only) {
+ base::MessageLoop* message_loop) {
if (!g_running) {
// An interrupt signal was received during the last time period.
// Quit message_loop only when pipeline is fully stopped.
@@ -190,7 +191,7 @@ void PeriodicalUpdate(
&border_width,
&depth);
base::TimeDelta time = pipeline->GetMediaDuration();
- pipeline->Seek(time*e.xbutton.x/width, media::PipelineStatusCB());
+ pipeline->Seek(time*e.xbutton.x/width, base::Bind(&OnStatus));
}
break;
case KeyPress:
@@ -218,8 +219,7 @@ void PeriodicalUpdate(
FROM_HERE,
base::Bind(&PeriodicalUpdate,
base::Unretained(pipeline),
- message_loop,
- audio_only),
+ message_loop),
base::TimeDelta::FromMilliseconds(10));
}
@@ -266,13 +266,13 @@ int main(int argc, char** argv) {
base::Thread media_thread("MediaThread");
media_thread.Start();
- PaintCB paint_cb;
+ media::VideoRendererImpl::PaintCB paint_cb;
if (command_line->HasSwitch("use-gl")) {
- paint_cb = base::Bind(
- &GlVideoRenderer::Paint, new GlVideoRenderer(g_display, g_window));
+ paint_cb = media::BindToCurrentLoop(base::Bind(
+ &GlVideoRenderer::Paint, new GlVideoRenderer(g_display, g_window)));
} else {
- paint_cb = base::Bind(
- &X11VideoRenderer::Paint, new X11VideoRenderer(g_display, g_window));
+ paint_cb = media::BindToCurrentLoop(base::Bind(
+ &X11VideoRenderer::Paint, new X11VideoRenderer(g_display, g_window)));
}
scoped_ptr<media::DataSource> data_source(new DataSourceLogger(
@@ -284,14 +284,13 @@ int main(int argc, char** argv) {
media::Pipeline pipeline(media_thread.message_loop_proxy(),
new media::MediaLog());
InitPipeline(&pipeline, media_thread.message_loop_proxy(), demuxer.get(),
- paint_cb, command_line->HasSwitch("audio"), &message_loop);
+ paint_cb, command_line->HasSwitch("audio"));
// Main loop of the application.
g_running = true;
message_loop.PostTask(FROM_HERE, base::Bind(
- &PeriodicalUpdate, base::Unretained(&pipeline), &message_loop,
- !pipeline.HasVideo()));
+ &PeriodicalUpdate, base::Unretained(&pipeline), &message_loop));
message_loop.Run();
// Cleanup tasks.
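
The BindToCurrentLoop() switch above replaces the hand-rolled Paint()
trampoline that posted back to the paint loop. A sketch of the pattern, with
|renderer| and |frame| standing in for the real objects:

    // Created on the main loop; GlVideoRenderer::Paint must run there.
    media::VideoRendererImpl::PaintCB paint_cb = media::BindToCurrentLoop(
        base::Bind(&GlVideoRenderer::Paint, renderer));
    // Safe to Run() from the media thread: the wrapper reposts the call to
    // whichever loop was current when BindToCurrentLoop() was invoked.
    paint_cb.Run(frame);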
diff --git a/chromium/media/tools/player_x11/x11_video_renderer.cc b/chromium/media/tools/player_x11/x11_video_renderer.cc
index 907e1abb5f6..2ae8e3b3a7f 100644
--- a/chromium/media/tools/player_x11/x11_video_renderer.cc
+++ b/chromium/media/tools/player_x11/x11_video_renderer.cc
@@ -83,7 +83,8 @@ X11VideoRenderer::~X11VideoRenderer() {
XRenderFreePicture(display_, picture_);
}
-void X11VideoRenderer::Paint(media::VideoFrame* video_frame) {
+void X11VideoRenderer::Paint(
+ const scoped_refptr<media::VideoFrame>& video_frame) {
if (!image_)
Initialize(video_frame->coded_size(), video_frame->visible_rect());
@@ -100,14 +101,16 @@ void X11VideoRenderer::Paint(media::VideoFrame* video_frame) {
// Convert YUV frame to RGB.
DCHECK(video_frame->format() == media::VideoFrame::YV12 ||
+ video_frame->format() == media::VideoFrame::I420 ||
video_frame->format() == media::VideoFrame::YV16);
DCHECK(video_frame->stride(media::VideoFrame::kUPlane) ==
video_frame->stride(media::VideoFrame::kVPlane));
DCHECK(image_->data);
- media::YUVType yuv_type =
- (video_frame->format() == media::VideoFrame::YV12) ?
- media::YV12 : media::YV16;
+ media::YUVType yuv_type = (video_frame->format() == media::VideoFrame::YV12 ||
+ video_frame->format() == media::VideoFrame::I420)
+ ? media::YV12
+ : media::YV16;
media::ConvertYUVToRGB32(video_frame->data(media::VideoFrame::kYPlane),
video_frame->data(media::VideoFrame::kUPlane),
video_frame->data(media::VideoFrame::kVPlane),
diff --git a/chromium/media/tools/player_x11/x11_video_renderer.h b/chromium/media/tools/player_x11/x11_video_renderer.h
index 3e4b41deadb..d6c093868c3 100644
--- a/chromium/media/tools/player_x11/x11_video_renderer.h
+++ b/chromium/media/tools/player_x11/x11_video_renderer.h
@@ -20,7 +20,7 @@ class X11VideoRenderer : public base::RefCountedThreadSafe<X11VideoRenderer> {
public:
X11VideoRenderer(Display* display, Window window);
- void Paint(media::VideoFrame* video_frame);
+ void Paint(const scoped_refptr<media::VideoFrame>& video_frame);
private:
friend class base::RefCountedThreadSafe<X11VideoRenderer>;
diff --git a/chromium/media/video/capture/android/imageformat_list.h b/chromium/media/video/capture/android/imageformat_list.h
index 57c5ba114f7..fe8cfb232b6 100644
--- a/chromium/media/video/capture/android/imageformat_list.h
+++ b/chromium/media/video/capture/android/imageformat_list.h
@@ -12,11 +12,7 @@
// Android graphics ImageFormat mapping, see reference in:
// http://developer.android.com/reference/android/graphics/ImageFormat.html
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_JPEG, 256)
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_NV16, 16)
DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_NV21, 17)
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_RGB_565, 4)
-DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_YUY2, 20)
DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_YV12, 842094169)
DEFINE_ANDROID_IMAGEFORMAT(ANDROID_IMAGEFORMAT_UNKNOWN, 0)
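
The surviving entries are consumed through an X-macro: each includer defines
DEFINE_ANDROID_IMAGEFORMAT before including this list, as
video_capture_device_android.h does below to build its enum.

    // X-macro expansion: every list entry becomes one enumerator.
    enum AndroidImageFormat {
    #define DEFINE_ANDROID_IMAGEFORMAT(name, value) name = value,
    #include "media/video/capture/android/imageformat_list.h"
    #undef DEFINE_ANDROID_IMAGEFORMAT
    };
    // Expands to ANDROID_IMAGEFORMAT_NV21 = 17, ANDROID_IMAGEFORMAT_YV12 =
    // 842094169 and ANDROID_IMAGEFORMAT_UNKNOWN = 0.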
diff --git a/chromium/media/video/capture/android/video_capture_device_android.cc b/chromium/media/video/capture/android/video_capture_device_android.cc
index adfa9a3455c..d445412ca8e 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.cc
+++ b/chromium/media/video/capture/android/video_capture_device_android.cc
@@ -7,12 +7,10 @@
#include <string>
#include "base/android/jni_android.h"
-#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/stringprintf.h"
#include "jni/VideoCapture_jni.h"
-#include "media/base/video_util.h"
+#include "media/video/capture/android/video_capture_device_factory_android.h"
using base::android::AttachCurrentThread;
using base::android::CheckException;
@@ -24,39 +22,8 @@ using base::android::ScopedJavaLocalRef;
namespace media {
// static
-void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
- device_names->clear();
-
- JNIEnv* env = AttachCurrentThread();
-
- int num_cameras = Java_ChromiumCameraInfo_getNumberOfCameras(env);
- DVLOG(1) << "VideoCaptureDevice::GetDeviceNames: num_cameras=" << num_cameras;
- if (num_cameras <= 0)
- return;
-
- for (int camera_id = num_cameras - 1; camera_id >= 0; --camera_id) {
- ScopedJavaLocalRef<jobject> ci =
- Java_ChromiumCameraInfo_getAt(env, camera_id);
-
- Name name(
- base::android::ConvertJavaStringToUTF8(
- Java_ChromiumCameraInfo_getDeviceName(env, ci.obj())),
- base::StringPrintf("%d", Java_ChromiumCameraInfo_getId(env, ci.obj())));
- device_names->push_back(name);
-
- DVLOG(1) << "VideoCaptureDevice::GetDeviceNames: camera device_name="
- << name.name()
- << ", unique_id="
- << name.id()
- << ", orientation "
- << Java_ChromiumCameraInfo_getOrientation(env, ci.obj());
- }
-}
-
-// static
-void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
- VideoCaptureFormats* formats) {
- NOTIMPLEMENTED();
+bool VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
+ return RegisterNativesImpl(env);
}
const std::string VideoCaptureDevice::Name::GetModel() const {
@@ -66,25 +33,6 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
return "";
}
-// static
-VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
- return VideoCaptureDeviceAndroid::Create(device_name);
-}
-
-// static
-VideoCaptureDevice* VideoCaptureDeviceAndroid::Create(const Name& device_name) {
- scoped_ptr<VideoCaptureDeviceAndroid> ret(
- new VideoCaptureDeviceAndroid(device_name));
- if (ret->Init())
- return ret.release();
- return NULL;
-}
-
-// static
-bool VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
- return RegisterNativesImpl(env);
-}
-
VideoCaptureDeviceAndroid::VideoCaptureDeviceAndroid(const Name& device_name)
: state_(kIdle), got_first_frame_(false), device_name_(device_name) {}
@@ -97,12 +45,8 @@ bool VideoCaptureDeviceAndroid::Init() {
if (!base::StringToInt(device_name_.id(), &id))
return false;
- JNIEnv* env = AttachCurrentThread();
-
- j_capture_.Reset(Java_VideoCapture_createVideoCapture(
- env, base::android::GetApplicationContext(), id,
- reinterpret_cast<intptr_t>(this)));
-
+ j_capture_.Reset(VideoCaptureDeviceFactoryAndroid::createVideoCaptureAndroid(
+ id, reinterpret_cast<intptr_t>(this)));
return true;
}
@@ -120,12 +64,12 @@ void VideoCaptureDeviceAndroid::AllocateAndStart(
JNIEnv* env = AttachCurrentThread();
- jboolean ret =
- Java_VideoCapture_allocate(env,
- j_capture_.obj(),
- params.requested_format.frame_size.width(),
- params.requested_format.frame_size.height(),
- params.requested_format.frame_rate);
+ jboolean ret = Java_VideoCapture_allocate(
+ env,
+ j_capture_.obj(),
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
if (!ret) {
SetErrorState("failed to allocate");
return;
@@ -220,11 +164,11 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
if (expected_next_frame_time_ <= current_time) {
expected_next_frame_time_ += frame_interval_;
- client_->OnIncomingCapturedFrame(reinterpret_cast<uint8*>(buffer),
- length,
- base::Time::Now(),
- rotation,
- capture_format_);
+ client_->OnIncomingCapturedData(reinterpret_cast<uint8*>(buffer),
+ length,
+ capture_format_,
+ rotation,
+ base::TimeTicks::Now());
}
env->ReleaseByteArrayElements(data, buffer, JNI_ABORT);
@@ -234,20 +178,14 @@ VideoPixelFormat VideoCaptureDeviceAndroid::GetColorspace() {
JNIEnv* env = AttachCurrentThread();
int current_capture_colorspace =
Java_VideoCapture_getColorspace(env, j_capture_.obj());
- switch (current_capture_colorspace){
- case ANDROID_IMAGEFORMAT_YV12:
- return media::PIXEL_FORMAT_YV12;
- case ANDROID_IMAGEFORMAT_NV21:
- return media::PIXEL_FORMAT_NV21;
- case ANDROID_IMAGEFORMAT_YUY2:
- return media::PIXEL_FORMAT_YUY2;
- case ANDROID_IMAGEFORMAT_NV16:
- case ANDROID_IMAGEFORMAT_JPEG:
- case ANDROID_IMAGEFORMAT_RGB_565:
- case ANDROID_IMAGEFORMAT_UNKNOWN:
- // NOTE(mcasas): NV16, JPEG, RGB565 not supported in VideoPixelFormat.
- default:
- return media::PIXEL_FORMAT_UNKNOWN;
+ switch (current_capture_colorspace) {
+ case ANDROID_IMAGEFORMAT_YV12:
+ return media::PIXEL_FORMAT_YV12;
+ case ANDROID_IMAGEFORMAT_NV21:
+ return media::PIXEL_FORMAT_NV21;
+ case ANDROID_IMAGEFORMAT_UNKNOWN:
+ default:
+ return media::PIXEL_FORMAT_UNKNOWN;
}
}
@@ -257,7 +195,7 @@ void VideoCaptureDeviceAndroid::SetErrorState(const std::string& reason) {
base::AutoLock lock(lock_);
state_ = kError;
}
- client_->OnError();
+ client_->OnError(reason);
}
} // namespace media
diff --git a/chromium/media/video/capture/android/video_capture_device_android.h b/chromium/media/video/capture/android/video_capture_device_android.h
index 635417af572..1a2e8b8b99e 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.h
+++ b/chromium/media/video/capture/android/video_capture_device_android.h
@@ -23,11 +23,24 @@ namespace media {
// but only VideoCaptureManager would change their value.
class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
public:
+ // Automatically generated enum to interface with Java world.
+ enum AndroidImageFormat {
+#define DEFINE_ANDROID_IMAGEFORMAT(name, value) name = value,
+#include "media/video/capture/android/imageformat_list.h"
+#undef DEFINE_ANDROID_IMAGEFORMAT
+ };
+
+ explicit VideoCaptureDeviceAndroid(const Name& device_name);
virtual ~VideoCaptureDeviceAndroid();
static VideoCaptureDevice* Create(const Name& device_name);
static bool RegisterVideoCaptureDevice(JNIEnv* env);
+ // Registers the Java VideoCaptureDevice pointer, used by the rest of the
+ // methods of the class to operate the Java capture code. This method must be
+ // called after the class constructor and before AllocateAndStart().
+ bool Init();
+
// VideoCaptureDevice implementation.
virtual void AllocateAndStart(const VideoCaptureParams& params,
scoped_ptr<Client> client) OVERRIDE;
@@ -48,15 +61,6 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
kError // Hit error. User needs to recover by destroying the object.
};
- // Automatically generated enum to interface with Java world.
- enum AndroidImageFormat {
-#define DEFINE_ANDROID_IMAGEFORMAT(name, value) name = value,
-#include "media/video/capture/android/imageformat_list.h"
-#undef DEFINE_ANDROID_IMAGEFORMAT
- };
-
- explicit VideoCaptureDeviceAndroid(const Name& device_name);
- bool Init();
VideoPixelFormat GetColorspace();
void SetErrorState(const std::string& reason);
@@ -73,7 +77,7 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
VideoCaptureFormat capture_format_;
// Java VideoCaptureAndroid instance.
- base::android::ScopedJavaGlobalRef<jobject> j_capture_;
+ base::android::ScopedJavaLocalRef<jobject> j_capture_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceAndroid);
};
diff --git a/chromium/media/video/capture/android/video_capture_device_factory_android.cc b/chromium/media/video/capture/android/video_capture_device_factory_android.cc
new file mode 100644
index 00000000000..99ed4926ad6
--- /dev/null
+++ b/chromium/media/video/capture/android/video_capture_device_factory_android.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/android/video_capture_device_factory_android.h"
+
+#include "base/android/jni_string.h"
+#include "base/android/scoped_java_ref.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "jni/VideoCaptureFactory_jni.h"
+#include "media/video/capture/android/video_capture_device_android.h"
+
+using base::android::AttachCurrentThread;
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
+// static
+bool VideoCaptureDeviceFactoryAndroid::RegisterVideoCaptureDeviceFactory(
+ JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+// static
+ScopedJavaLocalRef<jobject>
+VideoCaptureDeviceFactoryAndroid::createVideoCaptureAndroid(
+ int id,
+ jlong nativeVideoCaptureDeviceAndroid) {
+ return (Java_VideoCaptureFactory_createVideoCapture(
+ AttachCurrentThread(),
+ base::android::GetApplicationContext(),
+ id,
+ nativeVideoCaptureDeviceAndroid));
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryAndroid::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int id;
+ if (!base::StringToInt(device_name.id(), &id))
+ return scoped_ptr<VideoCaptureDevice>();
+
+ VideoCaptureDeviceAndroid* video_capture_device(
+ new VideoCaptureDeviceAndroid(device_name));
+
+ if (video_capture_device->Init())
+ return scoped_ptr<VideoCaptureDevice>(video_capture_device);
+
+ DLOG(ERROR) << "Error creating Video Capture Device.";
+ return scoped_ptr<VideoCaptureDevice>();
+}
+
+void VideoCaptureDeviceFactoryAndroid::GetDeviceNames(
+ VideoCaptureDevice::Names* device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ device_names->clear();
+
+ JNIEnv* env = AttachCurrentThread();
+
+ int num_cameras = Java_ChromiumCameraInfo_getNumberOfCameras(
+ env, base::android::GetApplicationContext());
+ DVLOG(1) << "VideoCaptureDevice::GetDeviceNames: num_cameras=" << num_cameras;
+ if (num_cameras <= 0)
+ return;
+
+ for (int camera_id = num_cameras - 1; camera_id >= 0; --camera_id) {
+ ScopedJavaLocalRef<jobject> ci =
+ Java_ChromiumCameraInfo_getAt(env, camera_id);
+
+ VideoCaptureDevice::Name name(
+ base::android::ConvertJavaStringToUTF8(
+ Java_ChromiumCameraInfo_getDeviceName(env, ci.obj())),
+ base::StringPrintf("%d", Java_ChromiumCameraInfo_getId(env, ci.obj())));
+ device_names->push_back(name);
+
+ DVLOG(1) << "VideoCaptureDeviceFactoryAndroid::GetDeviceNames: camera"
+ << "device_name=" << name.name() << ", unique_id=" << name.id()
+ << ", orientation "
+ << Java_ChromiumCameraInfo_getOrientation(env, ci.obj());
+ }
+}
+
+void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* capture_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int id;
+ if (!base::StringToInt(device.id(), &id))
+ return;
+ JNIEnv* env = AttachCurrentThread();
+ base::android::ScopedJavaLocalRef<jobjectArray> collected_formats =
+ Java_VideoCaptureFactory_getDeviceSupportedFormats(env, id);
+ if (collected_formats.is_null())
+ return;
+
+ jsize num_formats = env->GetArrayLength(collected_formats.obj());
+ for (int i = 0; i < num_formats; ++i) {
+ base::android::ScopedJavaLocalRef<jobject> format(
+ env, env->GetObjectArrayElement(collected_formats.obj(), i));
+
+ VideoPixelFormat pixel_format = media::PIXEL_FORMAT_UNKNOWN;
+ switch (media::Java_VideoCaptureFactory_getCaptureFormatPixelFormat(
+ env, format.obj())) {
+ case ANDROID_IMAGEFORMAT_YV12:
+ pixel_format = media::PIXEL_FORMAT_YV12;
+ break;
+ case ANDROID_IMAGEFORMAT_NV21:
+ pixel_format = media::PIXEL_FORMAT_NV21;
+ break;
+ default:
+ break;
+ }
+ VideoCaptureFormat capture_format(
+ gfx::Size(media::Java_VideoCaptureFactory_getCaptureFormatWidth(env,
+ format.obj()),
+ media::Java_VideoCaptureFactory_getCaptureFormatHeight(env,
+ format.obj())),
+ media::Java_VideoCaptureFactory_getCaptureFormatFramerate(env,
+ format.obj()),
+ pixel_format);
+ capture_formats->push_back(capture_format);
+ DVLOG(1) << device.name() << " resolution: "
+ << capture_format.frame_size.ToString() << ", fps: "
+ << capture_format.frame_rate << ", pixel format: "
+ << capture_format.pixel_format;
+ }
+}
+
+} // namespace media
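
A hedged usage sketch of the new factory path, assuming JNI registration has
already happened and skipping error handling:

    media::VideoCaptureDeviceFactoryAndroid factory;
    media::VideoCaptureDevice::Names names;
    factory.GetDeviceNames(&names);
    if (!names.empty()) {
      // Create() runs VideoCaptureDeviceAndroid::Init() internally and
      // returns an empty scoped_ptr on failure.
      scoped_ptr<media::VideoCaptureDevice> device(
          factory.Create(names.front()));
    }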
diff --git a/chromium/media/video/capture/android/video_capture_device_factory_android.h b/chromium/media/video/capture/android/video_capture_device_factory_android.h
new file mode 100644
index 00000000000..d87416fa7d5
--- /dev/null
+++ b/chromium/media/video/capture/android/video_capture_device_factory_android.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
+#define MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+#include <jni.h>
+
+#include "base/android/scoped_java_ref.h"
+#include "media/video/capture/video_capture_device.h"
+
+namespace media {
+
+// VideoCaptureDeviceFactory on Android. Implements the static JNI helpers
+// and creates VideoCaptureDeviceAndroid instances.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryAndroid :
+ public VideoCaptureDeviceFactory {
+ public:
+ // Automatically generated enum to interface with Java world.
+ enum AndroidImageFormat {
+#define DEFINE_ANDROID_IMAGEFORMAT(name, value) name = value,
+#include "media/video/capture/android/imageformat_list.h"
+#undef DEFINE_ANDROID_IMAGEFORMAT
+ };
+ static bool RegisterVideoCaptureDeviceFactory(JNIEnv* env);
+ static base::android::ScopedJavaLocalRef<jobject> createVideoCaptureAndroid(
+ int id,
+ jlong nativeVideoCaptureDeviceAndroid);
+
+ VideoCaptureDeviceFactoryAndroid() {}
+ virtual ~VideoCaptureDeviceFactoryAndroid() {}
+
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) OVERRIDE;
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) OVERRIDE;
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
diff --git a/chromium/media/video/capture/fake_video_capture_device.cc b/chromium/media/video/capture/fake_video_capture_device.cc
index a87514d4347..302396a22b8 100644
--- a/chromium/media/video/capture/fake_video_capture_device.cc
+++ b/chromium/media/video/capture/fake_video_capture_device.cc
@@ -10,83 +10,15 @@
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "media/audio/fake_audio_input_stream.h"
+#include "media/base/video_frame.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkPaint.h"
namespace media {
-static const int kFakeCaptureTimeoutMs = 50;
-static const int kFakeCaptureBeepCycle = 20; // Visual beep every 1s.
+static const int kFakeCaptureBeepCycle = 10; // Visual beep every 0.5s.
static const int kFakeCaptureCapabilityChangePeriod = 30;
-enum { kNumberOfFakeDevices = 2 };
-
-bool FakeVideoCaptureDevice::fail_next_create_ = false;
-base::subtle::Atomic32 FakeVideoCaptureDevice::number_of_devices_ =
- kNumberOfFakeDevices;
-
-// static
-size_t FakeVideoCaptureDevice::NumberOfFakeDevices(void) {
- return number_of_devices_;
-}
-
-// static
-void FakeVideoCaptureDevice::GetDeviceNames(Names* const device_names) {
- // Empty the name list.
- device_names->erase(device_names->begin(), device_names->end());
-
- int number_of_devices = base::subtle::NoBarrier_Load(&number_of_devices_);
- for (int32 n = 0; n < number_of_devices; n++) {
- Name name(base::StringPrintf("fake_device_%d", n),
- base::StringPrintf("/dev/video%d", n));
- device_names->push_back(name);
- }
-}
-
-// static
-void FakeVideoCaptureDevice::GetDeviceSupportedFormats(
- const Name& device,
- VideoCaptureFormats* supported_formats) {
-
- supported_formats->clear();
- VideoCaptureFormat capture_format_640x480;
- capture_format_640x480.pixel_format = media::PIXEL_FORMAT_I420;
- capture_format_640x480.frame_size.SetSize(640, 480);
- capture_format_640x480.frame_rate = 1000 / kFakeCaptureTimeoutMs;
- supported_formats->push_back(capture_format_640x480);
- VideoCaptureFormat capture_format_320x240;
- capture_format_320x240.pixel_format = media::PIXEL_FORMAT_I420;
- capture_format_320x240.frame_size.SetSize(320, 240);
- capture_format_320x240.frame_rate = 1000 / kFakeCaptureTimeoutMs;
- supported_formats->push_back(capture_format_320x240);
-}
-
-// static
-VideoCaptureDevice* FakeVideoCaptureDevice::Create(const Name& device_name) {
- if (fail_next_create_) {
- fail_next_create_ = false;
- return NULL;
- }
- int number_of_devices = base::subtle::NoBarrier_Load(&number_of_devices_);
- for (int32 n = 0; n < number_of_devices; ++n) {
- std::string possible_id = base::StringPrintf("/dev/video%d", n);
- if (device_name.id().compare(possible_id) == 0) {
- return new FakeVideoCaptureDevice();
- }
- }
- return NULL;
-}
-
-// static
-void FakeVideoCaptureDevice::SetFailNextCreate() {
- fail_next_create_ = true;
-}
-
-// static
-void FakeVideoCaptureDevice::SetNumberOfFakeDevices(size_t number_of_devices) {
- base::subtle::NoBarrier_AtomicExchange(&number_of_devices_,
- number_of_devices);
-}
FakeVideoCaptureDevice::FakeVideoCaptureDevice()
: capture_thread_("CaptureThread"),
@@ -123,19 +55,32 @@ void FakeVideoCaptureDevice::StopAndDeAllocate() {
capture_thread_.Stop();
}
+void FakeVideoCaptureDevice::PopulateVariableFormatsRoster(
+ const VideoCaptureFormats& formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!capture_thread_.IsRunning());
+ format_roster_ = formats;
+ format_roster_index_ = 0;
+}
+
void FakeVideoCaptureDevice::OnAllocateAndStart(
const VideoCaptureParams& params,
scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
client_ = client.Pass();
- capture_format_.pixel_format = PIXEL_FORMAT_I420;
+
+ // The incoming |params| may match none of the supported formats, so we
+ // round up to the closest one. TODO(mcasas): Use |params| directly when it
+ // is one of the supported formats, once http://crbug.com/309554 is verified.
+ DCHECK_EQ(params.requested_format.pixel_format, PIXEL_FORMAT_I420);
+ capture_format_.pixel_format = params.requested_format.pixel_format;
capture_format_.frame_rate = 30;
- if (params.requested_format.frame_size.width() > 320)
+ if (params.requested_format.frame_size.width() > 640)
+ capture_format_.frame_size.SetSize(1280, 720);
+ else if (params.requested_format.frame_size.width() > 320)
capture_format_.frame_size.SetSize(640, 480);
else
capture_format_.frame_size.SetSize(320, 240);
- if (params.allow_resolution_change)
- PopulateFormatRoster();
const size_t fake_frame_size =
VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
fake_frame_.reset(new uint8[fake_frame_size]);
@@ -165,13 +110,11 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
capture_format_.frame_size.height(),
capture_format_.frame_size.width()),
bitmap.setPixels(fake_frame_.get());
-
SkCanvas canvas(bitmap);
// Draw a sweeping circle to show an animation.
int radius = std::min(capture_format_.frame_size.width(),
- capture_format_.frame_size.height()) /
- 4;
+ capture_format_.frame_size.height()) / 4;
SkRect rect =
SkRect::MakeXYWH(capture_format_.frame_size.width() / 2 - radius,
capture_format_.frame_size.height() / 2 - radius,
@@ -214,11 +157,11 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
frame_count_++;
// Give the captured frame to the client.
- client_->OnIncomingCapturedFrame(fake_frame_.get(),
- frame_size,
- base::Time::Now(),
- 0,
- capture_format_);
+ client_->OnIncomingCapturedData(fake_frame_.get(),
+ frame_size,
+ capture_format_,
+ 0,
+ base::TimeTicks::Now());
if (!(frame_count_ % kFakeCaptureCapabilityChangePeriod) &&
format_roster_.size() > 0U) {
Reallocate();
@@ -244,16 +187,4 @@ void FakeVideoCaptureDevice::Reallocate() {
fake_frame_.reset(new uint8[fake_frame_size]);
}
-void FakeVideoCaptureDevice::PopulateFormatRoster() {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- format_roster_.push_back(
- media::VideoCaptureFormat(gfx::Size(320, 240), 30, PIXEL_FORMAT_I420));
- format_roster_.push_back(
- media::VideoCaptureFormat(gfx::Size(640, 480), 30, PIXEL_FORMAT_I420));
- format_roster_.push_back(
- media::VideoCaptureFormat(gfx::Size(800, 600), 30, PIXEL_FORMAT_I420));
-
- format_roster_index_ = 0;
-}
-
} // namespace media
diff --git a/chromium/media/video/capture/fake_video_capture_device.h b/chromium/media/video/capture/fake_video_capture_device.h
index 399a68268fb..96264e86aa3 100644
--- a/chromium/media/video/capture/fake_video_capture_device.h
+++ b/chromium/media/video/capture/fake_video_capture_device.h
@@ -20,34 +20,29 @@ namespace media {
class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
public:
- static VideoCaptureDevice* Create(const Name& device_name);
- virtual ~FakeVideoCaptureDevice();
- // Used for testing. This will make sure the next call to Create will
- // return NULL;
- static void SetFailNextCreate();
- static void SetNumberOfFakeDevices(size_t number_of_devices);
- static size_t NumberOfFakeDevices();
+ static const int kFakeCaptureTimeoutMs = 50;
- static void GetDeviceNames(Names* device_names);
- static void GetDeviceSupportedFormats(const Name& device,
- VideoCaptureFormats* supported_formats);
+ FakeVideoCaptureDevice();
+ virtual ~FakeVideoCaptureDevice();
// VideoCaptureDevice implementation.
- virtual void AllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client)
- OVERRIDE;
+ virtual void AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) OVERRIDE;
virtual void StopAndDeAllocate() OVERRIDE;
- private:
- FakeVideoCaptureDevice();
+ // Sets the formats to use sequentially when the device is configured as
+ // variable capture resolution. Works only before AllocateAndStart() or
+ // after StopAndDeallocate().
+ void PopulateVariableFormatsRoster(const VideoCaptureFormats& formats);
+ private:
// Called on the |capture_thread_| only.
void OnAllocateAndStart(const VideoCaptureParams& params,
scoped_ptr<Client> client);
void OnStopAndDeAllocate();
void OnCaptureTask();
void Reallocate();
- void PopulateFormatRoster();
// |thread_checker_| is used to check that destructor, AllocateAndStart() and
// StopAndDeAllocate() are called in the correct thread that owns the object.
@@ -61,18 +56,12 @@ class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
VideoCaptureFormat capture_format_;
// When the device is allowed to change resolution, this vector holds the
- // available ones which are used in sequence, restarting at the end. These
- // two members belong to and are only used in |capture_thread_|.
+ // available ones, used sequentially restarting at the end. These two members
+ // are initialised in PopulateVariableFormatsRoster() before
+ // |capture_thread_| is running and are subsequently read-only in that thread.
std::vector<VideoCaptureFormat> format_roster_;
int format_roster_index_;
- static bool fail_next_create_;
- // |number_of_devices_| is atomic since tests can call SetNumberOfFakeDevices
- // on the IO thread to set |number_of_devices_|. The variable can be
- // read from a separate thread.
- // TODO(perkj): Make tests independent of global state. crbug/323913
- static base::subtle::Atomic32 number_of_devices_;
-
DISALLOW_COPY_AND_ASSIGN(FakeVideoCaptureDevice);
};
diff --git a/chromium/media/video/capture/fake_video_capture_device_factory.cc b/chromium/media/video/capture/fake_video_capture_device_factory.cc
new file mode 100644
index 00000000000..5183dc0e9fa
--- /dev/null
+++ b/chromium/media/video/capture/fake_video_capture_device_factory.cc
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/fake_video_capture_device_factory.h"
+
+#include "base/strings/stringprintf.h"
+#include "media/video/capture/fake_video_capture_device.h"
+
+namespace media {
+
+FakeVideoCaptureDeviceFactory::FakeVideoCaptureDeviceFactory()
+ : number_of_devices_(1) {
+}
+
+scoped_ptr<VideoCaptureDevice> FakeVideoCaptureDeviceFactory::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ for (int n = 0; n < number_of_devices_; ++n) {
+ std::string possible_id = base::StringPrintf("/dev/video%d", n);
+ if (device_name.id().compare(possible_id) == 0)
+ return scoped_ptr<VideoCaptureDevice>(new FakeVideoCaptureDevice());
+ }
+ return scoped_ptr<VideoCaptureDevice>();
+}
+
+void FakeVideoCaptureDeviceFactory::GetDeviceNames(
+ VideoCaptureDevice::Names* const device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(device_names->empty());
+ for (int n = 0; n < number_of_devices_; ++n) {
+ VideoCaptureDevice::Name name(base::StringPrintf("fake_device_%d", n),
+ base::StringPrintf("/dev/video%d", n));
+ device_names->push_back(name);
+ }
+}
+
+void FakeVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const int frame_rate = 1000 / FakeVideoCaptureDevice::kFakeCaptureTimeoutMs;
+ const gfx::Size supported_sizes[] = {gfx::Size(320, 240),
+ gfx::Size(640, 480),
+ gfx::Size(1280, 720)};
+ supported_formats->clear();
+ for (size_t i = 0; i < arraysize(supported_sizes); ++i) {
+ supported_formats->push_back(VideoCaptureFormat(supported_sizes[i],
+ frame_rate,
+ media::PIXEL_FORMAT_I420));
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/fake_video_capture_device_factory.h b/chromium/media/video/capture/fake_video_capture_device_factory.h
new file mode 100644
index 00000000000..5c7312bc398
--- /dev/null
+++ b/chromium/media/video/capture/fake_video_capture_device_factory.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a fake VideoCaptureDeviceFactory class.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+#define MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate fake devices,
+// not including file-based ones.
+class MEDIA_EXPORT FakeVideoCaptureDeviceFactory :
+ public VideoCaptureDeviceFactory {
+ public:
+ FakeVideoCaptureDeviceFactory();
+ virtual ~FakeVideoCaptureDeviceFactory() {}
+
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) OVERRIDE;
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) OVERRIDE;
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) OVERRIDE;
+
+ void set_number_of_devices(int number_of_devices) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ number_of_devices_ = number_of_devices;
+ }
+ int number_of_devices() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return number_of_devices_;
+ }
+
+ private:
+ int number_of_devices_;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
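
Tests that used the removed static
FakeVideoCaptureDevice::SetNumberOfFakeDevices() now size the fake through the
factory instance; a minimal sketch:

    media::FakeVideoCaptureDeviceFactory factory;
    factory.set_number_of_devices(3);
    media::VideoCaptureDevice::Names names;
    factory.GetDeviceNames(&names);  // fake_device_0 .. fake_device_2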
diff --git a/chromium/media/video/capture/fake_video_capture_device_unittest.cc b/chromium/media/video/capture/fake_video_capture_device_unittest.cc
new file mode 100644
index 00000000000..a2f0d165c0a
--- /dev/null
+++ b/chromium/media/video/capture/fake_video_capture_device_unittest.cc
@@ -0,0 +1,208 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/run_loop.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread.h"
+#include "media/video/capture/fake_video_capture_device.h"
+#include "media/video/capture/fake_video_capture_device_factory.h"
+#include "media/video/capture/video_capture_device.h"
+#include "media/video/capture/video_capture_types.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::SaveArg;
+
+namespace media {
+
+class MockClient : public media::VideoCaptureDevice::Client {
+ public:
+ MOCK_METHOD2(ReserveOutputBuffer,
+ scoped_refptr<Buffer>(media::VideoFrame::Format format,
+ const gfx::Size& dimensions));
+ MOCK_METHOD0(OnErr, void());
+
+ explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
+ : main_thread_(base::MessageLoopProxy::current()), frame_cb_(frame_cb) {}
+
+ virtual void OnError(const std::string& error_message) OVERRIDE {
+ OnErr();
+ }
+
+ virtual void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& format,
+ int rotation,
+ base::TimeTicks timestamp) OVERRIDE {
+ main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
+ }
+
+ virtual void OnIncomingCapturedVideoFrame(
+ const scoped_refptr<Buffer>& buffer,
+ const media::VideoCaptureFormat& buffer_format,
+ const scoped_refptr<media::VideoFrame>& frame,
+ base::TimeTicks timestamp) OVERRIDE {
+ NOTREACHED();
+ }
+
+ private:
+ scoped_refptr<base::SingleThreadTaskRunner> main_thread_;
+ base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
+};
+
+class DeviceEnumerationListener :
+ public base::RefCounted<DeviceEnumerationListener> {
+ public:
+ MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
+ void(media::VideoCaptureDevice::Names* names));
+ // GMock doesn't support move-only arguments, so we use this forward method.
+ void OnEnumeratedDevicesCallback(
+ scoped_ptr<media::VideoCaptureDevice::Names> names) {
+ OnEnumeratedDevicesCallbackPtr(names.release());
+ }
+
+ private:
+ friend class base::RefCounted<DeviceEnumerationListener>;
+ virtual ~DeviceEnumerationListener() {}
+};
+
+class FakeVideoCaptureDeviceTest : public testing::Test {
+ protected:
+ typedef media::VideoCaptureDevice::Client Client;
+
+ FakeVideoCaptureDeviceTest()
+ : loop_(new base::MessageLoop()),
+ client_(new MockClient(
+ base::Bind(&FakeVideoCaptureDeviceTest::OnFrameCaptured,
+ base::Unretained(this)))),
+ video_capture_device_factory_(new FakeVideoCaptureDeviceFactory()) {
+ device_enumeration_listener_ = new DeviceEnumerationListener();
+ }
+
+ virtual void SetUp() {
+ }
+
+ void OnFrameCaptured(const VideoCaptureFormat& format) {
+ last_format_ = format;
+ run_loop_->QuitClosure().Run();
+ }
+
+ void WaitForCapturedFrame() {
+ run_loop_.reset(new base::RunLoop());
+ run_loop_->Run();
+ }
+
+ scoped_ptr<media::VideoCaptureDevice::Names> EnumerateDevices() {
+ media::VideoCaptureDevice::Names* names;
+ EXPECT_CALL(*device_enumeration_listener_,
+ OnEnumeratedDevicesCallbackPtr(_)).WillOnce(SaveArg<0>(&names));
+
+ video_capture_device_factory_->EnumerateDeviceNames(
+ base::Bind(&DeviceEnumerationListener::OnEnumeratedDevicesCallback,
+ device_enumeration_listener_));
+ base::MessageLoop::current()->RunUntilIdle();
+ return scoped_ptr<media::VideoCaptureDevice::Names>(names);
+ }
+
+ const VideoCaptureFormat& last_format() const { return last_format_; }
+
+ VideoCaptureDevice::Names names_;
+ scoped_ptr<base::MessageLoop> loop_;
+ scoped_ptr<base::RunLoop> run_loop_;
+ scoped_ptr<MockClient> client_;
+ scoped_refptr<DeviceEnumerationListener> device_enumeration_listener_;
+ VideoCaptureFormat last_format_;
+ scoped_ptr<VideoCaptureDeviceFactory> video_capture_device_factory_;
+};
+
+TEST_F(FakeVideoCaptureDeviceTest, Capture) {
+ scoped_ptr<media::VideoCaptureDevice::Names> names(EnumerateDevices());
+
+ ASSERT_GT(static_cast<int>(names->size()), 0);
+
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names->front()));
+ ASSERT_TRUE(device);
+
+ EXPECT_CALL(*client_, OnErr()).Times(0);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+ WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30);
+ device->StopAndDeAllocate();
+}
+
+TEST_F(FakeVideoCaptureDeviceTest, GetDeviceSupportedFormats) {
+ scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
+
+ VideoCaptureFormats supported_formats;
+ VideoCaptureDevice::Names::iterator names_iterator;
+
+ for (names_iterator = names->begin(); names_iterator != names->end();
+ ++names_iterator) {
+ video_capture_device_factory_->GetDeviceSupportedFormats(
+ *names_iterator, &supported_formats);
+ EXPECT_EQ(supported_formats.size(), 3u);
+ EXPECT_EQ(supported_formats[0].frame_size.width(), 320);
+ EXPECT_EQ(supported_formats[0].frame_size.height(), 240);
+ EXPECT_EQ(supported_formats[0].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[0].frame_rate, 20);
+ EXPECT_EQ(supported_formats[1].frame_size.width(), 640);
+ EXPECT_EQ(supported_formats[1].frame_size.height(), 480);
+ EXPECT_EQ(supported_formats[1].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[1].frame_rate, 20);
+ EXPECT_EQ(supported_formats[2].frame_size.width(), 1280);
+ EXPECT_EQ(supported_formats[2].frame_size.height(), 720);
+ EXPECT_EQ(supported_formats[2].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[2].frame_rate, 20);
+ }
+}
+
+TEST_F(FakeVideoCaptureDeviceTest, CaptureVariableResolution) {
+ scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = true;
+
+ ASSERT_GT(static_cast<int>(names->size()), 0);
+
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names->front()));
+ ASSERT_TRUE(device);
+
+ // Configure the FakeVideoCaptureDevice to use all its formats as roster.
+ VideoCaptureFormats formats;
+ video_capture_device_factory_->GetDeviceSupportedFormats(names->front(),
+ &formats);
+ static_cast<FakeVideoCaptureDevice*>(device.get())->
+ PopulateVariableFormatsRoster(formats);
+
+ EXPECT_CALL(*client_, OnErr())
+ .Times(0);
+ int action_count = 200;
+
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+
+ // Waiting for |action_count| captured frames should cover at least
+ // action_count / kFakeCaptureCapabilityChangePeriod format changes.
+ for (int i = 0; i < action_count; ++i) {
+ WaitForCapturedFrame();
+ }
+ device->StopAndDeAllocate();
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.cc b/chromium/media/video/capture/file_video_capture_device.cc
index 6f118d29e38..84a2d156000 100644
--- a/chromium/media/video/capture/file_video_capture_device.cc
+++ b/chromium/media/video/capture/file_video_capture_device.cc
@@ -7,25 +7,18 @@
#include <string>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
-#include "base/strings/sys_string_conversions.h"
-#include "media/base/media_switches.h"
-
namespace media {
-static const char kFileVideoCaptureDeviceName[] =
- "/dev/placeholder-for-file-backed-fake-capture-device";
-
static const int kY4MHeaderMaxSize = 200;
static const char kY4MSimpleFrameDelimiter[] = "FRAME";
static const int kY4MSimpleFrameDelimiterSize = 6;
int ParseY4MInt(const base::StringPiece& token) {
int temp_int;
- CHECK(base::StringToInt(token, &temp_int));
+ CHECK(base::StringToInt(token, &temp_int)) << token;
return temp_int;
}
@@ -91,7 +84,8 @@ void ParseY4MTags(const std::string& file_header,
// Pixel aspect ratio ignored.
break;
case 'C':
- CHECK_EQ(ParseY4MInt(token), 420); // Only I420 supported.
+ CHECK(token == "420" || token == "420jpeg" || token == "420paldv")
+ << token; // Only I420 is supported, and we fudge the variants.
break;
default:
break;
@@ -109,11 +103,12 @@ void ParseY4MTags(const std::string& file_header,
// format in |video_format|. Returns the index of the first byte of the first
// video frame.
// Restrictions: Only trivial per-frame headers are supported.
-int64 ParseFileAndExtractVideoFormat(
- const base::PlatformFile& file,
+// static
+int64 FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(
+ base::File* file,
media::VideoCaptureFormat* video_format) {
std::string header(kY4MHeaderMaxSize, 0);
- base::ReadPlatformFile(file, 0, &header[0], kY4MHeaderMaxSize - 1);
+ file->Read(0, &header[0], kY4MHeaderMaxSize - 1);
size_t header_end = header.find(kY4MSimpleFrameDelimiter);
CHECK_NE(header_end, header.npos);
@@ -124,63 +119,17 @@ int64 ParseFileAndExtractVideoFormat(
// Opens a given file for reading, and returns the file to the caller, who is
// responsible for closing it.
-base::PlatformFile OpenFileForRead(const base::FilePath& file_path) {
- base::PlatformFileError file_error;
- base::PlatformFile file = base::CreatePlatformFile(
- file_path,
- base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ,
- NULL,
- &file_error);
- CHECK_EQ(file_error, base::PLATFORM_FILE_OK);
- return file;
-}
-
-// Inspects the command line and retrieves the file path parameter.
-base::FilePath GetFilePathFromCommandLine() {
- base::FilePath command_line_file_path =
- CommandLine::ForCurrentProcess()->GetSwitchValuePath(
- switches::kUseFileForFakeVideoCapture);
- CHECK(!command_line_file_path.empty());
- return command_line_file_path;
-}
-
-void FileVideoCaptureDevice::GetDeviceNames(Names* const device_names) {
- DCHECK(device_names->empty());
- base::FilePath command_line_file_path = GetFilePathFromCommandLine();
-#if defined(OS_WIN)
- device_names->push_back(
- Name(base::SysWideToUTF8(command_line_file_path.value()),
- kFileVideoCaptureDeviceName));
-#else
- device_names->push_back(Name(command_line_file_path.value(),
- kFileVideoCaptureDeviceName));
-#endif // OS_WIN
-}
-
-void FileVideoCaptureDevice::GetDeviceSupportedFormats(
- const Name& device,
- VideoCaptureFormats* supported_formats) {
- base::PlatformFile file = OpenFileForRead(GetFilePathFromCommandLine());
- VideoCaptureFormat capture_format;
- ParseFileAndExtractVideoFormat(file, &capture_format);
- supported_formats->push_back(capture_format);
-
- CHECK(base::ClosePlatformFile(file));
-}
-
-VideoCaptureDevice* FileVideoCaptureDevice::Create(const Name& device_name) {
-#if defined(OS_WIN)
- return new FileVideoCaptureDevice(
- base::FilePath(base::SysUTF8ToWide(device_name.name())));
-#else
- return new FileVideoCaptureDevice(base::FilePath(device_name.name()));
-#endif // OS_WIN
+// static
+base::File FileVideoCaptureDevice::OpenFileForRead(
+ const base::FilePath& file_path) {
+ base::File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+ CHECK(file.IsValid()) << file_path.value();
+ return file.Pass();
}
FileVideoCaptureDevice::FileVideoCaptureDevice(const base::FilePath& file_path)
: capture_thread_("CaptureThread"),
file_path_(file_path),
- file_(base::kInvalidPlatformFileValue),
frame_size_(0),
current_byte_index_(0),
first_frame_byte_index_(0) {}
@@ -232,10 +181,10 @@ void FileVideoCaptureDevice::OnAllocateAndStart(
client_ = client.Pass();
// Open the file and parse the header. Get frame size and format.
- DCHECK_EQ(file_, base::kInvalidPlatformFileValue);
+ DCHECK(!file_.IsValid());
file_ = OpenFileForRead(file_path_);
first_frame_byte_index_ =
- ParseFileAndExtractVideoFormat(file_, &capture_format_);
+ ParseFileAndExtractVideoFormat(&file_, &capture_format_);
current_byte_index_ = first_frame_byte_index_;
DVLOG(1) << "Opened video file " << capture_format_.frame_size.ToString()
<< ", fps: " << capture_format_.frame_rate;
@@ -251,7 +200,7 @@ void FileVideoCaptureDevice::OnAllocateAndStart(
void FileVideoCaptureDevice::OnStopAndDeAllocate() {
DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- CHECK(base::ClosePlatformFile(file_));
+ file_.Close();
client_.reset();
current_byte_index_ = 0;
first_frame_byte_index_ = 0;
@@ -263,32 +212,29 @@ void FileVideoCaptureDevice::OnCaptureTask() {
DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
if (!client_)
return;
- int result =
- base::ReadPlatformFile(file_,
- current_byte_index_,
- reinterpret_cast<char*>(video_frame_.get()),
- frame_size_);
+ int result = file_.Read(current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()),
+ frame_size_);
- // If we passed EOF to PlatformFile, it will return 0 read characters. In that
+ // If the read position has passed EOF, base::File returns 0 bytes. In that
// case, reset the pointer and read again.
if (result != frame_size_) {
CHECK_EQ(result, 0);
current_byte_index_ = first_frame_byte_index_;
- CHECK_EQ(base::ReadPlatformFile(file_,
- current_byte_index_,
- reinterpret_cast<char*>(video_frame_.get()),
- frame_size_),
+ CHECK_EQ(file_.Read(current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()),
+ frame_size_),
frame_size_);
} else {
current_byte_index_ += frame_size_ + kY4MSimpleFrameDelimiterSize;
}
// Give the captured frame to the client.
- client_->OnIncomingCapturedFrame(video_frame_.get(),
- frame_size_,
- base::Time::Now(),
- 0,
- capture_format_);
+ client_->OnIncomingCapturedData(video_frame_.get(),
+ frame_size_,
+ capture_format_,
+ 0,
+ base::TimeTicks::Now());
// Reschedule next CaptureTask.
base::MessageLoop::current()->PostDelayedTask(
FROM_HERE,
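
For reference, a hand-written example of the Y4M input the parser above
accepts (not a file from the tree):

    // 640x480, 30000/1001 fps, I420 ("420jpeg" is one of the fudged variants).
    const char kExampleY4MHeader[] =
        "YUV4MPEG2 W640 H480 F30000:1001 Ip A1:1 C420jpeg\n"
        "FRAME\n";
    // Each FRAME tag is followed by 640 * 480 * 3 / 2 bytes of raw I420 data.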
diff --git a/chromium/media/video/capture/file_video_capture_device.h b/chromium/media/video/capture/file_video_capture_device.h
index 06e6033254d..e2e066b3b25 100644
--- a/chromium/media/video/capture/file_video_capture_device.h
+++ b/chromium/media/video/capture/file_video_capture_device.h
@@ -7,8 +7,8 @@
#include <string>
+#include "base/files/file.h"
#include "base/memory/scoped_ptr.h"
-#include "base/platform_file.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "media/video/capture/video_capture_device.h"
@@ -25,13 +25,14 @@ namespace media {
// Example videos can be found in http://media.xiph.org/video/derf.
class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
public:
- // VideoCaptureDevice implementation, static methods. Create() returns a
- // pointer to the object, fully owned by the caller.
- // TODO(mcasas): Create() should return a scoped_ptr<> http://crbug.com/321613
- static VideoCaptureDevice* Create(const Name& device_name);
- static void GetDeviceNames(Names* device_names);
- static void GetDeviceSupportedFormats(const Name& device,
- VideoCaptureFormats* supported_formats);
+ static int64 ParseFileAndExtractVideoFormat(
+ base::File* file,
+ media::VideoCaptureFormat* video_format);
+ static base::File OpenFileForRead(const base::FilePath& file_path);
+
+ // Constructor of the class, with a fully qualified file path as input, which
+ // represents the Y4M video file to stream repeatedly.
+ explicit FileVideoCaptureDevice(const base::FilePath& file_path);
// VideoCaptureDevice implementation, class methods.
virtual ~FileVideoCaptureDevice();
@@ -41,9 +42,6 @@ class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
virtual void StopAndDeAllocate() OVERRIDE;
private:
- // Constructor of the class, with a fully qualified file path as input, which
- // represents the Y4M video file to stream repeatedly.
- explicit FileVideoCaptureDevice(const base::FilePath& file_path);
// Returns size in bytes of an I420 frame, not including possible paddings,
// defined by |capture_format_|.
int CalculateFrameSize();
@@ -64,7 +62,7 @@ class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
// The following members belong to |capture_thread_|.
scoped_ptr<VideoCaptureDevice::Client> client_;
const base::FilePath file_path_;
- base::PlatformFile file_;
+ base::File file_;
scoped_ptr<uint8[]> video_frame_;
VideoCaptureFormat capture_format_;
int frame_size_;
diff --git a/chromium/media/video/capture/file_video_capture_device_factory.cc b/chromium/media/video/capture/file_video_capture_device_factory.cc
new file mode 100644
index 00000000000..ae7845064e9
--- /dev/null
+++ b/chromium/media/video/capture/file_video_capture_device_factory.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/file_video_capture_device_factory.h"
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/strings/sys_string_conversions.h"
+#include "media/base/media_switches.h"
+#include "media/video/capture/file_video_capture_device.h"
+
+namespace media {
+
+const char kFileVideoCaptureDeviceName[] =
+ "/dev/placeholder-for-file-backed-fake-capture-device";
+
+// Inspects the command line and retrieves the file path parameter.
+base::FilePath GetFilePathFromCommandLine() {
+ base::FilePath command_line_file_path =
+ CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+ switches::kUseFileForFakeVideoCapture);
+ CHECK(!command_line_file_path.empty());
+ return command_line_file_path;
+}
+
+scoped_ptr<VideoCaptureDevice> FileVideoCaptureDeviceFactory::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(OS_WIN)
+ return scoped_ptr<VideoCaptureDevice>(new FileVideoCaptureDevice(
+ base::FilePath(base::SysUTF8ToWide(device_name.name()))));
+#else
+ return scoped_ptr<VideoCaptureDevice>(new FileVideoCaptureDevice(
+ base::FilePath(device_name.name())));
+#endif
+}
+
+void FileVideoCaptureDeviceFactory::GetDeviceNames(
+ VideoCaptureDevice::Names* const device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(device_names->empty());
+ base::FilePath command_line_file_path = GetFilePathFromCommandLine();
+#if defined(OS_WIN)
+ device_names->push_back(VideoCaptureDevice::Name(
+ base::SysWideToUTF8(command_line_file_path.value()),
+ kFileVideoCaptureDeviceName));
+#else
+ device_names->push_back(VideoCaptureDevice::Name(
+ command_line_file_path.value(),
+ kFileVideoCaptureDeviceName));
+#endif
+}
+
+void FileVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::File file =
+ FileVideoCaptureDevice::OpenFileForRead(GetFilePathFromCommandLine());
+ VideoCaptureFormat capture_format;
+ FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(&file,
+ &capture_format);
+ supported_formats->push_back(capture_format);
+}
+
+} // namespace media
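
This new factory replaces the old static VideoCaptureDevice::Create() with a method returning scoped_ptr<>, closing the TODO about raw-pointer ownership. A hedged standalone sketch of the same ownership shift using std::unique_ptr; all type names below are stand-ins, not the media:: classes.

#include <memory>
#include <string>

// Stand-in for media::VideoCaptureDevice.
struct Device {
  virtual ~Device() {}
};

// Stand-in for FileVideoCaptureDevice.
struct FileDevice : Device {
  explicit FileDevice(const std::string& path) : path_(path) {}
  std::string path_;
};

// Factory in the new style: the smart-pointer return type makes ownership
// transfer explicit, where the old raw-pointer Create() relied on a comment.
std::unique_ptr<Device> CreateDevice(const std::string& file_path) {
  if (file_path.empty())
    return nullptr;  // Like returning an empty scoped_ptr<> on failure.
  return std::unique_ptr<Device>(new FileDevice(file_path));
}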
diff --git a/chromium/media/video/capture/file_video_capture_device_factory.h b/chromium/media/video/capture/file_video_capture_device_factory.h
new file mode 100644
index 00000000000..986a266cd4b
--- /dev/null
+++ b/chromium/media/video/capture/file_video_capture_device_factory.h
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+#define MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate file-backed
+// fake devices. These devices play back video-only files as video capture
+// input.
+class MEDIA_EXPORT FileVideoCaptureDeviceFactory
+    : public VideoCaptureDeviceFactory {
+ public:
+ FileVideoCaptureDeviceFactory() {}
+ virtual ~FileVideoCaptureDeviceFactory() {}
+
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) OVERRIDE;
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) OVERRIDE;
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) OVERRIDE;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
diff --git a/chromium/media/video/capture/linux/video_capture_device_chromeos.cc b/chromium/media/video/capture/linux/video_capture_device_chromeos.cc
new file mode 100644
index 00000000000..8758ea15f8c
--- /dev/null
+++ b/chromium/media/video/capture/linux/video_capture_device_chromeos.cc
@@ -0,0 +1,116 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/linux/video_capture_device_chromeos.h"
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "ui/gfx/display.h"
+#include "ui/gfx/display_observer.h"
+#include "ui/gfx/screen.h"
+
+namespace media {
+
+// This is a delegate class used to transfer Display change events from the UI
+// thread to the media thread.
+class VideoCaptureDeviceChromeOS::ScreenObserverDelegate
+ : public gfx::DisplayObserver,
+ public base::RefCountedThreadSafe<ScreenObserverDelegate> {
+ public:
+ ScreenObserverDelegate(
+ VideoCaptureDeviceChromeOS* capture_device,
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
+ : capture_device_(capture_device),
+ ui_task_runner_(ui_task_runner),
+ capture_task_runner_(base::MessageLoopProxy::current()) {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&ScreenObserverDelegate::AddObserverOnUIThread, this));
+ }
+
+ void RemoveObserver() {
+ DCHECK(capture_task_runner_->BelongsToCurrentThread());
+ capture_device_ = NULL;
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&ScreenObserverDelegate::RemoveObserverOnUIThread, this));
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<ScreenObserverDelegate>;
+
+ virtual ~ScreenObserverDelegate() {
+ DCHECK(!capture_device_);
+ }
+
+ virtual void OnDisplayAdded(const gfx::Display& /*new_display*/) OVERRIDE {}
+ virtual void OnDisplayRemoved(const gfx::Display& /*old_display*/) OVERRIDE {}
+ virtual void OnDisplayMetricsChanged(const gfx::Display& display,
+ uint32_t metrics) OVERRIDE {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ if (!(metrics & DISPLAY_METRIC_ROTATION))
+ return;
+ SendDisplayRotation(display);
+ }
+
+ void AddObserverOnUIThread() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ gfx::Screen* screen =
+ gfx::Screen::GetScreenByType(gfx::SCREEN_TYPE_ALTERNATE);
+ if (screen) {
+ screen->AddObserver(this);
+ SendDisplayRotation(screen->GetPrimaryDisplay());
+ }
+ }
+
+ void RemoveObserverOnUIThread() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ gfx::Screen* screen =
+ gfx::Screen::GetScreenByType(gfx::SCREEN_TYPE_ALTERNATE);
+ if (screen)
+ screen->RemoveObserver(this);
+ }
+
+  // Post the screen rotation change from the UI thread to the capture thread.
+ void SendDisplayRotation(const gfx::Display& display) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ capture_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&ScreenObserverDelegate::SendDisplayRotationOnCaptureThread,
+ this, display));
+ }
+
+ void SendDisplayRotationOnCaptureThread(const gfx::Display& display) {
+ DCHECK(capture_task_runner_->BelongsToCurrentThread());
+ if (capture_device_)
+ capture_device_->SetDisplayRotation(display);
+ }
+
+ VideoCaptureDeviceChromeOS* capture_device_;
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> capture_task_runner_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ScreenObserverDelegate);
+};
+
+VideoCaptureDeviceChromeOS::VideoCaptureDeviceChromeOS(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const Name& device_name)
+ : VideoCaptureDeviceLinux(device_name),
+ screen_observer_delegate_(new ScreenObserverDelegate(this,
+ ui_task_runner)) {
+}
+
+VideoCaptureDeviceChromeOS::~VideoCaptureDeviceChromeOS() {
+ screen_observer_delegate_->RemoveObserver();
+}
+
+void VideoCaptureDeviceChromeOS::SetDisplayRotation(
+ const gfx::Display& display) {
+ if (display.IsInternal())
+ SetRotation(display.rotation() * 90);
+}
+
+} // namespace media
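
SetDisplayRotation() above turns the display's quarter-turn rotation enum into clockwise degrees with a multiply by 90, and only for the internal display. A tiny sketch of that conversion; the enum ordering is an assumption mirroring gfx::Display::Rotation.

// Quarter-turn rotation enum; ordering assumed to match gfx's ROTATE_0,
// ROTATE_90, ROTATE_180, ROTATE_270.
enum class Rotation { kRotate0, kRotate90, kRotate180, kRotate270 };

// Convert a quarter-turn count to clockwise degrees: 0, 90, 180, or 270.
int RotationToDegrees(Rotation rotation) {
  return static_cast<int>(rotation) * 90;
}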
diff --git a/chromium/media/video/capture/linux/video_capture_device_chromeos.h b/chromium/media/video/capture/linux/video_capture_device_chromeos.h
new file mode 100644
index 00000000000..6a79cd09485
--- /dev/null
+++ b/chromium/media/video/capture/linux/video_capture_device_chromeos.h
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
+
+#include "media/video/capture/linux/video_capture_device_linux.h"
+
+namespace gfx {
+class Display;
+} // namespace gfx
+
+namespace media {
+
+// This class is functionally the same as VideoCaptureDeviceLinux, with the
+// exception that it is aware of the orientation of the internal Display. When
+// the internal Display is rotated, the frames captured are rotated to match.
+class VideoCaptureDeviceChromeOS : public VideoCaptureDeviceLinux {
+ public:
+ explicit VideoCaptureDeviceChromeOS(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const Name& device_name);
+ virtual ~VideoCaptureDeviceChromeOS();
+
+ private:
+ class ScreenObserverDelegate;
+
+ void SetDisplayRotation(const gfx::Display& display);
+ scoped_refptr<ScreenObserverDelegate> screen_observer_delegate_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceChromeOS);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
diff --git a/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc b/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc
new file mode 100644
index 00000000000..c8821eebbdb
--- /dev/null
+++ b/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc
@@ -0,0 +1,187 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/linux/video_capture_device_factory_linux.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#if defined(OS_OPENBSD)
+#include <sys/videoio.h>
+#else
+#include <linux/videodev2.h>
+#endif
+#include <sys/ioctl.h>
+
+#include "base/files/file_enumerator.h"
+#include "base/files/scoped_file.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#if defined(OS_CHROMEOS)
+#include "media/video/capture/linux/video_capture_device_chromeos.h"
+#endif
+#include "media/video/capture/linux/video_capture_device_linux.h"
+
+namespace media {
+
+static bool HasUsableFormats(int fd) {
+ v4l2_fmtdesc fmtdesc;
+ std::list<int> usable_fourccs;
+
+ media::VideoCaptureDeviceLinux::GetListOfUsableFourCCs(false,
+ &usable_fourccs);
+
+ memset(&fmtdesc, 0, sizeof(v4l2_fmtdesc));
+ fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ while (HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) == 0) {
+ if (std::find(usable_fourccs.begin(), usable_fourccs.end(),
+ fmtdesc.pixelformat) != usable_fourccs.end())
+ return true;
+
+ fmtdesc.index++;
+ }
+ return false;
+}
+
+VideoCaptureDeviceFactoryLinux::VideoCaptureDeviceFactoryLinux(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
+ : ui_task_runner_(ui_task_runner) {
+}
+
+VideoCaptureDeviceFactoryLinux::~VideoCaptureDeviceFactoryLinux() {
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryLinux::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(OS_CHROMEOS)
+ VideoCaptureDeviceChromeOS* self =
+ new VideoCaptureDeviceChromeOS(ui_task_runner_, device_name);
+#else
+ VideoCaptureDeviceLinux* self = new VideoCaptureDeviceLinux(device_name);
+#endif
+ if (!self)
+ return scoped_ptr<VideoCaptureDevice>();
+ // Test opening the device driver. This is to make sure it is available.
+  // We will reopen it in our worker thread when someone allocates the
+  // camera.
+ base::ScopedFD fd(HANDLE_EINTR(open(device_name.id().c_str(), O_RDONLY)));
+ if (!fd.is_valid()) {
+ DVLOG(1) << "Cannot open device";
+ delete self;
+ return scoped_ptr<VideoCaptureDevice>();
+ }
+
+ return scoped_ptr<VideoCaptureDevice>(self);
+}
+
+void VideoCaptureDeviceFactoryLinux::GetDeviceNames(
+ VideoCaptureDevice::Names* const device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(device_names->empty());
+ base::FilePath path("/dev/");
+ base::FileEnumerator enumerator(
+ path, false, base::FileEnumerator::FILES, "video*");
+
+ while (!enumerator.Next().empty()) {
+ base::FileEnumerator::FileInfo info = enumerator.GetInfo();
+
+ std::string unique_id = path.value() + info.GetName().value();
+ base::ScopedFD fd(HANDLE_EINTR(open(unique_id.c_str(), O_RDONLY)));
+ if (!fd.is_valid()) {
+ // Failed to open this device.
+ continue;
+ }
+ // Test if this is a V4L2 capture device.
+ v4l2_capability cap;
+ if ((HANDLE_EINTR(ioctl(fd.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
+ (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT)) {
+      // This is a V4L2 video capture device.
+ if (HasUsableFormats(fd.get())) {
+ VideoCaptureDevice::Name device_name(base::StringPrintf("%s", cap.card),
+ unique_id);
+ device_names->push_back(device_name);
+ } else {
+ DVLOG(1) << "No usable formats reported by " << info.GetName().value();
+ }
+ }
+ }
+}
+
+void VideoCaptureDeviceFactoryLinux::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (device.id().empty())
+ return;
+ base::ScopedFD fd(HANDLE_EINTR(open(device.id().c_str(), O_RDONLY)));
+ if (!fd.is_valid()) {
+ // Failed to open this device.
+ return;
+ }
+ supported_formats->clear();
+
+ // Retrieve the caps one by one, first get pixel format, then sizes, then
+ // frame rates. See http://linuxtv.org/downloads/v4l-dvb-apis for reference.
+ v4l2_fmtdesc pixel_format = {};
+ pixel_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ while (HANDLE_EINTR(ioctl(fd.get(), VIDIOC_ENUM_FMT, &pixel_format)) == 0) {
+ VideoCaptureFormat supported_format;
+ supported_format.pixel_format =
+ VideoCaptureDeviceLinux::V4l2ColorToVideoCaptureColorFormat(
+ (int32)pixel_format.pixelformat);
+ if (supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN) {
+ ++pixel_format.index;
+ continue;
+ }
+
+ v4l2_frmsizeenum frame_size = {};
+ frame_size.pixel_format = pixel_format.pixelformat;
+ while (HANDLE_EINTR(ioctl(fd.get(), VIDIOC_ENUM_FRAMESIZES, &frame_size)) ==
+ 0) {
+ if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ supported_format.frame_size.SetSize(
+ frame_size.discrete.width, frame_size.discrete.height);
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ }
+ v4l2_frmivalenum frame_interval = {};
+ frame_interval.pixel_format = pixel_format.pixelformat;
+ frame_interval.width = frame_size.discrete.width;
+ frame_interval.height = frame_size.discrete.height;
+ while (HANDLE_EINTR(ioctl(
+ fd.get(), VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval)) == 0) {
+ if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
+ if (frame_interval.discrete.numerator != 0) {
+ supported_format.frame_rate =
+ static_cast<float>(frame_interval.discrete.denominator) /
+ static_cast<float>(frame_interval.discrete.numerator);
+ } else {
+ supported_format.frame_rate = 0;
+ }
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ }
+ supported_formats->push_back(supported_format);
+ ++frame_interval.index;
+ }
+ ++frame_size.index;
+ }
+ ++pixel_format.index;
+ }
+ return;
+}
+
+} // namespace media
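
GetDeviceNames() above walks /dev/video* and keeps only nodes whose VIDIOC_QUERYCAP reports a capture device that is not also an output device. Below is a trimmed, standalone Linux sketch of that probe, with error handling reduced to the essentials; it mirrors the logic but is not the Chromium code.

#include <fcntl.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <string>

// Probe one device node the way GetDeviceNames() does: QUERYCAP must succeed
// and report capture-but-not-output capability.
bool IsV4L2CaptureDevice(const std::string& path) {
  int fd;
  do {
    fd = open(path.c_str(), O_RDONLY);
  } while (fd == -1 && errno == EINTR);  // Hand-rolled HANDLE_EINTR.
  if (fd == -1)
    return false;  // Node missing or no permission.

  v4l2_capability cap = {};
  int ret;
  do {
    ret = ioctl(fd, VIDIOC_QUERYCAP, &cap);
  } while (ret == -1 && errno == EINTR);

  const bool is_capture = ret == 0 &&
                          (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
                          !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT);
  if (is_capture) {
    // cap.card holds the human-readable model, used as the device name above.
    printf("%s: %s\n", path.c_str(), reinterpret_cast<const char*>(cap.card));
  }
  close(fd);
  return is_capture;
}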
diff --git a/chromium/media/video/capture/linux/video_capture_device_factory_linux.h b/chromium/media/video/capture/linux/video_capture_device_factory_linux.h
new file mode 100644
index 00000000000..9702cddd214
--- /dev/null
+++ b/chromium/media/video/capture/linux/video_capture_device_factory_linux.h
@@ -0,0 +1,38 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Declaration of the VideoCaptureDeviceFactoryLinux class.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+#include "media/video/capture/video_capture_types.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate Linux
+// devices.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryLinux
+ : public VideoCaptureDeviceFactory {
+ public:
+ explicit VideoCaptureDeviceFactoryLinux(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+ virtual ~VideoCaptureDeviceFactoryLinux();
+
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) OVERRIDE;
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) OVERRIDE;
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) OVERRIDE;
+
+ private:
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryLinux);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.cc b/chromium/media/video/capture/linux/video_capture_device_linux.cc
index 21f57ee132a..11151ac7285 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.cc
@@ -18,8 +18,9 @@
#include <string>
#include "base/bind.h"
-#include "base/file_util.h"
#include "base/files/file_enumerator.h"
+#include "base/files/scoped_file.h"
+#include "base/posix/eintr_wrapper.h"
#include "base/strings/stringprintf.h"
namespace media {
@@ -55,9 +56,23 @@ static const char kVidPathTemplate[] =
static const char kPidPathTemplate[] =
"/sys/class/video4linux/%s/device/../idProduct";
+static bool ReadIdFile(const std::string& path, std::string* id) {
+ char id_buf[kVidPidSize];
+ FILE* file = fopen(path.c_str(), "rb");
+ if (!file)
+ return false;
+ const bool success = fread(id_buf, kVidPidSize, 1, file) == 1;
+ fclose(file);
+ if (!success)
+ return false;
+ id->append(id_buf, kVidPidSize);
+ return true;
+}
+
// This function translates Video4Linux pixel formats to Chromium pixel formats,
// should only support those listed in GetListOfUsableFourCCs.
-static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
+// static
+VideoPixelFormat VideoCaptureDeviceLinux::V4l2ColorToVideoCaptureColorFormat(
int32 v4l2_fourcc) {
VideoPixelFormat result = PIXEL_FORMAT_UNKNOWN;
switch (v4l2_fourcc) {
@@ -77,7 +92,9 @@ static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
return result;
}
-static void GetListOfUsableFourCCs(bool favour_mjpeg, std::list<int>* fourccs) {
+// static
+void VideoCaptureDeviceLinux::GetListOfUsableFourCCs(bool favour_mjpeg,
+ std::list<int>* fourccs) {
for (size_t i = 0; i < arraysize(kV4l2RawFmts); ++i)
fourccs->push_back(kV4l2RawFmts[i]);
if (favour_mjpeg)
@@ -90,143 +107,6 @@ static void GetListOfUsableFourCCs(bool favour_mjpeg, std::list<int>* fourccs) {
fourccs->push_back(V4L2_PIX_FMT_JPEG);
}
-static bool HasUsableFormats(int fd) {
- v4l2_fmtdesc fmtdesc;
- std::list<int> usable_fourccs;
-
- GetListOfUsableFourCCs(false, &usable_fourccs);
-
- memset(&fmtdesc, 0, sizeof(v4l2_fmtdesc));
- fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
- if (std::find(usable_fourccs.begin(), usable_fourccs.end(),
- fmtdesc.pixelformat) != usable_fourccs.end())
- return true;
-
- fmtdesc.index++;
- }
- return false;
-}
-
-void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
- int fd = -1;
-
- // Empty the name list.
- device_names->clear();
-
- base::FilePath path("/dev/");
- base::FileEnumerator enumerator(
- path, false, base::FileEnumerator::FILES, "video*");
-
- while (!enumerator.Next().empty()) {
- base::FileEnumerator::FileInfo info = enumerator.GetInfo();
-
- std::string unique_id = path.value() + info.GetName().value();
- if ((fd = open(unique_id.c_str() , O_RDONLY)) < 0) {
- // Failed to open this device.
- continue;
- }
- // Test if this is a V4L2 capture device.
- v4l2_capability cap;
- if ((ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) &&
- (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
- !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT)) {
- // This is a V4L2 video capture device
- if (HasUsableFormats(fd)) {
- Name device_name(base::StringPrintf("%s", cap.card), unique_id);
- device_names->push_back(device_name);
- } else {
- DVLOG(1) << "No usable formats reported by " << info.GetName().value();
- }
- }
- close(fd);
- }
-}
-
-void VideoCaptureDevice::GetDeviceSupportedFormats(
- const Name& device,
- VideoCaptureFormats* supported_formats) {
- if (device.id().empty())
- return;
- int fd;
- if ((fd = open(device.id().c_str(), O_RDONLY)) < 0)
- return;
-
- supported_formats->clear();
- // Retrieve the caps one by one, first get pixel format, then sizes, then
- // frame rates. See http://linuxtv.org/downloads/v4l-dvb-apis for reference.
- v4l2_fmtdesc pixel_format = {};
- pixel_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- while (ioctl(fd, VIDIOC_ENUM_FMT, &pixel_format) == 0) {
- VideoCaptureFormat supported_format;
- supported_format.pixel_format =
- V4l2ColorToVideoCaptureColorFormat((int32)pixel_format.pixelformat);
- if (supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN) {
- ++pixel_format.index;
- continue;
- }
-
- v4l2_frmsizeenum frame_size = {};
- frame_size.pixel_format = pixel_format.pixelformat;
- while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size) == 0) {
- if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
- supported_format.frame_size.SetSize(
- frame_size.discrete.width, frame_size.discrete.height);
- } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- } else if (frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- }
- v4l2_frmivalenum frame_interval = {};
- frame_interval.pixel_format = pixel_format.pixelformat;
- frame_interval.width = frame_size.discrete.width;
- frame_interval.height = frame_size.discrete.height;
- while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval) == 0) {
- if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
- if (frame_interval.discrete.numerator != 0) {
- supported_format.frame_rate =
- static_cast<float>(frame_interval.discrete.denominator) /
- static_cast<float>(frame_interval.discrete.numerator);
- } else {
- supported_format.frame_rate = 0;
- }
- } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- break;
- } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
- // TODO(mcasas): see http://crbug.com/249953, support these devices.
- NOTIMPLEMENTED();
- break;
- }
- supported_formats->push_back(supported_format);
- ++frame_interval.index;
- }
- ++frame_size.index;
- }
- ++pixel_format.index;
- }
-
- close(fd);
- return;
-}
-
-static bool ReadIdFile(const std::string path, std::string* id) {
- char id_buf[kVidPidSize];
- FILE* file = fopen(path.c_str(), "rb");
- if (!file)
- return false;
- const bool success = fread(id_buf, kVidPidSize, 1, file) == 1;
- fclose(file);
- if (!success)
- return false;
- id->append(id_buf, kVidPidSize);
- return true;
-}
-
const std::string VideoCaptureDevice::Name::GetModel() const {
// |unique_id| is of the form "/dev/video2". |file_name| is "video2".
const std::string dev_dir = "/dev/";
@@ -249,43 +129,22 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
return usb_id;
}
-VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
- VideoCaptureDeviceLinux* self = new VideoCaptureDeviceLinux(device_name);
- if (!self)
- return NULL;
- // Test opening the device driver. This is to make sure it is available.
- // We will reopen it again in our worker thread when someone
- // allocates the camera.
- int fd = open(device_name.id().c_str(), O_RDONLY);
- if (fd < 0) {
- DVLOG(1) << "Cannot open device";
- delete self;
- return NULL;
- }
- close(fd);
-
- return self;
-}
-
VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(const Name& device_name)
: state_(kIdle),
device_name_(device_name),
- device_fd_(-1),
v4l2_thread_("V4L2Thread"),
buffer_pool_(NULL),
buffer_pool_size_(0),
- timeout_count_(0) {}
+ timeout_count_(0),
+ rotation_(0) {
+}
VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
state_ = kIdle;
// Check if the thread is running.
  // This means that the device has not been DeAllocated properly.
DCHECK(!v4l2_thread_.IsRunning());
-
v4l2_thread_.Stop();
- if (device_fd_ >= 0) {
- close(device_fd_);
- }
}
void VideoCaptureDeviceLinux::AllocateAndStart(
@@ -320,6 +179,25 @@ void VideoCaptureDeviceLinux::StopAndDeAllocate() {
DeAllocateVideoBuffers();
}
+void VideoCaptureDeviceLinux::SetRotation(int rotation) {
+ if (v4l2_thread_.IsRunning()) {
+ v4l2_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoCaptureDeviceLinux::SetRotationOnV4L2Thread,
+ base::Unretained(this), rotation));
+ } else {
+ // If the |v4l2_thread_| is not running, there's no race condition and
+ // |rotation_| can be set directly.
+ rotation_ = rotation;
+ }
+}
+
+void VideoCaptureDeviceLinux::SetRotationOnV4L2Thread(int rotation) {
+ DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
+ DCHECK(rotation >= 0 && rotation < 360 && rotation % 90 == 0);
+ rotation_ = rotation;
+}
+
void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
int height,
int frame_rate,
@@ -329,19 +207,19 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
client_ = client.Pass();
// Need to open camera with O_RDWR after Linux kernel 3.3.
- if ((device_fd_ = open(device_name_.id().c_str(), O_RDWR)) < 0) {
+ device_fd_.reset(HANDLE_EINTR(open(device_name_.id().c_str(), O_RDWR)));
+ if (!device_fd_.is_valid()) {
SetErrorState("Failed to open V4L2 device driver.");
return;
}
// Test if this is a V4L2 capture device.
v4l2_capability cap;
- if (!((ioctl(device_fd_, VIDIOC_QUERYCAP, &cap) == 0) &&
- (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
- !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT))) {
+ if (!((HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
+ (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT))) {
// This is not a V4L2 video capture device.
- close(device_fd_);
- device_fd_ = -1;
+ device_fd_.reset();
SetErrorState("This is not a V4L2 video capture device");
return;
}
@@ -352,13 +230,13 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
GetListOfUsableFourCCs(width > kMjpegWidth || height > kMjpegHeight,
&v4l2_formats);
- v4l2_fmtdesc fmtdesc;
- memset(&fmtdesc, 0, sizeof(v4l2_fmtdesc));
+ v4l2_fmtdesc fmtdesc = {0};
fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
// Enumerate image formats.
std::list<int>::iterator best = v4l2_formats.end();
- while (ioctl(device_fd_, VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+ while (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_ENUM_FMT, &fmtdesc)) ==
+ 0) {
best = std::find(v4l2_formats.begin(), best, fmtdesc.pixelformat);
fmtdesc.index++;
}
@@ -370,14 +248,14 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
// Set format and frame size now.
v4l2_format video_fmt;
- memset(&video_fmt, 0, sizeof(video_fmt));
+ memset(&video_fmt, 0, sizeof(v4l2_format));
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
video_fmt.fmt.pix.width = width;
video_fmt.fmt.pix.height = height;
video_fmt.fmt.pix.pixelformat = *best;
- if (ioctl(device_fd_, VIDIOC_S_FMT, &video_fmt) < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_FMT, &video_fmt)) < 0) {
SetErrorState("Failed to set camera format");
return;
}
@@ -387,14 +265,18 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
memset(&streamparm, 0, sizeof(v4l2_streamparm));
streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
// The following line checks that the driver knows about framerate get/set.
- if (ioctl(device_fd_, VIDIOC_G_PARM, &streamparm) >= 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_G_PARM, &streamparm)) >= 0) {
// Now check if the device is able to accept a capture framerate set.
if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
- streamparm.parm.capture.timeperframe.numerator = 1;
- streamparm.parm.capture.timeperframe.denominator =
- (frame_rate) ? frame_rate : kTypicalFramerate;
-
- if (ioctl(device_fd_, VIDIOC_S_PARM, &streamparm) < 0) {
+      // |frame_rate| is a float, so approximate it with a fraction.
+ streamparm.parm.capture.timeperframe.numerator =
+ media::kFrameRatePrecision;
+ streamparm.parm.capture.timeperframe.denominator = (frame_rate) ?
+ (frame_rate * media::kFrameRatePrecision) :
+ (kTypicalFramerate * media::kFrameRatePrecision);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_PARM, &streamparm)) <
+ 0) {
SetErrorState("Failed to set camera framerate");
return;
}
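
The framerate hunk above deals with V4L2 wanting a time-per-frame fraction while |frame_rate| is a float: scale both sides by a precision constant so 1/rate becomes precision/(rate * precision). A standalone sketch follows; the value 100 is an assumed stand-in for media::kFrameRatePrecision, whose actual value may differ.

#include <cstdint>

struct TimePerFrame {
  uint32_t numerator;
  uint32_t denominator;
};

// Approximate a float frame rate as a V4L2-style time-per-frame fraction.
// kPrecision = 100 is an assumption, not the real media::kFrameRatePrecision.
TimePerFrame ApproximateTimePerFrame(float frame_rate, float fallback_rate) {
  const uint32_t kPrecision = 100;
  const float rate = (frame_rate > 0.0f) ? frame_rate : fallback_rate;
  TimePerFrame result;
  result.numerator = kPrecision;
  result.denominator = static_cast<uint32_t>(rate * kPrecision);
  return result;  // e.g. 29.97 fps -> 100/2997 of a second per frame.
}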
@@ -406,6 +288,19 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
// TODO(mcasas): what should be done if the camera driver does not allow
// framerate configuration, or the actual one is different from the desired?
+  // Set anti-banding/anti-flicker to 50/60Hz. This may fail if the control is
+  // not supported (|errno| == EINVAL in that case) or for other reasons.
+ const int power_line_frequency = GetPowerLineFrequencyForLocation();
+ if ((power_line_frequency == kPowerLine50Hz) ||
+ (power_line_frequency == kPowerLine60Hz)) {
+ struct v4l2_control control = {};
+ control.id = V4L2_CID_POWER_LINE_FREQUENCY;
+ control.value = (power_line_frequency == kPowerLine50Hz) ?
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ :
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
+ HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_CTRL, &control));
+ }
+
// Store our current width and height.
capture_format_.frame_size.SetSize(video_fmt.fmt.pix.width,
video_fmt.fmt.pix.height);
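
The anti-flicker hunk above is deliberately best-effort: it sets V4L2_CID_POWER_LINE_FREQUENCY and discards the ioctl result, since many drivers simply reject the control with EINVAL. A standalone sketch of the same call, assuming |fd| is an open V4L2 capture device:

#include <linux/videodev2.h>
#include <sys/ioctl.h>

#include <cerrno>

// Best-effort anti-banding setup mirroring the hunk above.
void SetAntiFlickerHz(int fd, bool use_50hz) {
  v4l2_control control = {};
  control.id = V4L2_CID_POWER_LINE_FREQUENCY;
  control.value = use_50hz ? V4L2_CID_POWER_LINE_FREQUENCY_50HZ
                           : V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
  int ret;
  do {
    ret = ioctl(fd, VIDIOC_S_CTRL, &control);
  } while (ret == -1 && errno == EINTR);
  // The result is intentionally ignored: EINVAL just means the driver does
  // not expose this control.
}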
@@ -422,7 +317,7 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
// Start UVC camera.
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (ioctl(device_fd_, VIDIOC_STREAMON, &type) == -1) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMON, &type)) == -1) {
SetErrorState("VIDIOC_STREAMON failed");
return;
}
@@ -439,7 +334,7 @@ void VideoCaptureDeviceLinux::OnStopAndDeAllocate() {
DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (ioctl(device_fd_, VIDIOC_STREAMOFF, &type) < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMOFF, &type)) < 0) {
SetErrorState("VIDIOC_STREAMOFF failed");
return;
}
@@ -450,8 +345,7 @@ void VideoCaptureDeviceLinux::OnStopAndDeAllocate() {
  // We need to close and reopen the device if we want to change the settings;
  // otherwise VIDIOC_S_FMT will return an error.
  // Sad but true.
- close(device_fd_);
- device_fd_ = -1;
+ device_fd_.reset();
state_ = kIdle;
client_.reset();
}
@@ -465,7 +359,7 @@ void VideoCaptureDeviceLinux::OnCaptureTask() {
fd_set r_set;
FD_ZERO(&r_set);
- FD_SET(device_fd_, &r_set);
+ FD_SET(device_fd_.get(), &r_set);
timeval timeout;
timeout.tv_sec = 0;
@@ -473,7 +367,8 @@ void VideoCaptureDeviceLinux::OnCaptureTask() {
// First argument to select is the highest numbered file descriptor +1.
// Refer to http://linux.die.net/man/2/select for more information.
- int result = select(device_fd_ + 1, &r_set, NULL, NULL, &timeout);
+ int result =
+ HANDLE_EINTR(select(device_fd_.get() + 1, &r_set, NULL, NULL, &timeout));
  // Check if select has failed.
if (result < 0) {
// EINTR is a signal. This is not really an error.
@@ -502,22 +397,22 @@ void VideoCaptureDeviceLinux::OnCaptureTask() {
}
  // Check if the driver has filled a buffer.
- if (FD_ISSET(device_fd_, &r_set)) {
+ if (FD_ISSET(device_fd_.get(), &r_set)) {
v4l2_buffer buffer;
memset(&buffer, 0, sizeof(buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buffer.memory = V4L2_MEMORY_MMAP;
// Dequeue a buffer.
- if (ioctl(device_fd_, VIDIOC_DQBUF, &buffer) == 0) {
- client_->OnIncomingCapturedFrame(
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_DQBUF, &buffer)) == 0) {
+ client_->OnIncomingCapturedData(
static_cast<uint8*>(buffer_pool_[buffer.index].start),
buffer.bytesused,
- base::Time::Now(),
- 0,
- capture_format_);
+ capture_format_,
+ rotation_,
+ base::TimeTicks::Now());
// Enqueue the buffer again.
- if (ioctl(device_fd_, VIDIOC_QBUF, &buffer) == -1) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) == -1) {
SetErrorState(base::StringPrintf(
"Failed to enqueue capture buffer errno %d", errno));
}
@@ -542,7 +437,7 @@ bool VideoCaptureDeviceLinux::AllocateVideoBuffers() {
r_buffer.memory = V4L2_MEMORY_MMAP;
r_buffer.count = kMaxVideoBuffers;
- if (ioctl(device_fd_, VIDIOC_REQBUFS, &r_buffer) < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0) {
return false;
}
@@ -561,20 +456,20 @@ bool VideoCaptureDeviceLinux::AllocateVideoBuffers() {
buffer.memory = V4L2_MEMORY_MMAP;
buffer.index = i;
- if (ioctl(device_fd_, VIDIOC_QUERYBUF, &buffer) < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYBUF, &buffer)) < 0) {
return false;
}
// Some devices require mmap() to be called with both READ and WRITE.
// See crbug.com/178582.
buffer_pool_[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
- MAP_SHARED, device_fd_, buffer.m.offset);
+ MAP_SHARED, device_fd_.get(), buffer.m.offset);
if (buffer_pool_[i].start == MAP_FAILED) {
return false;
}
buffer_pool_[i].length = buffer.length;
    // Enqueue the buffer in the driver's incoming queue.
- if (ioctl(device_fd_, VIDIOC_QBUF, &buffer) < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) < 0) {
return false;
}
}
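
Buffer allocation above follows the canonical V4L2 memory-mapped I/O dance: VIDIOC_QUERYBUF to learn each buffer's offset and length, mmap() with both PROT_READ and PROT_WRITE (some devices insist on both, see crbug.com/178582), then VIDIOC_QBUF to hand the buffer to the driver. A sketch of one iteration of that loop, assuming |fd| is an open capture device after a successful VIDIOC_REQBUFS:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <cstddef>
#include <cstdint>

// Map and enqueue one V4L2 buffer; returns the mapped address or MAP_FAILED.
void* MapAndEnqueueBuffer(int fd, uint32_t index, size_t* length) {
  v4l2_buffer buffer = {};
  buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  buffer.memory = V4L2_MEMORY_MMAP;
  buffer.index = index;
  if (ioctl(fd, VIDIOC_QUERYBUF, &buffer) < 0)
    return MAP_FAILED;
  // READ|WRITE mapping: some devices reject read-only maps (crbug.com/178582).
  void* start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                     fd, buffer.m.offset);
  if (start == MAP_FAILED)
    return MAP_FAILED;
  *length = buffer.length;
  // Hand the empty buffer to the driver's incoming queue.
  if (ioctl(fd, VIDIOC_QBUF, &buffer) < 0) {
    munmap(start, buffer.length);
    return MAP_FAILED;
  }
  return start;
}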
@@ -595,7 +490,7 @@ void VideoCaptureDeviceLinux::DeAllocateVideoBuffers() {
r_buffer.memory = V4L2_MEMORY_MMAP;
r_buffer.count = 0;
- if (ioctl(device_fd_, VIDIOC_REQBUFS, &r_buffer) < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0) {
SetErrorState("Failed to reset buf.");
}
@@ -609,7 +504,7 @@ void VideoCaptureDeviceLinux::SetErrorState(const std::string& reason) {
v4l2_thread_.message_loop() == base::MessageLoop::current());
DVLOG(1) << reason;
state_ = kError;
- client_->OnError();
+ client_->OnError(reason);
}
} // namespace media
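
The change running through this whole file is mechanical: every open()/ioctl()/select() is wrapped in HANDLE_EINTR so that a signal delivered mid-syscall causes a retry instead of surfacing as a spurious failure. A generic standalone equivalent of that macro's behavior:

#include <cerrno>

// Retry a syscall-like callable while it fails with EINTR; behaviorally
// equivalent to Chromium's HANDLE_EINTR macro for int-returning calls.
template <typename Syscall>
int RetryOnEintr(Syscall syscall) {
  int result;
  do {
    result = syscall();
  } while (result == -1 && errno == EINTR);
  return result;
}

// Usage, assuming an open |fd| and a prepared v4l2_buffer |buffer|:
//   int r = RetryOnEintr([&] { return ioctl(fd, VIDIOC_DQBUF, &buffer); });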
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.h b/chromium/media/video/capture/linux/video_capture_device_linux.h
index a5917b71f12..5415eb744d9 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.h
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.h
@@ -12,6 +12,8 @@
#include <string>
+#include "base/file_util.h"
+#include "base/files/scoped_file.h"
#include "base/threading/thread.h"
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
@@ -20,6 +22,10 @@ namespace media {
class VideoCaptureDeviceLinux : public VideoCaptureDevice {
public:
+ static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(int32 v4l2_fourcc);
+ static void GetListOfUsableFourCCs(bool favour_mjpeg,
+ std::list<int>* fourccs);
+
explicit VideoCaptureDeviceLinux(const Name& device_name);
virtual ~VideoCaptureDeviceLinux();
@@ -29,6 +35,12 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice {
virtual void StopAndDeAllocate() OVERRIDE;
+ protected:
+ void SetRotation(int rotation);
+
+ // Once |v4l2_thread_| is started, only called on that thread.
+ void SetRotationOnV4L2Thread(int rotation);
+
private:
enum InternalState {
kIdle, // The device driver is opened but camera is not in use.
@@ -59,13 +71,18 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice {
InternalState state_;
scoped_ptr<VideoCaptureDevice::Client> client_;
Name device_name_;
- int device_fd_; // File descriptor for the opened camera device.
+ base::ScopedFD device_fd_; // File descriptor for the opened camera device.
base::Thread v4l2_thread_; // Thread used for reading data from the device.
Buffer* buffer_pool_;
int buffer_pool_size_; // Number of allocated buffers.
int timeout_count_;
VideoCaptureFormat capture_format_;
+ // Clockwise rotation in degrees. This value should be 0, 90, 180, or 270.
+  // This is only used on |v4l2_thread_| when it is running, or on the
+  // constructor thread otherwise.
+ int rotation_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceLinux);
};
diff --git a/chromium/media/video/capture/mac/avfoundation_glue.h b/chromium/media/video/capture/mac/avfoundation_glue.h
index f9b23a2c240..ac679c2bf15 100644
--- a/chromium/media/video/capture/mac/avfoundation_glue.h
+++ b/chromium/media/video/capture/mac/avfoundation_glue.h
@@ -41,13 +41,10 @@ class MEDIA_EXPORT AVFoundationGlue {
static NSString* AVCaptureSessionRuntimeErrorNotification();
static NSString* AVCaptureSessionDidStopRunningNotification();
static NSString* AVCaptureSessionErrorKey();
- static NSString* AVCaptureSessionPreset320x240();
- static NSString* AVCaptureSessionPreset640x480();
- static NSString* AVCaptureSessionPreset1280x720();
// Originally from AVVideoSettings.h but in global namespace.
static NSString* AVVideoScalingModeKey();
- static NSString* AVVideoScalingModeResizeAspect();
+ static NSString* AVVideoScalingModeResizeAspectFill();
static Class AVCaptureSessionClass();
static Class AVCaptureVideoDataOutputClass();
@@ -63,7 +60,25 @@ MEDIA_EXPORT
- (BOOL)hasMediaType:(NSString*)mediaType;
- (NSString*)uniqueID;
- (NSString*)localizedName;
-- (BOOL)supportsAVCaptureSessionPreset:(NSString*)preset;
+- (BOOL)isSuspended;
+- (NSArray*)formats;
+
+@end
+
+// Originally AVCaptureDeviceFormat and coming from AVCaptureDevice.h.
+MEDIA_EXPORT
+@interface CrAVCaptureDeviceFormat : NSObject
+
+- (CoreMediaGlue::CMFormatDescriptionRef)formatDescription;
+- (NSArray*)videoSupportedFrameRateRanges;
+
+@end
+
+// Originally AVFrameRateRange and coming from AVCaptureDevice.h.
+MEDIA_EXPORT
+@interface CrAVFrameRateRange : NSObject
+
+- (Float64)maxFrameRate;
@end
@@ -80,9 +95,6 @@ MEDIA_EXPORT
@interface CrAVCaptureSession : NSObject
- (void)release;
-- (BOOL)canSetSessionPreset:(NSString*)preset;
-- (void)setSessionPreset:(NSString*)preset;
-- (NSString*)sessionPreset;
- (void)addInput:(CrAVCaptureInput*)input;
- (void)removeInput:(CrAVCaptureInput*)input;
- (void)addOutput:(CrAVCaptureOutput*)output;
diff --git a/chromium/media/video/capture/mac/avfoundation_glue.mm b/chromium/media/video/capture/mac/avfoundation_glue.mm
index 1610d0f104a..afc92d159bf 100644
--- a/chromium/media/video/capture/mac/avfoundation_glue.mm
+++ b/chromium/media/video/capture/mac/avfoundation_glue.mm
@@ -9,6 +9,7 @@
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/mac/mac_util.h"
+#include "base/metrics/field_trial.h"
#include "media/base/media_switches.h"
namespace {
@@ -27,13 +28,74 @@ class AVFoundationInternal {
CHECK(path);
library_handle_ = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
CHECK(library_handle_) << dlerror();
+
+ struct {
+ NSString** loaded_string;
+ const char* symbol;
+ } av_strings[] = {
+ {&AVCaptureDeviceWasConnectedNotification_,
+ "AVCaptureDeviceWasConnectedNotification"},
+ {&AVCaptureDeviceWasDisconnectedNotification_,
+ "AVCaptureDeviceWasDisconnectedNotification"},
+ {&AVMediaTypeVideo_, "AVMediaTypeVideo"},
+ {&AVMediaTypeAudio_, "AVMediaTypeAudio"},
+ {&AVMediaTypeMuxed_, "AVMediaTypeMuxed"},
+ {&AVCaptureSessionRuntimeErrorNotification_,
+ "AVCaptureSessionRuntimeErrorNotification"},
+ {&AVCaptureSessionDidStopRunningNotification_,
+ "AVCaptureSessionDidStopRunningNotification"},
+ {&AVCaptureSessionErrorKey_, "AVCaptureSessionErrorKey"},
+ {&AVVideoScalingModeKey_, "AVVideoScalingModeKey"},
+ {&AVVideoScalingModeResizeAspectFill_,
+ "AVVideoScalingModeResizeAspectFill"},
+ };
+ for (size_t i = 0; i < arraysize(av_strings); ++i) {
+ *av_strings[i].loaded_string = *reinterpret_cast<NSString**>(
+ dlsym(library_handle_, av_strings[i].symbol));
+ DCHECK(*av_strings[i].loaded_string) << dlerror();
+ }
}
+
NSBundle* bundle() const { return bundle_; }
void* library_handle() const { return library_handle_; }
+ NSString* AVCaptureDeviceWasConnectedNotification() const {
+ return AVCaptureDeviceWasConnectedNotification_;
+ }
+ NSString* AVCaptureDeviceWasDisconnectedNotification() const {
+ return AVCaptureDeviceWasDisconnectedNotification_;
+ }
+ NSString* AVMediaTypeVideo() const { return AVMediaTypeVideo_; }
+ NSString* AVMediaTypeAudio() const { return AVMediaTypeAudio_; }
+ NSString* AVMediaTypeMuxed() const { return AVMediaTypeMuxed_; }
+ NSString* AVCaptureSessionRuntimeErrorNotification() const {
+ return AVCaptureSessionRuntimeErrorNotification_;
+ }
+ NSString* AVCaptureSessionDidStopRunningNotification() const {
+ return AVCaptureSessionDidStopRunningNotification_;
+ }
+ NSString* AVCaptureSessionErrorKey() const {
+ return AVCaptureSessionErrorKey_;
+ }
+ NSString* AVVideoScalingModeKey() const { return AVVideoScalingModeKey_; }
+ NSString* AVVideoScalingModeResizeAspectFill() const {
+ return AVVideoScalingModeResizeAspectFill_;
+ }
+
private:
NSBundle* bundle_;
void* library_handle_;
+  // The following members are replicas of the respective AVFoundation strings.
+ NSString* AVCaptureDeviceWasConnectedNotification_;
+ NSString* AVCaptureDeviceWasDisconnectedNotification_;
+ NSString* AVMediaTypeVideo_;
+ NSString* AVMediaTypeAudio_;
+ NSString* AVMediaTypeMuxed_;
+ NSString* AVCaptureSessionRuntimeErrorNotification_;
+ NSString* AVCaptureSessionDidStopRunningNotification_;
+ NSString* AVCaptureSessionErrorKey_;
+ NSString* AVVideoScalingModeKey_;
+ NSString* AVVideoScalingModeResizeAspectFill_;
DISALLOW_COPY_AND_ASSIGN(AVFoundationInternal);
};
@@ -43,22 +105,17 @@ class AVFoundationInternal {
static base::LazyInstance<AVFoundationInternal> g_avfoundation_handle =
LAZY_INSTANCE_INITIALIZER;
-namespace media {
-
-// TODO(mcasas):http://crbug.com/323536 cache the string pointers.
-static NSString* ReadNSStringPtr(const char* symbol) {
- NSString** string_pointer = reinterpret_cast<NSString**>(
- dlsym(AVFoundationGlue::AVFoundationLibraryHandle(), symbol));
- DCHECK(string_pointer) << dlerror();
- return *string_pointer;
-}
-
-} // namespace media
-
bool AVFoundationGlue::IsAVFoundationSupported() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- return cmd_line->HasSwitch(switches::kEnableAVFoundation) &&
- base::mac::IsOSLionOrLater() && [AVFoundationBundle() load];
+  // DeviceMonitorMac will initialize this static bool from the main UI thread
+  // once, during Chrome startup, so this construction is thread-safe.
+  // Use AVFoundation if it is available and enabled, and QTKit has not been
+  // explicitly forced.
+ static CommandLine* command_line = CommandLine::ForCurrentProcess();
+ static bool is_av_foundation_supported = base::mac::IsOSLionOrLater() &&
+ ((command_line->HasSwitch(switches::kEnableAVFoundation) &&
+ !command_line->HasSwitch(switches::kForceQTKit)) ||
+ base::FieldTrialList::FindFullName("AVFoundationMacVideoCapture")
+ == "Enabled") && [AVFoundationBundle() load];
+ return is_av_foundation_supported;
}
NSBundle const* AVFoundationGlue::AVFoundationBundle() {
@@ -70,55 +127,45 @@ void* AVFoundationGlue::AVFoundationLibraryHandle() {
}
NSString* AVFoundationGlue::AVCaptureDeviceWasConnectedNotification() {
- return media::ReadNSStringPtr("AVCaptureDeviceWasConnectedNotification");
+ return g_avfoundation_handle.Get().AVCaptureDeviceWasConnectedNotification();
}
NSString* AVFoundationGlue::AVCaptureDeviceWasDisconnectedNotification() {
- return media::ReadNSStringPtr("AVCaptureDeviceWasDisconnectedNotification");
+ return
+ g_avfoundation_handle.Get().AVCaptureDeviceWasDisconnectedNotification();
}
NSString* AVFoundationGlue::AVMediaTypeVideo() {
- return media::ReadNSStringPtr("AVMediaTypeVideo");
+ return g_avfoundation_handle.Get().AVMediaTypeVideo();
}
NSString* AVFoundationGlue::AVMediaTypeAudio() {
- return media::ReadNSStringPtr("AVMediaTypeAudio");
+ return g_avfoundation_handle.Get().AVMediaTypeAudio();
}
NSString* AVFoundationGlue::AVMediaTypeMuxed() {
- return media::ReadNSStringPtr("AVMediaTypeMuxed");
+ return g_avfoundation_handle.Get().AVMediaTypeMuxed();
}
NSString* AVFoundationGlue::AVCaptureSessionRuntimeErrorNotification() {
- return media::ReadNSStringPtr("AVCaptureSessionRuntimeErrorNotification");
+ return g_avfoundation_handle.Get().AVCaptureSessionRuntimeErrorNotification();
}
NSString* AVFoundationGlue::AVCaptureSessionDidStopRunningNotification() {
- return media::ReadNSStringPtr("AVCaptureSessionDidStopRunningNotification");
+ return
+ g_avfoundation_handle.Get().AVCaptureSessionDidStopRunningNotification();
}
NSString* AVFoundationGlue::AVCaptureSessionErrorKey() {
- return media::ReadNSStringPtr("AVCaptureSessionErrorKey");
-}
-
-NSString* AVFoundationGlue::AVCaptureSessionPreset320x240() {
- return media::ReadNSStringPtr("AVCaptureSessionPreset320x240");
-}
-
-NSString* AVFoundationGlue::AVCaptureSessionPreset640x480() {
- return media::ReadNSStringPtr("AVCaptureSessionPreset640x480");
-}
-
-NSString* AVFoundationGlue::AVCaptureSessionPreset1280x720() {
- return media::ReadNSStringPtr("AVCaptureSessionPreset1280x720");
+ return g_avfoundation_handle.Get().AVCaptureSessionErrorKey();
}
NSString* AVFoundationGlue::AVVideoScalingModeKey() {
- return media::ReadNSStringPtr("AVVideoScalingModeKey");
+ return g_avfoundation_handle.Get().AVVideoScalingModeKey();
}
-NSString* AVFoundationGlue::AVVideoScalingModeResizeAspect() {
- return media::ReadNSStringPtr("AVVideoScalingModeResizeAspect");
+NSString* AVFoundationGlue::AVVideoScalingModeResizeAspectFill() {
+ return g_avfoundation_handle.Get().AVVideoScalingModeResizeAspectFill();
}
Class AVFoundationGlue::AVCaptureSessionClass() {
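
avfoundation_glue.mm now resolves all the AVFoundation NSString constants once, in a table-driven dlsym() loop inside AVFoundationInternal, instead of re-running dlsym() on every accessor call as the removed ReadNSStringPtr() did. A standalone C++ sketch of the same caching pattern; "cos" resolved from the running image stands in for the AVFoundation symbols.

#include <dlfcn.h>

#include <cstdio>

// Table-driven symbol caching: resolve each symbol once and keep the
// pointer, as AVFoundationInternal's constructor does for its NSStrings.
int main() {
  void* handle = dlopen(NULL, RTLD_LAZY);  // The running image itself.
  if (!handle)
    return 1;

  struct Entry {
    void** slot;
    const char* symbol;
  };
  void* cos_ptr = NULL;
  Entry table[] = {
      {&cos_ptr, "cos"},  // Stand-in; the glue loads NSString* constants.
  };
  for (Entry& entry : table) {
    *entry.slot = dlsym(handle, entry.symbol);
    if (!*entry.slot)
      fprintf(stderr, "missing symbol %s: %s\n", entry.symbol, dlerror());
  }

  // Every later access reuses the cached pointer; no further dlsym() calls.
  typedef double (*CosFn)(double);
  if (cos_ptr)
    printf("cos(0) = %f\n", reinterpret_cast<CosFn>(cos_ptr)(0.0));
  dlclose(handle);
  return 0;
}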
diff --git a/chromium/media/video/capture/mac/coremedia_glue.h b/chromium/media/video/capture/mac/coremedia_glue.h
index a1f21eb1480..c69f279ed33 100644
--- a/chromium/media/video/capture/mac/coremedia_glue.h
+++ b/chromium/media/video/capture/mac/coremedia_glue.h
@@ -29,6 +29,20 @@ class MEDIA_EXPORT CoreMediaGlue {
CMTimeEpoch epoch;
} CMTime;
+ // Originally from CMFormatDescription.h.
+ typedef const struct opaqueCMFormatDescription* CMFormatDescriptionRef;
+ typedef CMFormatDescriptionRef CMVideoFormatDescriptionRef;
+ typedef struct {
+ int32_t width;
+ int32_t height;
+ } CMVideoDimensions;
+ enum {
+ kCMPixelFormat_422YpCbCr8_yuvs = 'yuvs',
+ };
+ enum {
+ kCMVideoCodecType_JPEG_OpenDML = 'dmb1',
+ };
+
// Originally from CMSampleBuffer.h.
typedef struct OpaqueCMSampleBuffer* CMSampleBufferRef;
@@ -39,6 +53,12 @@ class MEDIA_EXPORT CoreMediaGlue {
static CVImageBufferRef CMSampleBufferGetImageBuffer(
CMSampleBufferRef buffer);
+ // Originally from CMFormatDescription.h.
+ static FourCharCode CMFormatDescriptionGetMediaSubType(
+ CMFormatDescriptionRef desc);
+ static CMVideoDimensions CMVideoFormatDescriptionGetDimensions(
+ CMVideoFormatDescriptionRef videoDesc);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CoreMediaGlue);
};
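
The new enums above ('yuvs', 'dmb1') are four-character codes: 32-bit integers whose bytes spell a short tag. On common compilers a multi-character literal puts the first character in the most significant byte, which is what this sketch assumes (V4L2's v4l2_fourcc() macro packs in the opposite byte order).

#include <cstdint>
#include <cstdio>

// Pack four characters CoreMedia-style: first character in the high byte,
// matching multi-char literals like 'yuvs' on common compilers.
constexpr uint32_t FourCC(char a, char b, char c, char d) {
  return (static_cast<uint32_t>(a) << 24) | (static_cast<uint32_t>(b) << 16) |
         (static_cast<uint32_t>(c) << 8) | static_cast<uint32_t>(d);
}

void PrintFourCC(uint32_t code) {
  printf("%c%c%c%c\n", static_cast<char>(code >> 24),
         static_cast<char>(code >> 16), static_cast<char>(code >> 8),
         static_cast<char>(code));
}

int main() {
  PrintFourCC(FourCC('y', 'u', 'v', 's'));  // kCMPixelFormat_422YpCbCr8_yuvs
  PrintFourCC(FourCC('d', 'm', 'b', '1'));  // kCMVideoCodecType_JPEG_OpenDML
  return 0;
}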
diff --git a/chromium/media/video/capture/mac/coremedia_glue.mm b/chromium/media/video/capture/mac/coremedia_glue.mm
index f94256b6c92..1bb4b8d0b0f 100644
--- a/chromium/media/video/capture/mac/coremedia_glue.mm
+++ b/chromium/media/video/capture/mac/coremedia_glue.mm
@@ -19,6 +19,11 @@ class CoreMediaLibraryInternal {
typedef CoreMediaGlue::CMTime (*CMTimeMakeMethod)(int64_t, int32_t);
typedef CVImageBufferRef (*CMSampleBufferGetImageBufferMethod)(
CoreMediaGlue::CMSampleBufferRef);
+ typedef FourCharCode (*CMFormatDescriptionGetMediaSubTypeMethod)(
+ CoreMediaGlue::CMFormatDescriptionRef desc);
+ typedef CoreMediaGlue::CMVideoDimensions
+ (*CMVideoFormatDescriptionGetDimensionsMethod)(
+ CoreMediaGlue::CMVideoFormatDescriptionRef videoDesc);
CoreMediaLibraryInternal() {
NSBundle* bundle = [NSBundle
@@ -38,6 +43,16 @@ class CoreMediaLibraryInternal {
reinterpret_cast<CMSampleBufferGetImageBufferMethod>(
dlsym(library_handle, "CMSampleBufferGetImageBuffer"));
CHECK(cm_sample_buffer_get_image_buffer_method_) << dlerror();
+
+ cm_format_description_get_media_sub_type_method_ =
+ reinterpret_cast<CMFormatDescriptionGetMediaSubTypeMethod>(
+ dlsym(library_handle, "CMFormatDescriptionGetMediaSubType"));
+ CHECK(cm_format_description_get_media_sub_type_method_) << dlerror();
+
+ cm_video_format_description_get_dimensions_method_ =
+ reinterpret_cast<CMVideoFormatDescriptionGetDimensionsMethod>(
+ dlsym(library_handle, "CMVideoFormatDescriptionGetDimensions"));
+ CHECK(cm_video_format_description_get_dimensions_method_) << dlerror();
}
const CMTimeMakeMethod& cm_time_make() const { return cm_time_make_; }
@@ -45,10 +60,22 @@ class CoreMediaLibraryInternal {
cm_sample_buffer_get_image_buffer_method() const {
return cm_sample_buffer_get_image_buffer_method_;
}
+ const CMFormatDescriptionGetMediaSubTypeMethod&
+ cm_format_description_get_media_sub_type_method() const {
+ return cm_format_description_get_media_sub_type_method_;
+ }
+ const CMVideoFormatDescriptionGetDimensionsMethod&
+ cm_video_format_description_get_dimensions_method() const {
+ return cm_video_format_description_get_dimensions_method_;
+ }
private:
CMTimeMakeMethod cm_time_make_;
CMSampleBufferGetImageBufferMethod cm_sample_buffer_get_image_buffer_method_;
+ CMFormatDescriptionGetMediaSubTypeMethod
+ cm_format_description_get_media_sub_type_method_;
+ CMVideoFormatDescriptionGetDimensionsMethod
+ cm_video_format_description_get_dimensions_method_;
DISALLOW_COPY_AND_ASSIGN(CoreMediaLibraryInternal);
};
@@ -58,13 +85,30 @@ class CoreMediaLibraryInternal {
static base::LazyInstance<CoreMediaLibraryInternal> g_coremedia_handle =
LAZY_INSTANCE_INITIALIZER;
+// static
CoreMediaGlue::CMTime CoreMediaGlue::CMTimeMake(int64_t value,
int32_t timescale) {
return g_coremedia_handle.Get().cm_time_make()(value, timescale);
}
+// static
CVImageBufferRef CoreMediaGlue::CMSampleBufferGetImageBuffer(
CMSampleBufferRef buffer) {
return g_coremedia_handle.Get().cm_sample_buffer_get_image_buffer_method()(
buffer);
}
+
+// static
+FourCharCode CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
+ CMFormatDescriptionRef desc) {
+ return g_coremedia_handle.Get()
+ .cm_format_description_get_media_sub_type_method()(desc);
+}
+
+// static
+CoreMediaGlue::CMVideoDimensions
+ CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(
+ CMVideoFormatDescriptionRef videoDesc) {
+ return g_coremedia_handle.Get()
+ .cm_video_format_description_get_dimensions_method()(videoDesc);
+}
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
index 0e617e90cda..1c607b1918b 100644
--- a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
@@ -10,6 +10,8 @@
#import "base/mac/scoped_nsobject.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
+#include "media/video/capture/video_capture_device.h"
+#include "media/video/capture/video_capture_types.h"
#import "media/video/capture/mac/avfoundation_glue.h"
#import "media/video/capture/mac/platform_video_capturing_mac.h"
@@ -74,12 +76,17 @@ class VideoCaptureDeviceMac;
CrAVCaptureDeviceInput* captureDeviceInput_;
base::scoped_nsobject<CrAVCaptureVideoDataOutput> captureVideoDataOutput_;
- base::ThreadChecker thread_checker_;
+ base::ThreadChecker main_thread_checker_;
+ base::ThreadChecker callback_thread_checker_;
}
// Returns a dictionary of capture devices with friendly name and unique id.
+ (NSDictionary*)deviceNames;
+// Retrieve the capture supported formats for a given device |name|.
++ (void)getDevice:(const media::VideoCaptureDevice::Name&)name
+ supportedFormats:(media::VideoCaptureFormats*)formats;
+
// Initializes the instance and the underlying capture session and registers the
// frame receiver.
- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
index a6bf920a2c2..2412aac91d4 100644
--- a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
@@ -20,8 +20,9 @@
  // library is loaded and initialized by the device monitoring.
NSArray* devices = [AVCaptureDeviceGlue devices];
for (CrAVCaptureDevice* device in devices) {
- if ([device hasMediaType:AVFoundationGlue::AVMediaTypeVideo()] ||
- [device hasMediaType:AVFoundationGlue::AVMediaTypeMuxed()]) {
+ if (([device hasMediaType:AVFoundationGlue::AVMediaTypeVideo()] ||
+ [device hasMediaType:AVFoundationGlue::AVMediaTypeMuxed()]) &&
+ ![device isSuspended]) {
[deviceNames setObject:[device localizedName]
forKey:[device uniqueID]];
}
@@ -37,11 +38,59 @@
return deviceNames;
}
++ (void)getDevice:(const media::VideoCaptureDevice::Name&)name
+    supportedFormats:(media::VideoCaptureFormats*)formats {
+ NSArray* devices = [AVCaptureDeviceGlue devices];
+ CrAVCaptureDevice* device = nil;
+ for (device in devices) {
+ if ([[device uniqueID] UTF8String] == name.id())
+ break;
+ }
+ if (device == nil)
+ return;
+ for (CrAVCaptureDeviceFormat* format in device.formats) {
+    // MediaSubType is a CMPixelFormatType but can be used as a
+    // CVPixelFormatType as well, according to CMFormatDescription.h.
+ media::VideoPixelFormat pixelFormat = media::PIXEL_FORMAT_UNKNOWN;
+ switch (CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
+ [format formatDescription])) {
+ case kCVPixelFormatType_422YpCbCr8: // Typical.
+ pixelFormat = media::PIXEL_FORMAT_UYVY;
+ break;
+ case CoreMediaGlue::kCMPixelFormat_422YpCbCr8_yuvs:
+ pixelFormat = media::PIXEL_FORMAT_YUY2;
+ break;
+ case CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML:
+        pixelFormat = media::PIXEL_FORMAT_MJPEG;
+        break;
+      default:
+ break;
+ }
+
+ CoreMediaGlue::CMVideoDimensions dimensions =
+ CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(
+ [format formatDescription]);
+
+ for (CrAVFrameRateRange* frameRate in
+ [format videoSupportedFrameRateRanges]) {
+      media::VideoCaptureFormat captureFormat(
+          gfx::Size(dimensions.width, dimensions.height),
+          static_cast<int>(frameRate.maxFrameRate),
+          pixelFormat);
+      formats->push_back(captureFormat);
+      DVLOG(2) << name.name() << " resolution: "
+               << captureFormat.frame_size.ToString() << ", fps: "
+               << captureFormat.frame_rate << ", pixel format: "
+               << captureFormat.pixel_format;
+ }
+ }
+}
+
#pragma mark Public methods
- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
if ((self = [super init])) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
DCHECK(frameReceiver);
[self setFrameReceiver:frameReceiver];
captureSession_.reset(
@@ -62,7 +111,7 @@
- (BOOL)setCaptureDevice:(NSString*)deviceId {
DCHECK(captureSession_);
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
if (!deviceId) {
// First stop the capture session, if it's running.
@@ -80,7 +129,8 @@
// Look for input device with requested name.
captureDevice_ = [AVCaptureDeviceGlue deviceWithUniqueID:deviceId];
if (!captureDevice_) {
- DLOG(ERROR) << "Could not open video capture device.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"Could not open video capture device."]];
return NO;
}
@@ -91,8 +141,10 @@
error:&error];
if (!captureDeviceInput_) {
captureDevice_ = nil;
- DLOG(ERROR) << "Could not create video capture input: "
- << [[error localizedDescription] UTF8String];
+ [self sendErrorString:[NSString
+ stringWithFormat:@"Could not create video capture input (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
[captureSession_ addInput:captureDeviceInput_];
@@ -103,7 +155,8 @@
[[AVFoundationGlue::AVCaptureVideoDataOutputClass() alloc] init]);
if (!captureVideoDataOutput_) {
[captureSession_ removeInput:captureDeviceInput_];
- DLOG(ERROR) << "Could not create video data output.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"Could not create video data output."]];
return NO;
}
[captureVideoDataOutput_
@@ -115,74 +168,56 @@
}
- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate {
- DCHECK(thread_checker_.CalledOnValidThread());
+ // This method is called from either VideoCaptureDeviceMac::AllocateAndStart()
+ // or VideoCaptureDeviceMac::ReceiveFrame(), depending on the session's
+ // running state; VCDM::ReceiveFrame() calls here to change the aspect ratio.
+ DCHECK((![captureSession_ isRunning] &&
+ main_thread_checker_.CalledOnValidThread()) ||
+ callback_thread_checker_.CalledOnValidThread());
+
frameWidth_ = width;
frameHeight_ = height;
frameRate_ = frameRate;
- // Identify the sessionPreset that corresponds to the desired resolution.
- NSString* sessionPreset;
- if (width == 1280 && height == 720 && [captureSession_ canSetSessionPreset:
- AVFoundationGlue::AVCaptureSessionPreset1280x720()]) {
- sessionPreset = AVFoundationGlue::AVCaptureSessionPreset1280x720();
- } else if (width == 640 && height == 480 && [captureSession_
- canSetSessionPreset:
- AVFoundationGlue::AVCaptureSessionPreset640x480()]) {
- sessionPreset = AVFoundationGlue::AVCaptureSessionPreset640x480();
- } else if (width == 320 && height == 240 && [captureSession_
- canSetSessionPreset:
- AVFoundationGlue::AVCaptureSessionPreset320x240()]) {
- sessionPreset = AVFoundationGlue::AVCaptureSessionPreset320x240();
- } else {
- DLOG(ERROR) << "Unsupported resolution (" << width << "x" << height << ")";
- return NO;
- }
- [captureSession_ setSessionPreset:sessionPreset];
-
- // Check that our capture Device can be used with the current preset.
- if (![captureDevice_ supportsAVCaptureSessionPreset:
- [captureSession_ sessionPreset]]){
- DLOG(ERROR) << "Video capture device does not support current preset";
- return NO;
- }
-
- // Despite all Mac documentation detailing that setting the sessionPreset is
- // enough, that is not the case for, at least, the MacBook Air built-in
- // FaceTime HD Camera, and the capture output has to be configured as well.
- // The reason for this mismatch is probably because most of the AVFoundation
- // docs are written for iOS and not for MacOsX.
- // AVVideoScalingModeKey() refers to letterboxing yes/no and preserve aspect
- // ratio yes/no when scaling. Currently we set letterbox and preservation.
+ // The capture output has to be configured, despite the Mac documentation
+ // stating that setting the sessionPreset would be enough. The likely reason
+ // for this mismatch is that most of the AVFoundation docs are written for
+ // iOS and not for Mac OS X. AVVideoScalingModeKey() controls letterboxing
+ // yes/no and aspect-ratio preservation yes/no when scaling. Currently we set
+ // cropping and preservation.
NSDictionary* videoSettingsDictionary = @{
(id)kCVPixelBufferWidthKey : @(width),
(id)kCVPixelBufferHeightKey : @(height),
(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_422YpCbCr8),
AVFoundationGlue::AVVideoScalingModeKey() :
- AVFoundationGlue::AVVideoScalingModeResizeAspect()
+ AVFoundationGlue::AVVideoScalingModeResizeAspectFill()
};
[captureVideoDataOutput_ setVideoSettings:videoSettingsDictionary];
CrAVCaptureConnection* captureConnection = [captureVideoDataOutput_
connectionWithMediaType:AVFoundationGlue::AVMediaTypeVideo()];
- // TODO(mcasas): Check selector existence, related to bugs
- // http://crbug.com/327532 and http://crbug.com/328096.
+ // Check selector existence, related to bugs http://crbug.com/327532 and
+ // http://crbug.com/328096.
+ // CMTimeMake accepts integer arguments; use media::kFrameRatePrecision to
+ // express the 1/|frameRate| frame duration as a rational.
if ([captureConnection
respondsToSelector:@selector(isVideoMinFrameDurationSupported)] &&
[captureConnection isVideoMinFrameDurationSupported]) {
[captureConnection setVideoMinFrameDuration:
- CoreMediaGlue::CMTimeMake(1, frameRate)];
+ CoreMediaGlue::CMTimeMake(media::kFrameRatePrecision,
+ frameRate * media::kFrameRatePrecision)];
}
if ([captureConnection
respondsToSelector:@selector(isVideoMaxFrameDurationSupported)] &&
[captureConnection isVideoMaxFrameDurationSupported]) {
[captureConnection setVideoMaxFrameDuration:
- CoreMediaGlue::CMTimeMake(1, frameRate)];
+ CoreMediaGlue::CMTimeMake(media::kFrameRatePrecision,
+ frameRate * media::kFrameRatePrecision)];
}
return YES;
}
- (BOOL)startCapture {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
if (!captureSession_) {
DLOG(ERROR) << "Video capture session not initialized.";
return NO;
@@ -198,7 +233,7 @@
}
- (void)stopCapture {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(main_thread_checker_.CalledOnValidThread());
if ([captureSession_ isRunning])
[captureSession_ stopRunning]; // Synchronous.
[[NSNotificationCenter defaultCenter] removeObserver:self];
@@ -210,6 +245,10 @@
- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
fromConnection:(CrAVCaptureConnection*)connection {
+ // AVFoundation calls back on a number of different threads, depending on, at
+ // least, whether Chrome is in the foreground or the background. Sample the
+ // calling thread here by re-binding the checker to it.
+ callback_thread_checker_.DetachFromThread();
+ callback_thread_checker_.CalledOnValidThread();
CVImageBufferRef videoFrame =
CoreMediaGlue::CMSampleBufferGetImageBuffer(sampleBuffer);
// Lock the frame and calculate frame size.
@@ -238,9 +277,17 @@
- (void)onVideoError:(NSNotification*)errorNotification {
NSError* error = base::mac::ObjCCast<NSError>([[errorNotification userInfo]
objectForKey:AVFoundationGlue::AVCaptureSessionErrorKey()]);
+ [self sendErrorString:[NSString
+ stringWithFormat:@"%@: %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
+}
+
+- (void)sendErrorString:(NSString*)error {
+ DLOG(ERROR) << [error UTF8String];
base::AutoLock lock(lock_);
if (frameReceiver_)
- frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
+ frameReceiver_->ReceiveError([error UTF8String]);
}
@end
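The detach-and-rebind idiom in -captureOutput:didOutputSampleBuffer:fromConnection: above is worth seeing in isolation. A minimal C++ sketch, assuming only base::ThreadChecker and DCHECK: DetachFromThread() forgets the current binding, and the next CalledOnValidThread() re-binds the checker to whichever thread makes the call, so later checks would catch a thread switch mid-delivery.

#include "base/logging.h"
#include "base/threading/thread_checker.h"

class CallbackSite {
 public:
  // Invoked by a framework that gives no guarantees about the calling thread.
  void OnFrameworkCallback() {
    // Forget the previously bound thread, then bind to the current caller.
    callback_thread_checker_.DetachFromThread();
    DCHECK(callback_thread_checker_.CalledOnValidThread());
    // ... process the callback on whatever thread delivered it ...
  }

 private:
  base::ThreadChecker callback_thread_checker_;
};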
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac.h b/chromium/media/video/capture/mac/video_capture_device_factory_mac.h
new file mode 100644
index 00000000000..7bc815381e0
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_factory_mac.h
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a VideoCaptureDeviceFactory class for Mac.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate Mac devices.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryMac :
+ public VideoCaptureDeviceFactory {
+ public:
+ static bool PlatformSupportsAVFoundation();
+
+ explicit VideoCaptureDeviceFactoryMac(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+ virtual ~VideoCaptureDeviceFactoryMac();
+
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) OVERRIDE;
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) OVERRIDE;
+ virtual void EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) OVERRIDE;
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) OVERRIDE;
+
+ private:
+ // Cache of |ui_task_runner|, used to enumerate QTKit devices on the UI thread.
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryMac);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
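A hypothetical usage sketch for the factory declared above, assuming AVFoundation is available (on the QTKit path, enumeration must go through EnumerateDeviceNames() and the UI thread instead); the function name is illustrative, not part of this patch:

scoped_ptr<media::VideoCaptureDevice> OpenFirstCamera(
    scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
  media::VideoCaptureDeviceFactoryMac factory(ui_task_runner);
  media::VideoCaptureDevice::Names names;
  factory.GetDeviceNames(&names);  // AVFoundation-only on the device thread.
  if (names.empty())
    return scoped_ptr<media::VideoCaptureDevice>();
  return factory.Create(names.front());
}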
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm b/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm
new file mode 100644
index 00000000000..d58a25c6586
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm
@@ -0,0 +1,177 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/mac/video_capture_device_factory_mac.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/task_runner_util.h"
+#import "media/video/capture/mac/avfoundation_glue.h"
+#include "media/video/capture/mac/video_capture_device_mac.h"
+#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
+#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+
+namespace media {
+
+// Some devices are not correctly supported in AVFoundation, e.g. Blackmagic;
+// see http://crbug.com/347371. The devices are identified by USB Vendor ID and
+// by a characteristic substring of the name, usually the vendor's name.
+const struct NameAndVid {
+ const char* vid;
+ const char* name;
+} kBlacklistedCameras[] = { { "a82c", "Blackmagic" } };
+
+// In device identifiers, the USB VID and PID are stored in 4 bytes each.
+const size_t kVidPidSize = 4;
+
+static scoped_ptr<media::VideoCaptureDevice::Names>
+EnumerateDevicesUsingQTKit() {
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ NSMutableDictionary* capture_devices =
+ [[[NSMutableDictionary alloc] init] autorelease];
+ [VideoCaptureDeviceQTKit getDeviceNames:capture_devices];
+ for (NSString* key in capture_devices) {
+ VideoCaptureDevice::Name name(
+ [[capture_devices valueForKey:key] UTF8String],
+ [key UTF8String], VideoCaptureDevice::Name::QTKIT);
+ device_names->push_back(name);
+ }
+ return device_names.Pass();
+}
+
+static void RunDevicesEnumeratedCallback(
+ const base::Callback<void(scoped_ptr<media::VideoCaptureDevice::Names>)>&
+ callback,
+ scoped_ptr<media::VideoCaptureDevice::Names> device_names) {
+ callback.Run(device_names.Pass());
+}
+
+// static
+bool VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation() {
+ return AVFoundationGlue::IsAVFoundationSupported();
+}
+
+VideoCaptureDeviceFactoryMac::VideoCaptureDeviceFactoryMac(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
+ : ui_task_runner_(ui_task_runner) {
+ thread_checker_.DetachFromThread();
+}
+
+VideoCaptureDeviceFactoryMac::~VideoCaptureDeviceFactoryMac() {}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryMac::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_NE(device_name.capture_api_type(),
+ VideoCaptureDevice::Name::API_TYPE_UNKNOWN);
+
+ // Check device presence only for AVFoundation API, since it is too expensive
+ // and brittle for QTKit. The actual initialization at device level will fail
+ // subsequently if the device is not present.
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ GetDeviceNames(device_names.get());
+
+ VideoCaptureDevice::Names::iterator it = device_names->begin();
+ for (; it != device_names->end(); ++it) {
+ if (it->id() == device_name.id())
+ break;
+ }
+ if (it == device_names->end())
+ return scoped_ptr<VideoCaptureDevice>();
+ }
+
+ scoped_ptr<VideoCaptureDeviceMac> capture_device(
+ new VideoCaptureDeviceMac(device_name));
+ if (!capture_device->Init(device_name.capture_api_type())) {
+ LOG(ERROR) << "Could not initialize VideoCaptureDevice.";
+ capture_device.reset();
+ }
+ return scoped_ptr<VideoCaptureDevice>(capture_device.Pass());
+}
+
+void VideoCaptureDeviceFactoryMac::GetDeviceNames(
+ VideoCaptureDevice::Names* device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Loop through all available devices and add to |device_names|.
+ NSDictionary* capture_devices;
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ bool is_any_device_blacklisted = false;
+ DVLOG(1) << "Enumerating video capture devices using AVFoundation";
+ capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
+ std::string device_vid;
+ // Enumerate all devices found by AVFoundation, translate the info for each
+ // to class Name and add it to |device_names|.
+ for (NSString* key in capture_devices) {
+ VideoCaptureDevice::Name name(
+ [[capture_devices valueForKey:key] UTF8String],
+ [key UTF8String], VideoCaptureDevice::Name::AVFOUNDATION);
+ device_names->push_back(name);
+ // Extract the device's Vendor ID and compare to all blacklisted ones.
+ device_vid = name.GetModel().substr(0, kVidPidSize);
+ for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
+ is_any_device_blacklisted |=
+ !strcasecmp(device_vid.c_str(), kBlacklistedCameras[i].vid);
+ if (is_any_device_blacklisted)
+ break;
+ }
+ }
+ // If there is any device blacklisted in the system, walk the QTKit device
+ // list and add those devices with a blacklisted name to |device_names|.
+ // AVFoundation and QTKit device lists partially overlap, so add a "QTKit"
+ // prefix to the latter ones to distinguish them from the AVFoundation ones.
+ if (is_any_device_blacklisted) {
+ capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+ for (NSString* key in capture_devices) {
+ NSString* device_name = [capture_devices valueForKey:key];
+ for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
+ if ([device_name rangeOfString:@(kBlacklistedCameras[i].name)
+ options:NSCaseInsensitiveSearch].length != 0) {
+ DVLOG(1) << "Enumerated blacklisted " << [device_name UTF8String];
+ VideoCaptureDevice::Name name(
+ "QTKit " + std::string([device_name UTF8String]),
+ [key UTF8String], VideoCaptureDevice::Name::QTKIT);
+ device_names->push_back(name);
+ }
+ }
+ }
+ }
+ } else {
+ // QTKit devices must not be enumerated on the device thread.
+ NOTREACHED();
+ }
+}
+
+void VideoCaptureDeviceFactoryMac::EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ GetDeviceNames(device_names.get());
+ callback.Run(device_names.Pass());
+ } else {
+ DVLOG(1) << "Enumerating video capture devices using QTKit";
+ base::PostTaskAndReplyWithResult(ui_task_runner_, FROM_HERE,
+ base::Bind(&EnumerateDevicesUsingQTKit),
+ base::Bind(&RunDevicesEnumeratedCallback, callback));
+ }
+}
+
+void VideoCaptureDeviceFactoryMac::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (device.capture_api_type() == VideoCaptureDevice::Name::AVFOUNDATION) {
+ DVLOG(1) << "Enumerating video capture capabilities, AVFoundation";
+ [VideoCaptureDeviceAVFoundation getDevice:device
+ supportedFormats:supported_formats];
+ } else {
+ NOTIMPLEMENTED();
+ }
+}
+
+} // namespace media
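The Vendor ID comparison inside GetDeviceNames() above, reduced to a sketch with kVidPidSize inlined and the blacklist trimmed to the single entry this file defines; GetModel() yields "vvvv:pppp" (hex VID, colon, hex PID):

#include <strings.h>  // strcasecmp().

#include <string>

bool IsBlacklistedVid(const std::string& model) {
  static const char* const kBlacklistedVids[] = { "a82c" };  // Blackmagic.
  const std::string vid = model.substr(0, 4);  // First 4 chars are the VID.
  const size_t count = sizeof(kBlacklistedVids) / sizeof(kBlacklistedVids[0]);
  for (size_t i = 0; i < count; ++i) {
    if (strcasecmp(vid.c_str(), kBlacklistedVids[i]) == 0)
      return true;
  }
  return false;
}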
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm b/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm
new file mode 100644
index 00000000000..bbc2dfb6b7c
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/media_switches.h"
+#import "media/video/capture/mac/avfoundation_glue.h"
+#include "media/video/capture/mac/video_capture_device_factory_mac.h"
+#include "media/video/capture/mac/video_capture_device_mac.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class VideoCaptureDeviceFactoryMacTest : public testing::Test {
+ virtual void SetUp() {
+ CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kEnableAVFoundation);
+ }
+};
+
+TEST_F(VideoCaptureDeviceFactoryMacTest, ListDevicesAVFoundation) {
+ if (!AVFoundationGlue::IsAVFoundationSupported()) {
+ DVLOG(1) << "AVFoundation not supported, skipping test.";
+ return;
+ }
+ VideoCaptureDeviceFactoryMac video_capture_device_factory(
+ base::MessageLoopProxy::current());
+
+ VideoCaptureDevice::Names names;
+ video_capture_device_factory.GetDeviceNames(&names);
+ if (!names.size()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+ // There should be no blacklisted (i.e. QTKit) devices in the list.
+ for (VideoCaptureDevice::Names::const_iterator it = names.begin();
+ it != names.end(); ++it) {
+ EXPECT_EQ(it->capture_api_type(), VideoCaptureDevice::Name::AVFOUNDATION);
+ }
+}
+
+}  // namespace media
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.h b/chromium/media/video/capture/mac/video_capture_device_mac.h
index 474e7e1bf45..36dc015e373 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.h
@@ -3,8 +3,9 @@
// found in the LICENSE file.
// MacOSX implementation of generic VideoCaptureDevice, using either QTKit or
-// AVFoundation as native capture API. QTKit is used in OSX versions 10.6 and
-// previous, and AVFoundation is used in the rest.
+// AVFoundation as native capture API. QTKit is available in all OSX versions,
+// although formally deprecated in 10.9, and AVFoundation is available in
+// versions 10.7 (Lion) and later.
#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
@@ -14,28 +15,31 @@
#include "base/compiler_specific.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
@protocol PlatformVideoCapturingMac;
+namespace base {
+class SingleThreadTaskRunner;
+}
+
namespace media {
-// Called by VideoCaptureManager to open, close and start, stop video capture
-// devices.
+// Called by VideoCaptureManager to open, close and start, stop Mac video
+// capture devices.
class VideoCaptureDeviceMac : public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceMac(const Name& device_name);
virtual ~VideoCaptureDeviceMac();
// VideoCaptureDevice implementation.
- virtual void AllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client)
- OVERRIDE;
+ virtual void AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) OVERRIDE;
virtual void StopAndDeAllocate() OVERRIDE;
- bool Init();
+ bool Init(VideoCaptureDevice::Name::CaptureApiType capture_api_type);
// Called to deliver captured video frames.
void ReceiveFrame(const uint8* video_frame,
@@ -48,6 +52,7 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
private:
void SetErrorState(const std::string& reason);
+ void LogMessage(const std::string& message);
bool UpdateCaptureResolution();
// Flag indicating the internal state.
@@ -62,20 +67,24 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
scoped_ptr<VideoCaptureDevice::Client> client_;
VideoCaptureFormat capture_format_;
- bool sent_frame_info_;
+ // These variables control the two-step configure-start process for QTKit HD:
+ // the device is first started with no configuration and the captured frames
+ // are inspected to check if the camera really supports HD. AVFoundation does
+ // not need this process, so |final_resolution_selected_| starts out true in
+ // that case.
+ bool final_resolution_selected_;
bool tried_to_square_pixels_;
// Only read and write state_ from inside this loop.
- const scoped_refptr<base::MessageLoopProxy> loop_proxy_;
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
InternalState state_;
- // Used with Bind and PostTask to ensure that methods aren't called
- // after the VideoCaptureDeviceMac is destroyed.
- base::WeakPtrFactory<VideoCaptureDeviceMac> weak_factory_;
- base::WeakPtr<VideoCaptureDeviceMac> weak_this_;
-
id<PlatformVideoCapturingMac> capture_device_;
+ // Used with Bind and PostTask to ensure that methods aren't called after the
+ // VideoCaptureDeviceMac is destroyed.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<VideoCaptureDeviceMac> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceMac);
};
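The NOTE above about invalidating weak pointers first leans on a C++ guarantee worth spelling out: data members are destroyed in reverse declaration order, so declaring the WeakPtrFactory last means it is destroyed first, invalidating outstanding weak pointers before any other member is torn down. A minimal sketch with a hypothetical |resource_| member:

#include "base/memory/weak_ptr.h"

class Example {
 public:
  Example() : resource_(0), weak_factory_(this) {}

 private:
  int resource_;  // Hypothetical state; destroyed second.
  // Destroyed first, so tasks bound to a WeakPtr<Example> cannot run against
  // a partially destroyed object.
  base::WeakPtrFactory<Example> weak_factory_;
};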
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.mm b/chromium/media/video/capture/mac/video_capture_device_mac.mm
index dba4fa1c6fb..60278b76157 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.mm
@@ -4,9 +4,17 @@
#include "media/video/capture/mac/video_capture_device_mac.h"
+#include <IOKit/IOCFPlugIn.h>
+#include <IOKit/usb/IOUSBLib.h>
+#include <IOKit/usb/USBSpec.h>
+
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/mac/scoped_ioobject.h"
+#include "base/mac/scoped_ioplugininterface.h"
+#include "base/strings/string_number_conversions.h"
#include "base/time/time.h"
#import "media/video/capture/mac/avfoundation_glue.h"
#import "media/video/capture/mac/platform_video_capturing_mac.h"
@@ -18,30 +26,53 @@ namespace media {
const int kMinFrameRate = 1;
const int kMaxFrameRate = 30;
-// In QT device identifiers, the USB VID and PID are stored in 4 bytes each.
+// In device identifiers, the USB VID and PID are stored in 4 bytes each.
const size_t kVidPidSize = 4;
-struct Resolution {
- int width;
- int height;
-};
-
-const Resolution kQVGA = { 320, 240 },
- kVGA = { 640, 480 },
- kHD = { 1280, 720 };
-
-const Resolution* const kWellSupportedResolutions[] = {
- &kQVGA,
- &kVGA,
- &kHD,
+const struct Resolution {
+ const int width;
+ const int height;
+} kQVGA = { 320, 240 },
+ kVGA = { 640, 480 },
+ kHD = { 1280, 720 };
+
+const struct Resolution* const kWellSupportedResolutions[] = {
+ &kQVGA,
+ &kVGA,
+ &kHD,
};
// Rescaling the image to fix the pixel aspect ratio runs the risk of making
// the aspect ratio worse, if QTKit selects a new source mode with a different
-// shape. This constant ensures that we don't take this risk if the current
+// shape. This constant ensures that we don't take this risk if the current
// aspect ratio is tolerable.
const float kMaxPixelAspectRatio = 1.15;
+// The following constants are extracted from the specification "Universal
+// Serial Bus Device Class Definition for Video Devices", Rev. 1.1 June 1, 2005.
+// http://www.usb.org/developers/devclass_docs/USB_Video_Class_1_1.zip
+// CS_INTERFACE: Sec. A.4 "Video Class-Specific Descriptor Types".
+const int kVcCsInterface = 0x24;
+// VC_PROCESSING_UNIT: Sec. A.5 "Video Class-Specific VC Interface Descriptor
+// Subtypes".
+const int kVcProcessingUnit = 0x5;
+// SET_CUR: Sec. A.8 "Video Class-Specific Request Codes".
+const int kVcRequestCodeSetCur = 0x1;
+// PU_POWER_LINE_FREQUENCY_CONTROL: Sec. A.9.5 "Processing Unit Control
+// Selectors".
+const int kPuPowerLineFrequencyControl = 0x5;
+// Sec. 4.2.2.3.5 Power Line Frequency Control.
+const int k50Hz = 1;
+const int k60Hz = 2;
+const int kPuPowerLineFrequencyControlCommandSize = 1;
+
+// Addition to the IOUSB family of structures, with subtype and unit ID.
+typedef struct IOUSBInterfaceDescriptor {
+ IOUSBDescriptorHeader header;
+ UInt8 bDescriptorSubType;
+ UInt8 bUnitID;
+} IOUSBInterfaceDescriptor;
+
// TODO(ronghuawu): Replace this with CapabilityList::GetBestMatchedCapability.
void GetBestMatchSupportedResolution(int* width, int* height) {
int min_diff = kint32max;
@@ -62,29 +93,217 @@ void GetBestMatchSupportedResolution(int* width, int* height) {
*height = matched_height;
}
-void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
- // Loop through all available devices and add to |device_names|.
- device_names->clear();
+// Tries to create a user-side device interface for a given USB device. Returns
+// true if interface was found and passes it back in |device_interface|. The
+// caller should release |device_interface|.
+static bool FindDeviceInterfaceInUsbDevice(
+ const int vendor_id,
+ const int product_id,
+ const io_service_t usb_device,
+ IOUSBDeviceInterface*** device_interface) {
+ // Create a plug-in, i.e. a user-side controller to manipulate USB device.
+ IOCFPlugInInterface** plugin;
+ SInt32 score; // Unused, but required for IOCreatePlugInInterfaceForService.
+ kern_return_t kr =
+ IOCreatePlugInInterfaceForService(usb_device,
+ kIOUSBDeviceUserClientTypeID,
+ kIOCFPlugInInterfaceID,
+ &plugin,
+ &score);
+ if (kr != kIOReturnSuccess || !plugin) {
+ DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
+ return false;
+ }
+ base::mac::ScopedIOPluginInterface<IOCFPlugInInterface> plugin_ref(plugin);
+
+ // Fetch the Device Interface from the plug-in.
+ HRESULT res =
+ (*plugin)->QueryInterface(plugin,
+ CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),
+ reinterpret_cast<LPVOID*>(device_interface));
+ if (!SUCCEEDED(res) || !*device_interface) {
+ DLOG(ERROR) << "QueryInterface, couldn't create interface to USB";
+ return false;
+ }
+ return true;
+}
- NSDictionary* capture_devices;
- if (AVFoundationGlue::IsAVFoundationSupported()) {
- DVLOG(1) << "Enumerating video capture devices using AVFoundation";
- capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
- } else {
- DVLOG(1) << "Enumerating video capture devices using QTKit";
- capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+// Tries to find a Video Control type interface inside a general USB device
+// interface |device_interface|, and returns it in |video_control_interface| if
+// found. The returned interface must be released in the caller.
+static bool FindVideoControlInterfaceInDeviceInterface(
+ IOUSBDeviceInterface** device_interface,
+ IOCFPlugInInterface*** video_control_interface) {
+ // Create an iterator to the list of Video-AVControl interfaces of the device,
+ // then get the first interface in the list.
+ io_iterator_t interface_iterator;
+ IOUSBFindInterfaceRequest interface_request = {
+ .bInterfaceClass = kUSBVideoInterfaceClass,
+ .bInterfaceSubClass = kUSBVideoControlSubClass,
+ .bInterfaceProtocol = kIOUSBFindInterfaceDontCare,
+ .bAlternateSetting = kIOUSBFindInterfaceDontCare
+ };
+ kern_return_t kr =
+ (*device_interface)->CreateInterfaceIterator(device_interface,
+ &interface_request,
+ &interface_iterator);
+ if (kr != kIOReturnSuccess) {
+ DLOG(ERROR) << "Could not create an iterator to the device's interfaces.";
+ return false;
}
- for (NSString* key in capture_devices) {
- Name name([[capture_devices valueForKey:key] UTF8String],
- [key UTF8String]);
- device_names->push_back(name);
+ base::mac::ScopedIOObject<io_iterator_t> iterator_ref(interface_iterator);
+
+ // There should be just one interface matching the class-subclass desired.
+ io_service_t found_interface;
+ found_interface = IOIteratorNext(interface_iterator);
+ if (!found_interface) {
+ DLOG(ERROR) << "Could not find a Video-AVControl interface in the device.";
+ return false;
+ }
+ base::mac::ScopedIOObject<io_service_t> found_interface_ref(found_interface);
+
+ // Create a user side controller (i.e. a "plug-in") for the found interface.
+ SInt32 score;
+ kr = IOCreatePlugInInterfaceForService(found_interface,
+ kIOUSBInterfaceUserClientTypeID,
+ kIOCFPlugInInterfaceID,
+ video_control_interface,
+ &score);
+ if (kr != kIOReturnSuccess || !*video_control_interface) {
+ DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
+ return false;
+ }
+ return true;
+}
+
+// Creates a control interface for |plugin_interface| and produces a command to
+// set the appropriate Power Line frequency for flicker removal.
+static void SetAntiFlickerInVideoControlInterface(
+ IOCFPlugInInterface** plugin_interface,
+ const int frequency) {
+ // Create the control interface for the found plug-in and release the
+ // intermediate plug-in.
+ IOUSBInterfaceInterface** control_interface = NULL;
+ HRESULT res = (*plugin_interface)->QueryInterface(
+ plugin_interface,
+ CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID),
+ reinterpret_cast<LPVOID*>(&control_interface));
+ if (!SUCCEEDED(res) || !control_interface) {
+ DLOG(ERROR) << "Couldn't create control interface";
+ return;
+ }
+ base::mac::ScopedIOPluginInterface<IOUSBInterfaceInterface>
+ control_interface_ref(control_interface);
+
+ // Find the device's unit ID presenting type 0x24 (kVcCsInterface) and
+ // subtype 0x5 (kVcProcessingUnit). Inside this unit is where we find the
+ // power line frequency removal setting, and this id is device dependent.
+ int real_unit_id = -1;
+ IOUSBDescriptorHeader* descriptor = NULL;
+ IOUSBInterfaceDescriptor* cs_descriptor = NULL;
+ IOUSBInterfaceInterface220** interface =
+ reinterpret_cast<IOUSBInterfaceInterface220**>(control_interface);
+ while ((descriptor = (*interface)->FindNextAssociatedDescriptor(
+ interface, descriptor, kUSBAnyDesc))) {
+ cs_descriptor =
+ reinterpret_cast<IOUSBInterfaceDescriptor*>(descriptor);
+ if ((descriptor->bDescriptorType == kVcCsInterface) &&
+ (cs_descriptor->bDescriptorSubType == kVcProcessingUnit)) {
+ real_unit_id = cs_descriptor->bUnitID;
+ break;
+ }
+ }
+ DVLOG_IF(1, real_unit_id == -1) << "This USB device doesn't seem to have a "
+ << " VC_PROCESSING_UNIT, anti-flicker not available";
+ if (real_unit_id == -1)
+ return;
+
+ if ((*control_interface)->USBInterfaceOpen(control_interface) !=
+ kIOReturnSuccess) {
+ DLOG(ERROR) << "Unable to open control interface";
+ return;
}
+
+ // Create the control request and send it to the device's control interface.
+ // Note how the wIndex needs the interface number OR'ed in the lowest bits.
+ IOUSBDevRequest command;
+ command.bmRequestType = USBmakebmRequestType(kUSBOut,
+ kUSBClass,
+ kUSBInterface);
+ command.bRequest = kVcRequestCodeSetCur;
+ UInt8 interface_number;
+ (*control_interface)->GetInterfaceNumber(control_interface,
+ &interface_number);
+ command.wIndex = (real_unit_id << 8) | interface_number;
+ const int selector = kPuPowerLineFrequencyControl;
+ command.wValue = (selector << 8);
+ command.wLength = kPuPowerLineFrequencyControlCommandSize;
+ command.wLenDone = 0;
+ int power_line_flag_value = (frequency == 50) ? k50Hz : k60Hz;
+ command.pData = &power_line_flag_value;
+
+ IOReturn ret = (*control_interface)->ControlRequest(control_interface,
+ 0, &command);
+ DLOG_IF(ERROR, ret != kIOReturnSuccess) << "Anti-flicker control request"
+ << " failed (0x" << std::hex << ret << "), unit id: " << real_unit_id;
+ DVLOG_IF(1, ret == kIOReturnSuccess) << "Anti-flicker set to " << frequency
+ << "Hz";
+
+ (*control_interface)->USBInterfaceClose(control_interface);
}
-// static
-void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
- VideoCaptureFormats* formats) {
- NOTIMPLEMENTED();
+// Sets the flicker removal in a USB webcam identified by |vendor_id| and
+// |product_id|, if available. The process includes first finding all USB
+// devices matching the specified |vendor_id| and |product_id|; for each
+// matching device, a device interface, and inside it a video control interface
+// are created. The latter is used to send a power frequency setting command.
+static void SetAntiFlickerInUsbDevice(const int vendor_id,
+ const int product_id,
+ const int frequency) {
+ if (frequency == 0)
+ return;
+ DVLOG(1) << "Setting Power Line Frequency to " << frequency << " Hz, device "
+ << std::hex << vendor_id << "-" << product_id;
+
+ // Compose a search dictionary with vendor and product ID.
+ CFMutableDictionaryRef query_dictionary =
+ IOServiceMatching(kIOUSBDeviceClassName);
+ CFDictionarySetValue(query_dictionary, CFSTR(kUSBVendorName),
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vendor_id));
+ CFDictionarySetValue(query_dictionary, CFSTR(kUSBProductName),
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &product_id));
+
+ io_iterator_t usb_iterator;
+ kern_return_t kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
+ query_dictionary,
+ &usb_iterator);
+ if (kr != kIOReturnSuccess) {
+ DLOG(ERROR) << "No devices found with specified Vendor and Product ID.";
+ return;
+ }
+ base::mac::ScopedIOObject<io_iterator_t> usb_iterator_ref(usb_iterator);
+
+ while (io_service_t usb_device = IOIteratorNext(usb_iterator)) {
+ base::mac::ScopedIOObject<io_service_t> usb_device_ref(usb_device);
+
+ IOUSBDeviceInterface** device_interface = NULL;
+ if (!FindDeviceInterfaceInUsbDevice(vendor_id, product_id,
+ usb_device, &device_interface)) {
+ return;
+ }
+ base::mac::ScopedIOPluginInterface<IOUSBDeviceInterface>
+ device_interface_ref(device_interface);
+
+ IOCFPlugInInterface** video_control_interface = NULL;
+ if (!FindVideoControlInterfaceInDeviceInterface(device_interface,
+ &video_control_interface)) {
+ return;
+ }
+ base::mac::ScopedIOPluginInterface<IOCFPlugInInterface>
+ plugin_interface_ref(video_control_interface);
+
+ SetAntiFlickerInVideoControlInterface(video_control_interface, frequency);
+ }
}
const std::string VideoCaptureDevice::Name::GetModel() const {
@@ -102,37 +321,25 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
return id_vendor + ":" + id_product;
}
-VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
- VideoCaptureDeviceMac* capture_device =
- new VideoCaptureDeviceMac(device_name);
- if (!capture_device->Init()) {
- LOG(ERROR) << "Could not initialize VideoCaptureDevice.";
- delete capture_device;
- capture_device = NULL;
- }
- return capture_device;
-}
-
VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
: device_name_(device_name),
- sent_frame_info_(false),
tried_to_square_pixels_(false),
- loop_proxy_(base::MessageLoopProxy::current()),
+ task_runner_(base::MessageLoopProxy::current()),
state_(kNotInitialized),
- weak_factory_(this),
- weak_this_(weak_factory_.GetWeakPtr()),
- capture_device_(nil) {
+ capture_device_(nil),
+ weak_factory_(this) {
+ final_resolution_selected_ = AVFoundationGlue::IsAVFoundationSupported();
}
VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
+ DCHECK(task_runner_->BelongsToCurrentThread());
[capture_device_ release];
}
void VideoCaptureDeviceMac::AllocateAndStart(
const VideoCaptureParams& params,
scoped_ptr<VideoCaptureDevice::Client> client) {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (state_ != kIdle) {
return;
}
@@ -140,12 +347,17 @@ void VideoCaptureDeviceMac::AllocateAndStart(
int height = params.requested_format.frame_size.height();
int frame_rate = params.requested_format.frame_rate;
- // The OS API can scale captured frame to any size requested, which would lead
- // to undesired aspect ratio change. Try to open the camera with a natively
+ // QTKit API can scale captured frame to any size requested, which would lead
+ // to undesired aspect ratio changes. Try to open the camera with a known
// supported format and let the client crop/pad the captured frames.
- GetBestMatchSupportedResolution(&width, &height);
+ if (!AVFoundationGlue::IsAVFoundationSupported())
+ GetBestMatchSupportedResolution(&width, &height);
client_ = client.Pass();
+ if (device_name_.capture_api_type() == Name::AVFOUNDATION)
+ LogMessage("Using AVFoundation for device: " + device_name_.name());
+ else
+ LogMessage("Using QTKit for device: " + device_name_.name());
NSString* deviceId =
[NSString stringWithUTF8String:device_name_.id().c_str()];
@@ -164,20 +376,33 @@ void VideoCaptureDeviceMac::AllocateAndStart(
capture_format_.frame_rate = frame_rate;
capture_format_.pixel_format = PIXEL_FORMAT_UYVY;
- if (width <= kVGA.width || height <= kVGA.height) {
- // If the resolution is VGA or QVGA, set the capture resolution to the
- // target size. Essentially all supported cameras offer at least VGA.
+ // QTKit: Set the capture resolution only if this is VGA or smaller, otherwise
+ // leave it unconfigured and start capturing: QTKit will produce frames at the
+ // native resolution, allowing us to identify cameras whose native resolution
+ // is too low for HD. This additional information comes at a cost in startup
+ // latency, because the webcam will need to be reopened if its default
+ // resolution is not HD or VGA.
+ // AVfoundation is configured for all resolutions.
+ if (AVFoundationGlue::IsAVFoundationSupported() || width <= kVGA.width ||
+ height <= kVGA.height) {
if (!UpdateCaptureResolution())
return;
}
- // For higher resolutions, we first open at the default resolution to find
- // out if the request is larger than the camera's native resolution.
- // If the resolution is HD, start capturing without setting a resolution.
- // QTKit will produce frames at the native resolution, allowing us to
- // identify cameras whose native resolution is too low for HD. This
- // additional information comes at a cost in startup latency, because the
- // webcam will need to be reopened if its default resolution is not HD or VGA.
+ // Try setting the power line frequency removal (anti-flicker). The built-in
+ // cameras are normally suspended so the configuration must happen right
+ // before starting capture and during configuration.
+ const std::string& device_model = device_name_.GetModel();
+ if (device_model.length() > 2 * kVidPidSize) {
+ std::string vendor_id = device_model.substr(0, kVidPidSize);
+ std::string model_id = device_model.substr(kVidPidSize + 1);
+ int vendor_id_as_int, model_id_as_int;
+ if (base::HexStringToInt(base::StringPiece(vendor_id), &vendor_id_as_int) &&
+ base::HexStringToInt(base::StringPiece(model_id), &model_id_as_int)) {
+ SetAntiFlickerInUsbDevice(vendor_id_as_int, model_id_as_int,
+ GetPowerLineFrequencyForLocation());
+ }
+ }
if (![capture_device_ startCapture]) {
SetErrorState("Could not start capture device.");
@@ -188,7 +413,7 @@ void VideoCaptureDeviceMac::AllocateAndStart(
}
void VideoCaptureDeviceMac::StopAndDeAllocate() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kCapturing || state_ == kError) << state_;
[capture_device_ stopCapture];
@@ -199,24 +424,12 @@ void VideoCaptureDeviceMac::StopAndDeAllocate() {
tried_to_square_pixels_ = false;
}
-bool VideoCaptureDeviceMac::Init() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
+bool VideoCaptureDeviceMac::Init(
+ VideoCaptureDevice::Name::CaptureApiType capture_api_type) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kNotInitialized);
- // TODO(mcasas): The following check might not be necessary; if the device has
- // disappeared after enumeration and before coming here, opening would just
- // fail but not necessarily produce a crash.
- Names device_names;
- GetDeviceNames(&device_names);
- Names::iterator it = device_names.begin();
- for (; it != device_names.end(); ++it) {
- if (it->id() == device_name_.id())
- break;
- }
- if (it == device_names.end())
- return false;
-
- if (AVFoundationGlue::IsAVFoundationSupported()) {
+ if (capture_api_type == Name::AVFOUNDATION) {
capture_device_ =
[[VideoCaptureDeviceAVFoundation alloc] initWithFrameReceiver:this];
} else {
@@ -237,11 +450,10 @@ void VideoCaptureDeviceMac::ReceiveFrame(
const VideoCaptureFormat& frame_format,
int aspect_numerator,
int aspect_denominator) {
- // This method is safe to call from a device capture thread,
- // i.e. any thread controlled by QTKit.
-
- if (!sent_frame_info_) {
- // Final resolution has not yet been selected.
+ // This method is safe to call from a device capture thread, i.e. any thread
+ // controlled by QTKit/AVFoundation.
+ if (!final_resolution_selected_) {
+ DCHECK(!AVFoundationGlue::IsAVFoundationSupported());
if (capture_format_.frame_size.width() > kVGA.width ||
capture_format_.frame_size.height() > kVGA.height) {
// We are requesting HD. Make sure that the picture is good, otherwise
@@ -266,62 +478,72 @@ void VideoCaptureDeviceMac::ReceiveFrame(
change_to_vga = true;
}
- if (change_to_vga) {
+ if (change_to_vga)
capture_format_.frame_size.SetSize(kVGA.width, kVGA.height);
- }
}
if (capture_format_.frame_size == frame_format.frame_size &&
!tried_to_square_pixels_ &&
(aspect_numerator > kMaxPixelAspectRatio * aspect_denominator ||
aspect_denominator > kMaxPixelAspectRatio * aspect_numerator)) {
- // The requested size results in non-square PAR.
- // Shrink the frame to 1:1 PAR (assuming QTKit selects the same input
- // mode, which is not guaranteed).
+ // The requested size results in non-square PAR. Shrink the frame to 1:1
+ // PAR (assuming QTKit selects the same input mode, which is not
+ // guaranteed).
int new_width = capture_format_.frame_size.width();
int new_height = capture_format_.frame_size.height();
- if (aspect_numerator < aspect_denominator) {
+ if (aspect_numerator < aspect_denominator)
new_width = (new_width * aspect_numerator) / aspect_denominator;
- } else {
+ else
new_height = (new_height * aspect_denominator) / aspect_numerator;
- }
capture_format_.frame_size.SetSize(new_width, new_height);
tried_to_square_pixels_ = true;
}
if (capture_format_.frame_size == frame_format.frame_size) {
- sent_frame_info_ = true;
+ final_resolution_selected_ = true;
} else {
UpdateCaptureResolution();
- // OnFrameInfo has not yet been called. OnIncomingCapturedFrame must
- // not be called until after OnFrameInfo, so we return early.
+ // Let the resolution update sink through QTKit and wait for next frame.
return;
}
}
- DCHECK_EQ(capture_format_.frame_size.width(),
- frame_format.frame_size.width());
- DCHECK_EQ(capture_format_.frame_size.height(),
- frame_format.frame_size.height());
+ // QTKit capture source can change resolution if someone else reconfigures the
+ // camera, and that is fine: http://crbug.com/353620. In AVFoundation, this
+ // should not happen; AVFoundation resizes internally.
+ if (!AVFoundationGlue::IsAVFoundationSupported()) {
+ capture_format_.frame_size = frame_format.frame_size;
+ } else if (capture_format_.frame_size != frame_format.frame_size) {
+ ReceiveError("Captured resolution " + frame_format.frame_size.ToString() +
+ ", and expected " + capture_format_.frame_size.ToString());
+ return;
+ }
- client_->OnIncomingCapturedFrame(video_frame,
- video_frame_length,
- base::Time::Now(),
- 0,
- capture_format_);
+ client_->OnIncomingCapturedData(video_frame,
+ video_frame_length,
+ capture_format_,
+ 0,
+ base::TimeTicks::Now());
}
void VideoCaptureDeviceMac::ReceiveError(const std::string& reason) {
- loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureDeviceMac::SetErrorState, weak_this_,
- reason));
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&VideoCaptureDeviceMac::SetErrorState,
+ weak_factory_.GetWeakPtr(),
+ reason));
}
void VideoCaptureDeviceMac::SetErrorState(const std::string& reason) {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DLOG(ERROR) << reason;
state_ = kError;
- client_->OnError();
+ client_->OnError(reason);
+}
+
+void VideoCaptureDeviceMac::LogMessage(const std::string& message) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (client_)
+ client_->OnLog(message);
}
bool VideoCaptureDeviceMac::UpdateCaptureResolution() {
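The VID/PID extraction performed in AllocateAndStart() above, isolated as a sketch; the helper name is hypothetical, and the "vvvv:pppp" shape of the input comes from Name::GetModel():

#include <string>

#include "base/strings/string_number_conversions.h"

bool ParseVidPid(const std::string& model, int* vid, int* pid) {
  const size_t kVidPidSize = 4;  // Hex characters per ID in the model string.
  if (model.length() <= 2 * kVidPidSize)
    return false;  // No "vvvv:pppp" model, e.g. a non-USB device.
  return base::HexStringToInt(model.substr(0, kVidPidSize), vid) &&
         base::HexStringToInt(model.substr(kVidPidSize + 1), pid);
}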
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
index 1eba8a12ea2..1ed511b54ce 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
@@ -39,7 +39,13 @@ class VideoCaptureDeviceMac;
std::vector<UInt8> adjustedFrame_;
}
-// Returns a dictionary of capture devices with friendly name and unique id.
+// Fills the |deviceNames| dictionary with the friendly name and unique id of
+// each capture device. No threading assumptions, but this method should run
+// on the UI thread; see http://crbug.com/139164.
++ (void)getDeviceNames:(NSMutableDictionary*)deviceNames;
+
+// Returns a dictionary of capture devices with friendly name and unique id,
+// obtained by running +getDeviceNames: on the main thread.
+ (NSDictionary*)deviceNames;
// Initializes the instance and registers the frame receiver.
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index cd9c6d333e9..c884c723df5 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -78,25 +78,31 @@
return NO;
}
+ // TODO(mcasas): Consider using [QTCaptureDevice deviceWithUniqueID:] instead
+ // of explicitly forcing reenumeration of devices.
NSArray *captureDevices =
[QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
NSArray *captureDevicesNames =
[captureDevices valueForKey:@"uniqueID"];
NSUInteger index = [captureDevicesNames indexOfObject:deviceId];
if (index == NSNotFound) {
- DLOG(ERROR) << "Video capture device not found.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"Video capture device not found."]];
return NO;
}
QTCaptureDevice *device = [captureDevices objectAtIndex:index];
if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute]
boolValue]) {
- DLOG(ERROR) << "Cannot open suspended video capture device.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"Cannot open suspended video capture device."]];
return NO;
}
NSError *error;
if (![device open:&error]) {
- DLOG(ERROR) << "Could not open video capture device."
- << [[error localizedDescription] UTF8String];
+ [self sendErrorString:[NSString
+ stringWithFormat:@"Could not open video capture device (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
captureDeviceInput_ = [[QTCaptureDeviceInput alloc] initWithDevice:device];
@@ -106,8 +112,10 @@
[[[QTCaptureDecompressedVideoOutput alloc] init] autorelease];
[captureDecompressedOutput setDelegate:self];
if (![captureSession_ addOutput:captureDecompressedOutput error:&error]) {
- DLOG(ERROR) << "Could not connect video capture output."
- << [[error localizedDescription] UTF8String];
+ [self sendErrorString:[NSString
+ stringWithFormat:@"Could not connect video capture output (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
@@ -126,7 +134,8 @@
} else {
// Remove the previously set capture device.
if (!captureDeviceInput_) {
- DLOG(ERROR) << "No video capture device set.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"No video capture device set, on removal."]];
return YES;
}
if ([[captureSession_ inputs] count] > 0) {
@@ -139,15 +148,14 @@
id output = [[captureSession_ outputs] objectAtIndex:0];
[output setDelegate:nil];
- // TODO(shess): QTKit achieves thread safety by posting messages
- // to the main thread. As part of -addOutput:, it posts a
- // message to the main thread which in turn posts a notification
- // which will run in a future spin after the original method
- // returns. -removeOutput: can post a main-thread message in
- // between while holding a lock which the notification handler
- // will need. Posting either -addOutput: or -removeOutput: to
- // the main thread should fix it, remove is likely safer.
- // http://crbug.com/152757
+ // TODO(shess): QTKit achieves thread safety by posting messages to the
+ // main thread. As part of -addOutput:, it posts a message to the main
+ // thread which in turn posts a notification which will run in a future
+ // spin after the original method returns. -removeOutput: can post a
+ // main-thread message in between while holding a lock which the
+ // notification handler will need. Posting either -addOutput: or
+ // -removeOutput: to the main thread should fix it, remove is likely
+ // safer. http://crbug.com/152757
[captureSession_ performSelectorOnMainThread:@selector(removeOutput:)
withObject:output
waitUntilDone:YES];
@@ -162,15 +170,17 @@
- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate {
if (!captureDeviceInput_) {
- DLOG(ERROR) << "No video capture device set.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"No video capture device set."]];
return NO;
}
if ([[captureSession_ outputs] count] != 1) {
- DLOG(ERROR) << "Video capture capabilities already set.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"Video capture capabilities already set."]];
return NO;
}
if (frameRate <= 0) {
- DLOG(ERROR) << "Wrong frame rate.";
+ [self sendErrorString:[NSString stringWithUTF8String:"Wrong frame rate."]];
return NO;
}
@@ -196,14 +206,18 @@
- (BOOL)startCapture {
if ([[captureSession_ outputs] count] == 0) {
// Capture properties not set.
- DLOG(ERROR) << "Video capture device not initialized.";
+ [self sendErrorString:[NSString
+ stringWithUTF8String:"Video capture device not initialized."]];
return NO;
}
if ([[captureSession_ inputs] count] == 0) {
NSError *error;
if (![captureSession_ addInput:captureDeviceInput_ error:&error]) {
- DLOG(ERROR) << "Could not connect video capture device."
- << [[error localizedDescription] UTF8String];
+ [self sendErrorString:[NSString
+ stringWithFormat:@"Could not connect video capture device (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
NSNotificationCenter * notificationCenter =
@@ -248,7 +262,7 @@
size_t frameSize = bytesPerRow * frameHeight;
// TODO(shess): bytesPerRow may not correspond to frameWidth_*2,
- // but VideoCaptureController::OnIncomingCapturedFrame() requires
+ // but VideoCaptureController::OnIncomingCapturedData() requires
// it to do so. Plumbing things through is intrusive, for now
// just deliver an adjusted buffer.
// TODO(nick): This workaround could probably be eliminated by using
@@ -309,7 +323,18 @@
- (void)handleNotification:(NSNotification*)errorNotification {
NSError * error = (NSError*)[[errorNotification userInfo]
objectForKey:QTCaptureSessionErrorKey];
- frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
+ [self sendErrorString:[NSString
+ stringWithFormat:@"%@: %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
+}
+
+- (void)sendErrorString:(NSString*)error {
+ DLOG(ERROR) << [error UTF8String];
+ [lock_ lock];
+ if (frameReceiver_)
+ frameReceiver_->ReceiveError([error UTF8String]);
+ [lock_ unlock];
}
@end
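Both capture backends now funnel failures through a -sendErrorString: that logs and then forwards under a lock. The C++ shape of that pattern, sketched with a hypothetical receiver interface standing in for VideoCaptureDeviceMac:

#include <string>

#include "base/logging.h"
#include "base/synchronization/lock.h"

class FrameReceiver {
 public:
  virtual void ReceiveError(const std::string& reason) = 0;

 protected:
  virtual ~FrameReceiver() {}
};

class ErrorForwarder {
 public:
  ErrorForwarder() : receiver_(NULL) {}

  void SendErrorString(const std::string& error) {
    DLOG(ERROR) << error;
    // |receiver_| may be cleared concurrently during teardown; hold the lock
    // while touching it, exactly as the Objective-C versions above do.
    base::AutoLock lock(lock_);
    if (receiver_)
      receiver_->ReceiveError(error);
  }

 private:
  base::Lock lock_;
  FrameReceiver* receiver_;  // Guarded by |lock_|.
};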
diff --git a/chromium/media/video/capture/video_capture.h b/chromium/media/video/capture/video_capture.h
deleted file mode 100644
index 9a0e94378bb..00000000000
--- a/chromium/media/video/capture/video_capture.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// This file contains abstract classes used for media filter to handle video
-// capture devices.
-
-#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
-#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/time/time.h"
-#include "media/base/media_export.h"
-#include "media/video/capture/video_capture_types.h"
-
-namespace media {
-
-class VideoFrame;
-
-class MEDIA_EXPORT VideoCapture {
- public:
- // TODO(wjia): add error codes.
- // TODO(wjia): support weak ptr.
- // Callbacks provided by client for notification of events.
- class MEDIA_EXPORT EventHandler {
- public:
- // Notify client that video capture has been started.
- virtual void OnStarted(VideoCapture* capture) = 0;
-
- // Notify client that video capture has been stopped.
- virtual void OnStopped(VideoCapture* capture) = 0;
-
- // Notify client that video capture has been paused.
- virtual void OnPaused(VideoCapture* capture) = 0;
-
- // Notify client that video capture has hit some error |error_code|.
- virtual void OnError(VideoCapture* capture, int error_code) = 0;
-
- // Notify client that the client has been removed and no more calls will be
- // received.
- virtual void OnRemoved(VideoCapture* capture) = 0;
-
- // Notify client that a buffer is available.
- virtual void OnFrameReady(
- VideoCapture* capture,
- const scoped_refptr<media::VideoFrame>& frame) = 0;
-
- protected:
- virtual ~EventHandler() {}
- };
-
- VideoCapture() {}
-
- // Request video capture to start capturing with |params|.
- // Also register |handler| with video capture for event handling.
- // |handler| must remain valid until it has received |OnRemoved()|.
- virtual void StartCapture(EventHandler* handler,
- const VideoCaptureParams& params) = 0;
-
- // Request video capture to stop capturing for client |handler|.
- // |handler| must remain valid until it has received |OnRemoved()|.
- virtual void StopCapture(EventHandler* handler) = 0;
-
- virtual bool CaptureStarted() = 0;
- virtual int CaptureFrameRate() = 0;
-
- protected:
- virtual ~VideoCapture() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VideoCapture);
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
diff --git a/chromium/media/video/capture/video_capture_device.cc b/chromium/media/video/capture/video_capture_device.cc
index c370d092c93..2efff7de02c 100644
--- a/chromium/media/video/capture/video_capture_device.cc
+++ b/chromium/media/video/capture/video_capture_device.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "media/video/capture/video_capture_device.h"
+
+#include "base/i18n/timezone.h"
#include "base/strings/string_util.h"
namespace media {
@@ -19,4 +21,24 @@ const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
VideoCaptureDevice::~VideoCaptureDevice() {}
+int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
+ std::string current_country = base::CountryCodeForCurrentTimezone();
+ if (current_country.empty())
+ return 0;
+ // Sorted list of countries with 60Hz power line frequency, from
+ // http://en.wikipedia.org/wiki/Mains_electricity_by_country
+ const char* countries_using_60Hz[] = {
+ "AI", "AO", "AS", "AW", "AZ", "BM", "BR", "BS", "BZ", "CA", "CO",
+ "CR", "CU", "DO", "EC", "FM", "GT", "GU", "GY", "HN", "HT", "JP",
+ "KN", "KR", "KY", "MS", "MX", "NI", "PA", "PE", "PF", "PH", "PR",
+ "PW", "SA", "SR", "SV", "TT", "TW", "UM", "US", "VG", "VI", "VE"};
+ const char** countries_using_60Hz_end =
+ countries_using_60Hz + arraysize(countries_using_60Hz);
+ if (std::find(countries_using_60Hz, countries_using_60Hz_end,
+ current_country) == countries_using_60Hz_end) {
+ return kPowerLine50Hz;
+ }
+ return kPowerLine60Hz;
+}
+
} // namespace media
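A worked example of the new helper, assuming the current timezone resolves to the given country: a machine in the US maps to kPowerLine60Hz, one in Germany (absent from the 60 Hz list) maps to kPowerLine50Hz, and an empty country code yields 0, which the Mac capture code above treats as "unknown, skip anti-flicker":

// Sketch of the call-site pattern used in VideoCaptureDeviceMac above.
const int frequency = GetPowerLineFrequencyForLocation();
if (frequency != 0)  // 0 means the locale could not be determined.
  SetAntiFlickerInUsbDevice(vendor_id_as_int, model_id_as_int, frequency);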
diff --git a/chromium/media/video/capture/video_capture_device.h b/chromium/media/video/capture/video_capture_device.h
index 295401c3686..afb4c3567ac 100644
--- a/chromium/media/video/capture/video_capture_device.h
+++ b/chromium/media/video/capture/video_capture_device.h
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/video_frame.h"
@@ -48,12 +49,21 @@ class MEDIA_EXPORT VideoCaptureDevice {
DIRECT_SHOW,
API_TYPE_UNKNOWN
};
-
+#endif
+#if defined(OS_MACOSX)
+ // Mac targets Capture Api type: it can only be set on construction.
+ enum CaptureApiType {
+ AVFOUNDATION,
+ QTKIT,
+ API_TYPE_UNKNOWN
+ };
+#endif
+#if defined(OS_WIN) || defined(OS_MACOSX)
Name(const std::string& name,
const std::string& id,
const CaptureApiType api_type)
: device_name_(name), unique_id_(id), capture_api_class_(api_type) {}
-#endif // if defined(OS_WIN)
+#endif
~Name() {}
// Friendly name of a device
@@ -63,7 +73,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
// friendly name connected to the computer this will be unique.
const std::string& id() const { return unique_id_; }
- // The unique hardware model identifier of the capture device. Returns
+ // The unique hardware model identifier of the capture device. Returns
// "[vid]:[pid]" when a USB device is detected, otherwise "".
// The implementation of this method is platform-dependent.
const std::string GetModel() const;
@@ -81,7 +91,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
return unique_id_ < other.id();
}
-#if defined(OS_WIN)
+#if defined(OS_WIN) || defined(OS_MACOSX)
CaptureApiType capture_api_type() const {
return capture_api_class_.capture_api_type();
}
@@ -90,16 +100,16 @@ class MEDIA_EXPORT VideoCaptureDevice {
private:
std::string device_name_;
std::string unique_id_;
-#if defined(OS_WIN)
- // This class wraps the CaptureApiType, so it has a by default value if not
- // inititalized, and I (mcasas) do a DCHECK on reading its value.
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ // This class wraps the CaptureApiType to give it a default value if not
+ // initialized.
class CaptureApiClass {
public:
- CaptureApiClass(): capture_api_type_(API_TYPE_UNKNOWN) {}
+ CaptureApiClass(): capture_api_type_(API_TYPE_UNKNOWN) {}
CaptureApiClass(const CaptureApiType api_type)
- : capture_api_type_(api_type) {}
+ : capture_api_type_(api_type) {}
CaptureApiType capture_api_type() const {
- DCHECK_NE(capture_api_type_, API_TYPE_UNKNOWN);
+ DCHECK_NE(capture_api_type_, API_TYPE_UNKNOWN);
return capture_api_type_;
}
private:
@@ -107,7 +117,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
};
CaptureApiClass capture_api_class_;
-#endif // if defined(OS_WIN)
+#endif
// Allow generated copy constructor and assignment.
};
@@ -139,11 +149,11 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Reserve an output buffer into which contents can be captured directly.
// The returned Buffer will always be allocated with a memory size suitable
- // for holding a packed video frame of |format| format, of |dimensions|
- // dimensions. It is permissible for |dimensions| to be zero; in which
- // case the returned Buffer does not guarantee memory backing, but functions
- // as a reservation for external input for the purposes of buffer
- // throttling.
+ // for holding a packed video frame with pixels of |format| format, of
+ // |dimensions| frame dimensions. It is permissible for |dimensions| to be
+ // zero; in which case the returned Buffer does not guarantee memory
+ // backing, but functions as a reservation for external input for the
+ // purposes of buffer throttling.
//
// The output buffer stays reserved for use until the Buffer object is
// destroyed.
@@ -151,42 +161,42 @@ class MEDIA_EXPORT VideoCaptureDevice {
media::VideoFrame::Format format,
const gfx::Size& dimensions) = 0;
- // Captured a new video frame as a raw buffer. The size, color format, and
- // layout are taken from the parameters specified by an earlier call to
- // OnFrameInfo(). |data| must be packed, with no padding between rows and/or
- // color planes.
+ // Captured a new video frame, data for which is pointed to by |data|.
//
- // This method will try to reserve an output buffer and copy from |data|
- // into the output buffer. If no output buffer is available, the frame will
- // be silently dropped.
- virtual void OnIncomingCapturedFrame(
- const uint8* data,
- int length,
- base::Time timestamp,
- int rotation, // Clockwise.
- const VideoCaptureFormat& frame_format) = 0;
-
- // Captured a new video frame, held in |buffer|.
+ // The format of the frame is described by |frame_format|, and is assumed to
+ // be tightly packed. This method will try to reserve an output buffer and
+ // copy from |data| into the output buffer. If no output buffer is
+ // available, the frame will be silently dropped.
+ virtual void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& frame_format,
+ int rotation, // Clockwise.
+ base::TimeTicks timestamp) = 0;
+
+ // Captured a new video frame, held in |frame|.
//
// As the frame is backed by a reservation returned by
// ReserveOutputBuffer(), delivery is guaranteed and will require no
- // additional copies in the browser process. |dimensions| indicates the
- // frame width and height of the buffer contents; this is assumed to be of
- // |format| format and tightly packed.
- virtual void OnIncomingCapturedBuffer(const scoped_refptr<Buffer>& buffer,
- media::VideoFrame::Format format,
- const gfx::Size& dimensions,
- base::Time timestamp,
- int frame_rate) = 0;
+ // additional copies in the browser process.
+ virtual void OnIncomingCapturedVideoFrame(
+ const scoped_refptr<Buffer>& buffer,
+ const VideoCaptureFormat& buffer_format,
+ const scoped_refptr<media::VideoFrame>& frame,
+ base::TimeTicks timestamp) = 0;
// An error has occurred that cannot be handled and VideoCaptureDevice must
- // be StopAndDeAllocate()-ed.
- virtual void OnError() = 0;
+ // be StopAndDeAllocate()-ed. |reason| is a text description of the error.
+ virtual void OnError(const std::string& reason) = 0;
+
+ // VideoCaptureDevice requests the |message| to be logged.
+ virtual void OnLog(const std::string& message) {}
};
// Creates a VideoCaptureDevice object.
// Return NULL if the hardware is not available.
- static VideoCaptureDevice* Create(const Name& device_name);
+ static VideoCaptureDevice* Create(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const Name& device_name);
virtual ~VideoCaptureDevice();
// Gets the names of all video capture devices connected to this computer.
@@ -217,6 +227,14 @@ class MEDIA_EXPORT VideoCaptureDevice {
// would be sequenced through the same task runner, so that deallocation
// happens first.
virtual void StopAndDeAllocate() = 0;
+
+ // Gets the power line frequency for the location implied by the current
+ // system time zone, or returns 0 if it cannot be determined.
+ int GetPowerLineFrequencyForLocation() const;
+
+ protected:
+ static const int kPowerLine50Hz = 50;
+ static const int kPowerLine60Hz = 60;
};
} // namespace media
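For orientation, this is roughly how a platform device implementation hands a packed frame to the revised Client interface (a sketch against the headers above; |client_|, the format values and the callback name are hypothetical):

    // Inside a hypothetical platform device's capture callback.
    void OnPlatformFrame(const uint8* data, int length) {
      media::VideoCaptureFormat format(
          gfx::Size(640, 480), 30.0f, media::PIXEL_FORMAT_I420);
      // |data| must be tightly packed; the client copies it into a reserved
      // buffer, or silently drops the frame when none is available.
      client_->OnIncomingCapturedData(data, length, format, 0 /* rotation */,
                                      base::TimeTicks::Now());
    }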
diff --git a/chromium/media/video/capture/video_capture_device_factory.cc b/chromium/media/video/capture/video_capture_device_factory.cc
new file mode 100644
index 00000000000..00a8f7ec3d4
--- /dev/null
+++ b/chromium/media/video/capture/video_capture_device_factory.cc
@@ -0,0 +1,76 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+#include "base/command_line.h"
+#include "media/base/media_switches.h"
+#include "media/video/capture/fake_video_capture_device_factory.h"
+#include "media/video/capture/file_video_capture_device_factory.h"
+
+#if defined(OS_MACOSX)
+#include "media/video/capture/mac/video_capture_device_factory_mac.h"
+#elif defined(OS_LINUX)
+#include "media/video/capture/linux/video_capture_device_factory_linux.h"
+#elif defined(OS_ANDROID)
+#include "media/video/capture/android/video_capture_device_factory_android.h"
+#elif defined(OS_WIN)
+#include "media/video/capture/win/video_capture_device_factory_win.h"
+#endif
+
+namespace media {
+
+// static
+scoped_ptr<VideoCaptureDeviceFactory> VideoCaptureDeviceFactory::CreateFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ const CommandLine* command_line = CommandLine::ForCurrentProcess();
+ // Use a Fake or File Video Device Factory if the command line flags are
+ // present, otherwise use the normal, platform-dependent, device factory.
+ if (command_line->HasSwitch(switches::kUseFakeDeviceForMediaStream)) {
+ if (command_line->HasSwitch(switches::kUseFileForFakeVideoCapture)) {
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ media::FileVideoCaptureDeviceFactory());
+ } else {
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ media::FakeVideoCaptureDeviceFactory());
+ }
+ } else {
+ // |ui_task_runner| is needed for the Linux ChromeOS factory to retrieve
+ // screen rotations and for the Mac factory to run QTKit device enumeration.
+#if defined(OS_MACOSX)
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ VideoCaptureDeviceFactoryMac(ui_task_runner));
+#elif defined(OS_LINUX)
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ VideoCaptureDeviceFactoryLinux(ui_task_runner));
+#elif defined(OS_ANDROID)
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ VideoCaptureDeviceFactoryAndroid());
+#elif defined(OS_WIN)
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ VideoCaptureDeviceFactoryWin());
+#else
+ return scoped_ptr<VideoCaptureDeviceFactory>(new
+ VideoCaptureDeviceFactory());
+#endif
+ }
+}
+
+VideoCaptureDeviceFactory::VideoCaptureDeviceFactory() {
+ thread_checker_.DetachFromThread();
+}
+
+VideoCaptureDeviceFactory::~VideoCaptureDeviceFactory() {}
+
+void VideoCaptureDeviceFactory::EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!callback.is_null());
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ GetDeviceNames(device_names.get());
+ callback.Run(device_names.Pass());
+}
+
+} // namespace media
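A typical caller obtains a factory once and enumerates devices asynchronously, mirroring the pattern used by the unit test further down (OnDeviceNames is a hypothetical free function taking scoped_ptr<media::VideoCaptureDevice::Names>):

    scoped_ptr<media::VideoCaptureDeviceFactory> factory =
        media::VideoCaptureDeviceFactory::CreateFactory(
            base::MessageLoopProxy::current());
    factory->EnumerateDeviceNames(base::Bind(&OnDeviceNames));
    // Later, on the same thread, a concrete device can be created:
    //   scoped_ptr<media::VideoCaptureDevice> device =
    //       factory->Create(names->front());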
diff --git a/chromium/media/video/capture/video_capture_device_factory.h b/chromium/media/video/capture/video_capture_device_factory.h
new file mode 100644
index 00000000000..76c4bdc2b0b
--- /dev/null
+++ b/chromium/media/video/capture/video_capture_device_factory.h
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+
+#include "base/threading/thread_checker.h"
+#include "media/video/capture/video_capture_device.h"
+
+namespace media {
+
+// VideoCaptureDeviceFactory is the base class for creation of video capture
+// devices on the different platforms. VCDFs are created by MediaStreamManager
+// on the IO thread and plugged into VideoCaptureManager, which owns and
+// operates them on the Device thread (a.k.a. Audio thread).
+class MEDIA_EXPORT VideoCaptureDeviceFactory {
+ public:
+ static scoped_ptr<VideoCaptureDeviceFactory> CreateFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+
+ VideoCaptureDeviceFactory();
+ virtual ~VideoCaptureDeviceFactory();
+
+ // Creates a VideoCaptureDevice object. Returns NULL if something goes wrong.
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) = 0;
+
+ // Asynchronous version of GetDeviceNames calling back to |callback|.
+ virtual void EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback);
+
+ // Gets the supported formats of a particular device attached to the system.
+ // This method should be called before allocating or starting a device. In
+ // case format enumeration is not supported, or there was a problem, the
+ // formats array will be empty.
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) = 0;
+
+ protected:
+ // Gets the names of all video capture devices connected to this computer.
+ // Used by the default implementation of EnumerateDeviceNames().
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) = 0;
+
+ base::ThreadChecker thread_checker_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
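A new platform port then only needs to subclass and implement the three pure virtuals; a skeleton under those assumptions (the Foo platform is hypothetical; the shape mirrors the Windows factory declared later in this patch):

    class VideoCaptureDeviceFactoryFoo : public media::VideoCaptureDeviceFactory {
     public:
      virtual scoped_ptr<media::VideoCaptureDevice> Create(
          const media::VideoCaptureDevice::Name& device_name) OVERRIDE;
      virtual void GetDeviceNames(
          media::VideoCaptureDevice::Names* device_names) OVERRIDE;
      virtual void GetDeviceSupportedFormats(
          const media::VideoCaptureDevice::Name& device,
          media::VideoCaptureFormats* supported_formats) OVERRIDE;
    };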
diff --git a/chromium/media/video/capture/video_capture_device_unittest.cc b/chromium/media/video/capture/video_capture_device_unittest.cc
index 5e05ad4b4b2..afc6dff6aa5 100644
--- a/chromium/media/video/capture/video_capture_device_unittest.cc
+++ b/chromium/media/video/capture/video_capture_device_unittest.cc
@@ -4,22 +4,25 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/run_loop.h"
-#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
-#include "media/video/capture/fake_video_capture_device.h"
#include "media/video/capture/video_capture_device.h"
+#include "media/video/capture/video_capture_device_factory.h"
#include "media/video/capture/video_capture_types.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_WIN)
#include "base/win/scoped_com_initializer.h"
-#include "media/video/capture/win/video_capture_device_mf_win.h"
+#include "media/video/capture/win/video_capture_device_factory_win.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include "media/video/capture/mac/video_capture_device_factory_mac.h"
#endif
#if defined(OS_ANDROID)
@@ -30,7 +33,7 @@
#if defined(OS_MACOSX)
// Mac/QTKit will always give you the size you ask for and this case will fail.
#define MAYBE_AllocateBadSize DISABLED_AllocateBadSize
-// We will always get ARGB from the Mac/QTKit implementation.
+// We will always get YUYV from the Mac QTKit/AVFoundation implementations.
#define MAYBE_CaptureMjpeg DISABLED_CaptureMjpeg
#elif defined(OS_WIN)
#define MAYBE_AllocateBadSize AllocateBadSize
@@ -55,9 +58,7 @@
#endif
using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::Return;
-using ::testing::AtLeast;
+using ::testing::SaveArg;
namespace media {
@@ -71,32 +72,46 @@ class MockClient : public media::VideoCaptureDevice::Client {
explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
: main_thread_(base::MessageLoopProxy::current()), frame_cb_(frame_cb) {}
- virtual void OnError() OVERRIDE {
+ virtual void OnError(const std::string& error_message) OVERRIDE {
OnErr();
}
- virtual void OnIncomingCapturedFrame(const uint8* data,
- int length,
- base::Time timestamp,
- int rotation,
- const VideoCaptureFormat& format)
- OVERRIDE {
+ virtual void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& format,
+ int rotation,
+ base::TimeTicks timestamp) OVERRIDE {
main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
}
- virtual void OnIncomingCapturedBuffer(const scoped_refptr<Buffer>& buffer,
- media::VideoFrame::Format format,
- const gfx::Size& dimensions,
- base::Time timestamp,
- int frame_rate) OVERRIDE {
+ virtual void OnIncomingCapturedVideoFrame(
+ const scoped_refptr<Buffer>& buffer,
+ const media::VideoCaptureFormat& buffer_format,
+ const scoped_refptr<media::VideoFrame>& frame,
+ base::TimeTicks timestamp) OVERRIDE {
NOTREACHED();
}
private:
- scoped_refptr<base::MessageLoopProxy> main_thread_;
+ scoped_refptr<base::SingleThreadTaskRunner> main_thread_;
base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
};
+class DeviceEnumerationListener :
+ public base::RefCounted<DeviceEnumerationListener> {
+ public:
+ MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
+ void(media::VideoCaptureDevice::Names* names));
+ // GMock doesn't support move-only arguments, so we use this forward method.
+ void OnEnumeratedDevicesCallback(
+ scoped_ptr<media::VideoCaptureDevice::Names> names) {
+ OnEnumeratedDevicesCallbackPtr(names.release());
+ }
+ private:
+ friend class base::RefCounted<DeviceEnumerationListener>;
+ virtual ~DeviceEnumerationListener() {}
+};
+
class VideoCaptureDeviceTest : public testing::Test {
protected:
typedef media::VideoCaptureDevice::Client Client;
@@ -105,7 +120,11 @@ class VideoCaptureDeviceTest : public testing::Test {
: loop_(new base::MessageLoop()),
client_(
new MockClient(base::Bind(&VideoCaptureDeviceTest::OnFrameCaptured,
- base::Unretained(this)))) {}
+ base::Unretained(this)))),
+ video_capture_device_factory_(VideoCaptureDeviceFactory::CreateFactory(
+ base::MessageLoopProxy::current())) {
+ device_enumeration_listener_ = new DeviceEnumerationListener();
+ }
virtual void SetUp() {
#if defined(OS_ANDROID)
@@ -129,43 +148,108 @@ class VideoCaptureDeviceTest : public testing::Test {
run_loop_->Run();
}
+ scoped_ptr<media::VideoCaptureDevice::Names> EnumerateDevices() {
+ media::VideoCaptureDevice::Names* names;
+ EXPECT_CALL(*device_enumeration_listener_,
+ OnEnumeratedDevicesCallbackPtr(_)).WillOnce(SaveArg<0>(&names));
+
+ video_capture_device_factory_->EnumerateDeviceNames(
+ base::Bind(&DeviceEnumerationListener::OnEnumeratedDevicesCallback,
+ device_enumeration_listener_));
+ base::MessageLoop::current()->RunUntilIdle();
+ return scoped_ptr<media::VideoCaptureDevice::Names>(names);
+ }
+
const VideoCaptureFormat& last_format() const { return last_format_; }
+ scoped_ptr<VideoCaptureDevice::Name> GetFirstDeviceNameSupportingPixelFormat(
+ const VideoPixelFormat& pixel_format) {
+ names_ = EnumerateDevices();
+ if (!names_->size()) {
+ DVLOG(1) << "No camera available.";
+ return scoped_ptr<VideoCaptureDevice::Name>();
+ }
+ VideoCaptureDevice::Names::iterator names_iterator;
+ for (names_iterator = names_->begin(); names_iterator != names_->end();
+ ++names_iterator) {
+ VideoCaptureFormats supported_formats;
+ video_capture_device_factory_->GetDeviceSupportedFormats(
+ *names_iterator,
+ &supported_formats);
+ VideoCaptureFormats::iterator formats_iterator;
+ for (formats_iterator = supported_formats.begin();
+ formats_iterator != supported_formats.end(); ++formats_iterator) {
+ if (formats_iterator->pixel_format == pixel_format) {
+ return scoped_ptr<VideoCaptureDevice::Name>(
+ new VideoCaptureDevice::Name(*names_iterator));
+ }
+ }
+ }
+ DVLOG(1) << "No camera can capture the format: " << pixel_format;
+ return scoped_ptr<VideoCaptureDevice::Name>();
+ }
+
#if defined(OS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
- VideoCaptureDevice::Names names_;
+ scoped_ptr<VideoCaptureDevice::Names> names_;
scoped_ptr<base::MessageLoop> loop_;
scoped_ptr<base::RunLoop> run_loop_;
scoped_ptr<MockClient> client_;
+ scoped_refptr<DeviceEnumerationListener> device_enumeration_listener_;
VideoCaptureFormat last_format_;
+ scoped_ptr<VideoCaptureDeviceFactory> video_capture_device_factory_;
};
TEST_F(VideoCaptureDeviceTest, OpenInvalidDevice) {
#if defined(OS_WIN)
VideoCaptureDevice::Name::CaptureApiType api_type =
- VideoCaptureDeviceMFWin::PlatformSupported()
+ VideoCaptureDeviceFactoryWin::PlatformSupportsMediaFoundation()
? VideoCaptureDevice::Name::MEDIA_FOUNDATION
: VideoCaptureDevice::Name::DIRECT_SHOW;
VideoCaptureDevice::Name device_name("jibberish", "jibberish", api_type);
+#elif defined(OS_MACOSX)
+ VideoCaptureDevice::Name device_name("jibberish", "jibberish",
+ VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation()
+ ? VideoCaptureDevice::Name::AVFOUNDATION
+ : VideoCaptureDevice::Name::QTKIT);
#else
VideoCaptureDevice::Name device_name("jibberish", "jibberish");
#endif
- VideoCaptureDevice* device = VideoCaptureDevice::Create(device_name);
+ scoped_ptr<VideoCaptureDevice> device =
+ video_capture_device_factory_->Create(device_name);
+#if !defined(OS_MACOSX)
EXPECT_TRUE(device == NULL);
+#else
+ if (VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation()) {
+ EXPECT_TRUE(device == NULL);
+ } else {
+ // The presence of the actual device is only checked on AllocateAndStart()
+ // and not on creation for QTKit API in Mac OS X platform.
+ EXPECT_CALL(*client_, OnErr()).Times(1);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+ device->StopAndDeAllocate();
+ }
+#endif
}
TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
+ names_ = EnumerateDevices();
+ if (!names_->size()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
- ASSERT_FALSE(device.get() == NULL);
- DVLOG(1) << names_.front().id();
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
+ DVLOG(1) << names_->front().id();
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -184,15 +268,15 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
}
TEST_F(VideoCaptureDeviceTest, Capture720p) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
+ names_ = EnumerateDevices();
+ if (!names_->size()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
- ASSERT_FALSE(device.get() == NULL);
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -209,14 +293,14 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
}
TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
+ names_ = EnumerateDevices();
+ if (!names_->size()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -234,8 +318,8 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
}
TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
+ names_ = EnumerateDevices();
+ if (!names_->size()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
@@ -244,7 +328,7 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
for (int i = 0; i <= 5; i++) {
ResetWithNewClient();
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
+ video_capture_device_factory_->Create(names_->front()));
gfx::Size resolution;
if (i % 2) {
resolution = gfx::Size(640, 480);
@@ -269,7 +353,7 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
ResetWithNewClient();
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
+ video_capture_device_factory_->Create(names_->front()));
device->AllocateAndStart(capture_params, client_.PassAs<Client>());
WaitForCapturedFrame();
@@ -280,14 +364,14 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
}
TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
+ names_ = EnumerateDevices();
+ if (!names_->size()) {
DVLOG(1) << "No camera available. Exiting test.";
return;
}
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -306,43 +390,17 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
device->StopAndDeAllocate();
}
-TEST_F(VideoCaptureDeviceTest, FakeCapture) {
- VideoCaptureDevice::Names names;
-
- FakeVideoCaptureDevice::GetDeviceNames(&names);
-
- ASSERT_GT(static_cast<int>(names.size()), 0);
-
- scoped_ptr<VideoCaptureDevice> device(
- FakeVideoCaptureDevice::Create(names.front()));
- ASSERT_TRUE(device.get() != NULL);
-
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
-
- VideoCaptureParams capture_params;
- capture_params.requested_format.frame_size.SetSize(640, 480);
- capture_params.requested_format.frame_rate = 30;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
- capture_params.allow_resolution_change = false;
- device->AllocateAndStart(capture_params, client_.PassAs<Client>());
- WaitForCapturedFrame();
- EXPECT_EQ(last_format().frame_size.width(), 640);
- EXPECT_EQ(last_format().frame_size.height(), 480);
- EXPECT_EQ(last_format().frame_rate, 30);
- device->StopAndDeAllocate();
-}
-
// Start the camera in 720p to capture MJPEG instead of a raw format.
TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
- DVLOG(1) << "No camera available. Exiting test.";
+ scoped_ptr<VideoCaptureDevice::Name> name =
+ GetFirstDeviceNameSupportingPixelFormat(PIXEL_FORMAT_MJPEG);
+ if (!name) {
+ DVLOG(1) << "No camera supports MJPEG format. Exiting test.";
return;
}
scoped_ptr<VideoCaptureDevice> device(
- VideoCaptureDevice::Create(names_.front()));
- ASSERT_TRUE(device.get() != NULL);
+ video_capture_device_factory_->Create(*name));
+ ASSERT_TRUE(device);
EXPECT_CALL(*client_, OnErr())
.Times(0);
@@ -362,72 +420,13 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
}
TEST_F(VideoCaptureDeviceTest, GetDeviceSupportedFormats) {
- VideoCaptureDevice::GetDeviceNames(&names_);
- if (!names_.size()) {
- DVLOG(1) << "No camera available. Exiting test.";
- return;
- }
- VideoCaptureFormats supported_formats;
- VideoCaptureDevice::Names::iterator names_iterator;
- for (names_iterator = names_.begin(); names_iterator != names_.end();
- ++names_iterator) {
- VideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
- &supported_formats);
- // Nothing to test here since we cannot forecast the hardware capabilities.
- }
-}
-
-TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
- VideoCaptureDevice::Names names;
-
- FakeVideoCaptureDevice::GetDeviceNames(&names);
- VideoCaptureParams capture_params;
- capture_params.requested_format.frame_size.SetSize(640, 480);
- capture_params.requested_format.frame_rate = 30;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
- capture_params.allow_resolution_change = true;
-
- ASSERT_GT(static_cast<int>(names.size()), 0);
-
- scoped_ptr<VideoCaptureDevice> device(
- FakeVideoCaptureDevice::Create(names.front()));
- ASSERT_TRUE(device.get() != NULL);
-
- EXPECT_CALL(*client_, OnErr())
- .Times(0);
- int action_count = 200;
-
- device->AllocateAndStart(capture_params, client_.PassAs<Client>());
-
- // We set TimeWait to 200 action timeouts and this should be enough for at
- // least action_count/kFakeCaptureCapabilityChangePeriod calls.
- for (int i = 0; i < action_count; ++i) {
- WaitForCapturedFrame();
- }
- device->StopAndDeAllocate();
-}
-
-TEST_F(VideoCaptureDeviceTest, FakeGetDeviceSupportedFormats) {
- VideoCaptureDevice::Names names;
- FakeVideoCaptureDevice::GetDeviceNames(&names);
-
- VideoCaptureFormats supported_formats;
- VideoCaptureDevice::Names::iterator names_iterator;
-
- for (names_iterator = names.begin(); names_iterator != names.end();
- ++names_iterator) {
- FakeVideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
- &supported_formats);
- EXPECT_EQ(supported_formats.size(), 2u);
- EXPECT_EQ(supported_formats[0].frame_size.width(), 640);
- EXPECT_EQ(supported_formats[0].frame_size.height(), 480);
- EXPECT_EQ(supported_formats[0].pixel_format, media::PIXEL_FORMAT_I420);
- EXPECT_GE(supported_formats[0].frame_rate, 20);
- EXPECT_EQ(supported_formats[1].frame_size.width(), 320);
- EXPECT_EQ(supported_formats[1].frame_size.height(), 240);
- EXPECT_EQ(supported_formats[1].pixel_format, media::PIXEL_FORMAT_I420);
- EXPECT_GE(supported_formats[1].frame_rate, 20);
- }
+ // Use PIXEL_FORMAT_MAX to iterate all device names for testing
+ // GetDeviceSupportedFormats().
+ scoped_ptr<VideoCaptureDevice::Name> name =
+ GetFirstDeviceNameSupportingPixelFormat(PIXEL_FORMAT_MAX);
+ // Verify no camera returned for PIXEL_FORMAT_MAX. Nothing else to test here
+ // since we cannot forecast the hardware capabilities.
+ ASSERT_FALSE(name);
}
}; // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.cc b/chromium/media/video/capture/video_capture_proxy.cc
deleted file mode 100644
index d488c50fe02..00000000000
--- a/chromium/media/video/capture/video_capture_proxy.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/capture/video_capture_proxy.h"
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/message_loop/message_loop_proxy.h"
-#include "media/base/video_frame.h"
-
-namespace {
-
-// Called on VC thread: extracts the state out of the VideoCapture, and
-// serialize it into a VideoCaptureState.
-media::VideoCaptureHandlerProxy::VideoCaptureState GetState(
- media::VideoCapture* capture) {
- media::VideoCaptureHandlerProxy::VideoCaptureState state;
- state.started = capture->CaptureStarted();
- state.frame_rate = capture->CaptureFrameRate();
- return state;
-}
-
-} // anonymous namespace
-
-namespace media {
-
-VideoCaptureHandlerProxy::VideoCaptureHandlerProxy(
- VideoCapture::EventHandler* proxied,
- scoped_refptr<base::MessageLoopProxy> main_message_loop)
- : proxied_(proxied),
- main_message_loop_(main_message_loop) {
-}
-
-VideoCaptureHandlerProxy::~VideoCaptureHandlerProxy() {
-}
-
-void VideoCaptureHandlerProxy::OnStarted(VideoCapture* capture) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnStartedOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture)));
-}
-
-void VideoCaptureHandlerProxy::OnStopped(VideoCapture* capture) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnStoppedOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture)));
-}
-
-void VideoCaptureHandlerProxy::OnPaused(VideoCapture* capture) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnPausedOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture)));
-}
-
-void VideoCaptureHandlerProxy::OnError(VideoCapture* capture, int error_code) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnErrorOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture),
- error_code));
-}
-
-void VideoCaptureHandlerProxy::OnRemoved(VideoCapture* capture) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnRemovedOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture)));
-}
-
-void VideoCaptureHandlerProxy::OnFrameReady(
- VideoCapture* capture,
- const scoped_refptr<VideoFrame>& frame) {
- main_message_loop_->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureHandlerProxy::OnFrameReadyOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture),
- frame));
-}
-
-void VideoCaptureHandlerProxy::OnStartedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state) {
- state_ = state;
- proxied_->OnStarted(capture);
-}
-
-void VideoCaptureHandlerProxy::OnStoppedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state) {
- state_ = state;
- proxied_->OnStopped(capture);
-}
-
-void VideoCaptureHandlerProxy::OnPausedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state) {
- state_ = state;
- proxied_->OnPaused(capture);
-}
-
-void VideoCaptureHandlerProxy::OnErrorOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- int error_code) {
- state_ = state;
- proxied_->OnError(capture, error_code);
-}
-
-void VideoCaptureHandlerProxy::OnRemovedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state) {
- state_ = state;
- proxied_->OnRemoved(capture);
-}
-
-void VideoCaptureHandlerProxy::OnFrameReadyOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- const scoped_refptr<VideoFrame>& frame) {
- state_ = state;
- proxied_->OnFrameReady(capture, frame);
-}
-
-} // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.h b/chromium/media/video/capture/video_capture_proxy.h
deleted file mode 100644
index fca0a80add7..00000000000
--- a/chromium/media/video/capture/video_capture_proxy.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_PROXY_H_
-#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_PROXY_H_
-
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "media/video/capture/video_capture.h"
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace media {
-
-// This is a helper class to proxy a VideoCapture::EventHandler. In the renderer
-// process, the VideoCaptureImpl calls its handler on a "Video Capture" thread,
-// this class allows seamless proxying to another thread ("main thread"), which
-// would be the thread where the instance of this class is created. The
-// "proxied" handler is then called on that thread.
-// Since the VideoCapture is living on the "Video Capture" thread, querying its
-// state from the "main thread" is fundamentally racy. Instead this class keeps
-// track of the state every time it is called by the VideoCapture (on the VC
-// thread), and forwards that information to the main thread.
-class MEDIA_EXPORT VideoCaptureHandlerProxy
- : public VideoCapture::EventHandler {
- public:
- struct VideoCaptureState {
- VideoCaptureState() : started(false), frame_rate(0) {}
- bool started;
- int frame_rate;
- };
-
- // Called on main thread.
- VideoCaptureHandlerProxy(
- VideoCapture::EventHandler* proxied,
- scoped_refptr<base::MessageLoopProxy> main_message_loop);
- virtual ~VideoCaptureHandlerProxy();
-
- // Retrieves the state of the VideoCapture. Must be called on main thread.
- const VideoCaptureState& state() const { return state_; }
-
- // VideoCapture::EventHandler implementation, called on VC thread.
- virtual void OnStarted(VideoCapture* capture) OVERRIDE;
- virtual void OnStopped(VideoCapture* capture) OVERRIDE;
- virtual void OnPaused(VideoCapture* capture) OVERRIDE;
- virtual void OnError(VideoCapture* capture, int error_code) OVERRIDE;
- virtual void OnRemoved(VideoCapture* capture) OVERRIDE;
- virtual void OnFrameReady(VideoCapture* capture,
- const scoped_refptr<VideoFrame>& frame) OVERRIDE;
-
- private:
- // Called on main thread.
- void OnStartedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state);
- void OnStoppedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state);
- void OnPausedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state);
- void OnErrorOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- int error_code);
- void OnRemovedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state);
- void OnFrameReadyOnMainThread(VideoCapture* capture,
- const VideoCaptureState& state,
- const scoped_refptr<VideoFrame>& frame);
-
- // Only accessed from main thread.
- VideoCapture::EventHandler* proxied_;
- VideoCaptureState state_;
-
- scoped_refptr<base::MessageLoopProxy> main_message_loop_;
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_PROXY_H_
diff --git a/chromium/media/video/capture/video_capture_types.cc b/chromium/media/video/capture/video_capture_types.cc
index aee3865a57b..fa78f427489 100644
--- a/chromium/media/video/capture/video_capture_types.cc
+++ b/chromium/media/video/capture/video_capture_types.cc
@@ -9,10 +9,10 @@
namespace media {
VideoCaptureFormat::VideoCaptureFormat()
- : frame_rate(0), pixel_format(PIXEL_FORMAT_UNKNOWN) {}
+ : frame_rate(0.0f), pixel_format(PIXEL_FORMAT_UNKNOWN) {}
VideoCaptureFormat::VideoCaptureFormat(const gfx::Size& frame_size,
- int frame_rate,
+ float frame_rate,
VideoPixelFormat pixel_format)
: frame_size(frame_size),
frame_rate(frame_rate),
@@ -21,9 +21,9 @@ VideoCaptureFormat::VideoCaptureFormat(const gfx::Size& frame_size,
bool VideoCaptureFormat::IsValid() const {
return (frame_size.width() < media::limits::kMaxDimension) &&
(frame_size.height() < media::limits::kMaxDimension) &&
- (frame_size.GetArea() > 0) &&
+ (frame_size.GetArea() >= 0) &&
(frame_size.GetArea() < media::limits::kMaxCanvas) &&
- (frame_rate > 0) &&
+ (frame_rate >= 0.0f) &&
(frame_rate < media::limits::kMaxFramesPerSecond) &&
(pixel_format >= PIXEL_FORMAT_UNKNOWN) &&
(pixel_format < PIXEL_FORMAT_MAX);
diff --git a/chromium/media/video/capture/video_capture_types.h b/chromium/media/video/capture/video_capture_types.h
index 6a4f453280b..5f23a1b1760 100644
--- a/chromium/media/video/capture/video_capture_types.h
+++ b/chromium/media/video/capture/video_capture_types.h
@@ -27,9 +27,14 @@ enum VideoPixelFormat {
PIXEL_FORMAT_MJPEG,
PIXEL_FORMAT_NV21,
PIXEL_FORMAT_YV12,
+ PIXEL_FORMAT_TEXTURE, // Capture format as a GL texture.
PIXEL_FORMAT_MAX,
};
+// Some drivers use rational time per frame instead of float frame rate, this
+// constant k is used to convert between both: A fps -> [k/k*A] seconds/frame.
+const int kFrameRatePrecision = 10000;
+
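As a worked example of that convention (a sketch; the helper function is hypothetical): at k = 10000, 30 fps maps to the rational 10000/300000 seconds per frame, and 29.97 fps to 10000/299700.

    // Float fps -> rational seconds-per-frame using kFrameRatePrecision.
    void FrameRateToTimePerFrame(float frame_rate,
                                 int* numerator, int* denominator) {
      *numerator = media::kFrameRatePrecision;
      *denominator = static_cast<int>(frame_rate * media::kFrameRatePrecision);
    }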
// Video capture format specification.
// This class is used by the video capture device to specify the format of every
// frame captured and returned to a client. It is also used to specify a
@@ -38,7 +43,7 @@ class MEDIA_EXPORT VideoCaptureFormat {
public:
VideoCaptureFormat();
VideoCaptureFormat(const gfx::Size& frame_size,
- int frame_rate,
+ float frame_rate,
VideoPixelFormat pixel_format);
// Checks that all values are in the expected range. All limits are specified
@@ -46,7 +51,7 @@ class MEDIA_EXPORT VideoCaptureFormat {
bool IsValid() const;
gfx::Size frame_size;
- int frame_rate;
+ float frame_rate;
VideoPixelFormat pixel_format;
};
diff --git a/chromium/media/video/capture/win/capability_list_win.cc b/chromium/media/video/capture/win/capability_list_win.cc
index bfa58edcc4b..9b4531b8282 100644
--- a/chromium/media/video/capture/win/capability_list_win.cc
+++ b/chromium/media/video/capture/win/capability_list_win.cc
@@ -54,7 +54,7 @@ void CapabilityList::Add(const VideoCaptureCapabilityWin& capability) {
const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedFormat(
int requested_width,
int requested_height,
- int requested_frame_rate) const {
+ float requested_frame_rate) const {
DCHECK(CalledOnValidThread());
DCHECK(!capabilities_.empty());
diff --git a/chromium/media/video/capture/win/capability_list_win.h b/chromium/media/video/capture/win/capability_list_win.h
index bf1e8d6ee89..c381b4b5d0f 100644
--- a/chromium/media/video/capture/win/capability_list_win.h
+++ b/chromium/media/video/capture/win/capability_list_win.h
@@ -46,7 +46,7 @@ class CapabilityList : public base::NonThreadSafe {
const VideoCaptureCapabilityWin& GetBestMatchedFormat(
int requested_width,
int requested_height,
- int requested_frame_rate) const;
+ float requested_frame_rate) const;
private:
typedef std::list<VideoCaptureCapabilityWin> Capabilities;
diff --git a/chromium/media/video/capture/win/filter_base_win.cc b/chromium/media/video/capture/win/filter_base_win.cc
index ddc68d68b2c..768c486a9f5 100644
--- a/chromium/media/video/capture/win/filter_base_win.cc
+++ b/chromium/media/video/capture/win/filter_base_win.cc
@@ -9,7 +9,7 @@
namespace media {
// Implement IEnumPins.
-class PinEnumerator
+class PinEnumerator FINAL
: public IEnumPins,
public base::RefCounted<PinEnumerator> {
public:
diff --git a/chromium/media/video/capture/win/video_capture_device_factory_win.cc b/chromium/media/video/capture/win/video_capture_device_factory_win.cc
new file mode 100644
index 00000000000..920126df09e
--- /dev/null
+++ b/chromium/media/video/capture/win/video_capture_device_factory_win.cc
@@ -0,0 +1,436 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/win/video_capture_device_factory_win.h"
+
+#include <mfapi.h>
+#include <mferror.h>
+
+#include "base/command_line.h"
+#include "base/lazy_instance.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/win/metro.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_variant.h"
+#include "base/win/windows_version.h"
+#include "media/base/media_switches.h"
+#include "media/video/capture/win/video_capture_device_mf_win.h"
+#include "media/video/capture/win/video_capture_device_win.h"
+
+using base::win::ScopedCoMem;
+using base::win::ScopedComPtr;
+using base::win::ScopedVariant;
+
+namespace media {
+
+// Lazy instance to initialize the Media Foundation library.
+class MFInitializerSingleton {
+ public:
+ MFInitializerSingleton() { MFStartup(MF_VERSION, MFSTARTUP_LITE); }
+ ~MFInitializerSingleton() { MFShutdown(); }
+};
+
+static base::LazyInstance<MFInitializerSingleton> g_mf_initialize =
+ LAZY_INSTANCE_INITIALIZER;
+
+static void EnsureMediaFoundationInit() {
+ g_mf_initialize.Get();
+}
+
+static bool LoadMediaFoundationDlls() {
+ static const wchar_t* const kMfDLLs[] = {
+ L"%WINDIR%\\system32\\mf.dll",
+ L"%WINDIR%\\system32\\mfplat.dll",
+ L"%WINDIR%\\system32\\mfreadwrite.dll",
+ };
+
+ for (int i = 0; i < arraysize(kMfDLLs); ++i) {
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kMfDLLs[i], path, arraysize(path));
+ if (!LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH))
+ return false;
+ }
+ return true;
+}
+
+static bool PrepareVideoCaptureAttributesMediaFoundation(
+ IMFAttributes** attributes,
+ int count) {
+ EnsureMediaFoundationInit();
+
+ if (FAILED(MFCreateAttributes(attributes, count)))
+ return false;
+
+ return SUCCEEDED((*attributes)->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID));
+}
+
+static bool CreateVideoCaptureDeviceMediaFoundation(const char* sym_link,
+ IMFMediaSource** source) {
+ ScopedComPtr<IMFAttributes> attributes;
+ if (!PrepareVideoCaptureAttributesMediaFoundation(attributes.Receive(), 2))
+ return false;
+
+ attributes->SetString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK,
+ base::SysUTF8ToWide(sym_link).c_str());
+
+ return SUCCEEDED(MFCreateDeviceSource(attributes, source));
+}
+
+static bool EnumerateVideoDevicesMediaFoundation(IMFActivate*** devices,
+ UINT32* count) {
+ ScopedComPtr<IMFAttributes> attributes;
+ if (!PrepareVideoCaptureAttributesMediaFoundation(attributes.Receive(), 1))
+ return false;
+
+ return SUCCEEDED(MFEnumDeviceSources(attributes, devices, count));
+}
+
+static void GetDeviceNamesDirectShow(VideoCaptureDevice::Names* device_names) {
+ DCHECK(device_names);
+ DVLOG(1) << " GetDeviceNamesDirectShow";
+
+ ScopedComPtr<ICreateDevEnum> dev_enum;
+ HRESULT hr = dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL,
+ CLSCTX_INPROC);
+ if (FAILED(hr))
+ return;
+
+ ScopedComPtr<IEnumMoniker> enum_moniker;
+ hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ enum_moniker.Receive(), 0);
+ // CreateClassEnumerator returns S_FALSE on some Windows versions
+ // when no camera exists, so the FAILED macro can't be used.
+ if (hr != S_OK)
+ return;
+
+ device_names->clear();
+
+ // Name of a fake DirectShow filter that exists on computers with
+ // GTalk installed.
+ static const char kGoogleCameraAdapter[] = "google camera adapter";
+
+ // Enumerate all video capture devices.
+ ScopedComPtr<IMoniker> moniker;
+ int index = 0;
+ while (enum_moniker->Next(1, moniker.Receive(), NULL) == S_OK) {
+ ScopedComPtr<IPropertyBag> prop_bag;
+ hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
+ if (FAILED(hr)) {
+ moniker.Release();
+ continue;
+ }
+
+ // Find the description or friendly name.
+ ScopedVariant name;
+ hr = prop_bag->Read(L"Description", name.Receive(), 0);
+ if (FAILED(hr))
+ hr = prop_bag->Read(L"FriendlyName", name.Receive(), 0);
+
+ if (SUCCEEDED(hr) && name.type() == VT_BSTR) {
+ // Ignore all VFW drivers and the special Google Camera Adapter.
+ // Google Camera Adapter is not a real DirectShow camera device.
+ // VFW entries are very old Video for Windows drivers that cannot be used.
+ const wchar_t* str_ptr = V_BSTR(&name);
+ const int name_length = arraysize(kGoogleCameraAdapter) - 1;
+
+ if ((wcsstr(str_ptr, L"(VFW)") == NULL) &&
+ lstrlenW(str_ptr) < name_length ||
+ (!(LowerCaseEqualsASCII(str_ptr, str_ptr + name_length,
+ kGoogleCameraAdapter)))) {
+ std::string id;
+ std::string device_name(base::SysWideToUTF8(str_ptr));
+ name.Reset();
+ hr = prop_bag->Read(L"DevicePath", name.Receive(), 0);
+ if (FAILED(hr) || name.type() != VT_BSTR) {
+ id = device_name;
+ } else {
+ DCHECK_EQ(name.type(), VT_BSTR);
+ id = base::SysWideToUTF8(V_BSTR(&name));
+ }
+
+ device_names->push_back(VideoCaptureDevice::Name(device_name, id,
+ VideoCaptureDevice::Name::DIRECT_SHOW));
+ }
+ }
+ moniker.Release();
+ }
+}
+
+static void GetDeviceNamesMediaFoundation(
+ VideoCaptureDevice::Names* device_names) {
+ DVLOG(1) << " GetDeviceNamesMediaFoundation";
+ ScopedCoMem<IMFActivate*> devices;
+ UINT32 count;
+ if (!EnumerateVideoDevicesMediaFoundation(&devices, &count))
+ return;
+
+ HRESULT hr;
+ for (UINT32 i = 0; i < count; ++i) {
+ UINT32 name_size, id_size;
+ ScopedCoMem<wchar_t> name, id;
+ if (SUCCEEDED(hr = devices[i]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME, &name, &name_size)) &&
+ SUCCEEDED(hr = devices[i]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &id,
+ &id_size))) {
+ std::wstring name_w(name, name_size), id_w(id, id_size);
+ VideoCaptureDevice::Name device(base::SysWideToUTF8(name_w),
+ base::SysWideToUTF8(id_w),
+ VideoCaptureDevice::Name::MEDIA_FOUNDATION);
+ device_names->push_back(device);
+ } else {
+ DLOG(WARNING) << "GetAllocatedString failed: " << std::hex << hr;
+ }
+ devices[i]->Release();
+ }
+}
+
+static void GetDeviceSupportedFormatsDirectShow(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* formats) {
+ DVLOG(1) << "GetDeviceSupportedFormatsDirectShow for " << device.name();
+ ScopedComPtr<ICreateDevEnum> dev_enum;
+ HRESULT hr = dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL,
+ CLSCTX_INPROC);
+ if (FAILED(hr))
+ return;
+
+ ScopedComPtr<IEnumMoniker> enum_moniker;
+ hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ enum_moniker.Receive(), 0);
+ // CreateClassEnumerator returns S_FALSE on some Windows OS when no camera
+ // exists. Therefore the FAILED macro can't be used.
+ if (hr != S_OK)
+ return;
+
+ // Walk the capture devices. No need to check for "google camera adapter",
+ // since this is already skipped in the enumeration of GetDeviceNames().
+ ScopedComPtr<IMoniker> moniker;
+ int index = 0;
+ ScopedVariant device_id;
+ while (enum_moniker->Next(1, moniker.Receive(), NULL) == S_OK) {
+ ScopedComPtr<IPropertyBag> prop_bag;
+ hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
+ if (FAILED(hr)) {
+ moniker.Release();
+ continue;
+ }
+
+ device_id.Reset();
+ hr = prop_bag->Read(L"DevicePath", device_id.Receive(), 0);
+ if (FAILED(hr)) {
+ DVLOG(1) << "Couldn't read a device's DevicePath.";
+ return;
+ }
+ if (device.id() == base::SysWideToUTF8(V_BSTR(&device_id)))
+ break;
+ moniker.Release();
+ }
+
+ if (moniker.get()) {
+ base::win::ScopedComPtr<IBaseFilter> capture_filter;
+ hr = VideoCaptureDeviceWin::GetDeviceFilter(device,
+ capture_filter.Receive());
+ if (!capture_filter) {
+ DVLOG(2) << "Failed to create capture filter.";
+ return;
+ }
+
+ base::win::ScopedComPtr<IPin> output_capture_pin(
+ VideoCaptureDeviceWin::GetPin(capture_filter,
+ PINDIR_OUTPUT,
+ PIN_CATEGORY_CAPTURE));
+ if (!output_capture_pin) {
+ DVLOG(2) << "Failed to get capture output pin";
+ return;
+ }
+
+ ScopedComPtr<IAMStreamConfig> stream_config;
+ hr = output_capture_pin.QueryInterface(stream_config.Receive());
+ if (FAILED(hr)) {
+ DVLOG(2) << "Failed to get IAMStreamConfig interface from "
+ "capture device";
+ return;
+ }
+
+ int count = 0, size = 0;
+ hr = stream_config->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ DVLOG(2) << "Failed to GetNumberOfCapabilities";
+ return;
+ }
+
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
+ for (int i = 0; i < count; ++i) {
+ VideoCaptureDeviceWin::ScopedMediaType media_type;
+ hr = stream_config->GetStreamCaps(i, media_type.Receive(), caps.get());
+ // GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEEDED()
+ // macros here since they'll trigger incorrectly.
+ if (hr != S_OK) {
+ DVLOG(2) << "Failed to GetStreamCaps";
+ return;
+ }
+
+ if (media_type->majortype == MEDIATYPE_Video &&
+ media_type->formattype == FORMAT_VideoInfo) {
+ VideoCaptureFormat format;
+ format.pixel_format =
+ VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
+ media_type->subtype);
+ if (format.pixel_format == PIXEL_FORMAT_UNKNOWN)
+ continue;
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ format.frame_size.SetSize(h->bmiHeader.biWidth,
+ h->bmiHeader.biHeight);
+ // Trust the frame rate from the VIDEOINFOHEADER.
+ format.frame_rate = (h->AvgTimePerFrame > 0) ?
+ static_cast<int>(kSecondsToReferenceTime / h->AvgTimePerFrame) :
+ 0;
+ formats->push_back(format);
+ DVLOG(1) << device.name() << " resolution: "
+ << format.frame_size.ToString() << ", fps: " << format.frame_rate
+ << ", pixel format: " << format.pixel_format;
+ }
+ }
+ }
+}
+
+static void GetDeviceSupportedFormatsMediaFoundation(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* formats) {
+ DVLOG(1) << "GetDeviceSupportedFormatsMediaFoundation for " << device.name();
+ ScopedComPtr<IMFMediaSource> source;
+ if (!CreateVideoCaptureDeviceMediaFoundation(device.id().c_str(),
+ source.Receive())) {
+ return;
+ }
+
+ HRESULT hr;
+ base::win::ScopedComPtr<IMFSourceReader> reader;
+ if (FAILED(hr = MFCreateSourceReaderFromMediaSource(source, NULL,
+ reader.Receive()))) {
+ DLOG(ERROR) << "MFCreateSourceReaderFromMediaSource: " << std::hex << hr;
+ return;
+ }
+
+ DWORD stream_index = 0;
+ ScopedComPtr<IMFMediaType> type;
+ while (SUCCEEDED(hr = reader->GetNativeMediaType(
+ MF_SOURCE_READER_FIRST_VIDEO_STREAM, stream_index, type.Receive()))) {
+ UINT32 width, height;
+ hr = MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width, &height);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "MFGetAttributeSize: " << std::hex << hr;
+ return;
+ }
+ VideoCaptureFormat capture_format;
+ capture_format.frame_size.SetSize(width, height);
+
+ UINT32 numerator, denominator;
+ hr = MFGetAttributeRatio(type, MF_MT_FRAME_RATE, &numerator, &denominator);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "MFGetAttributeSize: " << std::hex << hr;
+ return;
+ }
+ capture_format.frame_rate = denominator ? numerator / denominator : 0;
+
+ GUID type_guid;
+ hr = type->GetGUID(MF_MT_SUBTYPE, &type_guid);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "GetGUID: " << std::hex << hr;
+ return;
+ }
+ VideoCaptureDeviceMFWin::FormatFromGuid(type_guid,
+ &capture_format.pixel_format);
+ type.Release();
+ formats->push_back(capture_format);
+ ++stream_index;
+
+ DVLOG(1) << device.name() << " resolution: "
+ << capture_format.frame_size.ToString() << ", fps: "
+ << capture_format.frame_rate << ", pixel format: "
+ << capture_format.pixel_format;
+ }
+}
+
+// Returns true iff the current platform supports the Media Foundation API
+// and the required DLLs are available. On Vista this API is an optional
+// but the API is advertised as a part of Windows 7 and onwards. However,
+// we've seen that the required DLLs are not available in some Win7
+// distributions such as Windows 7 N and Windows 7 KN.
+// static
+bool VideoCaptureDeviceFactoryWin::PlatformSupportsMediaFoundation() {
+ // Even though the DLLs might be available on Vista, we get crashes
+ // when running our tests on the build bots.
+ if (base::win::GetVersion() < base::win::VERSION_WIN7)
+ return false;
+
+ static bool g_dlls_available = LoadMediaFoundationDlls();
+ return g_dlls_available;
+}
+
+VideoCaptureDeviceFactoryWin::VideoCaptureDeviceFactoryWin() {
+ // Use Media Foundation for Metro processes (Win8 and later) and DirectShow
+ // otherwise, unless forced via flag. Media Foundation can also be forced if
+ // the appropriate flag is set and we are on Windows 7 or 8 in non-Metro
+ // mode.
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ use_media_foundation_ = (base::win::IsMetroProcess() &&
+ !cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) ||
+ (base::win::GetVersion() >= base::win::VERSION_WIN7 &&
+ cmd_line->HasSwitch(switches::kForceMediaFoundationVideoCapture));
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ scoped_ptr<VideoCaptureDevice> device;
+ if (device_name.capture_api_type() ==
+ VideoCaptureDevice::Name::MEDIA_FOUNDATION) {
+ DCHECK(PlatformSupportsMediaFoundation());
+ device.reset(new VideoCaptureDeviceMFWin(device_name));
+ DVLOG(1) << " MediaFoundation Device: " << device_name.name();
+ ScopedComPtr<IMFMediaSource> source;
+ if (!CreateVideoCaptureDeviceMediaFoundation(device_name.id().c_str(),
+ source.Receive())) {
+ return scoped_ptr<VideoCaptureDevice>();
+ }
+ if (!static_cast<VideoCaptureDeviceMFWin*>(device.get())->Init(source))
+ device.reset();
+ } else if (device_name.capture_api_type() ==
+ VideoCaptureDevice::Name::DIRECT_SHOW) {
+ device.reset(new VideoCaptureDeviceWin(device_name));
+ DVLOG(1) << " DirectShow Device: " << device_name.name();
+ if (!static_cast<VideoCaptureDeviceWin*>(device.get())->Init())
+ device.reset();
+ } else {
+ NOTREACHED() << " Couldn't recognize VideoCaptureDevice type";
+ }
+ return device.Pass();
+}
+
+void VideoCaptureDeviceFactoryWin::GetDeviceNames(
+ VideoCaptureDevice::Names* device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (use_media_foundation_)
+ GetDeviceNamesMediaFoundation(device_names);
+ else
+ GetDeviceNamesDirectShow(device_names);
+}
+
+void VideoCaptureDeviceFactoryWin::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (use_media_foundation_)
+ GetDeviceSupportedFormatsMediaFoundation(device, formats);
+ else
+ GetDeviceSupportedFormatsDirectShow(device, formats);
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/win/video_capture_device_factory_win.h b/chromium/media/video/capture/win/video_capture_device_factory_win.h
new file mode 100644
index 00000000000..fdec127098f
--- /dev/null
+++ b/chromium/media/video/capture/win/video_capture_device_factory_win.h
@@ -0,0 +1,39 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a VideoCaptureDeviceFactory class for Windows platforms.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
+
+#include "media/video/capture/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate Windows
+// devices, via either DirectShow or MediaFoundation APIs.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryWin :
+ public VideoCaptureDeviceFactory {
+ public:
+ static bool PlatformSupportsMediaFoundation();
+
+ VideoCaptureDeviceFactoryWin();
+ virtual ~VideoCaptureDeviceFactoryWin() {}
+
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) OVERRIDE;
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) OVERRIDE;
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) OVERRIDE;
+
+ private:
+ bool use_media_foundation_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
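Tying it together, creating a Windows device through the new factory (a sketch; the device name and id strings are hypothetical):

    media::VideoCaptureDeviceFactoryWin factory;
    media::VideoCaptureDevice::Name name(
        "Integrated Camera", "\\\\?\\usb#vid_1234&pid_5678",
        media::VideoCaptureDevice::Name::DIRECT_SHOW);
    scoped_ptr<media::VideoCaptureDevice> device = factory.Create(name);
    // Create() returns NULL when the underlying API object fails to
    // initialize; callers must check before AllocateAndStart().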
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.cc b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
index cc1e7505dbe..07c7612f039 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
@@ -7,8 +7,8 @@
#include <mfapi.h>
#include <mferror.h>
-#include "base/lazy_instance.h"
#include "base/memory/ref_counted.h"
+#include "base/strings/stringprintf.h"
#include "base/strings/sys_string_conversions.h"
#include "base/synchronization/waitable_event.h"
#include "base/win/scoped_co_mem.h"
@@ -19,7 +19,6 @@ using base::win::ScopedCoMem;
using base::win::ScopedComPtr;
namespace media {
-namespace {
// In Windows device identifiers, the USB VID and PID are preceded by the string
// "vid_" or "pid_". The identifiers are each 4 bytes long.
@@ -27,74 +26,7 @@ const char kVidPrefix[] = "vid_"; // Also contains '\0'.
const char kPidPrefix[] = "pid_"; // Also contains '\0'.
const size_t kVidPidSize = 4;
-class MFInitializerSingleton {
- public:
- MFInitializerSingleton() { MFStartup(MF_VERSION, MFSTARTUP_LITE); }
- ~MFInitializerSingleton() { MFShutdown(); }
-};
-
-static base::LazyInstance<MFInitializerSingleton> g_mf_initialize =
- LAZY_INSTANCE_INITIALIZER;
-
-void EnsureMFInit() {
- g_mf_initialize.Get();
-}
-
-bool PrepareVideoCaptureAttributes(IMFAttributes** attributes, int count) {
- EnsureMFInit();
-
- if (FAILED(MFCreateAttributes(attributes, count)))
- return false;
-
- return SUCCEEDED((*attributes)->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
- MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID));
-}
-
-bool EnumerateVideoDevices(IMFActivate*** devices,
- UINT32* count) {
- ScopedComPtr<IMFAttributes> attributes;
- if (!PrepareVideoCaptureAttributes(attributes.Receive(), 1))
- return false;
-
- return SUCCEEDED(MFEnumDeviceSources(attributes, devices, count));
-}
-
-bool CreateVideoCaptureDevice(const char* sym_link, IMFMediaSource** source) {
- ScopedComPtr<IMFAttributes> attributes;
- if (!PrepareVideoCaptureAttributes(attributes.Receive(), 2))
- return false;
-
- attributes->SetString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK,
- base::SysUTF8ToWide(sym_link).c_str());
-
- return SUCCEEDED(MFCreateDeviceSource(attributes, source));
-}
-
-bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format) {
- struct {
- const GUID& guid;
- const VideoPixelFormat format;
- } static const kFormatMap[] = {
- { MFVideoFormat_I420, PIXEL_FORMAT_I420 },
- { MFVideoFormat_YUY2, PIXEL_FORMAT_YUY2 },
- { MFVideoFormat_UYVY, PIXEL_FORMAT_UYVY },
- { MFVideoFormat_RGB24, PIXEL_FORMAT_RGB24 },
- { MFVideoFormat_ARGB32, PIXEL_FORMAT_ARGB },
- { MFVideoFormat_MJPG, PIXEL_FORMAT_MJPEG },
- { MFVideoFormat_YV12, PIXEL_FORMAT_YV12 },
- };
-
- for (int i = 0; i < arraysize(kFormatMap); ++i) {
- if (kFormatMap[i].guid == guid) {
- *format = kFormatMap[i].format;
- return true;
- }
- }
-
- return false;
-}
-
-bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
+static bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
UINT32 width32, height32;
if (FAILED(MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width32, &height32)))
return false;
@@ -102,9 +34,9 @@ bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
return true;
}
-bool GetFrameRate(IMFMediaType* type,
- int* frame_rate_numerator,
- int* frame_rate_denominator) {
+static bool GetFrameRate(IMFMediaType* type,
+ int* frame_rate_numerator,
+ int* frame_rate_denominator) {
UINT32 numerator, denominator;
if (FAILED(MFGetAttributeRatio(type, MF_MT_FRAME_RATE, &numerator,
&denominator))||
@@ -116,18 +48,18 @@ bool GetFrameRate(IMFMediaType* type,
return true;
}
-bool FillCapabilitiesFromType(IMFMediaType* type,
- VideoCaptureCapabilityWin* capability) {
+static bool FillCapabilitiesFromType(IMFMediaType* type,
+ VideoCaptureCapabilityWin* capability) {
GUID type_guid;
if (FAILED(type->GetGUID(MF_MT_SUBTYPE, &type_guid)) ||
!GetFrameSize(type, &capability->supported_format.frame_size) ||
!GetFrameRate(type,
&capability->frame_rate_numerator,
&capability->frame_rate_denominator) ||
- !FormatFromGuid(type_guid, &capability->supported_format.pixel_format)) {
+ !VideoCaptureDeviceMFWin::FormatFromGuid(type_guid,
+ &capability->supported_format.pixel_format)) {
return false;
}
- // Keep the integer version of the frame_rate for (potential) returns.
capability->supported_format.frame_rate =
capability->frame_rate_numerator / capability->frame_rate_denominator;
@@ -153,26 +85,8 @@ HRESULT FillCapabilities(IMFSourceReader* source,
return (hr == MF_E_NO_MORE_TYPES) ? S_OK : hr;
}
-bool LoadMediaFoundationDlls() {
- static const wchar_t* const kMfDLLs[] = {
- L"%WINDIR%\\system32\\mf.dll",
- L"%WINDIR%\\system32\\mfplat.dll",
- L"%WINDIR%\\system32\\mfreadwrite.dll",
- };
-
- for (int i = 0; i < arraysize(kMfDLLs); ++i) {
- wchar_t path[MAX_PATH] = {0};
- ExpandEnvironmentStringsW(kMfDLLs[i], path, arraysize(path));
- if (!LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH))
- return false;
- }
-
- return true;
-}
-
-} // namespace
-class MFReaderCallback
+class MFReaderCallback FINAL
: public base::RefCountedThreadSafe<MFReaderCallback>,
public IMFSourceReaderCallback {
public:
@@ -204,9 +118,9 @@ class MFReaderCallback
STDMETHOD(OnReadSample)(HRESULT status, DWORD stream_index,
DWORD stream_flags, LONGLONG time_stamp, IMFSample* sample) {
- base::Time stamp(base::Time::Now());
+ base::TimeTicks stamp(base::TimeTicks::Now());
if (!sample) {
- observer_->OnIncomingCapturedFrame(NULL, 0, stamp, 0);
+ observer_->OnIncomingCapturedData(NULL, 0, 0, stamp);
return S_OK;
}
@@ -220,7 +134,7 @@ class MFReaderCallback
DWORD length = 0, max_length = 0;
BYTE* data = NULL;
buffer->Lock(&data, &max_length, &length);
- observer_->OnIncomingCapturedFrame(data, length, stamp, 0);
+ observer_->OnIncomingCapturedData(data, length, 0, stamp);
buffer->Unlock();
}
}
@@ -249,41 +163,29 @@ class MFReaderCallback
};
// static
-bool VideoCaptureDeviceMFWin::PlatformSupported() {
- // Even though the DLLs might be available on Vista, we get crashes
- // when running our tests on the build bots.
- if (base::win::GetVersion() < base::win::VERSION_WIN7)
- return false;
-
- static bool g_dlls_available = LoadMediaFoundationDlls();
- return g_dlls_available;
-}
-
-// static
-void VideoCaptureDeviceMFWin::GetDeviceNames(Names* device_names) {
- ScopedCoMem<IMFActivate*> devices;
- UINT32 count;
- if (!EnumerateVideoDevices(&devices, &count))
- return;
+bool VideoCaptureDeviceMFWin::FormatFromGuid(const GUID& guid,
+ VideoPixelFormat* format) {
+ struct {
+ const GUID& guid;
+ const VideoPixelFormat format;
+ } static const kFormatMap[] = {
+ { MFVideoFormat_I420, PIXEL_FORMAT_I420 },
+ { MFVideoFormat_YUY2, PIXEL_FORMAT_YUY2 },
+ { MFVideoFormat_UYVY, PIXEL_FORMAT_UYVY },
+ { MFVideoFormat_RGB24, PIXEL_FORMAT_RGB24 },
+ { MFVideoFormat_ARGB32, PIXEL_FORMAT_ARGB },
+ { MFVideoFormat_MJPG, PIXEL_FORMAT_MJPEG },
+ { MFVideoFormat_YV12, PIXEL_FORMAT_YV12 },
+ };
- HRESULT hr;
- for (UINT32 i = 0; i < count; ++i) {
- UINT32 name_size, id_size;
- ScopedCoMem<wchar_t> name, id;
- if (SUCCEEDED(hr = devices[i]->GetAllocatedString(
- MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME, &name, &name_size)) &&
- SUCCEEDED(hr = devices[i]->GetAllocatedString(
- MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &id,
- &id_size))) {
- std::wstring name_w(name, name_size), id_w(id, id_size);
- Name device(base::SysWideToUTF8(name_w), base::SysWideToUTF8(id_w),
- Name::MEDIA_FOUNDATION);
- device_names->push_back(device);
- } else {
- DLOG(WARNING) << "GetAllocatedString failed: " << std::hex << hr;
+ for (int i = 0; i < arraysize(kFormatMap); ++i) {
+ if (kFormatMap[i].guid == guid) {
+ *format = kFormatMap[i].format;
+ return true;
}
- devices[i]->Release();
}
+
+ return false;
}
const std::string VideoCaptureDevice::Name::GetModel() const {
@@ -315,14 +217,11 @@ VideoCaptureDeviceMFWin::~VideoCaptureDeviceMFWin() {
DCHECK(CalledOnValidThread());
}
-bool VideoCaptureDeviceMFWin::Init() {
+bool VideoCaptureDeviceMFWin::Init(
+ const base::win::ScopedComPtr<IMFMediaSource>& source) {
DCHECK(CalledOnValidThread());
DCHECK(!reader_);
- ScopedComPtr<IMFMediaSource> source;
- if (!CreateVideoCaptureDevice(name_.id().c_str(), source.Receive()))
- return false;
-
ScopedComPtr<IMFAttributes> attributes;
MFCreateAttributes(attributes.Receive(), 1);
DCHECK(attributes);
@@ -404,18 +303,16 @@ void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
flushed.TimedWait(base::TimeDelta::FromMilliseconds(kFlushTimeOutInMs));
}
-void VideoCaptureDeviceMFWin::OnIncomingCapturedFrame(
+void VideoCaptureDeviceMFWin::OnIncomingCapturedData(
const uint8* data,
int length,
- const base::Time& time_stamp,
- int rotation) {
+ int rotation,
+ const base::TimeTicks& time_stamp) {
base::AutoLock lock(lock_);
- if (data && client_.get())
- client_->OnIncomingCapturedFrame(data,
- length,
- time_stamp,
- rotation,
- capture_format_);
+ if (data && client_.get()) {
+ client_->OnIncomingCapturedData(
+ data, length, capture_format_, rotation, time_stamp);
+ }
if (capture_) {
HRESULT hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
@@ -432,9 +329,10 @@ void VideoCaptureDeviceMFWin::OnIncomingCapturedFrame(
}
void VideoCaptureDeviceMFWin::OnError(HRESULT hr) {
- DLOG(ERROR) << "VideoCaptureDeviceMFWin: " << std::hex << hr;
+ std::string log_msg = base::StringPrintf("VideoCaptureDeviceMFWin: %x", hr);
+ DLOG(ERROR) << log_msg;
if (client_.get())
- client_->OnError();
+ client_->OnError(log_msg);
}
} // namespace media
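FormatFromGuid() is now a public static helper; a short sketch of a hypothetical caller, assuming only the kFormatMap entries shown above:

    media::VideoPixelFormat format = media::PIXEL_FORMAT_UNKNOWN;
    if (media::VideoCaptureDeviceMFWin::FormatFromGuid(MFVideoFormat_YUY2,
                                                       &format)) {
      DCHECK_EQ(media::PIXEL_FORMAT_YUY2, format);  // Mapped via kFormatMap.
    }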
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.h b/chromium/media/video/capture/win/video_capture_device_mf_win.h
index 8f7fc75cf45..476a455a2ac 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.h
@@ -30,12 +30,13 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
: public base::NonThreadSafe,
public VideoCaptureDevice {
public:
+ static bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format);
+
explicit VideoCaptureDeviceMFWin(const Name& device_name);
virtual ~VideoCaptureDeviceMFWin();
// Opens the device driver for this device.
- // This function is used by the static VideoCaptureDevice::Create function.
- bool Init();
+ bool Init(const base::win::ScopedComPtr<IMFMediaSource>& source);
// VideoCaptureDevice implementation.
virtual void AllocateAndStart(const VideoCaptureParams& params,
@@ -43,21 +44,11 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
OVERRIDE;
virtual void StopAndDeAllocate() OVERRIDE;
- // Returns true iff the current platform supports the Media Foundation API
- // and that the DLLs are available. On Vista this API is an optional download
- // but the API is advertised as a part of Windows 7 and onwards. However,
- // we've seen that the required DLLs are not available in some Win7
- // distributions such as Windows 7 N and Windows 7 KN.
- static bool PlatformSupported();
-
- static void GetDeviceNames(Names* device_names);
-
- // Captured a new video frame.
- void OnIncomingCapturedFrame(
- const uint8* data,
- int length,
- const base::Time& time_stamp,
- int rotation);
+ // Captured new video data.
+ void OnIncomingCapturedData(const uint8* data,
+ int length,
+ int rotation,
+ const base::TimeTicks& time_stamp);
private:
void OnError(HRESULT hr);
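Init() no longer creates its own IMFMediaSource; the factory does. A condensed sketch of the resulting flow, mirroring the factory hunk earlier in this patch:

    // Factory side: create the source, then hand it to the device.
    ScopedComPtr<IMFMediaSource> source;
    if (CreateVideoCaptureDeviceMediaFoundation(device_name.id().c_str(),
                                                source.Receive())) {
      scoped_ptr<VideoCaptureDeviceMFWin> device(
          new VideoCaptureDeviceMFWin(device_name));
      if (!device->Init(source))
        device.reset();  // Init() fails if no source reader can be created.
    }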
diff --git a/chromium/media/video/capture/win/video_capture_device_win.cc b/chromium/media/video/capture/win/video_capture_device_win.cc
index 00056a70168..b533de9e070 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_win.cc
@@ -4,16 +4,15 @@
#include "media/video/capture/win/video_capture_device_win.h"
+#include <ks.h>
+#include <ksmedia.h>
+
#include <algorithm>
#include <list>
-#include "base/command_line.h"
-#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
-#include "base/win/metro.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_variant.h"
-#include "media/base/media_switches.h"
#include "media/video/capture/win/video_capture_device_mf_win.h"
using base::win::ScopedCoMem;
@@ -21,11 +20,12 @@ using base::win::ScopedComPtr;
using base::win::ScopedVariant;
namespace media {
-namespace {
// Finds and creates a DirectShow Video Capture filter matching the device_name.
-HRESULT GetDeviceFilter(const VideoCaptureDevice::Name& device_name,
- IBaseFilter** filter) {
+// static
+HRESULT VideoCaptureDeviceWin::GetDeviceFilter(
+ const VideoCaptureDevice::Name& device_name,
+ IBaseFilter** filter) {
DCHECK(filter);
ScopedComPtr<ICreateDevEnum> dev_enum;
@@ -83,7 +83,8 @@ HRESULT GetDeviceFilter(const VideoCaptureDevice::Name& device_name,
}
// Check if a Pin matches a category.
-bool PinMatchesCategory(IPin* pin, REFGUID category) {
+// static
+bool VideoCaptureDeviceWin::PinMatchesCategory(IPin* pin, REFGUID category) {
DCHECK(pin);
bool found = false;
ScopedComPtr<IKsPropertySet> ks_property;
@@ -101,32 +102,75 @@ bool PinMatchesCategory(IPin* pin, REFGUID category) {
}
// Finds an IPin on an IBaseFilter given the direction and category.
-HRESULT GetPin(IBaseFilter* filter, PIN_DIRECTION pin_dir, REFGUID category,
- IPin** pin) {
- DCHECK(pin);
+// static
+ScopedComPtr<IPin> VideoCaptureDeviceWin::GetPin(IBaseFilter* filter,
+ PIN_DIRECTION pin_dir,
+ REFGUID category) {
+ ScopedComPtr<IPin> pin;
ScopedComPtr<IEnumPins> pin_emum;
HRESULT hr = filter->EnumPins(pin_emum.Receive());
if (pin_emum == NULL)
- return hr;
+ return pin;
// Get first unconnected pin.
hr = pin_emum->Reset(); // set to first pin
- while ((hr = pin_emum->Next(1, pin, NULL)) == S_OK) {
+ while ((hr = pin_emum->Next(1, pin.Receive(), NULL)) == S_OK) {
PIN_DIRECTION this_pin_dir = static_cast<PIN_DIRECTION>(-1);
- hr = (*pin)->QueryDirection(&this_pin_dir);
+ hr = pin->QueryDirection(&this_pin_dir);
if (pin_dir == this_pin_dir) {
- if (category == GUID_NULL || PinMatchesCategory(*pin, category))
- return S_OK;
+ if (category == GUID_NULL || PinMatchesCategory(pin, category))
+ return pin;
}
- (*pin)->Release();
+ pin.Release();
+ }
+
+ DCHECK(!pin);
+ return pin;
+}
+
+// static
+VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
+ const GUID& sub_type) {
+ static struct {
+ const GUID& sub_type;
+ VideoPixelFormat format;
+ } pixel_formats[] = {
+ { kMediaSubTypeI420, PIXEL_FORMAT_I420 },
+ { MEDIASUBTYPE_IYUV, PIXEL_FORMAT_I420 },
+ { MEDIASUBTYPE_RGB24, PIXEL_FORMAT_RGB24 },
+ { MEDIASUBTYPE_YUY2, PIXEL_FORMAT_YUY2 },
+ { MEDIASUBTYPE_MJPG, PIXEL_FORMAT_MJPEG },
+ { MEDIASUBTYPE_UYVY, PIXEL_FORMAT_UYVY },
+ { MEDIASUBTYPE_ARGB32, PIXEL_FORMAT_ARGB },
+ };
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(pixel_formats); ++i) {
+ if (sub_type == pixel_formats[i].sub_type)
+ return pixel_formats[i].format;
}
+#ifndef NDEBUG
+ WCHAR guid_str[128];
+ StringFromGUID2(sub_type, guid_str, arraysize(guid_str));
+ DVLOG(2) << "Device (also) supports an unknown media type " << guid_str;
+#endif
+ return PIXEL_FORMAT_UNKNOWN;
+}
+
+void VideoCaptureDeviceWin::ScopedMediaType::Free() {
+ if (!media_type_)
+ return;
- return E_FAIL;
+ DeleteMediaType(media_type_);
+ media_type_ = NULL;
+}
+
+AM_MEDIA_TYPE** VideoCaptureDeviceWin::ScopedMediaType::Receive() {
+ DCHECK(!media_type_);
+ return &media_type_;
}
// Release the format block for a media type.
// http://msdn.microsoft.com/en-us/library/dd375432(VS.85).aspx
-void FreeMediaType(AM_MEDIA_TYPE* mt) {
+void VideoCaptureDeviceWin::ScopedMediaType::FreeMediaType(AM_MEDIA_TYPE* mt) {
if (mt->cbFormat != 0) {
CoTaskMemFree(mt->pbFormat);
mt->cbFormat = 0;
@@ -142,127 +186,14 @@ void FreeMediaType(AM_MEDIA_TYPE* mt) {
// Delete a media type structure that was allocated on the heap.
// http://msdn.microsoft.com/en-us/library/dd375432(VS.85).aspx
-void DeleteMediaType(AM_MEDIA_TYPE* mt) {
+void VideoCaptureDeviceWin::ScopedMediaType::DeleteMediaType(
+ AM_MEDIA_TYPE* mt) {
if (mt != NULL) {
FreeMediaType(mt);
CoTaskMemFree(mt);
}
}
-} // namespace
-
-// static
-void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- // Use Media Foundation for Metro processes (after and including Win8)
- // and DirectShow for any other platforms.
- if (base::win::IsMetroProcess() &&
- !cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) {
- VideoCaptureDeviceMFWin::GetDeviceNames(device_names);
- } else {
- VideoCaptureDeviceWin::GetDeviceNames(device_names);
- }
-}
-
-// static
-void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
- VideoCaptureFormats* formats) {
- NOTIMPLEMENTED();
-}
-
-// static
-VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
- VideoCaptureDevice* ret = NULL;
- if (device_name.capture_api_type() == Name::MEDIA_FOUNDATION) {
- DCHECK(VideoCaptureDeviceMFWin::PlatformSupported());
- scoped_ptr<VideoCaptureDeviceMFWin> device(
- new VideoCaptureDeviceMFWin(device_name));
- DVLOG(1) << " MediaFoundation Device: " << device_name.name();
- if (device->Init())
- ret = device.release();
- } else if (device_name.capture_api_type() == Name::DIRECT_SHOW) {
- scoped_ptr<VideoCaptureDeviceWin> device(
- new VideoCaptureDeviceWin(device_name));
- DVLOG(1) << " DirectShow Device: " << device_name.name();
- if (device->Init())
- ret = device.release();
- } else{
- NOTREACHED() << " Couldn't recognize VideoCaptureDevice type";
- }
-
- return ret;
-}
-
-// static
-void VideoCaptureDeviceWin::GetDeviceNames(Names* device_names) {
- DCHECK(device_names);
-
- ScopedComPtr<ICreateDevEnum> dev_enum;
- HRESULT hr = dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL,
- CLSCTX_INPROC);
- if (FAILED(hr))
- return;
-
- ScopedComPtr<IEnumMoniker> enum_moniker;
- hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
- enum_moniker.Receive(), 0);
- // CreateClassEnumerator returns S_FALSE on some Windows OS
- // when no camera exist. Therefore the FAILED macro can't be used.
- if (hr != S_OK)
- return;
-
- device_names->clear();
-
- // Name of a fake DirectShow filter that exist on computers with
- // GTalk installed.
- static const char kGoogleCameraAdapter[] = "google camera adapter";
-
- // Enumerate all video capture devices.
- ScopedComPtr<IMoniker> moniker;
- int index = 0;
- while (enum_moniker->Next(1, moniker.Receive(), NULL) == S_OK) {
- ScopedComPtr<IPropertyBag> prop_bag;
- hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
- if (FAILED(hr)) {
- moniker.Release();
- continue;
- }
-
- // Find the description or friendly name.
- ScopedVariant name;
- hr = prop_bag->Read(L"Description", name.Receive(), 0);
- if (FAILED(hr))
- hr = prop_bag->Read(L"FriendlyName", name.Receive(), 0);
-
- if (SUCCEEDED(hr) && name.type() == VT_BSTR) {
- // Ignore all VFW drivers and the special Google Camera Adapter.
- // Google Camera Adapter is not a real DirectShow camera device.
- // VFW is very old Video for Windows drivers that can not be used.
- const wchar_t* str_ptr = V_BSTR(&name);
- const int name_length = arraysize(kGoogleCameraAdapter) - 1;
-
- if ((wcsstr(str_ptr, L"(VFW)") == NULL) &&
- lstrlenW(str_ptr) < name_length ||
- (!(LowerCaseEqualsASCII(str_ptr, str_ptr + name_length,
- kGoogleCameraAdapter)))) {
- std::string id;
- std::string device_name(base::SysWideToUTF8(str_ptr));
- name.Reset();
- hr = prop_bag->Read(L"DevicePath", name.Receive(), 0);
- if (FAILED(hr) || name.type() != VT_BSTR) {
- id = device_name;
- } else {
- DCHECK_EQ(name.type(), VT_BSTR);
- id = base::SysWideToUTF8(V_BSTR(&name));
- }
-
- device_names->push_back(Name(device_name, id, Name::DIRECT_SHOW));
- }
- }
- moniker.Release();
- }
-}
-
VideoCaptureDeviceWin::VideoCaptureDeviceWin(const Name& device_name)
: device_name_(device_name),
state_(kIdle) {
@@ -296,8 +227,8 @@ bool VideoCaptureDeviceWin::Init() {
return false;
}
- hr = GetPin(capture_filter_, PINDIR_OUTPUT, PIN_CATEGORY_CAPTURE,
- output_capture_pin_.Receive());
+ output_capture_pin_ =
+ GetPin(capture_filter_, PINDIR_OUTPUT, PIN_CATEGORY_CAPTURE);
if (!output_capture_pin_) {
DVLOG(2) << "Failed to get capture output pin";
return false;
@@ -362,9 +293,6 @@ void VideoCaptureDeviceWin::AllocateAndStart(
if (format.frame_rate > params.requested_format.frame_rate)
format.frame_rate = params.requested_format.frame_rate;
- AM_MEDIA_TYPE* pmt = NULL;
- VIDEO_STREAM_CONFIG_CAPS caps;
-
ScopedComPtr<IAMStreamConfig> stream_config;
HRESULT hr = output_capture_pin_.QueryInterface(stream_config.Receive());
if (FAILED(hr)) {
@@ -372,19 +300,30 @@ void VideoCaptureDeviceWin::AllocateAndStart(
return;
}
+ int count = 0, size = 0;
+ hr = stream_config->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ DVLOG(2) << "Failed to GetNumberOfCapabilities";
+ return;
+ }
+
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
+ ScopedMediaType media_type;
+
// Get the windows capability from the capture device.
- hr = stream_config->GetStreamCaps(found_capability.stream_index, &pmt,
- reinterpret_cast<BYTE*>(&caps));
+ hr = stream_config->GetStreamCaps(
+ found_capability.stream_index, media_type.Receive(), caps.get());
if (SUCCEEDED(hr)) {
- if (pmt->formattype == FORMAT_VideoInfo) {
- VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
+ if (media_type->formattype == FORMAT_VideoInfo) {
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
if (format.frame_rate > 0)
h->AvgTimePerFrame = kSecondsToReferenceTime / format.frame_rate;
}
// Set the sink filter to request this format.
sink_filter_->SetRequestedMediaFormat(format);
// Order the capture device to use this format.
- hr = stream_config->SetFormat(pmt);
+ hr = stream_config->SetFormat(media_type.get());
}
if (FAILED(hr))
@@ -395,9 +334,8 @@ void VideoCaptureDeviceWin::AllocateAndStart(
hr = mjpg_filter_.CreateInstance(CLSID_MjpegDec, NULL, CLSCTX_INPROC);
if (SUCCEEDED(hr)) {
- GetPin(mjpg_filter_, PINDIR_INPUT, GUID_NULL, input_mjpg_pin_.Receive());
- GetPin(mjpg_filter_, PINDIR_OUTPUT, GUID_NULL,
- output_mjpg_pin_.Receive());
+ input_mjpg_pin_ = GetPin(mjpg_filter_, PINDIR_INPUT, GUID_NULL);
+ output_mjpg_pin_ = GetPin(mjpg_filter_, PINDIR_OUTPUT, GUID_NULL);
hr = graph_builder_->AddFilter(mjpg_filter_, NULL);
}
@@ -408,6 +346,8 @@ void VideoCaptureDeviceWin::AllocateAndStart(
}
}
+ SetAntiFlickerInCaptureFilter();
+
if (format.pixel_format == PIXEL_FORMAT_MJPEG && mjpg_filter_.get()) {
// Connect the camera to the MJPEG decoder.
hr = graph_builder_->ConnectDirect(output_capture_pin_, input_mjpg_pin_,
@@ -477,8 +417,8 @@ void VideoCaptureDeviceWin::StopAndDeAllocate() {
// Implements SinkFilterObserver::SinkFilterObserver.
void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer,
int length) {
- client_->OnIncomingCapturedFrame(
- buffer, length, base::Time::Now(), 0, capture_format_);
+ client_->OnIncomingCapturedData(
+ buffer, length, capture_format_, 0, base::TimeTicks::Now());
}
bool VideoCaptureDeviceWin::CreateCapabilityMap() {
@@ -496,19 +436,17 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
hr = capture_filter_.QueryInterface(video_control.Receive());
DVLOG_IF(2, FAILED(hr)) << "IAMVideoControl Interface NOT SUPPORTED";
- AM_MEDIA_TYPE* media_type = NULL;
- VIDEO_STREAM_CONFIG_CAPS caps;
- int count, size;
-
+ int count = 0, size = 0;
hr = stream_config->GetNumberOfCapabilities(&count, &size);
if (FAILED(hr)) {
DVLOG(2) << "Failed to GetNumberOfCapabilities";
return false;
}
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
for (int i = 0; i < count; ++i) {
- hr = stream_config->GetStreamCaps(i, &media_type,
- reinterpret_cast<BYTE*>(&caps));
+ ScopedMediaType media_type;
+ hr = stream_config->GetStreamCaps(i, media_type.Receive(), caps.get());
// GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEEDED()
// macros here since they'll trigger incorrectly.
if (hr != S_OK) {
@@ -519,6 +457,11 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
if (media_type->majortype == MEDIATYPE_Video &&
media_type->formattype == FORMAT_VideoInfo) {
VideoCaptureCapabilityWin capability(i);
+ capability.supported_format.pixel_format =
+ TranslateMediaSubtypeToPixelFormat(media_type->subtype);
+ if (capability.supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN)
+ continue;
+
VIDEOINFOHEADER* h =
reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
capability.supported_format.frame_size.SetSize(h->bmiHeader.biWidth,
@@ -551,49 +494,55 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
capability.supported_format.frame_rate =
(time_per_frame > 0)
- ? static_cast<int>(kSecondsToReferenceTime / time_per_frame)
- : 0;
+ ? (kSecondsToReferenceTime / static_cast<float>(time_per_frame))
+ : 0.0;
// DirectShow currently works only with integer frame rates, but the
// best-capability-matching class works on rational frame rates.
capability.frame_rate_numerator = capability.supported_format.frame_rate;
capability.frame_rate_denominator = 1;
- // We can't switch MEDIATYPE :~(.
- if (media_type->subtype == kMediaSubTypeI420) {
- capability.supported_format.pixel_format = PIXEL_FORMAT_I420;
- } else if (media_type->subtype == MEDIASUBTYPE_IYUV) {
- // This is identical to PIXEL_FORMAT_I420.
- capability.supported_format.pixel_format = PIXEL_FORMAT_I420;
- } else if (media_type->subtype == MEDIASUBTYPE_RGB24) {
- capability.supported_format.pixel_format = PIXEL_FORMAT_RGB24;
- } else if (media_type->subtype == MEDIASUBTYPE_YUY2) {
- capability.supported_format.pixel_format = PIXEL_FORMAT_YUY2;
- } else if (media_type->subtype == MEDIASUBTYPE_MJPG) {
- capability.supported_format.pixel_format = PIXEL_FORMAT_MJPEG;
- } else if (media_type->subtype == MEDIASUBTYPE_UYVY) {
- capability.supported_format.pixel_format = PIXEL_FORMAT_UYVY;
- } else if (media_type->subtype == MEDIASUBTYPE_ARGB32) {
- capability.supported_format.pixel_format = PIXEL_FORMAT_ARGB;
- } else {
- WCHAR guid_str[128];
- StringFromGUID2(media_type->subtype, guid_str, arraysize(guid_str));
- DVLOG(2) << "Device supports (also) an unknown media type " << guid_str;
- continue;
- }
capabilities_.Add(capability);
}
- DeleteMediaType(media_type);
- media_type = NULL;
}
return !capabilities_.empty();
}
-void VideoCaptureDeviceWin::SetErrorState(const char* reason) {
+// Sets the power line frequency (anti-flicker) removal in |capture_filter_|,
+// if available.
+void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
+ const int power_line_frequency = GetPowerLineFrequencyForLocation();
+ if (power_line_frequency != kPowerLine50Hz &&
+ power_line_frequency != kPowerLine60Hz) {
+ return;
+ }
+ ScopedComPtr<IKsPropertySet> ks_propset;
+ DWORD type_support = 0;
+ HRESULT hr;
+ if (SUCCEEDED(hr = ks_propset.QueryFrom(capture_filter_)) &&
+ SUCCEEDED(hr = ks_propset->QuerySupported(PROPSETID_VIDCAP_VIDEOPROCAMP,
+ KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY, &type_support)) &&
+ (type_support & KSPROPERTY_SUPPORT_SET)) {
+ KSPROPERTY_VIDEOPROCAMP_S data = {};
+ data.Property.Set = PROPSETID_VIDCAP_VIDEOPROCAMP;
+ data.Property.Id = KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY;
+ data.Property.Flags = KSPROPERTY_TYPE_SET;
+ data.Value = (power_line_frequency == kPowerLine50Hz) ? 1 : 2;
+ data.Flags = KSPROPERTY_VIDEOPROCAMP_FLAGS_MANUAL;
+ hr = ks_propset->Set(PROPSETID_VIDCAP_VIDEOPROCAMP,
+ KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY,
+ &data, sizeof(data), &data, sizeof(data));
+ DLOG_IF(ERROR, FAILED(hr)) << "Anti-flicker setting failed.";
+ DVLOG_IF(2, SUCCEEDED(hr)) << "Anti-flicker set correctly.";
+ } else {
+ DVLOG(2) << "Anti-flicker setting not supported.";
+ }
+}
+
+void VideoCaptureDeviceWin::SetErrorState(const std::string& reason) {
DCHECK(CalledOnValidThread());
DVLOG(1) << reason;
state_ = kError;
- client_->OnError();
+ client_->OnError(reason);
}
} // namespace media
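The GetStreamCaps() loops above now rely on ScopedMediaType for cleanup; the pattern, condensed (|stream_config| is the IAMStreamConfig from the hunks above, and the types are as declared in the header below):

    int count = 0, size = 0;
    if (SUCCEEDED(stream_config->GetNumberOfCapabilities(&count, &size))) {
      scoped_ptr<BYTE[]> caps(new BYTE[size]);
      for (int i = 0; i < count; ++i) {
        VideoCaptureDeviceWin::ScopedMediaType media_type;
        if (stream_config->GetStreamCaps(i, media_type.Receive(),
                                         caps.get()) != S_OK) {
          continue;
        }
        // Inspect media_type->formattype etc.; the wrapped AM_MEDIA_TYPE is
        // freed automatically when |media_type| leaves scope.
      }
    }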
diff --git a/chromium/media/video/capture/win/video_capture_device_win.h b/chromium/media/video/capture/win/video_capture_device_win.h
index 164c01c9e26..5ab9faaca05 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_win.h
@@ -33,20 +33,46 @@ class VideoCaptureDeviceWin
public VideoCaptureDevice,
public SinkFilterObserver {
public:
+ // A utility class that wraps the AM_MEDIA_TYPE type and guarantees that
+ // we free the structure when exiting the scope. DCHECKing is also done to
+ // avoid memory leaks.
+ class ScopedMediaType {
+ public:
+ ScopedMediaType() : media_type_(NULL) {}
+ ~ScopedMediaType() { Free(); }
+
+ AM_MEDIA_TYPE* operator->() { return media_type_; }
+ AM_MEDIA_TYPE* get() { return media_type_; }
+ void Free();
+ AM_MEDIA_TYPE** Receive();
+
+ private:
+ void FreeMediaType(AM_MEDIA_TYPE* mt);
+ void DeleteMediaType(AM_MEDIA_TYPE* mt);
+
+ AM_MEDIA_TYPE* media_type_;
+ };
+
+ static HRESULT GetDeviceFilter(const Name& device_name,
+ IBaseFilter** filter);
+ static bool PinMatchesCategory(IPin* pin, REFGUID category);
+ static base::win::ScopedComPtr<IPin> GetPin(IBaseFilter* filter,
+ PIN_DIRECTION pin_dir,
+ REFGUID category);
+ static VideoPixelFormat TranslateMediaSubtypeToPixelFormat(
+ const GUID& sub_type);
+
explicit VideoCaptureDeviceWin(const Name& device_name);
virtual ~VideoCaptureDeviceWin();
// Opens the device driver for this device.
- // This function is used by the static VideoCaptureDevice::Create function.
bool Init();
// VideoCaptureDevice implementation.
- virtual void AllocateAndStart(const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client)
- OVERRIDE;
+ virtual void AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) OVERRIDE;
virtual void StopAndDeAllocate() OVERRIDE;
- static void GetDeviceNames(Names* device_names);
-
private:
enum InternalState {
kIdle, // The device driver is opened but camera is not in use.
@@ -59,7 +85,8 @@ class VideoCaptureDeviceWin
virtual void FrameReceived(const uint8* buffer, int length);
bool CreateCapabilityMap();
- void SetErrorState(const char* reason);
+ void SetAntiFlickerInCaptureFilter();
+ void SetErrorState(const std::string& reason);
Name device_name_;
InternalState state_;
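GetPin() now returns the pin instead of filling an out-parameter, which is what lets the call sites above collapse to one line; a sketch of the simplified pattern (|filter| stands for any IBaseFilter*):

    base::win::ScopedComPtr<IPin> pin = VideoCaptureDeviceWin::GetPin(
        filter, PINDIR_OUTPUT, PIN_CATEGORY_CAPTURE);
    if (!pin)
      return;  // An empty pointer is the failure signal; no HRESULT needed.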
diff --git a/chromium/media/video/mock_video_decode_accelerator.h b/chromium/media/video/mock_video_decode_accelerator.h
index a0df1cf90b3..f8bb6da8756 100644
--- a/chromium/media/video/mock_video_decode_accelerator.h
+++ b/chromium/media/video/mock_video_decode_accelerator.h
@@ -24,7 +24,7 @@ class MockVideoDecodeAccelerator : public VideoDecodeAccelerator {
MockVideoDecodeAccelerator();
virtual ~MockVideoDecodeAccelerator();
- MOCK_METHOD1(Initialize, bool(VideoCodecProfile profile));
+ MOCK_METHOD2(Initialize, bool(VideoCodecProfile profile, Client* client));
MOCK_METHOD1(Decode, void(const BitstreamBuffer& bitstream_buffer));
MOCK_METHOD1(AssignPictureBuffers,
void(const std::vector<PictureBuffer>& buffers));
@@ -32,6 +32,7 @@ class MockVideoDecodeAccelerator : public VideoDecodeAccelerator {
MOCK_METHOD0(Flush, void());
MOCK_METHOD0(Reset, void());
MOCK_METHOD0(Destroy, void());
+ MOCK_METHOD0(CanDecodeOnIOThread, bool());
private:
void DeleteThis();
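With the extra Client* argument and the new CanDecodeOnIOThread() mock, tests can stub the updated interface; a hedged gmock sketch (the profile choice is arbitrary):

    MockVideoDecodeAccelerator vda;
    EXPECT_CALL(vda, Initialize(media::H264PROFILE_MAIN, testing::NotNull()))
        .WillOnce(testing::Return(true));
    EXPECT_CALL(vda, CanDecodeOnIOThread())
        .WillRepeatedly(testing::Return(false));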
diff --git a/chromium/media/video/video_decode_accelerator.cc b/chromium/media/video/video_decode_accelerator.cc
index 9063d68bdfe..a72912cf305 100644
--- a/chromium/media/video/video_decode_accelerator.cc
+++ b/chromium/media/video/video_decode_accelerator.cc
@@ -4,8 +4,28 @@
#include "media/video/video_decode_accelerator.h"
+#include "base/logging.h"
+
namespace media {
VideoDecodeAccelerator::~VideoDecodeAccelerator() {}
+bool VideoDecodeAccelerator::CanDecodeOnIOThread() {
+ // GPU process subclasses must override this.
+ LOG(FATAL) << "This should only get called in the GPU process";
+ return false; // not reached
+}
+
+} // namespace media
+
+namespace base {
+
+void DefaultDeleter<media::VideoDecodeAccelerator>::operator()(
+ void* video_decode_accelerator) const {
+ static_cast<media::VideoDecodeAccelerator*>(video_decode_accelerator)->
+ Destroy();
}
+
+} // namespace base
+
+
diff --git a/chromium/media/video/video_decode_accelerator.h b/chromium/media/video/video_decode_accelerator.h
index 5212db2c488..4df3b1c9158 100644
--- a/chromium/media/video/video_decode_accelerator.h
+++ b/chromium/media/video/video_decode_accelerator.h
@@ -8,7 +8,6 @@
#include <vector>
#include "base/basictypes.h"
-#include "base/memory/weak_ptr.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/video_decoder_config.h"
#include "media/video/picture.h"
@@ -19,11 +18,8 @@ namespace media {
// Video decoder interface.
// This interface is extended by the various components that ultimately
// implement the backend of PPB_VideoDecode_Dev.
-class MEDIA_EXPORT VideoDecodeAccelerator
- : public base::SupportsWeakPtr<VideoDecodeAccelerator> {
+class MEDIA_EXPORT VideoDecodeAccelerator {
public:
- virtual ~VideoDecodeAccelerator();
-
// Enumeration of potential errors generated by the API.
// Note: Keep these in sync with PP_VideoDecodeError_Dev. Also do not
// rearrange, reuse or remove values as they are used for gathering UMA
@@ -44,15 +40,13 @@ class MEDIA_EXPORT VideoDecodeAccelerator
};
// Interface for collaborating with picture interface to provide memory for
- // output picture and blitting them.
+ // output pictures and blitting them. These callbacks will not be made unless
+ // Initialize() has returned successfully.
// This interface is extended by the various layers that relay messages back
// to the plugin, through the PPP_VideoDecode_Dev interface the plugin
// implements.
class MEDIA_EXPORT Client {
public:
- // Callback to notify client that decoder has been initialized.
- virtual void NotifyInitializeDone() = 0;
-
// Callback to tell client how many and what size of buffers to provide.
virtual void ProvidePictureBuffers(uint32 requested_num_of_buffers,
const gfx::Size& dimensions,
@@ -74,7 +68,9 @@ class MEDIA_EXPORT VideoDecodeAccelerator
// Reset completion callback.
virtual void NotifyResetDone() = 0;
- // Callback to notify about decoding errors.
+ // Callback to notify about decoding errors. Note that errors in
+ // Initialize() will not be reported here, but will instead be indicated by
+ // a false return value there.
virtual void NotifyError(Error error) = 0;
protected:
@@ -83,14 +79,16 @@ class MEDIA_EXPORT VideoDecodeAccelerator
// Video decoder functions.
- // Initializes the video decoder with specific configuration.
+ // Initializes the video decoder with specific configuration. Called once per
+ // decoder construction. This call is synchronous and returns true iff
+ // initialization is successful.
// Parameters:
// |profile| is the video stream's format profile.
- //
- // Returns true when command successfully accepted. Otherwise false.
- virtual bool Initialize(VideoCodecProfile profile) = 0;
+ // |client| is the client of this video decoder. The provided pointer must
+ // be valid until Destroy() is called.
+ virtual bool Initialize(VideoCodecProfile profile, Client* client) = 0;
- // Decodes given bitstream buffer that contains at most one frame. Once
+ // Decodes given bitstream buffer that contains at most one frame. Once
// decoder is done with processing |bitstream_buffer| it will call
// NotifyEndOfBitstreamBuffer() with the bitstream buffer id.
// Parameters:
@@ -133,8 +131,39 @@ class MEDIA_EXPORT VideoDecodeAccelerator
// no more callbacks will be made on the client. Deletes |this|
// unconditionally, so make sure to drop all pointers to it!
virtual void Destroy() = 0;
+
+ // GPU PROCESS ONLY. Implementations of this interface in the
+ // content/common/gpu/media should implement this, and implementations in
+ // other processes should not override the default implementation.
+ // Returns true if VDA::Decode and VDA::Client callbacks can run on the IO
+ // thread. Otherwise they will run on the GPU child thread. The purpose of
+ // running Decode on the IO thread is to reduce decode latency. Note Decode
+ // should return as soon as possible and not block on the IO thread. Also,
+ // PictureReady should be run on the child thread if a picture is delivered
+ // the first time so it can be cleared.
+ virtual bool CanDecodeOnIOThread();
+
+ protected:
+ // Do not delete directly; use Destroy() or own it with a scoped_ptr, which
+ // will Destroy() it properly by default.
+ virtual ~VideoDecodeAccelerator();
};
} // namespace media
+namespace base {
+
+template <class T>
+struct DefaultDeleter;
+
+// Specialize DefaultDeleter so that scoped_ptr<VideoDecodeAccelerator> always
+// uses "Destroy()" instead of trying to use the destructor.
+template <>
+struct MEDIA_EXPORT DefaultDeleter<media::VideoDecodeAccelerator> {
+ public:
+ void operator()(void* video_decode_accelerator) const;
+};
+
+} // namespace base
+
#endif // MEDIA_VIDEO_VIDEO_DECODE_ACCELERATOR_H_
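Since the destructor is now protected, callers own decoders through scoped_ptr, and the DefaultDeleter specialization routes destruction through Destroy(); a sketch, where CreateVDA() and |client| are hypothetical stand-ins:

    scoped_ptr<media::VideoDecodeAccelerator> vda(CreateVDA());  // hypothetical
    if (!vda->Initialize(media::H264PROFILE_MAIN, client))
      return;  // Failures are signalled here, never via NotifyError().
    // When |vda| goes out of scope, DefaultDeleter calls Destroy(), not delete.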
diff --git a/chromium/media/video/video_encode_accelerator.cc b/chromium/media/video/video_encode_accelerator.cc
index 6309180bceb..d8a5838036f 100644
--- a/chromium/media/video/video_encode_accelerator.cc
+++ b/chromium/media/video/video_encode_accelerator.cc
@@ -9,3 +9,14 @@ namespace media {
VideoEncodeAccelerator::~VideoEncodeAccelerator() {}
} // namespace media
+
+namespace base {
+
+void DefaultDeleter<media::VideoEncodeAccelerator>::operator()(
+ void* video_encode_accelerator) const {
+ static_cast<media::VideoEncodeAccelerator*>(video_encode_accelerator)->
+ Destroy();
+}
+
+} // namespace base
+
diff --git a/chromium/media/video/video_encode_accelerator.h b/chromium/media/video/video_encode_accelerator.h
index 8d4f56536bf..891204bcae1 100644
--- a/chromium/media/video/video_encode_accelerator.h
+++ b/chromium/media/video/video_encode_accelerator.h
@@ -22,8 +22,6 @@ class VideoFrame;
// Video encoder interface.
class MEDIA_EXPORT VideoEncodeAccelerator {
public:
- virtual ~VideoEncodeAccelerator();
-
// Specification of an encoding profile supported by an encoder.
struct SupportedProfile {
VideoCodecProfile profile;
@@ -45,14 +43,13 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// failures, GPU library failures, GPU process programming errors, and so
// on.
kPlatformFailureError,
+ kErrorMax = kPlatformFailureError
};
- // Interface for clients that use VideoEncodeAccelerator.
+ // Interface for clients that use VideoEncodeAccelerator. These callbacks will
+ // not be made unless Initialize() has returned successfully.
class MEDIA_EXPORT Client {
public:
- // Callback to notify client that encoder has been successfully initialized.
- virtual void NotifyInitializeDone() = 0;
-
// Callback to tell the client what size of frames and buffers to provide
// for input and output. The VEA disclaims use or ownership of all
// previously provided buffers once this callback is made.
@@ -82,7 +79,9 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
size_t payload_size,
bool key_frame) = 0;
- // Error notification callback.
+ // Error notification callback. Note that errors in Initialize() will not be
+ // reported here, but will instead be indicated by a false return value
+ // there.
virtual void NotifyError(Error error) = 0;
protected:
@@ -93,8 +92,9 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// Video encoder functions.
- // Initialize the video encoder with a specific configuration. Called once
- // per encoder construction.
+ // Initializes the video encoder with specific configuration. Called once per
+ // encoder construction. This call is synchronous and returns true iff
+ // initialization is successful.
// Parameters:
// |input_format| is the frame format of the input stream (as would be
// reported by VideoFrame::format() for frames passed to Encode()).
@@ -104,11 +104,14 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// |output_profile| is the codec profile of the encoded output stream.
// |initial_bitrate| is the initial bitrate of the encoded output stream,
// in bits per second.
+ // |client| is the client of this video encoder. The provided pointer must
+ // be valid until Destroy() is called.
// TODO(sheu): handle resolution changes. http://crbug.com/249944
- virtual void Initialize(media::VideoFrame::Format input_format,
+ virtual bool Initialize(VideoFrame::Format input_format,
const gfx::Size& input_visible_size,
VideoCodecProfile output_profile,
- uint32 initial_bitrate) = 0;
+ uint32 initial_bitrate,
+ Client* client) = 0;
// Encodes the given frame.
// Parameters:
@@ -138,8 +141,28 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// this method returns no more callbacks will be made on the client. Deletes
// |this| unconditionally, so make sure to drop all pointers to it!
virtual void Destroy() = 0;
+
+ protected:
+ // Do not delete directly; use Destroy() or own it with a scoped_ptr, which
+ // will Destroy() it properly by default.
+ virtual ~VideoEncodeAccelerator();
};
} // namespace media
+namespace base {
+
+template <class T>
+struct DefaultDeleter;
+
+// Specialize DefaultDeleter so that scoped_ptr<VideoEncodeAccelerator> always
+// uses "Destroy()" instead of trying to use the destructor.
+template <>
+struct MEDIA_EXPORT DefaultDeleter<media::VideoEncodeAccelerator> {
+ public:
+ void operator()(void* video_encode_accelerator) const;
+};
+
+} // namespace base
+
#endif // MEDIA_VIDEO_VIDEO_ENCODE_ACCELERATOR_H_
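The encoder mirrors the decoder: Initialize() is now synchronous, returns bool, and takes the client pointer; a sketch with hypothetical CreateVEA() and |client|:

    scoped_ptr<media::VideoEncodeAccelerator> vea(CreateVEA());  // hypothetical
    if (!vea->Initialize(media::VideoFrame::I420,
                         gfx::Size(640, 480),
                         media::H264PROFILE_MAIN,
                         500000,  // initial bitrate in bits per second
                         client)) {
      return;  // As with the decoder, failures here never reach NotifyError().
    }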
diff --git a/chromium/media/webm/tracks_builder.cc b/chromium/media/webm/tracks_builder.cc
deleted file mode 100644
index 3ad59530514..00000000000
--- a/chromium/media/webm/tracks_builder.cc
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/webm/tracks_builder.h"
-
-#include "media/webm/webm_constants.h"
-
-namespace media {
-
-// Returns size of an integer, formatted using Matroska serialization.
-static int GetUIntMkvSize(uint64 value) {
- if (value < 0x07FULL)
- return 1;
- if (value < 0x03FFFULL)
- return 2;
- if (value < 0x01FFFFFULL)
- return 3;
- if (value < 0x0FFFFFFFULL)
- return 4;
- if (value < 0x07FFFFFFFFULL)
- return 5;
- if (value < 0x03FFFFFFFFFFULL)
- return 6;
- if (value < 0x01FFFFFFFFFFFFULL)
- return 7;
- return 8;
-}
-
-// Returns the minimium size required to serialize an integer value.
-static int GetUIntSize(uint64 value) {
- if (value < 0x0100ULL)
- return 1;
- if (value < 0x010000ULL)
- return 2;
- if (value < 0x01000000ULL)
- return 3;
- if (value < 0x0100000000ULL)
- return 4;
- if (value < 0x010000000000ULL)
- return 5;
- if (value < 0x01000000000000ULL)
- return 6;
- if (value < 0x0100000000000000ULL)
- return 7;
- return 8;
-}
-
-static int MasterElementSize(int element_id, int payload_size) {
- return GetUIntSize(element_id) + GetUIntMkvSize(payload_size) + payload_size;
-}
-
-static int IntElementSize(int element_id, int value) {
- return GetUIntSize(element_id) + 1 + GetUIntSize(value);
-}
-
-static int StringElementSize(int element_id, const std::string& value) {
- return GetUIntSize(element_id) +
- GetUIntMkvSize(value.length()) +
- value.length();
-}
-
-static void SerializeInt(uint8** buf_ptr, int* buf_size_ptr,
- int64 value, int size) {
- uint8*& buf = *buf_ptr;
- int& buf_size = *buf_size_ptr;
-
- for (int idx = 1; idx <= size; ++idx) {
- *buf++ = static_cast<uint8>(value >> ((size - idx) * 8));
- --buf_size;
- }
-}
-
-static void WriteElementId(uint8** buf, int* buf_size, int element_id) {
- SerializeInt(buf, buf_size, element_id, GetUIntSize(element_id));
-}
-
-static void WriteUInt(uint8** buf, int* buf_size, uint64 value) {
- const int size = GetUIntMkvSize(value);
- value |= (1ULL << (size * 7)); // Matroska formatting
- SerializeInt(buf, buf_size, value, size);
-}
-
-static void WriteMasterElement(uint8** buf, int* buf_size,
- int element_id, int payload_size) {
- WriteElementId(buf, buf_size, element_id);
- WriteUInt(buf, buf_size, payload_size);
-}
-
-static void WriteIntElement(uint8** buf, int* buf_size,
- int element_id, int value) {
- WriteElementId(buf, buf_size, element_id);
-
- const int size = GetUIntSize(value);
- WriteUInt(buf, buf_size, size);
-
- SerializeInt(buf, buf_size, value, size);
-}
-
-static void WriteStringElement(uint8** buf_ptr, int* buf_size_ptr,
- int element_id, const std::string& value) {
- uint8*& buf = *buf_ptr;
- int& buf_size = *buf_size_ptr;
-
- WriteElementId(&buf, &buf_size, element_id);
-
- const uint64 size = value.length();
- WriteUInt(&buf, &buf_size, size);
-
- memcpy(buf, value.data(), size);
- buf += size;
- buf_size -= size;
-}
-
-TracksBuilder::TracksBuilder() {}
-TracksBuilder::~TracksBuilder() {}
-
-void TracksBuilder::AddTrack(
- int track_num,
- int track_type,
- int track_uid,
- const std::string& codec_id,
- const std::string& name,
- const std::string& language) {
- tracks_.push_back(Track(track_num, track_type, track_uid, codec_id, name,
- language));
-}
-
-std::vector<uint8> TracksBuilder::Finish() {
- // Allocate the storage
- std::vector<uint8> buffer;
- buffer.resize(GetTracksSize());
-
- // Populate the storage with a tracks header
- WriteTracks(&buffer[0], buffer.size());
-
- return buffer;
-}
-
-int TracksBuilder::GetTracksSize() const {
- return MasterElementSize(kWebMIdTracks, GetTracksPayloadSize());
-}
-
-int TracksBuilder::GetTracksPayloadSize() const {
- int payload_size = 0;
-
- for (TrackList::const_iterator itr = tracks_.begin();
- itr != tracks_.end(); ++itr) {
- payload_size += itr->GetSize();
- }
-
- return payload_size;
-}
-
-void TracksBuilder::WriteTracks(uint8* buf, int buf_size) const {
- WriteMasterElement(&buf, &buf_size, kWebMIdTracks, GetTracksPayloadSize());
-
- for (TrackList::const_iterator itr = tracks_.begin();
- itr != tracks_.end(); ++itr) {
- itr->Write(&buf, &buf_size);
- }
-}
-
-TracksBuilder::Track::Track(int track_num, int track_type, int track_uid,
- const std::string& codec_id,
- const std::string& name,
- const std::string& language)
- : track_num_(track_num),
- track_type_(track_type),
- track_uid_(track_uid),
- codec_id_(codec_id),
- name_(name),
- language_(language) {
-}
-
-int TracksBuilder::Track::GetSize() const {
- return MasterElementSize(kWebMIdTrackEntry, GetPayloadSize());
-}
-
-int TracksBuilder::Track::GetPayloadSize() const {
- int size = 0;
-
- size += IntElementSize(kWebMIdTrackNumber, track_num_);
- size += IntElementSize(kWebMIdTrackType, track_type_);
- size += IntElementSize(kWebMIdTrackUID, track_uid_);
-
- if (!codec_id_.empty())
- size += StringElementSize(kWebMIdCodecID, codec_id_);
-
- if (!name_.empty())
- size += StringElementSize(kWebMIdName, name_);
-
- if (!language_.empty())
- size += StringElementSize(kWebMIdLanguage, language_);
-
- return size;
-}
-
-void TracksBuilder::Track::Write(uint8** buf, int* buf_size) const {
- WriteMasterElement(buf, buf_size, kWebMIdTrackEntry, GetPayloadSize());
-
- WriteIntElement(buf, buf_size, kWebMIdTrackNumber, track_num_);
- WriteIntElement(buf, buf_size, kWebMIdTrackType, track_type_);
- WriteIntElement(buf, buf_size, kWebMIdTrackUID, track_uid_);
-
- if (!codec_id_.empty())
- WriteStringElement(buf, buf_size, kWebMIdCodecID, codec_id_);
-
- if (!name_.empty())
- WriteStringElement(buf, buf_size, kWebMIdName, name_);
-
- if (!language_.empty())
- WriteStringElement(buf, buf_size, kWebMIdLanguage, language_);
-}
-
-} // namespace media
diff --git a/chromium/media/webm/tracks_builder.h b/chromium/media/webm/tracks_builder.h
deleted file mode 100644
index fca9dfe1dce..00000000000
--- a/chromium/media/webm/tracks_builder.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_WEBM_TRACKS_BUILDER_H_
-#define MEDIA_WEBM_TRACKS_BUILDER_H_
-
-#include <list>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-
-namespace media {
-
-class TracksBuilder {
- public:
- TracksBuilder();
- ~TracksBuilder();
-
- void AddTrack(int track_num, int track_type, int track_uid,
- const std::string& codec_id, const std::string& name,
- const std::string& language);
-
- std::vector<uint8> Finish();
-
- private:
- int GetTracksSize() const;
- int GetTracksPayloadSize() const;
- void WriteTracks(uint8* buffer, int buffer_size) const;
-
- class Track {
- public:
- Track(int track_num, int track_type, int track_uid,
- const std::string& codec_id, const std::string& name,
- const std::string& language);
-
- int GetSize() const;
- void Write(uint8** buf, int* buf_size) const;
- private:
- int GetPayloadSize() const;
-
- int track_num_;
- int track_type_;
- int track_uid_;
- std::string codec_id_;
- std::string name_;
- std::string language_;
- };
-
- typedef std::list<Track> TrackList;
- TrackList tracks_;
-
- DISALLOW_COPY_AND_ASSIGN(TracksBuilder);
-};
-
-} // namespace media
-
-#endif // MEDIA_WEBM_TRACKS_BUILDER_H_
diff --git a/chromium/media/webm/webm_cluster_parser.cc b/chromium/media/webm/webm_cluster_parser.cc
deleted file mode 100644
index df9e4ce244b..00000000000
--- a/chromium/media/webm/webm_cluster_parser.cc
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/webm/webm_cluster_parser.h"
-
-#include <vector>
-
-#include "base/logging.h"
-#include "base/sys_byteorder.h"
-#include "media/base/buffers.h"
-#include "media/base/decrypt_config.h"
-#include "media/filters/webvtt_util.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_crypto_helpers.h"
-#include "media/webm/webm_webvtt_parser.h"
-
-namespace media {
-
-WebMClusterParser::TextTrackIterator::TextTrackIterator(
- const TextTrackMap& text_track_map) :
- iterator_(text_track_map.begin()),
- iterator_end_(text_track_map.end()) {
-}
-
-WebMClusterParser::TextTrackIterator::TextTrackIterator(
- const TextTrackIterator& rhs) :
- iterator_(rhs.iterator_),
- iterator_end_(rhs.iterator_end_) {
-}
-
-WebMClusterParser::TextTrackIterator::~TextTrackIterator() {
-}
-
-bool WebMClusterParser::TextTrackIterator::operator()(
- int* track_num,
- const BufferQueue** buffers) {
- if (iterator_ == iterator_end_) {
- *track_num = 0;
- *buffers = NULL;
-
- return false;
- }
-
- *track_num = iterator_->first;
- *buffers = &iterator_->second.buffers();
-
- ++iterator_;
- return true;
-}
-
-WebMClusterParser::WebMClusterParser(
- int64 timecode_scale, int audio_track_num, int video_track_num,
- const WebMTracksParser::TextTracks& text_tracks,
- const std::set<int64>& ignored_tracks,
- const std::string& audio_encryption_key_id,
- const std::string& video_encryption_key_id,
- const LogCB& log_cb)
- : timecode_multiplier_(timecode_scale / 1000.0),
- ignored_tracks_(ignored_tracks),
- audio_encryption_key_id_(audio_encryption_key_id),
- video_encryption_key_id_(video_encryption_key_id),
- parser_(kWebMIdCluster, this),
- last_block_timecode_(-1),
- block_data_size_(-1),
- block_duration_(-1),
- block_add_id_(-1),
- block_additional_data_size_(-1),
- discard_padding_(-1),
- cluster_timecode_(-1),
- cluster_start_time_(kNoTimestamp()),
- cluster_ended_(false),
- audio_(audio_track_num, false),
- video_(video_track_num, true),
- log_cb_(log_cb) {
- for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
- it != text_tracks.end();
- ++it) {
- text_track_map_.insert(std::make_pair(it->first, Track(it->first, false)));
- }
-}
-
-WebMClusterParser::~WebMClusterParser() {}
-
-void WebMClusterParser::Reset() {
- last_block_timecode_ = -1;
- cluster_timecode_ = -1;
- cluster_start_time_ = kNoTimestamp();
- cluster_ended_ = false;
- parser_.Reset();
- audio_.Reset();
- video_.Reset();
- ResetTextTracks();
-}
-
-int WebMClusterParser::Parse(const uint8* buf, int size) {
- audio_.Reset();
- video_.Reset();
- ResetTextTracks();
-
- int result = parser_.Parse(buf, size);
-
- if (result < 0) {
- cluster_ended_ = false;
- return result;
- }
-
- cluster_ended_ = parser_.IsParsingComplete();
- if (cluster_ended_) {
- // If there were no buffers in this cluster, set the cluster start time to
- // be the |cluster_timecode_|.
- if (cluster_start_time_ == kNoTimestamp()) {
- DCHECK_GT(cluster_timecode_, -1);
- cluster_start_time_ = base::TimeDelta::FromMicroseconds(
- cluster_timecode_ * timecode_multiplier_);
- }
-
- // Reset the parser if we're done parsing so that
- // it is ready to accept another cluster on the next
- // call.
- parser_.Reset();
-
- last_block_timecode_ = -1;
- cluster_timecode_ = -1;
- }
-
- return result;
-}
-
-WebMClusterParser::TextTrackIterator
-WebMClusterParser::CreateTextTrackIterator() const {
- return TextTrackIterator(text_track_map_);
-}
-
-WebMParserClient* WebMClusterParser::OnListStart(int id) {
- if (id == kWebMIdCluster) {
- cluster_timecode_ = -1;
- cluster_start_time_ = kNoTimestamp();
- } else if (id == kWebMIdBlockGroup) {
- block_data_.reset();
- block_data_size_ = -1;
- block_duration_ = -1;
- discard_padding_ = -1;
- discard_padding_set_ = false;
- } else if (id == kWebMIdBlockAdditions) {
- block_add_id_ = -1;
- block_additional_data_.reset();
- block_additional_data_size_ = -1;
- }
-
- return this;
-}
-
-bool WebMClusterParser::OnListEnd(int id) {
- if (id != kWebMIdBlockGroup)
- return true;
-
- // Make sure the BlockGroup actually had a Block.
- if (block_data_size_ == -1) {
- MEDIA_LOG(log_cb_) << "Block missing from BlockGroup.";
- return false;
- }
-
- bool result = ParseBlock(false, block_data_.get(), block_data_size_,
- block_additional_data_.get(),
- block_additional_data_size_, block_duration_,
- discard_padding_set_ ? discard_padding_ : 0);
- block_data_.reset();
- block_data_size_ = -1;
- block_duration_ = -1;
- block_add_id_ = -1;
- block_additional_data_.reset();
- block_additional_data_size_ = -1;
- discard_padding_ = -1;
- discard_padding_set_ = false;
- return result;
-}
-
-bool WebMClusterParser::OnUInt(int id, int64 val) {
- int64* dst;
- switch (id) {
- case kWebMIdTimecode:
- dst = &cluster_timecode_;
- break;
- case kWebMIdBlockDuration:
- dst = &block_duration_;
- break;
- case kWebMIdBlockAddID:
- dst = &block_add_id_;
- break;
- case kWebMIdDiscardPadding:
- if (discard_padding_set_)
- return false;
- discard_padding_set_ = true;
- discard_padding_ = val;
- return true;
- default:
- return true;
- }
- if (*dst != -1)
- return false;
- *dst = val;
- return true;
-}
-
-bool WebMClusterParser::ParseBlock(bool is_simple_block, const uint8* buf,
- int size, const uint8* additional,
- int additional_size, int duration,
- int64 discard_padding) {
- if (size < 4)
- return false;
-
- // Return an error if the trackNum > 127. We just aren't
- // going to support large track numbers right now.
- if (!(buf[0] & 0x80)) {
- MEDIA_LOG(log_cb_) << "TrackNumber over 127 not supported";
- return false;
- }
-
- int track_num = buf[0] & 0x7f;
- int timecode = buf[1] << 8 | buf[2];
- int flags = buf[3] & 0xff;
- int lacing = (flags >> 1) & 0x3;
-
- if (lacing) {
- MEDIA_LOG(log_cb_) << "Lacing " << lacing << " is not supported yet.";
- return false;
- }
-
- // Sign extend negative timecode offsets.
- if (timecode & 0x8000)
- timecode |= ~0xffff;
-
- const uint8* frame_data = buf + 4;
- int frame_size = size - (frame_data - buf);
- return OnBlock(is_simple_block, track_num, timecode, duration, flags,
- frame_data, frame_size, additional, additional_size,
- discard_padding);
-}
-
-bool WebMClusterParser::OnBinary(int id, const uint8* data, int size) {
- switch (id) {
- case kWebMIdSimpleBlock:
- return ParseBlock(true, data, size, NULL, -1, -1, 0);
-
- case kWebMIdBlock:
- if (block_data_) {
- MEDIA_LOG(log_cb_) << "More than 1 Block in a BlockGroup is not "
- "supported.";
- return false;
- }
- block_data_.reset(new uint8[size]);
- memcpy(block_data_.get(), data, size);
- block_data_size_ = size;
- return true;
-
- case kWebMIdBlockAdditional: {
- uint64 block_add_id = base::HostToNet64(block_add_id_);
- if (block_additional_data_) {
- // TODO(vigneshv): Technically, more than 1 BlockAdditional is allowed
- // as per matroska spec. But for now we don't have a use case to
- // support parsing of such files. Take a look at this again when such a
- // case arises.
- MEDIA_LOG(log_cb_) << "More than 1 BlockAdditional in a BlockGroup is "
- "not supported.";
- return false;
- }
- // First 8 bytes of side_data in DecoderBuffer is the BlockAddID
- // element's value in Big Endian format. This is done to mimic ffmpeg
- // demuxer's behavior.
- block_additional_data_size_ = size + sizeof(block_add_id);
- block_additional_data_.reset(new uint8[block_additional_data_size_]);
- memcpy(block_additional_data_.get(), &block_add_id,
- sizeof(block_add_id));
- memcpy(block_additional_data_.get() + 8, data, size);
- return true;
- }
-
- default:
- return true;
- }
-}
-
-bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
- int timecode,
- int block_duration,
- int flags,
- const uint8* data, int size,
- const uint8* additional, int additional_size,
- int64 discard_padding) {
- DCHECK_GE(size, 0);
- if (cluster_timecode_ == -1) {
- MEDIA_LOG(log_cb_) << "Got a block before cluster timecode.";
- return false;
- }
-
- // TODO(acolwell): Should relative negative timecode offsets be rejected? Or
- // only when the absolute timecode is negative? See http://crbug.com/271794
- if (timecode < 0) {
- MEDIA_LOG(log_cb_) << "Got a block with negative timecode offset "
- << timecode;
- return false;
- }
-
- if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
- MEDIA_LOG(log_cb_)
- << "Got a block with a timecode before the previous block.";
- return false;
- }
-
- Track* track = NULL;
- bool is_text = false;
- std::string encryption_key_id;
- if (track_num == audio_.track_num()) {
- track = &audio_;
- encryption_key_id = audio_encryption_key_id_;
- } else if (track_num == video_.track_num()) {
- track = &video_;
- encryption_key_id = video_encryption_key_id_;
- } else if (ignored_tracks_.find(track_num) != ignored_tracks_.end()) {
- return true;
- } else if (Track* const text_track = FindTextTrack(track_num)) {
- if (is_simple_block) // BlockGroup is required for WebVTT cues
- return false;
- if (block_duration < 0) // not specified
- return false;
- track = text_track;
- is_text = true;
- } else {
- MEDIA_LOG(log_cb_) << "Unexpected track number " << track_num;
- return false;
- }
-
- last_block_timecode_ = timecode;
-
- base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(
- (cluster_timecode_ + timecode) * timecode_multiplier_);
-
- // The most significant bit of the flags byte (0x80) is set when a
- // SimpleBlock contains only keyframes. If this is a Block, the payload
- // must be inspected to determine whether it contains a keyframe or not.
- // http://www.matroska.org/technical/specs/index.html
- bool is_keyframe =
- is_simple_block ? (flags & 0x80) != 0 : track->IsKeyframe(data, size);
-
- scoped_refptr<StreamParserBuffer> buffer;
- if (!is_text) {
- buffer = StreamParserBuffer::CopyFrom(data, size,
- additional, additional_size,
- is_keyframe);
- } else {
- std::string id, settings, content;
- WebMWebVTTParser::Parse(data, size,
- &id, &settings, &content);
-
- std::vector<uint8> side_data;
- MakeSideData(id.begin(), id.end(),
- settings.begin(), settings.end(),
- &side_data);
-
- buffer = StreamParserBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(content.data()),
- content.length(),
- &side_data[0],
- side_data.size(),
- is_keyframe);
- }
-
- // Every encrypted Block has a signal byte and IV prepended to it. The
- // current encrypted WebM request-for-comments specification is at
- // http://wiki.webmproject.org/encryption/webm-encryption-rfc
- if (!encryption_key_id.empty()) {
- scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig(
- data, size,
- reinterpret_cast<const uint8*>(encryption_key_id.data()),
- encryption_key_id.size()));
- if (!config)
- return false;
- buffer->set_decrypt_config(config.Pass());
- }
-
- buffer->set_timestamp(timestamp);
- if (cluster_start_time_ == kNoTimestamp())
- cluster_start_time_ = timestamp;
-
- if (block_duration >= 0) {
- buffer->set_duration(base::TimeDelta::FromMicroseconds(
- block_duration * timecode_multiplier_));
- }
-
- if (discard_padding != 0) {
- buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
- discard_padding / 1000));
- }
-
- return track->AddBuffer(buffer);
-}
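
A worked sketch of the timestamp arithmetic in OnBlock() above, assuming that timecode_multiplier_ is the Segment's TimecodeScale in nanoseconds divided by 1000 (the unit tests below use kTimecodeScale = 1000000 and expect millisecond timestamps, which is consistent with that reading):

    #include <stdint.h>

    static int64_t BlockTimestampInMicroseconds(int64_t cluster_timecode,
                                                int64_t block_timecode,
                                                int64_t timecode_scale_ns) {
      const int64_t multiplier = timecode_scale_ns / 1000;  // us per tick.
      return (cluster_timecode + block_timecode) * multiplier;
    }
    // e.g. cluster 0, block timecode 33, scale 1000000 -> 33000 us (33 ms).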
-
-WebMClusterParser::Track::Track(int track_num, bool is_video)
- : track_num_(track_num),
- is_video_(is_video) {
-}
-
-WebMClusterParser::Track::~Track() {}
-
-bool WebMClusterParser::Track::AddBuffer(
- const scoped_refptr<StreamParserBuffer>& buffer) {
- DVLOG(2) << "AddBuffer() : " << track_num_
- << " ts " << buffer->timestamp().InSecondsF()
- << " dur " << buffer->duration().InSecondsF()
- << " kf " << buffer->IsKeyframe()
- << " size " << buffer->data_size();
-
- buffers_.push_back(buffer);
- return true;
-}
-
-void WebMClusterParser::Track::Reset() {
- buffers_.clear();
-}
-
-bool WebMClusterParser::Track::IsKeyframe(const uint8* data, int size) const {
- // For now, assume that all blocks are keyframes for datatypes other than
- // video. This is a valid assumption for Vorbis, WebVTT, & Opus.
- if (!is_video_)
- return true;
-
- // Make sure the block is big enough for the minimal keyframe header size.
- if (size < 7)
- return false;
-
- // The LSb of the first byte must be a 0 for a keyframe.
- // http://tools.ietf.org/html/rfc6386 Section 19.1
- if ((data[0] & 0x01) != 0)
- return false;
-
- // Verify VP8 keyframe startcode.
- // http://tools.ietf.org/html/rfc6386 Section 19.1
- if (data[3] != 0x9d || data[4] != 0x01 || data[5] != 0x2a)
- return false;
-
- return true;
-}
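
A hedged expansion of the two keyframe checks above into the full three-byte VP8 frame tag from RFC 6386 section 19.1 (bit 0 frame type, bits 1 to 3 version, bit 4 show_frame, bits 5 to 23 first partition size; keyframes then carry the 0x9d 0x01 0x2a start code). The struct and function are illustrative, not Chromium API:

    #include <stdint.h>

    struct Vp8FrameTag {
      bool is_keyframe;
      int version;
      bool show_frame;
      uint32_t first_part_size;
    };

    static bool ParseVp8FrameTag(const uint8_t* data, int size,
                                 Vp8FrameTag* tag) {
      if (size < 3)
        return false;
      const uint32_t raw = data[0] | (data[1] << 8) | (data[2] << 16);
      tag->is_keyframe = (raw & 0x1) == 0;  // LSb 0 means keyframe.
      tag->version = (raw >> 1) & 0x7;
      tag->show_frame = ((raw >> 4) & 0x1) != 0;
      tag->first_part_size = (raw >> 5) & 0x7ffff;
      if (tag->is_keyframe)  // Keyframes must carry the start code.
        return size >= 6 &&
               data[3] == 0x9d && data[4] == 0x01 && data[5] == 0x2a;
      return true;
    }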
-
-void WebMClusterParser::ResetTextTracks() {
- for (TextTrackMap::iterator it = text_track_map_.begin();
- it != text_track_map_.end();
- ++it) {
- it->second.Reset();
- }
-}
-
-WebMClusterParser::Track*
-WebMClusterParser::FindTextTrack(int track_num) {
- const TextTrackMap::iterator it = text_track_map_.find(track_num);
-
- if (it == text_track_map_.end())
- return NULL;
-
- return &it->second;
-}
-
-} // namespace media
diff --git a/chromium/media/webm/webm_cluster_parser.h b/chromium/media/webm/webm_cluster_parser.h
deleted file mode 100644
index 5aa957cdee6..00000000000
--- a/chromium/media/webm/webm_cluster_parser.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_WEBM_WEBM_CLUSTER_PARSER_H_
-#define MEDIA_WEBM_WEBM_CLUSTER_PARSER_H_
-
-#include <deque>
-#include <map>
-#include <set>
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-#include "media/base/media_log.h"
-#include "media/base/stream_parser_buffer.h"
-#include "media/webm/webm_parser.h"
-#include "media/webm/webm_tracks_parser.h"
-
-namespace media {
-
-class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
- private:
- // Helper class that manages per-track state.
- class Track {
- public:
- Track(int track_num, bool is_video);
- ~Track();
-
- int track_num() const { return track_num_; }
- const std::deque<scoped_refptr<StreamParserBuffer> >& buffers() const {
- return buffers_;
- }
-
- bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
-
- // Clears all buffer state.
- void Reset();
-
- // Helper function used to inspect block data to determine if the
- // block is a keyframe.
- // |data| contains the bytes in the block.
- // |size| indicates the number of bytes in |data|.
- bool IsKeyframe(const uint8* data, int size) const;
-
- private:
- int track_num_;
- std::deque<scoped_refptr<StreamParserBuffer> > buffers_;
- bool is_video_;
- };
-
- typedef std::map<int, Track> TextTrackMap;
-
- public:
- typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
-
- class MEDIA_EXPORT TextTrackIterator {
- public:
- explicit TextTrackIterator(const TextTrackMap& text_track_map);
- TextTrackIterator(const TextTrackIterator& rhs);
- ~TextTrackIterator();
-
- // Advances to the next text track. If the iterator is exhausted, it
- // sets |*track_num| to 0 and |*buffers| to NULL, and returns false.
- // Otherwise, it returns the track number and buffers for the associated
- // track, and returns true.
- bool operator()(int* track_num, const BufferQueue** buffers);
- private:
- TextTrackIterator& operator=(const TextTrackIterator&);
-
- TextTrackMap::const_iterator iterator_;
- const TextTrackMap::const_iterator iterator_end_;
- };
-
- WebMClusterParser(int64 timecode_scale,
- int audio_track_num,
- int video_track_num,
- const WebMTracksParser::TextTracks& text_tracks,
- const std::set<int64>& ignored_tracks,
- const std::string& audio_encryption_key_id,
- const std::string& video_encryption_key_id,
- const LogCB& log_cb);
- virtual ~WebMClusterParser();
-
- // Resets the parser state so it can accept a new cluster.
- void Reset();
-
- // Parses a WebM cluster element in |buf|.
- //
- // Returns -1 if the parse fails.
- // Returns 0 if more data is needed.
- // Returns the number of bytes parsed on success.
- int Parse(const uint8* buf, int size);
-
- base::TimeDelta cluster_start_time() const { return cluster_start_time_; }
- const BufferQueue& audio_buffers() const { return audio_.buffers(); }
- const BufferQueue& video_buffers() const { return video_.buffers(); }
-
- // Returns an iterator object, allowing each text track to be visited.
- TextTrackIterator CreateTextTrackIterator() const;
-
- // Returns true if the last Parse() call stopped at the end of a cluster.
- bool cluster_ended() const { return cluster_ended_; }
-
- private:
- // WebMParserClient methods.
- virtual WebMParserClient* OnListStart(int id) OVERRIDE;
- virtual bool OnListEnd(int id) OVERRIDE;
- virtual bool OnUInt(int id, int64 val) OVERRIDE;
- virtual bool OnBinary(int id, const uint8* data, int size) OVERRIDE;
-
- bool ParseBlock(bool is_simple_block, const uint8* buf, int size,
- const uint8* additional, int additional_size, int duration,
- int64 discard_padding);
- bool OnBlock(bool is_simple_block, int track_num, int timecode, int duration,
- int flags, const uint8* data, int size,
- const uint8* additional, int additional_size,
- int64 discard_padding);
-
- // Resets the Track objects associated with each text track.
- void ResetTextTracks();
-
- // Searches for the indicated track_num among the text tracks. Returns
- // NULL if that track num is not a text track.
- Track* FindTextTrack(int track_num);
-
- double timecode_multiplier_; // Multiplier used to convert timecodes into
- // microseconds.
- std::set<int64> ignored_tracks_;
- std::string audio_encryption_key_id_;
- std::string video_encryption_key_id_;
-
- WebMListParser parser_;
-
- int64 last_block_timecode_;
- scoped_ptr<uint8[]> block_data_;
- int block_data_size_;
- int64 block_duration_;
- int64 block_add_id_;
- scoped_ptr<uint8[]> block_additional_data_;
- int block_additional_data_size_;
- int64 discard_padding_;
- bool discard_padding_set_;
-
- int64 cluster_timecode_;
- base::TimeDelta cluster_start_time_;
- bool cluster_ended_;
-
- Track audio_;
- Track video_;
- TextTrackMap text_track_map_;
- LogCB log_cb_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(WebMClusterParser);
-};
-
-} // namespace media
-
-#endif // MEDIA_WEBM_WEBM_CLUSTER_PARSER_H_
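
A short usage sketch of the text-track iterator API declared above, assuming only this header; the function name is illustrative, and parser construction and cluster feeding are elided:

    #include "media/webm/webm_cluster_parser.h"

    namespace media {

    // Visits the buffers of every text track parsed from the cluster.
    void VisitTextTracks(const WebMClusterParser& parser) {
      WebMClusterParser::TextTrackIterator it =
          parser.CreateTextTrackIterator();
      int track_num;
      const WebMClusterParser::BufferQueue* buffers;
      while (it(&track_num, &buffers)) {
        // |buffers| holds the cues parsed for text track |track_num|.
      }
    }

    }  // namespace media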
diff --git a/chromium/media/webm/webm_cluster_parser_unittest.cc b/chromium/media/webm/webm_cluster_parser_unittest.cc
deleted file mode 100644
index 691325d7403..00000000000
--- a/chromium/media/webm/webm_cluster_parser_unittest.cc
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "media/base/decrypt_config.h"
-#include "media/webm/cluster_builder.h"
-#include "media/webm/webm_cluster_parser.h"
-#include "media/webm/webm_constants.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::InSequence;
-using ::testing::Return;
-using ::testing::_;
-
-namespace media {
-
-enum {
- kTimecodeScale = 1000000, // Timecode scale for millisecond timestamps.
- kAudioTrackNum = 1,
- kVideoTrackNum = 2,
- kTextTrackNum = 3,
-};
-
-struct BlockInfo {
- int track_num;
- int timestamp;
- int duration;
- bool use_simple_block;
-};
-
-static const BlockInfo kDefaultBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 34, true },
- { kAudioTrackNum, 46, 23, true },
- { kVideoTrackNum, 67, 33, false },
- { kAudioTrackNum, 69, 23, false },
- { kVideoTrackNum, 100, 33, false },
-};
-
-static const uint8 kEncryptedFrame[] = {
- 0x01, // Block is encrypted
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 // IV
-};
-
-static scoped_ptr<Cluster> CreateCluster(int timecode,
- const BlockInfo* block_info,
- int block_count) {
- ClusterBuilder cb;
- cb.SetClusterTimecode(0);
-
- for (int i = 0; i < block_count; i++) {
- uint8 data[] = { 0x00 };
- if (block_info[i].use_simple_block) {
- cb.AddSimpleBlock(block_info[i].track_num,
- block_info[i].timestamp,
- 0, data, sizeof(data));
- continue;
- }
-
- CHECK_GE(block_info[i].duration, 0);
- cb.AddBlockGroup(block_info[i].track_num,
- block_info[i].timestamp,
- block_info[i].duration,
- 0, data, sizeof(data));
- }
-
- return cb.Finish();
-}
-
-// Creates a Cluster with one encrypted Block. |bytes_to_write| is the
-// number of bytes of the encrypted frame to write.
-static scoped_ptr<Cluster> CreateEncryptedCluster(int bytes_to_write) {
- CHECK_GT(bytes_to_write, 0);
- CHECK_LE(bytes_to_write, static_cast<int>(sizeof(kEncryptedFrame)));
-
- ClusterBuilder cb;
- cb.SetClusterTimecode(0);
- cb.AddSimpleBlock(kVideoTrackNum, 0, 0, kEncryptedFrame, bytes_to_write);
- return cb.Finish();
-}
-
-static bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
- const WebMClusterParser::BufferQueue& video_buffers,
- const WebMClusterParser::BufferQueue& text_buffers,
- const BlockInfo* block_info,
- int block_count) {
- size_t audio_offset = 0;
- size_t video_offset = 0;
- size_t text_offset = 0;
- for (int i = 0; i < block_count; i++) {
- const WebMClusterParser::BufferQueue* buffers = NULL;
- size_t* offset;
-
- if (block_info[i].track_num == kAudioTrackNum) {
- buffers = &audio_buffers;
- offset = &audio_offset;
- } else if (block_info[i].track_num == kVideoTrackNum) {
- buffers = &video_buffers;
- offset = &video_offset;
- } else if (block_info[i].track_num == kTextTrackNum) {
- buffers = &text_buffers;
- offset = &text_offset;
- } else {
- LOG(ERROR) << "Unexpected track number " << block_info[i].track_num;
- return false;
- }
-
- if (*offset >= buffers->size())
- return false;
-
- scoped_refptr<StreamParserBuffer> buffer = (*buffers)[(*offset)++];
-
- EXPECT_EQ(buffer->timestamp().InMilliseconds(), block_info[i].timestamp);
-
- if (!block_info[i].use_simple_block)
- EXPECT_NE(buffer->duration(), kNoTimestamp());
-
- if (buffer->duration() != kNoTimestamp())
- EXPECT_EQ(buffer->duration().InMilliseconds(), block_info[i].duration);
- }
-
- return true;
-}
-
-static bool VerifyBuffers(const scoped_ptr<WebMClusterParser>& parser,
- const BlockInfo* block_info,
- int block_count) {
- typedef WebMClusterParser::TextTrackIterator TextTrackIterator;
- TextTrackIterator text_it = parser->CreateTextTrackIterator();
-
- int text_track_num;
- const WebMClusterParser::BufferQueue* text_buffers;
-
- while (text_it(&text_track_num, &text_buffers))
- break;
-
- const WebMClusterParser::BufferQueue no_text_buffers;
-
- if (text_buffers == NULL)
- text_buffers = &no_text_buffers;
-
- return VerifyBuffers(parser->audio_buffers(),
- parser->video_buffers(),
- *text_buffers,
- block_info,
- block_count);
-}
-
-static bool VerifyTextBuffers(
- const scoped_ptr<WebMClusterParser>& parser,
- const BlockInfo* block_info_ptr,
- int block_count,
- int text_track_num,
- const WebMClusterParser::BufferQueue& text_buffers) {
- const BlockInfo* const block_info_end = block_info_ptr + block_count;
-
- typedef WebMClusterParser::BufferQueue::const_iterator TextBufferIter;
- TextBufferIter buffer_iter = text_buffers.begin();
- const TextBufferIter buffer_end = text_buffers.end();
-
- while (block_info_ptr != block_info_end) {
- const BlockInfo& block_info = *block_info_ptr++;
-
- if (block_info.track_num != text_track_num)
- continue;
-
- EXPECT_FALSE(block_info.use_simple_block);
- EXPECT_FALSE(buffer_iter == buffer_end);
-
- const scoped_refptr<StreamParserBuffer> buffer = *buffer_iter++;
- EXPECT_EQ(buffer->timestamp().InMilliseconds(), block_info.timestamp);
- EXPECT_EQ(buffer->duration().InMilliseconds(), block_info.duration);
- }
-
- EXPECT_TRUE(buffer_iter == buffer_end);
- return true;
-}
-
-static bool VerifyEncryptedBuffer(
- scoped_refptr<StreamParserBuffer> buffer) {
- EXPECT_TRUE(buffer->decrypt_config());
- EXPECT_EQ(static_cast<unsigned long>(DecryptConfig::kDecryptionKeySize),
- buffer->decrypt_config()->iv().length());
- const uint8* data = buffer->data();
- return data[0] & kWebMFlagEncryptedFrame;
-}
-
-static void AppendToEnd(const WebMClusterParser::BufferQueue& src,
- WebMClusterParser::BufferQueue* dest) {
- for (WebMClusterParser::BufferQueue::const_iterator itr = src.begin();
- itr != src.end(); ++itr) {
- dest->push_back(*itr);
- }
-}
-
-class WebMClusterParserTest : public testing::Test {
- public:
- WebMClusterParserTest()
- : parser_(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- WebMTracksParser::TextTracks(),
- std::set<int64>(),
- std::string(),
- std::string(),
- LogCB())) {}
-
- protected:
- scoped_ptr<WebMClusterParser> parser_;
-};
-
-TEST_F(WebMClusterParserTest, Reset) {
- InSequence s;
-
- int block_count = arraysize(kDefaultBlockInfo);
- scoped_ptr<Cluster> cluster(CreateCluster(0, kDefaultBlockInfo, block_count));
-
- // Send slightly less than the full cluster so all but the last block is
- // parsed.
- int result = parser_->Parse(cluster->data(), cluster->size() - 1);
- EXPECT_GT(result, 0);
- EXPECT_LT(result, cluster->size());
-
- ASSERT_TRUE(VerifyBuffers(parser_, kDefaultBlockInfo, block_count - 1));
- parser_->Reset();
-
- // Now parse a whole cluster to verify that all the blocks will get parsed.
- result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(result, cluster->size());
- ASSERT_TRUE(VerifyBuffers(parser_, kDefaultBlockInfo, block_count));
-}
-
-TEST_F(WebMClusterParserTest, ParseClusterWithSingleCall) {
- int block_count = arraysize(kDefaultBlockInfo);
- scoped_ptr<Cluster> cluster(CreateCluster(0, kDefaultBlockInfo, block_count));
-
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
- ASSERT_TRUE(VerifyBuffers(parser_, kDefaultBlockInfo, block_count));
-}
-
-TEST_F(WebMClusterParserTest, ParseClusterWithMultipleCalls) {
- int block_count = arraysize(kDefaultBlockInfo);
- scoped_ptr<Cluster> cluster(CreateCluster(0, kDefaultBlockInfo, block_count));
-
- WebMClusterParser::BufferQueue audio_buffers;
- WebMClusterParser::BufferQueue video_buffers;
- const WebMClusterParser::BufferQueue no_text_buffers;
-
- const uint8* data = cluster->data();
- int size = cluster->size();
- int default_parse_size = 3;
- int parse_size = std::min(default_parse_size, size);
-
- while (size > 0) {
- int result = parser_->Parse(data, parse_size);
- ASSERT_GE(result, 0);
- ASSERT_LE(result, parse_size);
-
- if (result == 0) {
- // The parser needs more data so increase the parse_size a little.
- parse_size += default_parse_size;
- parse_size = std::min(parse_size, size);
- continue;
- }
-
- AppendToEnd(parser_->audio_buffers(), &audio_buffers);
- AppendToEnd(parser_->video_buffers(), &video_buffers);
-
- parse_size = default_parse_size;
-
- data += result;
- size -= result;
- }
- ASSERT_TRUE(VerifyBuffers(audio_buffers, video_buffers,
- no_text_buffers, kDefaultBlockInfo,
- block_count));
-}
-
-// Verify that both BlockGroups with the BlockDuration before the Block
-// and BlockGroups with the BlockDuration after the Block are supported
-// correctly.
-// Note: Raw bytes are used here because ClusterBuilder only generates
-// one of these scenarios.
-TEST_F(WebMClusterParserTest, ParseBlockGroup) {
- const BlockInfo kBlockInfo[] = {
- { kAudioTrackNum, 0, 23, false },
- { kVideoTrackNum, 33, 34, false },
- };
- int block_count = arraysize(kBlockInfo);
-
- const uint8 kClusterData[] = {
- 0x1F, 0x43, 0xB6, 0x75, 0x9B, // Cluster(size=27)
- 0xE7, 0x81, 0x00, // Timecode(size=1, value=0)
- // BlockGroup with BlockDuration before Block.
- 0xA0, 0x8A, // BlockGroup(size=10)
- 0x9B, 0x81, 0x17, // BlockDuration(size=1, value=23)
- 0xA1, 0x85, 0x81, 0x00, 0x00, 0x00, 0xaa, // Block(size=5, track=1, ts=0)
- // BlockGroup with BlockDuration after Block.
- 0xA0, 0x8A, // BlockGroup(size=10)
- 0xA1, 0x85, 0x82, 0x00, 0x21, 0x00, 0x55, // Block(size=5, track=2, ts=33)
- 0x9B, 0x81, 0x22, // BlockDuration(size=1, value=34)
- };
- const int kClusterSize = sizeof(kClusterData);
-
- int result = parser_->Parse(kClusterData, kClusterSize);
- EXPECT_EQ(result, kClusterSize);
- ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
-}
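
The raw bytes in kClusterData follow EBML's variable-length encoding: the number of leading zero bits in the first byte gives the count of extra bytes, and for sizes the marker bit itself is stripped, so 0x9B encodes the Cluster payload size 27 and 0x8A the BlockGroup size 10 (element IDs such as 0xA0 and 0xA1 keep their marker bits). A minimal sketch of the size decoder, under that reading:

    #include <stdint.h>

    // Returns bytes consumed, 0 if more data is needed, -1 on error.
    static int DecodeEbmlSize(const uint8_t* buf, int size, int64_t* value) {
      if (size < 1)
        return 0;
      if (buf[0] == 0)
        return -1;  // More than 8 length bits is not handled here.
      int extra_bytes = 0;
      uint8_t mask = 0x80;
      while (!(buf[0] & mask)) {  // Count leading zero bits.
        ++extra_bytes;
        mask >>= 1;
      }
      if (1 + extra_bytes > size)
        return 0;
      int64_t v = buf[0] & (mask - 1);  // Strip the length marker bit.
      for (int i = 1; i <= extra_bytes; ++i)
        v = (v << 8) | buf[i];
      *value = v;
      return 1 + extra_bytes;
    }
    // e.g. 0x9B -> 27 (the Cluster size above), 0x8A -> 10, 0x81 -> 1.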
-
-TEST_F(WebMClusterParserTest, ParseSimpleBlockAndBlockGroupMixture) {
- const BlockInfo kBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, false },
- { kVideoTrackNum, 33, 34, true },
- { kAudioTrackNum, 46, 23, false },
- { kVideoTrackNum, 67, 33, false },
- };
- int block_count = arraysize(kBlockInfo);
- scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
-
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
- ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
-}
-
-TEST_F(WebMClusterParserTest, IgnoredTracks) {
- std::set<int64> ignored_tracks;
- ignored_tracks.insert(kTextTrackNum);
-
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- WebMTracksParser::TextTracks(),
- ignored_tracks,
- std::string(),
- std::string(),
- LogCB()));
-
- const BlockInfo kInputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 33, true },
- { kTextTrackNum, 33, 99, true },
- { kAudioTrackNum, 46, 23, true },
- { kVideoTrackNum, 67, 33, true },
- };
- int input_block_count = arraysize(kInputBlockInfo);
-
- const BlockInfo kOutputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 33, true },
- { kAudioTrackNum, 46, 23, true },
- { kVideoTrackNum, 67, 33, true },
- };
- int output_block_count = arraysize(kOutputBlockInfo);
-
- scoped_ptr<Cluster> cluster(
- CreateCluster(0, kInputBlockInfo, input_block_count));
-
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
- ASSERT_TRUE(VerifyBuffers(parser_, kOutputBlockInfo, output_block_count));
-}
-
-TEST_F(WebMClusterParserTest, ParseTextTracks) {
- typedef WebMTracksParser::TextTracks TextTracks;
- TextTracks text_tracks;
-
- text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
- TextTrackConfig(kTextSubtitles, "", "",
- "")));
-
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- LogCB()));
-
- const BlockInfo kInputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 33, true },
- { kTextTrackNum, 33, 42, false },
- { kAudioTrackNum, 46, 23, true },
- { kTextTrackNum, 55, 44, false },
- { kVideoTrackNum, 67, 33, true },
- };
- int input_block_count = arraysize(kInputBlockInfo);
-
- scoped_ptr<Cluster> cluster(
- CreateCluster(0, kInputBlockInfo, input_block_count));
-
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
- ASSERT_TRUE(VerifyBuffers(parser_, kInputBlockInfo, input_block_count));
-}
-
-TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
- typedef WebMTracksParser::TextTracks TextTracks;
- WebMTracksParser::TextTracks text_tracks;
-
- text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
- TextTrackConfig(kTextSubtitles, "", "",
- "")));
-
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- LogCB()));
-
- const BlockInfo kInputBlockInfo[] = {
- { kTextTrackNum, 33, 42, true },
- };
- int input_block_count = arraysize(kInputBlockInfo);
-
- scoped_ptr<Cluster> cluster(
- CreateCluster(0, kInputBlockInfo, input_block_count));
-
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_LT(result, 0);
-}
-
-TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
- typedef WebMTracksParser::TextTracks TextTracks;
- TextTracks text_tracks;
-
- const int kSubtitleTextTrackNum = kTextTrackNum;
- const int kCaptionTextTrackNum = kTextTrackNum + 1;
-
- text_tracks.insert(std::make_pair(TextTracks::key_type(kSubtitleTextTrackNum),
- TextTrackConfig(kTextSubtitles, "", "",
- "")));
-
- text_tracks.insert(std::make_pair(TextTracks::key_type(kCaptionTextTrackNum),
- TextTrackConfig(kTextCaptions, "", "",
- "")));
-
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- LogCB()));
-
- const BlockInfo kInputBlockInfo[] = {
- { kAudioTrackNum, 0, 23, true },
- { kAudioTrackNum, 23, 23, true },
- { kVideoTrackNum, 33, 33, true },
- { kSubtitleTextTrackNum, 33, 42, false },
- { kAudioTrackNum, 46, 23, true },
- { kCaptionTextTrackNum, 55, 44, false },
- { kVideoTrackNum, 67, 33, true },
- { kSubtitleTextTrackNum, 67, 33, false },
- };
- int input_block_count = arraysize(kInputBlockInfo);
-
- scoped_ptr<Cluster> cluster(
- CreateCluster(0, kInputBlockInfo, input_block_count));
-
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
-
- WebMClusterParser::TextTrackIterator text_it =
- parser_->CreateTextTrackIterator();
-
- int text_track_num;
- const WebMClusterParser::BufferQueue* text_buffers;
-
- while (text_it(&text_track_num, &text_buffers)) {
- const WebMTracksParser::TextTracks::const_iterator find_result =
- text_tracks.find(text_track_num);
- ASSERT_TRUE(find_result != text_tracks.end());
- ASSERT_TRUE(VerifyTextBuffers(parser_, kInputBlockInfo, input_block_count,
- text_track_num, *text_buffers));
- }
-}
-
-TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
- scoped_ptr<Cluster> cluster(CreateEncryptedCluster(sizeof(kEncryptedFrame)));
-
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- WebMTracksParser::TextTracks(),
- std::set<int64>(),
- std::string(),
- "video_key_id",
- LogCB()));
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(cluster->size(), result);
- ASSERT_EQ(1UL, parser_->video_buffers().size());
- scoped_refptr<StreamParserBuffer> buffer = parser_->video_buffers()[0];
- EXPECT_TRUE(VerifyEncryptedBuffer(buffer));
-}
-
-TEST_F(WebMClusterParserTest, ParseBadEncryptedBlock) {
- scoped_ptr<Cluster> cluster(
- CreateEncryptedCluster(sizeof(kEncryptedFrame) - 1));
-
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kVideoTrackNum,
- WebMTracksParser::TextTracks(),
- std::set<int64>(),
- std::string(),
- "video_key_id",
- LogCB()));
- int result = parser_->Parse(cluster->data(), cluster->size());
- EXPECT_EQ(-1, result);
-}
-
-} // namespace media
diff --git a/chromium/media/webm/webm_crypto_helpers.h b/chromium/media/webm/webm_crypto_helpers.h
deleted file mode 100644
index c5f1f15ecac..00000000000
--- a/chromium/media/webm/webm_crypto_helpers.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_WEBM_WEBM_CRYPTO_HELPERS_H_
-#define MEDIA_WEBM_WEBM_CRYPTO_HELPERS_H_
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/decoder_buffer.h"
-
-namespace media {
-
-// TODO(xhwang): Figure out the init data type appropriately once it's spec'ed.
-// See https://www.w3.org/Bugs/Public/show_bug.cgi?id=19096 for more
-// information.
-const char kWebMEncryptInitDataType[] = "video/webm";
-
-// Returns an initialized DecryptConfig, which can be sent to the Decryptor if
-// the stream has potentially encrypted frames. Every encrypted Block has a
-// signal byte, and if the frame is encrypted, an initialization vector
-// prepended to the frame. Leaving the IV empty will tell the decryptor that the
-// frame is unencrypted. Returns NULL if |data| is invalid. Current encrypted
-// WebM request for comments specification is here
-// http://wiki.webmproject.org/encryption/webm-encryption-rfc
-scoped_ptr<DecryptConfig> WebMCreateDecryptConfig(
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size);
-
-} // namespace media
-
-#endif // MEDIA_WEBM_WEBM_CRYPTO_HELPERS_H_
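
A hedged sketch of the frame layout the helper above consumes, per the WebM encryption RFC it links: byte 0 is the signal byte (bit 0 set means encrypted), and encrypted frames carry an 8-byte IV ahead of the ciphertext. The real helper is assumed to pad that IV into a 16-byte counter block for the decryptor, which is what the kDecryptionKeySize check in the cluster-parser unit test above suggests; the names here are illustrative:

    #include <stdint.h>

    #include <string>

    struct WebMFrameLayout {
      bool encrypted;
      std::string iv;      // 8 on-the-wire bytes when encrypted, else empty.
      int payload_offset;  // Where the frame data starts.
    };

    static bool SplitWebMFrame(const uint8_t* data, int size,
                               WebMFrameLayout* out) {
      if (size < 1)
        return false;  // The signal byte is mandatory.
      out->encrypted = (data[0] & 0x01) != 0;
      if (!out->encrypted) {
        out->iv.clear();
        out->payload_offset = 1;
        return true;
      }
      if (size < 1 + 8)  // Signal byte plus 8-byte IV.
        return false;    // The truncated-frame failure tested above.
      out->iv.assign(reinterpret_cast<const char*>(data + 1), 8);
      out->payload_offset = 1 + 8;
      return true;
    }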
diff --git a/chromium/media/webm/webm_tracks_parser_unittest.cc b/chromium/media/webm/webm_tracks_parser_unittest.cc
deleted file mode 100644
index ba1e7299f85..00000000000
--- a/chromium/media/webm/webm_tracks_parser_unittest.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "media/webm/tracks_builder.h"
-#include "media/webm/webm_constants.h"
-#include "media/webm/webm_tracks_parser.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::InSequence;
-using ::testing::Return;
-using ::testing::_;
-
-namespace media {
-
-class WebMTracksParserTest : public testing::Test {
- public:
- WebMTracksParserTest() {}
-};
-
-static void VerifyTextTrackInfo(const uint8* buffer,
- int buffer_size,
- TextKind text_kind,
- const std::string& name,
- const std::string& language) {
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), false));
-
- int result = parser->Parse(buffer, buffer_size);
- EXPECT_GT(result, 0);
- EXPECT_EQ(result, buffer_size);
-
- const WebMTracksParser::TextTracks& text_tracks = parser->text_tracks();
- EXPECT_EQ(text_tracks.size(), WebMTracksParser::TextTracks::size_type(1));
-
- const WebMTracksParser::TextTracks::const_iterator itr = text_tracks.begin();
- EXPECT_EQ(itr->first, 1); // track num
-
- const TextTrackConfig& config = itr->second;
- EXPECT_EQ(config.kind(), text_kind);
- EXPECT_TRUE(config.label() == name);
- EXPECT_TRUE(config.language() == language);
-}
-
-TEST_F(WebMTracksParserTest, SubtitleNoNameNoLang) {
- InSequence s;
-
- TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
- kWebMCodecSubtitles, "", "");
-
- const std::vector<uint8> buf = tb.Finish();
- VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "");
-}
-
-TEST_F(WebMTracksParserTest, SubtitleYesNameNoLang) {
- InSequence s;
-
- TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
- kWebMCodecSubtitles, "Spock", "");
-
- const std::vector<uint8> buf = tb.Finish();
- VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Spock", "");
-}
-
-TEST_F(WebMTracksParserTest, SubtitleNoNameYesLang) {
- InSequence s;
-
- TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
- kWebMCodecSubtitles, "", "eng");
-
- const std::vector<uint8> buf = tb.Finish();
- VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "eng");
-}
-
-TEST_F(WebMTracksParserTest, SubtitleYesNameYesLang) {
- InSequence s;
-
- TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
- kWebMCodecSubtitles, "Picard", "fre");
-
- const std::vector<uint8> buf = tb.Finish();
- VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Picard", "fre");
-}
-
-TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
- InSequence s;
-
- TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
- kWebMCodecSubtitles, "Subtitles", "fre");
- tb.AddTrack(2, kWebMTrackTypeSubtitlesOrCaptions, 2,
- kWebMCodecSubtitles, "Commentary", "fre");
-
- const std::vector<uint8> buf = tb.Finish();
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
-
- int result = parser->Parse(&buf[0], buf.size());
- EXPECT_GT(result, 0);
- EXPECT_EQ(result, static_cast<int>(buf.size()));
-
- EXPECT_EQ(parser->text_tracks().size(), 0u);
-
- const std::set<int64>& ignored_tracks = parser->ignored_tracks();
- EXPECT_TRUE(ignored_tracks.find(1) != ignored_tracks.end());
- EXPECT_TRUE(ignored_tracks.find(2) != ignored_tracks.end());
-
- // Test again w/o ignoring the text tracks.
- parser.reset(new WebMTracksParser(LogCB(), false));
-
- result = parser->Parse(&buf[0], buf.size());
- EXPECT_GT(result, 0);
-
- EXPECT_EQ(parser->ignored_tracks().size(), 0u);
- EXPECT_EQ(parser->text_tracks().size(), 2u);
-}
-
-} // namespace media