Diffstat (limited to 'chromium/third_party/ffmpeg/doc/examples/muxing.c')
 -rw-r--r--  chromium/third_party/ffmpeg/doc/examples/muxing.c | 230
 1 file changed, 136 insertions(+), 94 deletions(-)
diff --git a/chromium/third_party/ffmpeg/doc/examples/muxing.c b/chromium/third_party/ffmpeg/doc/examples/muxing.c
index 4276114306e..ad8e027148d 100644
--- a/chromium/third_party/ffmpeg/doc/examples/muxing.c
+++ b/chromium/third_party/ffmpeg/doc/examples/muxing.c
@@ -24,9 +24,9 @@
* @file
* libavformat API example.
*
- * Output a media file in any supported libavformat format.
- * The default codecs are used.
- * @example doc/examples/muxing.c
+ * Output a media file in any supported libavformat format. The default
+ * codecs are used.
+ * @example muxing.c
*/
#include <stdlib.h>
@@ -36,18 +36,43 @@
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
+#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
-/* 5 seconds stream duration */
-#define STREAM_DURATION 200.0
+static int audio_is_eof, video_is_eof;
+
+#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
-#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
+static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
+{
+ AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
+
+ printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
+ av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
+ av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
+ av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
+ pkt->stream_index);
+}
+
+static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
+{
+ /* rescale output packet timestamp values from codec to stream timebase */
+ pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
+ pkt->stream_index = st->index;
+
+ /* Write the compressed frame to the media file. */
+ log_packet(fmt_ctx, pkt);
+ return av_interleaved_write_frame(fmt_ctx, pkt);
+}
+
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
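Note on the new write_frame() helper above: av_rescale_q_rnd() converts each packet timestamp from the encoder's time base into the stream's time base before muxing. A minimal standalone sketch, with an assumed 1/44100 codec time base and an assumed 1/90000 MPEG stream time base (illustrative values, not taken from the patch):

/* Standalone sketch (not part of the patch): the same rescaling that
 * write_frame() performs on pkt->pts/dts. */
#include <stdio.h>
#include <inttypes.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational codec_tb  = { 1, 44100 };   /* assumed encoder time base: one tick per sample */
    AVRational stream_tb = { 1, 90000 };   /* assumed muxer time base: 90 kHz clock */
    int64_t pts = 1024;                    /* e.g. one AAC frame worth of samples */

    /* 1024 * 90000 / 44100 rounds to 2090 */
    int64_t out = av_rescale_q_rnd(pts, codec_tb, stream_tb,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    printf("pts %" PRId64 " -> %" PRId64 "\n", pts, out);
    return 0;
}

AV_ROUND_PASS_MINMAX lets INT64_MIN/INT64_MAX pass through unchanged, so an AV_NOPTS_VALUE timestamp is not mangled by the rescale.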
@@ -73,7 +98,8 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
- c->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ c->sample_fmt = (*codec)->sample_fmts ?
+ (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
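Since the audio stream now takes whatever (*codec)->sample_fmts[0] advertises instead of hard-coding AV_SAMPLE_FMT_FLTP, a small self-contained sketch of how that list can be inspected may help; codec_supports_sample_fmt() is a hypothetical helper, not part of the patch:

/* Illustrative only: walk a codec's sample_fmts list, which is
 * terminated by AV_SAMPLE_FMT_NONE, to see whether a given format is
 * supported before falling back to sample_fmts[0]. */
#include <stdio.h>
#include <libavcodec/avcodec.h>

static int codec_supports_sample_fmt(const AVCodec *codec,
                                     enum AVSampleFormat fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;
    if (!p)
        return 1;   /* codec publishes no list: any format is accepted */
    for (; *p != AV_SAMPLE_FMT_NONE; p++)
        if (*p == fmt)
            return 1;
    return 0;
}

int main(void)
{
    avcodec_register_all();
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec)
        return 1;
    printf("S16 supported: %d\n",
           codec_supports_sample_fmt(codec, AV_SAMPLE_FMT_S16));
    return 0;
}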
@@ -122,6 +148,7 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
static float t, tincr, tincr2;
+AVFrame *audio_frame;
static uint8_t **src_samples_data;
static int src_samples_linesize;
static int src_nb_samples;
@@ -130,6 +157,7 @@ static int max_dst_nb_samples;
uint8_t **dst_samples_data;
int dst_samples_linesize;
int dst_samples_size;
+int samples_count;
struct SwrContext *swr_ctx = NULL;
@@ -140,6 +168,13 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
c = st->codec;
+ /* allocate and init a re-usable frame */
+ audio_frame = av_frame_alloc();
+ if (!audio_frame) {
+ fprintf(stderr, "Could not allocate audio frame\n");
+ exit(1);
+ }
+
/* open it */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
@@ -157,12 +192,17 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
10000 : c->frame_size;
ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
- src_nb_samples, c->sample_fmt, 0);
+ src_nb_samples, AV_SAMPLE_FMT_S16, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate source samples\n");
exit(1);
}
+ /* compute the number of converted samples: buffering is avoided
+ * ensuring that the output buffer will contain at least all the
+ * converted input samples */
+ max_dst_nb_samples = src_nb_samples;
+
/* create resampler context */
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
swr_ctx = swr_alloc();
@@ -184,17 +224,15 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
fprintf(stderr, "Failed to initialize the resampling context\n");
exit(1);
}
- }
- /* compute the number of converted samples: buffering is avoided
- * ensuring that the output buffer will contain at least all the
- * converted input samples */
- max_dst_nb_samples = src_nb_samples;
- ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
- max_dst_nb_samples, c->sample_fmt, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate destination samples\n");
- exit(1);
+ ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
+ max_dst_nb_samples, c->sample_fmt, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not allocate destination samples\n");
+ exit(1);
+ }
+ } else {
+ dst_samples_data = src_samples_data;
}
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
c->sample_fmt, 0);
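The hunk above only shows the tail of the resampler setup (the swr_alloc() call and its error handling). For orientation, here is a standalone sketch that builds an equivalent S16-to-FLTP resampler with assumed parameters (stereo, 44100 Hz) using swr_alloc_set_opts(), one convenient alternative to setting the options one by one:

/* Standalone sketch with assumed parameters; not part of the patch. */
#include <stdio.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

int main(void)
{
    struct SwrContext *swr_ctx =
        swr_alloc_set_opts(NULL,
                           AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 44100, /* out */
                           AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  44100, /* in  */
                           0, NULL);
    if (!swr_ctx || swr_init(swr_ctx) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        return 1;
    }
    printf("resampler ready\n");
    swr_free(&swr_ctx);
    return 0;
}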
@@ -217,77 +255,83 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
}
}
-static void write_audio_frame(AVFormatContext *oc, AVStream *st)
+static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = av_frame_alloc();
int got_packet, ret, dst_nb_samples;
av_init_packet(&pkt);
c = st->codec;
- get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
-
- /* convert samples from native format to destination codec format, using the resampler */
- if (swr_ctx) {
- /* compute destination number of samples */
- dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
- c->sample_rate, c->sample_rate, AV_ROUND_UP);
- if (dst_nb_samples > max_dst_nb_samples) {
- av_free(dst_samples_data[0]);
- ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
- dst_nb_samples, c->sample_fmt, 0);
- if (ret < 0)
+ if (!flush) {
+ get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
+
+ /* convert samples from native format to destination codec format, using the resampler */
+ if (swr_ctx) {
+ /* compute destination number of samples */
+ dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
+ c->sample_rate, c->sample_rate, AV_ROUND_UP);
+ if (dst_nb_samples > max_dst_nb_samples) {
+ av_free(dst_samples_data[0]);
+ ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
+ dst_nb_samples, c->sample_fmt, 0);
+ if (ret < 0)
+ exit(1);
+ max_dst_nb_samples = dst_nb_samples;
+ dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
+ c->sample_fmt, 0);
+ }
+
+ /* convert to destination format */
+ ret = swr_convert(swr_ctx,
+ dst_samples_data, dst_nb_samples,
+ (const uint8_t **)src_samples_data, src_nb_samples);
+ if (ret < 0) {
+ fprintf(stderr, "Error while converting\n");
exit(1);
- max_dst_nb_samples = dst_nb_samples;
- dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
- c->sample_fmt, 0);
+ }
+ } else {
+ dst_nb_samples = src_nb_samples;
}
- /* convert to destination format */
- ret = swr_convert(swr_ctx,
- dst_samples_data, dst_nb_samples,
- (const uint8_t **)src_samples_data, src_nb_samples);
- if (ret < 0) {
- fprintf(stderr, "Error while converting\n");
- exit(1);
- }
- } else {
- dst_samples_data[0] = src_samples_data[0];
- dst_nb_samples = src_nb_samples;
+ audio_frame->nb_samples = dst_nb_samples;
+ audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
+ avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
+ dst_samples_data[0], dst_samples_size, 0);
+ samples_count += dst_nb_samples;
}
- frame->nb_samples = dst_nb_samples;
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- dst_samples_data[0], dst_samples_size, 0);
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
+ ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
- if (!got_packet)
+ if (!got_packet) {
+ if (flush)
+ audio_is_eof = 1;
return;
+ }
- pkt.stream_index = st->index;
-
- /* Write the compressed frame to the media file. */
- ret = av_interleaved_write_frame(oc, &pkt);
- if (ret != 0) {
+ ret = write_frame(oc, &c->time_base, st, &pkt);
+ if (ret < 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
exit(1);
}
- avcodec_free_frame(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
+ if (dst_samples_data != src_samples_data) {
+ av_free(dst_samples_data[0]);
+ av_free(dst_samples_data);
+ }
av_free(src_samples_data[0]);
- av_free(dst_samples_data[0]);
+ av_free(src_samples_data);
+ av_frame_free(&audio_frame);
}
/**************************************************************/
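The dst_nb_samples computation moved inside the !flush branch above can look redundant when the input and output rates are equal; this standalone sketch with assumed values shows what it does, including the case where the rates differ (the delay term, 0 here, would come from swr_get_delay() in the real code):

/* Standalone sketch of the dst_nb_samples arithmetic; values assumed. */
#include <stdio.h>
#include <inttypes.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t delay          = 0;     /* assumed; swr_get_delay(swr_ctx, rate) in the patch */
    int64_t src_nb_samples = 1152;  /* e.g. one MP2 frame of input samples */

    /* equal rates: the result is just src_nb_samples (1152) */
    printf("44100 -> 44100: %" PRId64 "\n",
           av_rescale_rnd(delay + src_nb_samples, 44100, 44100, AV_ROUND_UP));

    /* different rates: round up so the output buffer is never too small (1254) */
    printf("44100 -> 48000: %" PRId64 "\n",
           av_rescale_rnd(delay + src_nb_samples, 48000, 44100, AV_ROUND_UP));
    return 0;
}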
@@ -315,6 +359,9 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
+ frame->format = c->pix_fmt;
+ frame->width = c->width;
+ frame->height = c->height;
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
@@ -361,17 +408,13 @@ static void fill_yuv_image(AVPicture *pict, int frame_index,
}
}
-static void write_video_frame(AVFormatContext *oc, AVStream *st)
+static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
int ret;
static struct SwsContext *sws_ctx;
AVCodecContext *c = st->codec;
- if (frame_count >= STREAM_NB_FRAMES) {
- /* No more frames to compress. The codec has a latency of a few
- * frames if using B-frames, so we get the last frames by
- * passing the same picture again. */
- } else {
+ if (!flush) {
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
@@ -394,7 +437,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
}
}
- if (oc->oformat->flags & AVFMT_RAWPICTURE) {
+ if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
/* Raw video case - directly store the picture in the packet */
AVPacket pkt;
av_init_packet(&pkt);
@@ -411,23 +454,24 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
av_init_packet(&pkt);
/* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
+ frame->pts = frame_count;
+ ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
/* If size is zero, it means the image was buffered. */
- if (!ret && got_packet && pkt.size) {
- pkt.stream_index = st->index;
-
- /* Write the compressed frame to the media file. */
- ret = av_interleaved_write_frame(oc, &pkt);
+ if (got_packet) {
+ ret = write_frame(oc, &c->time_base, st, &pkt);
} else {
+ if (flush)
+ video_is_eof = 1;
ret = 0;
}
}
- if (ret != 0) {
+
+ if (ret < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
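On the frame->pts = frame_count line added above: because the video codec context uses a time base of 1/STREAM_FRAME_RATE, one pts tick equals one frame, and write_frame() later rescales that into the stream time base. A standalone sketch with assumed values (the 1/90000 stream time base is illustrative):

/* Sketch only: frame counter in codec ticks -> stream ticks. */
#include <stdio.h>
#include <inttypes.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational codec_tb  = { 1, 25 };     /* STREAM_FRAME_RATE */
    AVRational stream_tb = { 1, 90000 };  /* assumed MPEG stream time base */
    int64_t frame_count  = 3;             /* fourth generated frame */

    /* 3 * 90000 / 25 = 10800 stream ticks = 0.12 s, i.e. 3 frames at 25 fps */
    printf("frame %" PRId64 " -> stream pts %" PRId64 "\n",
           frame_count, av_rescale_q(frame_count, codec_tb, stream_tb));
    return 0;
}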
@@ -439,7 +483,7 @@ static void close_video(AVFormatContext *oc, AVStream *st)
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
- av_free(frame);
+ av_frame_free(&frame);
}
/**************************************************************/
@@ -453,7 +497,7 @@ int main(int argc, char **argv)
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
double audio_time, video_time;
- int ret;
+ int flush, ret;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
@@ -477,9 +521,9 @@ int main(int argc, char **argv)
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
- if (!oc) {
+ if (!oc)
return 1;
- }
+
fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
@@ -487,12 +531,10 @@ int main(int argc, char **argv)
video_st = NULL;
audio_st = NULL;
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
+ if (fmt->video_codec != AV_CODEC_ID_NONE)
video_st = add_stream(oc, &video_codec, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
+ if (fmt->audio_codec != AV_CODEC_ID_NONE)
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
- }
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
@@ -521,23 +563,23 @@ int main(int argc, char **argv)
return 1;
}
- if (frame)
- frame->pts = 0;
- for (;;) {
+ flush = 0;
+ while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
/* Compute current audio and video time. */
- audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
- video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;
+ audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
+ video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;
- if ((!audio_st || audio_time >= STREAM_DURATION) &&
- (!video_st || video_time >= STREAM_DURATION))
- break;
+ if (!flush &&
+ (!audio_st || audio_time >= STREAM_DURATION) &&
+ (!video_st || video_time >= STREAM_DURATION)) {
+ flush = 1;
+ }
/* write interleaved audio and video frames */
- if (!video_st || (video_st && audio_st && audio_time < video_time)) {
- write_audio_frame(oc, audio_st);
- } else {
- write_video_frame(oc, video_st);
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
+ if (audio_st && !audio_is_eof && audio_time <= video_time) {
+ write_audio_frame(oc, audio_st, flush);
+ } else if (video_st && !video_is_eof && video_time < audio_time) {
+ write_video_frame(oc, video_st, flush);
}
}
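Finally, the reworked main loop writes whichever stream has the smaller current time and keeps running until both streams report EOF; a finished stream's time is forced to INFINITY so it is never selected again. A toy illustration of that selection rule (values assumed, not from the patch):

/* Toy illustration of the stream-selection rule in the new main loop. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double audio_time = 0.98, video_time = 1.00;
    int audio_is_eof = 0, video_is_eof = 0;

    if (audio_is_eof) audio_time = INFINITY;   /* finished stream is never picked */
    if (video_is_eof) video_time = INFINITY;

    if (audio_time <= video_time)
        printf("write audio frame next\n");
    else
        printf("write video frame next\n");
    return 0;
}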