summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArtem Dyomin <artem.dyomin@qt.io>2022-11-22 14:57:36 +0100
committerArtem Dyomin <artem.dyomin@qt.io>2022-11-23 13:43:34 +0100
commit9242a5262a32d793deb5c4d745546fab8dd8d165 (patch)
tree82a2ff209f7dd65edb4ca024aed73dfbe47e0ed1
parentbebce8e8fb4f79fabd1f5f602b3bf90d542e02dc (diff)
Fix some memory leaks and crashes in corner cases with ffmpeg encoder
Refactoring: use unique_ptr for managing avframes. Memory leaks occurred because of inaccurate avframe memory management. Crashes occurred on linux with some virtual cameras because of null pix_fmts and null codecContext. Pick-to: 6.4 Change-Id: I8ad0b922e89cf01bc79eba6cbbfeb9d6172b9d56 Reviewed-by: Lars Knoll <lars@knoll.priv.no>
-rw-r--r--src/plugins/multimedia/ffmpeg/playbackengine/qffmpegstreamdecoder.cpp7
-rw-r--r--src/plugins/multimedia/ffmpeg/qavfcamera.mm28
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpeg_p.h16
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegdecoder.cpp13
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegdecoder_p.h23
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegencoder.cpp16
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegvideobuffer.cpp41
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegvideobuffer_p.h13
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder.cpp53
-rw-r--r--src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder_p.h2
10 files changed, 107 insertions, 105 deletions
diff --git a/src/plugins/multimedia/ffmpeg/playbackengine/qffmpegstreamdecoder.cpp b/src/plugins/multimedia/ffmpeg/playbackengine/qffmpegstreamdecoder.cpp
index 80a7fdaf2..6d3ac27cb 100644
--- a/src/plugins/multimedia/ffmpeg/playbackengine/qffmpegstreamdecoder.cpp
+++ b/src/plugins/multimedia/ffmpeg/playbackengine/qffmpegstreamdecoder.cpp
@@ -119,10 +119,9 @@ int StreamDecoder::sendAVPacket(Packet packet)
void StreamDecoder::receiveAVFrames()
{
while (true) {
- auto deleter = [](auto frame) { av_frame_free(&frame); };
- auto rawFrame = std::unique_ptr<AVFrame, decltype(deleter)>(av_frame_alloc(), deleter);
+ auto avFrame = makeAVFrame();
- const auto receiveFrameResult = avcodec_receive_frame(m_codec.context(), rawFrame.get());
+ const auto receiveFrameResult = avcodec_receive_frame(m_codec.context(), avFrame.get());
if (receiveFrameResult == AVERROR_EOF || receiveFrameResult == AVERROR(EAGAIN))
break;
@@ -132,7 +131,7 @@ void StreamDecoder::receiveAVFrames()
break;
}
- onFrameFound({ rawFrame.release(), m_codec, 0, this });
+ onFrameFound({ std::move(avFrame), m_codec, 0, this });
}
}
diff --git a/src/plugins/multimedia/ffmpeg/qavfcamera.mm b/src/plugins/multimedia/ffmpeg/qavfcamera.mm
index 37dd4b262..cb7cfdaec 100644
--- a/src/plugins/multimedia/ffmpeg/qavfcamera.mm
+++ b/src/plugins/multimedia/ffmpeg/qavfcamera.mm
@@ -28,10 +28,10 @@ static void releaseHwFrame(void */*opaque*/, uint8_t *data)
}
// Make sure this is compatible with the layout used in ffmpeg's hwcontext_videotoolbox
-static AVFrame *allocHWFrame(AVBufferRef *hwContext, const CVPixelBufferRef &pixbuf)
+static QFFmpeg::AVFrameUPtr allocHWFrame(AVBufferRef *hwContext, const CVPixelBufferRef &pixbuf)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)hwContext->data;
- AVFrame *frame = av_frame_alloc();
+ auto frame = QFFmpeg::makeAVFrame();
frame->hw_frames_ctx = av_buffer_ref(hwContext);
frame->extended_data = frame->data;
@@ -44,7 +44,6 @@ static AVFrame *allocHWFrame(AVBufferRef *hwContext, const CVPixelBufferRef &pix
if (frame->width != (int)CVPixelBufferGetWidth(pixbuf) ||
frame->height != (int)CVPixelBufferGetHeight(pixbuf)) {
// This can happen while changing camera format
- av_frame_free(&frame);
return nullptr;
}
return frame;
@@ -110,32 +109,31 @@ static AVAuthorizationStatus m_cameraAuthorizationStatus = AVAuthorizationStatus
if (!m_accel)
return;
- AVFrame *avFrame = allocHWFrame(m_accel->hwFramesContextAsBuffer(), imageBuffer);
+ auto avFrame = allocHWFrame(m_accel->hwFramesContextAsBuffer(), imageBuffer);
if (!avFrame)
return;
#ifdef USE_SW_FRAMES
- auto *swFrame = av_frame_alloc();
- /* retrieve data from GPU to CPU */
- int ret = av_hwframe_transfer_data(swFrame, avFrame, 0);
- if (ret < 0) {
- qWarning() << "Error transferring the data to system memory\n";
- av_frame_unref(swFrame);
- } else {
- av_frame_unref(avFrame);
- avFrame = swFrame;
+ {
+ auto swFrame = QFFmpeg::makeAVFrame();
+ /* retrieve data from GPU to CPU */
+ const int ret = av_hwframe_transfer_data(swFrame.get(), avFrame.get(), 0);
+ if (ret < 0) {
+ qWarning() << "Error transferring the data to system memory:" << ret;
+ } else {
+ avFrame = std::move(swFrame);
+ }
}
#endif
QVideoFrameFormat format = QAVFHelpers::videoFormatForImageBuffer(imageBuffer);
if (!format.isValid()) {
- av_frame_unref(avFrame);
return;
}
avFrame->pts = startTime;
- QFFmpegVideoBuffer *buffer = new QFFmpegVideoBuffer(avFrame);
+ QFFmpegVideoBuffer *buffer = new QFFmpegVideoBuffer(std::move(avFrame));
QVideoFrame frame(buffer, format);
frame.setStartTime(startTime);
frame.setEndTime(frameTime);
diff --git a/src/plugins/multimedia/ffmpeg/qffmpeg_p.h b/src/plugins/multimedia/ffmpeg/qffmpeg_p.h
index 6a1d6ab38..060531ac3 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpeg_p.h
+++ b/src/plugins/multimedia/ffmpeg/qffmpeg_p.h
@@ -53,6 +53,22 @@ inline QString err2str(int errnum)
return QString::fromLocal8Bit(buffer);
}
+struct AVFrameDeleter
+{
+ void operator()(AVFrame *frame) const
+ {
+ if (frame)
+ av_frame_free(&frame);
+ }
+};
+
+using AVFrameUPtr = std::unique_ptr<AVFrame, AVFrameDeleter>;
+
+inline AVFrameUPtr makeAVFrame()
+{
+ return AVFrameUPtr(av_frame_alloc());
+}
+
QT_END_NAMESPACE
}
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegdecoder.cpp b/src/plugins/multimedia/ffmpeg/qffmpegdecoder.cpp
index 399501c41..16504ade8 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegdecoder.cpp
+++ b/src/plugins/multimedia/ffmpeg/qffmpegdecoder.cpp
@@ -426,10 +426,10 @@ void StreamDecoder::decode()
{
Q_ASSERT(codec.context());
- AVFrame *frame = av_frame_alloc();
-// if (type() == 0)
-// qCDebug(qLcDecoder) << "receiving frame";
- int res = avcodec_receive_frame(codec.context(), frame);
+ auto frame = makeAVFrame();
+ // if (type() == 0)
+ // qCDebug(qLcDecoder) << "receiving frame";
+ int res = avcodec_receive_frame(codec.context(), frame.get());
if (res >= 0) {
qint64 pts;
@@ -437,20 +437,17 @@ void StreamDecoder::decode()
pts = codec.toUs(frame->pts);
else
pts = codec.toUs(frame->best_effort_timestamp);
- addFrame(Frame{frame, codec, pts});
+ addFrame(Frame{ std::move(frame), codec, pts });
} else if (res == AVERROR(EOF) || res == AVERROR_EOF) {
eos.storeRelease(true);
- av_frame_free(&frame);
timeOut = -1;
return;
} else if (res != AVERROR(EAGAIN)) {
qWarning() << "error in decoder" << res << err2str(res);
- av_frame_free(&frame);
return;
} else {
// EAGAIN
decoderHasNoFrames = true;
- av_frame_free(&frame);
}
Packet packet = peekPacket();
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegdecoder_p.h b/src/plugins/multimedia/ffmpeg/qffmpegdecoder_p.h
index 6609acdb4..b3b2dc605 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegdecoder_p.h
+++ b/src/plugins/multimedia/ffmpeg/qffmpegdecoder_p.h
@@ -98,8 +98,8 @@ private:
struct Frame
{
struct Data {
- Data(AVFrame *f, const Codec &codec, qint64, const QObject *source)
- : codec(codec), frame(f), source(source)
+ Data(AVFrameUPtr f, const Codec &codec, qint64, const QObject *source)
+ : codec(codec), frame(std::move(f)), source(source)
{
Q_ASSERT(frame);
if (frame->pts != AV_NOPTS_VALUE)
@@ -114,33 +114,26 @@ struct Frame
Data(const QString &text, qint64 pts, qint64 duration, const QObject *source)
: text(text), pts(pts), duration(duration), source(source)
{}
- ~Data() {
- if (frame)
- av_frame_free(&frame);
- }
+
QAtomicInt ref;
std::optional<Codec> codec;
- AVFrame *frame = nullptr;
+ AVFrameUPtr frame;
QString text;
qint64 pts = -1;
qint64 duration = -1;
QPointer<const QObject> source;
};
Frame() = default;
- Frame(AVFrame *f, const Codec &codec, qint64 pts, const QObject *source = nullptr)
- : d(new Data(f, codec, pts, source))
+ Frame(AVFrameUPtr f, const Codec &codec, qint64 pts, const QObject *source = nullptr)
+ : d(new Data(std::move(f), codec, pts, source))
{}
Frame(const QString &text, qint64 pts, qint64 duration, const QObject *source = nullptr)
: d(new Data(text, pts, duration, source))
{}
bool isValid() const { return !!d; }
- AVFrame *avFrame() const { return d->frame; }
- AVFrame *takeAVFrame() const {
- AVFrame *f = d->frame;
- d->frame = nullptr;
- return f;
- }
+ AVFrame *avFrame() const { return d->frame.get(); }
+ AVFrameUPtr takeAVFrame() { return std::move(d->frame); }
const Codec *codec() const { return d->codec ? &d->codec.value() : nullptr; }
qint64 pts() const { return d->pts; }
qint64 duration() const { return d->duration; }
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegencoder.cpp b/src/plugins/multimedia/ffmpeg/qffmpegencoder.cpp
index 63779af08..7f73153f2 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegencoder.cpp
+++ b/src/plugins/multimedia/ffmpeg/qffmpegencoder.cpp
@@ -382,7 +382,7 @@ void AudioEncoder::loop()
// qCDebug(qLcFFmpegEncoder) << "new audio buffer" << buffer.byteCount() << buffer.format() << buffer.frameCount() << codec->frame_size;
retrievePackets();
- AVFrame *frame = av_frame_alloc();
+ auto frame = makeAVFrame();
frame->format = codec->sample_fmt;
#if QT_FFMPEG_OLD_CHANNEL_LAYOUT
frame->channel_layout = codec->channel_layout;
@@ -393,7 +393,7 @@ void AudioEncoder::loop()
frame->sample_rate = codec->sample_rate;
frame->nb_samples = buffer.frameCount();
if (frame->nb_samples)
- av_frame_get_buffer(frame, 0);
+ av_frame_get_buffer(frame.get(), 0);
if (resampler) {
const uint8_t *data = buffer.constData<uint8_t>();
@@ -409,7 +409,7 @@ void AudioEncoder::loop()
encoder->newTimeStamp(time/1000);
// qCDebug(qLcFFmpegEncoder) << "sending audio frame" << buffer.byteCount() << frame->pts << ((double)buffer.frameCount()/frame->sample_rate);
- int ret = avcodec_send_frame(codec, frame);
+ int ret = avcodec_send_frame(codec, frame.get());
if (ret < 0) {
char errStr[1024];
av_strerror(ret, errStr, 1024);
@@ -513,24 +513,24 @@ void VideoEncoder::loop()
// qCDebug(qLcFFmpegEncoder) << "new video buffer" << frame.startTime();
- AVFrame *avFrame = nullptr;
+ AVFrameUPtr avFrame;
auto *videoBuffer = dynamic_cast<QFFmpegVideoBuffer *>(frame.videoBuffer());
if (videoBuffer) {
// ffmpeg video buffer, let's use the native AVFrame stored in there
auto *hwFrame = videoBuffer->getHWFrame();
if (hwFrame && hwFrame->format == frameEncoder->sourceFormat())
- avFrame = av_frame_clone(hwFrame);
+ avFrame.reset(av_frame_clone(hwFrame));
}
if (!avFrame) {
frame.map(QVideoFrame::ReadOnly);
auto size = frame.size();
- avFrame = av_frame_alloc();
+ avFrame = makeAVFrame();
avFrame->format = frameEncoder->sourceFormat();
avFrame->width = size.width();
avFrame->height = size.height();
- av_frame_get_buffer(avFrame, 0);
+ av_frame_get_buffer(avFrame.get(), 0);
for (int i = 0; i < 4; ++i) {
avFrame->data[i] = const_cast<uint8_t *>(frame.bits(i));
@@ -562,7 +562,7 @@ void VideoEncoder::loop()
encoder->newTimeStamp(time/1000);
qCDebug(qLcFFmpegEncoder) << ">>> sending frame" << avFrame->pts << time << lastFrameTime;
- int ret = frameEncoder->sendFrame(avFrame);
+ int ret = frameEncoder->sendFrame(std::move(avFrame));
if (ret < 0) {
qCDebug(qLcFFmpegEncoder) << "error sending frame" << ret << err2str(ret);
encoder->error(QMediaRecorder::ResourceError, err2str(ret));
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer.cpp b/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer.cpp
index 32d606903..f094ad258 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer.cpp
+++ b/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer.cpp
@@ -13,30 +13,22 @@ extern "C" {
QT_BEGIN_NAMESPACE
-QFFmpegVideoBuffer::QFFmpegVideoBuffer(AVFrame *frame)
- : QAbstractVideoBuffer(QVideoFrame::NoHandle)
- , frame(frame)
+QFFmpegVideoBuffer::QFFmpegVideoBuffer(AVFrameUPtr frame)
+ : QAbstractVideoBuffer(QVideoFrame::NoHandle), frame(frame.get())
{
if (frame->hw_frames_ctx) {
- hwFrame = frame;
- m_pixelFormat = toQtPixelFormat(QFFmpeg::HWAccel::format(frame));
+ hwFrame = std::move(frame);
+ m_pixelFormat = toQtPixelFormat(QFFmpeg::HWAccel::format(hwFrame.get()));
return;
}
- swFrame = frame;
+ swFrame = std::move(frame);
m_pixelFormat = toQtPixelFormat(AVPixelFormat(swFrame->format));
convertSWFrame();
}
-QFFmpegVideoBuffer::~QFFmpegVideoBuffer()
-{
- delete textures;
- if (swFrame)
- av_frame_free(&swFrame);
- if (hwFrame)
- av_frame_free(&hwFrame);
-}
+QFFmpegVideoBuffer::~QFFmpegVideoBuffer() = default;
void QFFmpegVideoBuffer::convertSWFrame()
{
@@ -50,15 +42,16 @@ void QFFmpegVideoBuffer::convertSWFrame()
swFrame->width, swFrame->height, newFormat,
SWS_BICUBIC, nullptr, nullptr, nullptr);
- AVFrame *newFrame = av_frame_alloc();
+ auto newFrame = QFFmpeg::makeAVFrame();
newFrame->width = swFrame->width;
newFrame->height = swFrame->height;
newFrame->format = newFormat;
- av_frame_get_buffer(newFrame, 0);
+ av_frame_get_buffer(newFrame.get(), 0);
sws_scale(c, swFrame->data, swFrame->linesize, 0, swFrame->height, newFrame->data, newFrame->linesize);
- av_frame_free(&swFrame);
- swFrame = newFrame;
+ if (frame == swFrame.get())
+ frame = newFrame.get();
+ swFrame = std::move(newFrame);
sws_freeContext(c);
}
}
@@ -66,7 +59,7 @@ void QFFmpegVideoBuffer::convertSWFrame()
void QFFmpegVideoBuffer::setTextureConverter(const QFFmpeg::TextureConverter &converter)
{
textureConverter = converter;
- textureConverter.init(hwFrame);
+ textureConverter.init(hwFrame.get());
m_type = converter.isNull() ? QVideoFrame::NoHandle : QVideoFrame::RhiTextureHandle;
}
@@ -143,7 +136,7 @@ QVideoFrameFormat::ColorRange QFFmpegVideoBuffer::colorRange() const
float QFFmpegVideoBuffer::maxNits()
{
float maxNits = -1;
- for (int i = 0; i <frame->nb_side_data; ++i) {
+ for (int i = 0; i < frame->nb_side_data; ++i) {
AVFrameSideData *sd = frame->side_data[i];
// TODO: Longer term we might want to also support HDR10+ dynamic metadata
if (sd->type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA) {
@@ -165,11 +158,11 @@ QAbstractVideoBuffer::MapData QFFmpegVideoBuffer::map(QVideoFrame::MapMode mode)
{
if (!swFrame) {
Q_ASSERT(hwFrame && hwFrame->hw_frames_ctx);
- swFrame = av_frame_alloc();
+ swFrame = QFFmpeg::makeAVFrame();
/* retrieve data from GPU to CPU */
- int ret = av_hwframe_transfer_data(swFrame, hwFrame, 0);
+ int ret = av_hwframe_transfer_data(swFrame.get(), hwFrame.get(), 0);
if (ret < 0) {
- qWarning() << "Error transferring the data to system memory\n";
+ qWarning() << "Error transferring the data to system memory:" << ret;
return {};
}
convertSWFrame();
@@ -199,7 +192,7 @@ std::unique_ptr<QVideoFrameTextures> QFFmpegVideoBuffer::mapTextures(QRhi *)
return {};
if (!hwFrame)
return {};
- textures = textureConverter.getTextures(hwFrame);
+ textures.reset(textureConverter.getTextures(hwFrame.get()));
if (!textures)
qWarning() << " failed to get textures for frame" << textureConverter.isNull();
return {};
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer_p.h b/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer_p.h
index a981ec245..06ba8ca54 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer_p.h
+++ b/src/plugins/multimedia/ffmpeg/qffmpegvideobuffer_p.h
@@ -28,9 +28,10 @@ QT_BEGIN_NAMESPACE
class QFFmpegVideoBuffer : public QAbstractVideoBuffer
{
public:
+ using AVFrameUPtr = QFFmpeg::AVFrameUPtr;
- QFFmpegVideoBuffer(AVFrame *frame);
- ~QFFmpegVideoBuffer();
+ QFFmpegVideoBuffer(AVFrameUPtr frame);
+ ~QFFmpegVideoBuffer() override;
QVideoFrame::MapMode mapMode() const override;
MapData map(QVideoFrame::MapMode mode) override;
@@ -47,7 +48,7 @@ public:
void convertSWFrame();
- AVFrame *getHWFrame() const { return hwFrame; }
+ AVFrame *getHWFrame() const { return hwFrame.get(); }
void setTextureConverter(const QFFmpeg::TextureConverter &converter);
@@ -60,11 +61,11 @@ public:
private:
QVideoFrameFormat::PixelFormat m_pixelFormat;
AVFrame *frame = nullptr;
- AVFrame *hwFrame = nullptr;
- AVFrame *swFrame = nullptr;
+ AVFrameUPtr hwFrame;
+ AVFrameUPtr swFrame;
QFFmpeg::TextureConverter textureConverter;
QVideoFrame::MapMode m_mode = QVideoFrame::NotMapped;
- QFFmpeg::TextureSet *textures = nullptr;
+ std::unique_ptr<QFFmpeg::TextureSet> textures;
};
QT_END_NAMESPACE
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder.cpp b/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder.cpp
index 6cb34f56c..fe81a2952 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder.cpp
+++ b/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder.cpp
@@ -83,11 +83,10 @@ VideoFrameEncoder::VideoFrameEncoder(const QMediaEncoderSettings &encoderSetting
}
}
auto supportsFormat = [&](AVPixelFormat fmt) {
- auto *f = d->codec->pix_fmts;
- while (*f != -1) {
- if (*f == fmt)
- return true;
- ++f;
+ if (auto fmts = d->codec->pix_fmts) {
+ for (; *fmts != AV_PIX_FMT_NONE; ++fmts)
+ if (*fmts == fmt)
+ return true;
}
return false;
};
@@ -97,9 +96,11 @@ VideoFrameEncoder::VideoFrameEncoder(const QMediaEncoderSettings &encoderSetting
if (!supportsFormat(d->sourceFormat)) {
if (supportsFormat(swFormat))
d->targetFormat = swFormat;
- else
+ else if (d->codec->pix_fmts)
// Take first format the encoder supports. Might want to improve upon this
d->targetFormat = *d->codec->pix_fmts;
+ else
+ qWarning() << "Cannot set target format";
}
auto desc = av_pix_fmt_desc_get(d->sourceFormat);
@@ -289,42 +290,49 @@ qint64 VideoFrameEncoder::getPts(qint64 us)
return div != 0 ? (us * d->stream->time_base.den + div / 2) / div : 0;
}
-int VideoFrameEncoder::sendFrame(AVFrame *frame)
+int VideoFrameEncoder::sendFrame(AVFrameUPtr frame)
{
+ if (!d->codecContext) {
+ qWarning() << "codec context is not initialized!";
+ return AVERROR(EINVAL);
+ }
+
if (!frame)
- return avcodec_send_frame(d->codecContext, frame);
+ return avcodec_send_frame(d->codecContext, frame.get());
auto pts = frame->pts;
if (d->downloadFromHW) {
- auto *f = av_frame_alloc();
+ auto f = makeAVFrame();
+
f->format = d->sourceSWFormat;
- int err = av_hwframe_transfer_data(f, frame, 0);
+ int err = av_hwframe_transfer_data(f.get(), frame.get(), 0);
if (err < 0) {
qCDebug(qLcVideoFrameEncoder) << "Error transferring frame data to surface." << err2str(err);
return err;
}
- av_frame_free(&frame);
- frame = f;
+
+ frame = std::move(f);
}
if (d->converter) {
- auto *f = av_frame_alloc();
+ auto f = makeAVFrame();
+
f->format = d->targetSWFormat;
f->width = d->settings.videoResolution().width();
f->height = d->settings.videoResolution().height();
- av_frame_get_buffer(f, 0);
+ av_frame_get_buffer(f.get(), 0);
sws_scale(d->converter, frame->data, frame->linesize, 0, f->height, f->data, f->linesize);
- av_frame_free(&frame);
- frame = f;
+ frame = std::move(f);
}
if (d->uploadToHW) {
auto *hwFramesContext = d->accel->hwFramesContextAsBuffer();
Q_ASSERT(hwFramesContext);
- auto *f = av_frame_alloc();
+ auto f = makeAVFrame();
+
if (!f)
return AVERROR(ENOMEM);
- int err = av_hwframe_get_buffer(hwFramesContext, f, 0);
+ int err = av_hwframe_get_buffer(hwFramesContext, f.get(), 0);
if (err < 0) {
qCDebug(qLcVideoFrameEncoder) << "Error getting HW buffer" << err2str(err);
return err;
@@ -335,20 +343,17 @@ int VideoFrameEncoder::sendFrame(AVFrame *frame)
qCDebug(qLcVideoFrameEncoder) << "no hw frames context";
return AVERROR(ENOMEM);
}
- err = av_hwframe_transfer_data(f, frame, 0);
+ err = av_hwframe_transfer_data(f.get(), frame.get(), 0);
if (err < 0) {
qCDebug(qLcVideoFrameEncoder) << "Error transferring frame data to surface." << err2str(err);
return err;
}
- av_frame_free(&frame);
- frame = f;
+ frame = std::move(f);
}
qCDebug(qLcVideoFrameEncoder) << "sending frame" << pts;
frame->pts = pts;
- int ret = avcodec_send_frame(d->codecContext, frame);
- av_frame_free(&frame);
- return ret;
+ return avcodec_send_frame(d->codecContext, frame.get());
}
AVPacket *VideoFrameEncoder::retrievePacket()
diff --git a/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder_p.h b/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder_p.h
index f71460799..2e4b11ddc 100644
--- a/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder_p.h
+++ b/src/plugins/multimedia/ffmpeg/qffmpegvideoframeencoder_p.h
@@ -64,7 +64,7 @@ public:
qint64 getPts(qint64 ms);
- int sendFrame(AVFrame *frame);
+ int sendFrame(AVFrameUPtr frame);
AVPacket *retrievePacket();
};