// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#ifndef QFFMPEG_P_H
#define QFFMPEG_P_H

#include <qdebug.h>
#include <qstring.h>
#include <optional>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
}

#define QT_FFMPEG_OLD_CHANNEL_LAYOUT (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(59, 24, 100))
#define QT_FFMPEG_HAS_VULKAN \
    (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(58, 91, 100)) // since ffmpeg n4.3
#define QT_FFMPEG_HAS_FRAME_TIME_BASE \
    (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 18, 100)) // since ffmpeg n5.0
#define QT_FFMPEG_HAS_FRAME_DURATION \
    (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 3, 100)) // since ffmpeg n6.0
#define QT_FFMPEG_STREAM_SIDE_DATA_DEPRECATED \
    (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 15, 100)) // since ffmpeg n6.1
#define QT_FFMPEG_SWR_CONST_CH_LAYOUT \
    (LIBSWRESAMPLE_VERSION_INT >= AV_VERSION_INT(4, 9, 100))

QT_BEGIN_NAMESPACE

class QAudioFormat;

namespace QFFmpeg {

// Multiplies a by the rational b with rounding to nearest; returns nullopt for an invalid rational.
inline std::optional<qint64> mul(qint64 a, AVRational b)
{
    return b.den != 0 ? (a * b.num + b.den / 2) / b.den : std::optional<qint64>{};
}

inline std::optional<qreal> mul(qreal a, AVRational b)
{
    return b.den != 0 ? a * qreal(b.num) / qreal(b.den) : std::optional<qreal>{};
}

// Converts a timestamp expressed in the given time base to milliseconds / microseconds.
inline std::optional<qint64> timeStampMs(qint64 ts, AVRational base)
{
    return mul(1'000 * ts, base);
}

inline std::optional<qint64> timeStampUs(qint64 ts, AVRational base)
{
    return mul(1'000'000 * ts, base);
}

inline std::optional<float> toFloat(AVRational r)
{
    return r.den != 0 ? float(r.num) / float(r.den) : std::optional<float>{};
}

// Converts an FFmpeg error code to a human-readable string.
inline QString err2str(int errnum)
{
    char buffer[AV_ERROR_MAX_STRING_SIZE + 1] = {};
    av_make_error_string(buffer, AV_ERROR_MAX_STRING_SIZE, errnum);
    return QString::fromLocal8Bit(buffer);
}

inline void setAVFrameTime(AVFrame &frame, int64_t pts, const AVRational &timeBase)
{
    frame.pts = pts;
#if QT_FFMPEG_HAS_FRAME_TIME_BASE
    frame.time_base = timeBase;
#else
    Q_UNUSED(timeBase);
#endif
}

inline void getAVFrameTime(const AVFrame &frame, int64_t &pts, AVRational &timeBase)
{
    pts = frame.pts;
#if QT_FFMPEG_HAS_FRAME_TIME_BASE
    timeBase = frame.time_base;
#else
    timeBase = { 0, 1 };
#endif
}

inline int64_t getAVFrameDuration(const AVFrame &frame)
{
#if QT_FFMPEG_HAS_FRAME_DURATION
    return frame.duration;
#else
    Q_UNUSED(frame);
    return 0;
#endif
}

// Owns an AVDictionary of options and frees it on destruction.
struct AVDictionaryHolder
{
    AVDictionary *opts = nullptr;

    operator AVDictionary **() { return &opts; }

    ~AVDictionaryHolder()
    {
        if (opts)
            av_dict_free(&opts);
    }
};

// Generic deleter for FFmpeg types whose free functions take a pointer-to-pointer.
template<typename FunctionType, FunctionType F>
struct AVDeleter
{
    template<typename T>
    void operator()(T *object) const
    {
        if (object)
            F(&object);
    }
};

using AVFrameUPtr = std::unique_ptr<AVFrame, AVDeleter<decltype(&av_frame_free), &av_frame_free>>;

inline AVFrameUPtr makeAVFrame()
{
    return AVFrameUPtr(av_frame_alloc());
}

using AVPacketUPtr =
        std::unique_ptr<AVPacket, AVDeleter<decltype(&av_packet_free), &av_packet_free>>;

using AVCodecContextUPtr = std::unique_ptr<
        AVCodecContext, AVDeleter<decltype(&avcodec_free_context), &avcodec_free_context>>;

using AVBufferUPtr =
        std::unique_ptr<AVBufferRef, AVDeleter<decltype(&av_buffer_unref), &av_buffer_unref>>;

using AVHWFramesConstraintsUPtr = std::unique_ptr<
        AVHWFramesConstraints,
        AVDeleter<decltype(&av_hwframe_constraints_free), &av_hwframe_constraints_free>>;

using SwrContextUPtr = std::unique_ptr<SwrContext, AVDeleter<decltype(&swr_free), &swr_free>>;

using PixelOrSampleFormat = int;
using AVScore = int;
constexpr AVScore BestAVScore = std::numeric_limits<AVScore>::max();
constexpr AVScore DefaultAVScore = 0;
constexpr AVScore NotSuitableAVScore = std::numeric_limits<AVScore>::min();
constexpr AVScore MinAVScore = NotSuitableAVScore + 1;

const AVCodec *findAVDecoder(AVCodecID codecId,
                             const std::optional<AVHWDeviceType> &deviceType = {},
                             const std::optional<PixelOrSampleFormat> &format = {});

const AVCodec *findAVEncoder(AVCodecID codecId,
                             const std::optional<AVHWDeviceType> &deviceType = {},
                             const std::optional<PixelOrSampleFormat> &format = {});

const AVCodec *findAVEncoder(AVCodecID codecId,
                             const std::function<AVScore(const AVCodec *)> &scoresGetter);
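// Illustrative usage sketch (not part of the original header): how a caller might
// combine the lookup helpers above, preferring a decoder usable with a given hardware
// device type and falling back to any decoder. The function name and fallback policy
// below are assumptions for demonstration only.
//
//     const AVCodec *pickDecoder(AVCodecID codecId, std::optional<AVHWDeviceType> hwType)
//     {
//         if (hwType) {
//             if (const AVCodec *codec = QFFmpeg::findAVDecoder(codecId, *hwType))
//                 return codec; // decoder suitable for the requested hw device
//         }
//         return QFFmpeg::findAVDecoder(codecId); // any (typically software) decoder
//     }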
bool isAVFormatSupported(const AVCodec *codec, PixelOrSampleFormat format);

template<typename Format>
bool hasAVFormat(const Format *fmts, Format format)
{
    return findAVFormat(fmts, [format](Format f) { return f == format; }) != Format(-1);
}

template<typename Format, typename Predicate>
Format findAVFormat(const Format *fmts, const Predicate &predicate)
{
    auto scoresGetter = [&predicate](Format fmt) {
        return predicate(fmt) ? BestAVScore : NotSuitableAVScore;
    };
    return findBestAVFormat(fmts, scoresGetter).first;
}

// Scans a -1 terminated format list and returns the best-scoring entry with its score.
template<typename Format, typename CalculateScore>
std::pair<Format, AVScore> findBestAVFormat(const Format *fmts,
                                            const CalculateScore &calculateScore)
{
    std::pair result(Format(-1), NotSuitableAVScore);
    if (fmts) {
        for (; *fmts != -1 && result.second != BestAVScore; ++fmts) {
            const auto score = calculateScore(*fmts);
            if (score > result.second)
                result = std::pair(*fmts, score);
        }
    }

    return result;
}

bool isHwPixelFormat(AVPixelFormat format);

inline bool isSwPixelFormat(AVPixelFormat format)
{
    return !isHwPixelFormat(format);
}

AVPixelFormat pixelFormatForHwDevice(AVHWDeviceType deviceType);

const AVPacketSideData *streamSideData(const AVStream *stream, AVPacketSideDataType type);

struct ResampleAudioFormat
{
    ResampleAudioFormat(const AVCodecParameters *codecPar);
    ResampleAudioFormat(const QAudioFormat &audioFormat);

#if QT_FFMPEG_OLD_CHANNEL_LAYOUT
    uint64_t channelLayoutMask;
#else
    AVChannelLayout channelLayout;
#endif
    AVSampleFormat sampleFormat;
    int sampleRate;
};

SwrContextUPtr createResampleContext(const ResampleAudioFormat &inputFormat,
                                     const ResampleAudioFormat &outputFormat);

#ifdef Q_OS_DARWIN
bool isCVFormatSupported(uint32_t format);

std::string cvFormatToString(uint32_t format);
#endif

} // namespace QFFmpeg

QDebug operator<<(QDebug, const AVRational &);

QT_END_NAMESPACE

#endif