path: root/src/plugins/multimedia/ffmpeg/qffmpeg_p.h
// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
#ifndef QFFMPEG_P_H
#define QFFMPEG_P_H

#include <private/qtmultimediaglobal_p.h>
#include <qstring.h>

#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
}

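// Feature checks for the FFmpeg versions supported by this plugin; each macro
// notes the upstream release that introduced (or deprecated) the API it gates.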
#define QT_FFMPEG_OLD_CHANNEL_LAYOUT (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(59,24,100))
#define QT_FFMPEG_HAS_VULKAN \
  (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(58, 91, 100)) // since ffmpeg n4.3
#define QT_FFMPEG_HAS_FRAME_TIME_BASE \
  (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 18, 100)) // since ffmpeg n5.0
#define QT_FFMPEG_HAS_FRAME_DURATION \
  (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 3, 100)) // since ffmpeg n6.0
#define QT_FFMPEG_STREAM_SIDE_DATA_DEPRECATED \
  (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 15, 100)) // since ffmpeg n6.1

QT_BEGIN_NAMESPACE

class QAudioFormat;
class QDebug;

namespace QFFmpeg
{

inline std::optional<qint64> mul(qint64 a, AVRational b)
{
    return b.den != 0 ? (a * b.num + b.den / 2) / b.den : std::optional<qint64>{};
}

inline std::optional<qreal> mul(qreal a, AVRational b)
{
    return b.den != 0 ? a * qreal(b.num) / qreal(b.den) : std::optional<qreal>{};
}

inline std::optional<qint64> timeStampMs(qint64 ts, AVRational base)
{
    return mul(1'000 * ts, base);
}

inline std::optional<qint64> timeStampUs(qint64 ts, AVRational base)
{
    return mul(1'000'000 * ts, base);
}

inline std::optional<float> toFloat(AVRational r)
{
    return r.den != 0 ? float(r.num) / float(r.den) : std::optional<float>{};
}
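
// The rational helpers above return std::nullopt when the AVRational has a zero
// denominator. Usage sketch (illustrative only; 'packet' and 'stream' are assumed
// to come from the caller): convert a packet timestamp, expressed in the stream's
// time base, to milliseconds.
//
//     if (const auto ms = timeStampMs(packet->pts, stream->time_base))
//         qDebug() << "pts:" << *ms << "ms";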

inline QString err2str(int errnum)
{
    char buffer[AV_ERROR_MAX_STRING_SIZE + 1] = {};
    av_make_error_string(buffer, AV_ERROR_MAX_STRING_SIZE, errnum);
    return QString::fromLocal8Bit(buffer);
}
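
// Example (sketch only; 'context' and 'url' are assumed to exist in the caller):
//
//     const int ret = avformat_open_input(&context, url, nullptr, nullptr);
//     if (ret < 0)
//         qWarning() << "Could not open input:" << err2str(ret);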

inline void setAVFrameTime(AVFrame &frame, int64_t pts, const AVRational &timeBase)
{
    frame.pts = pts;
#if QT_FFMPEG_HAS_FRAME_TIME_BASE
    frame.time_base = timeBase;
#else
    Q_UNUSED(timeBase);
#endif
}

inline void getAVFrameTime(const AVFrame &frame, int64_t &pts, AVRational &timeBase)
{
    pts = frame.pts;
#if QT_FFMPEG_HAS_FRAME_TIME_BASE
    timeBase = frame.time_base;
#else
    timeBase = { 0, 1 };
#endif
}

inline int64_t getAVFrameDuration(const AVFrame &frame)
{
#if QT_FFMPEG_HAS_FRAME_DURATION
    return frame.duration;
#else
    Q_UNUSED(frame);
    return 0;
#endif
}
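
// Usage sketch (illustrative only; 'encoderContext', 'frame' and 'nextPts' are
// assumed to be set up by the caller): stamp a frame before sending it to an
// encoder, so the time base travels with the frame on FFmpeg >= 5.0.
//
//     setAVFrameTime(*frame, nextPts, encoderContext->time_base);
//     avcodec_send_frame(encoderContext, frame);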

struct AVDictionaryHolder
{
    AVDictionary *opts = nullptr;

    operator AVDictionary **() { return &opts; }

    ~AVDictionaryHolder()
    {
        if (opts)
            av_dict_free(&opts);
    }
};
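
// Usage sketch (illustrative only; 'formatContext' and 'url' are assumed): the
// holder converts to AVDictionary **, so it can be passed directly to FFmpeg
// calls that consume an options dictionary; whatever FFmpeg leaves behind is
// freed by the destructor.
//
//     AVDictionaryHolder opts;
//     av_dict_set(opts, "rtsp_transport", "tcp", 0);
//     avformat_open_input(&formatContext, url, nullptr, opts);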

template<typename FunctionType, FunctionType F>
struct AVDeleter
{
    template<typename T>
    void operator()(T *object) const
    {
        if (object)
            F(&object);
    }
};

using AVFrameUPtr = std::unique_ptr<AVFrame, AVDeleter<decltype(&av_frame_free), &av_frame_free>>;

inline AVFrameUPtr makeAVFrame()
{
    return AVFrameUPtr(av_frame_alloc());
}

using AVPacketUPtr =
        std::unique_ptr<AVPacket, AVDeleter<decltype(&av_packet_free), &av_packet_free>>;

using AVCodecContextUPtr =
        std::unique_ptr<AVCodecContext,
                        AVDeleter<decltype(&avcodec_free_context), &avcodec_free_context>>;

using AVBufferUPtr =
        std::unique_ptr<AVBufferRef, AVDeleter<decltype(&av_buffer_unref), &av_buffer_unref>>;

using AVHWFramesConstraintsUPtr = std::unique_ptr<
        AVHWFramesConstraints,
        AVDeleter<decltype(&av_hwframe_constraints_free), &av_hwframe_constraints_free>>;

using SwrContextUPtr = std::unique_ptr<SwrContext, AVDeleter<decltype(&swr_free), &swr_free>>;
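
// Usage sketch: each deleter calls the matching FFmpeg free function, which takes
// a pointer-to-pointer, so wrapped objects are released automatically.
//
//     AVFrameUPtr frame = makeAVFrame();       // freed via av_frame_free()
//     AVPacketUPtr packet(av_packet_alloc());  // freed via av_packet_free()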

using PixelOrSampleFormat = int;
using AVScore = int;
constexpr AVScore BestAVScore = std::numeric_limits<AVScore>::max();
constexpr AVScore DefaultAVScore = 0;
constexpr AVScore NotSuitableAVScore = std::numeric_limits<AVScore>::min();
constexpr AVScore MinAVScore = NotSuitableAVScore + 1;

const AVCodec *findAVDecoder(AVCodecID codecId,
                             const std::optional<AVHWDeviceType> &deviceType = {},
                             const std::optional<PixelOrSampleFormat> &format = {});

const AVCodec *findAVEncoder(AVCodecID codecId,
                             const std::optional<AVHWDeviceType> &deviceType = {},
                             const std::optional<PixelOrSampleFormat> &format = {});

const AVCodec *findAVEncoder(AVCodecID codecId,
                             const std::function<AVScore(const AVCodec *)> &scoresGetter);
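
// Example (sketch only; the scoring policy is purely illustrative): pick an H.264
// encoder, preferring implementations that advertise hardware support.
//
//     const AVCodec *codec = findAVEncoder(AV_CODEC_ID_H264, [](const AVCodec *c) {
//         return (c->capabilities & AV_CODEC_CAP_HARDWARE) ? BestAVScore : DefaultAVScore;
//     });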

bool isAVFormatSupported(const AVCodec *codec, PixelOrSampleFormat format);

template<typename Format>
bool hasAVFormat(const Format *fmts, Format format)
{
    return findAVFormat(fmts, [format](Format f) { return f == format; }) != Format(-1);
}

template<typename Format, typename Predicate>
Format findAVFormat(const Format *fmts, const Predicate &predicate)
{
    auto scoresGetter = [&predicate](Format fmt) {
        return predicate(fmt) ? BestAVScore : NotSuitableAVScore;
    };
    return findBestAVFormat(fmts, scoresGetter).first;
}

template<typename Format, typename CalculateScore>
std::pair<Format, AVScore> findBestAVFormat(const Format *fmts,
                                            const CalculateScore &calculateScore)
{
    std::pair result(Format(-1), NotSuitableAVScore);
    if (fmts) {
        for (; *fmts != -1 && result.second != BestAVScore; ++fmts) {
            const auto score = calculateScore(*fmts);
            if (score > result.second)
                result = std::pair(*fmts, score);
        }
    }

    return result;
}
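
// Example (sketch): AVCodec::pix_fmts is terminated by AV_PIX_FMT_NONE (-1), which
// matches the Format(-1) sentinel used above, so checking whether a codec supports
// a given pixel format is a one-liner.
//
//     const bool supportsNV12 = hasAVFormat(codec->pix_fmts, AV_PIX_FMT_NV12);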

bool isHwPixelFormat(AVPixelFormat format);

inline bool isSwPixelFormat(AVPixelFormat format)
{
    return !isHwPixelFormat(format);
}

AVPixelFormat pixelFormatForHwDevice(AVHWDeviceType deviceType);

const AVPacketSideData *streamSideData(const AVStream *stream, AVPacketSideDataType type);

struct ResampleAudioFormat
{
    ResampleAudioFormat(const AVCodecParameters *codecPar);

    ResampleAudioFormat(const QAudioFormat &audioFormat);

#if QT_FFMPEG_OLD_CHANNEL_LAYOUT
    uint64_t channelLayoutMask;
#else
    AVChannelLayout channelLayout;
#endif
    AVSampleFormat sampleFormat;
    int sampleRate;
};

SwrContextUPtr createResampleContext(const ResampleAudioFormat &inputFormat,
                                     const ResampleAudioFormat &outputFormat);
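
// Usage sketch (illustrative only; 'codecpar' is the stream's AVCodecParameters
// and 'audioFormat' the QAudioFormat of the sink):
//
//     SwrContextUPtr resampler = createResampleContext(ResampleAudioFormat(codecpar),
//                                                      ResampleAudioFormat(audioFormat));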

#ifdef Q_OS_DARWIN
bool isCVFormatSupported(uint32_t format);

std::string cvFormatToString(uint32_t format);

#endif
} // namespace QFFmpeg

QDebug operator<<(QDebug, const AVRational &);

QT_END_NAMESPACE

#endif // QFFMPEG_P_H