From dc2fc4b8977c3971063ce14430dd4f845a53fac0 Mon Sep 17 00:00:00 2001
From: 浮生若梦 <1070753498@qq.com>
Date: Thu, 23 Nov 2023 18:43:19 +0800
Subject: [PATCH] Clean up the filter code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .clang-tidy                          |  27 +-----
 examples/player/colorspacedialog.cc  |  10 +-
 examples/player/colorspacedialog.hpp |   4 +-
 ffmpeg/audiodecoder.cpp              |   2 +-
 ffmpeg/audioframeconverter.cpp       |   2 +-
 ffmpeg/avcontextinfo.cpp             |  19 ++--
 ffmpeg/codeccontext.cpp              |  76 ++++++++-------
 ffmpeg/colorutils.cc                 |  19 ++++
 ffmpeg/colorutils.hpp                |  23 +++++
 ffmpeg/ffmpegutils.cc                |  21 ++--
 ffmpeg/ffmpegutils.hpp               |   2 +
 ffmpeg/filter/filter.cc              |  74 +++++++++------
 ffmpeg/filter/filter.hpp             |  13 ++-
 ffmpeg/filter/filtercontext.cc       |   1 +
 ffmpeg/frame.cc                      |  22 ++++-
 ffmpeg/frame.hpp                     |   2 +
 ffmpeg/player.cpp                    |  22 ++---
 ffmpeg/player.h                      |   4 +-
 ffmpeg/subtitledecoder.cpp           |   2 +-
 ffmpeg/subtitledecoder.h             |   4 +-
 ffmpeg/subtitledisplay.cc            |   2 +-
 ffmpeg/subtitledisplay.hpp           |   2 +-
 ffmpeg/videodecoder.cpp              |   2 +-
 ffmpeg/videodecoder.h                |   4 +-
 ffmpeg/videodisplay.cc               |   2 +-
 ffmpeg/videodisplay.hpp              |   4 +-
 ffmpeg/videorender/openglshader.cc   |   4 +-
 ffmpeg/videorender/videorender.cc    |   8 --
 ffmpeg/videorender/videorender.hpp   |  29 ++----
 ffmpeg/videorender/widgetrender.cc   | 137 +++++++++++++++++++++++----
 ffmpeg/videorender/widgetrender.hpp  |   2 +-
 31 files changed, 347 insertions(+), 198 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index fbfab1e..d3a3536 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,30 +1,7 @@
 ---
-Checks: '-*,clang-analyzer-*,readability-*,performance-*,modernize-*,bugprone-*,cert-,cppcoreguidelines-,portability-*,llvm-*,google-*'
+Checks: 'clang-analyzer-*,readability-*,performance-*,modernize-*,bugprone-*,cert-*,portability-*,llvm-*,google-*'
 WarningsAsErrors: ''
 HeaderFilterRegex: '.'
AnalyzeTemporaryDtors: false -FormatStyle: file +FormatStyle: none User: user -CheckOptions: - # 类名改为大驼峰 - - key: readability-identifier-naming.ClassCase - value: CamelCase - # 函数名改为小驼峰 - - key: readability-identifier-naming.FunctionCase - value: camelBack - # 变量名改为小驼峰 - - key: readability-identifier-naming.VariableCase - value: camelBack - # 关闭一些过于严苛或者不适合的检查 - - key: readability-braces-around-statements.ShortStatementLines - value: '0' - - key: readability-magic-numbers.IgnorePowersOf2IntegerLiterals - value: '1' - - key: modernize-use-trailing-return-type.UseEastWestConst - value: '1' - - key: readability-identifier-naming.ClassMemberPrefix - value: 'm_' - - key: readability-identifier-naming.ConstexprVariablePrefix - value: 'k' - - key: readability-identifier-naming.GlobalVariablePrefix - value: 'g_' diff --git a/examples/player/colorspacedialog.cc b/examples/player/colorspacedialog.cc index b4fb357..b9b36d2 100644 --- a/examples/player/colorspacedialog.cc +++ b/examples/player/colorspacedialog.cc @@ -23,7 +23,7 @@ class ColorSpaceDialog::ColorSpaceDialogPrivate explicit ColorSpaceDialogPrivate(ColorSpaceDialog *q) : q_ptr(q) { - Ffmpeg::ColorSpaceTrc colorTrc; + Ffmpeg::ColorUtils::ColorSpaceTrc colorTrc; contrastSlider = new Slider(q_ptr); contrastSlider->setRange(colorTrc.contrast_min * multiple, colorTrc.contrast_max * multiple); @@ -96,7 +96,7 @@ ColorSpaceDialog::ColorSpaceDialog(QWidget *parent) ColorSpaceDialog::~ColorSpaceDialog() = default; -void ColorSpaceDialog::setColorSpace(const Ffmpeg::ColorSpaceTrc &colorTrc) +void ColorSpaceDialog::setColorSpace(const Ffmpeg::ColorUtils::ColorSpaceTrc &colorTrc) { setBlockValue(d_ptr->contrastSpinBox, colorTrc.contrast * d_ptr->multiple); setBlockValue(d_ptr->saturationSpinBox, colorTrc.saturation * d_ptr->multiple); @@ -106,9 +106,9 @@ void ColorSpaceDialog::setColorSpace(const Ffmpeg::ColorSpaceTrc &colorTrc) setBlockValue(d_ptr->brightnessSlider, colorTrc.brightness * d_ptr->multiple); } -Ffmpeg::ColorSpaceTrc ColorSpaceDialog::colorSpace() const +Ffmpeg::ColorUtils::ColorSpaceTrc ColorSpaceDialog::colorSpace() const { - Ffmpeg::ColorSpaceTrc colorTrc; + Ffmpeg::ColorUtils::ColorSpaceTrc colorTrc; colorTrc.contrast = d_ptr->contrastSlider->value() / d_ptr->multiple; colorTrc.saturation = d_ptr->saturationSlider->value() / d_ptr->multiple; colorTrc.brightness = d_ptr->brightnessSlider->value() / d_ptr->multiple; @@ -153,7 +153,7 @@ void ColorSpaceDialog::onBrightnessSpinBoxChanged(int value) void ColorSpaceDialog::onReset() { - Ffmpeg::ColorSpaceTrc colorTrc; + Ffmpeg::ColorUtils::ColorSpaceTrc colorTrc; setBlockValue(d_ptr->contrastSlider, colorTrc.contrast_default * d_ptr->multiple); setBlockValue(d_ptr->saturationSlider, colorTrc.saturation_default * d_ptr->multiple); setBlockValue(d_ptr->brightnessSlider, colorTrc.brightness_default * d_ptr->multiple); diff --git a/examples/player/colorspacedialog.hpp b/examples/player/colorspacedialog.hpp index b9b8a81..2c923a9 100644 --- a/examples/player/colorspacedialog.hpp +++ b/examples/player/colorspacedialog.hpp @@ -12,8 +12,8 @@ class ColorSpaceDialog : public QDialog explicit ColorSpaceDialog(QWidget *parent = nullptr); ~ColorSpaceDialog() override; - void setColorSpace(const Ffmpeg::ColorSpaceTrc &colorTrc); - [[nodiscard]] auto colorSpace() const -> Ffmpeg::ColorSpaceTrc; + void setColorSpace(const Ffmpeg::ColorUtils::ColorSpaceTrc &colorTrc); + [[nodiscard]] auto colorSpace() const -> Ffmpeg::ColorUtils::ColorSpaceTrc; signals: void colorSpaceChanged(); diff --git 
a/ffmpeg/audiodecoder.cpp b/ffmpeg/audiodecoder.cpp index b08b040..30114c9 100644 --- a/ffmpeg/audiodecoder.cpp +++ b/ffmpeg/audiodecoder.cpp @@ -25,7 +25,7 @@ class AudioDecoder::AudioDecoderPrivate switch (eventPtr->type()) { case Event::EventType::Pause: decoderAudioFrame->addEvent(eventPtr); break; case Event::EventType::Seek: { - auto seekEvent = static_cast(eventPtr.data()); + auto *seekEvent = static_cast(eventPtr.data()); seekEvent->countDown(); q_ptr->clear(); decoderAudioFrame->addEvent(eventPtr); diff --git a/ffmpeg/audioframeconverter.cpp b/ffmpeg/audioframeconverter.cpp index a914955..80c99ec 100644 --- a/ffmpeg/audioframeconverter.cpp +++ b/ffmpeg/audioframeconverter.cpp @@ -152,7 +152,7 @@ AudioFrameConverter::~AudioFrameConverter() auto AudioFrameConverter::convert(Frame *frame) -> QByteArray { - auto avFrame = frame->avFrame(); + auto *avFrame = frame->avFrame(); auto nb_samples = avFrame->nb_samples; auto out_count = (int64_t) nb_samples * d_ptr->format.sampleRate() / avFrame->sample_rate + 256; // 256 copy from ffplay diff --git a/ffmpeg/avcontextinfo.cpp b/ffmpeg/avcontextinfo.cpp index e22236a..a48483a 100644 --- a/ffmpeg/avcontextinfo.cpp +++ b/ffmpeg/avcontextinfo.cpp @@ -21,13 +21,13 @@ namespace Ffmpeg { class AVContextInfo::AVContextInfoPrivate { public: - AVContextInfoPrivate(AVContextInfo *q) + explicit AVContextInfoPrivate(AVContextInfo *q) : q_ptr(q) {} void printCodecpar() { - auto codecpar = stream->codecpar; + auto *codecpar = stream->codecpar; qInfo() << "start_time: " << stream->start_time; qInfo() << "duration: " << stream->duration; qInfo() << "nb_frames: " << stream->nb_frames; @@ -123,8 +123,8 @@ auto AVContextInfo::initDecoder(const AVRational &frameRate) -> bool { Q_ASSERT(d_ptr->stream != nullptr); const char *typeStr = av_get_media_type_string(d_ptr->stream->codecpar->codec_type); - auto codec = avcodec_find_decoder(d_ptr->stream->codecpar->codec_id); - if (!codec) { + const auto *codec = avcodec_find_decoder(d_ptr->stream->codecpar->codec_id); + if (codec == nullptr) { qWarning() << tr("%1 Codec not found.").arg(typeStr); return false; } @@ -132,7 +132,7 @@ auto AVContextInfo::initDecoder(const AVRational &frameRate) -> bool if (!d_ptr->codecCtx->setParameters(d_ptr->stream->codecpar)) { return false; } - auto avCodecCtx = d_ptr->codecCtx->avCodecCtx(); + auto *avCodecCtx = d_ptr->codecCtx->avCodecCtx(); avCodecCtx->pkt_timebase = d_ptr->stream->time_base; d_ptr->codecCtx->setThreadCount(4); if (d_ptr->stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { @@ -146,8 +146,8 @@ auto AVContextInfo::initDecoder(const AVRational &frameRate) -> bool auto AVContextInfo::initEncoder(AVCodecID codecId) -> bool { - auto encodec = avcodec_find_encoder(codecId); - if (!encodec) { + const auto *encodec = avcodec_find_encoder(codecId); + if (encodec == nullptr) { qWarning() << tr("%1 Encoder not found.").arg(avcodec_get_name(codecId)); return false; } @@ -157,8 +157,8 @@ auto AVContextInfo::initEncoder(AVCodecID codecId) -> bool auto AVContextInfo::initEncoder(const QString &name) -> bool { - auto encodec = avcodec_find_encoder_by_name(name.toLocal8Bit().constData()); - if (!encodec) { + const auto *encodec = avcodec_find_encoder_by_name(name.toLocal8Bit().constData()); + if (encodec == nullptr) { qWarning() << tr("%1 Encoder not found.").arg(name); return false; } @@ -207,6 +207,7 @@ auto AVContextInfo::decodeFrame(const QSharedPointer &packetPtr) } FramePtr framePtr(new Frame); while (d_ptr->codecCtx->receiveFrame(framePtr.data())) { + 
framePtr->avFrame()->time_base = stream()->time_base; if (d_ptr->gpuType == GpuDecode && mediaType() == AVMEDIA_TYPE_VIDEO) { bool ok = false; framePtr = d_ptr->hardWareDecodePtr->transFromGpu(framePtr, ok); diff --git a/ffmpeg/codeccontext.cpp b/ffmpeg/codeccontext.cpp index 4b2c3ed..d8c20cd 100644 --- a/ffmpeg/codeccontext.cpp +++ b/ffmpeg/codeccontext.cpp @@ -17,38 +17,39 @@ namespace Ffmpeg { class CodecContext::CodecContextPrivate { public: - CodecContextPrivate(CodecContext *q) + explicit CodecContextPrivate(CodecContext *q) : q_ptr(q) {} ~CodecContextPrivate() { avcodec_free_context(&codecCtx); } void init() { - auto codec = codecCtx->codec; - if (codec->supported_framerates) { - for (auto framerate = codec->supported_framerates; + const auto *codec = codecCtx->codec; + if (codec->supported_framerates != nullptr) { + for (const auto *framerate = codec->supported_framerates; (*framerate).num != 0 && (*framerate).den != 0; framerate++) { supported_framerates.append(*framerate); } } - if (codec->pix_fmts) { - for (auto pix_fmt = codec->pix_fmts; *pix_fmt != -1; pix_fmt++) { + if (codec->pix_fmts != nullptr) { + for (const auto *pix_fmt = codec->pix_fmts; *pix_fmt != -1; pix_fmt++) { supported_pix_fmts.append(*pix_fmt); } } - if (codec->supported_samplerates) { - for (auto samplerate = codec->supported_samplerates; *samplerate != 0; samplerate++) { + if (codec->supported_samplerates != nullptr) { + for (const auto *samplerate = codec->supported_samplerates; *samplerate != 0; + samplerate++) { supported_samplerates.append(*samplerate); } } - if (codec->sample_fmts) { - for (auto sample_fmt = codec->sample_fmts; *sample_fmt != -1; sample_fmt++) { + if (codec->sample_fmts != nullptr) { + for (const auto *sample_fmt = codec->sample_fmts; *sample_fmt != -1; sample_fmt++) { supported_sample_fmts.append(*sample_fmt); } } - if (codec->channel_layouts) { - for (auto channel_layout = codec->channel_layouts; *channel_layout != 0; + if (codec->channel_layouts != nullptr) { + for (const auto *channel_layout = codec->channel_layouts; *channel_layout != 0; channel_layout++) { supported_channel_layouts.append(*channel_layout); } @@ -75,11 +76,11 @@ CodecContext::CodecContext(const AVCodec *codec, QObject *parent) Q_ASSERT(d_ptr->codecCtx != nullptr); } -CodecContext::~CodecContext() {} +CodecContext::~CodecContext() = default; void CodecContext::copyToCodecParameters(CodecContext *dst) { - auto dstCodecCtx = dst->d_ptr->codecCtx; + auto *dstCodecCtx = dst->d_ptr->codecCtx; // quality dstCodecCtx->bit_rate = d_ptr->codecCtx->bit_rate; dstCodecCtx->rc_max_rate = d_ptr->codecCtx->rc_max_rate; @@ -100,7 +101,7 @@ void CodecContext::copyToCodecParameters(CodecContext *dst) dst->setChannelLayout(d_ptr->codecCtx->channel_layout); //dstCodecCtx->channels = av_get_channel_layout_nb_channels(dstCodecCtx->channel_layout); /* take first format from list of supported formats */ - if (d_ptr->codecCtx->codec->sample_fmts) { + if (d_ptr->codecCtx->codec->sample_fmts != nullptr) { dst->setSampleFmt(d_ptr->codecCtx->codec->sample_fmts[0]); } else { dst->setSampleFmt(d_ptr->codecCtx->sample_fmt); @@ -112,8 +113,9 @@ void CodecContext::copyToCodecParameters(CodecContext *dst) dstCodecCtx->width = d_ptr->codecCtx->width; dstCodecCtx->sample_aspect_ratio = d_ptr->codecCtx->sample_aspect_ratio; /* take first format from list of supported formats */ - dst->setPixfmt(d_ptr->codecCtx->codec->pix_fmts ? 
d_ptr->codecCtx->codec->pix_fmts[0] - : d_ptr->codecCtx->pix_fmt); + dst->setPixfmt(d_ptr->codecCtx->codec->pix_fmts != nullptr + ? d_ptr->codecCtx->codec->pix_fmts[0] + : d_ptr->codecCtx->pix_fmt); /* video time_base can be set to whatever is handy and supported by encoder */ dstCodecCtx->time_base = av_inv_q(d_ptr->codecCtx->framerate); dstCodecCtx->framerate = d_ptr->codecCtx->framerate; @@ -128,12 +130,12 @@ void CodecContext::copyToCodecParameters(CodecContext *dst) } } -AVCodecContext *CodecContext::avCodecCtx() +auto CodecContext::avCodecCtx() -> AVCodecContext * { return d_ptr->codecCtx; } -bool CodecContext::setParameters(const AVCodecParameters *par) +auto CodecContext::setParameters(const AVCodecParameters *par) -> bool { int ret = avcodec_parameters_to_context(d_ptr->codecCtx, par); ERROR_RETURN(ret) @@ -142,9 +144,9 @@ bool CodecContext::setParameters(const AVCodecParameters *par) void CodecContext::setThreadCount(int threadCount) { Q_ASSERT(d_ptr->codecCtx != nullptr); - if (d_ptr->codecCtx->codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) { + if ((d_ptr->codecCtx->codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) != 0) { d_ptr->codecCtx->thread_type = FF_THREAD_FRAME; - } else if (d_ptr->codecCtx->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) { + } else if ((d_ptr->codecCtx->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) != 0) { d_ptr->codecCtx->thread_type = FF_THREAD_SLICE; } d_ptr->codecCtx->thread_count = threadCount; @@ -189,7 +191,7 @@ void CodecContext::setChannelLayout(uint64_t channelLayout) d_ptr->codecCtx->channels = av_get_channel_layout_nb_channels(d_ptr->codecCtx->channel_layout); } -int CodecContext::channels() const +auto CodecContext::channels() const -> int { if (d_ptr->codecCtx->channels <= 0) { d_ptr->codecCtx->channels = av_get_channel_layout_nb_channels( @@ -212,7 +214,7 @@ void CodecContext::setSize(const QSize &size) d_ptr->codecCtx->rc_max_rate = bit_rate; } -QSize CodecContext::size() const +auto CodecContext::size() const -> QSize { return {d_ptr->codecCtx->width, d_ptr->codecCtx->height}; } @@ -226,17 +228,17 @@ void CodecContext::setQuailty(int quailty) d_ptr->codecCtx->global_quality = FF_QP2LAMBDA * quailty; } -QPair CodecContext::quantizer() const +auto CodecContext::quantizer() const -> QPair { return {d_ptr->codecCtx->qmin, d_ptr->codecCtx->qmax}; } -QVector CodecContext::supportPixFmts() const +auto CodecContext::supportPixFmts() const -> QVector { return d_ptr->supported_pix_fmts; } -QVector CodecContext::supportSampleFmts() const +auto CodecContext::supportSampleFmts() const -> QVector { return d_ptr->supported_sample_fmts; } @@ -277,23 +279,27 @@ void CodecContext::setProfile(const QString &profile) av_opt_set(d_ptr->codecCtx->priv_data, "profile", profile.toLocal8Bit().constData(), 0); } -bool CodecContext::open() +auto CodecContext::open() -> bool { Q_ASSERT(d_ptr->codecCtx != nullptr); auto ret = avcodec_open2(d_ptr->codecCtx, nullptr, nullptr); ERROR_RETURN(ret) } -bool CodecContext::sendPacket(Packet *packet) +auto CodecContext::sendPacket(Packet *packet) -> bool { int ret = avcodec_send_packet(d_ptr->codecCtx, packet->avPacket()); ERROR_RETURN(ret) } -bool CodecContext::receiveFrame(Frame *frame) +auto CodecContext::receiveFrame(Frame *frame) -> bool { int ret = avcodec_receive_frame(d_ptr->codecCtx, frame->avFrame()); if (ret >= 0) { + auto *avFrame = frame->avFrame(); + avFrame->sample_aspect_ratio = d_ptr->codecCtx->sample_aspect_ratio; + avFrame->sample_rate = d_ptr->codecCtx->sample_rate; + avFrame->ch_layout = 
d_ptr->codecCtx->ch_layout; return true; } if (ret != -11) { // Resource temporarily unavailable @@ -302,7 +308,7 @@ bool CodecContext::receiveFrame(Frame *frame) return false; } -bool CodecContext::decodeSubtitle2(Subtitle *subtitle, Packet *packet) +auto CodecContext::decodeSubtitle2(Subtitle *subtitle, Packet *packet) -> bool { int got_sub_ptr = 0; int ret = avcodec_decode_subtitle2(d_ptr->codecCtx, @@ -316,7 +322,7 @@ bool CodecContext::decodeSubtitle2(Subtitle *subtitle, Packet *packet) return true; } -bool CodecContext::sendFrame(Frame *frame) +auto CodecContext::sendFrame(Frame *frame) -> bool { auto ret = avcodec_send_frame(d_ptr->codecCtx, frame->avFrame()); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { @@ -325,7 +331,7 @@ bool CodecContext::sendFrame(Frame *frame) ERROR_RETURN(ret) } -bool CodecContext::receivePacket(Packet *packet) +auto CodecContext::receivePacket(Packet *packet) -> bool { auto ret = avcodec_receive_packet(d_ptr->codecCtx, packet->avPacket()); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { @@ -334,12 +340,12 @@ bool CodecContext::receivePacket(Packet *packet) ERROR_RETURN(ret) } -QString CodecContext::mediaTypeString() const +auto CodecContext::mediaTypeString() const -> QString { return av_get_media_type_string(d_ptr->codecCtx->codec_type); } -bool CodecContext::isDecoder() const +auto CodecContext::isDecoder() const -> bool { return av_codec_is_decoder(d_ptr->codecCtx->codec) != 0; } @@ -349,7 +355,7 @@ void CodecContext::flush() avcodec_flush_buffers(d_ptr->codecCtx); } -const AVCodec *CodecContext::codec() +auto CodecContext::codec() -> const AVCodec * { return d_ptr->codecCtx->codec; } diff --git a/ffmpeg/colorutils.cc b/ffmpeg/colorutils.cc index 94b6cfb..d64e3fd 100644 --- a/ffmpeg/colorutils.cc +++ b/ffmpeg/colorutils.cc @@ -309,6 +309,25 @@ auto Primaries::getAVColorPrimaries(Type type) -> AVColorPrimaries return AVCOL_PRI_RESERVED0; } +auto ColorSpaceTrc::operator=(const ColorSpaceTrc &other) -> ColorSpaceTrc & +{ + contrast = other.contrast; + saturation = other.saturation; + brightness = other.brightness; + return *this; +} + +auto ColorSpaceTrc::operator==(const ColorSpaceTrc &other) const -> bool +{ + return contrast == other.contrast && saturation == other.saturation + && brightness == other.brightness; +} + +auto ColorSpaceTrc::operator!=(const ColorSpaceTrc &other) const -> bool +{ + return !(*this == other); +} + } // namespace ColorUtils } // namespace Ffmpeg diff --git a/ffmpeg/colorutils.hpp b/ffmpeg/colorutils.hpp index aa490f7..4b9ef23 100644 --- a/ffmpeg/colorutils.hpp +++ b/ffmpeg/colorutils.hpp @@ -56,6 +56,29 @@ class FFMPEG_EXPORT Primaries : public QObject static auto getAVColorPrimaries(Type type) -> AVColorPrimaries; }; +struct ColorSpaceTrc +{ + auto operator=(const ColorSpaceTrc &other) -> ColorSpaceTrc &; + + auto operator==(const ColorSpaceTrc &other) const -> bool; + auto operator!=(const ColorSpaceTrc &other) const -> bool; + + const float contrast_min = 0.0; + const float contrast_max = 2.0; + const float contrast_default = 1.0; + float contrast = 1.0; + + const float saturation_min = 0.0; + const float saturation_max = 2.0; + const float saturation_default = 1.0; + float saturation = 1.0; + + const float brightness_min = -1.0; + const float brightness_max = 1.0; + const float brightness_default = 0.0; + float brightness = 0.0; +}; + } // namespace ColorUtils } // namespace Ffmpeg diff --git a/ffmpeg/ffmpegutils.cc b/ffmpeg/ffmpegutils.cc index 8527cea..475fd7c 100644 --- a/ffmpeg/ffmpegutils.cc +++ 
b/ffmpeg/ffmpegutils.cc @@ -47,7 +47,7 @@ void calculatePts(Packet *packet, AVContextInfo *contextInfo) // qDebug() << "Packet duration:" << duration << "pts:" << pts << "tb:" << tb.num << tb.den; } -int compare_codec_desc(const void *a, const void *b) +auto compare_codec_desc(const void *a, const void *b) -> int { const AVCodecDescriptor *const *da = (const AVCodecDescriptor *const *) a; const AVCodecDescriptor *const *db = (const AVCodecDescriptor *const *) b; @@ -56,7 +56,7 @@ int compare_codec_desc(const void *a, const void *b) : strcmp((*da)->name, (*db)->name); } -unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs) +auto get_codecs_sorted(const AVCodecDescriptor ***rcodecs) -> unsigned { const AVCodecDescriptor *desc = nullptr; const AVCodecDescriptor **codecs; @@ -79,7 +79,7 @@ unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs) return nb_codecs; } -const AVCodec *next_codec_for_id(enum AVCodecID id, void **iter, bool encoder) +auto next_codec_for_id(enum AVCodecID id, void **iter, bool encoder) -> const AVCodec * { const AVCodec *c = nullptr; while ((c = av_codec_iterate(iter))) { @@ -103,7 +103,7 @@ void printFfmpegInfo() qInfo() << avutil_configuration(); } -QVector getCurrentHWDeviceTypes() +auto getCurrentHWDeviceTypes() -> QVector { static QVector types{}; if (types.isEmpty()) { @@ -123,7 +123,7 @@ QVector getCurrentHWDeviceTypes() return types; } -AVPixelFormat getPixelFormat(const AVCodec *codec, AVHWDeviceType type) +auto getPixelFormat(const AVCodec *codec, AVHWDeviceType type) -> AVPixelFormat { auto hw_pix_fmt = AV_PIX_FMT_NONE; for (int i = 0;; i++) { @@ -144,7 +144,12 @@ AVPixelFormat getPixelFormat(const AVCodec *codec, AVHWDeviceType type) return hw_pix_fmt; } -QVector getFileCodecInfo(const QString &filePath) +auto compareAVRational(const AVRational &a, const AVRational &b) -> bool +{ + return a.den == b.den && a.num == b.num; +} + +auto getFileCodecInfo(const QString &filePath) -> QVector { QVector codecs{}; QScopedPointer formatContextPtr(new FormatContext); @@ -165,7 +170,7 @@ QVector getFileCodecInfo(const QString &filePath) return codecs; } -QPair getCodecQuantizer(const QString &codecname) +auto getCodecQuantizer(const QString &codecname) -> QPair { QScopedPointer contextInfoPtr(new AVContextInfo); if (!contextInfoPtr->initEncoder(codecname)) { @@ -175,7 +180,7 @@ QPair getCodecQuantizer(const QString &codecname) return quantizer; } -QStringList getCurrentSupportCodecs(AVMediaType mediaType, bool encoder) +auto getCurrentSupportCodecs(AVMediaType mediaType, bool encoder) -> QStringList { QStringList codecnames{}; const AVCodecDescriptor **codecs{}; diff --git a/ffmpeg/ffmpegutils.hpp b/ffmpeg/ffmpegutils.hpp index bac8917..3673245 100644 --- a/ffmpeg/ffmpegutils.hpp +++ b/ffmpeg/ffmpegutils.hpp @@ -28,6 +28,8 @@ auto getCurrentHWDeviceTypes() -> QVector; auto getPixelFormat(const AVCodec *codec, AVHWDeviceType type) -> AVPixelFormat; +auto compareAVRational(const AVRational &a, const AVRational &b) -> bool; + struct CodecInfo { AVMediaType mediaType = AVMEDIA_TYPE_UNKNOWN; diff --git a/ffmpeg/filter/filter.cc b/ffmpeg/filter/filter.cc index 0a62f3f..315eb51 100644 --- a/ffmpeg/filter/filter.cc +++ b/ffmpeg/filter/filter.cc @@ -3,14 +3,11 @@ #include "filtergraph.hpp" #include "filterinout.hpp" -#include -#include #include #include extern "C" { -#include #include } @@ -27,40 +24,41 @@ class Filter::FilterPrivate void initVideoFilter(Frame *frame) { - auto *avCodecCtx = decContextInfo->codecCtx()->avCodecCtx(); buffersrcCtx = new 
FilterContext("buffer", q_ptr); buffersinkCtx = new FilterContext("buffersink", q_ptr); - auto timeBase = decContextInfo->timebase(); + auto *avFrame = frame->avFrame(); + auto timeBase = avFrame->time_base; + auto sampleAspectRatio = avFrame->sample_aspect_ratio; auto args = QString::asprintf("video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", - avCodecCtx->width, - avCodecCtx->height, - frame->avFrame()->format, //dec_ctx->pix_fmt, + avFrame->width, + avFrame->height, + avFrame->format, timeBase.num, timeBase.den, - avCodecCtx->sample_aspect_ratio.num, - avCodecCtx->sample_aspect_ratio.den); + sampleAspectRatio.num, + sampleAspectRatio.den); qDebug() << "Video filter in args:" << args; create(args); } - void initAudioFilter() + void initAudioFilter(Frame *frame) { - auto *avCodecCtx = decContextInfo->codecCtx()->avCodecCtx(); buffersrcCtx = new FilterContext("abuffer", q_ptr); buffersinkCtx = new FilterContext("abuffersink", q_ptr); - if (avCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) { - av_channel_layout_default(&avCodecCtx->ch_layout, avCodecCtx->ch_layout.nb_channels); + auto *avFrame = frame->avFrame(); + if (avFrame->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) { + av_channel_layout_default(&avFrame->ch_layout, avFrame->ch_layout.nb_channels); } char buf[64]; - av_channel_layout_describe(&avCodecCtx->ch_layout, buf, sizeof(buf)); + av_channel_layout_describe(&avFrame->ch_layout, buf, sizeof(buf)); auto args = QString::asprintf( "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s" PRIx64, 1, - avCodecCtx->sample_rate, - avCodecCtx->sample_rate, - av_get_sample_fmt_name(avCodecCtx->sample_fmt), + avFrame->sample_rate, + avFrame->sample_rate, + av_get_sample_fmt_name(static_cast(avFrame->format)), buf); qDebug() << "Audio filter in args:" << args; @@ -93,6 +91,8 @@ class Filter::FilterPrivate filterGraph->parse(filterSpec, fliterInPtr.data(), fliterOutPtr.data()); filterGraph->config(); + qDebug() << "Filter config:" << filterSpec; + // if (!(enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) { // buffersink_ctx->buffersink_setFrameSize(enc_ctx->frame_size); // } @@ -100,44 +100,58 @@ class Filter::FilterPrivate Filter *q_ptr; - AVContextInfo *decContextInfo; - FilterContext *buffersrcCtx; - FilterContext *buffersinkCtx; + FilterContext *buffersrcCtx = nullptr; + FilterContext *buffersinkCtx = nullptr; FilterGraph *filterGraph; }; -Filter::Filter(AVContextInfo *decContextInfo, QObject *parent) +Filter::Filter(QObject *parent) : QObject{parent} , d_ptr(new FilterPrivate(this)) -{ - d_ptr->decContextInfo = decContextInfo; -} +{} Filter::~Filter() = default; -auto Filter::init(Frame *frame) -> bool +auto Filter::isInitialized() const -> bool { - switch (d_ptr->decContextInfo->mediaType()) { - case AVMEDIA_TYPE_AUDIO: d_ptr->initAudioFilter(); break; + return d_ptr->buffersrcCtx != nullptr; +} + +auto Filter::init(AVMediaType type, Frame *frame) -> bool +{ + switch (type) { + case AVMEDIA_TYPE_AUDIO: d_ptr->initAudioFilter(frame); break; case AVMEDIA_TYPE_VIDEO: d_ptr->initVideoFilter(frame); break; default: return false; } - return true; } +void Filter::config(const QString &filterSpec) +{ + Q_ASSERT(!filterSpec.isEmpty()); + d_ptr->config(filterSpec); +} + auto Filter::filterFrame(Frame *frame) -> QVector { QVector framepPtrs{}; - if (d_ptr->buffersrcCtx->buffersrcAddFrameFlags(frame)) { + if (!d_ptr->buffersrcCtx->buffersrcAddFrameFlags(frame)) { return framepPtrs; } std::unique_ptr framePtr(new Frame); while 
(d_ptr->buffersinkCtx->buffersinkGetFrame(framePtr.get())) { framePtr->setPictType(AV_PICTURE_TYPE_NONE); framepPtrs.emplace_back(framePtr.release()); + framePtr.reset(new Frame); } return framepPtrs; } +auto Filter::buffersinkCtx() -> FilterContext * +{ + Q_ASSERT(d_ptr->buffersinkCtx); + return d_ptr->buffersinkCtx; +} + } // namespace Ffmpeg diff --git a/ffmpeg/filter/filter.hpp b/ffmpeg/filter/filter.hpp index 530277e..213ccb8 100644 --- a/ffmpeg/filter/filter.hpp +++ b/ffmpeg/filter/filter.hpp @@ -8,17 +8,26 @@ namespace Ffmpeg { class AVContextInfo; class Frame; +class FilterContext; class Filter : public QObject { Q_OBJECT public: - explicit Filter(AVContextInfo *decContextInfo, QObject *parent = nullptr); + explicit Filter(QObject *parent = nullptr); ~Filter() override; - auto init(Frame *frame) -> bool; + [[nodiscard]] auto isInitialized() const -> bool; + + auto init(AVMediaType type, Frame *frame) -> bool; + // default args: + // Video is "null" + // Audio is "anull" + void config(const QString &filterSpec); auto filterFrame(Frame *frame) -> QVector; + auto buffersinkCtx() -> FilterContext *; + private: class FilterPrivate; QScopedPointer d_ptr; diff --git a/ffmpeg/filter/filtercontext.cc b/ffmpeg/filter/filtercontext.cc index 48b1b29..b6452de 100644 --- a/ffmpeg/filter/filtercontext.cc +++ b/ffmpeg/filter/filtercontext.cc @@ -56,6 +56,7 @@ auto FilterContext::create(const QString &name, const QString &args, FilterGraph args.toLocal8Bit().constData(), nullptr, filterGraph->avFilterGraph()); + Q_ASSERT(d_ptr->filterContext != nullptr); ERROR_RETURN(ret) } diff --git a/ffmpeg/frame.cc b/ffmpeg/frame.cc index 418d743..9008cb9 100644 --- a/ffmpeg/frame.cc +++ b/ffmpeg/frame.cc @@ -1,5 +1,6 @@ #include "frame.hpp" #include "averrormanager.hpp" +#include "ffmpegutils.hpp" #include "videoformat.hpp" #include @@ -83,6 +84,21 @@ auto Frame::operator=(Frame &&other) noexcept -> Frame & return *this; } +auto Frame::compareProps(Frame *other) -> bool +{ + Q_ASSERT(other != nullptr); + auto *otherFrame = other->avFrame(); + Q_ASSERT(otherFrame != nullptr); + Q_ASSERT(d_ptr->frame != nullptr); + + return d_ptr->frame->width == otherFrame->width && d_ptr->frame->height == otherFrame->height + && d_ptr->frame->format == otherFrame->format + && compareAVRational(d_ptr->frame->time_base, otherFrame->time_base) + && compareAVRational(d_ptr->frame->sample_aspect_ratio, otherFrame->sample_aspect_ratio) + && d_ptr->frame->sample_rate == otherFrame->sample_rate + && av_channel_layout_compare(&d_ptr->frame->ch_layout, &otherFrame->ch_layout) == 0; +} + void Frame::copyPropsFrom(Frame *src) { Q_ASSERT(src != nullptr); @@ -97,6 +113,10 @@ void Frame::copyPropsFrom(Frame *src) d_ptr->frame->key_frame = srcFrame->key_frame; d_ptr->frame->width = srcFrame->width; d_ptr->frame->height = srcFrame->height; + d_ptr->frame->time_base = srcFrame->time_base; + d_ptr->frame->sample_aspect_ratio = srcFrame->sample_aspect_ratio; + d_ptr->frame->sample_rate = srcFrame->sample_rate; + d_ptr->frame->ch_layout = srcFrame->ch_layout; } auto Frame::imageAlloc(const QSize &size, AVPixelFormat pix_fmt, int align) -> bool @@ -208,7 +228,7 @@ auto Frame::fromQImage(const QImage &image) -> Frame * img.convertTo(QImage::Format_RGBA8888); auto framePtr = std::make_unique(); - auto avFrame = framePtr->avFrame(); + auto *avFrame = framePtr->avFrame(); avFrame->width = img.width(); avFrame->height = img.height(); avFrame->format = AV_PIX_FMT_RGBA; diff --git a/ffmpeg/frame.hpp b/ffmpeg/frame.hpp index aa8b62d..82f1dca 100644 --- 
a/ffmpeg/frame.hpp +++ b/ffmpeg/frame.hpp @@ -25,6 +25,8 @@ class FFMPEG_EXPORT Frame auto operator=(const Frame &other) -> Frame &; auto operator=(Frame &&other) noexcept -> Frame &; + auto compareProps(Frame *other) -> bool; + void copyPropsFrom(Frame *src); auto isKey() -> bool; diff --git a/ffmpeg/player.cpp b/ffmpeg/player.cpp index e35b266..4f29922 100644 --- a/ffmpeg/player.cpp +++ b/ffmpeg/player.cpp @@ -148,7 +148,7 @@ class Player::PlayerPrivate if (image.isNull()) { return; } - for (auto render : videoRenders) { + for (auto *render : videoRenders) { render->setImage(image); } } @@ -305,7 +305,7 @@ class Player::PlayerPrivate void processPauseEvent(const EventPtr &eventPtr) { - auto pauseEvent = dynamic_cast(eventPtr.data()); + auto *pauseEvent = dynamic_cast(eventPtr.data()); if (pauseEvent->paused()) { setMediaState(MediaState::Pausing); } else if (q_ptr->isRunning()) { @@ -341,7 +341,7 @@ class Player::PlayerPrivate if (subtitleInfo->isIndexVaild()) { count++; } - auto seekEvent = dynamic_cast(eventPtr.data()); + auto *seekEvent = dynamic_cast(eventPtr.data()); seekEvent->setWaitCountdown(count); audioDecoder->addEvent(eventPtr); videoDecoder->addEvent(eventPtr); @@ -371,7 +371,7 @@ class Player::PlayerPrivate void processSeekRelativeEvent(const EventPtr &eventPtr) { - auto seekRelativeEvent = dynamic_cast(eventPtr.data()); + auto *seekRelativeEvent = dynamic_cast(eventPtr.data()); auto relativePosition = seekRelativeEvent->relativePosition(); auto position = this->position + relativePosition * AV_TIME_BASE; if (position < 0) { @@ -384,7 +384,7 @@ class Player::PlayerPrivate void processGpuEvent(const EventPtr &eventPtr) { - auto gpuEvent = dynamic_cast(eventPtr.data()); + auto *gpuEvent = dynamic_cast(eventPtr.data()); gpuDecode = gpuEvent->use(); if (filepath.isEmpty()) { return; @@ -420,7 +420,7 @@ class Player::PlayerPrivate position = 0; } - auto selectedMediaTrackEvent = dynamic_cast(eventPtr.data()); + auto *selectedMediaTrackEvent = dynamic_cast(eventPtr.data()); switch (selectedMediaTrackEvent->type()) { case Event::EventType::AudioTarck: meidaIndex.audioindex = selectedMediaTrackEvent->index(); @@ -441,7 +441,7 @@ class Player::PlayerPrivate void processOpenMediaEvent(const EventPtr &eventPtr) { - auto openMediaEvent = dynamic_cast(eventPtr.data()); + auto *openMediaEvent = dynamic_cast(eventPtr.data()); filepath = openMediaEvent->filepath(); q_ptr->onPlay(); } @@ -461,7 +461,7 @@ class Player::PlayerPrivate while (count-- > 0) { qApp->processEvents(); // just for signal finished } - for (auto render : videoRenders) { + for (auto *render : videoRenders) { render->resetAllFrame(); } formatCtx->close(); @@ -469,13 +469,13 @@ class Player::PlayerPrivate void processSpeedEvent(const EventPtr &eventPtr) { - auto speedEvent = dynamic_cast(eventPtr.data()); + auto *speedEvent = dynamic_cast(eventPtr.data()); Clock::setSpeed(speedEvent->speed()); } void processVolumeEvent(const EventPtr &eventPtr) { - auto volumeEvent = dynamic_cast(eventPtr.data()); + auto *volumeEvent = dynamic_cast(eventPtr.data()); audioDecoder->setVolume(volumeEvent->volume()); } @@ -626,7 +626,7 @@ auto Player::subtitleIndex() const -> int return d_ptr->subtitleInfo->index(); } -void Player::setVideoRenders(QVector videoRenders) +void Player::setVideoRenders(const QVector &videoRenders) { d_ptr->videoRenders = videoRenders; d_ptr->videoDecoder->setVideoRenders(videoRenders); diff --git a/ffmpeg/player.h b/ffmpeg/player.h index 1ce8119..b618634 100644 --- a/ffmpeg/player.h +++ b/ffmpeg/player.h @@ -21,7 
+21,7 @@ class FFMPEG_EXPORT Player : public QThread [[nodiscard]] auto filePath() const -> QString &; auto isOpen() -> bool; - auto speed() -> double; + static auto speed() -> double; auto isGpuDecode() -> bool; auto mediaState() -> MediaState; @@ -35,7 +35,7 @@ class FFMPEG_EXPORT Player : public QThread [[nodiscard]] auto videoIndex() const -> int; [[nodiscard]] auto subtitleIndex() const -> int; - void setVideoRenders(QVector videoRenders); + void setVideoRenders(const QVector &videoRenders); auto videoRenders() -> QVector; void setPropertyEventQueueMaxSize(size_t size); diff --git a/ffmpeg/subtitledecoder.cpp b/ffmpeg/subtitledecoder.cpp index 8372ac4..0b1888d 100644 --- a/ffmpeg/subtitledecoder.cpp +++ b/ffmpeg/subtitledecoder.cpp @@ -57,7 +57,7 @@ void SubtitleDecoder::setVideoResolutionRatio(const QSize &size) d_ptr->decoderSubtitleFrame->setVideoResolutionRatio(size); } -void SubtitleDecoder::setVideoRenders(QVector videoRenders) +void SubtitleDecoder::setVideoRenders(const QVector &videoRenders) { d_ptr->decoderSubtitleFrame->setVideoRenders(videoRenders); } diff --git a/ffmpeg/subtitledecoder.h b/ffmpeg/subtitledecoder.h index 86f0f61..d6e332b 100644 --- a/ffmpeg/subtitledecoder.h +++ b/ffmpeg/subtitledecoder.h @@ -12,11 +12,11 @@ class SubtitleDecoder : public Decoder { public: explicit SubtitleDecoder(QObject *parent = nullptr); - ~SubtitleDecoder(); + ~SubtitleDecoder() override; void setVideoResolutionRatio(const QSize &size); - void setVideoRenders(QVector videoRenders); + void setVideoRenders(const QVector &videoRenders); protected: void runDecoder() override; diff --git a/ffmpeg/subtitledisplay.cc b/ffmpeg/subtitledisplay.cc index dd83f1a..32b8567 100644 --- a/ffmpeg/subtitledisplay.cc +++ b/ffmpeg/subtitledisplay.cc @@ -87,7 +87,7 @@ void SubtitleDisplay::setVideoResolutionRatio(const QSize &size) d_ptr->videoResolutionRatio = size; } -void SubtitleDisplay::setVideoRenders(QVector videoRenders) +void SubtitleDisplay::setVideoRenders(const QVector &videoRenders) { QMutexLocker locker(&d_ptr->mutex_render); d_ptr->videoRenders = videoRenders; diff --git a/ffmpeg/subtitledisplay.hpp b/ffmpeg/subtitledisplay.hpp index c30618e..8ae7afc 100644 --- a/ffmpeg/subtitledisplay.hpp +++ b/ffmpeg/subtitledisplay.hpp @@ -17,7 +17,7 @@ class SubtitleDisplay : public Decoder void setVideoResolutionRatio(const QSize &size); - void setVideoRenders(QVector videoRenders); + void setVideoRenders(const QVector &videoRenders); protected: void runDecoder() override; diff --git a/ffmpeg/videodecoder.cpp b/ffmpeg/videodecoder.cpp index c2a0130..9f0de5b 100644 --- a/ffmpeg/videodecoder.cpp +++ b/ffmpeg/videodecoder.cpp @@ -56,7 +56,7 @@ VideoDecoder::~VideoDecoder() stopDecoder(); } -void VideoDecoder::setVideoRenders(QVector videoRenders) +void VideoDecoder::setVideoRenders(const QVector &videoRenders) { d_ptr->decoderVideoFrame->setVideoRenders(videoRenders); } diff --git a/ffmpeg/videodecoder.h b/ffmpeg/videodecoder.h index 2c8afb1..8f9b36f 100644 --- a/ffmpeg/videodecoder.h +++ b/ffmpeg/videodecoder.h @@ -13,9 +13,9 @@ class VideoDecoder : public Decoder Q_OBJECT public: explicit VideoDecoder(QObject *parent = nullptr); - ~VideoDecoder(); + ~VideoDecoder() override; - void setVideoRenders(QVector videoRenders); + void setVideoRenders(const QVector &videoRenders); void setMasterClock(); diff --git a/ffmpeg/videodisplay.cc b/ffmpeg/videodisplay.cc index 77fea2e..62eb335 100644 --- a/ffmpeg/videodisplay.cc +++ b/ffmpeg/videodisplay.cc @@ -73,7 +73,7 @@ VideoDisplay::~VideoDisplay() stopDecoder(); 
} -void VideoDisplay::setVideoRenders(QVector videoRenders) +void VideoDisplay::setVideoRenders(const QVector &videoRenders) { QMutexLocker locker(&d_ptr->mutex_render); d_ptr->videoRenders = videoRenders; diff --git a/ffmpeg/videodisplay.hpp b/ffmpeg/videodisplay.hpp index ee14f4f..c1bd128 100644 --- a/ffmpeg/videodisplay.hpp +++ b/ffmpeg/videodisplay.hpp @@ -13,9 +13,9 @@ class VideoDisplay : public Decoder Q_OBJECT public: explicit VideoDisplay(QObject *parent = nullptr); - ~VideoDisplay(); + ~VideoDisplay() override; - void setVideoRenders(QVector videoRenders); + void setVideoRenders(const QVector &videoRenders); void setMasterClock(); diff --git a/ffmpeg/videorender/openglshader.cc b/ffmpeg/videorender/openglshader.cc index 5331170..0cd2467 100644 --- a/ffmpeg/videorender/openglshader.cc +++ b/ffmpeg/videorender/openglshader.cc @@ -85,8 +85,8 @@ auto OpenglShader::generate(Frame *frame, //ShaderUtils::passOotf(frag, d_ptr->srcHdrMetaData.maxLuma, avFrame->color_trc); // Tone map - if (type == Tonemap::AUTO && ShaderUtils::trcIsHdr(avFrame->color_trc)) { - type = Tonemap::FILMIC; + if (type == Tonemap ::AUTO) { + type = ShaderUtils::trcIsHdr(avFrame->color_trc) ? Tonemap::FILMIC : Tonemap::NONE; } Tonemap::toneMap(header, frag, type); diff --git a/ffmpeg/videorender/videorender.cc b/ffmpeg/videorender/videorender.cc index ab9f3ff..065bbcc 100644 --- a/ffmpeg/videorender/videorender.cc +++ b/ffmpeg/videorender/videorender.cc @@ -13,14 +13,6 @@ extern "C" { namespace Ffmpeg { -ColorSpaceTrc &ColorSpaceTrc::operator=(const ColorSpaceTrc &other) -{ - contrast = other.contrast; - saturation = other.saturation; - brightness = other.brightness; - return *this; -} - class VideoRender::VideoRenderPrivate { public: diff --git a/ffmpeg/videorender/videorender.hpp b/ffmpeg/videorender/videorender.hpp index 3af8590..c66a6dc 100644 --- a/ffmpeg/videorender/videorender.hpp +++ b/ffmpeg/videorender/videorender.hpp @@ -17,26 +17,6 @@ namespace Ffmpeg { class Frame; class Subtitle; -struct ColorSpaceTrc -{ - auto operator=(const ColorSpaceTrc &other) -> ColorSpaceTrc &; - - const float contrast_min = 0.0; - const float contrast_max = 2.0; - const float contrast_default = 1.0; - float contrast = 1.0; - - const float saturation_min = 0.0; - const float saturation_max = 2.0; - const float saturation_default = 1.0; - float saturation = 1.0; - - const float brightness_min = -1.0; - const float brightness_max = 1.0; - const float brightness_default = 0.0; - float brightness = 0.0; -}; - class FFMPEG_EXPORT VideoRender { Q_DISABLE_COPY_MOVE(VideoRender) @@ -53,8 +33,11 @@ class FFMPEG_EXPORT VideoRender void setSubTitleFrame(QSharedPointer framePtr); virtual void resetAllFrame() = 0; - void setColorSpaceTrc(const ColorSpaceTrc &colorTrc) { m_colorSpaceTrc = colorTrc; } - [[nodiscard]] auto colorSpaceTrc() const -> ColorSpaceTrc { return m_colorSpaceTrc; } + void setColorSpaceTrc(const ColorUtils::ColorSpaceTrc &colorTrc) { m_colorSpaceTrc = colorTrc; } + [[nodiscard]] auto colorSpaceTrc() const -> ColorUtils::ColorSpaceTrc + { + return m_colorSpaceTrc; + } virtual void setTonemapType(Tonemap::Type type) { m_tonemapType = type; } [[nodiscard]] auto tonemapType() const -> Tonemap::Type { return m_tonemapType; } @@ -75,7 +58,7 @@ class FFMPEG_EXPORT VideoRender virtual void updateFrame(QSharedPointer frame) = 0; virtual void updateSubTitleFrame(QSharedPointer frame) = 0; - ColorSpaceTrc m_colorSpaceTrc; + ColorUtils::ColorSpaceTrc m_colorSpaceTrc; Tonemap::Type m_tonemapType = Tonemap::Type::AUTO; 
ColorUtils::Primaries::Type m_destPrimaries = ColorUtils::Primaries::AUTO; diff --git a/ffmpeg/videorender/widgetrender.cc b/ffmpeg/videorender/widgetrender.cc index d2a89d8..3e448ef 100644 --- a/ffmpeg/videorender/widgetrender.cc +++ b/ffmpeg/videorender/widgetrender.cc @@ -1,27 +1,89 @@ #include "widgetrender.hpp" +#include #include #include #include #include +#include +#include #include extern "C" { #include +#include } namespace Ffmpeg { +struct FrameParam +{ + FrameParam() = default; + + explicit FrameParam(Frame *frame) + { + auto *avFrame = frame->avFrame(); + size = QSize(avFrame->width, avFrame->height); + format = avFrame->format; + time_base = avFrame->time_base; + sample_aspect_ratio = avFrame->sample_aspect_ratio; + sample_rate = avFrame->sample_rate; + ch_layout = avFrame->ch_layout; + } + + auto operator==(const FrameParam &other) const -> bool + { + return size == other.size && format == other.format + && compareAVRational(time_base, other.time_base) + && compareAVRational(sample_aspect_ratio, other.sample_aspect_ratio) + && sample_rate == other.sample_rate + && av_channel_layout_compare(&ch_layout, &other.ch_layout) == 0; + } + + auto operator!=(const FrameParam &other) const -> bool { return !(*this == other); } + + QSize size; + int format; + AVRational time_base; + AVRational sample_aspect_ratio; + int sample_rate; + AVChannelLayout ch_layout; +}; + class WidgetRender::WidgetRenderPrivate { public: - explicit WidgetRenderPrivate(QWidget *parent) - : owner(parent) + explicit WidgetRenderPrivate(WidgetRender *q) + : q_ptr(q) {} + ~WidgetRenderPrivate() = default; - QWidget *owner; + auto swsScale(const FramePtr &framePtr) -> FramePtr + { + auto dst_pix_fmt = AV_PIX_FMT_RGB32; + auto *avframe = framePtr->avFrame(); + auto size = QSize(avframe->width, avframe->height); + size.scale(q_ptr->size() * q_ptr->devicePixelRatio(), Qt::KeepAspectRatio); + if (frameConverterPtr.isNull()) { + frameConverterPtr.reset(new VideoFrameConverter(framePtr.data(), size, dst_pix_fmt)); + } else { + frameConverterPtr->flush(framePtr.data(), size, dst_pix_fmt); + } + frameConverterPtr->setColorspaceDetails(framePtr.data(), + q_ptr->m_colorSpaceTrc.brightness, + q_ptr->m_colorSpaceTrc.contrast, + q_ptr->m_colorSpaceTrc.saturation); + QSharedPointer frameRgbPtr(new Frame); + frameRgbPtr->imageAlloc(size, dst_pix_fmt); + frameConverterPtr->scale(framePtr.data(), frameRgbPtr.data()); + // qDebug() << frameRgbPtr->avFrame()->width << frameRgbPtr->avFrame()->height + // << frameRgbPtr->avFrame()->format; + return frameRgbPtr; + } + + WidgetRender *q_ptr; QSizeF size; QRectF frameRect; @@ -30,11 +92,14 @@ class WidgetRender::WidgetRenderPrivate //QList supportFormats = VideoFormat::qFormatMaps.keys(); QList supportFormats = {AV_PIX_FMT_RGB32}; QScopedPointer frameConverterPtr; + QScopedPointer filterPtr; QSharedPointer subTitleFramePtr; QImage videoImage; QImage subTitleImage; QColor backgroundColor = Qt::black; + + Tonemap::Type tonemapType; }; WidgetRender::WidgetRender(QWidget *parent) @@ -49,32 +114,62 @@ auto WidgetRender::isSupportedOutput_pix_fmt(AVPixelFormat pix_fmt) -> bool return d_ptr->supportFormats.contains(pix_fmt); } -auto WidgetRender::convertSupported_pix_fmt(QSharedPointer frame) -> QSharedPointer +auto WidgetRender::convertSupported_pix_fmt(QSharedPointer framePtr) -> QSharedPointer { - auto dst_pix_fmt = AV_PIX_FMT_RGB32; - auto *avframe = frame->avFrame(); + static FrameParam lastFrameParam; + FrameParam frameParam(framePtr.data()); + + static QSize lastScaleSize; + auto *avframe 
= framePtr->avFrame(); auto size = QSize(avframe->width, avframe->height); size.scale(this->size() * devicePixelRatio(), Qt::KeepAspectRatio); - if (d_ptr->frameConverterPtr.isNull()) { - d_ptr->frameConverterPtr.reset(new VideoFrameConverter(frame.data(), size, dst_pix_fmt)); - } else { - d_ptr->frameConverterPtr->flush(frame.data(), size, dst_pix_fmt); + + if (d_ptr->framePtr.isNull() || d_ptr->filterPtr.isNull() || lastFrameParam != frameParam + || d_ptr->tonemapType != m_tonemapType || lastScaleSize != size) { + d_ptr->filterPtr.reset(new Filter); + lastFrameParam = frameParam; + lastScaleSize = size; + } + auto isInitialized = d_ptr->filterPtr->isInitialized(); + if (!isInitialized) { + d_ptr->filterPtr->init(AVMEDIA_TYPE_VIDEO, framePtr.data()); + d_ptr->tonemapType = m_tonemapType; + QString tonemap("none"); + // need zimg zscale "tonemap=clip" in filterSpec + switch (d_ptr->tonemapType) { + case Tonemap::Type::CLIP: tonemap = "clip"; break; + case Tonemap::Type::LINEAR: tonemap = "linear"; break; + case Tonemap::Type::GAMMA: tonemap = "gamma"; break; + case Tonemap::Type::REINHARD: tonemap = "reinhard"; break; + case Tonemap::Type::HABLE: tonemap = "hable"; break; + case Tonemap::Type::MOBIUS: tonemap = "mobius"; break; + default: break; + } + auto pix_fmt = AV_PIX_FMT_RGB32; + av_opt_set_bin(d_ptr->filterPtr->buffersinkCtx()->avFilterContext(), + "pix_fmts", + reinterpret_cast(&pix_fmt), + sizeof(pix_fmt), + AV_OPT_SEARCH_CHILDREN); + auto filterSpec = QString("scale=%1:%2") + .arg(QString::number(size.width()), QString::number(size.height())); + d_ptr->filterPtr->config(filterSpec); + // d_ptr->filterPtr->config("null"); } - d_ptr->frameConverterPtr->setColorspaceDetails(frame.data(), - m_colorSpaceTrc.brightness, - m_colorSpaceTrc.contrast, - m_colorSpaceTrc.saturation); - QSharedPointer frameRgbPtr(new Frame); - frameRgbPtr->imageAlloc(size, dst_pix_fmt); - d_ptr->frameConverterPtr->scale(frame.data(), frameRgbPtr.data()); - // qDebug() << frameRgbPtr->avFrame()->width << frameRgbPtr->avFrame()->height - // << frameRgbPtr->avFrame()->format; - return frameRgbPtr; + auto framePtrs = d_ptr->filterPtr->filterFrame(framePtr.data()); + if (framePtrs.isEmpty()) { + return {}; + } + // qDebug() << framePtrs.first()->avFrame()->width << framePtrs.first()->avFrame()->height + // << framePtrs.first()->avFrame()->format; + return framePtrs.first(); + + // return d_ptr->swsScale(framePtr); } auto WidgetRender::supportedOutput_pix_fmt() -> QVector { - return d_ptr->supportFormats; + return {}; } void WidgetRender::resetAllFrame() diff --git a/ffmpeg/videorender/widgetrender.hpp b/ffmpeg/videorender/widgetrender.hpp index eb74802..34ee55b 100644 --- a/ffmpeg/videorender/widgetrender.hpp +++ b/ffmpeg/videorender/widgetrender.hpp @@ -14,7 +14,7 @@ class FFMPEG_EXPORT WidgetRender : public VideoRender, public QWidget ~WidgetRender() override; auto isSupportedOutput_pix_fmt(AVPixelFormat pix_fmt) -> bool override; - auto convertSupported_pix_fmt(QSharedPointer frame) -> QSharedPointer override; + auto convertSupported_pix_fmt(QSharedPointer framePtr) -> QSharedPointer override; auto supportedOutput_pix_fmt() -> QVector override; void resetAllFrame() override;
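Usage note: the filter.hpp/filter.cc and widgetrender.cc hunks above replace the AVContextInfo-driven Filter constructor with a Filter that is initialized from the frame itself and then configured with a filter spec. The following is a minimal sketch of that call order, modeled on the reworked WidgetRender::convertSupported_pix_fmt(); the include paths, the FramePtr alias (a QSharedPointer<Frame>) and the free-standing helper are illustrative assumptions, only the Filter/FilterContext calls come from this patch.

// Sketch only: header paths, FramePtr and this helper are assumed, not part of the patch.
#include "filter/filter.hpp"
#include "filter/filtercontext.hpp"
#include "frame.hpp"

#include <QSize>
#include <QString>
#include <QVector>

extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
}

using namespace Ffmpeg;

auto scaleToRgb32(Filter *filter, Frame *in, const QSize &size) -> QVector<FramePtr>
{
    if (!filter->isInitialized()) {
        // init() now reads width/height/format/time_base/sample_aspect_ratio from the
        // frame itself, so the decoder's AVContextInfo is no longer needed here.
        filter->init(AVMEDIA_TYPE_VIDEO, in);
        // Constrain the buffersink to the one pixel format WidgetRender paints.
        AVPixelFormat pix_fmt = AV_PIX_FMT_RGB32;
        av_opt_set_bin(filter->buffersinkCtx()->avFilterContext(),
                       "pix_fmts",
                       reinterpret_cast<const uint8_t *>(&pix_fmt),
                       sizeof(pix_fmt),
                       AV_OPT_SEARCH_CHILDREN);
        // "null"/"anull" pass frames through unchanged; here the graph scales to the widget size.
        filter->config(QString("scale=%1:%2").arg(size.width()).arg(size.height()));
    }
    return filter->filterFrame(in); // may be empty while the graph buffers input
}

In the patch itself the Filter is cached per render and rebuilt only when the cached FrameParam (size, format, time base, sample aspect ratio, sample rate, channel layout) or the tonemap type changes; the isInitialized() guard above stands in for that caching.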