Below is an example of FFmpeg + QSV hardware encoding that I use. The code is incomplete and will not compile as-is; some unrelated structures are omitted.

The steps for CUDA (NVENC) hardware encoding are the same as for QSV; only a few fields need to be changed (see the sketch below).

Reference: the official FFmpeg example vaapi_encode.c (ffmpeg.org documentation).
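
To summarize the differences, here is a minimal sketch of the fields that change between the QSV path and the CUDA/NVENC path. The helper name HwEncoderChoice is hypothetical and exists only for this example; everything else in the code below stays the same.

extern "C" {
#include <libavutil/hwcontext.h>
#include <libavutil/pixfmt.h>
}

/// Hypothetical helper: collects the three values that differ between QSV and NVENC.
struct HwEncoderChoice {
    AVHWDeviceType dev_type;    // passed to av_hwdevice_ctx_create()
    const char    *enc_name;    // passed to avcodec_find_encoder_by_name()
    AVPixelFormat  hw_pix_fmt;  // used for avctx->pix_fmt and frames_ctx->format
};

static HwEncoderChoice choose_hw_encoder(bool use_nvidia)
{
    // In both cases frames_ctx->sw_format stays AV_PIX_FMT_NV12, and the QSV private
    // options (async_depth / max_dec_frame_buffering / look_ahead) are QSV-only.
    if (use_nvidia)
        return { AV_HWDEVICE_TYPE_CUDA, "h264_nvenc", AV_PIX_FMT_CUDA };
    return { AV_HWDEVICE_TYPE_QSV, "h264_qsv", AV_PIX_FMT_QSV };
}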

Header file

#ifndef VIDEO_ENCODER_QSV_H
#define VIDEO_ENCODER_QSV_H


#include "Encoder.h"


struct AVFrame;
struct AVStream;
struct AVBufferRef;
struct AVCodecContext;
struct AVFormatContext;


class VideoEncoderQSVData;
class ENCODER_API VideoEncoderQSV : public BaseEncoder
{
public:
    /// Encode and write to a file
    explicit VideoEncoderQSV(const std::string &filename, VideoEncoderParam &param);

    /// Encode and push as a stream (default port 554)
    explicit VideoEncoderQSV(const std::string &streamName, VideoEncoderParam &param, const std::string &ip, short port = 554);

    ~VideoEncoderQSV();

    int Open() override;
    void Close() override;

private:
    void _t_run();

    int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx);

    int encode(AVFormatContext *fmt_ctx, AVCodecContext *avctx, AVStream *st, AVFrame *frame);

private:
    std::shared_ptr<VideoEncoderQSVData> m_d = nullptr;
};


#endif // VIDEO_ENCODER_QSV_H 





cpp file

#include "VideoEncoderQSV.h"

#include "ffmpegheader.h"  /// ffmpeg相关头文件

static inline std::string GetErrStr(int errnum) {
    char buf[AV_ERROR_MAX_STRING_SIZE] = { '\0' };
    return av_make_error_string(buf, AV_ERROR_MAX_STRING_SIZE, errnum);
}

class VideoEncoderQSVData
{
public:
    AVBufferRef *hw_device_ctx = nullptr;   // hardware device context
    AVCodec *codec = nullptr;               // encoder
    AVCodecContext *codec_ctx = nullptr;    // encoder context

    AVFormatContext *ofmt_ctx = nullptr;    // output format context

    AVFrame *sw_frame = nullptr;  // frame holding the raw (CPU) data
    AVFrame *hw_frame = nullptr;  // hardware frame used for encoding


    SwsContext *sws_ctx = nullptr;  // pixel format / scaling context

    void Clear() {

        if (sws_ctx)
        {
            sws_freeContext(sws_ctx);
            sws_ctx = nullptr;
        }

        if (sw_frame)
        {
            av_frame_free(&sw_frame);
        }

        if (hw_frame)
        {
            av_frame_free(&hw_frame);
        }

        if (codec_ctx)
        {
            avcodec_free_context(&codec_ctx);
        }

        if (ofmt_ctx)
        {
            if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
            {
                avio_closep(&ofmt_ctx->pb);
            }
            avformat_free_context(ofmt_ctx);
            ofmt_ctx = nullptr;
        }

        if (hw_device_ctx)
        {
            av_buffer_unref(&hw_device_ctx);
        }
    }

    ~VideoEncoderQSVData(){
        Clear();
    }
};

VideoEncoderQSV::VideoEncoderQSV(const std::string &filename, VideoEncoderParam &param)
    : BaseEncoder(filename, param)
{
    m_d = std::make_shared<VideoEncoderQSVData>();
}

VideoEncoderQSV::VideoEncoderQSV(const std::string &streamName, VideoEncoderParam &param, const std::string &ip, short port /*= 554*/)
    : BaseEncoder(streamName, param, ip, port)
{
    m_d = std::make_shared<VideoEncoderQSVData>();
}

VideoEncoderQSV::~VideoEncoderQSV()
{
    Close();
}

int VideoEncoderQSV::Open()
{
    std::thread([this](){
        m_encode = true;
        this->_t_run();
        m_encode = false;
    }).detach();
    return 0;
}

void VideoEncoderQSV::Close()
{
    m_quit = true;
    while (m_encode)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    m_d->Clear();
}

void VideoEncoderQSV::_t_run()
{
    /// Create the hardware device context: QSV uses AV_HWDEVICE_TYPE_QSV, NVIDIA uses AV_HWDEVICE_TYPE_CUDA
    auto &hw_device_ctx = m_d->hw_device_ctx;
    auto ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, nullptr, nullptr, 0);
    if (ret < 0)
    {
        fprintf(stderr, "Failed to create a QSV device. Error code: %s\n", GetErrStr(ret).c_str());
        m_d->Clear();
        return;
    }

    auto &ofmt_ctx = m_d->ofmt_ctx;
    if ((ret = avformat_alloc_output_context2(&ofmt_ctx, nullptr, "avi", m_outFile.c_str())) < 0)
    {
        fprintf(stderr, "Failed to deduce output format from file extension. Error code: %s\n", GetErrStr(ret).c_str());
        m_d->Clear();
        return;
    }

    auto ofmt = ofmt_ctx->oformat;
    ofmt->video_codec = AVCodecID::AV_CODEC_ID_H264;
    ofmt->audio_codec = AV_CODEC_ID_NONE;


    /// Look up the encoder by name: "h264_nvenc" for NVIDIA NVENC, "h264_qsv" for Intel QSV
    auto &codec = m_d->codec;
    if (!(codec = avcodec_find_encoder_by_name("h264_qsv")))
    {
        fprintf(stderr, "Could not find encoder(%s).\n", "h264_qsv");
        ret = MediaErrorCode::E_NO_ENCODER;
        m_d->Clear();
        return;
    }

    /// Create the output stream
    auto st = avformat_new_stream(ofmt_ctx, nullptr);
    if (!st)
    {
        fprintf(stderr, "Could not allocate stream\n");
        return;
    }
    st->id = ofmt_ctx->nb_streams - 1;


    /// Allocate the encoder context
    auto &avctx = m_d->codec_ctx;
    if (!(avctx = avcodec_alloc_context3(codec)))
    {
        ret = MediaErrorCode::E_NO_MEMORY;
        m_d->Clear();
        return;
    }

    /// Set the encoder parameters; m_param is a custom struct holding the encoding settings
    avctx->codec_id = codec->id;
    avctx->width = m_param.width;           // width
    avctx->height = m_param.height;         // height
    avctx->time_base = { 1, m_param.fps };  // time base derived from the frame rate
    avctx->framerate = { m_param.fps, 1 };
    avctx->sample_aspect_ratio = { 1, 1 };
    avctx->gop_size = m_param.fps;  // GOP size: one keyframe per second
    avctx->bit_rate = m_param.bps;  // bitrate in bps
    avctx->max_b_frames = 0;
    avctx->pix_fmt = AV_PIX_FMT_QSV;  // NVIDIA uses AV_PIX_FMT_CUDA
    // avctx->pix_fmt = *codec->pix_fmts;

    if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO)
    {
        avctx->max_b_frames = 2;
    }

    if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO)
    {
        avctx->mb_decision = 2;
    }

    /// Set up the hardware frames context

    ret = set_hwframe_ctx(avctx, hw_device_ctx);
    if (ret < 0)
    {
        fprintf(stderr, "Failed to set hwframe context.\n");
        m_d->Clear();
        return;
    }
    if (ofmt->flags & AVFMT_GLOBALHEADER)
    {
        avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    /// The following three private options are QSV-specific and are not needed for NVENC
    av_opt_set(avctx->priv_data, "async_depth", "1", 0);
    av_opt_set(avctx->priv_data, "max_dec_frame_buffering", "1", 0);
    av_opt_set(avctx->priv_data, "look_ahead", "0", 0);
    /// Open the encoder
    if ((ret = avcodec_open2(avctx, codec, nullptr)) < 0)
    {
        fprintf(stderr, "Cannot open video encoder codec. Error code: %s\n", GetErrStr(ret).c_str());
        m_d->Clear();
        return;
    }

    /// Copy the parameters from the encoder context to the AVStream
    ret = avcodec_parameters_from_context(st->codecpar, avctx);
    if (ret < 0)
    {
        fprintf(stderr, "Could not copy the stream parameters\n");
        return;
    }

    /// Print the output format information
    av_dump_format(ofmt_ctx, 0, m_outFile.c_str(), 1);
    // open the output file, if needed
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ofmt_ctx->pb, m_outFile.c_str(), AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open '%s': ret = %d, due to %s.\n", m_outFile.c_str(), ret, GetErrStr(ret));
            return;
        }
    }

    // Write the stream header, if any.
    AVDictionary* opt = NULL;
    av_dict_set_int(&opt, "video_track_timescale", m_param.fps, 0);
    ret = avformat_write_header(ofmt_ctx, &opt);
    if (ret < 0)
    {
        fprintf(stderr, "Error occurred when opening output file: ret = %d\n", ret);
        return;
    }

    /// Allocate the software frame (holds the raw NV12 data)
    auto &sw_frame = m_d->sw_frame;
    if (!(sw_frame = av_frame_alloc())) {
        ret = MediaErrorCode::E_NO_MEMORY;
        m_d->Clear();
        return;
    }

    sw_frame->width = m_param.width;
    sw_frame->height = m_param.height;
    sw_frame->format = AV_PIX_FMT_NV12;

    /// av_frame_get_buffer allocates the buffers that hold the raw image data
    if ((ret = av_frame_get_buffer(sw_frame, 0)) < 0)
    {
        ret = MediaErrorCode::E_NO_MEMORY;
        m_d->Clear();
        return;
    }

    /// Allocate the hardware frame
    auto &hw_frame = m_d->hw_frame;
    if (!(hw_frame = av_frame_alloc()))
    {
        ret = MediaErrorCode::E_NO_MEMORY;
        m_d->Clear();
        return;
    }

    if ((ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, hw_frame, 0)) < 0)
    {
        fprintf(stderr, "Error code: %s.\n", GetErrStr(ret).c_str());
        ret = MediaErrorCode::E_NO_MEMORY;
        m_d->Clear();
        return;
    }

    if (!hw_frame->hw_frames_ctx)
    {
        ret = MediaErrorCode::E_NO_MEMORY;
        m_d->Clear();
        return;
    }

    uint64_t next_pts = 0; // pts
    auto &sws_ctx = m_d->sws_ctx; /// pixel format conversion context
    while (!m_quit)
    {

        /// Pause encoding while the reference count is 0
        //if (m_ref == 0)
        //{
        //    std::unique_lock<std::mutex> lock(m_mtx);
        //    m_cv.wait(lock);
        //}

        /// Pop a frame from the input queue
        if (auto frameVideo = m_inQueue.Pop())
        {
            auto src_buf = frameVideo->GetBuffer();
            auto src_fmt = static_cast<AVPixelFormat>(trans_video_format(frameVideo->Format()));
            auto src_width = frameVideo->Width();
            auto src_height = frameVideo->Height();

            auto dst_fmt = AV_PIX_FMT_NV12;
            auto dst_width = avctx->width;
            auto dst_height = avctx->height;

            const bool need_convert = (src_fmt != dst_fmt ||
                                       src_width != dst_width ||
                                       src_height != dst_height);

            /// Make sure sw_frame is writable; av_frame_make_writable reallocates its buffers if needed
            ret = av_frame_make_writable(sw_frame);
            if (ret < 0)
            {
                fprintf(stderr, "av_frame_make_writable failed. Error code: %s.\n", GetErrStr(ret).c_str());
                break;
            }

            uint8_t *src_data[4];
            int src_linesize[4];
            /// Point src_data at the raw input buffer and compute src_linesize
            av_image_fill_arrays(src_data, src_linesize, src_buf, src_fmt, src_width, src_height, 1);

            if (need_convert)
            {
                if (sws_ctx)  /// conversion context: recreate it for the current source parameters
                {
                    sws_freeContext(sws_ctx);
                    sws_ctx = nullptr;
                }

                sws_ctx = sws_getContext(src_width, src_height, src_fmt,
                    dst_width, dst_height, dst_fmt,
                    SWS_BICUBIC, nullptr, nullptr, nullptr);
                if (!sws_ctx)
                {
                    fprintf(stderr, "Could not initialize the conversion context\n");
                    break;
                }

                /// Convert pixel format / resolution into sw_frame
                sws_scale(sws_ctx, (const uint8_t * const *)src_data, src_linesize, 0, src_height, sw_frame->data, sw_frame->linesize);
            }
            else
            {
                /// Source already matches the NV12 target layout: copy the raw data into sw_frame
                av_image_copy(sw_frame->data, sw_frame->linesize,
                    (const uint8_t **)src_data, src_linesize,
                    dst_fmt, dst_width, dst_height);
            }

            /// Upload the data from sw_frame to the hardware surface hw_frame
            if ((ret = av_hwframe_transfer_data(hw_frame, sw_frame, 0)) < 0)
            {
                fprintf(stderr, "Error while transferring frame data to surface. Error code: %s.\n", GetErrStr(ret).c_str());
                break;
            }
            hw_frame->pts = next_pts;
            next_pts += 1;

            if ((ret = (encode(ofmt_ctx, avctx, st, hw_frame))) < 0)
            {
                fprintf(stderr, "Failed to encode.\n");
                break;
            }
        }


        std::this_thread::sleep_for(std::chrono::microseconds(1000));
    }


    av_frame_free(&hw_frame);
    av_frame_free(&sw_frame);

    /// Flush the encoder
    ret = encode(ofmt_ctx, avctx, st, nullptr);
    if (ret == AVERROR_EOF)
    {
        ret = 0;
    }

    /// Write the container trailer to finalize the output file
    av_write_trailer(ofmt_ctx);

    m_d->Clear();
}

int VideoEncoderQSV::set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
{
    AVBufferRef *hw_frames_ref = nullptr;
    AVHWFramesContext *frames_ctx = nullptr;
    int err = 0;

    if (!(hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx)))
    {
        fprintf(stderr, "Failed to create QSV frame context.\n");
        return -1;
    }

    frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
    frames_ctx->format = AV_PIX_FMT_QSV;  /// NVIDIA uses AV_PIX_FMT_CUDA
    frames_ctx->sw_format = AV_PIX_FMT_NV12;
    frames_ctx->width = m_param.width;
    frames_ctx->height = m_param.height;
    frames_ctx->initial_pool_size = 20;

    if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0)
    {
        fprintf(stderr, "Failed to initialize QSV frame context.Error code: %s\n", GetErrStr(err).c_str());
        av_buffer_unref(&hw_frames_ref);
        return err;
    }

    ctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
    if (!ctx->hw_frames_ctx)
    {
        err = MediaErrorCode::E_NO_MEMORY;
    }

    av_buffer_unref(&hw_frames_ref);
    return err;
}

int VideoEncoderQSV::encode(AVFormatContext *fmt_ctx, AVCodecContext *avctx, AVStream *st, AVFrame *frame)
{

    int ret = 0;
    AVPacket enc_pkt;

    av_init_packet(&enc_pkt);
    enc_pkt.data = nullptr;
    enc_pkt.size = 0;

    if ((ret = avcodec_send_frame(avctx, frame)) < 0)
    {
        fprintf(stderr, "Error code: %s\n", GetErrStr(ret).c_str());
        return E_ENCODER_FAILED;
    }

    while (ret >= 0)
    {
        ret = avcodec_receive_packet(avctx, &enc_pkt);
        if (ret)
        {
            break;
        }


        av_packet_rescale_ts(&enc_pkt, avctx->time_base, st->time_base);
        enc_pkt.stream_index = st->index;

        m_outQueue.Push(enc_pkt.data, enc_pkt.size);

        ret = av_interleaved_write_frame(fmt_ctx, &enc_pkt); /// write to the output and release the packet's data
        av_packet_unref(&enc_pkt);

        if (ret < 0)
        {
            fprintf(stderr, "Error while writing output packet: ret = %d\n", ret);
            return E_ENCODER_FAILED;
        }

        std::this_thread::sleep_for(std::chrono::microseconds(100));
    }

    ret = ((ret == AVERROR(EAGAIN)) ? 0 : ret);  /// EAGAIN only means the encoder needs more input; EOF/errors go back to the caller
    return ret;
}
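
For reference, a minimal, hypothetical usage sketch. The VideoEncoderParam field names (width, height, fps, bps) are taken from the code above; how raw frames are pushed into the BaseEncoder input queue is part of the framework that is not shown here, so that step is only indicated by a comment.

#include "VideoEncoderQSV.h"

int main()
{
    VideoEncoderParam param;   // field names as used above
    param.width  = 1920;
    param.height = 1080;
    param.fps    = 25;
    param.bps    = 4000000;    // 4 Mbit/s

    VideoEncoderQSV encoder("output.avi", param);
    encoder.Open();            // starts the encoding thread

    /// ... push raw frames through the BaseEncoder input queue (m_inQueue)
    /// while the application is running; that interface is not shown here ...

    encoder.Close();           // stop the loop, flush the encoder and release everything
    return 0;
}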