Encoding YUV data with FFmpeg in C

Workflow

Below is a flow diagram for encoding video with FFmpeg. The same flow works not only for H.264 but also for MPEG4/MPEG2/VP8 and any other video codec FFmpeg supports. In the diagram, the functions on a blue background are the ones that actually write output data, and the functions on a light-green background perform the video encoding.

A brief description of what each function in the flow does:

av_register_all(): registers all of FFmpeg's muxers, demuxers and codecs (deprecated and no longer required since FFmpeg 4.0).

avformat_alloc_output_context2(): initializes the AVFormatContext of the output stream.

avio_open(): opens the output file.

avformat_new_stream(): creates the AVStream of the output (the older name av_new_stream() has been removed from current FFmpeg).

avcodec_find_encoder(): looks up the encoder.

avcodec_open2(): opens the encoder (see the sketch after this list for the newer way to set up the codec context).

avformat_write_header(): writes the container header (not needed for container formats that have no header, such as MPEG-TS).

avcodec_encode_video2(): encodes one frame of video, i.e. turns an AVFrame (holding YUV pixel data) into an AVPacket (holding the H.264 or other encoded bitstream). This function is deprecated; the code below uses the newer avcodec_send_frame()/avcodec_receive_packet() pair instead.

av_write_frame(): writes the encoded video bitstream to the file.

flush_encoder(): called once all input pixel data has been read; it drains the AVPackets still buffered inside the encoder (the code below does this by calling encode() with a NULL frame).

av_write_trailer(): writes the container trailer (not needed for container formats that have no trailer, such as MPEG-TS).
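
The full program below still reaches the encoder through the legacy AVStream::codec field, which FFmpeg deprecated in 3.1 and removed in 5.0. On newer FFmpeg the usual pattern is to allocate a separate AVCodecContext and copy its parameters into the stream. Here is a minimal sketch of that newer setup, reusing the output AVFormatContext (outputContext) from the program below; the numeric values are placeholders and error handling is trimmed:

const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
AVCodecContext* enc = avcodec_alloc_context3(codec);   /* own context instead of stream->codec */
enc->width     = 540;
enc->height    = 960;
enc->pix_fmt   = AV_PIX_FMT_YUV420P;
enc->time_base = (AVRational){1, 25};
enc->bit_rate  = 400000;
if (avcodec_open2(enc, codec, NULL) < 0) { /* handle the error */ }

AVStream* st = avformat_new_stream(outputContext, NULL);
st->time_base = enc->time_base;
/* copy the encoder parameters into the stream instead of sharing stream->codec */
avcodec_parameters_from_context(st->codecpar, enc);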

# Code

#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
AVFormatContext* inputContext;
AVFormatContext* outputContext;
AVCodecContext* decodeContext;
AVCodecContext* encodeContext;
AVStream* stream;
void init(){
    /* registers muxers as well as codecs; deprecated and unnecessary since FFmpeg 4.0 */
    av_register_all();
}
int open_output_context(const char* filename){
    /* allocates outputContext and guesses the muxer from the file extension */
    int ret = avformat_alloc_output_context2(&outputContext,NULL,NULL,filename);
    if(ret < 0){
        av_log(NULL,AV_LOG_ERROR,"create outputContext failed \n");
        return -1;
    }else{
        av_log(NULL,AV_LOG_INFO,"create outputContext success \n");
    }

    ret = avio_open(&outputContext->pb,filename,AVIO_FLAG_READ_WRITE);
    if(ret < 0){
        av_log(NULL,AV_LOG_ERROR,"open file failed  \n");
        return -1;
    }else{
          av_log(NULL,AV_LOG_INFO,"open file success  \n");
    }
    stream = avformat_new_stream(outputContext,NULL);
    av_dump_format(outputContext,0,filename,1);
    return ret;
}
void cleanup(){
    /* renamed from close() so it cannot clash with the POSIX close() declaration */
    if(decodeContext){
        avcodec_close(decodeContext);
    }
    if(inputContext){
        avformat_close_input(&inputContext);
    }
    if(outputContext){
        if(encodeContext){
            avcodec_close(encodeContext);
        }
        /* an output context is not closed with avformat_close_input();
           close its AVIOContext and free the context instead */
        avio_closep(&outputContext->pb);
        avformat_free_context(outputContext);
        outputContext = NULL;
    }
}
int init_encode(int width, int height, AVStream* video_stream){
    /* stream->codec is the legacy (pre-FFmpeg 5.0) way to reach the encoder context;
       see the sketch above the code for the avcodec_alloc_context3() based setup */
    encodeContext = video_stream->codec;
    encodeContext->codec_id = outputContext->oformat->video_codec;
    encodeContext->codec_type = AVMEDIA_TYPE_VIDEO;
    encodeContext->pix_fmt = AV_PIX_FMT_YUV420P;
    encodeContext->width = width;
    encodeContext->height = height;
    encodeContext->bit_rate = 400000;
    encodeContext->gop_size = 250;

    encodeContext->time_base.num = 1;
    encodeContext->time_base.den = 25;
    encodeContext->qmin = 10;
    encodeContext->qmax = 51;

    //Optional Param
    encodeContext->max_b_frames = 3;

    AVCodec* pCodec = avcodec_find_encoder(encodeContext->codec_id);
    if(!pCodec){
        av_log(NULL,AV_LOG_ERROR,"encoder not found \n");
        return -1;
    }
    return avcodec_open2(encodeContext,pCodec,NULL);
}
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt)
{
    int ret;
    /* send the frame to the encoder; a NULL frame puts it into flush (drain) mode */
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);
    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }
        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
        av_write_frame(outputContext,pkt);
        av_packet_unref(pkt);
    }
}
int main(){
    init();
    const char* inputFile = "jichi.yuv";
    const char* outputFile = "jichitext2.h264";
    FILE* in_file = fopen(inputFile,"rb");
    int in_width = 540;
    int in_height = 960;
    /* declared up front so the cleanup at _END is safe even if we jump there early */
    AVFrame* picture = NULL;
    uint8_t* picture_buf = NULL;

    if(in_file){
        av_log(NULL,AV_LOG_INFO,"open file success \n");
    }else{
        av_log(NULL,AV_LOG_ERROR,"can't open file! \n");
        goto _END;
    }

    int ret = open_output_context(outputFile);
    if(ret >= 0){
        av_log(NULL,AV_LOG_INFO,"open_output_context success \n");
    }else{
        av_log(NULL,AV_LOG_ERROR,"open_output_context failed! \n");
         goto _END;
    }
   
    if (stream == NULL){
        av_log(NULL,AV_LOG_ERROR,"avformat_new_stream failed \n");
        goto _END;
    }else{
        av_log(NULL,AV_LOG_INFO,"avformat_new_stream success \n");
    }
    stream->time_base.num = 1;
    stream->time_base.den =25;

    ret = init_encode(in_width,in_height,stream);
    if(ret < 0){
        av_log(NULL,AV_LOG_ERROR,"init_encoder failed \n");
        goto _END;
    }else{
        av_log(NULL,AV_LOG_INFO,"init encoder success \n");
    }

    ret = avcodec_parameters_from_context(stream->codecpar, encodeContext);
    if(ret < 0){
        printf("avcodec_parameters_from_context failed \n");
    }else{
        printf("avcodec_parameters_from_context success \n");
    }
    AVPacket packet;
    picture = av_frame_alloc();
    /* one YUV420P frame needs width*height*3/2 bytes */
    int size = av_image_get_buffer_size(encodeContext->pix_fmt, encodeContext->width, encodeContext->height, 1);
    picture_buf = (uint8_t *)av_malloc(size);
    av_image_fill_arrays(picture->data, picture->linesize, picture_buf,
                         encodeContext->pix_fmt, encodeContext->width, encodeContext->height, 1);
    picture->width = encodeContext->width;
    picture->height = encodeContext->height;
    picture->format = encodeContext->pix_fmt;
    int y_size = encodeContext->width*encodeContext->height;
    av_new_packet(&packet,size);
    int i = 0;
    AVDictionary* option = NULL;
    ret = avformat_write_header(outputContext,&option);
    if(ret < 0){
        printf("avformat_write_header failed \n");
    }else{
        printf("avformat_write_header success \n");
    }
    while(1){
        /* one frame of YUV420P input is y_size*3/2 bytes; stop on EOF or a short read */
        if (fread(picture_buf, 1, y_size*3/2, in_file) != (size_t)(y_size*3/2)){
            printf("No more raw frames to read \n");
            break;
        }
        picture->data[0] = picture_buf;                // Y (luma)
        picture->data[1] = picture_buf + y_size;       // U
        picture->data[2] = picture_buf + y_size*5/4;   // V
        //AVFrame PTS
        picture->pts = i*(stream->time_base.den)/((stream->time_base.num)*25);
        encode(encodeContext,picture,&packet);
        i++;
    }
    /* flush the encoder: sending a NULL frame drains the packets still buffered inside it */
    encode(encodeContext, NULL, &packet);
    av_write_trailer(outputContext);
    printf("编码完成\n");
    AVCodec* encoder = avcodec_find_encoder(outputContext->oformat->video_codec);
    if(!encoder){
        av_log(NULL,AV_LOG_ERROR,"can't find encoder \n");
    }else{
        av_log(NULL,AV_LOG_INFO,"encoder name = %s \n",encoder->name);
    }
    av_free(picture_buf);
    _END:
        av_frame_free(&picture);
        if(in_file){
            fclose(in_file);
        }
        cleanup();
    return 0;
}
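
To build and try it out (assuming the FFmpeg development packages are installed and visible to pkg-config; the source file name encode_yuv.c is just an example):

gcc encode_yuv.c -o encode_yuv $(pkg-config --cflags --libs libavformat libavcodec libavutil)

A matching 540x960 YUV420P input file can be produced from any video with the ffmpeg command-line tool:

ffmpeg -i input.mp4 -an -pix_fmt yuv420p -s 540x960 jichi.yuv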

References:

Lei Xiaohua (雷霄骅)'s blog: https://blog.csdn.net/leixiaohua1020/article/details/25430425