
    [Original] Using ffmpeg to encode BMP images into an x264 video file, save H264 video as BMP images, and save a YUV video file as images (with code)

    Posted by jsh13417 on 2014-11-15 14:01:14
Using the ffmpeg open-source library, the code below encodes BMP images into an x264 (H.264) file and then decodes that H264 file back into BMP images. It also tests saving frames of a YUV video file as image files; PNG, JPG, GIF and other formats all worked. The test environment was set up in VS2010, based on code from the blogs listed in the references. The code:

#define _AFXDLL
#include <afx.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

void main()
{
    CFile file[5];
    BYTE *szTxt[5];
    int nWidth = 0;
    int nHeight = 0;
    int nDataLen = 0;
    int nLen;
    CString csFileName;

    for (int fileI = 1; fileI <= 5; fileI++)
    {
        csFileName.Format("%d.bmp", fileI);
        file[fileI - 1].Open(csFileName, CFile::modeRead | CFile::typeBinary);
        nLen = file[fileI - 1].GetLength();
        szTxt[fileI - 1] = new BYTE[nLen];
        file[fileI - 1].Read(szTxt[fileI - 1], nLen);
        file[fileI - 1].Close();

        //BMP bmi; //BITMAPINFO bmi;
        //int nHeadLen = sizeof(BMP);
        BITMAPFILEHEADER bmpFHeader;
        BITMAPINFOHEADER bmpIHeader;
        memcpy(&bmpFHeader, szTxt[fileI - 1], sizeof(BITMAPFILEHEADER));
        int nHeadLen = bmpFHeader.bfOffBits - sizeof(BITMAPFILEHEADER);
        memcpy(&bmpIHeader, szTxt[fileI - 1] + sizeof(BITMAPFILEHEADER), nHeadLen);
        nWidth = bmpIHeader.biWidth;   // e.g. 464
        nHeight = bmpIHeader.biHeight; // e.g. 362
        szTxt[fileI - 1] += bmpFHeader.bfOffBits;
        nDataLen = nLen - bmpFHeader.bfOffBits;
    }
    getchar();

    av_register_all();
    avcodec_register_all();
    AVFrame *m_pRGBFrame = new AVFrame[1]; // RGB frame data
    AVFrame *m_pYUVFrame = new AVFrame[1]; // YUV frame data
    AVCodecContext *c = NULL;
    AVCodecContext *in_c = NULL;
    AVCodec *pCodecH264; // encoder
    uint8_t *yuv_buff;

    // find the H264 encoder
    pCodecH264 = avcodec_find_encoder(CODEC_ID_H264);
    if (!pCodecH264)
    {
        fprintf(stderr, "h264 codec not found\n");
        getchar();
        exit(1);
    }
    c = avcodec_alloc_context3(pCodecH264);
    c->bit_rate = 3000000;  // put sample parameters
    c->width = nWidth;
    c->height = nHeight;
    // frames per second
    AVRational rate;
    rate.num = 1;
    rate.den = 25;
    c->time_base = rate;    // (AVRational){1,25}
    c->gop_size = 10;       // emit one intra frame every ten frames
    c->max_b_frames = 1;
    c->thread_count = 1;
    c->pix_fmt = PIX_FMT_YUV420P; //PIX_FMT_RGB24;
    //av_opt_set(c->priv_data, /*"preset"*/"libvpx-1080p.ffpreset", /*"slow"*/NULL, 0);

    // open the encoder
    if (avcodec_open2(c, pCodecH264, NULL) < 0)
    {
        printf("avcodec_open2 failed\n");
        TRACE("cannot open the encoder library");
        getchar();
    }
    int size = c->width * c->height;
    yuv_buff = (uint8_t *)malloc((size * 3) / 2); // size for YUV 420

    // buffer holding the RGB image data that is filled into the RGB frame
    uint8_t *rgb_buff = new uint8_t[nDataLen];

    // encode buffer: if outbuf_size is too small the encoder reports errors and quality suffers
    int outbuf_size = 900000;
    uint8_t *outbuf = (uint8_t *)malloc(outbuf_size);
    int u_size = 0;
    FILE *f = NULL;
    char *filename = "myData.h264";
    f = fopen(filename, "wb");
    if (!f)
    {
        TRACE("could not open %s\n", filename);
        getchar();
        exit(1);
    }

    // initialize the SwsContext
    SwsContext *scxt = sws_getContext(c->width, c->height, PIX_FMT_BGR24,
                                      c->width, c->height, PIX_FMT_YUV420P,
                                      SWS_POINT, NULL, NULL, NULL);
    AVPacket avpkt;
    for (int i = 0; i < 250; ++i)
    {
        int index = (i / 25) % 5;
        memcpy(rgb_buff, szTxt[index], nDataLen);
        avpicture_fill((AVPicture *)m_pRGBFrame, (uint8_t *)rgb_buff, PIX_FMT_RGB24, nWidth, nHeight);

        // attach the YUV buffer to the YUV frame
        avpicture_fill((AVPicture *)m_pYUVFrame, (uint8_t *)yuv_buff, PIX_FMT_YUV420P, nWidth, nHeight);

        // flip the RGB image vertically (BMP rows are stored bottom-up)
        m_pRGBFrame->data[0] += m_pRGBFrame->linesize[0] * (nHeight - 1);
        m_pRGBFrame->linesize[0] *= -1;
        m_pRGBFrame->data[1] += m_pRGBFrame->linesize[1] * (nHeight / 2 - 1);
        m_pRGBFrame->linesize[1] *= -1;
        m_pRGBFrame->data[2] += m_pRGBFrame->linesize[2] * (nHeight / 2 - 1);
        m_pRGBFrame->linesize[2] *= -1;

        // convert RGB to YUV
        sws_scale(scxt, m_pRGBFrame->data, m_pRGBFrame->linesize, 0, c->height,
                  m_pYUVFrame->data, m_pYUVFrame->linesize);

        static int got_packet_ptr = 0;
        av_init_packet(&avpkt);
        avpkt.data = outbuf;
        avpkt.size = outbuf_size;
        u_size = avcodec_encode_video2(c, &avpkt, m_pYUVFrame, &got_packet_ptr);
        m_pYUVFrame->pts++;
        if (u_size == 0)
        {
            fwrite(avpkt.data, 1, avpkt.size, f);
        }
    }
    fclose(f);
    delete[] m_pRGBFrame;
    delete[] m_pYUVFrame;
    delete[] rgb_buff;
    free(outbuf);
    avcodec_close(c);
    av_free(c);
}
#ifdef __cplusplus
}
#endif

Testing with exactly the code from the referenced blog, the encoder reported errors and the picture was blurry during playback; enlarging outbuf_size solved this. Question: why does the loop run 250 times, i.e. for (int i = 0; i < 250; ++i)? If you know, please explain! Presumably it is because 250 frames at 25 fps make a 10-second clip, with each of the 5 BMPs held for 25 frames (one second) and the sequence repeated twice.
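A less fragile alternative to guessing outbuf_size is to let the encoder allocate each output packet itself, and to drain any delayed frames once the loop finishes. The sketch below is only an illustration, assuming the same FFmpeg-1.x-era avcodec_encode_video2() API used above and an already opened AVCodecContext; the helper names are made up for this example and are not from the original post:

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Sketch: encode one frame, letting libavcodec size the packet (pkt.data = NULL),
 * so no fixed outbuf_size has to be guessed. 'c' is the opened encoder context,
 * 'frame' is a YUV420P frame (or NULL when flushing), 'f' is the output file. */
static int encode_frame(AVCodecContext *c, AVFrame *frame, FILE *f)
{
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* encoder allocates a buffer of exactly the right size */
    pkt.size = 0;

    if (avcodec_encode_video2(c, &pkt, frame, &got_packet) < 0)
        return -1;
    if (got_packet)
    {
        fwrite(pkt.data, 1, pkt.size, f);
        av_free_packet(&pkt);   /* release the encoder-allocated buffer */
    }
    return got_packet;
}

/* After the 250-frame loop, flush delayed (e.g. B-) frames by passing
 * frame = NULL until the encoder stops emitting packets. */
static void flush_encoder(AVCodecContext *c, FILE *f)
{
    while (encode_frame(c, NULL, f) > 0)
        ;
}

Flushing also explains why the last frame or two can otherwise be missing from myData.h264 when max_b_frames is non-zero: the encoder holds them back until it is drained.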
To save the H264 video back into BMP images, the code is as follows:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <windows.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

void SaveAsBMP(AVFrame *pFrameRGB, int width, int height, int index, int bpp)
{
    char buf[5] = {0};
    BITMAPFILEHEADER bmpheader;
    BITMAPINFOHEADER bmpinfo;
    FILE *fp;
    char filename[20] = "";

    _itoa(index, buf, 10);
    strcat(filename, buf);
    strcat(filename, ".bmp");
    if ((fp = fopen(filename, "wb+")) == NULL)
    {
        printf("open file failed!\n");
        return;
    }

    bmpheader.bfType = 0x4d42;
    bmpheader.bfReserved1 = 0;
    bmpheader.bfReserved2 = 0;
    bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    bmpheader.bfSize = bmpheader.bfOffBits + width * height * bpp / 8;

    bmpinfo.biSize = sizeof(BITMAPINFOHEADER);
    bmpinfo.biWidth = width;
    bmpinfo.biHeight = height;
    bmpinfo.biPlanes = 1;
    bmpinfo.biBitCount = bpp;
    bmpinfo.biCompression = BI_RGB;
    bmpinfo.biSizeImage = (width * bpp + 31) / 32 * 4 * height;
    bmpinfo.biXPelsPerMeter = 100;
    bmpinfo.biYPelsPerMeter = 100;
    bmpinfo.biClrUsed = 0;
    bmpinfo.biClrImportant = 0;

    fwrite(&bmpheader, sizeof(bmpheader), 1, fp);
    fwrite(&bmpinfo, sizeof(bmpinfo), 1, fp);
    fwrite(pFrameRGB->data[0], width * height * bpp / 8, 1, fp);
    fclose(fp);
}

int main(void)
{
    unsigned int i = 0, videoStream = -1;
    AVCodecContext *pCodecCtx;
    AVFormatContext *pFormatCtx = NULL;
    AVCodec *pCodec;
    AVFrame *pFrame, *pFrameRGB;
    struct SwsContext *pSwsCtx;
    const char *filename = "myData.h264";
    AVPacket packet;
    int frameFinished;
    int PictureSize;
    uint8_t *buf;

    av_register_all();

    if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0)
    {
        printf("av open input file failed!\n");
        exit(1);
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        printf("av find stream info failed!\n");
        exit(1);
    }
    // locate the first video stream
    for (i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
    {
        printf("find video stream failed!\n");
        exit(1);
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        printf("avcode find decoder failed!\n");
        exit(1);
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("avcode open failed!\n");
        exit(1);
    }

    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();
    if ((pFrame == NULL) || (pFrameRGB == NULL))
    {
        printf("avcodec alloc frame failed!\n");
        exit(1);
    }

    PictureSize = avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    buf = (uint8_t *)av_malloc(PictureSize);
    if (buf == NULL)
    {
        printf("av malloc failed!\n");
        exit(1);
    }
    avpicture_fill((AVPicture *)pFrameRGB, buf, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

    pSwsCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24,
                             SWS_BICUBIC, NULL, NULL, NULL);
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        if (packet.stream_index == videoStream)
        {
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished)
            {
                // flip the image vertically (BMP rows are stored bottom-up)
                pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
                pFrame->linesize[0] *= -1;
                pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
                pFrame->linesize[1] *= -1;
                pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
                pFrame->linesize[2] *= -1;
                sws_scale(pSwsCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                SaveAsBMP(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i++, 24);
            }
        }
        av_free_packet(&packet);
    }

    // drain the decoder: feed empty packets until no more frames come out
    while (1)
    {
        packet.data = NULL;
        packet.size = 0;
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (frameFinished)
        {
            // flip the image vertically
            pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
            pFrame->linesize[0] *= -1;
            pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
            pFrame->linesize[1] *= -1;
            pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
            pFrame->linesize[2] *= -1;
            sws_scale(pSwsCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                      pFrameRGB->data, pFrameRGB->linesize);
            SaveAsBMP(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i++, 24);
        }
        else
        {
            break;
        }
        av_free_packet(&packet);
    }

    sws_freeContext(pSwsCtx);
    av_free(pFrame);
    av_free(pFrameRGB);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}
#ifdef __cplusplus
}
#endif
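One caveat about SaveAsBMP() above: it writes width*height*bpp/8 bytes in a single fwrite(), which produces a valid 24-bit BMP only when width*3 is already a multiple of 4, because every BMP row must be padded to a 4-byte boundary. Below is a minimal, padding-aware sketch; the helper name and its stride parameter are illustrative and not from the original post (the pixel pointer would typically be pFrameRGB->data[0] and the stride pFrameRGB->linesize[0]):

#include <stdio.h>
#include <stdint.h>
#include <windows.h>

/* Sketch: write a 24-bit BMP, padding each row to a 4-byte boundary.
 * 'rgb' points to the top-most row of BGR24 pixels, 'stride' is the number
 * of bytes per source row; rows are written bottom-up, as BMP expects. */
static int write_bmp24(const char *path, const uint8_t *rgb, int stride,
                       int width, int height)
{
    const int row_bytes = width * 3;
    const int padded    = (row_bytes + 3) & ~3;   /* round up to 4 bytes */
    const uint8_t pad[3] = {0, 0, 0};
    BITMAPFILEHEADER fh = {0};
    BITMAPINFOHEADER ih = {0};
    FILE *fp = fopen(path, "wb");
    int y;

    if (!fp)
        return -1;

    fh.bfType    = 0x4d42;                        /* "BM" */
    fh.bfOffBits = sizeof(fh) + sizeof(ih);
    fh.bfSize    = fh.bfOffBits + padded * height;

    ih.biSize        = sizeof(ih);
    ih.biWidth       = width;
    ih.biHeight      = height;                    /* positive => bottom-up rows */
    ih.biPlanes      = 1;
    ih.biBitCount    = 24;
    ih.biCompression = BI_RGB;
    ih.biSizeImage   = padded * height;

    fwrite(&fh, sizeof(fh), 1, fp);
    fwrite(&ih, sizeof(ih), 1, fp);
    for (y = height - 1; y >= 0; --y)             /* bottom row first */
    {
        fwrite(rgb + y * stride, 1, row_bytes, fp);
        fwrite(pad, 1, padded - row_bytes, fp);
    }
    fclose(fp);
    return 0;
}

If a writer like this is used, the vertical flip before sws_scale() is no longer needed, since the writer itself emits the rows bottom-up.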
Another way to save video frames as image files is shown in the following code:

/* File : yuv2pic
 * Auth : sjin
 * Date : 20141123
 * Mail : 413977243@qq.com
 *
 * Based on http://blog.csdn.net/leixiaohua1020/article/details/25346147
 * This program encodes YUV420P pixel data into a JPEG/PNG image. It is one of
 * the simplest FFmpeg encoding examples and shows the basic encoding workflow.
 */
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#define INPUT_FILE_NAME   "yuv420p.yuv"
#define OUTPUT_FILE_NAME  "encode.png"
#define INPUT_FILE_WIDTH  176
#define INPUT_FILE_HEIGHT 144

int main(int argc, char* argv[])
{
    AVFormatContext* pFormatCtx;
    AVOutputFormat* fmt;
    AVStream* video_st;
    AVCodecContext* pCodecCtx;
    AVCodec* pCodec;
    uint8_t* picture_buf;
    AVFrame* picture;
    int size;

    FILE *in_file = fopen(INPUT_FILE_NAME, "rb");   // YUV source file
    int in_w = INPUT_FILE_WIDTH;
    int in_h = INPUT_FILE_HEIGHT;                   // width and height
    const char* out_file = OUTPUT_FILE_NAME;        // output file path

    av_register_all();

#if 0
    // Method 1: combine several calls by hand
    pFormatCtx = avformat_alloc_context();
    // guess the format; use the MJPEG encoder
    fmt = av_guess_format("mjpeg", NULL, NULL);
    pFormatCtx->oformat = fmt;
    // note: the output path
    if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0)
    {
        printf("failed to open the output file\n");
        return -1;
    }
#else
    // Method 2: more automatic
    // allocate an AVFormatContext for the output file (out_file)
    avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    fmt = pFormatCtx->oformat;
    video_st = avformat_new_stream(pFormatCtx, NULL);
    if (video_st == NULL)
    {
        return -1;
    }
#endif

    pCodecCtx = video_st->codec;
    pCodecCtx->codec_id = fmt->video_codec;
    pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx->pix_fmt = PIX_FMT_YUVJ420P;
    pCodecCtx->width = in_w;
    pCodecCtx->height = in_h;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;

    // dump the output format information
    av_dump_format(pFormatCtx, 0, out_file, 1);

    pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
    if (!pCodec)
    {
        printf("no suitable encoder found!\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("failed to open the encoder!\n");
        return -1;
    }

    // allocate the AVFrame that will hold the raw picture
    picture = avcodec_alloc_frame();
    // even with a frame allocated, memory is still needed for the raw pixel
    // data itself; compute its size and allocate it by hand
    size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    picture_buf = (uint8_t *)av_malloc(size);
    if (!picture_buf)
    {
        return -1;
    }
    // attach the buffer to the frame with the given pixel format and size
    avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt,
                   pCodecCtx->width, pCodecCtx->height);

    // write the file header
    avformat_write_header(pFormatCtx, NULL);

    AVPacket pkt;
    int y_size = pCodecCtx->width * pCodecCtx->height;
    av_new_packet(&pkt, y_size * 3);

    // read one YUV420P frame
    if (fread(picture_buf, 1, y_size * 3 / 2, in_file) < 0)
    {
        printf("failed to read the input file\n");
        return -1;
    }
    // wire the frame's plane pointers into the buffer
    picture->data[0] = picture_buf;                   // Y
    picture->data[1] = picture_buf + y_size;          // U
    picture->data[2] = picture_buf + y_size * 5 / 4;  // V

    int got_picture = 0;
    // encode the frame
    int ret = avcodec_encode_video2(pCodecCtx, &pkt, picture, &got_picture);
    if (ret < 0)
    {
        printf("encoding error!\n");
        return -1;
    }
    if (got_picture == 1)
    {
        pkt.stream_index = video_st->index;
        ret = av_write_frame(pFormatCtx, &pkt);
    }
    av_free_packet(&pkt);

    // write the file trailer
    av_write_trailer(pFormatCtx);
    printf("encoding succeeded!\n");

    if (video_st)
    {
        avcodec_close(video_st->codec);
        av_free(picture);
        av_free(picture_buf);
    }
    avio_close(pFormatCtx->pb);
    avformat_free_context(pFormatCtx);
    fclose(in_file);
    return 0;
}
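The offsets used when wiring picture->data[] in the code above come directly from the YUV420P memory layout: the Y plane is width*height bytes and the U and V planes are each a quarter of that, so a whole frame occupies width*height*3/2 bytes, which is exactly the amount fread() pulls in. A small sketch of the same arithmetic, with illustrative names not taken from the original post:

#include <stdint.h>

/* Sketch: locate the three planes inside one contiguous YUV420P buffer of
 * w*h*3/2 bytes, using the same offsets as the encoder example above:
 * Y at 0, U at w*h, V at w*h*5/4. */
typedef struct {
    uint8_t *y;
    uint8_t *u;
    uint8_t *v;
    int y_size;    /* bytes in the Y plane       */
    int uv_size;   /* bytes in each chroma plane */
} Yuv420pPlanes;

static Yuv420pPlanes yuv420p_planes(uint8_t *buf, int w, int h)
{
    Yuv420pPlanes p;
    p.y_size  = w * h;
    p.uv_size = p.y_size / 4;        /* chroma is subsampled 2x2 */
    p.y = buf;                       /* picture->data[0] */
    p.u = buf + p.y_size;            /* picture->data[1] */
    p.v = buf + p.y_size * 5 / 4;    /* picture->data[2] */
    return p;
}

For the 176x144 input used here that is 25344 luma bytes plus 2 x 6336 chroma bytes, i.e. 38016 bytes per frame.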
Below is a Makefile that works well for building this:

# use pkg-config for getting CFLAGS and LDLIBS
FFMPEG_LIBS = libavdevice   \
              libavformat   \
              libavfilter   \
              libavcodec    \
              libswresample \
              libswscale    \
              libavutil

CFLAGS += -Wall -O2 -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

EXAMPLES = yuv2pic
OBJS = $(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
LDLIBS += -lx264 -m32 -pthread -lm -ldl

.PHONY: all clean

all: $(OBJS) $(EXAMPLES)

clean:
	rm $(EXAMPLES) $(OBJS)

References:
1. http://blog.csdn.net/eightdegree/article/details/7425635#reply
2. http://blog.csdn.net/leixiaohua1020/article/details/25346147

