https://bbs.csdn.net/topics/390430857
This version has relatively little code; it is a wrapper I put together myself:
This one uses OpenCV to read the image files:
https://blog.csdn.net/wootengxjj/article/details/51758818?utm_source=blogxgwz3
https://download.csdn.net/download/wootengxjj/9559381
This one has a lot of code and it does run; note that it targets FFmpeg versions before 4.x:
https://download.csdn.net/download/csdn421/9544177
Here is the core code:
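// Pipeline: decode a numbered JPG sequence into one raw YUV file (jpgToYuv),
// encode that YUV file to an HEVC elementary stream (yuvToH265), then mux the
// HEVC stream together with an MP3 track into an MKV container (h265ToMkv).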
#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
using namespace std;
extern "C"
{
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/mathematics.h>
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
const char *jpgToYuvFile = "result/phase_1.yuv";
const char *yuvToH265File = "result/phase_2.hevc";
const char *h265ToMkvFile = "result/phase_end.mkv";
const char *mp3ToMkv = "result/phase_2.mp3";
int pictureHeight = 0;
int pictureWidth = 0;
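// The *_save helpers below dump one decoded frame to the raw YUV/RGB output
// file, writing each plane row by row so that linesize padding is skipped.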
void yuvj420p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx,FILE *FileOut)
{
int i = 0;
int width = pCodecCtx->width, height = pCodecCtx->height;
int height_half = height / 2, width_half = width / 2;
int y_wrap = pFrame->linesize[0];
int u_wrap = pFrame->linesize[1];
int v_wrap = pFrame->linesize[2];
unsigned char *y_buf = pFrame->data[0];
unsigned char *u_buf = pFrame->data[1];
unsigned char *v_buf = pFrame->data[2];
//save y
for (i = 0; i < height; i++)
{
fwrite(y_buf + i*y_wrap, 1, width, FileOut);
}
//save u
for (i = 0; i < height_half; i++)
{
fwrite(u_buf + i*u_wrap, 1, width_half, FileOut);
}
//save v
for (i = 0; i < height_half; i++)
{
fwrite(v_buf + i*v_wrap, 1, width_half, FileOut);
}
fflush(FileOut);
}
void rgb24_save(AVFrame *pFrame, AVCodecContext *pCodecCtx, FILE *FileOut)
{
int i = 0;
int width = pCodecCtx->width, height = pCodecCtx->height;
for (i = 0; i < height; i++)
{
fwrite(pFrame->data[0] + i*pFrame->linesize[0], 1, width * 3, FileOut);
}
fflush(FileOut);
}
void yuvj422p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx, FILE *FileOut)
{
int i = 0;
int width = pCodecCtx->width, height = pCodecCtx->height;
int height_half = height / 2, width_half = width / 2;
int y_wrap = pFrame->linesize[0];
int u_wrap = pFrame->linesize[1];
int v_wrap = pFrame->linesize[2];
unsigned char *y_buf = pFrame->data[0];
unsigned char *u_buf = pFrame->data[1];
unsigned char *v_buf = pFrame->data[2];
//save y
for (i = 0; i < height; i++)
{
fwrite(y_buf + i * y_wrap, 1, width, FileOut);
}
//save u
for (i = 0; i < height; i++)
{
fwrite(u_buf + i * u_wrap, 1, width_half, FileOut);
}
//save v
for (i = 0; i < height; i++)
{
fwrite(v_buf + i * v_wrap, 1, width_half, FileOut);
}
fflush(FileOut);
}
void yuvj444p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx, FILE *FileOut)
{
int i = 0;
int width = pCodecCtx->width, height = pCodecCtx->height;
int y_wrap = pFrame->linesize[0];
int u_wrap = pFrame->linesize[1];
int v_wrap = pFrame->linesize[2];
unsigned char *y_buf = pFrame->data[0];
unsigned char *u_buf = pFrame->data[1];
unsigned char *v_buf = pFrame->data[2];
//save y
for (i = 0; i < height; i++)
{
fwrite(y_buf + i*y_wrap, 1, width, FileOut);
}
//save u
for (i = 0; i < height; i++)
{
fwrite(u_buf + i*u_wrap, 1, width, FileOut);
}
//save v
for (i = 0; i < height; i++)
{
fwrite(v_buf + i*v_wrap, 1, width, FileOut);
}
fflush(FileOut);
}
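// Decode JPG files named <srcFile><zero-padded index>.jpg, convert every frame
// to pixFormat with swscale, and append the result to the raw file jpgToYuvFile.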
int jpgToYuv(int frameNum, const char *srcFile, AVPixelFormat pixFormat,int charNum)
{
char FileInput[30];
AVFormatContext *FileFormatCtx = NULL;
AVCodecContext *FileCodecCtx = NULL;
AVFrame *FileFrame = NULL, *FileFrameYUV = NULL;
AVCodec *FileCodec = NULL;
AVPacket packet;
av_init_packet(&packet);
FILE *FileOut = fopen(jpgToYuvFile, "wb+");
if (FileOut == NULL)
{
printf("lose to open jpgToYuvFile ");
return -1;
}
for (int num = 0; num <= frameNum; num++)
{
FileCodecCtx = NULL;
FileFrame = NULL;
FileFrameYUV = NULL;
FileCodec = NULL;
FileFormatCtx = NULL;
// build the zero-padded file name, e.g. pics_1/dh0001.jpg when charNum == 4
sprintf(FileInput, "%s%0*d.jpg", srcFile, charNum, num);
printf("File name: %s\n", FileInput);
if (avformat_open_input(&FileFormatCtx, FileInput, NULL, NULL) != 0)
{
printf( "phase_1,couldn't open input file\n");
return -1;
}
if (avformat_find_stream_info(FileFormatCtx, NULL) < 0)
{
printf("phase_1,stream_info error\n");
return -1;
}
int videoStream = -1;
for (int i = 0; i < FileFormatCtx->nb_streams; i++)
{
if (FileFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStream = i;
break;
}
}
if (videoStream == -1)
{
return -1;
}
FileCodecCtx = FileFormatCtx->streams[videoStream]->codec;
FileCodec = avcodec_find_decoder(FileCodecCtx->codec_id);
if (FileCodec == NULL)
{
printf("phase_1,cant find decoder\n");
return -1;
}
if (avcodec_open2(FileCodecCtx, FileCodec, NULL) < 0)
{
printf("phase_1,cant open decoder!\n");
return -1;
}
FileFrame = av_frame_alloc();
FileFrameYUV = av_frame_alloc();
if (FileFrame == NULL|| FileFrameYUV == NULL)
{
printf("phase_1,cant open a frame to store data\n");
return -1;
}
uint8_t *out_buffer;
FileFrameYUV->format = pixFormat;
out_buffer = new uint8_t[avpicture_get_size(pixFormat, FileCodecCtx->width, FileCodecCtx->height)];
avpicture_fill((AVPicture *)FileFrameYUV, out_buffer, pixFormat, FileCodecCtx->width, FileCodecCtx->height);
int frameFinish = 0;
int decLen = 0;
while (av_read_frame(FileFormatCtx, &packet) >= 0)
{
if (packet.stream_index == videoStream)
{
while (packet.size > 0)
{
decLen = avcodec_decode_video2(FileCodecCtx, FileFrame, &frameFinish, &packet);
if (decLen < 0)
{
printf("phase_1,Error while decoding frame %d\n");
return -1;
}
if (frameFinish)
{
SwsContext* img_convert_ctx = sws_getContext(FileCodecCtx->width, FileCodecCtx->height, FileCodecCtx->pix_fmt, FileCodecCtx->width, FileCodecCtx->height, pixFormat, SWS_BICUBIC, NULL, NULL, NULL);
sws_scale(img_convert_ctx, (const uint8_t* const*)FileFrame->data, FileFrame->linesize, 0, FileCodecCtx->height, FileFrameYUV->data, FileFrameYUV->linesize);
sws_freeContext(img_convert_ctx);
switch (FileFrameYUV->format)
{
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUVJ420P:
yuvj420p_save(FileFrameYUV, FileCodecCtx, FileOut);
break;
case AV_PIX_FMT_RGB24:
rgb24_save(FileFrameYUV, FileCodecCtx, FileOut);
break;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUVJ422P:
yuvj422p_save(FileFrameYUV, FileCodecCtx, FileOut);
break;
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVJ444P:
yuvj444p_save(FileFrameYUV, FileCodecCtx, FileOut);
break;
default:
printf("phase_1,unsupported pixel format for saving\n");
break;
}
}
packet.data += decLen;
packet.size -= decLen;
}
}
av_free_packet(&packet);
}
// record the image dimensions while the codec context is still valid, then
// release the per-image resources so each iteration does not leak memory
pictureHeight = FileCodecCtx->height;
pictureWidth = FileCodecCtx->width;
av_frame_free(&FileFrame);
av_frame_free(&FileFrameYUV);
delete[] out_buffer;
avcodec_close(FileCodecCtx);
avformat_close_input(&FileFormatCtx);
}
fclose(FileOut);
return 1;
}
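// Fill in the encoder parameters (25 fps, 400 kbps, GOP of 250) and look up
// the encoder that matches the output format's default video codec.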
AVCodec* setAvcodecContext(AVCodecContext * pCodecCtx, AVPixelFormat pixFormat, AVOutputFormat* fmt)
{
pCodecCtx->codec_id = fmt->video_codec;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = pixFormat;
pCodecCtx->width = pictureWidth;
pCodecCtx->height = pictureHeight;
pCodecCtx->time_base.num = 1;
pCodecCtx->time_base.den = 25;
pCodecCtx->bit_rate = 400000;
pCodecCtx->gop_size = 250;
pCodecCtx->qmin = 10;
pCodecCtx->qmax = 51;
return avcodec_find_encoder(pCodecCtx->codec_id);
}
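// Read one raw frame from the YUV file and point the AVFrame's data planes
// at the correct offsets inside picture_buf for the chosen pixel format.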
int choosePixFormat(AVPixelFormat pixFormat, AVFrame* picture, int y_size, uint8_t* picture_buf, FILE *in_file)
{
size_t frame_size = 0;
switch (pixFormat)
{
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUVJ420P:
frame_size = y_size * 3 / 2;
break;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUVJ422P:
frame_size = y_size * 2;
break;
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVJ444P:
frame_size = y_size * 3;
break;
default:
printf("phase_2,unsupported pixel format\n");
return -1;
}
// fread never returns a negative value; compare against the expected frame size
if (fread(picture_buf, 1, frame_size, in_file) != frame_size)
{
if (!feof(in_file))
{
printf("phase_2,read yuv file error\n");
}
return -1;
}
picture->data[0] = picture_buf;
picture->data[1] = picture_buf + y_size;
switch (pixFormat)
{
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUVJ420P:
picture->data[2] = picture_buf + y_size * 5 / 4;
break;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUVJ422P:
picture->data[2] = picture_buf + y_size * 3 / 2;
break;
default:
picture->data[2] = picture_buf + y_size * 2;
break;
}
return 0;
}
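// Drain the frames that the encoder is still holding back after the main
// encoding loop and write them to the output file.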
int flushEncoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
{
int ret;
int getFrame;
AVPacket lastPacket;
if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities &CODEC_CAP_DELAY))
{
return 0;
}
while (1)
{
av_init_packet(&lastPacket);
lastPacket.data = NULL;
lastPacket.size = 0;
// a NULL frame tells the encoder to emit its remaining delayed frames
ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec, &lastPacket, NULL, &getFrame);
if (ret < 0)
{
break;
}
if (!getFrame)
{
ret = 0;
break;
}
ret = av_write_frame(fmt_ctx, &lastPacket);
av_free_packet(&lastPacket);
if (ret < 0)
{
break;
}
}
return ret;
}
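// Encode the raw YUV file produced by jpgToYuv into an HEVC elementary stream.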
int yuvToH265(int frameNum, AVPixelFormat pixFormat)
{
AVFormatContext* pFormatCtx;
AVOutputFormat* fmt;
AVStream* video_st;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
FILE *in_file = fopen(jpgToYuvFile, "rb");
if (in_file == NULL)
{
printf("phase_2,failed to open the raw yuv file\n");
return -1;
}
avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, yuvToH265File);
if (pFormatCtx == NULL)
{
printf("phase_2,cant read output file codec \n");
return -1;
}
fmt = pFormatCtx->oformat;
if (avio_open(&pFormatCtx->pb, yuvToH265File, AVIO_FLAG_READ_WRITE) < 0)
{
printf("phase_2, cant open output file");
return -1;
}
video_st = avformat_new_stream(pFormatCtx, 0);
if (video_st == NULL)
{
return -1;
}
pCodecCtx = video_st->codec;
pCodec = setAvcodecContext(pCodecCtx, pixFormat, fmt);
if (pCodec ==NULL)
{
printf("phase_2,cant find proper coder\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("phase_2,cant open coder\n");
return -1;
}
uint8_t* picture_buf;
AVFrame* picture;
int size;
picture = av_frame_alloc();
size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
picture_buf = (uint8_t *)av_malloc(size);
avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
avformat_write_header(pFormatCtx, NULL);
AVPacket pkt;
int y_size = pCodecCtx->width * pCodecCtx->height;
av_new_packet(&pkt, y_size * 6);
for (int i = 0; i < frameNum; i++)
{
if (choosePixFormat(pixFormat, picture, y_size, picture_buf, in_file) < 0)
{
break;
}
picture->pts = i;
int getPicture = 0;
int ret = avcodec_encode_video2(pCodecCtx, &pkt, picture, &getPicture);
if (ret < 0)
{
printf("phase_2,encode error\n");
return -1;
}
if (getPicture)
{
pkt.stream_index = video_st->index;
ret = av_write_frame(pFormatCtx, &pkt);
if (ret < 0)
{
printf("phase_2,write packet error");
return -1;
}
av_free_packet(&pkt);
}
}
int ret = flushEncoder(pFormatCtx,0);
if (ret < 0)
{
printf("phase_2,flushing encoder failed\n");
return -1;
}
av_write_trailer(pFormatCtx);
if (video_st)
{
avcodec_close(video_st->codec);
av_free(picture);
av_free(picture_buf);
}
avio_close(pFormatCtx->pb);
avformat_free_context(pFormatCtx);
fclose(in_file);
return 1;
}
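// Mux the HEVC elementary stream and the MP3 file into an MKV container,
// interleaving packets by comparing the current video and audio timestamps.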
int h265ToMkv()
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
int ret, i;
int videoindex_v = -1, videoindex_out = -1;
int audioindex_a = -1, audioindex_out = -1;
int frame_index = 0;
int64_t cur_pts_v = 0, cur_pts_a = 0;
ret = avformat_open_input(&ifmt_ctx_v, yuvToH265File, 0, 0);
if (ret < 0)
{
printf("phase_3,open hevc error\n");
return -1;
}
ret = avformat_find_stream_info(ifmt_ctx_v, 0);
if (ret < 0)
{
printf("phase_3, hevc stream_info error\n");
return -1;
}
ret = avformat_open_input(&ifmt_ctx_a, mp3ToMkv, 0, 0);
if (ret < 0)
{
printf("phase_3,open mp3 error\n");
return -1;
}
ret = avformat_find_stream_info(ifmt_ctx_a, 0);
if (ret < 0)
{
printf("phase_3, mp3 stream_info error\n");
return -1;
}
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, h265ToMkvFile);
if (!ofmt_ctx) {
printf("phase_3,could not create output context\n");
return -1;
}
ofmt = ofmt_ctx->oformat;
for (i = 0; i < ifmt_ctx_v->nb_streams; i++)
{
if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
AVStream *in_stream = ifmt_ctx_v->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
printf("phase_3,failed allocating output stream hevc\n");
return -1;
}
videoindex_v = i;
videoindex_out = out_stream->index;
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0)
{
printf("phase_3,fail to copy codecContext hevc\n");
return -1;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
for (i = 0; i < ifmt_ctx_a->nb_streams; i++)
{
if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
AVStream *in_stream = ifmt_ctx_a->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
printf("phase_3,failed allocating output stream mp3\n");
return -1;
}
audioindex_a = i;
audioindex_out = out_stream->index;
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0)
{
printf("phase_3,fail to copy codecContext mp3\n");
return -1;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
if (!(ofmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmt_ctx->pb, h265ToMkvFile, AVIO_FLAG_WRITE);
if (ret < 0)
{
printf("phase_3,Could not relate output file address\n" );
return -1;
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0)
{
printf("phase_3,write header error\n");
return -1;
}
int test_h = 0;
while (1)
{
AVFormatContext *ifmt_ctx;
int stream_index = 0;
AVStream *in_stream, *out_stream;
ret = av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base, cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base);
if (ret <= 0)
{
ifmt_ctx = ifmt_ctx_v;
stream_index = videoindex_out;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret >= 0)
{
do
{
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[stream_index];
if (pkt.stream_index == videoindex_v)
{
if (pkt.pts == AV_NOPTS_VALUE)
{
AVRational time_base1 = in_stream->time_base;
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
if (pkt.pts == 0)
{
pkt.dts = pkt.pts;
test_h = pkt.pts;
}
else
{
pkt.dts = test_h;
}
test_h = pkt.pts;
pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
frame_index++;
}
cur_pts_v = pkt.pts;
break;
}
} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
}
else
{
break;
}
}
else
{
ifmt_ctx = ifmt_ctx_a;
stream_index = audioindex_out;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret >= 0)
{
do
{
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[stream_index];
if (pkt.stream_index == audioindex_a)
{
if (pkt.pts == AV_NOPTS_VALUE)
{
AVRational time_base1 = in_stream->time_base;
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
pkt.dts = pkt.pts;
pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
frame_index++;
}
cur_pts_a = pkt.pts;
break;
}
} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
}
else
{
break;
}
}
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
pkt.stream_index = stream_index;
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0)
{
printf("phase_3,error muxing packet\n");
return -1;
}
av_free_packet(&pkt);
}
ret = av_write_trailer(ofmt_ctx);
if (ret < 0)
{
printf("phase_3,write trail error\n");
return -1;
}
avformat_close_input(&ifmt_ctx_v);
avformat_close_input(&ifmt_ctx_a);
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_close(ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
return 1;
}
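// Entry point: expects frameNum+1 images named <picName> followed by a
// picNameLen-digit index, e.g. pics_1/dh0000.jpg, pics_1/dh0001.jpg, ...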
int main()
{
int frameNum = 1000;
AVPixelFormat pixFormat = AV_PIX_FMT_YUV444P;
int picNameLen = 4;
//const char * picName = "pics/test_anima";
const char * picName = "pics_1/dh";
int ret;
av_register_all();
ret = jpgToYuv(frameNum, picName, pixFormat, picNameLen);
if (ret < 0)
{
system("pause");
return -1;
}
ret = yuvToH265(frameNum, pixFormat);
if (ret < 0)
{
system("pause");
return -1;
}
ret = h265ToMkv();
if (ret < 0)
{
system("pause");
return -1;
}
return 0;
}
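The code above relies on APIs that were removed in FFmpeg 4 (avcodec_decode_video2, avcodec_encode_video2, AVStream::codec, the avpicture_* helpers). As a rough sketch of my own, not part of the downloaded project, this is how the decode-one-JPG step could look with the newer send/receive API; the helper name decode_one_jpg and its error handling are illustrative assumptions:

extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// Decode the first video frame of one JPG file into an already-allocated AVFrame.
// Returns 0 on success, -1 on failure.
static int decode_one_jpg(const char *path, AVFrame *out)
{
    AVFormatContext *fmt = NULL;
    if (avformat_open_input(&fmt, path, NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(fmt, NULL) < 0)
    {
        avformat_close_input(&fmt);
        return -1;
    }
    int vidx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (vidx < 0)
    {
        avformat_close_input(&fmt);
        return -1;
    }
    // The codec context is no longer taken from AVStream::codec; it is
    // allocated separately and filled from the stream's codecpar.
    const AVCodec *dec = avcodec_find_decoder(fmt->streams[vidx]->codecpar->codec_id);
    AVCodecContext *ctx = dec ? avcodec_alloc_context3(dec) : NULL;
    if (ctx == NULL ||
        avcodec_parameters_to_context(ctx, fmt->streams[vidx]->codecpar) < 0 ||
        avcodec_open2(ctx, dec, NULL) < 0)
    {
        avcodec_free_context(&ctx);
        avformat_close_input(&fmt);
        return -1;
    }
    // avcodec_decode_video2 is replaced by the send/receive pair; for MJPEG
    // the decoded frame is normally available right after the packet is sent.
    AVPacket *pkt = av_packet_alloc();
    int got = -1;
    while (got < 0 && av_read_frame(fmt, pkt) >= 0)
    {
        if (pkt->stream_index == vidx &&
            avcodec_send_packet(ctx, pkt) >= 0 &&
            avcodec_receive_frame(ctx, out) >= 0)
        {
            got = 0;
        }
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
    return got;
}

A caller would allocate the destination frame with av_frame_alloc(), call decode_one_jpg on each numbered file, and then run sws_scale on the result exactly as jpgToYuv does now.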