ffmpeg 叠加

滤镜可以实现多路视频的叠加、水印、缩放、裁剪等功能。ffmpeg提供了丰富的滤镜,可以通过滤镜图(filtergraph)描述字符串将它们组合使用。

ffmpeg编译时需--enable-libfreetype才能用此功能

滤镜的几个基本概念

  • Filter:代表单个filter
  • FilterPad:代表一个filter的输入或输出端口,每个filter都可以有多个输入和多个输出,只有输出pad的filter称为source,只有输入pad的filter称为sink
  • FilterLink:若一个filter的输出pad和另一个filter的输入pad名字相同,即认为两个filter之间建立了link
  • FilterChain:代表一串相互连接的filters,除了source和sink外,要求每个filter的输入输出pad都有对应的输出和输入pad

视频叠加流程

解码->写数据到Filter->从Filter读取数据->编码

/*
 * Build the filter graph described by filters_descr, fed by decoded frames
 * whose geometry/format come from codecContext.
 *
 * Creates a "buffer" source ("in"), a "buffersink" ("out"), parses the user
 * filter description between them, and configures the graph.  On success the
 * graph and its endpoint contexts are stored in the member fields
 * m_filter_graph / m_buffersrc_ctx / m_buffersink_ctx.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
int string_filter::init_filter(AVCodecContext* codecContext,const char *filters_descr){
	char args[512];
	int ret = 0;
	m_codecContext = codecContext;
	avfilter_register_all();

	AVFilter *buffersrc  = avfilter_get_by_name("buffer");
	AVFilter *buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs  = avfilter_inout_alloc();
	/* av_opt_set_int_list walks this array until it meets the terminator,
	 * so the list MUST end with AV_PIX_FMT_NONE (the original code used
	 * AV_PIX_FMT_YUV420P as the sentinel, which is wrong). */
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

	if (!buffersrc || !buffersink) {
		av_log(NULL, AV_LOG_ERROR, "filter get failed!\n");
		ret = AVERROR_FILTER_NOT_FOUND;
		goto end;
	}

	/* Store the graph in the member (the original shadowed it with a local,
	 * leaking the graph and leaving the member unset for later use/free). */
	m_filter_graph = avfilter_graph_alloc();
	if (!outputs || !inputs || !m_filter_graph) {
		ret = AVERROR(ENOMEM);
		goto end;
	}

	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		codecContext->width, codecContext->height, codecContext->pix_fmt,
		codecContext->time_base.num, codecContext->time_base.den,
		1, 1);

	ret = avfilter_graph_create_filter(&m_buffersrc_ctx, buffersrc, "in",
		args, NULL, m_filter_graph);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}

	/* buffer video sink: to terminate the filter chain. */
	ret = avfilter_graph_create_filter(&m_buffersink_ctx, buffersink, "out",
		NULL, NULL, m_filter_graph);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		goto end;
	}

	ret = av_opt_set_int_list(m_buffersink_ctx, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		goto end;
	}

	/* Endpoints for the filter graph: the graph's input labelled "in" is our
	 * buffer source, its output labelled "out" is our sink. */
	outputs->name		= av_strdup("in");
	outputs->filter_ctx = m_buffersrc_ctx;
	outputs->pad_idx	= 0;
	outputs->next		= NULL;

	inputs->name	   = av_strdup("out");
	inputs->filter_ctx = m_buffersink_ctx;
	inputs->pad_idx    = 0;
	inputs->next	   = NULL;
	if ((ret = avfilter_graph_parse_ptr(m_filter_graph, filters_descr,
		&inputs, &outputs, NULL)) < 0)
		goto end;

	ret = avfilter_graph_config(m_filter_graph, NULL);

end:
	/* The in/out lists must be freed on every path — avfilter_graph_parse_ptr
	 * leaves ownership with the caller even on success (the original leaked
	 * them on the success path). */
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);
	return ret;
}

/*
 * Push one decoded frame through the configured filter graph and return the
 * filtered result.
 *
 * Returns a newly allocated AVFrame on success (caller owns it and must
 * av_frame_free it), or NULL on any failure: NULL input, failure to feed the
 * graph, allocation failure, or no frame available from the sink (EAGAIN).
 * The original version leaked the output frame and returned a non-NULL
 * garbage frame when the sink had nothing to deliver, so the caller's NULL
 * check never triggered.
 */
AVFrame * string_filter::get_filter_frame(AVFrame *src)
{
	int ret = -1;

	if (NULL == src)
	{
		return NULL;
	}

	/* Feed the decoded frame into the graph's buffer source. */
	ret = av_buffersrc_add_frame(m_buffersrc_ctx, src);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
		return NULL;
	}

	AVFrame *_pFrameOut = av_frame_alloc();
	if (NULL == _pFrameOut)
	{
		return NULL;
	}
	_pFrameOut->width = m_codecContext->width;
	_pFrameOut->format = m_codecContext->pix_fmt;
	_pFrameOut->height = m_codecContext->height;

	/* Pull the filtered frame from the sink; < 0 includes AVERROR(EAGAIN)
	 * when the graph has no output ready yet. */
	ret = av_buffersink_get_frame_flags(m_buffersink_ctx, _pFrameOut, 0);
	if (ret < 0)
	{
		av_frame_free(&_pFrameOut);   /* fix: don't leak on failure */
		return NULL;
	}

	return _pFrameOut;
}



/* Usage sketch: build the filter once, then run each decoded frame through it.
 * NOTE(review): init_filter's return value is not checked here — a failed
 * graph setup would only surface later as a NULL filterFrame; verify in the
 * real caller.  PIC is presumably the filtergraph description string (e.g. a
 * drawtext/overlay expression) — confirm against its definition. */
filter = new string_filter;

filter->init_filter(dec_ctx, PIC);

/* Reset the output packet before encoding the filtered frame. */
en_packet.size =0;
en_packet.data =NULL;
filterFrame = filter->get_filter_frame(deFrame);
if(filterFrame == NULL){
	_ERROR("get filter frame error!");
}

评论

Your browser is out-of-date!

Update your browser to view this website correctly. Update my browser now

×