
Vivado HLS: Optimization Through a Worked Example

Although I have finished reading the UG902 material on Vivado HLS optimization, I only half-digested it, so I want to work through a real example and explore how to apply the optimization directives to get the best performance!

I. The initial example

#ifndef _XIANGANWO3_H_
#define _XIANGANWO3_H_
#include "hls_video.h"

// maximum image size
#define MAX_WIDTH  1936
#define MAX_HEIGHT 1456

// typedef video library core structures
typedef hls::stream<ap_axiu<32,1,1,1> >               AXI_STREAM;
typedef hls::Scalar<3, unsigned char>                 RGB_PIXEL;
typedef hls::Scalar<1, unsigned char>                 GRAY_PIXEL;
typedef hls::Mat<MAX_HEIGHT, MAX_WIDTH, HLS_8UC3>     RGB_IMAGE;
typedef hls::Mat<MAX_HEIGHT, MAX_WIDTH, HLS_8UC1>     GRAY_IMAGE;

// top level function for HW synthesis
int hls_XiangAnWO3(AXI_STREAM& src_axi,AXI_STREAM& src_axi1,AXI_STREAM& src_axi2,AXI_STREAM& dst_axi, int rows, int cols,unsigned char  model[16777216]);

#endif
           
#include "XiangAnWO3.h"

void FluoDetect(RGB_IMAGE& srcImage,GRAY_IMAGE& FluoImage,unsigned char  model[16777216])
{
	RGB_IMAGE  img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img2(MAX_HEIGHT, MAX_WIDTH);

#pragma HLS dataflow
	hls::Scale(srcImage,img,1.1);

	loop_height: for (int i = 0; i < MAX_HEIGHT; i++) {
	    loop_width: for (int j = 0; j < MAX_WIDTH; j++) {
#pragma HLS loop_flatten off
#pragma HLS pipeline II=1

	    	RGB_PIXEL src_data;
	    	GRAY_PIXEL dst_data(0);
	    	img>>src_data;

	    	unsigned char B = src_data.val[0];
	    	unsigned char G = src_data.val[1];
	    	unsigned char R = src_data.val[2];

	    	// pack B, G, R into a 24-bit index (256*256*256 = 16,777,216 entries) into the model lookup table
	    	int rgbpixels = R + G * 256 + B * 256 * 256;
	    	unsigned char rgbelement = model[rgbpixels];

	    	dst_data.val[0]= (rgbelement > 0)?   (unsigned char)255: 0;

	    	img1 << dst_data;
	    }
	}

	hls::Dilate(img1,img2);
	hls::Erode(img2,FluoImage);
}

void FindTarget(RGB_IMAGE& srcImage,GRAY_IMAGE& dstImage)
{
	RGB_IMAGE   img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img2(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img3(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img4(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img5(MAX_HEIGHT, MAX_WIDTH);

#pragma HLS dataflow
	hls::Scale(srcImage,img,1.5);
	hls::CvtColor<HLS_BGR2GRAY>(img,img1);
	hls::Threshold(img1,img2,38,255,HLS_THRESH_BINARY_INV);

	hls::Erode(img2,img3);
	hls::Erode(img3,img4);
	hls::Erode(img4,img5);
	hls::Sobel<1,0,3>(img5,dstImage);

}

void Composition(GRAY_IMAGE& srcImage1,GRAY_IMAGE& srcImage2,RGB_IMAGE& srcImage3,RGB_IMAGE& dstImage)
{
	loop_height: for (int i = 0; i < MAX_HEIGHT; i++) {
	loop_width: for (int j = 0; j < MAX_WIDTH; j++) {
#pragma HLS loop_flatten off
#pragma HLS pipeline II=1

				GRAY_PIXEL src_data1(0),src_data2(0);
				RGB_PIXEL dst_data(0,0,0);

				srcImage3 >>dst_data;

				srcImage1>>src_data1;
				srcImage2>>src_data2;
				unsigned char data1=src_data1.val[0];
				unsigned char data2=src_data2.val[0];

				if(data1==255)
				{
					dst_data.val[0]=0;
					dst_data.val[1]=0;
					dst_data.val[2]=255;
				}
				else if(data2==255)
				{
					dst_data.val[0]=255;
					dst_data.val[1]=255;
					dst_data.val[2]=255;
				}

				dstImage << dst_data;
		}
	}
}

int hls_XiangAnWO3(AXI_STREAM& src_axi, AXI_STREAM& src_axi1,AXI_STREAM& src_axi2,AXI_STREAM& dst_axi, int rows, int cols,unsigned char  model[16777216])
{
    //Create AXI streaming interfaces for the core
    #pragma HLS INTERFACE axis port=src_axi
	#pragma HLS INTERFACE axis port=dst_axi
	#pragma HLS INTERFACE axis port=src_axi1
	#pragma HLS INTERFACE axis port=src_axi2

	#pragma HLS RESOURCE core=AXI_SLAVE variable=rows   metadata="-bus_bundle CONTROL_BUS"
	#pragma HLS RESOURCE core=AXI_SLAVE variable=cols   metadata="-bus_bundle CONTROL_BUS"
	#pragma HLS RESOURCE core=AXI_SLAVE variable=return metadata="-bus_bundle CONTROL_BUS"

	#pragma HLS INTERFACE ap_stable port=rows
	#pragma HLS INTERFACE ap_stable port=cols

	RGB_IMAGE   img_0(rows, cols);
	RGB_IMAGE   img_1(rows, cols);
	GRAY_IMAGE  img_2(rows, cols);
	GRAY_IMAGE  img_3(rows, cols);
	RGB_IMAGE   img_4(rows, cols);
	RGB_IMAGE   img_5(rows, cols);

#pragma HLS dataflow
	hls::AXIvideo2Mat(src_axi, img_0);
	hls::AXIvideo2Mat(src_axi1, img_1);
	hls::AXIvideo2Mat(src_axi2, img_4);

	FluoDetect(img_0,img_2,model);
	FindTarget(img_1,img_3);

	Composition(img_2,img_3,img_4,img_5);

	hls::Mat2AXIvideo(img_5, dst_axi);

    return (int)0;
}
           

The performance report is:

[screenshot of the Vivado HLS synthesis performance report]

It takes roughly 28.5 ms!
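
A quick sanity check on that number (my own arithmetic, assuming the common 100 MHz target clock, which the post does not state): with II=1 the pipelined loops produce one pixel per cycle, so a full frame needs at least MAX_HEIGHT x MAX_WIDTH = 1456 x 1936 = 2,818,816 cycles, about 28.2 ms at 10 ns per cycle. That is consistent with the reported 28.5 ms once pipeline fill and the video-library stages are added.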

1. First optimization: arbitrary-precision C++ types ------ reduce resource area

#ifndef _XIANGANWO3_H_
#define _XIANGANWO3_H_

#include "hls_video.h"
#include <ap_int.h>
#include <fstream>
#include <assert.h>   // for the assert() calls on the variable loop bounds below

typedef ap_uint<1> uint1;
typedef ap_uint<11> uint11;

// maximum image size
#define MAX_WIDTH  1936
#define MAX_HEIGHT 1456

// typedef video library core structures
typedef hls::stream<ap_axiu<32,1,1,1> >               AXI_STREAM;
typedef hls::Scalar<3, unsigned char>                 RGB_PIXEL;
typedef hls::Scalar<1, unsigned char>                 GRAY_PIXEL;
typedef hls::Mat<MAX_HEIGHT, MAX_WIDTH, HLS_8UC3>     RGB_IMAGE;
typedef hls::Mat<MAX_HEIGHT, MAX_WIDTH, HLS_8UC1>     GRAY_IMAGE;

// top level function for HW synthesis
uint1 hls_XiangAnWO3(AXI_STREAM& src_axi,AXI_STREAM& src_axi1,AXI_STREAM& src_axi2,AXI_STREAM& dst_axi, uint11 rows, uint11 cols,uint1 model[16777216]);

#endif
           
#include "XiangAnWO3.h"

void FluoDetect(RGB_IMAGE& srcImage,GRAY_IMAGE& FluoImage,uint11 rows,uint11 cols,uint1 model[16777216])
{
#pragma HLS UNROLL

#pragma HLS ARRAY_RESHAPE variable=model block factor=64   // pack 64 of the 1-bit model entries into each wider memory word

	RGB_IMAGE  img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img2(MAX_HEIGHT, MAX_WIDTH);

#pragma HLS dataflow
	hls::Scale(srcImage,img,1.1);

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);
	loop_height: for (uint11 i = 0; i < rows; i++) {
	    loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS pipeline II=1
#pragma HLS DEPENDENCE variable=model inter false   // model is only read here, so no loop-carried dependence blocks II=1

	    	RGB_PIXEL src_data;
	    	GRAY_PIXEL dst_data(0);

	    	img>>src_data;

	    	unsigned char B = src_data.val[0];
	    	unsigned char G = src_data.val[1];
	    	unsigned char R = src_data.val[2];

	    	int rgbpixels = R + G * 256 + B * 256 * 256;
	    	uint1 rgbelement = model[rgbpixels];

	    	dst_data.val[0]= rgbelement*255;

	    	img1 << dst_data;
	    }
	}

	hls::Dilate(img1,img2);
	hls::Erode(img2,FluoImage);
}

void FindTarget(RGB_IMAGE& srcImage,GRAY_IMAGE& dstImage)
{
	RGB_IMAGE   img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img2(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img3(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img4(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE  img5(MAX_HEIGHT, MAX_WIDTH);

#pragma HLS dataflow
	hls::Scale(srcImage,img,1.5);
	hls::CvtColor<HLS_BGR2GRAY>(img,img1);
	hls::Threshold(img1,img2,38,255,HLS_THRESH_BINARY_INV);

	hls::Erode(img2,img3);
	hls::Erode(img3,img4);
	hls::Erode(img4,img5);
	hls::Sobel<1,0,3>(img5,dstImage);
}

void Composition(GRAY_IMAGE& srcImage1,GRAY_IMAGE& srcImage2,RGB_IMAGE& srcImage3,RGB_IMAGE& dstImage,uint11 rows,uint11 cols)
{
#pragma HLS UNROLL

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);

	loop_height: for (uint11 i = 0; i < rows; i++) {
	loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS pipeline II=1

				GRAY_PIXEL src_data1(0),src_data2(0);
				RGB_PIXEL dst_data(0,0,0);

				srcImage3 >>dst_data;

				srcImage1>>src_data1;
				srcImage2>>src_data2;
				unsigned char data1=src_data1.val[0];
				unsigned char data2=src_data2.val[0];


				if(data1==255)
				{
					dst_data.val[0]=0;
					dst_data.val[1]=0;
					dst_data.val[2]=255;
				}
				else if(data2==255)
				{
					dst_data.val[0]=255;
					dst_data.val[1]=255;
					dst_data.val[2]=255;
				}

				dstImage << dst_data;
		}
	}

}

uint1 hls_XiangAnWO3(AXI_STREAM& src_axi, AXI_STREAM& src_axi1,AXI_STREAM& src_axi2,AXI_STREAM& dst_axi, uint11 rows, uint11 cols,uint1 model[16777216])
{
    //Create AXI streaming interfaces for the core
    #pragma HLS INTERFACE axis port=src_axi
	#pragma HLS INTERFACE axis port=dst_axi
	#pragma HLS INTERFACE axis port=src_axi1
	#pragma HLS INTERFACE axis port=src_axi2

	#pragma HLS RESOURCE core=AXI_SLAVE variable=rows   metadata="-bus_bundle CONTROL_BUS"
	#pragma HLS RESOURCE core=AXI_SLAVE variable=cols   metadata="-bus_bundle CONTROL_BUS"
	#pragma HLS RESOURCE core=AXI_SLAVE variable=return metadata="-bus_bundle CONTROL_BUS"

	#pragma HLS INTERFACE ap_stable port=rows
	#pragma HLS INTERFACE ap_stable port=cols

	RGB_IMAGE   img_0(rows, cols);
	RGB_IMAGE   img_1(rows, cols);
	GRAY_IMAGE  img_2(rows, cols);
	GRAY_IMAGE  img_3(rows, cols);
	RGB_IMAGE   img_4(rows, cols);
	RGB_IMAGE   img_5(rows, cols);

#pragma HLS dataflow
	hls::AXIvideo2Mat(src_axi, img_0);
	hls::AXIvideo2Mat(src_axi1, img_1);
	hls::AXIvideo2Mat(src_axi2, img_4);

	FluoDetect(img_0,img_2,rows,cols,model);
	FindTarget(img_1,img_3);

	Composition(img_2,img_3,img_4,img_5,rows,cols);

	hls::Mat2AXIvideo(img_5, dst_axi);

    return (uint1)0;
}
           

The synthesis report is:

[screenshot of the Vivado HLS synthesis report]

Although the latency did not go down, FF and LUT usage dropped a lot, by roughly 100 each!
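
For context (this reasoning is mine, not spelled out in the post): ap_uint<11> covers 0..2047, so it is wide enough for both loop counters (MAX_HEIGHT = 1456, MAX_WIDTH = 1936), and ap_uint<1> is enough for one lookup-table entry, which is where the narrower types save flip-flops and LUTs. A minimal standalone sketch:

#include <ap_int.h>

typedef ap_uint<1>  uint1;   // binary mask entry: 0 or 1 (was unsigned char = 8 bits)
typedef ap_uint<11> uint11;  // 0..2047, enough for rows <= 1456 and cols <= 1936 (was int = 32 bits)

int main() {
    uint11 rows = 1456, cols = 1936;                 // both values fit in 11 bits
    uint1  hit  = 1;
    unsigned char pixel = (unsigned char)(hit * 255); // same mapping as rgbelement*255 in the code above
    return (rows == 1456 && cols == 1936 && pixel == 255) ? 0 : 1;
}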

2. Second optimization: global variables and loop-bound optimization ------ reduce latency

void FluoDetect(RGB_IMAGE& srcImage,GRAY_IMAGE& FluoImage,uint11 rows,uint11 cols,uint1 model[16777216])
{
#pragma HLS UNROLL
#pragma HLS ARRAY_RESHAPE variable=model block factor=64

	RGB_IMAGE  img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img2(MAX_HEIGHT, MAX_WIDTH);

#pragma HLS dataflow
	hls::Scale(srcImage,img,1.1);

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);
	loop_height: for (uint11 i = 0; i < rows; i++) {
	    loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS pipeline II=1
#pragma HLS DEPENDENCE variable=model inter false

	    	RGB_PIXEL src_data;
	    	GRAY_PIXEL dst_data(0);

	    	img>>src_data;

	    	unsigned char B = src_data.val[0];
	    	unsigned char G = src_data.val[1];
	    	unsigned char R = src_data.val[2];

	    	int rgbpixels = R + G * 256 + B * 256 * 256;
	    	uint1 rgbelement = model[rgbpixels];

	    	dst_data.val[0]= rgbelement*255;

	    	img1 << dst_data;
	    }
	}

	hls::Dilate(img1,img2);
	hls::Erode(img2,FluoImage);

}

void Composition(GRAY_IMAGE& srcImage1,GRAY_IMAGE& srcImage2,RGB_IMAGE& srcImage3,RGB_IMAGE& dstImage,uint11 rows,uint11 cols)
{
#pragma HLS UNROLL

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);
	loop_height: for (uint11 i = 0; i < rows; i++) {
	loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS pipeline II=1

				GRAY_PIXEL src_data1(0),src_data2(0);
				RGB_PIXEL dst_data(0,0,0);

				srcImage3 >>dst_data;
				srcImage1>>src_data1;
				srcImage2>>src_data2;
				unsigned char data1=src_data1.val[0];
				unsigned char data2=src_data2.val[0];

				if(data1==255)
				{
					dst_data.val[0]=0;
					dst_data.val[1]=0;
					dst_data.val[2]=255;
				}
				else if(data2==255)
				{
					dst_data.val[0]=255;
					dst_data.val[1]=255;
					dst_data.val[2]=255;
				}

				dstImage << dst_data;
		}
	}
}
           

After synthesis, the report showed no reduction in latency! It is still 28.5 ms!
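
One note that is not in the original post: when the loop bounds become variables such as rows and cols, UG902 recommends adding a LOOP_TRIPCOUNT directive so the latency report is computed from realistic iteration counts; the directive only affects reporting, not the generated hardware. A minimal sketch (a hypothetical helper, just to show where the directives go):

void count_pixels(uint11 rows, uint11 cols, int& total)
{
	int n = 0;
	loop_height: for (uint11 i = 0; i < rows; i++) {
#pragma HLS LOOP_TRIPCOUNT min=1 max=1456
	    loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS LOOP_TRIPCOUNT min=1 max=1936
#pragma HLS pipeline II=1
	        n++;   // stands in for the real per-pixel work
	    }
	}
	total = n;
}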

3. Third optimization: DATAFLOW ----- reduce latency

void FluoDetect(RGB_IMAGE& srcImage,GRAY_IMAGE& FluoImage,uint11 rows,uint11 cols,uint1 model[16777216])
{
#pragma HLS UNROLL
#pragma HLS ARRAY_RESHAPE variable=model block factor=64

	RGB_IMAGE  img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img2(MAX_HEIGHT, MAX_WIDTH);

	hls::Scale(srcImage,img,1.1);

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);
	loop_height: for (uint11 i = 0; i < rows; i++) {
	    loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS pipeline II=1
#pragma HLS DEPENDENCE variable=model inter false
#pragma HLS dataflow
	    	RGB_PIXEL src_data;
	    	GRAY_PIXEL dst_data(0);
	    	img>>src_data;

	    	unsigned char B = src_data.val[0];
	    	unsigned char G = src_data.val[1];
	    	unsigned char R = src_data.val[2];
	    	int rgbpixels = R + G * 256 + B * 256 * 256;
	    	uint1 rgbelement = model[rgbpixels];

	    	dst_data.val[0]= rgbelement*255;
	    	img1 << dst_data;
	    }
	}
	hls::Dilate(img1,img2);
	hls::Erode(img2,FluoImage);
}

void Composition(GRAY_IMAGE& srcImage1,GRAY_IMAGE& srcImage2,RGB_IMAGE& srcImage3,RGB_IMAGE& dstImage,uint11 rows,uint11 cols)
{
#pragma HLS UNROLL

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);

	loop_height: for (uint11 i = 0; i < rows; i++) {
	loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS pipeline II=1
#pragma HLS dataflow
				GRAY_PIXEL src_data1(0),src_data2(0);
				RGB_PIXEL dst_data(0,0,0);

				srcImage3 >>dst_data;
				srcImage1>>src_data1;
				srcImage2>>src_data2;
				unsigned char data1=src_data1.val[0];
				unsigned char data2=src_data2.val[0];

				if(data1==255)
				{
					dst_data.val[0]=0;
					dst_data.val[1]=0;
					dst_data.val[2]=255;
				}
				else if(data2==255)
				{
					dst_data.val[0]=255;
					dst_data.val[1]=255;
					dst_data.val[2]=255;
				}
				dstImage << dst_data;
		}
	}
}
           

Actually, during simulation I had accidentally written the wrong parameter in one place, and the simulation reported this warning:

'hls::stream<unsigned char>.1' is read while empty, which may result in RTL simulation hanging.
           

This happens when an hls::stream or hls::Mat variable is reused. As UG902 points out, these hls:: streaming types can only be consumed once as an input argument; once the data has been read, it is gone, so using the variable a second time produces this warning.
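
For completeness: when the same hls::Mat really is needed in two places, the video library provides hls::Duplicate, which is also what this post uses later inside Composition. A minimal sketch (my own example, assuming the typedefs from the header above):

void split_example(AXI_STREAM& in, AXI_STREAM& out1, AXI_STREAM& out2)
{
#pragma HLS dataflow
	RGB_IMAGE src(MAX_HEIGHT, MAX_WIDTH);
	RGB_IMAGE copy_a(MAX_HEIGHT, MAX_WIDTH);
	RGB_IMAGE copy_b(MAX_HEIGHT, MAX_WIDTH);

	hls::AXIvideo2Mat(in, src);
	hls::Duplicate(src, copy_a, copy_b);   // src is consumed exactly once; each copy is consumed once below
	hls::Mat2AXIvideo(copy_a, out1);
	hls::Mat2AXIvideo(copy_b, out2);
}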

After fixing that, simulation reported another warning:

simulation :warning:Hls::stream 'hls::stream<unsigned char>.33' contains leftover data, which may result in RTL simulation hanging.
           

I don't yet know what causes this warning!

Ignoring the warning for now, continuing on to synthesis produced this error:

...dataflow...conditional execution on /opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:648:37 is not supported
           

This is because conditional execution is not supported inside a DATAFLOW region: as soon as an if() statement appears there, the design cannot be synthesized!
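
The workaround used in the next section is to keep the stream write unconditional and fold the branches into a select, so every iteration executes the same statements. A tiny sketch of the idea with hypothetical names (the actual rewrite of Composition follows below; this assumes the includes from the header above):

void composite_pixel(hls::stream<unsigned char>& mask1_s,
                     hls::stream<unsigned char>& mask2_s,
                     hls::stream<unsigned char>& src_s,
                     hls::stream<unsigned char>& dst_s)
{
#pragma HLS pipeline II=1
	unsigned char m1 = mask1_s.read();
	unsigned char m2 = mask2_s.read();
	unsigned char px = src_s.read();
	// one select instead of if/else branches; exactly one write per call, never skipped
	unsigned char out_val = (m1 == 255) ? (unsigned char)0
	                      : (m2 == 255) ? (unsigned char)255
	                      : px;
	dst_s << out_val;
}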

4. Fourth optimization: DATAFLOW --- if branch --- multi-access --- reduce latency

I rewrote the part where the conditional statements had blocked the DATAFLOW optimization as follows:

void FluoDetect(RGB_IMAGE& srcImage,GRAY_IMAGE& FluoImage,uint11 rows,uint11 cols,uint1 model[16777216])
{
#pragma HLS UNROLL
#pragma HLS ARRAY_RESHAPE variable=model block factor=64

	RGB_IMAGE  img(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img1(MAX_HEIGHT, MAX_WIDTH);
	GRAY_IMAGE img2(MAX_HEIGHT, MAX_WIDTH);

	hls::Scale(srcImage,img,1.1);
	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);

	loop_height: for (uint11 i = 0; i < rows; i++) {
	    loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS PIPELINE II=1
#pragma HLS DEPENDENCE variable=model inter false
#pragma HLS DATAFLOW

	    	RGB_PIXEL src_data;
	    	GRAY_PIXEL dst_data(0);
	    	img>>src_data;

	    	unsigned char B = src_data.val[0];
	    	unsigned char G = src_data.val[1];
	    	unsigned char R = src_data.val[2];
	    	int rgbpixels = R + G * 256 + B * 256 * 256;
	    	uint1 rgbelement = model[rgbpixels];

	    	dst_data.val[0]= rgbelement*255;

	    	img1 << dst_data;
	    }
	}
	hls::Dilate(img1,img2);
	hls::Erode(img2,FluoImage);
}
           
void Composition(GRAY_IMAGE& srcImage1,GRAY_IMAGE& srcImage2,RGB_IMAGE& srcImage3,AXI_STREAM& dstImage,uint11 rows,uint11 cols)
{
#pragma HLS UNROLL

	// NOTE (assumption): AXI_STREAM8 is not defined in the header shown earlier; presumably it is a
	// typedef along the lines of hls::stream<ap_axiu<8,1,1,1> > for the 8-bit grayscale streams.
	AXI_STREAM8 src1;
	AXI_STREAM8 src2;
	RGB_IMAGE src3,src4;
	hls::Mat2AXIvideo(srcImage1, src1);
	hls::Mat2AXIvideo(srcImage2, src2);
	hls::Duplicate(srcImage3, src3,src4);

	assert(rows<=MAX_HEIGHT);
	assert(cols<=MAX_WIDTH);
	loop_height: for (uint11 i = 0; i < rows; i++) {
	loop_width: for (uint11 j = 0; j < cols; j++) {
#pragma HLS DATAFLOW
#pragma HLS PIPELINE II=1

				RGB_PIXEL dst_data(0,0,0);
				src3 >>dst_data;
				ap_uint<8> data1=src1.read().data;
				ap_uint<8> data2=src2.read().data;

				// Branch-free version of the commented-out if/else below
				// (assumes data1 and data2 are either 0 or 255 after thresholding):
				//   data1==255 -> (B,G,R) = (0,0,255), red   (fluorescence mask)
				//   data2==255 -> (255,255,255), white       (target edge)
				//   otherwise  -> keep the original pixel
				dst_data.val[0]=(1-data1/255)*data2+(!data1)*(!data2)*dst_data.val[0];
				dst_data.val[1]=(1-data1/255)*data2+(!data1)*(!data2)*dst_data.val[1];
				dst_data.val[2]=255*(data1 || data2)+(!data1)*(!data2)*dst_data.val[2];
				src4 << dst_data;

//				if(data1==255)
//				{
//					dst_data.val[0]=0;
//					dst_data.val[1]=0;
//					dst_data.val[2]=255;
//					src4 << dst_data;
//				}
//				else if(data2==255)
//				{
//					dst_data.val[0]=255;
//					dst_data.val[1]=255;
//					dst_data.val[2]=255;
//					src4 << dst_data;
//				}

		}
	}

	hls::Mat2AXIvideo(src4, dstImage);
}
           

The commented-out lines are the conditional statements that caused the earlier error. The DATAFLOW optimization is really strict about conditionals: as soon as an if() appears in its scope, the optimization can no longer be applied there. So if you want to use this optimization, don't use conditional statements. I also rewrote the if in the other function into a branch-free form.

At the same time, I also changed the repeated accesses to the arguments to go through a local cache!

After synthesis, a new error appeared. Judging from the messages, the function I just changed now gets the optimization applied successfully, and the errors come from the other function:

INFO: [XFORM 203-721] Extract dataflow region from loop loop_width (Xiangan_wd/src/XiangAnWO3.cpp:82)  of function 'Composition'.
INFO: [XFORM 203-721] Extract dataflow region from loop loop_width (Xiangan_wd/src/XiangAnWO3.cpp:19)  of function 'FluoDetect'.
WARNING: [XFORM 203-713] Disabling dataflow in loop loop_width (Xiangan_wd/src/XiangAnWO3.cpp:19)  of function 'FluoDetect' .
WARNING: [XFORM 203-713] Disabling dataflow in loop loop_width (Xiangan_wd/src/XiangAnWO3.cpp:19)  of function 'FluoDetect' .
INFO: [XFORM 203-712] Store statement on variable  'tmp.3516' in a dataflow region ( 'dataflow_in_loop_loop_width' (/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:83:37)) is synthesized to a separate process, please move it inside another function for better QoR.
INFO: [XFORM 203-712] Store statement on variable  'tmp.3516' in a dataflow region ( 'dataflow_in_loop_loop_width' (/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:83:37)) is synthesized to a separate process, please move it inside another function for better QoR.
INFO: [XFORM 203-712] Store statement on variable  'tmp.3516' in a dataflow region ( 'dataflow_in_loop_loop_width' (/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:83:37)) is synthesized to a separate process, please move it inside another function for better QoR.
INFO: [XFORM 203-712] Store statement on variable  'tmp.312' in a dataflow region ( 'dataflow_in_loop_loop_width403' (/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:22:37)) is synthesized to a separate process, please move it inside another function for better QoR.
WARNING: [XFORM 203-713] Reading dataflow channel 'model.V' in the middle of dataflow may stall the dataflow pipeline:
WARNING: [XFORM 203-713] Argument 'model.V' has read operations in process function '__/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h_line648_proc' (/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:33:37).
WARNING: [XFORM 203-713] Reading dataflow channel 'model.V' (Xiangan_wd/src/XiangAnWO3.cpp:117) in the middle of dataflow may stall the dataflow pipeline:
WARNING: [XFORM 203-713] Argument 'model.V' has read operations in process function 'FluoDetect' (Xiangan_wd/src/XiangAnWO3.cpp:4).
           
WARNING: [XFORM 203-713] Reading dataflow channel 'model.V' in the middle of dataflow may stall the dataflow pipeline:
Reading dataflow channel 'model.V' in the middle of dataflow may stall the dataflow pipeline:
INFO: [XFORM 203-712] Store statement on variable  'tmp.3516' in a dataflow region ( 'dataflow_in_loop_loop_width' (/opt/Xilinx/Vivado/2017.4/common/technology/autopilot/hls/hls_video_core.h:83:37)) is synthesized to a separate process, please move it inside another function for better QoR.
ERROR: [XFORM 203-801] Only one data field is allowed in AXI-Stream mode, however there are 3 data fields: srcImage3.data_stream[1].V srcImage3.data_stream[2].V dstImage.V.data.V
           

You can see that "Extract dataflow region from ... of function 'Composition'" produced no complaints, but "Extract dataflow region from ... of function 'FluoDetect'" produced warnings. Looking at that function, the multiple accesses to the model argument seem to be the problem: UG902 says not to access a top-level argument multiple times, and if you have to, do it through a local cache. So, whatever else happens, I'll fix that first.
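
For reference, the UG902 "local cache" pattern means reading a top-level argument once into a local variable (or local array) and then working only on that copy inside the loop. A minimal sketch with a hypothetical scalar argument (the 16M-entry model[] table obviously needs a different strategy, such as copying it into local memory once per frame, so this only illustrates the principle):

void thresh_cache(hls::stream<unsigned char>& in, hls::stream<unsigned char>& out,
                  unsigned char limit, int n)
{
	unsigned char local_limit = limit;        // single read of the argument
	for (int i = 0; i < n; i++) {
#pragma HLS pipeline II=1
		unsigned char v = in.read();
		out << (unsigned char)((v > local_limit) ? 255 : 0);   // reuse the cached copy
	}
}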

5. Fifth optimization ---

First, take a look at https://forums.xilinx.com/t5/Vivado/%E8%B7%9F-Xilinx-SAE-%E5%AD%A6-HLS-%E6%8C%81%E7%BB%AD%E6%9B%B4%E6%96%B0-%E4%B8%AD%E6%96%87%E8%AE%B2%E8%A7%A3/m-p/708179 , which is an HLS optimization video series.

http://www.openhw.org/module/forum/forum.php?mod=viewthread&tid=595792&highlight=HLS%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0 

http://www.openhw.org/module/forum/forum.php?mod=viewthread&tid=595819&highlight=HLS%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0

http://www.openhw.org/module/forum/forum.php?mod=viewthread&tid=595929&highlight=HLS%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0

http://www.openhw.org/module/forum/forum.php?mod=viewthread&tid=658891&highlight=Vivado%2BHLS

http://www.openhw.org/module/forum/forum.php?mod=viewthread&tid=658879&highlight=Vivado%2BHLS

http://www.openhw.org/module/forum/forum.php?mod=viewthread&tid=659217&highlight=Vivado%2BHLS

These are all worked examples of HLS optimization. Extremely useful, and very well written!

I have read through them, but haven't tried them hands-on yet; I'll find time to work through the examples from these links myself.