天天看点

10.3.1 3D对象识别的全局假设验证

10.3.1 3D对象识别的全局假设验证

1.本节学习在高噪声和严重遮挡的场景中,如何通过模型假设验证进行复杂环境下的3D目标识别。

2.代码

//1.  对模型点云、场景点云分别进行下采样,得到稀疏关键点;
//2.  对模型点云、场景点云关键点,分别计算描述子;
//3.  利用KdTreeFLANN搜索对应点对;
//4.  使用【对应点聚类算法】将【对应点对】聚类为待识别模型;
//5.  返回识别到的每一个模型的变换矩阵(旋转矩阵 + 平移矩阵),以及对应点对聚类结果

// C++ standard library
#include <cmath>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// PCL
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/correspondence.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/shot_omp.h>
#include <pcl/features/board.h>
#include <pcl/keypoints/uniform_sampling.h>
#include <pcl/recognition/cg/hough_3d.h>
#include <pcl/recognition/cg/geometric_consistency.h>
#include <pcl/recognition/hv/hv_go.h>
#include <pcl/registration/icp.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/kdtree/kdtree_flann.h>
//#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/common/transforms.h> 
#include <pcl/console/parse.h>

// Boost
#include <boost/thread/thread.hpp>

using namespace std;
using namespace pcl;

// Convenience aliases for the point, normal, local-reference-frame and
// descriptor types used throughout this example.
typedef PointXYZRGBA PointType;
typedef Normal NormalType;
typedef ReferenceFrame RFType;      // BOARD local reference frame
typedef SHOT352 DescriptorType;     // 352-bin SHOT descriptor

// Rendering style for one cloud in the PCL visualizer: an RGB colour
// (0-255 per channel) plus the point size passed to
// setPointCloudRenderingProperties().
struct CloudStyle 
{
	double r;
	double g;
	double b;
	double size;

	CloudStyle(double red, double green, double blue, double point_size)
		: r(red), g(green), b(blue), size(point_size)
	{
	}
};
// Predefined styles used by the visualization section of main().
CloudStyle style_white(255.0,255.0,255.0,4.0);   // off-scene model cloud
CloudStyle style_red(255.0,0.0,0.0,3.0);         // raw hypothesis instances
CloudStyle style_green(0.0,255.0,0.0,5.0);       // instances verified as good
CloudStyle style_cyan(93.0,200.0,217.0,4.0);     // instances rejected by verification
CloudStyle style_violet(255.0,0.0,255.0,8.0);    // matched (good) keypoints

//string model_filename_;
//string scene_filename_;

// Algorithm parameters
bool show_keypoints_(false);        // also draw the off-scene model and matched keypoints
bool use_hough_(true);              // Hough3D grouping; else geometric consistency (commented out below)
float model_ss_(0.02f);             // model uniform-sampling radius
float scene_ss_(0.02f);             // scene uniform-sampling radius
float rf_rad_(0.015f);              // BOARD local-reference-frame search radius
float descr_rad_(0.02f);            // SHOT descriptor search radius
float cg_size_(0.01f);              // Hough bin size (correspondence-grouping consensus size)
float cg_thresh_(5.0f);             // Hough vote threshold
int icp_max_iter_(5);               // max ICP iterations per hypothesis
float icp_corr_distance_(0.005f);   // ICP max correspondence distance
float hv_clutter_reg_(5.0f);        // hypothesis verification: clutter regularizer
float hv_inlier_th_(0.005f);        // hypothesis verification: inlier threshold
float hv_occlusion_th_(0.01f);      // hypothesis verification: occlusion threshold
float hv_rad_clutter_(0.03f);       // hypothesis verification: clutter radius
float hv_regularizer_(3.0f);        // hypothesis verification: regularizer
float hv_rad_normals_(0.05);        // hypothesis verification: normals radius (double literal narrows to float)
bool hv_detect_clutter_(true);      // hypothesis verification: enable clutter detection

// 3D object recognition with global hypothesis verification.
// Pipeline:
//   1. Uniformly downsample model and scene clouds into sparse keypoints.
//   2. Compute SHOT descriptors at the keypoints.
//   3. Match scene descriptors to model descriptors via KdTreeFLANN.
//   4. Cluster correspondences into model-instance hypotheses (Hough 3D).
//   5. Refine each hypothesis with ICP, then prune false positives with
//      global hypothesis verification; visualize the result.
// Returns 0 in all cases.
int main() {
	PointCloud<PointType>::Ptr model(new PointCloud<PointType>());
	PointCloud<PointType>::Ptr model_keypoints(new PointCloud<PointType>());
	PointCloud<PointType>::Ptr scene(new PointCloud<PointType>());
	PointCloud<PointType>::Ptr scene_keypoints(new PointCloud<PointType>());
	PointCloud<NormalType>::Ptr model_normals(new PointCloud<NormalType>());
	PointCloud<NormalType>::Ptr scene_normals(new PointCloud<NormalType>());
	PointCloud<DescriptorType>::Ptr model_descriptors(new PointCloud<DescriptorType>());
	PointCloud<DescriptorType>::Ptr scene_descriptors(new PointCloud<DescriptorType>());

	// Load the model and scene point clouds.
	// NOTE(review): return values are ignored — a missing file silently
	// leaves the cloud empty.
	io::loadPCDFile("D:\\pcd\\phone\\8Hypothesis_Verification_for_3D_Object_Recognition\\milk_color.pcd", *model);
	io::loadPCDFile("D:\\pcd\\phone\\8Hypothesis_Verification_for_3D_Object_Recognition\\milk_cartoon_all_small_clorox.pcd", *scene);

	// Estimate per-point normals using the 10 nearest neighbours.
	NormalEstimationOMP<PointType, NormalType> norm_est;
	norm_est.setKSearch(10);
	norm_est.setInputCloud(model);
	norm_est.compute(*model_normals);
	norm_est.setInputCloud(scene);
	norm_est.compute(*scene_normals);

	// Extract keypoints by uniform resampling: UniformSampling builds a
	// voxel grid over the cloud and keeps one representative point per
	// voxel (approximating the voxel centroid rather than its centre,
	// which is slower but closer to the actual sampled surface).
	UniformSampling<PointType> uniform_sampling;
	uniform_sampling.setInputCloud(model);
	uniform_sampling.setRadiusSearch(model_ss_);
	uniform_sampling.filter(*model_keypoints);
	cout << "Model total points:" << model->size() << ";select keypoints:" << model_keypoints->size() << endl;

	uniform_sampling.setInputCloud(scene);
	uniform_sampling.setRadiusSearch(scene_ss_);
	uniform_sampling.filter(*scene_keypoints);
	cout << "Scene total points: " << scene->size() << ";Selected Keypoints: " << scene_keypoints->size() << endl;

	// Compute SHOT descriptors at the keypoints, using the full cloud as
	// the search surface so neighbourhoods are dense.
	SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
	descr_est.setRadiusSearch(descr_rad_);
	descr_est.setInputCloud(model_keypoints);
	descr_est.setInputNormals(model_normals);
	descr_est.setSearchSurface(model);
	descr_est.compute(*model_descriptors);

	descr_est.setInputCloud(scene_keypoints);
	descr_est.setInputNormals(scene_normals);
	descr_est.setSearchSurface(scene);
	descr_est.compute(*scene_descriptors);

	// Find model-scene correspondences with a KdTree over descriptor space:
	// for every scene descriptor, take its single nearest model descriptor.
	CorrespondencesPtr model_scene_corrs(new Correspondences());
	KdTreeFLANN<DescriptorType> match_search;
	match_search.setInputCloud(model_descriptors);
	vector<int> model_good_keypoints_indices;
	vector<int> scene_good_keypoints_indices;
	for (size_t i = 0; i < scene_descriptors->size(); ++i)
	{
		vector<int> neigh_indices(1);       // index of the nearest neighbour
		vector<float> neigh_sqr_dists(1);   // FIX: size was the double literal 1.0
		if (!std::isfinite(scene_descriptors->at(i).descriptor[0]))  // skip NaN descriptors
		{
			continue;
		}
		int found_neighs = match_search.nearestKSearch(scene_descriptors->at(i), 1, neigh_indices, neigh_sqr_dists);
		// Accept the match only when the squared descriptor distance is
		// below 0.25 (SHOT distances generally fall in [0, 1]).
		if (found_neighs == 1 && neigh_sqr_dists[0] < 0.25f)
		{
			// query = model keypoint index, match = scene keypoint index.
			Correspondence corr(neigh_indices[0], static_cast<int>(i), neigh_sqr_dists[0]);
			model_scene_corrs->push_back(corr);
			model_good_keypoints_indices.push_back(corr.index_query);
			scene_good_keypoints_indices.push_back(corr.index_match);
		}
	}
	PointCloud<PointType>::Ptr model_good_kp(new PointCloud<PointType>());
	PointCloud<PointType>::Ptr scene_good_kp(new PointCloud<PointType>());
	copyPointCloud(*model_keypoints, model_good_keypoints_indices, *model_good_kp);
	copyPointCloud(*scene_keypoints, scene_good_keypoints_indices, *scene_good_kp);
	cout << "Correspondences found:" << model_scene_corrs->size() << endl;

	// Cluster the correspondences into candidate model instances. Each
	// cluster yields a 4x4 rigid transform (rotation + translation) and
	// the correspondences supporting it.
	vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>> rototranslations;
	vector<Correspondences> clustered_corrs;

	if (use_hough_)
	{
		// Hough3D grouping needs a local reference frame (LRF) per keypoint.
		PointCloud<RFType>::Ptr model_rf(new PointCloud<RFType>());
		PointCloud<RFType>::Ptr scene_rf(new PointCloud<RFType>());

		BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
		rf_est.setFindHoles(true);
		rf_est.setRadiusSearch(rf_rad_);  // neighbourhood radius for LRF estimation

		rf_est.setInputCloud(model_keypoints);
		rf_est.setInputNormals(model_normals);
		rf_est.setSearchSurface(model);
		rf_est.compute(*model_rf);

		rf_est.setInputCloud(scene_keypoints);
		rf_est.setInputNormals(scene_normals);
		rf_est.setSearchSurface(scene);
		rf_est.compute(*scene_rf);

		// Cluster votes in Hough space.
		Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
		clusterer.setHoughBinSize(cg_size_);      // Hough-space bin size (0.01)
		clusterer.setHoughThreshold(cg_thresh_);  // min votes for an instance (5)
		clusterer.setUseInterpolation(true);      // interpolate votes between bins
		clusterer.setUseDistanceWeight(false);    // do not weight votes by distance

		clusterer.setInputCloud(model_keypoints);
		clusterer.setInputRf(model_rf);           // model LRFs
		clusterer.setSceneCloud(scene_keypoints);
		clusterer.setSceneRf(scene_rf);           // scene LRFs
		clusterer.setModelSceneCorrespondences(model_scene_corrs);
		clusterer.recognize(rototranslations, clustered_corrs);
	}
	//else  // alternative: geometric-consistency grouping
	//{
	//	GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
	//	gc_clusterer.setGCSize(cg_size_);
	//	gc_clusterer.setGCThreshold(cg_thresh_);
	//	gc_clusterer.setInputCloud(model_keypoints);
	//	gc_clusterer.setSceneCloud(scene_keypoints);
	//	gc_clusterer.setModelSceneCorrespondences(model_scene_corrs);
	//	gc_clusterer.recognize(rototranslations, clustered_corrs);
	//}

	if (rototranslations.empty())
	{
		cout << "*** No instances found! ***" << endl;
		return (0);
	}
	cout << "Recognized Instances: " << rototranslations.size() << endl << endl;

	// Materialize each hypothesis: transform the model by the estimated
	// rigid transform so it can be compared against the scene.
	vector<PointCloud<PointType>::ConstPtr> instances;
	for (size_t i = 0; i < rototranslations.size(); i++)
	{
		PointCloud<PointType>::Ptr rotated_model(new PointCloud<PointType>());
		transformPointCloud(*model, *rotated_model, rototranslations[i]);
		instances.push_back(rotated_model);
	}

	// The clustering transforms are coarse; refine each hypothesis with a
	// few ICP iterations against the scene. Refined instances feed the
	// global hypothesis verification below.
	vector<PointCloud<PointType>::ConstPtr> registered_instances;
	cout << "-----ICP-------" << endl;
	for (size_t i = 0; i < rototranslations.size(); i++)
	{
		IterativeClosestPoint<PointType, PointType> icp;
		icp.setMaximumIterations(icp_max_iter_);
		icp.setMaxCorrespondenceDistance(icp_corr_distance_);
		icp.setInputTarget(scene);
		icp.setInputSource(instances[i]);
		PointCloud<PointType>::Ptr registered(new PointCloud<PointType>);
		icp.align(*registered);
		registered_instances.push_back(registered);
		cout << "Instance" << i << " " << endl;
		// hasConverged() is true when the source aligned onto the target
		// within the configured correspondence distance.
		if (icp.hasConverged())
		{
			cout << "Aligned" << icp.getFitnessScore() << endl;
		}
		else
		{
			cout << "Not Aligned!" << endl;
		}
	}
	cout << "-----------------" << endl << endl;

	// Global hypothesis verification: hypotheses_mask[i] is true iff
	// registered_instances[i] is accepted; rejected hypotheses are kept
	// only for visualization.
	cout << "--- Hypotheses Verification ---" << endl;
	vector<bool> hypotheses_mask;
	GlobalHypothesesVerification<PointType, PointType> GoHv;
	GoHv.setSceneCloud(scene);
	GoHv.addModels(registered_instances, true);
	GoHv.setInlierThreshold(hv_inlier_th_);
	GoHv.setOcclusionThreshold(hv_occlusion_th_);
	GoHv.setRegularizer(hv_regularizer_);  // FIX: hv_regularizer_ was declared but never applied
	GoHv.setRadiusClutter(hv_rad_clutter_);
	GoHv.setClutterRegularizer(hv_clutter_reg_);
	GoHv.setDetectClutter(hv_detect_clutter_);
	GoHv.setRadiusNormals(hv_rad_normals_);
	GoHv.verify();
	GoHv.getMask(hypotheses_mask);
	for (size_t i = 0; i < hypotheses_mask.size(); i++)
	{
		if (hypotheses_mask[i])
		{
			cout << "Instance " << i << " is GOOD! <---" << endl;
		}
		else
		{
			cout << "Instance " << i << " is bad!" << endl;
		}
	}
	cout << "-------------------------------" << endl;

	// Visualization: scene plus, shifted 1 m along -x, the model and its
	// matched keypoints; hypotheses in red, verified instances in green,
	// rejected ones in cyan.
	visualization::PCLVisualizer viewer("全局假设验证");
	viewer.setBackgroundColor(255, 255, 255);
	viewer.addPointCloud(scene, "scene_cloud");
	PointCloud<PointType>::Ptr off_scene_model(new PointCloud<PointType>());
	PointCloud<PointType>::Ptr off_scene_model_keypoints(new PointCloud<PointType>());
	PointCloud<PointType>::Ptr off_model_good_kp(new PointCloud<PointType>());
	pcl::transformPointCloud(*model, *off_scene_model, Eigen::Vector3f(-1, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));
	pcl::transformPointCloud(*model_keypoints, *off_scene_model_keypoints, Eigen::Vector3f(-1, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));
	pcl::transformPointCloud(*model_good_kp, *off_model_good_kp, Eigen::Vector3f(-1, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));

	if (show_keypoints_)  // off-scene model: white
	{
		CloudStyle modelStyle = style_white;
		visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_color_handler(off_scene_model, modelStyle.r, modelStyle.g, modelStyle.b);
		viewer.addPointCloud(off_scene_model, off_scene_model_color_handler, "off_scene_model");
		viewer.setPointCloudRenderingProperties(visualization::PCL_VISUALIZER_POINT_SIZE, modelStyle.size, "off_scene_model");
	}
	if (show_keypoints_)  // matched keypoints: violet
	{
		CloudStyle goodKeypointStyle = style_violet;
		visualization::PointCloudColorHandlerCustom<PointType> model_good_keypoints_color_handler(off_model_good_kp, goodKeypointStyle.r, goodKeypointStyle.g, goodKeypointStyle.b);
		viewer.addPointCloud(off_model_good_kp, model_good_keypoints_color_handler, "model_good_keypoints");
		viewer.setPointCloudRenderingProperties(visualization::PCL_VISUALIZER_POINT_SIZE, goodKeypointStyle.size, "model_good_keypoints");

		visualization::PointCloudColorHandlerCustom<PointType> scene_good_keypoints_color_handler(scene_good_kp, goodKeypointStyle.r, goodKeypointStyle.g, goodKeypointStyle.b);
		viewer.addPointCloud(scene_good_kp, scene_good_keypoints_color_handler, "scene_good_keypoints");
		// FIX: was PCL_VISUALIZER_FONT_SIZE; point size is intended here.
		viewer.setPointCloudRenderingProperties(visualization::PCL_VISUALIZER_POINT_SIZE, goodKeypointStyle.size, "scene_good_keypoints");
	}
	for (size_t i = 0; i < instances.size(); i++)
	{
		stringstream ss_instance;
		ss_instance << "instance_" << i;

		CloudStyle clusterStyle = style_red;
		visualization::PointCloudColorHandlerCustom<PointType> instance_color_handler(instances[i], clusterStyle.r, clusterStyle.g, clusterStyle.b);
		viewer.addPointCloud(instances[i], instance_color_handler, ss_instance.str());
		viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, clusterStyle.size, ss_instance.str());

		CloudStyle registeredStyles = hypotheses_mask[i] ? style_green : style_cyan;
		ss_instance << "_registered";  // FIX: dropped stray endl that embedded '\n' in the cloud ID
		pcl::visualization::PointCloudColorHandlerCustom<PointType> registered_instance_color_handler(registered_instances[i], registeredStyles.r,
			registeredStyles.g, registeredStyles.b);
		viewer.addPointCloud(registered_instances[i], registered_instance_color_handler, ss_instance.str());
		viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, registeredStyles.size, ss_instance.str());
	}
	while (!viewer.wasStopped())
	{
		viewer.spinOnce();
	}

	return (0);
}
           

有些代码没看懂

3.显示

10.3.1 3D对象识别的全局假设验证