The value of the ROS_MASTER_URI environment variable, http://192.168.1.7:11311, will be used to connect to the ROS master.
Initializing global node /matlab_global_node_37037 with NodeURI http://192.168.1.8:20811/
laser_scan_matcher: an incremental laser scan matcher, using Andrea Censi’s Canonical Scan Matcher implementation. It downloads and installs Andrea Censi’s Canonical Scan Matcher [1] locally.
scan_to_cloud_converter: converts LaserScan to PointCloud messages.
struct sm_params {
    /** First scan (reference scan) */
    LDP laser_ref;
    /** Second scan (current/sensor scan) */
    LDP laser_sens;
    /** Initial guess of the transform (x, y, theta) */
    double first_guess[3];

    /** Maximum angular displacement between scans (deg) */
    double max_angular_correction_deg;
    /** Maximum translation between scans (m) */
    double max_linear_correction;

    /** When to stop */
    int max_iterations;
    /** A threshold for stopping. */
    double epsilon_xy;
    /** A threshold for stopping. */
    double epsilon_theta;

    /** Maximum distance for a correspondence to be valid */
    double max_correspondence_dist;
    /** Use smart tricks for finding correspondences.
        Only affects matching speed, not the converged result. */
    int use_corr_tricks;

    /** Restart if error under threshold (0 or 1) */
    int restart;
    /** Threshold for restarting */
    double restart_threshold_mean_error;
    /** Displacement for restarting */
    double restart_dt;
    /** Displacement for restarting */
    double restart_dtheta;

    /* Parameters concerning discarding correspondences.
       THESE ARE MAGIC NUMBERS -- and they need to be tuned. */

    /** Percentage of correspondences to consider: if 0.9, always discard
        the top 10% of correspondences with more error */
    double outliers_maxPerc;

    /** Parameters describing a simple adaptive algorithm for discarding:
        1) Order the errors.
        2) Choose the percentile according to outliers_adaptive_order
           (if it is 0.7, get the 70th percentile).
        3) Define an adaptive threshold by multiplying outliers_adaptive_mult
           with the value of the error at the chosen percentile.
        4) Discard correspondences over the threshold.
        This is useful to be conservative, yet remove the biggest errors. */
    double outliers_adaptive_order; /* 0.7 */
    double outliers_adaptive_mult;  /* 2 */

    /** Do not allow two different correspondences to share a point */
    int outliers_remove_doubles;

    /* Parameters that compute and use point orientation for defining matches. */

    /** For now, a very simple max-distance clustering algorithm is used */
    double clustering_threshold;
    /** Number of neighbour rays used to estimate the orientation */
    int orientation_neighbourhood;

    /** Discard correspondences based on the angles */
    int do_alpha_test;
    double do_alpha_test_thresholdDeg;

    /** I believe this trick is documented in one of the papers by Guttman
        (but I can't find the reference), or perhaps I was told by him directly.
        If you already have a guess of the solution, you can compute the polar
        angle of the points of one scan in the new position. If the polar angle
        is not a monotone function of the readings index, it means that the
        surface is not visible in the next position. If it is not visible,
        then we don't use it for matching. This is confusing without a picture!
        To understand what's going on, make a drawing in which a surface is
        not visible in one of the poses. Implemented in the function
        visibilityTest(). */
    int do_visibility_test;

    /** If 1, use PLICP (point-to-line); if 0, use vanilla ICP (point-to-point). */
    int use_point_to_line_distance;

    /** If 1, the field "true_alpha" is used to compute the incidence beta,
        and the factor (1/cos^2(beta)) is used to weight the impact of each
        correspondence. This works fabulously when doing localization,
        i.e. the first scan has no noise.
        If "true_alpha" is not available, it uses "alpha". */
    int use_ml_weights;
    /** If 1, the field "readings_sigma" is used to weight the correspondence
        by 1/sigma^2 */
    int use_sigma_weights;
    /** Use the method in http://purl.org/censi/2006/icpcov to compute
        the matching covariance. */
    int do_compute_covariance;

    /** Checks that find_correspondences_tricks gives the right answer */
    int debug_verify_tricks;
    /** Pose of sensor with respect to robot: used for computing the first
        estimate given the odometry. */
    double laser[3];

    /** Noise in the scan */
    double sigma;

    /** Mark as invalid ( = don't use ) rays outside of this interval */
    double min_reading, max_reading;

    /* Parameters specific to GPM (unfinished :-/ ) */
    double gpm_theta_bin_size_deg;
    double gpm_extend_range_deg;
    int gpm_interval;
    /* Parameter specific to HSM (unfinished :-/ ) */
    struct hsm_params hsm;
};
struct sm_result {
    /** 1 if the result is valid */
    int valid;
    /** Scan matching result (x, y, theta) */
    double x[3];
    /** Number of iterations done */
    int iterations;
    /** Number of valid correspondences in the end */
    int nvalid;
    /** Total correspondence error */
    double error;
    /** Fields used for covariance computation */
#ifndef RUBY
    gsl_matrix *cov_x_m;
    gsl_matrix *dx_dy1_m;
    gsl_matrix *dx_dy2_m;
#endif
};
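To see how these two structs work together, here is a minimal sketch of driving the matcher. It assumes csm is installed and that including csm/csm_all.h brings in sm_icp(), as the laser_scan_matcher node does; struct sm_params, struct sm_result and sm_icp() are real csm names, while match_scans(), its arguments, and the parameter values are assumptions for this sketch. Building the two LDP scans from range data is left to the caller.

/* A minimal sketch, not the csm or laser_scan_matcher implementation. */
#include <csm/csm_all.h>
#include <string.h>

int match_scans(LDP ref, LDP sens, const double first_guess[3], double pose_out[3])
{
    struct sm_params params;
    struct sm_result result;
    memset(&params, 0, sizeof(params));

    params.laser_ref  = ref;
    params.laser_sens = sens;
    params.first_guess[0] = first_guess[0];   /* x */
    params.first_guess[1] = first_guess[1];   /* y */
    params.first_guess[2] = first_guess[2];   /* theta */

    /* A few representative settings (values assumed for this sketch;
       every remaining field should also be given a sensible value,
       e.g. the way laser_scan_matcher initializes its parameters). */
    params.max_angular_correction_deg = 45.0;
    params.max_linear_correction      = 0.5;
    params.max_iterations             = 10;
    params.epsilon_xy                 = 1e-6;
    params.epsilon_theta              = 1e-6;
    params.max_correspondence_dist    = 0.3;
    params.use_corr_tricks            = 1;
    params.outliers_maxPerc           = 0.9;
    params.outliers_adaptive_order    = 0.7;
    params.outliers_adaptive_mult     = 2.0;
    params.use_point_to_line_distance = 1;    /* PLICP */
    params.min_reading                = 0.1;  /* sensor-specific range limits */
    params.max_reading                = 30.0;

    sm_icp(&params, &result);                 /* run the matcher */

    if (!result.valid)
        return -1;                            /* matching did not converge */

    pose_out[0] = result.x[0];
    pose_out[1] = result.x[1];
    pose_out[2] = result.x[2];
    return 0;
}

The returned pose is the rigid transform from the reference scan to the current scan; if do_compute_covariance were set to 1, the covariance would additionally be available in result.cov_x_m.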
struct correspondence {
    /** 1 if this correspondence is valid */
    int valid;
    /** Closest point in the other scan. */
    int j1;
    /** Second closest point in the other scan. */
    int j2;
    /** Type of correspondence (point to point, or point to line) */
    enum { corr_pp = 0, corr_pl = 1 } type;
    /** Squared distance from p(i) to point j1 */
    double dist2_j1;
};
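As a small illustration of how this per-ray record is consumed, the hedged sketch below walks the corr array that csm keeps inside each laser_data scan (the corr and nrays field names are from the csm sources) and accumulates the error of the valid matches; summarize_correspondences() itself is a hypothetical helper, not a csm function.

#include <csm/csm_all.h>
#include <stdio.h>

/* Hypothetical helper: count valid correspondences of a scan and report
   their mean squared distance, reading the corr[] array of an LDP. */
void summarize_correspondences(LDP ld)
{
    int i, nvalid = 0;
    double sum_dist2 = 0.0;

    for (i = 0; i < ld->nrays; i++) {
        struct correspondence *c = &(ld->corr[i]);
        if (!c->valid)
            continue;                 /* ray i found no usable match */
        nvalid++;
        sum_dist2 += c->dist2_j1;     /* squared distance to closest point j1 */
    }

    if (nvalid > 0)
        printf("%d valid correspondences, mean squared error %f\n",
               nvalid, sum_dist2 / nvalid);
    else
        printf("no valid correspondences\n");
}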
Using the gsl_vector type
#include <stdio.h>
#include <math.h>
#include <gsl/gsl_vector.h>

int main(void)
{
    int n = 10, i;
    gsl_vector *v = gsl_vector_alloc(n);

    /* write and then read back the vector elements */
    for (i = 0; i < n; i++)
        gsl_vector_set(v, i, pow(i + 1, 2));
    for (i = 0; i < n; i++)
        printf("The %d element of v is %f\n", i, gsl_vector_get(v, i));

    gsl_vector_free(v);
    return 0;
}
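Since sm_result stores its covariance in gsl_matrix objects (cov_x_m and friends), a matching sketch for the matrix API may help. It only uses standard GSL calls (gsl_matrix_alloc/set/get/set_identity/free); the helper print_cov3x3() and the placeholder values are assumptions for the demo.

#include <stdio.h>
#include <gsl/gsl_matrix.h>

/* Hypothetical helper: print a 3x3 covariance matrix such as the
   cov_x_m field of struct sm_result (rows/columns are x, y, theta). */
void print_cov3x3(const gsl_matrix *cov)
{
    size_t r, c;
    for (r = 0; r < 3; r++) {
        for (c = 0; c < 3; c++)
            printf("%10.6f ", gsl_matrix_get(cov, r, c));
        printf("\n");
    }
}

int main(void)
{
    gsl_matrix *m = gsl_matrix_alloc(3, 3);
    gsl_matrix_set_identity(m);        /* placeholder values for the demo */
    gsl_matrix_set(m, 0, 1, 0.5);
    print_cov3x3(m);
    gsl_matrix_free(m);
    return 0;
}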
// Build up cumulative probability table for resampling.
// TODO: Replace this with a more efficient procedure
// (e.g., GeneralDiscreteDistributions.html)
// Integrate the particle weights to obtain the cumulative distribution,
// which is used below for resampling.
c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
c[0] = 0.0;
for(i=0;i<set_a->sample_count;i++)
  c[i+1] = c[i] + set_a->samples[i].weight;
// c[1] = weight of the first particle,
// c[2] = weight of the first plus the second particle, and so on;
// in general c[i] is the sum of the weights of the first i particles.

// Create the kd tree for adaptive sampling
pf_kdtree_clear(set_b->kdtree);

// Draw samples from set a to create set b.
total = 0;
set_b->sample_count = 0;

// Probability of injecting a random particle
w_diff = 1.0 - pf->w_fast / pf->w_slow;
if(w_diff < 0.0)
  w_diff = 0.0;
// Can't (easily) combine low-variance sampler with KLD adaptive
// sampling, so we'll take the more traditional route.
/*
// Low-variance resampler, taken from Probabilistic Robotics, p110
count_inv = 1.0/set_a->sample_count;
r = drand48() * count_inv;
c = set_a->samples[0].weight;
i = 0;
m = 0;
*/

// Make sure the resampled set (set_b) never exceeds the configured
// maximum number of particles.
while(set_b->sample_count < pf->max_samples)
{
  sample_b = set_b->samples + set_b->sample_count++;

  // If the random draw is below w_diff, inject a random particle into set_b.
  if(drand48() < w_diff)
    // pf->random_pose_fn is a function pointer that returns a random pose.
    sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
  else
  {
    // Can't (easily) combine low-variance sampler with KLD adaptive
    // sampling, so we'll take the more traditional route.
    /*
    // Low-variance resampler, taken from Probabilistic Robotics, p110
    U = r + m * count_inv;
    while(U>c)
    {
      i++;
      // Handle wrap-around by resetting counters and picking a new random
      // number
      if(i >= set_a->sample_count)
      {
        r = drand48() * count_inv;
        c = set_a->samples[0].weight;
        i = 0;
        m = 0;
        U = r + m * count_inv;
        continue;
      }
      c += set_a->samples[i].weight;
    }
    m++;
    */
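The cumulative table c[] built above is what replaces the commented-out low-variance sampler: the AMCL code draws a uniform random number and scans c[] for the interval that contains it, which selects particle i with probability proportional to its weight. The sketch below shows that inverse-CDF step in isolation; draw_index() and the weights[] argument are hypothetical and not part of the amcl API.

#include <stdlib.h>

/* Hypothetical helper: pick a particle index by inverting a cumulative
   weight table, mirroring how pf_update_resample() uses c[].
   weights[] has n non-negative entries. */
static int draw_index(const double *weights, int n)
{
    double *c = (double*)malloc(sizeof(double) * (n + 1));
    double r;
    int i, picked = n - 1;

    c[0] = 0.0;                            /* c[i] = sum of the first i weights */
    for (i = 0; i < n; i++)
        c[i + 1] = c[i] + weights[i];

    r = drand48() * c[n];                  /* uniform in [0, total weight) */
    for (i = 0; i < n; i++) {
        if (c[i] <= r && r < c[i + 1]) {   /* r falls into bin i */
            picked = i;
            break;
        }
    }

    free(c);
    return picked;
}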
// Compute the required number of samples, given that there are k histogram
// bins with samples in them (KLD-sampling bound).
int pf_resample_limit(pf_t *pf, int k)
{
  double a, b, c, x;
  int n;

  if (k <= 1)
    return pf->max_samples;

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  // pop_z is the upper standard normal quantile (3 by default)
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  // pop_err is the maximum allowed error in the KL-divergence (0.01 by default)
  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples)
    return pf->min_samples;
  if (n > pf->max_samples)
    return pf->max_samples;

  return n;
}
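For intuition, the bound implemented above is the KLD-sampling sample-size formula: with k occupied bins, error bound pop_err and quantile pop_z, it evaluates n = ceil( (k-1)/(2*pop_err) * (1 - 2/(9(k-1)) + sqrt(2/(9(k-1)))*pop_z)^3 ), then clamps the result to [min_samples, max_samples]. The standalone sketch below re-evaluates that expression for a few values of k using the defaults quoted above (pop_err = 0.01, pop_z = 3); kld_limit() is a hypothetical helper, not part of the amcl API, and it omits the clamping done by pf_resample_limit().

#include <math.h>
#include <stdio.h>

/* Hypothetical standalone version of the KLD-sampling bound used in
   pf_resample_limit(), with pop_err and pop_z passed in explicitly. */
static int kld_limit(int k, double pop_err, double pop_z)
{
    double b, c, x;
    if (k <= 1)
        return 1;
    b = 2.0 / (9.0 * (k - 1));
    c = sqrt(2.0 / (9.0 * (k - 1))) * pop_z;
    x = 1.0 - b + c;
    return (int) ceil((k - 1) / (2.0 * pop_err) * x * x * x);
}

int main(void)
{
    int k;
    /* With pop_err = 0.01 and pop_z = 3, the required sample count grows
       roughly linearly with the number of occupied bins k, which is what
       makes the particle count adapt to how spread out the belief is. */
    for (k = 2; k <= 512; k *= 2)
        printf("k = %4d  ->  n = %d\n", k, kld_limit(k, 0.01, 3.0));
    return 0;
}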