源码分析(七)重采样

laserReceived 中的实现

以下内容在 if(lasers_update_[laser_index]) 的括号内

(原文此处为代码清单的行号 1–28,系排版残留,与正文内容无关。)
// Particle resampling: only run the (expensive) resample step on every
// resample_interval_-th sensor update, not on every scan.
if(!(++resample_count_ % resample_interval_))
{
pf_update_resample(pf_);
resampled = true;
}

// Grab the currently active sample set (after resampling this is the new set).
pf_sample_set_t* set = pf_->sets + pf_->current_set;
ROS_DEBUG("Num samples: %d\n", set->sample_count);

// Publish the "particlecloud" topic with the final particle cloud;
// this runs after the robot has moved.
if (!m_force_update) // set to false in the motion-model update
{
geometry_msgs::PoseArray cloud_msg;
cloud_msg.header.stamp = ros::Time::now();
cloud_msg.header.frame_id = global_frame_id_;
cloud_msg.poses.resize(set->sample_count);
for(int i=0;i<set->sample_count;i++) // number of particles
{
// Convert tf::Pose to geometry_msgs::Pose for the message below;
// pose.v = {x, y, yaw}.
tf::poseTFToMsg(tf::Pose(tf::createQuaternionFromYaw(set->samples[i].pose.v[2]),
tf::Vector3(set->samples[i].pose.v[0],
set->samples[i].pose.v[1], 0) ),
cloud_msg.poses[i]);
}
// The topic was advertised in the constructor.
particlecloud_pub_.publish(cloud_msg);
}


以下内容紧接上面内容,不在if内,但还在laserReceived

(原文此处为代码清单的行号 1–111,系排版残留,与正文内容无关。)
// Skipped right after amcl starts; executed once the robot has moved
// (resampled is set by the resample step, force_publication elsewhere).
if(resampled || force_publication)
{
// Read out the current hypotheses
double max_weight = 0.0;
int max_weight_hyp = -1;
std::vector<amcl_hyp_t> hyps;
hyps.resize(pf_->sets[pf_->current_set].cluster_count);
// Collect mean/covariance/weight for every particle cluster and track
// the highest-weight one, which becomes the pose estimate.
for(int hyp_count = 0; hyp_count < pf_->sets[pf_->current_set].cluster_count; hyp_count++)
{
double weight;
pf_vector_t pose_mean;
pf_matrix_t pose_cov;
if (!pf_get_cluster_stats(pf_, hyp_count, &weight, &pose_mean, &pose_cov))
{
ROS_ERROR("Couldn't get stats on cluster %d", hyp_count);
break;
}

hyps[hyp_count].weight = weight;
hyps[hyp_count].pf_pose_mean = pose_mean;
hyps[hyp_count].pf_pose_cov = pose_cov;

if(hyps[hyp_count].weight > max_weight)
{
max_weight = hyps[hyp_count].weight;
max_weight_hyp = hyp_count;
}
}

if(max_weight > 0.0)
{
ROS_DEBUG("Max weight pose: %.3f %.3f %.3f",
hyps[max_weight_hyp].pf_pose_mean.v[0],
hyps[max_weight_hyp].pf_pose_mean.v[1],
hyps[max_weight_hyp].pf_pose_mean.v[2] );
geometry_msgs::PoseWithCovarianceStamped p;
// Fill in the header
p.header.frame_id = global_frame_id_;
p.header.stamp = laser_scan->header.stamp;
// Copy in the pose
p.pose.pose.position.x = hyps[max_weight_hyp].pf_pose_mean.v[0];
p.pose.pose.position.y = hyps[max_weight_hyp].pf_pose_mean.v[1];
tf::quaternionTFToMsg(tf::createQuaternionFromYaw(hyps[max_weight_hyp].pf_pose_mean.v[2]),
p.pose.pose.orientation);
// Copy in the covariance, converting from 3-D to 6-D
pf_sample_set_t* set = pf_->sets + pf_->current_set;
for(int i=0; i<2; i++)
{
for(int j=0; j<2; j++)
{
// Report the overall filter covariance, rather than the
// covariance for the highest-weight cluster
//p.covariance[6*i+j] = hyps[max_weight_hyp].pf_pose_cov.m[i][j];
p.pose.covariance[6*i+j] = set->cov.m[i][j];
}
}
// Report the overall filter covariance, rather than the
// covariance for the highest-weight cluster
//p.covariance[6*5+5] = hyps[max_weight_hyp].pf_pose_cov.m[2][2];
p.pose.covariance[6*5+5] = set->cov.m[2][2];

pose_pub_.publish(p);
last_published_pose = p;

ROS_DEBUG("New pose: %6.3f %6.3f %6.3f",
hyps[max_weight_hyp].pf_pose_mean.v[0],
hyps[max_weight_hyp].pf_pose_mean.v[1],
hyps[max_weight_hyp].pf_pose_mean.v[2]);

// subtracting base to odom from map to base and send map to odom instead
tf::Stamped<tf::Pose> odom_to_map;
try
{
// Invert the map->base estimate and express it in the odom frame,
// yielding the odom->map transform.
tf::Transform tmp_tf(tf::createQuaternionFromYaw(hyps[max_weight_hyp].pf_pose_mean.v[2]),
tf::Vector3(hyps[max_weight_hyp].pf_pose_mean.v[0],
hyps[max_weight_hyp].pf_pose_mean.v[1],
0.0));
tf::Stamped<tf::Pose> tmp_tf_stamped (tmp_tf.inverse(),
laser_scan->header.stamp,
base_frame_id_);
this->tf_->transformPose(odom_frame_id_,
tmp_tf_stamped,
odom_to_map);
}
catch(tf::TransformException)
{
ROS_DEBUG("Failed to subtract base to odom transform");
return;
}
latest_tf_ = tf::Transform(tf::Quaternion(odom_to_map.getRotation()),
tf::Point(odom_to_map.getOrigin()));
latest_tf_valid_ = true;
if (tf_broadcast_ == true)
{
// We want to send a transform that is good up until a
// tolerance time so that odom can be used
ros::Time transform_expiration = (laser_scan->header.stamp +
transform_tolerance_);
// Broadcast map->odom (the inverse of the odom->map computed above).
tf::StampedTransform tmp_tf_stamped(latest_tf_.inverse(),
transform_expiration,
global_frame_id_, odom_frame_id_);
this->tfb_->sendTransform(tmp_tf_stamped);
sent_first_transform_ = true;
}
}
else
{
ROS_ERROR("No pose!");
}
}

laserReceived到这里只剩一个else if(latest_tf_valid_)

pf_update_resample

(原文此处为代码清单的行号 1–129,系排版残留,与正文内容无关。)
// Resample the particle distribution.
//
// Draws a new sample set (set_b) from the current one (set_a) with
// probability proportional to particle weight, occasionally injecting a
// random pose to recover from localization failure (Augmented MCL: the
// injection probability is driven by the w_fast/w_slow likelihood
// estimators).  KLD-style adaptive sampling (pf_resample_limit) decides
// how many particles the new set actually needs.  On return,
// pf->current_set points at the freshly drawn set.
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;

  double* c;
  double w_diff;

  // Current set: the source of the draw.
  set_a = pf->sets + pf->current_set;
  // Spare set: receives the resampled particles.
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build the cumulative probability table over the weights:
  // c[i] is the sum of the first i particle weights, so a uniform random
  // r in [0, c[n]) selects particle i iff c[i] <= r < c[i+1], i.e. with
  // probability proportional to its weight.
  c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
  c[0] = 0.0;
  for(i=0;i<set_a->sample_count;i++)
    c[i+1] = c[i] + set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling.
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  // Probability of injecting a random pose.  Guard against division by
  // zero: when w_slow is 0 (e.g. it was reset by a previous resample and
  // not yet re-estimated), the original expression produced NaN/-inf;
  // the effective behavior in both cases is "no injection", which is
  // what we make explicit here.
  w_diff = 0.0;
  if(pf->w_slow > 0.0)
    w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if(w_diff < 0.0)
    w_diff = 0.0;

  // Can't (easily) combine the low-variance sampler with KLD adaptive
  // sampling, so draw independently per particle; never exceed the hard
  // particle cap.
  while(set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if(drand48() < w_diff)
    {
      // Recovery: inject a random pose from the user-supplied callback.
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    }
    else
    {
      // Draw a particle from set_a in proportion to its weight: find the
      // interval of the cumulative table containing r, i.e. the largest
      // i with c[i] <= r.  c[] is non-decreasing, so a binary search
      // suffices — this addresses the long-standing TODO about the O(n)
      // linear scan and makes the whole resample O(n log n) rather than
      // O(n^2) in the particle count.
      double r = drand48();  // uniform in [0.0, 1.0)
      int lo = 0;
      int hi = set_a->sample_count;
      while(lo + 1 < hi)
      {
        int mid = lo + (hi - lo) / 2;
        if(c[mid] <= r)
          lo = mid;
        else
          hi = mid;
      }
      i = lo;
      assert(i < set_a->sample_count);

      sample_a = set_a->samples + i;
      assert(sample_a->weight > 0);

      // Copy the selected particle's pose into set_b.
      sample_b->pose = sample_a->pose;
    }

    // Every drawn particle starts out equally weighted, so after the
    // loop `total` equals the number of particles drawn.
    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to the histogram (kd tree) used for adaptive sampling.
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // A large particle count helps early on, but once the cloud starts
    // to converge fewer particles are needed: pf_resample_limit returns
    // the statistically required count for the current histogram, so we
    // stop early and save computation.
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset the recovery estimators so w_diff does not keep growing and
  // flood the filter with random particles.
  if(w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  // Normalize the new set's weights (each becomes 1/total) in
  // preparation for the cluster analysis below.
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-cluster the particles.
  pf_cluster_stats(pf, set_b);

  // Make set_b the current set for the next update cycle; this is the
  // particle cloud that rviz displays.
  pf->current_set = (pf->current_set + 1) % 2;

  pf_update_converged(pf);

  free(c);
  return;
}

如果 w_diff 在某次计算中比较大,那么就更容易注入随机粒子,例如:

(原文此处为输出清单的行号 1–5,系排版残留,与正文内容无关。)
w_diff >0:  0.032770
random pose num: 160

w_diff >0: 0.040729
random pose num: 214

最终收敛后,set_b 中的粒子都来自 set_a:有些对应 set_a 中的同一个粒子(被重复抽中);有些则对应 set_a 中权重相同、但并非同一个的粒子。